parent ee837c1132
commit 194271df50
9 changed files with 735 additions and 75 deletions
@@ -0,0 +1,131 @@
#include "precomp.hpp"
#include "layers.hpp"
#include <math.h>

namespace cv
{
namespace dnn
{

struct ReLUFunctor
{
    float negative_slope;

    ReLUFunctor(LayerParams &params)
    {
        if (params.has("negative_slope"))
            negative_slope = params.get<float>("negative_slope");
        else
            negative_slope = 0.f;
    }

    inline float operator()(float x)
    {
        return (x >= 0) ? x : negative_slope * x;
    }
};
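
A quick standalone check of the functor's math (plain C++, hypothetical slope values, independent of LayerParams): a nonzero negative_slope gives the "leaky" ReLU, and slope zero reduces to the standard ReLU.

#include <cassert>

// Mirrors ReLUFunctor::operator(): identity for x >= 0, scaled by the slope otherwise.
static float leaky_relu(float x, float slope) { return (x >= 0) ? x : slope * x; }

int main()
{
    assert(leaky_relu(2.0f, 0.1f) == 2.0f);   // positive input passes through
    assert(leaky_relu(-2.0f, 0.1f) == -0.2f); // negative input scaled by the slope
    assert(leaky_relu(-2.0f, 0.0f) == 0.0f);  // slope 0 == plain ReLU
    return 0;
}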

struct TanHFunctor
{
    TanHFunctor(LayerParams &params) {}

    inline float operator()(float x)
    {
        return tanh(x);
    }
};

REGISTER_LAYER_CLASS(ReLU, ElementWiseLayer<ReLUFunctor>)
REGISTER_LAYER_CLASS(TanH, ElementWiseLayer<TanHFunctor>)
REGISTER_LAYER_CLASS(Convolution, ConvolutionLayer)
REGISTER_LAYER_CLASS(Pooling, PoolingLayer)
REGISTER_LAYER_CLASS(InnerProduct, FullyConnectedLayer)

//////////////////////////////////////////////////////////////////////////

PoolingLayer::PoolingLayer(LayerParams &params)
{
}

void PoolingLayer::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
{
}

void PoolingLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
{
}

//////////////////////////////////////////////////////////////////////////

ConvolutionLayer::ConvolutionLayer(LayerParams &params)
{
}

void ConvolutionLayer::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
{
}

// Caffe-style im2col: unrolls each kernel_h x kernel_w receptive field of the
// (optionally padded) image into one column of data_col, zero-filling samples
// that fall outside the image.
template <typename Dtype>
void im2col_cpu(const Dtype* data_im, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    Dtype* data_col)
{
    int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1;
    int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1;
    int channels_col = channels * kernel_h * kernel_w;
    for (int c = 0; c < channels_col; ++c) {
        int w_offset = c % kernel_w;
        int h_offset = (c / kernel_w) % kernel_h;
        int c_im = c / kernel_h / kernel_w;
        for (int h = 0; h < height_col; ++h) {
            for (int w = 0; w < width_col; ++w) {
                int h_pad = h * stride_h - pad_h + h_offset;
                int w_pad = w * stride_w - pad_w + w_offset;
                if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width)
                    data_col[(c * height_col + h) * width_col + w] =
                        data_im[(c_im * height + h_pad) * width + w_pad];
                else
                    data_col[(c * height_col + h) * width_col + w] = 0;
            }
        }
    }
}
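
im2col_cpu is the standard unrolling trick borrowed from Caffe: once every receptive field is a column, the convolution forward pass (still a stub below) reduces to one matrix multiply of the filter weights against the column buffer. A minimal standalone driver, assuming im2col_cpu above is in scope (all sizes hypothetical):

#include <cstdio>
#include <vector>

int main()
{
    const int C = 1, H = 4, W = 4, kH = 3, kW = 3;      // one 4x4 channel, 3x3 kernel
    const int outH = (H - kH) + 1, outW = (W - kW) + 1; // stride 1, no padding -> 2x2
    std::vector<float> im(C * H * W);
    for (int i = 0; i < (int)im.size(); i++) im[i] = (float)i;
    std::vector<float> col(C * kH * kW * outH * outW);  // 9 rows x 4 columns
    im2col_cpu(im.data(), C, H, W, kH, kW, 0, 0, 1, 1, col.data());
    // Row 0 of the 9x4 column matrix holds the top-left element of each patch;
    // convolution with N filters is then an (N x 9) * (9 x 4) product.
    printf("%.0f %.0f %.0f %.0f\n", col[0], col[1], col[2], col[3]); // prints: 0 1 4 5
    return 0;
}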

void ConvolutionLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
{
    CV_Assert(inputs.size() == outputs.size());

    for (size_t i = 0; i < outputs.size(); i++)
    {
    }
}

//////////////////////////////////////////////////////////////////////////

FullyConnectedLayer::FullyConnectedLayer(LayerParams &params)
{
}

void FullyConnectedLayer::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
{
}

void FullyConnectedLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
{
}

}
}
@@ -0,0 +1,76 @@
#ifndef __OPENCV_DNN_LAYERS_HPP__
#define __OPENCV_DNN_LAYERS_HPP__
#include <opencv2/dnn.hpp>

namespace cv
{
namespace dnn
{

template<typename Func>
class ElementWiseLayer : public Layer
{
    Func func;
public:
    ElementWiseLayer(LayerParams &_params) : func(_params) {}

    void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
    {
        CV_Assert(inputs.size() == 1);
        outputs.resize(1);       // guard against an empty output vector before writing
        outputs[0] = *inputs[0]; // shallow copy: output shares the input buffer, so forward() runs in place
    }

    void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
    {
        CV_Assert(inputs.size() == 1 && outputs.size() == 1);
        CV_Assert(inputs[0]->getMatRef().data == outputs[0].getMatRef().data);

        float *data = outputs[0].getMatRef().ptr<float>();

        //Vec4i shape = outputs[0].shape();
        //CV_Assert(pitch[i] == shape[i] * sizeof(float));

        // Apply the functor to every element in place.
        for (size_t i = 0; i < outputs[0].total(); i++)
            data[i] = func(data[i]);
    }
};
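
The two functors in the source file define the extension contract ElementWiseLayer expects: a constructor taking LayerParams and a float-to-float operator(). Any struct meeting it plugs straight in; a hypothetical sigmoid activation, sketched on that assumption (not part of this commit), would be:

struct SigmoidFunctor
{
    SigmoidFunctor(LayerParams &params) {} // no parameters to read

    inline float operator()(float x)
    {
        return 1.f / (1.f + expf(-x)); // logistic function; expf comes from <math.h>
    }
};

// Registered the same way as ReLU and TanH:
// REGISTER_LAYER_CLASS(Sigmoid, ElementWiseLayer<SigmoidFunctor>)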

class PoolingLayer : public Layer
{
    int type;
    int strideH, strideW;
    int sizeH, sizeW;

public:
    PoolingLayer(LayerParams &params);
    void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
    void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};

class ConvolutionLayer : public Layer
{
    int groups;
    int strideH, strideW;
    int sizeH, sizeW;

public:
    ConvolutionLayer(LayerParams &params);
    void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
    void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};

class FullyConnectedLayer : public Layer
{
    int numOutputs;

public:
    FullyConnectedLayer(LayerParams &params);
    void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
    void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
}
}

#endif