Added initModule() procedure to explicitly init builtin layers

pull/265/head
Vitaliy Lyudvichenko 10 years ago
parent 160d86440c
commit 06f949a590
  1. modules/dnn/include/opencv2/dnn/dnn.hpp (50 lines changed)
  2. modules/dnn/include/opencv2/dnn/layer.hpp (71 lines changed)
  3. modules/dnn/src/init.cpp (64 lines changed)
  4. modules/dnn/src/layers/blank_layer.hpp (8 lines changed)
  5. modules/dnn/src/layers/concat_layer.cpp (15 lines changed)
  6. modules/dnn/src/layers/concat_layer.hpp (20 lines changed)
  7. modules/dnn/src/layers/convolution_layer.cpp (46 lines changed)
  8. modules/dnn/src/layers/convolution_layer.hpp (50 lines changed)
  9. modules/dnn/src/layers/deconvolution_layer.cpp (0 lines changed)
  10. modules/dnn/src/layers/elementwise_layers.hpp (22 lines changed)
  11. modules/dnn/src/layers/fully_connected_layer.cpp (22 lines changed)
  12. modules/dnn/src/layers/fully_connected_layer.hpp (26 lines changed)
  13. modules/dnn/src/layers/lrn_layer.cpp (29 lines changed)
  14. modules/dnn/src/layers/lrn_layer.hpp (34 lines changed)
  15. modules/dnn/src/layers/mvn_layer.cpp (17 lines changed)
  16. modules/dnn/src/layers/mvn_layer.hpp (24 lines changed)
  17. modules/dnn/src/layers/pooling_layer.cpp (31 lines changed)
  18. modules/dnn/src/layers/pooling_layer.hpp (37 lines changed)
  19. modules/dnn/src/layers/reshape_layer.cpp (23 lines changed)
  20. modules/dnn/src/layers/reshape_layer.hpp (30 lines changed)
  21. modules/dnn/src/layers/slice_layer.cpp (19 lines changed)
  22. modules/dnn/src/layers/slice_layer.hpp (26 lines changed)
  23. modules/dnn/src/layers/softmax_layer.cpp (16 lines changed)
  24. modules/dnn/src/layers/softmax_layer.hpp (21 lines changed)
  25. modules/dnn/src/layers/split_layer.cpp (18 lines changed)
  26. modules/dnn/src/layers/split_layer.hpp (25 lines changed)
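
What the commit enables, shown as a hedged sketch: after an explicit cv::dnn::initModule() call, any builtin layer can be created by name through the layer factory. The "Softmax" type name and the createLayerInstance signature are taken from the diff below; passing a default-constructed LayerParams here is an assumption for illustration.

    #include <opencv2/dnn.hpp>

    int main()
    {
        // Explicit init; idempotent because init.status guards re-entry.
        cv::dnn::initModule();

        cv::dnn::LayerParams params; // assumed acceptable defaults for Softmax
        cv::Ptr<cv::dnn::Layer> softmax =
            cv::dnn::LayerRegister::createLayerInstance("Softmax", params);

        return softmax.empty() ? 1 : 0;
    }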

@@ -10,6 +10,8 @@ namespace cv
{
namespace dnn
{
CV_EXPORTS void initModule();
class CV_EXPORTS LayerParams : public Dict
{
public:
@@ -90,56 +92,10 @@ namespace dnn
CV_EXPORTS Ptr<Importer> createTorchImporter(const String &filename, bool isBinary = true);
CV_EXPORTS Blob readTorchMat(const String &filename, bool isBinary = true);
//The layer factory allows creating instances of registered layers.
class CV_EXPORTS LayerRegister
{
public:
typedef Ptr<Layer>(*Constuctor)(LayerParams &params);
static void registerLayer(const String &type, Constuctor constructor);
static void unregisterLayer(const String &type);
static Ptr<Layer> createLayerInstance(const String &type, LayerParams& params);
private:
LayerRegister();
struct Impl;
static Ptr<Impl> impl;
};
//allows a layer to be registered automatically at module load time
struct _LayerRegisterer
{
String type;
_LayerRegisterer(const String &type, LayerRegister::Constuctor constuctor)
{
this->type = type;
LayerRegister::registerLayer(type, constuctor);
}
~_LayerRegisterer()
{
LayerRegister::unregisterLayer(type);
}
};
//registers a layer at module load time
#define REGISTER_LAYER_FUNC(type, constuctorFunc) \
static _LayerRegisterer __layerRegisterer_##type(#type, constuctorFunc);
#define REGISTER_LAYER_CLASS(type, class) \
Ptr<Layer> __layerRegisterer_func_##type(LayerParams &params) \
{ return Ptr<Layer>(new class(params)); } \
static _LayerRegisterer __layerRegisterer_##type(#type, __layerRegisterer_func_##type);
}
}
#include <opencv2/dnn/layer.hpp>
#include <opencv2/dnn/dnn.inl.hpp>
#endif /* __OPENCV_DNN_DNN_HPP__ */

@@ -0,0 +1,71 @@
#ifndef __OPENCV_DNN_LAYER_HPP__
#define __OPENCV_DNN_LAYER_HPP__
#include <opencv2/dnn.hpp>
namespace cv
{
namespace dnn
{
//The layer factory allows creating instances of registered layers.
class CV_EXPORTS LayerRegister
{
public:
typedef Ptr<Layer>(*Constuctor)(LayerParams &params);
static void registerLayer(const String &type, Constuctor constructor);
static void unregisterLayer(const String &type);
static Ptr<Layer> createLayerInstance(const String &type, LayerParams& params);
private:
LayerRegister();
struct Impl;
static Ptr<Impl> impl;
};
template<typename LayerClass>
Ptr<Layer> _layerDynamicRegisterer(LayerParams &params)
{
return Ptr<Layer>(new LayerClass(params));
}
#define REG_RUNTIME_LAYER_FUNC(type, constuctorFunc) \
LayerRegister::registerLayer(#type, constuctorFunc);
#define REG_RUNTIME_LAYER_CLASS(type, class) \
LayerRegister::registerLayer(#type, _layerDynamicRegisterer<class>);
//allows a layer to be registered automatically at module load time
struct _LayerStaticRegisterer
{
String type;
_LayerStaticRegisterer(const String &type, LayerRegister::Constuctor constuctor)
{
this->type = type;
LayerRegister::registerLayer(type, constuctor);
}
~_LayerStaticRegisterer()
{
LayerRegister::unregisterLayer(type);
}
};
//registers a layer constructor at module load time
#define REG_STATIC_LAYER_FUNC(type, constuctorFunc) \
static _LayerStaticRegisterer __LayerStaticRegisterer_##type(#type, constuctorFunc);
//registers a layer class at module load time
#define REG_STATIC_LAYER_CLASS(type, class) \
Ptr<Layer> __LayerStaticRegisterer_func_##type(LayerParams &params) \
{ return Ptr<Layer>(new class(params)); } \
static _LayerStaticRegisterer __LayerStaticRegisterer_##type(#type, __LayerStaticRegisterer_func_##type);
}
}
#endif
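
For user-defined layers, the new header offers both a runtime and a static registration path. A minimal sketch, assuming a Layer subclass needs only a LayerParams constructor plus allocate/forward (mirroring BlankLayer later in this diff); MyPassLayer is hypothetical, not part of the commit:

    #include <vector>
    #include <opencv2/dnn/layer.hpp>

    using namespace cv;
    using namespace cv::dnn;

    // Hypothetical pass-through layer modeled on BlankLayer.
    class MyPassLayer : public Layer
    {
    public:
        MyPassLayer(LayerParams&) {}

        void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
        {
            outputs.resize(inputs.size());
        }

        void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
        {
            for (size_t i = 0; i < inputs.size(); i++)
                outputs[i] = *inputs[i]; // shallow copy, as in BlankLayer
        }
    };

    // Runtime path: call this yourself, e.g. right after initModule().
    void registerMyLayers()
    {
        REG_RUNTIME_LAYER_CLASS(MyPass, MyPassLayer)
    }

    // Static path: registers at module load time via _LayerStaticRegisterer.
    REG_STATIC_LAYER_CLASS(MyPassStatic, MyPassLayer)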

@@ -0,0 +1,64 @@
#include "precomp.hpp"
#include "layers/concat_layer.hpp"
#include "layers/convolution_layer.hpp"
#include "layers/blank_layer.hpp"
#include "layers/elementwise_layers.hpp"
#include "layers/fully_connected_layer.hpp"
#include "layers/lrn_layer.hpp"
#include "layers/mvn_layer.hpp"
#include "layers/pooling_layer.hpp"
#include "layers/reshape_layer.hpp"
#include "layers/slice_layer.hpp"
#include "layers/softmax_layer.hpp"
#include "layers/split_layer.hpp"
namespace cv
{
namespace dnn
{
struct AutoInitializer
{
bool status;
AutoInitializer() : status(false)
{
cv::dnn::initModule();
}
};
static AutoInitializer init;
void initModule()
{
if (init.status)
return;
REG_RUNTIME_LAYER_CLASS(Slice, SliceLayer)
REG_RUNTIME_LAYER_CLASS(Softmax, SoftMaxLayer)
REG_RUNTIME_LAYER_CLASS(Split, SplitLayer)
REG_RUNTIME_LAYER_CLASS(Reshape, ReshapeLayer)
REG_STATIC_LAYER_FUNC(Flatten, createFlattenLayer)
REG_RUNTIME_LAYER_CLASS(Pooling, PoolingLayer)
REG_RUNTIME_LAYER_CLASS(MVN, MVNLayer)
REG_RUNTIME_LAYER_CLASS(LRN, LRNLayer)
REG_RUNTIME_LAYER_CLASS(InnerProduct, FullyConnectedLayer)
REG_RUNTIME_LAYER_CLASS(ReLU, ElementWiseLayer<ReLUFunctor>)
REG_RUNTIME_LAYER_CLASS(TanH, ElementWiseLayer<TanHFunctor>)
REG_RUNTIME_LAYER_CLASS(BNLL, ElementWiseLayer<BNLLFunctor>)
REG_RUNTIME_LAYER_CLASS(Power, ElementWiseLayer<PowerFunctor>)
REG_RUNTIME_LAYER_CLASS(AbsVal, ElementWiseLayer<AbsValFunctor>)
REG_RUNTIME_LAYER_CLASS(Sigmoid, ElementWiseLayer<SigmoidFunctor>)
REG_RUNTIME_LAYER_CLASS(Dropout, BlankLayer)
REG_RUNTIME_LAYER_CLASS(Convolution, ConvolutionLayer)
REG_RUNTIME_LAYER_CLASS(Deconvolution, DeConvolutionLayer)
REG_RUNTIME_LAYER_CLASS(Concat, ConcatLayer)
init.status = true;
}
}
}
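
Design note: the static AutoInitializer keeps the old load-at-startup behavior, while the init.status guard makes an explicit initModule() call cheap and repeatable. As an illustration of the mechanism (this expansion is implied by the macros in layer.hpp, not code from the commit), each REG_RUNTIME_LAYER_CLASS line above becomes a factory registration:

    // REG_RUNTIME_LAYER_CLASS(Concat, ConcatLayer) expands to:
    LayerRegister::registerLayer("Concat", _layerDynamicRegisterer<ConcatLayer>);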

@@ -1,5 +1,6 @@
#ifndef __OPENCV_DNN_LAYERS_BLANK_LAYER_HPP__
#define __OPENCV_DNN_LAYERS_BLANK_LAYER_HPP__
#include "../precomp.hpp"
#include "layers_common.hpp"
namespace cv
{
@@ -27,7 +28,6 @@ namespace dnn
outputs[i] = *inputs[i];
}
};
REGISTER_LAYER_CLASS(Dropout, BlankLayer)
}
}
}
#endif

@@ -1,24 +1,11 @@
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "concat_layer.hpp"
namespace cv
{
namespace dnn
{
class ConcatLayer : public Layer
{
int axis;
public:
ConcatLayer(LayerParams& params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
REGISTER_LAYER_CLASS(Concat, ConcatLayer)
ConcatLayer::ConcatLayer(LayerParams &params)
{
axis = params.get<int>("axis", 1);

@@ -0,0 +1,20 @@
#ifndef __OPENCV_DNN_LAYERS_CONCAT_LAYER_HPP__
#define __OPENCV_DNN_LAYERS_CONCAT_LAYER_HPP__
#include "../precomp.hpp"
namespace cv
{
namespace dnn
{
class ConcatLayer : public Layer
{
int axis;
public:
ConcatLayer(LayerParams& params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
}
}
#endif

@@ -1,56 +1,12 @@
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "convolution_layer.hpp"
#include "im2col.hpp"
namespace cv
{
namespace dnn
{
//TODO: perform convolution and bias addition simultaneously for cache optimization
class ConvolutionLayer : public Layer
{
protected:
bool bias;
int numOutput, group;
int padH, padW;
int kerH, kerW;
int strideH, strideW;
int inpH, inpW, inpCn;
int outH, outW, outCn;
int topH, topW, topCn; //switched between inp/out on deconv/conv
int inpGroupCn, outGroupCn;
int ksize;
Mat colMat, biasOnesMat;
inline bool is1x1() const;
virtual void computeInpOutShape(const Blob &inpBlob);
void im2col(Blob &inpBlob, int imNum, int cnGroup);
public:
ConvolutionLayer() {}
ConvolutionLayer(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
class DeConvolutionLayer : public ConvolutionLayer
{
protected:
void computeInpOutShape(const Blob &inpBlob);
void col2im(Mat &dstMat);
public:
DeConvolutionLayer(LayerParams &params) : ConvolutionLayer(params) {}
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
REGISTER_LAYER_CLASS(Convolution, ConvolutionLayer)
REGISTER_LAYER_CLASS(Deconvolution, DeConvolutionLayer)
ConvolutionLayer::ConvolutionLayer(LayerParams &params)
{
getKernelParams(params, kerH, kerW, padH, padW, strideH, strideW);

@@ -0,0 +1,50 @@
#ifndef __OPENCV_DNN_LAYERS_CONVOLUTION_LAYER_HPP__
#define __OPENCV_DNN_LAYERS_CONVOLUTION_LAYER_HPP__
#include "../precomp.hpp"
namespace cv
{
namespace dnn
{
//TODO: perform convolution and bias addition simultaneously for cache optimization
class ConvolutionLayer : public Layer
{
protected:
bool bias;
int numOutput, group;
int padH, padW;
int kerH, kerW;
int strideH, strideW;
int inpH, inpW, inpCn;
int outH, outW, outCn;
int topH, topW, topCn; //switched between inp/out on deconv/conv
int inpGroupCn, outGroupCn;
int ksize;
Mat colMat, biasOnesMat;
inline bool is1x1() const;
virtual void computeInpOutShape(const Blob &inpBlob);
void im2col(Blob &inpBlob, int imNum, int cnGroup);
public:
ConvolutionLayer() {}
ConvolutionLayer(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
class DeConvolutionLayer : public ConvolutionLayer
{
protected:
void computeInpOutShape(const Blob &inpBlob);
void col2im(Mat &dstMat);
public:
DeConvolutionLayer(LayerParams &params) : ConvolutionLayer(params) {}
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
}
}
#endif

@@ -1,16 +1,19 @@
#ifndef __OPENCV_DNN_LAYERS_ELEMENTWISE_LAYERS_HPP__
#define __OPENCV_DNN_LAYERS_ELEMENTWISE_LAYERS_HPP__
#include "../precomp.hpp"
#include "layers_common.hpp"
#include <cmath>
using std::abs;
using std::exp;
using std::tanh;
using std::pow;
namespace cv
{
namespace dnn
{
using std::abs;
using std::exp;
using std::tanh;
using std::pow;
template<typename Func>
class ElementWiseLayer : public Layer
{
@@ -135,13 +138,6 @@ namespace dnn
return log((TFloat)1 + exp(-abs(x)));
}
};
REGISTER_LAYER_CLASS(ReLU, ElementWiseLayer<ReLUFunctor>)
REGISTER_LAYER_CLASS(TanH, ElementWiseLayer<TanHFunctor>)
REGISTER_LAYER_CLASS(BNLL, ElementWiseLayer<BNLLFunctor>)
REGISTER_LAYER_CLASS(Power, ElementWiseLayer<PowerFunctor>)
REGISTER_LAYER_CLASS(AbsVal, ElementWiseLayer<AbsValFunctor>)
REGISTER_LAYER_CLASS(Sigmoid, ElementWiseLayer<SigmoidFunctor>)
}
}
}
#endif
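
The functor design makes new activations cheap to add. A hedged sketch: the templated operator() shape is inferred from the BNLL fragment above, and SquareFunctor is hypothetical; whether functors also need a LayerParams constructor is not visible in this diff.

    // Hypothetical elementwise activation: y = x * x
    struct SquareFunctor
    {
        template<typename TFloat>
        inline TFloat operator()(TFloat x)
        {
            return x * x;
        }
    };

    // It would then be registered like the builtins in init.cpp:
    // REG_RUNTIME_LAYER_CLASS(Square, ElementWiseLayer<SquareFunctor>)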

@@ -1,31 +1,11 @@
#include "../precomp.hpp"
#include "layers_common.hpp"
#include <iostream>
#include "fully_connected_layer.hpp"
namespace cv
{
namespace dnn
{
class FullyConnectedLayer : public Layer
{
bool bias;
int numOutputs;
int axis_, axis;
int innerSize;
void reshape(const Blob &inp, Blob &out);
public:
FullyConnectedLayer(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
REGISTER_LAYER_CLASS(InnerProduct, FullyConnectedLayer)
FullyConnectedLayer::FullyConnectedLayer(LayerParams &params)
{
numOutputs = params.get<int>("num_output");

@@ -0,0 +1,26 @@
#ifndef __OPENCV_DNN_LAYERS_FULLY_CONNECTED_LAYER_HPP__
#define __OPENCV_DNN_LAYERS_FULLY_CONNECTED_LAYER_HPP__
#include "../precomp.hpp"
namespace cv
{
namespace dnn
{
class FullyConnectedLayer : public Layer
{
bool bias;
int numOutputs;
int axis_, axis;
int innerSize;
void reshape(const Blob &inp, Blob &out);
public:
FullyConnectedLayer(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
}
}
#endif

@@ -1,5 +1,6 @@
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "lrn_layer.hpp"
#include <opencv2/imgproc.hpp>
#include <algorithm>
@@ -7,34 +8,6 @@ namespace cv
{
namespace dnn
{
class LRNLayer : public Layer
{
enum
{
CHANNEL_NRM,
SPATIAL_NRM,
SPATIAL_CONTRAST_NRM //cuda-convnet feature
} type;
int size;
double alpha, beta;
Blob bufBlob;
void channelNoramlization(Blob &src, Blob &dst);
void spatialNormalization(Blob &src, Blob &dst);
public:
LRNLayer(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
REGISTER_LAYER_CLASS(LRN, LRNLayer)
LRNLayer::LRNLayer(LayerParams &params)
{
String nrmType = params.get<String>("norm_region", "ACROSS_CHANNELS");

@@ -0,0 +1,34 @@
#ifndef __OPENCV_DNN_LAYERS_LRN_LAYER_HPP__
#define __OPENCV_DNN_LAYERS_LRN_LAYER_HPP__
#include "../precomp.hpp"
namespace cv
{
namespace dnn
{
class LRNLayer : public Layer
{
enum
{
CHANNEL_NRM,
SPATIAL_NRM,
SPATIAL_CONTRAST_NRM //cuda-convnet feature
} type;
int size;
double alpha, beta;
Blob bufBlob;
void channelNoramlization(Blob &src, Blob &dst);
void spatialNormalization(Blob &src, Blob &dst);
public:
LRNLayer(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
}
}
#endif

@@ -1,27 +1,12 @@
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "mvn_layer.hpp"
namespace cv
{
namespace dnn
{
class MVNLayer : public Layer
{
double eps;
bool acrossChannels, normalizeVariance;
public:
MVNLayer(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
REGISTER_LAYER_CLASS(MVN, MVNLayer)
MVNLayer::MVNLayer(LayerParams &params)
{
eps = params.get<double>("eps", 1e-9);

@@ -0,0 +1,24 @@
#ifndef __OPENCV_DNN_LAYERS_MVN_LAYER_HPP__
#define __OPENCV_DNN_LAYERS_MVN_LAYER_HPP__
#include "../precomp.hpp"
namespace cv
{
namespace dnn
{
class MVNLayer : public Layer
{
double eps;
bool acrossChannels, normalizeVariance;
public:
MVNLayer(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
}
}
#endif

@@ -1,5 +1,6 @@
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "pooling_layer.hpp"
#include <float.h>
#include <algorithm>
using std::max;
@@ -10,36 +11,6 @@ namespace cv
namespace dnn
{
//TODO: add ceil_mode param
class PoolingLayer : public Layer
{
enum
{
MAX,
AVE,
STOCHASTIC
};
int type;
int padH, padW;
int strideH, strideW;
int kernelH, kernelW;
int inpH, inpW;
int outH, outW;
void computeOutputShape(int inpH, int inpW);
void maxPooling(Blob &input, Blob &output);
void avePooling(Blob &input, Blob &output);
public:
PoolingLayer(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
REGISTER_LAYER_CLASS(Pooling, PoolingLayer)
PoolingLayer::PoolingLayer(LayerParams &params)
{

@@ -0,0 +1,37 @@
#ifndef __OPENCV_DNN_LAYERS_POOLING_LAYER_HPP__
#define __OPENCV_DNN_LAYERS_POOLING_LAYER_HPP__
#include "../precomp.hpp"
namespace cv
{
namespace dnn
{
class PoolingLayer : public Layer
{
enum
{
MAX,
AVE,
STOCHASTIC
};
int type;
int padH, padW;
int strideH, strideW;
int kernelH, kernelW;
int inpH, inpW;
int outH, outW;
void computeOutputShape(int inpH, int inpW);
void maxPooling(Blob &input, Blob &output);
void avePooling(Blob &input, Blob &output);
public:
PoolingLayer(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
}
}
#endif

@@ -1,28 +1,12 @@
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "reshape_layer.hpp"
namespace cv
{
namespace dnn
{
//TODO: Extend cv::Mat::reshape method
class ReshapeLayer : public Layer
{
public:
ReshapeLayer(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*>&, std::vector<Blob>&) {}
protected:
BlobShape shapeDesc;
int inAxis, inNumAxes, autoAxisIdx;
void computeOutputShape(int startAxis, int endAxis, BlobShape &inpShape, BlobShape &outShape);
};
ReshapeLayer::ReshapeLayer(LayerParams &params)
{
inAxis = params.get<int>("axis", 0);
@@ -137,10 +121,5 @@ Ptr<Layer> createFlattenLayer(LayerParams&)
return Ptr<Layer>(new ReshapeLayer(params));
}
REGISTER_LAYER_CLASS(Reshape, ReshapeLayer)
REGISTER_LAYER_FUNC(Flatten, createFlattenLayer)
}
}

@@ -0,0 +1,30 @@
#ifndef __OPENCV_DNN_LAYERS_RESHAPE_LAYER_HPP__
#define __OPENCV_DNN_LAYERS_RESHAPE_LAYER_HPP__
#include "../precomp.hpp"
namespace cv
{
namespace dnn
{
class ReshapeLayer : public Layer
{
public:
ReshapeLayer(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*>&, std::vector<Blob>&) {}
protected:
BlobShape shapeDesc;
int inAxis, inNumAxes, autoAxisIdx;
void computeOutputShape(int startAxis, int endAxis, BlobShape &inpShape, BlobShape &outShape);
};
Ptr<Layer> createFlattenLayer(LayerParams&);
}
}
#endif

@@ -1,29 +1,12 @@
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "slice_layer.hpp"
namespace cv
{
namespace dnn
{
class SliceLayer : public Layer
{
public:
SliceLayer(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
private:
int inAxis;
std::vector<int> slicePoints;
};
REGISTER_LAYER_CLASS(Slice, SliceLayer)
SliceLayer::SliceLayer(LayerParams &params)
{
inAxis = params.get<int>("axis", 1);

@@ -0,0 +1,26 @@
#ifndef __OPENCV_DNN_LAYERS_SLICE_LAYER_HPP__
#define __OPENCV_DNN_LAYERS_SLICE_LAYER_HPP__
#include "../precomp.hpp"
namespace cv
{
namespace dnn
{
class SliceLayer : public Layer
{
public:
SliceLayer(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
private:
int inAxis;
std::vector<int> slicePoints;
};
}
}
#endif

@@ -1,5 +1,6 @@
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "softmax_layer.hpp"
#include <algorithm>
#include <stdlib.h>
using std::max;
@@ -9,21 +10,6 @@ namespace cv
namespace dnn
{
//TODO: set default axis number to 1, and add custom shape length in FullyConnected
class SoftMaxLayer : public Layer
{
int axis_, axis;
Blob maxAggregator;
public:
SoftMaxLayer(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
REGISTER_LAYER_CLASS(Softmax, SoftMaxLayer);
SoftMaxLayer::SoftMaxLayer(LayerParams &params)
{
//hotfix!!!

@@ -0,0 +1,21 @@
#ifndef __OPENCV_DNN_LAYERS_SOFTMAX_LAYER_HPP__
#define __OPENCV_DNN_LAYERS_SOFTMAX_LAYER_HPP__
#include "../precomp.hpp"
namespace cv
{
namespace dnn
{
class SoftMaxLayer : public Layer
{
int axis_, axis;
Blob maxAggregator;
public:
SoftMaxLayer(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
}
}
#endif

@@ -1,5 +1,6 @@
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "split_layer.hpp"
namespace cv
{
@@ -7,23 +8,6 @@ namespace dnn
{
//TODO: maybe the "top_count" param is unnecessary, since it can be determined from the number of output connections?
class SplitLayer : public Layer
{
public:
SplitLayer(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
private:
int outputsNum;
};
REGISTER_LAYER_CLASS(Split, SplitLayer)
SplitLayer::SplitLayer(LayerParams &params)
{
if (params.has("top_count"))

@@ -0,0 +1,25 @@
#ifndef __OPENCV_DNN_LAYERS_SPLIT_LAYER_HPP__
#define __OPENCV_DNN_LAYERS_SPLIT_LAYER_HPP__
#include "../precomp.hpp"
namespace cv
{
namespace dnn
{
class SplitLayer : public Layer
{
public:
SplitLayer(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
private:
int outputsNum;
};
}
}
#endif