More refactoring over Blob.

More refactoring over Blob.
Fix warnings and eliminate the zlib dependency in cnpy.h
pull/265/head
Vitaliy Lyudvichenko 10 years ago
parent 0ebe30a362
commit d02bced118
  1. 204  modules/dnn/include/opencv2/dnn/blob.hpp
  2. 260  modules/dnn/include/opencv2/dnn/blob.inl.hpp
  3. 75   modules/dnn/src/blob.cpp
  4. 36   modules/dnn/src/caffe_importer.cpp
  5. 5    modules/dnn/src/dnn.cpp
  6. 4    modules/dnn/src/layers/convolution_layer.cpp
  7. 26   modules/dnn/src/layers/fully_connected_layer.cpp
  8. 2    modules/dnn/src/layers/pooling_layer.cpp
  9. 10   modules/dnn/test/cnpy.h
  10. 12  modules/dnn/test/cnpy.h
  11. 13  modules/dnn/test/npy_blob.hpp
  12. 3   modules/dnn/test/test_alexnet.cpp

@@ -2,11 +2,47 @@
#define __OPENCV_DNN_DNN_BLOB_HPP__
#include <opencv2/core.hpp>
#include <vector>
+#include <ostream>
namespace cv
{
namespace dnn
{
+struct BlobShape
+{
+explicit BlobShape(int ndims, int fill = 1);
+BlobShape(int num, int cn, int rows, int cols);
+BlobShape(int ndims, const int *sizes);
+BlobShape(const std::vector<int> &sizes);
+template<int n>
+BlobShape(const Vec<int, n> &shape);
+int dims() const;
+int size(int axis) const;
+int &size(int axis);
+//do the same as size()
+int operator[](int axis) const;
+int &operator[](int axis);
+int xsize(int axis) const;
+const int *ptr() const;
+bool equal(const BlobShape &other) const;
+private:
+BlobShape();
+cv::AutoBuffer<int,4> sz;
+};
+bool operator== (const BlobShape &l, const BlobShape &r);
+//maybe useless
+CV_EXPORTS std::ostream &operator<< (std::ostream &stream, const BlobShape &shape);
/** @brief provides convenient methods for continuous n-dimensional array processing, dedicated for convolution neural networks
It's realized as wrapper over \ref cv::Mat and \ref cv::UMat and will support methods for CPU/GPU switching
*/
@@ -16,16 +52,13 @@ namespace dnn
explicit Blob();
explicit Blob(InputArray in);
-void create(int ndims, const int *sizes, int type = CV_32F);
-void create(Vec4i shape, int type = CV_32F);
-void create(int num, int cn, int rows, int cols, int type = CV_32F);
+void create(const BlobShape &shape, int type = CV_32F);
void fill(InputArray in);
-void fill(int ndims, const int *sizes, int type, void *data, bool deepCopy = true);
+void fill(const BlobShape &shape, int type, void *data, bool deepCopy = true);
Mat& getMatRef();
const Mat& getMatRef() const;
-Mat getMat();
Mat getMat(int n, int cn);
//shape getters
@@ -37,14 +70,14 @@
Python-like indexing is supported, so \p axis can be negative, i. e. -1 is last dimension.
Supposed that size of non-existing dimensions equal to 1, so the method always finished.
*/
-int size(int axis) const;
+int xsize(int axis) const;
/** @brief returns size of corresponding dimension (axis)
@param axis dimension index
Python-like indexing is supported, so \p axis can be negative, i. e. -1 is last dimension.
-@note Unlike ::size, if \p axis points to non-existing dimension then an error will be generated.
+@note Unlike ::xsize, if \p axis points to non-existing dimension then an error will be generated.
*/
-int sizeAt(int axis) const;
+int size(int axis) const;
/** @brief returns number of elements
@param startAxis starting axis (inverse indexing can be used)
@@ -59,7 +92,9 @@ namespace dnn
/** @brief returns real shape of the blob
*/
-std::vector<int> shape() const;
+BlobShape shape() const;
+bool equalShape(const Blob &other) const;
//shape getters, oriented for 4-dim Blobs processing
int cols() const;
@@ -85,156 +120,9 @@ namespace dnn
Mat m;
};
//////////////////////////////////////////////////////////////////////////
inline int Blob::canonicalAxis(int axis) const
{
CV_Assert(-dims() <= axis && axis < dims());
if (axis < 0)
{
return dims() + axis;
}
return axis;
}
inline int Blob::size(int axis) const
{
if (axis < 0)
axis += dims();
if (axis < 0 || axis >= dims())
return 1;
return sizes()[axis];
}
inline int Blob::sizeAt(int axis) const
{
CV_Assert(-dims() <= axis && axis < dims());
if (axis < 0)
axis += dims();
return sizes()[axis];
}
inline size_t Blob::total(int startAxis, int endAxis) const
{
startAxis = canonicalAxis(startAxis);
if (endAxis == -1)
endAxis = dims();
CV_Assert(startAxis <= endAxis && endAxis <= dims());
size_t size = 1; //assume that blob isn't empty
for (int i = startAxis; i < endAxis; i++)
size *= (size_t)sizes()[i];
return size;
}
inline int Blob::offset(int n, int cn, int row, int col) const
{
CV_DbgAssert(0 <= n && n < num() && 0 <= cn && cn < channels() && 0 <= row && row < rows() && 0 <= col && col < cols());
return ((n*channels() + cn)*rows() + row)*cols() + col;
}
inline float *Blob::ptrf(int n, int cn, int row, int col)
{
CV_Assert(type() == CV_32F);
return (float*)m.data + offset(n, cn, row, col);
}
inline uchar *Blob::ptrRaw(int n, int cn, int row, int col)
{
return m.data + m.elemSize() * offset(n, cn, row, col);
}
template<typename TFloat>
inline TFloat* Blob::ptr(int n, int cn, int row, int col)
{
CV_Assert(type() == cv::DataDepth<TFloat>::value);
return (TFloat*) ptrRaw(n, cn, row, col);
}
inline std::vector<int> Blob::shape() const
{
return std::vector<int>(sizes(), sizes() + dims());
}
inline Mat& Blob::getMatRef()
{
return m;
}
inline const Mat& Blob::getMatRef() const
{
return m;
}
inline Mat Blob::getMat()
{
return m;
}
inline Mat Blob::getMat(int n, int cn)
{
return Mat(rows(), cols(), m.type(), this->ptrRaw(n, cn));
}
inline int Blob::cols() const
{
return size(-1);
}
inline int Blob::rows() const
{
return size(-2);
}
inline Size Blob::size2() const
{
return Size(cols(), rows());
}
inline int Blob::channels() const
{
return size(-3);
}
inline int Blob::num() const
{
return size(-4);
}
inline int Blob::type() const
{
return m.depth();
}
inline bool Blob::isFloat() const
{
return (type() == CV_32F);
}
inline bool Blob::isDouble() const
{
return (type() == CV_32F);
}
inline const int * Blob::sizes() const
{
return &m.size[0];
}
inline int Blob::dims() const
{
return m.dims;
}
}
}
+#include "blob.inl.hpp"
#endif

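For orientation, here is a minimal usage sketch of the BlobShape-based API declared in blob.hpp above. It is not part of the commit; the umbrella header path and the example values are assumptions, while the constructors, create(), shape(), the accessors and the stream operator are the ones declared in the diff.

#include <iostream>
#include <opencv2/dnn.hpp>
using namespace cv::dnn;

void blobShapeExample()
{
    BlobShape shape(2, 3, 224, 224);     // num, channels, rows, cols
    Blob blob;
    blob.create(shape, CV_32F);          // replaces the old create(ndims, sizes, type) overloads

    CV_Assert(blob.shape() == shape);    // BlobShape provides operator==
    CV_Assert(blob.num() == 2 && blob.channels() == 3 && blob.rows() == 224 && blob.cols() == 224);

    std::cout << blob.shape() << std::endl;   // prints "[2, 3, 224, 224]" via the new operator<<
}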
@@ -0,0 +1,260 @@
#ifndef __OPENCV_DNN_DNN_BLOB_INL_HPP__
#define __OPENCV_DNN_DNN_BLOB_INL_HPP__
#include "blob.hpp"
namespace cv
{
namespace dnn
{
inline BlobShape::BlobShape(int ndims, int fill) : sz( (size_t)std::max(ndims, 1) )
{
for (int i = 0; i < ndims; i++)
sz[i] = fill;
}
inline BlobShape::BlobShape(int ndims, const int *sizes) : sz( (size_t)std::max(ndims, 1) )
{
CV_Assert(ndims > 0);
for (int i = 0; i < ndims; i++)
sz[i] = sizes[i];
}
inline BlobShape::BlobShape(int num, int cn, int rows, int cols) : sz(4)
{
sz[0] = num;
sz[1] = cn;
sz[2] = rows;
sz[3] = cols;
}
inline BlobShape::BlobShape(const std::vector<int> &sizes) : sz( sizes.size() )
{
CV_Assert(sizes.size() > 0);
for (int i = 0; i < (int)sizes.size(); i++)
sz[i] = sizes[i];
}
template<int n>
inline BlobShape::BlobShape(const Vec<int, n> &shape) : sz(n)
{
for (int i = 0; i < n; i++)
sz[i] = shape[i];
}
inline int BlobShape::dims() const
{
return (int)sz.size();
}
inline int BlobShape::xsize(int axis) const
{
if (axis < -dims() || axis >= dims())
return 1;
return sz[(axis < 0) ? axis + dims() : axis];
}
inline int BlobShape::size(int axis) const
{
CV_Assert(-dims() <= axis && axis < dims());
return sz[(axis < 0) ? axis + dims() : axis];
}
inline int &BlobShape::size(int axis)
{
CV_Assert(-dims() <= axis && axis < dims());
return sz[(axis < 0) ? axis + dims() : axis];
}
inline int BlobShape::operator[] (int axis) const
{
CV_Assert(-dims() <= axis && axis < dims());
return sz[(axis < 0) ? axis + dims() : axis];
}
inline int &BlobShape::operator[] (int axis)
{
CV_Assert(-dims() <= axis && axis < dims());
return sz[(axis < 0) ? axis + dims() : axis];
}
inline const int *BlobShape::ptr() const
{
return sz;
}
inline bool BlobShape::equal(const BlobShape &other) const
{
if (this->dims() != other.dims())
return false;
for (int i = 0; i < other.dims(); i++)
{
if (sz[i] != other.sz[i])
return false;
}
return true;
}
inline bool operator== (const BlobShape &l, const BlobShape &r)
{
return l.equal(r);
}
inline int Blob::canonicalAxis(int axis) const
{
CV_Assert(-dims() <= axis && axis < dims());
if (axis < 0)
{
return dims() + axis;
}
return axis;
}
inline int Blob::dims() const
{
return m.dims;
}
inline int Blob::xsize(int axis) const
{
if (axis < -dims() || axis >= dims())
return 1;
return sizes()[(axis < 0) ? axis + dims() : axis];
}
inline int Blob::size(int axis) const
{
CV_Assert(-dims() <= axis && axis < dims());
return sizes()[(axis < 0) ? axis + dims() : axis];
}
inline size_t Blob::total(int startAxis, int endAxis) const
{
if (startAxis < 0)
startAxis += dims();
if (endAxis == -1)
endAxis = dims();
CV_Assert(0 <= startAxis && startAxis <= endAxis && endAxis <= dims());
size_t size = 1; //assume that blob isn't empty
for (int i = startAxis; i < endAxis; i++)
size *= (size_t)sizes()[i];
return size;
}
inline int Blob::offset(int n, int cn, int row, int col) const
{
CV_DbgAssert(0 <= n && n < num() && 0 <= cn && cn < channels() && 0 <= row && row < rows() && 0 <= col && col < cols());
return ((n*channels() + cn)*rows() + row)*cols() + col;
}
inline float *Blob::ptrf(int n, int cn, int row, int col)
{
CV_Assert(type() == CV_32F);
return (float*)m.data + offset(n, cn, row, col);
}
inline uchar *Blob::ptrRaw(int n, int cn, int row, int col)
{
return m.data + m.elemSize() * offset(n, cn, row, col);
}
template<typename TFloat>
inline TFloat* Blob::ptr(int n, int cn, int row, int col)
{
CV_Assert(type() == cv::DataDepth<TFloat>::value);
return (TFloat*) ptrRaw(n, cn, row, col);
}
inline BlobShape Blob::shape() const
{
return BlobShape(dims(), sizes());
}
inline bool Blob::equalShape(const Blob &other) const
{
if (this->dims() != other.dims())
return false;
for (int i = 0; i < dims(); i++)
{
if (this->sizes()[i] != other.sizes()[i])
return false;
}
return true;
}
inline Mat& Blob::getMatRef()
{
return m;
}
inline const Mat& Blob::getMatRef() const
{
return m;
}
inline Mat Blob::getMat(int n, int cn)
{
return Mat(rows(), cols(), m.type(), this->ptrRaw(n, cn));
}
inline int Blob::cols() const
{
return xsize(3);
}
inline int Blob::rows() const
{
return xsize(2);
}
inline int Blob::channels() const
{
return xsize(1);
}
inline int Blob::num() const
{
return xsize(0);
}
inline Size Blob::size2() const
{
return Size(cols(), rows());
}
inline int Blob::type() const
{
return m.depth();
}
inline bool Blob::isFloat() const
{
return (type() == CV_32F);
}
inline bool Blob::isDouble() const
{
return (type() == CV_64F);
}
inline const int * Blob::sizes() const
{
return &m.size[0];
}
}
}
#endif

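A short sketch, illustrative only and not from the commit, of the size()/xsize() semantics implemented above: size() asserts on out-of-range axes, xsize() treats missing dimensions as size 1, and both accept negative Python-style indices.

#include <opencv2/dnn.hpp>

void shapeIndexingExample()
{
    cv::dnn::BlobShape s(2, 3, 4, 5);  // dims() == 4
    CV_Assert(s.size(1) == 3);         // plain positive axis
    CV_Assert(s.size(-3) == 3);        // negative axes count from the end
    CV_Assert(s.xsize(5) == 1);        // out-of-range axis is treated as a singleton dimension
    // s.size(5) would fail its CV_Assert, since size() requires -dims() <= axis < dims()
}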
@@ -26,8 +26,7 @@ namespace dnn
int type = mat.type();
int dstType = CV_MAKE_TYPE(CV_MAT_DEPTH(type), 1);
-int size[3] = { cn, rows, cols };
-this->create(3, size, dstType);
+this->create(BlobShape(1, cn, rows, cols), dstType);
uchar *data = m.data;
int step = rows * cols * CV_ELEM_SIZE(dstType);
@@ -54,45 +53,18 @@
}
}
-inline void squeezeShape_(const int srcDims, const int *srcSizes, const int dstDims, int *dstSizes)
-{
-const int m = std::min(dstDims, srcDims);
-//copy common(last) dimensions
-for (int i = 0; i < m; i++)
-dstSizes[dstDims - 1 - i] = srcSizes[srcDims - 1 - i];
-//either flatten extra dimensions
-for (int i = m; i < srcDims; i++)
-dstSizes[0] *= srcSizes[srcDims - 1 - i];
-//either fill gaps
-for (int i = m; i < dstDims; i++)
-dstSizes[dstDims - 1 - i] = 1;
-}
-static Vec4i squeezeShape4(const int ndims, const int *sizes)
-{
-Vec4i res;
-squeezeShape_(ndims, sizes, 4, &res[0]);
-return res;
-}
-void Blob::fill(int ndims, const int *sizes, int type, void *data, bool deepCopy)
+void Blob::fill(const BlobShape &shape, int type, void *data, bool deepCopy)
{
CV_Assert(type == CV_32F || type == CV_64F);
-Vec4i shape = squeezeShape4(ndims, sizes);
if (deepCopy)
{
-m.create(4, &shape[0], type);
-size_t dataSize = m.total() * m.elemSize();
-memcpy(m.data, data, dataSize);
+m.create(shape.dims(), shape.ptr(), type);
+memcpy(m.data, data, m.total() * m.elemSize());
}
else
{
-m = Mat(shape.channels, &shape[0], type, data);
+m = Mat(shape.dims(), shape.ptr(), type, data);
}
}
@@ -104,29 +76,44 @@
*this = Blob(in);
}
-void Blob::create(int ndims, const int *sizes, int type)
+void Blob::create(const BlobShape &shape, int type)
{
CV_Assert(type == CV_32F || type == CV_64F);
-Vec4i shape = squeezeShape4(ndims, sizes);
-m.create(shape.channels, &shape[0], type);
+m.create(shape.dims(), shape.ptr(), type);
}
-void Blob::create(Vec4i shape, int type)
-{
-m.create(shape.channels, &shape[0], type);
-}
-void Blob::create(int num, int cn, int rows, int cols, int type)
-{
-Vec4i shape(num, cn, rows, cols);
-create(4, &shape[0], type);
-}
+inline void squeezeShape(const int srcDims, const int *srcSizes, const int dstDims, int *dstSizes)
+{
+const int m = std::min(dstDims, srcDims);
+//copy common(last) dimensions
+for (int i = 0; i < m; i++)
+dstSizes[dstDims - 1 - i] = srcSizes[srcDims - 1 - i];
+//either flatten extra dimensions
+for (int i = m; i < srcDims; i++)
+dstSizes[0] *= srcSizes[srcDims - 1 - i];
+//either fill gaps
+for (int i = m; i < dstDims; i++)
+dstSizes[dstDims - 1 - i] = 1;
+}
Vec4i Blob::shape4() const
{
-return squeezeShape4(dims(), sizes());
+return Vec4i(num(), channels(), rows(), cols());
}
+std::ostream &operator<< (std::ostream &stream, const BlobShape &shape)
+{
+stream << "[";
+for (int i = 0; i < shape.dims() - 1; i++)
+stream << shape[i] << ", ";
+if (shape.dims() > 0)
+stream << shape[-1];
+return stream << "]";
+}
}
}

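To illustrate the refactored Blob::fill() above, a hedged sketch of the deep-copy versus wrapping behaviour. The helper function and buffer are hypothetical and not in the commit; only the fill(shape, type, data, deepCopy) signature comes from the diff.

#include <opencv2/dnn.hpp>

void fillExample(float *externalData)   // caller-owned buffer of at least 2*3*4*5 floats
{
    cv::dnn::BlobShape shape(2, 3, 4, 5);

    cv::dnn::Blob copied;
    copied.fill(shape, CV_32F, externalData, true);   // allocates its own storage and memcpy's the data

    cv::dnn::Blob wrapped;
    wrapped.fill(shape, CV_32F, externalData, false); // wraps the buffer; externalData must outlive the blob
}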
@@ -128,39 +128,41 @@ namespace
}
}
-void blobFromProto(const caffe::BlobProto &protoBlob, cv::dnn::Blob &dstBlob)
+BlobShape blobShapeFromProto(const caffe::BlobProto &pbBlob)
{
-AutoBuffer<int, 4> shape;
-if (protoBlob.has_num() || protoBlob.has_channels() || protoBlob.has_height() || protoBlob.has_width())
+if (pbBlob.has_num() || pbBlob.has_channels() || pbBlob.has_height() || pbBlob.has_width())
{
-shape.resize(4);
-shape[0] = protoBlob.num();
-shape[1] = protoBlob.channels();
-shape[2] = protoBlob.height();
-shape[3] = protoBlob.width();
+return BlobShape(pbBlob.num(), pbBlob.channels(), pbBlob.height(), pbBlob.width());
}
-else if (protoBlob.has_shape())
+else if (pbBlob.has_shape())
{
-const caffe::BlobShape &_shape = protoBlob.shape();
-shape.resize(_shape.dim_size());
+const caffe::BlobShape &_shape = pbBlob.shape();
+BlobShape shape(_shape.dim_size());
for (int i = 0; i < _shape.dim_size(); i++)
shape[i] = _shape.dim(i);
+return shape;
}
else
{
CV_Error(cv::Error::StsAssert, "Unknown shape of input blob");
+return BlobShape(-1);
}
+}
-dstBlob.create(shape.size(), shape, CV_32F);
-CV_Assert(protoBlob.data_size() == (int)dstBlob.getMatRef().total());
+void blobFromProto(const caffe::BlobProto &pbBlob, cv::dnn::Blob &dstBlob)
+{
+BlobShape shape = blobShapeFromProto(pbBlob);
+dstBlob.create(shape, CV_32F);
+CV_Assert(pbBlob.data_size() == (int)dstBlob.getMatRef().total());
-CV_DbgAssert(protoBlob.GetDescriptor()->FindFieldByLowercaseName("data")->cpp_type() == FieldDescriptor::CPPTYPE_FLOAT);
+CV_DbgAssert(pbBlob.GetDescriptor()->FindFieldByLowercaseName("data")->cpp_type() == FieldDescriptor::CPPTYPE_FLOAT);
float *dstData = dstBlob.getMatRef().ptr<float>();
-for (int i = 0; i < protoBlob.data_size(); i++)
-dstData[i] = protoBlob.data(i);
+for (int i = 0; i < pbBlob.data_size(); i++)
+dstData[i] = pbBlob.data(i);
}
void extractBinaryLayerParms(const caffe::LayerParameter& layer, LayerParams& layerParams)

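As a rough illustration of blobShapeFromProto() above, a hypothetical snippet written as if inside the importer's translation unit. The setters are the standard protobuf-generated ones for the Caffe proto; the concrete values are made up.

caffe::BlobProto pb;
pb.set_num(10);
pb.set_channels(3);
pb.set_height(227);
pb.set_width(227);
BlobShape s = blobShapeFromProto(pb);   // legacy fields present, so this yields BlobShape(10, 3, 227, 227)
// A proto that instead carries only the repeated shape().dim() field goes through the second branch,
// and a proto with neither triggers the CV_Error fallback.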
@@ -309,9 +309,10 @@ struct Net::Impl
//forward itself
if (ld.layerInstance && layerId != 0)
{
//std::cout << ld.name << " shape:" << ld.outputBlobs[0].shape4() << std::endl;
ld.layerInstance->forward(ld.inputBlobs, ld.outputBlobs);
}
//std::cout << ld.name << " shape:" << ld.outputBlobs[0].shape() << std::endl;
ld.flag = 1;
}

@@ -73,9 +73,7 @@ namespace dnn
for (size_t i = 0; i < inputs.size(); i++)
{
CV_Assert(inputs[i]->rows() == inH && inputs[i]->cols() == inW && inputs[i]->channels() == inCn);
-int num = inputs[i]->num();
-outputs[i].create(num, numOutput, outH, outW);
+outputs[i].create(BlobShape(inputs[i]->num(), numOutput, outH, outW));
}
kerSize = kernelH * kernelW * groupCn;

@@ -10,7 +10,7 @@ namespace dnn
{
bool bias;
int numOutputs;
-int axis;
+int axis_, axis;
int innerSize;
@@ -30,9 +30,8 @@ namespace dnn
{
numOutputs = params.get<int>("num_output");
bias = params.get<bool>("bias_term", true);
-axis = params.get<int>("axis", 1);
-CV_Assert(0 <= axis && axis < 4);
+axis_ = params.get<int>("axis", 1);
CV_Assert(params.learnedBlobs.size() >= 1);
CV_Assert(!bias || (params.learnedBlobs.size() >= 2 && (int)params.learnedBlobs[1].total() == numOutputs));
@@ -48,7 +47,9 @@ namespace dnn
{
CV_Assert(inputs.size() > 0);
+axis = inputs[0]->canonicalAxis(axis_);
innerSize = (int)inputs[0]->total(axis);
CV_Assert((size_t)innerSize * (size_t)numOutputs == learnedParams[0].total());
CV_Assert(learnedParams[0].rows() == numOutputs && learnedParams[0].cols() == innerSize);
@@ -56,7 +57,7 @@ namespace dnn
for (size_t i = 0; i < inputs.size(); i++)
{
if (i != 0)
-CV_Assert(inputs[i]->total(axis) == (size_t)innerSize);
+CV_Assert(inputs[i]->equalShape(*inputs[0]));
this->reshape(*inputs[i], outputs[i]);
}
@@ -64,12 +65,9 @@ namespace dnn
void FullyConnectedLayer::reshape(const Blob &inp, Blob &out)
{
-Vec4i inpShape = inp.shape4();
-Vec4i outShape = Vec4i::all(1);
-for (int a = 0; a < axis; a++)
-outShape[a] = inpShape[a];
-outShape[3] = numOutputs;
+BlobShape inpShape = inp.shape();
+BlobShape outShape(axis+1, inpShape.ptr());
+outShape[axis] = numOutputs;
out.create(outShape, inp.type());
}
@@ -82,12 +80,12 @@ namespace dnn
int N = numOutputs;
int K = innerSize;
-Mat srcMat(M, K, CV_32F, inputs[i]->ptrf());
-Mat weights(N, K, CV_32F, learnedParams[0].ptrf());
-Mat dstMat(M, N, CV_32F, outputs[i].ptrf());
+Mat srcMat(M, K, inputs[i]->type(), inputs[i]->ptrf());
+Mat weight(N, K, learnedParams[0].type(), learnedParams[0].ptrf());
+Mat dstMat(M, N, outputs[i].type(), outputs[i].ptrf());
//important: Caffe stores weights as transposed array
-cv::gemm(srcMat, weights, 1, noArray(), 0, dstMat, GEMM_2_T);
+cv::gemm(srcMat, weight, 1, noArray(), 0, dstMat, GEMM_2_T);
if (bias)
{

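For reference, a small standalone sketch, not part of the commit, of the GEMM call used in the fully connected forward pass: the input is viewed as an M x K matrix, Caffe stores weights as N x K, and GEMM_2_T multiplies by the transposed weights to produce an M x N output. The sizes and random data are illustrative.

#include <opencv2/core.hpp>

void gemmShapeExample()
{
    const int M = 2, K = 5, N = 3;   // batch rows, innerSize, numOutputs
    cv::Mat src(M, K, CV_32F), weight(N, K, CV_32F), dst;
    cv::randu(src, cv::Scalar(-1), cv::Scalar(1));
    cv::randu(weight, cv::Scalar(-1), cv::Scalar(1));

    // dst (M x N) = src (M x K) * weight^T (K x N)
    cv::gemm(src, weight, 1, cv::noArray(), 0, dst, cv::GEMM_2_T);
    CV_Assert(dst.rows == M && dst.cols == N);
}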
@@ -73,7 +73,7 @@ namespace dnn
for (size_t i = 0; i < inputs.size(); i++)
{
CV_Assert(inputs[i]->rows() == inH && inputs[i]->cols() == inW);
-outputs[i].create(inputs[i]->num(), inputs[i]->channels(), pooledH, pooledW);
+outputs[i].create(BlobShape(inputs[i]->num(), inputs[i]->channels(), pooledH, pooledW));
}
}

@@ -11,7 +11,7 @@ namespace dnn
//TODO: set default axis number to 1, and add custom shape length in FullyConnected
class SoftMaxLayer : public Layer
{
-int axis;
+int axis_, axis;
Blob maxAggregator;
public:
@@ -27,15 +27,15 @@ namespace dnn
SoftMaxLayer::SoftMaxLayer(LayerParams &params)
{
//hotfix!!!
-axis = params.get<int>("axis", 3);
-CV_Assert(0 <= axis && axis < 4);
+axis_ = params.get<int>("axis", 1);
}
void SoftMaxLayer::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
{
CV_Assert(inputs.size() == 1);
+axis = inputs[0]->canonicalAxis(axis_);
-Vec4i shape = inputs[0]->shape4();
+BlobShape shape = inputs[0]->shape();
outputs.resize(1);
outputs[0].create(shape);
@@ -87,7 +87,7 @@ namespace dnn
}
}
-cv::exp(dst.getMat(), dst.getMat());
+cv::exp(dst.getMatRef(), dst.getMatRef());
for (size_t outerDim = 0; outerDim < outerSize; outerDim++)
{

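A brief sketch, illustrative and not from the commit, of the canonicalAxis() resolution that both SoftMax and FullyConnected now perform at allocation time: a possibly negative configured axis is mapped onto the concrete input blob's dimensionality.

#include <opencv2/dnn.hpp>

void canonicalAxisExample()
{
    cv::dnn::Blob input;
    input.create(cv::dnn::BlobShape(1, 1000, 1, 1), CV_32F);
    // A configured axis of -3 resolves to dims() - 3 == 1, i.e. the channel axis.
    CV_Assert(input.canonicalAxis(-3) == 1);
}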
@@ -13,8 +13,10 @@
#include<typeinfo>
#include<iostream>
#include<cassert>
-//#include<zlib.h>
#include<map>
+#if defined(HAVE_ZLIB) && HAVE_ZLIB
+#include<zlib.h>
+#endif
namespace cnpy {
@@ -57,7 +59,7 @@ namespace cnpy {
template<> std::vector<char>& operator+=(std::vector<char>& lhs, const char* rhs);
-template<typename T> std::string tostring(T i, int pad = 0, char padval = ' ') {
+template<typename T> std::string tostring(T i, int = 0, char = ' ') {
std::stringstream s;
s << i;
return s.str();
@@ -152,8 +154,12 @@ namespace cnpy {
int nbytes = nels*sizeof(T) + npy_header.size();
//get the CRC of the data to be added
+#if defined(HAVE_ZLIB) && HAVE_ZLIB
unsigned int crc = crc32(0L,(unsigned char*)&npy_header[0],npy_header.size());
crc = crc32(crc,(unsigned char*)data,nels*sizeof(T));
+#else
+unsigned int crc = 0;
+#endif
//build the local header
std::vector<char> local_header;
@@ -204,7 +210,7 @@ namespace cnpy {
fclose(fp);
}
-template<typename T> std::vector<char> create_npy_header(const T* data, const unsigned int* shape, const unsigned int ndims) {
+template<typename T> std::vector<char> create_npy_header(const T*, const unsigned int* shape, const unsigned int ndims) {
std::vector<char> dict;
dict += "{'descr': '";

@@ -1,24 +1,15 @@
#ifndef __OPENCV_DNN_TEST_NPY_BLOB_HPP__
#define __OPENCV_DNN_TEST_NPY_BLOB_HPP__
#include "test_precomp.hpp"
-#ifdef __GNUC__
-# pragma GCC diagnostic ignored "-Wunused-parameter"
-# pragma GCC diagnostic push
-#endif
#include "cnpy.h"
-#ifdef __GNUC__
-# pragma GCC diagnostic pop
-#endif
inline cv::dnn::Blob blobFromNPY(const cv::String &path)
{
cnpy::NpyArray npyBlob = cnpy::npy_load(path.c_str());
+cv::dnn::BlobShape shape((int)npyBlob.shape.size(), (int*)&npyBlob.shape[0]);
cv::dnn::Blob blob;
-blob.fill((int)npyBlob.shape.size(), (int*)&npyBlob.shape[0], CV_32F, npyBlob.data);
+blob.fill(shape, CV_32F, npyBlob.data);
npyBlob.destruct();
return blob;

@@ -29,8 +29,9 @@ inline void normAssert(InputArray ref, InputArray get, const char *comment = "")
EXPECT_LE(normInf, 0.001) << comment;
}
-inline void normAssert(Blob ref, Blob test, const char *comment = "")
+inline void normAssert(Blob &ref, Blob &test, const char *comment = "")
{
+EXPECT_EQ(ref.shape(), test.shape());
normAssert(ref.getMatRef(), test.getMatRef(), comment);
}
