From 0ebe30a36230c00150b6a766966014b7399cc309 Mon Sep 17 00:00:00 2001
From: Vitaliy Lyudvichenko
Date: Sat, 4 Jul 2015 22:41:55 +0300
Subject: [PATCH] Blob class was significantly refactored

---
 modules/dnn/include/opencv2/dnn/blob.hpp         | 240 ++++++++++++++++++
 modules/dnn/include/opencv2/dnn/dict.hpp         |   4 +-
 modules/dnn/include/opencv2/dnn/dnn.hpp          |  92 ++-----
 modules/dnn/include/opencv2/dnn/dnn.inl.hpp      | 122 +--------
 modules/dnn/src/blob.cpp                         | 132 ++++++++++
 modules/dnn/src/caffe/glog_emulator.hpp          |   4 +-
 modules/dnn/src/dnn.cpp                          | 110 --------
 modules/dnn/src/layers/concat_layer.cpp          |  12 +-
 modules/dnn/src/layers/convolution_layer.cpp     |   8 +-
 modules/dnn/src/layers/elementwise_layers.cpp    |   4 +-
 modules/dnn/src/layers/fully_connected_layer.cpp |  10 +-
 modules/dnn/src/layers/lrn_layer.cpp             |   4 +-
 modules/dnn/src/layers/pooling_layer.cpp         |   8 +-
 modules/dnn/src/layers/softmax_layer.cpp         |   8 +-
 modules/dnn/test/cnpy.h                          |   2 +-
 modules/dnn/test/npy_blob.hpp                    |   2 +-
 modules/dnn/test/test_layers.cpp                 |   2 +-
 17 files changed, 431 insertions(+), 333 deletions(-)
 create mode 100644 modules/dnn/include/opencv2/dnn/blob.hpp
 create mode 100644 modules/dnn/src/blob.cpp

diff --git a/modules/dnn/include/opencv2/dnn/blob.hpp b/modules/dnn/include/opencv2/dnn/blob.hpp
new file mode 100644
index 000000000..b91114021
--- /dev/null
+++ b/modules/dnn/include/opencv2/dnn/blob.hpp
@@ -0,0 +1,240 @@
+#ifndef __OPENCV_DNN_DNN_BLOB_HPP__
+#define __OPENCV_DNN_DNN_BLOB_HPP__
+#include <opencv2/core.hpp>
+#include <vector>
+
+namespace cv
+{
+namespace dnn
+{
+    /** @brief Provides convenient methods for continuous n-dimensional array processing, dedicated to convolutional neural networks.
+     It is realized as a wrapper over \ref cv::Mat and \ref cv::UMat and will support methods for CPU/GPU switching.
+    */
+    class CV_EXPORTS Blob
+    {
+    public:
+        explicit Blob();
+        explicit Blob(InputArray in);
+
+        void create(int ndims, const int *sizes, int type = CV_32F);
+        void create(Vec4i shape, int type = CV_32F);
+        void create(int num, int cn, int rows, int cols, int type = CV_32F);
+
+        void fill(InputArray in);
+        void fill(int ndims, const int *sizes, int type, void *data, bool deepCopy = true);
+
+        Mat& getMatRef();
+        const Mat& getMatRef() const;
+        Mat getMat();
+        Mat getMat(int n, int cn);
+
+        //shape getters
+        ///returns the actual number of blob dimensions
+        int dims() const;
+
+        /** @brief Returns the size of the corresponding dimension (axis).
+        @param axis dimension index
+        Python-like indexing is supported, so \p axis can be negative, i.e. -1 is the last dimension.
+        The size of non-existing dimensions is assumed to be 1, so the method always succeeds.
+        */
+        int size(int axis) const;
+
+        /** @brief Returns the size of the corresponding dimension (axis).
+        @param axis dimension index
+        Python-like indexing is supported, so \p axis can be negative, i.e. -1 is the last dimension.
+        @note Unlike ::size, if \p axis points to a non-existing dimension, an error is generated.
+        */
+        int sizeAt(int axis) const;
+
+        /** @brief Returns the number of elements.
+        @param startAxis starting axis (negative indexing can be used)
+        @param endAxis ending (excluded) axis
+        @see ::canonicalAxis
+        */
+        size_t total(int startAxis = 0, int endAxis = -1) const;
+
+        /** @brief Converts an axis index to canonical format (where 0 <= axis < ::dims()).
+        */
+        int canonicalAxis(int axis) const;
+
+        /** @brief Returns the real shape of the blob.
+        */
+        std::vector<int> shape() const;
+
+        //shape getters, oriented for 4-dim Blobs processing
+        int cols() const;
+        int rows() const;
+        int channels() const;
+        int num() const;
+        Size size2() const;
+        Vec4i shape4() const;
+
+        //CPU data pointer functions
+        int offset(int n = 0, int cn = 0, int row = 0, int col = 0) const;
+        uchar *ptrRaw(int n = 0, int cn = 0, int row = 0, int col = 0);
+        float *ptrf(int n = 0, int cn = 0, int row = 0, int col = 0);
+        template<typename TFloat>
+        TFloat *ptr(int n = 0, int cn = 0, int row = 0, int col = 0);
+
+        int type() const;
+        bool isFloat() const;
+        bool isDouble() const;
+
+    private:
+        const int *sizes() const;
+
+        Mat m;
+    };
+
+    //////////////////////////////////////////////////////////////////////////
+
+    inline int Blob::canonicalAxis(int axis) const
+    {
+        CV_Assert(-dims() <= axis && axis < dims());
+
+        if (axis < 0)
+        {
+            return dims() + axis;
+        }
+        return axis;
+    }
+
+    inline int Blob::size(int axis) const
+    {
+        if (axis < 0)
+            axis += dims();
+
+        if (axis < 0 || axis >= dims())
+            return 1;
+
+        return sizes()[axis];
+    }
+
+    inline int Blob::sizeAt(int axis) const
+    {
+        CV_Assert(-dims() <= axis && axis < dims());
+
+        if (axis < 0)
+            axis += dims();
+
+        return sizes()[axis];
+    }
+
+    inline size_t Blob::total(int startAxis, int endAxis) const
+    {
+        startAxis = canonicalAxis(startAxis);
+
+        if (endAxis == -1)
+            endAxis = dims();
+
+        CV_Assert(startAxis <= endAxis && endAxis <= dims());
+
+        size_t size = 1; //assume that blob isn't empty
+        for (int i = startAxis; i < endAxis; i++)
+            size *= (size_t)sizes()[i];
+
+        return size;
+    }
+
+    inline int Blob::offset(int n, int cn, int row, int col) const
+    {
+        CV_DbgAssert(0 <= n && n < num() && 0 <= cn && cn < channels() && 0 <= row && row < rows() && 0 <= col && col < cols());
+        return ((n*channels() + cn)*rows() + row)*cols() + col;
+    }
+
+    inline float *Blob::ptrf(int n, int cn, int row, int col)
+    {
+        CV_Assert(type() == CV_32F);
+        return (float*)m.data + offset(n, cn, row, col);
+    }
+
+    inline uchar *Blob::ptrRaw(int n, int cn, int row, int col)
+    {
+        return m.data + m.elemSize() * offset(n, cn, row, col);
+    }
+
+    template<typename TFloat>
+    inline TFloat* Blob::ptr(int n, int cn, int row, int col)
+    {
+        CV_Assert(type() == cv::DataDepth<TFloat>::value);
+        return (TFloat*) ptrRaw(n, cn, row, col);
+    }
+
+    inline std::vector<int> Blob::shape() const
+    {
+        return std::vector<int>(sizes(), sizes() + dims());
+    }
+
+    inline Mat& Blob::getMatRef()
+    {
+        return m;
+    }
+
+    inline const Mat& Blob::getMatRef() const
+    {
+        return m;
+    }
+
+    inline Mat Blob::getMat()
+    {
+        return m;
+    }
+
+    inline Mat Blob::getMat(int n, int cn)
+    {
+        return Mat(rows(), cols(), m.type(), this->ptrRaw(n, cn));
+    }
+
+    inline int Blob::cols() const
+    {
+        return size(-1);
+    }
+
+    inline int Blob::rows() const
+    {
+        return size(-2);
+    }
+
+    inline Size Blob::size2() const
+    {
+        return Size(cols(), rows());
+    }
+
+    inline int Blob::channels() const
+    {
+        return size(-3);
+    }
+
+    inline int Blob::num() const
+    {
+        return size(-4);
+    }
+
+    inline int Blob::type() const
+    {
+        return m.depth();
+    }
+
+    inline bool Blob::isFloat() const
+    {
+        return (type() == CV_32F);
+    }
CV_32F); + } + + inline bool Blob::isDouble() const + { + return (type() == CV_32F); + } + + inline const int * Blob::sizes() const + { + return &m.size[0]; + } + + inline int Blob::dims() const + { + return m.dims; + } +} +} + +#endif diff --git a/modules/dnn/include/opencv2/dnn/dict.hpp b/modules/dnn/include/opencv2/dnn/dict.hpp index e398a60b4..c98781ced 100644 --- a/modules/dnn/include/opencv2/dnn/dict.hpp +++ b/modules/dnn/include/opencv2/dnn/dict.hpp @@ -1,5 +1,5 @@ -#ifndef __OPENCV_DNN_DICT_HPP__ -#define __OPENCV_DNN_DICT_HPP__ +#ifndef __OPENCV_DNN_DNN_DICT_HPP__ +#define __OPENCV_DNN_DNN_DICT_HPP__ #include #include diff --git a/modules/dnn/include/opencv2/dnn/dnn.hpp b/modules/dnn/include/opencv2/dnn/dnn.hpp index daf167f57..e98dd3b7f 100644 --- a/modules/dnn/include/opencv2/dnn/dnn.hpp +++ b/modules/dnn/include/opencv2/dnn/dnn.hpp @@ -7,62 +7,12 @@ #include #include +#include namespace cv { namespace dnn { - class Layer; - class NetConfiguration; - class Net; - class Blob; - class LayerParams; - - //wrapper over cv::Mat and cv::UMat - class CV_EXPORTS Blob - { - public: - explicit Blob(); - explicit Blob(InputArray in); - - void create(int ndims, const int *sizes, int type = CV_32F); - void create(Vec4i shape, int type = CV_32F); - void create(int num, int cn, int rows, int cols, int type = CV_32F); - - void fill(InputArray in); - void fill(int ndims, const int *sizes, int type, void *data, bool deepCopy = true); - - Mat& getMatRef(); - const Mat& getMatRef() const; - Mat getMat(); - Mat getMat(int num, int channel); - - //shape getters - int cols() const; - int rows() const; - int channels() const; - int num() const; - Size size2() const; - Vec4i shape() const; - int size(int index) const; - size_t total(int startAxis = 0, int endAxis = -1) const; - - uchar *rawPtr(int num = 0, int cn = 0, int row = 0, int col = 0); - - template - TFloat *ptr(int num = 0, int cn = 0, int row = 0, int col = 0); - - int type() const; - bool isFloat() const; - bool isDouble() const; - - private: - const int *sizes() const; - int dims() const; - - Mat m; - }; - class CV_EXPORTS LayerParams : public Dict { public: @@ -70,26 +20,7 @@ namespace dnn std::vector learnedBlobs; }; - class CV_EXPORTS LayerRegister - { - public: - - typedef Ptr (*Constuctor)(LayerParams ¶ms); - - static void registerLayer(const String &type, Constuctor constructor); - - static void unregisterLayer(const String &type); - - static Ptr createLayerInstance(const String &type, LayerParams& params); - - private: - LayerRegister(); - - struct Impl; - static Ptr impl; - }; - - //this class allows to build new Layers + //Interface class allows to build new Layers class CV_EXPORTS Layer { public: @@ -166,6 +97,25 @@ namespace dnn CV_EXPORTS Ptr createCaffeImporter(const String &prototxt, const String &caffeModel); + //Layer factory allows to create instances of registered layers. 
+    class CV_EXPORTS LayerRegister
+    {
+    public:
+
+        typedef Ptr<Layer> (*Constuctor)(LayerParams &params);
+
+        static void registerLayer(const String &type, Constuctor constructor);
+
+        static void unregisterLayer(const String &type);
+
+        static Ptr<Layer> createLayerInstance(const String &type, LayerParams& params);
+
+    private:
+        LayerRegister();
+
+        struct Impl;
+        static Ptr<Impl> impl;
+    };
 
     //allows automatically register created layer on module load time
     struct _LayerRegisterer
diff --git a/modules/dnn/include/opencv2/dnn/dnn.inl.hpp b/modules/dnn/include/opencv2/dnn/dnn.inl.hpp
index 7d477fe17..700141661 100644
--- a/modules/dnn/include/opencv2/dnn/dnn.inl.hpp
+++ b/modules/dnn/include/opencv2/dnn/dnn.inl.hpp
@@ -1,5 +1,5 @@
-#ifndef __OPENCV_DNN_INL_HPP__
-#define __OPENCV_DNN_INL_HPP__
+#ifndef __OPENCV_DNN_DNN_INL_HPP__
+#define __OPENCV_DNN_DNN_INL_HPP__
 
 #include <opencv2/dnn.hpp>
 
@@ -7,123 +7,7 @@
 namespace cv
 {
 namespace dnn
 {
-    inline Mat& Blob::getMatRef()
-    {
-        return m;
-    }
-
-    inline const Mat& Blob::getMatRef() const
-    {
-        return m;
-    }
-
-    inline Mat Blob::getMat()
-    {
-        return m;
-    }
-
-    inline Mat Blob::getMat(int num, int channel)
-    {
-        CV_Assert(0 <= num && num < this->num() && 0 <= channel && channel < this->channels());
-        return Mat(rows(), cols(), m.type(), this->rawPtr(num, channel));
-    }
-
-    inline int Blob::cols() const
-    {
-        CV_DbgAssert(m.dims > 2);
-        return m.size[m.dims-1];
-    }
-
-    inline int Blob::rows() const
-    {
-        CV_DbgAssert(m.dims > 2);
-        return m.size[m.dims-2];
-    }
-
-    inline Size Blob::size2() const
-    {
-        return Size(cols(), rows());
-    }
-
-    inline int Blob::channels() const
-    {
-        CV_DbgAssert(m.dims >= 3);
-        return m.size[m.dims-3];
-    }
-
-    inline int Blob::num() const
-    {
-        CV_DbgAssert(m.dims == 4);
-        return m.size[0];
-    }
-
-    inline Vec4i Blob::shape() const
-    {
-        CV_DbgAssert(m.dims == 4);
-        return Vec4i(m.size.p);
-    }
-
-    inline int Blob::size(int index) const
-    {
-        CV_Assert(index >= 0 && index < dims());
-        return sizes()[index];
-    }
-
-    inline size_t Blob::total(int startAxis, int endAxis) const
-    {
-        if (endAxis == -1)
-            endAxis = dims();
-
-        CV_Assert(0 <= startAxis && startAxis <= endAxis && endAxis <= dims());
-
-        size_t size = 1; //assume that blob isn't empty
-        for (int i = startAxis; i < endAxis; i++)
-            size *= (size_t) sizes()[i];
-
-        return size;
-    }
-
-    inline uchar* Blob::rawPtr(int num, int cn, int row, int col)
-    {
-        CV_DbgAssert(m.dims == 4);
-        return m.data + num * m.step[0] + cn * m.step[1] + row * m.step[2] + col * m.step[3];
-    }
-
-    template<typename TFloat>
-    TFloat *Blob::ptr(int n, int cn, int row, int col)
-    {
-        CV_Assert(m.type() == cv::DataType<TFloat>::type);
-        CV_Assert(0 <= n && n < num() && 0 <= cn && cn < channels() && 0 <= row && row < rows() && 0 <= col && col < cols());
-        return (TFloat*) rawPtr(n, cn, row, col);
-    }
-
-    inline int Blob::type() const
-    {
-        return m.depth();
-    }
-
-    inline bool Blob::isFloat() const
-    {
-        return (type() == CV_32F);
-    }
-
-    inline bool Blob::isDouble() const
-    {
-        return (type() == CV_32F);
-    }
-
-    inline const int * Blob::sizes() const
-    {
-        return &m.size[0];
-    }
-
-    inline int Blob::dims() const
-    {
-        return m.dims;
-    }
-
-
-
+    //code is absent ... today
 }
 }
 
diff --git a/modules/dnn/src/blob.cpp b/modules/dnn/src/blob.cpp
new file mode 100644
index 000000000..ea16bb4d9
--- /dev/null
+++ b/modules/dnn/src/blob.cpp
@@ -0,0 +1,132 @@
+#include "precomp.hpp"
+
+namespace cv
+{
+namespace dnn
+{
+
+    Blob::Blob()
+    {
+        int zeros[4] = { 0, 0, 0, 0 };
+        m = Mat(4, zeros, CV_32F, NULL);
+    }
+
+    Blob::Blob(InputArray in)
+    {
+        CV_Assert(in.isMat() || in.isUMat());
+
+        if (in.isMat())
+        {
+            Mat mat = in.getMat();
+
+            CV_Assert(mat.dims == 2);
+            int rows = mat.rows;
+            int cols = mat.cols;
+            int cn = mat.channels();
+            int type = mat.type();
+            int dstType = CV_MAKE_TYPE(CV_MAT_DEPTH(type), 1);
+
+            int size[3] = { cn, rows, cols };
+            this->create(3, size, dstType);
+            uchar *data = m.data;
+            int step = rows * cols * CV_ELEM_SIZE(dstType);
+
+            if (cn == 1)
+            {
+                Mat wrapper2D(rows, cols, dstType, m.data);
+                mat.copyTo(wrapper2D);
+            }
+            else
+            {
+                std::vector<Mat> wrappers(cn);
+                for (int i = 0; i < cn; i++)
+                {
+                    wrappers[i] = Mat(rows, cols, dstType, data);
+                    data += step;
+                }
+
+                cv::split(mat, wrappers);
+            }
+        }
+        else
+        {
+            CV_Error(cv::Error::StsNotImplemented, "Not Implemented");
+        }
+    }
+
+    inline void squeezeShape_(const int srcDims, const int *srcSizes, const int dstDims, int *dstSizes)
+    {
+        const int m = std::min(dstDims, srcDims);
+
+        //copy common (trailing) dimensions
+        for (int i = 0; i < m; i++)
+            dstSizes[dstDims - 1 - i] = srcSizes[srcDims - 1 - i];
+
+        //then either flatten extra source dimensions into the first destination dimension...
+        for (int i = m; i < srcDims; i++)
+            dstSizes[0] *= srcSizes[srcDims - 1 - i];
+
+        //...or fill the remaining destination dimensions with 1
+        for (int i = m; i < dstDims; i++)
+            dstSizes[dstDims - 1 - i] = 1;
+    }
+
+    static Vec4i squeezeShape4(const int ndims, const int *sizes)
+    {
+        Vec4i res;
+        squeezeShape_(ndims, sizes, 4, &res[0]);
+        return res;
+    }
+
+    void Blob::fill(int ndims, const int *sizes, int type, void *data, bool deepCopy)
+    {
+        CV_Assert(type == CV_32F || type == CV_64F);
+
+        Vec4i shape = squeezeShape4(ndims, sizes);
+
+        if (deepCopy)
+        {
+            m.create(4, &shape[0], type);
+            size_t dataSize = m.total() * m.elemSize();
+            memcpy(m.data, data, dataSize);
+        }
+        else
+        {
+            m = Mat(shape.channels, &shape[0], type, data);
+        }
+    }
+
+    void Blob::fill(InputArray in)
+    {
+        CV_Assert(in.isMat() || in.isMatVector());
+
+        //TODO
+        *this = Blob(in);
+    }
+
+    void Blob::create(int ndims, const int *sizes, int type)
+    {
+        CV_Assert(type == CV_32F || type == CV_64F);
+        Vec4i shape = squeezeShape4(ndims, sizes);
+        m.create(shape.channels, &shape[0], type);
+    }
+
+    void Blob::create(Vec4i shape, int type)
+    {
+        m.create(shape.channels, &shape[0], type);
+    }
+
+    void Blob::create(int num, int cn, int rows, int cols, int type)
+    {
+        Vec4i shape(num, cn, rows, cols);
+        create(4, &shape[0], type);
+    }
+
+    Vec4i Blob::shape4() const
+    {
+        return squeezeShape4(dims(), sizes());
+    }
+
+
+}
+}
\ No newline at end of file
diff --git a/modules/dnn/src/caffe/glog_emulator.hpp b/modules/dnn/src/caffe/glog_emulator.hpp
index 887bd7a2a..518066adb 100644
--- a/modules/dnn/src/caffe/glog_emulator.hpp
+++ b/modules/dnn/src/caffe/glog_emulator.hpp
@@ -1,4 +1,5 @@
-#pragma once
+#ifndef __OPENCV_DNN_CAFFE_GLOG_EMULATOR__
+#define __OPENCV_DNN_CAFFE_GLOG_EMULATOR__
 #include
 #include
 #include
@@ -52,3 +53,4 @@ public:
 };
 
 }
+#endif
\ No newline at end of file
diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index fe214b705..43ef32703 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -16,114 +16,6 @@ namespace cv
 namespace dnn
 {
 
-Blob::Blob()
-{
-    int zeros[4] = {0, 0, 0, 0};
-    m = Mat(4, zeros, CV_32F, NULL);
-}
-
-Blob::Blob(InputArray in)
-{
-    CV_Assert(in.isMat() || in.isUMat());
-
-    if (in.isMat())
-    {
-        Mat mat = in.getMat();
-
-        CV_Assert(mat.dims == 2);
-        int rows = mat.rows;
-        int cols = mat.cols;
-        int cn = mat.channels();
-        int type = mat.type();
-        int dstType = CV_MAKE_TYPE(CV_MAT_DEPTH(type), 1);
-
-        int size[3] = { cn, rows, cols };
-        this->create(3, size, dstType);
-        uchar *data = m.data;
-        int step = rows * cols * CV_ELEM_SIZE(dstType);
-
-        if (cn == 1)
-        {
-            Mat wrapper2D(rows, cols, dstType, m.data);
-            mat.copyTo(wrapper2D);
-        }
-        else
-        {
-            std::vector<Mat> wrappers(cn);
-            for (int i = 0; i < cn; i++)
-            {
-                wrappers[i] = Mat(rows, cols, dstType, data);
-                data += step;
-            }
-
-            cv::split(mat, wrappers);
-        }
-    }
-    else
-    {
-        CV_Error(cv::Error::StsNotImplemented, "Not Implemented");
-    }
-}
-
-static Vec4i blobNormalizeShape(int ndims, const int *sizes)
-{
-    Vec4i shape = Vec4i::all(1);
-
-    for (int i = 0; i < std::min(3, ndims); i++)
-        shape[3 - i] = sizes[ndims-1 - i];
-
-    for (int i = 3; i < ndims; i++)
-        shape[0] *= sizes[ndims-1 - i];
-
-    return shape;
-}
-
-void Blob::fill(int ndims, const int *sizes, int type, void *data, bool deepCopy)
-{
-    CV_Assert(type == CV_32F || type == CV_64F);
-
-    Vec4i shape = blobNormalizeShape(ndims, sizes);
-
-    if (deepCopy)
-    {
-        m.create(4, &shape[0], type);
-        size_t dataSize = m.total() * m.elemSize();
-        memcpy(m.data, data, dataSize);
-    }
-    else
-    {
-        m = Mat(shape.channels, &shape[0], type, data);
-    }
-}
-
-void Blob::fill(InputArray in)
-{
-    CV_Assert(in.isMat() || in.isMatVector());
-
-    //TODO
-    *this = Blob(in);
-}
-
-void Blob::create(int ndims, const int *sizes, int type)
-{
-    CV_Assert(type == CV_32F || type == CV_64F);
-    Vec4i shape = blobNormalizeShape(ndims, sizes);
-    m.create(shape.channels, &shape[0], type);
-}
-
-void Blob::create(Vec4i shape, int type)
-{
-    m.create(shape.channels, &shape[0], type);
-}
-
-void Blob::create(int num, int cn, int rows, int cols, int type)
-{
-    Vec4i shape(num, cn, rows, cols);
-    create(4, &shape[0], type);
-}
-
-//////////////////////////////////////////////////////////////////////////
-
 struct LayerOutId
 {
     int lid;
@@ -378,8 +270,6 @@ struct Net::Impl
         if (ld.layerInstance)
             ld.layerInstance->allocate(ld.inputBlobs, ld.outputBlobs);
 
-        //std::cout << ld.name << " shape:" << ld.outputBlobs[0].shape() << std::endl;
-
         ld.flag = 1;
     }
 
diff --git a/modules/dnn/src/layers/concat_layer.cpp b/modules/dnn/src/layers/concat_layer.cpp
index 70eae6d12..d434d8035 100644
--- a/modules/dnn/src/layers/concat_layer.cpp
+++ b/modules/dnn/src/layers/concat_layer.cpp
@@ -34,8 +34,8 @@ namespace dnn
         int axisSum = 0;
         for (size_t i = 0; i < inputs.size(); i++)
         {
-            Vec4i refShape = inputs[0]->shape();
-            Vec4i curShape = inputs[i]->shape();
+            Vec4i refShape = inputs[0]->shape4();
+            Vec4i curShape = inputs[i]->shape4();
 
             for (int axisId = 0; axisId < 4; axisId++)
             {
@@ -46,7 +46,7 @@ namespace dnn
             axisSum += curShape[axis];
         }
 
-        Vec4i shape = inputs[0]->shape();
+        Vec4i shape = inputs[0]->shape4();
         shape[axis] = axisSum;
         outputs.resize(1);
         outputs[0].create(shape);
@@ -54,13 +54,13 @@
     void ConcatLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
     {
-        float *dstPtr = outputs[0].ptr<float>();
+        float *dstPtr = outputs[0].ptrf();
 
         if (axis == 0)
        {
             for (size_t i = 0; i < inputs.size(); i++)
             {
-                const float *srcPtr = inputs[i]->ptr<float>();
+                const float *srcPtr = inputs[i]->ptrf();
                 memcpy(dstPtr, srcPtr, inputs[i]->total() * sizeof(float));
                 dstPtr += inputs[i]->total();
             }
         }
@@ -72,7 +72,7 @@
             for (size_t i = 0; i < inputs.size(); i++)
             {
                 Blob &inp = *inputs[i];
-                memcpy(dstPtr, inp.ptr<float>(n), inp.total(1) * sizeof(float));
+                memcpy(dstPtr, inp.ptrf(n), inp.total(1) * sizeof(float));
                 dstPtr += inp.total(1);
             }
         }
diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp
index 3a3b6214f..8bc9d4ae9 100644
--- a/modules/dnn/src/layers/convolution_layer.cpp
+++ b/modules/dnn/src/layers/convolution_layer.cpp
@@ -131,11 +131,11 @@ namespace dnn
         {
             for (int g = 0; g < group; g++)
             {
-                float *srcPtr = input.ptr<float>(n, g*groupCn);
+                float *srcPtr = input.ptrf(n, g*groupCn);
                 im2col_cpu(srcPtr, groupCn, inH, inW, kernelH, kernelW, padH, padW, strideH, strideW, srcColPtr);
 
-                float *kerPtr = learnedParams[0].ptr<float>(g*groupCnOut);
-                float *dstPtr = output.ptr<float>(n, g*groupCnOut);
+                float *kerPtr = learnedParams[0].ptrf(g*groupCnOut);
+                float *dstPtr = output.ptrf(n, g*groupCnOut);
 
                 Mat kerMat(groupCnOut, kerSize, CV_32F, kerPtr);
                 Mat dstMat(groupCnOut, outH*outW, CV_32F, dstPtr);
@@ -144,7 +144,7 @@ namespace dnn
 
                 if (bias)
                 {
-                    float *biasPtr = learnedParams[1].ptr<float>() + g*groupCnOut;
+                    float *biasPtr = learnedParams[1].ptrf() + g*groupCnOut;
                     Mat biasMat(groupCnOut, 1, CV_32F, biasPtr);
                     cv::gemm(biasMat, biasOnesMat, 1, dstMat, 1, dstMat);
                 }
diff --git a/modules/dnn/src/layers/elementwise_layers.cpp b/modules/dnn/src/layers/elementwise_layers.cpp
index 735dc7212..27482735e 100644
--- a/modules/dnn/src/layers/elementwise_layers.cpp
+++ b/modules/dnn/src/layers/elementwise_layers.cpp
@@ -28,8 +28,8 @@ namespace dnn
 
         for (size_t i = 0; i < inputs.size(); i++)
         {
-            CV_Assert(inputs[i]->ptr<float>() == outputs[i].ptr<float>());
-            float *data = outputs[i].ptr<float>();
+            CV_Assert(inputs[i]->ptrf() == outputs[i].ptrf());
+            float *data = outputs[i].ptrf();
             size_t size = outputs[i].total();
 
             for (size_t j = 0; j < size; j++)
diff --git a/modules/dnn/src/layers/fully_connected_layer.cpp b/modules/dnn/src/layers/fully_connected_layer.cpp
index 7195af5e9..616b6bb73 100644
--- a/modules/dnn/src/layers/fully_connected_layer.cpp
+++ b/modules/dnn/src/layers/fully_connected_layer.cpp
@@ -64,7 +64,7 @@ namespace dnn
 
     void FullyConnectedLayer::reshape(const Blob &inp, Blob &out)
     {
-        Vec4i inpShape = inp.shape();
+        Vec4i inpShape = inp.shape4();
         Vec4i outShape = Vec4i::all(1);
 
         for (int a = 0; a < axis; a++)
@@ -82,9 +82,9 @@ namespace dnn
             int N = numOutputs;
             int K = innerSize;
 
-            Mat srcMat(M, K, CV_32F, inputs[i]->ptr<float>());
-            Mat weights(N, K, CV_32F, learnedParams[0].ptr<float>());
-            Mat dstMat(M, N, CV_32F, outputs[i].ptr<float>());
+            Mat srcMat(M, K, CV_32F, inputs[i]->ptrf());
+            Mat weights(N, K, CV_32F, learnedParams[0].ptrf());
+            Mat dstMat(M, N, CV_32F, outputs[i].ptrf());
 
             //important: Caffe stores weights as transposed array
             cv::gemm(srcMat, weights, 1, noArray(), 0, dstMat, GEMM_2_T);
@@ -92,7 +92,7 @@ namespace dnn
             if (bias)
             {
                 Mat biasOnesMat = Mat::ones(M, 1, CV_32F);
-                Mat biasMat(1, N, CV_32F, learnedParams[1].ptr<float>());
+                Mat biasMat(1, N, CV_32F, learnedParams[1].ptrf());
                 cv::gemm(biasOnesMat, biasMat, 1, dstMat, 1, dstMat);
             }
         }
diff --git a/modules/dnn/src/layers/lrn_layer.cpp b/modules/dnn/src/layers/lrn_layer.cpp
index cb676f08c..4b52b7a45 100644
--- a/modules/dnn/src/layers/lrn_layer.cpp
+++ b/modules/dnn/src/layers/lrn_layer.cpp
@@ -58,7 +58,7 @@ namespace dnn
         CV_Assert(inputs.size() == 1);
         outputs.resize(1);
 
-        Vec4i shape = inputs[0]->shape();
+        Vec4i shape = inputs[0]->shape4();
         outputs[0].create(shape);
 
         shape[0] = 1; //maybe make shape[0] = 1 too
@@ -86,7 +86,7 @@
     void LRNLayer::channelNoramlization(Blob &srcBlob, Blob &dstBlob)
     {
-        CV_DbgAssert(srcBlob.rawPtr() != dstBlob.rawPtr());
+        CV_DbgAssert(srcBlob.ptrRaw() != dstBlob.ptrRaw());
 
         int num = srcBlob.num();
         int channels = srcBlob.channels();
diff --git a/modules/dnn/src/layers/pooling_layer.cpp b/modules/dnn/src/layers/pooling_layer.cpp
index 5b7997ea0..5fe154b8d 100644
--- a/modules/dnn/src/layers/pooling_layer.cpp
+++ b/modules/dnn/src/layers/pooling_layer.cpp
@@ -104,8 +104,8 @@ namespace dnn
         {
             for (int c = 0; c < input.channels(); ++c)
             {
-                float *srcData = input.ptr<float>(n, c);
-                float *dstData = output.ptr<float>(n, c);
+                float *srcData = input.ptrf(n, c);
+                float *dstData = output.ptrf(n, c);
 
                 for (int ph = 0; ph < pooledH; ++ph)
                 {
@@ -141,8 +141,8 @@ namespace dnn
         {
             for (int c = 0; c < input.channels(); ++c)
             {
-                float *srcData = input.ptr<float>(n, c);
-                float *dstData = output.ptr<float>(n, c);
+                float *srcData = input.ptrf(n, c);
+                float *dstData = output.ptrf(n, c);
 
                 for (int ph = 0; ph < pooledH; ++ph)
                 {
diff --git a/modules/dnn/src/layers/softmax_layer.cpp b/modules/dnn/src/layers/softmax_layer.cpp
index b0d010276..fe0e8bd0d 100644
--- a/modules/dnn/src/layers/softmax_layer.cpp
+++ b/modules/dnn/src/layers/softmax_layer.cpp
@@ -35,7 +35,7 @@ namespace dnn
     {
         CV_Assert(inputs.size() == 1);
 
-        Vec4i shape = inputs[0]->shape();
+        Vec4i shape = inputs[0]->shape4();
         outputs.resize(1);
         outputs[0].create(shape);
 
@@ -48,9 +48,9 @@ namespace dnn
         Blob &src = *inputs[0];
         Blob &dst = outputs[0];
 
-        float *srcPtr = src.ptr<float>();
-        float *dstPtr = dst.ptr<float>();
-        float *bufPtr = maxAggregator.ptr<float>();
+        float *srcPtr = src.ptrf();
+        float *dstPtr = dst.ptrf();
+        float *bufPtr = maxAggregator.ptrf();
 
         size_t outerSize = src.total(0, axis);
         size_t channels = src.size(axis);
diff --git a/modules/dnn/test/cnpy.h b/modules/dnn/test/cnpy.h
index b11013b9d..a071ab89f 100644
--- a/modules/dnn/test/cnpy.h
+++ b/modules/dnn/test/cnpy.h
@@ -13,7 +13,7 @@
 #include
 #include
 #include
-#include
+//#include
 #include
 
 namespace cnpy {
diff --git a/modules/dnn/test/npy_blob.hpp b/modules/dnn/test/npy_blob.hpp
index 997b36fa7..b7a7d44b8 100644
--- a/modules/dnn/test/npy_blob.hpp
+++ b/modules/dnn/test/npy_blob.hpp
@@ -26,7 +26,7 @@ inline cv::dnn::Blob blobFromNPY(const cv::String &path)
 
 inline void saveBlobToNPY(cv::dnn::Blob &blob, const cv::String &path)
 {
-    cv::Vec4i shape = blob.shape();
+    cv::Vec4i shape = blob.shape4();
     cnpy::npy_save(path.c_str(), blob.ptr<float>(), (unsigned*)&shape[0], 4);
 }
 
diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp
index 67ae1a0c9..6171e5278 100644
--- a/modules/dnn/test/test_layers.cpp
+++ b/modules/dnn/test/test_layers.cpp
@@ -48,7 +48,7 @@ static void testLayer(String proto, String caffemodel = String())
     net.forward();
     Blob out = net.getBlob("output");
 
-    EXPECT_TRUE(isEqual(ref.shape(), out.shape()));
+    EXPECT_TRUE(isEqual(ref.shape4(), out.shape4()));
 
     Mat &mRef = ref.getMatRef();
     Mat &mOut = out.getMatRef();
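
Appendix for reviewers — a minimal usage sketch of the new shape accessors. This is illustrative only, not part of the diff; the blob sizes are arbitrary, and the snippet assumes the module builds with this patch applied:

    // Demonstrates the Python-like axis indexing of the refactored Blob.
    #include <opencv2/dnn/blob.hpp>
    #include <cassert>

    void shapeApiDemo()
    {
        cv::dnn::Blob b;
        b.create(2, 3, 4, 5);           // num=2, cn=3, rows=4, cols=5; CV_32F by default

        assert(b.size(-1) == 5);        // negative axes count from the end: cols()
        assert(b.size(5) == 1);         // non-existing axes are treated as size 1,
                                        // while b.sizeAt(5) would raise an error instead

        assert(b.total() == 2*3*4*5);   // all elements
        assert(b.total(1) == 3*4*5);    // elements of a single image in the batch

        float *px = b.ptrf(0, 1, 2, 3); // typed CV_32F access, computed via offset()
        (void)px;
    }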
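
The shape normalization performed by squeezeShape4() is easiest to see on concrete inputs; the values below are hand-traced from squeezeShape_ in blob.cpp above, not produced by running the code:

    // sizes = {6, 7}          (ndims = 2) -> Vec4i(1, 1, 6, 7)    // missing leading dims filled with 1
    // sizes = {2, 3, 4, 5}    (ndims = 4) -> Vec4i(2, 3, 4, 5)    // copied as-is
    // sizes = {2, 3, 4, 5, 6} (ndims = 5) -> Vec4i(2*3, 4, 5, 6)  // extra leading dims flattened into axis 0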
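
Custom layers keep registering through the relocated LayerRegister. A sketch under stated assumptions: MyLayer, createMyLayer and registerMyLayer are hypothetical names, and the allocate/forward signatures are inferred from the built-in layers in this patch (the Layer excerpt above does not spell them out):

    #include <opencv2/dnn.hpp>
    using namespace cv;
    using namespace cv::dnn;

    // Hypothetical pass-through layer; signatures mirror e.g. ConcatLayer::forward.
    class MyLayer : public Layer
    {
    public:
        void allocate(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
        {
            outputs.resize(inputs.size());
            for (size_t i = 0; i < inputs.size(); i++)
                outputs[i].create(inputs[i]->shape4()); // same 4-dim shape as input
        }

        void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
        {
            for (size_t i = 0; i < inputs.size(); i++)
                inputs[i]->getMatRef().copyTo(outputs[i].getMatRef());
        }
    };

    // Matches the LayerRegister::Constuctor typedef: Ptr<Layer> (*)(LayerParams &).
    static Ptr<Layer> createMyLayer(LayerParams &params)
    {
        (void)params; // unused in this sketch
        return Ptr<Layer>(new MyLayer());
    }

    void registerMyLayer()
    {
        LayerRegister::registerLayer("MyLayer", createMyLayer);
    }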