Blob class was significantly refactored

pull/265/head
Vitaliy Lyudvichenko 10 years ago
parent 983823468d
commit 0ebe30a362
17 changed files (lines changed per file):
 240  modules/dnn/include/opencv2/dnn/blob.hpp
   4  modules/dnn/include/opencv2/dnn/dict.hpp
  92  modules/dnn/include/opencv2/dnn/dnn.hpp
 122  modules/dnn/include/opencv2/dnn/dnn.inl.hpp
 132  modules/dnn/src/blob.cpp
   4  modules/dnn/src/caffe/glog_emulator.hpp
 110  modules/dnn/src/dnn.cpp
  12  modules/dnn/src/layers/concat_layer.cpp
   8  modules/dnn/src/layers/convolution_layer.cpp
   4  modules/dnn/src/layers/elementwise_layers.cpp
  10  modules/dnn/src/layers/fully_connected_layer.cpp
   4  modules/dnn/src/layers/lrn_layer.cpp
   8  modules/dnn/src/layers/pooling_layer.cpp
   8  modules/dnn/src/layers/softmax_layer.cpp
   2  modules/dnn/test/cnpy.h
   2  modules/dnn/test/npy_blob.hpp
   2  modules/dnn/test/test_layers.cpp

@@ -0,0 +1,240 @@
#ifndef __OPENCV_DNN_DNN_BLOB_HPP__
#define __OPENCV_DNN_DNN_BLOB_HPP__
#include <opencv2/core.hpp>
#include <vector>
namespace cv
{
namespace dnn
{
/** @brief Provides convenient methods for continuous n-dimensional array processing, dedicated to convolutional neural networks.
It is implemented as a wrapper over \ref cv::Mat and \ref cv::UMat and will support methods for CPU/GPU switching.
*/
class CV_EXPORTS Blob
{
public:
explicit Blob();
explicit Blob(InputArray in);
void create(int ndims, const int *sizes, int type = CV_32F);
void create(Vec4i shape, int type = CV_32F);
void create(int num, int cn, int rows, int cols, int type = CV_32F);
void fill(InputArray in);
void fill(int ndims, const int *sizes, int type, void *data, bool deepCopy = true);
Mat& getMatRef();
const Mat& getMatRef() const;
Mat getMat();
Mat getMat(int n, int cn);
//shape getters
///returns the actual number of blob dimensions
int dims() const;
/** @brief returns the size of the specified dimension (axis)
@param axis dimension index
Python-like indexing is supported, so \p axis can be negative, i.e. -1 denotes the last dimension.
The size of non-existing dimensions is assumed to be 1, so the method always succeeds.
*/
int size(int axis) const;
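//Illustrative values, assuming a blob of shape [2, 3, 4, 5]:
//  size(0) == 2, size(-1) == 5, size(4) == 1 (out-of-range axes are reported as 1)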
/** @brief returns the size of the specified dimension (axis)
@param axis dimension index
Python-like indexing is supported, so \p axis can be negative, i.e. -1 denotes the last dimension.
@note Unlike ::size, if \p axis points to a non-existing dimension, an error is generated.
*/
int sizeAt(int axis) const;
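//e.g. for the same hypothetical [2, 3, 4, 5] blob: sizeAt(-1) == 5, whereas sizeAt(4) generates an error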
/** @brief returns the number of elements in the [\p startAxis, \p endAxis) axis range
@param startAxis starting axis (inverse indexing can be used)
@param endAxis ending (excluded) axis; the default -1 means up to the last axis
@see ::canonicalAxis
*/
size_t total(int startAxis = 0, int endAxis = -1) const;
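//e.g. for a hypothetical [2, 3, 4, 5] blob: total() == 120, total(1) == 60, total(1, 3) == 12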
/** @brief converts an axis index to canonical format (where 0 <= axis < ::dims)
int canonicalAxis(int axis) const;
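//e.g. for a 4-dimensional blob: canonicalAxis(-1) == 3, canonicalAxis(2) == 2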
/** @brief returns the real shape of the blob
std::vector<int> shape() const;
//shape getters oriented toward processing 4-dimensional blobs
int cols() const;
int rows() const;
int channels() const;
int num() const;
Size size2() const;
Vec4i shape4() const;
//CPU data pointer functions
int offset(int n = 0, int cn = 0, int row = 0, int col = 0) const;
uchar *ptrRaw(int n = 0, int cn = 0, int row = 0, int col = 0);
float *ptrf(int n = 0, int cn = 0, int row = 0, int col = 0);
template<typename TFloat>
TFloat *ptr(int n = 0, int cn = 0, int row = 0, int col = 0);
int type() const;
bool isFloat() const;
bool isDouble() const;
private:
const int *sizes() const;
Mat m;
};
//////////////////////////////////////////////////////////////////////////
inline int Blob::canonicalAxis(int axis) const
{
CV_Assert(-dims() <= axis && axis < dims());
if (axis < 0)
{
return dims() + axis;
}
return axis;
}
inline int Blob::size(int axis) const
{
if (axis < 0)
axis += dims();
if (axis < 0 || axis >= dims())
return 1;
return sizes()[axis];
}
inline int Blob::sizeAt(int axis) const
{
CV_Assert(-dims() <= axis && axis < dims());
if (axis < 0)
axis += dims();
return sizes()[axis];
}
inline size_t Blob::total(int startAxis, int endAxis) const
{
startAxis = canonicalAxis(startAxis);
if (endAxis == -1)
endAxis = dims();
CV_Assert(startAxis <= endAxis && endAxis <= dims());
size_t size = 1; //assume that blob isn't empty
for (int i = startAxis; i < endAxis; i++)
size *= (size_t)sizes()[i];
return size;
}
inline int Blob::offset(int n, int cn, int row, int col) const
{
CV_DbgAssert(0 <= n && n < num() && 0 <= cn && cn < channels() && 0 <= row && row < rows() && 0 <= col && col < cols());
return ((n*channels() + cn)*rows() + row)*cols() + col;
}
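//e.g. for a hypothetical [2, 3, 4, 5] blob: offset(1, 2, 3, 4) == ((1*3 + 2)*4 + 3)*5 + 4 == 119,
//i.e. the index of the last element (total() - 1)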
inline float *Blob::ptrf(int n, int cn, int row, int col)
{
CV_Assert(type() == CV_32F);
return (float*)m.data + offset(n, cn, row, col);
}
inline uchar *Blob::ptrRaw(int n, int cn, int row, int col)
{
return m.data + m.elemSize() * offset(n, cn, row, col);
}
template<typename TFloat>
inline TFloat* Blob::ptr(int n, int cn, int row, int col)
{
CV_Assert(type() == cv::DataDepth<TFloat>::value);
return (TFloat*) ptrRaw(n, cn, row, col);
}
inline std::vector<int> Blob::shape() const
{
return std::vector<int>(sizes(), sizes() + dims());
}
inline Mat& Blob::getMatRef()
{
return m;
}
inline const Mat& Blob::getMatRef() const
{
return m;
}
inline Mat Blob::getMat()
{
return m;
}
inline Mat Blob::getMat(int n, int cn)
{
return Mat(rows(), cols(), m.type(), this->ptrRaw(n, cn));
}
inline int Blob::cols() const
{
return size(-1);
}
inline int Blob::rows() const
{
return size(-2);
}
inline Size Blob::size2() const
{
return Size(cols(), rows());
}
inline int Blob::channels() const
{
return size(-3);
}
inline int Blob::num() const
{
return size(-4);
}
inline int Blob::type() const
{
return m.depth();
}
inline bool Blob::isFloat() const
{
return (type() == CV_32F);
}
inline bool Blob::isDouble() const
{
return (type() == CV_64F);
}
inline const int * Blob::sizes() const
{
return &m.size[0];
}
inline int Blob::dims() const
{
return m.dims;
}
}
}
#endif

@@ -1,5 +1,5 @@
#ifndef __OPENCV_DNN_DICT_HPP__
#define __OPENCV_DNN_DICT_HPP__
#ifndef __OPENCV_DNN_DNN_DICT_HPP__
#define __OPENCV_DNN_DNN_DICT_HPP__
#include <opencv2/core.hpp>
#include <map>

@@ -7,62 +7,12 @@
#include <opencv2/core.hpp>
#include <opencv2/dnn/dict.hpp>
#include <opencv2/dnn/blob.hpp>
namespace cv
{
namespace dnn
{
class Layer;
class NetConfiguration;
class Net;
class Blob;
class LayerParams;
//wrapper over cv::Mat and cv::UMat
class CV_EXPORTS Blob
{
public:
explicit Blob();
explicit Blob(InputArray in);
void create(int ndims, const int *sizes, int type = CV_32F);
void create(Vec4i shape, int type = CV_32F);
void create(int num, int cn, int rows, int cols, int type = CV_32F);
void fill(InputArray in);
void fill(int ndims, const int *sizes, int type, void *data, bool deepCopy = true);
Mat& getMatRef();
const Mat& getMatRef() const;
Mat getMat();
Mat getMat(int num, int channel);
//shape getters
int cols() const;
int rows() const;
int channels() const;
int num() const;
Size size2() const;
Vec4i shape() const;
int size(int index) const;
size_t total(int startAxis = 0, int endAxis = -1) const;
uchar *rawPtr(int num = 0, int cn = 0, int row = 0, int col = 0);
template<typename TFloat>
TFloat *ptr(int num = 0, int cn = 0, int row = 0, int col = 0);
int type() const;
bool isFloat() const;
bool isDouble() const;
private:
const int *sizes() const;
int dims() const;
Mat m;
};
class CV_EXPORTS LayerParams : public Dict
{
public:
@@ -70,26 +20,7 @@ namespace dnn
std::vector<Blob> learnedBlobs;
};
class CV_EXPORTS LayerRegister
{
public:
typedef Ptr<Layer> (*Constructor)(LayerParams &params);
static void registerLayer(const String &type, Constructor constructor);
static void unregisterLayer(const String &type);
static Ptr<Layer> createLayerInstance(const String &type, LayerParams& params);
private:
LayerRegister();
struct Impl;
static Ptr<Impl> impl;
};
//this class allows to build new Layers
//Interface class that allows building new Layers
class CV_EXPORTS Layer
{
public:
@@ -166,6 +97,25 @@ namespace dnn
CV_EXPORTS Ptr<Importer> createCaffeImporter(const String &prototxt, const String &caffeModel);
//The layer factory allows creating instances of registered layers.
class CV_EXPORTS LayerRegister
{
public:
typedef Ptr<Layer>(*Constructor)(LayerParams &params);
static void registerLayer(const String &type, Constructor constructor);
static void unregisterLayer(const String &type);
static Ptr<Layer> createLayerInstance(const String &type, LayerParams& params);
private:
LayerRegister();
struct Impl;
static Ptr<Impl> impl;
};
//allows a created layer to be registered automatically at module load time
struct _LayerRegisterer

@@ -1,5 +1,5 @@
#ifndef __OPENCV_DNN_INL_HPP__
#define __OPENCV_DNN_INL_HPP__
#ifndef __OPENCV_DNN_DNN_INL_HPP__
#define __OPENCV_DNN_DNN_INL_HPP__
#include <opencv2/dnn.hpp>
@@ -7,123 +7,7 @@ namespace cv
{
namespace dnn
{
inline Mat& Blob::getMatRef()
{
return m;
}
inline const Mat& Blob::getMatRef() const
{
return m;
}
inline Mat Blob::getMat()
{
return m;
}
inline Mat Blob::getMat(int num, int channel)
{
CV_Assert(0 <= num && num < this->num() && 0 <= channel && channel < this->channels());
return Mat(rows(), cols(), m.type(), this->rawPtr(num, channel));
}
inline int Blob::cols() const
{
CV_DbgAssert(m.dims > 2);
return m.size[m.dims-1];
}
inline int Blob::rows() const
{
CV_DbgAssert(m.dims > 2);
return m.size[m.dims-2];
}
inline Size Blob::size2() const
{
return Size(cols(), rows());
}
inline int Blob::channels() const
{
CV_DbgAssert(m.dims >= 3);
return m.size[m.dims-3];
}
inline int Blob::num() const
{
CV_DbgAssert(m.dims == 4);
return m.size[0];
}
inline Vec4i Blob::shape() const
{
CV_DbgAssert(m.dims == 4);
return Vec4i(m.size.p);
}
inline int Blob::size(int index) const
{
CV_Assert(index >= 0 && index < dims());
return sizes()[index];
}
inline size_t Blob::total(int startAxis, int endAxis) const
{
if (endAxis == -1)
endAxis = dims();
CV_Assert(0 <= startAxis && startAxis <= endAxis && endAxis <= dims());
size_t size = 1; //assume that blob isn't empty
for (int i = startAxis; i < endAxis; i++)
size *= (size_t) sizes()[i];
return size;
}
inline uchar* Blob::rawPtr(int num, int cn, int row, int col)
{
CV_DbgAssert(m.dims == 4);
return m.data + num * m.step[0] + cn * m.step[1] + row * m.step[2] + col * m.step[3];
}
template<typename TFloat>
TFloat *Blob::ptr(int n, int cn, int row, int col)
{
CV_Assert(m.type() == cv::DataType<TFloat>::type);
CV_Assert(0 <= n && n < num() && 0 <= cn && cn < channels() && 0 <= row && row < rows() && 0 <= col && col < cols());
return (TFloat*) rawPtr(n, cn, row, col);
}
inline int Blob::type() const
{
return m.depth();
}
inline bool Blob::isFloat() const
{
return (type() == CV_32F);
}
inline bool Blob::isDouble() const
{
return (type() == CV_32F);
}
inline const int * Blob::sizes() const
{
return &m.size[0];
}
inline int Blob::dims() const
{
return m.dims;
}
//code is absent ... today
}
}

@@ -0,0 +1,132 @@
#include "precomp.hpp"
namespace cv
{
namespace dnn
{
Blob::Blob()
{
int zeros[4] = { 0, 0, 0, 0 };
m = Mat(4, zeros, CV_32F, NULL);
}
Blob::Blob(InputArray in)
{
CV_Assert(in.isMat() || in.isUMat());
if (in.isMat())
{
Mat mat = in.getMat();
CV_Assert(mat.dims == 2);
int rows = mat.rows;
int cols = mat.cols;
int cn = mat.channels();
int type = mat.type();
int dstType = CV_MAKE_TYPE(CV_MAT_DEPTH(type), 1);
int size[3] = { cn, rows, cols };
this->create(3, size, dstType);
uchar *data = m.data;
int step = rows * cols * CV_ELEM_SIZE(dstType);
if (cn == 1)
{
Mat wrapper2D(rows, cols, dstType, m.data);
mat.copyTo(wrapper2D);
}
else
{
std::vector<Mat> wrappers(cn);
for (int i = 0; i < cn; i++)
{
wrappers[i] = Mat(rows, cols, dstType, data);
data += step;
}
cv::split(mat, wrappers);
}
}
else
{
CV_Error(cv::Error::StsNotImplemented, "Not Implemented");
}
}
inline void squeezeShape_(const int srcDims, const int *srcSizes, const int dstDims, int *dstSizes)
{
const int m = std::min(dstDims, srcDims);
//copy the common (trailing) dimensions
for (int i = 0; i < m; i++)
dstSizes[dstDims - 1 - i] = srcSizes[srcDims - 1 - i];
//either flatten the extra source dimensions into dstSizes[0]...
for (int i = m; i < srcDims; i++)
dstSizes[0] *= srcSizes[srcDims - 1 - i];
//...or fill the missing leading destination dimensions with ones
for (int i = m; i < dstDims; i++)
dstSizes[dstDims - 1 - i] = 1;
}
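//Illustrative behavior (hypothetical shapes):
//  squeezeShape_(5, {2,3,4,5,6}, 4, dst) yields dst = {6,4,5,6}   (leading 2 and 3 are folded into dst[0])
//  squeezeShape_(2, {10,20}, 4, dst)     yields dst = {1,1,10,20} (missing leading dims become 1)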
static Vec4i squeezeShape4(const int ndims, const int *sizes)
{
Vec4i res;
squeezeShape_(ndims, sizes, 4, &res[0]);
return res;
}
void Blob::fill(int ndims, const int *sizes, int type, void *data, bool deepCopy)
{
CV_Assert(type == CV_32F || type == CV_64F);
Vec4i shape = squeezeShape4(ndims, sizes);
if (deepCopy)
{
m.create(4, &shape[0], type);
size_t dataSize = m.total() * m.elemSize();
memcpy(m.data, data, dataSize);
}
else
{
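//note: shape.channels is Vec4i's compile-time channel count, i.e. 4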
m = Mat(shape.channels, &shape[0], type, data);
}
}
void Blob::fill(InputArray in)
{
CV_Assert(in.isMat() || in.isMatVector());
//TODO
*this = Blob(in);
}
void Blob::create(int ndims, const int *sizes, int type)
{
CV_Assert(type == CV_32F || type == CV_64F);
Vec4i shape = squeezeShape4(ndims, sizes);
m.create(shape.channels, &shape[0], type);
}
void Blob::create(Vec4i shape, int type)
{
m.create(shape.channels, &shape[0], type);
}
void Blob::create(int num, int cn, int rows, int cols, int type)
{
Vec4i shape(num, cn, rows, cols);
create(4, &shape[0], type);
}
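//Minimal usage sketch (hypothetical values):
//  Blob b;
//  b.create(1, 3, 224, 224);    //4-dim NCHW blob, CV_32F by default
//  float *chan1 = b.ptrf(0, 1); //pointer to the beginning of channel 1 of image 0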
Vec4i Blob::shape4() const
{
return squeezeShape4(dims(), sizes());
}
}
}

@@ -1,4 +1,5 @@
#pragma once
#ifndef __OPENCV_DNN_CAFFE_GLOG_EMULATOR__
#define __OPENCV_DNN_CAFFE_GLOG_EMULATOR__
#include <stdlib.h>
#include <iostream>
#include <sstream>
@@ -52,3 +53,4 @@ public:
};
}
#endif

@@ -16,114 +16,6 @@ namespace cv
namespace dnn
{
Blob::Blob()
{
int zeros[4] = {0, 0, 0, 0};
m = Mat(4, zeros, CV_32F, NULL);
}
Blob::Blob(InputArray in)
{
CV_Assert(in.isMat() || in.isUMat());
if (in.isMat())
{
Mat mat = in.getMat();
CV_Assert(mat.dims == 2);
int rows = mat.rows;
int cols = mat.cols;
int cn = mat.channels();
int type = mat.type();
int dstType = CV_MAKE_TYPE(CV_MAT_DEPTH(type), 1);
int size[3] = { cn, rows, cols };
this->create(3, size, dstType);
uchar *data = m.data;
int step = rows * cols * CV_ELEM_SIZE(dstType);
if (cn == 1)
{
Mat wrapper2D(rows, cols, dstType, m.data);
mat.copyTo(wrapper2D);
}
else
{
std::vector<Mat> wrappers(cn);
for (int i = 0; i < cn; i++)
{
wrappers[i] = Mat(rows, cols, dstType, data);
data += step;
}
cv::split(mat, wrappers);
}
}
else
{
CV_Error(cv::Error::StsNotImplemented, "Not Implemented");
}
}
static Vec4i blobNormalizeShape(int ndims, const int *sizes)
{
Vec4i shape = Vec4i::all(1);
for (int i = 0; i < std::min(3, ndims); i++)
shape[3 - i] = sizes[ndims-1 - i];
for (int i = 3; i < ndims; i++)
shape[0] *= sizes[ndims-1 - i];
return shape;
}
void Blob::fill(int ndims, const int *sizes, int type, void *data, bool deepCopy)
{
CV_Assert(type == CV_32F || type == CV_64F);
Vec4i shape = blobNormalizeShape(ndims, sizes);
if (deepCopy)
{
m.create(4, &shape[0], type);
size_t dataSize = m.total() * m.elemSize();
memcpy(m.data, data, dataSize);
}
else
{
m = Mat(shape.channels, &shape[0], type, data);
}
}
void Blob::fill(InputArray in)
{
CV_Assert(in.isMat() || in.isMatVector());
//TODO
*this = Blob(in);
}
void Blob::create(int ndims, const int *sizes, int type)
{
CV_Assert(type == CV_32F || type == CV_64F);
Vec4i shape = blobNormalizeShape(ndims, sizes);
m.create(shape.channels, &shape[0], type);
}
void Blob::create(Vec4i shape, int type)
{
m.create(shape.channels, &shape[0], type);
}
void Blob::create(int num, int cn, int rows, int cols, int type)
{
Vec4i shape(num, cn, rows, cols);
create(4, &shape[0], type);
}
//////////////////////////////////////////////////////////////////////////
struct LayerOutId
{
int lid;
@@ -378,8 +270,6 @@ struct Net::Impl
if (ld.layerInstance)
ld.layerInstance->allocate(ld.inputBlobs, ld.outputBlobs);
//std::cout << ld.name << " shape:" << ld.outputBlobs[0].shape() << std::endl;
ld.flag = 1;
}

@@ -34,8 +34,8 @@ namespace dnn
int axisSum = 0;
for (size_t i = 0; i < inputs.size(); i++)
{
Vec4i refShape = inputs[0]->shape();
Vec4i curShape = inputs[i]->shape();
Vec4i refShape = inputs[0]->shape4();
Vec4i curShape = inputs[i]->shape4();
for (int axisId = 0; axisId < 4; axisId++)
{
@@ -46,7 +46,7 @@ namespace dnn
axisSum += curShape[axis];
}
Vec4i shape = inputs[0]->shape();
Vec4i shape = inputs[0]->shape4();
shape[axis] = axisSum;
outputs.resize(1);
outputs[0].create(shape);
@@ -54,13 +54,13 @@ namespace dnn
void ConcatLayer::forward(std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
{
float *dstPtr = outputs[0].ptr<float>();
float *dstPtr = outputs[0].ptrf();
if (axis == 0)
{
for (size_t i = 0; i < inputs.size(); i++)
{
const float *srcPtr = inputs[i]->ptr<float>();
const float *srcPtr = inputs[i]->ptrf();
memcpy(dstPtr, srcPtr, inputs[i]->total() * sizeof(float));
dstPtr += inputs[i]->total();
}
@@ -72,7 +72,7 @@ namespace dnn
for (size_t i = 0; i < inputs.size(); i++)
{
Blob &inp = *inputs[i];
memcpy(dstPtr, inp.ptr<float>(n), inp.total(1) * sizeof(float));
memcpy(dstPtr, inp.ptrf(n), inp.total(1) * sizeof(float));
dstPtr += inp.total(1);
}
}

@@ -131,11 +131,11 @@ namespace dnn
{
for (int g = 0; g < group; g++)
{
float *srcPtr = input.ptr<float>(n, g*groupCn);
float *srcPtr = input.ptrf(n, g*groupCn);
im2col_cpu(srcPtr, groupCn, inH, inW, kernelH, kernelW, padH, padW, strideH, strideW, srcColPtr);
float *kerPtr = learnedParams[0].ptr<float>(g*groupCnOut);
float *dstPtr = output.ptr<float>(n, g*groupCnOut);
float *kerPtr = learnedParams[0].ptrf(g*groupCnOut);
float *dstPtr = output.ptrf(n, g*groupCnOut);
Mat kerMat(groupCnOut, kerSize, CV_32F, kerPtr);
Mat dstMat(groupCnOut, outH*outW, CV_32F, dstPtr);
@@ -144,7 +144,7 @@ namespace dnn
if (bias)
{
float *biasPtr = learnedParams[1].ptr<float>() + g*groupCnOut;
float *biasPtr = learnedParams[1].ptrf() + g*groupCnOut;
Mat biasMat(groupCnOut, 1, CV_32F, biasPtr);
cv::gemm(biasMat, biasOnesMat, 1, dstMat, 1, dstMat);
}

@ -28,8 +28,8 @@ namespace dnn
for (size_t i = 0; i < inputs.size(); i++)
{
CV_Assert(inputs[i]->ptr<float>() == outputs[i].ptr<float>());
float *data = outputs[i].ptr<float>();
CV_Assert(inputs[i]->ptrf() == outputs[i].ptrf());
float *data = outputs[i].ptrf();
size_t size = outputs[i].total();
for (size_t j = 0; j < size; j++)

@@ -64,7 +64,7 @@ namespace dnn
void FullyConnectedLayer::reshape(const Blob &inp, Blob &out)
{
Vec4i inpShape = inp.shape();
Vec4i inpShape = inp.shape4();
Vec4i outShape = Vec4i::all(1);
for (int a = 0; a < axis; a++)
@@ -82,9 +82,9 @@ namespace dnn
int N = numOutputs;
int K = innerSize;
Mat srcMat(M, K, CV_32F, inputs[i]->ptr<float>());
Mat weights(N, K, CV_32F, learnedParams[0].ptr<float>());
Mat dstMat(M, N, CV_32F, outputs[i].ptr<float>());
Mat srcMat(M, K, CV_32F, inputs[i]->ptrf());
Mat weights(N, K, CV_32F, learnedParams[0].ptrf());
Mat dstMat(M, N, CV_32F, outputs[i].ptrf());
//important: Caffe stores weights as transposed array
cv::gemm(srcMat, weights, 1, noArray(), 0, dstMat, GEMM_2_T);
@@ -92,7 +92,7 @@ namespace dnn
if (bias)
{
Mat biasOnesMat = Mat::ones(M, 1, CV_32F);
Mat biasMat(1, N, CV_32F, learnedParams[1].ptr<float>());
Mat biasMat(1, N, CV_32F, learnedParams[1].ptrf());
cv::gemm(biasOnesMat, biasMat, 1, dstMat, 1, dstMat);
}
}

@@ -58,7 +58,7 @@ namespace dnn
CV_Assert(inputs.size() == 1);
outputs.resize(1);
Vec4i shape = inputs[0]->shape();
Vec4i shape = inputs[0]->shape4();
outputs[0].create(shape);
shape[0] = 1; //maybe make shape[0] = 1 too
@@ -86,7 +86,7 @@ namespace dnn
void LRNLayer::channelNoramlization(Blob &srcBlob, Blob &dstBlob)
{
CV_DbgAssert(srcBlob.rawPtr() != dstBlob.rawPtr());
CV_DbgAssert(srcBlob.ptrRaw() != dstBlob.ptrRaw());
int num = srcBlob.num();
int channels = srcBlob.channels();

@@ -104,8 +104,8 @@ namespace dnn
{
for (int c = 0; c < input.channels(); ++c)
{
float *srcData = input.ptr<float>(n, c);
float *dstData = output.ptr<float>(n, c);
float *srcData = input.ptrf(n, c);
float *dstData = output.ptrf(n, c);
for (int ph = 0; ph < pooledH; ++ph)
{
@@ -141,8 +141,8 @@ namespace dnn
{
for (int c = 0; c < input.channels(); ++c)
{
float *srcData = input.ptr<float>(n, c);
float *dstData = output.ptr<float>(n, c);
float *srcData = input.ptrf(n, c);
float *dstData = output.ptrf(n, c);
for (int ph = 0; ph < pooledH; ++ph)
{

@@ -35,7 +35,7 @@ namespace dnn
{
CV_Assert(inputs.size() == 1);
Vec4i shape = inputs[0]->shape();
Vec4i shape = inputs[0]->shape4();
outputs.resize(1);
outputs[0].create(shape);
@@ -48,9 +48,9 @@ namespace dnn
Blob &src = *inputs[0];
Blob &dst = outputs[0];
float *srcPtr = src.ptr<float>();
float *dstPtr = dst.ptr<float>();
float *bufPtr = maxAggregator.ptr<float>();
float *srcPtr = src.ptrf();
float *dstPtr = dst.ptrf();
float *bufPtr = maxAggregator.ptrf();
size_t outerSize = src.total(0, axis);
size_t channels = src.size(axis);

@@ -13,7 +13,7 @@
#include<typeinfo>
#include<iostream>
#include<cassert>
#include<zlib.h>
//#include<zlib.h>
#include<map>
namespace cnpy {

@@ -26,7 +26,7 @@ inline cv::dnn::Blob blobFromNPY(const cv::String &path)
inline void saveBlobToNPY(cv::dnn::Blob &blob, const cv::String &path)
{
cv::Vec4i shape = blob.shape();
cv::Vec4i shape = blob.shape4();
cnpy::npy_save(path.c_str(), blob.ptr<float>(), (unsigned*)&shape[0], 4);
}

@@ -48,7 +48,7 @@ static void testLayer(String proto, String caffemodel = String())
net.forward();
Blob out = net.getBlob("output");
EXPECT_TRUE(isEqual(ref.shape(), out.shape()));
EXPECT_TRUE(isEqual(ref.shape4(), out.shape4()));
Mat &mRef = ref.getMatRef();
Mat &mOut = out.getMatRef();
