diff --git a/modules/dnn/src/layers/blank_layer.cpp b/modules/dnn/src/layers/blank_layer.cpp
index d7c901338..363bdbbc1 100644
--- a/modules/dnn/src/layers/blank_layer.cpp
+++ b/modules/dnn/src/layers/blank_layer.cpp
@@ -49,18 +49,16 @@ class BlankLayerImpl : public BlankLayer
 public:
     BlankLayerImpl(const LayerParams&) {}
 
-    void allocate(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
     {
-        outputs.resize(inputs.size());
-        for (size_t i = 0; i < inputs.size(); i++)
-            outputs[i] = *inputs[i];
+        Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
+        return true;
     }
 
-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
-    {
-        for (size_t i = 0; i < inputs.size(); i++)
-            outputs[i] = *inputs[i];
-    }
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) {}
 };
 
 Ptr<BlankLayer> BlankLayer::create(const LayerParams& params)
diff --git a/modules/dnn/src/layers/elementwise_layers.cpp b/modules/dnn/src/layers/elementwise_layers.cpp
index cf5bcb1bf..1d34887f2 100644
--- a/modules/dnn/src/layers/elementwise_layers.cpp
+++ b/modules/dnn/src/layers/elementwise_layers.cpp
@@ -37,9 +37,9 @@ public:
     ElementWiseLayer(bool run_parallel_=false, const Func &f=Func()) : func(f), run_parallel(run_parallel_) {}
 
     bool getMemoryShapes(const std::vector<MatShape> &inputs,
-                            const int requiredOutputs,
-                            std::vector<MatShape> &outputs,
-                            std::vector<MatShape> &internals) const
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
     {
         Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
         return true;
diff --git a/modules/dnn/src/layers/pooling_layer.cpp b/modules/dnn/src/layers/pooling_layer.cpp
index c378a68e8..aed3ffda3 100644
--- a/modules/dnn/src/layers/pooling_layer.cpp
+++ b/modules/dnn/src/layers/pooling_layer.cpp
@@ -203,7 +203,13 @@ public:
         CV_Assert(inputs.size() != 0);
         Size in(inputs[0][3], inputs[0][2]), out;
 
-        if (padMode.empty()) {
+        if (globalPooling)
+        {
+            out.height = 1;
+            out.width = 1;
+        }
+        else if (padMode.empty())
+        {
             //Yeah, something strange Caffe scheme-)
             out.height = static_cast<int>(ceil(static_cast<float>(in.height + 2 * pad.height -
                                                                   kernel.height) / stride.height)) + 1;
diff --git a/modules/dnn/src/layers/reshape_layer.cpp b/modules/dnn/src/layers/reshape_layer.cpp
index 7d8f284d5..a98e4e962 100644
--- a/modules/dnn/src/layers/reshape_layer.cpp
+++ b/modules/dnn/src/layers/reshape_layer.cpp
@@ -149,6 +149,7 @@ public:
             outputs.push_back(MatShape());
             computeShapeByReshapeMask(inputs[i], newShapeDesc, newShapeRange, outputs.back());
         }
+        internals = outputs;
 
         return true;
     }
@@ -160,9 +161,16 @@ public:
         Mat srcBlob = *inputs[0];
         int dims = srcBlob.dims;
         MatShape inputShape = shape(srcBlob), outShape = shape(outputs[0]);
-        bool channelsReduced = dims > (int)outShape.size() ||
-            (dims == 4 && inputShape[1] > outShape[1]);
-        performReordering = enableReordering && dims == 4 && channelsReduced;
+
+        // input.total() == output.total(), so if reordering is required,
+        // at least one of the matching dimensions will differ.
+        // Example where reordering IS required: from 1x128x4x4 to 1x2048.
+        // Example where reordering is NOT required: from 1x1024x1x1 to 1x1024.
+        bool reorderingRequired = false;
+        const int minDims = min(dims, (int)outShape.size());
+        for (int i = 0; !reorderingRequired && i < minDims; ++i)
+            reorderingRequired = inputShape[i] != outShape[i];
+        performReordering = enableReordering && reorderingRequired;
     }
 
     void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
@@ -170,13 +178,11 @@ public:
         for (size_t i = 0; i < inputs.size(); i++)
         {
             Mat srcBlob = *inputs[i];
-            MatShape inputShape = shape(srcBlob), outShape = shape(outputs[i]);
+            MatShape inputShape = shape(srcBlob);
 
             if (performReordering)
             {
-                Mat reordered_blob(inputShape, srcBlob.type());
-
-                float *dstData = reordered_blob.ptr<float>();
+                float *dstData = internals[i].ptr<float>();
                 const float *srcData = srcBlob.ptr<float>();
 
                 int num = inputShape[0], channels = inputShape[1], height = inputShape[2], width = inputShape[3];
@@ -196,8 +202,7 @@ public:
                         }
                     }
                 }
-
-                outputs[i] = reordered_blob.reshape(1, outShape);
+                internals[i].copyTo(outputs[i]);
             }
         }
     }
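For context, a minimal standalone sketch (not part of the patch) of the two ideas in the reshape_layer.cpp hunks above: deciding whether a reorder is needed by comparing the dimensions shared by the input and output shapes, and copying NCHW-ordered data into a differently ordered buffer. The helper names and the NCHW-to-NHWC interpretation of the reorder are assumptions for illustration, not taken from the OpenCV sources.

// Illustrative sketch only; names and the NCHW->NHWC assumption are not from the patch.
#include <algorithm>
#include <vector>

typedef std::vector<int> Shape;  // stand-in for cv::dnn::MatShape

// Reordering is needed iff a dimension shared by both shapes differs,
// e.g. 1x128x4x4 -> 1x2048 (index 1 differs), but not 1x1024x1x1 -> 1x1024.
static bool reorderingRequired(const Shape &in, const Shape &out)
{
    const int minDims = std::min((int)in.size(), (int)out.size());
    for (int i = 0; i < minDims; ++i)
        if (in[i] != out[i])
            return true;
    return false;
}

// Copy NCHW-ordered data into an NHWC-ordered buffer of the same total size.
static void reorderNCHWtoNHWC(const float *src, float *dst,
                              int num, int channels, int height, int width)
{
    for (int n = 0; n < num; ++n)
        for (int c = 0; c < channels; ++c)
            for (int h = 0; h < height; ++h)
                for (int w = 0; w < width; ++w)
                {
                    const int srcIdx = ((n * channels + c) * height + h) * width + w;
                    const int dstIdx = ((n * height + h) * width + w) * channels + c;
                    dst[dstIdx] = src[srcIdx];
                }
}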