diff --git a/modules/dnn/src/layers/prior_box_layer.cpp b/modules/dnn/src/layers/prior_box_layer.cpp
index 73a4afbaf7..b21ccab966 100644
--- a/modules/dnn/src/layers/prior_box_layer.cpp
+++ b/modules/dnn/src/layers/prior_box_layer.cpp
@@ -419,8 +419,6 @@ public:
             stepY = _stepY;
         }
 
-        int _outChannelSize = _layerHeight * _layerWidth * _numPriors * 4;
-
         float* outputPtr = outputs[0].ptr<float>();
         float _boxWidth, _boxHeight;
         for (size_t h = 0; h < _layerHeight; ++h)
@@ -444,6 +442,8 @@ public:
         // clip the prior's coordidate such that it is within [0, 1]
         if (_clip)
         {
+            int _outChannelSize = _layerHeight * _layerWidth * _numPriors * 4;
+            outputPtr = outputs[0].ptr<float>();
             for (size_t d = 0; d < _outChannelSize; ++d)
             {
                 outputPtr[d] = std::min<float>(std::max<float>(outputPtr[d], 0.), 1.);
diff --git a/modules/dnn/src/tensorflow/tf_graph_simplifier.cpp b/modules/dnn/src/tensorflow/tf_graph_simplifier.cpp
index 4d571df8b5..ef609863c6 100644
--- a/modules/dnn/src/tensorflow/tf_graph_simplifier.cpp
+++ b/modules/dnn/src/tensorflow/tf_graph_simplifier.cpp
@@ -219,7 +219,7 @@ public:
         Mat epsMat = getTensorContent(inputNodes.back()->attr().at("value").tensor());
         CV_Assert(epsMat.total() == 1, epsMat.type() == CV_32FC1);
 
-        fusedNode->mutable_input()->ReleaseLast();
+        fusedNode->mutable_input()->RemoveLast();
         fusedNode->clear_attr();
         tensorflow::AttrValue epsilon;
         epsilon.set_f(epsMat.at<float>(0));
@@ -254,7 +254,7 @@ public:
         Mat epsMat = getTensorContent(inputNodes.back()->attr().at("value").tensor());
         CV_Assert(epsMat.total() == 1, epsMat.type() == CV_32FC1);
 
-        fusedNode->mutable_input()->ReleaseLast();
+        fusedNode->mutable_input()->RemoveLast();
         fusedNode->clear_attr();
         tensorflow::AttrValue epsilon;
         epsilon.set_f(epsMat.at<float>(0));
diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp
index 703f8320b2..edc45b1be3 100644
--- a/modules/dnn/test/test_layers.cpp
+++ b/modules/dnn/test/test_layers.cpp
@@ -842,7 +842,7 @@ TEST(Layer_PriorBox, squares)
     LayerParams lp;
     lp.name = "testPriorBox";
     lp.type = "PriorBox";
-    lp.set("min_size", 32);
+    lp.set("min_size", 2);
     lp.set("flip", true);
     lp.set("clip", true);
     float variance[] = {0.1f, 0.1f, 0.2f, 0.2f};
@@ -858,8 +858,8 @@ TEST(Layer_PriorBox, squares)
     net.setInput(blobFromImage(inp));
     Mat out = net.forward();
 
-    Mat target = (Mat_<float>(4, 4) << -7.75f, -15.5f, 8.25f, 16.5f,
-                                       -7.25f, -15.5f, 8.75f, 16.5f,
+    Mat target = (Mat_<float>(4, 4) << 0.0, 0.0, 0.75, 1.0,
+                                       0.25, 0.0, 1.0, 1.0,
                                        0.1f, 0.1f, 0.2f, 0.2f,
                                        0.1f, 0.1f, 0.2f, 0.2f);
    normAssert(out.reshape(1, 4), target);
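
For reference, the new expected values in Layer_PriorBox.squares follow from the Caffe-SSD PriorBox formula once box coordinates are normalized by the image size and clipped to [0, 1]. The standalone sketch below (not OpenCV code) recomputes them for min_size = 2; the 1x2 input image, the 1x2 feature map, and the single square prior per cell are assumptions made for illustration, since those details are not visible in this hunk.

// Standalone sketch: recompute the clipped PriorBox coordinates expected by
// the updated Layer_PriorBox.squares target.
// Assumed for illustration: imgWidth=2, imgHeight=1, feature map 1x2,
// one square prior of size min_size=2 per cell, clip=true.
#include <algorithm>
#include <cstdio>

int main()
{
    const float minSize = 2.f;                  // lp.set("min_size", 2)
    const int imgWidth = 2, imgHeight = 1;      // assumed input size
    const int layerWidth = 2, layerHeight = 1;  // assumed feature map size
    const float stepX = (float)imgWidth / layerWidth;
    const float stepY = (float)imgHeight / layerHeight;

    for (int h = 0; h < layerHeight; ++h)
    {
        for (int w = 0; w < layerWidth; ++w)
        {
            float centerX = (w + 0.5f) * stepX;
            float centerY = (h + 0.5f) * stepY;
            // Normalize by the image size, then clip to [0, 1].
            float xmin = (centerX - minSize / 2) / imgWidth;
            float ymin = (centerY - minSize / 2) / imgHeight;
            float xmax = (centerX + minSize / 2) / imgWidth;
            float ymax = (centerY + minSize / 2) / imgHeight;
            xmin = std::min(std::max(xmin, 0.f), 1.f);
            ymin = std::min(std::max(ymin, 0.f), 1.f);
            xmax = std::min(std::max(xmax, 0.f), 1.f);
            ymax = std::min(std::max(ymax, 0.f), 1.f);
            printf("%g %g %g %g\n", xmin, ymin, xmax, ymax);
        }
    }
    return 0;
}

Under these assumptions the program prints 0 0 0.75 1 and 0.25 0 1 1, matching the first two rows of the new target. The last two rows are the variances (0.1, 0.1, 0.2, 0.2), which PriorBox copies into the second output channel unchanged, so they are the same before and after the fix.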