Merge pull request #11182 from dkurt:fix_11102_part_2

Alexander Alekhin 7 years ago
commit e8a67de0d2
Changed files:
  1. modules/dnn/src/layers/prior_box_layer.cpp (4 changed lines)
  2. modules/dnn/src/tensorflow/tf_graph_simplifier.cpp (4 changed lines)
  3. modules/dnn/test/test_layers.cpp (6 changed lines)

modules/dnn/src/layers/prior_box_layer.cpp

@@ -419,8 +419,6 @@ public:
             stepY = _stepY;
         }
-        int _outChannelSize = _layerHeight * _layerWidth * _numPriors * 4;
         float* outputPtr = outputs[0].ptr<float>();
         float _boxWidth, _boxHeight;
         for (size_t h = 0; h < _layerHeight; ++h)
@@ -444,6 +442,8 @@ public:
         // clip the prior's coordidate such that it is within [0, 1]
         if (_clip)
         {
+            int _outChannelSize = _layerHeight * _layerWidth * _numPriors * 4;
+            outputPtr = outputs[0].ptr<float>();
             for (size_t d = 0; d < _outChannelSize; ++d)
             {
                 outputPtr[d] = std::min<float>(std::max<float>(outputPtr[d], 0.), 1.);
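The box-generation loop above advances outputPtr as it writes priors, so the moved lines point it back at the start of the output blob (and compute _outChannelSize at the same spot) right before the clipping pass runs. The pass itself is just a clamp of every coordinate into [0, 1]; a minimal standalone sketch of that idea, using a hypothetical clipPriors helper rather than the layer's real interface:

#include <algorithm>
#include <cstddef>
#include <vector>

// Clamp every prior-box coordinate into [0, 1], as PriorBox does when "clip" is set.
void clipPriors(std::vector<float>& priors)  // hypothetical helper, not OpenCV API
{
    for (std::size_t d = 0; d < priors.size(); ++d)
        priors[d] = std::min(std::max(priors[d], 0.f), 1.f);
}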

modules/dnn/src/tensorflow/tf_graph_simplifier.cpp

@@ -219,7 +219,7 @@ public:
         Mat epsMat = getTensorContent(inputNodes.back()->attr().at("value").tensor());
         CV_Assert(epsMat.total() == 1, epsMat.type() == CV_32FC1);
-        fusedNode->mutable_input()->ReleaseLast();
+        fusedNode->mutable_input()->RemoveLast();
         fusedNode->clear_attr();
         tensorflow::AttrValue epsilon;
         epsilon.set_f(epsMat.at<float>(0));
@@ -254,7 +254,7 @@ public:
         Mat epsMat = getTensorContent(inputNodes.back()->attr().at("value").tensor());
         CV_Assert(epsMat.total() == 1, epsMat.type() == CV_32FC1);
-        fusedNode->mutable_input()->ReleaseLast();
+        fusedNode->mutable_input()->RemoveLast();
         fusedNode->clear_attr();
         tensorflow::AttrValue epsilon;
         epsilon.set_f(epsMat.at<float>(0));
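The ReleaseLast() to RemoveLast() switch is about ownership in protobuf repeated fields: ReleaseLast() detaches the last element and returns a pointer the caller must delete, so calling it and discarding the result, as the old code did, leaks the element; RemoveLast() removes and frees it in one step. A minimal sketch of the difference on a RepeatedPtrField<std::string> (the type behind NodeDef's mutable_input()); the dropLastInput name is only for illustration:

#include <string>
#include <google/protobuf/repeated_field.h>

// Remove the last input of a node's repeated string field without leaking it.
void dropLastInput(google::protobuf::RepeatedPtrField<std::string>* inputs)  // illustrative helper
{
    inputs->RemoveLast();                       // removes and frees the element

    // Old pattern (leaks if the result is discarded):
    // std::string* s = inputs->ReleaseLast();  // caller now owns *s
    // delete s;                                // required to avoid the leak
}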

modules/dnn/test/test_layers.cpp

@@ -842,7 +842,7 @@ TEST(Layer_PriorBox, squares)
     LayerParams lp;
     lp.name = "testPriorBox";
     lp.type = "PriorBox";
-    lp.set("min_size", 32);
+    lp.set("min_size", 2);
     lp.set("flip", true);
     lp.set("clip", true);
     float variance[] = {0.1f, 0.1f, 0.2f, 0.2f};
@ -858,8 +858,8 @@ TEST(Layer_PriorBox, squares)
net.setInput(blobFromImage(inp));
Mat out = net.forward();
Mat target = (Mat_<float>(4, 4) << -7.75f, -15.5f, 8.25f, 16.5f,
-7.25f, -15.5f, 8.75f, 16.5f,
Mat target = (Mat_<float>(4, 4) << 0.0, 0.0, 0.75, 1.0,
0.25, 0.0, 1.0, 1.0,
0.1f, 0.1f, 0.2f, 0.2f,
0.1f, 0.1f, 0.2f, 0.2f);
normAssert(out.reshape(1, 4), target);
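The new expected values follow from the parameters visible above: with clip enabled every prior coordinate must lie in [0, 1], so the first two rows of target are the generated square priors clamped to the unit square (the old -7.75 / 16.5 style values were unclipped), while the last two rows repeat the test's variance vector {0.1, 0.1, 0.2, 0.2} for each prior. As an illustration with made-up unclipped numbers (not the values the layer actually produces before clipping):

#include <algorithm>

int main()
{
    // Made-up unclipped prior (x1, y1, x2, y2); clamping each coordinate into
    // [0, 1] yields {0, 0, 0.75, 1}, the first row of the updated target.
    float box[4] = {-0.25f, -0.1f, 0.75f, 1.2f};
    for (float& v : box)
        v = std::min(std::max(v, 0.f), 1.f);
    return 0;
}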
