Merge pull request #18077 from l-bat:reduce_sum

* Supported ReduceSum op

* Skip test
Author: Liubov Batanina (committed by GitHub)
commit f3cebb3e1b (parent 2b227f00f2)
Changed files:
  modules/dnn/src/layers/pooling_layer.cpp    (26 lines changed)
  modules/dnn/src/tensorflow/tf_importer.cpp  (41 lines changed)
  modules/dnn/test/test_darknet_importer.cpp  (2 lines changed)
  modules/dnn/test/test_tf_importer.cpp       (12 lines changed)
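Before the diffs, a quick usage sketch of what the patch enables: the TensorFlow importer can now map tf.reduce_sum onto a new SUM pooling type. The snippet below is a minimal sketch assuming a hypothetical frozen graph reduce_sum.pb whose output is tf.reduce_sum(x, axis=[1, 2], keepdims=True); the file name and shapes are illustrative only, not part of this PR.

    #include <opencv2/dnn.hpp>
    #include <opencv2/core.hpp>
    #include <iostream>

    int main()
    {
        // "reduce_sum.pb" is a placeholder graph (assumption), not a file from this PR.
        cv::dnn::Net net = cv::dnn::readNetFromTensorflow("reduce_sum.pb");

        // NCHW blob of ones: 1 image, 3 channels, 4x4 spatial grid.
        cv::Mat blob(std::vector<int>{1, 3, 4, 4}, CV_32F, cv::Scalar(1.0f));
        net.setInput(blob);

        // With a global spatial SUM, every output value should be 4 * 4 = 16.
        cv::Mat out = net.forward();
        std::cout << out.reshape(1, 1) << std::endl;
        return 0;
    }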

modules/dnn/src/layers/pooling_layer.cpp

@@ -98,6 +98,8 @@ public:
             type = AVE;
         else if (pool == "stochastic")
             type = STOCHASTIC;
+        else if (pool == "sum")
+            type = SUM;
         else
             CV_Error(Error::StsBadArg, "Unknown pooling type \"" + pool + "\"");
@@ -195,7 +197,7 @@ public:
                 return type == MAX || type == AVE;
             }
             else
-                return type != STOCHASTIC;
+                return type != STOCHASTIC && type != SUM;
         }
 #endif
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
@@ -288,7 +290,7 @@ public:
                 maxPooling(inputs[0], outputs[0], mask);
                 break;
             }
-            case AVE:
+            case AVE: case SUM:
                 CV_Assert_N(inputs.size() == 1, outputs.size() == 1);
                 avePooling(inputs[0], outputs[0]);
                 break;
@@ -366,7 +368,7 @@ public:
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        CV_Assert_N((inputs.size() == 1 && (type == MAX || type == AVE)) || inputs.size() == 2, nodes.size() == inputs.size());
+        CV_Assert_N((inputs.size() == 1 && (type == MAX || type == AVE || type == SUM)) || inputs.size() == 2, nodes.size() == inputs.size());
         auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
         ngraph::op::PadType pad_type = ngraph::op::PadType::EXPLICIT;

@@ -381,6 +383,19 @@
                                            exclude_pad, rounding_type, pad_type);
             return Ptr<BackendNode>(new InfEngineNgraphNode(ave_pool));
         }
+        else if (type == SUM) {
+            ngraph::Shape inpShape = ieInpNode->get_shape();
+            CV_Assert(inpShape.size() == 2 + kernel_size.size());
+            std::vector<int64_t> axes;
+            for (size_t i = 0; i < kernel_size.size(); i++)
+            {
+                if (inpShape[2 + i] == kernel_size[i])
+                    axes.push_back(2 + i);
+            }
+            auto reduction_axes = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{axes.size()}, axes);
+            auto reduce_sum = std::make_shared<ngraph::op::v1::ReduceSum>(ieInpNode, reduction_axes, true);
+            return Ptr<BackendNode>(new InfEngineNgraphNode(reduce_sum));
+        }
         else if (type == MAX) {
             auto max_pool = std::make_shared<ngraph::op::v1::MaxPool>(ieInpNode, ngraph::Strides(strides),
                 ngraph::Shape(pads_begin), ngraph::Shape(pads_end), ngraph::Shape(kernel_size),
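A note on the nGraph branch just added above: it hands ReduceSum only the spatial axes that the kernel covers completely, i.e. the axes that are globally pooled. The helper below restates that selection logic with plain vectors instead of ngraph::Shape; it is an illustrative re-statement, not code from the PR.

    #include <cstdint>
    #include <vector>

    // Collect the spatial axes (offset by 2 for the N and C dimensions of an
    // NCHW tensor) where the kernel spans the whole input extent.
    std::vector<int64_t> reducedAxes(const std::vector<size_t>& inpShape,  // N, C, spatial...
                                     const std::vector<size_t>& kernel)    // spatial sizes only
    {
        std::vector<int64_t> axes;
        for (size_t i = 0; i < kernel.size(); i++)
            if (inpShape[2 + i] == kernel[i])
                axes.push_back(static_cast<int64_t>(2 + i));
        return axes;  // e.g. {2, 3} for global 2-D pooling of an NCHW input
    }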
@@ -739,7 +754,7 @@
                     }
                 }
             }
-            else if (poolingType == AVE)
+            else if (poolingType == AVE || poolingType == SUM)
             {
                 for( ; x0 < x1; ++x0)
                 {

@@ -750,7 +765,7 @@
                     xend = min(xend, inp_width);
                     float inv_kernel_area = avePoolPaddedArea ? xdelta * ydelta * ddelta :
                                             ((dend - dstart) * (yend - ystart) * (xend - xstart));
-                    inv_kernel_area = 1.0 / inv_kernel_area;
+                    inv_kernel_area = poolingType == AVE ? 1.0 / inv_kernel_area : 1.0;
 #if CV_SIMD128
                     if( isPool2D && xstart > 0 && x0 + 7 < x1 && (x0 + 7) * stride_w - pad_l + kernel_w < inp_width )
                     {
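The CPU path needs no new kernel: SUM reuses avePooling(), and the only behavioral change is that the accumulated window sum is left unscaled (inv_kernel_area becomes 1.0 rather than 1/area). A 1-D toy version of that shared loop, written here only to make the trick explicit; it is not OpenCV's code:

    #include <vector>

    // Slide a window over src; average for AVE, keep the raw sum for SUM.
    // Mirrors the inv_kernel_area switch in the hunk above.
    std::vector<float> pool1D(const std::vector<float>& src, size_t kernel,
                              size_t stride, bool isSum)
    {
        std::vector<float> dst;
        for (size_t start = 0; start + kernel <= src.size(); start += stride)
        {
            float acc = 0.f;
            for (size_t k = 0; k < kernel; ++k)
                acc += src[start + k];
            float inv_kernel_area = isSum ? 1.0f : 1.0f / kernel;
            dst.push_back(acc * inv_kernel_area);
        }
        return dst;
    }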
@@ -1095,6 +1110,7 @@ private:
         MAX,
         AVE,
         STOCHASTIC,
+        SUM,
         ROI,   // RoI pooling, https://arxiv.org/pdf/1504.08083.pdf
         PSROI  // Position-sensitive RoI pooling, https://arxiv.org/pdf/1605.06409.pdf
     };
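With SUM added to the type enum, a sum-pooling layer can also be assembled directly through the layer API, independent of any importer; the "pool" and "global_pooling" strings below are the ones parsed at the top of this file. A hypothetical sketch (makeGlobalSumPool is not a function from the PR):

    #include <opencv2/dnn.hpp>

    // One-layer network performing global SUM pooling, assembled by hand.
    cv::dnn::Net makeGlobalSumPool()
    {
        cv::dnn::LayerParams lp;
        lp.set("pool", "sum");           // parsed into the new SUM type above
        lp.set("global_pooling", true);  // reduce over all spatial dimensions
        cv::dnn::Net net;
        net.addLayerToPrev("sum_pool", "Pooling", lp);
        return net;
    }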

modules/dnn/src/tensorflow/tf_importer.cpp

@@ -2067,7 +2067,7 @@ void TFImporter::populateNet(Net dstNet)
             connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
             connect(layer_id, dstNet, parsePin(layer.input(1)), id, 1);
         }
-        else if (type == "Mean")
+        else if (type == "Mean" || type == "Sum")
         {
             // Computes the mean of elements across dimensions of a tensor.
             // If keepdims is false (default) reduces input_tensor along the dimensions given in axis,
@@ -2116,7 +2116,7 @@
                 LayerParams avgLp;
                 std::string avgName = name + "/avg";
                 CV_Assert(layer_id.find(avgName) == layer_id.end());
-                avgLp.set("pool", "ave");
+                avgLp.set("pool", type == "Mean" ? "ave" : "sum");
                 // pooling kernel H x 1
                 avgLp.set("global_pooling_h", true);
                 avgLp.set("kernel_w", 1);
@@ -2153,11 +2153,44 @@
                 layer_id[name] = id;
                 connect(layer_id, dstNet, Pin(avgName), id, 0);
                 connect(layer_id, dstNet, Pin(layerShapeName), id, 1);
+            } else if (indices.total() == 1) {
+                int axis = toNCHW(indices.at<int>(0));
+                if (axis == 2 || axis == 3)
+                {
+                    layerParams.set("pool", type == "Mean" ? "ave" : "sum");
+                    layerParams.set(axis == 2 ? "kernel_w" : "kernel_h", 1);
+                    layerParams.set(axis == 2 ? "global_pooling_h" : "global_pooling_w", true);
+                    int id = dstNet.addLayer(name, "Pooling", layerParams);
+                    layer_id[name] = id;
+                    connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
+
+                    if (!keepDims)
+                    {
+                        // To keep the correct order after squeezing dims, first change the layout from NCHW to NHWC.
+                        LayerParams permLP;
+                        int order[] = {0, 2, 3, 1};  // From OpenCV's NCHW to NHWC.
+                        permLP.set("order", DictValue::arrayInt<int*>(order, 4));
+                        std::string permName = name + "/nchw";
+                        CV_Assert(layer_id.find(permName) == layer_id.end());
+                        int permId = dstNet.addLayer(permName, "Permute", permLP);
+                        layer_id[permName] = permId;
+                        connect(layer_id, dstNet, Pin(name), permId, 0);
+
+                        LayerParams squeezeLp;
+                        std::string squeezeName = name + "/squeeze";
+                        CV_Assert(layer_id.find(squeezeName) == layer_id.end());
+                        squeezeLp.set("axis", indices.at<int>(0));
+                        squeezeLp.set("end_axis", indices.at<int>(0) + 1);
+                        int squeezeId = dstNet.addLayer(squeezeName, "Flatten", squeezeLp);
+                        layer_id[squeezeName] = squeezeId;
+                        connect(layer_id, dstNet, Pin(permName), squeezeId, 0);
+                    }
+                }
             } else {
                 if (indices.total() != 2 || indices.at<int>(0) != 1 || indices.at<int>(1) != 2)
-                    CV_Error(Error::StsNotImplemented, "Unsupported mode of reduce_mean operation.");
-                layerParams.set("pool", "ave");
+                    CV_Error(Error::StsNotImplemented, "Unsupported mode of reduce_mean or reduce_sum operation.");
+                layerParams.set("pool", type == "Mean" ? "ave" : "sum");
                 layerParams.set("global_pooling", true);
                 int id = dstNet.addLayer(name, "Pooling", layerParams);
                 layer_id[name] = id;
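The new single-axis branch first converts TensorFlow's NHWC axis index to OpenCV's NCHW layout (via the importer's toNCHW helper) and then pools over height or width only; when keepdims is false, the squeeze is emulated with a Permute back to NHWC followed by a Flatten over the reduced axis. For reference, the 4-D axis mapping restated as a self-contained function (an illustrative re-implementation, not the importer's code):

    #include <cassert>

    // Map an axis of a 4-D NHWC tensor to its position in NCHW:
    // N: 0 -> 0, H: 1 -> 2, W: 2 -> 3, C: 3 -> 1.
    int nhwcToNchwAxis(int axis)
    {
        assert(axis >= 0 && axis < 4);
        static const int order[] = {0, 2, 3, 1};
        return order[axis];
    }

So a TF reduction over axis 1 (height) lands on NCHW axis 2, which the branch above expresses as global_pooling_h with a 1-wide kernel.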

modules/dnn/test/test_darknet_importer.cpp

@@ -755,6 +755,8 @@ TEST_P(Test_Darknet_layers, connected)
 TEST_P(Test_Darknet_layers, relu)
 {
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
     testDarknetLayer("relu");
 }

modules/dnn/test/test_tf_importer.cpp

@@ -128,6 +128,13 @@ TEST_P(Test_TensorFlow_layers, reduce_mean)
     runTensorFlowNet("global_pool_by_axis");
 }
+
+TEST_P(Test_TensorFlow_layers, reduce_sum)
+{
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    runTensorFlowNet("sum_pool_by_axis");
+}
 TEST_P(Test_TensorFlow_layers, conv_single_conv)
 {
     runTensorFlowNet("single_conv");
@@ -340,6 +347,11 @@ TEST_P(Test_TensorFlow_layers, pooling_reduce_mean)
     runTensorFlowNet("reduce_mean");  // an average pooling over all spatial dimensions.
 }
+
+TEST_P(Test_TensorFlow_layers, pooling_reduce_sum)
+{
+    runTensorFlowNet("reduce_sum");  // a SUM pooling over all spatial dimensions.
+}
 TEST_P(Test_TensorFlow_layers, max_pool_grad)
 {
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
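The two new tests cover complementary paths: sum_pool_by_axis exercises the single-axis importer branch, reduce_sum the global one. A simple invariant worth keeping in mind when debugging either: for the same input, a global SUM equals a global AVE scaled by the pooled area. A hypothetical cross-check helper (checkSumVsAve is not part of the test suite):

    #include <opencv2/core.hpp>
    #include <cassert>

    // For the same input, global SUM pooling should equal global AVE pooling
    // multiplied by the pooled area H * W (assumption: no padding involved).
    void checkSumVsAve(const cv::Mat& sumOut, const cv::Mat& aveOut, int H, int W)
    {
        cv::Mat scaled = aveOut * static_cast<double>(H * W);
        assert(cv::norm(sumOut, scaled, cv::NORM_INF) < 1e-4);
    }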
