Support ReduceLayer without Reshape layer.

pull/22199/head
Zihao Mu 2 years ago
parent 98c33c605d
commit d4640f4647
  1. 3
      modules/dnn/include/opencv2/dnn/all_layers.hpp
  2. 29
      modules/dnn/src/int8layers/reduce_layer.cpp
  3. 28
      modules/dnn/src/layers/reduce_layer.cpp
  4. 28
      modules/dnn/src/onnx/onnx_importer.cpp
  5. 1
      modules/dnn/test/test_onnx_importer.cpp

@@ -334,7 +334,8 @@ CV__DNN_INLINE_NS_BEGIN
{ {
public: public:
int reduceType; int reduceType;
std::vector<size_t> reduceDims; // reduceDims contains the dimensions that need to be reduced, targetDims is the target output dimension.
std::vector<size_t> reduceDims, targetDims;
static Ptr<ReduceLayer> create(const LayerParams& params); static Ptr<ReduceLayer> create(const LayerParams& params);
}; };

@@ -38,6 +38,15 @@ public:
{ {
reduceDims[i] = tempDims.get<int>(i); reduceDims[i] = tempDims.get<int>(i);
} }
CV_Assert(params.has("target_dims"));
tempDims = params.get("target_dims");
n = tempDims.size();
targetDims.resize(n);
for (i = 0; i < n; i++)
{
targetDims[i] = tempDims.get<int>(i);
}
} }
virtual bool supportBackend(int backendId) CV_OVERRIDE virtual bool supportBackend(int backendId) CV_OVERRIDE
@@ -161,18 +170,30 @@ public:
std::vector<MatShape> &internals) const CV_OVERRIDE std::vector<MatShape> &internals) const CV_OVERRIDE
{ {
CV_Assert(inputs.size() > 0); CV_Assert(inputs.size() > 0);
CV_Assert(reduceDims.size() != 0 && inputs[0].size() >= reduceDims.size()); CV_Assert( reduceDims.size() !=0 && targetDims.size() != 0 && inputs[0].size() >= reduceDims.size());
std::vector<int> outShape; // outShapeTmp can save the right number of `total(outShapeTmp)`. And the outShape is used as the final output shape.
std::vector<int> outShapeTmp, outShape;
outShape.assign(targetDims.begin(), targetDims.end());
if (inputs[0].size() == reduceDims.size()) if (inputs[0].size() == reduceDims.size())
outShape.push_back(1); outShapeTmp.push_back(1);
else else
{ {
for (int i = 0; i < inputs[0].size() - reduceDims.size(); i++) for (int i = 0; i < inputs[0].size() - reduceDims.size(); i++)
{ {
outShape.push_back(inputs[0][i]); outShapeTmp.push_back(inputs[0][i]);
} }
} }
// Support dynamic shape of Batch size.
// Note that: when there are multiple dynamic inputs, we will give an error.
if (total(outShape) != total(outShapeTmp))
{
if (outShape[0] != outShapeTmp[0])
outShape[0] = outShapeTmp[0];
}
CV_Assert(total(outShape) == total(outShapeTmp));
outputs.assign(1, outShape); outputs.assign(1, outShape);
return false; return false;

@@ -61,6 +61,15 @@ public:
{ {
reduceDims[i] = tempDims.get<int>(i); reduceDims[i] = tempDims.get<int>(i);
} }
CV_Assert(params.has("target_dims"));
tempDims = params.get("target_dims");
n = tempDims.size();
targetDims.resize(n);
for (i = 0; i < n; i++)
{
targetDims[i] = tempDims.get<int>(i);
}
} }
virtual bool supportBackend(int backendId) CV_OVERRIDE virtual bool supportBackend(int backendId) CV_OVERRIDE
@@ -325,18 +334,29 @@ public:
std::vector<MatShape> &internals) const CV_OVERRIDE std::vector<MatShape> &internals) const CV_OVERRIDE
{ {
CV_Assert(inputs.size() > 0); CV_Assert(inputs.size() > 0);
CV_Assert(reduceDims.size() != 0 && inputs[0].size() >= reduceDims.size()); CV_Assert( reduceDims.size() !=0 && targetDims.size() != 0 && inputs[0].size() >= reduceDims.size());
std::vector<int> outShape; // outShapeTmp can save the right number of `total(outShapeTmp)`. And the outShape is used as the final output shape.
std::vector<int> outShapeTmp, outShape;
outShape.assign(targetDims.begin(), targetDims.end());
if (inputs[0].size() == reduceDims.size()) if (inputs[0].size() == reduceDims.size())
outShape.push_back(1); outShapeTmp.push_back(1);
else else
{ {
for (int i = 0; i < inputs[0].size() - reduceDims.size(); i++) for (int i = 0; i < inputs[0].size() - reduceDims.size(); i++)
{ {
outShape.push_back(inputs[0][i]); outShapeTmp.push_back(inputs[0][i]);
} }
} }
// Support dynamic shape of Batch size.
// Note that: when there are multiple dynamic inputs, we will give an error.
if (total(outShape) != total(outShapeTmp) && outShape[0] != outShapeTmp[0])
{
outShape[0] = outShapeTmp[0];
}
CV_Assert(total(outShape) == total(outShapeTmp));
outputs.assign(1, outShape); outputs.assign(1, outShape);
return false; return false;

@@ -1191,7 +1191,7 @@ void ONNXImporter::parseReduce(LayerParams& layerParams, const opencv_onnx::Node
int axesNum = axesMat.total(); int axesNum = axesMat.total();
for (int i = 0; i < axesNum; i++) for (int i = 0; i < axesNum; i++)
{ {
int axis = normalize_axis(static_cast<int>(axesMat.at<float>(i)), inpShape.size()); int axis = normalize_axis(axesMat.at<int>(i), inpShape.size());
shouldDelete[axis] = true; shouldDelete[axis] = true;
} }
} }
@@ -1220,7 +1220,7 @@ void ONNXImporter::parseReduce(LayerParams& layerParams, const opencv_onnx::Node
} }
} }
MatShape targetShape; std::vector<int> targetShape;
for (int i = 0; i < inpShape.size(); ++i) for (int i = 0; i < inpShape.size(); ++i)
{ {
if (!shouldDelete[i]) if (!shouldDelete[i])
@@ -1290,30 +1290,10 @@ void ONNXImporter::parseReduce(LayerParams& layerParams, const opencv_onnx::Node
} }
} }
LayerParams reduceLp = layerParams; layerParams.set("deleted_dims", DictValue::arrayInt(&deletedDims[0], deletedDims.size()));
reduceLp.name = layerParams.name + "/reduce"; layerParams.set("target_dims", DictValue::arrayInt(&targetShape[0], targetShape.size()));
CV_Assert(layer_id.find(reduceLp.name) == layer_id.end());
reduceLp.set("deleted_dims", DictValue::arrayInt(&deletedDims[0], deletedDims.size()));
node_proto.set_input(0, inputString); node_proto.set_input(0, inputString);
node_proto.set_output(0, reduceLp.name);
addLayer(reduceLp, node_proto);
layerParams.type = (depth == CV_8S) ? "ReshapeInt8" : "Reshape";
layerParams.set("dim", DictValue::arrayInt(&targetShape[0], targetShape.size()));
// Set batchsize dim as dynamic to be compatible with batch size >= 2.
if (targetShape.size() > 1)
{
std::vector<int> dynamicAxes = {0}; // The index of batchsize dim is 0.
std::vector<int> inputIndices = {0};
layerParams.set("has_dynamic_shapes", true);
layerParams.set("dynamic_axes", DictValue::arrayInt(dynamicAxes.data(), dynamicAxes.size()));
layerParams.set("input_indices", DictValue::arrayInt(inputIndices.data(), inputIndices.size()));
}
node_proto.set_input(0, node_proto.output(0));
node_proto.set_output(0, output_name); node_proto.set_output(0, output_name);
addLayer(layerParams, node_proto); addLayer(layerParams, node_proto);

@@ -411,7 +411,6 @@ TEST_P(Test_ONNX_layers, ReduceMean)
TEST_P(Test_ONNX_layers, ReduceSum) TEST_P(Test_ONNX_layers, ReduceSum)
{ {
testONNXModels("reduce_sum"); testONNXModels("reduce_sum");
testONNXModels("reduce_sum_axis");
testONNXModels("reduce_sum_axis_dynamic_batch"); testONNXModels("reduce_sum_axis_dynamic_batch");
} }

Loading…
Cancel
Save