Merge pull request #25277 from alexlyulkov:al/dnn-int-tests

Added int tests for CumSum, Scatter, Tile and ReduceSum dnn layers #25277

Fixed a bug in the Tile layer.
Fixed a bug in the Reduce layer by reimplementing it.

Fixed the type filters in the Scatter and ScatterND layers (the data input may now be CV_64S).

PR for extra: https://github.com/opencv/opencv_extra/pull/1161


### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [ ] There is a reference to the original bug report and related work
- [x] There is accuracy test, performance test and test data in opencv_extra repository, if applicable
      Patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake
Changed files (lines changed):

- modules/dnn/src/layers/scatterND_layer.cpp (2)
- modules/dnn/src/layers/scatter_layer.cpp (2)
- modules/dnn/src/layers/tile_layer.cpp (2)
- modules/dnn/src/onnx/onnx_importer.cpp (3)
- modules/dnn/test/test_int.cpp (193)
- modules/dnn/test/test_onnx_importer.cpp (19)

modules/dnn/src/layers/scatterND_layer.cpp:

```diff
@@ -76,7 +76,7 @@ public:
                           std::vector<MatType>& internals) const CV_OVERRIDE
     {
         CV_CheckEQ(inputs.size(), (size_t)3, "");
-        CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_32S || inputs[0] == CV_16F || inputs[0] == CV_8U, "");
+        CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_32S || inputs[0] == CV_64S || inputs[0] == CV_16F || inputs[0] == CV_8U, "");
         CV_CheckType(inputs[1], inputs[1] == CV_64S || inputs[1] == CV_32S, "");
         CV_CheckTypeEQ(inputs[2], inputs[0], "");
         outputs.assign(1, inputs[0]);
```

modules/dnn/src/layers/scatter_layer.cpp:

```diff
@@ -70,7 +70,7 @@ public:
                           std::vector<MatType>& internals) const CV_OVERRIDE
     {
         CV_CheckEQ(inputs.size(), (size_t)3, "");
-        CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_32S || inputs[0] == CV_16F || inputs[0] == CV_8U, "");
+        CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_32S || inputs[0] == CV_64S || inputs[0] == CV_16F || inputs[0] == CV_8U, "");
         CV_CheckType(inputs[1], inputs[1] == CV_64S || inputs[1] == CV_32S, "");
         CV_CheckTypeEQ(inputs[2], inputs[0], "");
         outputs.assign(1, inputs[0]);
```
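Both Scatter hunks make the same change: CV_64S joins the accepted types for the data input (and, via the CV_CheckTypeEQ line, for the updates input as well). For context, a minimal sketch of driving a Scatter layer with int64 data, wired the way the new multi-input tests in test_int.cpp wire their layers; the scenario and values below are illustrative, not part of this diff:

```cpp
// Sketch only: exercise a Scatter layer with CV_64S data through cv::dnn.
#include <opencv2/dnn.hpp>
using namespace cv;
using namespace cv::dnn;

void runScatterInt64()
{
    Net net;
    LayerParams lp;
    lp.type = "Scatter";
    lp.name = "testLayer";
    lp.set("axis", 0);
    int id = net.addLayerToPrev(lp.name, lp.type, lp);
    net.connect(0, 0, id, 0);               // input 0: data
    net.connect(0, 1, id, 1);               // input 1: indices
    net.connect(0, 2, id, 2);               // input 2: updates
    net.setInputsNames({"data", "indices", "updates"});

    Mat data    = Mat::zeros(4, 4, CV_64S); // now passes the widened filter
    Mat indices = Mat::zeros(4, 4, CV_64S); // CV_32S or CV_64S allowed
    Mat updates = Mat::ones(4, 4, CV_64S);  // must match the data type

    net.setInput(data, "data");
    net.setInput(indices, "indices");
    net.setInput(updates, "updates");
    Mat out = net.forward();                // out.depth() == CV_64S
}
```

The same wiring applies to ScatterND; in both layers the indices input was already allowed to be CV_32S or CV_64S.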

modules/dnn/src/layers/tile_layer.cpp:

```diff
@@ -86,8 +86,8 @@ public:
         {
             tmp = tmp.reshape(0, dims);
             tmp = cv::repeat(tmp, 1, rep_i);
-            dims *= out_shape[i];
         }
+        dims *= out_shape[i];
     }
     tmp = tmp.reshape(0, out_shape);
```
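The fix moves `dims *= out_shape[i];` out of the `rep_i != 1` branch: the running row count must advance on every axis, including those with a repeat factor of 1, otherwise the next reshape slices the buffer at a stale row count. A self-contained sketch of the repeat-based tiling loop with the corrected placement (variable names follow the layer, but this is an illustration, not the layer source):

```cpp
#include <opencv2/core.hpp>
#include <vector>

// Tile "src" by per-axis integer factors using the same reshape + cv::repeat
// trick as the layer: at step i, reshape to one row per already-tiled prefix,
// then repeat the columns to tile axis i.
cv::Mat tileByRepeat(const cv::Mat& src, const std::vector<int>& repeats)
{
    CV_Assert((int)repeats.size() == src.dims);
    std::vector<int> out_shape(src.dims);
    for (int i = 0; i < src.dims; ++i)
        out_shape[i] = src.size[i] * repeats[i];

    cv::Mat tmp = src.clone();  // clone() guarantees a continuous buffer
    int dims = 1;               // rows for the next reshape = prod(out_shape[0..i-1])
    for (int i = 0; i < src.dims; ++i)
    {
        int rep_i = repeats[i];
        if (rep_i != 1)
        {
            tmp = tmp.reshape(0, dims);        // one row per tiled prefix
            tmp = cv::repeat(tmp, 1, rep_i);   // tile axis i across columns
        }
        dims *= out_shape[i];  // the fix: runs even when rep_i == 1
    }
    return tmp.reshape(0, out_shape);
}
```

With the old placement, any axis with `rep_i == 1` left `dims` untouched, so later reshapes no longer matched the already-tiled prefix.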

modules/dnn/src/onnx/onnx_importer.cpp:

```diff
@@ -1194,6 +1194,9 @@ void ONNXImporter::parseReduce(LayerParams& layerParams, const opencv_onnx::Node
     int num_inputs = node_proto.input_size();
     CV_Check(num_inputs, num_inputs >= 1 && num_inputs <= 2, "DNN/ONNX: Reduce layers should have at least one input and at most two inputs");
+    if (num_inputs >= 2)
+        CV_CheckTrue(constBlobs.find(node_proto.input(1)) != constBlobs.end(), "Reduce layer doesn't support non-constant axes");
+
     // "axes" is turned into one of the inputs since opset 18,
     // except for ReduceSum, which has had an "axes" input since opset 13.
     if (!layerParams.has("axes") && num_inputs == 2 && constBlobs.find(node_proto.input(1)) != constBlobs.end()) {
```
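The new check makes the constant-axes requirement explicit before the branch below consumes the blob. Paraphrased (not a verbatim excerpt; `getBlob` is assumed to be the importer's existing helper for fetching a constant input), that fold amounts to:

```cpp
// Condensed view of how a constant "axes" input becomes a layer attribute.
if (!layerParams.has("axes") && num_inputs == 2 &&
    constBlobs.find(node_proto.input(1)) != constBlobs.end())
{
    Mat axes = getBlob(node_proto, 1);   // the constant axes tensor
    axes.convertTo(axes, CV_32S);
    layerParams.set("axes", DictValue::arrayInt(axes.ptr<int>(), (int)axes.total()));
}
```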

modules/dnn/test/test_int.cpp:

```diff
@@ -496,4 +496,197 @@ INSTANTIATE_TEST_CASE_P(/**/, Test_Flatten_Int, Combine(
     dnnBackendsAndTargets()
 ));
+
+typedef testing::TestWithParam<tuple<int, tuple<Backend, Target> > > Test_Tile_Int;
+TEST_P(Test_Tile_Int, random)
+{
+    int matType = get<0>(GetParam());
+    tuple<Backend, Target> backend_target = get<1>(GetParam());
+    Backend backend = get<0>(backend_target);
+    Target target = get<1>(backend_target);
+
+    std::vector<int> inShape{2, 3, 4, 5};
+    int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
+    Mat input(inShape, matType);
+    cv::randu(input, low, low + 100);
+    std::vector<int> repeats{1, 1, 2, 3};
+
+    Net net;
+    LayerParams lp;
+    lp.type = "Tile";
+    lp.name = "testLayer";
+    lp.set("repeats", DictValue::arrayInt<int*>(repeats.data(), repeats.size()));
+    net.addLayerToPrev(lp.name, lp.type, lp);
+
+    net.setInput(input);
+    net.setPreferableBackend(backend);
+    net.setPreferableTarget(target);
+
+    Mat re;
+    re = net.forward();
+    EXPECT_EQ(re.depth(), matType);
+    EXPECT_EQ(re.size.dims(), 4);
+    EXPECT_EQ(re.size[0], inShape[0] * repeats[0]);
+    EXPECT_EQ(re.size[1], inShape[1] * repeats[1]);
+    EXPECT_EQ(re.size[2], inShape[2] * repeats[2]);
+    EXPECT_EQ(re.size[3], inShape[3] * repeats[3]);
+
+    std::vector<int> inIndices(4);
+    std::vector<int> reIndices(4);
+    for (int i0 = 0; i0 < re.size[0]; ++i0)
+    {
+        inIndices[0] = i0 % inShape[0];
+        reIndices[0] = i0;
+        for (int i1 = 0; i1 < re.size[1]; ++i1)
+        {
+            inIndices[1] = i1 % inShape[1];
+            reIndices[1] = i1;
+            for (int i2 = 0; i2 < re.size[2]; ++i2)
+            {
+                inIndices[2] = i2 % inShape[2];
+                reIndices[2] = i2;
+                for (int i3 = 0; i3 < re.size[3]; ++i3)
+                {
+                    inIndices[3] = i3 % inShape[3];
+                    reIndices[3] = i3;
+                    EXPECT_EQ(getValueAt(re, reIndices.data()), getValueAt(input, inIndices.data()));
+                }
+            }
+        }
+    }
+}
+
+INSTANTIATE_TEST_CASE_P(/**/, Test_Tile_Int, Combine(
+    testing::Values(CV_32S, CV_64S),
+    dnnBackendsAndTargets()
+));
+
+typedef testing::TestWithParam<tuple<int, tuple<Backend, Target> > > Test_Reduce_Int;
+TEST_P(Test_Reduce_Int, random)
+{
+    int matType = get<0>(GetParam());
+    tuple<Backend, Target> backend_target = get<1>(GetParam());
+    Backend backend = get<0>(backend_target);
+    Target target = get<1>(backend_target);
+
+    std::vector<int> inShape{5, 4, 3, 2};
+    int64_t low = matType == CV_64S ? 1000000000000000ll : 100000000;
+    Mat input(inShape, matType);
+    cv::randu(input, low, low + 100);
+    std::vector<int> axes{1};
+
+    Net net;
+    LayerParams lp;
+    lp.type = "Reduce";
+    lp.name = "testLayer";
+    lp.set("reduce", "SUM");
+    lp.set("keepdims", false);
+    lp.set("axes", DictValue::arrayInt<int*>(axes.data(), axes.size()));
+    net.addLayerToPrev(lp.name, lp.type, lp);
+
+    net.setInput(input);
+    net.setPreferableBackend(backend);
+    net.setPreferableTarget(target);
+
+    Mat re;
+    re = net.forward();
+    EXPECT_EQ(re.depth(), matType);
+    EXPECT_EQ(re.size.dims(), 3);
+    EXPECT_EQ(re.size[0], inShape[0]);
+    EXPECT_EQ(re.size[1], inShape[2]);
+    EXPECT_EQ(re.size[2], inShape[3]);
+
+    std::vector<int> inIndices(4);
+    std::vector<int> reIndices(3);
+    for (int i0 = 0; i0 < re.size[0]; ++i0)
+    {
+        inIndices[0] = i0;
+        reIndices[0] = i0;
+        for (int i1 = 0; i1 < re.size[1]; ++i1)
+        {
+            inIndices[2] = i1;
+            reIndices[1] = i1;
+            for (int i2 = 0; i2 < re.size[2]; ++i2)
+            {
+                inIndices[3] = i2;
+                reIndices[2] = i2;
+                int64_t value = 0;
+                for (int j = 0; j < input.size[1]; ++j)
+                {
+                    inIndices[1] = j;
+                    value += getValueAt(input, inIndices.data());
+                }
+                EXPECT_EQ(getValueAt(re, reIndices.data()), value);
+            }
+        }
+    }
+}
+
+TEST_P(Test_Reduce_Int, two_axes)
+{
+    int matType = get<0>(GetParam());
+    tuple<Backend, Target> backend_target = get<1>(GetParam());
+    Backend backend = get<0>(backend_target);
+    Target target = get<1>(backend_target);
+
+    std::vector<int> inShape{5, 4, 3, 2};
+    int64_t low = matType == CV_64S ? 100000000000000ll : 10000000;
+    Mat input(inShape, matType);
+    cv::randu(input, low, low + 100);
+    std::vector<int> axes{1, 3};
+
+    Net net;
+    LayerParams lp;
+    lp.type = "Reduce";
+    lp.name = "testLayer";
+    lp.set("reduce", "SUM");
+    lp.set("keepdims", false);
+    lp.set("axes", DictValue::arrayInt<int*>(axes.data(), axes.size()));
+    net.addLayerToPrev(lp.name, lp.type, lp);
+
+    net.setInput(input);
+    net.setPreferableBackend(backend);
+    net.setPreferableTarget(target);
+
+    Mat re;
+    re = net.forward();
+    EXPECT_EQ(re.depth(), matType);
+    EXPECT_EQ(re.size.dims(), 2);
+    EXPECT_EQ(re.size[0], inShape[0]);
+    EXPECT_EQ(re.size[1], inShape[2]);
+
+    std::vector<int> inIndices(4);
+    std::vector<int> reIndices(2);
+    for (int i0 = 0; i0 < re.size[0]; ++i0)
+    {
+        inIndices[0] = i0;
+        reIndices[0] = i0;
+        for (int i1 = 0; i1 < re.size[1]; ++i1)
+        {
+            inIndices[2] = i1;
+            reIndices[1] = i1;
+            int64_t value = 0;
+            for (int i2 = 0; i2 < input.size[3]; ++i2)
+            {
+                inIndices[3] = i2;
+                for (int j = 0; j < input.size[1]; ++j)
+                {
+                    inIndices[1] = j;
+                    value += getValueAt(input, inIndices.data());
+                }
+            }
+            EXPECT_EQ(getValueAt(re, reIndices.data()), value);
+        }
+    }
+}
+
+INSTANTIATE_TEST_CASE_P(/**/, Test_Reduce_Int, Combine(
+    testing::Values(CV_32S, CV_64S),
+    dnnBackendsAndTargets()
+));
 }} // namespace
```
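The loops above compare elementwise through a `getValueAt` helper defined earlier in test_int.cpp, which reads one element of an n-D Mat at the given indices regardless of depth. A minimal sketch of such a helper (assumed shape; the repository's implementation may differ in the depths it supports):

```cpp
#include <opencv2/core.hpp>

// Read a single element of an n-D Mat as int64, switching on depth.
static int64_t getValueAt(const cv::Mat& m, const int* indices)
{
    switch (m.depth())
    {
    case CV_8U:  return m.at<uchar>(indices);
    case CV_32S: return m.at<int>(indices);
    case CV_64S: return m.at<int64_t>(indices);
    case CV_32F: return static_cast<int64_t>(m.at<float>(indices));
    default:
        CV_Error(cv::Error::BadDepth, "Unsupported depth in test helper");
    }
}
```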

modules/dnn/test/test_onnx_importer.cpp:

```diff
@@ -2630,6 +2630,25 @@ TEST_P(Test_ONNX_layers, CumSum)
     testONNXModels("cumsum_1d_exclusive_1_reverse");
     testONNXModels("cumsum_2d_dim_1");
     testONNXModels("cumsum_3d_dim_2");
+    testONNXModels("cumsum_3d_dim_2_int32");
+    testONNXModels("cumsum_3d_dim_2_int64");
+}
+
+TEST_P(Test_ONNX_layers, ReduceSumInt)
+{
+    testONNXModels("reduce_sum_int64");
+}
+
+TEST_P(Test_ONNX_layers, ScatterInt)
+{
+    testONNXModels("scatter_int32", npy, 0, 0, false, true, 3);
+    testONNXModels("scatter_int64", npy, 0, 0, false, true, 3);
+}
+
+TEST_P(Test_ONNX_layers, TileInt)
+{
+    testONNXModels("tile_int32");
+    testONNXModels("tile_int64");
 }
 
 static void testYOLO(const std::string& weightPath, const std::vector<int>& refClassIds,
```
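For readers skimming the new cases: `testONNXModels` loads the model plus input/reference blobs from the opencv_extra PR linked above. The extra arguments in the Scatter cases are annotated below as understood from the helper's signature in test_onnx_importer.cpp; treat the exact roles and defaults as an assumption:

```cpp
testONNXModels("scatter_int64",
               npy,    // inputs/refs stored as .npy files in opencv_extra
               0, 0,   // l1 / lInf tolerances (0 = use the suite defaults)
               false,  // useSoftmax: don't softmax outputs before comparing
               true,   // checkNoFallbacks: fail on backend fallbacks
               3);     // numInps: Scatter consumes data, indices, updates
```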
