From 51447663802debc5054c7af346888d22acc1b12f Mon Sep 17 00:00:00 2001
From: alexlyulkov
Date: Thu, 4 Apr 2024 14:23:48 +0300
Subject: [PATCH] Merge pull request #25277 from alexlyulkov:al/dnn-int-tests

Added int tests for CumSum, Scatter, Tile and ReduceSum dnn layers #25277

Fixed a bug in the Tile layer (a sketch of the fix follows below).
Fixed a bug in the Reduce layer by reimplementing it.
Fixed the type filters in the Scatter and ScatterND layers.

PR for extra: https://github.com/opencv/opencv_extra/pull/1161
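To make the Tile fix concrete, here is a minimal sketch of the repeat loop. It is not the actual `tile_layer.cpp` code: the function name, the `repeats[i] != 1` guard, and the shape handling are illustrative assumptions. The bug was that `dims *= out_shape[i]` sat inside the guard, so `dims` was never advanced for axes with a repeat count of 1:

```cpp
// Minimal sketch, assuming a repeats[i] != 1 guard around the repeat step;
// names are illustrative, not the exact OpenCV implementation.
#include <opencv2/core.hpp>
#include <vector>

cv::Mat tileSketch(cv::Mat tmp, const std::vector<int>& out_shape,
                   const std::vector<int>& repeats)
{
    int dims = 1; // product of the output dims processed so far
    for (size_t i = 0; i < out_shape.size(); ++i)
    {
        if (repeats[i] != 1)
        {
            tmp = tmp.reshape(0, dims);           // rows = already-tiled prefix
            tmp = cv::repeat(tmp, 1, repeats[i]); // tile the remaining suffix
        }
        dims *= out_shape[i]; // the fix: update on every axis, even when repeats[i] == 1
    }
    return tmp.reshape(0, out_shape);
}
```

With `repeats = {1, 1, 2, 3}` (as in the new test), the old code reached axis 2 with `dims` still equal to 1, so the reshape produced a single row and `cv::repeat` tiled the whole tensor along axis 0 instead of axis 2.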
### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [ ] There is a reference to the original bug report and related work
- [x] There is an accuracy test, performance test and test data in the opencv_extra repository, if applicable.
      The patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake
---
 modules/dnn/src/layers/scatterND_layer.cpp |   2 +-
 modules/dnn/src/layers/scatter_layer.cpp   |   2 +-
 modules/dnn/src/layers/tile_layer.cpp      |   2 +-
 modules/dnn/src/onnx/onnx_importer.cpp     |   3 +
 modules/dnn/test/test_int.cpp              | 193 +++++++++++++++++++++
 modules/dnn/test/test_onnx_importer.cpp    |  19 ++
 6 files changed, 218 insertions(+), 3 deletions(-)

diff --git a/modules/dnn/src/layers/scatterND_layer.cpp b/modules/dnn/src/layers/scatterND_layer.cpp
index b0d26938b4..41c1a85a01 100644
--- a/modules/dnn/src/layers/scatterND_layer.cpp
+++ b/modules/dnn/src/layers/scatterND_layer.cpp
@@ -76,7 +76,7 @@ public:
                    std::vector<MatType>& internals) const CV_OVERRIDE
     {
         CV_CheckEQ(inputs.size(), (size_t)3, "");
-        CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_32S || inputs[0] == CV_16F || inputs[0] == CV_8U, "");
+        CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_32S || inputs[0] == CV_64S || inputs[0] == CV_16F || inputs[0] == CV_8U, "");
         CV_CheckType(inputs[1], inputs[1] == CV_64S || inputs[1] == CV_32S, "");
         CV_CheckTypeEQ(inputs[2], inputs[0], "");
         outputs.assign(1, inputs[0]);
diff --git a/modules/dnn/src/layers/scatter_layer.cpp b/modules/dnn/src/layers/scatter_layer.cpp
index 48757c6332..4f2d36e0a1 100644
--- a/modules/dnn/src/layers/scatter_layer.cpp
+++ b/modules/dnn/src/layers/scatter_layer.cpp
@@ -70,7 +70,7 @@ public:
                    std::vector<MatType>& internals) const CV_OVERRIDE
     {
         CV_CheckEQ(inputs.size(), (size_t)3, "");
-        CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_32S || inputs[0] == CV_16F || inputs[0] == CV_8U, "");
+        CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_32S || inputs[0] == CV_64S || inputs[0] == CV_16F || inputs[0] == CV_8U, "");
         CV_CheckType(inputs[1], inputs[1] == CV_64S || inputs[1] == CV_32S, "");
         CV_CheckTypeEQ(inputs[2], inputs[0], "");
         outputs.assign(1, inputs[0]);
diff --git a/modules/dnn/src/layers/tile_layer.cpp b/modules/dnn/src/layers/tile_layer.cpp
index 72a59e62c1..09d4511a8d 100644
--- a/modules/dnn/src/layers/tile_layer.cpp
+++ b/modules/dnn/src/layers/tile_layer.cpp
@@ -86,8 +86,8 @@ public:
         {
             tmp = tmp.reshape(0, dims);
             tmp = cv::repeat(tmp, 1, rep_i);
-            dims *= out_shape[i];
         }
+        dims *= out_shape[i];
     }
 
     tmp = tmp.reshape(0, out_shape);
diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp
index 7fffdc0bcd..eb2d1b3ea5 100644
--- a/modules/dnn/src/onnx/onnx_importer.cpp
+++ b/modules/dnn/src/onnx/onnx_importer.cpp
@@ -1194,6 +1194,9 @@ void ONNXImporter::parseReduce(LayerParams& layerParams, const opencv_onnx::Node
     int num_inputs = node_proto.input_size();
     CV_Check(num_inputs, num_inputs >= 1 && num_inputs <= 2, "DNN/ONNX: Reduce layers should have at least one input and at most two inputs");
 
+    if (num_inputs >= 2)
+        CV_CheckTrue(constBlobs.find(node_proto.input(1)) != constBlobs.end(), "Reduce layer doesn't support non constant axes");
+
     // "axes" is turned to one of the inputs since opset 18,
     // except for ReduceSum, which has "axes" input since opset 13.
     if (!layerParams.has("axes") && num_inputs == 2 && constBlobs.find(node_proto.input(1)) != constBlobs.end()) {
diff --git a/modules/dnn/test/test_int.cpp b/modules/dnn/test/test_int.cpp
index 4ec6ae4ec2..397dee59d8 100644
--- a/modules/dnn/test/test_int.cpp
+++ b/modules/dnn/test/test_int.cpp
@@ -496,4 +496,197 @@ INSTANTIATE_TEST_CASE_P(/**/, Test_Flatten_Int, Combine(
     dnnBackendsAndTargets()
 ));
 
+typedef testing::TestWithParam<tuple<int, tuple<Backend, Target> > > Test_Tile_Int;
+TEST_P(Test_Tile_Int, random)
+{
+    int matType = get<0>(GetParam());
+    tuple<Backend, Target> backend_target = get<1>(GetParam());
+    Backend backend = get<0>(backend_target);
+    Target target = get<1>(backend_target);
+
+    std::vector<int> inShape{2, 3, 4, 5};
+    int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
+    Mat input(inShape, matType);
+    cv::randu(input, low, low + 100);
+    std::vector<int> repeats{1, 1, 2, 3};
+
+    Net net;
+    LayerParams lp;
+    lp.type = "Tile";
+    lp.name = "testLayer";
+    lp.set("repeats", DictValue::arrayInt(repeats.data(), repeats.size()));
+    net.addLayerToPrev(lp.name, lp.type, lp);
+
+    net.setInput(input);
+    net.setPreferableBackend(backend);
+    net.setPreferableTarget(target);
+
+    Mat re;
+    re = net.forward();
+    EXPECT_EQ(re.depth(), matType);
+    EXPECT_EQ(re.size.dims(), 4);
+    EXPECT_EQ(re.size[0], inShape[0] * repeats[0]);
+    EXPECT_EQ(re.size[1], inShape[1] * repeats[1]);
+    EXPECT_EQ(re.size[2], inShape[2] * repeats[2]);
+    EXPECT_EQ(re.size[3], inShape[3] * repeats[3]);
+
+    std::vector<int> inIndices(4);
+    std::vector<int> reIndices(4);
+    for (int i0 = 0; i0 < re.size[0]; ++i0)
+    {
+        inIndices[0] = i0 % inShape[0];
+        reIndices[0] = i0;
+        for (int i1 = 0; i1 < re.size[1]; ++i1)
+        {
+            inIndices[1] = i1 % inShape[1];
+            reIndices[1] = i1;
+            for (int i2 = 0; i2 < re.size[2]; ++i2)
+            {
+                inIndices[2] = i2 % inShape[2];
+                reIndices[2] = i2;
+                for (int i3 = 0; i3 < re.size[3]; ++i3)
+                {
+                    inIndices[3] = i3 % inShape[3];
+                    reIndices[3] = i3;
+                    EXPECT_EQ(getValueAt(re, reIndices.data()), getValueAt(input, inIndices.data()));
+                }
+            }
+        }
+    }
+}
+
+INSTANTIATE_TEST_CASE_P(/**/, Test_Tile_Int, Combine(
+    testing::Values(CV_32S, CV_64S),
+    dnnBackendsAndTargets()
+));
+
+typedef testing::TestWithParam<tuple<int, tuple<Backend, Target> > > Test_Reduce_Int;
+TEST_P(Test_Reduce_Int, random)
+{
+    int matType = get<0>(GetParam());
+    tuple<Backend, Target> backend_target = get<1>(GetParam());
+    Backend backend = get<0>(backend_target);
+    Target target = get<1>(backend_target);
+
+    std::vector<int> inShape{5, 4, 3, 2};
+    int64_t low = matType == CV_64S ? 1000000000000000ll : 100000000;
+    Mat input(inShape, matType);
+    cv::randu(input, low, low + 100);
+    std::vector<int> axes{1};
+
+    Net net;
+    LayerParams lp;
+    lp.type = "Reduce";
+    lp.name = "testLayer";
+    lp.set("reduce", "SUM");
+    lp.set("keepdims", false);
+    lp.set("axes", DictValue::arrayInt(axes.data(), axes.size()));
+    net.addLayerToPrev(lp.name, lp.type, lp);
+
+    net.setInput(input);
+    net.setPreferableBackend(backend);
+    net.setPreferableTarget(target);
+
+    Mat re;
+    re = net.forward();
+    EXPECT_EQ(re.depth(), matType);
+    EXPECT_EQ(re.size.dims(), 3);
+    EXPECT_EQ(re.size[0], inShape[0]);
+    EXPECT_EQ(re.size[1], inShape[2]);
+    EXPECT_EQ(re.size[2], inShape[3]);
+
+    std::vector<int> inIndices(4);
+    std::vector<int> reIndices(3);
+
+    for (int i0 = 0; i0 < re.size[0]; ++i0)
+    {
+        inIndices[0] = i0;
+        reIndices[0] = i0;
+        for (int i1 = 0; i1 < re.size[1]; ++i1)
+        {
+            inIndices[2] = i1;
+            reIndices[1] = i1;
+            for (int i2 = 0; i2 < re.size[2]; ++i2)
+            {
+                inIndices[3] = i2;
+                reIndices[2] = i2;
+
+                int64_t value = 0;
+                for (int j = 0; j < input.size[1]; ++j)
+                {
+                    inIndices[1] = j;
+                    value += getValueAt(input, inIndices.data());
+                }
+                EXPECT_EQ(getValueAt(re, reIndices.data()), value);
+            }
+        }
+    }
+}
+
+typedef testing::TestWithParam<tuple<int, tuple<Backend, Target> > > Test_Reduce_Int;
+TEST_P(Test_Reduce_Int, two_axes)
+{
+    int matType = get<0>(GetParam());
+    tuple<Backend, Target> backend_target = get<1>(GetParam());
+    Backend backend = get<0>(backend_target);
+    Target target = get<1>(backend_target);
+
+    std::vector<int> inShape{5, 4, 3, 2};
+    int64_t low = matType == CV_64S ? 100000000000000ll : 10000000;
+    Mat input(inShape, matType);
+    cv::randu(input, low, low + 100);
+    std::vector<int> axes{1, 3};
+
+    Net net;
+    LayerParams lp;
+    lp.type = "Reduce";
+    lp.name = "testLayer";
+    lp.set("reduce", "SUM");
+    lp.set("keepdims", false);
+    lp.set("axes", DictValue::arrayInt(axes.data(), axes.size()));
+    net.addLayerToPrev(lp.name, lp.type, lp);
+
+    net.setInput(input);
+    net.setPreferableBackend(backend);
+    net.setPreferableTarget(target);
+
+    Mat re;
+    re = net.forward();
+    EXPECT_EQ(re.depth(), matType);
+    EXPECT_EQ(re.size.dims(), 2);
+    EXPECT_EQ(re.size[0], inShape[0]);
+    EXPECT_EQ(re.size[1], inShape[2]);
+
+    std::vector<int> inIndices(4);
+    std::vector<int> reIndices(2);
+
+    for (int i0 = 0; i0 < re.size[0]; ++i0)
+    {
+        inIndices[0] = i0;
+        reIndices[0] = i0;
+        for (int i1 = 0; i1 < re.size[1]; ++i1)
+        {
+            inIndices[2] = i1;
+            reIndices[1] = i1;
+            int64_t value = 0;
+            for (int i2 = 0; i2 < input.size[3]; ++i2)
+            {
+                inIndices[3] = i2;
+
+                for (int j = 0; j < input.size[1]; ++j)
+                {
+                    inIndices[1] = j;
+                    value += getValueAt(input, inIndices.data());
+                }
+            }
+            EXPECT_EQ(getValueAt(re, reIndices.data()), value);
+        }
+    }
+}
+
+INSTANTIATE_TEST_CASE_P(/**/, Test_Reduce_Int, Combine(
+    testing::Values(CV_32S, CV_64S),
+    dnnBackendsAndTargets()
+));
+
 }} // namespace
diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index 8413de8ad9..c1127a9d0d 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -2630,6 +2630,25 @@ TEST_P(Test_ONNX_layers, CumSum)
     testONNXModels("cumsum_1d_exclusive_1_reverse");
     testONNXModels("cumsum_2d_dim_1");
     testONNXModels("cumsum_3d_dim_2");
+    testONNXModels("cumsum_3d_dim_2_int32");
+    testONNXModels("cumsum_3d_dim_2_int64");
+}
+
+TEST_P(Test_ONNX_layers, ReduceSumInt)
+{
+    testONNXModels("reduce_sum_int64");
+}
+
+TEST_P(Test_ONNX_layers, ScatterInt)
+{
+    testONNXModels("scatter_int32", npy, 0, 0, false, true, 3);
+    testONNXModels("scatter_int64", npy, 0, 0, false, true, 3);
+}
+
+TEST_P(Test_ONNX_layers, TileInt)
+{
+    testONNXModels("tile_int32");
+    testONNXModels("tile_int64");
 }
 
 static void testYOLO(const std::string& weightPath, const std::vector<int>& refClassIds,