Merge pull request #25120 from LaurentBerger:I25103

Fixed ReduceMean layer behaviour #25120

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [x] There is accuracy test, performance test and test data in opencv_extra repository, if applicable
      Patch to opencv_extra has the same branch name.
- [ ] The feature is well documented and sample code can be built with the project CMake

a93c31e3c9/onnxruntime/core/providers/cpu/reduction/reduction_ops.cc (L433-L443)
pull/25149/head
Laurent Berger 11 months ago committed by GitHub
parent 500c55a808
commit 5fe3933346
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. modules/dnn/src/layers/reduce_layer.cpp (5 lines changed)
  2. modules/dnn/test/test_layers.cpp (44 lines changed)

@ -380,9 +380,10 @@ public:
if (unprojected_indices[j] < shape_src[unreduced_axes[j]]) {
break;
}
unprojected_indices[j] = 0;
unprojected_indices[j] -= shape_src[unreduced_axes[j]];
current_step -= shape_src[unreduced_axes[j]] * steps_src[unreduced_axes[j]];
++unprojected_indices[j - 1];
current_step = steps_src[unreduced_axes[j - 1]];
current_step += steps_src[unreduced_axes[j - 1]];
}
}
}

@ -1795,6 +1795,50 @@ INSTANTIATE_TEST_CASE_P(/**/, Layer_Test_ShuffleChannel, Combine(
/*group*/ Values(1, 2, 3, 6), dnnBackendsAndTargets(/*with IE*/ false)
));
// Regression test for the ReduceMean layer (issue #25103): reducing a 5-D
// input over each single axis with keepdims=0 must yield both the correct
// output shape (input shape with that axis removed) and the correct values.
TEST(Layer_Test_ReduceMean, accuracy_input_0)
{
    std::vector<int> szData = { 2, 1, 2, 1, 2 };
    std::vector<float> initData = { 0, 1, 2, 3, 4, 5, 6, 7 };
    Mat inpInitA(szData, CV_32FC1, Mat(initData).data);
    // Expected results of a mean-reduction over axis i (keepdims=0).
    std::vector<float> resAxes0 = { 2, 3, 4, 5 };
    std::vector<float> resAxes1 = { 0, 1, 2, 3, 4, 5, 6, 7 };
    std::vector<float> resAxes2 = { 1, 2, 5, 6 };
    std::vector<float> resAxes3 = { 0, 1, 2, 3, 4, 5, 6, 7 };
    std::vector<float> resAxes4 = { 0.5, 2.5, 4.5, 6.5 };
    std::vector<std::vector<float>> resReduceMean = { resAxes0, resAxes1, resAxes2, resAxes3, resAxes4 };
    // size_t index: avoids the signed/unsigned comparison with .size().
    for (size_t i = 0; i < resReduceMean.size(); i++)
    {
        Net net;
        LayerParams lp;
        lp.set("keepdims", 0);
        lp.type = "Reduce";
        lp.set("reduce", "MEAN");
        lp.name = "testReduceMean";
        lp.set("axes", (int)i);  // reduce over the single axis i
        lp.blobs.push_back(inpInitA);
        net.addLayerToPrev(lp.name, lp.type, lp);
        net.setInput(inpInitA);
        net.setPreferableBackend(DNN_BACKEND_OPENCV);
        Mat output = net.forward();
        // Ground-truth shape: the input shape with axis i dropped (keepdims=0).
        MatShape gt_shape;
        for (size_t j = 0; j < szData.size(); j++)
        {
            if (i == j) continue;
            gt_shape.push_back(szData[j]);
        }
        EXPECT_EQ(gt_shape, shape(output));
        // Flatten to a column vector; total() is size_t, reshape expects int.
        Mat a = output.reshape(1, (int)output.total());
        normAssert(a, Mat(resReduceMean[i]));
    }
}
// Check if relu is not fused to convolution if we requested it's output
TEST(Layer_Test_Convolution, relu_fusion)
{

Loading…
Cancel
Save