Merge pull request #25420 from Abdurrahheem:ash/01D-batchnorm

0/1D test for BatchNorm layer #25420

This PR introduces support for 0/1D inputs in the `BatchNorm` layer.
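At inference time, batch normalization computes `y = (x - mean) / sqrt(var + eps) * gamma + beta` elementwise; a 0D input is a scalar tensor (empty shape) and a 1D input is a plain vector. As a minimal sketch of that reference computation on a small `cv::Mat` (all values and names here are illustrative, with `gamma = 1` and `beta = 0` as in the new test):

```cpp
#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    // Reference batch-norm: y = (x - mean) / sqrt(var + eps),
    // with gamma = 1 and beta = 0, matching the new accuracy test.
    const float eps = 1e-5f;
    cv::Mat x    = (cv::Mat_<float>(1, 4) << 1.f, 2.f, 3.f, 4.f);
    cv::Mat mean = (cv::Mat_<float>(1, 4) << 0.5f, 1.0f, 1.5f, 2.0f);
    cv::Mat var  = (cv::Mat_<float>(1, 4) << 1.f, 1.f, 1.f, 1.f);

    cv::Mat denom;
    cv::sqrt(var + eps, denom);      // elementwise sqrt(var + eps)
    cv::Mat y = (x - mean) / denom;  // normalized output

    std::printf("y[0] = %f\n", y.at<float>(0));
    return 0;
}
```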

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [x] There are accuracy tests, performance tests and test data in the opencv_extra repository, if applicable
      Patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake
Files changed:

1. `modules/dnn/src/layers/batch_norm_layer.cpp` (15 lines changed)
2. `modules/dnn/test/test_layers_1d.cpp` (49 lines changed)

`modules/dnn/src/layers/batch_norm_layer.cpp`:

```diff
@@ -163,6 +163,11 @@ public:
                          std::vector<MatShape> &outputs,
                          std::vector<MatShape> &internals) const CV_OVERRIDE
     {
+        if (inputs[0].empty()) { // Support for 0D input
+            outputs.push_back(MatShape()); // Output is also a scalar.
+            return true;
+        }
+
         dims = inputs[0].size();
         if (!useGlobalStats && inputs[0][0] != 1)
             CV_Error(Error::StsNotImplemented, "Batch normalization in training mode with batch size > 1");
```
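For reference, a 0D tensor is encoded here as an empty `MatShape`, and the new branch simply propagates that empty shape to the output. A tiny illustration, assuming `cv::dnn::MatShape` aliases `std::vector<int>` as in OpenCV 4.x:

```cpp
#include <opencv2/dnn.hpp>

// Assumption: cv::dnn::MatShape is std::vector<int> in this OpenCV version.
static bool isScalarShape(const cv::dnn::MatShape& s)
{
    return s.empty(); // {} -> 0D scalar, {4} -> 1D vector of length 4
}
```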
```diff
@@ -272,6 +277,15 @@ public:
         inputs_arr.getMatVector(inputs);
         outputs_arr.getMatVector(outputs);

+        if (inputs[0].dims <= 1) { // Handling for 0D and 1D
+            Mat &inpBlob = inputs[0];
+            Mat &outBlob = outputs[0];
+            CV_Assert(inpBlob.total() == weights_.total());
+            cv::multiply(inpBlob, weights_, outBlob);
+            cv::add(outBlob, bias_, outBlob);
+            return;
+        }
+
         CV_Assert(blobs.size() >= 2);
         CV_Assert(inputs.size() == 1);
```
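This fast path relies on `weights_` and `bias_` being the per-channel scale and shift that the layer precomputes by folding the mean/variance blobs (and the optional learned scale/shift) at construction time, so 0/1D inference reduces to one multiply and one add. A hedged sketch of that fold; the member names and epsilon handling are assumptions based on the surrounding code:

```cpp
#include <opencv2/core.hpp>

// Hedged sketch: fold {mean, var, gamma, beta, eps} into a per-channel
// scale w and shift b so that forward() becomes y = x * w + b.
static void foldBatchNorm(const cv::Mat& mean, const cv::Mat& var,
                          const cv::Mat& gamma, const cv::Mat& beta,
                          float eps, cv::Mat& w, cv::Mat& b)
{
    cv::sqrt(var + eps, w);   // w = sqrt(var + eps)
    cv::divide(gamma, w, w);  // w = gamma / sqrt(var + eps)
    b = beta - w.mul(mean);   // b = beta - w * mean
}
```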
```diff
@@ -284,7 +298,6 @@ public:
         for (size_t ii = 0; ii < outputs.size(); ii++)
         {
             Mat &outBlob = outputs[ii];
-
             for(int num = 0; num < outBlob.size[0]; num++)
             {
                 for (int n = 0; n < outBlob.size[1]; n++)
```

`modules/dnn/test/test_layers_1d.cpp`:

```diff
@@ -603,6 +603,55 @@ INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_FullyConnected_Test,
             std::vector<int>({4})
 ));

+typedef testing::TestWithParam<std::vector<int>> Layer_BatchNorm_Test;
+TEST_P(Layer_BatchNorm_Test, Accuracy_01D)
+{
+    std::vector<int> input_shape = GetParam();
+
+    // Layer parameters
+    LayerParams lp;
+    lp.type = "BatchNorm";
+    lp.name = "BatchNormLayer";
+    lp.set("has_weight", false);
+    lp.set("has_bias", false);
+
+    RNG& rng = TS::ptr()->get_rng();
+    float inp_value = rng.uniform(0.0, 10.0);
+
+    Mat meanMat(input_shape.size(), input_shape.data(), CV_32F, inp_value);
+    Mat varMat(input_shape.size(), input_shape.data(), CV_32F, inp_value);
+    vector<Mat> blobs = {meanMat, varMat};
+    lp.blobs = blobs;
+
+    // Create the layer
+    Ptr<Layer> layer = BatchNormLayer::create(lp);
+
+    Mat input(input_shape.size(), input_shape.data(), CV_32F, 1.0);
+    cv::randn(input, 0, 1);
+    std::vector<Mat> inputs{input};
+    std::vector<Mat> outputs;
+
+    runLayer(layer, inputs, outputs);
+
+    // Create output_ref to compare with the layer output
+    Mat output_ref = input.clone();
+    cv::sqrt(varMat + 1e-5, varMat);
+    output_ref = (output_ref - meanMat) / varMat;
+
+    ASSERT_EQ(outputs.size(), 1);
+    ASSERT_EQ(shape(output_ref), shape(outputs[0]));
+    normAssert(output_ref, outputs[0]);
+}
+
+INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_BatchNorm_Test,
+        testing::Values(
+            std::vector<int>({}),
+            std::vector<int>({4}),
+            std::vector<int>({1, 4}),
+            std::vector<int>({4, 1})
+));
+
 typedef testing::TestWithParam<tuple<std::vector<int>>> Layer_Const_Test;
 TEST_P(Layer_Const_Test, Accuracy_01D)
 {
```
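The parameterization covers the empty shape `{}` (the new 0D path), a 1D vector `{4}`, and the 2D edge cases `{1, 4}` and `{4, 1}`; the first two exercise the new `dims <= 1` branch in `forward`, while the 2D shapes go through the existing path. Once built, the new cases can be run in isolation with GoogleTest's filter, e.g. `opencv_test_dnn --gtest_filter=*Layer_BatchNorm_Test*` (the exact binary path depends on the build layout).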
