From efbc9f0b66fc0689a0b1fa9f6646dde8a8cc5d1a Mon Sep 17 00:00:00 2001
From: Abduragim Shtanchaev <44877829+Abdurrahheem@users.noreply.github.com>
Date: Thu, 4 Jul 2024 16:25:31 +0300
Subject: [PATCH] Merge pull request #25861 from
 Abdurrahheem:ash/torch-attention-export-fix-4x

Support for Unflatten operation required by Attention layer - 4.x #25861

### Pull Request Readiness Checklist

All test data and models for this PR are located in
[opencv/opencv_extra#1190](https://github.com/opencv/opencv_extra/pull/1190).

This PR fixes an issue raised when importing a batched vanilla `Attention`
layer from `PyTorch` via ONNX. The batched version of the `Attention` layer
in PyTorch [has an unflatten operation inside](https://github.com/pytorch/pytorch/blob/e3b3431c4203e9eeead48f96d4afd462f0b81de5/torch/nn/functional.py#L5500C17-L5500C31).
The `unflatten` operation breaks the `reshape` layer (see the `Reshape_2`
node in the exported graph) because the preceding `slice` layer produces
incorrect output. This PR fixes the `slice` and `concat` layers so that
they handle the empty slice ranges produced by the `unflatten` pattern.
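For reference, here is a minimal sketch (not part of this PR or its test
data; the module, shapes, and file name are illustrative) of how a batched
single-head attention model containing the `unflatten` pattern can be
exported from PyTorch:

```python
import torch
import torch.nn as nn

# Self-attention wrapper with a single graph input, so the exported
# ONNX model is easy to feed from OpenCV DNN.
class SelfAttention(nn.Module):
    def __init__(self, dim=64):
        super().__init__()
        self.attn = nn.MultiheadAttention(embed_dim=dim, num_heads=1,
                                          batch_first=True)

    def forward(self, x):
        out, _ = self.attn(x, x, x, need_weights=False)
        return out

model = SelfAttention().eval()
x = torch.randn(2, 10, 64)  # batched input: (batch, sequence, embedding)
torch.onnx.export(model, x, "attention.onnx", opset_version=14,
                  input_names=["x"], output_names=["y"])
```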
See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [x] There is accuracy test, performance test and test data in opencv_extra repository, if applicable.
      Patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake
---
 modules/dnn/src/layers/concat_layer.cpp |  2 ++
 modules/dnn/src/layers/slice_layer.cpp  | 14 +++++++++-----
 modules/dnn/test/test_onnx_importer.cpp |  7 +++++++
 3 files changed, 18 insertions(+), 5 deletions(-)

diff --git a/modules/dnn/src/layers/concat_layer.cpp b/modules/dnn/src/layers/concat_layer.cpp
index 3a6466bd80..6cb083e453 100644
--- a/modules/dnn/src/layers/concat_layer.cpp
+++ b/modules/dnn/src/layers/concat_layer.cpp
@@ -302,6 +302,8 @@ public:
         ranges[cAxis].start = 0;
         for (size_t i = 0; i < inputs.size(); i++)
         {
+            if (inputs[i].empty())
+                continue;
             ranges[cAxis].end = ranges[cAxis].start + inputs[i].size[cAxis];
             for (int j = 0; j < outMat.dims; ++j)
             {
diff --git a/modules/dnn/src/layers/slice_layer.cpp b/modules/dnn/src/layers/slice_layer.cpp
index de302ec291..195ed7cb24 100644
--- a/modules/dnn/src/layers/slice_layer.cpp
+++ b/modules/dnn/src/layers/slice_layer.cpp
@@ -69,10 +69,12 @@ Range normalizeRange(const Range& input_range, int n)
 {
     Range range = input_range;
 
-    range.start = std::min(std::max(range.start, -n), n - 1);
-    if (range.start < 0)
-    {
-        range.start += n;
+    if (range.start != n){
+        range.start = std::min(std::max(range.start, -n), n - 1);
+        if (range.start < 0)
+        {
+            range.start += n;
+        }
     }
 
     range.end = std::min(std::max(range.end, -n), n);
@@ -610,7 +612,9 @@ public:
         {
             for (size_t i = 0; i < outputs.size(); i++)
             {
-                inpMat(finalSliceRanges[i]).copyTo(outputs[i]);
+                if (finalSliceRanges[i][0].start != finalSliceRanges[i][0].end){
+                    inpMat(finalSliceRanges[i]).copyTo(outputs[i]);
+                }
             }
         }
         else
diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index 8855eb6439..e560ff2dbe 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -3110,6 +3110,13 @@ TEST_P(Test_ONNX_layers, Attention) {
 TEST_P(Test_ONNX_layers, AttentionSingleHead) {
     testONNXModels("attention_single_head");
 }
+TEST_P(Test_ONNX_layers, PyTorchAttentionSingleHead){
+    testONNXModels("pytorch_attention_single_head");
+}
+
+TEST_P(Test_ONNX_layers, PyTorchUnflatten){
+    testONNXModels("unflatten");
+}
 
 TEST_P(Test_ONNX_nets, ViT_B_32) {
     applyTestTag(CV_TEST_TAG_LONG, CV_TEST_TAG_DEBUG_LONG);
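
For completeness, a quick way to exercise the fixed import path from Python
(illustrative only; it mirrors what `testONNXModels` does on the C++ side
and assumes the `attention.onnx` file from the export sketch above):

```python
import numpy as np
import cv2 as cv

# Before this patch, importing the model failed on the slice/concat
# pattern that PyTorch's unflatten produces; with the fix, loading and
# inference succeed.
net = cv.dnn.readNetFromONNX("attention.onnx")
x = np.random.rand(2, 10, 64).astype(np.float32)
net.setInput(x)
y = net.forward()
print(y.shape)  # expected: (2, 10, 64), same shape as the PyTorch output
```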