Merge pull request #24386 from fengyuentau:fix_dtype_nary_eltwise

dnn: fix inconsistent input dtype for nary eltwise layers #24386

Resolves https://github.com/opencv/opencv/issues/24385
Merge with https://github.com/opencv/opencv_extra/pull/1107
Relates to https://github.com/opencv/opencv/pull/24092#discussion_r1353964405

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [x] There is accuracy test, performance test and test data in opencv_extra repository, if applicable
      Patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake
pull/24371/head
Yuantao Feng 1 year ago committed by GitHub
parent 58285e5468
commit 0507043a55
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 21
      modules/dnn/src/layers/const_layer.cpp
  2. 2
      modules/dnn/src/onnx/onnx_importer.cpp
  3. 3
      modules/dnn/test/test_onnx_importer.cpp

@@ -62,10 +62,15 @@ public:
     {
         std::vector<UMat> outputs;
         outs.getUMatVector(outputs);
-        if (outs.depth() == CV_16S)
-            convertFp16(blobs[0], outputs[0]);
+        if (outs.depth() == CV_16S) {
+            auto blob = blobs[0];
+            if (blob.type() != CV_32F) {
+                blob.convertTo(blob, CV_32F);
+            }
+            convertFp16(blob, outputs[0]);
+        }
         else
-            blobs[0].copyTo(outputs[0]);
+            blobs[0].convertTo(outputs[0], outputs[0].type());
         return true;
     }
 #endif
@@ -80,7 +85,7 @@ public:
         std::vector<Mat> outputs;
         outputs_arr.getMatVector(outputs);
-        blobs[0].copyTo(outputs[0]);
+        blobs[0].convertTo(outputs[0], outputs[0].type());
     }
 
 #ifdef HAVE_CANN
@@ -126,6 +131,8 @@ public:
         ngraph::element::Type dType;
         if (blobs[0].depth() == CV_32F) {
             dType = ngraph::element::f32;
+        } else if (blobs[0].depth() == CV_32S) {
+            dType = ngraph::element::i32;
         } else if (blobs[0].depth() == CV_8S) {
             dType = ngraph::element::i8;
         } else {
@@ -163,7 +170,11 @@ public:
         auto context = reinterpret_cast<csl::CSLContext*>(context_);
 
         CV_Assert(blobs.size() == 1);
-        return make_cuda_node<cuda4dnn::ConstOp>(preferableTarget, std::move(context->stream), blobs[0]);
+        Mat blob = blobs[0];
+        if (blob.type() != CV_32F) {
+            blob.convertTo(blob, CV_32F);
+        }
+        return make_cuda_node<cuda4dnn::ConstOp>(preferableTarget, std::move(context->stream), blob);
     }
 #endif

@@ -383,7 +383,7 @@ void runLayer(LayerParams& params, const std::vector<Mat>& inputs,
     {
         inpShapes[i] = shape(inputs[i]);
         if (i > 0 && ddepth != inputs[i].depth())
-            CV_Error(Error::StsNotImplemented, "Mixed input data types.");
+            CV_Error(Error::StsNotImplemented, cv::format("Mixed input data types. Required type: %d, actual type: %d", ddepth, inputs[i].depth()));
 
         // Quantize and Dequantize layer have different output type than input.
         if (params.type != "Quantize" && params.type != "Dequantize")

@@ -675,6 +675,9 @@ TEST_P(Test_ONNX_layers, Compare_GT)
 {
     testONNXModels("greater");
 }
+TEST_P(Test_ONNX_layers, Greater_input_dtype_int64) {
+    testONNXModels("greater_input_dtype_int64");
+}
 TEST_P(Test_ONNX_layers, Compare_LT)
 {

Loading…
Cancel
Save