Merge pull request #25755 from alexlyulkov:al/more-types

Added support for more types to dnn layers #25755

Added support for more types to the dnn layers for the CPU, CUDA and OpenVINO backends.
Most of the multi-type layers now support the uint8, int8, int32, int64, float32, float16 and bool types.
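For context, here is a minimal sketch of what the wider coverage enables at the user level, mirroring the new tests in `modules/dnn/test/test_int.cpp` below. It assumes an OpenCV build that includes this patch (and the `CV_Bool`/`CV_64S` depths):

```cpp
#include <opencv2/dnn.hpp>
using namespace cv;
using namespace cv::dnn;

int main()
{
    // Run a Permute layer on an int32 tensor; the output keeps the input depth.
    std::vector<int> inShape{2, 3, 4, 5};
    Mat input(inShape, CV_32S);
    randu(input, 0, 100);

    Net net;
    LayerParams lp;
    lp.type = "Permute";
    lp.name = "testLayer";
    std::vector<int> order{0, 2, 3, 1};
    lp.set("order", DictValue::arrayInt<int*>(&order[0], order.size()));
    net.addLayerToPrev(lp.name, lp.type, lp);

    net.setInput(input);
    Mat out = net.forward();
    CV_Assert(out.depth() == CV_32S); // integer data survives end-to-end
    return 0;
}
```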

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [ ] There is a reference to the original bug report and related work
- [ ] There is accuracy test, performance test and test data in opencv_extra repository, if applicable
      Patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake
Commit 12b8ed1443 by alexlyulkov (parent fd7cb1be85), committed via GitHub.
24 changed files (changed line counts in parentheses):

1. modules/dnn/src/cuda/concat.cu (2)
2. modules/dnn/src/cuda/fill_copy.cu (2)
3. modules/dnn/src/cuda/padding.cu (1)
4. modules/dnn/src/cuda/permute.cu (1)
5. modules/dnn/src/cuda/slice.cu (1)
6. modules/dnn/src/layers/concat_layer.cpp (5)
7. modules/dnn/src/layers/const_layer.cpp (5)
8. modules/dnn/src/layers/expand_layer.cpp (3)
9. modules/dnn/src/layers/flatten_layer.cpp (4)
10. modules/dnn/src/layers/gather_elements_layer.cpp (8)
11. modules/dnn/src/layers/gather_layer.cpp (2)
12. modules/dnn/src/layers/max_unpooling_layer.cpp (8)
13. modules/dnn/src/layers/padding_layer.cpp (21)
14. modules/dnn/src/layers/permute_layer.cpp (28)
15. modules/dnn/src/layers/pooling_layer.cpp (8)
16. modules/dnn/src/layers/reduce_layer.cpp (3)
17. modules/dnn/src/layers/reorg_layer.cpp (10)
18. modules/dnn/src/layers/reshape_layer.cpp (8)
19. modules/dnn/src/layers/scatterND_layer.cpp (8)
20. modules/dnn/src/layers/scatter_layer.cpp (8)
21. modules/dnn/src/layers/slice_layer.cpp (27)
22. modules/dnn/src/layers/split_layer.cpp (22)
23. modules/dnn/src/op_cuda.hpp (6)
24. modules/dnn/test/test_int.cpp (266)

@ -156,6 +156,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
template void concat<uint8_t>(const Stream&, TensorSpan<uint8_t>, std::size_t, TensorView<uint8_t>, std::size_t);
template void concat<int32_t>(const Stream&, TensorSpan<int32_t>, std::size_t, TensorView<int32_t>, std::size_t);
template void concat<int64_t>(const Stream&, TensorSpan<int64_t>, std::size_t, TensorView<int64_t>, std::size_t);
template void concat<bool>(const Stream&, TensorSpan<bool>, std::size_t, TensorView<bool>, std::size_t);
template <class T, std::size_t Rank> static
void launch_concat_with_offsets(
@ -283,5 +284,6 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
template void concat_with_offsets(const Stream&, TensorSpan<uint8_t>, TensorView<uint8_t>, std::vector<std::size_t>);
template void concat_with_offsets(const Stream&, TensorSpan<int32_t>, TensorView<int32_t>, std::vector<std::size_t>);
template void concat_with_offsets(const Stream&, TensorSpan<int64_t>, TensorView<int64_t>, std::vector<std::size_t>);
template void concat_with_offsets(const Stream&, TensorSpan<bool>, TensorView<bool>, std::vector<std::size_t>);
}}}} /* namespace cv::dnn::cuda4dnn::kernels */
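The CUDA changes all follow one pattern: each kernel is written once as a template, and supporting a new element type costs a single explicit instantiation line per kernel, like the `bool` lines added above. A self-contained host-side sketch of the idiom, with stand-in types rather than the real csl headers:

```cpp
#include <cstddef>

// Stand-ins for csl::TensorSpan / csl::TensorView; illustrative only.
template <class T> struct TensorView { const T* data; std::size_t size; };
template <class T> struct TensorSpan { T* data; std::size_t size; };

// The kernel body exists exactly once, as a template...
template <class T>
void concat(TensorSpan<T> output, std::size_t offset, TensorView<T> input)
{
    for (std::size_t i = 0; i < input.size; ++i)
        output.data[offset + i] = input.data[i];
}

// ...and each supported element type is forced into the object file with one
// explicit instantiation.
template void concat<float>(TensorSpan<float>, std::size_t, TensorView<float>);
template void concat<bool>(TensorSpan<bool>, std::size_t, TensorView<bool>);
```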

@ -71,6 +71,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
template void fill(const Stream&, Span<uint8_t>, uint8_t);
template void fill(const Stream&, Span<int>, int);
template void fill(const Stream&, Span<int64_t>, int64_t);
template void fill(const Stream&, Span<bool>, bool);
template <class T, std::size_t N> static
void launch_vectorized_copy(const Stream& stream, Span<T> output, View<T> input) {
@ -101,5 +102,6 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
template void copy(const Stream&, Span<uint8_t>, View<uint8_t>);
template void copy(const Stream&, Span<int32_t>, View<int32_t>);
template void copy(const Stream&, Span<int64_t>, View<int64_t>);
template void copy(const Stream&, Span<bool>, View<bool>);
}}}} /* namespace cv::dnn::cuda4dnn::kernels */

@ -201,5 +201,6 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
template void copy_with_reflection101(const Stream&, TensorSpan<uint8_t>, TensorView<uint8_t>, std::vector<std::pair<std::size_t, std::size_t>> ranges);
template void copy_with_reflection101(const Stream&, TensorSpan<int32_t>, TensorView<int32_t>, std::vector<std::pair<std::size_t, std::size_t>> ranges);
template void copy_with_reflection101(const Stream&, TensorSpan<int64_t>, TensorView<int64_t>, std::vector<std::pair<std::size_t, std::size_t>> ranges);
template void copy_with_reflection101(const Stream&, TensorSpan<bool>, TensorView<bool>, std::vector<std::pair<std::size_t, std::size_t>> ranges);
}}}} /* namespace cv::dnn::cuda4dnn::kernels */

@ -292,5 +292,6 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
template void permute(const Stream&, TensorSpan<uint8_t>, TensorView<uint8_t>, std::vector<std::size_t>);
template void permute(const Stream&, TensorSpan<int32_t>, TensorView<int32_t>, std::vector<std::size_t>);
template void permute(const Stream&, TensorSpan<int64_t>, TensorView<int64_t>, std::vector<std::size_t>);
template void permute(const Stream&, TensorSpan<bool>, TensorView<bool>, std::vector<std::size_t>);
}}}} /* namespace cv::dnn::cuda4dnn::kernels */

@ -203,5 +203,6 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
template void slice(const Stream&, TensorSpan<uint8_t>, TensorView<uint8_t>, std::vector<std::size_t>);
template void slice(const Stream&, TensorSpan<int32_t>, TensorView<int32_t>, std::vector<std::size_t>);
template void slice(const Stream&, TensorSpan<int64_t>, TensorView<int64_t>, std::vector<std::size_t>);
template void slice(const Stream&, TensorSpan<bool>, TensorView<bool>, std::vector<std::size_t>);
}}}} /* namespace cv::dnn::cuda4dnn::kernels */

@ -340,7 +340,10 @@ public:
auto input_wrapper = inputs[0].dynamicCast<CUDABackendWrapper>();
auto concat_axis = normalize_axis(axis, input_wrapper->getRank());
return make_cuda_node_with_type<cuda4dnn::ConcatOp>(preferableTarget, inputs[0]->getHostMatDepth(), std::move(context->stream), concat_axis, padding);
if (inputs[0]->getHostMatDepth() == CV_Bool)
return make_cuda_node_bool<cuda4dnn::ConcatOp>(std::move(context->stream), concat_axis, padding);
else
return make_cuda_node_with_type<cuda4dnn::ConcatOp>(preferableTarget, inputs[0]->getHostMatDepth(), std::move(context->stream), concat_axis, padding);
}
#endif

@ -172,7 +172,10 @@ public:
CV_Assert(blobs.size() == 1);
Mat blob = blobs[0];
return make_cuda_node_with_type<cuda4dnn::ConstOp>(preferableTarget, blob.type(), std::move(context->stream), blob);
if (blob.type() == CV_Bool)
return make_cuda_node_bool<cuda4dnn::ConstOp>(std::move(context->stream), blob);
else
return make_cuda_node_with_type<cuda4dnn::ConstOp>(preferableTarget, blob.type(), std::move(context->stream), blob);
}
#endif
};
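The `if`/`else` added in these layers' CUDA node creation methods routes `CV_Bool` through a dedicated `make_cuda_node_bool` helper instead of the generic `make_cuda_node_with_type`, presumably because the generic factory also resolves the float-versus-half choice for FP16 targets, which has no analogue for bool. A simplified, illustrative sketch of that split (the real helpers live in `modules/dnn/src/op_cuda.hpp`; the names and signatures below are invented for the example):

```cpp
#include <cstdint>
#include <memory>
#include <stdexcept>

struct Node { virtual ~Node() = default; };
template <class T> struct ConcatOp : Node { /* per-type CUDA op */ };

enum class Depth { U8, S8, S32, S64, F32, Bool };

// Generic factory: maps a runtime depth to a concrete Op<T> instantiation.
template <template <class> class Op>
std::unique_ptr<Node> makeNodeWithType(Depth depth)
{
    switch (depth)
    {
    case Depth::U8:  return std::make_unique<Op<std::uint8_t>>();
    case Depth::S8:  return std::make_unique<Op<std::int8_t>>();
    case Depth::S32: return std::make_unique<Op<std::int32_t>>();
    case Depth::S64: return std::make_unique<Op<std::int64_t>>();
    case Depth::F32: return std::make_unique<Op<float>>();
    default: throw std::runtime_error("unsupported depth");
    }
}

// Bool-only factory, chosen up front by the layer when the input is CV_Bool.
template <template <class> class Op>
std::unique_ptr<Node> makeNodeBool()
{
    return std::make_unique<Op<bool>>();
}
```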

@ -85,9 +85,6 @@ public:
std::vector<MatType>& internals) const CV_OVERRIDE
{
CV_Assert(inputs.size());
for (auto input : inputs)
CV_CheckType(input, input == CV_32F || input == CV_16F || input == CV_8S || input == CV_32S || input == CV_64S, "");
outputs.assign(requiredOutputs, inputs[0]);
}

@ -129,9 +129,9 @@ public:
for (auto input : inputs)
{
if (preferableTarget == DNN_TARGET_OPENCL_FP16)
CV_CheckType(input, input == CV_16F || input == CV_32S || input == CV_64S, "");
CV_CheckType(input, input == CV_16F || input == CV_32S || input == CV_64S || input == CV_8S || input == CV_8U || input == CV_Bool, "");
else
CV_CheckType(input, input == CV_32F || input == CV_32S || input == CV_64S, "");
CV_CheckType(input, input == CV_32F || input == CV_32S || input == CV_64S || input == CV_8S || input == CV_8U || input == CV_Bool, "");
}
outputs.assign(requiredOutputs, inputs[0]);

@ -67,7 +67,7 @@ public:
std::vector<MatType>& internals) const CV_OVERRIDE
{
CV_CheckEQ(inputs.size(), (size_t)2, "");
CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_32S || inputs[0] == CV_64S || inputs[0] == CV_16F || inputs[0] == CV_8U, "");
CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_32S || inputs[0] == CV_64S || inputs[0] == CV_16F || inputs[0] == CV_8U || inputs[0] == CV_8S || inputs[0] == CV_Bool, "");
CV_CheckType(inputs[1], inputs[1] == CV_64S || inputs[1] == CV_32S, "");
outputs.assign(1, inputs[0]);
}
@ -159,9 +159,15 @@ public:
{
switch (type)
{
case CV_Bool:
forward_impl<bool, T_INDEX>(std::forward<Args>(args)...);
break;
case CV_8U:
forward_impl<uint8_t, T_INDEX>(std::forward<Args>(args)...);
break;
case CV_8S:
forward_impl<int8_t, T_INDEX>(std::forward<Args>(args)...);
break;
case CV_16F:
forward_impl<int16_t, T_INDEX>(std::forward<Args>(args)...);
break;
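Note the `CV_16F` case dispatching to `forward_impl<int16_t, T_INDEX>`: gather-style layers only relocate elements and never do arithmetic on them, so a half-precision value can be moved around as an opaque 16-bit pattern. A tiny standalone demonstration of why that is safe:

```cpp
#include <cstdint>
#include <cstring>

int main()
{
    std::uint16_t halfBits = 0x3C00; // bit pattern of 1.0 in IEEE half precision
    std::int16_t opaque;
    std::memcpy(&opaque, &halfBits, sizeof opaque);  // "gather" it as int16_t
    std::uint16_t roundTrip;
    std::memcpy(&roundTrip, &opaque, sizeof roundTrip);
    return roundTrip == halfBits ? 0 : 1;            // the bits survive unchanged
}
```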

@ -54,7 +54,7 @@ public:
std::vector<MatType>& internals) const CV_OVERRIDE
{
CV_CheckEQ(inputs.size(), (size_t)2, "");
CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_32S || inputs[0] == CV_64S || inputs[0] == CV_16F || inputs[0] == CV_8U, "");
CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_32S || inputs[0] == CV_64S || inputs[0] == CV_16F || inputs[0] == CV_8U || inputs[0] == CV_8S || inputs[0] == CV_Bool, "");
CV_CheckType(inputs[1], inputs[1] == CV_64S || inputs[1] == CV_32S, "");
outputs.assign(1, inputs[0]);
}

@ -75,7 +75,7 @@ public:
std::vector<MatType>& internals) const CV_OVERRIDE
{
CV_CheckGE(inputs.size(), (size_t)2, "");
CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_16F, "");
CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_16F || inputs[0] == CV_32S || inputs[0] == CV_64S || inputs[0] == CV_8S || inputs[0] == CV_8U, "");
CV_CheckType(inputs[1], inputs[1] == CV_64S || inputs[1] == CV_32S, "");
outputs.assign(1, inputs[0]);
}
@ -107,6 +107,12 @@ public:
{
switch (type)
{
case CV_8S:
run<int8_t, T_INDEX>(std::forward<Args>(args)...);
break;
case CV_8U:
run<uint8_t, T_INDEX>(std::forward<Args>(args)...);
break;
case CV_32S:
run<int32_t, T_INDEX>(std::forward<Args>(args)...);
break;

@ -83,12 +83,10 @@ public:
std::vector<MatType>& internals) const CV_OVERRIDE
{
CV_CheckEQ(inputs.size(), 1u, "");
if (preferableTarget == DNN_TARGET_CUDA_FP16 || preferableTarget == DNN_TARGET_CUDA)
CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_32S || inputs[0] == CV_64S, "");
else if (preferableTarget == DNN_TARGET_OPENCL_FP16)
CV_CheckType(inputs[0], inputs[0] == CV_16F || inputs[0] == CV_8S || inputs[0] == CV_32S || inputs[0] == CV_64S, "");
if (preferableTarget == DNN_TARGET_OPENCL_FP16)
CV_CheckType(inputs[0], inputs[0] == CV_16F || inputs[0] == CV_8S || inputs[0] == CV_8U || inputs[0] == CV_32S || inputs[0] == CV_64S || inputs[0] == CV_Bool, "");
else
CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_8S || inputs[0] == CV_32S || inputs[0] == CV_64S, "");
CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_8S || inputs[0] == CV_8U || inputs[0] == CV_32S || inputs[0] == CV_64S || inputs[0] == CV_Bool, "");
outputs.assign(requiredOutputs, inputs[0]);
}
@ -207,7 +205,10 @@ public:
else
CV_Error(Error::StsNotImplemented, "Unsupported padding mode");
return make_cuda_node_with_type<cuda4dnn::PaddingOp>(preferableTarget, inputs[0]->getHostMatDepth(), std::move(context->stream), ptype, paddingValue, dstRanges);
if (inputs[0]->getHostMatDepth() == CV_Bool)
return make_cuda_node_bool<cuda4dnn::PaddingOp>(std::move(context->stream), ptype, paddingValue, dstRanges);
else
return make_cuda_node_with_type<cuda4dnn::PaddingOp>(preferableTarget, inputs[0]->getHostMatDepth(), std::move(context->stream), ptype, paddingValue, dstRanges);
}
#endif
@ -275,8 +276,10 @@ public:
std::shared_ptr<ov::op::v0::Constant> arg_pad_value;
float paddingValueFloat = paddingValue;
int8_t paddingValueInt8 = paddingValue;
uint8_t paddingValueUInt8 = paddingValue;
int32_t paddingValueInt32 = paddingValue;
int64_t paddingValueInt64 = paddingValue;
bool paddingValueBool = paddingValue;
switch(ieInpNode.get_element_type())
{
case ov::element::f32:
@ -285,12 +288,18 @@ public:
case ov::element::i8:
arg_pad_value = std::make_shared<ov::op::v0::Constant>(ov::element::i8, ov::Shape{}, &paddingValueInt8);
break;
case ov::element::u8:
arg_pad_value = std::make_shared<ov::op::v0::Constant>(ov::element::u8, ov::Shape{}, &paddingValueUInt8);
break;
case ov::element::i32:
arg_pad_value = std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{}, &paddingValueInt32);
break;
case ov::element::i64:
arg_pad_value = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{}, &paddingValueInt64);
break;
case ov::element::boolean:
arg_pad_value = std::make_shared<ov::op::v0::Constant>(ov::element::boolean, ov::Shape{}, &paddingValueBool);
break;
default:
CV_Error(Error::BadDepth, "");
};
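One typed local per depth is needed here because the scalar `ov::op::v0::Constant` constructor copies raw bytes and interprets them as the declared element type, so the C++ variable's type must match the `ov::element` tag passed next to it. The same pattern in isolation, using the constructor call already shown in this hunk (header path per the OpenVINO 2.x layout):

```cpp
#include <openvino/op/constant.hpp>
#include <cstdint>
#include <memory>

std::shared_ptr<ov::op::v0::Constant> makeScalarI32(double value)
{
    // The constructor reads sizeof(int32_t) bytes from the pointer, so the
    // value must first be materialized as an int32_t lvalue.
    int32_t typed = static_cast<int32_t>(value);
    return std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{}, &typed);
}
```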

@ -187,12 +187,10 @@ public:
CV_Assert(inputs.size());
for (auto input : inputs)
{
if (preferableTarget == DNN_TARGET_CUDA_FP16 || preferableTarget == DNN_TARGET_CUDA)
CV_CheckType(input, input == CV_32F || input == CV_32S || input == CV_64S, "");
else if (preferableTarget == DNN_TARGET_OPENCL_FP16)
CV_CheckType(input, input == CV_16F || input == CV_8S || input == CV_32S || input == CV_64S, "");
if (preferableTarget == DNN_TARGET_OPENCL_FP16)
CV_CheckType(input, input == CV_16F || input == CV_32S || input == CV_64S || input == CV_8S || input == CV_8U || input == CV_Bool, "");
else
CV_CheckType(input, input == CV_32F || input == CV_8S || input == CV_32S || input == CV_64S, "");
CV_CheckType(input, input == CV_32F || input == CV_32S || input == CV_64S || input == CV_8S || input == CV_8U || input == CV_Bool, "");
}
outputs.assign(requiredOutputs, inputs[0]);
@ -367,15 +365,10 @@ public:
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget) &&
inputs_arr.depth() != CV_8S && inputs_arr.depth() != CV_64S,
inputs_arr.depth() != CV_8S && inputs_arr.depth() != CV_8U &&
inputs_arr.depth() != CV_Bool && inputs_arr.depth() != CV_64S,
forward_ocl(inputs_arr, outputs_arr, internals_arr))
if (inputs_arr.depth() == CV_16F)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
@ -414,6 +407,12 @@ public:
case CV_8S:
forward_impl<int8_t>(inputs[k], outputs[k]);
break;
case CV_8U:
forward_impl<uint8_t>(inputs[k], outputs[k]);
break;
case CV_Bool:
forward_impl<bool>(inputs[k], outputs[k]);
break;
default:
CV_Error(Error::BadDepth, "unsupported mat type");
}
@ -521,7 +520,10 @@ public:
) override
{
auto context = reinterpret_cast<csl::CSLContext*>(context_);
return make_cuda_node_with_type<cuda4dnn::PermuteOp>(preferableTarget, inputs[0]->getHostMatDepth(), std::move(context->stream), _order);
if (inputs[0]->getHostMatDepth() == CV_Bool)
return make_cuda_node_bool<cuda4dnn::PermuteOp>(std::move(context->stream), _order);
else
return make_cuda_node_with_type<cuda4dnn::PermuteOp>(preferableTarget, inputs[0]->getHostMatDepth(), std::move(context->stream), _order);
}
#endif

@ -1276,12 +1276,10 @@ public:
std::vector<MatType>& internals) const CV_OVERRIDE
{
CV_Assert(inputs.size());
if (preferableTarget == DNN_TARGET_CUDA_FP16 || preferableTarget == DNN_TARGET_CUDA)
CV_CheckTypeEQ(inputs[0], CV_32F, "Unsupported type");
else if (preferableTarget == DNN_TARGET_OPENCL_FP16)
CV_CheckType(inputs[0], inputs[0] == CV_16F || inputs[0] == CV_8S, "");
if (preferableTarget == DNN_TARGET_OPENCL_FP16)
CV_CheckType(inputs[0], inputs[0] == CV_16F, "");
else
CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_8S, "");
CV_CheckType(inputs[0], inputs[0] == CV_32F, "");
outputs.push_back(inputs[0]);
if (type == MAX && requiredOutputs == 2) {

@ -137,7 +137,7 @@ public:
std::vector<MatType>& outputs,
std::vector<MatType>& internals) const CV_OVERRIDE
{
CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_32S || inputs[0] == CV_64S || inputs[0] == CV_16F || inputs[0] == CV_8U, "");
CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_32S || inputs[0] == CV_64S || inputs[0] == CV_16F || inputs[0] == CV_8U || inputs[0] == CV_8S, "");
outputs.assign(1, inputs[0]);
}
@ -501,6 +501,7 @@ public:
inline void typeDispatch(const int type, Args&&... args) {
switch (type) {
case CV_8U: opDispatch<uint8_t>(std::forward<Args>(args)...); break;
case CV_8S: opDispatch<int8_t>(std::forward<Args>(args)...); break;
case CV_32S: opDispatch<int32_t>(std::forward<Args>(args)...); break;
case CV_64S: opDispatch<int64_t>(std::forward<Args>(args)...); break;
case CV_32F: opDispatch<float>(std::forward<Args>(args)...); break;
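This switch is the runtime half of the support matrix: `typeDispatch` converts the Mat depth seen at runtime into a compile-time template argument, and each new `case` line enables one more instantiation of the templated worker. A self-contained sketch of the idiom (the names mirror the layer; the bodies are illustrative):

```cpp
#include <cstdint>
#include <stdexcept>
#include <utility>

enum Depth { D_8U, D_8S, D_32S, D_64S, D_32F };

// The templated worker: one instantiation per element type.
template <class T, class... Args>
void opDispatch(Args&&...) { /* run the operation with element type T */ }

// Runtime depth -> compile-time type. Extending support is one case per type.
template <class... Args>
void typeDispatch(Depth depth, Args&&... args)
{
    switch (depth)
    {
    case D_8U:  opDispatch<std::uint8_t>(std::forward<Args>(args)...); break;
    case D_8S:  opDispatch<std::int8_t>(std::forward<Args>(args)...); break;
    case D_32S: opDispatch<std::int32_t>(std::forward<Args>(args)...); break;
    case D_64S: opDispatch<std::int64_t>(std::forward<Args>(args)...); break;
    case D_32F: opDispatch<float>(std::forward<Args>(args)...); break;
    default: throw std::runtime_error("unsupported depth");
    }
}
```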

@ -106,12 +106,10 @@ public:
CV_Assert(inputs.size());
for (auto input : inputs)
{
if (preferableTarget == DNN_TARGET_CUDA_FP16 || preferableTarget == DNN_TARGET_CUDA)
CV_CheckTypeEQ(input, CV_32F, "Unsupported type for CUDA");
else if (preferableTarget == DNN_TARGET_OPENCL_FP16)
CV_CheckType(input, input == CV_16F || input == CV_8S || input == CV_32S || input == CV_64S, "");
if (preferableTarget == DNN_TARGET_OPENCL_FP16)
CV_CheckType(input, input == CV_16F || input == CV_8S || input == CV_8U || input == CV_32S || input == CV_64S, "");
else
CV_CheckType(input, input == CV_32F || input == CV_8S || input == CV_32S || input == CV_64S, "");
CV_CheckType(input, input == CV_32F || input == CV_8S || input == CV_8U || input == CV_32S || input == CV_64S, "");
}
outputs.assign(requiredOutputs, inputs[0]);
@ -235,7 +233,7 @@ public:
) override
{
auto context = reinterpret_cast<csl::CSLContext*>(context_);
return make_cuda_node<cuda4dnn::ReorgOp>(preferableTarget, std::move(context->stream), reorgStride);
return make_cuda_node_with_type<cuda4dnn::ReorgOp>(preferableTarget, inputs[0]->getHostMatDepth(), std::move(context->stream), reorgStride);
}
#endif

@ -268,12 +268,10 @@ public:
CV_Assert(inputs.size());
for (auto input : inputs)
{
if (preferableTarget == DNN_TARGET_CUDA_FP16 || preferableTarget == DNN_TARGET_CUDA)
CV_CheckType(input, input == CV_32F || input == CV_32S || input == CV_64S, "");
else if (preferableTarget == DNN_TARGET_OPENCL_FP16)
CV_CheckType(input, input == CV_16F || input == CV_8S || input == CV_32S || input == CV_64S, "");
if (preferableTarget == DNN_TARGET_OPENCL_FP16)
CV_CheckType(input, input == CV_16F || input == CV_8S || input == CV_8U || input == CV_32S || input == CV_64S || input == CV_Bool, "");
else
CV_CheckType(input, input == CV_32F || input == CV_8S || input == CV_32S || input == CV_64S, "");
CV_CheckType(input, input == CV_32F || input == CV_8S || input == CV_8U || input == CV_32S || input == CV_64S || input == CV_Bool, "");
}
outputs.assign(requiredOutputs, inputs[0]);

@ -79,7 +79,7 @@ public:
std::vector<MatType>& internals) const CV_OVERRIDE
{
CV_CheckEQ(inputs.size(), (size_t)3, "");
CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_32S || inputs[0] == CV_64S || inputs[0] == CV_16F || inputs[0] == CV_8U, "");
CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_32S || inputs[0] == CV_64S || inputs[0] == CV_16F || inputs[0] == CV_8U || inputs[0] == CV_8S || inputs[0] == CV_Bool, "");
CV_CheckType(inputs[1], inputs[1] == CV_64S || inputs[1] == CV_32S, "");
CV_CheckTypeEQ(inputs[2], inputs[0], "");
outputs.assign(1, inputs[0]);
@ -187,9 +187,15 @@ public:
{
switch (type)
{
case CV_Bool:
reductionDispatch<bool, T_INDEX>(std::forward<Args>(args)...);
break;
case CV_8U:
reductionDispatch<uint8_t, T_INDEX>(std::forward<Args>(args)...);
break;
case CV_8S:
reductionDispatch<int8_t, T_INDEX>(std::forward<Args>(args)...);
break;
case CV_32S:
reductionDispatch<int32_t, T_INDEX>(std::forward<Args>(args)...);
break;

@ -73,7 +73,7 @@ public:
std::vector<MatType>& internals) const CV_OVERRIDE
{
CV_CheckEQ(inputs.size(), (size_t)3, "");
CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_32S || inputs[0] == CV_64S || inputs[0] == CV_16F || inputs[0] == CV_8U, "");
CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_32S || inputs[0] == CV_64S || inputs[0] == CV_16F || inputs[0] == CV_8U || inputs[0] == CV_8S || inputs[0] == CV_Bool, "");
CV_CheckType(inputs[1], inputs[1] == CV_64S || inputs[1] == CV_32S, "");
CV_CheckTypeEQ(inputs[2], inputs[0], "");
outputs.assign(1, inputs[0]);
@ -182,9 +182,15 @@ public:
{
switch (type)
{
case CV_Bool:
reductionDispatch<bool, T_INDEX>(std::forward<Args>(args)...);
break;
case CV_8U:
reductionDispatch<uint8_t, T_INDEX>(std::forward<Args>(args)...);
break;
case CV_8S:
reductionDispatch<int8_t, T_INDEX>(std::forward<Args>(args)...);
break;
case CV_32S:
reductionDispatch<int32_t, T_INDEX>(std::forward<Args>(args)...);
break;

@ -287,12 +287,10 @@ public:
CV_CheckEQ(inputs.size(), (size_t)1, "");
for (auto input : inputs)
{
if (preferableTarget == DNN_TARGET_CUDA_FP16 || preferableTarget == DNN_TARGET_CUDA)
CV_CheckType(input, input == CV_32F || input == CV_32S || input == CV_64S, "");
else if (preferableTarget == DNN_TARGET_OPENCL_FP16)
CV_CheckType(input, input == CV_16F || input == CV_8S || input == CV_32S || input == CV_64S, "");
if (preferableTarget == DNN_TARGET_OPENCL_FP16)
CV_CheckType(input, input == CV_16F || input == CV_8S || input == CV_8U || input == CV_32S || input == CV_64S || input == CV_Bool, "");
else
CV_CheckType(input, input == CV_32F || input == CV_8S || input == CV_32S || input == CV_64S, "");
CV_CheckType(input, input == CV_32F || input == CV_8S || input == CV_8U || input == CV_32S || input == CV_64S || input == CV_Bool, "");
}
outputs.assign(requiredOutputs, inputs[0]);
@ -651,6 +649,10 @@ public:
getSliceRecursive<int16_t>(inpMat, inpIdx, finalSliceRanges[i], sliceSteps[i], 0, dimsNum, outputs[i], outIdx);
else if (inpMat.type() == CV_8S)
getSliceRecursive<int8_t>(inpMat, inpIdx, finalSliceRanges[i], sliceSteps[i], 0, dimsNum, outputs[i], outIdx);
else if (inpMat.type() == CV_8U)
getSliceRecursive<uint8_t>(inpMat, inpIdx, finalSliceRanges[i], sliceSteps[i], 0, dimsNum, outputs[i], outIdx);
else if (inpMat.type() == CV_Bool)
getSliceRecursive<bool>(inpMat, inpIdx, finalSliceRanges[i], sliceSteps[i], 0, dimsNum, outputs[i], outIdx);
else
getSliceRecursive<float>(inpMat, inpIdx, finalSliceRanges[i], sliceSteps[i], 0, dimsNum, outputs[i], outIdx);
// flip for negative steps
@ -826,8 +828,10 @@ public:
offsets_i.push_back(range.start);
offsets.push_back(std::move(offsets_i));
}
return make_cuda_node_with_type<cuda4dnn::SliceOp>(preferableTarget, inputs[0]->getHostMatDepth(), std::move(context->stream), std::move(offsets));
if (inputs[0]->getHostMatDepth() == CV_Bool)
return make_cuda_node_bool<cuda4dnn::SliceOp>(std::move(context->stream), std::move(offsets));
else
return make_cuda_node_with_type<cuda4dnn::SliceOp>(preferableTarget, inputs[0]->getHostMatDepth(), std::move(context->stream), std::move(offsets));
}
#endif
@ -911,18 +915,15 @@ public:
CV_CheckEQ(inputs.size(), (size_t)2, "");
for (auto input : inputs)
{
if (preferableTarget == DNN_TARGET_CUDA_FP16 || preferableTarget == DNN_TARGET_CUDA)
CV_CheckTypeEQ(input, CV_32F, "Unsupported type");
else if (preferableTarget == DNN_TARGET_OPENCL_FP16)
CV_CheckType(input, input == CV_16F || input == CV_8S || input == CV_32S || input == CV_64S, "");
if (preferableTarget == DNN_TARGET_OPENCL_FP16)
CV_CheckType(input, input == CV_16F || input == CV_8S || input == CV_8U || input == CV_32S || input == CV_64S || input == CV_Bool, "");
else
CV_CheckType(input, input == CV_32F || input == CV_8S || input == CV_32S || input == CV_64S, "");
CV_CheckType(input, input == CV_32F || input == CV_8S || input == CV_8U || input == CV_32S || input == CV_64S || input == CV_Bool, "");
}
outputs.assign(requiredOutputs, inputs[0]);
}
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
std::vector<Mat> inputs;

@ -90,6 +90,22 @@ public:
return false;
}
void getTypes(const std::vector<MatType>& inputs,
const int requiredOutputs,
const int requiredInternals,
std::vector<MatType>& outputs,
std::vector<MatType>& internals) const CV_OVERRIDE
{
CV_CheckEQ(inputs.size(), 1u, "");
if (preferableTarget == DNN_TARGET_OPENCL_FP16)
CV_CheckType(inputs[0], inputs[0] == CV_16F || inputs[0] == CV_8S || inputs[0] == CV_8U || inputs[0] == CV_32S || inputs[0] == CV_64S || inputs[0] == CV_Bool, "");
else
CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_8S || inputs[0] == CV_8U || inputs[0] == CV_32S || inputs[0] == CV_64S || inputs[0] == CV_Bool, "");
outputs.assign(requiredOutputs, inputs[0]);
}
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
@ -113,7 +129,11 @@ public:
) override
{
auto context = reinterpret_cast<csl::CSLContext*>(context_);
return make_cuda_node<cuda4dnn::SplitOp>(preferableTarget, std::move(context->stream));
if (inputs[0]->getHostMatDepth() == CV_Bool)
return make_cuda_node_bool<cuda4dnn::SplitOp>(std::move(context->stream));
else
return make_cuda_node_with_type<cuda4dnn::SplitOp>(preferableTarget, inputs[0]->getHostMatDepth(), std::move(context->stream));
}
#endif
};

@ -131,6 +131,12 @@ namespace cv { namespace dnn {
copyMatToTensorImpl(srcMat, destTensor, stream);
}
template <> inline
void copyMatToTensor(const Mat& srcMat, const TensorSpan<bool> destTensor, const Stream& stream) {
CV_CheckTypeEQ(srcMat.type(), CV_Bool, "");
copyMatToTensorImpl(srcMat, destTensor, stream);
}
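`copyMatToTensor` is fully specialized per element type so each overload can assert the matching Mat depth before the raw copy; the new `bool` specialization above follows that template. A stand-alone sketch of the shape of this pattern (stand-in types, not the real csl API):

```cpp
#include <cassert>
#include <cstddef>
#include <cstring>

enum Depth { F32, S32, BoolDepth };
template <class T> struct TensorSpan { T* data; std::size_t size; };
struct HostMat { Depth depth; const void* data; std::size_t total; };

// Primary template: every supported T gets its own full specialization.
template <class T>
void copyMatToTensor(const HostMat& src, TensorSpan<T> dst);

template <>
void copyMatToTensor<bool>(const HostMat& src, TensorSpan<bool> dst)
{
    assert(src.depth == BoolDepth); // mirrors the CV_CheckTypeEQ above
    std::memcpy(dst.data, src.data, src.total * sizeof(bool));
}
```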
/** @brief copies data from a TensorType to a cv::Mat
*
* \tparam T the type of the elements contained in TensorType object

@ -12,7 +12,13 @@ namespace opencv_test { namespace {
int64_t getValueAt(const Mat &m, const int *indices)
{
if (m.type() == CV_32S)
if (m.type() == CV_Bool)
return m.at<bool>(indices);
else if (m.type() == CV_8U)
return m.at<uint8_t>(indices);
else if (m.type() == CV_8S)
return m.at<int8_t>(indices);
else if (m.type() == CV_32S)
return m.at<int32_t>(indices);
else if (m.type() == CV_64S)
return m.at<int64_t>(indices);
@ -21,6 +27,41 @@ int64_t getValueAt(const Mat &m, const int *indices)
return -1;
}
int64_t getValueAt(const Mat &m, int index)
{
if (m.type() == CV_Bool)
return m.ptr<bool>()[index];
else if (m.type() == CV_8U)
return m.ptr<uint8_t>()[index];
else if (m.type() == CV_8S)
return m.ptr<int8_t>()[index];
else if (m.type() == CV_32S)
return m.ptr<int32_t>()[index];
else if (m.type() == CV_64S)
return m.ptr<int64_t>()[index];
else
CV_Error(Error::BadDepth, "Unsupported type");
return -1;
}
void fillRandom(Mat& m, int matType, Backend backend)
{
if (matType == CV_64S && backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
cv::randu(m, 1000000000, 1000000100); // Looks like OpenVINO uses int32 internal values for int64 operations
else if (matType == CV_64S)
cv::randu(m, 1000000000000000ll, 1000000000000100ll);
else if (matType == CV_32S)
cv::randu(m, 1000000000, 1000000100);
else if (matType == CV_8S)
cv::randu(m, -50, 50);
else if (matType == CV_8U)
cv::randu(m, 0, 100);
else if (matType == CV_Bool)
cv::randu(m, 0, 2);
else
CV_Error(Error::BadDepth, "Unsupported type");
}
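Together, the widened `getValueAt` overloads and `fillRandom` keep the test bodies type-agnostic: every supported depth widens losslessly into `int64_t` for comparison, and the random range is chosen to fit the type. A typical use, as the tests below do (assuming the test file's own includes and the gtest environment):

```cpp
// Fill an input of any depth under test, then compare mats element-wise.
Mat a({2, 3, 4, 5}, CV_8U), b;
fillRandom(a, CV_8U, DNN_BACKEND_OPENCV);
a.copyTo(b);
for (int i = 0; i < (int)a.total(); ++i)
    EXPECT_EQ(getValueAt(a, i), getValueAt(b, i));
```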
typedef testing::TestWithParam<tuple<int, tuple<Backend, Target> > > Test_NaryEltwise_Int;
TEST_P(Test_NaryEltwise_Int, random)
{
@ -30,19 +71,19 @@ TEST_P(Test_NaryEltwise_Int, random)
Target target = get<1>(backend_target);
std::vector<int> inShape{2, 3, 4, 5};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input1(inShape, matType);
cv::randu(input1, low, low + 100);
Mat input2(inShape, matType);
cv::randu(input2, low, low + 100);
fillRandom(input1, matType, backend);
fillRandom(input2, matType, backend);
Net net;
LayerParams lp;
lp.type = "NaryEltwise";
lp.name = "testLayer";
lp.set("operation", "add");
if (matType == CV_Bool)
lp.set("operation", "or");
else
lp.set("operation", "add");
int id = net.addLayerToPrev(lp.name, lp.type, lp);
net.connect(0, 1, id, 1);
@ -78,7 +119,10 @@ TEST_P(Test_NaryEltwise_Int, random)
for (int i3 = 0; i3 < re.size[3]; ++i3)
{
reIndices[3] = i3;
EXPECT_EQ(getValueAt(re, reIndices.data()), getValueAt(input1, reIndices.data()) + getValueAt(input2, reIndices.data()));
if (matType == CV_Bool)
EXPECT_EQ(getValueAt(re, reIndices.data()), getValueAt(input1, reIndices.data()) | getValueAt(input2, reIndices.data()));
else
EXPECT_EQ(getValueAt(re, reIndices.data()), getValueAt(input1, reIndices.data()) + getValueAt(input2, reIndices.data()));
}
}
}
@ -86,7 +130,7 @@ TEST_P(Test_NaryEltwise_Int, random)
}
INSTANTIATE_TEST_CASE_P(/**/, Test_NaryEltwise_Int, Combine(
testing::Values(CV_32S, CV_64S),
testing::Values(CV_Bool, CV_8U, CV_8S, CV_32S, CV_64S),
dnnBackendsAndTargets()
));
@ -99,13 +143,10 @@ TEST_P(Test_Const_Int, random)
Target target = get<1>(backend_target);
std::vector<int> inShape{2, 3, 4, 5};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input1(inShape, matType);
cv::randu(input1, low, low + 100);
Mat inputConst(inShape, matType);
cv::randu(inputConst, low, low + 100);
fillRandom(input1, matType, backend);
fillRandom(inputConst, matType, backend);
Net net;
@ -118,7 +159,10 @@ TEST_P(Test_Const_Int, random)
LayerParams lp;
lp.type = "NaryEltwise";
lp.name = "testLayer";
lp.set("operation", "add");
if (matType == CV_Bool)
lp.set("operation", "or");
else
lp.set("operation", "add");
int idSum = net.addLayer(lp.name, lp.type, lp);
net.connect(0, 0, idSum, 0);
@ -150,7 +194,10 @@ TEST_P(Test_Const_Int, random)
for (int i3 = 0; i3 < re.size[3]; ++i3)
{
reIndices[3] = i3;
EXPECT_EQ(getValueAt(re, reIndices.data()), getValueAt(input1, reIndices.data()) + getValueAt(inputConst, reIndices.data()));
if (matType == CV_Bool)
EXPECT_EQ(getValueAt(re, reIndices.data()), getValueAt(input1, reIndices.data()) | getValueAt(inputConst, reIndices.data()));
else
EXPECT_EQ(getValueAt(re, reIndices.data()), getValueAt(input1, reIndices.data()) + getValueAt(inputConst, reIndices.data()));
}
}
}
@ -158,7 +205,7 @@ TEST_P(Test_Const_Int, random)
}
INSTANTIATE_TEST_CASE_P(/**/, Test_Const_Int, Combine(
testing::Values(CV_32S, CV_64S),
testing::Values(CV_Bool, CV_8U, CV_8S, CV_32S, CV_64S),
dnnBackendsAndTargets()
));
@ -173,15 +220,17 @@ TEST_P(Test_ScatterND_Int, random)
Target target = get<1>(backend_target);
std::vector<int> inShape{2, 3, 4, 5};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType);
cv::randu(input, low, low + 100);
fillRandom(input, matType, backend);
std::vector<int64_t> indicesValues{0, 1, 2, 3,
1, 2, 3, 4};
std::vector<int64_t> updatesValues{25, 35};
if (matType == CV_Bool)
{
updatesValues[0] = 1;
updatesValues[1] = 0;
}
Mat indices(2, 4, indicesType);
std::vector<int> updatesShape{2};
@ -199,8 +248,14 @@ TEST_P(Test_ScatterND_Int, random)
{
if (matType == CV_32S)
updates.ptr<int32_t>()[i] = updatesValues[i];
else
else if (matType == CV_64S)
updates.ptr<int64_t>()[i] = updatesValues[i];
else if (matType == CV_8S)
updates.ptr<int8_t>()[i] = updatesValues[i];
else if (matType == CV_8U)
updates.ptr<uint8_t>()[i] = updatesValues[i];
else if (matType == CV_Bool)
updates.ptr<bool>()[i] = updatesValues[i];
}
Net net;
@ -267,7 +322,7 @@ TEST_P(Test_ScatterND_Int, random)
}
INSTANTIATE_TEST_CASE_P(/**/, Test_ScatterND_Int, Combine(
testing::Values(CV_32S, CV_64S),
testing::Values(CV_Bool, CV_8U, CV_8S, CV_32S, CV_64S),
testing::Values(CV_32S, CV_64S),
dnnBackendsAndTargets()
));
@ -280,15 +335,12 @@ TEST_P(Test_Concat_Int, random)
Backend backend = get<0>(backend_target);
Target target = get<1>(backend_target);
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
std::vector<int> inShape1{2, 3, 4, 5};
Mat input1(inShape1, matType);
cv::randu(input1, low, low + 100);
fillRandom(input1, matType, backend);
std::vector<int> inShape2{2, 2, 4, 5};
Mat input2(inShape2, matType);
cv::randu(input2, low, low + 100);
fillRandom(input2, matType, backend);
Net net;
LayerParams lp;
@ -354,7 +406,7 @@ TEST_P(Test_Concat_Int, random)
}
INSTANTIATE_TEST_CASE_P(/**/, Test_Concat_Int, Combine(
testing::Values(CV_32S, CV_64S),
testing::Values(CV_Bool, CV_8U, CV_8S, CV_32S, CV_64S),
dnnBackendsAndTargets()
));
@ -370,11 +422,8 @@ TEST_P(Test_ArgMax_Int, random)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // There is a problem with OpenVINO and custom int64 layers. After model compilation the output tensor type changes from int64 to int32
std::vector<int> inShape{5, 4, 3, 2};
int64_t low = matType == CV_64S ? 1000000000000000ll : 100000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType);
cv::randu(input, low, low + 100);
fillRandom(input, matType, backend);
Net net;
LayerParams lp;
@ -413,7 +462,7 @@ TEST_P(Test_ArgMax_Int, random)
inIndices[3] = i2;
reIndices[2] = i2;
int64_t max_value = 0;
int64_t max_value = -1000000000000000000l;
int64_t index = 0;
for (int j = 0; j < input.size[1]; ++j)
{
@ -432,7 +481,7 @@ TEST_P(Test_ArgMax_Int, random)
}
INSTANTIATE_TEST_CASE_P(/**/, Test_ArgMax_Int, Combine(
testing::Values(CV_32S, CV_64S),
testing::Values(CV_8U, CV_8S, CV_32S, CV_64S),
dnnBackendsAndTargets()
));
@ -445,11 +494,8 @@ TEST_P(Test_Blank_Int, random)
Target target = get<1>(backend_target);
std::vector<int> inShape{2, 3, 4, 5};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType);
cv::randu(input, low, low + 100);
fillRandom(input, matType, backend);
Net net;
LayerParams lp;
@ -491,7 +537,7 @@ TEST_P(Test_Blank_Int, random)
}
INSTANTIATE_TEST_CASE_P(/**/, Test_Blank_Int, Combine(
testing::Values(CV_32S, CV_64S),
testing::Values(CV_Bool, CV_8U, CV_8S, CV_32S, CV_64S),
dnnBackendsAndTargets()
));
@ -504,11 +550,8 @@ TEST_P(Test_Expand_Int, random)
Target target = get<1>(backend_target);
std::vector<int> inShape{2, 3, 1, 5};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType);
cv::randu(input, low, low + 100);
fillRandom(input, matType, backend);
std::vector<int> outShape{2, 1, 4, 5};
Net net;
@ -557,7 +600,7 @@ TEST_P(Test_Expand_Int, random)
}
INSTANTIATE_TEST_CASE_P(/**/, Test_Expand_Int, Combine(
testing::Values(CV_32S, CV_64S),
testing::Values(CV_Bool, CV_8U, CV_8S, CV_32S, CV_64S),
dnnBackendsAndTargets()
));
@ -570,11 +613,8 @@ TEST_P(Test_Permute_Int, random)
Target target = get<1>(backend_target);
std::vector<int> inShape{2, 3, 4, 5};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType);
cv::randu(input, low, low + 100);
fillRandom(input, matType, backend);
std::vector<int> order{0, 2, 3, 1};
Net net;
@ -623,7 +663,7 @@ TEST_P(Test_Permute_Int, random)
}
INSTANTIATE_TEST_CASE_P(/**/, Test_Permute_Int, Combine(
testing::Values(CV_32S, CV_64S),
testing::Values(CV_Bool, CV_8U, CV_8S, CV_32S, CV_64S),
dnnBackendsAndTargets()
));
@ -637,11 +677,8 @@ TEST_P(Test_GatherElements_Int, random)
Target target = get<1>(backend_target);
std::vector<int> inShape{2, 3, 4, 5};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType);
cv::randu(input, low, low + 100);
fillRandom(input, matType, backend);
std::vector<int> indicesShape{2, 3, 10, 5};
Mat indicesMat(indicesShape, indicesType);
@ -697,7 +734,7 @@ TEST_P(Test_GatherElements_Int, random)
}
INSTANTIATE_TEST_CASE_P(/**/, Test_GatherElements_Int, Combine(
testing::Values(CV_32S, CV_64S),
testing::Values(CV_Bool, CV_8U, CV_8S, CV_32S, CV_64S),
testing::Values(CV_32S, CV_64S),
dnnBackendsAndTargets()
));
@ -712,11 +749,8 @@ TEST_P(Test_Gather_Int, random)
Target target = get<1>(backend_target);
std::vector<int> inShape{5, 1};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType);
cv::randu(input, low, low + 100);
fillRandom(input, matType, backend);
std::vector<int> indices_shape = {1, 1};
Mat indicesMat = cv::Mat(indices_shape, indicesType, 0.0);
@ -752,7 +786,7 @@ TEST_P(Test_Gather_Int, random)
}
INSTANTIATE_TEST_CASE_P(/**/, Test_Gather_Int, Combine(
testing::Values(CV_32S, CV_64S),
testing::Values(CV_Bool, CV_8U, CV_8S, CV_32S, CV_64S),
testing::Values(CV_32S, CV_64S),
dnnBackendsAndTargets()
));
@ -768,7 +802,10 @@ TEST_P(Test_Cast_Int, random)
std::vector<int> inShape{2, 3, 4, 5};
Mat input(inShape, inMatType);
cv::randu(input, 200, 300);
if (inMatType == CV_Bool || outMatType == CV_Bool)
cv::randu(input, 0, 1.1);
else
cv::randu(input, 0, 100);
Mat outputRef;
input.convertTo(outputRef, outMatType);
@ -793,8 +830,8 @@ TEST_P(Test_Cast_Int, random)
}
INSTANTIATE_TEST_CASE_P(/**/, Test_Cast_Int, Combine(
testing::Values(CV_32S, CV_64S),
testing::Values(CV_32S, CV_64S),
testing::Values(CV_Bool, CV_8U, CV_8S, CV_32S, CV_64S),
testing::Values(CV_Bool, CV_8U, CV_8S, CV_32S, CV_64S),
dnnBackendsAndTargets()
));
@ -807,19 +844,17 @@ TEST_P(Test_Pad_Int, random)
Target target = get<1>(backend_target);
std::vector<int> inShape{2, 3, 4, 5};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType);
cv::randu(input, low, low + 100);
fillRandom(input, matType, backend);
std::vector<int> paddings{0, 0, 0, 0, 1, 0, 0, 1};
int64_t padValue = matType == CV_Bool ? 1 : 25;
Net net;
LayerParams lp;
lp.type = "Padding";
lp.name = "testLayer";
lp.set("paddings", DictValue::arrayInt<int*>(&paddings[0], paddings.size()));
lp.set<double>("value", 25);
lp.set<double>("value", padValue);
net.addLayerToPrev(lp.name, lp.type, lp);
@ -856,7 +891,7 @@ TEST_P(Test_Pad_Int, random)
inIndices[3] = i3;
if (i2 < 1 || i3 >= input.size[3])
{
EXPECT_EQ(getValueAt(re, reIndices.data()), 25l);
EXPECT_EQ(getValueAt(re, reIndices.data()), padValue);
}
else
{
@ -869,7 +904,7 @@ TEST_P(Test_Pad_Int, random)
}
INSTANTIATE_TEST_CASE_P(/**/, Test_Pad_Int, Combine(
testing::Values(CV_32S, CV_64S),
testing::Values(CV_Bool, CV_8U, CV_8S, CV_32S, CV_64S),
dnnBackendsAndTargets()
));
@ -884,11 +919,8 @@ TEST_P(Test_Slice_Int, random)
std::vector<int> inputShape{1, 16, 6, 8};
std::vector<int> begin{0, 4, 0, 0};
std::vector<int> end{1, 8, 6, 8};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inputShape, matType);
cv::randu(input, low, low + 100);
fillRandom(input, matType, backend);
std::vector<Range> range(4);
for (int i = 0; i < 4; ++i)
@ -907,12 +939,18 @@ TEST_P(Test_Slice_Int, random)
net.setPreferableTarget(target);
Mat out = net.forward();
EXPECT_GT(cv::norm(out, NORM_INF), 0);
normAssert(out, input(range));
Mat gt = input(range);
EXPECT_EQ(out.size.dims(), 4);
EXPECT_EQ(out.size[0], gt.size[0]);
EXPECT_EQ(out.size[1], gt.size[1]);
EXPECT_EQ(out.size[2], gt.size[2]);
EXPECT_EQ(out.size[3], gt.size[3]);
for (int i = 0; i < out.total(); ++i)
EXPECT_EQ(getValueAt(out, i), getValueAt(gt, i));
}
INSTANTIATE_TEST_CASE_P(/**/, Test_Slice_Int, Combine(
testing::Values(CV_32S, CV_64S),
testing::Values(CV_Bool, CV_8U, CV_8S, CV_32S, CV_64S),
dnnBackendsAndTargets()
));
@ -926,11 +964,8 @@ TEST_P(Test_Reshape_Int, random)
std::vector<int> inShape{2, 3, 4, 5};
std::vector<int> outShape{2, 3, 2, 10};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType);
cv::randu(input, low, low + 100);
fillRandom(input, matType, backend);
Net net;
LayerParams lp;
@ -953,17 +988,11 @@ TEST_P(Test_Reshape_Int, random)
EXPECT_EQ(re.size[3], outShape[3]);
for (int i = 0; i < input.total(); ++i)
{
if (matType == CV_32S) {
EXPECT_EQ(re.ptr<int32_t>()[i], input.ptr<int32_t>()[i]);
} else {
EXPECT_EQ(re.ptr<int64_t>()[i], input.ptr<int64_t>()[i]);
}
}
EXPECT_EQ(getValueAt(re, i), getValueAt(input, i));
}
INSTANTIATE_TEST_CASE_P(/**/, Test_Reshape_Int, Combine(
testing::Values(CV_32S, CV_64S),
testing::Values(CV_Bool, CV_8U, CV_8S, CV_32S, CV_64S),
dnnBackendsAndTargets()
));
@ -976,11 +1005,8 @@ TEST_P(Test_Flatten_Int, random)
Target target = get<1>(backend_target);
std::vector<int> inShape{2, 3, 4, 5};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType);
cv::randu(input, low, low + 100);
fillRandom(input, matType, backend);
Net net;
LayerParams lp;
@ -1001,17 +1027,11 @@ TEST_P(Test_Flatten_Int, random)
EXPECT_EQ(re.size[1], inShape[1] * inShape[2] * inShape[3]);
for (int i = 0; i < input.total(); ++i)
{
if (matType == CV_32S) {
EXPECT_EQ(re.ptr<int32_t>()[i], input.ptr<int32_t>()[i]);
} else {
EXPECT_EQ(re.ptr<int64_t>()[i], input.ptr<int64_t>()[i]);
}
}
EXPECT_EQ(getValueAt(re, i), getValueAt(input, i));
}
INSTANTIATE_TEST_CASE_P(/**/, Test_Flatten_Int, Combine(
testing::Values(CV_32S, CV_64S),
testing::Values(CV_Bool, CV_8U, CV_8S, CV_32S, CV_64S),
dnnBackendsAndTargets()
));
@ -1024,11 +1044,8 @@ TEST_P(Test_Tile_Int, random)
Target target = get<1>(backend_target);
std::vector<int> inShape{2, 3, 4, 5};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType);
cv::randu(input, low, low + 100);
fillRandom(input, matType, backend);
std::vector<int> repeats{1, 1, 2, 3};
Net net;
@ -1077,7 +1094,7 @@ TEST_P(Test_Tile_Int, random)
}
INSTANTIATE_TEST_CASE_P(/**/, Test_Tile_Int, Combine(
testing::Values(CV_32S, CV_64S),
testing::Values(CV_Bool, CV_8U, CV_8S, CV_32S, CV_64S),
dnnBackendsAndTargets()
));
@ -1093,13 +1110,21 @@ TEST_P(Test_Reduce_Int, random)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // There is a problem with OpenVINO and custom int64 layers. After model compilation the output tensor type changes from int64 to int32
std::vector<int> inShape{5, 4, 3, 2};
int64_t low = matType == CV_64S ? 1000000000000000ll : 100000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 100000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType);
cv::randu(input, low, low + 100);
std::vector<int> axes{1};
if (matType == CV_64S && backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
cv::randu(input, 100000000, 100000100); // Looks like OpenVINO uses int32 internal values for int64 operations
else if (matType == CV_64S)
cv::randu(input, 1000000000000000ll, 1000000000000100ll);
else if (matType == CV_32S)
cv::randu(input, 100000000, 100000100);
else if (matType == CV_8S)
cv::randu(input, -25, 25);
else if (matType == CV_8U)
cv::randu(input, 0, 50);
else
CV_Error(Error::BadDepth, "Unsupported type");
std::vector<int> axes{1};
Net net;
LayerParams lp;
@ -1162,11 +1187,20 @@ TEST_P(Test_Reduce_Int, two_axes)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // There is a problem with OpenVINO and custom int64 layers. After model compilation the output tensor type changes from int64 to int32
std::vector<int> inShape{5, 4, 3, 2};
int64_t low = matType == CV_64S ? 100000000000000ll : 10000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 10000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType);
cv::randu(input, low, low + 100);
if (matType == CV_64S && backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
cv::randu(input, 100000000, 100000100); // Looks like OpenVINO uses int32 internal values for int64 operations
else if (matType == CV_64S)
cv::randu(input, 1000000000000000ll, 1000000000000100ll);
else if (matType == CV_32S)
cv::randu(input, 100000000, 100000100);
else if (matType == CV_8S)
cv::randu(input, -15, 15);
else if (matType == CV_8U)
cv::randu(input, 0, 30);
else
CV_Error(Error::BadDepth, "Unsupported type");
std::vector<int> axes{1, 3};
Net net;
@ -1217,7 +1251,7 @@ TEST_P(Test_Reduce_Int, two_axes)
}
INSTANTIATE_TEST_CASE_P(/**/, Test_Reduce_Int, Combine(
testing::Values(CV_32S, CV_64S),
testing::Values(CV_8U, CV_8S, CV_32S, CV_64S),
dnnBackendsAndTargets()
));
