Added int support to flatten, permute, reshape, slice layers (#25236)

Co-authored-by: Alexander Lyulkov <alexander.lyulkov@opencv.ai>
Author: alexlyulkov (committed via GitHub)
Commit: f2cf3c8890 (parent: aa5ea340f7)
Changed files:
1. modules/dnn/src/cuda/permute.cu (4 lines changed)
2. modules/dnn/src/cuda/slice.cu (2 lines changed)
3. modules/dnn/src/layers/flatten_layer.cpp (21 lines changed)
4. modules/dnn/src/layers/permute_layer.cpp (4 lines changed)
5. modules/dnn/src/layers/reshape_layer.cpp (4 lines changed)
6. modules/dnn/src/layers/slice_layer.cpp (4 lines changed)
7. modules/dnn/test/test_int.cpp (139 lines changed)
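With this change, a Net containing any of these layers can run directly on CV_32S and CV_64S tensors. A minimal sketch of the user-visible effect, mirroring the new Flatten test added below:

#include <opencv2/dnn.hpp>
#include <vector>

int main()
{
    using namespace cv;
    using namespace cv::dnn;

    // 4-D int32 input; CV_64S works the same way.
    std::vector<int> shape{2, 3, 4, 5};
    Mat input(shape, CV_32S);
    randu(input, 0, 100);

    // Single-layer net: Flatten from axis 1 onward.
    Net net;
    LayerParams lp;
    lp.type = "Flatten";
    lp.name = "testLayer";
    lp.set("axis", 1);
    net.addLayerToPrev(lp.name, lp.type, lp);

    net.setInput(input);
    Mat out = net.forward();  // output keeps the integer depth, shape 2x60
    return 0;
}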

modules/dnn/src/cuda/permute.cu:

@@ -107,6 +107,8 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
     template void transpose(const Stream&, Span<__half>, View<__half>, std::size_t, std::size_t);
     template void transpose(const Stream&, Span<float>, View<float>, std::size_t, std::size_t);
+    template void transpose(const Stream&, Span<int32_t>, View<int32_t>, std::size_t, std::size_t);
+    template void transpose(const Stream&, Span<int64_t>, View<int64_t>, std::size_t, std::size_t);

     template <class T, std::size_t Rank> static
     void launch_permute_kernel(

@@ -284,5 +286,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
     template void permute(const Stream&, TensorSpan<__half>, TensorView<__half>, std::vector<std::size_t>);
 #endif
     template void permute(const Stream&, TensorSpan<float>, TensorView<float>, std::vector<std::size_t>);
+    template void permute(const Stream&, TensorSpan<int32_t>, TensorView<int32_t>, std::vector<std::size_t>);
+    template void permute(const Stream&, TensorSpan<int64_t>, TensorView<int64_t>, std::vector<std::size_t>);
 }}}} /* namespace cv::dnn::cuda4dnn::kernels */

modules/dnn/src/cuda/slice.cu:

@@ -199,5 +199,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
     template void slice(const Stream&, TensorSpan<__half>, TensorView<__half>, std::vector<std::size_t>);
 #endif
     template void slice(const Stream&, TensorSpan<float>, TensorView<float>, std::vector<std::size_t>);
+    template void slice(const Stream&, TensorSpan<int32_t>, TensorView<int32_t>, std::vector<std::size_t>);
+    template void slice(const Stream&, TensorSpan<int64_t>, TensorView<int64_t>, std::vector<std::size_t>);
 }}}} /* namespace cv::dnn::cuda4dnn::kernels */
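Both CUDA files follow the same pattern: the kernel templates are defined only in the .cu translation unit, so every element type callers may request must be explicitly instantiated there, or linking fails with an undefined symbol. A minimal sketch of that pattern with a hypothetical kernel name (fill is not part of cuda4dnn):

// kernels.hpp -- declaration visible to callers
template <class T>
void fill(T* data, std::size_t n, T value);

// kernels.cu -- definition plus explicit instantiations
#include <cstddef>
#include <cstdint>

template <class T>
void fill(T* data, std::size_t n, T value)
{
    for (std::size_t i = 0; i < n; ++i)
        data[i] = value;
}

// Each supported element type needs its own instantiation; the int32_t and
// int64_t lines are the analogue of what this commit adds.
template void fill(float*, std::size_t, float);
template void fill(std::int32_t*, std::size_t, std::int32_t);
template void fill(std::int64_t*, std::size_t, std::int64_t);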

modules/dnn/src/layers/flatten_layer.cpp:

@@ -118,6 +118,25 @@ public:
         return true;
     }

+    void getTypes(const std::vector<MatType>& inputs,
+                  const int requiredOutputs,
+                  const int requiredInternals,
+                  std::vector<MatType>& outputs,
+                  std::vector<MatType>& internals) const CV_OVERRIDE
+    {
+        CV_Assert(inputs.size());
+        for (auto input : inputs)
+        {
+            if (preferableTarget == DNN_TARGET_OPENCL_FP16)
+                CV_CheckType(input, input == CV_16F || input == CV_32S || input == CV_64S, "");
+            else
+                CV_CheckType(input, input == CV_32F || input == CV_32S || input == CV_64S, "");
+        }
+        outputs.assign(requiredOutputs, inputs[0]);
+    }
+
     void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
     {
         std::vector<Mat> inputs;

@@ -240,7 +259,7 @@ public:
     ) override
     {
         auto context = reinterpret_cast<csl::CSLContext*>(context_);
-        return make_cuda_node<cuda4dnn::ReshapeOp>(preferableTarget, std::move(context->stream));
+        return make_cuda_node_with_type<cuda4dnn::ReshapeOp>(preferableTarget, inputs[0]->getHostMatDepth(), std::move(context->stream));
     }
 #endif
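The make_cuda_node_with_type calls pick the CUDA op's element type from the host Mat depth at node-creation time, instead of hard-coding float. A hedged sketch of what such a depth-dispatching factory can look like (an illustration only, not OpenCV's actual helper; makeTypedNode and NodeBase are hypothetical names):

#include <opencv2/core.hpp>  // for the CV_* depth constants

#include <cstdint>
#include <memory>
#include <stdexcept>
#include <utility>

struct NodeBase { virtual ~NodeBase() = default; };

// Op<T> stands for a backend node templated on the element type
// (e.g. ReshapeOp<T>); it is assumed to derive from NodeBase.
template <template <class> class Op, class... Args>
std::shared_ptr<NodeBase> makeTypedNode(int depth, Args&&... args)
{
    switch (depth) {
    case CV_32F: return std::make_shared<Op<float>>(std::forward<Args>(args)...);
    case CV_32S: return std::make_shared<Op<std::int32_t>>(std::forward<Args>(args)...);
    case CV_64S: return std::make_shared<Op<std::int64_t>>(std::forward<Args>(args)...);
    default:     throw std::runtime_error("unsupported depth for CUDA node");
    }
}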

modules/dnn/src/layers/permute_layer.cpp:

@@ -188,7 +188,7 @@ public:
         for (auto input : inputs)
         {
             if (preferableTarget == DNN_TARGET_CUDA_FP16 || preferableTarget == DNN_TARGET_CUDA)
-                CV_CheckTypeEQ(input, CV_32F, "Unsupported type");
+                CV_CheckType(input, input == CV_32F || input == CV_32S || input == CV_64S, "");
             else if (preferableTarget == DNN_TARGET_OPENCL_FP16)
                 CV_CheckType(input, input == CV_16F || input == CV_8S || input == CV_32S || input == CV_64S, "");
             else

@@ -521,7 +521,7 @@ public:
     ) override
     {
         auto context = reinterpret_cast<csl::CSLContext*>(context_);
-        return make_cuda_node<cuda4dnn::PermuteOp>(preferableTarget, std::move(context->stream), _order);
+        return make_cuda_node_with_type<cuda4dnn::PermuteOp>(preferableTarget, inputs[0]->getHostMatDepth(), std::move(context->stream), _order);
     }
 #endif

modules/dnn/src/layers/reshape_layer.cpp:

@@ -269,7 +269,7 @@ public:
         for (auto input : inputs)
         {
             if (preferableTarget == DNN_TARGET_CUDA_FP16 || preferableTarget == DNN_TARGET_CUDA)
-                CV_CheckTypeEQ(input, CV_32F, "Unsupported type");
+                CV_CheckType(input, input == CV_32F || input == CV_32S || input == CV_64S, "");
             else if (preferableTarget == DNN_TARGET_OPENCL_FP16)
                 CV_CheckType(input, input == CV_16F || input == CV_8S || input == CV_32S || input == CV_64S, "");
             else

@@ -417,7 +417,7 @@ public:
     ) override
     {
         auto context = reinterpret_cast<csl::CSLContext*>(context_);
-        return make_cuda_node<cuda4dnn::ReshapeOp>(preferableTarget, std::move(context->stream));
+        return make_cuda_node_with_type<cuda4dnn::ReshapeOp>(preferableTarget, inputs[0]->getHostMatDepth(), std::move(context->stream));
     }
 #endif

modules/dnn/src/layers/slice_layer.cpp:

@@ -288,7 +288,7 @@ public:
         for (auto input : inputs)
        {
             if (preferableTarget == DNN_TARGET_CUDA_FP16 || preferableTarget == DNN_TARGET_CUDA)
-                CV_CheckEQ(input, CV_32F, "Unsupported type");
+                CV_CheckType(input, input == CV_32F || input == CV_32S || input == CV_64S, "");
             else if (preferableTarget == DNN_TARGET_OPENCL_FP16)
                 CV_CheckType(input, input == CV_16F || input == CV_8S || input == CV_32S || input == CV_64S, "");
             else

@@ -827,7 +827,7 @@ public:
             offsets.push_back(std::move(offsets_i));
         }

-        return make_cuda_node<cuda4dnn::SliceOp>(preferableTarget, std::move(context->stream), std::move(offsets));
+        return make_cuda_node_with_type<cuda4dnn::SliceOp>(preferableTarget, inputs[0]->getHostMatDepth(), std::move(context->stream), std::move(offsets));
     }
 #endif

modules/dnn/test/test_int.cpp:

@@ -137,9 +137,6 @@ TEST_P(Test_Permute_Int, random)
     Backend backend = get<0>(backend_target);
     Target target = get<1>(backend_target);
-    if(backend == DNN_BACKEND_CUDA)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA);
-
     std::vector<int> inShape{2, 3, 4, 5};
     int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
     Mat input(inShape, matType);

@@ -363,4 +360,140 @@ INSTANTIATE_TEST_CASE_P(/**/, Test_Cast_Int, Combine(
     dnnBackendsAndTargets()
 ));

+typedef testing::TestWithParam<tuple<int, tuple<Backend, Target> > > Test_Slice_Int;
+TEST_P(Test_Slice_Int, random)
+{
+    int matType = get<0>(GetParam());
+    tuple<Backend, Target> backend_target = get<1>(GetParam());
+    Backend backend = get<0>(backend_target);
+    Target target = get<1>(backend_target);
+
+    std::vector<int> inputShape{1, 16, 6, 8};
+    std::vector<int> begin{0, 4, 0, 0};
+    std::vector<int> end{1, 8, 6, 8};
+    int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
+    Mat input(inputShape, matType);
+    cv::randu(input, low, low + 100);
+    std::vector<Range> range(4);
+    for (int i = 0; i < 4; ++i)
+        range[i] = Range(begin[i], end[i]);
+
+    Net net;
+    LayerParams lp;
+    lp.type = "Slice";
+    lp.name = "testLayer";
+    lp.set("begin", DictValue::arrayInt<int*>(&(begin[0]), 4));
+    lp.set("end", DictValue::arrayInt<int*>(&(end[0]), 4));
+    net.addLayerToPrev(lp.name, lp.type, lp);
+
+    net.setInput(input);
+    net.setPreferableBackend(backend);
+    net.setPreferableTarget(target);
+    Mat out = net.forward();
+
+    EXPECT_GT(cv::norm(out, NORM_INF), 0);
+    normAssert(out, input(range));
+}
+
+INSTANTIATE_TEST_CASE_P(/**/, Test_Slice_Int, Combine(
+    testing::Values(CV_32S, CV_64S),
+    dnnBackendsAndTargets()
+));
+
+typedef testing::TestWithParam<tuple<int, tuple<Backend, Target> > > Test_Reshape_Int;
+TEST_P(Test_Reshape_Int, random)
+{
+    int matType = get<0>(GetParam());
+    tuple<Backend, Target> backend_target = get<1>(GetParam());
+    Backend backend = get<0>(backend_target);
+    Target target = get<1>(backend_target);
+
+    std::vector<int> inShape{2, 3, 4, 5};
+    std::vector<int> outShape{2, 3, 2, 10};
+    int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
+    Mat input(inShape, matType);
+    cv::randu(input, low, low + 100);
+
+    Net net;
+    LayerParams lp;
+    lp.type = "Reshape";
+    lp.name = "testLayer";
+    lp.set("dim", DictValue::arrayInt<int*>(&outShape[0], outShape.size()));
+    net.addLayerToPrev(lp.name, lp.type, lp);
+
+    net.setInput(input);
+    net.setPreferableBackend(backend);
+    net.setPreferableTarget(target);
+    Mat re = net.forward();
+
+    EXPECT_EQ(re.depth(), matType);
+    EXPECT_EQ(re.size.dims(), 4);
+    EXPECT_EQ(re.size[0], outShape[0]);
+    EXPECT_EQ(re.size[1], outShape[1]);
+    EXPECT_EQ(re.size[2], outShape[2]);
+    EXPECT_EQ(re.size[3], outShape[3]);
+    for (size_t i = 0; i < input.total(); ++i)
+    {
+        if (matType == CV_32S) {
+            EXPECT_EQ(re.ptr<int32_t>()[i], input.ptr<int32_t>()[i]);
+        } else {
+            EXPECT_EQ(re.ptr<int64_t>()[i], input.ptr<int64_t>()[i]);
+        }
+    }
+}
+
+INSTANTIATE_TEST_CASE_P(/**/, Test_Reshape_Int, Combine(
+    testing::Values(CV_32S, CV_64S),
+    dnnBackendsAndTargets()
+));
+
+typedef testing::TestWithParam<tuple<int, tuple<Backend, Target> > > Test_Flatten_Int;
+TEST_P(Test_Flatten_Int, random)
+{
+    int matType = get<0>(GetParam());
+    tuple<Backend, Target> backend_target = get<1>(GetParam());
+    Backend backend = get<0>(backend_target);
+    Target target = get<1>(backend_target);
+
+    std::vector<int> inShape{2, 3, 4, 5};
+    int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
+    Mat input(inShape, matType);
+    cv::randu(input, low, low + 100);
+
+    Net net;
+    LayerParams lp;
+    lp.type = "Flatten";
+    lp.name = "testLayer";
+    lp.set("axis", 1);
+    net.addLayerToPrev(lp.name, lp.type, lp);
+
+    net.setInput(input);
+    net.setPreferableBackend(backend);
+    net.setPreferableTarget(target);
+    Mat re = net.forward();
+
+    EXPECT_EQ(re.depth(), matType);
+    EXPECT_EQ(re.size.dims(), 2);
+    EXPECT_EQ(re.size[0], inShape[0]);
+    EXPECT_EQ(re.size[1], inShape[1] * inShape[2] * inShape[3]);
+    for (size_t i = 0; i < input.total(); ++i)
+    {
+        if (matType == CV_32S) {
+            EXPECT_EQ(re.ptr<int32_t>()[i], input.ptr<int32_t>()[i]);
+        } else {
+            EXPECT_EQ(re.ptr<int64_t>()[i], input.ptr<int64_t>()[i]);
+        }
+    }
+}
+
+INSTANTIATE_TEST_CASE_P(/**/, Test_Flatten_Int, Combine(
+    testing::Values(CV_32S, CV_64S),
+    dnnBackendsAndTargets()
+));
+
 }} // namespace
