mirror of https://github.com/opencv/opencv.git
Merge pull request #17363 from YashasSamaga:cuda4dnn-eltwise-fusion2
cuda4dnn(conv): fuse eltwise with convolutions — manually rebased to avoid a bad git merge (pull/17791/head)
parent
44d473fba0
commit
d0e6d2438c
22 changed files with 1609 additions and 273 deletions
@@ -0,0 +1,121 @@
||||
// This file is part of OpenCV project. |
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory |
||||
// of this distribution and at http://opencv.org/license.html. |
||||
|
||||
#include <cuda_runtime.h> |
||||
#include <cuda_fp16.h> |
||||
|
||||
#include "functors.hpp" |
||||
#include "vector_traits.hpp" |
||||
#include "grid_stride_range.hpp" |
||||
#include "execution.hpp" |
||||
|
||||
#include "../cuda4dnn/csl/stream.hpp" |
||||
#include "../cuda4dnn/csl/span.hpp" |
||||
|
||||
using namespace cv::dnn::cuda4dnn::csl; |
||||
using namespace cv::dnn::cuda4dnn::csl::device; |
||||
|
||||
namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { |
||||
|
||||
namespace raw {

    /* Fused in-place activation followed by an eltwise combination:
     *   inplace_output[i] = eltwise_op(activation_op(inplace_output[i]), eltwise[i])
     *
     * N is the number of elements handled per vectorized load/store.
     * Preconditions (enforced by the host-side launcher):
     *  - inplace_output.size() == eltwise.size()
     *  - both pointers fully aligned for N-wide vector access
     */
    template <class T, class ActivationOp, class EltwiseOp, std::size_t N>
    __global__ void generic_op_eltwise_op_inplace_vec(Span<T> inplace_output, View<T> eltwise, const typename ActivationOp::Params act_params, const typename EltwiseOp::Params eltwise_params) {
        using vector_type = get_vector_type_t<T, N>;

        auto output_ptr = vector_type::get_pointer(inplace_output.data());
        auto eltwise_ptr = vector_type::get_pointer(eltwise.data());

        ActivationOp activation_op(act_params);
        EltwiseOp eltwise_op(eltwise_params);

        /* grid-stride loop over whole vectors; the launcher guarantees the
         * total element count is divisible by the vector width */
        for (auto i : grid_stride_range(inplace_output.size() / vector_type::size())) {
            vector_type out_vec, elt_vec;
            v_load(out_vec, output_ptr[i]);
            v_load(elt_vec, eltwise_ptr[i]);
            for (int j = 0; j < out_vec.size(); j++)
                out_vec.data[j] = eltwise_op(activation_op(out_vec.data[j]), elt_vec.data[j]);
            v_store(output_ptr[i], out_vec);
        }
    }
}

/* Checks N-alignment preconditions and launches the vectorized kernel. */
template <class T, class ActivationOp, class EltwiseOp, std::size_t N> static
void launch_vectorized_generic_op_eltwise_op_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise, const typename ActivationOp::Params& act_params, const typename EltwiseOp::Params& eltwise_params) {
    CV_Assert(is_fully_aligned<T>(inplace_output, N));
    CV_Assert(is_fully_aligned<T>(eltwise, N));

    auto kernel = raw::generic_op_eltwise_op_inplace_vec<T, ActivationOp, EltwiseOp, N>;
    auto policy = make_policy(kernel, inplace_output.size() / N, 0, stream);
    launch_kernel(kernel, policy, inplace_output, eltwise, act_params, eltwise_params);
}

/* Dispatches to the widest vector width supported by operand alignment
 * (4, then 2, then scalar). */
template <class T, class ActivationOp, class EltwiseOp> static
void generic_op_eltwise_op_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise, const typename ActivationOp::Params& act_params = {}, const typename EltwiseOp::Params& eltwise_params = {}) {
    CV_Assert(inplace_output.size() == eltwise.size());

    if (is_fully_aligned<T>(inplace_output, 4) && is_fully_aligned<T>(eltwise, 4)) {
        launch_vectorized_generic_op_eltwise_op_inplace<T, ActivationOp, EltwiseOp, 4>(stream, inplace_output, eltwise, act_params, eltwise_params);
    } else if (is_fully_aligned<T>(inplace_output, 2) && is_fully_aligned<T>(eltwise, 2)) {
        launch_vectorized_generic_op_eltwise_op_inplace<T, ActivationOp, EltwiseOp, 2>(stream, inplace_output, eltwise, act_params, eltwise_params);
    } else {
        launch_vectorized_generic_op_eltwise_op_inplace<T, ActivationOp, EltwiseOp, 1>(stream, inplace_output, eltwise, act_params, eltwise_params);
    }
}
||||
|
||||
/* Public entry points: inplace_output = activation(inplace_output) + eltwise,
 * one wrapper per supported activation. */

template <class T>
void relu_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise, T slope) {
    generic_op_eltwise_op_inplace<T, ReLUFunctor<T>, SumFunctor<T>>(stream, inplace_output, eltwise, {slope});
}

template <class T>
void clipped_relu_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise, T floor, T ceiling) {
    /* comparison done in double so the check also works for __half */
    CV_Assert(static_cast<double>(floor) <= static_cast<double>(ceiling));
    generic_op_eltwise_op_inplace<T, ClippedReLUFunctor<T>, SumFunctor<T>>(stream, inplace_output, eltwise, {floor, ceiling});
}

template <class T>
void tanh_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise) {
    generic_op_eltwise_op_inplace<T, TanHFunctor<T>, SumFunctor<T>>(stream, inplace_output, eltwise);
}

template <class T>
void swish_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise) {
    generic_op_eltwise_op_inplace<T, SwishFunctor<T>, SumFunctor<T>>(stream, inplace_output, eltwise);
}

template <class T>
void mish_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise) {
    generic_op_eltwise_op_inplace<T, MishFunctor<T>, SumFunctor<T>>(stream, inplace_output, eltwise);
}

template <class T>
void sigmoid_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise) {
    generic_op_eltwise_op_inplace<T, SigmoidFunctor<T>, SumFunctor<T>>(stream, inplace_output, eltwise);
}

template <class T>
void power_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, View<T> eltwise, T exp, T scale, T shift) {
    generic_op_eltwise_op_inplace<T, PowerFunctor<T>, SumFunctor<T>>(stream, inplace_output, eltwise, {exp, scale, shift});
}

/* __half arithmetic requires compute capability 5.3+ */
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void relu_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, View<__half>, __half);
template void clipped_relu_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
template void tanh_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, View<__half>);
template void swish_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, View<__half>);
template void mish_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, View<__half>);
template void sigmoid_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, View<__half>);
template void power_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, View<__half>, __half, __half, __half);
#endif

template void relu_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, View<float>, float);
template void clipped_relu_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, View<float>, float, float);
template void tanh_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, View<float>);
template void swish_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, View<float>);
template void mish_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, View<float>);
template void sigmoid_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, View<float>);
template void power_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, View<float>, float, float, float);
||||
|
||||
}}}} /* namespace cv::dnn::cuda4dnn::kernels */ |
@@ -0,0 +1,125 @@
||||
// This file is part of OpenCV project. |
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory |
||||
// of this distribution and at http://opencv.org/license.html. |
||||
|
||||
#include <cuda_runtime.h> |
||||
#include <cuda_fp16.h> |
||||
|
||||
#include "functors.hpp" |
||||
#include "types.hpp" |
||||
#include "vector_traits.hpp" |
||||
#include "grid_stride_range.hpp" |
||||
#include "execution.hpp" |
||||
|
||||
#include "../cuda4dnn/csl/stream.hpp" |
||||
#include "../cuda4dnn/csl/span.hpp" |
||||
|
||||
using namespace cv::dnn::cuda4dnn::csl; |
||||
using namespace cv::dnn::cuda4dnn::csl::device; |
||||
|
||||
namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { |
||||
|
||||
namespace raw {

    /* Fused in-place bias + activation + eltwise combination:
     *   inplace_output[i] = eltwise_op(activation_op(inplace_output[i] + bias[c]), eltwise[i])
     * where c = (i / inner_size) % bias.size() selects the bias channel.
     *
     * Note: `inner_size` is passed in units of N-wide vectors (the host
     * launcher divides by N); since inner_size % N == 0 is enforced, all N
     * lanes of one vector share the same bias value.
     */
    template <class T, class ActivationOp, class EltwiseOp, std::size_t N>
    __global__ void biasN_generic_op_eltwise_op_inplace_vec(Span<T> inplace_output, size_type inner_size, View<T> bias, View<T> eltwise, const typename ActivationOp::Params act_params, const typename EltwiseOp::Params eltwise_params) {
        using vector_type = get_vector_type_t<T, N>;

        auto inplace_output_vPtr = vector_type::get_pointer(inplace_output.data());
        auto eltwise_vPtr = vector_type::get_pointer(eltwise.data());

        ActivationOp activation_op(act_params);
        EltwiseOp eltwise_op(eltwise_params);

        for (auto i : grid_stride_range(inplace_output.size() / vector_type::size())) {
            const index_type bias_idx = (i / inner_size) % bias.size();

            vector_type output_vec, eltwise_vec;
            v_load(output_vec, inplace_output_vPtr[i]);
            v_load(eltwise_vec, eltwise_vPtr[i]);
            for (int j = 0; j < output_vec.size(); j++)
                output_vec.data[j] = eltwise_op(activation_op(output_vec.data[j] + bias[bias_idx]), eltwise_vec.data[j]);
            v_store(inplace_output_vPtr[i], output_vec);
        }
    }
}

/* Checks preconditions, converts inner_size to vector units, and launches
 * the vectorized kernel. */
template <class T, class ActivationOp, class EltwiseOp, std::size_t N> static
void launch_vectorized_biasN_generic_op_eltwise_op_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise, const typename ActivationOp::Params& act_params, const typename EltwiseOp::Params& eltwise_params) {
    CV_Assert(is_fully_aligned<T>(inplace_output, N));
    /* the total size must be a whole number of channel blocks for the modular
     * bias indexing to be meaningful; added for consistency with the sibling
     * biasN_eltwise_op_generic_op launcher, which already enforces this */
    CV_Assert(inplace_output.size() % bias.size() == 0);
    CV_Assert(is_fully_aligned<T>(eltwise, N));
    CV_Assert(inner_size % N == 0);

    auto kernel = raw::biasN_generic_op_eltwise_op_inplace_vec<T, ActivationOp, EltwiseOp, N>;
    auto policy = make_policy(kernel, inplace_output.size() / N, 0, stream);
    launch_kernel(kernel, policy, inplace_output, inner_size / N, bias, eltwise, act_params, eltwise_params);
}

/* Dispatches to the widest vector width (4 → 2 → 1) permitted by operand
 * alignment and the divisibility of inner_size. */
template <class T, class ActivationOp, class EltwiseOp> static
void biasN_generic_op_eltwise_op_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise, const typename ActivationOp::Params& act_params = {}, const typename EltwiseOp::Params& eltwise_params = {}) {
    CV_Assert(inplace_output.size() == eltwise.size());

    if (is_fully_aligned<T>(inplace_output, 4) && is_fully_aligned<T>(eltwise, 4) && inner_size % 4 == 0) {
        launch_vectorized_biasN_generic_op_eltwise_op_inplace<T, ActivationOp, EltwiseOp, 4>(stream, inplace_output, inner_size, bias, eltwise, act_params, eltwise_params);
    } else if (is_fully_aligned<T>(inplace_output, 2) && is_fully_aligned<T>(eltwise, 2) && inner_size % 2 == 0) {
        launch_vectorized_biasN_generic_op_eltwise_op_inplace<T, ActivationOp, EltwiseOp, 2>(stream, inplace_output, inner_size, bias, eltwise, act_params, eltwise_params);
    } else {
        launch_vectorized_biasN_generic_op_eltwise_op_inplace<T, ActivationOp, EltwiseOp, 1>(stream, inplace_output, inner_size, bias, eltwise, act_params, eltwise_params);
    }
}
||||
|
||||
/* Public entry points:
 *   inplace_output = activation(inplace_output + bias) + eltwise
 * `inner_size` is the number of elements per bias channel. */

template <class T>
void biasN_relu_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise, T slope) {
    biasN_generic_op_eltwise_op_inplace<T, ReLUFunctor<T>, SumFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise, {slope});
}

template <class T>
void biasN_clipped_relu_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise, T floor, T ceiling) {
    /* comparison done in double so the check also works for __half */
    CV_Assert(static_cast<double>(floor) <= static_cast<double>(ceiling));
    biasN_generic_op_eltwise_op_inplace<T, ClippedReLUFunctor<T>, SumFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise, {floor, ceiling});
}

template <class T>
void biasN_tanh_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise) {
    biasN_generic_op_eltwise_op_inplace<T, TanHFunctor<T>, SumFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise);
}

template <class T>
void biasN_swish_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise) {
    biasN_generic_op_eltwise_op_inplace<T, SwishFunctor<T>, SumFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise);
}

template <class T>
void biasN_mish_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise) {
    biasN_generic_op_eltwise_op_inplace<T, MishFunctor<T>, SumFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise);
}

template <class T>
void biasN_sigmoid_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise) {
    biasN_generic_op_eltwise_op_inplace<T, SigmoidFunctor<T>, SumFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise);
}

template <class T>
void biasN_power_eltwise_sum_2_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise, T exp, T scale, T shift) {
    biasN_generic_op_eltwise_op_inplace<T, PowerFunctor<T>, SumFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise, {exp, scale, shift});
}

/* __half arithmetic requires compute capability 5.3+ */
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void biasN_relu_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>, __half);
template void biasN_clipped_relu_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>, __half, __half);
template void biasN_tanh_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>);
template void biasN_swish_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>);
template void biasN_mish_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>);
template void biasN_sigmoid_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>);
template void biasN_power_eltwise_sum_2_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>, __half, __half, __half);
#endif

template void biasN_relu_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>, float);
template void biasN_clipped_relu_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>, float, float);
template void biasN_tanh_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>);
template void biasN_swish_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>);
template void biasN_mish_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>);
template void biasN_sigmoid_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>);
template void biasN_power_eltwise_sum_2_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>, float, float, float);
||||
|
||||
}}}} /* namespace cv::dnn::cuda4dnn::kernels */ |
@@ -0,0 +1,132 @@
||||
// This file is part of OpenCV project. |
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory |
||||
// of this distribution and at http://opencv.org/license.html. |
||||
|
||||
#include <cuda_runtime.h> |
||||
#include <cuda_fp16.h> |
||||
|
||||
#include "functors.hpp" |
||||
#include "types.hpp" |
||||
#include "vector_traits.hpp" |
||||
#include "grid_stride_range.hpp" |
||||
#include "execution.hpp" |
||||
|
||||
#include "../cuda4dnn/csl/stream.hpp" |
||||
#include "../cuda4dnn/csl/span.hpp" |
||||
|
||||
using namespace cv::dnn::cuda4dnn::csl; |
||||
using namespace cv::dnn::cuda4dnn::csl::device; |
||||
|
||||
namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { |
||||
|
||||
namespace raw {
    /* Fused in-place bias + eltwise + activation (eltwise happens BEFORE the
     * activation, unlike biasN_generic_op_eltwise_op_inplace_vec):
     *   inplace_output[i] = activation_op(eltwise_op(inplace_output[i] + bias[c], eltwise[i]))
     * where c = (i / inner_size) % bias.size() selects the bias channel.
     *
     * `inner_size` arrives in units of N-wide vectors; the launcher checks
     * inner_size % N == 0 so every lane of a vector uses the same bias. */
    template <class T, class EltwiseOp, class ActivationOp, std::size_t N>
    __global__ void biasN_eltwise_op_generic_op_inplace_vec(Span<T> inplace_output, size_type inner_size, View<T> bias, View<T> eltwise, const typename EltwiseOp::Params eltwise_params, const typename ActivationOp::Params act_params) {
        using vector_type = get_vector_type_t<T, N>;

        auto out_ptr = vector_type::get_pointer(inplace_output.data());
        auto elt_ptr = vector_type::get_pointer(eltwise.data());

        EltwiseOp eltwise_op(eltwise_params);
        ActivationOp activation_op(act_params);

        for (auto i : grid_stride_range(inplace_output.size() / vector_type::size())) {
            const index_type c = (i / inner_size) % bias.size();

            vector_type out_vec, elt_vec;
            v_load(out_vec, out_ptr[i]);
            v_load(elt_vec, elt_ptr[i]);
            for (int j = 0; j < out_vec.size(); j++)
                out_vec.data[j] = activation_op(eltwise_op(out_vec.data[j] + bias[c], elt_vec.data[j]));
            v_store(out_ptr[i], out_vec);
        }
    }
}

/* Checks alignment/divisibility preconditions and converts inner_size to
 * vector units before launching the kernel. */
template <class T, class EltwiseOp, class ActivationOp, std::size_t N> static
void launch_vectorized_biasN_eltwise_op_generic_op_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise, const typename EltwiseOp::Params& eltwise_params, const typename ActivationOp::Params& act_params) {
    CV_Assert(is_fully_aligned<T>(inplace_output, N));
    CV_Assert(inplace_output.size() % bias.size() == 0);
    CV_Assert(is_fully_aligned<T>(eltwise, N));
    CV_Assert(inner_size % N == 0);

    auto kernel = raw::biasN_eltwise_op_generic_op_inplace_vec<T, EltwiseOp, ActivationOp, N>;
    auto policy = make_policy(kernel, inplace_output.size() / N, 0, stream);
    launch_kernel(kernel, policy, inplace_output, inner_size / N, bias, eltwise, eltwise_params, act_params);
}

/* Chooses the widest usable vector width (4 → 2 → 1) based on operand
 * alignment and the divisibility of inner_size. */
template <class T, class EltwiseOp, class ActivationOp> static
void biasN_eltwise_op_generic_op_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise, const typename EltwiseOp::Params& eltwise_params = {}, const typename ActivationOp::Params& act_params = {}) {
    CV_Assert(inplace_output.size() == eltwise.size());

    if (is_fully_aligned<T>(inplace_output, 4) && is_fully_aligned<T>(eltwise, 4) && inner_size % 4 == 0) {
        launch_vectorized_biasN_eltwise_op_generic_op_inplace<T, EltwiseOp, ActivationOp, 4>(stream, inplace_output, inner_size, bias, eltwise, eltwise_params, act_params);
    } else if (is_fully_aligned<T>(inplace_output, 2) && is_fully_aligned<T>(eltwise, 2) && inner_size % 2 == 0) {
        launch_vectorized_biasN_eltwise_op_generic_op_inplace<T, EltwiseOp, ActivationOp, 2>(stream, inplace_output, inner_size, bias, eltwise, eltwise_params, act_params);
    } else {
        launch_vectorized_biasN_eltwise_op_generic_op_inplace<T, EltwiseOp, ActivationOp, 1>(stream, inplace_output, inner_size, bias, eltwise, eltwise_params, act_params);
    }
}
||||
|
||||
/* Public entry points:
 *   inplace_output = activation((inplace_output + bias) + eltwise)
 * `inner_size` is the number of elements per bias channel. */

template <class T>
void biasN_eltwise_sum_2_identity_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise) {
    biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, IdentityFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise);
}

template <class T>
void biasN_eltwise_sum_2_relu_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise, T slope) {
    biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, ReLUFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise, {}, {slope});
}

template <class T>
void biasN_eltwise_sum_2_clipped_relu_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise, T floor, T ceiling) {
    /* comparison done in double so the check also works for __half */
    CV_Assert(static_cast<double>(floor) <= static_cast<double>(ceiling));
    biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, ClippedReLUFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise, {}, {floor, ceiling});
}

template <class T>
void biasN_eltwise_sum_2_tanh_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise) {
    biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, TanHFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise);
}

template <class T>
void biasN_eltwise_sum_2_swish_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise) {
    biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, SwishFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise);
}

template <class T>
void biasN_eltwise_sum_2_mish_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise) {
    biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, MishFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise);
}

template <class T>
void biasN_eltwise_sum_2_sigmoid_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise) {
    biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, SigmoidFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise);
}

template <class T>
void biasN_eltwise_sum_2_power_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise, T exp, T scale, T shift) {
    biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, PowerFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise, {}, {exp, scale, shift});
}

/* __half arithmetic requires compute capability 5.3+ */
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void biasN_eltwise_sum_2_identity_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>);
template void biasN_eltwise_sum_2_relu_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>, __half);
template void biasN_eltwise_sum_2_clipped_relu_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>, __half, __half);
template void biasN_eltwise_sum_2_tanh_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>);
template void biasN_eltwise_sum_2_swish_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>);
template void biasN_eltwise_sum_2_mish_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>);
template void biasN_eltwise_sum_2_sigmoid_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>);
template void biasN_eltwise_sum_2_power_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>, __half, __half, __half);
#endif

template void biasN_eltwise_sum_2_identity_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>);
template void biasN_eltwise_sum_2_relu_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>, float);
template void biasN_eltwise_sum_2_clipped_relu_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>, float, float);
template void biasN_eltwise_sum_2_tanh_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>);
template void biasN_eltwise_sum_2_swish_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>);
template void biasN_eltwise_sum_2_mish_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>);
template void biasN_eltwise_sum_2_sigmoid_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>);
template void biasN_eltwise_sum_2_power_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>, float, float, float);
||||
|
||||
}}}} /* namespace cv::dnn::cuda4dnn::kernels */ |
@@ -0,0 +1,125 @@
||||
// This file is part of OpenCV project. |
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory |
||||
// of this distribution and at http://opencv.org/license.html. |
||||
|
||||
#include <cuda_runtime.h> |
||||
#include <cuda_fp16.h> |
||||
|
||||
#include "functors.hpp" |
||||
#include "types.hpp" |
||||
#include "vector_traits.hpp" |
||||
#include "grid_stride_range.hpp" |
||||
#include "execution.hpp" |
||||
|
||||
#include "../cuda4dnn/csl/stream.hpp" |
||||
#include "../cuda4dnn/csl/span.hpp" |
||||
|
||||
using namespace cv::dnn::cuda4dnn::csl; |
||||
using namespace cv::dnn::cuda4dnn::csl::device; |
||||
|
||||
namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { |
||||
|
||||
namespace raw {

    /* Out-of-place fused eltwise combination followed by activation:
     *   output[i] = activation_op(eltwise_op(x[i], y[i]))
     *
     * Preconditions (enforced by the host-side launcher):
     *  - output, x and y all have the same size
     *  - all three pointers fully aligned for N-wide vector access
     */
    template <class T, class EltwiseOp, class ActivationOp, std::size_t N>
    __global__ void eltwise_op_generic_op_vec(Span<T> output, View<T> x, View<T> y, const typename EltwiseOp::Params eltwise_params, const typename ActivationOp::Params act_params) {
        using vector_type = get_vector_type_t<T, N>;

        auto output_vPtr = vector_type::get_pointer(output.data());
        auto x_vPtr = vector_type::get_pointer(x.data());
        auto y_vPtr = vector_type::get_pointer(y.data());

        EltwiseOp eltwise_op(eltwise_params);
        ActivationOp activation_op(act_params);

        for (auto i : grid_stride_range(output.size() / vector_type::size())) {
            vector_type vec_x, vec_y;
            v_load(vec_x, x_vPtr[i]);
            v_load(vec_y, y_vPtr[i]);
            for (int j = 0; j < vec_x.size(); j++)
                vec_x.data[j] = activation_op(eltwise_op(vec_x.data[j], vec_y.data[j]));
            v_store(output_vPtr[i], vec_x);
        }
    }
}

/* Checks N-alignment of all three operands and launches the kernel. */
template <class T, class EltwiseOp, class ActivationOp, std::size_t N> static
void launch_vectorized_eltwise_op_generic_op(const Stream& stream, Span<T> output, View<T> x, View<T> y, const typename EltwiseOp::Params& eltwise_params, const typename ActivationOp::Params& act_params) {
    CV_Assert(is_fully_aligned<T>(output, N));
    CV_Assert(is_fully_aligned<T>(x, N));
    CV_Assert(is_fully_aligned<T>(y, N));

    auto kernel = raw::eltwise_op_generic_op_vec<T, EltwiseOp, ActivationOp, N>;
    auto policy = make_policy(kernel, output.size() / N, 0, stream);
    launch_kernel(kernel, policy, output, x, y, eltwise_params, act_params);
}

/* Dispatches to the widest vector width (4 → 2 → 1) permitted by the
 * alignment of all three operands. */
template <class T, class EltwiseOp, class ActivationOp> static
void eltwise_op_generic_op(const Stream& stream, Span<T> output, View<T> x, View<T> y, const typename EltwiseOp::Params& eltwise_params = {}, const typename ActivationOp::Params& act_params = {}) {
    CV_Assert(output.size() == x.size());
    CV_Assert(output.size() == y.size());

    if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(x, 4) && is_fully_aligned<T>(y, 4)) {
        launch_vectorized_eltwise_op_generic_op<T, EltwiseOp, ActivationOp, 4>(stream, output, x, y, eltwise_params, act_params);
    } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(x, 2) && is_fully_aligned<T>(y, 2)) {
        /* fixed: this branch previously tested is_fully_aligned<T>(y, 4),
         * which sent 2-aligned (but not 4-aligned) y operands down the scalar
         * path and was inconsistent with the sibling dispatchers; results are
         * unchanged, only the vector width selection is corrected */
        launch_vectorized_eltwise_op_generic_op<T, EltwiseOp, ActivationOp, 2>(stream, output, x, y, eltwise_params, act_params);
    } else {
        launch_vectorized_eltwise_op_generic_op<T, EltwiseOp, ActivationOp, 1>(stream, output, x, y, eltwise_params, act_params);
    }
}
||||
|
||||
/* Public entry points: output = activation(x + y), out-of-place. */

template <class T>
void eltwise_sum_2_relu(const Stream& stream, Span<T> output, View<T> x, View<T> y, T slope) {
    eltwise_op_generic_op<T, SumFunctor<T>, ReLUFunctor<T>>(stream, output, x, y, {}, {slope});
}

template <class T>
void eltwise_sum_2_clipped_relu(const Stream& stream, Span<T> output, View<T> x, View<T> y, T floor, T ceiling) {
    /* comparison done in double so the check also works for __half */
    CV_Assert(static_cast<double>(floor) <= static_cast<double>(ceiling));
    eltwise_op_generic_op<T, SumFunctor<T>, ClippedReLUFunctor<T>>(stream, output, x, y, {}, {floor, ceiling});
}

template <class T>
void eltwise_sum_2_tanh(const Stream& stream, Span<T> output, View<T> x, View<T> y) {
    eltwise_op_generic_op<T, SumFunctor<T>, TanHFunctor<T>>(stream, output, x, y);
}

template <class T>
void eltwise_sum_2_swish(const Stream& stream, Span<T> output, View<T> x, View<T> y) {
    eltwise_op_generic_op<T, SumFunctor<T>, SwishFunctor<T>>(stream, output, x, y);
}

template <class T>
void eltwise_sum_2_mish(const Stream& stream, Span<T> output, View<T> x, View<T> y) {
    eltwise_op_generic_op<T, SumFunctor<T>, MishFunctor<T>>(stream, output, x, y);
}

template <class T>
void eltwise_sum_2_sigmoid(const Stream& stream, Span<T> output, View<T> x, View<T> y) {
    eltwise_op_generic_op<T, SumFunctor<T>, SigmoidFunctor<T>>(stream, output, x, y);
}

template <class T>
void eltwise_sum_2_power(const Stream& stream, Span<T> output, View<T> x, View<T> y, T exp, T scale, T shift) {
    eltwise_op_generic_op<T, SumFunctor<T>, PowerFunctor<T>>(stream, output, x, y, {}, {exp, scale, shift});
}

/* __half arithmetic requires compute capability 5.3+ */
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void eltwise_sum_2_relu<__half>(const Stream&, Span<__half>, View<__half>, View<__half>, __half);
template void eltwise_sum_2_clipped_relu<__half>(const Stream&, Span<__half>, View<__half>, View<__half>, __half, __half);
template void eltwise_sum_2_tanh<__half>(const Stream&, Span<__half>, View<__half>, View<__half>);
template void eltwise_sum_2_swish<__half>(const Stream&, Span<__half>, View<__half>, View<__half>);
template void eltwise_sum_2_mish<__half>(const Stream&, Span<__half>, View<__half>, View<__half>);
template void eltwise_sum_2_sigmoid<__half>(const Stream&, Span<__half>, View<__half>, View<__half>);
template void eltwise_sum_2_power<__half>(const Stream&, Span<__half>, View<__half>, View<__half>, __half, __half, __half);
#endif

template void eltwise_sum_2_relu<float>(const Stream&, Span<float>, View<float>, View<float>, float);
template void eltwise_sum_2_clipped_relu<float>(const Stream&, Span<float>, View<float>, View<float>, float, float);
template void eltwise_sum_2_tanh<float>(const Stream&, Span<float>, View<float>, View<float>);
template void eltwise_sum_2_swish<float>(const Stream&, Span<float>, View<float>, View<float>);
template void eltwise_sum_2_mish<float>(const Stream&, Span<float>, View<float>, View<float>);
template void eltwise_sum_2_sigmoid<float>(const Stream&, Span<float>, View<float>, View<float>);
template void eltwise_sum_2_power<float>(const Stream&, Span<float>, View<float>, View<float>, float, float, float);
||||
|
||||
}}}} /* namespace cv::dnn::cuda4dnn::kernels */ |
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#ifndef OPENCV_DNN_SRC_CUDA4DNN_KERNELS_ACTIVATION_ELTWISE_HPP
#define OPENCV_DNN_SRC_CUDA4DNN_KERNELS_ACTIVATION_ELTWISE_HPP

#include "../csl/stream.hpp"
#include "../csl/span.hpp"

#include <cstddef>

namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {

    /* Fused in-place activation followed by an elementwise sum:
     *
     *   inplace_output = activation(inplace_output) + eltwise
     *
     * `eltwise` is combined element-for-element with `inplace_output` (no broadcasting).
     * Kernels are launched on `stream`.
     */

    /* activation: ReLU with negative-slope coefficient `slope` */
    template <class T>
    void relu_eltwise_sum_2_inplace(const csl::Stream& stream, csl::Span<T> inplace_output, csl::View<T> eltwise, T slope);

    /* activation: ReLU clamped to the range [floor, ceiling] */
    template <class T>
    void clipped_relu_eltwise_sum_2_inplace(const csl::Stream& stream, csl::Span<T> inplace_output, csl::View<T> eltwise, T floor, T ceiling);

    template <class T>
    void tanh_eltwise_sum_2_inplace(const csl::Stream& stream, csl::Span<T> inplace_output, csl::View<T> eltwise);

    template <class T>
    void swish_eltwise_sum_2_inplace(const csl::Stream& stream, csl::Span<T> inplace_output, csl::View<T> eltwise);

    template <class T>
    void mish_eltwise_sum_2_inplace(const csl::Stream& stream, csl::Span<T> inplace_output, csl::View<T> eltwise);

    template <class T>
    void sigmoid_eltwise_sum_2_inplace(const csl::Stream& stream, csl::Span<T> inplace_output, csl::View<T> eltwise);

    /* activation: power, parameterized by (exp, scale, shift); see PowerFunctor in functors.hpp */
    template <class T>
    void power_eltwise_sum_2_inplace(const csl::Stream& stream, csl::Span<T> inplace_output, csl::View<T> eltwise, T exp, T scale, T shift);

}}}} /* namespace cv::dnn::cuda4dnn::kernels */

#endif /* OPENCV_DNN_SRC_CUDA4DNN_KERNELS_ACTIVATION_ELTWISE_HPP */
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#ifndef OPENCV_DNN_SRC_CUDA4DNN_KERNELS_BIAS_ACTIVATION_ELTWISE_HPP
#define OPENCV_DNN_SRC_CUDA4DNN_KERNELS_BIAS_ACTIVATION_ELTWISE_HPP

#include "../csl/stream.hpp"
#include "../csl/span.hpp"

#include <cstddef>

namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {

    /* Fused in-place bias addition, activation, then elementwise sum:
     *
     *   inplace_output = activation(inplace_output + bias) + eltwise
     *
     * Broadcasting on `bias` is allowed but not on `eltwise`.
     * `inner_size` controls the bias broadcast — presumably the number of consecutive
     * output elements that share one bias value (per-channel bias); confirm against the
     * kernel implementation.
     */

    /* activation: ReLU with negative-slope coefficient `slope` */
    template <class T>
    void biasN_relu_eltwise_sum_2_inplace(const csl::Stream& stream, csl::Span<T> inplace_output, std::size_t inner_size, csl::View<T> bias, csl::View<T> eltwise, T slope);

    /* activation: ReLU clamped to the range [floor, ceiling] */
    template <class T>
    void biasN_clipped_relu_eltwise_sum_2_inplace(const csl::Stream& stream, csl::Span<T> inplace_output, std::size_t inner_size, csl::View<T> bias, csl::View<T> eltwise, T floor, T ceiling);

    template <class T>
    void biasN_tanh_eltwise_sum_2_inplace(const csl::Stream& stream, csl::Span<T> inplace_output, std::size_t inner_size, csl::View<T> bias, csl::View<T> eltwise);

    template <class T>
    void biasN_sigmoid_eltwise_sum_2_inplace(const csl::Stream& stream, csl::Span<T> inplace_output, std::size_t inner_size, csl::View<T> bias, csl::View<T> eltwise);

    template <class T>
    void biasN_swish_eltwise_sum_2_inplace(const csl::Stream& stream, csl::Span<T> inplace_output, std::size_t inner_size, csl::View<T> bias, csl::View<T> eltwise);

    template <class T>
    void biasN_mish_eltwise_sum_2_inplace(const csl::Stream& stream, csl::Span<T> inplace_output, std::size_t inner_size, csl::View<T> bias, csl::View<T> eltwise);

    /* activation: power, parameterized by (exp, scale, shift); see PowerFunctor in functors.hpp */
    template <class T>
    void biasN_power_eltwise_sum_2_inplace(const csl::Stream& stream, csl::Span<T> inplace_output, std::size_t inner_size, csl::View<T> bias, csl::View<T> eltwise, T exp, T scale, T shift);

}}}} /* namespace cv::dnn::cuda4dnn::kernels */

#endif /* OPENCV_DNN_SRC_CUDA4DNN_KERNELS_BIAS_ACTIVATION_ELTWISE_HPP */
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#ifndef OPENCV_DNN_SRC_CUDA4DNN_KERNELS_BIAS_ELTWISE_ACTIVATION_HPP
#define OPENCV_DNN_SRC_CUDA4DNN_KERNELS_BIAS_ELTWISE_ACTIVATION_HPP

#include "../csl/stream.hpp"
#include "../csl/span.hpp"

#include <cstddef>

namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {

    /* Fused in-place bias addition, elementwise sum, then activation:
     *
     *   inplace_output = activation(inplace_output + bias + eltwise)
     *
     * Broadcasting on `bias` is allowed but not on `eltwise`.
     * `inner_size` controls the bias broadcast — presumably the number of consecutive
     * output elements that share one bias value (per-channel bias); confirm against the
     * kernel implementation.
     */

    /* activation: identity (no activation applied after the sums) */
    template <class T>
    void biasN_eltwise_sum_2_identity_inplace(const csl::Stream& stream, csl::Span<T> inplace_output, std::size_t inner_size, csl::View<T> bias, csl::View<T> eltwise);

    /* activation: ReLU with negative-slope coefficient `slope` */
    template <class T>
    void biasN_eltwise_sum_2_relu_inplace(const csl::Stream& stream, csl::Span<T> inplace_output, std::size_t inner_size, csl::View<T> bias, csl::View<T> eltwise, T slope);

    /* activation: ReLU clamped to the range [floor, ceiling] */
    template <class T>
    void biasN_eltwise_sum_2_clipped_relu_inplace(const csl::Stream& stream, csl::Span<T> inplace_output, std::size_t inner_size, csl::View<T> bias, csl::View<T> eltwise, T floor, T ceiling);

    template <class T>
    void biasN_eltwise_sum_2_tanh_inplace(const csl::Stream& stream, csl::Span<T> inplace_output, std::size_t inner_size, csl::View<T> bias, csl::View<T> eltwise);

    template <class T>
    void biasN_eltwise_sum_2_swish_inplace(const csl::Stream& stream, csl::Span<T> inplace_output, std::size_t inner_size, csl::View<T> bias, csl::View<T> eltwise);

    template <class T>
    void biasN_eltwise_sum_2_mish_inplace(const csl::Stream& stream, csl::Span<T> inplace_output, std::size_t inner_size, csl::View<T> bias, csl::View<T> eltwise);

    template <class T>
    void biasN_eltwise_sum_2_sigmoid_inplace(const csl::Stream& stream, csl::Span<T> inplace_output, std::size_t inner_size, csl::View<T> bias, csl::View<T> eltwise);

    /* activation: power, parameterized by (exp, scale, shift); see PowerFunctor in functors.hpp */
    template <class T>
    void biasN_eltwise_sum_2_power_inplace(const csl::Stream& stream, csl::Span<T> inplace_output, std::size_t inner_size, csl::View<T> bias, csl::View<T> eltwise, T exp, T scale, T shift);

}}}} /* namespace cv::dnn::cuda4dnn::kernels */

#endif /* OPENCV_DNN_SRC_CUDA4DNN_KERNELS_BIAS_ELTWISE_ACTIVATION_HPP */
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#ifndef OPENCV_DNN_SRC_CUDA4DNN_KERNELS_ELTWISE_ACTIVATION_HPP
#define OPENCV_DNN_SRC_CUDA4DNN_KERNELS_ELTWISE_ACTIVATION_HPP

#include "../csl/stream.hpp"
#include "../csl/span.hpp"

#include <cstddef>

namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {

    /* Fused elementwise sum followed by an activation (out-of-place):
     *
     *   output = activation(x + y)
     *
     * `x` and `y` are combined element-for-element (no broadcasting).
     * Kernels are launched on `stream`.
     */

    /* activation: ReLU with negative-slope coefficient `slope` */
    template <class T>
    void eltwise_sum_2_relu(const csl::Stream& stream, csl::Span<T> output, csl::View<T> x, csl::View<T> y, T slope);

    /* activation: ReLU clamped to the range [floor, ceiling] */
    template <class T>
    void eltwise_sum_2_clipped_relu(const csl::Stream& stream, csl::Span<T> output, csl::View<T> x, csl::View<T> y, T floor, T ceiling);

    template <class T>
    void eltwise_sum_2_tanh(const csl::Stream& stream, csl::Span<T> output, csl::View<T> x, csl::View<T> y);

    template <class T>
    void eltwise_sum_2_swish(const csl::Stream& stream, csl::Span<T> output, csl::View<T> x, csl::View<T> y);

    template <class T>
    void eltwise_sum_2_mish(const csl::Stream& stream, csl::Span<T> output, csl::View<T> x, csl::View<T> y);

    template <class T>
    void eltwise_sum_2_sigmoid(const csl::Stream& stream, csl::Span<T> output, csl::View<T> x, csl::View<T> y);

    /* activation: power, parameterized by (exp, scale, shift); see PowerFunctor in functors.hpp */
    template <class T>
    void eltwise_sum_2_power(const csl::Stream& stream, csl::Span<T> output, csl::View<T> x, csl::View<T> y, T exp, T scale, T shift);

}}}} /* namespace cv::dnn::cuda4dnn::kernels */

#endif /* OPENCV_DNN_SRC_CUDA4DNN_KERNELS_ELTWISE_ACTIVATION_HPP */