/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "../precomp.hpp"
#include "layers_common.hpp"
#include "../op_cuda.hpp"
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include "../op_vkcom.hpp"
#include "../op_webnn.hpp"
#include "../op_cann.hpp"

#include
#include
#include
#include

#ifdef HAVE_OPENCL
#include "opencl_kernels_dnn.hpp"
#endif

#ifdef HAVE_CUDA
#include "../cuda4dnn/primitives/activation.hpp"
using namespace cv::dnn::cuda4dnn;
#endif

#include

namespace cv
{
namespace dnn
{

using std::abs;
using std::exp;
using std::expm1;
using std::tanh;
using std::pow;
using std::ceil;
using std::floor;
using std::log;
using std::log1p;
using std::sqrt;
using std::round;
using std::acos;
using std::acosh;
using std::asin;
using std::asinh;
using std::atan;
using std::atanh;
using std::cos;
using std::cosh;
using std::erf;
using std::sin;
using std::sinh;
using std::tan;

template<typename Func>
class ElementWiseLayer : public Func::Layer
{
public:
    class PBody : public cv::ParallelLoopBody
    {
    public:
        const Func* func_;
        const Mat* src_;
        Mat* dst_;
        int nstripes_;

        PBody(const Func &func, const Mat &src, Mat& dst, int nstripes)
        {
            func_ = &func;
            src_ = &src;
            dst_ = &dst;
            nstripes_ = nstripes;
        }

        void operator()(const Range &r) const CV_OVERRIDE
        {
            int nstripes = nstripes_, nsamples = 1, outCn = 1;
            size_t planeSize = 1;

            if (src_->dims > 1)
            {
                nsamples = src_->size[0];
                outCn = src_->size[1];
            }
            else
                outCn = src_->size[0];

            for (int i = 2; i < src_->dims; ++i)
                planeSize *= src_->size[i];

            size_t stripeSize = (planeSize + nstripes - 1)/nstripes;
            size_t stripeStart = r.start*stripeSize;
            size_t stripeEnd = std::min(r.end*stripeSize, planeSize);

            for( int i = 0; i < nsamples; i++ )
            {
                const float* srcptr = src_->ptr<float>(i) + stripeStart;
                float* dstptr = dst_->ptr<float>(i) + stripeStart;
                func_->apply(srcptr, dstptr, stripeStart, (int)(stripeEnd - stripeStart), planeSize, 0, outCn);
            }
        }
    };

    ElementWiseLayer(const Func &f=Func()) { func = f; }

    virtual bool supportBackend(int backendId) CV_OVERRIDE
    {
        return func.supportBackend(backendId, this->preferableTarget);
    }

    virtual void finalize(InputArrayOfArrays, OutputArrayOfArrays) CV_OVERRIDE
    {
        func.finalize();
    }

    virtual Ptr<BackendNode> tryAttach(const Ptr<BackendNode>& node) CV_OVERRIDE
    {
        switch (node->backendId)
        {
            case DNN_BACKEND_HALIDE:
            {
#ifdef HAVE_HALIDE
                auto base = node.dynamicCast<HalideBackendNode>();
                Halide::Func& input = base->funcs.back();
                Halide::Var x("x"), y("y"), c("c"), n("n");
                Halide::Func top = (this->name.empty() ? Halide::Func() : Halide::Func(this->name));
                func.attachHalide(input(x, y, c, n), top);
                return Ptr<BackendNode>(new HalideBackendNode(base, top));
#endif  // HAVE_HALIDE
                break;
            }
        }
        return Ptr<BackendNode>();
    }

    virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
    {
#ifdef HAVE_HALIDE
        Halide::Buffer<float> input = halideBuffer(inputs[0]);
        Halide::Var x("x"), y("y"), c("c"), n("n");
        Halide::Func top = (this->name.empty() ? Halide::Func() : Halide::Func(this->name));
        func.attachHalide(input(x, y, c, n), top);
        return Ptr<BackendNode>(new HalideBackendNode(top));
#endif  // HAVE_HALIDE
        return Ptr<BackendNode>();
    }

#ifdef HAVE_CANN
    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                      const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
    {
        return func.initCannOp(Layer::name, inputs, nodes);
    }
#endif // HAVE_CANN

#ifdef HAVE_DNN_NGRAPH
    virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                        const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
    {
        auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
        auto node = func.initNgraphAPI(ieInpNode);
        return Ptr<BackendNode>(new InfEngineNgraphNode(node));
    }
#endif  // HAVE_DNN_NGRAPH

#ifdef HAVE_WEBNN
    virtual Ptr<BackendNode> initWebnn(const std::vector<Ptr<BackendWrapper> >& inputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
    {
        Ptr<WebnnBackendNode> node = nodes[0].dynamicCast<WebnnBackendNode>();
        auto& webnnInpOperand = node->operand;
        auto& webnnGraphBuilder = node->net->builder;
        auto operand = func.initWebnnAPI(webnnGraphBuilder, webnnInpOperand);
        return Ptr<BackendNode>(new WebnnBackendNode(operand));
    }
#endif

    virtual bool tryFuse(Ptr<Layer>& top) CV_OVERRIDE
    {
        return func.tryFuse(top);
    }

    void getScaleShift(Mat& scale_, Mat& shift_) const CV_OVERRIDE
    {
        func.getScaleShift(scale_, shift_);
    }

    bool getMemoryShapes(const std::vector<MatShape> &inputs,
                         const int requiredOutputs,
                         std::vector<MatShape> &outputs,
                         std::vector<MatShape> &internals) const CV_OVERRIDE
    {
        Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
        return true;
    }

    void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
    {
        CV_TRACE_FUNCTION();

        CV_OCL_RUN(IS_DNN_OPENCL_TARGET(this->preferableTarget),
                   func.applyOCL(inputs_arr, outputs_arr, internals_arr))

        if (inputs_arr.depth() == CV_16F)
        {
            Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
            return;
        }

        std::vector<Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);

        for (size_t i = 0; i < inputs.size(); i++)
        {
            const Mat &src = inputs[i];
            Mat &dst = outputs[i];
            CV_Assert(src.size == dst.size && src.type() == dst.type() &&
                      src.isContinuous() && dst.isContinuous() && src.type() == CV_32F);

            const int nstripes = getNumThreads();
            PBody body(func, src, dst, nstripes);
            parallel_for_(Range(0, nstripes), body, nstripes);
        }
    }

    void forwardSlice(const float* src, float* dst, int len, size_t planeSize, int cn0, int cn1) const CV_OVERRIDE
    {
        func.apply(src, dst, -1, len, planeSize, cn0, cn1);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(
        void *context_,
        const std::vector<Ptr<BackendWrapper>>& inputs,
        const std::vector<Ptr<BackendWrapper>>& outputs
    ) override
    {
        auto context = reinterpret_cast<csl::CSLContext*>(context_);
        return func.initCUDA(Layer::preferableTarget, context->stream);
    }
#endif

    virtual bool tryQuantize(const std::vector<std::vector<float> > &scales,
                             const std::vector<std::vector<int> > &zeropoints, LayerParams& params) CV_OVERRIDE
    {
        return func.tryQuantize(scales, zeropoints, params);
    }

    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const CV_OVERRIDE
    {
        long flops = 0;
        for (int i = 0; i < outputs.size(); i++)
        {
            flops += total(outputs[i]) * func.getFLOPSPerElement();
        }
        return flops;
    }

    Func func;
};

#ifdef HAVE_OPENCL
static String oclGetTMacro(const UMat &m)
{
    String str_name = ocl::typeToStr(m.type());

    if (str_name == "short")
        str_name = "half";

    return format("-DT=%s -Dconvert_T=convert_%s ", str_name.c_str(), str_name.c_str());
}
#endif

struct BaseFunctor
{
    void finalize() {}

    bool tryFuse(Ptr<Layer>&) { return false; }

    void getScaleShift(Mat&, Mat&) const {}

    bool tryQuantize(const std::vector<std::vector<float>>&,
                     const std::vector<std::vector<int>>&, LayerParams&) { return false; }
};

struct ReLUFunctor : public BaseFunctor
{
typedef ReLULayer Layer; float slope; explicit ReLUFunctor(float slope_=1.f) : slope(slope_) {} bool supportBackend(int backendId, int) { #ifdef HAVE_DNN_NGRAPH if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) return true; #endif #ifdef HAVE_WEBNN if (backendId == DNN_BACKEND_WEBNN) { // TODO: support PRELU if (slope != 0) { CV_LOG_WARNING(NULL, "PRELU is not supported now."); } return slope == 0; } #endif return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_CANN; } void apply(const float* srcptr, float* dstptr, int stripeStart, int len, size_t planeSize, int cn0, int cn1) const { CV_UNUSED(stripeStart); float s = slope; for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize ) { int i = 0; #if CV_SIMD128 v_float32x4 s4 = v_setall_f32(s), z = v_setzero_f32(); for( ; i <= len - 16; i += 16 ) { v_float32x4 x0 = v_load(srcptr + i); v_float32x4 x1 = v_load(srcptr + i + 4); v_float32x4 x2 = v_load(srcptr + i + 8); v_float32x4 x3 = v_load(srcptr + i + 12); x0 = v_select(v_ge(x0, z), x0, v_mul(x0, s4)); x1 = v_select(v_ge(x1, z), x1, v_mul(x1, s4)); x2 = v_select(v_ge(x2, z), x2, v_mul(x2, s4)); x3 = v_select(v_ge(x3, z), x3, v_mul(x3, s4)); v_store(dstptr + i, x0); v_store(dstptr + i + 4, x1); v_store(dstptr + i + 8, x2); v_store(dstptr + i + 12, x3); } #endif for( ; i < len; i++ ) { float x = srcptr[i]; dstptr[i] = x >= 0.f ? x : s*x; } } } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream, slope); } #endif #ifdef HAVE_OPENCL bool initKernel(ocl::Kernel &ker, const UMat &src) const { const char *buildoptSlope = (slope == 0) ? "-DRELU_NO_SLOPE" : ""; String buildopt = oclGetTMacro(src) + buildoptSlope; if (!ker.create("ReLUForward", ocl::dnn::activations_oclsrc, buildopt)) return false; if (slope != 0) ker.set(3, (float)slope); return true; } bool applyOCL(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals) { std::vector inputs; std::vector outputs; inps.getUMatVector(inputs); outs.getUMatVector(outputs); for (size_t i = 0; i < inputs.size(); i++) { UMat& src = inputs[i]; UMat& dst = outputs[i]; CV_Assert(src.isContinuous() && dst.isContinuous() && !src.offset && !dst.offset); ocl::Kernel kernel; CV_Assert(initKernel(kernel, src)); kernel.set(0, (int)src.total()); kernel.set(1, ocl::KernelArg::PtrReadOnly(src)); kernel.set(2, ocl::KernelArg::PtrWriteOnly(dst)); size_t gSize = src.total(); CV_Assert(kernel.run(1, &gSize, NULL, false)); } return true; } #endif #ifdef HAVE_HALIDE void attachHalide(const Halide::Expr& input, Halide::Func& top) { Halide::Var x("x"), y("y"), c("c"), n("n"); if (slope) { top(x, y, c, n) = select(input >= 0.0f, input, slope * input); } else { top(x, y, c, n) = max(input, 0.0f); } } #endif // HAVE_HALIDE #ifdef HAVE_CANN Ptr initCannOp(const std::string& name, const std::vector > &inputs, const std::vector >& nodes) { auto x = inputs[0].dynamicCast(); auto op_x = nodes[0].dynamicCast()->getOp(); auto x_desc = x->getTensorDesc(); auto output_desc = std::make_shared(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT); if (slope) { auto op = std::make_shared(name); op->set_input_x_by_name(*op_x, x->name.c_str()); op->update_input_desc_x(*x_desc); op->set_attr_negative_slope(slope); op->update_output_desc_y(*output_desc); return Ptr(new CannBackendNode(op)); } auto op = std::make_shared(name); op->set_input_x_by_name(*op_x, x->name.c_str()); op->update_input_desc_x(*x_desc); 
op->update_output_desc_y(*output_desc); return Ptr(new CannBackendNode(op)); } #endif #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const ov::Output& node) { if (slope) { auto param = std::make_shared(ov::element::f32, ov::Shape{1}, &slope); return std::make_shared(node, param); } return std::make_shared(node); } #endif // HAVE_DNN_NGRAPH #ifdef HAVE_WEBNN ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input) { return builder.Relu(input); } #endif bool tryQuantize(const std::vector > &scales, const std::vector > &zeropoints, LayerParams& params) { if (slope != 0.f) { float inpScale = scales[0][0], outScale = scales[1][0]; int inpZp = zeropoints[0][0], outZp = zeropoints[1][0]; Mat lookUpTable(1, 256, CV_8S); int8_t* table = lookUpTable.ptr(); for (int i = -128; i < 128; i++) { float x = inpScale*(i - inpZp); float y = x >= 0.f ? x : slope*x; int quantized = outZp + (int)std::round(y/outScale); table[i+128] = saturate_cast(quantized); } params.blobs.clear(); params.blobs.push_back(lookUpTable); } params.set("input_scale", scales[0][0]); params.set("input_zeropoint", zeropoints[0][0]); params.set("slope", slope); return true; } int64 getFLOPSPerElement() const { return 1; } }; struct ReLU6Functor : public BaseFunctor { typedef ReLU6Layer Layer; float minValue, maxValue; ReLU6Functor(float minValue_ = 0.0f, float maxValue_ = 6.0f) : minValue(minValue_), maxValue(maxValue_) { CV_Assert(minValue <= maxValue); } bool supportBackend(int backendId, int) { #ifdef HAVE_INF_ENGINE if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) return true; #endif return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_WEBNN || backendId == DNN_BACKEND_CANN; } void apply(const float* srcptr, float* dstptr, int stripeStart, int len, size_t planeSize, int cn0, int cn1) const { CV_UNUSED(stripeStart); for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize ) { int i = 0; #if CV_SIMD128 v_float32x4 minV = v_setall_f32(minValue), maxV = v_setall_f32(maxValue); for( ; i <= len - 16; i += 16 ) { v_float32x4 x0 = v_load(srcptr + i); v_float32x4 x1 = v_load(srcptr + i + 4); v_float32x4 x2 = v_load(srcptr + i + 8); v_float32x4 x3 = v_load(srcptr + i + 12); x0 = v_min(v_max(minV, x0), maxV); x1 = v_min(v_max(minV, x1), maxV); x2 = v_min(v_max(minV, x2), maxV); x3 = v_min(v_max(minV, x3), maxV); v_store(dstptr + i, x0); v_store(dstptr + i + 4, x1); v_store(dstptr + i + 8, x2); v_store(dstptr + i + 12, x3); } #endif for( ; i < len; i++ ) { float x = srcptr[i]; if (x >= minValue) dstptr[i] = x <= maxValue ? 
x : maxValue; else dstptr[i] = minValue; } } } #ifdef HAVE_OPENCL bool applyOCL(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals) { std::vector inputs; std::vector outputs; inps.getUMatVector(inputs); outs.getUMatVector(outputs); String buildopt = oclGetTMacro(inputs[0]); for (size_t i = 0; i < inputs.size(); i++) { UMat& src = inputs[i]; UMat& dst = outputs[i]; ocl::Kernel kernel("ReLU6Forward", ocl::dnn::activations_oclsrc, buildopt); kernel.set(0, (int)src.total()); kernel.set(1, ocl::KernelArg::PtrReadOnly(src)); kernel.set(2, ocl::KernelArg::PtrWriteOnly(dst)); kernel.set(3, (float)minValue); kernel.set(4, (float)maxValue); size_t gSize = src.total(); CV_Assert(kernel.run(1, &gSize, NULL, false)); } return true; } #endif #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream, minValue, maxValue); } #endif #ifdef HAVE_HALIDE void attachHalide(const Halide::Expr& input, Halide::Func& top) { Halide::Var x("x"), y("y"), c("c"), n("n"); top(x, y, c, n) = clamp(input, minValue, maxValue); } #endif // HAVE_HALIDE #ifdef HAVE_CANN Ptr initCannOp(const std::string& name, const std::vector > &inputs, const std::vector >& nodes) { auto x = inputs[0].dynamicCast(); auto op = std::make_shared(name); auto op_x = nodes[0].dynamicCast()->getOp(); op->set_input_x_by_name(*op_x, x->name.c_str()); auto x_desc = x->getTensorDesc(); op->update_input_desc_x(*x_desc); Mat min_value_mat(1, 1, CV_32F, Scalar(minValue)); std::vector shape_{1}; auto op_const_minv = std::make_shared(min_value_mat.data, min_value_mat.type(), shape_, cv::format("%s_min_value", name.c_str())); op->set_input_clip_value_min(*(op_const_minv->getOp())); op->update_input_desc_clip_value_min(*(op_const_minv->getTensorDesc())); Mat max_value_mat(1, 1, CV_32F, Scalar(maxValue)); auto op_const_maxv = std::make_shared(max_value_mat.data, max_value_mat.type(), shape_, cv::format("%s_max_value", name.c_str())); op->set_input_clip_value_max(*(op_const_maxv->getOp())); op->update_input_desc_clip_value_max(*(op_const_maxv->getTensorDesc())); auto output_desc = std::make_shared(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT); op->update_output_desc_y(*output_desc); return Ptr(new CannBackendNode(op)); } #endif #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const ov::Output& node) { return std::make_shared(node, minValue, maxValue); } #endif // HAVE_DNN_NGRAPH #ifdef HAVE_WEBNN ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input) { ml::ClampOptions clampOptions; clampOptions.minValue = minValue; clampOptions.maxValue = maxValue; return builder.Clamp(input, &clampOptions); } #endif bool tryQuantize(const std::vector > &scales, const std::vector > &zeropoints, LayerParams& params) { params.set("input_scale", scales[0][0]); params.set("input_zeropoint", zeropoints[0][0]); return true; } int64 getFLOPSPerElement() const { return 2; } }; template struct BaseDefaultFunctor : public BaseFunctor { void apply(const float* srcptr, float* dstptr, int stripeStart, int len, size_t planeSize, int cn0, int cn1) const { CV_UNUSED(stripeStart); for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize ) { for( int i = 0; i < len; i++ ) { float x = srcptr[i]; dstptr[i] = static_cast(this)->calculate(x); } } } #ifdef HAVE_OPENCL bool applyOCL(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals) { std::vector inputs; std::vector outputs; inps.getUMatVector(inputs); outs.getUMatVector(outputs); String buildopt = 
oclGetTMacro(inputs[0]); for (size_t i = 0; i < inputs.size(); i++) { UMat& src = inputs[i]; UMat& dst = outputs[i]; ocl::Kernel kernel(ocl_kernel_name, ocl::dnn::activations_oclsrc, buildopt); kernel.set(0, static_cast(src.total())); kernel.set(1, ocl::KernelArg::PtrReadOnly(src)); kernel.set(2, ocl::KernelArg::PtrWriteOnly(dst)); static_cast(this)->setKernelParams(kernel); size_t gSize = src.total(); CV_Assert(kernel.run(1, &gSize, nullptr, false)); } return true; } #endif inline void setKernelParams(ocl::Kernel& kernel) const {} bool tryQuantize(const std::vector > &scales, const std::vector > &zeropoints, LayerParams& params) { float inpScale = scales[0][0], outScale = scales[1][0]; int inpZp = zeropoints[0][0], outZp = zeropoints[1][0]; Mat lookUpTable(1, 256, CV_8S); int8_t* table = lookUpTable.ptr(); for (int i = -128; i < 128; i++) { float x = inpScale * static_cast(i - inpZp); float y = static_cast(this)->calculate(x); int quantized = outZp + static_cast(std::round(y/outScale)); table[i+128] = saturate_cast(quantized); } params.blobs.clear(); params.blobs.push_back(lookUpTable); params.set("input_scale", scales[0][0]); params.set("input_zeropoint", zeropoints[0][0]); return true; } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { CV_Error(Error::StsNotImplemented, ""); } #endif #ifdef HAVE_HALIDE void attachHalide(const Halide::Expr& input, Halide::Func& top) { CV_Error(Error::StsNotImplemented, ""); } #endif // HAVE_HALIDE #ifdef HAVE_CANN Ptr initCannOp(const std::string& name, const std::vector > &inputs, const std::vector >& nodes) { CV_Error(Error::StsNotImplemented, ""); } #endif // HAVE_CANN #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const ov::Output& node) { CV_Error(Error::StsNotImplemented, ""); } #endif // HAVE_DNN_NGRAPH #ifdef HAVE_WEBNN ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input) { CV_Error(Error::StsNotImplemented, ""); } #endif private: static const char* const ocl_kernel_name; }; namespace { // Refer to v_erf in modules/core/include/opencv2/core/hal/intrin_math.hpp constexpr float c_erf_coef0 = 0.3275911f; constexpr float c_erf_coef1 = 1.061405429f; constexpr float c_erf_coef2 = -1.453152027f; constexpr float c_erf_coef3 = 1.421413741f; constexpr float c_erf_coef4 = -0.284496736f; constexpr float c_erf_coef5 = 0.254829592f; inline float erf_approx(float v) { float t = 1.f / fmaf(fabsf(v), c_erf_coef0, 1.f); float r = fmaf(c_erf_coef1, t, c_erf_coef2); r = fmaf(r, t, c_erf_coef3); r = fmaf(r, t, c_erf_coef4); r = fmaf(r, t, c_erf_coef5); r = 1.f - r * t * expf(-v * v); return std::copysignf(r, v); } } struct GeluFunctor : public BaseFunctor { using Layer = GeluLayer; int vlanes; explicit GeluFunctor() { #if (CV_SIMD || CV_SIMD_SCALABLE) vlanes = VTraits::vlanes(); #else vlanes = 1; #endif } bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; } void apply(const float* srcptr, float* dstptr, int stripeStart, int len, size_t planeSize, int cn0, int cn1) const { CV_UNUSED(stripeStart); for (int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize) { int i = 0; #if (CV_SIMD || CV_SIMD_SCALABLE) // 0.5f * x * (1.0f + erf(x * M_SQRT1_2)); v_float32 half = vx_setall_f32(0.5f), one = vx_setall_f32(1.0f), reciprocal_sqrt2 = vx_setall_f32(M_SQRT1_2); for (; i <= len - vlanes; i += vlanes) { v_float32 x0 = vx_load(srcptr + i); // t = x * M_SQRT1_2 v_float32 t0 = v_mul(reciprocal_sqrt2, 
x0); // t = 1.0f + t t0 = v_add(one, v_erf(t0)); // x = 0.5 * x x0 = v_mul(half, x0); // x = x * t x0 = v_mul(x0, t0); vx_store(dstptr + i, x0); } #endif // 0.5f * x * (1.0f + erf(x * M_SQRT1_2)); for( ; i < len; i++ ) { float x = srcptr[i]; dstptr[i] = 0.5f * x * (1.0f + erf_approx(x * M_SQRT1_2)); } } } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif #ifdef HAVE_OPENCL bool initKernel(ocl::Kernel &ker, const UMat &src) const { String buildopt = oclGetTMacro(src); if (!ker.create("GeluForward", ocl::dnn::activations_oclsrc, buildopt)) return false; return true; } bool applyOCL(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals) { std::vector inputs; std::vector outputs; inps.getUMatVector(inputs); outs.getUMatVector(outputs); for (size_t i = 0; i < inputs.size(); i++) { UMat& src = inputs[i]; UMat& dst = outputs[i]; CV_Assert(src.isContinuous() && dst.isContinuous() && !src.offset && !dst.offset); ocl::Kernel kernel; CV_Assert(initKernel(kernel, src)); kernel.set(0, (int)src.total()); kernel.set(1, ocl::KernelArg::PtrReadOnly(src)); kernel.set(2, ocl::KernelArg::PtrWriteOnly(dst)); size_t gSize = src.total(); CV_Assert(kernel.run(1, &gSize, NULL, false)); } return true; } #endif #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const ov::Output& node) { return std::make_shared(node); } #endif // HAVE_DNN_NGRAPH int64 getFLOPSPerElement() const { return 100; } }; namespace GeluApproximationConstants { static constexpr float sqrt_2_pi = 0.7978845834732056f; static constexpr float coef_sqrt_2_pi = 0.044714998453855515f * sqrt_2_pi; } struct GeluApproximationFunctor : public BaseDefaultFunctor { typedef GeluApproximationLayer Layer; explicit GeluApproximationFunctor() {} bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV; } inline float calculate(float x) const { return 0.5f * x * (1.f + tanh(x * (GeluApproximationConstants::sqrt_2_pi + GeluApproximationConstants::coef_sqrt_2_pi * x * x))); } int64 getFLOPSPerElement() const { return 100; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "GeluApproximationForward"; struct TanHFunctor : public BaseDefaultFunctor { typedef TanHLayer Layer; bool supportBackend(int backendId, int) { #ifdef HAVE_INF_ENGINE if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) return true; #endif return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_CANN; } inline float calculate(float x) const { return tanh(x); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif #ifdef HAVE_HALIDE void attachHalide(const Halide::Expr& input, Halide::Func& top) { Halide::Var x("x"), y("y"), c("c"), n("n"); top(x, y, c, n) = tanh(input); } #endif // HAVE_HALIDE #ifdef HAVE_CANN Ptr initCannOp(const std::string& name, const std::vector > &inputs, const std::vector >& nodes) { auto x = inputs[0].dynamicCast(); auto op = std::make_shared(name); auto op_x = nodes[0].dynamicCast()->getOp(); op->set_input_x_by_name(*op_x, x->name.c_str()); auto x_desc = x->getTensorDesc(); op->update_input_desc_x(*x_desc); auto output_desc = std::make_shared(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT); op->update_output_desc_y(*output_desc); return Ptr(new CannBackendNode(op)); } #endif // HAVE_CANN #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const ov::Output& node) { return std::make_shared(node); } #endif // 
HAVE_DNN_NGRAPH int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const TanHFunctor::BaseDefaultFunctor::ocl_kernel_name = "TanHForward"; struct SwishFunctor : public BaseDefaultFunctor { using Layer = SwishLayer; int vlanes; explicit SwishFunctor() { #if (CV_SIMD || CV_SIMD_SCALABLE) vlanes = VTraits::vlanes(); #else vlanes = 1; #endif } bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH || backendId == DNN_BACKEND_CANN; } inline float calculate(float x) const { return x / (1.f + exp(-x)); } void apply(const float* srcptr, float* dstptr, int stripeStart, int len, size_t planeSize, int cn0, int cn1) const { CV_UNUSED(stripeStart); for (int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize) { int i = 0; #if (CV_SIMD || CV_SIMD_SCALABLE) // x / (1.f + exp(-x)); v_float32 one = vx_setall_f32(1.0f), zero = vx_setzero_f32(); for (; i <= len - vlanes; i += vlanes) { v_float32 x = vx_load(srcptr + i); v_float32 t = v_sub(zero, x); t = v_exp(t); t = v_add(one, t); t = v_div(x, t); vx_store(dstptr + i, t); } #endif // In case SIMD is not available or len < vlanes for (; i < len; i++) { dstptr[i] = calculate(srcptr[i]); } } } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif #ifdef HAVE_HALIDE void attachHalide(const Halide::Expr& input, Halide::Func& top) { Halide::Var x("x"), y("y"), c("c"), n("n"); top(x, y, c, n) = input / (1.0f + exp(-input)); } #endif // HAVE_HALIDE #ifdef HAVE_CANN Ptr initCannOp(const std::string& name, const std::vector > &inputs, const std::vector >& nodes) { auto x = inputs[0].dynamicCast(); auto op = std::make_shared(name); op->set_attr_scale(1.0f); auto op_x = nodes[0].dynamicCast()->getOp(); op->set_input_x_by_name(*op_x, x->name.c_str()); auto x_desc = x->getTensorDesc(); op->update_input_desc_x(*x_desc); auto output_desc = std::make_shared(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT); op->update_output_desc_y(*output_desc); return Ptr(new CannBackendNode(op)); } #endif // HAVE_CANN #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const ov::Output& node) { auto sigmoid = std::make_shared(node); return std::make_shared(node, sigmoid); } #endif // HAVE_DNN_NGRAPH int64 getFLOPSPerElement() const { return 3; } }; template<> const char* const SwishFunctor::BaseDefaultFunctor::ocl_kernel_name = "SwishForward"; namespace { constexpr float MISH_THRESHOLD = -36.73f; } /* This implementation is derived from https://github.com/vpisarev/ficus/blob/3c9a8b78f49e17489c5e1fd6dd5dd487348c99c2/lib/NN/OpElemwise.fx#L110 */ struct MishFunctor : public BaseDefaultFunctor { using Layer = MishLayer; int vlanes; explicit MishFunctor() { #if (CV_SIMD || CV_SIMD_SCALABLE) vlanes = VTraits::vlanes(); #else vlanes = 1; #endif } bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH || backendId == DNN_BACKEND_CANN; } inline float calculate(float x) const { float y = x > MISH_THRESHOLD ? std::exp(-x) : 1.f; x *= x > MISH_THRESHOLD ? 
1.f : 0.f; return x * (1 + 2 * y) / (1 + 2 * y + 2 * y * y); } void apply(const float* srcptr, float* dstptr, int stripeStart, int len, size_t planeSize, int cn0, int cn1) const { CV_UNUSED(stripeStart); for (int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize) { int i = 0; #if (CV_SIMD || CV_SIMD_SCALABLE) v_float32 v_threshold = vx_setall_f32(MISH_THRESHOLD), one = vx_setall_f32(1.f), z = vx_setzero_f32(); for (; i <= len - vlanes; i += vlanes) { v_float32 x = vx_load(srcptr + i); x = v_select(v_le(x, v_threshold), z, x); v_float32 y = v_exp(v_sub(z, x)); v_float32 _2y = v_add(y, y), _2ya1 = v_add(_2y, one); x = v_div(v_mul(x, _2ya1), v_add(_2ya1, v_mul(_2y, y))); vx_store(dstptr + i, x); } #endif // In case SIMD is not available or len < vlanes for (; i < len; i++) { dstptr[i] = calculate(srcptr[i]); } } } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif #ifdef HAVE_HALIDE void attachHalide(const Halide::Expr& input, Halide::Func& top) { Halide::Var x("x"), y("y"), c("c"), n("n"); top(x, y, c, n) = input * tanh(log(1.0f + exp(input))); } #endif // HAVE_HALIDE #ifdef HAVE_CANN Ptr initCannOp(const std::string& name, const std::vector > &inputs, const std::vector >& nodes) { auto x = inputs[0].dynamicCast(); auto op = std::make_shared(name); auto op_x = nodes[0].dynamicCast()->getOp(); op->set_input_x_by_name(*op_x, x->name.c_str()); auto x_desc = x->getTensorDesc(); op->update_input_desc_x(*x_desc); auto output_desc = std::make_shared(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT); op->update_output_desc_y(*output_desc); return Ptr(new CannBackendNode(op)); } #endif // HAVE_CANN #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const ov::Output& node) { return std::make_shared(node); } #endif // HAVE_DNN_NGRAPH int64 getFLOPSPerElement() const { return 3; } }; template<> const char* const MishFunctor::BaseDefaultFunctor::ocl_kernel_name = "MishForward"; struct SigmoidFunctor : public BaseDefaultFunctor { typedef SigmoidLayer Layer; bool supportBackend(int backendId, int) { #ifdef HAVE_INF_ENGINE if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) return true; #endif return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_CANN; } inline float calculate(float x) const { float y; if (x >= 0) y = 1.f / (1.f + exp(-x)); else { y = exp(x); y = y / (1 + y); } return y; } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif #ifdef HAVE_HALIDE void attachHalide(const Halide::Expr& input, Halide::Func& top) { Halide::Var x("x"), y("y"), c("c"), n("n"); top(x, y, c, n) = 1.0f / (1.0f + exp(-input)); } #endif // HAVE_HALIDE #ifdef HAVE_CANN Ptr initCannOp(const std::string& name, const std::vector > &inputs, const std::vector >& nodes) { auto x = inputs[0].dynamicCast(); auto op = std::make_shared(name); auto op_x = nodes[0].dynamicCast()->getOp(); op->set_input_x_by_name(*op_x, x->name.c_str()); auto x_desc = x->getTensorDesc(); op->update_input_desc_x(*x_desc); auto output_desc = std::make_shared(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT); op->update_output_desc_y(*output_desc); return Ptr(new CannBackendNode(op)); } #endif // HAVE_CANN #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const ov::Output& node) { return std::make_shared(node); } #endif // HAVE_DNN_NGRAPH int64 getFLOPSPerElement() const { return 3; } }; template<> const char* const 
SigmoidFunctor::BaseDefaultFunctor::ocl_kernel_name = "SigmoidForward"; struct ELUFunctor : public BaseDefaultFunctor { using Layer = ELULayer; float alpha; int vlanes; explicit ELUFunctor(float alpha_ = 1.f) : alpha(alpha_) { #if (CV_SIMD || CV_SIMD_SCALABLE) vlanes = VTraits::vlanes(); #else vlanes = 1; #endif } bool supportBackend(int backendId, int) { #ifdef HAVE_INF_ENGINE if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) return true; #endif return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_CANN; } inline float calculate(float x) const { return x >= 0.f ? x : alpha * (exp(x) - 1.f); } void apply(const float* srcptr, float* dstptr, int stripeStart, int len, size_t planeSize, int cn0, int cn1) const { CV_UNUSED(stripeStart); for (int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize) { int i = 0; #if (CV_SIMD || CV_SIMD_SCALABLE) v_float32 z = vx_setzero_f32(), v_alpha = vx_setall_f32(alpha), one = vx_setall_f32(1.0f); for (; i <= len - vlanes; i += vlanes) { v_float32 x = vx_load(srcptr + i); v_float32 t = v_mul(v_alpha, v_sub(v_exp(x), one)); x = v_select(v_ge(x, z), x, t); vx_store(dstptr + i, x); } #endif // In case SIMD is not available or len < vlanes for (; i < len; i++) { dstptr[i] = calculate(srcptr[i]); } } } inline void setKernelParams(ocl::Kernel& kernel) const { kernel.set(3, alpha); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream, alpha); } #endif #ifdef HAVE_HALIDE void attachHalide(const Halide::Expr& input, Halide::Func& top) { Halide::Var x("x"), y("y"), c("c"), n("n"); top(x, y, c, n) = select(input >= 0.0f, input, alpha * (exp(input) - 1)); } #endif // HAVE_HALIDE #ifdef HAVE_CANN Ptr initCannOp(const std::string& name, const std::vector > &inputs, const std::vector >& nodes) { auto x = inputs[0].dynamicCast(); auto op = std::make_shared(name); op->set_attr_alpha(alpha); auto op_x = nodes[0].dynamicCast()->getOp(); op->set_input_x_by_name(*op_x, x->name.c_str()); auto x_desc = x->getTensorDesc(); op->update_input_desc_x(*x_desc); auto output_desc = std::make_shared(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT); op->update_output_desc_y(*output_desc); return Ptr(new CannBackendNode(op)); } #endif // HAVE_CANN #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const ov::Output& node) { return std::make_shared(node, alpha); } #endif // HAVE_DNN_NGRAPH int64 getFLOPSPerElement() const { return 2; } }; template<> const char* const ELUFunctor::BaseDefaultFunctor::ocl_kernel_name = "ELUForward"; struct AbsValFunctor : public BaseDefaultFunctor { typedef AbsLayer Layer; bool supportBackend(int backendId, int) { #ifdef HAVE_INF_ENGINE if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) return true; #endif return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_CANN; } inline float calculate(float x) const { return abs(x); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif #ifdef HAVE_HALIDE void attachHalide(const Halide::Expr& input, Halide::Func& top) { Halide::Var x("x"), y("y"), c("c"), n("n"); top(x, y, c, n) = abs(input); } #endif // HAVE_HALIDE #ifdef HAVE_CANN Ptr initCannOp(const std::string& name, const std::vector > &inputs, const std::vector >& nodes) { auto x = inputs[0].dynamicCast(); auto op = std::make_shared(name); auto op_x = nodes[0].dynamicCast()->getOp(); 
op->set_input_x_by_name(*op_x, x->name.c_str()); auto x_desc = x->getTensorDesc(); op->update_input_desc_x(*x_desc); auto output_desc = std::make_shared(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT); op->update_output_desc_y(*output_desc); return Ptr(new CannBackendNode(op)); } #endif // HAVE_CANN #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const ov::Output& node) { return std::make_shared(node); } #endif // HAVE_DNN_NGRAPH int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const AbsValFunctor::BaseDefaultFunctor::ocl_kernel_name = "AbsValForward"; struct BNLLFunctor : public BaseDefaultFunctor { typedef BNLLLayer Layer; bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_CANN; } inline float calculate(float x) const { // https://github.com/BVLC/caffe/blame/1.0/src/caffe/layers/bnll_layer.cpp#L17 return x > 0 ? x + log(1.f + exp(-x)) : log(1.f + exp(x)); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif #ifdef HAVE_CANN Ptr initCannOp(const std::string& name, const std::vector > &inputs, const std::vector >& nodes) { auto x = inputs[0].dynamicCast(); auto op = std::make_shared(name); auto op_x = nodes[0].dynamicCast()->getOp(); op->set_input_x_by_name(*op_x, x->name.c_str()); auto x_desc = x->getTensorDesc(); op->update_input_desc_x(*x_desc); auto output_desc = std::make_shared(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT); op->update_output_desc_y(*output_desc); return Ptr(new CannBackendNode(op)); } #endif // HAVE_CANN #ifdef HAVE_HALIDE void attachHalide(const Halide::Expr& input, Halide::Func& top) { Halide::Var x("x"), y("y"), c("c"), n("n"); // https://github.com/BVLC/caffe/blame/1.0/src/caffe/layers/bnll_layer.cpp#L17 top(x, y, c, n) = max(input, 0) + log(1.0f + exp(-abs(input))); } #endif // HAVE_HALIDE int64 getFLOPSPerElement() const { return 5; } }; template<> const char* const BNLLFunctor::BaseDefaultFunctor::ocl_kernel_name = "BNLLForward"; struct CeilFunctor : public BaseDefaultFunctor { typedef CeilLayer Layer; bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE; } inline float calculate(float x) const { return ceil(x); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif #ifdef HAVE_CANN Ptr initCannOp(const std::string& name, const std::vector > &inputs, const std::vector >& nodes) { auto x = inputs[0].dynamicCast(); auto op = std::make_shared(name); auto op_x = nodes[0].dynamicCast()->getOp(); op->set_input_x_by_name(*op_x, x->name.c_str()); auto x_desc = x->getTensorDesc(); op->update_input_desc_x(*x_desc); auto output_desc = std::make_shared(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT); op->update_output_desc_y(*output_desc); return Ptr(new CannBackendNode(op)); } #endif // HAVE_CANN #ifdef HAVE_HALIDE void attachHalide(const Halide::Expr& input, Halide::Func& top) { Halide::Var x("x"), y("y"), c("c"), n("n"); top(x, y, c, n) = ceil(input); } #endif // HAVE_HALIDE int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "CeilForward"; struct FloorFunctor : public BaseDefaultFunctor { typedef FloorLayer Layer; bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == 
DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_CANN; } inline float calculate(float x) const { return floor(x); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif #ifdef HAVE_CANN Ptr initCannOp(const std::string& name, const std::vector > &inputs, const std::vector >& nodes) { auto x = inputs[0].dynamicCast(); auto op = std::make_shared(name); auto op_x = nodes[0].dynamicCast()->getOp(); op->set_input_x_by_name(*op_x, x->name.c_str()); auto x_desc = x->getTensorDesc(); op->update_input_desc_x(*x_desc); auto output_desc = std::make_shared(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT); op->update_output_desc_y(*output_desc); return Ptr(new CannBackendNode(op)); } #endif // HAVE_CANN #ifdef HAVE_HALIDE void attachHalide(const Halide::Expr& input, Halide::Func& top) { Halide::Var x("x"), y("y"), c("c"), n("n"); top(x, y, c, n) = floor(input); } #endif // HAVE_HALIDE int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "FloorForward"; struct LogFunctor : public BaseDefaultFunctor { typedef LogLayer Layer; bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE; } inline float calculate(float x) const { return log(x); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif #ifdef HAVE_HALIDE void attachHalide(const Halide::Expr& input, Halide::Func& top) { Halide::Var x("x"), y("y"), c("c"), n("n"); top(x, y, c, n) = log(input); } #endif // HAVE_HALIDE int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "LogForward"; struct RoundFunctor : public BaseDefaultFunctor { typedef RoundLayer Layer; bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE; } inline float calculate(float x) const { // Rounds to even numbers in halfway cases, so 2.5 -> 2, -2.5 -> -2 int old_rounding_direction = std::fegetround(); std::fesetround(FE_TONEAREST); float y = std::nearbyint(x); std::fesetround(old_rounding_direction); return y; } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif #ifdef HAVE_HALIDE void attachHalide(const Halide::Expr& input, Halide::Func& top) { Halide::Var x("x"), y("y"), c("c"), n("n"); top(x, y, c, n) = round(input); } #endif // HAVE_HALIDE int64 getFLOPSPerElement() const { return 2; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "RoundForward"; struct SqrtFunctor : public BaseDefaultFunctor { typedef SqrtLayer Layer; bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE; } inline float calculate(float x) const { return sqrt(x); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif #ifdef HAVE_HALIDE void attachHalide(const Halide::Expr& input, Halide::Func& top) { Halide::Var x("x"), y("y"), c("c"), n("n"); top(x, y, c, n) = sqrt(input); } #endif // HAVE_HALIDE #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const ov::Output& node) { return std::make_shared(node); } #endif // HAVE_DNN_NGRAPH int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "SqrtForward"; 
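// ----------------------------------------------------------------------------------------
// Note on the pattern shared by the unary functors above and below: BaseDefaultFunctor<T>
// is a CRTP helper, so a new scalar activation only has to supply calculate(),
// supportBackend(), getFLOPSPerElement(), an OpenCL kernel name, and a `Layer` typedef
// naming the cv::dnn layer class it implements; the generic apply() loop, applyOCL() and
// the 256-entry int8 lookup-table quantization come from the base. As an illustrative
// sketch only (RecipFunctor, RecipLayer and "RecipForward" are hypothetical names, not
// part of the library), a reciprocal activation would look roughly like:
//
//     struct RecipFunctor : public BaseDefaultFunctor<RecipFunctor>
//     {
//         typedef RecipLayer Layer;  // hypothetical cv::dnn layer class to wrap
//
//         bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV; }
//         inline float calculate(float x) const { return 1.f / x; }
//         int64 getFLOPSPerElement() const { return 1; }
//     };
//     template<>
//     const char* const BaseDefaultFunctor<RecipFunctor>::ocl_kernel_name = "RecipForward";
//
// The functor is then exposed as a layer through ElementWiseLayer<RecipFunctor>.
// ----------------------------------------------------------------------------------------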
struct NotFunctor : public BaseDefaultFunctor { typedef NotLayer Layer; bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE; } inline float calculate(float x) const { return floor(1.f - x); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif #ifdef HAVE_HALIDE void attachHalide(const Halide::Expr& input, Halide::Func& top) { Halide::Var x("x"), y("y"), c("c"), n("n"); top(x, y, c, n) = floor(1.0f - input); } #endif // HAVE_HALIDE int64 getFLOPSPerElement() const { return 2; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "NotForward"; struct AcosFunctor : public BaseDefaultFunctor { typedef AcosLayer Layer; bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; } inline float calculate(float x) const { return acos(x); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "AcosForward"; struct AcoshFunctor : public BaseDefaultFunctor { typedef AcoshLayer Layer; bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; } inline float calculate(float x) const { return acosh(x); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "AcoshForward"; struct AsinFunctor : public BaseDefaultFunctor { typedef AsinLayer Layer; bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; } inline float calculate(float x) const { return asin(x); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "AsinForward"; struct AsinhFunctor : public BaseDefaultFunctor { typedef AsinhLayer Layer; bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; } inline float calculate(float x) const { return asinh(x); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "AsinhForward"; struct AtanFunctor : public BaseDefaultFunctor { typedef AtanLayer Layer; bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; } inline float calculate(float x) const { return atan(x); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "AtanForward"; struct AtanhFunctor : public BaseDefaultFunctor { typedef AtanhLayer Layer; bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; } inline float calculate(float x) const { return atanh(x); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } 
#endif int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "AtanhForward"; struct CosFunctor : public BaseDefaultFunctor { typedef CosLayer Layer; bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; } inline float calculate(float x) const { return cos(x); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "CosForward"; struct CoshFunctor : public BaseDefaultFunctor { typedef CoshLayer Layer; bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; } inline float calculate(float x) const { return cosh(x); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "CoshForward"; struct ErfFunctor : public BaseDefaultFunctor { typedef ErfLayer Layer; bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; } inline float calculate(float x) const { return erf(x); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "ErfForward"; struct HardSwishFunctor : public BaseDefaultFunctor { using Layer = HardSwishLayer; int vlanes; explicit HardSwishFunctor() { #if (CV_SIMD || CV_SIMD_SCALABLE) vlanes = VTraits::vlanes(); #else vlanes = 1; #endif } bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_CANN; } inline float calculate(float x) const { return x * std::max(0.f, std::min(1.f, x / 6.f + 0.5f)); } void apply(const float* srcptr, float* dstptr, int stripeStart, int len, size_t planeSize, int cn0, int cn1) const { CV_UNUSED(stripeStart); for (int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize) { int i = 0; #if (CV_SIMD || CV_SIMD_SCALABLE) v_float32 zero = vx_setzero_f32(), one = vx_setall_f32(1.0f), half = vx_setall_f32(0.5f), sixth = vx_setall_f32(1 / 6.0f); for (; i <= len - vlanes; i += vlanes) { v_float32 x = vx_load(srcptr + i); v_float32 t = v_add(v_mul(x, sixth), half); t = v_min(one, t); t = v_max(zero, t); t = v_mul(x, t); vx_store(dstptr + i, t); } #endif // In case SIMD is not available or len > vlanes for (; i < len; i++) { dstptr[i] = calculate(srcptr[i]); } } } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif #ifdef HAVE_CANN Ptr initCannOp(const std::string& name, const std::vector > &inputs, const std::vector >& nodes) { auto x = inputs[0].dynamicCast(); auto op = std::make_shared(name); auto op_x = nodes[0].dynamicCast()->getOp(); op->set_input_x_by_name(*op_x, x->name.c_str()); auto x_desc = x->getTensorDesc(); op->update_input_desc_x(*x_desc); auto output_desc = std::make_shared(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT); op->update_output_desc_y(*output_desc); return Ptr(new CannBackendNode(op)); } #endif int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "HardSwishForward"; struct 
SinFunctor : public BaseDefaultFunctor { typedef SinLayer Layer; bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; } inline float calculate(float x) const { return sin(x); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "SinForward"; struct SinhFunctor : public BaseDefaultFunctor { typedef SinhLayer Layer; bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; } inline float calculate(float x) const { return sinh(x); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "SinhForward"; struct SoftplusFunctor : public BaseDefaultFunctor { typedef SoftplusLayer Layer; bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; } inline float calculate(float x) const { return log1p(exp(x)); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "SoftplusForward"; struct SoftsignFunctor : public BaseDefaultFunctor { typedef SoftsignLayer Layer; bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; } inline float calculate(float x) const { return x / (1.f + abs(x)); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "SoftsignForward"; struct TanFunctor : public BaseDefaultFunctor { typedef TanLayer Layer; bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; } inline float calculate(float x) const { return tan(x); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream); } #endif int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "TanForward"; struct CeluFunctor : public BaseDefaultFunctor { using Layer = CeluLayer; float alpha; int vlanes; explicit CeluFunctor(float alpha_ = 1.f) : alpha(alpha_) { #if (CV_SIMD || CV_SIMD_SCALABLE) vlanes = VTraits::vlanes(); #else vlanes = 1; #endif } bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; } inline float calculate(float x) const { return std::max(0.f, x) + std::min(0.f, alpha * expm1(x / alpha)); } void apply(const float* srcptr, float* dstptr, int stripeStart, int len, size_t planeSize, int cn0, int cn1) const { CV_UNUSED(stripeStart); for (int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize) { int i = 0; #if (CV_SIMD || CV_SIMD_SCALABLE) v_float32 zero = vx_setzero_f32(), v_alpha = vx_setall_f32(alpha), one = vx_setall_f32(1.0f), v_ralpha = vx_setall_f32(1.0f / alpha); for (; i <= len - vlanes; i += vlanes) { v_float32 x = vx_load(srcptr + i); v_float32 t = v_min(zero, v_mul(v_alpha, v_sub(v_exp(v_mul(x, v_ralpha)), one))); t = v_add(v_max(zero, x), t); 
vx_store(dstptr + i, t); } #endif // In case SIMD is not available or len < vlanes for (; i < len; i++) { dstptr[i] = calculate(srcptr[i]); } } } inline void setKernelParams(ocl::Kernel& kernel) const { kernel.set(3, alpha); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream, alpha); } #endif int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "CeluForward"; struct HardSigmoidFunctor : public BaseDefaultFunctor { typedef HardSigmoidLayer Layer; float alpha; float beta; explicit HardSigmoidFunctor(float alpha_ = 0.2f, float beta_ = 0.5f) : alpha(alpha_), beta(beta_) {} bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; } inline float calculate(float x) const { return max(0.f, min(1.f, alpha * x + beta)); } inline void setKernelParams(ocl::Kernel& kernel) const { kernel.set(3, alpha); kernel.set(4, beta); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream, alpha, beta); } #endif int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "HardSigmoidForward"; struct SeluFunctor : public BaseDefaultFunctor { using Layer = SeluLayer; float alpha; float gamma; int vlanes; explicit SeluFunctor(float alpha_ = 1.67326319217681884765625f, float gamma_ = 1.05070102214813232421875f) : alpha(alpha_), gamma(gamma_) { #if (CV_SIMD || CV_SIMD_SCALABLE) vlanes = VTraits::vlanes(); #else vlanes = 1; #endif } bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; } inline float calculate(float x) const { return gamma * (x > 0.f ? x : alpha * expm1(x)); } void apply(const float* srcptr, float* dstptr, int stripeStart, int len, size_t planeSize, int cn0, int cn1) const { CV_UNUSED(stripeStart); for (int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize) { int i = 0; #if (CV_SIMD || CV_SIMD_SCALABLE) v_float32 z = vx_setzero_f32(), one = vx_setall_f32(1.0f), v_alpha = vx_setall_f32(alpha), v_gamma = vx_setall_f32(gamma); for (; i <= len - vlanes; i += vlanes) { v_float32 x = vx_load(srcptr + i); v_float32 t = v_mul(v_alpha, v_sub(v_exp(x), one)); x = v_select(v_le(x, z), t, x); x = v_mul(v_gamma, x); vx_store(dstptr + i, x); } #endif // In case SIMD is not available or len > vlanes for (; i < len; i++) { dstptr[i] = calculate(srcptr[i]); } } } inline void setKernelParams(ocl::Kernel& kernel) const { kernel.set(3, alpha); kernel.set(4, gamma); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream, alpha, gamma); } #endif int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "SeluForward"; struct ThresholdedReluFunctor : public BaseDefaultFunctor { typedef ThresholdedReluLayer Layer; float alpha; explicit ThresholdedReluFunctor(float alpha_ = 1.f) : alpha(alpha_) {} bool supportBackend(int backendId, int) { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; } inline float calculate(float x) const { return x > alpha ? 
x : 0.f; } inline void setKernelParams(ocl::Kernel& kernel) const { kernel.set(3, alpha); } #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream, alpha); } #endif int64 getFLOPSPerElement() const { return 1; } }; template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "ThresholdedReluForward"; struct PowerFunctor : public BaseFunctor { typedef PowerLayer Layer; float power, scale, shift; float originPower, originScale, originShift; explicit PowerFunctor(float power_ = 1.f, float scale_ = 1.f, float shift_ = 0.f) : power(power_), scale(scale_), shift(shift_), originPower(power_), originScale(scale_), originShift(shift_) {} bool supportBackend(int backendId, int targetId) { #ifdef HAVE_INF_ENGINE if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) return true; #endif { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE; } } void finalize() { power = originPower; scale = originScale; shift = originShift; } void apply(const float* srcptr, float* dstptr, int stripeStart, int len, size_t planeSize, int cn0, int cn1) const { CV_UNUSED(stripeStart); float a = scale, b = shift, p = power; if( p == 1.f ) { for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize ) { for( int i = 0; i < len; i++ ) { float x = srcptr[i]; dstptr[i] = a*x + b; } } } else { for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize ) { for( int i = 0; i < len; i++ ) { float x = srcptr[i]; dstptr[i] = pow(a*x + b, p); } } } } #ifdef HAVE_OPENCL bool applyOCL(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals) { std::vector inputs; std::vector outputs; inps.getUMatVector(inputs); outs.getUMatVector(outputs); String buildopt = oclGetTMacro(inputs[0]); for (size_t i = 0; i < inputs.size(); i++) { UMat& src = inputs[i]; UMat& dst = outputs[i]; ocl::Kernel kernel("PowForward", ocl::dnn::activations_oclsrc, buildopt); kernel.set(0, (int)src.total()); kernel.set(1, ocl::KernelArg::PtrReadOnly(src)); kernel.set(2, ocl::KernelArg::PtrWriteOnly(dst)); kernel.set(3, (float)power); kernel.set(4, (float)scale); kernel.set(5, (float)shift); size_t gSize = src.total(); CV_Assert(kernel.run(1, &gSize, NULL, false)); } return true; } #endif #ifdef HAVE_CUDA Ptr initCUDA(int target, csl::Stream stream) { return make_cuda_node(target, stream, power, scale, shift); } #endif #ifdef HAVE_HALIDE void attachHalide(const Halide::Expr& input, Halide::Func& top) { Halide::Var x("x"), y("y"), c("c"), n("n"); Halide::Expr topExpr = (scale == 1.0f ? 
struct ExpFunctor : public BaseDefaultFunctor<ExpFunctor>
{
    typedef ExpLayer Layer;

    float base, scale, shift;
    float normScale, normShift;

    ExpFunctor(float base_ = -1.f, float scale_ = 1.f, float shift_ = 0.f)
        : base(base_), scale(scale_), shift(shift_)
    {
        // For base > 0 :
        // y = base^(scale * input + shift)
        // ln(y) = ln(base)*(scale * input + shift)
        // y = exp((ln(base)*scale) * input + (ln(base)*shift))
        // y = exp(normalized_scale * input + normalized_shift)
        CV_Check(base, base == -1.f || base > 0.f, "Unsupported 'base' value");
        const float ln_base = (base == -1.f) ? 1.f : log(base);
        normScale = scale * ln_base;
        normShift = shift * ln_base;
    }

    bool supportBackend(int backendId, int targetId)
    {
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_CUDA ||
               backendId == DNN_BACKEND_HALIDE ||
               backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
    }

    inline float calculate(float x) const
    {
        return exp(normScale * x + normShift);
    }

    inline void setKernelParams(ocl::Kernel& kernel) const
    {
        kernel.set(3, normScale);
        kernel.set(4, normShift);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::ExpOp>(target, stream, normScale, normShift);
    }
#endif

#ifdef HAVE_HALIDE
    void attachHalide(const Halide::Expr& input, Halide::Func& top)
    {
        Halide::Var x("x"), y("y"), c("c"), n("n");
        top(x, y, c, n) = exp(normScale * input + normShift);
    }
#endif  // HAVE_HALIDE

#ifdef HAVE_DNN_NGRAPH
    std::shared_ptr<ov::Node> initNgraphAPI(const ov::Output<ov::Node>& node)
    {
        auto scale_node = std::make_shared<ov::op::v0::Constant>(ov::element::f32, ov::Shape{1}, &normScale);
        auto shift_node = std::make_shared<ov::op::v0::Constant>(ov::element::f32, ov::Shape{1}, &normShift);

        auto mul = std::make_shared<ov::op::v1::Multiply>(scale_node, node, ov::op::AutoBroadcastType::NUMPY);
        auto scale_shift = std::make_shared<ov::op::v1::Add>(mul, shift_node, ov::op::AutoBroadcastType::NUMPY);
        return std::make_shared<ov::op::v0::Exp>(scale_shift);
    }
#endif  // HAVE_DNN_NGRAPH

    int64 getFLOPSPerElement() const { return 3; }
};

template<>
const char* const ExpFunctor::BaseDefaultFunctor<ExpFunctor>::ocl_kernel_name = "ExpForward";
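// Numeric check of the normalization in ExpFunctor above (comment only): for
// base = 2, scale = 1, shift = 0 we get ln(2) ~= 0.6931, so normScale ~= 0.6931 and
// normShift = 0; then calculate(3.f) = exp(0.6931 * 3) ~= 8, i.e. 2^3, as expected.
// With the default base = -1 the natural exponential exp(scale*x + shift) is used
// directly, since ln_base is forced to 1.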
struct ChannelsPReLUFunctor : public BaseFunctor
{
    typedef ChannelsPReLULayer Layer;
    Mat scale;
#ifdef HAVE_OPENCL
    UMat scale_umat;
    std::string oclKernelName = "ChannelsPReLUForward";
#endif

    explicit ChannelsPReLUFunctor(const Mat& scale_=Mat()) : scale(scale_)
    {
    }

    bool supportBackend(int backendId, int)
    {
#ifdef HAVE_INF_ENGINE
        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
            return true;
#endif
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_CUDA ||
               backendId == DNN_BACKEND_HALIDE ||
               backendId == DNN_BACKEND_CANN;
    }

    void apply(const float* srcptr, float* dstptr, int stripeStart, int len, size_t planeSize, int cn0, int cn1) const
    {
        CV_UNUSED(stripeStart);
        CV_Assert(scale.isContinuous() && scale.type() == CV_32F);

        const float* scaleptr = scale.ptr<float>();
        CV_Assert( 0 <= cn0 && cn0 < cn1 && cn1 <= (int)scale.total() );

        for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize )
        {
            float s = scaleptr[cn];
            int i = 0;
#if CV_SIMD128
            v_float32x4 s4 = v_setall_f32(s), z = v_setzero_f32();
            for( ; i <= len - 16; i += 16 )
            {
                v_float32x4 x0 = v_load(srcptr + i);
                v_float32x4 x1 = v_load(srcptr + i + 4);
                v_float32x4 x2 = v_load(srcptr + i + 8);
                v_float32x4 x3 = v_load(srcptr + i + 12);
                x0 = v_select(v_ge(x0, z), x0, v_mul(x0, s4));
                x1 = v_select(v_ge(x1, z), x1, v_mul(x1, s4));
                x2 = v_select(v_ge(x2, z), x2, v_mul(x2, s4));
                x3 = v_select(v_ge(x3, z), x3, v_mul(x3, s4));
                v_store(dstptr + i, x0);
                v_store(dstptr + i + 4, x1);
                v_store(dstptr + i + 8, x2);
                v_store(dstptr + i + 12, x3);
            }
#endif
            for( ; i < len; i++ )
            {
                float x = srcptr[i];
                dstptr[i] = x >= 0.f ? x : s*x;
            }
        }
    }

#ifdef HAVE_OPENCL
    bool applyOCL(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
    {
        if (scale_umat.empty())
            scale.copyTo(scale_umat);

        std::vector<UMat> inputs;
        std::vector<UMat> outputs;

        inps.getUMatVector(inputs);
        outs.getUMatVector(outputs);
        String buildopt = oclGetTMacro(inputs[0]);

        for (size_t i = 0; i < inputs.size(); i++)
        {
            UMat& src = inputs[i];
            UMat& dst = outputs[i];

            ocl::Kernel kernel(oclKernelName.c_str(), ocl::dnn::activations_oclsrc, buildopt);
            kernel.set(0, (int)src.total());
            kernel.set(1, (int)src.size[1]);
            kernel.set(2, (int)total(shape(src), 2));
            kernel.set(3, ocl::KernelArg::PtrReadOnly(src));
            kernel.set(4, ocl::KernelArg::PtrWriteOnly(dst));
            kernel.set(5, ocl::KernelArg::PtrReadOnly(scale_umat));

            size_t gSize = src.total();
            CV_Assert(kernel.run(1, &gSize, NULL, false));
        }

        return true;
    }
#endif

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::ChannelwiseReLUOp>(target, stream, scale);
    }
#endif

#ifdef HAVE_HALIDE
    void attachHalide(const Halide::Expr& input, Halide::Func& top)
    {
        Halide::Var x("x"), y("y"), c("c"), n("n");
        auto weights = wrapToHalideBuffer(scale, {(int)scale.total()});
        top(x, y, c, n) = select(input >= 0.0f, input, weights(c) * input);
    }
#endif  // HAVE_HALIDE

#ifdef HAVE_CANN
    Ptr<BackendNode> initCannOp(const std::string& name,
                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                const std::vector<Ptr<BackendNode> >& nodes)
    {
        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
        auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
        auto x_desc = x->getTensorDesc();
        auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);

        auto op = std::make_shared<ge::op::PRelu>(name);

        op->set_input_x_by_name(*op_x, x->name.c_str());
        op->update_input_desc_x(*x_desc);

        // scale should be a 1d tensor of shape [n], and it is a 2d mat of shape [n, 1] in opencv
        std::vector<int> shape_{scale.size[0]};
        auto op_const_slope = std::make_shared<CannConstOp>(scale.data, scale.type(), shape_,
                                                            cv::format("%s_weight", name.c_str()));
        op->set_input_weight(*(op_const_slope->getOp()));
        op->update_input_desc_weight(*(op_const_slope->getTensorDesc()));

        op->update_output_desc_y(*output_desc);

        return Ptr<BackendNode>(new CannBackendNode(op));
    }
#endif // HAVE_CANN

#ifdef HAVE_DNN_NGRAPH
    std::shared_ptr<ov::Node> initNgraphAPI(const ov::Output<ov::Node>& node)
    {
        const size_t numChannels = scale.total();
        auto slope = std::make_shared<ov::op::v0::Constant>(ov::element::f32, ov::Shape{numChannels}, scale.data);
        return std::make_shared<ov::op::v0::PRelu>(node, slope);
    }
#endif  // HAVE_DNN_NGRAPH

#ifdef HAVE_WEBNN
    ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input)
    {
        CV_Error(Error::StsNotImplemented, "");
        ml::Operand operand;
        return operand;
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};
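// Note (added comment): ChannelsPReLUFunctor above applies one learned slope per
// channel (scale holds one entry per channel), whereas PReLUFunctor below expects a
// full per-element slope tensor laid out like the input plane, which is why its
// apply() also needs the stripe offset into the current plane.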
struct PReLUFunctor : public ChannelsPReLUFunctor
{
    explicit PReLUFunctor(const Mat& scale_=Mat()) : ChannelsPReLUFunctor(scale_)
    {
#ifdef HAVE_OPENCL
        oclKernelName = "PReLUForward";
#endif
    }

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_CANN ||
               backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
    }

    void apply(const float* srcptr, float* dstptr, int stripeStart, int len, size_t planeSize, int cn0, int cn1) const
    {
        CV_Assert(scale.isContinuous() && scale.type() == CV_32F);

        if (stripeStart < 0)
            CV_Error(Error::StsNotImplemented, "PReLUFunctor requires stripe offset parameter");

        const float* scaleptr = scale.ptr<float>() + cn0 * planeSize + stripeStart;
        for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize, scaleptr += planeSize )
        {
            int i = 0;
#if CV_SIMD128
            v_float32x4 z = v_setzero_f32();
            for( ; i <= len - 16; i += 16 )
            {
                v_float32x4 x0 = v_load(srcptr + i);
                v_float32x4 x1 = v_load(srcptr + i + 4);
                v_float32x4 x2 = v_load(srcptr + i + 8);
                v_float32x4 x3 = v_load(srcptr + i + 12);
                v_float32x4 s0 = v_load(scaleptr + i);
                v_float32x4 s1 = v_load(scaleptr + i + 4);
                v_float32x4 s2 = v_load(scaleptr + i + 8);
                v_float32x4 s3 = v_load(scaleptr + i + 12);
                x0 = v_select(v_ge(x0, z), x0, v_mul(x0, s0));
                x1 = v_select(v_ge(x1, z), x1, v_mul(x1, s1));
                x2 = v_select(v_ge(x2, z), x2, v_mul(x2, s2));
                x3 = v_select(v_ge(x3, z), x3, v_mul(x3, s3));
                v_store(dstptr + i, x0);
                v_store(dstptr + i + 4, x1);
                v_store(dstptr + i + 8, x2);
                v_store(dstptr + i + 12, x3);
            }
#endif
            for( ; i < len; i++ )
            {
                float x = srcptr[i];
                float s = scaleptr[i];
                dstptr[i] = x >= 0.f ? x : s*x;
            }
        }
    }

#ifdef HAVE_DNN_NGRAPH
    std::shared_ptr<ov::Node> initNgraphAPI(const ov::Output<ov::Node>& node)
    {
        auto shape = getShape<size_t>(scale);
        auto slope = std::make_shared<ov::op::v0::Constant>(ov::element::f32, shape, scale.ptr<float>());
        return std::make_shared<ov::op::v0::PRelu>(node, slope);
    }
#endif  // HAVE_DNN_NGRAPH
};
struct SignFunctor : public BaseDefaultFunctor<SignFunctor>
{
    typedef SignLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return x > 0.f ? 1.f : (x < 0.f ? -1.f : 0.f);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::SignOp>(target, stream);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const SignFunctor::BaseDefaultFunctor<SignFunctor>::ocl_kernel_name = "SignForward";

struct ShrinkFunctor : public BaseDefaultFunctor<ShrinkFunctor>
{
    typedef ShrinkLayer Layer;

    float bias;
    float lambd;

    explicit ShrinkFunctor(float bias_ = 0.0f, float lambd_ = 0.5f) : bias(bias_), lambd(lambd_) {}

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return x > lambd ? x - bias : (x < -lambd ? x + bias : 0.f);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::ShrinkOp>(target, stream, bias, lambd);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const ShrinkFunctor::BaseDefaultFunctor<ShrinkFunctor>::ocl_kernel_name = "ShrinkForward";
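// Small numeric check for ShrinkFunctor::calculate() above (comment only): with the
// defaults bias = 0 and lambd = 0.5, inputs 0.8, 0.3 and -0.8 map to 0.8, 0 and -0.8;
// with bias = 0.1 they map to 0.7, 0 and -0.7, i.e. values inside [-lambd, lambd]
// are zeroed and the remaining ones are shifted towards zero by bias.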
struct ReciprocalFunctor : public BaseDefaultFunctor<ReciprocalFunctor>
{
    typedef ReciprocalLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return 1.f/x;
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::ReciprocalOp>(target, stream);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const ReciprocalFunctor::BaseDefaultFunctor<ReciprocalFunctor>::ocl_kernel_name = "ReciprocalForward";


Ptr<ReLULayer> ReLULayer::create(const LayerParams& params)
{
    float negativeSlope = params.get<float>("negative_slope", 0.f);
    Ptr<ReLULayer> l(new ElementWiseLayer<ReLUFunctor>(ReLUFunctor(negativeSlope)));
    l->setParamsFrom(params);
    l->negativeSlope = negativeSlope;
    return l;
}

Ptr<ReLU6Layer> ReLU6Layer::create(const LayerParams& params)
{
    float minValue = params.get<float>("min_value", 0.0f);
    float maxValue = params.get<float>("max_value", 6.0f);
    Ptr<ReLU6Layer> l(new ElementWiseLayer<ReLU6Functor>(ReLU6Functor(minValue, maxValue)));
    l->setParamsFrom(params);
    l->minValue = minValue;
    l->maxValue = maxValue;
    return l;
}

Ptr<GeluLayer> GeluLayer::create(const LayerParams& params)
{
    Ptr<GeluLayer> l(new ElementWiseLayer<GeluFunctor>(GeluFunctor()));
    l->setParamsFrom(params);
    return l;
}

Ptr<GeluApproximationLayer> GeluApproximationLayer::create(const LayerParams& params)
{
    Ptr<GeluApproximationLayer> l(new ElementWiseLayer<GeluApproximationFunctor>(GeluApproximationFunctor()));
    l->setParamsFrom(params);
    return l;
}

Ptr<TanHLayer> TanHLayer::create(const LayerParams& params)
{
    Ptr<TanHLayer> l(new ElementWiseLayer<TanHFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<SwishLayer> SwishLayer::create(const LayerParams& params)
{
    Ptr<SwishLayer> l(new ElementWiseLayer<SwishFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<MishLayer> MishLayer::create(const LayerParams& params)
{
    Ptr<MishLayer> l(new ElementWiseLayer<MishFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<SigmoidLayer> SigmoidLayer::create(const LayerParams& params)
{
    Ptr<SigmoidLayer> l(new ElementWiseLayer<SigmoidFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<ELULayer> ELULayer::create(const LayerParams& params)
{
    float alpha = params.get<float>("alpha", 1.0f);
    Ptr<ELULayer> l(new ElementWiseLayer<ELUFunctor>(ELUFunctor(alpha)));
    l->setParamsFrom(params);
    l->alpha = alpha;
    return l;
}

Ptr<AbsLayer> AbsLayer::create(const LayerParams& params)
{
    Ptr<AbsLayer> l(new ElementWiseLayer<AbsValFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<BNLLLayer> BNLLLayer::create(const LayerParams& params)
{
    Ptr<BNLLLayer> l(new ElementWiseLayer<BNLLFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<CeilLayer> CeilLayer::create(const LayerParams& params)
{
    Ptr<CeilLayer> l(new ElementWiseLayer<CeilFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<FloorLayer> FloorLayer::create(const LayerParams& params)
{
    Ptr<FloorLayer> l(new ElementWiseLayer<FloorFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<LogLayer> LogLayer::create(const LayerParams& params)
{
    Ptr<LogLayer> l(new ElementWiseLayer<LogFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<RoundLayer> RoundLayer::create(const LayerParams& params)
{
    Ptr<RoundLayer> l(new ElementWiseLayer<RoundFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<SqrtLayer> SqrtLayer::create(const LayerParams& params)
{
    Ptr<SqrtLayer> l(new ElementWiseLayer<SqrtFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<NotLayer> NotLayer::create(const LayerParams& params)
{
    Ptr<NotLayer> l(new ElementWiseLayer<NotFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<AcosLayer> AcosLayer::create(const LayerParams& params)
{
    Ptr<AcosLayer> l(new ElementWiseLayer<AcosFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<AcoshLayer> AcoshLayer::create(const LayerParams& params)
{
    Ptr<AcoshLayer> l(new ElementWiseLayer<AcoshFunctor>());
    l->setParamsFrom(params);
    return l;
}
Ptr<AsinLayer> AsinLayer::create(const LayerParams& params)
{
    Ptr<AsinLayer> l(new ElementWiseLayer<AsinFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<AsinhLayer> AsinhLayer::create(const LayerParams& params)
{
    Ptr<AsinhLayer> l(new ElementWiseLayer<AsinhFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<AtanLayer> AtanLayer::create(const LayerParams& params)
{
    Ptr<AtanLayer> l(new ElementWiseLayer<AtanFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<AtanhLayer> AtanhLayer::create(const LayerParams& params)
{
    Ptr<AtanhLayer> l(new ElementWiseLayer<AtanhFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<CosLayer> CosLayer::create(const LayerParams& params)
{
    Ptr<CosLayer> l(new ElementWiseLayer<CosFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<CoshLayer> CoshLayer::create(const LayerParams& params)
{
    Ptr<CoshLayer> l(new ElementWiseLayer<CoshFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<ErfLayer> ErfLayer::create(const LayerParams& params)
{
    Ptr<ErfLayer> l(new ElementWiseLayer<ErfFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<HardSwishLayer> HardSwishLayer::create(const LayerParams& params)
{
    Ptr<HardSwishLayer> l(new ElementWiseLayer<HardSwishFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<SinLayer> SinLayer::create(const LayerParams& params)
{
    Ptr<SinLayer> l(new ElementWiseLayer<SinFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<SinhLayer> SinhLayer::create(const LayerParams& params)
{
    Ptr<SinhLayer> l(new ElementWiseLayer<SinhFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<SoftplusLayer> SoftplusLayer::create(const LayerParams& params)
{
    Ptr<SoftplusLayer> l(new ElementWiseLayer<SoftplusFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<SoftsignLayer> SoftsignLayer::create(const LayerParams& params)
{
    Ptr<SoftsignLayer> l(new ElementWiseLayer<SoftsignFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<TanLayer> TanLayer::create(const LayerParams& params)
{
    Ptr<TanLayer> l(new ElementWiseLayer<TanFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<CeluLayer> CeluLayer::create(const LayerParams& params)
{
    float alpha = params.get<float>("alpha", 1.f);
    Ptr<CeluLayer> l(new ElementWiseLayer<CeluFunctor>(CeluFunctor(alpha)));
    l->setParamsFrom(params);
    l->alpha = alpha;
    return l;
}

Ptr<HardSigmoidLayer> HardSigmoidLayer::create(const LayerParams& params)
{
    float alpha = params.get<float>("alpha", 0.2f);
    float beta = params.get<float>("beta", 0.5f);
    Ptr<HardSigmoidLayer> l(new ElementWiseLayer<HardSigmoidFunctor>(HardSigmoidFunctor(alpha, beta)));
    l->setParamsFrom(params);
    l->alpha = alpha;
    l->beta = beta;
    return l;
}

Ptr<SeluLayer> SeluLayer::create(const LayerParams& params)
{
    float alpha = params.get<float>("alpha", 1.67326319217681884765625f);
    float gamma = params.get<float>("gamma", 1.05070102214813232421875f);
    Ptr<SeluLayer> l(new ElementWiseLayer<SeluFunctor>(SeluFunctor(alpha, gamma)));
    l->setParamsFrom(params);
    l->alpha = alpha;
    l->gamma = gamma;
    return l;
}

Ptr<ThresholdedReluLayer> ThresholdedReluLayer::create(const LayerParams& params)
{
    float alpha = params.get<float>("alpha", 1.f);
    Ptr<ThresholdedReluLayer> l(new ElementWiseLayer<ThresholdedReluFunctor>(ThresholdedReluFunctor(alpha)));
    l->setParamsFrom(params);
    l->alpha = alpha;
    return l;
}

Ptr<PowerLayer> PowerLayer::create(const LayerParams& params)
{
    float power = params.get<float>("power", 1.0f);
    float scale = params.get<float>("scale", 1.0f);
    float shift = params.get<float>("shift", 0.0f);
    Ptr<PowerLayer> l(new ElementWiseLayer<PowerFunctor>(PowerFunctor(power, scale, shift)));
    l->setParamsFrom(params);
    l->power = power;
    l->scale = scale;
    l->shift = shift;
    return l;
}

Ptr<ExpLayer> ExpLayer::create(const LayerParams& params)
{
    float base = params.get<float>("base", -1.0f);
    float scale = params.get<float>("scale", 1.0f);
    float shift = params.get<float>("shift", 0.0f);
    Ptr<ExpLayer> l(new ElementWiseLayer<ExpFunctor>(ExpFunctor(base, scale, shift)));
    l->setParamsFrom(params);
    l->base = base;
    l->scale = scale;
    l->shift = shift;
    return l;
}
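// Note on the factory below (added comment): when every slope value in the blob is
// identical the layer degenerates to LeakyReLU and is created as a ReLULayer with
// "negative_slope"; otherwise a blob with more than one non-unit dimension (e.g. a
// full per-element slope tensor) selects PReLUFunctor, while a per-channel blob such
// as [C x 1] selects ChannelsPReLUFunctor.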
Ptr<Layer> ChannelsPReLULayer::create(const LayerParams& params)
{
    CV_Assert(params.blobs.size() == 1);
    Mat scale = params.blobs[0];
    float slope = *scale.ptr<float>();
    if (scale.total() == 1 || countNonZero(scale != slope) == 0)
    {
        LayerParams reluParams = params;
        reluParams.set("negative_slope", slope);
        return ReLULayer::create(reluParams);
    }
    Ptr<Layer> l;
    // Check first two dimensions of scale (batch, channels)
    MatShape scaleShape = shape(scale);
    if (std::count_if(scaleShape.begin(), scaleShape.end(), [](int d){ return d != 1;}) > 1)
        l = new ElementWiseLayer<PReLUFunctor>(PReLUFunctor(scale));
    else
        l = new ElementWiseLayer<ChannelsPReLUFunctor>(ChannelsPReLUFunctor(scale));
    l->setParamsFrom(params);
    return l;
}

Ptr<SignLayer> SignLayer::create(const LayerParams& params)
{
    Ptr<SignLayer> l(new ElementWiseLayer<SignFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<ReciprocalLayer> ReciprocalLayer::create(const LayerParams& params)
{
    Ptr<ReciprocalLayer> l(new ElementWiseLayer<ReciprocalFunctor>());
    l->setParamsFrom(params);
    return l;
}

Ptr<ShrinkLayer> ShrinkLayer::create(const LayerParams& params)
{
    float bias = params.get<float>("bias", 0.f);
    float lambd = params.get<float>("lambd", 0.5f);
    Ptr<ShrinkLayer> l(new ElementWiseLayer<ShrinkFunctor>(ShrinkFunctor(bias, lambd)));
    l->setParamsFrom(params);
    l->bias = bias;
    l->lambd = lambd;
    return l;
}

}
}