From 1bd382c1d0dc7ce65b352ffb0628b5e9bb431f28 Mon Sep 17 00:00:00 2001
From: Smirnov Egor
Date: Thu, 2 Dec 2021 15:45:27 +0300
Subject: [PATCH] Add acos, acosh, asin, asinh, atan, atanh, cos, cosh, erf, hardswish, sin, sinh, softplus, softsign, tan layers

---
 .../dnn/include/opencv2/dnn/all_layers.hpp    |  90 +++
 modules/dnn/src/cuda/activations.cu           | 105 ++++
 modules/dnn/src/cuda/functors.hpp             | 225 +++++++
 modules/dnn/src/cuda/math.hpp                 |  84 +++
 .../dnn/src/cuda4dnn/kernels/activations.hpp  |  45 ++
 .../src/cuda4dnn/primitives/activation.hpp    | 210 +++++++
 modules/dnn/src/init.cpp                      |  15 +
 modules/dnn/src/layers/elementwise_layers.cpp | 552 ++++++++++++++++++
 .../dnn/src/onnx/onnx_graph_simplifier.cpp    |  37 ++
 modules/dnn/src/opencl/activations.cl         |  84 +++
 ...rmance_layer_filter__cuda_denylist.inl.hpp |   1 +
 ...er_filter_opencv_ocl_fp16_denylist.inl.hpp |   1 +
 ..._conformance_layer_parser_denylist.inl.hpp |  29 -
 13 files changed, 1449 insertions(+), 29 deletions(-)

diff --git a/modules/dnn/include/opencv2/dnn/all_layers.hpp b/modules/dnn/include/opencv2/dnn/all_layers.hpp
index 45935279f5..26d7a9b069 100644
--- a/modules/dnn/include/opencv2/dnn/all_layers.hpp
+++ b/modules/dnn/include/opencv2/dnn/all_layers.hpp
@@ -648,6 +648,96 @@ CV__DNN_INLINE_NS_BEGIN
         static Ptr<NotLayer> create(const LayerParams &params);
     };
 
+    class CV_EXPORTS AcosLayer : public ActivationLayer
+    {
+    public:
+        static Ptr<AcosLayer> create(const LayerParams &params);
+    };
+
+    class CV_EXPORTS AcoshLayer : public ActivationLayer
+    {
+    public:
+        static Ptr<AcoshLayer> create(const LayerParams &params);
+    };
+
+    class CV_EXPORTS AsinLayer : public ActivationLayer
+    {
+    public:
+        static Ptr<AsinLayer> create(const LayerParams &params);
+    };
+
+    class CV_EXPORTS AsinhLayer : public ActivationLayer
+    {
+    public:
+        static Ptr<AsinhLayer> create(const LayerParams &params);
+    };
+
+    class CV_EXPORTS AtanLayer : public ActivationLayer
+    {
+    public:
+        static Ptr<AtanLayer> create(const LayerParams &params);
+    };
+
+    class CV_EXPORTS AtanhLayer : public ActivationLayer
+    {
+    public:
+        static Ptr<AtanhLayer> create(const LayerParams &params);
+    };
+
+    class CV_EXPORTS CosLayer : public ActivationLayer
+    {
+    public:
+        static Ptr<CosLayer> create(const LayerParams &params);
+    };
+
+    class CV_EXPORTS CoshLayer : public ActivationLayer
+    {
+    public:
+        static Ptr<CoshLayer> create(const LayerParams &params);
+    };
+
+    class CV_EXPORTS ErfLayer : public ActivationLayer
+    {
+    public:
+        static Ptr<ErfLayer> create(const LayerParams &params);
+    };
+
+    class CV_EXPORTS HardSwishLayer : public ActivationLayer
+    {
+    public:
+        static Ptr<HardSwishLayer> create(const LayerParams &params);
+    };
+
+    class CV_EXPORTS SinLayer : public ActivationLayer
+    {
+    public:
+        static Ptr<SinLayer> create(const LayerParams &params);
+    };
+
+    class CV_EXPORTS SinhLayer : public ActivationLayer
+    {
+    public:
+        static Ptr<SinhLayer> create(const LayerParams &params);
+    };
+
+    class CV_EXPORTS SoftplusLayer : public ActivationLayer
+    {
+    public:
+        static Ptr<SoftplusLayer> create(const LayerParams &params);
+    };
+
+    class CV_EXPORTS SoftsignLayer : public ActivationLayer
+    {
+    public:
+        static Ptr<SoftsignLayer> create(const LayerParams &params);
+    };
+
+    class CV_EXPORTS TanLayer : public ActivationLayer
+    {
+    public:
+        static Ptr<TanLayer> create(const LayerParams &params);
+    };
+
     class CV_EXPORTS ActivationLayerInt8 : public ActivationLayer
     {
     public:
diff --git a/modules/dnn/src/cuda/activations.cu b/modules/dnn/src/cuda/activations.cu
index 0980b5dd46..3d99a03ae3 100644
--- a/modules/dnn/src/cuda/activations.cu
+++ b/modules/dnn/src/cuda/activations.cu
@@ -158,6 +158,81 @@ void not_k(const Stream& stream, Span<T> output, View<T> input) {
     generic_op<NotFunctor<T>>(stream, output, input);
 }
 
+template <class T>
+void acos(const Stream& stream, Span<T> 
output, View input) { + generic_op>(stream, output, input); +} + +template +void acosh(const Stream& stream, Span output, View input) { + generic_op>(stream, output, input); +} + +template +void asin(const Stream& stream, Span output, View input) { + generic_op>(stream, output, input); +} + +template +void asinh(const Stream& stream, Span output, View input) { + generic_op>(stream, output, input); +} + +template +void atan(const Stream& stream, Span output, View input) { + generic_op>(stream, output, input); +} + +template +void atanh(const Stream& stream, Span output, View input) { + generic_op>(stream, output, input); +} + +template +void cos(const Stream& stream, Span output, View input) { + generic_op>(stream, output, input); +} + +template +void cosh(const Stream& stream, Span output, View input) { + generic_op>(stream, output, input); +} + +template +void erf(const Stream& stream, Span output, View input) { + generic_op>(stream, output, input); +} + +template +void hardswish(const Stream& stream, Span output, View input) { + generic_op>(stream, output, input); +} + +template +void sin(const Stream& stream, Span output, View input) { + generic_op>(stream, output, input); +} + +template +void sinh(const Stream& stream, Span output, View input) { + generic_op>(stream, output, input); +} + +template +void softplus(const Stream& stream, Span output, View input) { + generic_op>(stream, output, input); +} + +template +void softsign(const Stream& stream, Span output, View input) { + generic_op>(stream, output, input); +} + +template +void tan(const Stream& stream, Span output, View input) { + generic_op>(stream, output, input); +} + template void abs(const Stream& stream, Span output, View input) { generic_op>(stream, output, input); @@ -196,6 +271,21 @@ template void log<__half>(const Stream&, Span<__half>, View<__half>); template void rint<__half>(const Stream&, Span<__half>, View<__half>); template void sqrt<__half>(const Stream&, Span<__half>, View<__half>); template void not_k<__half>(const Stream&, Span<__half>, View<__half>); +template void acos<__half>(const Stream&, Span<__half>, View<__half>); +template void acosh<__half>(const Stream&, Span<__half>, View<__half>); +template void asin<__half>(const Stream&, Span<__half>, View<__half>); +template void asinh<__half>(const Stream&, Span<__half>, View<__half>); +template void atan<__half>(const Stream&, Span<__half>, View<__half>); +template void atanh<__half>(const Stream&, Span<__half>, View<__half>); +template void cos<__half>(const Stream&, Span<__half>, View<__half>); +template void cosh<__half>(const Stream&, Span<__half>, View<__half>); +template void erf<__half>(const Stream&, Span<__half>, View<__half>); +template void hardswish<__half>(const Stream&, Span<__half>, View<__half>); +template void sin<__half>(const Stream&, Span<__half>, View<__half>); +template void sinh<__half>(const Stream&, Span<__half>, View<__half>); +template void softplus<__half>(const Stream&, Span<__half>, View<__half>); +template void softsign<__half>(const Stream&, Span<__half>, View<__half>); +template void tan<__half>(const Stream&, Span<__half>, View<__half>); template void power<__half>(const Stream&, Span<__half>, View<__half>, __half, __half, __half); template void exp<__half>(const Stream&, Span<__half>, View<__half>, __half, __half); #endif @@ -216,6 +306,21 @@ template void log(const Stream&, Span, View); template void rint(const Stream&, Span, View); template void sqrt(const Stream&, Span, View); template void not_k(const Stream&, Span, 
View); +template void acos(const Stream&, Span, View); +template void acosh(const Stream&, Span, View); +template void asin(const Stream&, Span, View); +template void asinh(const Stream&, Span, View); +template void atan(const Stream&, Span, View); +template void atanh(const Stream&, Span, View); +template void cos(const Stream&, Span, View); +template void cosh(const Stream&, Span, View); +template void erf(const Stream&, Span, View); +template void hardswish(const Stream&, Span, View); +template void sin(const Stream&, Span, View); +template void sinh(const Stream&, Span, View); +template void softplus(const Stream&, Span, View); +template void softsign(const Stream&, Span, View); +template void tan(const Stream&, Span, View); template void power(const Stream&, Span, View, float, float, float); template void exp(const Stream&, Span, View, float, float); diff --git a/modules/dnn/src/cuda/functors.hpp b/modules/dnn/src/cuda/functors.hpp index 98ae175ce8..c3d1669344 100644 --- a/modules/dnn/src/cuda/functors.hpp +++ b/modules/dnn/src/cuda/functors.hpp @@ -303,6 +303,231 @@ struct NotFunctor { } }; +template +struct AcosFunctor { + struct Params { + CUDA4DNN_HOST_DEVICE Params() { } + }; + + CUDA4DNN_DEVICE AcosFunctor() { } + CUDA4DNN_DEVICE AcosFunctor(const Params& params) { } + + CUDA4DNN_DEVICE T operator()(T value) { + using csl::device::acos; + return acos(value); + } +}; + +template +struct AcoshFunctor { + struct Params { + CUDA4DNN_HOST_DEVICE Params() { } + }; + + CUDA4DNN_DEVICE AcoshFunctor() { } + CUDA4DNN_DEVICE AcoshFunctor(const Params& params) { } + + CUDA4DNN_DEVICE T operator()(T value) { + using csl::device::acosh; + return acosh(value); + } +}; + +template +struct AsinFunctor { + struct Params { + CUDA4DNN_HOST_DEVICE Params() { } + }; + + CUDA4DNN_DEVICE AsinFunctor() { } + CUDA4DNN_DEVICE AsinFunctor(const Params& params) { } + + CUDA4DNN_DEVICE T operator()(T value) { + using csl::device::asin; + return asin(value); + } +}; + +template +struct AsinhFunctor { + struct Params { + CUDA4DNN_HOST_DEVICE Params() { } + }; + + CUDA4DNN_DEVICE AsinhFunctor() { } + CUDA4DNN_DEVICE AsinhFunctor(const Params& params) { } + + CUDA4DNN_DEVICE T operator()(T value) { + using csl::device::asinh; + return asinh(value); + } +}; + +template +struct AtanFunctor { + struct Params { + CUDA4DNN_HOST_DEVICE Params() { } + }; + + CUDA4DNN_DEVICE AtanFunctor() { } + CUDA4DNN_DEVICE AtanFunctor(const Params& params) { } + + CUDA4DNN_DEVICE T operator()(T value) { + using csl::device::atan; + return atan(value); + } +}; + +template +struct AtanhFunctor { + struct Params { + CUDA4DNN_HOST_DEVICE Params() { } + }; + + CUDA4DNN_DEVICE AtanhFunctor() { } + CUDA4DNN_DEVICE AtanhFunctor(const Params& params) { } + + CUDA4DNN_DEVICE T operator()(T value) { + using csl::device::atanh; + return atanh(value); + } +}; + +template +struct CosFunctor { + struct Params { + CUDA4DNN_HOST_DEVICE Params() { } + }; + + CUDA4DNN_DEVICE CosFunctor() { } + CUDA4DNN_DEVICE CosFunctor(const Params& params) { } + + CUDA4DNN_DEVICE T operator()(T value) { + using csl::device::cos; + return cos(value); + } +}; + +template +struct CoshFunctor { + struct Params { + CUDA4DNN_HOST_DEVICE Params() { } + }; + + CUDA4DNN_DEVICE CoshFunctor() { } + CUDA4DNN_DEVICE CoshFunctor(const Params& params) { } + + CUDA4DNN_DEVICE T operator()(T value) { + using csl::device::cosh; + return cosh(value); + } +}; + +template +struct ErfFunctor { + struct Params { + CUDA4DNN_HOST_DEVICE Params() { } + }; + + CUDA4DNN_DEVICE ErfFunctor() { } 
+ CUDA4DNN_DEVICE ErfFunctor(const Params& params) { } + + CUDA4DNN_DEVICE T operator()(T value) { + using csl::device::erf; + return erf(value); + } +}; + +template +struct HardSwishFunctor { + struct Params { + CUDA4DNN_HOST_DEVICE Params() { } + }; + + CUDA4DNN_DEVICE HardSwishFunctor() { } + CUDA4DNN_DEVICE HardSwishFunctor(const Params& params) { } + + CUDA4DNN_DEVICE T operator()(T value) { + using csl::device::clamp; // saturate? + return value * clamp(value / static_cast(6.f) + static_cast(0.5f), static_cast(0.f), static_cast(1.f)); + } +}; + +template +struct SinFunctor { + struct Params { + CUDA4DNN_HOST_DEVICE Params() { } + }; + + CUDA4DNN_DEVICE SinFunctor() { } + CUDA4DNN_DEVICE SinFunctor(const Params& params) { } + + CUDA4DNN_DEVICE T operator()(T value) { + using csl::device::sin; + return sin(value); + } +}; + +template +struct SinhFunctor { + struct Params { + CUDA4DNN_HOST_DEVICE Params() { } + }; + + CUDA4DNN_DEVICE SinhFunctor() { } + CUDA4DNN_DEVICE SinhFunctor(const Params& params) { } + + CUDA4DNN_DEVICE T operator()(T value) { + using csl::device::sinh; + return sinh(value); + } +}; + +template +struct SoftplusFunctor { + struct Params { + CUDA4DNN_HOST_DEVICE Params() { } + }; + + CUDA4DNN_DEVICE SoftplusFunctor() { } + CUDA4DNN_DEVICE SoftplusFunctor(const Params& params) { } + + CUDA4DNN_DEVICE T operator()(T value) { + using csl::device::log1pexp; + return log1pexp(value); + } +}; + +template +struct SoftsignFunctor { + struct Params { + CUDA4DNN_HOST_DEVICE Params() { } + }; + + CUDA4DNN_DEVICE SoftsignFunctor() { } + CUDA4DNN_DEVICE SoftsignFunctor(const Params& params) { } + + CUDA4DNN_DEVICE T operator()(T value) { + using csl::device::abs; + return value / (static_cast(1.f) + abs(value)); + } +}; + +template +struct TanFunctor { + struct Params { + CUDA4DNN_HOST_DEVICE Params() { } + }; + + CUDA4DNN_DEVICE TanFunctor() { } + CUDA4DNN_DEVICE TanFunctor(const Params& params) { } + + CUDA4DNN_DEVICE T operator()(T value) { + using csl::device::tan; + return tan(value); + } +}; + template struct PowerFunctor { struct Params { diff --git a/modules/dnn/src/cuda/math.hpp b/modules/dnn/src/cuda/math.hpp index 0da584197d..0a312a250d 100644 --- a/modules/dnn/src/cuda/math.hpp +++ b/modules/dnn/src/cuda/math.hpp @@ -140,6 +140,90 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { namespace de template <> inline __device__ __half rint(__half value) { return hrint(value); } #endif + template __device__ T acos(T value); + template <> inline __device__ double acos(double value) { return ::acos(value); } + template <> inline __device__ float acos(float value) { return acosf(value); } +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) + template <> inline __device__ __half acos(__half value) { return acosf(value); } +#endif + + template __device__ T acosh(T value); + template <> inline __device__ double acosh(double value) { return ::acosh(value); } + template <> inline __device__ float acosh(float value) { return acoshf(value); } +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) + template <> inline __device__ __half acosh(__half value) { return acoshf(value); } +#endif + + template __device__ T asin(T value); + template <> inline __device__ double asin(double value) { return ::asin(value); } + template <> inline __device__ float asin(float value) { return asinf(value); } +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) + template <> inline __device__ __half asin(__half value) { return asinf(value); } +#endif + + template __device__ T 
asinh(T value); + template <> inline __device__ double asinh(double value) { return ::asinh(value); } + template <> inline __device__ float asinh(float value) { return asinhf(value); } +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) + template <> inline __device__ __half asinh(__half value) { return asinhf(value); } +#endif + + template __device__ T atan(T value); + template <> inline __device__ double atan(double value) { return ::atan(value); } + template <> inline __device__ float atan(float value) { return atanf(value); } +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) + template <> inline __device__ __half atan(__half value) { return atanf(value); } +#endif + + template __device__ T atanh(T value); + template <> inline __device__ double atanh(double value) { return ::atanh(value); } + template <> inline __device__ float atanh(float value) { return atanhf(value); } +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) + template <> inline __device__ __half atanh(__half value) { return atanhf(value); } +#endif + + template __device__ T cos(T value); + template <> inline __device__ double cos(double value) { return ::cos(value); } + template <> inline __device__ float cos(float value) { return cosf(value); } +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) + template <> inline __device__ __half cos(__half value) { return hcos(value); } +#endif + + template __device__ T cosh(T value); + template <> inline __device__ double cosh(double value) { return ::cosh(value); } + template <> inline __device__ float cosh(float value) { return coshf(value); } +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) + template <> inline __device__ __half cosh(__half value) { return coshf(value); } +#endif + + template __device__ T erf(T value); + template <> inline __device__ double erf(double value) { return ::erf(value); } + template <> inline __device__ float erf(float value) { return erff(value); } +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) + template <> inline __device__ __half erf(__half value) { return erff(value); } +#endif + + template __device__ T sin(T value); + template <> inline __device__ double sin(double value) { return ::sin(value); } + template <> inline __device__ float sin(float value) { return sinf(value); } +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) + template <> inline __device__ __half sin(__half value) { return hsin(value); } +#endif + + template __device__ T sinh(T value); + template <> inline __device__ double sinh(double value) { return ::sinh(value); } + template <> inline __device__ float sinh(float value) { return sinhf(value); } +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) + template <> inline __device__ __half sinh(__half value) { return sinhf(value); } +#endif + + template __device__ T tan(T value); + template <> inline __device__ double tan(double value) { return ::tan(value); } + template <> inline __device__ float tan(float value) { return tanf(value); } +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) + template <> inline __device__ __half tan(__half value) { return tanf(value); } +#endif + template __device__ T ceil(T value); template <> inline __device__ double ceil(double value) { return ::ceil(value); } template <> inline __device__ float ceil(float value) { return ceilf(value); } diff --git a/modules/dnn/src/cuda4dnn/kernels/activations.hpp b/modules/dnn/src/cuda4dnn/kernels/activations.hpp index d7c471a5ec..854bc8ac0c 100644 --- a/modules/dnn/src/cuda4dnn/kernels/activations.hpp +++ 
b/modules/dnn/src/cuda4dnn/kernels/activations.hpp @@ -60,6 +60,51 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { template void not_k(const csl::Stream& stream, csl::Span output, csl::View input); + template + void acos(const csl::Stream& stream, csl::Span output, csl::View input); + + template + void acosh(const csl::Stream& stream, csl::Span output, csl::View input); + + template + void asin(const csl::Stream& stream, csl::Span output, csl::View input); + + template + void asinh(const csl::Stream& stream, csl::Span output, csl::View input); + + template + void atan(const csl::Stream& stream, csl::Span output, csl::View input); + + template + void atanh(const csl::Stream& stream, csl::Span output, csl::View input); + + template + void cos(const csl::Stream& stream, csl::Span output, csl::View input); + + template + void cosh(const csl::Stream& stream, csl::Span output, csl::View input); + + template + void erf(const csl::Stream& stream, csl::Span output, csl::View input); + + template + void hardswish(const csl::Stream& stream, csl::Span output, csl::View input); + + template + void sin(const csl::Stream& stream, csl::Span output, csl::View input); + + template + void sinh(const csl::Stream& stream, csl::Span output, csl::View input); + + template + void softplus(const csl::Stream& stream, csl::Span output, csl::View input); + + template + void softsign(const csl::Stream& stream, csl::Span output, csl::View input); + + template + void tan(const csl::Stream& stream, csl::Span output, csl::View input); + template void power(const csl::Stream& stream, csl::Span output, csl::View input, T exp, T scale, T shift); diff --git a/modules/dnn/src/cuda4dnn/primitives/activation.hpp b/modules/dnn/src/cuda4dnn/primitives/activation.hpp index 77a79703fe..4691996d4e 100644 --- a/modules/dnn/src/cuda4dnn/primitives/activation.hpp +++ b/modules/dnn/src/cuda4dnn/primitives/activation.hpp @@ -280,6 +280,216 @@ namespace cv { namespace dnn { namespace cuda4dnn { csl::Stream stream; }; + template + class AcosOp final : public BaseOp { + public: + AcosOp(csl::Stream stream_) : stream(std::move(stream_)) { } + + void calculate(csl::TensorSpan output, csl::TensorView input) const + { + kernels::acos(stream, output, input); + } + + private: + csl::Stream stream; + }; + + template + class AcoshOp final : public BaseOp { + public: + AcoshOp(csl::Stream stream_) : stream(std::move(stream_)) { } + + void calculate(csl::TensorSpan output, csl::TensorView input) const + { + kernels::acosh(stream, output, input); + } + + private: + csl::Stream stream; + }; + + template + class AsinOp final : public BaseOp { + public: + AsinOp(csl::Stream stream_) : stream(std::move(stream_)) { } + + void calculate(csl::TensorSpan output, csl::TensorView input) const + { + kernels::asin(stream, output, input); + } + + private: + csl::Stream stream; + }; + + template + class AsinhOp final : public BaseOp { + public: + AsinhOp(csl::Stream stream_) : stream(std::move(stream_)) { } + + void calculate(csl::TensorSpan output, csl::TensorView input) const + { + kernels::asinh(stream, output, input); + } + + private: + csl::Stream stream; + }; + + template + class AtanOp final : public BaseOp { + public: + AtanOp(csl::Stream stream_) : stream(std::move(stream_)) { } + + void calculate(csl::TensorSpan output, csl::TensorView input) const + { + kernels::atan(stream, output, input); + } + + private: + csl::Stream stream; + }; + + template + class AtanhOp final : public BaseOp { + public: + AtanhOp(csl::Stream stream_) : 
stream(std::move(stream_)) { } + + void calculate(csl::TensorSpan output, csl::TensorView input) const + { + kernels::atanh(stream, output, input); + } + + private: + csl::Stream stream; + }; + + template + class CosOp final : public BaseOp { + public: + CosOp(csl::Stream stream_) : stream(std::move(stream_)) { } + + void calculate(csl::TensorSpan output, csl::TensorView input) const + { + kernels::cos(stream, output, input); + } + + private: + csl::Stream stream; + }; + + template + class CoshOp final : public BaseOp { + public: + CoshOp(csl::Stream stream_) : stream(std::move(stream_)) { } + + void calculate(csl::TensorSpan output, csl::TensorView input) const + { + kernels::cosh(stream, output, input); + } + + private: + csl::Stream stream; + }; + + template + class ErfOp final : public BaseOp { + public: + ErfOp(csl::Stream stream_) : stream(std::move(stream_)) { } + + void calculate(csl::TensorSpan output, csl::TensorView input) const + { + kernels::erf(stream, output, input); + } + + private: + csl::Stream stream; + }; + + template + class HardSwishOp final : public BaseOp { + public: + HardSwishOp(csl::Stream stream_) : stream(std::move(stream_)) { } + + void calculate(csl::TensorSpan output, csl::TensorView input) const + { + kernels::hardswish(stream, output, input); + } + + private: + csl::Stream stream; + }; + + template + class SinOp final : public BaseOp { + public: + SinOp(csl::Stream stream_) : stream(std::move(stream_)) { } + + void calculate(csl::TensorSpan output, csl::TensorView input) const + { + kernels::sin(stream, output, input); + } + + private: + csl::Stream stream; + }; + + template + class SinhOp final : public BaseOp { + public: + SinhOp(csl::Stream stream_) : stream(std::move(stream_)) { } + + void calculate(csl::TensorSpan output, csl::TensorView input) const + { + kernels::sinh(stream, output, input); + } + + private: + csl::Stream stream; + }; + + template + class SoftplusOp final : public BaseOp { + public: + SoftplusOp(csl::Stream stream_) : stream(std::move(stream_)) { } + + void calculate(csl::TensorSpan output, csl::TensorView input) const + { + kernels::softplus(stream, output, input); + } + + private: + csl::Stream stream; + }; + + template + class SoftsignOp final : public BaseOp { + public: + SoftsignOp(csl::Stream stream_) : stream(std::move(stream_)) { } + + void calculate(csl::TensorSpan output, csl::TensorView input) const + { + kernels::softsign(stream, output, input); + } + + private: + csl::Stream stream; + }; + + template + class TanOp final : public BaseOp { + public: + TanOp(csl::Stream stream_) : stream(std::move(stream_)) { } + + void calculate(csl::TensorSpan output, csl::TensorView input) const + { + kernels::tan(stream, output, input); + } + + private: + csl::Stream stream; + }; + template class PowerOp final : public BaseOp { public: diff --git a/modules/dnn/src/init.cpp b/modules/dnn/src/init.cpp index 443d1eaef4..89a91e17ae 100644 --- a/modules/dnn/src/init.cpp +++ b/modules/dnn/src/init.cpp @@ -117,6 +117,21 @@ void initializeLayerFactory() CV_DNN_REGISTER_LAYER_CLASS(Round, RoundLayer); CV_DNN_REGISTER_LAYER_CLASS(Sqrt, SqrtLayer); CV_DNN_REGISTER_LAYER_CLASS(Not, NotLayer); + CV_DNN_REGISTER_LAYER_CLASS(Acos, AcosLayer); + CV_DNN_REGISTER_LAYER_CLASS(Acosh, AcoshLayer); + CV_DNN_REGISTER_LAYER_CLASS(Asin, AsinLayer); + CV_DNN_REGISTER_LAYER_CLASS(Asinh, AsinhLayer); + CV_DNN_REGISTER_LAYER_CLASS(Atan, AtanLayer); + CV_DNN_REGISTER_LAYER_CLASS(Atanh, AtanhLayer); + CV_DNN_REGISTER_LAYER_CLASS(Cos, CosLayer); + 
CV_DNN_REGISTER_LAYER_CLASS(Cosh, CoshLayer); + CV_DNN_REGISTER_LAYER_CLASS(Erf, ErfLayer); + CV_DNN_REGISTER_LAYER_CLASS(HardSwish, HardSwishLayer); + CV_DNN_REGISTER_LAYER_CLASS(Sin, SinLayer); + CV_DNN_REGISTER_LAYER_CLASS(Sinh, SinhLayer); + CV_DNN_REGISTER_LAYER_CLASS(Softplus, SoftplusLayer); + CV_DNN_REGISTER_LAYER_CLASS(Softsign, SoftsignLayer); + CV_DNN_REGISTER_LAYER_CLASS(Tan, TanLayer); CV_DNN_REGISTER_LAYER_CLASS(BatchNorm, BatchNormLayer); CV_DNN_REGISTER_LAYER_CLASS(MaxUnpool, MaxUnpoolLayer); CV_DNN_REGISTER_LAYER_CLASS(Dropout, BlankLayer); diff --git a/modules/dnn/src/layers/elementwise_layers.cpp b/modules/dnn/src/layers/elementwise_layers.cpp index 7cec0d5f7b..772dfca602 100644 --- a/modules/dnn/src/layers/elementwise_layers.cpp +++ b/modules/dnn/src/layers/elementwise_layers.cpp @@ -76,8 +76,21 @@ using std::pow; using std::ceil; using std::floor; using std::log; +using std::log1p; using std::sqrt; using std::round; +using std::acos; +using std::acosh; +using std::asin; +using std::asinh; +using std::atan; +using std::atanh; +using std::cos; +using std::cosh; +using std::erf; +using std::sin; +using std::sinh; +using std::tan; template class ElementWiseLayer : public Func::Layer @@ -746,6 +759,20 @@ struct BaseDefaultFunctor : public BaseFunctor } #endif // HAVE_VULKAN +#ifdef HAVE_CUDA + Ptr initCUDA(int target, csl::Stream stream) + { + CV_Error(Error::StsNotImplemented, ""); + } +#endif + +#ifdef HAVE_HALIDE + void attachHalide(const Halide::Expr& input, Halide::Func& top) + { + CV_Error(Error::StsNotImplemented, ""); + } +#endif // HAVE_HALIDE + private: static const char* const ocl_kernel_name; }; @@ -1390,6 +1417,411 @@ struct NotFunctor : public BaseDefaultFunctor template<> const char* const BaseDefaultFunctor::ocl_kernel_name = "NotForward"; +struct AcosFunctor : public BaseDefaultFunctor +{ + typedef AcosLayer Layer; + + bool supportBackend(int backendId, int) + { + return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; + } + + inline float calculate(float x) const + { + return acos(x); + } + +#ifdef HAVE_CUDA + Ptr initCUDA(int target, csl::Stream stream) + { + return make_cuda_node(target, stream); + } +#endif + + int64 getFLOPSPerElement() const { return 1; } +}; + +template<> +const char* const BaseDefaultFunctor::ocl_kernel_name = "AcosForward"; + +struct AcoshFunctor : public BaseDefaultFunctor +{ + typedef AcoshLayer Layer; + + bool supportBackend(int backendId, int) + { + return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; + } + + inline float calculate(float x) const + { + return acosh(x); + } + +#ifdef HAVE_CUDA + Ptr initCUDA(int target, csl::Stream stream) + { + return make_cuda_node(target, stream); + } +#endif + + int64 getFLOPSPerElement() const { return 1; } +}; + +template<> +const char* const BaseDefaultFunctor::ocl_kernel_name = "AcoshForward"; + +struct AsinFunctor : public BaseDefaultFunctor +{ + typedef AsinLayer Layer; + + bool supportBackend(int backendId, int) + { + return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; + } + + inline float calculate(float x) const + { + return asin(x); + } + +#ifdef HAVE_CUDA + Ptr initCUDA(int target, csl::Stream stream) + { + return make_cuda_node(target, stream); + } +#endif + + int64 getFLOPSPerElement() const { return 1; } +}; + +template<> +const char* const BaseDefaultFunctor::ocl_kernel_name = "AsinForward"; + +struct AsinhFunctor : public BaseDefaultFunctor +{ + typedef AsinhLayer Layer; + + bool supportBackend(int backendId, int) + { 
+ return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; + } + + inline float calculate(float x) const + { + return asinh(x); + } + +#ifdef HAVE_CUDA + Ptr initCUDA(int target, csl::Stream stream) + { + return make_cuda_node(target, stream); + } +#endif + + int64 getFLOPSPerElement() const { return 1; } +}; + +template<> +const char* const BaseDefaultFunctor::ocl_kernel_name = "AsinhForward"; + +struct AtanFunctor : public BaseDefaultFunctor +{ + typedef AtanLayer Layer; + + bool supportBackend(int backendId, int) + { + return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; + } + + inline float calculate(float x) const + { + return atan(x); + } + +#ifdef HAVE_CUDA + Ptr initCUDA(int target, csl::Stream stream) + { + return make_cuda_node(target, stream); + } +#endif + + int64 getFLOPSPerElement() const { return 1; } +}; + +template<> +const char* const BaseDefaultFunctor::ocl_kernel_name = "AtanForward"; + +struct AtanhFunctor : public BaseDefaultFunctor +{ + typedef AtanhLayer Layer; + + bool supportBackend(int backendId, int) + { + return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; + } + + inline float calculate(float x) const + { + return atanh(x); + } + +#ifdef HAVE_CUDA + Ptr initCUDA(int target, csl::Stream stream) + { + return make_cuda_node(target, stream); + } +#endif + + int64 getFLOPSPerElement() const { return 1; } +}; + +template<> +const char* const BaseDefaultFunctor::ocl_kernel_name = "AtanhForward"; + +struct CosFunctor : public BaseDefaultFunctor +{ + typedef CosLayer Layer; + + bool supportBackend(int backendId, int) + { + return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; + } + + inline float calculate(float x) const + { + return cos(x); + } + +#ifdef HAVE_CUDA + Ptr initCUDA(int target, csl::Stream stream) + { + return make_cuda_node(target, stream); + } +#endif + + int64 getFLOPSPerElement() const { return 1; } +}; + +template<> +const char* const BaseDefaultFunctor::ocl_kernel_name = "CosForward"; + +struct CoshFunctor : public BaseDefaultFunctor +{ + typedef CoshLayer Layer; + + bool supportBackend(int backendId, int) + { + return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; + } + + inline float calculate(float x) const + { + return cosh(x); + } + +#ifdef HAVE_CUDA + Ptr initCUDA(int target, csl::Stream stream) + { + return make_cuda_node(target, stream); + } +#endif + + int64 getFLOPSPerElement() const { return 1; } +}; + +template<> +const char* const BaseDefaultFunctor::ocl_kernel_name = "CoshForward"; + +struct ErfFunctor : public BaseDefaultFunctor +{ + typedef ErfLayer Layer; + + bool supportBackend(int backendId, int) + { + return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; + } + + inline float calculate(float x) const + { + return erf(x); + } + +#ifdef HAVE_CUDA + Ptr initCUDA(int target, csl::Stream stream) + { + return make_cuda_node(target, stream); + } +#endif + + int64 getFLOPSPerElement() const { return 1; } +}; + +template<> +const char* const BaseDefaultFunctor::ocl_kernel_name = "ErfForward"; + +struct HardSwishFunctor : public BaseDefaultFunctor +{ + typedef HardSwishLayer Layer; + + bool supportBackend(int backendId, int) + { + return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; + } + + inline float calculate(float x) const + { + return x * max(0.f, min(1.f, x / 6.f + 0.5f)); + } + +#ifdef HAVE_CUDA + Ptr initCUDA(int target, csl::Stream stream) + { + return make_cuda_node(target, stream); + } +#endif + + 
int64 getFLOPSPerElement() const { return 1; } +}; + +template<> +const char* const BaseDefaultFunctor::ocl_kernel_name = "HardSwishForward"; + +struct SinFunctor : public BaseDefaultFunctor +{ + typedef SinLayer Layer; + + bool supportBackend(int backendId, int) + { + return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; + } + + inline float calculate(float x) const + { + return sin(x); + } + +#ifdef HAVE_CUDA + Ptr initCUDA(int target, csl::Stream stream) + { + return make_cuda_node(target, stream); + } +#endif + + int64 getFLOPSPerElement() const { return 1; } +}; + +template<> +const char* const BaseDefaultFunctor::ocl_kernel_name = "SinForward"; + +struct SinhFunctor : public BaseDefaultFunctor +{ + typedef SinhLayer Layer; + + bool supportBackend(int backendId, int) + { + return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; + } + + inline float calculate(float x) const + { + return sinh(x); + } + +#ifdef HAVE_CUDA + Ptr initCUDA(int target, csl::Stream stream) + { + return make_cuda_node(target, stream); + } +#endif + + int64 getFLOPSPerElement() const { return 1; } +}; + +template<> +const char* const BaseDefaultFunctor::ocl_kernel_name = "SinhForward"; + +struct SoftplusFunctor : public BaseDefaultFunctor +{ + typedef SoftplusLayer Layer; + + bool supportBackend(int backendId, int) + { + return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; + } + + inline float calculate(float x) const + { + return log1p(exp(x)); + } + +#ifdef HAVE_CUDA + Ptr initCUDA(int target, csl::Stream stream) + { + return make_cuda_node(target, stream); + } +#endif + + int64 getFLOPSPerElement() const { return 1; } +}; + +template<> +const char* const BaseDefaultFunctor::ocl_kernel_name = "SoftplusForward"; + +struct SoftsignFunctor : public BaseDefaultFunctor +{ + typedef SoftsignLayer Layer; + + bool supportBackend(int backendId, int) + { + return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; + } + + inline float calculate(float x) const + { + return x / (1.f + abs(x)); + } + +#ifdef HAVE_CUDA + Ptr initCUDA(int target, csl::Stream stream) + { + return make_cuda_node(target, stream); + } +#endif + + int64 getFLOPSPerElement() const { return 1; } +}; + +template<> +const char* const BaseDefaultFunctor::ocl_kernel_name = "SoftsignForward"; + +struct TanFunctor : public BaseDefaultFunctor +{ + typedef TanLayer Layer; + + bool supportBackend(int backendId, int) + { + return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA; + } + + inline float calculate(float x) const + { + return tan(x); + } + +#ifdef HAVE_CUDA + Ptr initCUDA(int target, csl::Stream stream) + { + return make_cuda_node(target, stream); + } +#endif + + int64 getFLOPSPerElement() const { return 1; } +}; + +template<> +const char* const BaseDefaultFunctor::ocl_kernel_name = "TanForward"; + struct PowerFunctor : public BaseFunctor { typedef PowerLayer Layer; @@ -1937,6 +2369,126 @@ Ptr NotLayer::create(const LayerParams& params) return l; } +Ptr AcosLayer::create(const LayerParams& params) +{ + Ptr l(new ElementWiseLayer()); + l->setParamsFrom(params); + + return l; +} + +Ptr AcoshLayer::create(const LayerParams& params) +{ + Ptr l(new ElementWiseLayer()); + l->setParamsFrom(params); + + return l; +} + +Ptr AsinLayer::create(const LayerParams& params) +{ + Ptr l(new ElementWiseLayer()); + l->setParamsFrom(params); + + return l; +} + +Ptr AsinhLayer::create(const LayerParams& params) +{ + Ptr l(new ElementWiseLayer()); + l->setParamsFrom(params); + + 
return l; +} + +Ptr AtanLayer::create(const LayerParams& params) +{ + Ptr l(new ElementWiseLayer()); + l->setParamsFrom(params); + + return l; +} + +Ptr AtanhLayer::create(const LayerParams& params) +{ + Ptr l(new ElementWiseLayer()); + l->setParamsFrom(params); + + return l; +} + +Ptr CosLayer::create(const LayerParams& params) +{ + Ptr l(new ElementWiseLayer()); + l->setParamsFrom(params); + + return l; +} + +Ptr CoshLayer::create(const LayerParams& params) +{ + Ptr l(new ElementWiseLayer()); + l->setParamsFrom(params); + + return l; +} + +Ptr ErfLayer::create(const LayerParams& params) +{ + Ptr l(new ElementWiseLayer()); + l->setParamsFrom(params); + + return l; +} + +Ptr HardSwishLayer::create(const LayerParams& params) +{ + Ptr l(new ElementWiseLayer()); + l->setParamsFrom(params); + + return l; +} + +Ptr SinLayer::create(const LayerParams& params) +{ + Ptr l(new ElementWiseLayer()); + l->setParamsFrom(params); + + return l; +} + +Ptr SinhLayer::create(const LayerParams& params) +{ + Ptr l(new ElementWiseLayer()); + l->setParamsFrom(params); + + return l; +} + +Ptr SoftplusLayer::create(const LayerParams& params) +{ + Ptr l(new ElementWiseLayer()); + l->setParamsFrom(params); + + return l; +} + +Ptr SoftsignLayer::create(const LayerParams& params) +{ + Ptr l(new ElementWiseLayer()); + l->setParamsFrom(params); + + return l; +} + +Ptr TanLayer::create(const LayerParams& params) +{ + Ptr l(new ElementWiseLayer()); + l->setParamsFrom(params); + + return l; +} + Ptr PowerLayer::create(const LayerParams& params) { float power = params.get("power", 1.0f); diff --git a/modules/dnn/src/onnx/onnx_graph_simplifier.cpp b/modules/dnn/src/onnx/onnx_graph_simplifier.cpp index 15ce9624c4..81a5df1a28 100644 --- a/modules/dnn/src/onnx/onnx_graph_simplifier.cpp +++ b/modules/dnn/src/onnx/onnx_graph_simplifier.cpp @@ -206,6 +206,42 @@ public: } }; +class HardSwishSubgraph : public Subgraph +{ +public: + HardSwishSubgraph() + { + int input = addNodeToMatch(""); + int hardSigmoid = addNodeToMatch("HardSigmoid", input); + addNodeToMatch("Mul", input, hardSigmoid); + setFusedNode("HardSwish", input); + } + + virtual bool match(const Ptr& net, int nodeId, + std::vector& matchedNodesIds, + std::vector& targetNodesIds) CV_OVERRIDE + { + if (Subgraph::match(net, nodeId, matchedNodesIds, targetNodesIds)) + { + Ptr hardSigmoid = net->getNode(matchedNodesIds[0]); + opencv_onnx::NodeProto* node = hardSigmoid.dynamicCast()->node; + + uint8_t matched = 0; + for (int i = 0; i < node->attribute_size(); i++) + { + opencv_onnx::AttributeProto attr = node->attribute(i); + if ((attr.name() == "alpha" && attr.f() == 1.f / 6.f) || + (attr.name() == "beta" && attr.f() == 0.5f)) + { + ++matched; + } + } + return matched == 2; + } + return false; + } +}; + class NormalizeSubgraphBase : public Subgraph { public: @@ -625,6 +661,7 @@ void simplifySubgraphs(opencv_onnx::GraphProto& net) subgraphs.push_back(makePtr()); subgraphs.push_back(makePtr()); subgraphs.push_back(makePtr()); + subgraphs.push_back(makePtr()); subgraphs.push_back(makePtr()); subgraphs.push_back(makePtr()); subgraphs.push_back(makePtr()); diff --git a/modules/dnn/src/opencl/activations.cl b/modules/dnn/src/opencl/activations.cl index e110160c06..02ed9345c3 100644 --- a/modules/dnn/src/opencl/activations.cl +++ b/modules/dnn/src/opencl/activations.cl @@ -188,3 +188,87 @@ __kernel void NotForward(const int n, __global T* in, __global T* out) { if(index < n) out[index] = floor(1.0f - in[index]); } + +__kernel void AcosForward(const int n, __global T* in, __global 
T* out) { + int index = get_global_id(0); + if(index < n) + out[index] = acos(in[index]); +} + +__kernel void AcoshForward(const int n, __global T* in, __global T* out) { + int index = get_global_id(0); + if(index < n) + out[index] = acosh(in[index]); +} + +__kernel void AsinForward(const int n, __global T* in, __global T* out) { + int index = get_global_id(0); + if(index < n) + out[index] = asin(in[index]); +} + +__kernel void AsinhForward(const int n, __global T* in, __global T* out) { + int index = get_global_id(0); + if(index < n) + out[index] = asinh(in[index]); +} + +__kernel void AtanForward(const int n, __global T* in, __global T* out) { + int index = get_global_id(0); + if(index < n) + out[index] = atan(in[index]); +} + +__kernel void AtanhForward(const int n, __global T* in, __global T* out) { + int index = get_global_id(0); + if(index < n) + out[index] = atanh(in[index]); +} + +__kernel void CosForward(const int n, __global T* in, __global T* out) { + int index = get_global_id(0); + if(index < n) + out[index] = cos(in[index]); +} + +__kernel void CoshForward(const int n, __global T* in, __global T* out) { + int index = get_global_id(0); + if(index < n) + out[index] = cosh(in[index]); +} + +__kernel void HardSwishForward(const int n, __global T* in, __global T* out) { + int index = get_global_id(0); + if(index < n) + out[index] = in[index] * max(0.f, min(1.f, in[index] / 6.f + 0.5f)); +} + +__kernel void SinForward(const int n, __global T* in, __global T* out) { + int index = get_global_id(0); + if(index < n) + out[index] = sin(in[index]); +} + +__kernel void SinhForward(const int n, __global T* in, __global T* out) { + int index = get_global_id(0); + if(index < n) + out[index] = sinh(in[index]); +} + +__kernel void SoftplusForward(const int n, __global T* in, __global T* out) { + int index = get_global_id(0); + if(index < n) + out[index] = log1p(exp(in[index])); +} + +__kernel void SoftsignForward(const int n, __global T* in, __global T* out) { + int index = get_global_id(0); + if(index < n) + out[index] = in[index] / (1.f + fabs(in[index])); +} + +__kernel void TanForward(const int n, __global T* in, __global T* out) { + int index = get_global_id(0); + if(index < n) + out[index] = tan(in[index]); +} diff --git a/modules/dnn/test/test_onnx_conformance_layer_filter__cuda_denylist.inl.hpp b/modules/dnn/test/test_onnx_conformance_layer_filter__cuda_denylist.inl.hpp index 61a5843b35..0f5f387132 100644 --- a/modules/dnn/test/test_onnx_conformance_layer_filter__cuda_denylist.inl.hpp +++ b/modules/dnn/test/test_onnx_conformance_layer_filter__cuda_denylist.inl.hpp @@ -71,4 +71,5 @@ "test_softmax_large_number_expanded", // FP16 only "test_sub_bcast", "test_sub_uint8", +"test_tan", // FP16 only "test_upsample_nearest", diff --git a/modules/dnn/test/test_onnx_conformance_layer_filter_opencv_ocl_fp16_denylist.inl.hpp b/modules/dnn/test/test_onnx_conformance_layer_filter_opencv_ocl_fp16_denylist.inl.hpp index 573d847985..ccd1568845 100644 --- a/modules/dnn/test/test_onnx_conformance_layer_filter_opencv_ocl_fp16_denylist.inl.hpp +++ b/modules/dnn/test/test_onnx_conformance_layer_filter_opencv_ocl_fp16_denylist.inl.hpp @@ -19,3 +19,4 @@ "test_split_equal_parts_1d", "test_split_equal_parts_2d", "test_split_equal_parts_default_axis", +"test_tan", diff --git a/modules/dnn/test/test_onnx_conformance_layer_parser_denylist.inl.hpp b/modules/dnn/test/test_onnx_conformance_layer_parser_denylist.inl.hpp index b95cafd5d7..a69ace0d14 100644 --- 
a/modules/dnn/test/test_onnx_conformance_layer_parser_denylist.inl.hpp
+++ b/modules/dnn/test/test_onnx_conformance_layer_parser_denylist.inl.hpp
@@ -1,9 +1,5 @@
 // The file is autogenerated
 // Update note: execute /testdata/dnn/onnx/generate_conformance_list.py
-"test_acos",
-"test_acos_example",
-"test_acosh",
-"test_acosh_example",
 "test_adagrad",
 "test_adagrad_multiple",
 "test_adam",
@@ -16,14 +12,6 @@
 "test_and_bcast4v2d",
 "test_and_bcast4v3d",
 "test_and_bcast4v4d",
-"test_asin",
-"test_asin_example",
-"test_asinh",
-"test_asinh_example",
-"test_atan",
-"test_atan_example",
-"test_atanh",
-"test_atanh_example",
 "test_basic_convinteger",
 "test_batchnorm_epsilon",
 "test_batchnorm_epsilon_training_mode",
@@ -102,10 +90,6 @@
 "test_convtranspose_pad",
 "test_convtranspose_pads",
 "test_convtranspose_with_kernel",
-"test_cos",
-"test_cos_example",
-"test_cosh",
-"test_cosh_example",
 "test_cumsum_1d",
 "test_cumsum_1d_exclusive",
 "test_cumsum_1d_reverse",
@@ -138,7 +122,6 @@
 "test_einsum_transpose",
 "test_equal",
 "test_equal_bcast",
-"test_erf",
 "test_expand_dim_changed",
 "test_expand_dim_unchanged",
 "test_eyelike_populate_off_main_diagonal",
@@ -193,8 +176,6 @@
 "test_hardsigmoid",
 "test_hardsigmoid_default",
 "test_hardsigmoid_example",
-"test_hardswish",
-"test_hardswish_expanded",
 "test_identity_opt",
 "test_identity_sequence",
 "test_if",
@@ -564,10 +545,6 @@
 "test_simple_rnn_batchwise",
 "test_simple_rnn_defaults",
 "test_simple_rnn_with_initial_bias",
-"test_sin",
-"test_sin_example",
-"test_sinh",
-"test_sinh_example",
 "test_size",
 "test_size_example",
 "test_slice",
@@ -578,10 +555,6 @@
 "test_slice_neg_steps",
 "test_slice_negative_axes",
 "test_slice_start_out_of_bounds",
-"test_softplus",
-"test_softplus_example",
-"test_softsign",
-"test_softsign_example",
 "test_spacetodepth",
 "test_spacetodepth_example",
 "test_split_variable_parts_1d",
@@ -599,8 +572,6 @@
 "test_sub_example",
 "test_sum_example",
 "test_sum_two_inputs",
-"test_tan",
-"test_tan_example",
 "test_tfidfvectorizer_tf_batch_onlybigrams_skip0",
 "test_tfidfvectorizer_tf_batch_onlybigrams_skip5",
 "test_tfidfvectorizer_tf_batch_uniandbigrams_skip5",
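
For reference, the scalar formulas used by the three non-trivial activations in this patch (HardSwish, Softplus and Softsign) can be checked with a small standalone C++ snippet. This is only a minimal illustrative sketch that mirrors the CPU functors added in elementwise_layers.cpp; the helper names below are introduced here for illustration and are not part of the patch or of OpenCV.

// Standalone reference for the element-wise math added by this patch.
// Helper names (hardswish_ref, ...) are illustrative only.
#include <algorithm>
#include <cmath>
#include <cstdio>

static float hardswish_ref(float x)
{
    // x * clamp(x / 6 + 0.5, 0, 1), as in HardSwishFunctor::calculate
    return x * std::max(0.f, std::min(1.f, x / 6.f + 0.5f));
}

static float softplus_ref(float x)
{
    // log(1 + exp(x)); log1p keeps precision when exp(x) is small,
    // as in SoftplusFunctor::calculate
    return std::log1p(std::exp(x));
}

static float softsign_ref(float x)
{
    // x / (1 + |x|), as in SoftsignFunctor::calculate
    return x / (1.f + std::fabs(x));
}

int main()
{
    for (float x : {-3.f, -1.f, 0.f, 1.f, 3.f})
        std::printf("x=% .1f  hardswish=% .4f  softplus=% .4f  softsign=% .4f\n",
                    x, hardswish_ref(x), softplus_ref(x), softsign_ref(x));
    return 0;
}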