Merge pull request #21530 from anna-khakimova:ak/simd_divrc

* GAPI Fluid: SIMD for DivRC kernel.

* Fluid: Div kernel's SIMD refactoring

* SIMD for DivRC 3 channel case

* Applied comments
Anna Khakimova authored 3 years ago, committed by GitHub
parent ebb6915e58
commit 9c7adb7248
Files changed:
  1. modules/gapi/perf/common/gapi_core_perf_tests_inl.hpp (4 changes)
  2. modules/gapi/perf/cpu/gapi_core_perf_tests_fluid.cpp (4 changes)
  3. modules/gapi/src/backends/fluid/gfluidcore.cpp (77 changes)
  4. modules/gapi/src/backends/fluid/gfluidcore_func.dispatch.cpp (27 changes)
  5. modules/gapi/src/backends/fluid/gfluidcore_func.hpp (23 changes)
  6. modules/gapi/src/backends/fluid/gfluidcore_func.simd.hpp (803 changes)

@@ -528,6 +528,10 @@ PERF_TEST_P_(DivRCPerfTest, TestPerformance)
    // FIXIT Unstable input data for divide
    initMatsRandU(type, sz, dtype, false);

    // This condition is needed as a workaround for a bug in OpenCV:
    // it reinitializes the divider matrix without zero values for the CV_16S DST type.
    if (dtype == CV_16S || (type == CV_16S && dtype == -1))
        cv::randu(in_mat1, cv::Scalar::all(1), cv::Scalar::all(255));

    // OpenCV code ///////////////////////////////////////////////////////////
    cv::divide(sc, in_mat1, out_mat_ocv, scale, dtype);
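For context: the workaround above re-randomizes the divider without zeros because OpenCV defines integer division by zero to yield 0, and the SIMD paths added by this patch reproduce exactly that convention with v_select masks. A minimal standalone sketch of the behavior (ordinary OpenCV usage, not part of the patch):

    #include <opencv2/core.hpp>
    #include <cstdio>

    int main()
    {
        cv::Mat num = (cv::Mat_<short>(1, 3) << 10, 20, 30);
        cv::Mat den = (cv::Mat_<short>(1, 3) <<  2,  0,  5);
        cv::Mat dst;
        cv::divide(num, den, dst, /*scale=*/1.0, CV_16S);
        // Integer outputs map division by zero to 0: prints "5 0 6".
        std::printf("%d %d %d\n", dst.at<short>(0), dst.at<short>(1), dst.at<short>(2));
        return 0;
    }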

@@ -101,8 +101,8 @@ INSTANTIATE_TEST_CASE_P(DivCPerfTestFluid, DivCPerfTest,
 INSTANTIATE_TEST_CASE_P(DivRCPerfTestFluid, DivRCPerfTest,
     Combine(Values(Tolerance_FloatRel_IntAbs(1e-5, 1).to_compare_f()),
             Values(szSmall128, szVGA, sz720p, sz1080p),
-            Values(CV_8UC1, CV_8UC3, CV_32FC1),
-            Values(-1, CV_8U, CV_32F),
+            Values(CV_8UC1, CV_8UC3, CV_16UC1, CV_16SC1, CV_32FC1),
+            Values(-1, CV_8U, CV_16U, CV_16S, CV_32F),
             Values(1.0),
             Values(cv::compile_args(CORE_FLUID))));

@@ -936,8 +936,8 @@ CV_ALWAYS_INLINE void run_arithm_s(Buffer &dst, const View &src, const float sca
}

template<typename DST, typename SRC>
-static void run_arithm_rs(Buffer &dst, const View &src, const float scalar[4], Arithm arithm,
-                          float scale=1)
+CV_ALWAYS_INLINE void run_arithm_rs(Buffer &dst, const View &src, const float scalar[],
+                                    Arithm arithm, float scale=1)
{
    const auto *in  = src.InLine<SRC>(0);
    auto *out = dst.OutLine<DST>();

@@ -955,15 +955,23 @@ static void run_arithm_rs(Buffer &dst, const View &src, const float scalar[4], A
        w = subrc_simd(scalar, in, out, length, chan);
#endif
        for (; w < length; ++w)
+       {
            out[w] = subr<DST>(in[w], scalar[w % chan]);
+       }
        break;
    }
-   // TODO: optimize division
    case ARITHM_DIVIDE:
-       for (int w=0; w < width; w++)
-           for (int c=0; c < chan; c++)
-               out[chan*w + c] = div<DST>(scalar[c], in[chan*w + c], scale);
+   {
+       int w = 0;
+#if CV_SIMD
+       w = divrc_simd(scalar, in, out, length, chan, scale);
+#endif
+       for (; w < length; ++w)
+       {
+           out[w] = div<DST>(scalar[w % chan], in[w], scale);
+       }
        break;
+   }
    default: CV_Error(cv::Error::StsBadArg, "unsupported arithmetic operation");
    }
}
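The rewritten ARITHM_DIVIDE branch follows the same pattern as the ARITHM_SUBR branch above it: a dispatched SIMD routine (divrc_simd) processes as many elements as the vector width allows and returns the index it stopped at, and a scalar loop finishes the remainder. A minimal sketch of that contract, with a hypothetical stand-in helper (simd_part is illustrative, not a G-API function):

    #include <vector>
    #include <cassert>

    // Stand-in for divrc_simd(): processes whole "vectors" of nlanes elements
    // and returns the index of the first element it did not handle.
    static int simd_part(const float* in, float* out, int length)
    {
        const int nlanes = 8;              // pretend vector width
        if (length < nlanes) return 0;     // too short: let the caller do it all
        int x = 0;
        for (; x <= length - nlanes; x += nlanes)
            for (int i = 0; i < nlanes; ++i)   // stands in for one vector op
                out[x + i] = in[x + i] * 2.f;
        return x;
    }

    int main()
    {
        std::vector<float> in(21, 1.f), out(21, 0.f);
        int w = simd_part(in.data(), out.data(), 21);
        for (; w < 21; ++w)                // scalar tail, same math
            out[w] = in[w] * 2.f;
        assert(out[20] == 2.f);
        return 0;
    }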
@@ -1319,7 +1327,9 @@ CV_ALWAYS_INLINE void run_divc(Buffer& dst, const View& src, Buffer& scratch,
#endif
    for (; w < length; ++w)
    {
        out[w] = div<DST>(in[w], scalar[w % chan], scale);
    }
}

GAPI_FLUID_KERNEL(GFluidDivC, cv::gapi::core::GDivC, true)
@@ -1402,32 +1412,55 @@ GAPI_FLUID_KERNEL(GFluidDivC, cv::gapi::core::GDivC, true)
    }
};

-GAPI_FLUID_KERNEL(GFluidDivRC, cv::gapi::core::GDivRC, false)
+GAPI_FLUID_KERNEL(GFluidDivRC, cv::gapi::core::GDivRC, true)
{
    static const int Window = 1;

-   static void run(const cv::Scalar &_scalar, const View &src, double _scale, int /*dtype*/,
-                   Buffer &dst)
+   static void run(const cv::Scalar& _scalar, const View& src, double _scale, int /*dtype*/,
+                   Buffer& dst, Buffer& scratch)
    {
-       const float scalar[4] = {
-           static_cast<float>(_scalar[0]),
-           static_cast<float>(_scalar[1]),
-           static_cast<float>(_scalar[2]),
-           static_cast<float>(_scalar[3])
-       };
+       GAPI_Assert(src.meta().chan <= 4);
+
+       if (dst.y() == 0)
+       {
+           const int chan = src.meta().chan;
+           float* _scratch = scratch.OutLine<float>();
+           scalar_to_scratch(_scalar, _scratch, scratch.length(), chan);
+       }
+       const float* scalar = scratch.OutLine<float>();
        const float scale = static_cast<float>(_scale);

        //     DST     SRC     OP             __VA_ARGS__
-       UNARY_(uchar , uchar , run_arithm_rs, dst, src, scalar, ARITHM_DIVIDE, scale);
-       UNARY_(uchar , short, run_arithm_rs, dst, src, scalar, ARITHM_DIVIDE, scale);
-       UNARY_(uchar , float, run_arithm_rs, dst, src, scalar, ARITHM_DIVIDE, scale);
-       UNARY_( short, short, run_arithm_rs, dst, src, scalar, ARITHM_DIVIDE, scale);
-       UNARY_( float, uchar , run_arithm_rs, dst, src, scalar, ARITHM_DIVIDE, scale);
-       UNARY_( float, short, run_arithm_rs, dst, src, scalar, ARITHM_DIVIDE, scale);
-       UNARY_( float, float, run_arithm_rs, dst, src, scalar, ARITHM_DIVIDE, scale);
+       UNARY_(uchar,  uchar,  run_arithm_rs, dst, src, scalar, ARITHM_DIVIDE, scale);
+       UNARY_(uchar,  ushort, run_arithm_rs, dst, src, scalar, ARITHM_DIVIDE, scale);
+       UNARY_(uchar,  short,  run_arithm_rs, dst, src, scalar, ARITHM_DIVIDE, scale);
+       UNARY_(uchar,  float,  run_arithm_rs, dst, src, scalar, ARITHM_DIVIDE, scale);
+       UNARY_(ushort, ushort, run_arithm_rs, dst, src, scalar, ARITHM_DIVIDE, scale);
+       UNARY_(ushort, uchar,  run_arithm_rs, dst, src, scalar, ARITHM_DIVIDE, scale);
+       UNARY_(ushort, short,  run_arithm_rs, dst, src, scalar, ARITHM_DIVIDE, scale);
+       UNARY_(ushort, float,  run_arithm_rs, dst, src, scalar, ARITHM_DIVIDE, scale);
+       UNARY_(short,  short,  run_arithm_rs, dst, src, scalar, ARITHM_DIVIDE, scale);
+       UNARY_(short,  uchar,  run_arithm_rs, dst, src, scalar, ARITHM_DIVIDE, scale);
+       UNARY_(short,  ushort, run_arithm_rs, dst, src, scalar, ARITHM_DIVIDE, scale);
+       UNARY_(short,  float,  run_arithm_rs, dst, src, scalar, ARITHM_DIVIDE, scale);
+       UNARY_(float,  uchar,  run_arithm_rs, dst, src, scalar, ARITHM_DIVIDE, scale);
+       UNARY_(float,  ushort, run_arithm_rs, dst, src, scalar, ARITHM_DIVIDE, scale);
+       UNARY_(float,  short,  run_arithm_rs, dst, src, scalar, ARITHM_DIVIDE, scale);
+       UNARY_(float,  float,  run_arithm_rs, dst, src, scalar, ARITHM_DIVIDE, scale);

        CV_Error(cv::Error::StsBadArg, "unsupported combination of types");
    }
+
+   static void initScratch(const GScalarDesc&, const GMatDesc&, double, int, Buffer& scratch)
+   {
+       initScratchBuffer(scratch);
+   }
+
+   static void resetScratch(Buffer& /*scratch*/)
+   {
+   }
};
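The kernel is now declared scratch-using (the trailing true in GAPI_FLUID_KERNEL), so the cv::Scalar is expanded into a scratch line once per frame, on the first row (dst.y() == 0), rather than per call site. The patch delegates that to a scalar_to_scratch helper defined elsewhere in gfluidcore.cpp; a sketch of the semantics it presumably has, namely tiling the chan scalar values so that scratch[i] == scalar[i % chan], assuming that reading of the code:

    #include <opencv2/core.hpp>

    // Illustrative stand-in for the patch's scalar_to_scratch(); the assumed
    // contract is only that the per-channel values repeat across the row.
    static void scalar_to_scratch_sketch(const cv::Scalar& s, float* scratch,
                                         int length, int chan)
    {
        for (int i = 0; i < length; ++i)
            scratch[i] = static_cast<float>(s[i % chan]);
    }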
//-------------------

@@ -235,6 +235,33 @@ ABSDIFFC_SIMD(float)

#undef ABSDIFFC_SIMD

#define DIVRC_SIMD(SRC, DST) \
int divrc_simd(const float scalar[], const SRC in[], DST out[], \
const int length, const int chan, const float scale) \
{ \
CV_CPU_DISPATCH(divrc_simd, (scalar, in, out, length, chan, scale), \
CV_CPU_DISPATCH_MODES_ALL); \
}
DIVRC_SIMD(uchar, uchar)
DIVRC_SIMD(ushort, uchar)
DIVRC_SIMD(short, uchar)
DIVRC_SIMD(float, uchar)
DIVRC_SIMD(short, short)
DIVRC_SIMD(ushort, short)
DIVRC_SIMD(uchar, short)
DIVRC_SIMD(float, short)
DIVRC_SIMD(ushort, ushort)
DIVRC_SIMD(uchar, ushort)
DIVRC_SIMD(short, ushort)
DIVRC_SIMD(float, ushort)
DIVRC_SIMD(uchar, float)
DIVRC_SIMD(ushort, float)
DIVRC_SIMD(short, float)
DIVRC_SIMD(float, float)
#undef DIVRC_SIMD

int split3_simd(const uchar in[], uchar out1[], uchar out2[],
                uchar out3[], const int width)
{

@@ -187,6 +187,29 @@ ABSDIFFC_SIMD(float)

#undef ABSDIFFC_SIMD

#define DIVRC_SIMD(SRC, DST) \
int divrc_simd(const float scalar[], const SRC in[], DST out[], \
const int length, const int chan, const float scale);
DIVRC_SIMD(uchar, uchar)
DIVRC_SIMD(ushort, uchar)
DIVRC_SIMD(short, uchar)
DIVRC_SIMD(float, uchar)
DIVRC_SIMD(short, short)
DIVRC_SIMD(ushort, short)
DIVRC_SIMD(uchar, short)
DIVRC_SIMD(float, short)
DIVRC_SIMD(ushort, ushort)
DIVRC_SIMD(uchar, ushort)
DIVRC_SIMD(short, ushort)
DIVRC_SIMD(float, ushort)
DIVRC_SIMD(uchar, float)
DIVRC_SIMD(ushort, float)
DIVRC_SIMD(short, float)
DIVRC_SIMD(float, float)
#undef DIVRC_SIMD

int split3_simd(const uchar in[], uchar out1[], uchar out2[],
                uchar out3[], const int width);

@@ -208,6 +208,29 @@ ABSDIFFC_SIMD(float)

#undef ABSDIFFC_SIMD

#define DIVRC_SIMD(SRC, DST) \
int divrc_simd(const float scalar[], const SRC in[], DST out[], \
const int length, const int chan, const float scale);
DIVRC_SIMD(uchar, uchar)
DIVRC_SIMD(ushort, uchar)
DIVRC_SIMD(short, uchar)
DIVRC_SIMD(float, uchar)
DIVRC_SIMD(short, short)
DIVRC_SIMD(ushort, short)
DIVRC_SIMD(uchar, short)
DIVRC_SIMD(float, short)
DIVRC_SIMD(ushort, ushort)
DIVRC_SIMD(uchar, ushort)
DIVRC_SIMD(short, ushort)
DIVRC_SIMD(float, ushort)
DIVRC_SIMD(uchar, float)
DIVRC_SIMD(ushort, float)
DIVRC_SIMD(short, float)
DIVRC_SIMD(float, float)
#undef DIVRC_SIMD

int split3_simd(const uchar in[], uchar out1[], uchar out2[],
                uchar out3[], const int width);
@@ -236,6 +259,28 @@ template<> struct vector_type_of<ushort> { using type = v_uint16; };
template<> struct vector_type_of<short>  { using type = v_int16; };
template<> struct vector_type_of<float>  { using type = v_float32; };

template<typename scalar_t>
struct zero_vec_type_of;
template<typename scalar_t>
using zero_vec_type_of_t = typename zero_vec_type_of<scalar_t>::type;
template<> struct zero_vec_type_of<uchar> { using type = v_int16; };
template<> struct zero_vec_type_of<ushort> { using type = v_int16; };
template<> struct zero_vec_type_of<short> { using type = v_int16; };
template<> struct zero_vec_type_of<float> { using type = v_float32; };
template<typename scalar_t>
struct univ_zero_vec_type_of;
template<typename scalar_t>
using univ_zero_vec_type_of_t = typename univ_zero_vec_type_of<scalar_t>::type;
template<> struct univ_zero_vec_type_of<uchar> { using type = v_uint8; };
template<> struct univ_zero_vec_type_of<ushort> { using type = v_int16; };
template<> struct univ_zero_vec_type_of<short> { using type = v_int16; };
template<> struct univ_zero_vec_type_of<float> { using type = v_float32; };
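A quick illustration of what the two trait maps encode (illustrative static_asserts, assuming this header's universal-intrinsic types are in scope): for 8- and 16-bit sources the divisor-vs-zero comparison happens on widened v_int16 lanes, float sources compare directly in v_float32, and the "univ" variant keeps uchar divisors as whole v_uint8 vectors for the 3-channel path:

    #include <type_traits>

    // These assertions just restate the specializations above.
    static_assert(std::is_same<zero_vec_type_of_t<uchar>, v_int16>::value,
                  "8-bit divisors are compared as widened int16 lanes");
    static_assert(std::is_same<univ_zero_vec_type_of_t<uchar>, v_uint8>::value,
                  "...except in the c3 path, which masks whole uint8 vectors");
    static_assert(std::is_same<zero_vec_type_of_t<float>, v_float32>::value,
                  "float divisors are compared directly");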

CV_ALWAYS_INLINE v_float32 vg_load_f32(const float* in)
{
    return vx_load(in);
@@ -295,143 +340,85 @@ CV_ALWAYS_INLINE void v_store_select(short* dst, const v_int16& div, const v_int
CV_ALWAYS_INLINE void v_store_select(ushort* dst, const v_int16& div, const v_int16& v_zero,
                                     const v_int32& res1, const v_int32& res2)
{
-   v_uint16 sel = v_reinterpret_as_u16(v_select(div == v_zero, v_zero, v_pack(res1, res2)));
-   vx_store(dst, sel);
+   vx_store(dst, v_select(v_reinterpret_as_u16(div == v_zero),
+                          v_reinterpret_as_u16(v_zero), v_pack_u(res1, res2)));
}

-//=================================================================================================
+//=============================================================================

-template<typename scale_tag_t, typename SRC, typename DST>
-CV_ALWAYS_INLINE
-typename std::enable_if<(std::is_same<SRC, short>::value && std::is_same<DST, ushort>::value) ||
-                        (std::is_same<SRC, ushort>::value && std::is_same<DST, ushort>::value) ||
-                        (std::is_same<SRC, ushort>::value && std::is_same<DST, short>::value), int>::type
-div_hal(scale_tag_t t, const SRC in1[], const SRC in2[], DST out[], const int length, double _scale)
-{
-    constexpr int nlanes = vector_type_of_t<DST>::nlanes;
-
-    if (length < nlanes)
-        return 0;
-
-    v_int16 v_zero = vx_setall_s16(0);
-    v_float32 scale = vx_setall_f32(static_cast<float>(_scale));
-
-    int x = 0;
-    for (;;)
-    {
-        for (; x <= length - nlanes; x += nlanes)
-        {
-            v_float32 a1 = vg_load_f32(&in1[x]);
-            v_float32 a2 = vg_load_f32(&in1[x + nlanes / 2]);
-
-            v_int16 div = v_reinterpret_as_s16(vx_load(&in2[x]));
-
-            v_float32 fdiv1 = v_cvt_f32(v_expand_low(div));
-            v_float32 fdiv2 = v_cvt_f32(v_expand_high(div));
-
-            v_int32 r1 = v_round(div_op(t, a1, fdiv1, scale));
-            v_int32 r2 = v_round(div_op(t, a2, fdiv2, scale));
-
-            v_store_select(&out[x], div, v_zero, r1, r2);
-        }
-        if (x < length)
-        {
-            x = length - nlanes;
-            continue;  // process one more time (unaligned tail)
-        }
-        break;
-    }
-    return x;
-}
+template<typename scale_tag_t>
+CV_ALWAYS_INLINE
+void div_simd_impl(scale_tag_t s_tag, const v_float32& a1, const v_float32& a2,
+                   const v_float32& a3, const v_float32& a4, const uchar* in2x,
+                   uchar* outx, const v_float32& v_scale, const v_int16& v_zero)
+{
+    constexpr int nlanes = v_uint8::nlanes;
+
+    v_int16 div1 = v_reinterpret_as_s16(vx_load_expand(in2x));
+    v_int16 div2 = v_reinterpret_as_s16(vx_load_expand(&in2x[nlanes/2]));
+
+    v_float32 fdiv1 = v_cvt_f32(v_expand_low(div1));
+    v_float32 fdiv2 = v_cvt_f32(v_expand_high(div1));
+    v_float32 fdiv3 = v_cvt_f32(v_expand_low(div2));
+    v_float32 fdiv4 = v_cvt_f32(v_expand_high(div2));
+
+    v_int32 sum1 = v_round(div_op(s_tag, a1, fdiv1, v_scale)),
+            sum2 = v_round(div_op(s_tag, a2, fdiv2, v_scale)),
+            sum3 = v_round(div_op(s_tag, a3, fdiv3, v_scale)),
+            sum4 = v_round(div_op(s_tag, a4, fdiv4, v_scale));
+
+    v_int16 res1 = v_select((div1 == v_zero), v_zero, v_pack(sum1, sum2));
+    v_int16 res2 = v_select((div2 == v_zero), v_zero, v_pack(sum3, sum4));
+
+    vx_store(outx, v_pack_u(res1, res2));
+}
+//-------------------------------------------------------------------------------------------------

template<typename scale_tag_t, typename SRC>
CV_ALWAYS_INLINE
typename std::enable_if<std::is_same<SRC, short>::value ||
-                        std::is_same<SRC, ushort>::value, int>::type
-div_hal(scale_tag_t t, const SRC in1[], const SRC in2[], uchar out[], const int length, double _scale)
+                        std::is_same<SRC, ushort>::value, void>::type
+div_simd_impl(scale_tag_t s_tag, const v_float32& a1, const v_float32& a2,
+              const v_float32& a3, const v_float32& a4, const SRC* in2x,
+              uchar* outx, const v_float32& v_scale, const v_int16& v_zero)
{
    constexpr int nlanes = v_uint8::nlanes;

-    if (length < nlanes)
-        return 0;
-
-    v_float32 scale = vx_setall_f32(static_cast<float>(_scale));
-    v_int16 v_zero = vx_setall_s16(0);
-
-    int x = 0;
-    for (;;)
-    {
-        for (; x <= length - nlanes; x += nlanes)
-        {
-            v_float32 a1 = vg_load_f32(&in1[x]);
-            v_float32 a2 = vg_load_f32(&in1[x + nlanes / 4]);
-            v_float32 a3 = vg_load_f32(&in1[x + nlanes / 2]);
-            v_float32 a4 = vg_load_f32(&in1[x + 3 * nlanes / 4]);
-
-            v_int16 div1 = v_reinterpret_as_s16(vx_load(&in2[x]));
-            v_int16 div2 = v_reinterpret_as_s16(vx_load(&in2[x + nlanes/2]));
-
-            v_float32 fdiv1 = v_cvt_f32(v_expand_low(div1));
-            v_float32 fdiv2 = v_cvt_f32(v_expand_high(div1));
-            v_float32 fdiv3 = v_cvt_f32(v_expand_low(div2));
-            v_float32 fdiv4 = v_cvt_f32(v_expand_high(div2));
-
-            v_int32 sum1 = v_round(div_op(t, a1, fdiv1, scale)),
-                    sum2 = v_round(div_op(t, a2, fdiv2, scale)),
-                    sum3 = v_round(div_op(t, a3, fdiv3, scale)),
-                    sum4 = v_round(div_op(t, a4, fdiv4, scale));
-
-            v_int16 res1 = v_select((div1 == v_zero), v_zero, v_pack(sum1, sum2));
-            v_int16 res2 = v_select((div2 == v_zero), v_zero, v_pack(sum3, sum4));
-
-            vx_store(&out[x], v_pack_u(res1, res2));
-        }
-        if (x < length)
-        {
-            x = length - nlanes;
-            continue;  // process one more time (unaligned tail)
-        }
-        break;
-    }
-    return x;
+    v_int16 div1 = v_reinterpret_as_s16(vx_load(in2x));
+    v_int16 div2 = v_reinterpret_as_s16(vx_load(&in2x[nlanes/2]));
+
+    v_float32 fdiv1 = v_cvt_f32(v_expand_low(div1));
+    v_float32 fdiv2 = v_cvt_f32(v_expand_high(div1));
+    v_float32 fdiv3 = v_cvt_f32(v_expand_low(div2));
+    v_float32 fdiv4 = v_cvt_f32(v_expand_high(div2));
+
+    v_int32 sum1 = v_round(div_op(s_tag, a1, fdiv1, v_scale)),
+            sum2 = v_round(div_op(s_tag, a2, fdiv2, v_scale)),
+            sum3 = v_round(div_op(s_tag, a3, fdiv3, v_scale)),
+            sum4 = v_round(div_op(s_tag, a4, fdiv4, v_scale));
+
+    v_int16 res1 = v_select((div1 == v_zero), v_zero, v_pack(sum1, sum2));
+    v_int16 res2 = v_select((div2 == v_zero), v_zero, v_pack(sum3, sum4));
+
+    vx_store(outx, v_pack_u(res1, res2));
}
-//-------------------------------------------------------------------------------------------------

template<typename scale_tag_t>
-CV_ALWAYS_INLINE int div_hal(scale_tag_t t, const float in1[], const float in2[], uchar out[],
-                             const int length, double _scale)
+CV_ALWAYS_INLINE void div_simd_impl(scale_tag_t s_tag, const v_float32& a1,
+                                    const v_float32& a2, const v_float32& a3,
+                                    const v_float32& a4, const float* in2x, uchar* outx,
+                                    const v_float32& v_scale, const v_float32& v_zero)
{
    constexpr int nlanes = v_uint8::nlanes;

-    if (length < nlanes)
-        return 0;
-
-    v_float32 scale = vx_setall_f32(static_cast<float>(_scale));
-    v_float32 v_zero = vx_setall_f32(0);
-
-    int x = 0;
-    for (;;)
-    {
-        for (; x <= length - nlanes; x += nlanes)
-        {
-            v_float32 a1 = vg_load_f32(&in1[x]);
-            v_float32 a2 = vg_load_f32(&in1[x + nlanes / 4]);
-            v_float32 a3 = vg_load_f32(&in1[x + nlanes / 2]);
-            v_float32 a4 = vg_load_f32(&in1[x + 3 * nlanes / 4]);
-
-            v_float32 div1 = vg_load_f32(&in2[x]);
-            v_float32 div2 = vg_load_f32(&in2[x + nlanes / 4]);
-            v_float32 div3 = vg_load_f32(&in2[x + nlanes / 2]);
-            v_float32 div4 = vg_load_f32(&in2[x + 3 * nlanes / 4]);
-
-            v_float32 r1 = div_op(t, a1, div1, scale);
-            v_float32 r2 = div_op(t, a2, div2, scale);
-            v_float32 r3 = div_op(t, a3, div3, scale);
-            v_float32 r4 = div_op(t, a4, div4, scale);
+    v_float32 div1 = vg_load_f32(in2x);
+    v_float32 div2 = vg_load_f32(&in2x[nlanes / 4]);
+    v_float32 div3 = vg_load_f32(&in2x[nlanes / 2]);
+    v_float32 div4 = vg_load_f32(&in2x[3 * nlanes / 4]);
+
+    v_float32 r1 = div_op(s_tag, a1, div1, v_scale);
+    v_float32 r2 = div_op(s_tag, a2, div2, v_scale);
+    v_float32 r3 = div_op(s_tag, a3, div3, v_scale);
+    v_float32 r4 = div_op(s_tag, a4, div4, v_scale);

    v_float32 sel1 = v_select((div1 == v_zero), v_zero, r1);
    v_float32 sel2 = v_select((div2 == v_zero), v_zero, r2);
@@ -443,17 +430,21 @@ CV_ALWAYS_INLINE int div_hal(scale_tag_t t, const float in1[], const float in2[]
    v_int32 res3 = v_round(sel3);
    v_int32 res4 = v_round(sel4);

-            vx_store(&out[x], v_pack_u(v_pack(res1, res2), v_pack(res3, res4)));
-        }
-        if (x < length)
-        {
-            x = length - nlanes;
-            continue;  // process one more time (unaligned tail)
-        }
-        break;
-    }
-    return x;
+    vx_store(outx, v_pack_u(v_pack(res1, res2), v_pack(res3, res4)));
+}
+
+template<typename scale_tag_t, typename SRC, typename Vtype>
+CV_ALWAYS_INLINE void div_hal(scale_tag_t s_tag, const SRC* in1x, const SRC* in2x, uchar* outx,
+                              const v_float32& v_scale, const Vtype& v_zero)
+{
+    constexpr int nlanes = v_uint8::nlanes;
+
+    v_float32 a1 = vg_load_f32(in1x);
+    v_float32 a2 = vg_load_f32(&in1x[nlanes / 4]);
+    v_float32 a3 = vg_load_f32(&in1x[nlanes / 2]);
+    v_float32 a4 = vg_load_f32(&in1x[3 * nlanes / 4]);
+
+    div_simd_impl(s_tag, a1, a2, a3, a4, in2x, outx, v_scale, v_zero);
}

//-------------------------------------------------------------------------------------------------
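Both the removed loops and the surviving driver below share Fluid's tail-handling idiom: when a partial vector remains, x is moved back to length - nlanes and one final full-width (overlapping) pass is run instead of a scalar remainder loop. This is safe because recomputing an element just writes the same value again. A scalar-level sketch of the idiom (illustrative, fixed widths):

    #include <vector>
    #include <cassert>

    int main()
    {
        const int nlanes = 8, length = 21;
        std::vector<int> out(length, 0);
        int x = 0;
        for (;;)
        {
            for (; x <= length - nlanes; x += nlanes)
                for (int i = 0; i < nlanes; ++i)   // one "vector" op
                    out[x + i] = 1;                // idempotent per element
            if (x < length)
            {
                x = length - nlanes;   // back up: redo elements [13, 21)
                continue;              // one more full-width pass
            }
            break;
        }
        assert(x == length && out[length - 1] == 1);
        return 0;
    }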
@@ -461,113 +452,117 @@ CV_ALWAYS_INLINE int div_hal(scale_tag_t t, const float in1[], const float in2[]
template<typename scale_tag_t, typename DST>
CV_ALWAYS_INLINE
typename std::enable_if<std::is_same<DST, short>::value ||
-                        std::is_same<DST, ushort>::value, int>::type
-div_hal(scale_tag_t t, const uchar in1[], const uchar in2[], DST out[], const int length, double _scale)
+                        std::is_same<DST, ushort>::value, void>::type
+div_simd_impl(scale_tag_t s_tag, const v_float32& a1, const v_float32& a2,
+              const uchar* in2x, DST* outx, const v_float32& v_scale,
+              const v_int16& v_zero)
{
-    constexpr int nlanes = vector_type_of_t<DST>::nlanes;
-
-    if (length < nlanes)
-        return 0;
-
-    v_float32 scale = vx_setall_f32(static_cast<float>(_scale));
-    v_int16 v_zero = vx_setall_s16(0);
-
-    int x = 0;
-    for (;;)
-    {
-        for (; x <= length - nlanes; x += nlanes)
-        {
-            v_float32 a1 = vg_load_f32(&in1[x]);
-            v_float32 a2 = vg_load_f32(&in1[x + nlanes / 2]);
-
-            v_int16 div = v_reinterpret_as_s16(vx_load_expand(&in2[x]));
+    v_int16 div = v_reinterpret_as_s16(vx_load_expand(in2x));

    v_float32 fdiv1 = v_cvt_f32(v_expand_low(div));
    v_float32 fdiv2 = v_cvt_f32(v_expand_high(div));

-            v_int32 r1 = v_round(div_op(t, a1, fdiv1, scale));
-            v_int32 r2 = v_round(div_op(t, a2, fdiv2, scale));
-
-            v_store_select(&out[x], div, v_zero, r1, r2);
-        }
-        if (x < length)
-        {
-            x = length - nlanes;
-            continue;  // process one more time (unaligned tail)
-        }
-        break;
-    }
-    return x;
+    v_int32 r1 = v_round(div_op(s_tag, a1, fdiv1, v_scale));
+    v_int32 r2 = v_round(div_op(s_tag, a2, fdiv2, v_scale));
+
+    v_store_select(outx, div, v_zero, r1, r2);
+}
+
+template<typename scale_tag_t, typename SRC, typename DST>
+CV_ALWAYS_INLINE
+typename std::enable_if<(std::is_same<SRC, short>::value && std::is_same<DST, ushort>::value) ||
+                        (std::is_same<SRC, ushort>::value && std::is_same<DST, ushort>::value) ||
+                        (std::is_same<SRC, short>::value && std::is_same<DST, short>::value) ||
+                        (std::is_same<SRC, ushort>::value && std::is_same<DST, short>::value), void>::type
+div_simd_impl(scale_tag_t s_tag, const v_float32& a1, const v_float32& a2,
+              const SRC* in2x, DST* outx, const v_float32& v_scale, const v_int16& v_zero)
+{
+    v_int16 div = v_reinterpret_as_s16(vx_load(in2x));
+
+    v_float32 fdiv1 = v_cvt_f32(v_expand_low(div));
+    v_float32 fdiv2 = v_cvt_f32(v_expand_high(div));
+
+    v_int32 r1 = v_round(div_op(s_tag, a1, fdiv1, v_scale));
+    v_int32 r2 = v_round(div_op(s_tag, a2, fdiv2, v_scale));
+
+    v_store_select(outx, div, v_zero, r1, r2);
}
+//-------------------------------------------------------------------------------------------------

template<typename scale_tag_t, typename DST>
CV_ALWAYS_INLINE
typename std::enable_if<std::is_same<DST, short>::value ||
-                        std::is_same<DST, ushort>::value, int>::type
-div_hal(scale_tag_t t, const float in1[], const float in2[], DST out[], const int length, double _scale)
+                        std::is_same<DST, ushort>::value, void>::type
+div_simd_impl(scale_tag_t s_tag, const v_float32& a1, const v_float32& a2,
+              const float* in2x, DST* outx, const v_float32& v_scale,
+              const v_float32& v_zero)
{
    constexpr int nlanes = vector_type_of_t<DST>::nlanes;
-
-    if (length < nlanes)
-        return 0;
-
-    v_float32 scale = vx_setall_f32(static_cast<float>(_scale));
-    v_float32 v_zero = vx_setall_f32(0);
-
-    int x = 0;
-    for (;;)
-    {
-        for (; x <= length - nlanes; x += nlanes)
-        {
-            v_float32 a1 = vg_load_f32(&in1[x]);
-            v_float32 a2 = vg_load_f32(&in1[x + nlanes / 2]);
-
-            v_float32 fdiv1 = vg_load_f32(&in2[x]);
-            v_float32 fdiv2 = vg_load_f32(&in2[x + nlanes / 2]);
-
-            v_float32 r1 = div_op(t, a1, fdiv1, scale);
-            v_float32 r2 = div_op(t, a2, fdiv2, scale);
+
+    v_float32 fdiv1 = vg_load_f32(in2x);
+    v_float32 fdiv2 = vg_load_f32(&in2x[nlanes / 2]);
+
+    v_float32 r1 = div_op(s_tag, a1, fdiv1, v_scale);
+    v_float32 r2 = div_op(s_tag, a2, fdiv2, v_scale);

    v_int32 res1 = v_round(v_select((fdiv1 == v_zero), v_zero, r1));
    v_int32 res2 = v_round(v_select((fdiv2 == v_zero), v_zero, r2));

-            v_store_i16(&out[x], res1, res2);
-        }
-        if (x < length)
-        {
-            x = length - nlanes;
-            continue;  // process one more time (unaligned tail)
-        }
-        break;
-    }
-    return x;
+    v_store_i16(outx, res1, res2);
+}
+
+template<typename scale_tag_t, typename SRC, typename DST, typename Vtype>
+CV_ALWAYS_INLINE
+typename std::enable_if<std::is_same<DST, short>::value ||
+                        std::is_same<DST, ushort>::value, void>::type
+div_hal(scale_tag_t s_tag, const SRC* in1x, const SRC* in2x, DST* outx,
+        const v_float32& v_scale, const Vtype& v_zero)
+{
+    constexpr int nlanes = vector_type_of_t<DST>::nlanes;
+
+    v_float32 a1 = vg_load_f32(in1x);
+    v_float32 a2 = vg_load_f32(&in1x[nlanes / 2]);
+
+    div_simd_impl(s_tag, a1, a2, in2x, outx, v_scale, v_zero);
}

//-------------------------------------------------------------------------------------------------

template<typename scale_tag_t, typename SRC>
-CV_ALWAYS_INLINE int div_hal(scale_tag_t t, const SRC in1[], const SRC in2[], float out[],
-                             const int length, double _scale)
+CV_ALWAYS_INLINE void div_simd_impl(scale_tag_t s_tag, const v_float32& a1, const SRC* in2x,
+                                    float* outx, const v_float32& v_scale)
{
-    constexpr int nlanes = v_float32::nlanes;
+    v_float32 b1 = vg_load_f32(in2x);
+
+    vx_store(outx, div_op(s_tag, a1, b1, v_scale));
+}
+
+template<typename scale_tag_t, typename SRC, typename Tvec>
+CV_ALWAYS_INLINE void div_hal(scale_tag_t s_tag, const SRC* in1x, const SRC* in2x, float* outx,
+                              const v_float32& v_scale, const Tvec&)
+{
+    v_float32 a1 = vg_load_f32(in1x);
+
+    div_simd_impl(s_tag, a1, in2x, outx, v_scale);
+}
+
+//-------------------------------------------------------------------------------------------------
+
+template<typename scale_tag_t, typename SRC, typename DST>
+CV_ALWAYS_INLINE int div_simd_common(scale_tag_t s_tag, const SRC in1[], const SRC in2[],
+                                     DST out[], const int length, float scale)
+{
+    constexpr int nlanes = vector_type_of_t<DST>::nlanes;

    if (length < nlanes)
        return 0;

-    v_float32 scale = vx_setall_f32(static_cast<float>(_scale));
+    const zero_vec_type_of_t<SRC> v_zero = vx_setall<typename zero_vec_type_of_t<SRC>::lane_type>(0);
+    v_float32 v_scale = vx_setall_f32(scale);

    int x = 0;
    for (;;)
    {
        for (; x <= length - nlanes; x += nlanes)
        {
-            v_float32 a1 = vg_load_f32(&in1[x]);
-            v_float32 b1 = vg_load_f32(&in2[x]);
-
-            vx_store(&out[x], div_op(t, a1, b1, scale));
+            div_hal(s_tag, &in1[x], &in2[x], &out[x], v_scale, v_zero);
        }
        if (x < length)
@@ -580,28 +575,6 @@ CV_ALWAYS_INLINE int div_hal(scale_tag_t t, const SRC in1[], const SRC in2[], fl
    return x;
}

-//-------------------------------------------------------------------------------------------------
-
-template<typename scale_tag_t>
-CV_ALWAYS_INLINE int div_hal(scale_tag_t, const uchar in1[], const uchar in2[], uchar out[],
-                             const int length, double scale)
-{
-    hal::div8u(in1, static_cast<size_t>(length), in2, static_cast<size_t>(length),
-               out, static_cast<size_t>(length), length, 1, &scale);
-    return length;
-}
-
-template<typename scale_tag_t>
-CV_ALWAYS_INLINE int div_hal(scale_tag_t, const short in1[], const short in2[], short out[],
-                             const int length, double scale)
-{
-    hal::div16s(in1, static_cast<size_t>(length), in2, static_cast<size_t>(length),
-                out, static_cast<size_t>(length), length, 1, &scale);
-    return length;
-}
-
-//-------------------------------------------------------------------------------------------------
-
#define DIV_SIMD(SRC, DST)                                  \
int div_simd(const SRC in1[], const SRC in2[], DST out[],   \
             const int length, double _scale)               \
@@ -610,13 +583,11 @@ int div_simd(const SRC in1[], const SRC in2[], DST out[],
    float fscale = static_cast<float>(_scale);                               \
    if (std::fabs(fscale - 1.0f) <= FLT_EPSILON)                             \
    {                                                                        \
-       not_scale_tag t;                                                     \
-       x = div_hal(t, in1, in2, out, length, _scale);                       \
+       x = div_simd_common(not_scale_tag{}, in1, in2, out, length, fscale); \
    }                                                                        \
    else                                                                     \
    {                                                                        \
-       scale_tag t;                                                         \
-       x = div_hal(t, in1, in2, out, length, _scale);                       \
+       x = div_simd_common(scale_tag{}, in1, in2, out, length, fscale);     \
    }                                                                        \
    return x;                                                                \
}
@@ -1976,14 +1947,432 @@ ABSDIFFC_SIMD(float)

#undef ABSDIFFC_SIMD

//-------------------------------------------------------------------------------------------------
template<typename scale_tag_t, typename SRC, typename DST, typename Tvec>
CV_ALWAYS_INLINE
typename std::enable_if<std::is_same<DST, short>::value ||
std::is_same<DST, ushort>::value, void>::type
divrc_simd_common_impl(scale_tag_t s_tag, const SRC* inx,
const v_float32& v_scalar, DST* outx,
const v_float32& v_scale, const Tvec& v_zero)
{
div_simd_impl(s_tag, v_scalar, v_scalar, inx, outx, v_scale, v_zero);
}
template<typename scale_tag_t, typename SRC, typename DST, typename Tvec>
CV_ALWAYS_INLINE
typename std::enable_if<std::is_same<DST, uchar>::value, void>::type
divrc_simd_common_impl(scale_tag_t s_tag, const SRC* inx,
const v_float32& v_scalar, DST* outx,
const v_float32& v_scale, const Tvec& v_zero)
{
div_simd_impl(s_tag, v_scalar, v_scalar, v_scalar, v_scalar, inx, outx, v_scale, v_zero);
}
template<typename scale_tag_t, typename SRC, typename DST, typename Tvec>
CV_ALWAYS_INLINE
typename std::enable_if<std::is_same<DST, float>::value, void>::type
divrc_simd_common_impl(scale_tag_t s_tag, const SRC* inx,
const v_float32& v_scalar, DST* outx,
const v_float32& v_scale, const Tvec&)
{
div_simd_impl(s_tag, v_scalar, inx, outx, v_scale);
}
template<typename scale_tag_t, typename SRC, typename DST>
CV_ALWAYS_INLINE int divrc_simd_common(scale_tag_t s_tag, const SRC in[],
const float scalar[], DST out[],
const int length, const float scale)
{
constexpr int nlanes = vector_type_of_t<DST>::nlanes;
if (length < nlanes)
return 0;
v_float32 v_scalar = vx_load(scalar);
v_float32 v_scale = vx_setall_f32(scale);
zero_vec_type_of_t<SRC> v_zero =
vx_setall<typename zero_vec_type_of_t<SRC>::lane_type>(0);
int x = 0;
for (;;)
{
for (; x <= length - nlanes; x += nlanes)
{
divrc_simd_common_impl(s_tag, &in[x], v_scalar, &out[x], v_scale, v_zero);
}
if (x < length)
{
x = length - nlanes;
continue; // process unaligned tail
}
break;
}
return x;
}
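Note the reuse here: divrc_simd_common drives the same div_simd_impl kernels introduced for the Div rewrite above, with the pre-broadcast scalar vector standing where the per-pixel numerator loads would be. A scalar-level sketch of that substitution (names illustrative):

    #include <cassert>

    // Shared per-element core: out = scale * num / den, with den == 0 -> 0.
    static float div_core(float num, float den, float scale)
    {
        return (den == 0.f) ? 0.f : scale * num / den;
    }

    int main()
    {
        float a[4] = {8.f, 8.f, 8.f, 8.f}, b[4] = {2.f, 0.f, 4.f, 8.f}, out[4];
        for (int i = 0; i < 4; ++i)
            out[i] = div_core(a[i], b[i], 1.f);    // matrix / matrix (Div)
        const float scalar = 8.f;                  // broadcast numerator
        for (int i = 0; i < 4; ++i)
            out[i] = div_core(scalar, b[i], 1.f);  // scalar / matrix (DivRC)
        assert(out[0] == 4.f && out[1] == 0.f);
        return 0;
    }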
//-------------------------------------------------------------------------------------------------
template<typename scale_tag_t>
CV_ALWAYS_INLINE void divrc_simd_c3_calc(scale_tag_t s_tag, const uchar* inx, uchar* outx,
const v_float32& s1, const v_float32& s2,
const v_float32& s3, const v_float32& v_scale,
const v_uint8& v_zero)
{
v_uint8 div = vx_load(inx);
v_uint8 v_mask = (div == v_zero);
v_uint16 div1 = v_expand_low(div);
v_uint16 div2 = v_expand_high(div);
v_float32 fdiv1 = v_cvt_f32(v_reinterpret_as_s32(v_expand_low(div1)));
v_float32 fdiv2 = v_cvt_f32(v_reinterpret_as_s32(v_expand_high(div1)));
v_float32 fdiv3 = v_cvt_f32(v_reinterpret_as_s32(v_expand_low(div2)));
v_float32 fdiv4 = v_cvt_f32(v_reinterpret_as_s32(v_expand_high(div2)));
vx_store(outx,
v_select(v_mask, v_zero, v_pack_u(v_pack(v_round(div_op(s_tag, s1, fdiv1, v_scale)),
v_round(div_op(s_tag, s2, fdiv2, v_scale))),
v_pack(v_round(div_op(s_tag, s3, fdiv3, v_scale)),
v_round(div_op(s_tag, s1, fdiv4, v_scale))))));
}
template<typename scale_tag_t, typename SRC>
CV_ALWAYS_INLINE
typename std::enable_if<std::is_same<SRC, short>::value ||
std::is_same<SRC, ushort>::value, void>::type
divrc_simd_c3_calc(scale_tag_t s_tag, const SRC* inx, uchar* outx,
const v_float32& s1, const v_float32& s2,
const v_float32& s3, const v_float32& v_scale,
const v_int16& v_zero)
{
constexpr int nlanes = v_uint8::nlanes;
v_int16 div1 = v_reinterpret_as_s16(vx_load(inx));
v_int16 div2 = v_reinterpret_as_s16(vx_load(&inx[nlanes / 2]));
v_int16 v_mask1 = (div1 == v_zero);
v_int16 v_mask2 = (div2 == v_zero);
v_float32 fdiv1 = v_cvt_f32(v_expand_low(div1));
v_float32 fdiv2 = v_cvt_f32(v_expand_high(div1));
v_float32 fdiv3 = v_cvt_f32(v_expand_low(div2));
v_float32 fdiv4 = v_cvt_f32(v_expand_high(div2));
vx_store(outx,
v_pack_u(v_select(v_mask1, v_zero,
v_pack(v_round(div_op(s_tag, s1, fdiv1, v_scale)),
v_round(div_op(s_tag, s2, fdiv2, v_scale)))),
v_select(v_mask2, v_zero,
v_pack(v_round(div_op(s_tag, s3, fdiv3, v_scale)),
v_round(div_op(s_tag, s1, fdiv4, v_scale))))));
}
template<typename scale_tag_t>
CV_ALWAYS_INLINE void divrc_simd_c3_calc(scale_tag_t s_tag, const float* inx, uchar* outx,
const v_float32& s1, const v_float32& s2,
const v_float32& s3, const v_float32& v_scale,
const v_float32& v_zero)
{
constexpr int nlanes = v_uint8::nlanes;
v_float32 fdiv1 = vg_load_f32(inx);
v_float32 fdiv2 = vg_load_f32(&inx[nlanes / 4]);
v_float32 fdiv3 = vg_load_f32(&inx[nlanes / 2]);
v_float32 fdiv4 = vg_load_f32(&inx[3 * nlanes / 4]);
v_float32 v_mask1 = (fdiv1 == v_zero);
v_float32 v_mask2 = (fdiv2 == v_zero);
v_float32 v_mask3 = (fdiv3 == v_zero);
v_float32 v_mask4 = (fdiv4 == v_zero);
vx_store(outx,
v_pack_u(v_pack(v_round(v_select(v_mask1, v_zero, div_op(s_tag, s1, fdiv1, v_scale))),
v_round(v_select(v_mask2, v_zero, div_op(s_tag, s2, fdiv2, v_scale)))),
v_pack(v_round(v_select(v_mask3, v_zero, div_op(s_tag, s3, fdiv3, v_scale))),
v_round(v_select(v_mask4, v_zero, div_op(s_tag, s1, fdiv4, v_scale))))));
}
template<typename scale_tag_t, typename SRC>
CV_ALWAYS_INLINE int divrc_simd_c3_impl(scale_tag_t s_tag, const SRC in[], uchar out[],
const v_float32& s1, const v_float32& s2,
const v_float32& s3, const v_float32& v_scale,
const int length, const int nlanes, const int lanes)
{
univ_zero_vec_type_of_t<SRC> v_zero =
vx_setall<typename univ_zero_vec_type_of_t<SRC>::lane_type>(0);
int x = 0;
for (;;)
{
for (; x <= length - lanes; x += lanes)
{
divrc_simd_c3_calc(s_tag, &in[x], &out[x], s1, s2, s3, v_scale, v_zero);
divrc_simd_c3_calc(s_tag, &in[x + nlanes], &out[x + nlanes], s2, s3, s1, v_scale, v_zero);
divrc_simd_c3_calc(s_tag, &in[x + 2 * nlanes], &out[x + 2 * nlanes], s3, s1, s2, v_scale, v_zero);
}
if (x < length)
{
x = length - lanes;
continue; // process unaligned tail
}
break;
}
return x;
}
//---------------------------------------------------------------------------------------
template<typename scale_tag_t, typename DST>
CV_ALWAYS_INLINE
typename std::enable_if<std::is_same<DST, short>::value ||
std::is_same<DST, ushort>::value, void>::type
divrc_simd_c3_calc(scale_tag_t s_tag, const uchar* inx, DST* outx,
const v_float32& s1, const v_float32& s2,
const v_float32& s3, const v_float32& v_scale,
const v_int16& v_zero)
{
constexpr int nlanes = vector_type_of_t<DST>::nlanes;
v_uint8 div = vx_load(inx);
v_int16 div1 = v_reinterpret_as_s16(v_expand_low(div));
v_int16 div2 = v_reinterpret_as_s16(v_expand_high(div));
v_int16 div3 = v_reinterpret_as_s16(vx_load_expand(&inx[2 * nlanes]));
v_float32 fdiv1 = v_cvt_f32(v_expand_low(div1));
v_float32 fdiv2 = v_cvt_f32(v_expand_high(div1));
v_float32 fdiv3 = v_cvt_f32(v_expand_low(div2));
v_float32 fdiv4 = v_cvt_f32(v_expand_high(div2));
v_float32 fdiv5 = v_cvt_f32(v_expand_low(div3));
v_float32 fdiv6 = v_cvt_f32(v_expand_high(div3));
v_store_select(outx, div1, v_zero, v_round(div_op(s_tag, s1, fdiv1, v_scale)),
v_round(div_op(s_tag, s2, fdiv2, v_scale)));
v_store_select(&outx[nlanes], div2, v_zero, v_round(div_op(s_tag, s3, fdiv3, v_scale)),
v_round(div_op(s_tag, s1, fdiv4, v_scale)));
v_store_select(&outx[2*nlanes], div3, v_zero, v_round(div_op(s_tag, s2, fdiv5, v_scale)),
v_round(div_op(s_tag, s3, fdiv6, v_scale)));
}
template<typename scale_tag_t, typename SRC, typename DST>
CV_ALWAYS_INLINE
typename std::enable_if<(std::is_same<SRC, short>::value && std::is_same<DST, ushort>::value) ||
(std::is_same<SRC, ushort>::value && std::is_same<DST, ushort>::value) ||
(std::is_same<SRC, short>::value && std::is_same<DST, short>::value) ||
(std::is_same<SRC, ushort>::value && std::is_same<DST, short>::value), void>::type
divrc_simd_c3_calc(scale_tag_t s_tag, const SRC* inx, DST* outx,
const v_float32& s1, const v_float32& s2,
const v_float32& s3, const v_float32& v_scale,
const v_int16& v_zero)
{
constexpr int nlanes = vector_type_of_t<DST>::nlanes;
v_int16 div1 = v_reinterpret_as_s16(vx_load(inx));
v_int16 div2 = v_reinterpret_as_s16(vx_load(&inx[nlanes]));
v_int16 div3 = v_reinterpret_as_s16(vx_load(&inx[2*nlanes]));
v_float32 fdiv1 = v_cvt_f32(v_expand_low(div1));
v_float32 fdiv2 = v_cvt_f32(v_expand_high(div1));
v_float32 fdiv3 = v_cvt_f32(v_expand_low(div2));
v_float32 fdiv4 = v_cvt_f32(v_expand_high(div2));
v_float32 fdiv5 = v_cvt_f32(v_expand_low(div3));
v_float32 fdiv6 = v_cvt_f32(v_expand_high(div3));
v_store_select(outx, div1, v_zero, v_round(div_op(s_tag, s1, fdiv1, v_scale)),
v_round(div_op(s_tag, s2, fdiv2, v_scale)));
v_store_select(&outx[nlanes], div2, v_zero, v_round(div_op(s_tag, s3, fdiv3, v_scale)),
v_round(div_op(s_tag, s1, fdiv4, v_scale)));
v_store_select(&outx[2*nlanes], div3, v_zero, v_round(div_op(s_tag, s2, fdiv5, v_scale)),
v_round(div_op(s_tag, s3, fdiv6, v_scale)));
}
template<typename scale_tag_t, typename DST>
CV_ALWAYS_INLINE
typename std::enable_if<std::is_same<DST, short>::value ||
std::is_same<DST, ushort>::value, void>::type
divrc_simd_c3_calc(scale_tag_t s_tag, const float* inx, DST* outx,
const v_float32& s1, const v_float32& s2,
const v_float32& s3, const v_float32& v_scale,
const v_float32& v_zero)
{
constexpr int nlanes = vector_type_of_t<DST>::nlanes;
v_float32 fdiv1 = vg_load_f32(inx);
v_float32 fdiv2 = vg_load_f32(&inx[nlanes/2]);
v_float32 fdiv3 = vg_load_f32(&inx[nlanes]);
v_float32 fdiv4 = vg_load_f32(&inx[3*nlanes/2]);
v_float32 fdiv5 = vg_load_f32(&inx[2*nlanes]);
v_float32 fdiv6 = vg_load_f32(&inx[5*nlanes/2]);
v_store_i16(outx, v_round(v_select(fdiv1 == v_zero, v_zero, div_op(s_tag, s1, fdiv1, v_scale))),
v_round(v_select(fdiv2 == v_zero, v_zero, div_op(s_tag, s2, fdiv2, v_scale))));
v_store_i16(&outx[nlanes], v_round(v_select(fdiv3 == v_zero, v_zero, div_op(s_tag, s3, fdiv3, v_scale))),
v_round(v_select(fdiv4 == v_zero, v_zero, div_op(s_tag, s1, fdiv4, v_scale))));
v_store_i16(&outx[2*nlanes], v_round(v_select(fdiv5 == v_zero, v_zero, div_op(s_tag, s2, fdiv5, v_scale))),
v_round(v_select(fdiv6 == v_zero, v_zero, div_op(s_tag, s3, fdiv6, v_scale))));
}
template<typename scale_tag_t, typename SRC, typename DST>
CV_ALWAYS_INLINE
typename std::enable_if<std::is_same<DST, short>::value ||
std::is_same<DST, ushort>::value, int>::type
divrc_simd_c3_impl(scale_tag_t s_tag, const SRC in[], DST out[], const v_float32& s1,
const v_float32& s2, const v_float32& s3,
const v_float32& v_scale, const int length,
const int, const int lanes)
{
zero_vec_type_of_t<SRC> v_zero =
vx_setall<typename zero_vec_type_of_t<SRC>::lane_type>(0);
int x = 0;
for (;;)
{
for (; x <= length - lanes; x += lanes)
{
divrc_simd_c3_calc(s_tag, &in[x], &out[x], s1, s2, s3, v_scale, v_zero);
}
if (x < length)
{
x = length - lanes;
continue; // process unaligned tail
}
break;
}
return x;
}
//---------------------------------------------------------------------------------------
template<typename scale_tag_t, typename SRC>
CV_ALWAYS_INLINE int divrc_simd_c3_impl(scale_tag_t s_tag, const SRC* in, float* out,
const v_float32& s1, const v_float32& s2,
const v_float32& s3, const v_float32& v_scale,
const int length, const int nlanes, const int lanes)
{
int x = 0;
for (;;)
{
for (; x <= length - lanes; x += lanes)
{
v_float32 div1 = vg_load_f32(&in[x]);
v_float32 div2 = vg_load_f32(&in[x + nlanes]);
v_float32 div3 = vg_load_f32(&in[x + 2*nlanes]);
vx_store(&out[x], div_op(s_tag, s1, div1, v_scale));
vx_store(&out[x + nlanes], div_op(s_tag, s2, div2, v_scale));
vx_store(&out[x + 2*nlanes], div_op(s_tag, s3, div3, v_scale));
}
if (x < length)
{
x = length - lanes;
continue; // process unaligned tail
}
break;
}
return x;
}
//-------------------------------------------------------------------------------------------------
template<typename scale_tag_t, typename SRC, typename DST>
CV_ALWAYS_INLINE int divrc_simd_c3(scale_tag_t s_tag, const SRC in[],
const float scalar[], DST out[],
const int length, const float scale)
{
constexpr int chan = 3;
constexpr int nlanes = vector_type_of_t<DST>::nlanes;
constexpr int lanes = chan * nlanes;
if (length < lanes)
return 0;
v_float32 v_scale = vx_setall_f32(scale);
v_float32 s1 = vx_load(scalar);
#if CV_SIMD_WIDTH == 32
v_float32 s2 = vx_load(&scalar[2]);
v_float32 s3 = vx_load(&scalar[1]);
#else
v_float32 s2 = vx_load(&scalar[1]);
v_float32 s3 = vx_load(&scalar[2]);
#endif
return divrc_simd_c3_impl(s_tag, in, out, s1, s2, s3, v_scale, length, nlanes, lanes);
}
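The s1/s2/s3 juggling above exists because the vector width is never a multiple of 3: element x of an interleaved 3-channel row has channel x % 3, so each consecutive vector starts (nlanes % 3) channels further into the repeating per-channel scalar pattern, and the three patterns cycle with period 3. With 4 float lanes the patterns rotate by one channel per vector; with 8 lanes (CV_SIMD_WIDTH == 32) they rotate by two, which is why s2 and s3 swap their load offsets under that #if. A small sketch that prints the lane-to-channel mapping (illustrative, plain C++):

    #include <cstdio>
    #include <initializer_list>

    int main()
    {
        for (int nlanes : {4, 8})   // SSE-width vs AVX2-width float vectors
        {
            std::printf("nlanes=%d:\n", nlanes);
            for (int vec = 0; vec < 3; ++vec)
            {
                std::printf("  vector %d channels:", vec);
                for (int lane = 0; lane < nlanes; ++lane)
                    std::printf(" %d", (vec * nlanes + lane) % 3);
                std::printf("\n");
            }
        }
        return 0;
    }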
#define DIVRC_SIMD(SRC, DST) \
int divrc_simd(const float scalar[], const SRC in[], DST out[], \
const int length, const int chan, const float scale) \
{ \
switch (chan) \
{ \
case 1: \
case 2: \
case 4: \
{ \
if (std::fabs(scale - 1.0f) <= FLT_EPSILON) \
{ \
return divrc_simd_common(not_scale_tag{}, in, scalar, \
out, length, scale); \
} \
else \
{ \
return divrc_simd_common(scale_tag{}, in, scalar, out, \
length, scale); \
} \
} \
case 3: \
{ \
if (std::fabs(scale - 1.0f) <= FLT_EPSILON) \
{ \
return divrc_simd_c3(not_scale_tag{}, in, scalar, \
out, length, scale); \
} \
else \
{ \
return divrc_simd_c3(scale_tag{}, in, scalar, out, \
length, scale); \
} \
} \
default: \
GAPI_Assert(chan <= 4); \
break; \
} \
return 0; \
}
DIVRC_SIMD(uchar, uchar)
DIVRC_SIMD(ushort, uchar)
DIVRC_SIMD(short, uchar)
DIVRC_SIMD(float, uchar)
DIVRC_SIMD(short, short)
DIVRC_SIMD(ushort, short)
DIVRC_SIMD(uchar, short)
DIVRC_SIMD(float, short)
DIVRC_SIMD(ushort, ushort)
DIVRC_SIMD(uchar, ushort)
DIVRC_SIMD(short, ushort)
DIVRC_SIMD(float, ushort)
DIVRC_SIMD(uchar, float)
DIVRC_SIMD(ushort, float)
DIVRC_SIMD(short, float)
DIVRC_SIMD(float, float)
#undef DIVRC_SIMD
//-------------------------
//
// Fluid kernels: Split3
//
//-------------------------

-int split3_simd(const uchar in[], uchar out1[], uchar out2[],
-                uchar out3[], const int width)
+int split3_simd(const uchar in[], uchar out1[], uchar out2[], uchar out3[],
+                const int width)
{
    constexpr int nlanes = v_uint8::nlanes;
    if (width < nlanes)
