diff --git a/cmake/checks/cpu_rvv.cpp b/cmake/checks/cpu_rvv.cpp index a3eab2abc4..684b2ecbeb 100644 --- a/cmake/checks/cpu_rvv.cpp +++ b/cmake/checks/cpu_rvv.cpp @@ -9,7 +9,7 @@ int test() { const float src[] = { 0.0f, 0.0f, 0.0f, 0.0f }; - vfloat32m1_t val = vle32_v_f32m1((const float*)(src)); + vfloat32m1_t val = vle32_v_f32m1((const float*)(src), 4); return (int)vfmv_f_s_f32m1_f32(val); } #else diff --git a/modules/core/include/opencv2/core/hal/intrin_rvv.hpp b/modules/core/include/opencv2/core/hal/intrin_rvv.hpp index cb2140df58..4a3455b073 100644 --- a/modules/core/include/opencv2/core/hal/intrin_rvv.hpp +++ b/modules/core/include/opencv2/core/hal/intrin_rvv.hpp @@ -151,12 +151,14 @@ struct vint8mf4_t }; #define OPENCV_HAL_IMPL_RVV_NATIVE_LOADSTORE_MF2(_Tpvec, _Tp, suffix, width, n) \ -inline _Tpvec vle##width##_v_##suffix##mf2(const _Tp* ptr) \ +inline _Tpvec vle##width##_v_##suffix##mf2(const _Tp* ptr, size_t vl) \ { \ + CV_UNUSED(vl); \ return _Tpvec(ptr); \ } \ -inline void vse##width##_v_##suffix##mf2(_Tp* ptr, _Tpvec v) \ +inline void vse##width##_v_##suffix##mf2(_Tp* ptr, _Tpvec v, size_t vl) \ { \ + CV_UNUSED(vl); \ for (int i = 0; i < n; ++i) \ { \ ptr[i] = v.val[i]; \ @@ -176,15 +178,14 @@ OPENCV_HAL_IMPL_RVV_NATIVE_LOADSTORE_MF2(vfloat64mf2_t, float64_t, f64, 64, 1) #define OPENCV_HAL_IMPL_RVV_NATIVE_WCVT(_Tpwvec, _Tpvec, _wTp, wcvt, suffix, width, n) \ -inline _Tpwvec wcvt (_Tpvec v) \ +inline _Tpwvec wcvt (_Tpvec v, size_t vl) \ { \ _wTp tmp[n]; \ for (int i = 0; i < n; ++i) \ { \ tmp[i] = (_wTp)v.val[i]; \ } \ - vsetvlmax_e##width##m1(); \ - return vle##width##_v_##suffix##m1(tmp); \ + return vle##width##_v_##suffix##m1(tmp, vl); \ } OPENCV_HAL_IMPL_RVV_NATIVE_WCVT(vuint16m1_t, vuint8mf2_t, ushort, vwcvtu_x_x_v_u16m1, u16, 16, 8) @@ -194,32 +195,34 @@ OPENCV_HAL_IMPL_RVV_NATIVE_WCVT(vint32m1_t, vint16mf2_t, int, vwcvt_x_x_v_i32m1, OPENCV_HAL_IMPL_RVV_NATIVE_WCVT(vuint64m1_t, vuint32mf2_t, uint64, vwcvtu_x_x_v_u64m1, u64, 64, 2) OPENCV_HAL_IMPL_RVV_NATIVE_WCVT(vint64m1_t, vint32mf2_t, int64, vwcvt_x_x_v_i64m1, i64, 64, 2) -inline vuint8mf4_t vle8_v_u8mf4 (const uint8_t *base) +inline vuint8mf4_t vle8_v_u8mf4 (const uint8_t *base, size_t vl) { + CV_UNUSED(vl); return vuint8mf4_t(base); } -inline vint8mf4_t vle8_v_i8mf4 (const int8_t *base) +inline vint8mf4_t vle8_v_i8mf4 (const int8_t *base, size_t vl) { + CV_UNUSED(vl); return vint8mf4_t(base); } -inline vuint16mf2_t vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src) +inline vuint16mf2_t vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) { ushort tmp[4]; for (int i = 0; i < 4; ++i) { tmp[i] = (ushort)src.val[i]; } - return vle16_v_u16mf2(tmp); + return vle16_v_u16mf2(tmp, vl); } -inline vint16mf2_t vwcvt_x_x_v_i16mf2 (vint8mf4_t src) +inline vint16mf2_t vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) { short tmp[4]; for (int i = 0; i < 4; ++i) { tmp[i] = (short)src.val[i]; } - return vle16_v_i16mf2(tmp); + return vle16_v_i16mf2(tmp, vl); } //////////// Types //////////// @@ -232,8 +235,7 @@ struct v_uint8x16 v_uint8x16() {} explicit v_uint8x16(vuint8m1_t v) { - vsetvlmax_e8m1(); - vse8_v_u8m1(val, v); + vse8_v_u8m1(val, v, nlanes); } v_uint8x16(uchar v0, uchar v1, uchar v2, uchar v3, uchar v4, uchar v5, uchar v6, uchar v7, uchar v8, uchar v9, uchar v10, uchar v11, uchar v12, uchar v13, uchar v14, uchar v15) @@ -246,8 +248,7 @@ struct v_uint8x16 } operator vuint8m1_t() const { - vsetvlmax_e8m1(); - return vle8_v_u8m1(val); + return vle8_v_u8m1(val, nlanes); } uchar get0() const { @@ -265,8 +266,7 @@ struct v_int8x16 v_int8x16() {} 
explicit v_int8x16(vint8m1_t v) { - vsetvlmax_e8m1(); - vse8_v_i8m1(val, v); + vse8_v_i8m1(val, v, nlanes); } v_int8x16(schar v0, schar v1, schar v2, schar v3, schar v4, schar v5, schar v6, schar v7, schar v8, schar v9, schar v10, schar v11, schar v12, schar v13, schar v14, schar v15) @@ -279,8 +279,7 @@ struct v_int8x16 } operator vint8m1_t() const { - vsetvlmax_e8m1(); - return vle8_v_i8m1(val); + return vle8_v_i8m1(val, nlanes); } schar get0() const { @@ -298,8 +297,7 @@ struct v_uint16x8 v_uint16x8() {} explicit v_uint16x8(vuint16m1_t v) { - vsetvlmax_e16m1(); - vse16_v_u16m1(val, v); + vse16_v_u16m1(val, v, nlanes); } v_uint16x8(ushort v0, ushort v1, ushort v2, ushort v3, ushort v4, ushort v5, ushort v6, ushort v7) { @@ -311,8 +309,7 @@ struct v_uint16x8 } operator vuint16m1_t() const { - vsetvlmax_e16m1(); - return vle16_v_u16m1(val); + return vle16_v_u16m1(val, nlanes); } ushort get0() const { @@ -330,8 +327,7 @@ struct v_int16x8 v_int16x8() {} explicit v_int16x8(vint16m1_t v) { - vsetvlmax_e16m1(); - vse16_v_i16m1(val, v); + vse16_v_i16m1(val, v, nlanes); } v_int16x8(short v0, short v1, short v2, short v3, short v4, short v5, short v6, short v7) { @@ -343,8 +339,7 @@ struct v_int16x8 } operator vint16m1_t() const { - vsetvlmax_e16m1(); - return vle16_v_i16m1(val); + return vle16_v_i16m1(val, nlanes); } short get0() const { @@ -362,8 +357,7 @@ struct v_uint32x4 v_uint32x4() {} explicit v_uint32x4(vuint32m1_t v) { - vsetvlmax_e32m1(); - vse32_v_u32m1(val, v); + vse32_v_u32m1(val, v, nlanes); } v_uint32x4(unsigned v0, unsigned v1, unsigned v2, unsigned v3) { @@ -375,8 +369,7 @@ struct v_uint32x4 } operator vuint32m1_t() const { - vsetvlmax_e32m1(); - return vle32_v_u32m1(val); + return vle32_v_u32m1(val, nlanes); } unsigned get0() const { @@ -394,8 +387,7 @@ struct v_int32x4 v_int32x4() {} explicit v_int32x4(vint32m1_t v) { - vsetvlmax_e32m1(); - vse32_v_i32m1(val, v); + vse32_v_i32m1(val, v, nlanes); } v_int32x4(int v0, int v1, int v2, int v3) { @@ -407,8 +399,7 @@ struct v_int32x4 } operator vint32m1_t() const { - vsetvlmax_e32m1(); - return vle32_v_i32m1(val); + return vle32_v_i32m1(val, nlanes); } int get0() const { @@ -425,8 +416,7 @@ struct v_float32x4 v_float32x4() {} explicit v_float32x4(vfloat32m1_t v) { - vsetvlmax_e32m1(); - vse32_v_f32m1(val, v); + vse32_v_f32m1(val, v, nlanes); } v_float32x4(float v0, float v1, float v2, float v3) { @@ -438,8 +428,7 @@ struct v_float32x4 } operator vfloat32m1_t() const { - vsetvlmax_e32m1(); - return vle32_v_f32m1(val); + return vle32_v_f32m1(val, nlanes); } float get0() const { @@ -456,8 +445,7 @@ struct v_uint64x2 v_uint64x2() {} explicit v_uint64x2(vuint64m1_t v) { - vsetvlmax_e64m1(); - vse64_v_u64m1(val, v); + vse64_v_u64m1(val, v, nlanes); } v_uint64x2(uint64 v0, uint64 v1) { @@ -469,8 +457,7 @@ struct v_uint64x2 } operator vuint64m1_t() const { - vsetvlmax_e64m1(); - return vle64_v_u64m1(val); + return vle64_v_u64m1(val, nlanes); } uint64 get0() const { @@ -488,8 +475,7 @@ struct v_int64x2 v_int64x2() {} explicit v_int64x2(vint64m1_t v) { - vsetvlmax_e64m1(); - vse64_v_i64m1(val, v); + vse64_v_i64m1(val, v, nlanes); } v_int64x2(int64 v0, int64 v1) { @@ -501,8 +487,7 @@ struct v_int64x2 } operator vint64m1_t() const { - vsetvlmax_e64m1(); - return vle64_v_i64m1(val); + return vle64_v_i64m1(val, nlanes); } int64 get0() const { @@ -521,8 +506,7 @@ struct v_float64x2 v_float64x2() {} explicit v_float64x2(vfloat64m1_t v) { - vsetvlmax_e64m1(); - vse64_v_f64m1(val, v); + vse64_v_f64m1(val, v, nlanes); } v_float64x2(double v0, double v1) 
{ @@ -534,8 +518,7 @@ struct v_float64x2 } operator vfloat64m1_t() const { - vsetvlmax_e64m1(); - return vle64_v_f64m1(val); + return vle64_v_f64m1(val, nlanes); } double get0() const { @@ -549,42 +532,38 @@ struct v_float64x2 //////////// Initial //////////// -#define OPENCV_HAL_IMPL_RVV_INIT_INTEGER(_Tpvec, _Tp, width, suffix1, suffix2) \ +#define OPENCV_HAL_IMPL_RVV_INIT_INTEGER(_Tpvec, _Tp, suffix1, suffix2, vl) \ inline v_##_Tpvec v_setzero_##suffix1() \ { \ - vsetvlmax_e##width##m1(); \ - return v_##_Tpvec(vzero_##suffix2##m1()); \ + return v_##_Tpvec(vmv_v_x_##suffix2##m1(0, vl)); \ } \ inline v_##_Tpvec v_setall_##suffix1(_Tp v) \ { \ - vsetvlmax_e##width##m1(); \ - return v_##_Tpvec(vmv_v_x_##suffix2##m1(v)); \ + return v_##_Tpvec(vmv_v_x_##suffix2##m1(v, vl)); \ } -OPENCV_HAL_IMPL_RVV_INIT_INTEGER(uint8x16, uchar, 8, u8, u8) -OPENCV_HAL_IMPL_RVV_INIT_INTEGER(int8x16, schar, 8, s8, i8) -OPENCV_HAL_IMPL_RVV_INIT_INTEGER(uint16x8, ushort, 16, u16, u16) -OPENCV_HAL_IMPL_RVV_INIT_INTEGER(int16x8, short, 16, s16, i16) -OPENCV_HAL_IMPL_RVV_INIT_INTEGER(uint32x4, unsigned, 32, u32, u32) -OPENCV_HAL_IMPL_RVV_INIT_INTEGER(int32x4, int, 32, s32, i32) -OPENCV_HAL_IMPL_RVV_INIT_INTEGER(uint64x2, uint64, 64, u64, u64) -OPENCV_HAL_IMPL_RVV_INIT_INTEGER(int64x2, int64, 64, s64, i64) +OPENCV_HAL_IMPL_RVV_INIT_INTEGER(uint8x16, uchar, u8, u8, 16) +OPENCV_HAL_IMPL_RVV_INIT_INTEGER(int8x16, schar, s8, i8, 16) +OPENCV_HAL_IMPL_RVV_INIT_INTEGER(uint16x8, ushort, u16, u16, 8) +OPENCV_HAL_IMPL_RVV_INIT_INTEGER(int16x8, short, s16, i16, 8) +OPENCV_HAL_IMPL_RVV_INIT_INTEGER(uint32x4, unsigned, u32, u32, 4) +OPENCV_HAL_IMPL_RVV_INIT_INTEGER(int32x4, int, s32, i32, 4) +OPENCV_HAL_IMPL_RVV_INIT_INTEGER(uint64x2, uint64, u64, u64, 2) +OPENCV_HAL_IMPL_RVV_INIT_INTEGER(int64x2, int64, s64, i64, 2) -#define OPENCV_HAL_IMPL_RVV_INIT_FP(_Tpv, _Tp, width, suffix) \ +#define OPENCV_HAL_IMPL_RVV_INIT_FP(_Tpv, _Tp, suffix, vl) \ inline v_##_Tpv v_setzero_##suffix() \ { \ - vsetvlmax_e##width##m1(); \ - return v_##_Tpv(vzero_##suffix##m1()); \ + return v_##_Tpv(vfmv_v_f_##suffix##m1(0, vl)); \ } \ inline v_##_Tpv v_setall_##suffix(_Tp v) \ { \ - vsetvlmax_e##width##m1(); \ - return v_##_Tpv(vfmv_v_f_##suffix##m1(v)); \ + return v_##_Tpv(vfmv_v_f_##suffix##m1(v, vl)); \ } -OPENCV_HAL_IMPL_RVV_INIT_FP(float32x4, float, 32, f32) +OPENCV_HAL_IMPL_RVV_INIT_FP(float32x4, float, f32, 4) #if CV_SIMD128_64F -OPENCV_HAL_IMPL_RVV_INIT_FP(float64x2, double, 64, f64) +OPENCV_HAL_IMPL_RVV_INIT_FP(float64x2, double, f64, 2) #endif //////////// Reinterpret //////////// @@ -605,167 +584,155 @@ OPENCV_HAL_IMPL_RVV_SELF_REINTERPRET(int64x2, s64) OPENCV_HAL_IMPL_RVV_SELF_REINTERPRET(float64x2, f64) #endif -#define OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(_Tpvec1, _Tpvec2, _nTpvec1, _nTpvec2, suffix1, suffix2, nsuffix1, nsuffix2, width1, width2) \ +#define OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(_Tpvec1, _Tpvec2, _nTpvec1, _nTpvec2, suffix1, suffix2, nsuffix1, nsuffix2, width1, width2, vl1, vl2) \ inline v_##_Tpvec1 v_reinterpret_as_##suffix1(const v_##_Tpvec2& v) \ { \ - vsetvlmax_e##width2##m1(); \ - return v_##_Tpvec1((_nTpvec1)vle##width2##_v_##nsuffix2##m1(v.val)); \ + return v_##_Tpvec1((_nTpvec1)vle##width2##_v_##nsuffix2##m1(v.val, vl2)); \ } \ inline v_##_Tpvec2 v_reinterpret_as_##suffix2(const v_##_Tpvec1& v) \ { \ - vsetvlmax_e##width1##m1(); \ - return v_##_Tpvec2((_nTpvec2)vle##width1##_v_##nsuffix1##m1(v.val)); \ -} - -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint8x16, int8x16, vuint8m1_t, vint8m1_t, u8, s8, u8, i8, 8, 8) 
-OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint16x8, int16x8, vuint16m1_t, vint16m1_t, u16, s16, u16, i16, 16, 16) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint32x4, int32x4, vuint32m1_t, vint32m1_t, u32, s32, u32, i32, 32, 32) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint32x4, float32x4, vuint32m1_t, vfloat32m1_t, u32, f32, u32, f32, 32, 32) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int32x4, float32x4, vint32m1_t, vfloat32m1_t, s32, f32, i32, f32, 32, 32) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint64x2, int64x2, vuint64m1_t, vint64m1_t, u64, s64, u64, i64, 64, 64) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint8x16, uint16x8, vuint8m1_t, vuint16m1_t, u8, u16, u8, u16, 8, 16) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint8x16, uint32x4, vuint8m1_t, vuint32m1_t, u8, u32, u8, u32, 8, 32) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint8x16, uint64x2, vuint8m1_t, vuint64m1_t, u8, u64, u8, u64, 8, 64) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint16x8, uint32x4, vuint16m1_t, vuint32m1_t, u16, u32, u16, u32, 16, 32) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint16x8, uint64x2, vuint16m1_t, vuint64m1_t, u16, u64, u16, u64, 16, 64) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint32x4, uint64x2, vuint32m1_t, vuint64m1_t, u32, u64, u32, u64, 32, 64) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int8x16, int16x8, vint8m1_t, vint16m1_t, s8, s16, i8, i16, 8, 16) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int8x16, int32x4, vint8m1_t, vint32m1_t, s8, s32, i8, i32, 8, 32) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int8x16, int64x2, vint8m1_t, vint64m1_t, s8, s64, i8, i64, 8, 64) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int16x8, int32x4, vint16m1_t, vint32m1_t, s16, s32, i16, i32, 16, 32) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int16x8, int64x2, vint16m1_t, vint64m1_t, s16, s64, i16, i64, 16, 64) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int32x4, int64x2, vint32m1_t, vint64m1_t, s32, s64, i32, i64, 32, 64) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint8x16, int16x8, vuint8m1_t, vint16m1_t, u8, s16, u8, i16, 8, 16) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint8x16, int32x4, vuint8m1_t, vint32m1_t, u8, s32, u8, i32, 8, 32) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint8x16, int64x2, vuint8m1_t, vint64m1_t, u8, s64, u8, i64, 8, 64) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint16x8, int8x16, vuint16m1_t, vint8m1_t, u16, s8, u16, i8, 16, 8) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint16x8, int32x4, vuint16m1_t, vint32m1_t, u16, s32, u16, i32, 16, 32) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint16x8, int64x2, vuint16m1_t, vint64m1_t, u16, s64, u16, i64, 16, 64) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint32x4, int8x16, vuint32m1_t, vint8m1_t, u32, s8, u32, i8, 32, 8) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint32x4, int16x8, vuint32m1_t, vint16m1_t, u32, s16, u32, i16, 32, 16) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint32x4, int64x2, vuint32m1_t, vint64m1_t, u32, s64, u32, i64, 32, 64) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint64x2, int8x16, vuint64m1_t, vint8m1_t, u64, s8, u64, i8, 64, 8) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint64x2, int16x8, vuint64m1_t, vint16m1_t, u64, s16, u64, i16, 64, 16) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint64x2, int32x4, vuint64m1_t, vint32m1_t, u64, s32, u64, i32, 64, 32) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint8x16, float32x4, vuint8m1_t, vfloat32m1_t, u8, f32, u8, f32, 8, 32) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint16x8, float32x4, vuint16m1_t, vfloat32m1_t, u16, f32, u16, f32, 16, 32) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint64x2, 
float32x4, vuint64m1_t, vfloat32m1_t, u64, f32, u64, f32, 64, 32) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int8x16, float32x4, vint8m1_t, vfloat32m1_t, s8, f32, i8, f32, 8, 32) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int16x8, float32x4, vint16m1_t, vfloat32m1_t, s16, f32, i16, f32, 16, 32) -OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int64x2, float32x4, vint64m1_t, vfloat32m1_t, s64, f32, i64, f32, 64, 32) + return v_##_Tpvec2((_nTpvec2)vle##width1##_v_##nsuffix1##m1(v.val, vl1)); \ +} + +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint8x16, int8x16, vuint8m1_t, vint8m1_t, u8, s8, u8, i8, 8, 8, 16, 16) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint16x8, int16x8, vuint16m1_t, vint16m1_t, u16, s16, u16, i16, 16, 16, 8, 8) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint32x4, int32x4, vuint32m1_t, vint32m1_t, u32, s32, u32, i32, 32, 32, 4, 4) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint32x4, float32x4, vuint32m1_t, vfloat32m1_t, u32, f32, u32, f32, 32, 32, 4, 4) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int32x4, float32x4, vint32m1_t, vfloat32m1_t, s32, f32, i32, f32, 32, 32, 4, 4) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint64x2, int64x2, vuint64m1_t, vint64m1_t, u64, s64, u64, i64, 64, 64, 2, 2) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint8x16, uint16x8, vuint8m1_t, vuint16m1_t, u8, u16, u8, u16, 8, 16, 16, 8) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint8x16, uint32x4, vuint8m1_t, vuint32m1_t, u8, u32, u8, u32, 8, 32, 16, 4) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint8x16, uint64x2, vuint8m1_t, vuint64m1_t, u8, u64, u8, u64, 8, 64, 16, 2) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint16x8, uint32x4, vuint16m1_t, vuint32m1_t, u16, u32, u16, u32, 16, 32, 8, 4) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint16x8, uint64x2, vuint16m1_t, vuint64m1_t, u16, u64, u16, u64, 16, 64, 8, 2) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint32x4, uint64x2, vuint32m1_t, vuint64m1_t, u32, u64, u32, u64, 32, 64, 4, 2) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int8x16, int16x8, vint8m1_t, vint16m1_t, s8, s16, i8, i16, 8, 16, 16, 8) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int8x16, int32x4, vint8m1_t, vint32m1_t, s8, s32, i8, i32, 8, 32, 16, 4) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int8x16, int64x2, vint8m1_t, vint64m1_t, s8, s64, i8, i64, 8, 64, 16, 2) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int16x8, int32x4, vint16m1_t, vint32m1_t, s16, s32, i16, i32, 16, 32, 8, 4) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int16x8, int64x2, vint16m1_t, vint64m1_t, s16, s64, i16, i64, 16, 64, 8, 2) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int32x4, int64x2, vint32m1_t, vint64m1_t, s32, s64, i32, i64, 32, 64, 4, 2) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint8x16, int16x8, vuint8m1_t, vint16m1_t, u8, s16, u8, i16, 8, 16, 16, 8) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint8x16, int32x4, vuint8m1_t, vint32m1_t, u8, s32, u8, i32, 8, 32, 16, 4) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint8x16, int64x2, vuint8m1_t, vint64m1_t, u8, s64, u8, i64, 8, 64, 16, 2) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint16x8, int8x16, vuint16m1_t, vint8m1_t, u16, s8, u16, i8, 16, 8, 8, 16) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint16x8, int32x4, vuint16m1_t, vint32m1_t, u16, s32, u16, i32, 16, 32, 8, 4) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint16x8, int64x2, vuint16m1_t, vint64m1_t, u16, s64, u16, i64, 16, 64, 8, 2) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint32x4, int8x16, vuint32m1_t, vint8m1_t, u32, s8, u32, i8, 32, 8, 4, 16) +OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint32x4, int16x8, vuint32m1_t, vint16m1_t, u32, 
s16, u32, i16, 32, 16, 4, 8)
+OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint32x4, int64x2, vuint32m1_t, vint64m1_t, u32, s64, u32, i64, 32, 64, 4, 2)
+OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint64x2, int8x16, vuint64m1_t, vint8m1_t, u64, s8, u64, i8, 64, 8, 2, 16)
+OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint64x2, int16x8, vuint64m1_t, vint16m1_t, u64, s16, u64, i16, 64, 16, 2, 8)
+OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint64x2, int32x4, vuint64m1_t, vint32m1_t, u64, s32, u64, i32, 64, 32, 2, 4)
+OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint8x16, float32x4, vuint8m1_t, vfloat32m1_t, u8, f32, u8, f32, 8, 32, 16, 4)
+OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint16x8, float32x4, vuint16m1_t, vfloat32m1_t, u16, f32, u16, f32, 16, 32, 8, 4)
+OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint64x2, float32x4, vuint64m1_t, vfloat32m1_t, u64, f32, u64, f32, 64, 32, 2, 4)
+OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int8x16, float32x4, vint8m1_t, vfloat32m1_t, s8, f32, i8, f32, 8, 32, 16, 4)
+OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int16x8, float32x4, vint16m1_t, vfloat32m1_t, s16, f32, i16, f32, 16, 32, 8, 4)
+OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int64x2, float32x4, vint64m1_t, vfloat32m1_t, s64, f32, i64, f32, 64, 32, 2, 4)
 #if CV_SIMD128_64F
-OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint64x2, float64x2, vuint64m1_t, vfloat64m1_t, u64, f64, u64, f64, 64, 64)
-OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int64x2, float64x2, vint64m1_t, vfloat64m1_t, s64, f64, i64, f64, 64, 64)
-OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint8x16, float64x2, vuint8m1_t, vfloat64m1_t, u8, f64, u8, f64, 8, 64)
-OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint16x8, float64x2, vuint16m1_t, vfloat64m1_t, u16, f64, u16, f64, 16, 64)
-OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint32x4, float64x2, vuint32m1_t, vfloat64m1_t, u32, f64, u32, f64, 32, 64)
-OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int8x16, float64x2, vint8m1_t, vfloat64m1_t, s8, f64, i8, f64, 8, 64)
-OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int16x8, float64x2, vint16m1_t, vfloat64m1_t, s16, f64, i16, f64, 16, 64)
-OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int32x4, float64x2, vint32m1_t, vfloat64m1_t, s32, f64, i32, f64, 32, 64)
-OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(float32x4, float64x2, vfloat32m1_t, vfloat64m1_t, f32, f64, f32, f64, 32, 64)
+OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint64x2, float64x2, vuint64m1_t, vfloat64m1_t, u64, f64, u64, f64, 64, 64, 2, 2)
+OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int64x2, float64x2, vint64m1_t, vfloat64m1_t, s64, f64, i64, f64, 64, 64, 2, 2)
+OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint8x16, float64x2, vuint8m1_t, vfloat64m1_t, u8, f64, u8, f64, 8, 64, 16, 2)
+OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint16x8, float64x2, vuint16m1_t, vfloat64m1_t, u16, f64, u16, f64, 16, 64, 8, 2)
+OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(uint32x4, float64x2, vuint32m1_t, vfloat64m1_t, u32, f64, u32, f64, 32, 64, 4, 2)
+OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int8x16, float64x2, vint8m1_t, vfloat64m1_t, s8, f64, i8, f64, 8, 64, 16, 2)
+OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int16x8, float64x2, vint16m1_t, vfloat64m1_t, s16, f64, i16, f64, 16, 64, 8, 2)
+OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(int32x4, float64x2, vint32m1_t, vfloat64m1_t, s32, f64, i32, f64, 32, 64, 4, 2)
+OPENCV_HAL_IMPL_RVV_ONE_TIME_REINTERPRET(float32x4, float64x2, vfloat32m1_t, vfloat64m1_t, f32, f64, f32, f64, 32, 64, 4, 2)
 #endif

 ////////////// Extract //////////////

-#define OPENCV_HAL_IMPL_RVV_EXTRACT(_Tpvec, _Tp, suffix, width, vmv) \
+#define
OPENCV_HAL_IMPL_RVV_EXTRACT_INTEGER(_Tpvec, _Tp, suffix, vmv, vl) \ template \ inline _Tpvec v_extract(const _Tpvec& a, const _Tpvec& b) \ { \ - vsetvlmax_e##width##m1(); \ - return _Tpvec(vslideup_vx_##suffix##m1(vslidedown_vx_##suffix##m1(vzero_##suffix##m1(), a, s), b, _Tpvec::nlanes - s)); \ + return _Tpvec(vslideup_vx_##suffix##m1(vslidedown_vx_##suffix##m1(vmv_v_x_##suffix##m1(0, vl), a, s, vl), b, _Tpvec::nlanes - s, vl)); \ } \ template inline _Tp v_extract_n(_Tpvec v) \ { \ - vsetvlmax_e##width##m1(); \ - return _Tp(vmv(vslidedown_vx_##suffix##m1(vzero_##suffix##m1(), v, i))); \ + return _Tp(vmv(vslidedown_vx_##suffix##m1(vmv_v_x_##suffix##m1(0, vl), v, i, vl))); \ } -OPENCV_HAL_IMPL_RVV_EXTRACT(v_uint8x16, uchar, u8, 8, vmv_x_s_u8m1_u8) -OPENCV_HAL_IMPL_RVV_EXTRACT(v_int8x16, schar, i8, 8, vmv_x_s_i8m1_i8) -OPENCV_HAL_IMPL_RVV_EXTRACT(v_uint16x8, ushort, u16, 16, vmv_x_s_u16m1_u16) -OPENCV_HAL_IMPL_RVV_EXTRACT(v_int16x8, short, i16, 16, vmv_x_s_i16m1_i16) -OPENCV_HAL_IMPL_RVV_EXTRACT(v_uint32x4, uint, u32, 32, vmv_x_s_u32m1_u32) -OPENCV_HAL_IMPL_RVV_EXTRACT(v_int32x4, int, i32, 32, vmv_x_s_i32m1_i32) -OPENCV_HAL_IMPL_RVV_EXTRACT(v_uint64x2, uint64, u64, 64, vmv_x_s_u64m1_u64) -OPENCV_HAL_IMPL_RVV_EXTRACT(v_int64x2, int64, i64, 64, vmv_x_s_i64m1_i64) -OPENCV_HAL_IMPL_RVV_EXTRACT(v_float32x4, float, f32, 32, vfmv_f_s_f32m1_f32) +OPENCV_HAL_IMPL_RVV_EXTRACT_INTEGER(v_uint8x16, uchar, u8, vmv_x_s_u8m1_u8, 16) +OPENCV_HAL_IMPL_RVV_EXTRACT_INTEGER(v_int8x16, schar, i8, vmv_x_s_i8m1_i8, 16) +OPENCV_HAL_IMPL_RVV_EXTRACT_INTEGER(v_uint16x8, ushort, u16, vmv_x_s_u16m1_u16, 8) +OPENCV_HAL_IMPL_RVV_EXTRACT_INTEGER(v_int16x8, short, i16, vmv_x_s_i16m1_i16, 8) +OPENCV_HAL_IMPL_RVV_EXTRACT_INTEGER(v_uint32x4, uint, u32, vmv_x_s_u32m1_u32, 4) +OPENCV_HAL_IMPL_RVV_EXTRACT_INTEGER(v_int32x4, int, i32, vmv_x_s_i32m1_i32, 4) +OPENCV_HAL_IMPL_RVV_EXTRACT_INTEGER(v_uint64x2, uint64, u64, vmv_x_s_u64m1_u64, 2) +OPENCV_HAL_IMPL_RVV_EXTRACT_INTEGER(v_int64x2, int64, i64, vmv_x_s_i64m1_i64, 2) + +#define OPENCV_HAL_IMPL_RVV_EXTRACT_FP(_Tpvec, _Tp, suffix, vmv, vl) \ +template \ +inline _Tpvec v_extract(const _Tpvec& a, const _Tpvec& b) \ +{ \ + return _Tpvec(vslideup_vx_##suffix##m1(vslidedown_vx_##suffix##m1(vfmv_v_f_##suffix##m1(0, vl), a, s, vl), b, _Tpvec::nlanes - s, vl)); \ +} \ +template inline _Tp v_extract_n(_Tpvec v) \ +{ \ + return _Tp(vmv(vslidedown_vx_##suffix##m1(vfmv_v_f_##suffix##m1(0, vl), v, i, vl))); \ +} + +OPENCV_HAL_IMPL_RVV_EXTRACT_FP(v_float32x4, float, f32, vfmv_f_s_f32m1_f32, 4) #if CV_SIMD128_64F -OPENCV_HAL_IMPL_RVV_EXTRACT(v_float64x2, double, f64, 64, vfmv_f_s_f64m1_f64) +OPENCV_HAL_IMPL_RVV_EXTRACT_FP(v_float64x2, double, f64, vfmv_f_s_f64m1_f64, 2) #endif ////////////// Load/Store ////////////// -#define OPENCV_HAL_IMPL_RVV_LOADSTORE_OP(_Tpvec, _nTpvec, _Tp, hvl, width, suffix) \ +#define OPENCV_HAL_IMPL_RVV_LOADSTORE_OP(_Tpvec, _nTpvec, _Tp, hvl, vl, width, suffix, vmv) \ inline _Tpvec v_load(const _Tp* ptr) \ { \ - vsetvlmax_e8m1(); \ - return _Tpvec((_nTpvec)vle8_v_u8m1((uchar*)ptr)); \ + return _Tpvec((_nTpvec)vle8_v_u8m1((uchar*)ptr, 16)); \ } \ inline _Tpvec v_load_aligned(const _Tp* ptr) \ { \ - vsetvlmax_e##width##m1(); \ - return _Tpvec(vle##width##_v_##suffix##m1(ptr)); \ + return _Tpvec(vle##width##_v_##suffix##m1(ptr, vl)); \ } \ inline _Tpvec v_load_low(const _Tp* ptr) \ { \ - vsetvl_e##width##m1(hvl); \ - _Tpvec res = _Tpvec(vle##width##_v_##suffix##m1(ptr)); \ - vsetvlmax_e##width##m1(); \ + _Tpvec res = _Tpvec(vle##width##_v_##suffix##m1(ptr, hvl)); \ return 
res; \ } \ inline void v_store(_Tp* ptr, const _Tpvec& a) \ { \ - vsetvlmax_e8m1(); \ - vse8_v_u8m1((uchar*)ptr, vle8_v_u8m1((uchar*)a.val)); \ + vse8_v_u8m1((uchar*)ptr, vle8_v_u8m1((uchar*)a.val, 16), 16); \ } \ inline void v_store_aligned(_Tp* ptr, const _Tpvec& a) \ { \ - vsetvlmax_e##width##m1(); \ - vse##width##_v_##suffix##m1(ptr, a); \ + vse##width##_v_##suffix##m1(ptr, a, vl); \ } \ inline void v_store_aligned_nocache(_Tp* ptr, const _Tpvec& a) \ { \ - vsetvlmax_e##width##m1(); \ - vse##width##_v_##suffix##m1(ptr, a); \ + vse##width##_v_##suffix##m1(ptr, a, vl); \ } \ inline void v_store(_Tp* ptr, const _Tpvec& a, hal::StoreMode /*mode*/) \ { \ - vsetvlmax_e##width##m1(); \ - vse##width##_v_##suffix##m1(ptr, a); \ + vse##width##_v_##suffix##m1(ptr, a, vl); \ } \ inline void v_store_low(_Tp* ptr, const _Tpvec& a) \ { \ - _Tp CV_DECL_ALIGNED(32) tmp_ptr[_Tpvec::nlanes] = {0}; \ - vsetvlmax_e##width##m1(); \ - vse##width##_v_##suffix##m1(tmp_ptr, a); \ - for(int i = 0; i < _Tpvec::nlanes/2; ++i) \ - { \ - ptr[i] = tmp_ptr[i]; \ - } \ + vse##width##_v_##suffix##m1(ptr, a, hvl); \ } \ inline void v_store_high(_Tp* ptr, const _Tpvec& a) \ { \ - _Tp CV_DECL_ALIGNED(32) tmp_ptr[_Tpvec::nlanes] = {0}; \ - vsetvlmax_e##width##m1(); \ - vse##width##_v_##suffix##m1(tmp_ptr, a); \ - for(int i = 0; i < _Tpvec::nlanes/2; ++i) \ - { \ - ptr[i] = tmp_ptr[i+_Tpvec::nlanes/2]; \ - } \ + vse##width##_v_##suffix##m1(ptr, vslidedown_vx_##suffix##m1(vmv(0, vl), a, hvl, vl), hvl); \ } -OPENCV_HAL_IMPL_RVV_LOADSTORE_OP(v_uint8x16, vuint8m1_t, uchar, 8, 8, u8) -OPENCV_HAL_IMPL_RVV_LOADSTORE_OP(v_int8x16, vint8m1_t, schar, 8, 8, i8) -OPENCV_HAL_IMPL_RVV_LOADSTORE_OP(v_uint16x8, vuint16m1_t, ushort, 4, 16, u16) -OPENCV_HAL_IMPL_RVV_LOADSTORE_OP(v_int16x8, vint16m1_t, short, 4, 16, i16) -OPENCV_HAL_IMPL_RVV_LOADSTORE_OP(v_uint32x4, vuint32m1_t, unsigned, 2, 32, u32) -OPENCV_HAL_IMPL_RVV_LOADSTORE_OP(v_int32x4, vint32m1_t, int, 2, 32, i32) -OPENCV_HAL_IMPL_RVV_LOADSTORE_OP(v_uint64x2, vuint64m1_t, uint64, 1, 64, u64) -OPENCV_HAL_IMPL_RVV_LOADSTORE_OP(v_int64x2, vint64m1_t, int64, 1, 64, i64) -OPENCV_HAL_IMPL_RVV_LOADSTORE_OP(v_float32x4, vfloat32m1_t, float, 2, 32, f32) +OPENCV_HAL_IMPL_RVV_LOADSTORE_OP(v_uint8x16, vuint8m1_t, uchar, 8, 16, 8, u8, vmv_v_x_u8m1) +OPENCV_HAL_IMPL_RVV_LOADSTORE_OP(v_int8x16, vint8m1_t, schar, 8, 16, 8, i8, vmv_v_x_i8m1) +OPENCV_HAL_IMPL_RVV_LOADSTORE_OP(v_uint16x8, vuint16m1_t, ushort, 4, 8, 16, u16, vmv_v_x_u16m1) +OPENCV_HAL_IMPL_RVV_LOADSTORE_OP(v_int16x8, vint16m1_t, short, 4, 8, 16, i16, vmv_v_x_i16m1) +OPENCV_HAL_IMPL_RVV_LOADSTORE_OP(v_uint32x4, vuint32m1_t, unsigned, 2, 4, 32, u32, vmv_v_x_u32m1) +OPENCV_HAL_IMPL_RVV_LOADSTORE_OP(v_int32x4, vint32m1_t, int, 2, 4, 32, i32, vmv_v_x_i32m1) +OPENCV_HAL_IMPL_RVV_LOADSTORE_OP(v_uint64x2, vuint64m1_t, uint64, 1, 2, 64, u64, vmv_v_x_u64m1) +OPENCV_HAL_IMPL_RVV_LOADSTORE_OP(v_int64x2, vint64m1_t, int64, 1, 2, 64, i64, vmv_v_x_i64m1) +OPENCV_HAL_IMPL_RVV_LOADSTORE_OP(v_float32x4, vfloat32m1_t, float, 2, 4, 32, f32, vfmv_v_f_f32m1) #if CV_SIMD128_64F -OPENCV_HAL_IMPL_RVV_LOADSTORE_OP(v_float64x2, vfloat64m1_t, double, 1, 64, f64) +OPENCV_HAL_IMPL_RVV_LOADSTORE_OP(v_float64x2, vfloat64m1_t, double, 1, 2, 64, f64, vfmv_v_f_f64m1) #endif inline v_int8x16 v_load_halves(const schar* ptr0, const schar* ptr1) @@ -775,8 +742,7 @@ inline v_int8x16 v_load_halves(const schar* ptr0, const schar* ptr1) ptr0[0], ptr0[1], ptr0[2], ptr0[3], ptr0[4], ptr0[5], ptr0[6], ptr0[7], ptr1[0], ptr1[1], ptr1[2], ptr1[3], ptr1[4], ptr1[5], ptr1[6], ptr1[7] }; 
- vsetvlmax_e8m1(); - return v_int8x16(vle8_v_i8m1(elems)); + return v_int8x16(vle8_v_i8m1(elems, 16)); } inline v_uint8x16 v_load_halves(const uchar* ptr0, const uchar* ptr1) { return v_reinterpret_as_u8(v_load_halves((schar*)ptr0, (schar*)ptr1)); } @@ -786,8 +752,7 @@ inline v_int16x8 v_load_halves(const short* ptr0, const short* ptr1) { ptr0[0], ptr0[1], ptr0[2], ptr0[3], ptr1[0], ptr1[1], ptr1[2], ptr1[3] }; - vsetvlmax_e16m1(); - return v_int16x8(vle16_v_i16m1(elems)); + return v_int16x8(vle16_v_i16m1(elems, 8)); } inline v_uint16x8 v_load_halves(const ushort* ptr0, const ushort* ptr1) { return v_reinterpret_as_u16(v_load_halves((short*)ptr0, (short*)ptr1)); } @@ -797,8 +762,7 @@ inline v_int32x4 v_load_halves(const int* ptr0, const int* ptr1) { ptr0[0], ptr0[1], ptr1[0], ptr1[1] }; - vsetvlmax_e32m1(); - return v_int32x4(vle32_v_i32m1(elems)); + return v_int32x4(vle32_v_i32m1(elems, 4)); } inline v_float32x4 v_load_halves(const float* ptr0, const float* ptr1) { @@ -806,8 +770,7 @@ inline v_float32x4 v_load_halves(const float* ptr0, const float* ptr1) { ptr0[0], ptr0[1], ptr1[0], ptr1[1] }; - vsetvlmax_e32m1(); - return v_float32x4(vle32_v_f32m1(elems)); + return v_float32x4(vle32_v_f32m1(elems, 4)); } inline v_uint32x4 v_load_halves(const unsigned* ptr0, const unsigned* ptr1) { return v_reinterpret_as_u32(v_load_halves((int*)ptr0, (int*)ptr1)); } @@ -817,8 +780,7 @@ inline v_int64x2 v_load_halves(const int64* ptr0, const int64* ptr1) { ptr0[0], ptr1[0] }; - vsetvlmax_e64m1(); - return v_int64x2(vle64_v_i64m1(elems)); + return v_int64x2(vle64_v_i64m1(elems, 2)); } inline v_uint64x2 v_load_halves(const uint64* ptr0, const uint64* ptr1) { return v_reinterpret_as_u64(v_load_halves((int64*)ptr0, (int64*)ptr1)); } @@ -829,8 +791,7 @@ inline v_float64x2 v_load_halves(const double* ptr0, const double* ptr1) { ptr0[0], ptr1[0] }; - vsetvlmax_e64m1(); - return v_float64x2(vle64_v_f64m1(elems)); + return v_float64x2(vle64_v_f64m1(elems, 2)); } #endif @@ -858,8 +819,7 @@ inline v_int8x16 v_lut(const schar* tab, const int* idx) tab[idx[14]], tab[idx[15]] }; - vsetvlmax_e8m1(); - return v_int8x16(vle8_v_i8m1(elems)); + return v_int8x16(vle8_v_i8m1(elems, 16)); } inline v_int8x16 v_lut_pairs(const schar* tab, const int* idx) { @@ -882,8 +842,7 @@ inline v_int8x16 v_lut_pairs(const schar* tab, const int* idx) tab[idx[7]], tab[idx[7] + 1] }; - vsetvlmax_e8m1(); - return v_int8x16(vle8_v_i8m1(elems)); + return v_int8x16(vle8_v_i8m1(elems, 16)); } inline v_int8x16 v_lut_quads(const schar* tab, const int* idx) { @@ -906,8 +865,7 @@ inline v_int8x16 v_lut_quads(const schar* tab, const int* idx) tab[idx[3] + 2], tab[idx[3] + 3] }; - vsetvlmax_e8m1(); - return v_int8x16(vle8_v_i8m1(elems)); + return v_int8x16(vle8_v_i8m1(elems, 16)); } inline v_uint8x16 v_lut(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v_lut((schar*)tab, idx)); } inline v_uint8x16 v_lut_pairs(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v_lut_pairs((schar*)tab, idx)); } @@ -926,8 +884,7 @@ inline v_int16x8 v_lut(const short* tab, const int* idx) tab[idx[6]], tab[idx[7]] }; - vsetvlmax_e16m1(); - return v_int16x8(vle16_v_i16m1(elems)); + return v_int16x8(vle16_v_i16m1(elems, 8)); } inline v_int16x8 v_lut_pairs(const short* tab, const int* idx) { @@ -942,8 +899,7 @@ inline v_int16x8 v_lut_pairs(const short* tab, const int* idx) tab[idx[3]], tab[idx[3] + 1] }; - vsetvlmax_e16m1(); - return v_int16x8(vle16_v_i16m1(elems)); + return v_int16x8(vle16_v_i16m1(elems, 8)); } inline v_int16x8 v_lut_quads(const 
short* tab, const int* idx) { @@ -958,8 +914,7 @@ inline v_int16x8 v_lut_quads(const short* tab, const int* idx) tab[idx[1] + 2], tab[idx[1] + 3] }; - vsetvlmax_e16m1(); - return v_int16x8(vle16_v_i16m1(elems)); + return v_int16x8(vle16_v_i16m1(elems, 8)); } inline v_uint16x8 v_lut(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v_lut((short*)tab, idx)); } inline v_uint16x8 v_lut_pairs(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v_lut_pairs((short*)tab, idx)); } @@ -974,8 +929,7 @@ inline v_int32x4 v_lut(const int* tab, const int* idx) tab[idx[2]], tab[idx[3]] }; - vsetvlmax_e32m1(); - return v_int32x4(vle32_v_i32m1(elems)); + return v_int32x4(vle32_v_i32m1(elems, 4)); } inline v_int32x4 v_lut_pairs(const int* tab, const int* idx) { @@ -986,13 +940,11 @@ inline v_int32x4 v_lut_pairs(const int* tab, const int* idx) tab[idx[1]], tab[idx[1] + 1] }; - vsetvlmax_e32m1(); - return v_int32x4(vle32_v_i32m1(elems)); + return v_int32x4(vle32_v_i32m1(elems, 4)); } inline v_int32x4 v_lut_quads(const int* tab, const int* idx) { - vsetvlmax_e32m1(); - return v_int32x4(vle32_v_i32m1(tab + idx[0])); + return v_int32x4(vle32_v_i32m1(tab + idx[0], 4)); } inline v_uint32x4 v_lut(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v_lut((int*)tab, idx)); } @@ -1006,13 +958,11 @@ inline v_int64x2 v_lut(const int64_t* tab, const int* idx) tab[idx[0]], tab[idx[1]] }; - vsetvlmax_e64m1(); - return v_int64x2(vle64_v_i64m1(elems)); + return v_int64x2(vle64_v_i64m1(elems, 2)); } inline v_int64x2 v_lut_pairs(const int64* tab, const int* idx) { - vsetvlmax_e64m1(); - return v_int64x2(vle64_v_i64m1(tab + idx[0])); + return v_int64x2(vle64_v_i64m1(tab + idx[0], 2)); } inline v_uint64x2 v_lut(const uint64* tab, const int* idx) { return v_reinterpret_as_u64(v_lut((const int64_t *)tab, idx)); } inline v_uint64x2 v_lut_pairs(const uint64* tab, const int* idx) { return v_reinterpret_as_u64(v_lut_pairs((const int64_t *)tab, idx)); } @@ -1026,8 +976,7 @@ inline v_float32x4 v_lut(const float* tab, const int* idx) tab[idx[2]], tab[idx[3]] }; - vsetvlmax_e32m1(); - return v_float32x4(vle32_v_f32m1(elems)); + return v_float32x4(vle32_v_f32m1(elems, 4)); } inline v_float32x4 v_lut_pairs(const float* tab, const int* idx) { @@ -1038,13 +987,11 @@ inline v_float32x4 v_lut_pairs(const float* tab, const int* idx) tab[idx[1]], tab[idx[1] + 1] }; - vsetvlmax_e32m1(); - return v_float32x4(vle32_v_f32m1(elems)); + return v_float32x4(vle32_v_f32m1(elems, 4)); } inline v_float32x4 v_lut_quads(const float* tab, const int* idx) { - vsetvlmax_e32m1(); - return v_float32x4(vle32_v_f32m1(tab + idx[0])); + return v_float32x4(vle32_v_f32m1(tab + idx[0], 4)); } inline v_int32x4 v_lut(const int* tab, const v_int32x4& idxvec) @@ -1056,8 +1003,7 @@ inline v_int32x4 v_lut(const int* tab, const v_int32x4& idxvec) tab[v_extract_n<2>(idxvec)], tab[v_extract_n<3>(idxvec)] }; - vsetvlmax_e32m1(); - return v_int32x4(vle32_v_i32m1(elems)); + return v_int32x4(vle32_v_i32m1(elems, 4)); } inline v_uint32x4 v_lut(const unsigned* tab, const v_int32x4& idxvec) @@ -1069,8 +1015,7 @@ inline v_uint32x4 v_lut(const unsigned* tab, const v_int32x4& idxvec) tab[v_extract_n<2>(idxvec)], tab[v_extract_n<3>(idxvec)] }; - vsetvlmax_e32m1(); - return v_uint32x4(vle32_v_u32m1(elems)); + return v_uint32x4(vle32_v_u32m1(elems, 4)); } inline v_float32x4 v_lut(const float* tab, const v_int32x4& idxvec) @@ -1082,8 +1027,7 @@ inline v_float32x4 v_lut(const float* tab, const v_int32x4& idxvec) tab[v_extract_n<2>(idxvec)], 
tab[v_extract_n<3>(idxvec)] }; - vsetvlmax_e32m1(); - return v_float32x4(vle32_v_f32m1(elems)); + return v_float32x4(vle32_v_f32m1(elems, 4)); } inline void v_lut_deinterleave(const float* tab, const v_int32x4& idxvec, v_float32x4& x, v_float32x4& y) @@ -1103,14 +1047,12 @@ inline v_float64x2 v_lut(const double* tab, const int* idx) tab[idx[0]], tab[idx[1]] }; - vsetvlmax_e64m1(); - return v_float64x2(vle64_v_f64m1(elems)); + return v_float64x2(vle64_v_f64m1(elems, 2)); } inline v_float64x2 v_lut_pairs(const double* tab, const int* idx) { - vsetvlmax_e64m1(); - return v_float64x2(vle64_v_f64m1(tab + idx[0])); + return v_float64x2(vle64_v_f64m1(tab + idx[0], 2)); } inline v_float64x2 v_lut(const double* tab, const v_int32x4& idxvec) @@ -1120,8 +1062,7 @@ inline v_float64x2 v_lut(const double* tab, const v_int32x4& idxvec) tab[v_extract_n<0>(idxvec)], tab[v_extract_n<1>(idxvec)] }; - vsetvlmax_e64m1(); - return v_float64x2(vle64_v_f64m1(elems)); + return v_float64x2(vle64_v_f64m1(elems, 2)); } inline void v_lut_deinterleave(const double* tab, const v_int32x4& idxvec, v_float64x2& x, v_float64x2& y) @@ -1141,8 +1082,7 @@ inline v_uint8x16 v_pack_b(const v_uint16x8& a, const v_uint16x8& b) ushort CV_DECL_ALIGNED(32) ptr[16] = {0}; v_store(ptr, a); v_store(ptr + 8, b); - vsetvlmax_e8m1(); - return v_uint8x16(vnsrl_wx_u8m1(vle16_v_u16m2(ptr), 0)); + return v_uint8x16(vnsrl_wx_u8m1(vle16_v_u16m2(ptr, 16), 0, 16)); } inline v_uint8x16 v_pack_b(const v_uint32x4& a, const v_uint32x4& b, @@ -1153,8 +1093,7 @@ inline v_uint8x16 v_pack_b(const v_uint32x4& a, const v_uint32x4& b, v_store(ptr + 4, b); v_store(ptr + 8, c); v_store(ptr + 12, d); - vsetvlmax_e8m1(); - return v_uint8x16(vnsrl_wx_u8m1(vnsrl_wx_u16m2(vle32_v_u32m4(ptr), 0), 0)); + return v_uint8x16(vnsrl_wx_u8m1(vnsrl_wx_u16m2(vle32_v_u32m4(ptr, 16), 0, 16), 0, 16)); } inline v_uint8x16 v_pack_b(const v_uint64x2& a, const v_uint64x2& b, const v_uint64x2& c, @@ -1170,95 +1109,89 @@ inline v_uint8x16 v_pack_b(const v_uint64x2& a, const v_uint64x2& b, const v_uin v_store(ptr + 10, f); v_store(ptr + 12, g); v_store(ptr + 14, h); - vsetvlmax_e8m1(); - return v_uint8x16(vnsrl_wx_u8m1(vnsrl_wx_u16m2(vnsrl_wx_u32m4(vle64_v_u64m8(ptr), 0), 0), 0)); + return v_uint8x16(vnsrl_wx_u8m1(vnsrl_wx_u16m2(vnsrl_wx_u32m4(vle64_v_u64m8(ptr, 16), 0, 16), 0, 16), 0, 16)); } ////////////// Arithmetics ////////////// -#define OPENCV_HAL_IMPL_RVV_BIN_OP(bin_op, _Tpvec, intrin, width) \ +#define OPENCV_HAL_IMPL_RVV_BIN_OP(bin_op, _Tpvec, intrin, vl) \ inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \ { \ - vsetvlmax_e##width##m1(); \ - return _Tpvec(intrin(a, b)); \ + return _Tpvec(intrin(a, b, vl)); \ } \ inline _Tpvec& operator bin_op##= (_Tpvec& a, const _Tpvec& b) \ { \ - vsetvlmax_e##width##m1(); \ - a = _Tpvec(intrin(a, b)); \ + a = _Tpvec(intrin(a, b, vl)); \ return a; \ } -OPENCV_HAL_IMPL_RVV_BIN_OP(+, v_uint8x16, vsaddu_vv_u8m1, 8) -OPENCV_HAL_IMPL_RVV_BIN_OP(-, v_uint8x16, vssubu_vv_u8m1, 8) -OPENCV_HAL_IMPL_RVV_BIN_OP(/, v_uint8x16, vdivu_vv_u8m1, 8) -OPENCV_HAL_IMPL_RVV_BIN_OP(+, v_int8x16, vsadd_vv_i8m1, 8) -OPENCV_HAL_IMPL_RVV_BIN_OP(-, v_int8x16, vssub_vv_i8m1, 8) -OPENCV_HAL_IMPL_RVV_BIN_OP(/, v_int8x16, vdiv_vv_i8m1, 8) -OPENCV_HAL_IMPL_RVV_BIN_OP(+, v_uint16x8, vsaddu_vv_u16m1, 16) -OPENCV_HAL_IMPL_RVV_BIN_OP(-, v_uint16x8, vssubu_vv_u16m1, 16) -OPENCV_HAL_IMPL_RVV_BIN_OP(/, v_uint16x8, vdivu_vv_u16m1, 16) -OPENCV_HAL_IMPL_RVV_BIN_OP(+, v_int16x8, vsadd_vv_i16m1, 16) -OPENCV_HAL_IMPL_RVV_BIN_OP(-, v_int16x8, vssub_vv_i16m1, 16) 
-OPENCV_HAL_IMPL_RVV_BIN_OP(/, v_int16x8, vdiv_vv_i16m1, 16) -OPENCV_HAL_IMPL_RVV_BIN_OP(+, v_uint32x4, vadd_vv_u32m1, 32) -OPENCV_HAL_IMPL_RVV_BIN_OP(-, v_uint32x4, vsub_vv_u32m1, 32) -OPENCV_HAL_IMPL_RVV_BIN_OP(*, v_uint32x4, vmul_vv_u32m1, 32) -OPENCV_HAL_IMPL_RVV_BIN_OP(/, v_uint32x4, vdivu_vv_u32m1, 32) -OPENCV_HAL_IMPL_RVV_BIN_OP(+, v_int32x4, vadd_vv_i32m1, 32) -OPENCV_HAL_IMPL_RVV_BIN_OP(-, v_int32x4, vsub_vv_i32m1, 32) -OPENCV_HAL_IMPL_RVV_BIN_OP(*, v_int32x4, vmul_vv_i32m1, 32) -OPENCV_HAL_IMPL_RVV_BIN_OP(/, v_int32x4, vdiv_vv_i32m1, 32) -OPENCV_HAL_IMPL_RVV_BIN_OP(+, v_float32x4, vfadd_vv_f32m1, 32) -OPENCV_HAL_IMPL_RVV_BIN_OP(-, v_float32x4, vfsub_vv_f32m1, 32) -OPENCV_HAL_IMPL_RVV_BIN_OP(*, v_float32x4, vfmul_vv_f32m1, 32) -OPENCV_HAL_IMPL_RVV_BIN_OP(/, v_float32x4, vfdiv_vv_f32m1, 32) -OPENCV_HAL_IMPL_RVV_BIN_OP(+, v_uint64x2, vadd_vv_u64m1, 64) -OPENCV_HAL_IMPL_RVV_BIN_OP(-, v_uint64x2, vsub_vv_u64m1, 64) -OPENCV_HAL_IMPL_RVV_BIN_OP(*, v_uint64x2, vmul_vv_u64m1, 64) -OPENCV_HAL_IMPL_RVV_BIN_OP(/, v_uint64x2, vdivu_vv_u64m1, 64) -OPENCV_HAL_IMPL_RVV_BIN_OP(+, v_int64x2, vadd_vv_i64m1, 64) -OPENCV_HAL_IMPL_RVV_BIN_OP(-, v_int64x2, vsub_vv_i64m1, 64) -OPENCV_HAL_IMPL_RVV_BIN_OP(*, v_int64x2, vmul_vv_i64m1, 64) -OPENCV_HAL_IMPL_RVV_BIN_OP(/, v_int64x2, vdiv_vv_i64m1, 64) +OPENCV_HAL_IMPL_RVV_BIN_OP(+, v_uint8x16, vsaddu_vv_u8m1, 16) +OPENCV_HAL_IMPL_RVV_BIN_OP(-, v_uint8x16, vssubu_vv_u8m1, 16) +OPENCV_HAL_IMPL_RVV_BIN_OP(/, v_uint8x16, vdivu_vv_u8m1, 16) +OPENCV_HAL_IMPL_RVV_BIN_OP(+, v_int8x16, vsadd_vv_i8m1, 16) +OPENCV_HAL_IMPL_RVV_BIN_OP(-, v_int8x16, vssub_vv_i8m1, 16) +OPENCV_HAL_IMPL_RVV_BIN_OP(/, v_int8x16, vdiv_vv_i8m1, 16) +OPENCV_HAL_IMPL_RVV_BIN_OP(+, v_uint16x8, vsaddu_vv_u16m1, 8) +OPENCV_HAL_IMPL_RVV_BIN_OP(-, v_uint16x8, vssubu_vv_u16m1, 8) +OPENCV_HAL_IMPL_RVV_BIN_OP(/, v_uint16x8, vdivu_vv_u16m1, 8) +OPENCV_HAL_IMPL_RVV_BIN_OP(+, v_int16x8, vsadd_vv_i16m1, 8) +OPENCV_HAL_IMPL_RVV_BIN_OP(-, v_int16x8, vssub_vv_i16m1, 8) +OPENCV_HAL_IMPL_RVV_BIN_OP(/, v_int16x8, vdiv_vv_i16m1, 8) +OPENCV_HAL_IMPL_RVV_BIN_OP(+, v_uint32x4, vadd_vv_u32m1, 4) +OPENCV_HAL_IMPL_RVV_BIN_OP(-, v_uint32x4, vsub_vv_u32m1, 4) +OPENCV_HAL_IMPL_RVV_BIN_OP(*, v_uint32x4, vmul_vv_u32m1, 4) +OPENCV_HAL_IMPL_RVV_BIN_OP(/, v_uint32x4, vdivu_vv_u32m1, 4) +OPENCV_HAL_IMPL_RVV_BIN_OP(+, v_int32x4, vadd_vv_i32m1, 4) +OPENCV_HAL_IMPL_RVV_BIN_OP(-, v_int32x4, vsub_vv_i32m1, 4) +OPENCV_HAL_IMPL_RVV_BIN_OP(*, v_int32x4, vmul_vv_i32m1, 4) +OPENCV_HAL_IMPL_RVV_BIN_OP(/, v_int32x4, vdiv_vv_i32m1, 4) +OPENCV_HAL_IMPL_RVV_BIN_OP(+, v_float32x4, vfadd_vv_f32m1, 4) +OPENCV_HAL_IMPL_RVV_BIN_OP(-, v_float32x4, vfsub_vv_f32m1, 4) +OPENCV_HAL_IMPL_RVV_BIN_OP(*, v_float32x4, vfmul_vv_f32m1, 4) +OPENCV_HAL_IMPL_RVV_BIN_OP(/, v_float32x4, vfdiv_vv_f32m1, 4) +OPENCV_HAL_IMPL_RVV_BIN_OP(+, v_uint64x2, vadd_vv_u64m1, 2) +OPENCV_HAL_IMPL_RVV_BIN_OP(-, v_uint64x2, vsub_vv_u64m1, 2) +OPENCV_HAL_IMPL_RVV_BIN_OP(*, v_uint64x2, vmul_vv_u64m1, 2) +OPENCV_HAL_IMPL_RVV_BIN_OP(/, v_uint64x2, vdivu_vv_u64m1, 2) +OPENCV_HAL_IMPL_RVV_BIN_OP(+, v_int64x2, vadd_vv_i64m1, 2) +OPENCV_HAL_IMPL_RVV_BIN_OP(-, v_int64x2, vsub_vv_i64m1, 2) +OPENCV_HAL_IMPL_RVV_BIN_OP(*, v_int64x2, vmul_vv_i64m1, 2) +OPENCV_HAL_IMPL_RVV_BIN_OP(/, v_int64x2, vdiv_vv_i64m1, 2) #if CV_SIMD128_64F -OPENCV_HAL_IMPL_RVV_BIN_OP(+, v_float64x2, vfadd_vv_f64m1, 64) -OPENCV_HAL_IMPL_RVV_BIN_OP(-, v_float64x2, vfsub_vv_f64m1, 64) -OPENCV_HAL_IMPL_RVV_BIN_OP(*, v_float64x2, vfmul_vv_f64m1, 64) -OPENCV_HAL_IMPL_RVV_BIN_OP(/, v_float64x2, vfdiv_vv_f64m1, 64) 
+OPENCV_HAL_IMPL_RVV_BIN_OP(+, v_float64x2, vfadd_vv_f64m1, 2) +OPENCV_HAL_IMPL_RVV_BIN_OP(-, v_float64x2, vfsub_vv_f64m1, 2) +OPENCV_HAL_IMPL_RVV_BIN_OP(*, v_float64x2, vfmul_vv_f64m1, 2) +OPENCV_HAL_IMPL_RVV_BIN_OP(/, v_float64x2, vfdiv_vv_f64m1, 2) #endif ////////////// Bitwise logic ////////////// -#define OPENCV_HAL_IMPL_RVV_LOGIC_OP(_Tpvec, suffix, width) \ -OPENCV_HAL_IMPL_RVV_BIN_OP(&, _Tpvec, vand_vv_##suffix##m1, width) \ -OPENCV_HAL_IMPL_RVV_BIN_OP(|, _Tpvec, vor_vv_##suffix##m1, width) \ -OPENCV_HAL_IMPL_RVV_BIN_OP(^, _Tpvec, vxor_vv_##suffix##m1, width) \ +#define OPENCV_HAL_IMPL_RVV_LOGIC_OP(_Tpvec, suffix, vl) \ +OPENCV_HAL_IMPL_RVV_BIN_OP(&, _Tpvec, vand_vv_##suffix##m1, vl) \ +OPENCV_HAL_IMPL_RVV_BIN_OP(|, _Tpvec, vor_vv_##suffix##m1, vl) \ +OPENCV_HAL_IMPL_RVV_BIN_OP(^, _Tpvec, vxor_vv_##suffix##m1, vl) \ inline _Tpvec operator ~ (const _Tpvec& a) \ { \ - vsetvlmax_e##width##m1(); \ - return _Tpvec(vnot_v_##suffix##m1(a)); \ + return _Tpvec(vnot_v_##suffix##m1(a, vl)); \ } -OPENCV_HAL_IMPL_RVV_LOGIC_OP(v_uint8x16, u8, 8) -OPENCV_HAL_IMPL_RVV_LOGIC_OP(v_int8x16, i8, 8) -OPENCV_HAL_IMPL_RVV_LOGIC_OP(v_uint16x8, u16, 16) -OPENCV_HAL_IMPL_RVV_LOGIC_OP(v_int16x8, i16, 16) -OPENCV_HAL_IMPL_RVV_LOGIC_OP(v_uint32x4, u32, 32) -OPENCV_HAL_IMPL_RVV_LOGIC_OP(v_int32x4, i32, 32) -OPENCV_HAL_IMPL_RVV_LOGIC_OP(v_uint64x2, u64, 64) -OPENCV_HAL_IMPL_RVV_LOGIC_OP(v_int64x2, i64, 64) +OPENCV_HAL_IMPL_RVV_LOGIC_OP(v_uint8x16, u8, 16) +OPENCV_HAL_IMPL_RVV_LOGIC_OP(v_int8x16, i8, 16) +OPENCV_HAL_IMPL_RVV_LOGIC_OP(v_uint16x8, u16, 8) +OPENCV_HAL_IMPL_RVV_LOGIC_OP(v_int16x8, i16, 8) +OPENCV_HAL_IMPL_RVV_LOGIC_OP(v_uint32x4, u32, 4) +OPENCV_HAL_IMPL_RVV_LOGIC_OP(v_int32x4, i32, 4) +OPENCV_HAL_IMPL_RVV_LOGIC_OP(v_uint64x2, u64, 2) +OPENCV_HAL_IMPL_RVV_LOGIC_OP(v_int64x2, i64, 2) #define OPENCV_HAL_IMPL_RVV_FLT_BIT_OP(bin_op, intrin) \ inline v_float32x4 operator bin_op (const v_float32x4& a, const v_float32x4& b) \ { \ - vsetvlmax_e32m1(); \ - return v_float32x4(vreinterpret_v_i32m1_f32m1(intrin(vreinterpret_v_f32m1_i32m1(a), vreinterpret_v_f32m1_i32m1(b)))); \ + return v_float32x4(vreinterpret_v_i32m1_f32m1(intrin(vreinterpret_v_f32m1_i32m1(a), vreinterpret_v_f32m1_i32m1(b), 4))); \ } \ inline v_float32x4& operator bin_op##= (v_float32x4& a, const v_float32x4& b) \ { \ - vsetvlmax_e32m1(); \ - a = v_float32x4(vreinterpret_v_i32m1_f32m1(intrin(vreinterpret_v_f32m1_i32m1(a), vreinterpret_v_f32m1_i32m1(b)))); \ + a = v_float32x4(vreinterpret_v_i32m1_f32m1(intrin(vreinterpret_v_f32m1_i32m1(a), vreinterpret_v_f32m1_i32m1(b), 4))); \ return a; \ } @@ -1268,21 +1201,18 @@ OPENCV_HAL_IMPL_RVV_FLT_BIT_OP(^, vxor_vv_i32m1) inline v_float32x4 operator ~ (const v_float32x4& a) { - vsetvlmax_e32m1(); - return v_float32x4(vreinterpret_v_i32m1_f32m1(vnot_v_i32m1(vreinterpret_v_f32m1_i32m1(a)))); + return v_float32x4(vreinterpret_v_i32m1_f32m1(vnot_v_i32m1(vreinterpret_v_f32m1_i32m1(a), 4))); } #if CV_SIMD128_64F #define OPENCV_HAL_IMPL_RVV_FLT64_BIT_OP(bin_op, intrin) \ inline v_float64x2 operator bin_op (const v_float64x2& a, const v_float64x2& b) \ { \ - vsetvlmax_e64m1(); \ - return v_float64x2(vreinterpret_v_i64m1_f64m1(intrin(vreinterpret_v_f64m1_i64m1(a), vreinterpret_v_f64m1_i64m1(b)))); \ + return v_float64x2(vreinterpret_v_i64m1_f64m1(intrin(vreinterpret_v_f64m1_i64m1(a), vreinterpret_v_f64m1_i64m1(b), 2))); \ } \ inline v_float64x2& operator bin_op##= (v_float64x2& a, const v_float64x2& b) \ { \ - vsetvlmax_e64m1(); \ - a = v_float64x2(vreinterpret_v_i64m1_f64m1(intrin(vreinterpret_v_f64m1_i64m1(a), 
vreinterpret_v_f64m1_i64m1(b)))); \ + a = v_float64x2(vreinterpret_v_i64m1_f64m1(intrin(vreinterpret_v_f64m1_i64m1(a), vreinterpret_v_f64m1_i64m1(b), 2))); \ return a; \ } @@ -1292,119 +1222,108 @@ OPENCV_HAL_IMPL_RVV_FLT64_BIT_OP(^, vxor_vv_i64m1) inline v_float64x2 operator ~ (const v_float64x2& a) { - vsetvlmax_e64m1(); - return v_float64x2(vreinterpret_v_i64m1_f64m1(vnot_v_i64m1(vreinterpret_v_f64m1_i64m1(a)))); + return v_float64x2(vreinterpret_v_i64m1_f64m1(vnot_v_i64m1(vreinterpret_v_f64m1_i64m1(a), 2))); } #endif ////////////// Bitwise shifts ////////////// -#define OPENCV_HAL_IMPL_RVV_UNSIGNED_SHIFT_OP(_Tpvec, suffix, width) \ +#define OPENCV_HAL_IMPL_RVV_UNSIGNED_SHIFT_OP(_Tpvec, suffix, vl) \ inline _Tpvec operator << (const _Tpvec& a, int n) \ { \ - vsetvlmax_e##width##m1(); \ - return _Tpvec(vsll_vx_##suffix##m1(a, uint8_t(n))); \ + return _Tpvec(vsll_vx_##suffix##m1(a, uint8_t(n), vl)); \ } \ inline _Tpvec operator >> (const _Tpvec& a, int n) \ { \ - vsetvlmax_e##width##m1(); \ - return _Tpvec(vsrl_vx_##suffix##m1(a, uint8_t(n))); \ + return _Tpvec(vsrl_vx_##suffix##m1(a, uint8_t(n), vl)); \ } \ template inline _Tpvec v_shl(const _Tpvec& a) \ { \ - vsetvlmax_e##width##m1(); \ - return _Tpvec(vsll_vx_##suffix##m1(a, uint8_t(n))); \ + return _Tpvec(vsll_vx_##suffix##m1(a, uint8_t(n), vl)); \ } \ template inline _Tpvec v_shr(const _Tpvec& a) \ { \ - vsetvlmax_e##width##m1(); \ - return _Tpvec(vsrl_vx_##suffix##m1(a, uint8_t(n))); \ + return _Tpvec(vsrl_vx_##suffix##m1(a, uint8_t(n), vl)); \ } -#define OPENCV_HAL_IMPL_RVV_SIGNED_SHIFT_OP(_Tpvec, suffix, width) \ +#define OPENCV_HAL_IMPL_RVV_SIGNED_SHIFT_OP(_Tpvec, suffix, vl) \ inline _Tpvec operator << (const _Tpvec& a, int n) \ { \ - vsetvlmax_e##width##m1(); \ - return _Tpvec(vsll_vx_##suffix##m1(a, uint8_t(n))); \ + return _Tpvec(vsll_vx_##suffix##m1(a, uint8_t(n), vl)); \ } \ inline _Tpvec operator >> (const _Tpvec& a, int n) \ { \ - vsetvlmax_e##width##m1(); \ - return _Tpvec(vsra_vx_##suffix##m1(a, uint8_t(n))); \ + return _Tpvec(vsra_vx_##suffix##m1(a, uint8_t(n), vl)); \ } \ template inline _Tpvec v_shl(const _Tpvec& a) \ { \ - vsetvlmax_e##width##m1(); \ - return _Tpvec(vsll_vx_##suffix##m1(a, uint8_t(n))); \ + return _Tpvec(vsll_vx_##suffix##m1(a, uint8_t(n), vl)); \ } \ template inline _Tpvec v_shr(const _Tpvec& a) \ { \ - vsetvlmax_e##width##m1(); \ - return _Tpvec(vsra_vx_##suffix##m1(a, uint8_t(n))); \ + return _Tpvec(vsra_vx_##suffix##m1(a, uint8_t(n), vl)); \ } -OPENCV_HAL_IMPL_RVV_UNSIGNED_SHIFT_OP(v_uint8x16, u8, 8) -OPENCV_HAL_IMPL_RVV_UNSIGNED_SHIFT_OP(v_uint16x8, u16, 16) -OPENCV_HAL_IMPL_RVV_UNSIGNED_SHIFT_OP(v_uint32x4, u32, 32) -OPENCV_HAL_IMPL_RVV_UNSIGNED_SHIFT_OP(v_uint64x2, u64, 64) -OPENCV_HAL_IMPL_RVV_SIGNED_SHIFT_OP(v_int8x16, i8, 8) -OPENCV_HAL_IMPL_RVV_SIGNED_SHIFT_OP(v_int16x8, i16, 16) -OPENCV_HAL_IMPL_RVV_SIGNED_SHIFT_OP(v_int32x4, i32, 32) -OPENCV_HAL_IMPL_RVV_SIGNED_SHIFT_OP(v_int64x2, i64, 64) +OPENCV_HAL_IMPL_RVV_UNSIGNED_SHIFT_OP(v_uint8x16, u8, 16) +OPENCV_HAL_IMPL_RVV_UNSIGNED_SHIFT_OP(v_uint16x8, u16, 8) +OPENCV_HAL_IMPL_RVV_UNSIGNED_SHIFT_OP(v_uint32x4, u32, 4) +OPENCV_HAL_IMPL_RVV_UNSIGNED_SHIFT_OP(v_uint64x2, u64, 2) +OPENCV_HAL_IMPL_RVV_SIGNED_SHIFT_OP(v_int8x16, i8, 16) +OPENCV_HAL_IMPL_RVV_SIGNED_SHIFT_OP(v_int16x8, i16, 8) +OPENCV_HAL_IMPL_RVV_SIGNED_SHIFT_OP(v_int32x4, i32, 4) +OPENCV_HAL_IMPL_RVV_SIGNED_SHIFT_OP(v_int64x2, i64, 2) ////////////// Comparison ////////////// -#define OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, op, intrin, suffix, width) \ +#define 
OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, op, intrin, suffix, vl) \ inline _Tpvec operator op (const _Tpvec& a, const _Tpvec& b) \ { \ - vsetvlmax_e##width##m1(); \ - return _Tpvec(vmerge_vxm_##suffix##m1(intrin(a, b), vzero_##suffix##m1(), 1)); \ + return _Tpvec(vmerge_vxm_##suffix##m1(intrin(a, b, vl), vmv_v_x_##suffix##m1(0, vl), 1, vl)); \ } -#define OPENCV_HAL_IMPL_RVV_FLOAT_CMP_OP(_Tpvec, op, intrin, suffix, width) \ +#define OPENCV_HAL_IMPL_RVV_FLOAT_CMP_OP(_Tpvec, op, intrin, suffix, vl) \ inline _Tpvec operator op (const _Tpvec& a, const _Tpvec& b) \ { \ - vsetvlmax_e##width##m1(); \ - return _Tpvec(vfmerge_vfm_##suffix##m1(intrin(a, b), vzero_##suffix##m1(), 1)); \ -} - -#define OPENCV_HAL_IMPL_RVV_UNSIGNED_CMP(_Tpvec, suffix, width) \ -OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, ==, vmseq_vv_##suffix##m1_b##width, suffix, width) \ -OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, !=, vmsne_vv_##suffix##m1_b##width, suffix, width) \ -OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, <, vmsltu_vv_##suffix##m1_b##width, suffix, width) \ -OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, >, vmsgtu_vv_##suffix##m1_b##width, suffix, width) \ -OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, <=, vmsleu_vv_##suffix##m1_b##width, suffix, width) \ -OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, >=, vmsgeu_vv_##suffix##m1_b##width, suffix, width) - -#define OPENCV_HAL_IMPL_RVV_SIGNED_CMP(_Tpvec, suffix, width) \ -OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, ==, vmseq_vv_##suffix##m1_b##width, suffix, width) \ -OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, !=, vmsne_vv_##suffix##m1_b##width, suffix, width) \ -OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, <, vmslt_vv_##suffix##m1_b##width, suffix, width) \ -OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, >, vmsgt_vv_##suffix##m1_b##width, suffix, width) \ -OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, <=, vmsle_vv_##suffix##m1_b##width, suffix, width) \ -OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, >=, vmsge_vv_##suffix##m1_b##width, suffix, width) - -#define OPENCV_HAL_IMPL_RVV_FLOAT_CMP(_Tpvec, suffix, width) \ -OPENCV_HAL_IMPL_RVV_FLOAT_CMP_OP(_Tpvec, ==, vmfeq_vv_##suffix##m1_b##width, suffix, width) \ -OPENCV_HAL_IMPL_RVV_FLOAT_CMP_OP(_Tpvec, !=, vmfne_vv_##suffix##m1_b##width, suffix, width) \ -OPENCV_HAL_IMPL_RVV_FLOAT_CMP_OP(_Tpvec, <, vmflt_vv_##suffix##m1_b##width, suffix, width) \ -OPENCV_HAL_IMPL_RVV_FLOAT_CMP_OP(_Tpvec, >, vmfgt_vv_##suffix##m1_b##width, suffix, width) \ -OPENCV_HAL_IMPL_RVV_FLOAT_CMP_OP(_Tpvec, <=, vmfle_vv_##suffix##m1_b##width, suffix, width) \ -OPENCV_HAL_IMPL_RVV_FLOAT_CMP_OP(_Tpvec, >=, vmfge_vv_##suffix##m1_b##width, suffix, width) - - -OPENCV_HAL_IMPL_RVV_UNSIGNED_CMP(v_uint8x16, u8, 8) -OPENCV_HAL_IMPL_RVV_UNSIGNED_CMP(v_uint16x8, u16, 16) -OPENCV_HAL_IMPL_RVV_UNSIGNED_CMP(v_uint32x4, u32, 32) -OPENCV_HAL_IMPL_RVV_UNSIGNED_CMP(v_uint64x2, u64, 64) -OPENCV_HAL_IMPL_RVV_SIGNED_CMP(v_int8x16, i8, 8) -OPENCV_HAL_IMPL_RVV_SIGNED_CMP(v_int16x8, i16, 16) -OPENCV_HAL_IMPL_RVV_SIGNED_CMP(v_int32x4, i32, 32) -OPENCV_HAL_IMPL_RVV_SIGNED_CMP(v_int64x2, i64, 64) -OPENCV_HAL_IMPL_RVV_FLOAT_CMP(v_float32x4, f32, 32) + return _Tpvec(vfmerge_vfm_##suffix##m1(intrin(a, b, vl), vfmv_v_f_##suffix##m1(0, vl), 1, vl)); \ +} + +#define OPENCV_HAL_IMPL_RVV_UNSIGNED_CMP(_Tpvec, suffix, width, vl) \ +OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, ==, vmseq_vv_##suffix##m1_b##width, suffix, vl) \ +OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, !=, vmsne_vv_##suffix##m1_b##width, suffix, vl) \ +OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, <, vmsltu_vv_##suffix##m1_b##width, suffix, vl) \ +OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, >, 
vmsgtu_vv_##suffix##m1_b##width, suffix, vl) \ +OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, <=, vmsleu_vv_##suffix##m1_b##width, suffix, vl) \ +OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, >=, vmsgeu_vv_##suffix##m1_b##width, suffix, vl) + +#define OPENCV_HAL_IMPL_RVV_SIGNED_CMP(_Tpvec, suffix, width, vl) \ +OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, ==, vmseq_vv_##suffix##m1_b##width, suffix, vl) \ +OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, !=, vmsne_vv_##suffix##m1_b##width, suffix, vl) \ +OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, <, vmslt_vv_##suffix##m1_b##width, suffix, vl) \ +OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, >, vmsgt_vv_##suffix##m1_b##width, suffix, vl) \ +OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, <=, vmsle_vv_##suffix##m1_b##width, suffix, vl) \ +OPENCV_HAL_IMPL_RVV_INT_CMP_OP(_Tpvec, >=, vmsge_vv_##suffix##m1_b##width, suffix, vl) + +#define OPENCV_HAL_IMPL_RVV_FLOAT_CMP(_Tpvec, suffix, width, vl) \ +OPENCV_HAL_IMPL_RVV_FLOAT_CMP_OP(_Tpvec, ==, vmfeq_vv_##suffix##m1_b##width, suffix, vl) \ +OPENCV_HAL_IMPL_RVV_FLOAT_CMP_OP(_Tpvec, !=, vmfne_vv_##suffix##m1_b##width, suffix, vl) \ +OPENCV_HAL_IMPL_RVV_FLOAT_CMP_OP(_Tpvec, <, vmflt_vv_##suffix##m1_b##width, suffix, vl) \ +OPENCV_HAL_IMPL_RVV_FLOAT_CMP_OP(_Tpvec, >, vmfgt_vv_##suffix##m1_b##width, suffix, vl) \ +OPENCV_HAL_IMPL_RVV_FLOAT_CMP_OP(_Tpvec, <=, vmfle_vv_##suffix##m1_b##width, suffix, vl) \ +OPENCV_HAL_IMPL_RVV_FLOAT_CMP_OP(_Tpvec, >=, vmfge_vv_##suffix##m1_b##width, suffix, vl) + + +OPENCV_HAL_IMPL_RVV_UNSIGNED_CMP(v_uint8x16, u8, 8, 16) +OPENCV_HAL_IMPL_RVV_UNSIGNED_CMP(v_uint16x8, u16, 16, 8) +OPENCV_HAL_IMPL_RVV_UNSIGNED_CMP(v_uint32x4, u32, 32, 4) +OPENCV_HAL_IMPL_RVV_UNSIGNED_CMP(v_uint64x2, u64, 64, 2) +OPENCV_HAL_IMPL_RVV_SIGNED_CMP(v_int8x16, i8, 8, 16) +OPENCV_HAL_IMPL_RVV_SIGNED_CMP(v_int16x8, i16, 16, 8) +OPENCV_HAL_IMPL_RVV_SIGNED_CMP(v_int32x4, i32, 32, 4) +OPENCV_HAL_IMPL_RVV_SIGNED_CMP(v_int64x2, i64, 64, 2) +OPENCV_HAL_IMPL_RVV_FLOAT_CMP(v_float32x4, f32, 32, 4) #if CV_SIMD128_64F -OPENCV_HAL_IMPL_RVV_FLOAT_CMP(v_float64x2, f64, 64) +OPENCV_HAL_IMPL_RVV_FLOAT_CMP(v_float64x2, f64, 64, 2) #endif inline v_float32x4 v_not_nan(const v_float32x4& a) @@ -1417,99 +1336,106 @@ inline v_float64x2 v_not_nan(const v_float64x2& a) ////////////// Min/Max ////////////// -#define OPENCV_HAL_IMPL_RVV_BIN_FUNC(_Tpvec, func, intrin, width) \ +#define OPENCV_HAL_IMPL_RVV_BIN_FUNC(_Tpvec, func, intrin, vl) \ inline _Tpvec func(const _Tpvec& a, const _Tpvec& b) \ { \ - vsetvlmax_e##width##m1(); \ - return _Tpvec(intrin(a, b)); \ -} - -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint8x16, v_min, vminu_vv_u8m1, 8) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint8x16, v_max, vmaxu_vv_u8m1, 8) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int8x16, v_min, vmin_vv_i8m1, 8) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int8x16, v_max, vmax_vv_i8m1, 8) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint16x8, v_min, vminu_vv_u16m1, 16) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint16x8, v_max, vmaxu_vv_u16m1, 16) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int16x8, v_min, vmin_vv_i16m1, 16) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int16x8, v_max, vmax_vv_i16m1, 16) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint32x4, v_min, vminu_vv_u32m1, 32) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint32x4, v_max, vmaxu_vv_u32m1, 32) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int32x4, v_min, vmin_vv_i32m1, 32) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int32x4, v_max, vmax_vv_i32m1, 32) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_float32x4, v_min, vfmin_vv_f32m1, 32) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_float32x4, v_max, vfmax_vv_f32m1, 32) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint64x2, v_min, vminu_vv_u64m1, 64) 
-OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint64x2, v_max, vmaxu_vv_u64m1, 64) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int64x2, v_min, vmin_vv_i64m1, 64) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int64x2, v_max, vmax_vv_i64m1, 64) + return _Tpvec(intrin(a, b, vl)); \ +} + +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint8x16, v_min, vminu_vv_u8m1, 16) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint8x16, v_max, vmaxu_vv_u8m1, 16) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int8x16, v_min, vmin_vv_i8m1, 16) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int8x16, v_max, vmax_vv_i8m1, 16) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint16x8, v_min, vminu_vv_u16m1, 8) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint16x8, v_max, vmaxu_vv_u16m1, 8) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int16x8, v_min, vmin_vv_i16m1, 8) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int16x8, v_max, vmax_vv_i16m1, 8) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint32x4, v_min, vminu_vv_u32m1, 4) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint32x4, v_max, vmaxu_vv_u32m1, 4) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int32x4, v_min, vmin_vv_i32m1, 4) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int32x4, v_max, vmax_vv_i32m1, 4) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_float32x4, v_min, vfmin_vv_f32m1, 4) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_float32x4, v_max, vfmax_vv_f32m1, 4) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint64x2, v_min, vminu_vv_u64m1, 2) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint64x2, v_max, vmaxu_vv_u64m1, 2) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int64x2, v_min, vmin_vv_i64m1, 2) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int64x2, v_max, vmax_vv_i64m1, 2) #if CV_SIMD128_64F -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_float64x2, v_min, vfmin_vv_f64m1, 64) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_float64x2, v_max, vfmax_vv_f64m1, 64) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_float64x2, v_min, vfmin_vv_f64m1, 2) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_float64x2, v_max, vfmax_vv_f64m1, 2) #endif ////////////// Arithmetics wrap ////////////// -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint8x16, v_add_wrap, vadd_vv_u8m1, 8) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int8x16, v_add_wrap, vadd_vv_i8m1, 8) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint16x8, v_add_wrap, vadd_vv_u16m1, 16) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int16x8, v_add_wrap, vadd_vv_i16m1, 16) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint8x16, v_sub_wrap, vsub_vv_u8m1, 8) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int8x16, v_sub_wrap, vsub_vv_i8m1, 8) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint16x8, v_sub_wrap, vsub_vv_u16m1, 16) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int16x8, v_sub_wrap, vsub_vv_i16m1, 16) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint8x16, v_mul_wrap, vmul_vv_u8m1, 8) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int8x16, v_mul_wrap, vmul_vv_i8m1, 8) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint16x8, v_mul_wrap, vmul_vv_u16m1, 16) -OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int16x8, v_mul_wrap, vmul_vv_i16m1, 16) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint8x16, v_add_wrap, vadd_vv_u8m1, 16) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int8x16, v_add_wrap, vadd_vv_i8m1, 16) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint16x8, v_add_wrap, vadd_vv_u16m1, 8) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int16x8, v_add_wrap, vadd_vv_i16m1, 8) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint8x16, v_sub_wrap, vsub_vv_u8m1, 16) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int8x16, v_sub_wrap, vsub_vv_i8m1, 16) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint16x8, v_sub_wrap, vsub_vv_u16m1, 8) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int16x8, v_sub_wrap, vsub_vv_i16m1, 8) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint8x16, v_mul_wrap, vmul_vv_u8m1, 16) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int8x16, v_mul_wrap, vmul_vv_i8m1, 16) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_uint16x8, v_mul_wrap, vmul_vv_u16m1, 8) +OPENCV_HAL_IMPL_RVV_BIN_FUNC(v_int16x8, v_mul_wrap, vmul_vv_i16m1, 8) ////////////// Reduce 
////////////// -#define OPENCV_HAL_IMPL_RVV_REDUCE_SUM(_Tpvec, _wTpvec, _nwTpvec, scalartype, suffix, wsuffix, wwidth, red) \ +#define OPENCV_HAL_IMPL_RVV_REDUCE_SUM(_Tpvec, _wTpvec, _nwTpvec, scalartype, suffix, wsuffix, vl, red) \ inline scalartype v_reduce_sum(const _Tpvec& a) \ { \ - vsetvlmax_e##wwidth##m1(); \ - _nwTpvec zero = vzero_##wsuffix##m1(); \ - _nwTpvec res = vzero_##wsuffix##m1(); \ - res = v##red##_vs_##suffix##m1_##wsuffix##m1(res, a, zero); \ + _nwTpvec zero = vmv_v_x_##wsuffix##m1(0, vl); \ + _nwTpvec res = vmv_v_x_##wsuffix##m1(0, vl); \ + res = v##red##_vs_##suffix##m1_##wsuffix##m1(res, a, zero, vl); \ return (scalartype)(_wTpvec(res).get0()); \ } OPENCV_HAL_IMPL_RVV_REDUCE_SUM(v_uint8x16, v_uint16x8, vuint16m1_t, unsigned, u8, u16, 16, wredsumu) OPENCV_HAL_IMPL_RVV_REDUCE_SUM(v_int8x16, v_int16x8, vint16m1_t, int, i8, i16, 16, wredsum) -OPENCV_HAL_IMPL_RVV_REDUCE_SUM(v_uint16x8, v_uint32x4, vuint32m1_t, unsigned, u16, u32, 32, wredsumu) -OPENCV_HAL_IMPL_RVV_REDUCE_SUM(v_int16x8, v_int32x4, vint32m1_t, int, i16, i32, 32, wredsum) -OPENCV_HAL_IMPL_RVV_REDUCE_SUM(v_uint32x4, v_uint64x2, vuint64m1_t, unsigned, u32, u64, 64, wredsumu) -OPENCV_HAL_IMPL_RVV_REDUCE_SUM(v_int32x4, v_int64x2, vint64m1_t, int, i32, i64, 64, wredsum) -OPENCV_HAL_IMPL_RVV_REDUCE_SUM(v_float32x4, v_float32x4, vfloat32m1_t, float, f32, f32, 32, fredsum) -OPENCV_HAL_IMPL_RVV_REDUCE_SUM(v_uint64x2, v_uint64x2, vuint64m1_t, uint64, u64, u64, 64, redsum) -OPENCV_HAL_IMPL_RVV_REDUCE_SUM(v_int64x2, v_int64x2, vint64m1_t, int64, i64, i64, 64, redsum) +OPENCV_HAL_IMPL_RVV_REDUCE_SUM(v_uint16x8, v_uint32x4, vuint32m1_t, unsigned, u16, u32, 8, wredsumu) +OPENCV_HAL_IMPL_RVV_REDUCE_SUM(v_int16x8, v_int32x4, vint32m1_t, int, i16, i32, 8, wredsum) +OPENCV_HAL_IMPL_RVV_REDUCE_SUM(v_uint32x4, v_uint64x2, vuint64m1_t, unsigned, u32, u64, 4, wredsumu) +OPENCV_HAL_IMPL_RVV_REDUCE_SUM(v_int32x4, v_int64x2, vint64m1_t, int, i32, i64, 4, wredsum) +OPENCV_HAL_IMPL_RVV_REDUCE_SUM(v_uint64x2, v_uint64x2, vuint64m1_t, uint64, u64, u64, 4, redsum) +OPENCV_HAL_IMPL_RVV_REDUCE_SUM(v_int64x2, v_int64x2, vint64m1_t, int64, i64, i64, 4, redsum) + +#define OPENCV_HAL_IMPL_RVV_REDUCE_SUM_FP(_Tpvec, _wTpvec, _nwTpvec, scalartype, suffix, wsuffix, vl, red) \ +inline scalartype v_reduce_sum(const _Tpvec& a) \ +{ \ + _nwTpvec zero = vfmv_v_f_##wsuffix##m1(0, vl); \ + _nwTpvec res = vfmv_v_f_##wsuffix##m1(0, vl); \ + res = v##red##_vs_##suffix##m1_##wsuffix##m1(res, a, zero, vl); \ + return (scalartype)(_wTpvec(res).get0()); \ +} + +OPENCV_HAL_IMPL_RVV_REDUCE_SUM_FP(v_float32x4, v_float32x4, vfloat32m1_t, float, f32, f32, 8, fredsum) #if CV_SIMD128_64F -OPENCV_HAL_IMPL_RVV_REDUCE_SUM(v_float64x2, v_float64x2, vfloat64m1_t, double, f64, f64, 64, fredsum) +OPENCV_HAL_IMPL_RVV_REDUCE_SUM_FP(v_float64x2, v_float64x2, vfloat64m1_t, double, f64, f64, 4, fredsum) #endif -#define OPENCV_HAL_IMPL_RVV_REDUCE(_Tpvec, func, scalartype, suffix, width, red) \ +#define OPENCV_HAL_IMPL_RVV_REDUCE(_Tpvec, func, scalartype, suffix, vl, red) \ inline scalartype v_reduce_##func(const _Tpvec& a) \ { \ - vsetvlmax_e##width##m1(); \ - _Tpvec res = _Tpvec(v##red##_vs_##suffix##m1_##suffix##m1(a, a, a)); \ + _Tpvec res = _Tpvec(v##red##_vs_##suffix##m1_##suffix##m1(a, a, a, vl)); \ return scalartype(res.get0()); \ } -OPENCV_HAL_IMPL_RVV_REDUCE(v_uint8x16, min, uchar, u8, 8, redminu) -OPENCV_HAL_IMPL_RVV_REDUCE(v_int8x16, min, schar, i8, 8, redmin) -OPENCV_HAL_IMPL_RVV_REDUCE(v_uint16x8, min, ushort, u16, 16, redminu) -OPENCV_HAL_IMPL_RVV_REDUCE(v_int16x8, 
min, short, i16, 16, redmin) -OPENCV_HAL_IMPL_RVV_REDUCE(v_uint32x4, min, unsigned, u32, 32, redminu) -OPENCV_HAL_IMPL_RVV_REDUCE(v_int32x4, min, int, i32, 32, redmin) -OPENCV_HAL_IMPL_RVV_REDUCE(v_float32x4, min, float, f32, 32, fredmin) -OPENCV_HAL_IMPL_RVV_REDUCE(v_uint8x16, max, uchar, u8, 8, redmaxu) -OPENCV_HAL_IMPL_RVV_REDUCE(v_int8x16, max, schar, i8, 8, redmax) -OPENCV_HAL_IMPL_RVV_REDUCE(v_uint16x8, max, ushort, u16, 16, redmaxu) -OPENCV_HAL_IMPL_RVV_REDUCE(v_int16x8, max, short, i16, 16, redmax) -OPENCV_HAL_IMPL_RVV_REDUCE(v_uint32x4, max, unsigned, u32, 32, redmaxu) -OPENCV_HAL_IMPL_RVV_REDUCE(v_int32x4, max, int, i32, 32, redmax) -OPENCV_HAL_IMPL_RVV_REDUCE(v_float32x4, max, float, f32, 32, fredmax) +OPENCV_HAL_IMPL_RVV_REDUCE(v_uint8x16, min, uchar, u8, 16, redminu) +OPENCV_HAL_IMPL_RVV_REDUCE(v_int8x16, min, schar, i8, 16, redmin) +OPENCV_HAL_IMPL_RVV_REDUCE(v_uint16x8, min, ushort, u16, 8, redminu) +OPENCV_HAL_IMPL_RVV_REDUCE(v_int16x8, min, short, i16, 8, redmin) +OPENCV_HAL_IMPL_RVV_REDUCE(v_uint32x4, min, unsigned, u32, 4, redminu) +OPENCV_HAL_IMPL_RVV_REDUCE(v_int32x4, min, int, i32, 4, redmin) +OPENCV_HAL_IMPL_RVV_REDUCE(v_float32x4, min, float, f32, 4, fredmin) +OPENCV_HAL_IMPL_RVV_REDUCE(v_uint8x16, max, uchar, u8, 16, redmaxu) +OPENCV_HAL_IMPL_RVV_REDUCE(v_int8x16, max, schar, i8, 16, redmax) +OPENCV_HAL_IMPL_RVV_REDUCE(v_uint16x8, max, ushort, u16, 8, redmaxu) +OPENCV_HAL_IMPL_RVV_REDUCE(v_int16x8, max, short, i16, 8, redmax) +OPENCV_HAL_IMPL_RVV_REDUCE(v_uint32x4, max, unsigned, u32, 4, redmaxu) +OPENCV_HAL_IMPL_RVV_REDUCE(v_int32x4, max, int, i32, 4, redmax) +OPENCV_HAL_IMPL_RVV_REDUCE(v_float32x4, max, float, f32, 4, fredmax) inline v_float32x4 v_reduce_sum4(const v_float32x4& a, const v_float32x4& b, @@ -1522,16 +1448,14 @@ inline v_float32x4 v_reduce_sum4(const v_float32x4& a, const v_float32x4& b, v_reduce_sum(c), v_reduce_sum(d) }; - vsetvlmax_e32m1(); - return v_float32x4(vle32_v_f32m1(elems)); + return v_float32x4(vle32_v_f32m1(elems, 4)); } ////////////// Square-Root ////////////// inline v_float32x4 v_sqrt(const v_float32x4& x) { - vsetvlmax_e32m1(); - return v_float32x4(vfsqrt_v_f32m1(x)); + return v_float32x4(vfsqrt_v_f32m1(x, 4)); } inline v_float32x4 v_invsqrt(const v_float32x4& x) @@ -1543,8 +1467,7 @@ inline v_float32x4 v_invsqrt(const v_float32x4& x) #if CV_SIMD128_64F inline v_float64x2 v_sqrt(const v_float64x2& x) { - vsetvlmax_e64m1(); - return v_float64x2(vfsqrt_v_f64m1(x)); + return v_float64x2(vfsqrt_v_f64m1(x, 4)); } inline v_float64x2 v_invsqrt(const v_float64x2& x) @@ -1556,29 +1479,25 @@ inline v_float64x2 v_invsqrt(const v_float64x2& x) inline v_float32x4 v_magnitude(const v_float32x4& a, const v_float32x4& b) { - vsetvlmax_e32m1(); - v_float32x4 x(vfmacc_vv_f32m1(vfmul_vv_f32m1(a, a), b, b)); + v_float32x4 x(vfmacc_vv_f32m1(vfmul_vv_f32m1(a, a, 4), b, b, 4)); return v_sqrt(x); } inline v_float32x4 v_sqr_magnitude(const v_float32x4& a, const v_float32x4& b) { - vsetvlmax_e32m1(); - return v_float32x4(vfmacc_vv_f32m1(vfmul_vv_f32m1(a, a), b, b)); + return v_float32x4(vfmacc_vv_f32m1(vfmul_vv_f32m1(a, a, 4), b, b, 4)); } #if CV_SIMD128_64F inline v_float64x2 v_magnitude(const v_float64x2& a, const v_float64x2& b) { - vsetvlmax_e64m1(); - v_float64x2 x(vfmacc_vv_f64m1(vfmul_vv_f64m1(a, a), b, b)); + v_float64x2 x(vfmacc_vv_f64m1(vfmul_vv_f64m1(a, a, 2), b, b, 2)); return v_sqrt(x); } inline v_float64x2 v_sqr_magnitude(const v_float64x2& a, const v_float64x2& b) { - vsetvlmax_e64m1(); - return v_float64x2(vfmacc_vv_f64m1(vfmul_vv_f64m1(a, 
a), b, b)); + return v_float64x2(vfmacc_vv_f64m1(vfmul_vv_f64m1(a, a, 2), b, b, 2)); } #endif @@ -1586,13 +1505,11 @@ inline v_float64x2 v_sqr_magnitude(const v_float64x2& a, const v_float64x2& b) inline v_float32x4 v_fma(const v_float32x4& a, const v_float32x4& b, const v_float32x4& c) { - vsetvlmax_e32m1(); - return v_float32x4(vfmacc_vv_f32m1(c, a, b)); + return v_float32x4(vfmacc_vv_f32m1(c, a, b, 4)); } inline v_int32x4 v_fma(const v_int32x4& a, const v_int32x4& b, const v_int32x4& c) { - vsetvlmax_e32m1(); - return v_int32x4(vmacc_vv_i32m1(c, a, b)); + return v_int32x4(vmacc_vv_i32m1(c, a, b, 4)); } inline v_float32x4 v_muladd(const v_float32x4& a, const v_float32x4& b, const v_float32x4& c) @@ -1608,8 +1525,7 @@ inline v_int32x4 v_muladd(const v_int32x4& a, const v_int32x4& b, const v_int32x #if CV_SIMD128_64F inline v_float64x2 v_fma(const v_float64x2& a, const v_float64x2& b, const v_float64x2& c) { - vsetvlmax_e64m1(); - return v_float64x2(vfmacc_vv_f64m1(c, a, b)); + return v_float64x2(vfmacc_vv_f64m1(c, a, b, 2)); } inline v_float64x2 v_muladd(const v_float64x2& a, const v_float64x2& b, const v_float64x2& c) @@ -1620,24 +1536,22 @@ inline v_float64x2 v_muladd(const v_float64x2& a, const v_float64x2& b, const v_ ////////////// Check all/any ////////////// -#define OPENCV_HAL_IMPL_RVV_CHECK_ALLANY(_Tpvec, suffix, shift, width) \ +#define OPENCV_HAL_IMPL_RVV_CHECK_ALLANY(_Tpvec, suffix, shift, vl) \ inline bool v_check_all(const _Tpvec& a) \ { \ - vsetvlmax_e##width##m1(); \ - v_uint64x2 v = v_uint64x2((vuint64m1_t)vsrl_vx_##suffix##m1(vnot_v_##suffix##m1(a), shift)); \ + v_uint64x2 v = v_uint64x2((vuint64m1_t)vsrl_vx_##suffix##m1(vnot_v_##suffix##m1(a, vl), shift, vl)); \ return (v.val[0] | v.val[1]) == 0; \ } \ inline bool v_check_any(const _Tpvec& a) \ { \ - vsetvlmax_e##width##m1(); \ - v_uint64x2 v = v_uint64x2((vuint64m1_t)vsrl_vx_##suffix##m1(a, shift)); \ + v_uint64x2 v = v_uint64x2((vuint64m1_t)vsrl_vx_##suffix##m1(a, shift, vl)); \ return (v.val[0] | v.val[1]) != 0; \ } -OPENCV_HAL_IMPL_RVV_CHECK_ALLANY(v_uint8x16, u8, 7, 8) -OPENCV_HAL_IMPL_RVV_CHECK_ALLANY(v_uint16x8, u16, 15, 16) -OPENCV_HAL_IMPL_RVV_CHECK_ALLANY(v_uint32x4, u32, 31, 32) -OPENCV_HAL_IMPL_RVV_CHECK_ALLANY(v_uint64x2, u64, 63, 64) +OPENCV_HAL_IMPL_RVV_CHECK_ALLANY(v_uint8x16, u8, 7, 16) +OPENCV_HAL_IMPL_RVV_CHECK_ALLANY(v_uint16x8, u16, 15, 8) +OPENCV_HAL_IMPL_RVV_CHECK_ALLANY(v_uint32x4, u32, 31, 4) +OPENCV_HAL_IMPL_RVV_CHECK_ALLANY(v_uint64x2, u64, 63, 2) inline bool v_check_all(const v_int8x16& a) @@ -1690,16 +1604,15 @@ OPENCV_HAL_IMPL_RVV_ABSDIFF(v_float64x2, absdiff) OPENCV_HAL_IMPL_RVV_ABSDIFF(v_int8x16, absdiffs) OPENCV_HAL_IMPL_RVV_ABSDIFF(v_int16x8, absdiffs) -#define OPENCV_HAL_IMPL_RVV_ABSDIFF_S(_Tpvec, _rTpvec, _nwTpvec, sub, rshr, width) \ +#define OPENCV_HAL_IMPL_RVV_ABSDIFF_S(_Tpvec, _rTpvec, _nwTpvec, sub, rshr, vl) \ inline _rTpvec v_absdiff(const _Tpvec& a, const _Tpvec& b) \ { \ - vsetvlmax_e##width##m1(); \ - return _rTpvec(rshr((_nwTpvec)sub(v_max(a, b), v_min(a, b)), 0)); \ + return _rTpvec(rshr((_nwTpvec)sub(v_max(a, b), v_min(a, b), vl), 0, vl)); \ } -OPENCV_HAL_IMPL_RVV_ABSDIFF_S(v_int8x16, v_uint8x16, vuint16m2_t, vwsub_vv_i16m2, vnclipu_wx_u8m1, 8) -OPENCV_HAL_IMPL_RVV_ABSDIFF_S(v_int16x8, v_uint16x8, vuint32m2_t, vwsub_vv_i32m2, vnclipu_wx_u16m1, 16) -OPENCV_HAL_IMPL_RVV_ABSDIFF_S(v_int32x4, v_uint32x4, vuint64m2_t, vwsub_vv_i64m2, vnclipu_wx_u32m1, 32) +OPENCV_HAL_IMPL_RVV_ABSDIFF_S(v_int8x16, v_uint8x16, vuint16m2_t, vwsub_vv_i16m2, vnclipu_wx_u8m1, 16) 
+OPENCV_HAL_IMPL_RVV_ABSDIFF_S(v_int16x8, v_uint16x8, vuint32m2_t, vwsub_vv_i32m2, vnclipu_wx_u16m1, 8) +OPENCV_HAL_IMPL_RVV_ABSDIFF_S(v_int32x4, v_uint32x4, vuint64m2_t, vwsub_vv_i64m2, vnclipu_wx_u32m1, 4) #define OPENCV_HAL_IMPL_RVV_ABS(_Tprvec, _Tpvec, suffix) \ inline _Tprvec v_abs(const _Tpvec& a) \ @@ -1732,149 +1645,152 @@ OPENCV_HAL_IMPL_RVV_REDUCE_SAD(v_float32x4, float) ////////////// Select ////////////// -#define OPENCV_HAL_IMPL_RVV_SELECT(_Tpvec, merge, ne, width) \ +#define OPENCV_HAL_IMPL_RVV_SELECT(_Tpvec, merge, ne, vl) \ inline _Tpvec v_select(const _Tpvec& mask, const _Tpvec& a, const _Tpvec& b) \ { \ - vsetvlmax_e##width##m1(); \ - return _Tpvec(merge(ne(mask, 0), b, a)); \ + return _Tpvec(merge(ne(mask, 0, vl), b, a, vl)); \ } -OPENCV_HAL_IMPL_RVV_SELECT(v_uint8x16, vmerge_vvm_u8m1, vmsne_vx_u8m1_b8, 8) -OPENCV_HAL_IMPL_RVV_SELECT(v_int8x16, vmerge_vvm_i8m1, vmsne_vx_i8m1_b8, 8) -OPENCV_HAL_IMPL_RVV_SELECT(v_uint16x8, vmerge_vvm_u16m1, vmsne_vx_u16m1_b16, 16) -OPENCV_HAL_IMPL_RVV_SELECT(v_int16x8, vmerge_vvm_i16m1, vmsne_vx_i16m1_b16, 16) -OPENCV_HAL_IMPL_RVV_SELECT(v_uint32x4, vmerge_vvm_u32m1, vmsne_vx_u32m1_b32, 32) -OPENCV_HAL_IMPL_RVV_SELECT(v_int32x4, vmerge_vvm_i32m1, vmsne_vx_i32m1_b32, 32) -OPENCV_HAL_IMPL_RVV_SELECT(v_float32x4, vmerge_vvm_f32m1, vmfne_vf_f32m1_b32, 32) +OPENCV_HAL_IMPL_RVV_SELECT(v_uint8x16, vmerge_vvm_u8m1, vmsne_vx_u8m1_b8, 16) +OPENCV_HAL_IMPL_RVV_SELECT(v_int8x16, vmerge_vvm_i8m1, vmsne_vx_i8m1_b8, 16) +OPENCV_HAL_IMPL_RVV_SELECT(v_uint16x8, vmerge_vvm_u16m1, vmsne_vx_u16m1_b16, 8) +OPENCV_HAL_IMPL_RVV_SELECT(v_int16x8, vmerge_vvm_i16m1, vmsne_vx_i16m1_b16, 8) +OPENCV_HAL_IMPL_RVV_SELECT(v_uint32x4, vmerge_vvm_u32m1, vmsne_vx_u32m1_b32, 4) +OPENCV_HAL_IMPL_RVV_SELECT(v_int32x4, vmerge_vvm_i32m1, vmsne_vx_i32m1_b32, 4) +OPENCV_HAL_IMPL_RVV_SELECT(v_float32x4, vmerge_vvm_f32m1, vmfne_vf_f32m1_b32, 4) #if CV_SIMD128_64F -OPENCV_HAL_IMPL_RVV_SELECT(v_float64x2, vmerge_vvm_f64m1, vmfne_vf_f64m1_b64, 64) +OPENCV_HAL_IMPL_RVV_SELECT(v_float64x2, vmerge_vvm_f64m1, vmfne_vf_f64m1_b64, 2) #endif ////////////// Rotate shift ////////////// -#define OPENCV_HAL_IMPL_RVV_ROTATE_OP(_Tpvec, suffix, width) \ +#define OPENCV_HAL_IMPL_RVV_ROTATE_INTEGER(_Tpvec, suffix, vl) \ template inline _Tpvec v_rotate_right(const _Tpvec& a) \ { \ - vsetvlmax_e##width##m1(); \ - return _Tpvec(vslidedown_vx_##suffix##m1(vzero_##suffix##m1(), a, n)); \ + return _Tpvec(vslidedown_vx_##suffix##m1(vmv_v_x_##suffix##m1(0, vl), a, n, vl)); \ } \ template inline _Tpvec v_rotate_left(const _Tpvec& a) \ { \ - vsetvlmax_e##width##m1(); \ - return _Tpvec(vslideup_vx_##suffix##m1(vzero_##suffix##m1(), a, n)); \ + return _Tpvec(vslideup_vx_##suffix##m1(vmv_v_x_##suffix##m1(0, vl), a, n, vl)); \ } \ template<> inline _Tpvec v_rotate_left<0>(const _Tpvec& a) \ { return a; } \ template inline _Tpvec v_rotate_right(const _Tpvec& a, const _Tpvec& b) \ { \ - vsetvlmax_e##width##m1(); \ - return _Tpvec(vslideup_vx_##suffix##m1(vslidedown_vx_##suffix##m1(vzero_##suffix##m1(), a, n), b, _Tpvec::nlanes - n)); \ + return _Tpvec(vslideup_vx_##suffix##m1(vslidedown_vx_##suffix##m1(vmv_v_x_##suffix##m1(0, vl), a, n, vl), b, _Tpvec::nlanes - n, vl)); \ } \ template inline _Tpvec v_rotate_left(const _Tpvec& a, const _Tpvec& b) \ { \ - vsetvlmax_e##width##m1(); \ - return _Tpvec(vslideup_vx_##suffix##m1(vslidedown_vx_##suffix##m1(vzero_##suffix##m1(), b, _Tpvec::nlanes - n), a, n)); \ + return _Tpvec(vslideup_vx_##suffix##m1(vslidedown_vx_##suffix##m1(vmv_v_x_##suffix##m1(0, vl), b, _Tpvec::nlanes - 
n, vl), a, n, vl)); \ } \ template<> inline _Tpvec v_rotate_left<0>(const _Tpvec& a, const _Tpvec& b) \ { CV_UNUSED(b); return a; } +OPENCV_HAL_IMPL_RVV_ROTATE_INTEGER(v_uint8x16, u8, 16) +OPENCV_HAL_IMPL_RVV_ROTATE_INTEGER(v_int8x16, i8, 16) +OPENCV_HAL_IMPL_RVV_ROTATE_INTEGER(v_uint16x8, u16, 8) +OPENCV_HAL_IMPL_RVV_ROTATE_INTEGER(v_int16x8, i16, 8) +OPENCV_HAL_IMPL_RVV_ROTATE_INTEGER(v_uint32x4, u32, 4) +OPENCV_HAL_IMPL_RVV_ROTATE_INTEGER(v_int32x4, i32, 4) +OPENCV_HAL_IMPL_RVV_ROTATE_INTEGER(v_uint64x2, u64, 2) +OPENCV_HAL_IMPL_RVV_ROTATE_INTEGER(v_int64x2, i64, 2) + +#define OPENCV_HAL_IMPL_RVV_ROTATE_FP(_Tpvec, suffix, vl) \ +template inline _Tpvec v_rotate_right(const _Tpvec& a) \ +{ \ + return _Tpvec(vslidedown_vx_##suffix##m1(vfmv_v_f_##suffix##m1(0, vl), a, n, vl)); \ +} \ +template inline _Tpvec v_rotate_left(const _Tpvec& a) \ +{ \ + return _Tpvec(vslideup_vx_##suffix##m1(vfmv_v_f_##suffix##m1(0, vl), a, n, vl)); \ +} \ +template<> inline _Tpvec v_rotate_left<0>(const _Tpvec& a) \ +{ return a; } \ +template inline _Tpvec v_rotate_right(const _Tpvec& a, const _Tpvec& b) \ +{ \ + return _Tpvec(vslideup_vx_##suffix##m1(vslidedown_vx_##suffix##m1(vfmv_v_f_##suffix##m1(0, vl), a, n, vl), b, _Tpvec::nlanes - n, vl)); \ +} \ +template inline _Tpvec v_rotate_left(const _Tpvec& a, const _Tpvec& b) \ +{ \ + return _Tpvec(vslideup_vx_##suffix##m1(vslidedown_vx_##suffix##m1(vfmv_v_f_##suffix##m1(0, vl), b, _Tpvec::nlanes - n, vl), a, n, vl)); \ +} \ +template<> inline _Tpvec v_rotate_left<0>(const _Tpvec& a, const _Tpvec& b) \ +{ CV_UNUSED(b); return a; } -OPENCV_HAL_IMPL_RVV_ROTATE_OP(v_uint8x16, u8, 8) -OPENCV_HAL_IMPL_RVV_ROTATE_OP(v_int8x16, i8, 8) -OPENCV_HAL_IMPL_RVV_ROTATE_OP(v_uint16x8, u16, 16) -OPENCV_HAL_IMPL_RVV_ROTATE_OP(v_int16x8, i16, 16) -OPENCV_HAL_IMPL_RVV_ROTATE_OP(v_uint32x4, u32, 32) -OPENCV_HAL_IMPL_RVV_ROTATE_OP(v_int32x4, i32, 32) -OPENCV_HAL_IMPL_RVV_ROTATE_OP(v_float32x4, f32, 32) -OPENCV_HAL_IMPL_RVV_ROTATE_OP(v_uint64x2, u64, 64) -OPENCV_HAL_IMPL_RVV_ROTATE_OP(v_int64x2, i64, 64) +OPENCV_HAL_IMPL_RVV_ROTATE_FP(v_float32x4, f32, 4) #if CV_SIMD128_64F -OPENCV_HAL_IMPL_RVV_ROTATE_OP(v_float64x2, f64, 64) +OPENCV_HAL_IMPL_RVV_ROTATE_FP(v_float64x2, f64, 2) #endif ////////////// Convert to float ////////////// inline v_float32x4 v_cvt_f32(const v_int32x4& a) { - vsetvlmax_e32m1(); - return v_float32x4(vfcvt_f_x_v_f32m1(a)); + return v_float32x4(vfcvt_f_x_v_f32m1(a, 4)); } #if CV_SIMD128_64F inline v_float32x4 v_cvt_f32(const v_float64x2& a) { double arr[4] = {a.val[0], a.val[1], 0, 0}; - vsetvlmax_e64m2(); - vfloat64m2_t tmp = vle64_v_f64m2(arr); - vsetvlmax_e32m1(); - return v_float32x4(vfncvt_f_f_w_f32m1(tmp)); + vfloat64m2_t tmp = vle64_v_f64m2(arr, 4); + return v_float32x4(vfncvt_f_f_w_f32m1(tmp, 4)); } inline v_float32x4 v_cvt_f32(const v_float64x2& a, const v_float64x2& b) { double arr[4] = {a.val[0], a.val[1], b.val[0], b.val[1]}; - vsetvlmax_e64m2(); - vfloat64m2_t tmp = vle64_v_f64m2(arr); - vsetvlmax_e32m1(); - return v_float32x4(vfncvt_f_f_w_f32m1(tmp)); + vfloat64m2_t tmp = vle64_v_f64m2(arr, 4); + return v_float32x4(vfncvt_f_f_w_f32m1(tmp, 4)); } inline v_float64x2 v_cvt_f64(const v_int32x4& a) { double CV_DECL_ALIGNED(32) ptr[4] = {0}; - vsetvlmax_e64m2(); - vse64_v_f64m2(ptr, vfwcvt_f_x_v_f64m2(a)); + vse64_v_f64m2(ptr, vfwcvt_f_x_v_f64m2(a, 4), 4); double CV_DECL_ALIGNED(32) elems[2] = { ptr[0], ptr[1] }; - vsetvlmax_e64m1(); - return v_float64x2(vle64_v_f64m1(elems)); + return v_float64x2(vle64_v_f64m1(elems, 2)); } inline v_float64x2 
v_cvt_f64_high(const v_int32x4& a) { double CV_DECL_ALIGNED(32) ptr[4] = {0}; - vsetvlmax_e64m2(); - vse64_v_f64m2(ptr, vfwcvt_f_x_v_f64m2(a)); + vse64_v_f64m2(ptr, vfwcvt_f_x_v_f64m2(a, 4), 4); double CV_DECL_ALIGNED(32) elems[2] = { ptr[2], ptr[3] }; - vsetvlmax_e64m1(); - return v_float64x2(vle64_v_f64m1(elems)); + return v_float64x2(vle64_v_f64m1(elems, 2)); } inline v_float64x2 v_cvt_f64(const v_float32x4& a) { double CV_DECL_ALIGNED(32) ptr[4] = {0}; - vsetvlmax_e64m2(); - vse64_v_f64m2(ptr, vfwcvt_f_f_v_f64m2(a)); + vse64_v_f64m2(ptr, vfwcvt_f_f_v_f64m2(a, 4), 4); double CV_DECL_ALIGNED(32) elems[2] = { ptr[0], ptr[1] }; - vsetvlmax_e64m1(); - return v_float64x2(vle64_v_f64m1(elems)); + return v_float64x2(vle64_v_f64m1(elems, 2)); } inline v_float64x2 v_cvt_f64_high(const v_float32x4& a) { double CV_DECL_ALIGNED(32) ptr[4] = {0}; - vsetvlmax_e64m2(); - vse64_v_f64m2(ptr, vfwcvt_f_f_v_f64m2(a)); + vse64_v_f64m2(ptr, vfwcvt_f_f_v_f64m2(a, 4), 4); double CV_DECL_ALIGNED(32) elems[2] = { ptr[2], ptr[3] }; - vsetvlmax_e64m1(); - return v_float64x2(vle64_v_f64m1(elems)); + return v_float64x2(vle64_v_f64m1(elems, 2)); } inline v_float64x2 v_cvt_f64(const v_int64x2& a) { - vsetvlmax_e64m1(); - return v_float64x2(vfcvt_f_x_v_f64m1(a)); + return v_float64x2(vfcvt_f_x_v_f64m1(a, 2)); } #endif @@ -1947,7 +1863,7 @@ OPENCV_HAL_IMPL_RVV_TRANSPOSE4x4(float32x4, float, f32) ////////////// Reverse ////////////// -#define OPENCV_HAL_IMPL_RVV_REVERSE(_Tpvec, _Tp, width, suffix) \ +#define OPENCV_HAL_IMPL_RVV_REVERSE(_Tpvec, _Tp, suffix) \ inline _Tpvec v_reverse(const _Tpvec& a) \ { \ _Tp CV_DECL_ALIGNED(32) ptr[_Tpvec::nlanes] = {0}; \ @@ -1960,84 +1876,80 @@ inline _Tpvec v_reverse(const _Tpvec& a) \ return v_load(ptr); \ } -OPENCV_HAL_IMPL_RVV_REVERSE(v_uint8x16, uchar, 8, u8) -OPENCV_HAL_IMPL_RVV_REVERSE(v_int8x16, schar, 8, i8) -OPENCV_HAL_IMPL_RVV_REVERSE(v_uint16x8, ushort, 16, u16) -OPENCV_HAL_IMPL_RVV_REVERSE(v_int16x8, short, 16, i16) -OPENCV_HAL_IMPL_RVV_REVERSE(v_uint32x4, unsigned, 32, u32) -OPENCV_HAL_IMPL_RVV_REVERSE(v_int32x4, int, 32, i32) -OPENCV_HAL_IMPL_RVV_REVERSE(v_float32x4, float, 32, f32) -OPENCV_HAL_IMPL_RVV_REVERSE(v_uint64x2, uint64, 64, u64) -OPENCV_HAL_IMPL_RVV_REVERSE(v_int64x2, int64, 64, i64) +OPENCV_HAL_IMPL_RVV_REVERSE(v_uint8x16, uchar, u8) +OPENCV_HAL_IMPL_RVV_REVERSE(v_int8x16, schar, i8) +OPENCV_HAL_IMPL_RVV_REVERSE(v_uint16x8, ushort, u16) +OPENCV_HAL_IMPL_RVV_REVERSE(v_int16x8, short, i16) +OPENCV_HAL_IMPL_RVV_REVERSE(v_uint32x4, unsigned, u32) +OPENCV_HAL_IMPL_RVV_REVERSE(v_int32x4, int, i32) +OPENCV_HAL_IMPL_RVV_REVERSE(v_float32x4, float, f32) +OPENCV_HAL_IMPL_RVV_REVERSE(v_uint64x2, uint64, u64) +OPENCV_HAL_IMPL_RVV_REVERSE(v_int64x2, int64, i64) #if CV_SIMD128_64F -OPENCV_HAL_IMPL_RVV_REVERSE(v_float64x2, double, 64, f64) +OPENCV_HAL_IMPL_RVV_REVERSE(v_float64x2, double, f64) #endif //////////// Value reordering //////////// -#define OPENCV_HAL_IMPL_RVV_EXPAND(_Tpwvec, _Tp, _Tpvec, width, suffix, wcvt) \ +#define OPENCV_HAL_IMPL_RVV_EXPAND(_Tpwvec, _Tp, _Tpvec, width, suffix, wcvt, vl) \ inline void v_expand(const _Tpvec& a, _Tpwvec& b0, _Tpwvec& b1) \ { \ _Tp CV_DECL_ALIGNED(32) lptr[_Tpvec::nlanes/2] = {0}; \ _Tp CV_DECL_ALIGNED(32) hptr[_Tpvec::nlanes/2] = {0}; \ v_store_low(lptr, a); \ v_store_high(hptr, a); \ - b0 = _Tpwvec(wcvt(vle##width##_v_##suffix##mf2(lptr))); \ - b1 = _Tpwvec(wcvt(vle##width##_v_##suffix##mf2(hptr))); \ + b0 = _Tpwvec(wcvt(vle##width##_v_##suffix##mf2(lptr, vl), vl)); \ + b1 = _Tpwvec(wcvt(vle##width##_v_##suffix##mf2(hptr, 
vl), vl)); \ } \ inline _Tpwvec v_expand_low(const _Tpvec& a) \ { \ _Tp CV_DECL_ALIGNED(32) lptr[_Tpvec::nlanes/2] = {0}; \ v_store_low(lptr, a); \ - return _Tpwvec(wcvt(vle##width##_v_##suffix##mf2(lptr))); \ + return _Tpwvec(wcvt(vle##width##_v_##suffix##mf2(lptr, vl), vl)); \ } \ inline _Tpwvec v_expand_high(const _Tpvec& a) \ { \ _Tp CV_DECL_ALIGNED(32) hptr[_Tpvec::nlanes/2] = {0}; \ v_store_high(hptr, a); \ - return _Tpwvec(wcvt(vle##width##_v_##suffix##mf2(hptr))); \ + return _Tpwvec(wcvt(vle##width##_v_##suffix##mf2(hptr, vl), vl)); \ } \ inline _Tpwvec v_load_expand(const _Tp* ptr) \ { \ - return _Tpwvec(wcvt(vle##width##_v_##suffix##mf2(ptr))); \ + return _Tpwvec(wcvt(vle##width##_v_##suffix##mf2(ptr, vl), vl)); \ } -OPENCV_HAL_IMPL_RVV_EXPAND(v_uint16x8, uchar, v_uint8x16, 8, u8, vwcvtu_x_x_v_u16m1) -OPENCV_HAL_IMPL_RVV_EXPAND(v_int16x8, schar, v_int8x16, 8, i8, vwcvt_x_x_v_i16m1) -OPENCV_HAL_IMPL_RVV_EXPAND(v_uint32x4, ushort, v_uint16x8, 16, u16, vwcvtu_x_x_v_u32m1) -OPENCV_HAL_IMPL_RVV_EXPAND(v_int32x4, short, v_int16x8, 16, i16, vwcvt_x_x_v_i32m1) -OPENCV_HAL_IMPL_RVV_EXPAND(v_uint64x2, uint, v_uint32x4, 32, u32, vwcvtu_x_x_v_u64m1) -OPENCV_HAL_IMPL_RVV_EXPAND(v_int64x2, int, v_int32x4, 32, i32, vwcvt_x_x_v_i64m1) +OPENCV_HAL_IMPL_RVV_EXPAND(v_uint16x8, uchar, v_uint8x16, 8, u8, vwcvtu_x_x_v_u16m1, 8) +OPENCV_HAL_IMPL_RVV_EXPAND(v_int16x8, schar, v_int8x16, 8, i8, vwcvt_x_x_v_i16m1, 8) +OPENCV_HAL_IMPL_RVV_EXPAND(v_uint32x4, ushort, v_uint16x8, 16, u16, vwcvtu_x_x_v_u32m1, 4) +OPENCV_HAL_IMPL_RVV_EXPAND(v_int32x4, short, v_int16x8, 16, i16, vwcvt_x_x_v_i32m1, 4) +OPENCV_HAL_IMPL_RVV_EXPAND(v_uint64x2, uint, v_uint32x4, 32, u32, vwcvtu_x_x_v_u64m1, 2) +OPENCV_HAL_IMPL_RVV_EXPAND(v_int64x2, int, v_int32x4, 32, i32, vwcvt_x_x_v_i64m1, 2) inline v_uint32x4 v_load_expand_q(const uchar* ptr) { - vsetvlmax_e32m1(); - return v_uint32x4(vwcvtu_x_x_v_u32m1(vwcvtu_x_x_v_u16mf2(vle8_v_u8mf4(ptr)))); + return v_uint32x4(vwcvtu_x_x_v_u32m1(vwcvtu_x_x_v_u16mf2(vle8_v_u8mf4(ptr, 4), 4), 4)); } inline v_int32x4 v_load_expand_q(const schar* ptr) { - vsetvlmax_e32m1(); - return v_int32x4(vwcvt_x_x_v_i32m1(vwcvt_x_x_v_i16mf2(vle8_v_i8mf4(ptr)))); + return v_int32x4(vwcvt_x_x_v_i32m1(vwcvt_x_x_v_i16mf2(vle8_v_i8mf4(ptr, 4), 4), 4)); } -#define OPENCV_HAL_IMPL_RVV_PACK(_Tpvec, _Tp, _wTpvec, _wTp, width, suffix, rshr, shr) \ +#define OPENCV_HAL_IMPL_RVV_PACK(_Tpvec, _Tp, _wTpvec, _wTp, width, suffix, rshr, shr, hvl, vl) \ inline _Tpvec v_pack(const _wTpvec& a, const _wTpvec& b) \ { \ _wTp CV_DECL_ALIGNED(32) arr[_Tpvec::nlanes] = {0}; \ v_store(arr, a); \ v_store(arr + _wTpvec::nlanes, b); \ - vsetvlmax_e##width##m2(); \ - return _Tpvec(shr(vle##width##_v_##suffix##m2(arr), 0)); \ + return _Tpvec(shr(vle##width##_v_##suffix##m2(arr, vl), 0, vl)); \ } \ inline void v_pack_store(_Tp* ptr, const _wTpvec& a) \ { \ _wTp CV_DECL_ALIGNED(32) arr[_Tpvec::nlanes] = {0}; \ v_store(arr, a); \ - v_store(arr + _wTpvec::nlanes, _wTpvec(vzero_##suffix##m1())); \ - vsetvlmax_e##width##m2(); \ - v_store(ptr, _Tpvec(shr(vle##width##_v_##suffix##m2(arr), 0))); \ + v_store(arr + _wTpvec::nlanes, _wTpvec(vmv_v_x_##suffix##m1(0, hvl))); \ + v_store(ptr, _Tpvec(shr(vle##width##_v_##suffix##m2(arr, vl), 0, vl))); \ } \ template inline \ _Tpvec v_rshr_pack(const _wTpvec& a, const _wTpvec& b) \ @@ -2045,43 +1957,39 @@ _Tpvec v_rshr_pack(const _wTpvec& a, const _wTpvec& b) \ _wTp CV_DECL_ALIGNED(32) arr[_Tpvec::nlanes] = {0}; \ v_store(arr, a); \ v_store(arr + _wTpvec::nlanes, b); \ - vsetvlmax_e##width##m2(); \ - return 
_Tpvec(rshr(vle##width##_v_##suffix##m2(arr), n)); \ + return _Tpvec(rshr(vle##width##_v_##suffix##m2(arr, vl), n, vl)); \ } \ template inline \ void v_rshr_pack_store(_Tp* ptr, const _wTpvec& a) \ { \ _wTp CV_DECL_ALIGNED(32) arr[_Tpvec::nlanes] = {0}; \ v_store(arr, a); \ - v_store(arr + _wTpvec::nlanes, _wTpvec(vzero_##suffix##m1())); \ - vsetvlmax_e##width##m2(); \ - v_store(ptr, _Tpvec(rshr(vle##width##_v_##suffix##m2(arr), n))); \ + v_store(arr + _wTpvec::nlanes, _wTpvec(vmv_v_x_##suffix##m1(0, hvl))); \ + v_store(ptr, _Tpvec(rshr(vle##width##_v_##suffix##m2(arr, vl), n, vl))); \ } -OPENCV_HAL_IMPL_RVV_PACK(v_uint8x16, uchar, v_uint16x8, ushort, 16, u16, vnclipu_wx_u8m1, vnclipu_wx_u8m1) -OPENCV_HAL_IMPL_RVV_PACK(v_int8x16, schar, v_int16x8, short, 16, i16, vnclip_wx_i8m1, vnclip_wx_i8m1) -OPENCV_HAL_IMPL_RVV_PACK(v_uint16x8, ushort, v_uint32x4, unsigned, 32, u32, vnclipu_wx_u16m1, vnclipu_wx_u16m1) -OPENCV_HAL_IMPL_RVV_PACK(v_int16x8, short, v_int32x4, int, 32, i32, vnclip_wx_i16m1, vnclip_wx_i16m1) -OPENCV_HAL_IMPL_RVV_PACK(v_uint32x4, unsigned, v_uint64x2, uint64, 64, u64, vnclipu_wx_u32m1, vnsrl_wx_u32m1) -OPENCV_HAL_IMPL_RVV_PACK(v_int32x4, int, v_int64x2, int64, 64, i64, vnclip_wx_i32m1, vnsra_wx_i32m1) +OPENCV_HAL_IMPL_RVV_PACK(v_uint8x16, uchar, v_uint16x8, ushort, 16, u16, vnclipu_wx_u8m1, vnclipu_wx_u8m1, 8, 16) +OPENCV_HAL_IMPL_RVV_PACK(v_int8x16, schar, v_int16x8, short, 16, i16, vnclip_wx_i8m1, vnclip_wx_i8m1, 8, 16) +OPENCV_HAL_IMPL_RVV_PACK(v_uint16x8, ushort, v_uint32x4, unsigned, 32, u32, vnclipu_wx_u16m1, vnclipu_wx_u16m1, 4, 8) +OPENCV_HAL_IMPL_RVV_PACK(v_int16x8, short, v_int32x4, int, 32, i32, vnclip_wx_i16m1, vnclip_wx_i16m1, 4, 8) +OPENCV_HAL_IMPL_RVV_PACK(v_uint32x4, unsigned, v_uint64x2, uint64, 64, u64, vnclipu_wx_u32m1, vnsrl_wx_u32m1, 2, 4) +OPENCV_HAL_IMPL_RVV_PACK(v_int32x4, int, v_int64x2, int64, 64, i64, vnclip_wx_i32m1, vnsra_wx_i32m1, 2, 4) -#define OPENCV_HAL_IMPL_RVV_PACK_U(_Tpvec, _Tp, _wTpvec, _wTp, width, suffix, rshr, cast) \ +#define OPENCV_HAL_IMPL_RVV_PACK_U(_Tpvec, _Tp, _wTpvec, _wTp, width, suffix, rshr, cast, vl) \ inline _Tpvec v_pack_u(const _wTpvec& a, const _wTpvec& b) \ { \ _wTp CV_DECL_ALIGNED(32) arr[_Tpvec::nlanes] = {0}; \ v_store(arr, a); \ v_store(arr + _wTpvec::nlanes, b); \ - vsetvlmax_e##width##m2(); \ - return _Tpvec(rshr(cast(vmax_vx_##suffix##m2(vle##width##_v_##suffix##m2(arr), 0)), 0)); \ + return _Tpvec(rshr(cast(vmax_vx_##suffix##m2(vle##width##_v_##suffix##m2(arr, vl), 0, vl)), 0, vl)); \ } \ inline void v_pack_u_store(_Tp* ptr, const _wTpvec& a) \ { \ _wTp CV_DECL_ALIGNED(32) arr[_Tpvec::nlanes] = {0}; \ v_store(arr, a); \ - v_store(arr + _wTpvec::nlanes, _wTpvec(vzero_##suffix##m1())); \ - vsetvlmax_e##width##m2(); \ - v_store(ptr, _Tpvec(rshr(cast(vmax_vx_##suffix##m2(vle##width##_v_##suffix##m2(arr), 0)), 0))); \ + v_store(arr + _wTpvec::nlanes, _wTpvec(vmv_v_x_##suffix##m1(0, vl))); \ + v_store(ptr, _Tpvec(rshr(cast(vmax_vx_##suffix##m2(vle##width##_v_##suffix##m2(arr, vl), 0, vl)), 0, vl))); \ } \ template inline \ _Tpvec v_rshr_pack_u(const _wTpvec& a, const _wTpvec& b) \ @@ -2089,24 +1997,22 @@ _Tpvec v_rshr_pack_u(const _wTpvec& a, const _wTpvec& b) \ _wTp CV_DECL_ALIGNED(32) arr[_Tpvec::nlanes] = {0}; \ v_store(arr, a); \ v_store(arr + _wTpvec::nlanes, b); \ - vsetvlmax_e##width##m2(); \ - return _Tpvec(rshr(cast(vmax_vx_##suffix##m2(vle##width##_v_##suffix##m2(arr), 0)), n)); \ + return _Tpvec(rshr(cast(vmax_vx_##suffix##m2(vle##width##_v_##suffix##m2(arr, vl), 0, vl)), n, vl)); \ } \ template inline \ 
void v_rshr_pack_u_store(_Tp* ptr, const _wTpvec& a) \ { \ _wTp CV_DECL_ALIGNED(32) arr[_Tpvec::nlanes] = {0}; \ v_store(arr, a); \ - v_store(arr + _wTpvec::nlanes, _wTpvec(vzero_##suffix##m1())); \ - vsetvlmax_e##width##m2(); \ - v_store(ptr, _Tpvec(rshr(cast(vmax_vx_##suffix##m2(vle##width##_v_##suffix##m2(arr), 0)), n))); \ + v_store(arr + _wTpvec::nlanes, _wTpvec(vmv_v_x_##suffix##m1(0, vl))); \ + v_store(ptr, _Tpvec(rshr(cast(vmax_vx_##suffix##m2(vle##width##_v_##suffix##m2(arr, vl), 0, vl)), n, vl))); \ } -OPENCV_HAL_IMPL_RVV_PACK_U(v_uint8x16, uchar, v_int16x8, short, 16, i16, vnclipu_wx_u8m1, vreinterpret_v_i16m2_u16m2) -OPENCV_HAL_IMPL_RVV_PACK_U(v_uint16x8, ushort, v_int32x4, int, 32, i32, vnclipu_wx_u16m1, vreinterpret_v_i32m2_u32m2) +OPENCV_HAL_IMPL_RVV_PACK_U(v_uint8x16, uchar, v_int16x8, short, 16, i16, vnclipu_wx_u8m1, vreinterpret_v_i16m2_u16m2, 16) +OPENCV_HAL_IMPL_RVV_PACK_U(v_uint16x8, ushort, v_int32x4, int, 32, i32, vnclipu_wx_u16m1, vreinterpret_v_i32m2_u32m2, 8) -#define OPENCV_HAL_IMPL_RVV_UNPACKS(_Tpvec, _Tp, width, suffix) \ +#define OPENCV_HAL_IMPL_RVV_UNPACKS(_Tpvec, _Tp, suffix) \ inline void v_zip(const v_##_Tpvec& a0, const v_##_Tpvec& a1, v_##_Tpvec& b0, v_##_Tpvec& b1) \ { \ _Tp CV_DECL_ALIGNED(32) ptra0[v_##_Tpvec::nlanes] = {0}; \ @@ -2151,19 +2057,19 @@ inline void v_recombine(const v_##_Tpvec& a, const v_##_Tpvec& b, v_##_Tpvec& c, d = v_combine_high(a, b); \ } -OPENCV_HAL_IMPL_RVV_UNPACKS(uint8x16, uchar, 8, u8) -OPENCV_HAL_IMPL_RVV_UNPACKS(int8x16, schar, 8, i8) -OPENCV_HAL_IMPL_RVV_UNPACKS(uint16x8, ushort, 16, u16) -OPENCV_HAL_IMPL_RVV_UNPACKS(int16x8, short, 16, i16) -OPENCV_HAL_IMPL_RVV_UNPACKS(uint32x4, unsigned, 32, u32) -OPENCV_HAL_IMPL_RVV_UNPACKS(int32x4, int, 32, i32) -OPENCV_HAL_IMPL_RVV_UNPACKS(float32x4, float, 32, f32) +OPENCV_HAL_IMPL_RVV_UNPACKS(uint8x16, uchar, u8) +OPENCV_HAL_IMPL_RVV_UNPACKS(int8x16, schar, i8) +OPENCV_HAL_IMPL_RVV_UNPACKS(uint16x8, ushort, u16) +OPENCV_HAL_IMPL_RVV_UNPACKS(int16x8, short, i16) +OPENCV_HAL_IMPL_RVV_UNPACKS(uint32x4, unsigned, u32) +OPENCV_HAL_IMPL_RVV_UNPACKS(int32x4, int, i32) +OPENCV_HAL_IMPL_RVV_UNPACKS(float32x4, float, f32) #if CV_SIMD128_64F -OPENCV_HAL_IMPL_RVV_UNPACKS(float64x2, double, 64, f64) +OPENCV_HAL_IMPL_RVV_UNPACKS(float64x2, double, f64) #endif -#define OPENCV_HAL_IMPL_RVV_INTERLEAVED(_Tpvec, _Tp, suffix, width) \ +#define OPENCV_HAL_IMPL_RVV_INTERLEAVED(_Tpvec, _Tp) \ inline void v_load_deinterleave(const _Tp* ptr, v_##_Tpvec& a, v_##_Tpvec& b) \ { \ _Tp CV_DECL_ALIGNED(32) ptra[v_##_Tpvec::nlanes] = {0}; \ @@ -2298,17 +2204,17 @@ inline v_##_Tpvec v_interleave_quads(const v_##_Tpvec& vec) \ return v_load(ptr); \ } -OPENCV_HAL_IMPL_RVV_INTERLEAVED(uint8x16, uchar, u8, 8) -OPENCV_HAL_IMPL_RVV_INTERLEAVED(int8x16, schar, i8, 8) -OPENCV_HAL_IMPL_RVV_INTERLEAVED(uint16x8, ushort, u16, 16) -OPENCV_HAL_IMPL_RVV_INTERLEAVED(int16x8, short, i16, 16) -OPENCV_HAL_IMPL_RVV_INTERLEAVED(uint32x4, unsigned, u32, 32) -OPENCV_HAL_IMPL_RVV_INTERLEAVED(int32x4, int, i32, 32) -OPENCV_HAL_IMPL_RVV_INTERLEAVED(float32x4, float, f32, 32) -OPENCV_HAL_IMPL_RVV_INTERLEAVED(uint64x2, uint64, u64, 64) -OPENCV_HAL_IMPL_RVV_INTERLEAVED(int64x2, int64, i64, 64) +OPENCV_HAL_IMPL_RVV_INTERLEAVED(uint8x16, uchar) +OPENCV_HAL_IMPL_RVV_INTERLEAVED(int8x16, schar) +OPENCV_HAL_IMPL_RVV_INTERLEAVED(uint16x8, ushort) +OPENCV_HAL_IMPL_RVV_INTERLEAVED(int16x8, short) +OPENCV_HAL_IMPL_RVV_INTERLEAVED(uint32x4, unsigned) +OPENCV_HAL_IMPL_RVV_INTERLEAVED(int32x4, int) +OPENCV_HAL_IMPL_RVV_INTERLEAVED(float32x4, float) 
+OPENCV_HAL_IMPL_RVV_INTERLEAVED(uint64x2, uint64) +OPENCV_HAL_IMPL_RVV_INTERLEAVED(int64x2, int64) #if CV_SIMD128_64F -OPENCV_HAL_IMPL_RVV_INTERLEAVED(float64x2, double, f64, 64) +OPENCV_HAL_IMPL_RVV_INTERLEAVED(float64x2, double) #endif //////////// PopCount //////////// @@ -2356,21 +2262,20 @@ OPENCV_HAL_IMPL_RVV_POPCOUNT_OP(v_uint64x2, v_int64x2, uint64, int64, u64) //////////// SignMask //////////// -#define OPENCV_HAL_IMPL_RVV_SIGNMASK_OP(_Tpvec, _Tp, suffix, width, shift) \ +#define OPENCV_HAL_IMPL_RVV_SIGNMASK_OP(_Tpvec, _Tp, suffix, vl, shift) \ inline int v_signmask(const _Tpvec& a) \ { \ int mask = 0; \ - vsetvlmax_e##width##m1(); \ - _Tpvec tmp = _Tpvec(vsrl_vx_##suffix##m1(a, shift)); \ + _Tpvec tmp = _Tpvec(vsrl_vx_##suffix##m1(a, shift, vl)); \ for( int i = 0; i < _Tpvec::nlanes; i++ ) \ mask |= (int)(tmp.val[i]) << i; \ return mask; \ } -OPENCV_HAL_IMPL_RVV_SIGNMASK_OP(v_uint8x16, uchar, u8, 8, 7) -OPENCV_HAL_IMPL_RVV_SIGNMASK_OP(v_uint16x8, ushort, u16, 16, 15) -OPENCV_HAL_IMPL_RVV_SIGNMASK_OP(v_uint32x4, unsigned, u32, 32, 31) -OPENCV_HAL_IMPL_RVV_SIGNMASK_OP(v_uint64x2, uint64, u64, 64, 63) +OPENCV_HAL_IMPL_RVV_SIGNMASK_OP(v_uint8x16, uchar, u8, 16, 7) +OPENCV_HAL_IMPL_RVV_SIGNMASK_OP(v_uint16x8, ushort, u16, 8, 15) +OPENCV_HAL_IMPL_RVV_SIGNMASK_OP(v_uint32x4, unsigned, u32, 4, 31) +OPENCV_HAL_IMPL_RVV_SIGNMASK_OP(v_uint64x2, uint64, u64, 2, 63) inline int v_signmask(const v_int8x16& a) { return v_signmask(v_reinterpret_as_u8(a)); } @@ -2445,12 +2350,12 @@ OPENCV_HAL_IMPL_RVV_PACK_TRIPLETS(v_float32x4, float) #if CV_FP16 inline v_float32x4 v_load_expand(const float16_t* ptr) { - return v_float32x4(vfwcvt_f_f_v_f32m1(vle16_v_f16mf2(ptr))); + return v_float32x4(vfwcvt_f_f_v_f32m1(vle16_v_f16mf2(ptr, 4), 4)); } inline void v_pack_store(float16_t* ptr, const v_float32x4& v) { - vse16_v_f16mf2(ptr, vfncvt_f_f_w_f16mf2(v)); + vse16_v_f16mf2(ptr, vfncvt_f_f_w_f16mf2(v, 4), 4); } #else inline v_float32x4 v_load_expand(const float16_t* ptr) @@ -2474,70 +2379,61 @@ inline void v_pack_store(float16_t* ptr, const v_float32x4& v) inline v_int32x4 v_round(const v_float32x4& a) { - vsetvlmax_e32m1(); - return v_int32x4(vfcvt_x_f_v_i32m1(a)); + return v_int32x4(vfcvt_x_f_v_i32m1(a, 4)); } inline v_int32x4 v_floor(const v_float32x4& a) { v_float32x4 ZP5 = v_setall_f32(0.5f); v_float32x4 t = a - ZP5; - vsetvlmax_e32m1(); - return v_int32x4(vfcvt_x_f_v_i32m1(t)); + return v_int32x4(vfcvt_x_f_v_i32m1(t, 4)); } inline v_int32x4 v_ceil(const v_float32x4& a) { v_float32x4 ZP5 = v_setall_f32(0.5f); v_float32x4 t = a + ZP5; - vsetvlmax_e32m1(); - return v_int32x4(vfcvt_x_f_v_i32m1(t)); + return v_int32x4(vfcvt_x_f_v_i32m1(t, 4)); } inline v_int32x4 v_trunc(const v_float32x4& a) { - vsetvlmax_e32m1(); - return v_int32x4(vfcvt_rtz_x_f_v_i32m1(a)); + return v_int32x4(vfcvt_rtz_x_f_v_i32m1(a, 4)); } #if CV_SIMD128_64F inline v_int32x4 v_round(const v_float64x2& a) { double arr[4] = {a.val[0], a.val[1], 0, 0}; - vsetvlmax_e64m2(); - vfloat64m2_t tmp = vle64_v_f64m2(arr); - return v_int32x4(vfncvt_x_f_w_i32m1(tmp)); + vfloat64m2_t tmp = vle64_v_f64m2(arr, 4); + return v_int32x4(vfncvt_x_f_w_i32m1(tmp, 4)); } inline v_int32x4 v_round(const v_float64x2& a, const v_float64x2& b) { double arr[4] = {a.val[0], a.val[1], b.val[0], b.val[1]}; - vsetvlmax_e64m2(); - vfloat64m2_t tmp = vle64_v_f64m2(arr); - return v_int32x4(vfncvt_x_f_w_i32m1(tmp)); + vfloat64m2_t tmp = vle64_v_f64m2(arr, 4); + return v_int32x4(vfncvt_x_f_w_i32m1(tmp, 4)); } inline v_int32x4 v_floor(const v_float64x2& a) { double arr[4] = 
{a.val[0]-0.5f, a.val[1]-0.5f, 0, 0}; - vsetvlmax_e64m2(); - vfloat64m2_t tmp = vle64_v_f64m2(arr); - return v_int32x4(vfncvt_x_f_w_i32m1(tmp)); + vfloat64m2_t tmp = vle64_v_f64m2(arr, 4); + return v_int32x4(vfncvt_x_f_w_i32m1(tmp, 4)); } inline v_int32x4 v_ceil(const v_float64x2& a) { double arr[4] = {a.val[0]+0.5f, a.val[1]+0.5f, 0, 0}; - vsetvlmax_e64m2(); - vfloat64m2_t tmp = vle64_v_f64m2(arr); - return v_int32x4(vfncvt_x_f_w_i32m1(tmp)); + vfloat64m2_t tmp = vle64_v_f64m2(arr, 4); + return v_int32x4(vfncvt_x_f_w_i32m1(tmp, 4)); } inline v_int32x4 v_trunc(const v_float64x2& a) { double arr[4] = {a.val[0], a.val[1], 0, 0}; - vsetvlmax_e64m2(); - vfloat64m2_t tmp = vle64_v_f64m2(arr); - return v_int32x4(vfncvt_rtz_x_f_w_i32m1(tmp)); + vfloat64m2_t tmp = vle64_v_f64m2(arr, 4); + return v_int32x4(vfncvt_rtz_x_f_w_i32m1(tmp, 4)); } #endif @@ -2549,8 +2445,7 @@ inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b) { int CV_DECL_ALIGNED(32) ptr[8] = {0}; v_int32x4 t1, t2; - vsetvlmax_e32m2(); - vse32_v_i32m2(ptr, vwmul_vv_i32m2(a, b)); + vse32_v_i32m2(ptr, vwmul_vv_i32m2(a, b, 8), 8); v_load_deinterleave(ptr, t1, t2); return t1 + t2; } @@ -2558,8 +2453,7 @@ inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b, const v_int32 { int CV_DECL_ALIGNED(32) ptr[8] = {0}; v_int32x4 t1, t2; - vsetvlmax_e32m2(); - vse32_v_i32m2(ptr, vwmul_vv_i32m2(a, b)); + vse32_v_i32m2(ptr, vwmul_vv_i32m2(a, b, 8), 8); v_load_deinterleave(ptr, t1, t2); return t1 + t2 + c; } @@ -2569,8 +2463,7 @@ inline v_int64x2 v_dotprod(const v_int32x4& a, const v_int32x4& b) { int64 CV_DECL_ALIGNED(32) ptr[4] = {0}; v_int64x2 t1, t2; - vsetvlmax_e64m2(); - vse64_v_i64m2(ptr, vwmul_vv_i64m2(a, b)); + vse64_v_i64m2(ptr, vwmul_vv_i64m2(a, b, 4), 4); v_load_deinterleave(ptr, t1, t2); return t1 + t2; } @@ -2578,8 +2471,7 @@ inline v_int64x2 v_dotprod(const v_int32x4& a, const v_int32x4& b, const v_int64 { int64 CV_DECL_ALIGNED(32) ptr[4] = {0}; v_int64x2 t1, t2; - vsetvlmax_e64m2(); - vse64_v_i64m2(ptr, vwmul_vv_i64m2(a, b)); + vse64_v_i64m2(ptr, vwmul_vv_i64m2(a, b, 4), 4); v_load_deinterleave(ptr, t1, t2); return t1 + t2 + c; } @@ -2589,8 +2481,7 @@ inline v_uint32x4 v_dotprod_expand(const v_uint8x16& a, const v_uint8x16& b) { unsigned CV_DECL_ALIGNED(32) ptr[16] = {0}; v_uint32x4 t1, t2, t3, t4; - vsetvlmax_e32m4(); - vse32_v_u32m4(ptr, vqmaccu_vv_u32m4(vzero_u32m4(), a, b)); + vse32_v_u32m4(ptr, vwcvtu_x_x_v_u32m4(vwmulu_vv_u16m2(a, b, 16), 16), 16); v_load_deinterleave(ptr, t1, t2, t3, t4); return t1 + t2 + t3 + t4; } @@ -2599,8 +2490,7 @@ inline v_uint32x4 v_dotprod_expand(const v_uint8x16& a, const v_uint8x16& b, { unsigned CV_DECL_ALIGNED(32) ptr[16] = {0}; v_uint32x4 t1, t2, t3, t4; - vsetvlmax_e32m4(); - vse32_v_u32m4(ptr, vqmaccu_vv_u32m4(vzero_u32m4(), a, b)); + vse32_v_u32m4(ptr, vwcvtu_x_x_v_u32m4(vwmulu_vv_u16m2(a, b, 16), 16), 16); v_load_deinterleave(ptr, t1, t2, t3, t4); return t1 + t2 + t3 + t4 + c; } @@ -2609,8 +2499,7 @@ inline v_int32x4 v_dotprod_expand(const v_int8x16& a, const v_int8x16& b) { int CV_DECL_ALIGNED(32) ptr[16] = {0}; v_int32x4 t1, t2, t3, t4; - vsetvlmax_e32m4(); - vse32_v_i32m4(ptr, vqmacc_vv_i32m4(vzero_i32m4(), a, b)); + vse32_v_i32m4(ptr, vwcvt_x_x_v_i32m4(vwmul_vv_i16m2(a, b, 16), 16), 16); v_load_deinterleave(ptr, t1, t2, t3, t4); return t1 + t2 + t3 + t4; } @@ -2619,8 +2508,7 @@ inline v_int32x4 v_dotprod_expand(const v_int8x16& a, const v_int8x16& b, { int CV_DECL_ALIGNED(32) ptr[16] = {0}; v_int32x4 t1, t2, t3, t4; - vsetvlmax_e32m4(); - vse32_v_i32m4(ptr, 
vqmacc_vv_i32m4(vzero_i32m4(), a, b)); + vse32_v_i32m4(ptr, vwcvt_x_x_v_i32m4(vwmul_vv_i16m2(a, b, 16), 16), 16); v_load_deinterleave(ptr, t1, t2, t3, t4); return t1 + t2 + t3 + t4 + c; } @@ -2630,8 +2518,7 @@ inline v_uint64x2 v_dotprod_expand(const v_uint16x8& a, const v_uint16x8& b) { uint64 CV_DECL_ALIGNED(32) ptr[8] = {0}; v_uint64x2 t1, t2, t3, t4; - vsetvlmax_e64m4(); - vse64_v_u64m4(ptr, vqmaccu_vv_u64m4(vzero_u64m4(), a, b)); + vse64_v_u64m4(ptr, vwcvtu_x_x_v_u64m4(vwmulu_vv_u32m2(a, b, 8), 8), 8); v_load_deinterleave(ptr, t1, t2, t3, t4); return t1 + t2 + t3 + t4; } @@ -2639,8 +2526,7 @@ inline v_uint64x2 v_dotprod_expand(const v_uint16x8& a, const v_uint16x8& b, con { uint64 CV_DECL_ALIGNED(32) ptr[8] = {0}; v_uint64x2 t1, t2, t3, t4; - vsetvlmax_e64m4(); - vse64_v_u64m4(ptr, vqmaccu_vv_u64m4(vzero_u64m4(), a, b)); + vse64_v_u64m4(ptr, vwcvtu_x_x_v_u64m4(vwmulu_vv_u32m2(a, b, 8), 8), 8); v_load_deinterleave(ptr, t1, t2, t3, t4); return t1 + t2 + t3 + t4 + c; } @@ -2649,8 +2535,7 @@ inline v_int64x2 v_dotprod_expand(const v_int16x8& a, const v_int16x8& b) { int64 CV_DECL_ALIGNED(32) ptr[8] = {0}; v_int64x2 t1, t2, t3, t4; - vsetvlmax_e64m4(); - vse64_v_i64m4(ptr, vqmacc_vv_i64m4(vzero_i64m4(), a, b)); + vse64_v_i64m4(ptr, vwcvt_x_x_v_i64m4(vwmul_vv_i32m2(a, b, 8), 8), 8); v_load_deinterleave(ptr, t1, t2, t3, t4); return t1 + t2 + t3 + t4; } @@ -2659,8 +2544,7 @@ inline v_int64x2 v_dotprod_expand(const v_int16x8& a, const v_int16x8& b, { int64 CV_DECL_ALIGNED(32) ptr[8] = {0}; v_int64x2 t1, t2, t3, t4; - vsetvlmax_e64m4(); - vse64_v_i64m4(ptr, vqmacc_vv_i64m4(vzero_i64m4(), a, b)); + vse64_v_i64m4(ptr, vwcvt_x_x_v_i64m4(vwmul_vv_i32m2(a, b, 8), 8), 8); v_load_deinterleave(ptr, t1, t2, t3, t4); return t1 + t2 + t3 + t4 + c; } @@ -2680,8 +2564,7 @@ inline v_float64x2 v_dotprod_expand(const v_int32x4& a, const v_int32x4& b, inline v_int32x4 v_dotprod_fast(const v_int16x8& a, const v_int16x8& b) { int CV_DECL_ALIGNED(32) ptr[8] = {0}; - vsetvlmax_e32m2(); - vse32_v_i32m2(ptr, vwmul_vv_i32m2(a, b)); + vse32_v_i32m2(ptr, vwmul_vv_i32m2(a, b, 8), 8); v_int32x4 t1 = v_load(ptr); v_int32x4 t2 = v_load(ptr+4); return t1 + t2; @@ -2689,8 +2572,7 @@ inline v_int32x4 v_dotprod_fast(const v_int16x8& a, const v_int16x8& b) inline v_int32x4 v_dotprod_fast(const v_int16x8& a, const v_int16x8& b, const v_int32x4& c) { int CV_DECL_ALIGNED(32) ptr[8] = {0}; - vsetvlmax_e32m2(); - vse32_v_i32m2(ptr, vwmul_vv_i32m2(a, b)); + vse32_v_i32m2(ptr, vwmul_vv_i32m2(a, b, 8), 8); v_int32x4 t1 = v_load(ptr); v_int32x4 t2 = v_load(ptr+4); return t1 + t2 + c; @@ -2700,8 +2582,7 @@ inline v_int32x4 v_dotprod_fast(const v_int16x8& a, const v_int16x8& b, const v_ inline v_int64x2 v_dotprod_fast(const v_int32x4& a, const v_int32x4& b) { int64 CV_DECL_ALIGNED(32) ptr[4] = {0}; - vsetvlmax_e64m2(); - vse64_v_i64m2(ptr, vwmul_vv_i64m2(a, b)); + vse64_v_i64m2(ptr, vwmul_vv_i64m2(a, b, 4), 4); v_int64x2 t1 = v_load(ptr); v_int64x2 t2 = v_load(ptr+2); return t1 + t2; @@ -2709,8 +2590,7 @@ inline v_int64x2 v_dotprod_fast(const v_int32x4& a, const v_int32x4& b) inline v_int64x2 v_dotprod_fast(const v_int32x4& a, const v_int32x4& b, const v_int64x2& c) { int64 CV_DECL_ALIGNED(32) ptr[4] = {0}; - vsetvlmax_e64m2(); - vse64_v_i64m2(ptr, vwmul_vv_i64m2(a, b)); + vse64_v_i64m2(ptr, vwmul_vv_i64m2(a, b, 4), 4); v_int64x2 t1 = v_load(ptr); v_int64x2 t2 = v_load(ptr+2); return t1 + t2 + c; @@ -2721,8 +2601,7 @@ inline v_int64x2 v_dotprod_fast(const v_int32x4& a, const v_int32x4& b, const v_ inline v_uint32x4 
v_dotprod_expand_fast(const v_uint8x16& a, const v_uint8x16& b) { unsigned CV_DECL_ALIGNED(32) ptr[16] = {0}; - vsetvlmax_e32m4(); - vse32_v_u32m4(ptr, vqmaccu_vv_u32m4(vzero_u32m4(), a, b)); + vse32_v_u32m4(ptr, vwcvtu_x_x_v_u32m4(vwmulu_vv_u16m2(a, b, 16), 16), 16); v_uint32x4 t1 = v_load(ptr); v_uint32x4 t2 = v_load(ptr+4); v_uint32x4 t3 = v_load(ptr+8); @@ -2732,8 +2611,7 @@ inline v_uint32x4 v_dotprod_expand_fast(const v_uint8x16& a, const v_uint8x16& b inline v_uint32x4 v_dotprod_expand_fast(const v_uint8x16& a, const v_uint8x16& b, const v_uint32x4& c) { unsigned CV_DECL_ALIGNED(32) ptr[16] = {0}; - vsetvlmax_e32m4(); - vse32_v_u32m4(ptr, vqmaccu_vv_u32m4(vzero_u32m4(), a, b)); + vse32_v_u32m4(ptr, vwcvtu_x_x_v_u32m4(vwmulu_vv_u16m2(a, b, 16), 16), 16); v_uint32x4 t1 = v_load(ptr); v_uint32x4 t2 = v_load(ptr+4); v_uint32x4 t3 = v_load(ptr+8); @@ -2743,8 +2621,7 @@ inline v_uint32x4 v_dotprod_expand_fast(const v_uint8x16& a, const v_uint8x16& b inline v_int32x4 v_dotprod_expand_fast(const v_int8x16& a, const v_int8x16& b) { int CV_DECL_ALIGNED(32) ptr[16] = {0}; - vsetvlmax_e32m4(); - vse32_v_i32m4(ptr, vqmacc_vv_i32m4(vzero_i32m4(), a, b)); + vse32_v_i32m4(ptr, vwcvt_x_x_v_i32m4(vwmul_vv_i16m2(a, b, 16), 16), 16); v_int32x4 t1 = v_load(ptr); v_int32x4 t2 = v_load(ptr+4); v_int32x4 t3 = v_load(ptr+8); @@ -2754,8 +2631,7 @@ inline v_int32x4 v_dotprod_expand_fast(const v_int8x16& a, const v_int8x16& b) inline v_int32x4 v_dotprod_expand_fast(const v_int8x16& a, const v_int8x16& b, const v_int32x4& c) { int CV_DECL_ALIGNED(32) ptr[16] = {0}; - vsetvlmax_e32m4(); - vse32_v_i32m4(ptr, vqmacc_vv_i32m4(vzero_i32m4(), a, b)); + vse32_v_i32m4(ptr, vwcvt_x_x_v_i32m4(vwmul_vv_i16m2(a, b, 16), 16), 16); v_int32x4 t1 = v_load(ptr); v_int32x4 t2 = v_load(ptr+4); v_int32x4 t3 = v_load(ptr+8); @@ -2767,8 +2643,7 @@ inline v_int32x4 v_dotprod_expand_fast(const v_int8x16& a, const v_int8x16& b, c inline v_uint64x2 v_dotprod_expand_fast(const v_uint16x8& a, const v_uint16x8& b) { uint64 CV_DECL_ALIGNED(32) ptr[8] = {0}; - vsetvlmax_e64m4(); - vse64_v_u64m4(ptr, vqmaccu_vv_u64m4(vzero_u64m4(), a, b)); + vse64_v_u64m4(ptr, vwcvtu_x_x_v_u64m4(vwmulu_vv_u32m2(a, b, 8), 8), 8); v_uint64x2 t1 = v_load(ptr); v_uint64x2 t2 = v_load(ptr+2); v_uint64x2 t3 = v_load(ptr+4); @@ -2778,8 +2653,7 @@ inline v_uint64x2 v_dotprod_expand_fast(const v_uint16x8& a, const v_uint16x8& b inline v_uint64x2 v_dotprod_expand_fast(const v_uint16x8& a, const v_uint16x8& b, const v_uint64x2& c) { uint64 CV_DECL_ALIGNED(32) ptr[8] = {0}; - vsetvlmax_e64m4(); - vse64_v_u64m4(ptr, vqmaccu_vv_u64m4(vzero_u64m4(), a, b)); + vse64_v_u64m4(ptr, vwcvtu_x_x_v_u64m4(vwmulu_vv_u32m2(a, b, 8), 8), 8); v_uint64x2 t1 = v_load(ptr); v_uint64x2 t2 = v_load(ptr+2); v_uint64x2 t3 = v_load(ptr+4); @@ -2789,8 +2663,7 @@ inline v_uint64x2 v_dotprod_expand_fast(const v_uint16x8& a, const v_uint16x8& b inline v_int64x2 v_dotprod_expand_fast(const v_int16x8& a, const v_int16x8& b) { int64 CV_DECL_ALIGNED(32) ptr[8] = {0}; - vsetvlmax_e64m4(); - vse64_v_i64m4(ptr, vqmacc_vv_i64m4(vzero_i64m4(), a, b)); + vse64_v_i64m4(ptr, vwcvt_x_x_v_i64m4(vwmul_vv_i32m2(a, b, 8), 8), 8); v_int64x2 t1 = v_load(ptr); v_int64x2 t2 = v_load(ptr+2); v_int64x2 t3 = v_load(ptr+4); @@ -2800,8 +2673,7 @@ inline v_int64x2 v_dotprod_expand_fast(const v_int16x8& a, const v_int16x8& b) inline v_int64x2 v_dotprod_expand_fast(const v_int16x8& a, const v_int16x8& b, const v_int64x2& c) { int64 CV_DECL_ALIGNED(32) ptr[8] = {0}; - vsetvlmax_e64m4(); - vse64_v_i64m4(ptr, 
vqmacc_vv_i64m4(vzero_i64m4(), a, b)); + vse64_v_i64m4(ptr, vwcvt_x_x_v_i64m4(vwmul_vv_i32m2(a, b, 8), 8), 8); v_int64x2 t1 = v_load(ptr); v_int64x2 t2 = v_load(ptr+2); v_int64x2 t3 = v_load(ptr+4); @@ -2822,11 +2694,10 @@ inline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0, const v_float32x4& m1, const v_float32x4& m2, const v_float32x4& m3) { - vsetvlmax_e32m1(); - vfloat32m1_t res = vfmul_vf_f32m1(m0, v_extract_n<0>(v)); - res = vfmacc_vf_f32m1(res, v_extract_n<1>(v), m1); - res = vfmacc_vf_f32m1(res, v_extract_n<2>(v), m2); - res = vfmacc_vf_f32m1(res, v_extract_n<3>(v), m3); + vfloat32m1_t res = vfmul_vf_f32m1(m0, v_extract_n<0>(v), 4); + res = vfmacc_vf_f32m1(res, v_extract_n<1>(v), m1, 4); + res = vfmacc_vf_f32m1(res, v_extract_n<2>(v), m2, 4); + res = vfmacc_vf_f32m1(res, v_extract_n<3>(v), m3, 4); return v_float32x4(res); } @@ -2834,40 +2705,35 @@ inline v_float32x4 v_matmuladd(const v_float32x4& v, const v_float32x4& m0, const v_float32x4& m1, const v_float32x4& m2, const v_float32x4& a) { - vsetvlmax_e32m1(); - vfloat32m1_t res = vfmul_vf_f32m1(m0, v_extract_n<0>(v)); - res = vfmacc_vf_f32m1(res, v_extract_n<1>(v), m1); - res = vfmacc_vf_f32m1(res, v_extract_n<2>(v), m2); + vfloat32m1_t res = vfmul_vf_f32m1(m0, v_extract_n<0>(v), 4); + res = vfmacc_vf_f32m1(res, v_extract_n<1>(v), m1, 4); + res = vfmacc_vf_f32m1(res, v_extract_n<2>(v), m2, 4); return v_float32x4(res) + a; } -#define OPENCV_HAL_IMPL_RVV_MUL_EXPAND(_Tpvec, _Tpwvec, _Tpw, suffix, wmul, width) \ +#define OPENCV_HAL_IMPL_RVV_MUL_EXPAND(_Tpvec, _Tpwvec, _Tpw, suffix, wmul, width, vl, hvl) \ inline void v_mul_expand(const _Tpvec& a, const _Tpvec& b, _Tpwvec& c, _Tpwvec& d) \ { \ _Tpw CV_DECL_ALIGNED(32) ptr[_Tpwvec::nlanes*2] = {0}; \ - vsetvlmax_e##width##m2(); \ - vse##width##_v_##suffix##m2(ptr, wmul(a, b)); \ - vsetvlmax_e##width##m1(); \ - c = _Tpwvec(vle##width##_v_##suffix##m1(ptr)); \ - d = _Tpwvec(vle##width##_v_##suffix##m1(ptr+_Tpwvec::nlanes)); \ + vse##width##_v_##suffix##m2(ptr, wmul(a, b, vl), vl); \ + c = _Tpwvec(vle##width##_v_##suffix##m1(ptr, hvl)); \ + d = _Tpwvec(vle##width##_v_##suffix##m1(ptr+_Tpwvec::nlanes, hvl)); \ } -OPENCV_HAL_IMPL_RVV_MUL_EXPAND(v_uint8x16, v_uint16x8, ushort, u16, vwmulu_vv_u16m2, 16) -OPENCV_HAL_IMPL_RVV_MUL_EXPAND(v_int8x16, v_int16x8, short, i16, vwmul_vv_i16m2, 16) -OPENCV_HAL_IMPL_RVV_MUL_EXPAND(v_uint16x8, v_uint32x4, unsigned, u32, vwmulu_vv_u32m2, 32) -OPENCV_HAL_IMPL_RVV_MUL_EXPAND(v_int16x8, v_int32x4, int, i32, vwmul_vv_i32m2, 32) -OPENCV_HAL_IMPL_RVV_MUL_EXPAND(v_uint32x4, v_uint64x2, uint64, u64, vwmulu_vv_u64m2, 64) +OPENCV_HAL_IMPL_RVV_MUL_EXPAND(v_uint8x16, v_uint16x8, ushort, u16, vwmulu_vv_u16m2, 16, 16, 8) +OPENCV_HAL_IMPL_RVV_MUL_EXPAND(v_int8x16, v_int16x8, short, i16, vwmul_vv_i16m2, 16, 16, 8) +OPENCV_HAL_IMPL_RVV_MUL_EXPAND(v_uint16x8, v_uint32x4, unsigned, u32, vwmulu_vv_u32m2, 32, 8, 4) +OPENCV_HAL_IMPL_RVV_MUL_EXPAND(v_int16x8, v_int32x4, int, i32, vwmul_vv_i32m2, 32, 8, 4) +OPENCV_HAL_IMPL_RVV_MUL_EXPAND(v_uint32x4, v_uint64x2, uint64, u64, vwmulu_vv_u64m2, 64, 4, 2) inline v_int16x8 v_mul_hi(const v_int16x8& a, const v_int16x8& b) { - vsetvlmax_e16m1(); - return v_int16x8(vnsra_wx_i16m1(vwmul_vv_i32m2(a, b), 16)); + return v_int16x8(vnsra_wx_i16m1(vwmul_vv_i32m2(a, b, 8), 16, 8)); } inline v_uint16x8 v_mul_hi(const v_uint16x8& a, const v_uint16x8& b) { - vsetvlmax_e16m1(); - return v_uint16x8(vnsrl_wx_u16m1(vwmulu_vv_u32m2(a, b), 16)); + return v_uint16x8(vnsrl_wx_u16m1(vwmulu_vv_u32m2(a, b, 8), 16, 8)); } diff --git 
a/platforms/linux/riscv64-gcc.toolchain.cmake b/platforms/linux/riscv64-gcc.toolchain.cmake index c46d62a360..675879f86b 100644 --- a/platforms/linux/riscv64-gcc.toolchain.cmake +++ b/platforms/linux/riscv64-gcc.toolchain.cmake @@ -10,8 +10,8 @@ set(CMAKE_CXX_COMPILER ${RISCV_GCC_INSTALL_ROOT}/bin/riscv64-unknown-linux-gnu-g # Don't run the linker on compiler check set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) -set(CMAKE_C_FLAGS "-march=rv64gcv_zvqmac ${CMAKE_C_FLAGS}") -set(CMAKE_CXX_FLAGS "-march=rv64gcv_zvqmac ${CXX_FLAGS}") +set(CMAKE_C_FLAGS "-march=rv64gcv_zfh ${CMAKE_C_FLAGS}") +set(CMAKE_CXX_FLAGS "-march=rv64gcv_zfh ${CMAKE_CXX_FLAGS}") set(CMAKE_FIND_ROOT_PATH ${CMAKE_SYSROOT}) set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
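
The intrin_rvv.hpp hunks above all apply one pattern from the RVV v0.10 intrinsics API: every intrinsic now takes an explicit vl (vector length) argument, so the freestanding vsetvlmax_e*m1() calls are dropped and the lane count of the 128-bit vector type involved (16, 8, 4 or 2) is passed at each call site instead. Below is a minimal sketch of the new calling convention, assuming VLEN=128 so that vl equals the lane count, as this file does; add4_f32 is a hypothetical helper written only for illustration, not something the patch adds.

    #include <riscv_vector.h>

    inline void add4_f32(const float* a, const float* b, float* dst)
    {
        // Old style, removed by this patch:
        //   vsetvlmax_e32m1();
        //   vfloat32m1_t va = vle32_v_f32m1(a);
        const size_t vl = 4;                                 // 4 float lanes in a 128-bit f32m1 register
        vfloat32m1_t va = vle32_v_f32m1(a, vl);              // load with explicit vl
        vfloat32m1_t vb = vle32_v_f32m1(b, vl);
        vse32_v_f32m1(dst, vfadd_vv_f32m1(va, vb, vl), vl);  // add and store, again passing vl
    }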
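
The same API revision drops the vzero_*m1() helpers, so the patch splats an explicit zero instead, and because the integer and floating-point splats are different intrinsics, macros such as the rotate implementation are split into OPENCV_HAL_IMPL_RVV_ROTATE_INTEGER and OPENCV_HAL_IMPL_RVV_ROTATE_FP. A short sketch of the two zero idioms, again assuming 128-bit vectors; zero_vectors is a hypothetical function used only to keep the example self-contained.

    #include <riscv_vector.h>

    inline void zero_vectors()
    {
        vint32m1_t   zi = vmv_v_x_i32m1(0, 4);    // integer zero splat (vmv.v.x), as in the integer rotate/pack macros
        vfloat32m1_t zf = vfmv_v_f_f32m1(0, 4);   // floating-point zero splat (vfmv.v.f), as in the FP rotate macro
        (void)zi; (void)zf;                       // silence unused-variable warnings
    }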
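
The v_dotprod_expand family is the one place where the generated instruction sequence changes rather than just the call signature: the old code used the quad-widening accumulate intrinsics (vqmaccu_vv_u32m4, vqmacc_vv_i32m4 and their 64-bit counterparts), which come from the Zvqmac extension that the updated -march flags no longer request, so the patch rebuilds the terms with a widening multiply followed by a widening convert. A sketch of that replacement for the unsigned 8-bit case, with the same 16-lane vl as the hunks above; u8_dot_terms is a hypothetical name, and in the patch the result is stored to memory, deinterleaved and summed to produce the final dot product.

    #include <riscv_vector.h>

    inline vuint32m4_t u8_dot_terms(vuint8m1_t a, vuint8m1_t b)
    {
        vuint16m2_t prod = vwmulu_vv_u16m2(a, b, 16);   // 8-bit x 8-bit -> 16-bit products
        return vwcvtu_x_x_v_u32m4(prod, 16);            // widen the sixteen products to 32 bit
    }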