HAL universal intrinsics tests and documentation

pull/5324/head
Maksim Shabunin 9 years ago
parent 190d00ea3e
commit a275489f0a
10 changed files:
  1. modules/hal/include/opencv2/hal.hpp (13 lines changed)
  2. modules/hal/include/opencv2/hal/defs.h (20 lines changed)
  3. modules/hal/include/opencv2/hal/intrin.hpp (28 lines changed)
  4. modules/hal/include/opencv2/hal/intrin_cpp.hpp (1079 lines changed)
  5. modules/hal/include/opencv2/hal/intrin_neon.hpp (31 lines changed)
  6. modules/hal/include/opencv2/hal/intrin_sse.hpp (66 lines changed)
  7. modules/hal/test/test_intrin.cpp (864 lines changed)
  8. modules/hal/test/test_intrin_utils.hpp (234 lines changed)
  9. modules/hal/test/test_main.cpp (3 lines changed)
  10. modules/hal/test/test_precomp.hpp (11 lines changed)

@@ -49,10 +49,21 @@
/**
@defgroup hal Hardware Acceleration Layer
@{
@defgroup hal_intrin Universal intrinsics
@{
@defgroup hal_intrin_impl Private implementation helpers
@}
@defgroup hal_utils Platform-dependent utils
@}
*/
namespace cv { namespace hal {
//! @addtogroup hal
//! @{
namespace Error {
enum
@@ -93,6 +104,8 @@ void sqrt(const double* src, double* dst, int len);
void invSqrt(const float* src, float* dst, int len);
void invSqrt(const double* src, double* dst, int len);
//! @}
}} //cv::hal
#endif //__OPENCV_HAL_HPP__
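
Note: a minimal usage sketch of the element-wise HAL routines declared above (the wrapper function name is hypothetical, not part of this patch):

    #include "opencv2/hal.hpp"

    // computes dst[i] = 1 / sqrt(src[i]) for len elements
    void reciprocal_sqrt(const float* src, float* dst, int len)
    {
        cv::hal::invSqrt(src, dst, len);
    }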

@@ -45,6 +45,9 @@
#ifndef __OPENCV_DEF_H__
#define __OPENCV_DEF_H__
//! @addtogroup hal_utils
//! @{
#if !defined _CRT_SECURE_NO_DEPRECATE && defined _MSC_VER && _MSC_VER > 1300
# define _CRT_SECURE_NO_DEPRECATE /* to avoid multiple Visual Studio warnings */
#endif
@@ -335,9 +338,6 @@ Cv64suf;
# include "tegra_round.hpp"
#endif
//! @addtogroup core_utils
//! @{
#if CV_VFP
// 1. general scheme
#define ARM_ROUND(_value, _asm_string) \
@@ -567,15 +567,19 @@ CV_INLINE int cvIsInf( float value )
return (ieee754.u & 0x7fffffff) == 0x7f800000;
}
//! @}
#include <algorithm>
namespace cv
{
//! @addtogroup hal_utils
//! @{
/////////////// saturate_cast (used in image & signal processing) ///////////////////
/**
Template function for accurate conversion from one primitive type to another.
/** @brief Template function for accurate conversion from one primitive type to another.
The functions saturate_cast resemble the standard C++ cast operations, such as static_cast\<T\>()
and others. They perform an efficient and accurate conversion from one primitive type to another
@@ -618,8 +622,6 @@ template<typename _Tp> static inline _Tp saturate_cast(int64 v) { return _Tp(
/** @overload */
template<typename _Tp> static inline _Tp saturate_cast(uint64 v) { return _Tp(v); }
//! @cond IGNORED
template<> inline uchar saturate_cast<uchar>(schar v) { return (uchar)std::max((int)v, 0); }
template<> inline uchar saturate_cast<uchar>(ushort v) { return (uchar)std::min((unsigned)v, (unsigned)UCHAR_MAX); }
template<> inline uchar saturate_cast<uchar>(int v) { return (uchar)((unsigned)v <= UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0); }
@@ -664,12 +666,10 @@ template<> inline int saturate_cast<int>(double v) { return cvRound(v)
template<> inline unsigned saturate_cast<unsigned>(float v) { return cvRound(v); }
template<> inline unsigned saturate_cast<unsigned>(double v) { return cvRound(v); }
//! @endcond
//! @}
}
#endif // __cplusplus
//! @} core_utils
#endif //__OPENCV_HAL_H__
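
Note: a small sketch of the saturate_cast clamping behaviour documented above (the demo function is hypothetical):

    #include "opencv2/hal/defs.h"

    void saturate_demo()
    {
        uchar a = cv::saturate_cast<uchar>(300);    // 255: clamped to UCHAR_MAX
        uchar b = cv::saturate_cast<uchar>(-10);    // 0: negative input clamps to 0
        short c = cv::saturate_cast<short>(70000);  // 32767: clamped to SHRT_MAX
        uchar d = cv::saturate_cast<uchar>(15.7f);  // 16: rounded first, then clamped
        (void)a; (void)b; (void)c; (void)d;
    }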

@@ -48,6 +48,7 @@
#include <cmath>
#include <float.h>
#include <stdlib.h>
#include "opencv2/hal/defs.h"
#define OPENCV_HAL_ADD(a, b) ((a) + (b))
#define OPENCV_HAL_AND(a, b) ((a) & (b))
@@ -59,6 +60,10 @@
// access from within opencv code more accessible
namespace cv {
//! @addtogroup hal_intrin
//! @{
//! @cond IGNORED
template<typename _Tp> struct V_TypeTraits
{
typedef _Tp int_type;
@@ -82,6 +87,7 @@ template<> struct V_TypeTraits<uchar>
typedef int sum_type;
typedef ushort w_type;
typedef unsigned q_type;
enum { delta = 128, shift = 8 };
@@ -99,6 +105,7 @@ template<> struct V_TypeTraits<schar>
typedef int sum_type;
typedef short w_type;
typedef int q_type;
enum { delta = 128, shift = 8 };
@@ -265,8 +272,22 @@ template<> struct V_TypeTraits<double>
}
};
template <typename T> struct V_SIMD128Traits
{
enum { nlanes = 16 / sizeof(T) };
};
//! @endcond
//! @}
}
#ifdef CV_DOXYGEN
# undef CV_SSE2
# undef CV_NEON
#endif
#if CV_SSE2
#include "opencv2/hal/intrin_sse.hpp"
@@ -281,12 +302,19 @@ template<> struct V_TypeTraits<double>
#endif
//! @addtogroup hal_intrin
//! @{
#ifndef CV_SIMD128
//! Set to 1 if current compiler supports vector extensions (NEON or SSE is enabled)
#define CV_SIMD128 0
#endif
#ifndef CV_SIMD128_64F
//! Set to 1 if current intrinsics implementation supports 64-bit float vectors
#define CV_SIMD128_64F 0
#endif
//! @}
#endif
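
Note: a minimal sketch of how the CV_SIMD128 guard documented above is typically used (add_arrays is a hypothetical helper, not part of this patch):

    #include "opencv2/hal/intrin.hpp"

    void add_arrays(const float* a, const float* b, float* dst, int n)
    {
        int i = 0;
    #if CV_SIMD128
        // process 4 floats per 128-bit register when universal intrinsics are available
        for (; i + 4 <= n; i += 4)
            cv::v_store(dst + i, cv::v_load(a + i) + cv::v_load(b + i));
    #endif
        // scalar tail, also the fallback when CV_SIMD128 == 0
        for (; i < n; ++i)
            dst[i] = a[i] + b[i];
    }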

modules/hal/include/opencv2/hal/intrin_cpp.hpp: file diff suppressed because it is too large (1079 lines).

@@ -48,6 +48,8 @@
namespace cv
{
//! @cond IGNORED
#define CV_SIMD128 1
struct v_uint8x16
@@ -278,14 +280,15 @@ void v_rshr_##pack##_store(_Tp* ptr, const _Tpwvec& a) \
}
OPENCV_HAL_IMPL_NEON_PACK(v_uint8x16, uchar, uint8x8_t, u8, v_uint16x8, u16, pack, n)
OPENCV_HAL_IMPL_NEON_PACK(v_uint8x16, uchar, uint8x8_t, u8, v_int16x8, s16, pack_u, un)
OPENCV_HAL_IMPL_NEON_PACK(v_int8x16, schar, int8x8_t, s8, v_int16x8, s16, pack, n)
OPENCV_HAL_IMPL_NEON_PACK(v_uint16x8, ushort, uint16x4_t, u16, v_uint32x4, u32, pack, n)
OPENCV_HAL_IMPL_NEON_PACK(v_uint16x8, ushort, uint16x4_t, u16, v_int32x4, s32, pack_u, un)
OPENCV_HAL_IMPL_NEON_PACK(v_int16x8, short, int16x4_t, s16, v_int32x4, s32, pack, n)
OPENCV_HAL_IMPL_NEON_PACK(v_uint32x4, unsigned, uint32x2_t, u32, v_uint64x2, u64, pack, n)
OPENCV_HAL_IMPL_NEON_PACK(v_int32x4, int, int32x2_t, s32, v_int64x2, s64, pack, n)
OPENCV_HAL_IMPL_NEON_PACK(v_uint8x16, uchar, uint8x8_t, u8, v_int16x8, s16, pack_u, un)
OPENCV_HAL_IMPL_NEON_PACK(v_uint16x8, ushort, uint16x4_t, u16, v_int32x4, s32, pack_u, un)
inline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0,
const v_float32x4& m1, const v_float32x4& m2,
const v_float32x4& m3)
@@ -374,7 +377,7 @@ inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b)
{
int32x4_t c = vmull_s16(vget_low_s16(a.val), vget_low_s16(b.val));
int32x4_t d = vmull_s16(vget_high_s16(a.val), vget_high_s16(b.val));
int32x4x2_t cd = vtrnq_s32(c, d);
int32x4x2_t cd = vuzpq_s32(c, d);
return v_int32x4(vaddq_s32(cd.val[0], cd.val[1]));
}
@@ -497,6 +500,16 @@ OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint16x8, v_absdiff, vabdq_u16)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint32x4, v_absdiff, vabdq_u32)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float32x4, v_absdiff, vabdq_f32)
#define OPENCV_HAL_IMPL_NEON_BIN_FUNC2(_Tpvec, _Tpvec2, cast, func, intrin) \
inline _Tpvec2 func(const _Tpvec& a, const _Tpvec& b) \
{ \
return _Tpvec2(cast(intrin(a.val, b.val))); \
}
OPENCV_HAL_IMPL_NEON_BIN_FUNC2(v_int8x16, v_uint8x16, vreinterpretq_u8_s8, v_absdiff, vabdq_s8)
OPENCV_HAL_IMPL_NEON_BIN_FUNC2(v_int16x8, v_uint16x8, vreinterpretq_u16_s16, v_absdiff, vabdq_s16)
OPENCV_HAL_IMPL_NEON_BIN_FUNC2(v_int32x4, v_uint32x4, vreinterpretq_u32_s32, v_absdiff, vabdq_s32)
inline v_float32x4 v_magnitude(const v_float32x4& a, const v_float32x4& b)
{
v_float32x4 x(vmlaq_f32(vmulq_f32(a.val, a.val), b.val, b.val));
@@ -641,13 +654,13 @@ inline bool v_check_all(const v_float32x4& a)
{ return v_check_all(v_reinterpret_as_u32(a)); }
inline bool v_check_any(const v_int8x16& a)
{ return v_check_all(v_reinterpret_as_u8(a)); }
{ return v_check_any(v_reinterpret_as_u8(a)); }
inline bool v_check_any(const v_int16x8& a)
{ return v_check_all(v_reinterpret_as_u16(a)); }
{ return v_check_any(v_reinterpret_as_u16(a)); }
inline bool v_check_any(const v_int32x4& a)
{ return v_check_all(v_reinterpret_as_u32(a)); }
{ return v_check_any(v_reinterpret_as_u32(a)); }
inline bool v_check_any(const v_float32x4& a)
{ return v_check_all(v_reinterpret_as_u32(a)); }
{ return v_check_any(v_reinterpret_as_u32(a)); }
#define OPENCV_HAL_IMPL_NEON_SELECT(_Tpvec, suffix, usuffix) \
inline _Tpvec v_select(const _Tpvec& mask, const _Tpvec& a, const _Tpvec& b) \
@@ -678,6 +691,8 @@ OPENCV_HAL_IMPL_NEON_EXPAND(v_uint8x16, v_uint16x8, uchar, u8)
OPENCV_HAL_IMPL_NEON_EXPAND(v_int8x16, v_int16x8, schar, s8)
OPENCV_HAL_IMPL_NEON_EXPAND(v_uint16x8, v_uint32x4, ushort, u16)
OPENCV_HAL_IMPL_NEON_EXPAND(v_int16x8, v_int32x4, short, s16)
OPENCV_HAL_IMPL_NEON_EXPAND(v_uint32x4, v_uint64x2, uint, u32)
OPENCV_HAL_IMPL_NEON_EXPAND(v_int32x4, v_int64x2, int, s32)
inline v_uint32x4 v_load_expand_q(const uchar* ptr)
{
@@ -840,6 +855,8 @@ inline v_float32x4 v_cvt_f32(const v_int32x4& a)
return v_float32x4(vcvtq_f32_s32(a.val));
}
//! @endcond
}
#endif
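
Note: the hunk above fixes the signed v_check_any overloads to forward to v_check_any instead of v_check_all. A small sketch of the intended semantics (hypothetical helpers, assuming CV_SIMD128 == 1): v_check_all returns true only when the most significant bit is set in every lane, v_check_any when it is set in at least one lane.

    #include "opencv2/hal/intrin.hpp"

    bool all_positive8(const short* ptr)               // looks at 8 values
    {
        cv::v_int16x8 v = cv::v_load(ptr);
        cv::v_int16x8 mask = v > cv::v_setzero_s16();  // all-ones lanes where v[i] > 0
        return cv::v_check_all(mask);
    }

    bool any_positive8(const short* ptr)
    {
        cv::v_int16x8 mask = cv::v_load(ptr) > cv::v_setzero_s16();
        return cv::v_check_any(mask);
    }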

@@ -51,6 +51,8 @@
namespace cv
{
//! @cond IGNORED
struct v_uint8x16
{
typedef uchar lane_type;
@@ -296,6 +298,11 @@ OPENCV_HAL_IMPL_SSE_INIT_FROM_FLT(v_int32x4, s32)
OPENCV_HAL_IMPL_SSE_INIT_FROM_FLT(v_uint64x2, u64)
OPENCV_HAL_IMPL_SSE_INIT_FROM_FLT(v_int64x2, s64)
inline v_float32x4 v_reinterpret_as_f32(const v_float32x4& a) {return a; }
inline v_float64x2 v_reinterpret_as_f64(const v_float64x2& a) {return a; }
inline v_float32x4 v_reinterpret_as_f32(const v_float64x2& a) {return v_float32x4(_mm_castpd_ps(a.val)); }
inline v_float64x2 v_reinterpret_as_f64(const v_float32x4& a) {return v_float64x2(_mm_castps_pd(a.val)); }
//////////////// PACK ///////////////
inline v_uint8x16 v_pack(const v_uint16x8& a, const v_uint16x8& b)
{
@@ -430,6 +437,17 @@ inline void v_pack_u_store(ushort* ptr, const v_int32x4& a)
_mm_storel_epi64((__m128i*)ptr, r);
}
template<int n> inline
v_uint16x8 v_rshr_pack_u(const v_int32x4& a, const v_int32x4& b)
{
__m128i delta = _mm_set1_epi32(1 << (n-1)), delta32 = _mm_set1_epi32(32768);
__m128i a1 = _mm_sub_epi32(_mm_srai_epi32(_mm_add_epi32(a.val, delta), n), delta32);
__m128i a2 = _mm_sub_epi16(_mm_packs_epi32(a1, a1), _mm_set1_epi16(-32768));
__m128i b1 = _mm_sub_epi32(_mm_srai_epi32(_mm_add_epi32(b.val, delta), n), delta32);
__m128i b2 = _mm_sub_epi16(_mm_packs_epi32(b1, b1), _mm_set1_epi16(-32768));
return v_uint16x8(_mm_unpacklo_epi64(a2, b2));
}
template<int n> inline
void v_rshr_pack_u_store(ushort* ptr, const v_int32x4& a)
{
@@ -460,7 +478,7 @@ void v_rshr_pack_store(short* ptr, const v_int32x4& a)
{
__m128i delta = _mm_set1_epi32(1 << (n-1));
__m128i a1 = _mm_srai_epi32(_mm_add_epi32(a.val, delta), n);
_mm_storel_epi64((__m128i*)ptr, a1);
_mm_storel_epi64((__m128i*)ptr, _mm_packs_epi32(a1, a1));
}
@@ -469,7 +487,7 @@ inline v_uint32x4 v_pack(const v_uint64x2& a, const v_uint64x2& b)
{
__m128i v0 = _mm_unpacklo_epi32(a.val, b.val); // a0 a1 0 0
__m128i v1 = _mm_unpackhi_epi32(a.val, b.val); // b0 b1 0 0
return v_uint32x4(_mm_unpacklo_epi64(v0, v1));
return v_uint32x4(_mm_unpacklo_epi32(v0, v1));
}
inline void v_pack_store(unsigned* ptr, const v_uint64x2& a)
@@ -483,7 +501,7 @@ inline v_int32x4 v_pack(const v_int64x2& a, const v_int64x2& b)
{
__m128i v0 = _mm_unpacklo_epi32(a.val, b.val); // a0 a1 0 0
__m128i v1 = _mm_unpackhi_epi32(a.val, b.val); // b0 b1 0 0
return v_int32x4(_mm_unpacklo_epi64(v0, v1));
return v_int32x4(_mm_unpacklo_epi32(v0, v1));
}
inline void v_pack_store(int* ptr, const v_int64x2& a)
@@ -501,7 +519,7 @@ v_uint32x4 v_rshr_pack(const v_uint64x2& a, const v_uint64x2& b)
__m128i b1 = _mm_srli_epi64(_mm_add_epi64(b.val, delta2.val), n);
__m128i v0 = _mm_unpacklo_epi32(a1, b1); // a0 a1 0 0
__m128i v1 = _mm_unpackhi_epi32(a1, b1); // b0 b1 0 0
return v_uint32x4(_mm_unpacklo_epi64(v0, v1));
return v_uint32x4(_mm_unpacklo_epi32(v0, v1));
}
template<int n> inline
@@ -534,7 +552,7 @@ v_int32x4 v_rshr_pack(const v_int64x2& a, const v_int64x2& b)
__m128i b1 = v_srai_epi64(_mm_add_epi64(b.val, delta2.val), n);
__m128i v0 = _mm_unpacklo_epi32(a1, b1); // a0 a1 0 0
__m128i v1 = _mm_unpackhi_epi32(a1, b1); // b0 b1 0 0
return v_int32x4(_mm_unpacklo_epi64(v0, v1));
return v_int32x4(_mm_unpacklo_epi32(v0, v1));
}
template<int n> inline
@@ -630,8 +648,8 @@ inline void v_mul_expand(const v_int16x8& a, const v_int16x8& b,
{
__m128i v0 = _mm_mullo_epi16(a.val, b.val);
__m128i v1 = _mm_mulhi_epi16(a.val, b.val);
c.val = _mm_unpacklo_epi32(v0, v1);
d.val = _mm_unpackhi_epi32(v0, v1);
c.val = _mm_unpacklo_epi16(v0, v1);
d.val = _mm_unpackhi_epi16(v0, v1);
}
inline void v_mul_expand(const v_uint16x8& a, const v_uint16x8& b,
@@ -639,8 +657,8 @@ inline void v_mul_expand(const v_uint16x8& a, const v_uint16x8& b,
{
__m128i v0 = _mm_mullo_epi16(a.val, b.val);
__m128i v1 = _mm_mulhi_epu16(a.val, b.val);
c.val = _mm_unpacklo_epi32(v0, v1);
d.val = _mm_unpackhi_epi32(v0, v1);
c.val = _mm_unpacklo_epi16(v0, v1);
d.val = _mm_unpackhi_epi16(v0, v1);
}
inline void v_mul_expand(const v_uint32x4& a, const v_uint32x4& b,
@@ -869,6 +887,18 @@ inline _Tpuvec v_absdiff(const _Tpsvec& a, const _Tpsvec& b) \
OPENCV_HAL_IMPL_SSE_ABSDIFF_8_16(v_uint8x16, v_int8x16, 8, (int)0x80808080)
OPENCV_HAL_IMPL_SSE_ABSDIFF_8_16(v_uint16x8, v_int16x8, 16, (int)0x80008000)
inline v_uint32x4 v_absdiff(const v_uint32x4& a, const v_uint32x4& b)
{
return v_max(a, b) - v_min(a, b);
}
inline v_uint32x4 v_absdiff(const v_int32x4& a, const v_int32x4& b)
{
__m128i d = _mm_sub_epi32(a.val, b.val);
__m128i m = _mm_cmpgt_epi32(b.val, a.val);
return v_uint32x4(_mm_sub_epi32(_mm_xor_si128(d, m), m));
}
#define OPENCV_HAL_IMPL_SSE_MISC_FLT_OP(_Tpvec, _Tp, _Tpreg, suffix, absmask_vec) \
inline _Tpvec v_absdiff(const _Tpvec& a, const _Tpvec& b) \
{ \
@@ -1047,8 +1077,8 @@ OPENCV_HAL_IMPL_SSE_SELECT(v_uint16x8, si128)
OPENCV_HAL_IMPL_SSE_SELECT(v_int16x8, si128)
OPENCV_HAL_IMPL_SSE_SELECT(v_uint32x4, si128)
OPENCV_HAL_IMPL_SSE_SELECT(v_int32x4, si128)
OPENCV_HAL_IMPL_SSE_SELECT(v_uint64x2, si128)
OPENCV_HAL_IMPL_SSE_SELECT(v_int64x2, si128)
// OPENCV_HAL_IMPL_SSE_SELECT(v_uint64x2, si128)
// OPENCV_HAL_IMPL_SSE_SELECT(v_int64x2, si128)
OPENCV_HAL_IMPL_SSE_SELECT(v_float32x4, ps)
OPENCV_HAL_IMPL_SSE_SELECT(v_float64x2, pd)
@@ -1257,7 +1287,7 @@ inline void v_load_deinterleave(const uchar* ptr, v_uint8x16& a, v_uint8x16& b,
__m128i v0 = _mm_unpacklo_epi8(u0, u2); // a0 a8 b0 b8 ...
__m128i v1 = _mm_unpackhi_epi8(u0, u2); // a2 a10 b2 b10 ...
__m128i v2 = _mm_unpacklo_epi8(u1, u3); // a4 a12 b4 b12 ...
__m128i v3 = _mm_unpackhi_epi8(u1, u3); // a6 a14 b4 b14 ...
__m128i v3 = _mm_unpackhi_epi8(u1, u3); // a6 a14 b6 b14 ...
u0 = _mm_unpacklo_epi8(v0, v2); // a0 a4 a8 a12 ...
u1 = _mm_unpacklo_epi8(v1, v3); // a2 a6 a10 a14 ...
@@ -1266,13 +1296,13 @@ inline void v_load_deinterleave(const uchar* ptr, v_uint8x16& a, v_uint8x16& b,
v0 = _mm_unpacklo_epi8(u0, u1); // a0 a2 a4 a6 ...
v1 = _mm_unpacklo_epi8(u2, u3); // a1 a3 a5 a7 ...
v2 = _mm_unpackhi_epi8(u0, u1); // b0 b2 b4 b6 ...
v3 = _mm_unpackhi_epi8(u2, u3); // b1 b3 b5 b7 ...
v2 = _mm_unpackhi_epi8(u0, u1); // c0 c2 c4 c6 ...
v3 = _mm_unpackhi_epi8(u2, u3); // c1 c3 c5 c7 ...
a.val = _mm_unpacklo_epi8(v0, v1);
b.val = _mm_unpacklo_epi8(v2, v3);
c.val = _mm_unpackhi_epi8(v0, v1);
d.val = _mm_unpacklo_epi8(v2, v3);
b.val = _mm_unpackhi_epi8(v0, v1);
c.val = _mm_unpacklo_epi8(v2, v3);
d.val = _mm_unpackhi_epi8(v2, v3);
}
inline void v_load_deinterleave(const ushort* ptr, v_uint16x8& a, v_uint16x8& b, v_uint16x8& c)
@@ -1560,6 +1590,8 @@ inline v_float64x2 v_cvt_f64(const v_float32x4& a)
return v_float64x2(_mm_cvtps_pd(a.val));
}
//! @endcond
}
#endif
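
Note: the signed 32-bit v_absdiff added above uses a branch-free sign-mask trick. A scalar sketch of the same computation (standalone helper, for illustration only):

    // m is all-ones exactly when b > a (like _mm_cmpgt_epi32), so
    // (d ^ m) - m equals -d in that case and d otherwise.
    static inline unsigned absdiff_s32_scalar(int a, int b)
    {
        int d = a - b;            // may wrap, matching _mm_sub_epi32
        int m = -(int)(b > a);    // 0 or -1 (all bits set)
        return (unsigned)((d ^ m) - m);
    }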

@@ -0,0 +1,864 @@
#include "test_intrin_utils.hpp"
#include <climits>
using namespace cv;
template<typename R> struct TheTest
{
typedef typename R::lane_type LaneType;
TheTest & test_loadstore()
{
AlignedData<R> data;
AlignedData<R> out;
// check if addresses are aligned and unaligned respectively
EXPECT_EQ((size_t)0, (size_t)&data.a.d % 16);
EXPECT_NE((size_t)0, (size_t)&data.u.d % 16);
EXPECT_EQ((size_t)0, (size_t)&out.a.d % 16);
EXPECT_NE((size_t)0, (size_t)&out.u.d % 16);
// check some initialization methods
R r1 = data.a;
R r2 = v_load(data.u.d);
R r3 = v_load_aligned(data.a.d);
R r4(r2);
EXPECT_EQ(data.a[0], r1.get0());
EXPECT_EQ(data.u[0], r2.get0());
EXPECT_EQ(data.a[0], r3.get0());
EXPECT_EQ(data.u[0], r4.get0());
// check some store methods
out.u.clear();
out.a.clear();
v_store(out.u.d, r1);
v_store_aligned(out.a.d, r2);
EXPECT_EQ(data.a, out.a);
EXPECT_EQ(data.u, out.u);
// check more store methods
Data<R> d, res(0);
R r5 = d;
v_store_high(res.mid(), r5);
v_store_low(res.d, r5);
EXPECT_EQ(d, res);
// check halves load correctness
res.clear();
R r6 = v_load_halves(d.d, d.mid());
v_store(res.d, r6);
EXPECT_EQ(d, res);
// zero, all
Data<R> resZ = RegTrait<R>::zero();
Data<R> resV = RegTrait<R>::all(8);
for (int i = 0; i < R::nlanes; ++i)
{
EXPECT_EQ((LaneType)0, resZ[i]);
EXPECT_EQ((LaneType)8, resV[i]);
}
// reinterpret_as
v_uint8x16 vu8 = v_reinterpret_as_u8(r1); out.a.clear(); v_store((uchar*)out.a.d, vu8); EXPECT_EQ(data.a, out.a);
v_int8x16 vs8 = v_reinterpret_as_s8(r1); out.a.clear(); v_store((schar*)out.a.d, vs8); EXPECT_EQ(data.a, out.a);
v_uint16x8 vu16 = v_reinterpret_as_u16(r1); out.a.clear(); v_store((ushort*)out.a.d, vu16); EXPECT_EQ(data.a, out.a);
v_int16x8 vs16 = v_reinterpret_as_s16(r1); out.a.clear(); v_store((short*)out.a.d, vs16); EXPECT_EQ(data.a, out.a);
v_uint32x4 vu32 = v_reinterpret_as_u32(r1); out.a.clear(); v_store((unsigned*)out.a.d, vu32); EXPECT_EQ(data.a, out.a);
v_int32x4 vs32 = v_reinterpret_as_s32(r1); out.a.clear(); v_store((int*)out.a.d, vs32); EXPECT_EQ(data.a, out.a);
v_uint64x2 vu64 = v_reinterpret_as_u64(r1); out.a.clear(); v_store((uint64*)out.a.d, vu64); EXPECT_EQ(data.a, out.a);
v_int64x2 vs64 = v_reinterpret_as_s64(r1); out.a.clear(); v_store((int64*)out.a.d, vs64); EXPECT_EQ(data.a, out.a);
v_float32x4 vf32 = v_reinterpret_as_f32(r1); out.a.clear(); v_store((float*)out.a.d, vf32); EXPECT_EQ(data.a, out.a);
#if CV_SIMD128_64F
v_float64x2 vf64 = v_reinterpret_as_f64(r1); out.a.clear(); v_store((double*)out.a.d, vf64); EXPECT_EQ(data.a, out.a);
#endif
return *this;
}
TheTest & test_interleave()
{
Data<R> data1, data2, data3, data4;
data2 += 20;
data3 += 40;
data4 += 60;
R a = data1, b = data2, c = data3;
R d = data1, e = data2, f = data3, g = data4;
LaneType buf3[R::nlanes * 3];
LaneType buf4[R::nlanes * 4];
v_store_interleave(buf3, a, b, c);
v_store_interleave(buf4, d, e, f, g);
Data<R> z(0);
a = b = c = d = e = f = g = z;
v_load_deinterleave(buf3, a, b, c);
v_load_deinterleave(buf4, d, e, f, g);
for (int i = 0; i < R::nlanes; ++i)
{
EXPECT_EQ(data1, Data<R>(a));
EXPECT_EQ(data2, Data<R>(b));
EXPECT_EQ(data3, Data<R>(c));
EXPECT_EQ(data1, Data<R>(d));
EXPECT_EQ(data2, Data<R>(e));
EXPECT_EQ(data3, Data<R>(f));
EXPECT_EQ(data4, Data<R>(g));
}
return *this;
}
// v_expand and v_load_expand
TheTest & test_expand()
{
typedef typename RegTrait<R>::w_reg Rx2;
Data<R> dataA;
R a = dataA;
Data<Rx2> resB = v_load_expand(dataA.d);
Rx2 c, d;
v_expand(a, c, d);
Data<Rx2> resC = c, resD = d;
const int n = Rx2::nlanes;
for (int i = 0; i < n; ++i)
{
EXPECT_EQ(dataA[i], resB[i]);
EXPECT_EQ(dataA[i], resC[i]);
EXPECT_EQ(dataA[i + n], resD[i]);
}
return *this;
}
TheTest & test_expand_q()
{
typedef typename RegTrait<R>::q_reg Rx4;
Data<R> data;
Data<Rx4> out = v_load_expand_q(data.d);
const int n = Rx4::nlanes;
for (int i = 0; i < n; ++i)
EXPECT_EQ(data[i], out[i]);
return *this;
}
TheTest & test_addsub()
{
Data<R> dataA, dataB;
dataB.reverse();
R a = dataA, b = dataB;
Data<R> resC = a + b, resD = a - b;
for (int i = 0; i < R::nlanes; ++i)
{
EXPECT_EQ(saturate_cast<LaneType>(dataA[i] + dataB[i]), resC[i]);
EXPECT_EQ(saturate_cast<LaneType>(dataA[i] - dataB[i]), resD[i]);
}
return *this;
}
TheTest & test_addsub_wrap()
{
Data<R> dataA, dataB;
dataB.reverse();
R a = dataA, b = dataB;
Data<R> resC = v_add_wrap(a, b),
resD = v_sub_wrap(a, b);
for (int i = 0; i < R::nlanes; ++i)
{
EXPECT_EQ((LaneType)(dataA[i] + dataB[i]), resC[i]);
EXPECT_EQ((LaneType)(dataA[i] - dataB[i]), resD[i]);
}
return *this;
}
TheTest & test_mul()
{
Data<R> dataA, dataB;
dataB.reverse();
R a = dataA, b = dataB;
Data<R> resC = a * b;
for (int i = 0; i < R::nlanes; ++i)
{
EXPECT_EQ(dataA[i] * dataB[i], resC[i]);
}
return *this;
}
TheTest & test_div()
{
Data<R> dataA, dataB;
dataB.reverse();
R a = dataA, b = dataB;
Data<R> resC = a / b;
for (int i = 0; i < R::nlanes; ++i)
{
EXPECT_EQ(dataA[i] / dataB[i], resC[i]);
}
return *this;
}
TheTest & test_mul_expand()
{
typedef typename RegTrait<R>::w_reg Rx2;
Data<R> dataA, dataB(2);
R a = dataA, b = dataB;
Rx2 c, d;
v_mul_expand(a, b, c, d);
Data<Rx2> resC = c, resD = d;
const int n = R::nlanes / 2;
for (int i = 0; i < n; ++i)
{
EXPECT_EQ((typename Rx2::lane_type)dataA[i] * dataB[i], resC[i]);
EXPECT_EQ((typename Rx2::lane_type)dataA[i + n] * dataB[i + n], resD[i]);
}
return *this;
}
template <int s>
TheTest & test_shift()
{
Data<R> dataA;
R a = dataA;
Data<R> resB = a << s, resC = v_shl<s>(a), resD = a >> s, resE = v_shr<s>(a);
for (int i = 0; i < R::nlanes; ++i)
{
EXPECT_EQ(dataA[i] << s, resB[i]);
EXPECT_EQ(dataA[i] << s, resC[i]);
EXPECT_EQ(dataA[i] >> s, resD[i]);
EXPECT_EQ(dataA[i] >> s, resE[i]);
}
return *this;
}
TheTest & test_cmp()
{
Data<R> dataA, dataB;
dataB.reverse();
dataB += 1;
R a = dataA, b = dataB;
Data<R> resC = (a == b);
Data<R> resD = (a != b);
Data<R> resE = (a > b);
Data<R> resF = (a >= b);
Data<R> resG = (a < b);
Data<R> resH = (a <= b);
for (int i = 0; i < R::nlanes; ++i)
{
EXPECT_EQ(dataA[i] == dataB[i], resC[i] != 0);
EXPECT_EQ(dataA[i] != dataB[i], resD[i] != 0);
EXPECT_EQ(dataA[i] > dataB[i], resE[i] != 0);
EXPECT_EQ(dataA[i] >= dataB[i], resF[i] != 0);
EXPECT_EQ(dataA[i] < dataB[i], resG[i] != 0);
EXPECT_EQ(dataA[i] <= dataB[i], resH[i] != 0);
}
return *this;
}
TheTest & test_dot_prod()
{
typedef typename RegTrait<R>::w_reg Rx2;
Data<R> dataA, dataB(2);
R a = dataA, b = dataB;
Data<Rx2> res = v_dotprod(a, b);
const int n = R::nlanes / 2;
for (int i = 0; i < n; ++i)
{
EXPECT_EQ(dataA[i*2] * dataB[i*2] + dataA[i*2 + 1] * dataB[i*2 + 1], res[i]);
}
return *this;
}
TheTest & test_logic()
{
Data<R> dataA, dataB(2);
R a = dataA, b = dataB;
Data<R> resC = a & b, resD = a | b, resE = a ^ b, resF = ~a;
for (int i = 0; i < R::nlanes; ++i)
{
EXPECT_EQ(dataA[i] & dataB[i], resC[i]);
EXPECT_EQ(dataA[i] | dataB[i], resD[i]);
EXPECT_EQ(dataA[i] ^ dataB[i], resE[i]);
EXPECT_EQ((LaneType)~dataA[i], resF[i]);
}
return *this;
}
TheTest & test_sqrt_abs()
{
Data<R> dataA, dataD;
dataD *= -1.0;
R a = dataA, d = dataD;
Data<R> resB = v_sqrt(a), resC = v_invsqrt(a), resE = v_abs(d);
for (int i = 0; i < R::nlanes; ++i)
{
EXPECT_FLOAT_EQ((float)std::sqrt(dataA[i]), (float)resB[i]);
EXPECT_FLOAT_EQ((float)1/std::sqrt(dataA[i]), (float)resC[i]);
EXPECT_FLOAT_EQ((float)abs(dataA[i]), (float)resE[i]);
}
return *this;
}
TheTest & test_min_max()
{
Data<R> dataA, dataB;
dataB.reverse();
R a = dataA, b = dataB;
Data<R> resC = v_min(a, b), resD = v_max(a, b);
for (int i = 0; i < R::nlanes; ++i)
{
EXPECT_EQ(std::min(dataA[i], dataB[i]), resC[i]);
EXPECT_EQ(std::max(dataA[i], dataB[i]), resD[i]);
}
return *this;
}
TheTest & test_absdiff()
{
typedef typename RegTrait<R>::u_reg Ru;
typedef typename Ru::lane_type u_type;
Data<R> dataA(std::numeric_limits<LaneType>::max()),
dataB(std::numeric_limits<LaneType>::min());
dataA[0] = (LaneType)-1;
dataB[0] = 1;
dataA[1] = 2;
dataB[1] = (LaneType)-2;
R a = dataA, b = dataB;
Data<Ru> resC = v_absdiff(a, b);
const u_type mask = std::numeric_limits<LaneType>::is_signed ? (u_type)(1 << (sizeof(u_type)*8 - 1)) : 0;
for (int i = 0; i < Ru::nlanes; ++i)
{
u_type uA = dataA[i] ^ mask;
u_type uB = dataB[i] ^ mask;
EXPECT_EQ(uA > uB ? uA - uB : uB - uA, resC[i]);
}
return *this;
}
TheTest & test_float_absdiff()
{
Data<R> dataA(std::numeric_limits<LaneType>::max()),
dataB(std::numeric_limits<LaneType>::min());
dataA[0] = -1;
dataB[0] = 1;
dataA[1] = 2;
dataB[1] = -2;
R a = dataA, b = dataB;
Data<R> resC = v_absdiff(a, b);
for (int i = 0; i < R::nlanes; ++i)
{
EXPECT_EQ(dataA[i] > dataB[i] ? dataA[i] - dataB[i] : dataB[i] - dataA[i], resC[i]);
}
return *this;
}
TheTest & test_reduce()
{
Data<R> dataA;
R a = dataA;
EXPECT_EQ((LaneType)1, v_reduce_min(a));
EXPECT_EQ((LaneType)R::nlanes, v_reduce_max(a));
EXPECT_EQ((LaneType)(1 + R::nlanes)*2, v_reduce_sum(a));
return *this;
}
TheTest & test_mask()
{
Data<R> dataA, dataB, dataC, dataD(1), dataE(2);
dataA[1] *= (LaneType)-1;
dataC *= (LaneType)-1;
R a = dataA, b = dataB, c = dataC, d = dataD, e = dataE;
int m = v_signmask(a);
EXPECT_EQ(2, m);
EXPECT_EQ(false, v_check_all(a));
EXPECT_EQ(false, v_check_all(b));
EXPECT_EQ(true, v_check_all(c));
EXPECT_EQ(true, v_check_any(a));
EXPECT_EQ(false, v_check_any(b));
EXPECT_EQ(true, v_check_any(c));
typedef V_TypeTraits<LaneType> Traits;
typedef typename Traits::int_type int_type;
R f = v_select(b, d, e);
Data<R> resF = f;
for (int i = 0; i < R::nlanes; ++i)
{
int_type m2 = Traits::reinterpret_int(dataB[i]);
EXPECT_EQ((Traits::reinterpret_int(dataD[i]) & m2)
| (Traits::reinterpret_int(dataE[i]) & ~m2),
Traits::reinterpret_int(resF[i]));
}
return *this;
}
template <int s>
TheTest & test_pack()
{
typedef typename RegTrait<R>::w_reg Rx2;
typedef typename Rx2::lane_type w_type;
Data<Rx2> dataA, dataB;
dataA += std::numeric_limits<LaneType>::is_signed ? -10 : 10;
dataB *= 10;
Rx2 a = dataA, b = dataB;
Data<R> resC = v_pack(a, b);
Data<R> resD = v_rshr_pack<s>(a, b);
Data<R> resE(0);
v_pack_store(resE.d, b);
Data<R> resF(0);
v_rshr_pack_store<s>(resF.d, b);
const int n = Rx2::nlanes;
const w_type add = (w_type)1 << (s - 1);
for (int i = 0; i < n; ++i)
{
EXPECT_EQ(saturate_cast<LaneType>(dataA[i]), resC[i]);
EXPECT_EQ(saturate_cast<LaneType>(dataB[i]), resC[i + n]);
EXPECT_EQ(saturate_cast<LaneType>((dataA[i] + add) >> s), resD[i]);
EXPECT_EQ(saturate_cast<LaneType>((dataB[i] + add) >> s), resD[i + n]);
EXPECT_EQ(saturate_cast<LaneType>(dataB[i]), resE[i]);
EXPECT_EQ((LaneType)0, resE[i + n]);
EXPECT_EQ(saturate_cast<LaneType>((dataB[i] + add) >> s), resF[i]);
EXPECT_EQ((LaneType)0, resF[i + n]);
}
return *this;
}
template <int s>
TheTest & test_pack_u()
{
typedef typename RegTrait<R>::w_reg Rx2;
typedef typename RegTrait<Rx2>::int_reg Ri2;
typedef typename Ri2::lane_type w_type;
Data<Ri2> dataA, dataB;
dataA += -10;
dataB *= 10;
Ri2 a = dataA, b = dataB;
Data<R> resC = v_pack_u(a, b);
Data<R> resD = v_rshr_pack_u<s>(a, b);
Data<R> resE(0);
v_pack_u_store(resE.d, b);
Data<R> resF(0);
v_rshr_pack_u_store<s>(resF.d, b);
const int n = Ri2::nlanes;
const w_type add = (w_type)1 << (s - 1);
for (int i = 0; i < n; ++i)
{
EXPECT_EQ(saturate_cast<LaneType>(dataA[i]), resC[i]);
EXPECT_EQ(saturate_cast<LaneType>(dataB[i]), resC[i + n]);
EXPECT_EQ(saturate_cast<LaneType>((dataA[i] + add) >> s), resD[i]);
EXPECT_EQ(saturate_cast<LaneType>((dataB[i] + add) >> s), resD[i + n]);
EXPECT_EQ(saturate_cast<LaneType>(dataB[i]), resE[i]);
EXPECT_EQ((LaneType)0, resE[i + n]);
EXPECT_EQ(saturate_cast<LaneType>((dataB[i] + add) >> s), resF[i]);
EXPECT_EQ((LaneType)0, resF[i + n]);
}
return *this;
}
TheTest & test_unpack()
{
Data<R> dataA, dataB;
dataB *= 10;
R a = dataA, b = dataB;
R c, d, e, f, lo, hi;
v_zip(a, b, c, d);
v_recombine(a, b, e, f);
lo = v_combine_low(a, b);
hi = v_combine_high(a, b);
Data<R> resC = c, resD = d, resE = e, resF = f, resLo = lo, resHi = hi;
const int n = R::nlanes/2;
for (int i = 0; i < n; ++i)
{
EXPECT_EQ(dataA[i], resC[i*2]);
EXPECT_EQ(dataB[i], resC[i*2+1]);
EXPECT_EQ(dataA[i+n], resD[i*2]);
EXPECT_EQ(dataB[i+n], resD[i*2+1]);
EXPECT_EQ(dataA[i], resE[i]);
EXPECT_EQ(dataB[i], resE[i+n]);
EXPECT_EQ(dataA[i+n], resF[i]);
EXPECT_EQ(dataB[i+n], resF[i+n]);
EXPECT_EQ(dataA[i], resLo[i]);
EXPECT_EQ(dataB[i], resLo[i+n]);
EXPECT_EQ(dataA[i+n], resHi[i]);
EXPECT_EQ(dataB[i+n], resHi[i+n]);
}
return *this;
}
template<int s>
TheTest & test_extract()
{
Data<R> dataA, dataB;
dataB *= 10;
R a = dataA, b = dataB;
Data<R> resC = v_extract<s>(a, b);
for (int i = 0; i < R::nlanes; ++i)
{
if (i + s >= R::nlanes)
EXPECT_EQ(dataB[i - R::nlanes + s], resC[i]);
else
EXPECT_EQ(dataA[i + s], resC[i]);
}
return *this;
}
TheTest & test_float_math()
{
typedef typename RegTrait<R>::int_reg Ri;
Data<R> data1, data2, data3;
data1 *= 1.1;
data2 += 10;
R a1 = data1, a2 = data2, a3 = data3;
Data<Ri> resB = v_round(a1),
resC = v_trunc(a1),
resD = v_floor(a1),
resE = v_ceil(a1);
Data<R> resF = v_magnitude(a1, a2),
resG = v_sqr_magnitude(a1, a2),
resH = v_muladd(a1, a2, a3);
for (int i = 0; i < R::nlanes; ++i)
{
EXPECT_EQ(cvRound(data1[i]), resB[i]);
EXPECT_EQ((typename Ri::lane_type)data1[i], resC[i]);
EXPECT_EQ(cvFloor(data1[i]), resD[i]);
EXPECT_EQ(cvCeil(data1[i]), resE[i]);
EXPECT_DOUBLE_EQ(std::sqrt(data1[i]*data1[i] + data2[i]*data2[i]), resF[i]);
EXPECT_DOUBLE_EQ(data1[i]*data1[i] + data2[i]*data2[i], resG[i]);
EXPECT_DOUBLE_EQ(data1[i]*data2[i] + data3[i], resH[i]);
}
return *this;
}
TheTest & test_float_cvt32()
{
typedef v_float32x4 Rt;
Data<R> dataA;
dataA *= 1.1;
R a = dataA;
Rt b = v_cvt_f32(a);
Data<Rt> resB = b;
int n = std::min<int>(Rt::nlanes, R::nlanes);
for (int i = 0; i < n; ++i)
{
EXPECT_EQ((typename Rt::lane_type)dataA[i], resB[i]);
}
return *this;
}
TheTest & test_float_cvt64()
{
#if CV_SIMD128_64F
typedef v_float64x2 Rt;
Data<R> dataA;
dataA *= 1.1;
R a = dataA;
Rt b = v_cvt_f64(a);
Data<Rt> resB = b;
int n = std::min<int>(Rt::nlanes, R::nlanes);
for (int i = 0; i < n; ++i)
{
EXPECT_EQ((typename Rt::lane_type)dataA[i], resB[i]);
}
#endif
return *this;
}
TheTest & test_matmul()
{
Data<R> dataV, dataA, dataB, dataC, dataD;
dataB.reverse();
dataC += 2;
dataD *= 0.3;
R v = dataV, a = dataA, b = dataB, c = dataC, d = dataD;
Data<R> res = v_matmul(v, a, b, c, d);
for (int i = 0; i < R::nlanes; ++i)
{
LaneType val = dataV[0] * dataA[i]
+ dataV[1] * dataB[i]
+ dataV[2] * dataC[i]
+ dataV[3] * dataD[i];
EXPECT_DOUBLE_EQ(val, res[i]);
}
return *this;
}
TheTest & test_transpose()
{
Data<R> dataA, dataB, dataC, dataD;
dataB *= 5;
dataC *= 10;
dataD *= 15;
R a = dataA, b = dataB, c = dataC, d = dataD;
R e, f, g, h;
v_transpose4x4(a, b, c, d,
e, f, g, h);
Data<R> res[4] = {e, f, g, h};
for (int i = 0; i < R::nlanes; ++i)
{
EXPECT_EQ(dataA[i], res[i][0]);
EXPECT_EQ(dataB[i], res[i][1]);
EXPECT_EQ(dataC[i], res[i][2]);
EXPECT_EQ(dataD[i], res[i][3]);
}
return *this;
}
};
//============= 8-bit integer =====================================================================
TEST(hal_intrin, uint8x16) {
TheTest<v_uint8x16>()
.test_loadstore()
.test_interleave()
.test_expand()
.test_expand_q()
.test_addsub()
.test_addsub_wrap()
.test_cmp()
.test_logic()
.test_min_max()
.test_absdiff()
.test_mask()
.test_pack<1>().test_pack<2>().test_pack<3>().test_pack<8>()
.test_pack_u<1>().test_pack_u<2>().test_pack_u<3>().test_pack_u<8>()
.test_unpack()
.test_extract<0>().test_extract<1>().test_extract<8>().test_extract<15>()
;
}
TEST(hal_intrin, int8x16) {
TheTest<v_int8x16>()
.test_loadstore()
.test_interleave()
.test_expand()
.test_expand_q()
.test_addsub()
.test_addsub_wrap()
.test_cmp()
.test_logic()
.test_min_max()
.test_absdiff()
.test_mask()
.test_pack<1>().test_pack<2>().test_pack<3>().test_pack<8>()
.test_unpack()
.test_extract<0>().test_extract<1>().test_extract<8>().test_extract<15>()
;
}
//============= 16-bit integer =====================================================================
TEST(hal_intrin, uint16x8) {
TheTest<v_uint16x8>()
.test_loadstore()
.test_interleave()
.test_expand()
.test_addsub()
.test_addsub_wrap()
.test_mul()
.test_mul_expand()
.test_cmp()
.test_shift<1>()
.test_shift<8>()
.test_logic()
.test_min_max()
.test_absdiff()
.test_mask()
.test_pack<1>().test_pack<2>().test_pack<7>().test_pack<16>()
.test_pack_u<1>().test_pack_u<2>().test_pack_u<7>().test_pack_u<16>()
.test_unpack()
.test_extract<0>().test_extract<1>().test_extract<4>().test_extract<7>()
;
}
TEST(hal_intrin, int16x8) {
TheTest<v_int16x8>()
.test_loadstore()
.test_interleave()
.test_expand()
.test_addsub()
.test_addsub_wrap()
.test_mul()
.test_mul_expand()
.test_cmp()
.test_shift<1>()
.test_shift<8>()
.test_dot_prod()
.test_logic()
.test_min_max()
.test_absdiff()
.test_mask()
.test_pack<1>().test_pack<2>().test_pack<7>().test_pack<16>()
.test_unpack()
.test_extract<0>().test_extract<1>().test_extract<4>().test_extract<7>()
;
}
//============= 32-bit integer =====================================================================
TEST(hal_intrin, uint32x4) {
TheTest<v_uint32x4>()
.test_loadstore()
.test_interleave()
.test_expand()
.test_addsub()
.test_mul()
.test_mul_expand()
.test_cmp()
.test_shift<1>()
.test_shift<8>()
.test_logic()
.test_min_max()
.test_absdiff()
.test_reduce()
.test_mask()
.test_pack<1>().test_pack<2>().test_pack<15>().test_pack<32>()
.test_unpack()
.test_extract<0>().test_extract<1>().test_extract<2>().test_extract<3>()
.test_transpose()
;
}
TEST(hal_intrin, int32x4) {
TheTest<v_int32x4>()
.test_loadstore()
.test_interleave()
.test_expand()
.test_addsub()
.test_mul()
.test_cmp()
.test_shift<1>().test_shift<8>()
.test_logic()
.test_min_max()
.test_absdiff()
.test_reduce()
.test_mask()
.test_pack<1>().test_pack<2>().test_pack<15>().test_pack<32>()
.test_unpack()
.test_extract<0>().test_extract<1>().test_extract<2>().test_extract<3>()
.test_float_cvt32()
.test_float_cvt64()
.test_transpose()
;
}
//============= 64-bit integer =====================================================================
TEST(hal_intrin, uint64x2) {
TheTest<v_uint64x2>()
.test_loadstore()
.test_addsub()
.test_shift<1>().test_shift<8>()
.test_logic()
.test_extract<0>().test_extract<1>()
;
}
TEST(hal_intrin, int64x2) {
TheTest<v_int64x2>()
.test_loadstore()
.test_addsub()
.test_shift<1>().test_shift<8>()
.test_logic()
.test_extract<0>().test_extract<1>()
;
}
//============= Floating point =====================================================================
TEST(hal_intrin, float32x4) {
TheTest<v_float32x4>()
.test_loadstore()
.test_interleave()
.test_addsub()
.test_mul()
.test_div()
.test_cmp()
.test_sqrt_abs()
.test_min_max()
.test_float_absdiff()
.test_reduce()
.test_mask()
.test_unpack()
.test_float_math()
.test_float_cvt64()
.test_matmul()
.test_transpose()
;
}
#if CV_SIMD128_64F
TEST(hal_intrin, float64x2) {
TheTest<v_float64x2>()
.test_loadstore()
.test_addsub()
.test_mul()
.test_div()
.test_cmp()
.test_sqrt_abs()
.test_min_max()
.test_float_absdiff()
.test_mask()
.test_unpack()
.test_float_math()
.test_float_cvt32()
;
}
#endif

@@ -0,0 +1,234 @@
#ifndef _TEST_UTILS_HPP_
#define _TEST_UTILS_HPP_
#include "opencv2/hal/intrin.hpp"
#include "opencv2/ts.hpp"
#include <ostream>
#include <algorithm>
template <typename R> struct Data;
template <int N> struct initializer;
template <> struct initializer<16>
{
template <typename R> static R init(const Data<R> & d)
{
return R(d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]);
}
};
template <> struct initializer<8>
{
template <typename R> static R init(const Data<R> & d)
{
return R(d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7]);
}
};
template <> struct initializer<4>
{
template <typename R> static R init(const Data<R> & d)
{
return R(d[0], d[1], d[2], d[3]);
}
};
template <> struct initializer<2>
{
template <typename R> static R init(const Data<R> & d)
{
return R(d[0], d[1]);
}
};
//==================================================================================================
template <typename R> struct Data
{
typedef typename R::lane_type LaneType;
Data()
{
for (int i = 0; i < R::nlanes; ++i)
d[i] = (LaneType)(i + 1);
}
Data(LaneType val)
{
fill(val);
}
Data(const R & r)
{
*this = r;
}
operator R ()
{
return initializer<R::nlanes>().init(*this);
}
Data<R> & operator=(const R & r)
{
v_store(d, r);
return *this;
}
template <typename T> Data<R> & operator*=(T m)
{
for (int i = 0; i < R::nlanes; ++i)
d[i] *= (LaneType)m;
return *this;
}
template <typename T> Data<R> & operator+=(T m)
{
for (int i = 0; i < R::nlanes; ++i)
d[i] += (LaneType)m;
return *this;
}
void fill(LaneType val)
{
for (int i = 0; i < R::nlanes; ++i)
d[i] = val;
}
void reverse()
{
for (int i = 0; i < R::nlanes / 2; ++i)
std::swap(d[i], d[R::nlanes - i - 1]);
}
const LaneType & operator[](int i) const
{
CV_Assert(i >= 0 && i < R::nlanes);
return d[i];
}
LaneType & operator[](int i)
{
CV_Assert(i >= 0 && i < R::nlanes);
return d[i];
}
const LaneType * mid() const
{
return d + R::nlanes / 2;
}
LaneType * mid()
{
return d + R::nlanes / 2;
}
bool operator==(const Data<R> & other) const
{
for (int i = 0; i < R::nlanes; ++i)
if (d[i] != other.d[i])
return false;
return true;
}
void clear()
{
fill(0);
}
bool isZero() const
{
return isValue(0);
}
bool isValue(uchar val) const
{
for (int i = 0; i < R::nlanes; ++i)
if (d[i] != val)
return false;
return true;
}
LaneType d[R::nlanes];
};
template<typename R> struct AlignedData
{
Data<R> CV_DECL_ALIGNED(16) a; // aligned
char dummy;
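// the single padding byte above deliberately pushes "u" off 16-byte alignment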
Data<R> u; // unaligned
};
template <typename R> std::ostream & operator<<(std::ostream & out, const Data<R> & d)
{
out << "{ ";
for (int i = 0; i < R::nlanes; ++i)
{
// out << std::hex << +V_TypeTraits<typename R::lane_type>::reinterpret_int(d.d[i]);
out << +d.d[i];
if (i + 1 < R::nlanes)
out << ", ";
}
out << " }";
return out;
}
//==================================================================================================
template <typename R> struct RegTrait;
template <> struct RegTrait<cv::v_uint8x16> {
typedef cv::v_uint16x8 w_reg;
typedef cv::v_uint32x4 q_reg;
typedef cv::v_uint8x16 u_reg;
static cv::v_uint8x16 zero() { return cv::v_setzero_u8(); }
static cv::v_uint8x16 all(uchar val) { return cv::v_setall_u8(val); }
};
template <> struct RegTrait<cv::v_int8x16> {
typedef cv::v_int16x8 w_reg;
typedef cv::v_int32x4 q_reg;
typedef cv::v_uint8x16 u_reg;
static cv::v_int8x16 zero() { return cv::v_setzero_s8(); }
static cv::v_int8x16 all(schar val) { return cv::v_setall_s8(val); }
};
template <> struct RegTrait<cv::v_uint16x8> {
typedef cv::v_uint32x4 w_reg;
typedef cv::v_int16x8 int_reg;
typedef cv::v_uint16x8 u_reg;
static cv::v_uint16x8 zero() { return cv::v_setzero_u16(); }
static cv::v_uint16x8 all(ushort val) { return cv::v_setall_u16(val); }
};
template <> struct RegTrait<cv::v_int16x8> {
typedef cv::v_int32x4 w_reg;
typedef cv::v_uint16x8 u_reg;
static cv::v_int16x8 zero() { return cv::v_setzero_s16(); }
static cv::v_int16x8 all(short val) { return cv::v_setall_s16(val); }
};
template <> struct RegTrait<cv::v_uint32x4> {
typedef cv::v_uint64x2 w_reg;
typedef cv::v_int32x4 int_reg;
typedef cv::v_uint32x4 u_reg;
static cv::v_uint32x4 zero() { return cv::v_setzero_u32(); }
static cv::v_uint32x4 all(unsigned val) { return cv::v_setall_u32(val); }
};
template <> struct RegTrait<cv::v_int32x4> {
typedef cv::v_int64x2 w_reg;
typedef cv::v_uint32x4 u_reg;
static cv::v_int32x4 zero() { return cv::v_setzero_s32(); }
static cv::v_int32x4 all(int val) { return cv::v_setall_s32(val); }
};
template <> struct RegTrait<cv::v_uint64x2> {
static cv::v_uint64x2 zero() { return cv::v_setzero_u64(); }
static cv::v_uint64x2 all(uint64 val) { return cv::v_setall_u64(val); }
};
template <> struct RegTrait<cv::v_int64x2> {
static cv::v_int64x2 zero() { return cv::v_setzero_s64(); }
static cv::v_int64x2 all(int64 val) { return cv::v_setall_s64(val); }
};
template <> struct RegTrait<cv::v_float32x4> {
typedef cv::v_int32x4 int_reg;
typedef cv::v_float32x4 u_reg;
static cv::v_float32x4 zero() { return cv::v_setzero_f32(); }
static cv::v_float32x4 all(float val) { return cv::v_setall_f32(val); }
};
#if CV_SIMD128_64F
template <> struct RegTrait<cv::v_float64x2> {
typedef cv::v_int32x4 int_reg;
typedef cv::v_float64x2 u_reg;
static cv::v_float64x2 zero() { return cv::v_setzero_f64(); }
static cv::v_float64x2 all(double val) { return cv::v_setall_f64(val); }
};
#endif
#endif

@@ -0,0 +1,3 @@
#include "opencv2/ts.hpp"
CV_TEST_MAIN("cv")

@@ -0,0 +1,11 @@
#ifndef __OPENCV_HAL_TEST_PRECOMP_HPP__
#define __OPENCV_HAL_TEST_PRECOMP_HPP__
#include <iostream>
#include <limits>
#include "opencv2/ts.hpp"
#include "opencv2/hal.hpp"
#include "opencv2/hal/defs.h"
#include "opencv2/hal/intrin.hpp"
#endif