Merge pull request #3326 from ilya-lavrenov:neon_canny

Branch: pull/3330/head — merged by Vadim Pisarevsky (11 years ago), commit a798386660.
Changed files (lines changed):
  1. modules/core/src/arithm.cpp — 567
  2. modules/core/src/convert.cpp — 784
  3. modules/core/src/matmul.cpp — 121
  4. modules/core/src/stat.cpp — 202
  5. modules/imgproc/perf/perf_blur.cpp — 7
  6. modules/imgproc/src/accum.cpp — 431
  7. modules/imgproc/src/canny.cpp — 21
  8. modules/imgproc/src/clahe.cpp — 65
  9. modules/imgproc/src/color.cpp — 2231
  10. modules/imgproc/src/corner.cpp — 27
  11. modules/imgproc/src/imgwarp.cpp — 338
  12. modules/imgproc/src/moments.cpp — 91
  13. modules/imgproc/src/pyramids.cpp — 189
  14. modules/imgproc/src/smooth.cpp — 352
  15. modules/imgproc/src/thresh.cpp — 152
  16. modules/imgproc/test/test_imgwarp.cpp — 59
  17. modules/imgproc/test/test_imgwarp_strict.cpp — 16

@ -1987,6 +1987,238 @@ void cv::absdiff( InputArray src1, InputArray src2, OutputArray dst )
namespace cv
{
// Generic fallback for the vectorized inner loop of element-wise multiply:
// does no work and returns 0 processed elements, so the scalar loop in
// mul_() handles the whole row.  Specialized below for NEON.
template <typename T, typename WT>
struct Mul_SIMD
{
int operator() (const T *, const T *, T *, int, WT) const
{
return 0;
}
};
#if CV_NEON
// NEON specializations of Mul_SIMD.  Each processes 8 elements per iteration
// (widen to float32x4 pairs, multiply, optionally apply scale, round with the
// cv_vrndq_* helpers — presumably round-to-nearest, defined elsewhere — and
// saturate-narrow back), returning the count of elements handled so the
// scalar tail loop can finish the row.
template <>
struct Mul_SIMD<uchar, float>
{
int operator() (const uchar * src1, const uchar * src2, uchar * dst, int width, float scale) const
{
int x = 0;
// scale == 1 fast path avoids the per-vector multiply by v_scale
if( scale == 1.0f )
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src1 = vmovl_u8(vld1_u8(src1 + x));
uint16x8_t v_src2 = vmovl_u8(vld1_u8(src2 + x));
float32x4_t v_dst1 = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src1))),
vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src2))));
float32x4_t v_dst2 = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src1))),
vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src2))));
// round, saturate u32 -> u16 -> u8
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1_u8(dst + x, vqmovn_u16(v_dst));
}
else
{
float32x4_t v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src1 = vmovl_u8(vld1_u8(src1 + x));
uint16x8_t v_src2 = vmovl_u8(vld1_u8(src2 + x));
float32x4_t v_dst1 = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src1))),
vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src2))));
v_dst1 = vmulq_f32(v_dst1, v_scale);
float32x4_t v_dst2 = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src1))),
vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src2))));
v_dst2 = vmulq_f32(v_dst2, v_scale);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1_u8(dst + x, vqmovn_u16(v_dst));
}
}
return x;
}
};
// schar variant: identical structure with signed widen/narrow intrinsics.
template <>
struct Mul_SIMD<schar, float>
{
int operator() (const schar * src1, const schar * src2, schar * dst, int width, float scale) const
{
int x = 0;
if( scale == 1.0f )
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src1 = vmovl_s8(vld1_s8(src1 + x));
int16x8_t v_src2 = vmovl_s8(vld1_s8(src2 + x));
float32x4_t v_dst1 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src1))),
vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src2))));
float32x4_t v_dst2 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src1))),
vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src2))));
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1_s8(dst + x, vqmovn_s16(v_dst));
}
else
{
float32x4_t v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src1 = vmovl_s8(vld1_s8(src1 + x));
int16x8_t v_src2 = vmovl_s8(vld1_s8(src2 + x));
float32x4_t v_dst1 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src1))),
vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src2))));
v_dst1 = vmulq_f32(v_dst1, v_scale);
float32x4_t v_dst2 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src1))),
vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src2))));
v_dst2 = vmulq_f32(v_dst2, v_scale);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1_s8(dst + x, vqmovn_s16(v_dst));
}
}
return x;
}
};
// ushort variant: sources are already 16-bit, so only one widening step.
template <>
struct Mul_SIMD<ushort, float>
{
int operator() (const ushort * src1, const ushort * src2, ushort * dst, int width, float scale) const
{
int x = 0;
if( scale == 1.0f )
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src1 = vld1q_u16(src1 + x), v_src2 = vld1q_u16(src2 + x);
float32x4_t v_dst1 = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src1))),
vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src2))));
float32x4_t v_dst2 = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src1))),
vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src2))));
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1q_u16(dst + x, v_dst);
}
else
{
float32x4_t v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src1 = vld1q_u16(src1 + x), v_src2 = vld1q_u16(src2 + x);
float32x4_t v_dst1 = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src1))),
vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src2))));
v_dst1 = vmulq_f32(v_dst1, v_scale);
float32x4_t v_dst2 = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src1))),
vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src2))));
v_dst2 = vmulq_f32(v_dst2, v_scale);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1q_u16(dst + x, v_dst);
}
}
return x;
}
};
// short variant: signed 16-bit analogue of the ushort specialization.
template <>
struct Mul_SIMD<short, float>
{
int operator() (const short * src1, const short * src2, short * dst, int width, float scale) const
{
int x = 0;
if( scale == 1.0f )
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src1 = vld1q_s16(src1 + x), v_src2 = vld1q_s16(src2 + x);
float32x4_t v_dst1 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src1))),
vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src2))));
float32x4_t v_dst2 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src1))),
vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src2))));
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1q_s16(dst + x, v_dst);
}
else
{
float32x4_t v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src1 = vld1q_s16(src1 + x), v_src2 = vld1q_s16(src2 + x);
float32x4_t v_dst1 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src1))),
vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src2))));
v_dst1 = vmulq_f32(v_dst1, v_scale);
float32x4_t v_dst2 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src1))),
vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src2))));
v_dst2 = vmulq_f32(v_dst2, v_scale);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1q_s16(dst + x, v_dst);
}
}
return x;
}
};
// float variant: no widening/narrowing needed, two float32x4 lanes per pass.
template <>
struct Mul_SIMD<float, float>
{
int operator() (const float * src1, const float * src2, float * dst, int width, float scale) const
{
int x = 0;
if( scale == 1.0f )
for ( ; x <= width - 8; x += 8)
{
float32x4_t v_dst1 = vmulq_f32(vld1q_f32(src1 + x), vld1q_f32(src2 + x));
float32x4_t v_dst2 = vmulq_f32(vld1q_f32(src1 + x + 4), vld1q_f32(src2 + x + 4));
vst1q_f32(dst + x, v_dst1);
vst1q_f32(dst + x + 4, v_dst2);
}
else
{
float32x4_t v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
float32x4_t v_dst1 = vmulq_f32(vld1q_f32(src1 + x), vld1q_f32(src2 + x));
v_dst1 = vmulq_f32(v_dst1, v_scale);
float32x4_t v_dst2 = vmulq_f32(vld1q_f32(src1 + x + 4), vld1q_f32(src2 + x + 4));
v_dst2 = vmulq_f32(v_dst2, v_scale);
vst1q_f32(dst + x, v_dst1);
vst1q_f32(dst + x + 4, v_dst2);
}
}
return x;
}
};
#endif
template<typename T, typename WT> static void
mul_( const T* src1, size_t step1, const T* src2, size_t step2,
T* dst, size_t step, Size size, WT scale )
@ -1995,11 +2227,13 @@ mul_( const T* src1, size_t step1, const T* src2, size_t step2,
step2 /= sizeof(src2[0]);
step /= sizeof(dst[0]);
Mul_SIMD<T, WT> vop;
if( scale == (WT)1. )
{
for( ; size.height--; src1 += step1, src2 += step2, dst += step )
{
int i=0;
int i = vop(src1, src2, dst, size.width, scale);
#if CV_ENABLE_UNROLLED
for(; i <= size.width - 4; i += 4 )
{
@ -2024,7 +2258,7 @@ mul_( const T* src1, size_t step1, const T* src2, size_t step2,
{
for( ; size.height--; src1 += step1, src2 += step2, dst += step )
{
int i = 0;
int i = vop(src1, src2, dst, size.width, scale);
#if CV_ENABLE_UNROLLED
for(; i <= size.width - 4; i += 4 )
{
@ -2367,6 +2601,114 @@ void cv::divide(double scale, InputArray src2,
namespace cv
{
// Generic fallback for the vectorized part of addWeighted_():
// processes 0 elements so the scalar loop covers the whole row.
// Specialized for NEON below.
template <typename T, typename WT>
struct AddWeighted_SIMD
{
int operator() (const T *, const T *, T *, int, WT, WT, WT) const
{
return 0;
}
};
#if CV_NEON
template <>
struct AddWeighted_SIMD<schar, float>
{
int operator() (const schar * src1, const schar * src2, schar * dst, int width, float alpha, float beta, float gamma) const
{
int x = 0;
float32x4_t g = vdupq_n_f32 (gamma);
for( ; x <= width - 8; x += 8 )
{
int8x8_t in1 = vld1_s8(src1 + x);
int16x8_t in1_16 = vmovl_s8(in1);
float32x4_t in1_f_l = vcvtq_f32_s32(vmovl_s16(vget_low_s16(in1_16)));
float32x4_t in1_f_h = vcvtq_f32_s32(vmovl_s16(vget_high_s16(in1_16)));
int8x8_t in2 = vld1_s8(src2+x);
int16x8_t in2_16 = vmovl_s8(in2);
float32x4_t in2_f_l = vcvtq_f32_s32(vmovl_s16(vget_low_s16(in2_16)));
float32x4_t in2_f_h = vcvtq_f32_s32(vmovl_s16(vget_high_s16(in2_16)));
float32x4_t out_f_l = vaddq_f32(vmulq_n_f32(in1_f_l, alpha), vmulq_n_f32(in2_f_l, beta));
float32x4_t out_f_h = vaddq_f32(vmulq_n_f32(in1_f_h, alpha), vmulq_n_f32(in2_f_h, beta));
out_f_l = vaddq_f32(out_f_l, g);
out_f_h = vaddq_f32(out_f_h, g);
int16x4_t out_16_l = vqmovn_s32(cv_vrndq_s32_f32(out_f_l));
int16x4_t out_16_h = vqmovn_s32(cv_vrndq_s32_f32(out_f_h));
int16x8_t out_16 = vcombine_s16(out_16_l, out_16_h);
int8x8_t out = vqmovn_s16(out_16);
vst1_s8(dst + x, out);
}
return x;
}
};
template <>
struct AddWeighted_SIMD<ushort, float>
{
int operator() (const ushort * src1, const ushort * src2, ushort * dst, int width, float alpha, float beta, float gamma) const
{
int x = 0;
float32x4_t g = vdupq_n_f32(gamma);
for( ; x <= width - 8; x += 8 )
{
uint16x8_t v_src1 = vld1q_u16(src1 + x), v_src2 = vld1q_u16(src2 + x);
float32x4_t v_s1 = vmulq_n_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src1))), alpha);
float32x4_t v_s2 = vmulq_n_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src2))), beta);
uint16x4_t v_dst1 = vqmovn_u32(cv_vrndq_u32_f32(vaddq_f32(vaddq_f32(v_s1, v_s2), g)));
v_s1 = vmulq_n_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src1))), alpha);
v_s2 = vmulq_n_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src2))), beta);
uint16x4_t v_dst2 = vqmovn_u32(cv_vrndq_u32_f32(vaddq_f32(vaddq_f32(v_s1, v_s2), g)));
vst1q_u16(dst + x, vcombine_u16(v_dst1, v_dst2));
}
return x;
}
};
template <>
struct AddWeighted_SIMD<short, float>
{
int operator() (const short * src1, const short * src2, short * dst, int width, float alpha, float beta, float gamma) const
{
int x = 0;
float32x4_t g = vdupq_n_f32(gamma);
for( ; x <= width - 8; x += 8 )
{
int16x8_t v_src1 = vld1q_s16(src1 + x), v_src2 = vld1q_s16(src2 + x);
float32x4_t v_s1 = vmulq_n_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src1))), alpha);
float32x4_t v_s2 = vmulq_n_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src2))), beta);
int16x4_t v_dst1 = vqmovn_s32(cv_vrndq_s32_f32(vaddq_f32(vaddq_f32(v_s1, v_s2), g)));
v_s1 = vmulq_n_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src1))), alpha);
v_s2 = vmulq_n_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src2))), beta);
int16x4_t v_dst2 = vqmovn_s32(cv_vrndq_s32_f32(vaddq_f32(vaddq_f32(v_s1, v_s2), g)));
vst1q_s16(dst + x, vcombine_s16(v_dst1, v_dst2));
}
return x;
}
};
#endif
template<typename T, typename WT> static void
addWeighted_( const T* src1, size_t step1, const T* src2, size_t step2,
T* dst, size_t step, Size size, void* _scalars )
@ -2377,9 +2719,11 @@ addWeighted_( const T* src1, size_t step1, const T* src2, size_t step2,
step2 /= sizeof(src2[0]);
step /= sizeof(dst[0]);
AddWeighted_SIMD<T, WT> vop;
for( ; size.height--; src1 += step1, src2 += step2, dst += step )
{
int x = 0;
int x = vop(src1, src2, dst, size.width, alpha, beta, gamma);
#if CV_ENABLE_UNROLLED
for( ; x <= size.width - 4; x += 4 )
{
@ -2457,8 +2801,8 @@ addWeighted8u( const uchar* src1, size_t step1,
out_f_l = vaddq_f32(out_f_l, g);
out_f_h = vaddq_f32(out_f_h, g);
uint16x4_t out_16_l = vqmovun_s32(vcvtq_s32_f32(out_f_l));
uint16x4_t out_16_h = vqmovun_s32(vcvtq_s32_f32(out_f_h));
uint16x4_t out_16_l = vqmovun_s32(cv_vrndq_s32_f32(out_f_l));
uint16x4_t out_16_h = vqmovun_s32(cv_vrndq_s32_f32(out_f_h));
uint16x8_t out_16 = vcombine_u16(out_16_l, out_16_h);
uint8x8_t out = vqmovn_u16(out_16);
@ -2557,6 +2901,213 @@ void cv::addWeighted( InputArray src1, double alpha, InputArray src2,
namespace cv
{
// Generic fallback for the vectorized part of cmp_(): the comparison code is
// accepted but ignored, and 0 elements are processed so the scalar loop does
// all the work.  Specialized for NEON below.
template <typename T>
struct Cmp_SIMD
{
explicit Cmp_SIMD(int)
{
}
int operator () (const T *, const T *, uchar *, int) const
{
return 0;
}
};
#if CV_NEON
template <>
struct Cmp_SIMD<schar>
{
explicit Cmp_SIMD(int code_) :
code(code_)
{
CV_Assert(code == CMP_GT || code == CMP_LE ||
code == CMP_EQ || code == CMP_NE);
v_mask = vdupq_n_u8(255);
}
int operator () (const schar * src1, const schar * src2, uchar * dst, int width) const
{
int x = 0;
if (code == CMP_GT)
for ( ; x <= width - 16; x += 16)
vst1q_u8(dst + x, vcgtq_s8(vld1q_s8(src1 + x), vld1q_s8(src2 + x)));
else if (code == CMP_LE)
for ( ; x <= width - 16; x += 16)
vst1q_u8(dst + x, vcleq_s8(vld1q_s8(src1 + x), vld1q_s8(src2 + x)));
else if (code == CMP_EQ)
for ( ; x <= width - 16; x += 16)
vst1q_u8(dst + x, vceqq_s8(vld1q_s8(src1 + x), vld1q_s8(src2 + x)));
else if (code == CMP_NE)
for ( ; x <= width - 16; x += 16)
vst1q_u8(dst + x, veorq_u8(vceqq_s8(vld1q_s8(src1 + x), vld1q_s8(src2 + x)), v_mask));
return x;
}
int code;
uint8x16_t v_mask;
};
template <>
struct Cmp_SIMD<ushort>
{
explicit Cmp_SIMD(int code_) :
code(code_)
{
CV_Assert(code == CMP_GT || code == CMP_LE ||
code == CMP_EQ || code == CMP_NE);
v_mask = vdup_n_u8(255);
}
int operator () (const ushort * src1, const ushort * src2, uchar * dst, int width) const
{
int x = 0;
if (code == CMP_GT)
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_dst = vcgtq_u16(vld1q_u16(src1 + x), vld1q_u16(src2 + x));
vst1_u8(dst + x, vmovn_u16(v_dst));
}
else if (code == CMP_LE)
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_dst = vcleq_u16(vld1q_u16(src1 + x), vld1q_u16(src2 + x));
vst1_u8(dst + x, vmovn_u16(v_dst));
}
else if (code == CMP_EQ)
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_dst = vceqq_u16(vld1q_u16(src1 + x), vld1q_u16(src2 + x));
vst1_u8(dst + x, vmovn_u16(v_dst));
}
else if (code == CMP_NE)
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_dst = vceqq_u16(vld1q_u16(src1 + x), vld1q_u16(src2 + x));
vst1_u8(dst + x, veor_u8(vmovn_u16(v_dst), v_mask));
}
return x;
}
int code;
uint8x8_t v_mask;
};
template <>
struct Cmp_SIMD<int>
{
explicit Cmp_SIMD(int code_) :
code(code_)
{
CV_Assert(code == CMP_GT || code == CMP_LE ||
code == CMP_EQ || code == CMP_NE);
v_mask = vdup_n_u8(255);
}
int operator () (const int * src1, const int * src2, uchar * dst, int width) const
{
int x = 0;
if (code == CMP_GT)
for ( ; x <= width - 8; x += 8)
{
uint32x4_t v_dst1 = vcgtq_s32(vld1q_s32(src1 + x), vld1q_s32(src2 + x));
uint32x4_t v_dst2 = vcgtq_s32(vld1q_s32(src1 + x + 4), vld1q_s32(src2 + x + 4));
vst1_u8(dst + x, vmovn_u16(vcombine_u16(vmovn_u32(v_dst1), vmovn_u32(v_dst2))));
}
else if (code == CMP_LE)
for ( ; x <= width - 8; x += 8)
{
uint32x4_t v_dst1 = vcleq_s32(vld1q_s32(src1 + x), vld1q_s32(src2 + x));
uint32x4_t v_dst2 = vcleq_s32(vld1q_s32(src1 + x + 4), vld1q_s32(src2 + x + 4));
vst1_u8(dst + x, vmovn_u16(vcombine_u16(vmovn_u32(v_dst1), vmovn_u32(v_dst2))));
}
else if (code == CMP_EQ)
for ( ; x <= width - 8; x += 8)
{
uint32x4_t v_dst1 = vceqq_s32(vld1q_s32(src1 + x), vld1q_s32(src2 + x));
uint32x4_t v_dst2 = vceqq_s32(vld1q_s32(src1 + x + 4), vld1q_s32(src2 + x + 4));
vst1_u8(dst + x, vmovn_u16(vcombine_u16(vmovn_u32(v_dst1), vmovn_u32(v_dst2))));
}
else if (code == CMP_NE)
for ( ; x <= width - 8; x += 8)
{
uint32x4_t v_dst1 = vceqq_s32(vld1q_s32(src1 + x), vld1q_s32(src2 + x));
uint32x4_t v_dst2 = vceqq_s32(vld1q_s32(src1 + x + 4), vld1q_s32(src2 + x + 4));
uint8x8_t v_dst = vmovn_u16(vcombine_u16(vmovn_u32(v_dst1), vmovn_u32(v_dst2)));
vst1_u8(dst + x, veor_u8(v_dst, v_mask));
}
return x;
}
int code;
uint8x8_t v_mask;
};
template <>
struct Cmp_SIMD<float>
{
explicit Cmp_SIMD(int code_) :
code(code_)
{
CV_Assert(code == CMP_GT || code == CMP_LE ||
code == CMP_EQ || code == CMP_NE);
v_mask = vdup_n_u8(255);
}
int operator () (const float * src1, const float * src2, uchar * dst, int width) const
{
int x = 0;
if (code == CMP_GT)
for ( ; x <= width - 8; x += 8)
{
uint32x4_t v_dst1 = vcgtq_f32(vld1q_f32(src1 + x), vld1q_f32(src2 + x));
uint32x4_t v_dst2 = vcgtq_f32(vld1q_f32(src1 + x + 4), vld1q_f32(src2 + x + 4));
vst1_u8(dst + x, vmovn_u16(vcombine_u16(vmovn_u32(v_dst1), vmovn_u32(v_dst2))));
}
else if (code == CMP_LE)
for ( ; x <= width - 8; x += 8)
{
uint32x4_t v_dst1 = vcleq_f32(vld1q_f32(src1 + x), vld1q_f32(src2 + x));
uint32x4_t v_dst2 = vcleq_f32(vld1q_f32(src1 + x + 4), vld1q_f32(src2 + x + 4));
vst1_u8(dst + x, vmovn_u16(vcombine_u16(vmovn_u32(v_dst1), vmovn_u32(v_dst2))));
}
else if (code == CMP_EQ)
for ( ; x <= width - 8; x += 8)
{
uint32x4_t v_dst1 = vceqq_f32(vld1q_f32(src1 + x), vld1q_f32(src2 + x));
uint32x4_t v_dst2 = vceqq_f32(vld1q_f32(src1 + x + 4), vld1q_f32(src2 + x + 4));
vst1_u8(dst + x, vmovn_u16(vcombine_u16(vmovn_u32(v_dst1), vmovn_u32(v_dst2))));
}
else if (code == CMP_NE)
for ( ; x <= width - 8; x += 8)
{
uint32x4_t v_dst1 = vceqq_f32(vld1q_f32(src1 + x), vld1q_f32(src2 + x));
uint32x4_t v_dst2 = vceqq_f32(vld1q_f32(src1 + x + 4), vld1q_f32(src2 + x + 4));
uint8x8_t v_dst = vmovn_u16(vcombine_u16(vmovn_u32(v_dst1), vmovn_u32(v_dst2)));
vst1_u8(dst + x, veor_u8(v_dst, v_mask));
}
return x;
}
int code;
uint8x8_t v_mask;
};
#endif
template<typename T> static void
cmp_(const T* src1, size_t step1, const T* src2, size_t step2,
uchar* dst, size_t step, Size size, int code)
@ -2570,12 +3121,14 @@ cmp_(const T* src1, size_t step1, const T* src2, size_t step2,
code = code == CMP_GE ? CMP_LE : CMP_GT;
}
Cmp_SIMD<T> vop(code);
if( code == CMP_GT || code == CMP_LE )
{
int m = code == CMP_GT ? 0 : 255;
for( ; size.height--; src1 += step1, src2 += step2, dst += step )
{
int x = 0;
int x = vop(src1, src2, dst, size.width);
#if CV_ENABLE_UNROLLED
for( ; x <= size.width - 4; x += 4 )
{
@ -2590,7 +3143,7 @@ cmp_(const T* src1, size_t step1, const T* src2, size_t step2,
#endif
for( ; x < size.width; x++ )
dst[x] = (uchar)(-(src1[x] > src2[x]) ^ m);
}
}
}
else if( code == CMP_EQ || code == CMP_NE )
{

@ -1480,6 +1480,724 @@ cvtScaleAbs_( const T* src, size_t sstep,
}
}
// Generic fallback for the vectorized part of cvtScale_()
// (dst = saturate_cast<DT>(src*scale + shift)): processes 0 elements so the
// scalar loop handles everything.  Specialized per type pair for NEON below.
template <typename T, typename DT, typename WT>
struct cvtScale_SIMD
{
int operator () (const T *, DT *, int, WT, WT) const
{
return 0;
}
};
#if CV_NEON
// NEON cvtScale specializations, uchar source: widen u8 -> u16 -> two f32x4
// halves, compute v*scale + shift, then round (cv_vrndq_*, defined elsewhere)
// and saturate-narrow to the destination type.  8 elements per iteration;
// returns the count processed so the scalar tail finishes the row.
// from uchar
template <>
struct cvtScale_SIMD<uchar, uchar, float>
{
int operator () (const uchar * src, uchar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1_u8(dst + x, vqmovn_u16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<uchar, schar, float>
{
int operator () (const uchar * src, schar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1_s8(dst + x, vqmovn_s16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<uchar, ushort, float>
{
int operator () (const uchar * src, ushort * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1q_u16(dst + x, v_dst);
}
return x;
}
};
template <>
struct cvtScale_SIMD<uchar, short, float>
{
int operator () (const uchar * src, short * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1q_s16(dst + x, v_dst);
}
return x;
}
};
template <>
struct cvtScale_SIMD<uchar, int, float>
{
int operator () (const uchar * src, int * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
// int destination: rounded s32 lanes are stored directly, no narrowing
vst1q_s32(dst + x, cv_vrndq_s32_f32(v_dst1));
vst1q_s32(dst + x + 4, cv_vrndq_s32_f32(v_dst2));
}
return x;
}
};
template <>
struct cvtScale_SIMD<uchar, float, float>
{
int operator () (const uchar * src, float * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
// float destination: no rounding or narrowing needed
vst1q_f32(dst + x, vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift));
vst1q_f32(dst + x + 4, vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift));
}
return x;
}
};
// from schar: signed analogues of the uchar specializations above
// (s8 -> s16 -> f32 halves, v*scale + shift, round, saturate-narrow).
template <>
struct cvtScale_SIMD<schar, uchar, float>
{
int operator () (const schar * src, uchar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1_u8(dst + x, vqmovn_u16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<schar, schar, float>
{
int operator () (const schar * src, schar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1_s8(dst + x, vqmovn_s16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<schar, ushort, float>
{
int operator () (const schar * src, ushort * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1q_u16(dst + x, v_dst);
}
return x;
}
};
template <>
struct cvtScale_SIMD<schar, short, float>
{
int operator () (const schar * src, short * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1q_s16(dst + x, v_dst);
}
return x;
}
};
template <>
struct cvtScale_SIMD<schar, int, float>
{
int operator () (const schar * src, int * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
// int destination: store rounded s32 lanes directly
vst1q_s32(dst + x, cv_vrndq_s32_f32(v_dst1));
vst1q_s32(dst + x + 4, cv_vrndq_s32_f32(v_dst2));
}
return x;
}
};
template <>
struct cvtScale_SIMD<schar, float, float>
{
int operator () (const schar * src, float * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
// float destination: no rounding or narrowing needed
vst1q_f32(dst + x, vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift));
vst1q_f32(dst + x + 4, vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift));
}
return x;
}
};
// from ushort: sources are already 16-bit, so only one widening step
// (u16 -> f32 halves) before v*scale + shift, round, saturate-narrow.
template <>
struct cvtScale_SIMD<ushort, uchar, float>
{
int operator () (const ushort * src, uchar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vld1q_u16(src + x);
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1_u8(dst + x, vqmovn_u16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<ushort, schar, float>
{
int operator () (const ushort * src, schar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vld1q_u16(src + x);
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1_s8(dst + x, vqmovn_s16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<ushort, ushort, float>
{
int operator () (const ushort * src, ushort * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vld1q_u16(src + x);
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1q_u16(dst + x, v_dst);
}
return x;
}
};
template <>
struct cvtScale_SIMD<ushort, short, float>
{
int operator () (const ushort * src, short * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vld1q_u16(src + x);
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1q_s16(dst + x, v_dst);
}
return x;
}
};
template <>
struct cvtScale_SIMD<ushort, int, float>
{
int operator () (const ushort * src, int * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vld1q_u16(src + x);
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
// int destination: store rounded s32 lanes directly
vst1q_s32(dst + x, cv_vrndq_s32_f32(v_dst1));
vst1q_s32(dst + x + 4, cv_vrndq_s32_f32(v_dst2));
}
return x;
}
};
template <>
struct cvtScale_SIMD<ushort, float, float>
{
int operator () (const ushort * src, float * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vld1q_u16(src + x);
// float destination: no rounding or narrowing needed
vst1q_f32(dst + x, vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift));
vst1q_f32(dst + x + 4, vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift));
}
return x;
}
};
// from short: signed 16-bit source (s16 -> f32 halves, v*scale + shift,
// round, saturate-narrow).  Only the uchar/schar/ushort/float destinations
// are visible in this view.
template <>
struct cvtScale_SIMD<short, uchar, float>
{
int operator () (const short * src, uchar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vld1q_s16(src + x);
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1_u8(dst + x, vqmovn_u16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<short, schar, float>
{
int operator () (const short * src, schar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vld1q_s16(src + x);
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1_s8(dst + x, vqmovn_s16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<short, ushort, float>
{
int operator () (const short * src, ushort * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vld1q_s16(src + x);
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1q_u16(dst + x, v_dst);
}
return x;
}
};
template <>
struct cvtScale_SIMD<short, float, float>
{
int operator () (const short * src, float * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vld1q_s16(src + x);
// float destination: no rounding or narrowing needed
vst1q_f32(dst + x, vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift));
vst1q_f32(dst + x + 4, vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift));
}
return x;
}
};
// from int
template <>
struct cvtScale_SIMD<int, uchar, float>
{
int operator () (const int * src, uchar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x)), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x + 4)), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1_u8(dst + x, vqmovn_u16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<int, schar, float>
{
int operator () (const int * src, schar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x)), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x + 4)), v_scale), v_shift);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1_s8(dst + x, vqmovn_s16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<int, ushort, float>
{
int operator () (const int * src, ushort * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x)), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x + 4)), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1q_u16(dst + x, v_dst);
}
return x;
}
};
template <>
struct cvtScale_SIMD<int, short, float>
{
int operator () (const int * src, short * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x)), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x + 4)), v_scale), v_shift);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1q_s16(dst + x, v_dst);
}
return x;
}
};
// from float
template <>
struct cvtScale_SIMD<float, uchar, float>
{
int operator () (const float * src, uchar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vld1q_f32(src + x + 4), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1_u8(dst + x, vqmovn_u16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<float, schar, float>
{
int operator () (const float * src, schar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vld1q_f32(src + x + 4), v_scale), v_shift);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1_s8(dst + x, vqmovn_s16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<float, ushort, float>
{
int operator () (const float * src, ushort * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vld1q_f32(src + x + 4), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1q_u16(dst + x, v_dst);
}
return x;
}
};
template <>
struct cvtScale_SIMD<float, short, float>
{
int operator () (const float * src, short * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vld1q_f32(src + x + 4), v_scale), v_shift);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1q_s16(dst + x, v_dst);
}
return x;
}
};
template <>
struct cvtScale_SIMD<float, int, float>
{
int operator () (const float * src, int * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 4; x += 4)
vst1q_s32(dst + x, cv_vrndq_s32_f32(vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift)));
return x;
}
};
template <>
struct cvtScale_SIMD<float, float, float>
{
int operator () (const float * src, float * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 4; x += 4)
vst1q_f32(dst + x, vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift));
return x;
}
};
#endif
template<typename T, typename DT, typename WT> static void
cvtScale_( const T* src, size_t sstep,
DT* dst, size_t dstep, Size size,
@ -1488,9 +2206,11 @@ cvtScale_( const T* src, size_t sstep,
sstep /= sizeof(src[0]);
dstep /= sizeof(dst[0]);
cvtScale_SIMD<T, DT, WT> vop;
for( ; size.height--; src += sstep, dst += dstep )
{
int x = 0;
int x = vop(src, dst, size.width, scale, shift);
#if CV_ENABLE_UNROLLED
for( ; x <= size.width - 4; x += 4 )
@ -1755,6 +2475,25 @@ struct Cvt_SIMD<schar, short>
}
};
template <>
struct Cvt_SIMD<schar, ushort>
{
int operator() (const schar * src, ushort * dst, int width) const
{
int x = 0;
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
vst1q_u16(dst + x, vcombine_u16(vqmovun_s32(vmovl_s16(vget_low_s16(v_src))),
vqmovun_s32(vmovl_s16(vget_high_s16(v_src)))));
}
return x;
}
};
template <>
struct Cvt_SIMD<schar, int>
{
@ -1810,6 +2549,49 @@ struct Cvt_SIMD<ushort, uchar>
}
};
template <>
struct Cvt_SIMD<ushort, schar>
{
int operator() (const ushort * src, schar * dst, int width) const
{
int x = 0;
for ( ; x <= width - 16; x += 16)
{
uint16x8_t v_src1 = vld1q_u16(src + x), v_src2 = vld1q_u16(src + x + 8);
int32x4_t v_dst10 = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(v_src1)));
int32x4_t v_dst11 = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(v_src1)));
int32x4_t v_dst20 = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(v_src2)));
int32x4_t v_dst21 = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(v_src2)));
vst1q_s8(dst + x, vcombine_s8(vqmovn_s16(vcombine_s16(vqmovn_s32(v_dst10), vqmovn_s32(v_dst11))),
vqmovn_s16(vcombine_s16(vqmovn_s32(v_dst20), vqmovn_s32(v_dst21)))));
}
return x;
}
};
template <>
struct Cvt_SIMD<ushort, short>
{
int operator() (const ushort * src, short * dst, int width) const
{
int x = 0;
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vld1q_u16(src + x);
int32x4_t v_dst0 = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(v_src)));
int32x4_t v_dst1 = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(v_src)));
vst1q_s16(dst + x, vcombine_s16(vqmovn_s32(v_dst0), vqmovn_s32(v_dst1)));
}
return x;
}
};
template <>
struct Cvt_SIMD<ushort, int>
{

@ -2804,7 +2804,8 @@ dotProd_(const T* src1, const T* src2, int len)
{
int i = 0;
double result = 0;
#if CV_ENABLE_UNROLLED
#if CV_ENABLE_UNROLLED
for( ; i <= len - 4; i += 4 )
result += (double)src1[i]*src2[i] + (double)src1[i+1]*src2[i+1] +
(double)src1[i+2]*src2[i+2] + (double)src1[i+3]*src2[i+3];
@ -2833,10 +2834,12 @@ static double dotProd_8u(const uchar* src1, const uchar* src2, int len)
{
int j, len0 = len & -4, blockSize0 = (1 << 13), blockSize;
__m128i z = _mm_setzero_si128();
CV_DECL_ALIGNED(16) int buf[4];
while( i < len0 )
{
blockSize = std::min(len0 - i, blockSize0);
__m128i s = _mm_setzero_si128();
__m128i s = z;
j = 0;
for( ; j <= blockSize - 16; j += 16 )
{
@ -2860,7 +2863,7 @@ static double dotProd_8u(const uchar* src1, const uchar* src2, int len)
s0 = _mm_madd_epi16(s0, s1);
s = _mm_add_epi32(s, s0);
}
CV_DECL_ALIGNED(16) int buf[4];
_mm_store_si128((__m128i*)buf, s);
r += buf[0] + buf[1] + buf[2] + buf[3];
@ -2869,6 +2872,45 @@ static double dotProd_8u(const uchar* src1, const uchar* src2, int len)
i += blockSize;
}
}
#elif CV_NEON
int len0 = len & -8, blockSize0 = (1 << 15), blockSize;
uint32x4_t v_zero = vdupq_n_u32(0u);
CV_DECL_ALIGNED(16) uint buf[4];
while( i < len0 )
{
blockSize = std::min(len0 - i, blockSize0);
uint32x4_t v_sum = v_zero;
int j = 0;
for( ; j <= blockSize - 16; j += 16 )
{
uint8x16_t v_src1 = vld1q_u8(src1 + j), v_src2 = vld1q_u8(src2 + j);
uint16x8_t v_src10 = vmovl_u8(vget_low_u8(v_src1)), v_src20 = vmovl_u8(vget_low_u8(v_src2));
v_sum = vmlal_u16(v_sum, vget_low_u16(v_src10), vget_low_u16(v_src20));
v_sum = vmlal_u16(v_sum, vget_high_u16(v_src10), vget_high_u16(v_src20));
v_src10 = vmovl_u8(vget_high_u8(v_src1));
v_src20 = vmovl_u8(vget_high_u8(v_src2));
v_sum = vmlal_u16(v_sum, vget_low_u16(v_src10), vget_low_u16(v_src20));
v_sum = vmlal_u16(v_sum, vget_high_u16(v_src10), vget_high_u16(v_src20));
}
for( ; j <= blockSize - 8; j += 8 )
{
uint16x8_t v_src1 = vmovl_u8(vld1_u8(src1 + j)), v_src2 = vmovl_u8(vld1_u8(src2 + j));
v_sum = vmlal_u16(v_sum, vget_low_u16(v_src1), vget_low_u16(v_src2));
v_sum = vmlal_u16(v_sum, vget_high_u16(v_src1), vget_high_u16(v_src2));
}
vst1q_u32(buf, v_sum);
r += buf[0] + buf[1] + buf[2] + buf[3];
src1 += blockSize;
src2 += blockSize;
i += blockSize;
}
#endif
return r + dotProd_(src1, src2, len - i);
}
@ -2876,7 +2918,51 @@ static double dotProd_8u(const uchar* src1, const uchar* src2, int len)
static double dotProd_8s(const schar* src1, const schar* src2, int len)
{
return dotProd_(src1, src2, len);
int i = 0;
double r = 0.0;
#if CV_NEON
int len0 = len & -8, blockSize0 = (1 << 14), blockSize;
int32x4_t v_zero = vdupq_n_s32(0);
CV_DECL_ALIGNED(16) int buf[4];
while( i < len0 )
{
blockSize = std::min(len0 - i, blockSize0);
int32x4_t v_sum = v_zero;
int j = 0;
for( ; j <= blockSize - 16; j += 16 )
{
int8x16_t v_src1 = vld1q_s8(src1 + j), v_src2 = vld1q_s8(src2 + j);
int16x8_t v_src10 = vmovl_s8(vget_low_s8(v_src1)), v_src20 = vmovl_s8(vget_low_s8(v_src2));
v_sum = vmlal_s16(v_sum, vget_low_s16(v_src10), vget_low_s16(v_src20));
v_sum = vmlal_s16(v_sum, vget_high_s16(v_src10), vget_high_s16(v_src20));
v_src10 = vmovl_s8(vget_high_s8(v_src1));
v_src20 = vmovl_s8(vget_high_s8(v_src2));
v_sum = vmlal_s16(v_sum, vget_low_s16(v_src10), vget_low_s16(v_src20));
v_sum = vmlal_s16(v_sum, vget_high_s16(v_src10), vget_high_s16(v_src20));
}
for( ; j <= blockSize - 8; j += 8 )
{
int16x8_t v_src1 = vmovl_s8(vld1_s8(src1 + j)), v_src2 = vmovl_s8(vld1_s8(src2 + j));
v_sum = vmlal_s16(v_sum, vget_low_s16(v_src1), vget_low_s16(v_src2));
v_sum = vmlal_s16(v_sum, vget_high_s16(v_src1), vget_high_s16(v_src2));
}
vst1q_s32(buf, v_sum);
r += buf[0] + buf[1] + buf[2] + buf[3];
src1 += blockSize;
src2 += blockSize;
i += blockSize;
}
#endif
return r + dotProd_(src1, src2, len - i);
}
static double dotProd_16u(const ushort* src1, const ushort* src2, int len)
@ -2914,13 +3000,36 @@ static double dotProd_32s(const int* src1, const int* src2, int len)
static double dotProd_32f(const float* src1, const float* src2, int len)
{
double r = 0.0;
int i = 0;
#if (ARITHM_USE_IPP == 1)
double r = 0;
if (0 <= ippsDotProd_32f64f(src1, src2, len, &r))
return r;
setIppErrorStatus();
#elif CV_NEON
int len0 = len & -4, blockSize0 = (1 << 13), blockSize;
float32x4_t v_zero = vdupq_n_f32(0.0f);
CV_DECL_ALIGNED(16) float buf[4];
while( i < len0 )
{
blockSize = std::min(len0 - i, blockSize0);
float32x4_t v_sum = v_zero;
int j = 0;
for( ; j <= blockSize - 4; j += 4 )
v_sum = vmlaq_f32(v_sum, vld1q_f32(src1 + j), vld1q_f32(src2 + j));
vst1q_f32(buf, v_sum);
r += buf[0] + buf[1] + buf[2] + buf[3];
src1 += blockSize;
src2 += blockSize;
i += blockSize;
}
#endif
return dotProd_(src1, src2, len);
return r + dotProd_(src1, src2, len - i);
}
static double dotProd_64f(const double* src1, const double* src2, int len)

@ -90,20 +90,20 @@ struct Sum_SIMD<uchar, int>
uint8x16_t v_src = vld1q_u8(src0 + x);
uint16x8_t v_half = vmovl_u8(vget_low_u8(v_src));
v_sum = vaddq_u32(v_sum, vmovl_u16(vget_low_u16(v_half)));
v_sum = vaddq_u32(v_sum, vmovl_u16(vget_high_u16(v_half)));
v_sum = vaddw_u16(v_sum, vget_low_u16(v_half));
v_sum = vaddw_u16(v_sum, vget_high_u16(v_half));
v_half = vmovl_u8(vget_high_u8(v_src));
v_sum = vaddq_u32(v_sum, vmovl_u16(vget_low_u16(v_half)));
v_sum = vaddq_u32(v_sum, vmovl_u16(vget_high_u16(v_half)));
v_sum = vaddw_u16(v_sum, vget_low_u16(v_half));
v_sum = vaddw_u16(v_sum, vget_high_u16(v_half));
}
for ( ; x <= len - 8; x += 8)
{
uint16x8_t v_src = vmovl_u8(vld1_u8(src0 + x));
v_sum = vaddq_u32(v_sum, vmovl_u16(vget_low_u16(v_src)));
v_sum = vaddq_u32(v_sum, vmovl_u16(vget_high_u16(v_src)));
v_sum = vaddw_u16(v_sum, vget_low_u16(v_src));
v_sum = vaddw_u16(v_sum, vget_high_u16(v_src));
}
unsigned int CV_DECL_ALIGNED(16) ar[4];
@ -133,20 +133,20 @@ struct Sum_SIMD<schar, int>
int8x16_t v_src = vld1q_s8(src0 + x);
int16x8_t v_half = vmovl_s8(vget_low_s8(v_src));
v_sum = vaddq_s32(v_sum, vmovl_s16(vget_low_s16(v_half)));
v_sum = vaddq_s32(v_sum, vmovl_s16(vget_high_s16(v_half)));
v_sum = vaddw_s16(v_sum, vget_low_s16(v_half));
v_sum = vaddw_s16(v_sum, vget_high_s16(v_half));
v_half = vmovl_s8(vget_high_s8(v_src));
v_sum = vaddq_s32(v_sum, vmovl_s16(vget_low_s16(v_half)));
v_sum = vaddq_s32(v_sum, vmovl_s16(vget_high_s16(v_half)));
v_sum = vaddw_s16(v_sum, vget_low_s16(v_half));
v_sum = vaddw_s16(v_sum, vget_high_s16(v_half));
}
for ( ; x <= len - 8; x += 8)
{
int16x8_t v_src = vmovl_s8(vld1_s8(src0 + x));
v_sum = vaddq_s32(v_sum, vmovl_s16(vget_low_s16(v_src)));
v_sum = vaddq_s32(v_sum, vmovl_s16(vget_high_s16(v_src)));
v_sum = vaddw_s16(v_sum, vget_low_s16(v_src));
v_sum = vaddw_s16(v_sum, vget_high_s16(v_src));
}
int CV_DECL_ALIGNED(16) ar[4];
@ -175,13 +175,13 @@ struct Sum_SIMD<ushort, int>
{
uint16x8_t v_src = vld1q_u16(src0 + x);
v_sum = vaddq_u32(v_sum, vmovl_u16(vget_low_u16(v_src)));
v_sum = vaddq_u32(v_sum, vmovl_u16(vget_high_u16(v_src)));
v_sum = vaddw_u16(v_sum, vget_low_u16(v_src));
v_sum = vaddw_u16(v_sum, vget_high_u16(v_src));
}
for ( ; x <= len - 4; x += 4)
v_sum = vaddq_u32(v_sum, vmovl_u16(vld1_u16(src0 + x)));
v_sum = vaddw_u16(v_sum, vld1_u16(src0 + x));
unsigned int CV_DECL_ALIGNED(16) ar[4];
vst1q_u32(ar, v_sum);
@ -208,13 +208,13 @@ struct Sum_SIMD<short, int>
{
int16x8_t v_src = vld1q_s16(src0 + x);
v_sum = vaddq_s32(v_sum, vmovl_s16(vget_low_s16(v_src)));
v_sum = vaddq_s32(v_sum, vmovl_s16(vget_high_s16(v_src)));
v_sum = vaddw_s16(v_sum, vget_low_s16(v_src));
v_sum = vaddw_s16(v_sum, vget_high_s16(v_src));
}
for ( ; x <= len - 4; x += 4)
v_sum = vaddq_s32(v_sum, vmovl_s16(vld1_s16(src0 + x)));
v_sum = vaddw_s16(v_sum, vld1_s16(src0 + x));
int CV_DECL_ALIGNED(16) ar[4];
vst1q_s32(ar, v_sum);
@ -426,6 +426,38 @@ static int countNonZero8u( const uchar* src, int len )
nz += tab[val & 255] + tab[val >> 8];
}
}
#elif CV_NEON
int len0 = len & -16, blockSize1 = (1 << 8) - 16, blockSize0 = blockSize1 << 6;
uint32x4_t v_nz = vdupq_n_u32(0u);
uint8x16_t v_zero = vdupq_n_u8(0), v_1 = vdupq_n_u8(1);
const uchar * src0 = src;
while( i < len0 )
{
int blockSizei = std::min(len0 - i, blockSize0), j = 0;
while (j < blockSizei)
{
int blockSizej = std::min(blockSizei - j, blockSize1), k = 0;
uint8x16_t v_pz = v_zero;
for( ; k <= blockSizej - 16; k += 16 )
v_pz = vaddq_u8(v_pz, vandq_u8(vceqq_u8(vld1q_u8(src0 + k), v_zero), v_1));
uint16x8_t v_p1 = vmovl_u8(vget_low_u8(v_pz)), v_p2 = vmovl_u8(vget_high_u8(v_pz));
v_nz = vaddq_u32(vaddl_u16(vget_low_u16(v_p1), vget_high_u16(v_p1)), v_nz);
v_nz = vaddq_u32(vaddl_u16(vget_low_u16(v_p2), vget_high_u16(v_p2)), v_nz);
src0 += blockSizej;
j += blockSizej;
}
i += blockSizei;
}
CV_DECL_ALIGNED(16) unsigned int buf[4];
vst1q_u32(buf, v_nz);
nz += i - saturate_cast<int>(buf[0] + buf[1] + buf[2] + buf[3]);
#endif
for( ; i < len; i++ )
nz += src[i] != 0;
@ -433,13 +465,116 @@ static int countNonZero8u( const uchar* src, int len )
}
static int countNonZero16u( const ushort* src, int len )
{ return countNonZero_(src, len); }
{
int i = 0, nz = 0;
#if CV_NEON
int len0 = len & -8, blockSize1 = (1 << 15), blockSize0 = blockSize1 << 6;
uint32x4_t v_nz = vdupq_n_u32(0u);
uint16x8_t v_zero = vdupq_n_u16(0), v_1 = vdupq_n_u16(1);
while( i < len0 )
{
int blockSizei = std::min(len0 - i, blockSize0), j = 0;
while (j < blockSizei)
{
int blockSizej = std::min(blockSizei - j, blockSize1), k = 0;
uint16x8_t v_pz = v_zero;
for( ; k <= blockSizej - 8; k += 8 )
v_pz = vaddq_u16(v_pz, vandq_u16(vceqq_u16(vld1q_u16(src + k), v_zero), v_1));
v_nz = vaddq_u32(vaddl_u16(vget_low_u16(v_pz), vget_high_u16(v_pz)), v_nz);
src += blockSizej;
j += blockSizej;
}
i += blockSizei;
}
CV_DECL_ALIGNED(16) unsigned int buf[4];
vst1q_u32(buf, v_nz);
nz += i - saturate_cast<int>(buf[0] + buf[1] + buf[2] + buf[3]);
#endif
return nz + countNonZero_(src, len - i);
}
static int countNonZero32s( const int* src, int len )
{ return countNonZero_(src, len); }
{
int i = 0, nz = 0;
#if CV_NEON
int len0 = len & -8, blockSize1 = (1 << 15), blockSize0 = blockSize1 << 6;
uint32x4_t v_nz = vdupq_n_u32(0u);
int32x4_t v_zero = vdupq_n_s32(0.0f);
uint16x8_t v_1 = vdupq_n_u16(1u), v_zerou = vdupq_n_u16(0u);
while( i < len0 )
{
int blockSizei = std::min(len0 - i, blockSize0), j = 0;
while (j < blockSizei)
{
int blockSizej = std::min(blockSizei - j, blockSize1), k = 0;
uint16x8_t v_pz = v_zerou;
for( ; k <= blockSizej - 8; k += 8 )
v_pz = vaddq_u16(v_pz, vandq_u16(vcombine_u16(vmovn_u32(vceqq_s32(vld1q_s32(src + k), v_zero)),
vmovn_u32(vceqq_s32(vld1q_s32(src + k + 4), v_zero))), v_1));
v_nz = vaddq_u32(vaddl_u16(vget_low_u16(v_pz), vget_high_u16(v_pz)), v_nz);
src += blockSizej;
j += blockSizej;
}
i += blockSizei;
}
CV_DECL_ALIGNED(16) unsigned int buf[4];
vst1q_u32(buf, v_nz);
nz += i - saturate_cast<int>(buf[0] + buf[1] + buf[2] + buf[3]);
#endif
return nz + countNonZero_(src, len - i);
}
static int countNonZero32f( const float* src, int len )
{ return countNonZero_(src, len); }
{
int i = 0, nz = 0;
#if CV_NEON
int len0 = len & -8, blockSize1 = (1 << 15), blockSize0 = blockSize1 << 6;
uint32x4_t v_nz = vdupq_n_u32(0u);
float32x4_t v_zero = vdupq_n_f32(0.0f);
uint16x8_t v_1 = vdupq_n_u16(1u), v_zerou = vdupq_n_u16(0u);
while( i < len0 )
{
int blockSizei = std::min(len0 - i, blockSize0), j = 0;
while (j < blockSizei)
{
int blockSizej = std::min(blockSizei - j, blockSize1), k = 0;
uint16x8_t v_pz = v_zerou;
for( ; k <= blockSizej - 8; k += 8 )
v_pz = vaddq_u16(v_pz, vandq_u16(vcombine_u16(vmovn_u32(vceqq_f32(vld1q_f32(src + k), v_zero)),
vmovn_u32(vceqq_f32(vld1q_f32(src + k + 4), v_zero))), v_1));
v_nz = vaddq_u32(vaddl_u16(vget_low_u16(v_pz), vget_high_u16(v_pz)), v_nz);
src += blockSizej;
j += blockSizej;
}
i += blockSizei;
}
CV_DECL_ALIGNED(16) unsigned int buf[4];
vst1q_u32(buf, v_nz);
nz += i - saturate_cast<int>(buf[0] + buf[1] + buf[2] + buf[3]);
#endif
return nz + countNonZero_(src, len - i);
}
static int countNonZero64f( const double* src, int len )
{ return countNonZero_(src, len); }
@ -1956,6 +2091,14 @@ float normL1_(const float* a, const float* b, int n)
d = buf[0] + buf[1] + buf[2] + buf[3];
}
else
#elif CV_NEON
float32x4_t v_sum = vdupq_n_f32(0.0f);
for ( ; j <= n - 4; j += 4)
v_sum = vaddq_f32(v_sum, vabdq_f32(vld1q_f32(a + j), vld1q_f32(b + j)));
float CV_DECL_ALIGNED(16) buf[4];
vst1q_f32(buf, v_sum);
d = buf[0] + buf[1] + buf[2] + buf[3];
#endif
{
for( ; j <= n - 4; j += 4 )
@ -1996,6 +2139,19 @@ int normL1_(const uchar* a, const uchar* b, int n)
d = _mm_cvtsi128_si32(_mm_add_epi32(d0, _mm_unpackhi_epi64(d0, d0)));
}
else
#elif CV_NEON
uint32x4_t v_sum = vdupq_n_u32(0.0f);
for ( ; j <= n - 16; j += 16)
{
uint8x16_t v_dst = vabdq_u8(vld1q_u8(a + j), vld1q_u8(b + j));
uint16x8_t v_low = vmovl_u8(vget_low_u8(v_dst)), v_high = vmovl_u8(vget_high_u8(v_dst));
v_sum = vaddq_u32(v_sum, vaddl_u16(vget_low_u16(v_low), vget_low_u16(v_high)));
v_sum = vaddq_u32(v_sum, vaddl_u16(vget_high_u16(v_low), vget_high_u16(v_high)));
}
uint CV_DECL_ALIGNED(16) buf[4];
vst1q_u32(buf, v_sum);
d = buf[0] + buf[1] + buf[2] + buf[3];
#endif
{
for( ; j <= n - 4; j += 4 )

@ -98,6 +98,11 @@ PERF_TEST_P(Size_MatType_BorderType, blur16x16,
Size size = get<0>(GetParam());
int type = get<1>(GetParam());
BorderType btype = get<2>(GetParam());
double eps = 1e-3;
#if CV_NEON
eps = CV_MAT_DEPTH(type) <= CV_32S ? 1 : eps;
#endif
Mat src(size, type);
Mat dst(size, type);
@ -106,7 +111,7 @@ PERF_TEST_P(Size_MatType_BorderType, blur16x16,
TEST_CYCLE() blur(src, dst, Size(16,16), Point(-1,-1), btype);
SANITY_CHECK(dst, 1e-3);
SANITY_CHECK(dst, eps);
}
PERF_TEST_P(Size_MatType_BorderType3x3, box3x3,

@ -46,10 +46,433 @@
namespace cv
{
template <typename T, typename AT>
struct Acc_SIMD
{
    // Generic (non-vectorized) fallback for the accumulate kernel.
    // Args: (src, dst, mask, len, cn). Returns 0 elements processed so the
    // caller's scalar loop handles the whole row.
    int operator() (const T *, AT *, const uchar *, int, int) const
    {
        return 0;
    }
};
template <typename T, typename AT>
struct AccSqr_SIMD
{
    // Generic (non-vectorized) fallback for the accumulate-square kernel.
    // Args: (src, dst, mask, len, cn). Returns 0 so the scalar loop runs.
    int operator() (const T *, AT *, const uchar *, int, int) const
    {
        return 0;
    }
};
template <typename T, typename AT>
struct AccProd_SIMD
{
    // Generic (non-vectorized) fallback for the accumulate-product kernel.
    // Args: (src1, src2, dst, mask, len, cn). Returns 0 so the scalar loop runs.
    int operator() (const T *, const T *, AT *, const uchar *, int, int) const
    {
        return 0;
    }
};
template <typename T, typename AT>
struct AccW_SIMD
{
    // Generic (non-vectorized) fallback for the weighted-accumulate kernel.
    // Args: (src, dst, mask, len, cn, alpha). Returns 0 so the scalar loop runs.
    int operator() (const T *, AT *, const uchar *, int, int, AT) const
    {
        return 0;
    }
};
#if CV_NEON
template <>
struct Acc_SIMD<uchar, float>
{
    // NEON accumulate: dst += src for uchar input. Vectorizes the unmasked
    // case for any cn and the masked case for cn == 1; returns the number of
    // elements processed (the caller finishes the remainder with scalar code).
    int operator() (const uchar * src, float * dst, const uchar * mask, int len, int cn) const
    {
        int x = 0;
        if (!mask)
        {
            len *= cn;
            for ( ; x <= len - 16; x += 16)
            {
                uint8x16_t v_src = vld1q_u8(src + x);
                // widen u8 -> u16 -> u32 -> f32 and add to the accumulator
                uint16x8_t v_src0 = vmovl_u8(vget_low_u8(v_src)), v_src1 = vmovl_u8(vget_high_u8(v_src));
                vst1q_f32(dst + x, vaddq_f32(vld1q_f32(dst + x), vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src0)))));
                vst1q_f32(dst + x + 4, vaddq_f32(vld1q_f32(dst + x + 4), vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src0)))));
                vst1q_f32(dst + x + 8, vaddq_f32(vld1q_f32(dst + x + 8), vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src1)))));
                vst1q_f32(dst + x + 12, vaddq_f32(vld1q_f32(dst + x + 12), vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src1)))));
            }
        }
        else if (cn == 1)
        {
            uint8x16_t v_255 = vdupq_n_u8(255), v_0 = vdupq_n_u8(0);
            for ( ; x <= len - 16; x += 16)
            {
                // zero out source lanes where mask == 0: src & ~(mask == 0)
                uint8x16_t v_src = vandq_u8(vld1q_u8(src + x), veorq_u8(v_255, vceqq_u8(vld1q_u8(mask + x), v_0)));
                uint16x8_t v_src0 = vmovl_u8(vget_low_u8(v_src)), v_src1 = vmovl_u8(vget_high_u8(v_src));
                vst1q_f32(dst + x, vaddq_f32(vld1q_f32(dst + x), vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src0)))));
                vst1q_f32(dst + x + 4, vaddq_f32(vld1q_f32(dst + x + 4), vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src0)))));
                vst1q_f32(dst + x + 8, vaddq_f32(vld1q_f32(dst + x + 8), vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src1)))));
                vst1q_f32(dst + x + 12, vaddq_f32(vld1q_f32(dst + x + 12), vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src1)))));
            }
        }
        return x;
    }
};
template <>
struct Acc_SIMD<ushort, float>
{
int operator() (const ushort * src, float * dst, const uchar * mask, int len, int cn) const
{
int x = 0;
if (!mask)
{
len *= cn;
for ( ; x <= len - 8; x += 8)
{
uint16x8_t v_src = vld1q_u16(src + x);
uint32x4_t v_src0 = vmovl_u16(vget_low_u16(v_src)), v_src1 = vmovl_u16(vget_high_u16(v_src));
vst1q_f32(dst + x, vaddq_f32(vld1q_f32(dst + x), vcvtq_f32_u32(v_src0)));
vst1q_f32(dst + x + 4, vaddq_f32(vld1q_f32(dst + x + 4), vcvtq_f32_u32(v_src1)));
}
}
return x;
}
};
template <>
struct Acc_SIMD<float, float>
{
    // NEON accumulate: dst += src for float input (unmasked case only).
    int operator() (const float * src, float * dst, const uchar * mask, int len, int cn) const
    {
        int x = 0;
        if (mask)
            return x;  // masked path is left to the scalar fallback
        len *= cn;
        for ( ; x <= len - 8; x += 8)
        {
            vst1q_f32(dst + x, vaddq_f32(vld1q_f32(dst + x), vld1q_f32(src + x)));
            vst1q_f32(dst + x + 4, vaddq_f32(vld1q_f32(dst + x + 4), vld1q_f32(src + x + 4)));
        }
        return x;
    }
};
template <>
struct AccSqr_SIMD<uchar, float>
{
    // NEON accumulate-square: dst += src*src for uchar input. Vectorizes the
    // unmasked case for any cn and the masked case for cn == 1; returns the
    // number of elements processed.
    int operator() (const uchar * src, float * dst, const uchar * mask, int len, int cn) const
    {
        int x = 0;
        if (!mask)
        {
            len *= cn;
            for ( ; x <= len - 16; x += 16)
            {
                uint8x16_t v_src = vld1q_u8(src + x);
                // u8*u8 widening multiply fits exactly in u16 (255*255 < 65536)
                uint8x8_t v_src_0 = vget_low_u8(v_src), v_src_1 = vget_high_u8(v_src);
                uint16x8_t v_src0 = vmull_u8(v_src_0, v_src_0), v_src1 = vmull_u8(v_src_1, v_src_1);
                vst1q_f32(dst + x, vaddq_f32(vld1q_f32(dst + x), vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src0)))));
                vst1q_f32(dst + x + 4, vaddq_f32(vld1q_f32(dst + x + 4), vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src0)))));
                vst1q_f32(dst + x + 8, vaddq_f32(vld1q_f32(dst + x + 8), vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src1)))));
                vst1q_f32(dst + x + 12, vaddq_f32(vld1q_f32(dst + x + 12), vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src1)))));
            }
        }
        else if (cn == 1)
        {
            uint8x16_t v_255 = vdupq_n_u8(255), v_0 = vdupq_n_u8(0);
            for ( ; x <= len - 16; x += 16)
            {
                // zero out source lanes where mask == 0 before squaring
                uint8x16_t v_src = vandq_u8(vld1q_u8(src + x), veorq_u8(v_255, vceqq_u8(vld1q_u8(mask + x), v_0)));
                uint8x8_t v_src_0 = vget_low_u8(v_src), v_src_1 = vget_high_u8(v_src);
                uint16x8_t v_src0 = vmull_u8(v_src_0, v_src_0), v_src1 = vmull_u8(v_src_1, v_src_1);
                vst1q_f32(dst + x, vaddq_f32(vld1q_f32(dst + x), vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src0)))));
                vst1q_f32(dst + x + 4, vaddq_f32(vld1q_f32(dst + x + 4), vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src0)))));
                vst1q_f32(dst + x + 8, vaddq_f32(vld1q_f32(dst + x + 8), vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src1)))));
                vst1q_f32(dst + x + 12, vaddq_f32(vld1q_f32(dst + x + 12), vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src1)))));
            }
        }
        return x;
    }
};
template <>
struct AccSqr_SIMD<ushort, float>
{
    // NEON accumulate-square: dst += src*src for ushort input. Vectorizes the
    // unmasked case for any cn and the masked case for cn == 1.
    int operator() (const ushort * src, float * dst, const uchar * mask, int len, int cn) const
    {
        int x = 0;
        if (!mask)
        {
            len *= cn;
            for ( ; x <= len - 8; x += 8)
            {
                uint16x8_t v_src = vld1q_u16(src + x);
                // u16*u16 widening multiply fits exactly in u32
                uint16x4_t v_src_0 = vget_low_u16(v_src), v_src_1 = vget_high_u16(v_src);
                uint32x4_t v_src0 = vmull_u16(v_src_0, v_src_0), v_src1 = vmull_u16(v_src_1, v_src_1);
                vst1q_f32(dst + x, vaddq_f32(vld1q_f32(dst + x), vcvtq_f32_u32(v_src0)));
                vst1q_f32(dst + x + 4, vaddq_f32(vld1q_f32(dst + x + 4), vcvtq_f32_u32(v_src1)));
            }
        }
        else if (cn == 1)
        {
            uint8x8_t v_255 = vdup_n_u8(255), v_0 = vdup_n_u8(0);
            for ( ; x <= len - 8; x += 8)
            {
                // build a per-lane u16 mask by duplicating each u8 mask byte
                // (zip with itself), then zero masked-out source lanes
                uint8x8_t v_mask_src = veor_u8(v_255, vceq_u8(vld1_u8(mask + x), v_0));
                uint8x8x2_t v_mask_zp = vzip_u8(v_mask_src, v_mask_src);
                uint16x8_t v_mask = vreinterpretq_u16_u8(vcombine_u8(v_mask_zp.val[0], v_mask_zp.val[1])),
                           v_src = vandq_u16(vld1q_u16(src + x), v_mask);
                uint16x4_t v_src_0 = vget_low_u16(v_src), v_src_1 = vget_high_u16(v_src);
                uint32x4_t v_src0 = vmull_u16(v_src_0, v_src_0), v_src1 = vmull_u16(v_src_1, v_src_1);
                vst1q_f32(dst + x, vaddq_f32(vld1q_f32(dst + x), vcvtq_f32_u32(v_src0)));
                vst1q_f32(dst + x + 4, vaddq_f32(vld1q_f32(dst + x + 4), vcvtq_f32_u32(v_src1)));
            }
        }
        return x;
    }
};
template <>
struct AccSqr_SIMD<float, float>
{
int operator() (const float * src, float * dst, const uchar * mask, int len, int cn) const
{
int x = 0;
if (!mask)
{
len *= cn;
for ( ; x <= len - 8; x += 8)
{
float32x4_t v_src = vld1q_f32(src + x);
vst1q_f32(dst + x, vmlaq_f32(vld1q_f32(dst + x), v_src, v_src));
v_src = vld1q_f32(src + x + 4);
vst1q_f32(dst + x + 4, vmlaq_f32(vld1q_f32(dst + x + 4), v_src, v_src));
}
}
return x;
}
};
template <>
struct AccProd_SIMD<uchar, float>
{
    // NEON accumulate-product: dst += src1*src2 for uchar inputs. Vectorizes
    // the unmasked case for any cn and the masked case for cn == 1.
    int operator() (const uchar * src1, const uchar * src2, float * dst, const uchar * mask, int len, int cn) const
    {
        int x = 0;
        if (!mask)
        {
            len *= cn;
            for ( ; x <= len - 16; x += 16)
            {
                uint8x16_t v_1src = vld1q_u8(src1 + x), v_2src = vld1q_u8(src2 + x);
                // u8*u8 widening multiply fits exactly in u16
                uint16x8_t v_src0 = vmull_u8(vget_low_u8(v_1src), vget_low_u8(v_2src)),
                           v_src1 = vmull_u8(vget_high_u8(v_1src), vget_high_u8(v_2src));
                vst1q_f32(dst + x, vaddq_f32(vld1q_f32(dst + x), vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src0)))));
                vst1q_f32(dst + x + 4, vaddq_f32(vld1q_f32(dst + x + 4), vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src0)))));
                vst1q_f32(dst + x + 8, vaddq_f32(vld1q_f32(dst + x + 8), vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src1)))));
                vst1q_f32(dst + x + 12, vaddq_f32(vld1q_f32(dst + x + 12), vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src1)))));
            }
        }
        else if (cn == 1)
        {
            uint8x16_t v_255 = vdupq_n_u8(255), v_0 = vdupq_n_u8(0);
            for ( ; x <= len - 16; x += 16)
            {
                // zero out both sources where mask == 0 so the product is 0 there
                uint8x16_t v_mask = veorq_u8(v_255, vceqq_u8(vld1q_u8(mask + x), v_0));
                uint8x16_t v_1src = vandq_u8(vld1q_u8(src1 + x), v_mask), v_2src = vandq_u8(vld1q_u8(src2 + x), v_mask);
                uint16x8_t v_src0 = vmull_u8(vget_low_u8(v_1src), vget_low_u8(v_2src)),
                           v_src1 = vmull_u8(vget_high_u8(v_1src), vget_high_u8(v_2src));
                vst1q_f32(dst + x, vaddq_f32(vld1q_f32(dst + x), vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src0)))));
                vst1q_f32(dst + x + 4, vaddq_f32(vld1q_f32(dst + x + 4), vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src0)))));
                vst1q_f32(dst + x + 8, vaddq_f32(vld1q_f32(dst + x + 8), vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src1)))));
                vst1q_f32(dst + x + 12, vaddq_f32(vld1q_f32(dst + x + 12), vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src1)))));
            }
        }
        return x;
    }
};
template <>
struct AccProd_SIMD<ushort, float>
{
    // NEON accumulate-product: dst += src1*src2 for ushort inputs. Vectorizes
    // the unmasked case for any cn and the masked case for cn == 1.
    int operator() (const ushort * src1, const ushort * src2, float * dst, const uchar * mask, int len, int cn) const
    {
        int x = 0;
        if (!mask)
        {
            len *= cn;
            for ( ; x <= len - 8; x += 8)
            {
                uint16x8_t v_1src = vld1q_u16(src1 + x), v_2src = vld1q_u16(src2 + x);
                // u16*u16 widening multiply fits exactly in u32
                uint32x4_t v_src0 = vmull_u16(vget_low_u16(v_1src), vget_low_u16(v_2src)),
                           v_src1 = vmull_u16(vget_high_u16(v_1src), vget_high_u16(v_2src));
                vst1q_f32(dst + x, vaddq_f32(vld1q_f32(dst + x), vcvtq_f32_u32(v_src0)));
                vst1q_f32(dst + x + 4, vaddq_f32(vld1q_f32(dst + x + 4), vcvtq_f32_u32(v_src1)));
            }
        }
        else if (cn == 1)
        {
            uint8x8_t v_255 = vdup_n_u8(255), v_0 = vdup_n_u8(0);
            for ( ; x <= len - 8; x += 8)
            {
                // expand the u8 mask to per-lane u16 (zip with itself), then
                // zero masked-out lanes of both sources
                uint8x8_t v_mask_src = veor_u8(v_255, vceq_u8(vld1_u8(mask + x), v_0));
                uint8x8x2_t v_mask_zp = vzip_u8(v_mask_src, v_mask_src);
                uint16x8_t v_mask = vreinterpretq_u16_u8(vcombine_u8(v_mask_zp.val[0], v_mask_zp.val[1])),
                           v_1src = vandq_u16(vld1q_u16(src1 + x), v_mask),
                           v_2src = vandq_u16(vld1q_u16(src2 + x), v_mask);
                uint32x4_t v_src0 = vmull_u16(vget_low_u16(v_1src), vget_low_u16(v_2src)),
                           v_src1 = vmull_u16(vget_high_u16(v_1src), vget_high_u16(v_2src));
                vst1q_f32(dst + x, vaddq_f32(vld1q_f32(dst + x), vcvtq_f32_u32(v_src0)));
                vst1q_f32(dst + x + 4, vaddq_f32(vld1q_f32(dst + x + 4), vcvtq_f32_u32(v_src1)));
            }
        }
        return x;
    }
};
template <>
struct AccProd_SIMD<float, float>
{
    // NEON accumulate-product: dst += src1*src2 (unmasked case only).
    int operator() (const float * src1, const float * src2, float * dst, const uchar * mask, int len, int cn) const
    {
        int x = 0;
        if (mask)
            return x;  // masked path is left to the scalar fallback
        len *= cn;
        for ( ; x <= len - 8; x += 8)
        {
            vst1q_f32(dst + x, vmlaq_f32(vld1q_f32(dst + x), vld1q_f32(src1 + x), vld1q_f32(src2 + x)));
            vst1q_f32(dst + x + 4, vmlaq_f32(vld1q_f32(dst + x + 4), vld1q_f32(src1 + x + 4), vld1q_f32(src2 + x + 4)));
        }
        return x;
    }
};
// NEON specialization of the accW (running weighted average) kernel for
// uchar input / float accumulator:
//   dst[i] = dst[i]*(1 - alpha) + src[i]*alpha (unmasked path only).
// Returns the number of elements processed; the scalar tail handles the rest.
template <>
struct AccW_SIMD<uchar, float>
{
    int operator() (const uchar * src, float * dst, const uchar * mask, int len, int cn, float alpha) const
    {
        int x = 0;
        float32x4_t v_alpha = vdupq_n_f32(alpha), v_beta = vdupq_n_f32(1.0f - alpha);

        if (!mask)
        {
            len *= cn;   // no mask: flat array of len*cn values
            for ( ; x <= len - 16; x += 16)
            {
                uint8x16_t v_src = vld1q_u8(src + x);
                // widen u8 -> u16, then each u16 half is widened again to u32
                // and converted to float for the blend below
                uint16x8_t v_src0 = vmovl_u8(vget_low_u8(v_src)), v_src1 = vmovl_u8(vget_high_u8(v_src));

                // dst*beta + src*alpha, four lanes at a time
                vst1q_f32(dst + x, vmlaq_f32(vmulq_f32(vld1q_f32(dst + x), v_beta),
                                             vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src0))), v_alpha));
                vst1q_f32(dst + x + 4, vmlaq_f32(vmulq_f32(vld1q_f32(dst + x + 4), v_beta),
                                                 vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src0))), v_alpha));
                vst1q_f32(dst + x + 8, vmlaq_f32(vmulq_f32(vld1q_f32(dst + x + 8), v_beta),
                                                 vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src1))), v_alpha));
                vst1q_f32(dst + x + 12, vmlaq_f32(vmulq_f32(vld1q_f32(dst + x + 12), v_beta),
                                                  vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src1))), v_alpha));
            }
        }

        return x;
    }
};
// NEON specialization of the accW (running weighted average) kernel for
// ushort input / float accumulator:
//   dst[i] = dst[i]*(1 - alpha) + src[i]*alpha (unmasked path only).
// Returns the number of elements processed; the scalar tail handles the rest.
template <>
struct AccW_SIMD<ushort, float>
{
    int operator() (const ushort * src, float * dst, const uchar * mask, int len, int cn, float alpha) const
    {
        int x = 0;
        float32x4_t v_alpha = vdupq_n_f32(alpha), v_beta = vdupq_n_f32(1.0f - alpha);

        if (!mask)
        {
            len *= cn;   // no mask: flat array of len*cn values
            for ( ; x <= len - 8; x += 8)
            {
                uint16x8_t v_src = vld1q_u16(src + x);
                // widen u16 -> u32 so each half can be converted to float
                uint32x4_t v_src0 = vmovl_u16(vget_low_u16(v_src)), v_src1 = vmovl_u16(vget_high_u16(v_src));

                // dst*beta + src*alpha, four lanes at a time
                vst1q_f32(dst + x, vmlaq_f32(vmulq_f32(vld1q_f32(dst + x), v_beta), vcvtq_f32_u32(v_src0), v_alpha));
                vst1q_f32(dst + x + 4, vmlaq_f32(vmulq_f32(vld1q_f32(dst + x + 4), v_beta), vcvtq_f32_u32(v_src1), v_alpha));
            }
        }

        return x;
    }
};
template <>
struct AccW_SIMD<float, float>
{
int operator() (const float * src, float * dst, const uchar * mask, int len, int cn, float alpha) const
{
int x = 0;
float32x4_t v_alpha = vdupq_n_f32(alpha), v_beta = vdupq_n_f32(1.0f - alpha);
if (!mask)
{
len *= cn;
for ( ; x <= len - 8; x += 8)
{
vst1q_f32(dst + x, vmlaq_f32(vmulq_f32(vld1q_f32(dst + x), v_beta), vld1q_f32(src + x), v_alpha));
vst1q_f32(dst + x + 4, vmlaq_f32(vmulq_f32(vld1q_f32(dst + x + 4), v_beta), vld1q_f32(src + x + 4), v_alpha));
}
}
return x;
}
};
#endif
template<typename T, typename AT> void
acc_( const T* src, AT* dst, const uchar* mask, int len, int cn )
{
int i = 0;
int i = Acc_SIMD<T, AT>()(src, dst, mask, len, cn);
if( !mask )
{
@ -107,7 +530,7 @@ acc_( const T* src, AT* dst, const uchar* mask, int len, int cn )
template<typename T, typename AT> void
accSqr_( const T* src, AT* dst, const uchar* mask, int len, int cn )
{
int i = 0;
int i = AccSqr_SIMD<T, AT>()(src, dst, mask, len, cn);
if( !mask )
{
@ -165,7 +588,7 @@ accSqr_( const T* src, AT* dst, const uchar* mask, int len, int cn )
template<typename T, typename AT> void
accProd_( const T* src1, const T* src2, AT* dst, const uchar* mask, int len, int cn )
{
int i = 0;
int i = AccProd_SIMD<T, AT>()(src1, src2, dst, mask, len, cn);
if( !mask )
{
@ -224,7 +647,7 @@ template<typename T, typename AT> void
accW_( const T* src, AT* dst, const uchar* mask, int len, int cn, double alpha )
{
AT a = (AT)alpha, b = 1 - a;
int i = 0;
int i = AccW_SIMD<T, AT>()(src, dst, mask, len, cn, a);
if( !mask )
{

@ -361,6 +361,15 @@ void cv::Canny( InputArray _src, OutputArray _dst,
_mm_storeu_si128((__m128i *)(_norm + j + 4), v_norm);
}
}
#elif CV_NEON
for ( ; j <= width - 8; j += 8)
{
int16x8_t v_dx = vld1q_s16(_dx + j), v_dy = vld1q_s16(_dy + j);
vst1q_s32(_norm + j, vaddq_s32(vabsq_s32(vmovl_s16(vget_low_s16(v_dx))),
vabsq_s32(vmovl_s16(vget_low_s16(v_dy)))));
vst1q_s32(_norm + j + 4, vaddq_s32(vabsq_s32(vmovl_s16(vget_high_s16(v_dx))),
vabsq_s32(vmovl_s16(vget_high_s16(v_dy)))));
}
#endif
for ( ; j < width; ++j)
_norm[j] = std::abs(int(_dx[j])) + std::abs(int(_dy[j]));
@ -386,6 +395,18 @@ void cv::Canny( InputArray _src, OutputArray _dst,
_mm_storeu_si128((__m128i *)(_norm + j + 4), v_norm);
}
}
#elif CV_NEON
for ( ; j <= width - 8; j += 8)
{
int16x8_t v_dx = vld1q_s16(_dx + j), v_dy = vld1q_s16(_dy + j);
int16x4_t v_dxp = vget_low_s16(v_dx), v_dyp = vget_low_s16(v_dy);
int32x4_t v_dst = vmlal_s16(vmull_s16(v_dxp, v_dxp), v_dyp, v_dyp);
vst1q_s32(_norm + j, v_dst);
v_dxp = vget_high_s16(v_dx), v_dyp = vget_high_s16(v_dy);
v_dst = vmlal_s16(vmull_s16(v_dxp, v_dxp), v_dyp, v_dyp);
vst1q_s32(_norm + j + 4, v_dst);
}
#endif
for ( ; j < width; ++j)
_norm[j] = int(_dx[j])*_dx[j] + int(_dy[j])*_dy[j];

@ -233,6 +233,31 @@ namespace
CLAHE_Interpolation_Body(const cv::Mat& src, const cv::Mat& dst, const cv::Mat& lut, const cv::Size& tileSize, const int& tilesX, const int& tilesY) :
src_(src), dst_(dst), lut_(lut), tileSize_(tileSize), tilesX_(tilesX), tilesY_(tilesY)
{
buf.allocate(src.cols << 2);
ind1_p = (int *)buf;
ind2_p = ind1_p + src.cols;
xa_p = (float *)(ind2_p + src.cols);
xa1_p = xa_p + src.cols;
int lut_step = static_cast<int>(lut_.step / sizeof(T));
float inv_tw = 1.0f / tileSize_.width;
for (int x = 0; x < src.cols; ++x)
{
float txf = x * inv_tw - 0.5f;
int tx1 = cvFloor(txf);
int tx2 = tx1 + 1;
xa_p[x] = txf - tx1;
xa1_p[x] = 1.0f - xa_p[x];
tx1 = std::max(tx1, 0);
tx2 = std::min(tx2, tilesX_ - 1);
ind1_p[x] = tx1 * lut_step;
ind2_p[x] = tx2 * lut_step;
}
}
void operator ()(const cv::Range& range) const;
@ -245,24 +270,28 @@ namespace
cv::Size tileSize_;
int tilesX_;
int tilesY_;
cv::AutoBuffer<int> buf;
int * ind1_p, * ind2_p;
float * xa_p, * xa1_p;
};
template <class T>
void CLAHE_Interpolation_Body<T>::operator ()(const cv::Range& range) const
{
const size_t lut_step = lut_.step / sizeof(T);
float inv_th = 1.0f / tileSize_.height;
for (int y = range.start; y < range.end; ++y)
{
const T* srcRow = src_.ptr<T>(y);
T* dstRow = dst_.ptr<T>(y);
const float tyf = (static_cast<float>(y) / tileSize_.height) - 0.5f;
float tyf = y * inv_th - 0.5f;
int ty1 = cvFloor(tyf);
int ty2 = ty1 + 1;
const float ya = tyf - ty1;
float ya = tyf - ty1, ya1 = 1.0f - ya;
ty1 = std::max(ty1, 0);
ty2 = std::min(ty2, tilesY_ - 1);
@ -272,27 +301,13 @@ namespace
for (int x = 0; x < src_.cols; ++x)
{
const float txf = (static_cast<float>(x) / tileSize_.width) - 0.5f;
int tx1 = cvFloor(txf);
int tx2 = tx1 + 1;
int srcVal = srcRow[x];
const float xa = txf - tx1;
tx1 = std::max(tx1, 0);
tx2 = std::min(tx2, tilesX_ - 1);
int ind1 = ind1_p[x] + srcVal;
int ind2 = ind2_p[x] + srcVal;
const int srcVal = srcRow[x];
const size_t ind1 = tx1 * lut_step + srcVal;
const size_t ind2 = tx2 * lut_step + srcVal;
float res = 0;
res += lutPlane1[ind1] * ((1.0f - xa) * (1.0f - ya));
res += lutPlane1[ind2] * ((xa) * (1.0f - ya));
res += lutPlane2[ind1] * ((1.0f - xa) * (ya));
res += lutPlane2[ind2] * ((xa) * (ya));
float res = (lutPlane1[ind1] * xa1_p[x] + lutPlane1[ind2] * xa_p[x]) * ya1 +
(lutPlane2[ind1] * xa1_p[x] + lutPlane2[ind2] * xa_p[x]) * ya;
dstRow[x] = cv::saturate_cast<T>(res);
}
@ -403,7 +418,9 @@ namespace
calcLutBody = cv::makePtr<CLAHE_CalcLut_Body<uchar, 256, 0> >(srcForLut, lut_, tileSize, tilesX_, clipLimit, lutScale);
else if (_src.type() == CV_16UC1)
calcLutBody = cv::makePtr<CLAHE_CalcLut_Body<ushort, 4096, 4> >(srcForLut, lut_, tileSize, tilesX_, clipLimit, lutScale);
CV_Assert(!calcLutBody.empty());
else
CV_Error( CV_StsBadArg, "Unsupported type" );
cv::parallel_for_(cv::Range(0, tilesX_ * tilesY_), *calcLutBody);
cv::Ptr<cv::ParallelLoopBody> interpolationBody;
@ -411,7 +428,7 @@ namespace
interpolationBody = cv::makePtr<CLAHE_Interpolation_Body<uchar> >(src, dst, lut_, tileSize, tilesX_, tilesY_);
else if (_src.type() == CV_16UC1)
interpolationBody = cv::makePtr<CLAHE_Interpolation_Body<ushort> >(src, dst, lut_, tileSize, tilesX_, tilesY_);
CV_Assert(!interpolationBody.empty());
cv::parallel_for_(cv::Range(0, src.rows), *interpolationBody);
}

File diff suppressed because it is too large Load Diff

@ -126,7 +126,7 @@ static void calcHarris( const Mat& _cov, Mat& _dst, double k )
if( simd )
{
__m128 k4 = _mm_set1_ps((float)k);
for( ; j <= size.width - 5; j += 4 )
for( ; j <= size.width - 4; j += 4 )
{
__m128 t0 = _mm_loadu_ps(cov + j*3); // a0 b0 c0 x
__m128 t1 = _mm_loadu_ps(cov + j*3 + 3); // a1 b1 c1 x
@ -146,6 +146,17 @@ static void calcHarris( const Mat& _cov, Mat& _dst, double k )
_mm_storeu_ps(dst + j, a);
}
}
#elif CV_NEON
float32x4_t v_k = vdupq_n_f32((float)k);
for( ; j <= size.width - 4; j += 4 )
{
float32x4x3_t v_src = vld3q_f32(cov + j * 3);
float32x4_t v_a = v_src.val[0], v_b = v_src.val[1], v_c = v_src.val[2];
float32x4_t v_ac_bb = vmlsq_f32(vmulq_f32(v_a, v_c), v_b, v_b);
float32x4_t v_ac = vaddq_f32(v_a, v_c);
vst1q_f32(dst + j, vmlsq_f32(v_ac_bb, v_k, vmulq_f32(v_ac, v_ac)));
}
#endif
for( ; j < size.width; j++ )
@ -607,10 +618,13 @@ void cv::preCornerDetect( InputArray _src, OutputArray _dst, int ksize, int bord
if( src.depth() == CV_8U )
factor *= 255;
factor = 1./(factor * factor * factor);
#if CV_NEON || CV_SSE2
float factor_f = (float)factor;
#endif
#if CV_SSE2
volatile bool haveSSE2 = cv::checkHardwareSupport(CV_CPU_SSE2);
__m128 v_factor = _mm_set1_ps((float)factor), v_m2 = _mm_set1_ps(-2.0f);
__m128 v_factor = _mm_set1_ps(factor_f), v_m2 = _mm_set1_ps(-2.0f);
#endif
Size size = src.size();
@ -641,6 +655,15 @@ void cv::preCornerDetect( InputArray _src, OutputArray _dst, int ksize, int bord
_mm_storeu_ps(dstdata + j, v_s1);
}
}
#elif CV_NEON
for( ; j <= size.width - 4; j += 4 )
{
float32x4_t v_dx = vld1q_f32(dxdata + j), v_dy = vld1q_f32(dydata + j);
float32x4_t v_s = vmulq_f32(v_dx, vmulq_f32(v_dx, vld1q_f32(d2ydata + j)));
v_s = vmlaq_f32(v_s, vld1q_f32(d2xdata + j), vmulq_f32(v_dy, v_dy));
v_s = vmlaq_f32(v_s, vld1q_f32(dxydata + j), vmulq_n_f32(vmulq_f32(v_dy, v_dx), -2));
vst1q_f32(dstdata + j, vmulq_n_f32(v_s, factor_f));
}
#endif
for( ; j < size.width; j++ )

@ -894,6 +894,183 @@ struct VResizeCubicVec_32f
}
};
#elif CV_NEON
typedef VResizeNoVec VResizeLinearVec_32s8u;
// NEON vertical pass of linear resize: blends two float source rows with
// weights beta[0], beta[1] and stores the result rounded/saturated to ushort.
// Returns the number of destination elements written; the caller finishes
// the scalar tail.
struct VResizeLinearVec_32f16u
{
    int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const
    {
        const float** src = (const float**)_src;
        const float* beta = (const float*)_beta;
        const float *S0 = src[0], *S1 = src[1];
        ushort* dst = (ushort*)_dst;
        int x = 0;

        float32x4_t v_b0 = vdupq_n_f32(beta[0]), v_b1 = vdupq_n_f32(beta[1]);

        for( ; x <= width - 8; x += 8 )
        {
            float32x4_t v_src00 = vld1q_f32(S0 + x), v_src01 = vld1q_f32(S0 + x + 4);
            float32x4_t v_src10 = vld1q_f32(S1 + x), v_src11 = vld1q_f32(S1 + x + 4);

            // S0*b0 + S1*b1 per lane
            float32x4_t v_dst0 = vmlaq_f32(vmulq_f32(v_src00, v_b0), v_src10, v_b1);
            float32x4_t v_dst1 = vmlaq_f32(vmulq_f32(v_src01, v_b0), v_src11, v_b1);

            // cv_vrndq_u32_f32 rounds to nearest u32; vqmovn saturates to u16
            vst1q_u16(dst + x, vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst0)),
                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst1))));
        }

        return x;
    }
};
// NEON vertical pass of linear resize: blends two float source rows with
// weights beta[0], beta[1] and stores the result rounded/saturated to short.
// Returns the number of destination elements written; the caller finishes
// the scalar tail.
struct VResizeLinearVec_32f16s
{
    int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const
    {
        const float** src = (const float**)_src;
        const float* beta = (const float*)_beta;
        const float *S0 = src[0], *S1 = src[1];
        short* dst = (short*)_dst;
        int x = 0;

        float32x4_t v_b0 = vdupq_n_f32(beta[0]), v_b1 = vdupq_n_f32(beta[1]);

        for( ; x <= width - 8; x += 8 )
        {
            float32x4_t v_src00 = vld1q_f32(S0 + x), v_src01 = vld1q_f32(S0 + x + 4);
            float32x4_t v_src10 = vld1q_f32(S1 + x), v_src11 = vld1q_f32(S1 + x + 4);

            // S0*b0 + S1*b1 per lane
            float32x4_t v_dst0 = vmlaq_f32(vmulq_f32(v_src00, v_b0), v_src10, v_b1);
            float32x4_t v_dst1 = vmlaq_f32(vmulq_f32(v_src01, v_b0), v_src11, v_b1);

            // cv_vrndq_s32_f32 rounds to nearest s32; vqmovn saturates to s16
            vst1q_s16(dst + x, vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst0)),
                                            vqmovn_s32(cv_vrndq_s32_f32(v_dst1))));
        }

        return x;
    }
};
struct VResizeLinearVec_32f
{
int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const
{
const float** src = (const float**)_src;
const float* beta = (const float*)_beta;
const float *S0 = src[0], *S1 = src[1];
float* dst = (float*)_dst;
int x = 0;
float32x4_t v_b0 = vdupq_n_f32(beta[0]), v_b1 = vdupq_n_f32(beta[1]);
for( ; x <= width - 8; x += 8 )
{
float32x4_t v_src00 = vld1q_f32(S0 + x), v_src01 = vld1q_f32(S0 + x + 4);
float32x4_t v_src10 = vld1q_f32(S1 + x), v_src11 = vld1q_f32(S1 + x + 4);
vst1q_f32(dst + x, vmlaq_f32(vmulq_f32(v_src00, v_b0), v_src10, v_b1));
vst1q_f32(dst + x + 4, vmlaq_f32(vmulq_f32(v_src01, v_b0), v_src11, v_b1));
}
return x;
}
};
typedef VResizeNoVec VResizeCubicVec_32s8u;
// NEON vertical pass of cubic resize: blends four float source rows with
// weights beta[0..3] and stores the result rounded/saturated to ushort.
// Returns the number of destination elements written; the caller finishes
// the scalar tail.
struct VResizeCubicVec_32f16u
{
    int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const
    {
        const float** src = (const float**)_src;
        const float* beta = (const float*)_beta;
        const float *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3];
        ushort* dst = (ushort*)_dst;
        int x = 0;

        float32x4_t v_b0 = vdupq_n_f32(beta[0]), v_b1 = vdupq_n_f32(beta[1]),
                    v_b2 = vdupq_n_f32(beta[2]), v_b3 = vdupq_n_f32(beta[3]);

        for( ; x <= width - 8; x += 8 )
        {
            // chained multiply-accumulate: b0*S0 + b1*S1 + b2*S2 + b3*S3
            float32x4_t v_dst0 = vmlaq_f32(vmlaq_f32(vmlaq_f32(vmulq_f32(v_b0, vld1q_f32(S0 + x)),
                                                               v_b1, vld1q_f32(S1 + x)),
                                                     v_b2, vld1q_f32(S2 + x)),
                                           v_b3, vld1q_f32(S3 + x));
            float32x4_t v_dst1 = vmlaq_f32(vmlaq_f32(vmlaq_f32(vmulq_f32(v_b0, vld1q_f32(S0 + x + 4)),
                                                               v_b1, vld1q_f32(S1 + x + 4)),
                                                     v_b2, vld1q_f32(S2 + x + 4)),
                                           v_b3, vld1q_f32(S3 + x + 4));

            // round to u32, then saturating-narrow to u16
            vst1q_u16(dst + x, vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst0)),
                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst1))));
        }

        return x;
    }
};
// NEON vertical pass of cubic resize: blends four float source rows with
// weights beta[0..3] and stores the result rounded/saturated to short.
// Returns the number of destination elements written; the caller finishes
// the scalar tail.
struct VResizeCubicVec_32f16s
{
    int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const
    {
        const float** src = (const float**)_src;
        const float* beta = (const float*)_beta;
        const float *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3];
        short* dst = (short*)_dst;
        int x = 0;

        float32x4_t v_b0 = vdupq_n_f32(beta[0]), v_b1 = vdupq_n_f32(beta[1]),
                    v_b2 = vdupq_n_f32(beta[2]), v_b3 = vdupq_n_f32(beta[3]);

        for( ; x <= width - 8; x += 8 )
        {
            // chained multiply-accumulate: b0*S0 + b1*S1 + b2*S2 + b3*S3
            float32x4_t v_dst0 = vmlaq_f32(vmlaq_f32(vmlaq_f32(vmulq_f32(v_b0, vld1q_f32(S0 + x)),
                                                               v_b1, vld1q_f32(S1 + x)),
                                                     v_b2, vld1q_f32(S2 + x)),
                                           v_b3, vld1q_f32(S3 + x));
            float32x4_t v_dst1 = vmlaq_f32(vmlaq_f32(vmlaq_f32(vmulq_f32(v_b0, vld1q_f32(S0 + x + 4)),
                                                               v_b1, vld1q_f32(S1 + x + 4)),
                                                     v_b2, vld1q_f32(S2 + x + 4)),
                                           v_b3, vld1q_f32(S3 + x + 4));

            // round to s32, then saturating-narrow to s16
            vst1q_s16(dst + x, vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst0)),
                                            vqmovn_s32(cv_vrndq_s32_f32(v_dst1))));
        }

        return x;
    }
};
// NEON vertical pass of cubic resize for float output: blends four float
// source rows with weights beta[0..3] and stores the float result directly.
// Returns the number of destination elements written; the caller finishes
// the scalar tail.
struct VResizeCubicVec_32f
{
    int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const
    {
        const float** src = (const float**)_src;
        const float* beta = (const float*)_beta;
        const float *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3];
        float* dst = (float*)_dst;
        int x = 0;

        float32x4_t v_b0 = vdupq_n_f32(beta[0]), v_b1 = vdupq_n_f32(beta[1]),
                    v_b2 = vdupq_n_f32(beta[2]), v_b3 = vdupq_n_f32(beta[3]);

        for( ; x <= width - 8; x += 8 )
        {
            // chained multiply-accumulate: b0*S0 + b1*S1 + b2*S2 + b3*S3
            vst1q_f32(dst + x, vmlaq_f32(vmlaq_f32(vmlaq_f32(vmulq_f32(v_b0, vld1q_f32(S0 + x)),
                                                             v_b1, vld1q_f32(S1 + x)),
                                                   v_b2, vld1q_f32(S2 + x)),
                                         v_b3, vld1q_f32(S3 + x)));
            vst1q_f32(dst + x + 4, vmlaq_f32(vmlaq_f32(vmlaq_f32(vmulq_f32(v_b0, vld1q_f32(S0 + x + 4)),
                                                                 v_b1, vld1q_f32(S1 + x + 4)),
                                                       v_b2, vld1q_f32(S2 + x + 4)),
                                             v_b3, vld1q_f32(S3 + x + 4)));
        }

        return x;
    }
};
#else
typedef VResizeNoVec VResizeLinearVec_32s8u;
@ -1322,7 +1499,120 @@ struct ResizeAreaFastNoVec
{ return 0; }
};
#if CV_SSE2
#if CV_NEON
// NEON 2x2 area-downscale kernel for 8-bit images: each output pixel is the
// rounded average of a 2x2 block from two consecutive source rows.
// Only cn == 1 and cn == 4 are vectorized; for other channel counts the
// operator returns 0 and the caller does everything in scalar code.
class ResizeAreaFastVec_SIMD_8u
{
public:
    ResizeAreaFastVec_SIMD_8u(int _cn, int _step) :
        cn(_cn), step(_step)
    {
    }

    int operator() (const uchar* S, uchar* D, int w) const
    {
        int dx = 0;
        const uchar* S0 = S, * S1 = S0 + step;   // two adjacent source rows
        uint16x8_t v_2 = vdupq_n_u16(2);         // +2 implements round-to-nearest before >>2

        if (cn == 1)
        {
            for ( ; dx <= w - 16; dx += 16, S0 += 32, S1 += 32, D += 16)
            {
                // de-interleave so val[0] holds even columns, val[1] odd columns
                uint8x16x2_t v_row0 = vld2q_u8(S0), v_row1 = vld2q_u8(S1);

                // sum the four neighbors widened to u16, then (sum + 2) >> 2
                uint16x8_t v_dst0 = vaddl_u8(vget_low_u8(v_row0.val[0]), vget_low_u8(v_row0.val[1]));
                v_dst0 = vaddq_u16(v_dst0, vaddl_u8(vget_low_u8(v_row1.val[0]), vget_low_u8(v_row1.val[1])));
                v_dst0 = vshrq_n_u16(vaddq_u16(v_dst0, v_2), 2);

                uint16x8_t v_dst1 = vaddl_u8(vget_high_u8(v_row0.val[0]), vget_high_u8(v_row0.val[1]));
                v_dst1 = vaddq_u16(v_dst1, vaddl_u8(vget_high_u8(v_row1.val[0]), vget_high_u8(v_row1.val[1])));
                v_dst1 = vshrq_n_u16(vaddq_u16(v_dst1, v_2), 2);

                vst1q_u8(D, vcombine_u8(vmovn_u16(v_dst0), vmovn_u16(v_dst1)))
;
            }
        }
        else if (cn == 4)
        {
            for ( ; dx <= w - 8; dx += 8, S0 += 16, S1 += 16, D += 8)
            {
                // 16 bytes = two 4-channel pixels per row; average matching channels
                uint8x16_t v_row0 = vld1q_u8(S0), v_row1 = vld1q_u8(S1);

                uint16x8_t v_row00 = vmovl_u8(vget_low_u8(v_row0));
                uint16x8_t v_row01 = vmovl_u8(vget_high_u8(v_row0));
                uint16x8_t v_row10 = vmovl_u8(vget_low_u8(v_row1));
                uint16x8_t v_row11 = vmovl_u8(vget_high_u8(v_row1));

                uint16x4_t v_p0 = vadd_u16(vadd_u16(vget_low_u16(v_row00), vget_high_u16(v_row00)),
                                           vadd_u16(vget_low_u16(v_row10), vget_high_u16(v_row10)));
                uint16x4_t v_p1 = vadd_u16(vadd_u16(vget_low_u16(v_row01), vget_high_u16(v_row01)),
                                           vadd_u16(vget_low_u16(v_row11), vget_high_u16(v_row11)));

                uint16x8_t v_dst = vshrq_n_u16(vaddq_u16(vcombine_u16(v_p0, v_p1), v_2), 2);
                vst1_u8(D, vmovn_u16(v_dst));
            }
        }

        return dx;
    }

private:
    int cn, step;   // channel count and source row step (bytes)
};
// NEON 2x2 area-downscale kernel for 16-bit images: each output pixel is the
// rounded average of a 2x2 block from two consecutive source rows.
// Only cn == 1 and cn == 4 are vectorized; for other channel counts the
// operator returns 0 and the caller does everything in scalar code.
class ResizeAreaFastVec_SIMD_16u
{
public:
    ResizeAreaFastVec_SIMD_16u(int _cn, int _step) :
        cn(_cn), step(_step)
    {
    }

    int operator() (const ushort * S, ushort * D, int w) const
    {
        int dx = 0;
        // step is in bytes, hence the uchar* arithmetic for the second row
        const ushort * S0 = S, * S1 = (const ushort *)((const uchar *)(S0) + step);
        uint32x4_t v_2 = vdupq_n_u32(2);   // +2 implements round-to-nearest before >>2

        if (cn == 1)
        {
            for ( ; dx <= w - 8; dx += 8, S0 += 16, S1 += 16, D += 8)
            {
                // de-interleave so val[0] holds even columns, val[1] odd columns
                uint16x8x2_t v_row0 = vld2q_u16(S0), v_row1 = vld2q_u16(S1);

                // sum the four neighbors widened to u32, then (sum + 2) >> 2
                uint32x4_t v_dst0 = vaddl_u16(vget_low_u16(v_row0.val[0]), vget_low_u16(v_row0.val[1]));
                v_dst0 = vaddq_u32(v_dst0, vaddl_u16(vget_low_u16(v_row1.val[0]), vget_low_u16(v_row1.val[1])));
                v_dst0 = vshrq_n_u32(vaddq_u32(v_dst0, v_2), 2);

                uint32x4_t v_dst1 = vaddl_u16(vget_high_u16(v_row0.val[0]), vget_high_u16(v_row0.val[1]));
                v_dst1 = vaddq_u32(v_dst1, vaddl_u16(vget_high_u16(v_row1.val[0]), vget_high_u16(v_row1.val[1])));
                v_dst1 = vshrq_n_u32(vaddq_u32(v_dst1, v_2), 2);

                vst1q_u16(D, vcombine_u16(vmovn_u32(v_dst0), vmovn_u32(v_dst1)));
            }
        }
        else if (cn == 4)
        {
            for ( ; dx <= w - 4; dx += 4, S0 += 8, S1 += 8, D += 4)
            {
                // 8 ushorts = two 4-channel pixels per row; average matching channels
                uint16x8_t v_row0 = vld1q_u16(S0), v_row1 = vld1q_u16(S1);
                uint32x4_t v_dst = vaddq_u32(vaddl_u16(vget_low_u16(v_row0), vget_high_u16(v_row0)),
                                             vaddl_u16(vget_low_u16(v_row1), vget_high_u16(v_row1)));
                vst1_u16(D, vmovn_u32(vshrq_n_u32(vaddq_u32(v_dst, v_2), 2)));
            }
        }

        return dx;
    }

private:
    int cn, step;   // channel count and source row step (bytes)
};
#elif CV_SSE2
class ResizeAreaFastVec_SIMD_8u
{
public:
@ -3489,7 +3779,15 @@ public:
bufxy = (*m1)(Rect(x, y, bcols, brows));
const ushort* sA = m2->ptr<ushort>(y+y1) + x;
for( x1 = 0; x1 < bcols; x1++ )
x1 = 0;
#if CV_NEON
uint16x8_t v_scale = vdupq_n_u16(INTER_TAB_SIZE2-1);
for ( ; x1 <= bcols - 8; x1 += 8)
vst1q_u16(A + x1, vandq_u16(vld1q_u16(sA + x1), v_scale));
#endif
for( ; x1 < bcols; x1++ )
A[x1] = (ushort)(sA[x1] & (INTER_TAB_SIZE2-1));
}
else if( planar_input )
@ -3534,6 +3832,22 @@ public:
_mm_storeu_si128((__m128i*)(XY + x1*2 + 8), iy1);
}
}
#elif CV_NEON
float32x4_t v_scale = vdupq_n_f32((float)INTER_TAB_SIZE);
int32x4_t v_scale2 = vdupq_n_s32(INTER_TAB_SIZE - 1), v_scale3 = vdupq_n_s32(INTER_TAB_SIZE);
for( ; x1 <= bcols - 4; x1 += 4 )
{
int32x4_t v_sx = cv_vrndq_s32_f32(vmulq_f32(vld1q_f32(sX + x1), v_scale)),
v_sy = cv_vrndq_s32_f32(vmulq_f32(vld1q_f32(sY + x1), v_scale));
int32x4_t v_v = vmlaq_s32(vandq_s32(v_sx, v_scale2), v_scale3,
vandq_s32(v_sy, v_scale2));
vst1_u16(A + x1, vqmovun_s32(v_v));
int16x4x2_t v_dst = vzip_s16(vqmovn_s32(vshrq_n_s32(v_sx, INTER_BITS)),
vqmovn_s32(vshrq_n_s32(v_sy, INTER_BITS)));
vst1q_s16(XY + (x1 << 1), vcombine_s16(v_dst.val[0], v_dst.val[1]));
}
#endif
for( ; x1 < bcols; x1++ )
@ -3549,6 +3863,26 @@ public:
else
{
const float* sXY = m1->ptr<float>(y+y1) + x*2;
x1 = 0;
#if CV_NEON
float32x4_t v_scale = vdupq_n_f32(INTER_TAB_SIZE);
int32x4_t v_scale2 = vdupq_n_s32(INTER_TAB_SIZE-1), v_scale3 = vdupq_n_s32(INTER_TAB_SIZE);
for( ; x1 <= bcols - 4; x1 += 4 )
{
float32x4x2_t v_src = vld2q_f32(sXY + (x1 << 1));
int32x4_t v_sx = cv_vrndq_s32_f32(vmulq_f32(v_src.val[0], v_scale));
int32x4_t v_sy = cv_vrndq_s32_f32(vmulq_f32(v_src.val[1], v_scale));
int32x4_t v_v = vmlaq_s32(vandq_s32(v_sx, v_scale2), v_scale3,
vandq_s32(v_sy, v_scale2));
vst1_u16(A + x1, vqmovun_s32(v_v));
int16x4x2_t v_dst = vzip_s16(vqmovn_s32(vshrq_n_s32(v_sx, INTER_BITS)),
vqmovn_s32(vshrq_n_s32(v_sy, INTER_BITS)));
vst1q_s16(XY + (x1 << 1), vcombine_s16(v_dst.val[0], v_dst.val[1]));
}
#endif
for( x1 = 0; x1 < bcols; x1++ )
{

@ -203,7 +203,7 @@ static Moments contourMoments( const Mat& contour )
\****************************************************************************************/
template<typename T, typename WT, typename MT>
struct MomentsInTile_SSE
struct MomentsInTile_SIMD
{
int operator() (const T *, int, WT &, WT &, WT &, MT &)
{
@ -214,9 +214,9 @@ struct MomentsInTile_SSE
#if CV_SSE2
template <>
struct MomentsInTile_SSE<uchar, int, int>
struct MomentsInTile_SIMD<uchar, int, int>
{
MomentsInTile_SSE()
MomentsInTile_SIMD()
{
useSIMD = checkHardwareSupport(CV_CPU_SSE2);
}
@ -234,17 +234,16 @@ struct MomentsInTile_SSE<uchar, int, int>
for( ; x <= len - 8; x += 8 )
{
__m128i p = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(ptr + x)), z);
qx0 = _mm_add_epi32(qx0, _mm_sad_epu8(p, z));
__m128i px = _mm_mullo_epi16(p, qx);
__m128i sx = _mm_mullo_epi16(qx, qx);
qx0 = _mm_add_epi32(qx0, _mm_sad_epu8(p, z));
qx1 = _mm_add_epi32(qx1, _mm_madd_epi16(p, qx));
qx2 = _mm_add_epi32(qx2, _mm_madd_epi16(p, sx));
qx3 = _mm_add_epi32(qx3, _mm_madd_epi16(px, sx));
qx3 = _mm_add_epi32(qx3, _mm_madd_epi16( _mm_mullo_epi16(p, qx), sx));
qx = _mm_add_epi16(qx, dx);
}
int CV_DECL_ALIGNED(16) buf[4];
_mm_store_si128((__m128i*)buf, qx0);
x0 = buf[0] + buf[1] + buf[2] + buf[3];
_mm_store_si128((__m128i*)buf, qx1);
@ -258,17 +257,84 @@ struct MomentsInTile_SSE<uchar, int, int>
return x;
}
int CV_DECL_ALIGNED(16) buf[4];
bool useSIMD;
};
#elif CV_NEON
// NEON specialization of the per-row moment accumulation for 8-bit tiles:
//   x0 = sum(p), x1 = sum(x*p), x2 = sum(x^2*p), x3 = sum(x^3*p)
// where p = ptr[x]. Returns the number of pixels processed; the scalar
// tail loop in momentsInTile finishes the row.
// NOTE(review): qx is a 16-bit column counter and the accumulators are
// 32-bit, so this assumes tile rows are short enough that neither
// overflows — confirm against the tile size used by the caller.
template <>
struct MomentsInTile_SIMD<uchar, int, int>
{
    MomentsInTile_SIMD()
    {
        // column indices 0..3; advanced by 4 after each half-vector
        ushort CV_DECL_ALIGNED(8) init[4] = { 0, 1, 2, 3 };
        qx_init = vld1_u16(init);
        v_step = vdup_n_u16(4);
    }

    int operator() (const uchar * ptr, int len, int & x0, int & x1, int & x2, int & x3)
    {
        int x = 0;

        uint32x4_t v_z = vdupq_n_u32(0), v_x0 = v_z, v_x1 = v_z,
            v_x2 = v_z, v_x3 = v_z;
        uint16x4_t qx = qx_init;

        for( ; x <= len - 8; x += 8 )
        {
            uint16x8_t v_src = vmovl_u8(vld1_u8(ptr + x));

            // first part: columns x..x+3
            uint32x4_t v_qx = vmovl_u16(qx);
            uint16x4_t v_p = vget_low_u16(v_src);
            uint32x4_t v_px = vmull_u16(qx, v_p);        // x*p

            v_x0 = vaddw_u16(v_x0, v_p);                 // += p
            v_x1 = vaddq_u32(v_x1, v_px);                // += x*p
            v_px = vmulq_u32(v_px, v_qx);                // x^2*p
            v_x2 = vaddq_u32(v_x2, v_px);
            v_x3 = vaddq_u32(v_x3, vmulq_u32(v_px, v_qx)); // += x^3*p

            qx = vadd_u16(qx, v_step);

            // second part: columns x+4..x+7 (same scheme)
            v_qx = vmovl_u16(qx);
            v_p = vget_high_u16(v_src);
            v_px = vmull_u16(qx, v_p);

            v_x0 = vaddw_u16(v_x0, v_p);
            v_x1 = vaddq_u32(v_x1, v_px);
            v_px = vmulq_u32(v_px, v_qx);
            v_x2 = vaddq_u32(v_x2, v_px);
            v_x3 = vaddq_u32(v_x3, vmulq_u32(v_px, v_qx));

            qx = vadd_u16(qx, v_step);
        }

        // horizontal reduction of each accumulator
        vst1q_u32(buf, v_x0);
        x0 = buf[0] + buf[1] + buf[2] + buf[3];
        vst1q_u32(buf, v_x1);
        x1 = buf[0] + buf[1] + buf[2] + buf[3];
        vst1q_u32(buf, v_x2);
        x2 = buf[0] + buf[1] + buf[2] + buf[3];
        vst1q_u32(buf, v_x3);
        x3 = buf[0] + buf[1] + buf[2] + buf[3];

        return x;
    }

    uint CV_DECL_ALIGNED(16) buf[4];    // scratch for horizontal sums
    uint16x4_t qx_init, v_step;
};
#endif
#if CV_SSE4_1
template <>
struct MomentsInTile_SSE<ushort, int, int64>
struct MomentsInTile_SIMD<ushort, int, int64>
{
MomentsInTile_SSE()
MomentsInTile_SIMD()
{
useSIMD = checkHardwareSupport(CV_CPU_SSE4_1);
}
@ -302,9 +368,6 @@ struct MomentsInTile_SSE<ushort, int, int64>
v_ix1 = _mm_add_epi32(v_ix1, v_delta);
}
int CV_DECL_ALIGNED(16) buf[4];
int64 CV_DECL_ALIGNED(16) buf64[2];
_mm_store_si128((__m128i*)buf, v_x0);
x0 = buf[0] + buf[1] + buf[2] + buf[3];
_mm_store_si128((__m128i*)buf, v_x1);
@ -319,6 +382,8 @@ struct MomentsInTile_SSE<ushort, int, int64>
return x;
}
int CV_DECL_ALIGNED(16) buf[4];
int64 CV_DECL_ALIGNED(16) buf64[2];
bool useSIMD;
};
@ -334,7 +399,7 @@ static void momentsInTile( const Mat& img, double* moments )
Size size = img.size();
int x, y;
MT mom[10] = {0,0,0,0,0,0,0,0,0,0};
MomentsInTile_SSE<T, WT, MT> vop;
MomentsInTile_SIMD<T, WT, MT> vop;
for( y = 0; y < size.height; y++ )
{

@ -178,11 +178,190 @@ struct PyrDownVec_32f
}
};
typedef NoVec<int, ushort> PyrDownVec_32s16u;
typedef NoVec<int, short> PyrDownVec_32s16s;
typedef NoVec<float, float> PyrUpVec_32f;
#elif CV_NEON
// NEON vertical pass of pyrDown for 8-bit images: combines five int rows with
// the 1-4-6-4-1 Gaussian kernel, normalizes by 256 with rounding, and stores
// saturated uchar results. Returns the number of elements processed.
// NOTE(review): the int rows are reinterpreted as unsigned and narrowed with
// vqmovn_u32, which assumes all intermediate row sums are non-negative and
// fit in 16 bits — confirm against the pyrDown_ row filter.
struct PyrDownVec_32s8u
{
    int operator()(int** src, uchar* dst, int, int width) const
    {
        int x = 0;
        const unsigned int *row0 = (unsigned int*)src[0], *row1 = (unsigned int*)src[1],
                           *row2 = (unsigned int*)src[2], *row3 = (unsigned int*)src[3],
                           *row4 = (unsigned int*)src[4];

        uint16x8_t v_delta = vdupq_n_u16(128);   // rounding bias before >>8

        for( ; x <= width - 16; x += 16 )
        {
            // narrow each int row to u16 lanes
            uint16x8_t v_r0 = vcombine_u16(vqmovn_u32(vld1q_u32(row0 + x)), vqmovn_u32(vld1q_u32(row0 + x + 4)));
            uint16x8_t v_r1 = vcombine_u16(vqmovn_u32(vld1q_u32(row1 + x)), vqmovn_u32(vld1q_u32(row1 + x + 4)));
            uint16x8_t v_r2 = vcombine_u16(vqmovn_u32(vld1q_u32(row2 + x)), vqmovn_u32(vld1q_u32(row2 + x + 4)));
            uint16x8_t v_r3 = vcombine_u16(vqmovn_u32(vld1q_u32(row3 + x)), vqmovn_u32(vld1q_u32(row3 + x + 4)));
            uint16x8_t v_r4 = vcombine_u16(vqmovn_u32(vld1q_u32(row4 + x)), vqmovn_u32(vld1q_u32(row4 + x + 4)));

            // r0 + r4 + 2*r2 + 4*(r1 + r2 + r3)  ==  r0 + 4*r1 + 6*r2 + 4*r3 + r4
            v_r0 = vqaddq_u16(vqaddq_u16(v_r0, v_r4), vqaddq_u16(v_r2, v_r2));
            v_r1 = vqaddq_u16(vqaddq_u16(v_r1, v_r2), v_r3);
            uint16x8_t v_dst0 = vqaddq_u16(v_r0, vshlq_n_u16(v_r1, 2));

            // second group of 8 lanes
            v_r0 = vcombine_u16(vqmovn_u32(vld1q_u32(row0 + x + 8)), vqmovn_u32(vld1q_u32(row0 + x + 12)));
            v_r1 = vcombine_u16(vqmovn_u32(vld1q_u32(row1 + x + 8)), vqmovn_u32(vld1q_u32(row1 + x + 12)));
            v_r2 = vcombine_u16(vqmovn_u32(vld1q_u32(row2 + x + 8)), vqmovn_u32(vld1q_u32(row2 + x + 12)));
            v_r3 = vcombine_u16(vqmovn_u32(vld1q_u32(row3 + x + 8)), vqmovn_u32(vld1q_u32(row3 + x + 12)));
            v_r4 = vcombine_u16(vqmovn_u32(vld1q_u32(row4 + x + 8)), vqmovn_u32(vld1q_u32(row4 + x + 12)));

            v_r0 = vqaddq_u16(vqaddq_u16(v_r0, v_r4), vqaddq_u16(v_r2, v_r2));
            v_r1 = vqaddq_u16(vqaddq_u16(v_r1, v_r2), v_r3);
            uint16x8_t v_dst1 = vqaddq_u16(v_r0, vshlq_n_u16(v_r1, 2));

            // (sum + 128) >> 8, then saturating-narrow to uchar
            vst1q_u8(dst + x, vcombine_u8(vqmovn_u16(vshrq_n_u16(vaddq_u16(v_dst0, v_delta), 8)),
                                          vqmovn_u16(vshrq_n_u16(vaddq_u16(v_dst1, v_delta), 8))));
        }

        return x;
    }
};
// NEON vertical pass of pyrDown for 16-bit unsigned images: combines five int
// rows with the 1-4-6-4-1 Gaussian kernel, normalizes by 256 with rounding,
// and stores results saturated to ushort (vqmovun clamps negatives to 0).
// Returns the number of elements processed.
struct PyrDownVec_32s16u
{
    int operator()(int** src, ushort* dst, int, int width) const
    {
        int x = 0;
        const int *row0 = src[0], *row1 = src[1], *row2 = src[2], *row3 = src[3], *row4 = src[4];
        int32x4_t v_delta = vdupq_n_s32(128);   // rounding bias before >>8

        for( ; x <= width - 8; x += 8 )
        {
            int32x4_t v_r00 = vld1q_s32(row0 + x), v_r01 = vld1q_s32(row0 + x + 4);
            int32x4_t v_r10 = vld1q_s32(row1 + x), v_r11 = vld1q_s32(row1 + x + 4);
            int32x4_t v_r20 = vld1q_s32(row2 + x), v_r21 = vld1q_s32(row2 + x + 4);
            int32x4_t v_r30 = vld1q_s32(row3 + x), v_r31 = vld1q_s32(row3 + x + 4);
            int32x4_t v_r40 = vld1q_s32(row4 + x), v_r41 = vld1q_s32(row4 + x + 4);

            // r0 + r4 + 2*r2 + 4*(r1 + r2 + r3)  ==  r0 + 4*r1 + 6*r2 + 4*r3 + r4
            v_r00 = vaddq_s32(vqaddq_s32(v_r00, v_r40), vqaddq_s32(v_r20, v_r20));
            v_r10 = vaddq_s32(vqaddq_s32(v_r10, v_r20), v_r30);
            int32x4_t v_dst0 = vshrq_n_s32(vaddq_s32(vqaddq_s32(v_r00, vshlq_n_s32(v_r10, 2)), v_delta), 8);

            v_r01 = vaddq_s32(vqaddq_s32(v_r01, v_r41), vqaddq_s32(v_r21, v_r21));
            v_r11 = vaddq_s32(vqaddq_s32(v_r11, v_r21), v_r31);
            int32x4_t v_dst1 = vshrq_n_s32(vaddq_s32(vqaddq_s32(v_r01, vshlq_n_s32(v_r11, 2)), v_delta), 8);

            vst1q_u16(dst + x, vcombine_u16(vqmovun_s32(v_dst0), vqmovun_s32(v_dst1)));
        }

        return x;
    }
};
// NEON vertical pass of pyrDown for 16-bit signed images: combines five int
// rows with the 1-4-6-4-1 Gaussian kernel, normalizes by 256 with rounding,
// and stores results saturated to short. Identical to PyrDownVec_32s16u
// except for the final signed narrowing (vqmovn_s32 vs vqmovun_s32).
// Returns the number of elements processed.
struct PyrDownVec_32s16s
{
    int operator()(int** src, short* dst, int, int width) const
    {
        int x = 0;
        const int *row0 = src[0], *row1 = src[1], *row2 = src[2], *row3 = src[3], *row4 = src[4];
        int32x4_t v_delta = vdupq_n_s32(128);   // rounding bias before >>8

        for( ; x <= width - 8; x += 8 )
        {
            int32x4_t v_r00 = vld1q_s32(row0 + x), v_r01 = vld1q_s32(row0 + x + 4);
            int32x4_t v_r10 = vld1q_s32(row1 + x), v_r11 = vld1q_s32(row1 + x + 4);
            int32x4_t v_r20 = vld1q_s32(row2 + x), v_r21 = vld1q_s32(row2 + x + 4);
            int32x4_t v_r30 = vld1q_s32(row3 + x), v_r31 = vld1q_s32(row3 + x + 4);
            int32x4_t v_r40 = vld1q_s32(row4 + x), v_r41 = vld1q_s32(row4 + x + 4);

            // r0 + r4 + 2*r2 + 4*(r1 + r2 + r3)  ==  r0 + 4*r1 + 6*r2 + 4*r3 + r4
            v_r00 = vaddq_s32(vqaddq_s32(v_r00, v_r40), vqaddq_s32(v_r20, v_r20));
            v_r10 = vaddq_s32(vqaddq_s32(v_r10, v_r20), v_r30);
            int32x4_t v_dst0 = vshrq_n_s32(vaddq_s32(vqaddq_s32(v_r00, vshlq_n_s32(v_r10, 2)), v_delta), 8);

            v_r01 = vaddq_s32(vqaddq_s32(v_r01, v_r41), vqaddq_s32(v_r21, v_r21));
            v_r11 = vaddq_s32(vqaddq_s32(v_r11, v_r21), v_r31);
            int32x4_t v_dst1 = vshrq_n_s32(vaddq_s32(vqaddq_s32(v_r01, vshlq_n_s32(v_r11, 2)), v_delta), 8);

            vst1q_s16(dst + x, vcombine_s16(vqmovn_s32(v_dst0), vqmovn_s32(v_dst1)));
        }

        return x;
    }
};
// NEON vertical pass of pyrDown for float images: combines five float rows
// with the 1-4-6-4-1 Gaussian kernel and scales by 1/256.
// Returns the number of elements processed; the caller finishes the tail.
struct PyrDownVec_32f
{
    int operator()(float** src, float* dst, int, int width) const
    {
        int x = 0;
        const float *row0 = src[0], *row1 = src[1], *row2 = src[2], *row3 = src[3], *row4 = src[4];
        float32x4_t v_4 = vdupq_n_f32(4.0f), v_scale = vdupq_n_f32(1.f/256.0f);

        for( ; x <= width - 8; x += 8 )
        {
            float32x4_t v_r0 = vld1q_f32(row0 + x);
            float32x4_t v_r1 = vld1q_f32(row1 + x);
            float32x4_t v_r2 = vld1q_f32(row2 + x);
            float32x4_t v_r3 = vld1q_f32(row3 + x);
            float32x4_t v_r4 = vld1q_f32(row4 + x);

            // (r0 + r4 + 2*r2) + 4*(r1 + r2 + r3)  ==  r0 + 4*r1 + 6*r2 + 4*r3 + r4
            v_r0 = vaddq_f32(vaddq_f32(v_r0, v_r4), vaddq_f32(v_r2, v_r2));
            v_r1 = vaddq_f32(vaddq_f32(v_r1, v_r2), v_r3);
            vst1q_f32(dst + x, vmulq_f32(vmlaq_f32(v_r0, v_4, v_r1), v_scale));

            // second group of 4 lanes, same scheme
            v_r0 = vld1q_f32(row0 + x + 4);
            v_r1 = vld1q_f32(row1 + x + 4);
            v_r2 = vld1q_f32(row2 + x + 4);
            v_r3 = vld1q_f32(row3 + x + 4);
            v_r4 = vld1q_f32(row4 + x + 4);

            v_r0 = vaddq_f32(vaddq_f32(v_r0, v_r4), vaddq_f32(v_r2, v_r2));
            v_r1 = vaddq_f32(vaddq_f32(v_r1, v_r2), v_r3);
            vst1q_f32(dst + x + 4, vmulq_f32(vmlaq_f32(v_r0, v_4, v_r1), v_scale));
        }

        return x;
    }
};
// NEON vertical pass of pyrUp for float images. Produces TWO destination
// rows at once: dst0 = (r0 + 6*r1 + r2)/64 and dst1 = 4*(r1 + r2)/64.
// NOTE: the `dst` argument is actually an array of two row pointers smuggled
// in as float* (the pyrUp_ caller passes (T*)dsts) — hence the cast below.
// Returns the number of elements processed; the caller finishes the tail.
struct PyrUpVec_32f
{
    int operator()(float** src, float* dst, int, int width) const
    {
        int x = 0;

        float ** dsts = (float **)dst;   // see note above: two output rows
        const float *row0 = src[0], *row1 = src[1], *row2 = src[2];
        float *dst0 = dsts[0], *dst1 = dsts[1];
        float32x4_t v_6 = vdupq_n_f32(6.0f), v_scale = vdupq_n_f32(1.f/64.0f), v_scale4 = vmulq_n_f32(v_scale, 4.0f);

        for( ; x <= width - 8; x += 8 )
        {
            float32x4_t v_r0 = vld1q_f32(row0 + x);
            float32x4_t v_r1 = vld1q_f32(row1 + x);
            float32x4_t v_r2 = vld1q_f32(row2 + x);

            vst1q_f32(dst1 + x, vmulq_f32(v_scale4, vaddq_f32(v_r1, v_r2)));
            vst1q_f32(dst0 + x, vmulq_f32(v_scale, vaddq_f32(vmlaq_f32(v_r0, v_6, v_r1), v_r2)));

            // second group of 4 lanes
            v_r0 = vld1q_f32(row0 + x + 4);
            v_r1 = vld1q_f32(row1 + x + 4);
            v_r2 = vld1q_f32(row2 + x + 4);

            vst1q_f32(dst1 + x + 4, vmulq_f32(v_scale4, vaddq_f32(v_r1, v_r2)));
            vst1q_f32(dst0 + x + 4, vmulq_f32(v_scale, vaddq_f32(vmlaq_f32(v_r0, v_6, v_r1), v_r2)));
        }

        return x;
    }
};
#else
typedef NoVec<int, uchar> PyrDownVec_32s8u;
typedef NoVec<int, ushort> PyrDownVec_32s16u;
typedef NoVec<int, short> PyrDownVec_32s16s;
typedef NoVec<float, float> PyrDownVec_32f;
typedef NoVec<float, float> PyrUpVec_32f;
#endif
template<class CastOp, class VecOp> void
@ -325,6 +504,7 @@ pyrUp_( const Mat& _src, Mat& _dst, int)
AutoBuffer<int> _dtab(ssize.width*cn);
int* dtab = _dtab;
WT* rows[PU_SZ];
T* dsts[2];
CastOp castOp;
VecOp vecOp;
@ -385,8 +565,9 @@ pyrUp_( const Mat& _src, Mat& _dst, int)
for( k = 0; k < PU_SZ; k++ )
rows[k] = buf + ((y - PU_SZ/2 + k - sy0) % PU_SZ)*bufstep;
row0 = rows[0]; row1 = rows[1]; row2 = rows[2];
dsts[0] = dst0; dsts[1] = dst1;
x = vecOp(rows, dst0, (int)_dst.step, dsize.width);
x = vecOp(rows, (T*)dsts, (int)_dst.step, dsize.width);
for( ; x < dsize.width; x++ )
{
T t1 = castOp((row1[x] + row2[x])*4);
@ -561,9 +742,9 @@ void cv::pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, int borde
if( depth == CV_8U )
func = pyrDown_<FixPtCast<uchar, 8>, PyrDownVec_32s8u>;
else if( depth == CV_16S )
func = pyrDown_<FixPtCast<short, 8>, NoVec<int, short> >;
func = pyrDown_<FixPtCast<short, 8>, PyrDownVec_32s16s >;
else if( depth == CV_16U )
func = pyrDown_<FixPtCast<ushort, 8>, NoVec<int, ushort> >;
func = pyrDown_<FixPtCast<ushort, 8>, PyrDownVec_32s16u >;
else if( depth == CV_32F )
func = pyrDown_<FltCast<float, 8>, PyrDownVec_32f>;
else if( depth == CV_64F )
@ -636,7 +817,7 @@ void cv::pyrUp( InputArray _src, OutputArray _dst, const Size& _dsz, int borderT
else if( depth == CV_16U )
func = pyrUp_<FixPtCast<ushort, 6>, NoVec<int, ushort> >;
else if( depth == CV_32F )
func = pyrUp_<FltCast<float, 6>, NoVec<float, float> >;
func = pyrUp_<FltCast<float, 6>, PyrUpVec_32f >;
else if( depth == CV_64F )
func = pyrUp_<FltCast<double, 6>, NoVec<double, double> >;
else

@ -132,8 +132,8 @@ struct ColumnSum :
SUM = &sum[0];
if( sumCount == 0 )
{
for( i = 0; i < width; i++ )
SUM[i] = 0;
memset((void*)SUM, 0, width*sizeof(ST));
for( ; sumCount < ksize - 1; sumCount++, src++ )
{
const ST* Sp = (const ST*)src[0];
@ -247,13 +247,16 @@ struct ColumnSum<int, uchar> :
#if CV_SSE2
if(haveSSE2)
{
for( ; i < width-4; i+=4 )
for( ; i <= width-4; i+=4 )
{
__m128i _sum = _mm_loadu_si128((const __m128i*)(SUM+i));
__m128i _sp = _mm_loadu_si128((const __m128i*)(Sp+i));
_mm_storeu_si128((__m128i*)(SUM+i),_mm_add_epi32(_sum, _sp));
}
}
#elif CV_NEON
for( ; i <= width - 4; i+=4 )
vst1q_s32(SUM + i, vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i)));
#endif
for( ; i < width; i++ )
SUM[i] += Sp[i];
@ -277,7 +280,7 @@ struct ColumnSum<int, uchar> :
if(haveSSE2)
{
const __m128 scale4 = _mm_set1_ps((float)_scale);
for( ; i < width-8; i+=8 )
for( ; i <= width-8; i+=8 )
{
__m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i));
__m128i _sm1 = _mm_loadu_si128((const __m128i*)(Sm+i+4));
@ -298,6 +301,22 @@ struct ColumnSum<int, uchar> :
_mm_storeu_si128((__m128i*)(SUM+i+4),_mm_sub_epi32(_s01,_sm1));
}
}
#elif CV_NEON
float32x4_t v_scale = vdupq_n_f32((float)_scale);
for( ; i <= width-8; i+=8 )
{
int32x4_t v_s0 = vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i));
int32x4_t v_s01 = vaddq_s32(vld1q_s32(SUM + i + 4), vld1q_s32(Sp + i + 4));
uint32x4_t v_s0d = cv_vrndq_u32_f32(vmulq_f32(vcvtq_f32_s32(v_s0), v_scale));
uint32x4_t v_s01d = cv_vrndq_u32_f32(vmulq_f32(vcvtq_f32_s32(v_s01), v_scale));
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(v_s0d), vqmovn_u32(v_s01d));
vst1_u8(D + i, vqmovn_u16(v_dst));
vst1q_s32(SUM + i, vsubq_s32(v_s0, vld1q_s32(Sm + i)));
vst1q_s32(SUM + i + 4, vsubq_s32(v_s01, vld1q_s32(Sm + i + 4)));
}
#endif
for( ; i < width; i++ )
{
@ -312,7 +331,7 @@ struct ColumnSum<int, uchar> :
#if CV_SSE2
if(haveSSE2)
{
for( ; i < width-8; i+=8 )
for( ; i <= width-8; i+=8 )
{
__m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i));
__m128i _sm1 = _mm_loadu_si128((const __m128i*)(Sm+i+4));
@ -330,6 +349,18 @@ struct ColumnSum<int, uchar> :
_mm_storeu_si128((__m128i*)(SUM+i+4),_mm_sub_epi32(_s01,_sm1));
}
}
#elif CV_NEON
for( ; i <= width-8; i+=8 )
{
int32x4_t v_s0 = vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i));
int32x4_t v_s01 = vaddq_s32(vld1q_s32(SUM + i + 4), vld1q_s32(Sp + i + 4));
uint16x8_t v_dst = vcombine_u16(vqmovun_s32(v_s0), vqmovun_s32(v_s01));
vst1_u8(D + i, vqmovn_u16(v_dst));
vst1q_s32(SUM + i, vsubq_s32(v_s0, vld1q_s32(Sm + i)));
vst1q_s32(SUM + i + 4, vsubq_s32(v_s01, vld1q_s32(Sm + i + 4)));
}
#endif
for( ; i < width; i++ )
@ -390,13 +421,16 @@ struct ColumnSum<int, short> :
#if CV_SSE2
if(haveSSE2)
{
for( ; i < width-4; i+=4 )
for( ; i <= width-4; i+=4 )
{
__m128i _sum = _mm_loadu_si128((const __m128i*)(SUM+i));
__m128i _sp = _mm_loadu_si128((const __m128i*)(Sp+i));
_mm_storeu_si128((__m128i*)(SUM+i),_mm_add_epi32(_sum, _sp));
}
}
#elif CV_NEON
for( ; i <= width - 4; i+=4 )
vst1q_s32(SUM + i, vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i)));
#endif
for( ; i < width; i++ )
SUM[i] += Sp[i];
@ -420,7 +454,7 @@ struct ColumnSum<int, short> :
if(haveSSE2)
{
const __m128 scale4 = _mm_set1_ps((float)_scale);
for( ; i < width-8; i+=8 )
for( ; i <= width-8; i+=8 )
{
__m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i));
__m128i _sm1 = _mm_loadu_si128((const __m128i*)(Sm+i+4));
@ -439,6 +473,20 @@ struct ColumnSum<int, short> :
_mm_storeu_si128((__m128i*)(SUM+i+4), _mm_sub_epi32(_s01,_sm1));
}
}
#elif CV_NEON
float32x4_t v_scale = vdupq_n_f32((float)_scale);
for( ; i <= width-8; i+=8 )
{
int32x4_t v_s0 = vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i));
int32x4_t v_s01 = vaddq_s32(vld1q_s32(SUM + i + 4), vld1q_s32(Sp + i + 4));
int32x4_t v_s0d = cv_vrndq_s32_f32(vmulq_f32(vcvtq_f32_s32(v_s0), v_scale));
int32x4_t v_s01d = cv_vrndq_s32_f32(vmulq_f32(vcvtq_f32_s32(v_s01), v_scale));
vst1q_s16(D + i, vcombine_s16(vqmovn_s32(v_s0d), vqmovn_s32(v_s01d)));
vst1q_s32(SUM + i, vsubq_s32(v_s0, vld1q_s32(Sm + i)));
vst1q_s32(SUM + i + 4, vsubq_s32(v_s01, vld1q_s32(Sm + i + 4)));
}
#endif
for( ; i < width; i++ )
{
@ -453,7 +501,7 @@ struct ColumnSum<int, short> :
#if CV_SSE2
if(haveSSE2)
{
for( ; i < width-8; i+=8 )
for( ; i <= width-8; i+=8 )
{
__m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i));
@ -470,6 +518,17 @@ struct ColumnSum<int, short> :
_mm_storeu_si128((__m128i*)(SUM+i+4),_mm_sub_epi32(_s01,_sm1));
}
}
#elif CV_NEON
for( ; i <= width-8; i+=8 )
{
int32x4_t v_s0 = vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i));
int32x4_t v_s01 = vaddq_s32(vld1q_s32(SUM + i + 4), vld1q_s32(Sp + i + 4));
vst1q_s16(D + i, vcombine_s16(vqmovn_s32(v_s0), vqmovn_s32(v_s01)));
vst1q_s32(SUM + i, vsubq_s32(v_s0, vld1q_s32(Sm + i)));
vst1q_s32(SUM + i + 4, vsubq_s32(v_s01, vld1q_s32(Sm + i + 4)));
}
#endif
for( ; i < width; i++ )
@ -537,6 +596,9 @@ struct ColumnSum<int, ushort> :
_mm_storeu_si128((__m128i*)(SUM+i), _mm_add_epi32(_sum, _sp));
}
}
#elif CV_NEON
for( ; i <= width - 4; i+=4 )
vst1q_s32(SUM + i, vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i)));
#endif
for( ; i < width; i++ )
SUM[i] += Sp[i];
@ -578,6 +640,20 @@ struct ColumnSum<int, ushort> :
_mm_storeu_si128((__m128i*)(SUM+i), _mm_sub_epi32(_s0,_sm));
}
}
#elif CV_NEON
float32x4_t v_scale = vdupq_n_f32((float)_scale);
for( ; i <= width-8; i+=8 )
{
int32x4_t v_s0 = vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i));
int32x4_t v_s01 = vaddq_s32(vld1q_s32(SUM + i + 4), vld1q_s32(Sp + i + 4));
uint32x4_t v_s0d = cv_vrndq_u32_f32(vmulq_f32(vcvtq_f32_s32(v_s0), v_scale));
uint32x4_t v_s01d = cv_vrndq_u32_f32(vmulq_f32(vcvtq_f32_s32(v_s01), v_scale));
vst1q_u16(D + i, vcombine_u16(vqmovn_u32(v_s0d), vqmovn_u32(v_s01d)));
vst1q_s32(SUM + i, vsubq_s32(v_s0, vld1q_s32(Sm + i)));
vst1q_s32(SUM + i + 4, vsubq_s32(v_s01, vld1q_s32(Sm + i + 4)));
}
#endif
for( ; i < width; i++ )
{
@ -608,6 +684,17 @@ struct ColumnSum<int, ushort> :
_mm_storeu_si128((__m128i*)(SUM+i), _mm_sub_epi32(_s0,_sm));
}
}
#elif CV_NEON
for( ; i <= width-8; i+=8 )
{
int32x4_t v_s0 = vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i));
int32x4_t v_s01 = vaddq_s32(vld1q_s32(SUM + i + 4), vld1q_s32(Sp + i + 4));
vst1q_u16(D + i, vcombine_u16(vqmovun_s32(v_s0), vqmovun_s32(v_s01)));
vst1q_s32(SUM + i, vsubq_s32(v_s0, vld1q_s32(Sm + i)));
vst1q_s32(SUM + i + 4, vsubq_s32(v_s01, vld1q_s32(Sm + i + 4)));
}
#endif
for( ; i < width; i++ )
@ -626,6 +713,166 @@ struct ColumnSum<int, ushort> :
std::vector<int> sum;
};
template<>
struct ColumnSum<int, float> :
        public BaseColumnFilter
{
    // Vertical (column) pass of the box filter producing CV_32F output from
    // CV_32S row sums. A running per-column sum over `ksize` rows is kept in
    // `sum`; each output row is emitted as SUM*scale (float), then the window
    // is slid by adding the newest row (Sp) and subtracting the oldest (Sm).
    ColumnSum( int _ksize, int _anchor, double _scale ) :
        BaseColumnFilter()
    {
        ksize = _ksize;
        anchor = _anchor;
        scale = _scale;
        sumCount = 0;
    }

    virtual void reset() { sumCount = 0; }

    virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width)
    {
        int i;
        int* SUM;
        bool haveScale = scale != 1;
        double _scale = scale;

#if CV_SSE2
        bool haveSSE2 = checkHardwareSupport(CV_CPU_SSE2);
#endif

        if( width != (int)sum.size() )
        {
            sum.resize(width);
            sumCount = 0;
        }

        SUM = &sum[0];
        if( sumCount == 0 )
        {
            // Prime the running sums with the first ksize-1 rows.
            memset((void *)SUM, 0, sizeof(int) * width);
            for( ; sumCount < ksize - 1; sumCount++, src++ )
            {
                const int* Sp = (const int*)src[0];
                i = 0;
#if CV_SSE2
                if(haveSSE2)
                {
                    // <= so the final complete 4-lane chunk is vectorized,
                    // consistent with the other ColumnSum specializations
                    for( ; i <= width-4; i+=4 )
                    {
                        __m128i _sum = _mm_loadu_si128((const __m128i*)(SUM+i));
                        __m128i _sp = _mm_loadu_si128((const __m128i*)(Sp+i));
                        _mm_storeu_si128((__m128i*)(SUM+i), _mm_add_epi32(_sum, _sp));
                    }
                }
#elif CV_NEON
                for( ; i <= width - 4; i+=4 )
                    vst1q_s32(SUM + i, vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i)));
#endif
                for( ; i < width; i++ )
                    SUM[i] += Sp[i];
            }
        }
        else
        {
            // Running sums were already primed by a previous call.
            CV_Assert( sumCount == ksize-1 );
            src += ksize-1;
        }

        for( ; count--; src++ )
        {
            const int * Sp = (const int*)src[0];
            const int * Sm = (const int*)src[1-ksize];
            float* D = (float*)dst;
            if( haveScale )
            {
                i = 0;
#if CV_SSE2
                if(haveSSE2)
                {
                    const __m128 scale4 = _mm_set1_ps((float)_scale);
                    // <= keeps the last full chunk vectorized (was i < width-4)
                    for( ; i <= width-4; i+=4)
                    {
                        __m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i));
                        __m128i _s0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i)),
                                                    _mm_loadu_si128((const __m128i*)(Sp+i)));
                        _mm_storeu_ps(D+i, _mm_mul_ps(scale4, _mm_cvtepi32_ps(_s0)));
                        _mm_storeu_si128((__m128i*)(SUM+i), _mm_sub_epi32(_s0,_sm));
                    }
                }
#elif CV_NEON
                float32x4_t v_scale = vdupq_n_f32((float)_scale);
                for( ; i <= width-8; i+=8 )
                {
                    int32x4_t v_s0 = vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i));
                    int32x4_t v_s01 = vaddq_s32(vld1q_s32(SUM + i + 4), vld1q_s32(Sp + i + 4));

                    vst1q_f32(D + i, vmulq_f32(vcvtq_f32_s32(v_s0), v_scale));
                    vst1q_f32(D + i + 4, vmulq_f32(vcvtq_f32_s32(v_s01), v_scale));

                    vst1q_s32(SUM + i, vsubq_s32(v_s0, vld1q_s32(Sm + i)));
                    vst1q_s32(SUM + i + 4, vsubq_s32(v_s01, vld1q_s32(Sm + i + 4)));
                }
#endif
                for( ; i < width; i++ )
                {
                    int s0 = SUM[i] + Sp[i];
                    D[i] = (float)(s0*_scale);
                    SUM[i] = s0 - Sm[i];
                }
            }
            else
            {
                i = 0;
#if CV_SSE2
                if(haveSSE2)
                {
                    // <= keeps the last full chunk vectorized (was i < width-4)
                    for( ; i <= width-4; i+=4)
                    {
                        __m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i));
                        __m128i _s0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i)),
                                                    _mm_loadu_si128((const __m128i*)(Sp+i)));
                        _mm_storeu_ps(D+i, _mm_cvtepi32_ps(_s0));
                        _mm_storeu_si128((__m128i*)(SUM+i), _mm_sub_epi32(_s0,_sm));
                    }
                }
#elif CV_NEON
                for( ; i <= width-8; i+=8 )
                {
                    int32x4_t v_s0 = vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i));
                    int32x4_t v_s01 = vaddq_s32(vld1q_s32(SUM + i + 4), vld1q_s32(Sp + i + 4));

                    vst1q_f32(D + i, vcvtq_f32_s32(v_s0));
                    vst1q_f32(D + i + 4, vcvtq_f32_s32(v_s01));

                    vst1q_s32(SUM + i, vsubq_s32(v_s0, vld1q_s32(Sm + i)));
                    vst1q_s32(SUM + i + 4, vsubq_s32(v_s01, vld1q_s32(Sm + i + 4)));
                }
#endif
                for( ; i < width; i++ )
                {
                    int s0 = SUM[i] + Sp[i];
                    D[i] = (float)(s0);
                    SUM[i] = s0 - Sm[i];
                }
            }
            dst += dststep;
        }
    }

    double scale;
    int sumCount;
    std::vector<int> sum;
};
#ifdef HAVE_OPENCL
#define DIVUP(total, grain) ((total + grain - 1) / (grain))
@ -1360,6 +1607,21 @@ static inline void histogram_sub_simd( const HT x[16], HT y[16] )
_mm_store_si128(ry+1, r1);
}
#elif CV_NEON
#define MEDIAN_HAVE_SIMD 1
static inline void histogram_add_simd( const HT x[16], HT y[16] )
{
vst1q_u16(y, vaddq_u16(vld1q_u16(x), vld1q_u16(y)));
vst1q_u16(y + 8, vaddq_u16(vld1q_u16(x + 8), vld1q_u16(y + 8)));
}
// Subtracts the 16-bin histogram x from y (y[i] -= x[i]).
// BUGFIX: the operands were reversed (vsubq_u16(x, y) computed y = x - y),
// which contradicts the scalar histogram_sub and the SSE2 histogram_sub_simd
// that both subtract x FROM y. The minuend must be y, the subtrahend x.
static inline void histogram_sub_simd( const HT x[16], HT y[16] )
{
    vst1q_u16(y, vsubq_u16(vld1q_u16(y), vld1q_u16(x)));
    vst1q_u16(y + 8, vsubq_u16(vld1q_u16(y + 8), vld1q_u16(x + 8)));
}
#else
#define MEDIAN_HAVE_SIMD 0
#endif
@ -1413,7 +1675,7 @@ medianBlur_8u_O1( const Mat& _src, Mat& _dst, int ksize )
HT* h_coarse = alignPtr(&_h_coarse[0], 16);
HT* h_fine = alignPtr(&_h_fine[0], 16);
#if MEDIAN_HAVE_SIMD
volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2) || checkHardwareSupport(CV_CPU_NEON);
#endif
for( int x = 0; x < _dst.cols; x += STRIPE_SIZE )
@ -1861,6 +2123,71 @@ struct MinMaxVec32f
}
};
#elif CV_NEON
struct MinMaxVec8u
{
typedef uchar value_type;
typedef uint8x16_t arg_type;
enum { SIZE = 16 };
arg_type load(const uchar* ptr) { return vld1q_u8(ptr); }
void store(uchar* ptr, arg_type val) { vst1q_u8(ptr, val); }
void operator()(arg_type& a, arg_type& b) const
{
arg_type t = a;
a = vminq_u8(a, b);
b = vmaxq_u8(b, t);
}
};
struct MinMaxVec16u
{
typedef ushort value_type;
typedef uint16x8_t arg_type;
enum { SIZE = 8 };
arg_type load(const ushort* ptr) { return vld1q_u16(ptr); }
void store(ushort* ptr, arg_type val) { vst1q_u16(ptr, val); }
void operator()(arg_type& a, arg_type& b) const
{
arg_type t = a;
a = vminq_u16(a, b);
b = vmaxq_u16(b, t);
}
};
struct MinMaxVec16s
{
typedef short value_type;
typedef int16x8_t arg_type;
enum { SIZE = 8 };
arg_type load(const short* ptr) { return vld1q_s16(ptr); }
void store(short* ptr, arg_type val) { vst1q_s16(ptr, val); }
void operator()(arg_type& a, arg_type& b) const
{
arg_type t = a;
a = vminq_s16(a, b);
b = vmaxq_s16(b, t);
}
};
struct MinMaxVec32f
{
typedef float value_type;
typedef float32x4_t arg_type;
enum { SIZE = 4 };
arg_type load(const float* ptr) { return vld1q_f32(ptr); }
void store(float* ptr, arg_type val) { vst1q_f32(ptr, val); }
void operator()(arg_type& a, arg_type& b) const
{
arg_type t = a;
a = vminq_f32(a, b);
b = vmaxq_f32(b, t);
}
};
#else
@ -1887,7 +2214,7 @@ medianBlur_SortNet( const Mat& _src, Mat& _dst, int m )
int i, j, k, cn = _src.channels();
Op op;
VecOp vop;
volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2) || checkHardwareSupport(CV_CPU_NEON);
if( m == 3 )
{
@ -2203,7 +2530,7 @@ void cv::medianBlur( InputArray _src0, OutputArray _dst, int ksize )
#endif
bool useSortNet = ksize == 3 || (ksize == 5
#if !CV_SSE2
#if !(CV_SSE2 || CV_NEON)
&& src0.depth() > CV_8U
#endif
);
@ -2237,7 +2564,8 @@ void cv::medianBlur( InputArray _src0, OutputArray _dst, int ksize )
CV_Assert( src.depth() == CV_8U && (cn == 1 || cn == 3 || cn == 4) );
double img_size_mp = (double)(src0.total())/(1 << 20);
if( ksize <= 3 + (img_size_mp < 1 ? 12 : img_size_mp < 4 ? 6 : 2)*(MEDIAN_HAVE_SIMD && checkHardwareSupport(CV_CPU_SSE2) ? 1 : 3))
if( ksize <= 3 + (img_size_mp < 1 ? 12 : img_size_mp < 4 ? 6 : 2)*
(MEDIAN_HAVE_SIMD && (checkHardwareSupport(CV_CPU_SSE2) || checkHardwareSupport(CV_CPU_NEON)) ? 1 : 3))
medianBlur_8u_Om( src, dst, ksize );
else
medianBlur_8u_O1( src, dst, ksize );

@ -264,6 +264,74 @@ thresh_8u( const Mat& _src, Mat& _dst, uchar thresh, uchar maxval, int type )
}
}
}
#elif CV_NEON
uint8x16_t v_thresh = vdupq_n_u8(thresh), v_maxval = vdupq_n_u8(maxval);
switch( type )
{
case THRESH_BINARY:
for( i = 0; i < roi.height; i++ )
{
const uchar* src = _src.ptr() + src_step*i;
uchar* dst = _dst.ptr() + dst_step*i;
for ( j_scalar = 0; j_scalar <= roi.width - 16; j_scalar += 16)
vst1q_u8(dst + j_scalar, vandq_u8(vcgtq_u8(vld1q_u8(src + j_scalar), v_thresh), v_maxval));
}
break;
case THRESH_BINARY_INV:
for( i = 0; i < roi.height; i++ )
{
const uchar* src = _src.ptr() + src_step*i;
uchar* dst = _dst.ptr() + dst_step*i;
for ( j_scalar = 0; j_scalar <= roi.width - 16; j_scalar += 16)
vst1q_u8(dst + j_scalar, vandq_u8(vcleq_u8(vld1q_u8(src + j_scalar), v_thresh), v_maxval));
}
break;
case THRESH_TRUNC:
for( i = 0; i < roi.height; i++ )
{
const uchar* src = _src.ptr() + src_step*i;
uchar* dst = _dst.ptr() + dst_step*i;
for ( j_scalar = 0; j_scalar <= roi.width - 16; j_scalar += 16)
vst1q_u8(dst + j_scalar, vminq_u8(vld1q_u8(src + j_scalar), v_thresh));
}
break;
case THRESH_TOZERO:
for( i = 0; i < roi.height; i++ )
{
const uchar* src = _src.ptr() + src_step*i;
uchar* dst = _dst.ptr() + dst_step*i;
for ( j_scalar = 0; j_scalar <= roi.width - 16; j_scalar += 16)
{
uint8x16_t v_src = vld1q_u8(src + j_scalar), v_mask = vcgtq_u8(v_src, v_thresh);
vst1q_u8(dst + j_scalar, vandq_u8(v_mask, v_src));
}
}
break;
case THRESH_TOZERO_INV:
for( i = 0; i < roi.height; i++ )
{
const uchar* src = _src.ptr() + src_step*i;
uchar* dst = _dst.ptr() + dst_step*i;
for ( j_scalar = 0; j_scalar <= roi.width - 16; j_scalar += 16)
{
uint8x16_t v_src = vld1q_u8(src + j_scalar), v_mask = vcleq_u8(v_src, v_thresh);
vst1q_u8(dst + j_scalar, vandq_u8(v_mask, v_src));
}
}
break;
default:
return CV_Error( CV_StsBadArg, "" );
}
#endif
if( j_scalar < roi.width )
@ -382,6 +450,14 @@ thresh_16s( const Mat& _src, Mat& _dst, short thresh, short maxval, int type )
_mm_storeu_si128((__m128i*)(dst + j + 8), v1 );
}
}
#elif CV_NEON
int16x8_t v_thresh = vdupq_n_s16(thresh), v_maxval = vdupq_n_s16(maxval);
for( ; j <= roi.width - 8; j += 8 )
{
uint16x8_t v_mask = vcgtq_s16(vld1q_s16(src + j), v_thresh);
vst1q_s16(dst + j, vandq_s16(vreinterpretq_s16_u16(v_mask), v_maxval));
}
#endif
for( ; j < roi.width; j++ )
@ -410,6 +486,14 @@ thresh_16s( const Mat& _src, Mat& _dst, short thresh, short maxval, int type )
_mm_storeu_si128((__m128i*)(dst + j + 8), v1 );
}
}
#elif CV_NEON
int16x8_t v_thresh = vdupq_n_s16(thresh), v_maxval = vdupq_n_s16(maxval);
for( ; j <= roi.width - 8; j += 8 )
{
uint16x8_t v_mask = vcleq_s16(vld1q_s16(src + j), v_thresh);
vst1q_s16(dst + j, vandq_s16(vreinterpretq_s16_u16(v_mask), v_maxval));
}
#endif
for( ; j < roi.width; j++ )
@ -436,6 +520,11 @@ thresh_16s( const Mat& _src, Mat& _dst, short thresh, short maxval, int type )
_mm_storeu_si128((__m128i*)(dst + j + 8), v1 );
}
}
#elif CV_NEON
int16x8_t v_thresh = vdupq_n_s16(thresh);
for( ; j <= roi.width - 8; j += 8 )
vst1q_s16(dst + j, vminq_s16(vld1q_s16(src + j), v_thresh));
#endif
for( ; j < roi.width; j++ )
@ -462,6 +551,15 @@ thresh_16s( const Mat& _src, Mat& _dst, short thresh, short maxval, int type )
_mm_storeu_si128((__m128i*)(dst + j + 8), v1 );
}
}
#elif CV_NEON
int16x8_t v_thresh = vdupq_n_s16(thresh);
for( ; j <= roi.width - 8; j += 8 )
{
int16x8_t v_src = vld1q_s16(src + j);
uint16x8_t v_mask = vcgtq_s16(v_src, v_thresh);
vst1q_s16(dst + j, vandq_s16(vreinterpretq_s16_u16(v_mask), v_src));
}
#endif
for( ; j < roi.width; j++ )
@ -491,6 +589,15 @@ thresh_16s( const Mat& _src, Mat& _dst, short thresh, short maxval, int type )
_mm_storeu_si128((__m128i*)(dst + j + 8), v1 );
}
}
#elif CV_NEON
int16x8_t v_thresh = vdupq_n_s16(thresh);
for( ; j <= roi.width - 8; j += 8 )
{
int16x8_t v_src = vld1q_s16(src + j);
uint16x8_t v_mask = vcleq_s16(v_src, v_thresh);
vst1q_s16(dst + j, vandq_s16(vreinterpretq_s16_u16(v_mask), v_src));
}
#endif
for( ; j < roi.width; j++ )
{
@ -576,6 +683,16 @@ thresh_32f( const Mat& _src, Mat& _dst, float thresh, float maxval, int type )
_mm_storeu_ps( dst + j + 4, v1 );
}
}
#elif CV_NEON
float32x4_t v_thresh = vdupq_n_f32(thresh);
uint32x4_t v_maxval = vreinterpretq_u32_f32(vdupq_n_f32(maxval));
for( ; j <= roi.width - 4; j += 4 )
{
float32x4_t v_src = vld1q_f32(src + j);
uint32x4_t v_dst = vandq_u32(vcgtq_f32(v_src, v_thresh), v_maxval);
vst1q_f32(dst + j, vreinterpretq_f32_u32(v_dst));
}
#endif
for( ; j < roi.width; j++ )
@ -604,6 +721,16 @@ thresh_32f( const Mat& _src, Mat& _dst, float thresh, float maxval, int type )
_mm_storeu_ps( dst + j + 4, v1 );
}
}
#elif CV_NEON
float32x4_t v_thresh = vdupq_n_f32(thresh);
uint32x4_t v_maxval = vreinterpretq_u32_f32(vdupq_n_f32(maxval));
for( ; j <= roi.width - 4; j += 4 )
{
float32x4_t v_src = vld1q_f32(src + j);
uint32x4_t v_dst = vandq_u32(vcleq_f32(v_src, v_thresh), v_maxval);
vst1q_f32(dst + j, vreinterpretq_f32_u32(v_dst));
}
#endif
for( ; j < roi.width; j++ )
@ -630,6 +757,11 @@ thresh_32f( const Mat& _src, Mat& _dst, float thresh, float maxval, int type )
_mm_storeu_ps( dst + j + 4, v1 );
}
}
#elif CV_NEON
float32x4_t v_thresh = vdupq_n_f32(thresh);
for( ; j <= roi.width - 4; j += 4 )
vst1q_f32(dst + j, vminq_f32(vld1q_f32(src + j), v_thresh));
#endif
for( ; j < roi.width; j++ )
@ -656,6 +788,16 @@ thresh_32f( const Mat& _src, Mat& _dst, float thresh, float maxval, int type )
_mm_storeu_ps( dst + j + 4, v1 );
}
}
#elif CV_NEON
float32x4_t v_thresh = vdupq_n_f32(thresh);
for( ; j <= roi.width - 4; j += 4 )
{
float32x4_t v_src = vld1q_f32(src + j);
uint32x4_t v_dst = vandq_u32(vcgtq_f32(v_src, v_thresh),
vreinterpretq_u32_f32(v_src));
vst1q_f32(dst + j, vreinterpretq_f32_u32(v_dst));
}
#endif
for( ; j < roi.width; j++ )
@ -685,6 +827,16 @@ thresh_32f( const Mat& _src, Mat& _dst, float thresh, float maxval, int type )
_mm_storeu_ps( dst + j + 4, v1 );
}
}
#elif CV_NEON
float32x4_t v_thresh = vdupq_n_f32(thresh);
for( ; j <= roi.width - 4; j += 4 )
{
float32x4_t v_src = vld1q_f32(src + j);
uint32x4_t v_dst = vandq_u32(vcleq_f32(v_src, v_thresh),
vreinterpretq_u32_f32(v_src));
vst1q_f32(dst + j, vreinterpretq_f32_u32(v_dst));
}
#endif
for( ; j < roi.width; j++ )
{

@ -1545,4 +1545,63 @@ TEST(Imgproc_InitUndistortMap, accuracy) { CV_UndistortMapTest test; test.safe_r
TEST(Imgproc_GetRectSubPix, accuracy) { CV_GetRectSubPixTest test; test.safe_run(); }
TEST(Imgproc_GetQuadSubPix, accuracy) { CV_GetQuadSubPixTest test; test.safe_run(); }
//////////////////////////////////////////////////////////////////////////
// Reference half-size area resize: every destination pixel is the rounded
// average of the corresponding 2x2 source block, accumulated per channel in
// the wider type WT before saturating back to T.
template <typename T, typename WT>
void resizeArea(const cv::Mat & src, cv::Mat & dst)
{
    const int cn = src.channels();
    const int dwidth = dst.cols * cn;

    for (int y = 0; y < dst.rows; ++y)
    {
        const T * row0 = src.ptr<T>(2 * y);
        const T * row1 = src.ptr<T>(2 * y + 1);
        T * drow = dst.ptr<T>(y);

        for (int x = 0; x < dwidth; x += cn)
        {
            const int sx = 2 * x;
            for (int c = 0; c < cn; ++c)
            {
                WT total = WT(row0[sx + c]) + WT(row0[sx + c + cn])
                         + WT(row1[sx + c]) + WT(row1[sx + c + cn]);
                // "+2" rounds to nearest before the divide-by-4 shift
                drow[x + c] = cv::saturate_cast<T>((total + (WT)2) >> 2);
            }
        }
    }
}
// Checks that cv::resize with INTER_AREA at an exact half-size output
// matches the straightforward 2x2-block averaging reference (resizeArea):
// the INF-norm of the difference must be exactly 0.
TEST(Resize, Area_half)
{
    const int size = 10;
    int types[] = { CV_8UC1, CV_8UC4, CV_16UC1, CV_16UC4 };

    cv::RNG rng(17);  // fixed seed keeps the test deterministic

    for (int i = 0, _size = sizeof(types) / sizeof(types[0]); i < _size; ++i)
    {
        int type = types[i], depth = CV_MAT_DEPTH(type);

        SCOPED_TRACE(depth);

        cv::Mat src(size, size, type), dst_actual(size >> 1, size >> 1, type),
            dst_reference(size >> 1, size >> 1, type);
        rng.fill(src, cv::RNG::UNIFORM, 0, 1000, true);

        // Build the reference result with a wider accumulator type per depth.
        if (depth == CV_8U)
            resizeArea<uchar, ushort>(src, dst_reference);
        else if (depth == CV_16U)
            resizeArea<ushort, int>(src, dst_reference);
        else
            CV_Assert(0);  // only 8U and 16U depths are covered here

        cv::resize(src, dst_actual, dst_actual.size(), 0, 0, cv::INTER_AREA);

        // Require a bit-exact match with the reference implementation.
        ASSERT_EQ(0, cvtest::norm(dst_reference, dst_actual, cv::NORM_INF));
    }
}
/* End of file. */

@ -733,19 +733,25 @@ void CV_Remap_Test::generate_test_data()
case CV_32FC2:
{
MatIterator_<Vec2f> begin_x = mapx.begin<Vec2f>(), end_x = mapx.end<Vec2f>();
float fscols = static_cast<float>(std::max(src.cols - 1 + n, 0)),
fsrows = static_cast<float>(std::max(src.rows - 1 + n, 0));
for ( ; begin_x != end_x; ++begin_x)
int width = mapx.cols << 1;
for (int y = 0; y < mapx.rows; ++y)
{
begin_x[0] = rng.uniform(_n, fscols);
begin_x[1] = rng.uniform(_n, fsrows);
float * ptr = mapx.ptr<float>(y);
for (int x = 0; x < width; x += 2)
{
ptr[x] = rng.uniform(_n, fscols);
ptr[x + 1] = rng.uniform(_n, fsrows);
}
}
}
break;
default:
assert(0);
CV_Assert(0);
break;
}
}

Loading…
Cancel
Save