|
|
|
@@ -851,6 +851,175 @@ void cv::insertChannel(InputArray _src, InputOutputArray _dst, int coi)
namespace cv
{

template<typename T, typename DT, typename WT>
struct cvtScaleAbs_SSE2
{
    int operator () (const T *, DT *, int, WT, WT) const
    {
        return 0;
    }
};

#if CV_SSE2
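
// All SSE2 specializations below follow one pattern: widen the input to
// 32-bit lanes, convert to float, apply scale and shift, take |v| as
// max(0 - v, v), then round with _mm_cvtps_epi32 (round-to-nearest by
// default) and narrow back to uchar through the saturating packs/packus
// pair, which reproduces the saturate_cast<uchar> semantics of
// cvtScaleAbs. Each operator() returns how many elements it handled, so
// the scalar tail in cvtScaleAbs_ finishes the row; the generic template
// above handles none.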
template <>
struct cvtScaleAbs_SSE2<uchar, uchar, float>
{
    int operator () (const uchar * src, uchar * dst, int width,
                     float scale, float shift) const
    {
        int x = 0;

        if (USE_SSE2)
        {
            __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift),
                v_zero_f = _mm_setzero_ps();
            __m128i v_zero_i = _mm_setzero_si128();

            for ( ; x <= width - 16; x += 16)
            {
                __m128i v_src = _mm_loadu_si128((const __m128i *)(src + x));
                __m128i v_src_12 = _mm_unpacklo_epi8(v_src, v_zero_i), v_src_34 = _mm_unpackhi_epi8(v_src, v_zero_i);
                __m128 v_dst1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src_12, v_zero_i)), v_scale), v_shift);
                v_dst1 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst1), v_dst1);
                __m128 v_dst2 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src_12, v_zero_i)), v_scale), v_shift);
                v_dst2 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst2), v_dst2);
                __m128 v_dst3 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src_34, v_zero_i)), v_scale), v_shift);
                v_dst3 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst3), v_dst3);
                __m128 v_dst4 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src_34, v_zero_i)), v_scale), v_shift);
                v_dst4 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst4), v_dst4);

                __m128i v_dst_i = _mm_packus_epi16(_mm_packs_epi32(_mm_cvtps_epi32(v_dst1), _mm_cvtps_epi32(v_dst2)),
                                                   _mm_packs_epi32(_mm_cvtps_epi32(v_dst3), _mm_cvtps_epi32(v_dst4)));
                _mm_storeu_si128((__m128i *)(dst + x), v_dst_i);
            }
        }

        return x;
    }
};
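
// The ushort and short variants below emit only eight uchar results per
// iteration, so they store the low half of the register with
// _mm_storel_epi64 rather than a full 16-byte store.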
template <>
struct cvtScaleAbs_SSE2<ushort, uchar, float>
{
    int operator () (const ushort * src, uchar * dst, int width,
                     float scale, float shift) const
    {
        int x = 0;

        if (USE_SSE2)
        {
            __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift),
                v_zero_f = _mm_setzero_ps();
            __m128i v_zero_i = _mm_setzero_si128();

            for ( ; x <= width - 8; x += 8)
            {
                __m128i v_src = _mm_loadu_si128((const __m128i *)(src + x));
                __m128 v_dst1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero_i)), v_scale), v_shift);
                v_dst1 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst1), v_dst1);
                __m128 v_dst2 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero_i)), v_scale), v_shift);
                v_dst2 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst2), v_dst2);

                __m128i v_dst_i = _mm_packus_epi16(_mm_packs_epi32(_mm_cvtps_epi32(v_dst1), _mm_cvtps_epi32(v_dst2)), v_zero_i);
                _mm_storel_epi64((__m128i *)(dst + x), v_dst_i);
            }
        }

        return x;
    }
};
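
// A zero-extending unpack would mangle negative inputs; the short
// specialization below sign-extends instead, interleaving the register
// with itself and shifting each 32-bit lane right arithmetically by 16.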
template <>
struct cvtScaleAbs_SSE2<short, uchar, float>
{
    int operator () (const short * src, uchar * dst, int width,
                     float scale, float shift) const
    {
        int x = 0;

        if (USE_SSE2)
        {
            __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift),
                v_zero_f = _mm_setzero_ps();
            __m128i v_zero_i = _mm_setzero_si128();

            for ( ; x <= width - 8; x += 8)
            {
                __m128i v_src = _mm_loadu_si128((const __m128i *)(src + x));
                __m128 v_dst1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_src, v_src), 16)), v_scale), v_shift);
                v_dst1 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst1), v_dst1);
                __m128 v_dst2 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_src, v_src), 16)), v_scale), v_shift);
                v_dst2 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst2), v_dst2);

                __m128i v_dst_i = _mm_packus_epi16(_mm_packs_epi32(_mm_cvtps_epi32(v_dst1), _mm_cvtps_epi32(v_dst2)), v_zero_i);
                _mm_storel_epi64((__m128i *)(dst + x), v_dst_i);
            }
        }

        return x;
    }
};
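
// In the two 32-bit-input specializations below only four results are
// produced per iteration, yet the bound stays at width - 8 rather than
// width - 4: _mm_storel_epi64 writes eight bytes (the upper four are
// packed zeros), so the stricter bound keeps the store inside the row.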
template <>
struct cvtScaleAbs_SSE2<int, uchar, float>
{
    int operator () (const int * src, uchar * dst, int width,
                     float scale, float shift) const
    {
        int x = 0;

        if (USE_SSE2)
        {
            __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift),
                v_zero_f = _mm_setzero_ps();
            __m128i v_zero_i = _mm_setzero_si128();

            for ( ; x <= width - 8; x += 4)
            {
                __m128i v_src = _mm_loadu_si128((const __m128i *)(src + x));
                __m128 v_dst1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);
                v_dst1 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst1), v_dst1);

                __m128i v_dst_i = _mm_packus_epi16(_mm_packs_epi32(_mm_cvtps_epi32(v_dst1), v_zero_i), v_zero_i);
                _mm_storel_epi64((__m128i *)(dst + x), v_dst_i);
            }
        }

        return x;
    }
};

template <>
struct cvtScaleAbs_SSE2<float, uchar, float>
{
    int operator () (const float * src, uchar * dst, int width,
                     float scale, float shift) const
    {
        int x = 0;

        if (USE_SSE2)
        {
            __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift),
                v_zero_f = _mm_setzero_ps();
            __m128i v_zero_i = _mm_setzero_si128();

            for ( ; x <= width - 8; x += 4)
            {
                __m128 v_dst = _mm_add_ps(_mm_mul_ps(_mm_loadu_ps(src + x), v_scale), v_shift);
                v_dst = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst), v_dst);

                __m128i v_dst_i = _mm_packs_epi32(_mm_cvtps_epi32(v_dst), v_zero_i);
                _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst_i, v_zero_i));
            }
        }

        return x;
    }
};
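
// The driver cvtScaleAbs_ below runs the vectorized kernel first, then
// its unrolled and scalar loops pick up from the returned index.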
#endif

template<typename T, typename DT, typename WT> static void
cvtScaleAbs_( const T* src, size_t sstep,
              DT* dst, size_t dstep, Size size,
@@ -858,10 +1027,12 @@ cvtScaleAbs_( const T* src, size_t sstep,
{
    sstep /= sizeof(src[0]);
    dstep /= sizeof(dst[0]);
    cvtScaleAbs_SSE2<T, DT, WT> vop;

    for( ; size.height--; src += sstep, dst += dstep )
    {
        int x = vop(src, dst, size.width, scale, shift);

        #if CV_ENABLE_UNROLLED
        for( ; x <= size.width - 4; x += 4 )
        {
@@ -879,7 +1050,6 @@ cvtScaleAbs_( const T* src,
    }
}

template<typename T, typename DT, typename WT> static void
cvtScale_( const T* src, size_t sstep,
           DT* dst, size_t dstep, Size size,