@@ -1123,6 +1123,48 @@ struct cvtScaleAbs_SIMD<uchar, uchar, float>
    }
};
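
// SSE2 specialization for schar input. Scalar equivalent of what the vector loop below
// computes (the caller's scalar loop finishes the remaining width - x elements):
//     dst[i] = saturate_cast<uchar>(std::abs(src[i] * scale + shift));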
template <>
struct cvtScaleAbs_SIMD<schar, uchar, float>
{
    int operator () (const schar * src, uchar * dst, int width,
                     float scale, float shift) const
    {
        int x = 0;

        if (USE_SSE2)
        {
            __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift),
                v_zero_f = _mm_setzero_ps();
            __m128i v_zero_i = _mm_setzero_si128();

            for ( ; x <= width - 16; x += 16)
            {
                __m128i v_src = _mm_loadu_si128((const __m128i *)(src + x));
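                // Sign-extend the 16 schar values to 16-bit: unpacking against zero puts each
                // byte in the high half of a 16-bit lane, and the arithmetic shift right by 8
                // moves it back down with the sign preserved.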
                __m128i v_src_12 = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero_i, v_src), 8),
                        v_src_34 = _mm_srai_epi16(_mm_unpackhi_epi8(v_zero_i, v_src), 8);
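                // Widen each 16-bit value to 32-bit with the same unpack-and-shift trick, convert
                // to float, apply scale and shift, then take the absolute value as max(v, -v).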
                __m128 v_dst1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(
                    _mm_srai_epi32(_mm_unpacklo_epi16(v_zero_i, v_src_12), 16)), v_scale), v_shift);
                v_dst1 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst1), v_dst1);
                __m128 v_dst2 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(
                    _mm_srai_epi32(_mm_unpackhi_epi16(v_zero_i, v_src_12), 16)), v_scale), v_shift);
                v_dst2 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst2), v_dst2);
                __m128 v_dst3 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(
                    _mm_srai_epi32(_mm_unpacklo_epi16(v_zero_i, v_src_34), 16)), v_scale), v_shift);
                v_dst3 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst3), v_dst3);
                __m128 v_dst4 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(
                    _mm_srai_epi32(_mm_unpackhi_epi16(v_zero_i, v_src_34), 16)), v_scale), v_shift);
                v_dst4 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst4), v_dst4);
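
                // _mm_cvtps_epi32 rounds back to int32; packing to int16 (signed saturation) and
                // then to uchar (unsigned saturation) clamps the result, and all 16 bytes are
                // stored in one go.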
                __m128i v_dst_i = _mm_packus_epi16(_mm_packs_epi32(_mm_cvtps_epi32(v_dst1), _mm_cvtps_epi32(v_dst2)),
                                                   _mm_packs_epi32(_mm_cvtps_epi32(v_dst3), _mm_cvtps_epi32(v_dst4)));
                _mm_storeu_si128((__m128i *)(dst + x), v_dst_i);
            }
        }

        return x;
    }
};

template <>
struct cvtScaleAbs_SIMD<ushort, uchar, float>
{
@@ -1242,6 +1284,44 @@ struct cvtScaleAbs_SIMD<float, uchar, float>
    }
};
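
// SSE2 specialization for double input: each pair of doubles is narrowed to float first,
// so the scale/shift/abs arithmetic runs in single precision, 8 elements per iteration.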
template <>
struct cvtScaleAbs_SIMD<double, uchar, float>
{
    int operator () (const double * src, uchar * dst, int width,
                     float scale, float shift) const
    {
        int x = 0;

        if (USE_SSE2)
        {
            __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift),
                v_zero_f = _mm_setzero_ps();
            __m128i v_zero_i = _mm_setzero_si128();

            for ( ; x <= width - 8; x += 8)
            {
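                // _mm_cvtpd_ps turns two doubles into two floats in the low half of a register;
                // _mm_movelh_ps joins two such low halves into one vector of four floats.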
                __m128 v_src1 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x)),
                                              _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2)));
                __m128 v_src2 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)),
                                              _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6)));
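
                // Scale and shift in float, then absolute value as max(v, -v).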
                __m128 v_dst1 = _mm_add_ps(_mm_mul_ps(v_src1, v_scale), v_shift);
                v_dst1 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst1), v_dst1);

                __m128 v_dst2 = _mm_add_ps(_mm_mul_ps(v_src2, v_scale), v_shift);
                v_dst2 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst2), v_dst2);
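
                // Round to int32 and pack down to uchar with saturation; the 8 result bytes sit
                // in the low half of the register, so a single 64-bit store writes them out.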
                __m128i v_dst_i = _mm_packs_epi32(_mm_cvtps_epi32(v_dst1),
                                                  _mm_cvtps_epi32(v_dst2));

                _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst_i, v_zero_i));
            }
        }

        return x;
    }
};

#elif CV_NEON

template <>