@@ -167,9 +167,23 @@ float calcOrientationHist(
     int i, j, k, len = (radius*2+1)*(radius*2+1);
 
     float expf_scale = -1.f/(2.f * sigma * sigma);
+#if CV_SIMD
+    AutoBuffer<float> bufX(len + v_float32::nlanes);
+    AutoBuffer<float> bufY(len + v_float32::nlanes);
+    AutoBuffer<float> bufO(len + v_float32::nlanes);
+    AutoBuffer<float> bufW(len + v_float32::nlanes);
+    AutoBuffer<float> bufT(n+4 + v_float32::nlanes);
+    float *X = alignPtr(bufX.data(), CV_SIMD_WIDTH);
+    float *Y = alignPtr(bufY.data(), CV_SIMD_WIDTH);
+    float *Mag = X;
+    float *Ori = alignPtr(bufO.data(), CV_SIMD_WIDTH);
+    float *W = alignPtr(bufW.data(), CV_SIMD_WIDTH);
+    float *temphist = alignPtr(bufT.data(), CV_SIMD_WIDTH)+2;
+#else
     AutoBuffer<float> buf(len*4 + n+4);
     float *X = buf.data(), *Y = X + len, *Mag = X, *Ori = Y + len, *W = Ori + len;
     float* temphist = W + len + 2;
+#endif
 
     for( i = 0; i < n; i++ )
         temphist[i] = 0.f;
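
Note (outside the diff): each scratch array in the CV_SIMD branch above is over-allocated by v_float32::nlanes elements and then re-based with alignPtr, so that after the pointer is advanced to the next CV_SIMD_WIDTH boundary at least len valid elements remain for the aligned vector loads and stores used later. A minimal sketch of that pattern, with an illustrative helper name:

    #include <opencv2/core/utility.hpp>  // cv::AutoBuffer, cv::alignPtr

    // Illustrative helper (not from the patch): reserve `len` floats plus `lanes`
    // of slack, then hand back a pointer aligned to `alignBytes` inside the buffer.
    static float* alignedScratch(cv::AutoBuffer<float>& buf, int len, int lanes, int alignBytes)
    {
        buf.allocate(len + lanes);                   // slack compensates for the alignment shift
        return cv::alignPtr(buf.data(), alignBytes); // aligned view of the same storage
    }
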
@@ -201,32 +215,29 @@ float calcOrientationHist(
     cv::hal::magnitude32f(X, Y, Mag, len);
 
     k = 0;
-#if CV_AVX2
+#if CV_SIMD
+    const int vecsize = v_float32::nlanes;
+    v_float32 nd360 = vx_setall_f32(n/360.f);
+    v_int32 __n = vx_setall_s32(n);
+    int CV_DECL_ALIGNED(CV_SIMD_WIDTH) bin_buf[vecsize];
+    float CV_DECL_ALIGNED(CV_SIMD_WIDTH) w_mul_mag_buf[vecsize];
+
+    for( ; k <= len - vecsize; k += vecsize )
     {
-        __m256 __nd360 = _mm256_set1_ps(n/360.f);
-        __m256i __n = _mm256_set1_epi32(n);
-        int CV_DECL_ALIGNED(32) bin_buf[8];
-        float CV_DECL_ALIGNED(32) w_mul_mag_buf[8];
-        for ( ; k <= len - 8; k+=8 )
+        v_float32 w = vx_load_aligned( W + k );
+        v_float32 mag = vx_load_aligned( Mag + k );
+        v_float32 ori = vx_load_aligned( Ori + k );
+        v_int32 bin = v_round( nd360 * ori );
+
+        bin = v_select(bin >= __n, bin - __n, bin);
+        bin = v_select(bin < vx_setzero_s32(), bin + __n, bin);
+
+        w = w * mag;
+        v_store_aligned(bin_buf, bin);
+        v_store_aligned(w_mul_mag_buf, w);
+        for(int vi = 0; vi < vecsize; vi++)
         {
-            __m256i __bin = _mm256_cvtps_epi32(_mm256_mul_ps(__nd360, _mm256_loadu_ps(&Ori[k])));
-
-            __bin = _mm256_sub_epi32(__bin, _mm256_andnot_si256(_mm256_cmpgt_epi32(__n, __bin), __n));
-            __bin = _mm256_add_epi32(__bin, _mm256_and_si256(__n, _mm256_cmpgt_epi32(_mm256_setzero_si256(), __bin)));
-
-            __m256 __w_mul_mag = _mm256_mul_ps(_mm256_loadu_ps(&W[k]), _mm256_loadu_ps(&Mag[k]));
-
-            _mm256_store_si256((__m256i *) bin_buf, __bin);
-            _mm256_store_ps(w_mul_mag_buf, __w_mul_mag);
-
-            temphist[bin_buf[0]] += w_mul_mag_buf[0];
-            temphist[bin_buf[1]] += w_mul_mag_buf[1];
-            temphist[bin_buf[2]] += w_mul_mag_buf[2];
-            temphist[bin_buf[3]] += w_mul_mag_buf[3];
-            temphist[bin_buf[4]] += w_mul_mag_buf[4];
-            temphist[bin_buf[5]] += w_mul_mag_buf[5];
-            temphist[bin_buf[6]] += w_mul_mag_buf[6];
-            temphist[bin_buf[7]] += w_mul_mag_buf[7];
+            temphist[bin_buf[vi]] += w_mul_mag_buf[vi];
         }
     }
 #endif
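
Note (outside the diff): per lane, the vector loop above quantizes each gradient direction into one of n bins, wraps the bin index into [0, n), and adds the Gaussian-weighted magnitude to that bin. A scalar sketch of the same update (function name illustrative; Ori, Mag, W and temphist as in the hunk):

    #include <opencv2/core.hpp>  // cvRound

    // Scalar sketch of the accumulation that the SIMD loop performs per lane.
    static void accumulateOrientationHist(const float* Ori, const float* Mag, const float* W,
                                          int len, int n, float* temphist)
    {
        for( int k = 0; k < len; k++ )
        {
            int bin = cvRound((n/360.f)*Ori[k]);   // direction in degrees -> bin index
            if( bin >= n ) bin -= n;               // wrap bin n back to 0
            if( bin < 0 )  bin += n;               // wrap negative bins
            temphist[bin] += W[k]*Mag[k];          // Gaussian weight times magnitude
        }
    }
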
@@ -247,34 +258,20 @@ float calcOrientationHist(
     temphist[n+1] = temphist[1];
 
     i = 0;
-#if CV_AVX2
+#if CV_SIMD
+    v_float32 d_1_16 = vx_setall_f32(1.f/16.f);
+    v_float32 d_4_16 = vx_setall_f32(4.f/16.f);
+    v_float32 d_6_16 = vx_setall_f32(6.f/16.f);
+    for( ; i <= n - v_float32::nlanes; i += v_float32::nlanes )
     {
-        __m256 __d_1_16 = _mm256_set1_ps(1.f/16.f);
-        __m256 __d_4_16 = _mm256_set1_ps(4.f/16.f);
-        __m256 __d_6_16 = _mm256_set1_ps(6.f/16.f);
-        for( ; i <= n - 8; i+=8 )
-        {
-#if CV_FMA3
-            __m256 __hist = _mm256_fmadd_ps(
-                _mm256_add_ps(_mm256_loadu_ps(&temphist[i-2]), _mm256_loadu_ps(&temphist[i+2])),
-                __d_1_16,
-                _mm256_fmadd_ps(
-                    _mm256_add_ps(_mm256_loadu_ps(&temphist[i-1]), _mm256_loadu_ps(&temphist[i+1])),
-                    __d_4_16,
-                    _mm256_mul_ps(_mm256_loadu_ps(&temphist[i]), __d_6_16)));
-#else
-            __m256 __hist = _mm256_add_ps(
-                _mm256_mul_ps(
-                    _mm256_add_ps(_mm256_loadu_ps(&temphist[i-2]), _mm256_loadu_ps(&temphist[i+2])),
-                    __d_1_16),
-                _mm256_add_ps(
-                    _mm256_mul_ps(
-                        _mm256_add_ps(_mm256_loadu_ps(&temphist[i-1]), _mm256_loadu_ps(&temphist[i+1])),
-                        __d_4_16),
-                    _mm256_mul_ps(_mm256_loadu_ps(&temphist[i]), __d_6_16)));
-#endif
-            _mm256_storeu_ps(&hist[i], __hist);
-        }
+        v_float32 tn2 = vx_load_aligned(temphist + i-2);
+        v_float32 tn1 = vx_load(temphist + i-1);
+        v_float32 t0 = vx_load(temphist + i);
+        v_float32 t1 = vx_load(temphist + i+1);
+        v_float32 t2 = vx_load(temphist + i+2);
+        v_float32 _hist = v_fma(tn2 + t2, d_1_16,
+                                v_fma(tn1 + t1, d_4_16, t0 * d_6_16));
+        v_store(hist + i, _hist);
     }
 #endif
     for( ; i < n; i++ )
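
Note (outside the diff): both the removed AVX2 branch and the new universal-intrinsic branch above apply the same circular [1 4 6 4 1]/16 smoothing filter to the raw orientation histogram, reading the padded temphist and writing the final hist. A scalar sketch of that filter (function name illustrative; temphist is assumed to be padded so indices -2..n+1 are valid, as set up just before this hunk):

    // Scalar sketch of the smoothing the SIMD loop performs per element.
    static void smoothOrientationHist(const float* temphist, float* hist, int n)
    {
        for( int i = 0; i < n; i++ )
        {
            hist[i] = (temphist[i-2] + temphist[i+2])*(1.f/16.f) +
                      (temphist[i-1] + temphist[i+1])*(4.f/16.f) +
                      temphist[i]*(6.f/16.f);
        }
    }
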
@@ -623,91 +620,65 @@ void calcSIFTDescriptor(
     cv::hal::exp32f(W, W, len);
 
     k = 0;
-#if CV_AVX2
+#if CV_SIMD
     {
-        int CV_DECL_ALIGNED(32) idx_buf[8];
-        float CV_DECL_ALIGNED(32) rco_buf[64];
-        const __m256 __ori = _mm256_set1_ps(ori);
-        const __m256 __bins_per_rad = _mm256_set1_ps(bins_per_rad);
-        const __m256i __n = _mm256_set1_epi32(n);
-        for( ; k <= len - 8; k+=8 )
+        const int vecsize = v_float32::nlanes;
+        int CV_DECL_ALIGNED(CV_SIMD_WIDTH) idx_buf[vecsize];
+        float CV_DECL_ALIGNED(CV_SIMD_WIDTH) rco_buf[8*vecsize];
+        const v_float32 __ori = vx_setall_f32(ori);
+        const v_float32 __bins_per_rad = vx_setall_f32(bins_per_rad);
+        const v_int32 __n = vx_setall_s32(n);
+        const v_int32 __1 = vx_setall_s32(1);
+        const v_int32 __d_plus_2 = vx_setall_s32(d+2);
+        const v_int32 __n_plus_2 = vx_setall_s32(n+2);
+        for( ; k <= len - vecsize; k += vecsize )
         {
-            __m256 __rbin = _mm256_loadu_ps(&RBin[k]);
-            __m256 __cbin = _mm256_loadu_ps(&CBin[k]);
-            __m256 __obin = _mm256_mul_ps(_mm256_sub_ps(_mm256_loadu_ps(&Ori[k]), __ori), __bins_per_rad);
-            __m256 __mag = _mm256_mul_ps(_mm256_loadu_ps(&Mag[k]), _mm256_loadu_ps(&W[k]));
-
-            __m256 __r0 = _mm256_floor_ps(__rbin);
-            __rbin = _mm256_sub_ps(__rbin, __r0);
-            __m256 __c0 = _mm256_floor_ps(__cbin);
-            __cbin = _mm256_sub_ps(__cbin, __c0);
-            __m256 __o0 = _mm256_floor_ps(__obin);
-            __obin = _mm256_sub_ps(__obin, __o0);
-
-            __m256i __o0i = _mm256_cvtps_epi32(__o0);
-            __o0i = _mm256_add_epi32(__o0i, _mm256_and_si256(__n, _mm256_cmpgt_epi32(_mm256_setzero_si256(), __o0i)));
-            __o0i = _mm256_sub_epi32(__o0i, _mm256_andnot_si256(_mm256_cmpgt_epi32(__n, __o0i), __n));
-
-            __m256 __v_r1 = _mm256_mul_ps(__mag, __rbin);
-            __m256 __v_r0 = _mm256_sub_ps(__mag, __v_r1);
-
-            __m256 __v_rc11 = _mm256_mul_ps(__v_r1, __cbin);
-            __m256 __v_rc10 = _mm256_sub_ps(__v_r1, __v_rc11);
-
-            __m256 __v_rc01 = _mm256_mul_ps(__v_r0, __cbin);
-            __m256 __v_rc00 = _mm256_sub_ps(__v_r0, __v_rc01);
-
-            __m256 __v_rco111 = _mm256_mul_ps(__v_rc11, __obin);
-            __m256 __v_rco110 = _mm256_sub_ps(__v_rc11, __v_rco111);
-
-            __m256 __v_rco101 = _mm256_mul_ps(__v_rc10, __obin);
-            __m256 __v_rco100 = _mm256_sub_ps(__v_rc10, __v_rco101);
-
-            __m256 __v_rco011 = _mm256_mul_ps(__v_rc01, __obin);
-            __m256 __v_rco010 = _mm256_sub_ps(__v_rc01, __v_rco011);
-
-            __m256 __v_rco001 = _mm256_mul_ps(__v_rc00, __obin);
-            __m256 __v_rco000 = _mm256_sub_ps(__v_rc00, __v_rco001);
-
-            __m256i __one = _mm256_set1_epi32(1);
-            __m256i __idx = _mm256_add_epi32(
-                _mm256_mullo_epi32(
-                    _mm256_add_epi32(
-                        _mm256_mullo_epi32(_mm256_add_epi32(_mm256_cvtps_epi32(__r0), __one), _mm256_set1_epi32(d + 2)),
-                        _mm256_add_epi32(_mm256_cvtps_epi32(__c0), __one)),
-                    _mm256_set1_epi32(n + 2)),
-                __o0i);
-
-            _mm256_store_si256((__m256i *)idx_buf, __idx);
-
-            _mm256_store_ps(&(rco_buf[0]), __v_rco000);
-            _mm256_store_ps(&(rco_buf[8]), __v_rco001);
-            _mm256_store_ps(&(rco_buf[16]), __v_rco010);
-            _mm256_store_ps(&(rco_buf[24]), __v_rco011);
-            _mm256_store_ps(&(rco_buf[32]), __v_rco100);
-            _mm256_store_ps(&(rco_buf[40]), __v_rco101);
-            _mm256_store_ps(&(rco_buf[48]), __v_rco110);
-            _mm256_store_ps(&(rco_buf[56]), __v_rco111);
-#define HIST_SUM_HELPER(id) \
-            hist[idx_buf[(id)]] += rco_buf[(id)]; \
-            hist[idx_buf[(id)]+1] += rco_buf[8 + (id)]; \
-            hist[idx_buf[(id)]+(n+2)] += rco_buf[16 + (id)]; \
-            hist[idx_buf[(id)]+(n+3)] += rco_buf[24 + (id)]; \
-            hist[idx_buf[(id)]+(d+2)*(n+2)] += rco_buf[32 + (id)]; \
-            hist[idx_buf[(id)]+(d+2)*(n+2)+1] += rco_buf[40 + (id)]; \
-            hist[idx_buf[(id)]+(d+3)*(n+2)] += rco_buf[48 + (id)]; \
-            hist[idx_buf[(id)]+(d+3)*(n+2)+1] += rco_buf[56 + (id)];
-
-            HIST_SUM_HELPER(0);
-            HIST_SUM_HELPER(1);
-            HIST_SUM_HELPER(2);
-            HIST_SUM_HELPER(3);
-            HIST_SUM_HELPER(4);
-            HIST_SUM_HELPER(5);
-            HIST_SUM_HELPER(6);
-            HIST_SUM_HELPER(7);
-
-#undef HIST_SUM_HELPER
+            v_float32 rbin = vx_load(RBin + k);
+            v_float32 cbin = vx_load(CBin + k);
+            v_float32 obin = (vx_load(Ori + k) - __ori) * __bins_per_rad;
+            v_float32 mag = vx_load(Mag + k) * vx_load(W + k);
+
+            v_int32 r0 = v_floor(rbin);
+            v_int32 c0 = v_floor(cbin);
+            v_int32 o0 = v_floor(obin);
+            rbin -= v_cvt_f32(r0);
+            cbin -= v_cvt_f32(c0);
+            obin -= v_cvt_f32(o0);
+
+            o0 = v_select(o0 < vx_setzero_s32(), o0 + __n, o0);
+            o0 = v_select(o0 >= __n, o0 - __n, o0);
+
+            v_float32 v_r1 = mag*rbin, v_r0 = mag - v_r1;
+            v_float32 v_rc11 = v_r1*cbin, v_rc10 = v_r1 - v_rc11;
+            v_float32 v_rc01 = v_r0*cbin, v_rc00 = v_r0 - v_rc01;
+            v_float32 v_rco111 = v_rc11*obin, v_rco110 = v_rc11 - v_rco111;
+            v_float32 v_rco101 = v_rc10*obin, v_rco100 = v_rc10 - v_rco101;
+            v_float32 v_rco011 = v_rc01*obin, v_rco010 = v_rc01 - v_rco011;
+            v_float32 v_rco001 = v_rc00*obin, v_rco000 = v_rc00 - v_rco001;
+
+            v_int32 idx = v_fma(v_fma(r0+__1, __d_plus_2, c0+__1), __n_plus_2, o0);
+            v_store_aligned(idx_buf, idx);
+
+            v_store_aligned(rco_buf, v_rco000);
+            v_store_aligned(rco_buf+vecsize, v_rco001);
+            v_store_aligned(rco_buf+vecsize*2, v_rco010);
+            v_store_aligned(rco_buf+vecsize*3, v_rco011);
+            v_store_aligned(rco_buf+vecsize*4, v_rco100);
+            v_store_aligned(rco_buf+vecsize*5, v_rco101);
+            v_store_aligned(rco_buf+vecsize*6, v_rco110);
+            v_store_aligned(rco_buf+vecsize*7, v_rco111);
+
+            for(int id = 0; id < vecsize; id++)
+            {
+                hist[idx_buf[id]] += rco_buf[id];
+                hist[idx_buf[id]+1] += rco_buf[vecsize + id];
+                hist[idx_buf[id]+(n+2)] += rco_buf[2*vecsize + id];
+                hist[idx_buf[id]+(n+3)] += rco_buf[3*vecsize + id];
+                hist[idx_buf[id]+(d+2)*(n+2)] += rco_buf[4*vecsize + id];
+                hist[idx_buf[id]+(d+2)*(n+2)+1] += rco_buf[5*vecsize + id];
+                hist[idx_buf[id]+(d+3)*(n+2)] += rco_buf[6*vecsize + id];
+                hist[idx_buf[id]+(d+3)*(n+2)+1] += rco_buf[7*vecsize + id];
+            }
         }
     }
 #endif
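
Note (outside the diff): per lane, the loop above performs SIFT's trilinear interpolation, splitting each sample's weighted magnitude across the eight neighbouring (row, column, orientation) cells of the (d+2)x(d+2)x(n+2) histogram; the v_fma index corresponds to the scalar expression ((r0+1)*(d+2) + c0+1)*(n+2) + o0. A scalar sketch of one sample's update (function and parameter names illustrative; mag corresponds to Mag[k]*W[k] and obin to (Ori[k]-ori)*bins_per_rad):

    #include <opencv2/core.hpp>  // cvFloor

    // Scalar sketch of the per-sample update that the SIMD loop performs per lane.
    static void addSample(float* hist, float rbin, float cbin, float obin,
                          float mag, int d, int n)
    {
        int r0 = cvFloor(rbin), c0 = cvFloor(cbin), o0 = cvFloor(obin);
        rbin -= r0; cbin -= c0; obin -= o0;      // fractional parts drive the weights
        if( o0 < 0 )  o0 += n;                   // orientation bin wraps circularly
        if( o0 >= n ) o0 -= n;

        // Split mag into 8 trilinear weights (row, then column, then orientation).
        float v_r1 = mag*rbin, v_r0 = mag - v_r1;
        float v_rc11 = v_r1*cbin, v_rc10 = v_r1 - v_rc11;
        float v_rc01 = v_r0*cbin, v_rc00 = v_r0 - v_rc01;
        float v_rco111 = v_rc11*obin, v_rco110 = v_rc11 - v_rco111;
        float v_rco101 = v_rc10*obin, v_rco100 = v_rc10 - v_rco101;
        float v_rco011 = v_rc01*obin, v_rco010 = v_rc01 - v_rco011;
        float v_rco001 = v_rc00*obin, v_rco000 = v_rc00 - v_rco001;

        int idx = ((r0+1)*(d+2) + c0+1)*(n+2) + o0;
        hist[idx]               += v_rco000;
        hist[idx+1]             += v_rco001;
        hist[idx+(n+2)]         += v_rco010;
        hist[idx+(n+3)]         += v_rco011;
        hist[idx+(d+2)*(n+2)]   += v_rco100;
        hist[idx+(d+2)*(n+2)+1] += v_rco101;
        hist[idx+(d+3)*(n+2)]   += v_rco110;
        hist[idx+(d+3)*(n+2)+1] += v_rco111;
    }
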
@@ -766,23 +737,16 @@ void calcSIFTDescriptor(
     float nrm2 = 0;
     len = d*d*n;
     k = 0;
-#if CV_AVX2
+#if CV_SIMD
     {
-        float CV_DECL_ALIGNED(32) nrm2_buf[8];
-        __m256 __nrm2 = _mm256_setzero_ps();
-        __m256 __dst;
-        for( ; k <= len - 8; k += 8 )
+        v_float32 __nrm2 = vx_setzero_f32();
+        v_float32 __dst;
+        for( ; k <= len - v_float32::nlanes; k += v_float32::nlanes )
         {
-            __dst = _mm256_loadu_ps(&dst[k]);
-#if CV_FMA3
-            __nrm2 = _mm256_fmadd_ps(__dst, __dst, __nrm2);
-#else
-            __nrm2 = _mm256_add_ps(__nrm2, _mm256_mul_ps(__dst, __dst));
-#endif
+            __dst = vx_load(dst + k);
+            __nrm2 = v_fma(__dst, __dst, __nrm2);
         }
-        _mm256_store_ps(nrm2_buf, __nrm2);
-        nrm2 = nrm2_buf[0] + nrm2_buf[1] + nrm2_buf[2] + nrm2_buf[3] +
-               nrm2_buf[4] + nrm2_buf[5] + nrm2_buf[6] + nrm2_buf[7];
+        nrm2 = (float)v_reduce_sum(__nrm2);
     }
 #endif
     for( ; k < len; k++ )
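
Note (outside the diff): v_reduce_sum performs the horizontal reduction that the AVX2 branch previously spelled out as nrm2_buf[0] + ... + nrm2_buf[7], so the explicit spill buffer disappears; the trailing for loop shown as context presumably finishes the squared norm over the remaining elements, roughly as in this sketch (function name illustrative):

    // Scalar sketch of the remainder of the squared-norm accumulation.
    static float squaredNormTail(const float* dst, int k, int len, float nrm2)
    {
        for( ; k < len; k++ )
            nrm2 += dst[k]*dst[k];
        return nrm2;
    }
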
@@ -795,7 +759,7 @@ void calcSIFTDescriptor(
     // This code cannot be enabled because it sums nrm2 in a different order,
     // thus producing slightly different results
     {
-        float CV_DECL_ALIGNED(32) nrm2_buf[8];
+        float CV_DECL_ALIGNED(CV_SIMD_WIDTH) nrm2_buf[8];
         __m256 __dst;
         __m256 __nrm2 = _mm256_setzero_ps();
         __m256 __thr = _mm256_set1_ps(thr);
@@ -825,17 +789,17 @@ void calcSIFTDescriptor(
 
 #if 1
     k = 0;
-#if CV_AVX2
+#if CV_SIMD
     {
-        __m256 __dst;
-        __m256 __min = _mm256_setzero_ps();
-        __m256 __max = _mm256_set1_ps(255.0f); // max of uchar
-        __m256 __nrm2 = _mm256_set1_ps(nrm2);
-        for( k = 0; k <= len - 8; k+=8 )
+        v_float32 __dst;
+        v_float32 __min = vx_setzero_f32();
+        v_float32 __max = vx_setall_f32(255.0f); // max of uchar
+        v_float32 __nrm2 = vx_setall_f32(nrm2);
+        for( k = 0; k <= len - v_float32::nlanes; k += v_float32::nlanes )
         {
-            __dst = _mm256_loadu_ps(&dst[k]);
-            __dst = _mm256_min_ps(_mm256_max_ps(_mm256_round_ps(_mm256_mul_ps(__dst, __nrm2), _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC), __min), __max);
-            _mm256_storeu_ps(&dst[k], __dst);
+            __dst = vx_load(dst + k);
+            __dst = v_min(v_max(v_cvt_f32(v_round(__dst * __nrm2)), __min), __max);
+            v_store(dst + k, __dst);
         }
     }
 #endif
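
Note (outside the diff): the loop above scales every descriptor entry by nrm2, rounds it to the nearest integer, and clamps it to the uchar range [0, 255] while the values remain in the float destination buffer. A scalar sketch of the per-element conversion (function name illustrative):

    #include <algorithm>         // std::min, std::max
    #include <opencv2/core.hpp>  // cvRound

    // Scalar sketch of what the SIMD loop does to each element.
    static inline float scaleRoundClamp(float v, float nrm2)
    {
        int q = cvRound(v*nrm2);                      // v_round(__dst * __nrm2)
        return (float)std::min(std::max(q, 0), 255);  // clamp to uchar range, keep as float
    }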