|
|
|
@@ -3371,7 +3371,13 @@ struct HLS2RGB_b
|
|
|
|
|
|
|
|
|
HLS2RGB_b(int _dstcn, int _blueIdx, int _hrange) |
|
|
|
|
: dstcn(_dstcn), cvt(3, _blueIdx, (float)_hrange) |
|
|
|
|
{} |
|
|
|
|
{ |
|
|
|
|
#if CV_NEON |
|
|
|
|
v_scale_inv = vdupq_n_f32(1.f/255.f); |
|
|
|
|
v_scale = vdupq_n_f32(255.f); |
|
|
|
|
v_alpha = vdup_n_u8(ColorChannel<uchar>::max()); |
|
|
|
|
#endif |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
// Convert a row of n interleaved 8-bit HLS pixels (src, 3 channels) to
// 8-bit pixels in dst (dstcn channels). Works BLOCK_SIZE pixels at a
// time through a float staging buffer and delegates the color math to
// the float converter `cvt`.
void operator()(const uchar* src, uchar* dst, int n) const
{
|
|
|
@@ -3382,8 +3388,29 @@ struct HLS2RGB_b
|
|
|
|
// Stage 1: widen u8 HLS to float. H is copied as-is; L and S are
// scaled to [0,1]. (Declarations of i, j, dcn and buf are in context
// lines elided by the diff hunk above.)
for( i = 0; i < n; i += BLOCK_SIZE, src += BLOCK_SIZE*3 )
{
int dn = std::min(n - i, (int)BLOCK_SIZE);
j = 0;
// NOTE(review): the next line appears to be pre-patch residue (the
// removed '-' side of the diff hunk); the merged code should contain
// only the `j = 0;` above followed by the CV_NEON block below.
for( j = 0; j < dn*3; j += 3 )
#if CV_NEON
// Vector path: 8 pixels (24 interleaved bytes) per iteration.
for ( ; j <= (dn - 8) * 3; j += 24)
{
// De-interleave 8 HLS pixels, widen u8 -> u16.
uint8x8x3_t v_src = vld3_u8(src + j);
uint16x8_t v_t0 = vmovl_u8(v_src.val[0]),
v_t1 = vmovl_u8(v_src.val[1]),
v_t2 = vmovl_u8(v_src.val[2]);

// Low 4 pixels: H unscaled, L/S multiplied by 1/255.
float32x4x3_t v_dst;
v_dst.val[0] = vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_t0)));
v_dst.val[1] = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_t1))), v_scale_inv);
v_dst.val[2] = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_t2))), v_scale_inv);
vst3q_f32(buf + j, v_dst);

// High 4 pixels, stored 12 floats further on.
v_dst.val[0] = vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_t0)));
v_dst.val[1] = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_t1))), v_scale_inv);
v_dst.val[2] = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_t2))), v_scale_inv);
vst3q_f32(buf + j + 12, v_dst);
}
#endif
// Scalar tail (and the whole loop when CV_NEON is off): same
// H-as-is / L,S-scaled staging as the vector path.
for( ; j < dn*3; j += 3 )
{
buf[j] = src[j];
buf[j+1] = src[j+1]*(1.f/255.f);
|
|
|
@@ -3391,7 +3418,38 @@ struct HLS2RGB_b
|
|
|
|
}
// Stage 2: run the float HLS->RGB conversion in place on the
// staging buffer, then narrow [0,1] floats back to u8.
cvt(buf, buf, dn);

// NOTE(review): the next line appears to be pre-patch residue (the
// removed '-' side of the diff hunk); the merged code should contain
// only the `j = 0;` below followed by the CV_NEON store loop.
for( j = 0; j < dn*3; j += 3, dst += dcn )
j = 0;
#if CV_NEON
// Vector store: 8 pixels per iteration. Scale by 255, round,
// saturate-narrow f32 -> u32 -> u16 -> u8 per channel.
for ( ; j <= (dn - 8) * 3; j += 24, dst += dcn * 8)
{
float32x4x3_t v_src0 = vld3q_f32(buf + j), v_src1 = vld3q_f32(buf + j + 12);
uint8x8_t v_dst0 = vqmovn_u16(vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(vmulq_f32(v_src0.val[0], v_scale))),
vqmovn_u32(cv_vrndq_u32_f32(vmulq_f32(v_src1.val[0], v_scale)))));
uint8x8_t v_dst1 = vqmovn_u16(vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(vmulq_f32(v_src0.val[1], v_scale))),
vqmovn_u32(cv_vrndq_u32_f32(vmulq_f32(v_src1.val[1], v_scale)))));
uint8x8_t v_dst2 = vqmovn_u16(vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(vmulq_f32(v_src0.val[2], v_scale))),
vqmovn_u32(cv_vrndq_u32_f32(vmulq_f32(v_src1.val[2], v_scale)))));

// 4-channel output additionally interleaves the constant alpha.
if (dcn == 4)
{
uint8x8x4_t v_dst;
v_dst.val[0] = v_dst0;
v_dst.val[1] = v_dst1;
v_dst.val[2] = v_dst2;
v_dst.val[3] = v_alpha;
vst4_u8(dst, v_dst);
}
else
{
uint8x8x3_t v_dst;
v_dst.val[0] = v_dst0;
v_dst.val[1] = v_dst1;
v_dst.val[2] = v_dst2;
vst3_u8(dst, v_dst);
}
}
#endif
// Scalar tail store (the alpha write for dcn == 4 is in context
// lines elided by the diff hunk below).
for( ; j < dn*3; j += 3, dst += dcn )
{
dst[0] = saturate_cast<uchar>(buf[j]*255.f);
dst[1] = saturate_cast<uchar>(buf[j+1]*255.f);
|
|
|
@@ -3404,6 +3462,10 @@ struct HLS2RGB_b
|
|
|
|
|
|
|
|
|
int dstcn;     // destination channel count — presumably 3 or 4 (cf. the dcn == 4 branch)
HLS2RGB_f cvt; // float converter that performs the actual HLS->RGB math
#if CV_NEON
// NEON constants, initialized once in the constructor:
// v_scale = 255.f, v_scale_inv = 1/255.f, v_alpha = max u8 alpha.
float32x4_t v_scale, v_scale_inv;
uint8x8_t v_alpha;
#endif
};
|
|
|
|
|
|
|
|
|
|
|
|
|
|