@@ -43,7 +43,7 @@
 #define __OPENCV_CORE_SSE_UTILS_HPP__

 #ifndef __cplusplus
-# error base.hpp header must be compiled as C++
+# error sse_utils.hpp header must be compiled as C++
 #endif

 #if CV_SSE2
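Only the diagnostic text changes in this first hunk: the include-guard block was evidently copied from base.hpp, so compiling sse_utils.hpp as C blamed the wrong header. The remaining hunks are almost all whitespace cleanup (the patch strips trailing blanks, which is why most -/+ pairs below look identical), plus one real copy-paste fix in the SSE4.1 _mm_interleave_epi16, noted where it appears.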
@@ -117,7 +117,7 @@ inline void _mm_deinterleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0,

 inline void _mm_deinterleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1,
                                   __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1)
-{
+{
     __m128i layer1_chunk0 = _mm_unpacklo_epi8(v_r0, v_b0);
     __m128i layer1_chunk1 = _mm_unpackhi_epi8(v_r0, v_b0);
     __m128i layer1_chunk2 = _mm_unpacklo_epi8(v_r1, v_b1);
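Some context on the unchanged code: every deinterleave helper here is built from _mm_unpacklo/_mm_unpackhi passes. One unpack pass is a perfect riffle (interleave) of its two inputs, and iterating the riffle eventually produces its inverse, which is exactly a channel split. A minimal stand-alone sketch of that idea (my illustration, not code from the patch; the buffer name rg and the pass count are mine): four passes deinterleave 16 R,G byte pairs.

#include <emmintrin.h>
#include <cstdint>
#include <cstdio>

int main()
{
    uint8_t rg[32];                      // r0 g0 r1 g1 ... r15 g15
    for (int i = 0; i < 32; ++i)
        rg[i] = (uint8_t)i;

    __m128i v0 = _mm_loadu_si128((const __m128i*)rg);
    __m128i v1 = _mm_loadu_si128((const __m128i*)(rg + 16));

    // One unpack pass riffles v0 with v1; the riffle on 32 elements has
    // order 5, so four passes compute its inverse: a full deinterleave.
    for (int pass = 0; pass < 4; ++pass)
    {
        __m128i lo = _mm_unpacklo_epi8(v0, v1);
        __m128i hi = _mm_unpackhi_epi8(v0, v1);
        v0 = lo;
        v1 = hi;
    }

    uint8_t r[16], g[16];
    _mm_storeu_si128((__m128i*)r, v0);
    _mm_storeu_si128((__m128i*)g, v1);
    for (int i = 0; i < 16; ++i)
        if (r[i] != rg[2 * i] || g[i] != rg[2 * i + 1])
        {
            puts("mismatch");
            return 1;
        }
    puts("deinterleaved OK");
    return 0;
}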
@@ -165,9 +165,9 @@ inline void _mm_deinterleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1,
 }

 inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1)
-{
+{
     __m128i v_mask = _mm_set1_epi16(0x00ff);

     __m128i layer4_chunk0 = _mm_packus_epi16(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask));
     __m128i layer4_chunk2 = _mm_packus_epi16(_mm_srli_epi16(v_r0, 8), _mm_srli_epi16(v_r1, 8));
     __m128i layer4_chunk1 = _mm_packus_epi16(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask));
@@ -177,28 +177,28 @@ inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1)
     __m128i layer3_chunk2 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk0, 8), _mm_srli_epi16(layer4_chunk1, 8));
     __m128i layer3_chunk1 = _mm_packus_epi16(_mm_and_si128(layer4_chunk2, v_mask), _mm_and_si128(layer4_chunk3, v_mask));
     __m128i layer3_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk2, 8), _mm_srli_epi16(layer4_chunk3, 8));

     __m128i layer2_chunk0 = _mm_packus_epi16(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask));
     __m128i layer2_chunk2 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk0, 8), _mm_srli_epi16(layer3_chunk1, 8));
     __m128i layer2_chunk1 = _mm_packus_epi16(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask));
     __m128i layer2_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk2, 8), _mm_srli_epi16(layer3_chunk3, 8));

     __m128i layer1_chunk0 = _mm_packus_epi16(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask));
     __m128i layer1_chunk2 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk0, 8), _mm_srli_epi16(layer2_chunk1, 8));
     __m128i layer1_chunk1 = _mm_packus_epi16(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask));
     __m128i layer1_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk2, 8), _mm_srli_epi16(layer2_chunk3, 8));

     v_r0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask));
     v_g0 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk0, 8), _mm_srli_epi16(layer1_chunk1, 8));
     v_r1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask));
     v_g1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk2, 8), _mm_srli_epi16(layer1_chunk3, 8));
 }

-inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0,
+inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0,
                                 __m128i & v_g1, __m128i & v_b0, __m128i & v_b1)
-{
+{
     __m128i v_mask = _mm_set1_epi16(0x00ff);

     __m128i layer4_chunk0 = _mm_packus_epi16(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask));
     __m128i layer4_chunk3 = _mm_packus_epi16(_mm_srli_epi16(v_r0, 8), _mm_srli_epi16(v_r1, 8));
     __m128i layer4_chunk1 = _mm_packus_epi16(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask));
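The interleave helpers run the construction the other way, with a pack-based split: AND-ing with _mm_set1_epi16(0x00ff) keeps the even-indexed byte of each 16-bit lane, shifting right by 8 exposes the odd-indexed byte, and _mm_packus_epi16 narrows both results back to bytes, losslessly, since every masked or shifted value already fits in [0, 255]. A stand-alone sketch of one such pass (again my illustration, with my own names):

#include <emmintrin.h>
#include <cstdint>
#include <cstdio>

int main()
{
    uint8_t s[32];
    for (int i = 0; i < 32; ++i)
        s[i] = (uint8_t)i;

    __m128i v0 = _mm_loadu_si128((const __m128i*)s);
    __m128i v1 = _mm_loadu_si128((const __m128i*)(s + 16));

    // AND keeps even-indexed bytes, the shift exposes odd-indexed ones,
    // and the unsigned-saturating pack narrows both back to bytes.
    __m128i v_mask = _mm_set1_epi16(0x00ff);
    __m128i even = _mm_packus_epi16(_mm_and_si128(v0, v_mask), _mm_and_si128(v1, v_mask));
    __m128i odd  = _mm_packus_epi16(_mm_srli_epi16(v0, 8), _mm_srli_epi16(v1, 8));

    uint8_t e[16], o[16];
    _mm_storeu_si128((__m128i*)e, even);
    _mm_storeu_si128((__m128i*)o, odd);
    for (int i = 0; i < 16; ++i)
        printf("%2u %2u\n", (unsigned)e[i], (unsigned)o[i]);   // 0 1, 2 3, 4 5, ...
    return 0;
}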
@@ -237,7 +237,7 @@ inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0,

 inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1,
                                 __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1)
-{
+{
     __m128i v_mask = _mm_set1_epi16(0x00ff);

     __m128i layer4_chunk0 = _mm_packus_epi16(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask));
@@ -286,8 +286,8 @@ inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1,
     v_a1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk6, 8), _mm_srli_epi16(layer1_chunk7, 8));
 }

-inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1)
-{
+inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1)
+{
     __m128i layer1_chunk0 = _mm_unpacklo_epi16(v_r0, v_g0);
     __m128i layer1_chunk1 = _mm_unpackhi_epi16(v_r0, v_g0);
     __m128i layer1_chunk2 = _mm_unpacklo_epi16(v_r1, v_g1);
@@ -310,8 +310,8 @@ inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1)
 }

 inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0,
-                                   __m128i & v_g1, __m128i & v_b0, __m128i & v_b1)
-{
+                                   __m128i & v_g1, __m128i & v_b0, __m128i & v_b1)
+{
     __m128i layer1_chunk0 = _mm_unpacklo_epi16(v_r0, v_g1);
     __m128i layer1_chunk1 = _mm_unpackhi_epi16(v_r0, v_g1);
     __m128i layer1_chunk2 = _mm_unpacklo_epi16(v_r1, v_b0);
@@ -342,7 +342,7 @@ inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0,
 }

 inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1,
-                                   __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1)
+                                   __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1)
 {
     __m128i layer1_chunk0 = _mm_unpacklo_epi16(v_r0, v_b0);
     __m128i layer1_chunk1 = _mm_unpackhi_epi16(v_r0, v_b0);
@@ -352,7 +352,7 @@ inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1,
     __m128i layer1_chunk5 = _mm_unpackhi_epi16(v_g0, v_a0);
     __m128i layer1_chunk6 = _mm_unpacklo_epi16(v_g1, v_a1);
     __m128i layer1_chunk7 = _mm_unpackhi_epi16(v_g1, v_a1);

     __m128i layer2_chunk0 = _mm_unpacklo_epi16(layer1_chunk0, layer1_chunk4);
     __m128i layer2_chunk1 = _mm_unpackhi_epi16(layer1_chunk0, layer1_chunk4);
     __m128i layer2_chunk2 = _mm_unpacklo_epi16(layer1_chunk1, layer1_chunk5);
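A reading aid for the deinterleave variants above: the parameters are named for the channels they hold after the call, but on entry the six or eight registers are simply consecutive loads from the interleaved buffer. Each unpack layer pairs register i with register i + N/2 in that load order, which is why the three-channel version starts from _mm_unpacklo_epi16(v_r0, v_g1) (registers 0 and 3 of six) and the four-channel one from _mm_unpacklo_epi16(v_r0, v_b0) (registers 0 and 4 of eight).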
@@ -393,14 +393,14 @@ inline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1)
     __m128i layer3_chunk3 = _mm_packus_epi32(_mm_srli_epi32(v_g0, 16), _mm_srli_epi32(v_g1, 16));

     __m128i layer2_chunk0 = _mm_packus_epi32(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask));
-    __m128i layer2_chunk2 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16));
+    __m128i layer2_chunk2 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16));
     __m128i layer2_chunk1 = _mm_packus_epi32(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask));
-    __m128i layer2_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16));
+    __m128i layer2_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16));

     __m128i layer1_chunk0 = _mm_packus_epi32(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask));
-    __m128i layer1_chunk2 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16));
+    __m128i layer1_chunk2 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16));
     __m128i layer1_chunk1 = _mm_packus_epi32(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask));
-    __m128i layer1_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16));
+    __m128i layer1_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16));

     v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask));
     v_g0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk0, 16), _mm_srli_epi32(layer1_chunk1, 16));
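_mm_interleave_epi16 applies the same even/odd split one granularity up: the mask now keeps the low 16 bits of each 32-bit lane, the shift count becomes 16, and the narrowing pack is _mm_packus_epi32, an SSE4.1 instruction, which is why these variants live inside the CV_SSE4_1 guard closed further down. A stand-alone sketch of one pass (my illustration); masking first pins every value into [0, 65535], so the unsigned-saturating pack is lossless even for values above 32767:

#include <smmintrin.h>   // SSE4.1: _mm_packus_epi32 (compile with -msse4.1)
#include <cstdint>
#include <cstdio>

int main()
{
    uint16_t s[16];
    for (int i = 0; i < 16; ++i)
        s[i] = (uint16_t)(60000 - 1000 * i);   // deliberately above 32767

    __m128i v0 = _mm_loadu_si128((const __m128i*)s);
    __m128i v1 = _mm_loadu_si128((const __m128i*)(s + 8));

    // Even-indexed 16-bit lanes via the mask, odd-indexed via the shift;
    // masked values stay in [0, 65535], so the pack does not saturate.
    __m128i v_mask = _mm_set1_epi32(0x0000ffff);
    __m128i even = _mm_packus_epi32(_mm_and_si128(v0, v_mask), _mm_and_si128(v1, v_mask));
    __m128i odd  = _mm_packus_epi32(_mm_srli_epi32(v0, 16), _mm_srli_epi32(v1, 16));

    uint16_t e[8], o[8];
    _mm_storeu_si128((__m128i*)e, even);
    _mm_storeu_si128((__m128i*)o, odd);
    for (int i = 0; i < 8; ++i)
        printf("%5u %5u\n", (unsigned)e[i], (unsigned)o[i]);   // (60000,59000), (58000,57000), ...
    return 0;
}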
@@ -421,18 +421,18 @@ inline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0,
     __m128i layer3_chunk5 = _mm_packus_epi32(_mm_srli_epi32(v_b0, 16), _mm_srli_epi32(v_b1, 16));

     __m128i layer2_chunk0 = _mm_packus_epi32(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask));
-    __m128i layer2_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16));
+    __m128i layer2_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16));
     __m128i layer2_chunk1 = _mm_packus_epi32(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask));
-    __m128i layer2_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16));
+    __m128i layer2_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16));
     __m128i layer2_chunk2 = _mm_packus_epi32(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask));
-    __m128i layer2_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk4, 16), _mm_srli_epi32(layer3_chunk5, 16));
+    __m128i layer2_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk4, 16), _mm_srli_epi32(layer3_chunk5, 16));

     __m128i layer1_chunk0 = _mm_packus_epi32(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask));
-    __m128i layer1_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16));
+    __m128i layer1_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16));
     __m128i layer1_chunk1 = _mm_packus_epi32(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask));
-    __m128i layer1_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16));
+    __m128i layer1_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16));
     __m128i layer1_chunk2 = _mm_packus_epi32(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask));
-    __m128i layer1_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk4, 16), _mm_srli_epi32(layer2_chunk5, 16));
+    __m128i layer1_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk4, 16), _mm_srli_epi32(layer2_chunk5, 16));

     v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask));
     v_g1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk0, 16), _mm_srli_epi32(layer1_chunk1, 16));
@@ -457,26 +457,26 @@ inline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1,
     __m128i layer3_chunk7 = _mm_packus_epi32(_mm_srli_epi32(v_a0, 16), _mm_srli_epi32(v_a1, 16));

     __m128i layer2_chunk0 = _mm_packus_epi32(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask));
-    __m128i layer2_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16));
+    __m128i layer2_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16));
     __m128i layer2_chunk1 = _mm_packus_epi32(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask));
-    __m128i layer2_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16));
+    __m128i layer2_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16));
     __m128i layer2_chunk2 = _mm_packus_epi32(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask));
-    __m128i layer2_chunk6 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk4, 16), _mm_srli_epi32(layer3_chunk5, 16));
+    __m128i layer2_chunk6 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk4, 16), _mm_srli_epi32(layer3_chunk5, 16));
     __m128i layer2_chunk3 = _mm_packus_epi32(_mm_and_si128(layer3_chunk6, v_mask), _mm_and_si128(layer3_chunk7, v_mask));
-    __m128i layer2_chunk7 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk6, 16), _mm_srli_epi32(layer3_chunk7, 16));
+    __m128i layer2_chunk7 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk6, 16), _mm_srli_epi32(layer3_chunk7, 16));

     __m128i layer1_chunk0 = _mm_packus_epi32(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask));
-    __m128i layer1_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16));
+    __m128i layer1_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16));
     __m128i layer1_chunk1 = _mm_packus_epi32(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask));
-    __m128i layer1_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16));
+    __m128i layer1_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16));
     __m128i layer1_chunk2 = _mm_packus_epi32(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask));
-    __m128i layer1_chunk6 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk4, 16), _mm_srli_epi32(layer2_chunk5, 16));
+    __m128i layer1_chunk6 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk4, 16), _mm_srli_epi32(layer2_chunk5, 16));
     __m128i layer1_chunk3 = _mm_packus_epi32(_mm_and_si128(layer2_chunk6, v_mask), _mm_and_si128(layer2_chunk7, v_mask));
-    __m128i layer1_chunk7 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk6, 16), _mm_srli_epi32(layer2_chunk7, 16));
+    __m128i layer1_chunk7 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk6, 16), _mm_srli_epi32(layer2_chunk7, 16));

     v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask));
     v_b0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk0, 16), _mm_srli_epi32(layer1_chunk1, 16));
-    v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask));
+    v_r1 = _mm_packus_epi32(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask));
     v_b1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk2, 16), _mm_srli_epi32(layer1_chunk3, 16));
     v_g0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask));
     v_a0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk4, 16), _mm_srli_epi32(layer1_chunk5, 16));
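The hunk above contains the patch's one behavioral change. In the four-channel _mm_interleave_epi16 the pack of layer1_chunk2/layer1_chunk3 was assigned to v_r0, an apparent copy-paste slip from the line before: v_r0 was computed correctly and then immediately overwritten, while v_r1 kept its stale input value. Two of the eight output registers were therefore wrong whenever this SSE4.1 path ran; writing the result to v_r1 fixes both.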
@@ -487,12 +487,12 @@ inline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1,
 #endif // CV_SSE4_1

 inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1)
-{
+{
     __m128 layer1_chunk0 = _mm_unpacklo_ps(v_r0, v_g0);
     __m128 layer1_chunk1 = _mm_unpackhi_ps(v_r0, v_g0);
     __m128 layer1_chunk2 = _mm_unpacklo_ps(v_r1, v_g1);
     __m128 layer1_chunk3 = _mm_unpackhi_ps(v_r1, v_g1);

     __m128 layer2_chunk0 = _mm_unpacklo_ps(layer1_chunk0, layer1_chunk2);
     __m128 layer2_chunk1 = _mm_unpackhi_ps(layer1_chunk0, layer1_chunk2);
     __m128 layer2_chunk2 = _mm_unpacklo_ps(layer1_chunk1, layer1_chunk3);
@@ -506,14 +506,14 @@ inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1)

 inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0,
                                 __m128 & v_g1, __m128 & v_b0, __m128 & v_b1)
-{
+{
     __m128 layer1_chunk0 = _mm_unpacklo_ps(v_r0, v_g1);
     __m128 layer1_chunk1 = _mm_unpackhi_ps(v_r0, v_g1);
     __m128 layer1_chunk2 = _mm_unpacklo_ps(v_r1, v_b0);
     __m128 layer1_chunk3 = _mm_unpackhi_ps(v_r1, v_b0);
     __m128 layer1_chunk4 = _mm_unpacklo_ps(v_g0, v_b1);
     __m128 layer1_chunk5 = _mm_unpackhi_ps(v_g0, v_b1);

     __m128 layer2_chunk0 = _mm_unpacklo_ps(layer1_chunk0, layer1_chunk3);
     __m128 layer2_chunk1 = _mm_unpackhi_ps(layer1_chunk0, layer1_chunk3);
     __m128 layer2_chunk2 = _mm_unpacklo_ps(layer1_chunk1, layer1_chunk4);
@@ -531,7 +531,7 @@ inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0,

 inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1,
                                 __m128 & v_b0, __m128 & v_b1, __m128 & v_a0, __m128 & v_a1)
-{
+{
     __m128 layer1_chunk0 = _mm_unpacklo_ps(v_r0, v_b0);
     __m128 layer1_chunk1 = _mm_unpackhi_ps(v_r0, v_b0);
     __m128 layer1_chunk2 = _mm_unpacklo_ps(v_r1, v_b1);
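The float versions mirror the integer logic with _mm_unpacklo_ps/_mm_unpackhi_ps. To close, a hedged usage sketch (my example; it assumes OpenCV's sse_utils.hpp is on the include path and the compiler targets SSE2): deinterleaving eight r,g float pairs loaded as four consecutive registers.

#include <opencv2/core/sse_utils.hpp>   // provides _mm_deinterleave_ps under CV_SSE2
#include <xmmintrin.h>
#include <cstdio>

int main()
{
    float rg[16];   // r0 g0 r1 g1 ... r7 g7
    for (int i = 0; i < 8; ++i)
    {
        rg[2 * i]     = (float)i;
        rg[2 * i + 1] = 100.0f + i;
    }

    // Four sequential loads; after the call v_r0/v_r1 hold the reds
    // and v_g0/v_g1 the greens.
    __m128 v_r0 = _mm_loadu_ps(rg);
    __m128 v_r1 = _mm_loadu_ps(rg + 4);
    __m128 v_g0 = _mm_loadu_ps(rg + 8);
    __m128 v_g1 = _mm_loadu_ps(rg + 12);

    _mm_deinterleave_ps(v_r0, v_r1, v_g0, v_g1);

    float r[8], g[8];
    _mm_storeu_ps(r, v_r0);
    _mm_storeu_ps(r + 4, v_r1);
    _mm_storeu_ps(g, v_g0);
    _mm_storeu_ps(g + 4, v_g1);
    for (int i = 0; i < 8; ++i)
        printf("r=%g g=%g\n", r[i], g[i]);   // r=0..7, g=100..107
    return 0;
}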