|
|
@@ -184,82 +184,70 @@ static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*
 static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 4*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
 {
-    DECLARE_ALIGNED_8(uint64_t, AA);
-    DECLARE_ALIGNED_8(uint64_t, DD);
-    int i;
+    /* no special case for mv=(0,0) in 4x*, since it's much less common than in 8x*.
+     * could still save a few cycles, but maybe not worth the complexity. */
 
     assert(x<8 && y<8 && x>=0 && y>=0);
 
-    asm volatile("movd %2, %%mm4\n\t"
-                 "movd %3, %%mm6\n\t"
-                 "punpcklwd %%mm4, %%mm4\n\t"
-                 "punpcklwd %%mm6, %%mm6\n\t"
-                 "punpckldq %%mm4, %%mm4\n\t" /* mm4 = x words */
-                 "punpckldq %%mm6, %%mm6\n\t" /* mm6 = y words */
-                 "movq %%mm4, %%mm5\n\t"
-                 "pmullw %%mm6, %%mm4\n\t" /* mm4 = x * y */
-                 "psllw $3, %%mm5\n\t"
-                 "psllw $3, %%mm6\n\t"
-                 "movq %%mm5, %%mm7\n\t"
-                 "paddw %%mm6, %%mm7\n\t"
-                 "movq %%mm4, %1\n\t" /* DD = x * y */
-                 "psubw %%mm4, %%mm5\n\t" /* mm5 = B = 8x - xy */
-                 "psubw %%mm4, %%mm6\n\t" /* mm6 = C = 8y - xy */
-                 "paddw %4, %%mm4\n\t"
-                 "psubw %%mm7, %%mm4\n\t" /* mm4 = A = xy - (8x+8y) + 64 */
-                 "pxor %%mm7, %%mm7\n\t"
-                 "movq %%mm4, %0\n\t"
-                 : "=m" (AA), "=m" (DD) : "rm" (x), "rm" (y), "m" (ff_pw_64));
-
-    asm volatile(
-        /* mm0 = src[0..3], mm1 = src[1..4] */
-        "movd %0, %%mm0\n\t"
-        "movd %1, %%mm1\n\t"
-        "punpcklbw %%mm7, %%mm0\n\t"
-        "punpcklbw %%mm7, %%mm1\n\t"
-        : : "m" (src[0]), "m" (src[1]));
-
-    for(i=0; i<h; i++) {
-        asm volatile(
-            /* mm2 = A * src[0..3] + B * src[1..4] */
-            "movq %%mm0, %%mm2\n\t"
-            "pmullw %0, %%mm2\n\t"
-            "pmullw %%mm5, %%mm1\n\t"
-            "paddw %%mm1, %%mm2\n\t"
-            : : "m" (AA));
-
-        src += stride;
-        asm volatile(
-            /* mm0 = src[0..3], mm1 = src[1..4] */
-            "movd %0, %%mm0\n\t"
-            "movd %1, %%mm1\n\t"
-            "punpcklbw %%mm7, %%mm0\n\t"
-            "punpcklbw %%mm7, %%mm1\n\t"
-            : : "m" (src[0]), "m" (src[1]));
-
-        asm volatile(
-            /* mm2 += C * src[0..3] + D * src[1..4] */
-            "movq %%mm0, %%mm3\n\t"
-            "movq %%mm1, %%mm4\n\t"
-            "pmullw %%mm6, %%mm3\n\t"
-            "pmullw %0, %%mm4\n\t"
-            "paddw %%mm3, %%mm2\n\t"
-            "paddw %%mm4, %%mm2\n\t"
-            : : "m" (DD));
-
-        asm volatile(
-            /* dst[0..3] = pack((mm2 + 32) >> 6) */
-            "paddw %1, %%mm2\n\t"
-            "psrlw $6, %%mm2\n\t"
-            "packuswb %%mm7, %%mm2\n\t"
-            H264_CHROMA_OP4(%0, %%mm2, %%mm3)
-            "movd %%mm2, %0\n\t"
-            : "=m" (dst[0]) : "m" (ff_pw_32));
-        dst += stride;
-    }
+    asm volatile(
+        "pxor %%mm7, %%mm7 \n\t"
+        "movd %5, %%mm2 \n\t"
+        "movd %6, %%mm3 \n\t"
+        "movq %7, %%mm4 \n\t"
+        "movq %7, %%mm5 \n\t"
+
+        "punpcklwd %%mm2, %%mm2 \n\t"
+        "punpcklwd %%mm3, %%mm3 \n\t"
+        "punpcklwd %%mm2, %%mm2 \n\t"
+        "punpcklwd %%mm3, %%mm3 \n\t"
+        "psubw %%mm2, %%mm4 \n\t"
+        "psubw %%mm3, %%mm5 \n\t"
+
+        "movd (%1), %%mm0 \n\t"
+        "movd 1(%1), %%mm6 \n\t"
+        "add %3, %1 \n\t"
+        "punpcklbw %%mm7, %%mm0 \n\t"
+        "punpcklbw %%mm7, %%mm6 \n\t"
+        "pmullw %%mm4, %%mm0 \n\t"
+        "pmullw %%mm2, %%mm6 \n\t"
+        "paddw %%mm0, %%mm6 \n\t"
+
+        "1: \n\t"
+        "movd (%1), %%mm0 \n\t"
+        "movd 1(%1), %%mm1 \n\t"
+        "add %3, %1 \n\t"
+        "punpcklbw %%mm7, %%mm0 \n\t"
+        "punpcklbw %%mm7, %%mm1 \n\t"
+        "pmullw %%mm4, %%mm0 \n\t"
+        "pmullw %%mm2, %%mm1 \n\t"
+        "paddw %%mm0, %%mm1 \n\t"
+        "movq %%mm1, %%mm0 \n\t"
+        "pmullw %%mm5, %%mm6 \n\t"
+        "pmullw %%mm3, %%mm1 \n\t"
+        "paddw %%mm6, %%mm1 \n\t"
+        "paddw %4, %%mm1 \n\t"
+        "psrlw $6, %%mm1 \n\t"
+        "packuswb %%mm1, %%mm1 \n\t"
+        H264_CHROMA_OP4((%0), %%mm1, %%mm6)
+        "movd %%mm1, (%0) \n\t"
+        "add %3, %0 \n\t"
+
+        "movd (%1), %%mm6 \n\t"
+        "movd 1(%1), %%mm1 \n\t"
+        "add %3, %1 \n\t"
+        "punpcklbw %%mm7, %%mm6 \n\t"
+        "punpcklbw %%mm7, %%mm1 \n\t"
+        "pmullw %%mm4, %%mm6 \n\t"
+        "pmullw %%mm2, %%mm1 \n\t"
+        "paddw %%mm6, %%mm1 \n\t"
+        "movq %%mm1, %%mm6 \n\t"
+        "pmullw %%mm5, %%mm0 \n\t"
+        "pmullw %%mm3, %%mm1 \n\t"
+        "paddw %%mm0, %%mm1 \n\t"
+        "paddw %4, %%mm1 \n\t"
+        "psrlw $6, %%mm1 \n\t"
+        "packuswb %%mm1, %%mm1 \n\t"
+        H264_CHROMA_OP4((%0), %%mm1, %%mm0)
+        "movd %%mm1, (%0) \n\t"
+        "add %3, %0 \n\t"
+        "sub $2, %2 \n\t"
+        "jnz 1b \n\t"
+        : "+r"(dst), "+r"(src), "+r"(h)
+        : "r"(stride), "m"(ff_pw_32), "m"(x), "m"(y), "m"(ff_pw_8)
+    );
 }
 
 #ifdef H264_CHROMA_MC2_TMPL
|
|
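For readers less fluent in MMX, here is a plain-C sketch of what the rewritten 4xH loop computes in the "put" case (H264_CHROMA_OP4 expanding to nothing): the bilinear chroma weights noted in the old code's comments, A = xy - 8x - 8y + 64, B = 8x - xy, C = 8y - xy, D = xy, applied per row with +32 rounding and a shift by 6. This is not part of the patch; the helper name is hypothetical and for illustration only.

#include <stdint.h>

/* Hypothetical scalar reference for the 4xH chroma "put" case above.
 * Weights follow the comments in the removed code:
 * A = xy - 8x - 8y + 64, B = 8x - xy, C = 8y - xy, D = xy. */
static void chroma_mc4_ref_c(uint8_t *dst, uint8_t *src, int stride,
                             int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);   /* = xy - 8x - 8y + 64 */
    const int B =      x  * (8 - y);   /* = 8x - xy */
    const int C = (8 - x) *      y;    /* = 8y - xy */
    const int D =      x  *      y;    /* = xy */
    int i, j;

    for (i = 0; i < h; i++) {
        for (j = 0; j < 4; j++)
            dst[j] = (A * src[j]          + B * src[j + 1] +
                      C * src[j + stride] + D * src[j + stride + 1] + 32) >> 6;
        dst += stride;
        src += stride;
    }
}

The MMX version processes two output rows per iteration of label "1:", reusing the already-unpacked bottom row of one step as the top row of the next, which is why the loop decrements h by 2.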
|