@@ -267,8 +267,6 @@ static void H264_CHROMA_MC2_TMPL(uint8_t *dst/*align 2*/, uint8_t *src/*align 1*
 {
     int CD=((1<<16)-1)*x*y + 8*y;
     int AB=((8<<16)-8)*x + 64 - CD;
-    int i;
-
     asm volatile(
         /* mm5 = {A,B,A,B} */
         /* mm6 = {C,D,C,D} */
@@ -277,50 +275,41 @@ static void H264_CHROMA_MC2_TMPL(uint8_t *dst/*align 2*/, uint8_t *src/*align 1*
"punpckldq %%mm5, %%mm5\n\t" |
|
|
|
|
"punpckldq %%mm6, %%mm6\n\t" |
|
|
|
|
"pxor %%mm7, %%mm7\n\t" |
|
|
|
|
:: "r"(AB), "r"(CD)); |
|
|
|
|
|
|
|
|
|
asm volatile( |
|
|
|
|
/* mm0 = src[0,1,1,2] */ |
|
|
|
|
"movd %0, %%mm0\n\t" |
|
|
|
|
"movd %2, %%mm0\n\t" |
|
|
|
|
"punpcklbw %%mm7, %%mm0\n\t" |
|
|
|
|
"pshufw $0x94, %%mm0, %%mm0\n\t" |
|
|
|
|
:: "m"(src[0])); |
|
|
|
|
:: "r"(AB), "r"(CD), "m"(src[0])); |
|
|
|
|
|
|
|
|
|
-    for(i=0; i<h; i++) {
-        asm volatile(
-            /* mm1 = A * src[0,1] + B * src[1,2] */
-            "movq %%mm0, %%mm1\n\t"
-            "pmaddwd %%mm5, %%mm1\n\t"
-            ::);
-
-        src += stride;
-        asm volatile(
-            /* mm0 = src[0,1,1,2] */
-            "movd %0, %%mm0\n\t"
-            "punpcklbw %%mm7, %%mm0\n\t"
-            "pshufw $0x94, %%mm0, %%mm0\n\t"
-            :: "m"(src[0]));
-
-        asm volatile(
-            /* mm1 += C * src[0,1] + D * src[1,2] */
-            "movq %%mm0, %%mm2\n\t"
-            "pmaddwd %%mm6, %%mm2\n\t"
-            "paddw %%mm2, %%mm1\n\t"
-            ::);
+    asm volatile(
+        "1:\n\t"
+        "addl %4, %1\n\t"
+        /* mm1 = A * src[0,1] + B * src[1,2] */
+        "movq %%mm0, %%mm1\n\t"
+        "pmaddwd %%mm5, %%mm1\n\t"
+        /* mm0 = src[0,1,1,2] */
+        "movd (%1), %%mm0\n\t"
+        "punpcklbw %%mm7, %%mm0\n\t"
+        "pshufw $0x94, %%mm0, %%mm0\n\t"
+        /* mm1 += C * src[0,1] + D * src[1,2] */
+        "movq %%mm0, %%mm2\n\t"
+        "pmaddwd %%mm6, %%mm2\n\t"
+        "paddw %%mm2, %%mm1\n\t"
+        /* dst[0,1] = pack((mm1 + 32) >> 6) */
+        "paddw %3, %%mm1\n\t"
+        "psrlw $6, %%mm1\n\t"
+        "packssdw %%mm7, %%mm1\n\t"
+        "packuswb %%mm7, %%mm1\n\t"
+        /* writes garbage to the right of dst.
+         * ok because partitions are processed from left to right. */
+        H264_CHROMA_OP4((%0), %%mm1, %%mm3)
+        "movd %%mm1, (%0)\n\t"
+        "addl %4, %0\n\t"
+        "subl $1, %2\n\t"
+        "jnz 1b\n\t"
+        : "+r" (dst), "+r"(src), "+r"(h) : "m" (ff_pw_32), "r"(stride));
 
-        asm volatile(
-            /* dst[0,1] = pack((mm1 + 32) >> 6) */
-            "paddw %1, %%mm1\n\t"
-            "psrlw $6, %%mm1\n\t"
-            "packssdw %%mm7, %%mm1\n\t"
-            "packuswb %%mm7, %%mm1\n\t"
-            /* writes garbage to the right of dst.
-             * ok because partitions are processed from left to right. */
-            H264_CHROMA_OP4(%0, %%mm1, %%mm3)
-            "movd %%mm1, %0\n\t"
-            : "=m" (dst[0]) : "m" (ff_pw_32));
-        dst += stride;
-    }
 }
 #endif
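For reference, the routine computes (both before and after this change, which only fuses the C loop into one asm block) the standard H.264 bilinear chroma interpolation over a 2-pixel-wide block. A minimal scalar sketch of the "put" case, with the hypothetical name chroma_mc2_ref; in this template family, H264_CHROMA_OP4 expands to nothing for "put" and to a pavgb-based average for "avg":

    #include <stdint.h>

    /* Hypothetical scalar equivalent of H264_CHROMA_MC2_TMPL ("put" case).
     * x and y are the fractional chroma MV components in eighth-pel units, 0..7. */
    void chroma_mc2_ref(uint8_t *dst, const uint8_t *src,
                        int stride, int h, int x, int y)
    {
        const int A = (8-x)*(8-y), B = x*(8-y), C = (8-x)*y, D = x*y;
        for (int i = 0; i < h; i++) {
            for (int j = 0; j < 2; j++)
                dst[j] = (A*src[j]        + B*src[j+1] +
                          C*src[j+stride] + D*src[j+stride+1] + 32) >> 6;
            dst += stride;
            src += stride;
        }
    }

The asm evaluates the same expression one row pair per pmaddwd: pshufw $0x94 arranges a row as the words [s0,s1,s1,s2], so multiplying by mm5 = {A,B,A,B} and summing adjacent products yields A*s0+B*s1 and A*s1+B*s2 in the two dwords of mm1, and the second pmaddwd against {C,D,C,D} adds the next row's contribution.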
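The two coefficient constants are bit-twiddled so that each 32-bit value already holds a pmaddwd-ready word pair: AB packs A in the low word and B in the high word, CD packs C low and D high, and punpckldq then splats each pair across the register to give the {A,B,A,B} and {C,D,C,D} layouts noted in the comments. A self-contained check of that packing, assuming the usual 0..7 range for the fractional offsets:

    #include <assert.h>

    /* Verify that the packed constants match the bilinear weights. */
    int main(void)
    {
        for (int x = 0; x < 8; x++)
            for (int y = 0; y < 8; y++) {
                int CD = ((1<<16)-1)*x*y + 8*y;
                int AB = ((8<<16)-8)*x + 64 - CD;
                assert((AB & 0xffff) == (8-x)*(8-y)); /* A */
                assert((AB >> 16)    == x*(8-y));     /* B */
                assert((CD & 0xffff) == (8-x)*y);     /* C */
                assert((CD >> 16)    == x*y);         /* D */
            }
        return 0;
    }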