@@ -282,7 +282,7 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, const uin
 #define H264_MC_V(OPNAME, SIZE, MMX, ALIGN) \
 static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
 {\
-    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
+    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
     ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
     ff_ ## OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, temp, stride, stride, SIZE);\
 }\
@@ -294,7 +294,7 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, const uin
 \
 static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
 {\
-    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
+    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
     ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
     ff_ ## OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, temp, stride, stride, SIZE);\
 }\
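
The two hunks above feed the vertically filtered temp plane into the ff_*_pixels*_l2_* helpers together with the source (mc01) or the source shifted one row down (mc03). A scalar sketch of what those helpers are assumed to compute for the "put" case, i.e. the rounded average H.264 uses to form quarter-pel samples from the two nearest integer/half-pel planes (argument layout simplified here; the real SIMD versions imply the temp plane's stride):

    #include <stddef.h>
    #include <stdint.h>

    /* Assumed reference semantics of the *_pixels*_l2 "put" helpers:
     * dst = rounded average of two source planes. */
    static void put_pixels_l2_ref(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
                                  ptrdiff_t dst_stride, ptrdiff_t src1_stride,
                                  ptrdiff_t src2_stride, int w, int h)
    {
        for (int y = 0; y < h; y++) {
            for (int x = 0; x < w; x++)
                dst[x] = (uint8_t)((src1[x] + src2[x] + 1) >> 1);  /* round to nearest */
            dst  += dst_stride;
            src1 += src1_stride;
            src2 += src2_stride;
        }
    }
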
@@ -302,41 +302,41 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, const uin
 #define H264_MC_HV(OPNAME, SIZE, MMX, ALIGN) \
 static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
 {\
-    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
+    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
     ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
     ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
 }\
 \
 static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
 {\
-    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
+    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
     ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
     ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
 }\
 \
 static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
 {\
-    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
+    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
     ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
     ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
 }\
 \
 static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
 {\
-    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
+    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
     ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
     ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
 }\
 \
 static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
 {\
-    DECLARE_ALIGNED(ALIGN, uint16_t, temp)[SIZE*(SIZE<8?12:24)];\
+    LOCAL_ALIGNED(ALIGN, uint16_t, temp, [SIZE*(SIZE<8?12:24)]);\
     ff_ ## OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, temp, src, stride, SIZE, stride);\
 }\
 \
 static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
 {\
-    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
+    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
     uint8_t * const halfHV= temp;\
     int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
     av_assert2(((int)temp & 7) == 0);\
@@ -346,7 +346,7 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, const uin
 \
 static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
 {\
-    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
+    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
     uint8_t * const halfHV= temp;\
     int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
     av_assert2(((int)temp & 7) == 0);\
@@ -356,7 +356,7 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, const uin
 \
 static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
 {\
-    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
+    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
     uint8_t * const halfHV= temp;\
     int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
     av_assert2(((int)temp & 7) == 0);\
@@ -366,7 +366,7 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, const uin
 \
 static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
 {\
-    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
+    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
     uint8_t * const halfHV= temp;\
     int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
     av_assert2(((int)temp & 7) == 0);\
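
The HV cases touched above (mc21, mc23, mc12, mc32) all carve one aligned scratch buffer into an 8-bit halfHV plane followed by a 16-bit halfV plane and assert that the buffer is at least 8-byte aligned. A standalone sketch of that layout for SIZE == 8, with hypothetical names and a plain C11 alignment specifier standing in for LOCAL_ALIGNED:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    void qpel_scratch_layout_demo(void)
    {
        enum { SIZE = 8 };
        /* Same sizing expression as the macro: SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE */
        _Alignas(16) uint8_t temp[SIZE * 24 * 2 + SIZE * SIZE];

        uint8_t *const halfHV = temp;                              /* 8-bit plane  */
        int16_t *const halfV  = (int16_t *)(temp + SIZE * SIZE);   /* 16-bit plane */

        assert(((uintptr_t)temp & 7) == 0);   /* same invariant as av_assert2() */
        memset(halfHV, 0, SIZE * SIZE);
        memset(halfV, 0, SIZE * 24 * sizeof(*halfV));
    }
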