avcodec/h264: sse2 and avx 4:2:2 idct add8 10-bit functions

Yorkfield:
 - sse2:
   - complex: 4.13x faster (1514 vs. 367 cycles)
   - simple:  4.38x faster (1836 vs. 419 cycles)

Skylake:
 - sse2:
   - complex: 3.61x faster ( 936 vs. 260 cycles)
   - simple:  3.97x faster (1126 vs. 284 cycles)
 - avx (versus sse2):
   - complex: 1.07x faster (260 vs. 244 cycles)
   - simple:  1.03x faster (284 vs. 274 cycles)
James Darnley
commit 13d71c28cc (parent 1dae7ffa0b, branch pull/241/head)
libavcodec/x86/h264_idct_10bit.asm | 53
libavcodec/x86/h264dsp_init.c      | 13
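
Editor's note on the figures above: cycle counts like these typically come from FFmpeg's START_TIMER/STOP_TIMER macros (libavutil/timer.h), which wrap the call in TSC reads. A minimal, self-contained sketch of that idea follows; the function-pointer type matches the signature added by this patch, but the run count, the __rdtscp use, and the names are illustrative assumptions, not the harness that produced the numbers above.

    #include <stdint.h>
    #include <x86intrin.h>

    /* Matches the signature of the idct add8 functions in this patch. */
    typedef void (*idct_add8_fn)(uint8_t **dst, const int *block_offset,
                                 int16_t *block, int stride,
                                 const uint8_t *nnzc);

    /* Best-of-N cycle count: repeat the call and keep the lowest TSC
     * delta, which suppresses interrupt and cache-miss noise. */
    static uint64_t bench_cycles(idct_add8_fn fn, uint8_t **dst,
                                 const int *block_offset, int16_t *block,
                                 int stride, const uint8_t *nnzc)
    {
        uint64_t best = UINT64_MAX;
        for (int run = 0; run < 1 << 12; run++) {
            unsigned aux;
            uint64_t t0 = __rdtscp(&aux);  /* TSC read around the call */
            fn(dst, block_offset, block, stride, nnzc);
            uint64_t t1 = __rdtscp(&aux);
            if (t1 - t0 < best)
                best = t1 - t0;
        }
        return best;
    }

Dividing the C reading by the SIMD one gives the quoted ratios, e.g. 1126 / 284 ≈ 3.97 for the Skylake "simple" case.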

@@ -350,6 +350,59 @@ INIT_XMM avx
 IDCT_ADD8
 %endif
 
+;-----------------------------------------------------------------------------
+; void ff_h264_idct_add8_422_10(pixel **dst, const int *block_offset,
+;                               int16_t *block, int stride,
+;                               const uint8_t nnzc[6*8])
+;-----------------------------------------------------------------------------
+%assign last_block 44
+%macro IDCT_ADD8_422 0
+
+cglobal h264_idct_add8_422_10, 5, 8, 7
+    movsxdifnidn r3, r3d
+%if ARCH_X86_64
+    mov      r7, r0
+%endif
+
+    add      r2, 1024
+    mov      r0, [r0]
+    ADD16_OP_INTRA 16, 4+ 6*8
+    ADD16_OP_INTRA 18, 4+ 7*8
+    ADD16_OP_INTRA 24, 4+ 8*8 ; i+4
+    ADD16_OP_INTRA 26, 4+ 9*8 ; i+4
+    add      r2, 1024-128*4
+
+%if ARCH_X86_64
+    mov      r0, [r7+gprsize]
+%else
+    mov      r0, r0m
+    mov      r0, [r0+gprsize]
+%endif
+
+    ADD16_OP_INTRA 32, 4+11*8
+    ADD16_OP_INTRA 34, 4+12*8
+    ADD16_OP_INTRA 40, 4+13*8 ; i+4
+    ADD16_OP_INTRA 42, 4+14*8 ; i+4
+    REP_RET
+
+    AC 16
+    AC 18
+    AC 24 ; i+4
+    AC 26 ; i+4
+    AC 32
+    AC 34
+    AC 40 ; i+4
+    AC 42 ; i+4
+
+%endmacro
+
+INIT_XMM sse2
+IDCT_ADD8_422
+%if HAVE_AVX_EXTERNAL
+INIT_XMM avx
+IDCT_ADD8_422
+%endif
+
 ;-----------------------------------------------------------------------------
 ; void ff_h264_idct8_add_10(pixel *dst, int16_t *block, int stride)
 ;-----------------------------------------------------------------------------
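
Editor's note: the behaviour being vectorised is the C fallback ff_h264_idct_add8_422 in libavcodec/h264idct_template.c: per chroma plane (Cb then Cr, hence the two dst reloads, `mov r0, [r0]` and `mov r0, [r7+gprsize]`), visit eight 4x4 blocks and run either the full IDCT-add or the cheap DC-only add. A simplified, hypothetical C rendering follows; the real template is pixel-size generic and applies a scan8/block_offset index shift of +4 to the lower half of each plane, which is what the `; i+4` comments above refer to. ADD16_OP_INTRA appears to fuse the non-zero check and the DC shortcut for two adjacent 4x4 blocks at a time, which is why only even block numbers appear.

    #include <stdint.h>

    /* Simplified sketch only -- scan8[] is FFmpeg's table mapping block
     * index to its slot in the non-zero-count cache, and the idct_add /
     * idct_dc_add callbacks stand in for the per-bit-depth 4x4 kernels. */
    static void idct_add8_422_sketch(uint8_t **dest, const int *block_offset,
                                     int16_t *block, int stride,
                                     const uint8_t *nnzc, const uint8_t *scan8,
                                     void (*idct_add)(uint8_t *, int16_t *, int),
                                     void (*idct_dc_add)(uint8_t *, int16_t *, int))
    {
        for (int j = 1; j < 3; j++) {           /* j == 1: Cb, j == 2: Cr */
            for (int i = j * 16; i < j * 16 + 8; i++) {
                if (nnzc[scan8[i]])             /* AC coefficients present */
                    idct_add(dest[j - 1] + block_offset[i],
                             block + i * 16, stride);
                else if (block[i * 16])         /* lone DC coefficient */
                    idct_dc_add(dest[j - 1] + block_offset[i],
                                block + i * 16, stride);
            }
        }
    }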

@@ -80,6 +80,9 @@ IDCT_ADD_REP_FUNC2(, 8, 10, avx)
 IDCT_ADD_REP_FUNC2(, 8_422, 8, mmx)
+IDCT_ADD_REP_FUNC2(, 8_422, 10, sse2)
+IDCT_ADD_REP_FUNC2(, 8_422, 10, avx)
+
 void ff_h264_luma_dc_dequant_idct_mmx(int16_t *output, int16_t *input, int qmul);
 void ff_h264_luma_dc_dequant_idct_sse2(int16_t *output, int16_t *input, int qmul);
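
Editor's note: IDCT_ADD_REP_FUNC2 is a token-pasting prototype helper. Assuming it keeps the usual ff_h264_idct<NUM>_add<REP>_<DEPTH>_<OPT> naming — which the banner comment in the asm above corroborates — the two added lines declare:

    /* Assumed expansion of the two new IDCT_ADD_REP_FUNC2 invocations: */
    void ff_h264_idct_add8_422_10_sse2(uint8_t **dst, const int *block_offset,
                                       int16_t *block, int stride,
                                       const uint8_t nnzc[6 * 8]);
    void ff_h264_idct_add8_422_10_avx (uint8_t **dst, const int *block_offset,
                                       int16_t *block, int stride,
                                       const uint8_t nnzc[6 * 8]);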
@@ -319,8 +322,11 @@ av_cold void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth,
             c->h264_idct8_dc_add   = ff_h264_idct8_dc_add_10_sse2;
             c->h264_idct_add16     = ff_h264_idct_add16_10_sse2;
-            if (chroma_format_idc <= 1)
+            if (chroma_format_idc <= 1) {
                 c->h264_idct_add8  = ff_h264_idct_add8_10_sse2;
+            } else {
+                c->h264_idct_add8  = ff_h264_idct_add8_422_10_sse2;
+            }
             c->h264_idct_add16intra = ff_h264_idct_add16intra_10_sse2;
 #if HAVE_ALIGNED_STACK
             c->h264_idct8_add      = ff_h264_idct8_add_10_sse2;
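
Editor's note: the condition mirrors chroma_format_idc from the H.264 SPS, so `<= 1` covers monochrome and 4:2:0 (monochrome streams never call add8) while the new else arm installs the 4:2:2 version; 4:4:4 content decodes chroma through the luma-sized paths, so it does not reach h264_idct_add8. As an illustrative (non-FFmpeg) enum:

    /* chroma_format_idc values as signalled in the SPS: */
    enum {
        CHROMA_400 = 0,  /* monochrome: no chroma blocks at all  */
        CHROMA_420 = 1,  /* 4:2:0: existing add8 routine         */
        CHROMA_422 = 2,  /* 4:2:2: the new add8_422 routine      */
        CHROMA_444 = 3,  /* 4:4:4: chroma handled like luma      */
    };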
@@ -359,8 +365,11 @@ av_cold void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth,
             c->h264_idct8_dc_add   = ff_h264_idct8_dc_add_10_avx;
             c->h264_idct_add16     = ff_h264_idct_add16_10_avx;
-            if (chroma_format_idc <= 1)
+            if (chroma_format_idc <= 1) {
                 c->h264_idct_add8  = ff_h264_idct_add8_10_avx;
+            } else {
+                c->h264_idct_add8  = ff_h264_idct_add8_422_10_avx;
+            }
             c->h264_idct_add16intra = ff_h264_idct_add16intra_10_avx;
 #if HAVE_ALIGNED_STACK
             c->h264_idct8_add      = ff_h264_idct8_add_10_avx;
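
Editor's note: both hunks only fill the H264DSPContext function table, so the 4:2:0-versus-4:2:2 decision is paid once at init rather than per macroblock; the decoder then calls through the pointer, roughly like this (illustrative, not a verbatim quote of the call site):

    /* Illustrative call through the table set up above; the real call
     * site lives in the macroblock decode loop and passes the chroma dst
     * pointers, per-block offsets, coefficient buffer, stride and the
     * non-zero-count cache. */
    h->h264dsp.h264_idct_add8(dest, block_offset, h->mb, linesize, nnzc);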
