avcodec/mips: [loongson] optimize put_hevc_qpel_bi_h_8 with mmi.

Optimize put_hevc_qpel_bi_h_8 with MMI for the cases width=4/8/12/16/24/32/48/64.
This optimization improved HEVC decoding performance by 2.1% (2.34x to 2.39x, tested on Loongson 3A3000).

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
pull/307/head
Shiyou Yin 6 years ago committed by Michael Niedermayer
parent c0942b7a2c
commit dceefb2b84
  1. 9
      libavcodec/mips/hevcdsp_init_mips.c
  2. 9
      libavcodec/mips/hevcdsp_mips.h
  3. 132
      libavcodec/mips/hevcdsp_mmi.c

@ -34,6 +34,15 @@ static av_cold void hevc_dsp_init_mmi(HEVCDSPContext *c,
/* NOTE(review): interior of hevc_dsp_init_mmi() — the function header lies
 * outside this hunk.  Judging from the function names being assigned, the
 * first index maps block width to a slot (1->4, 3->8, 4->12, 5->16, 6->24,
 * 7->32, 8->48, 9->64) and [idx][v][h] selects vertical/horizontal
 * filtering: [x][0][1] is horizontal-only, [x][1][1] is horizontal+vertical. */
c->put_hevc_qpel[8][1][1] = ff_hevc_put_hevc_qpel_hv48_8_mmi;
c->put_hevc_qpel[9][1][1] = ff_hevc_put_hevc_qpel_hv64_8_mmi;
/* Added by this change: bi-predictive horizontal-only qpel for every width. */
c->put_hevc_qpel_bi[1][0][1] = ff_hevc_put_hevc_qpel_bi_h4_8_mmi;
c->put_hevc_qpel_bi[3][0][1] = ff_hevc_put_hevc_qpel_bi_h8_8_mmi;
c->put_hevc_qpel_bi[4][0][1] = ff_hevc_put_hevc_qpel_bi_h12_8_mmi;
c->put_hevc_qpel_bi[5][0][1] = ff_hevc_put_hevc_qpel_bi_h16_8_mmi;
c->put_hevc_qpel_bi[6][0][1] = ff_hevc_put_hevc_qpel_bi_h24_8_mmi;
c->put_hevc_qpel_bi[7][0][1] = ff_hevc_put_hevc_qpel_bi_h32_8_mmi;
c->put_hevc_qpel_bi[8][0][1] = ff_hevc_put_hevc_qpel_bi_h48_8_mmi;
c->put_hevc_qpel_bi[9][0][1] = ff_hevc_put_hevc_qpel_bi_h64_8_mmi;
c->put_hevc_qpel_bi[1][1][1] = ff_hevc_put_hevc_qpel_bi_hv4_8_mmi;
c->put_hevc_qpel_bi[3][1][1] = ff_hevc_put_hevc_qpel_bi_hv8_8_mmi;
c->put_hevc_qpel_bi[4][1][1] = ff_hevc_put_hevc_qpel_bi_hv12_8_mmi;

@ -524,6 +524,15 @@ L_BI_MC(qpel, hv, 32, mmi);
/* Invocations from hevcdsp_mips.h.  L_BI_MC(PEL, DIR, WIDTH, TYPE) presumably
 * expands to the ff_hevc_put_hevc_##PEL##_bi_##DIR##WIDTH##_8_##TYPE
 * prototype — the macro definition is outside this chunk; confirm there.
 * This change adds the qpel horizontal-only (h) bi-prediction widths. */
L_BI_MC(qpel, hv, 48, mmi);
L_BI_MC(qpel, hv, 64, mmi);
L_BI_MC(qpel, h, 4, mmi);
L_BI_MC(qpel, h, 8, mmi);
L_BI_MC(qpel, h, 12, mmi);
L_BI_MC(qpel, h, 16, mmi);
L_BI_MC(qpel, h, 24, mmi);
L_BI_MC(qpel, h, 32, mmi);
L_BI_MC(qpel, h, 48, mmi);
L_BI_MC(qpel, h, 64, mmi);
L_BI_MC(epel, hv, 4, mmi);
L_BI_MC(epel, hv, 8, mmi);
L_BI_MC(epel, hv, 12, mmi);

@ -217,6 +217,138 @@ PUT_HEVC_QPEL_HV(32, 8, -32, -64);
PUT_HEVC_QPEL_HV(48, 12, -48, -96);
PUT_HEVC_QPEL_HV(64, 16, -64, -128);
/*
 * PUT_HEVC_QPEL_BI_H(w, x_step, src_step, src2_step, dst_step)
 *
 * Expands to ff_hevc_put_hevc_qpel_bi_h##w##_8_mmi(): bi-predictive HEVC
 * quarter-pel *horizontal* luma interpolation for 8-bit pixels, using
 * Loongson MMI SIMD.  Each inner-loop iteration filters 4 output pixels
 * with the 8-tap qpel filter selected by mx, averages them with the
 * corresponding 4 int16 samples from the second reference (src2), rounds
 * (offset = 64, shift = 7), clamps, and stores 4 bytes.
 *
 *   w         output block width in pixels
 *   x_step    w / 4 = inner-loop iterations per row
 *   src_step  / src2_step / dst_step
 *             negative rewinds applied at end of row (undo the inner-loop
 *             pointer advances) before adding one full stride
 *
 * NOTE(review): src2 appears to use a fixed row stride of 0x80 bytes
 * (64 int16 — presumably MAX_PB_SIZE); confirm against the caller.
 */
#define PUT_HEVC_QPEL_BI_H(w, x_step, src_step, src2_step, dst_step) \
void ff_hevc_put_hevc_qpel_bi_h##w##_8_mmi(uint8_t *_dst, \
ptrdiff_t _dststride, \
uint8_t *_src, \
ptrdiff_t _srcstride, \
int16_t *src2, int height, \
intptr_t mx, intptr_t my, \
int width) \
{ \
int x, y; \
/* start 3 samples left of the block: support window of the 8-tap filter */ \
pixel *src = (pixel*)_src - 3; \
ptrdiff_t srcstride = _srcstride / sizeof(pixel); \
pixel *dst = (pixel *)_dst; \
ptrdiff_t dststride = _dststride / sizeof(pixel); \
const int8_t *filter = ff_hevc_qpel_filters[mx - 1]; \
/* only ftmp[0..12] are used as MMI temporaries (see constraint list) */ \
uint64_t ftmp[20]; \
uint64_t rtmp[1]; \
int shift = 7; \
int offset = 64; \
\
/* x counts groups of 4 output pixels per row; y counts rows */ \
x = width >> 2; \
y = height; \
__asm__ volatile( \
/* load the 8 int8 taps, widen to two registers of 4 int16 taps each */ \
MMI_LDC1(%[ftmp1], %[filter], 0x00) \
"li %[rtmp0], 0x08 \n\t" \
"dmtc1 %[rtmp0], %[ftmp0] \n\t" \
"punpckhbh %[ftmp2], %[ftmp0], %[ftmp1] \n\t" \
"punpcklbh %[ftmp1], %[ftmp0], %[ftmp1] \n\t" \
"psrah %[ftmp1], %[ftmp1], %[ftmp0] \n\t" \
"psrah %[ftmp2], %[ftmp2], %[ftmp0] \n\t" \
/* ftmp0 = 0 from here on: zero operand for unpacking and compares */ \
"xor %[ftmp0], %[ftmp0], %[ftmp0] \n\t" \
/* splat the 32-bit rounding offset (64) across the register */ \
"punpcklhw %[offset], %[offset], %[offset] \n\t" \
"punpcklwd %[offset], %[offset], %[offset] \n\t" \
\
"1: \n\t" \
"li %[x], " #x_step " \n\t" \
"2: \n\t" \
/* unaligned 64-bit loads of src[0..7], src[1..8], src[2..9], src[3..10] */ \
"gsldlc1 %[ftmp3], 0x07(%[src]) \n\t" \
"gsldrc1 %[ftmp3], 0x00(%[src]) \n\t" \
"gsldlc1 %[ftmp4], 0x08(%[src]) \n\t" \
"gsldrc1 %[ftmp4], 0x01(%[src]) \n\t" \
"gsldlc1 %[ftmp5], 0x09(%[src]) \n\t" \
"gsldrc1 %[ftmp5], 0x02(%[src]) \n\t" \
"gsldlc1 %[ftmp6], 0x0a(%[src]) \n\t" \
"gsldrc1 %[ftmp6], 0x03(%[src]) \n\t" \
/* for each of the 4 phases: widen bytes, multiply by the taps, and \
 * pairwise-add the low/high tap halves into 4 halfword partial sums */ \
"punpcklbh %[ftmp7], %[ftmp3], %[ftmp0] \n\t" \
"punpckhbh %[ftmp8], %[ftmp3], %[ftmp0] \n\t" \
"pmullh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" \
"pmullh %[ftmp8], %[ftmp8], %[ftmp2] \n\t" \
"paddh %[ftmp3], %[ftmp7], %[ftmp8] \n\t" \
"punpcklbh %[ftmp7], %[ftmp4], %[ftmp0] \n\t" \
"punpckhbh %[ftmp8], %[ftmp4], %[ftmp0] \n\t" \
"pmullh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" \
"pmullh %[ftmp8], %[ftmp8], %[ftmp2] \n\t" \
"paddh %[ftmp4], %[ftmp7], %[ftmp8] \n\t" \
"punpcklbh %[ftmp7], %[ftmp5], %[ftmp0] \n\t" \
"punpckhbh %[ftmp8], %[ftmp5], %[ftmp0] \n\t" \
"pmullh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" \
"pmullh %[ftmp8], %[ftmp8], %[ftmp2] \n\t" \
"paddh %[ftmp5], %[ftmp7], %[ftmp8] \n\t" \
"punpcklbh %[ftmp7], %[ftmp6], %[ftmp0] \n\t" \
"punpckhbh %[ftmp8], %[ftmp6], %[ftmp0] \n\t" \
"pmullh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" \
"pmullh %[ftmp8], %[ftmp8], %[ftmp2] \n\t" \
"paddh %[ftmp6], %[ftmp7], %[ftmp8] \n\t" \
/* transpose so the 4 partial sums of each output pixel share a register, \
 * then the paddh chain reduces them to 4 filtered halfwords in ftmp3 */ \
TRANSPOSE_4H(%[ftmp3], %[ftmp4], %[ftmp5], %[ftmp6], \
%[ftmp7], %[ftmp8], %[ftmp9], %[ftmp10]) \
"paddh %[ftmp3], %[ftmp3], %[ftmp4] \n\t" \
"paddh %[ftmp5], %[ftmp5], %[ftmp6] \n\t" \
"paddh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" \
"paddh %[ftmp3], %[ftmp3], %[offset] \n\t" \
/* load 4 int16 samples of the second reference block */ \
"gsldlc1 %[ftmp4], 0x07(%[src2]) \n\t" \
"gsldrc1 %[ftmp4], 0x00(%[src2]) \n\t" \
/* punpck against zero + arithmetic shift right by 16 sign-extends the \
 * halfwords of ftmp3/ftmp4 to 32-bit words before the bi-pred addition */ \
"li %[rtmp0], 0x10 \n\t" \
"dmtc1 %[rtmp0], %[ftmp8] \n\t" \
"punpcklhw %[ftmp5], %[ftmp0], %[ftmp3] \n\t" \
"punpckhhw %[ftmp6], %[ftmp0], %[ftmp3] \n\t" \
"punpckhhw %[ftmp3], %[ftmp0], %[ftmp4] \n\t" \
"punpcklhw %[ftmp4], %[ftmp0], %[ftmp4] \n\t" \
"psraw %[ftmp5], %[ftmp5], %[ftmp8] \n\t" \
"psraw %[ftmp6], %[ftmp6], %[ftmp8] \n\t" \
"psraw %[ftmp3], %[ftmp3], %[ftmp8] \n\t" \
"psraw %[ftmp4], %[ftmp4], %[ftmp8] \n\t" \
"paddw %[ftmp5], %[ftmp5], %[ftmp4] \n\t" \
"paddw %[ftmp6], %[ftmp6], %[ftmp3] \n\t" \
/* >> 7, pack to signed halfwords, zero out non-positive values via the \
 * pcmpgth mask, then pack with unsigned saturation and store 4 bytes */ \
"psraw %[ftmp5], %[ftmp5], %[shift] \n\t" \
"psraw %[ftmp6], %[ftmp6], %[shift] \n\t" \
"packsswh %[ftmp5], %[ftmp5], %[ftmp6] \n\t" \
"pcmpgth %[ftmp7], %[ftmp5], %[ftmp0] \n\t" \
"and %[ftmp3], %[ftmp5], %[ftmp7] \n\t" \
"packushb %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
"gsswlc1 %[ftmp3], 0x03(%[dst]) \n\t" \
"gsswrc1 %[ftmp3], 0x00(%[dst]) \n\t" \
\
/* advance to the next group of 4 pixels (src2 entries are int16) */ \
"daddi %[x], %[x], -0x01 \n\t" \
PTR_ADDIU "%[src], %[src], 0x04 \n\t" \
PTR_ADDIU "%[dst], %[dst], 0x04 \n\t" \
PTR_ADDIU "%[src2], %[src2], 0x08 \n\t" \
"bnez %[x], 2b \n\t" \
\
/* end of row: rewind by the width just processed, then step one stride; \
 * src2 advances by a fixed 0x80-byte row stride */ \
"daddi %[y], %[y], -0x01 \n\t" \
PTR_ADDIU "%[src], %[src], " #src_step " \n\t" \
PTR_ADDIU "%[dst], %[dst], " #dst_step " \n\t" \
PTR_ADDIU "%[src2], %[src2], " #src2_step " \n\t" \
PTR_ADDU "%[src], %[src], %[src_stride] \n\t" \
PTR_ADDU "%[dst], %[dst], %[dst_stride] \n\t" \
PTR_ADDIU "%[src2], %[src2], 0x80 \n\t" \
"bnez %[y], 1b \n\t" \
: [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
[ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), \
[ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), \
[ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), \
[ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), \
[ftmp10]"=&f"(ftmp[10]), [ftmp11]"=&f"(ftmp[11]), \
[ftmp12]"=&f"(ftmp[12]), [src2]"+&r"(src2), \
[dst]"+&r"(dst), [src]"+&r"(src), [y]"+&r"(y), [x]"=&r"(x), \
[offset]"+&f"(offset), [rtmp0]"=&r"(rtmp[0]) \
: [src_stride]"r"(srcstride), [dst_stride]"r"(dststride), \
[filter]"r"(filter), [shift]"f"(shift) \
: "memory" \
); \
}
/* Instantiations: width, inner iterations per row (width/4), and the
 * negative per-row rewinds for src, src2 (int16, so 2*width) and dst. */
PUT_HEVC_QPEL_BI_H(4, 1, -4, -8, -4);
PUT_HEVC_QPEL_BI_H(8, 2, -8, -16, -8);
PUT_HEVC_QPEL_BI_H(12, 3, -12, -24, -12);
PUT_HEVC_QPEL_BI_H(16, 4, -16, -32, -16);
PUT_HEVC_QPEL_BI_H(24, 6, -24, -48, -24);
PUT_HEVC_QPEL_BI_H(32, 8, -32, -64, -32);
PUT_HEVC_QPEL_BI_H(48, 12, -48, -96, -48);
PUT_HEVC_QPEL_BI_H(64, 16, -64, -128, -64);
#define PUT_HEVC_QPEL_BI_HV(w, x_step, src_step, src2_step, dst_step) \
void ff_hevc_put_hevc_qpel_bi_hv##w##_8_mmi(uint8_t *_dst, \
ptrdiff_t _dststride, \

Loading…
Cancel
Save