mirror of https://github.com/FFmpeg/FFmpeg.git
ffmpeg -i ../10_vp9_1080p_30fps_3Mbps.webm -f rawvideo -y /dev/null -an
before: 170fps
after:  294fps

Reviewed-by: Shiyou Yin <yinshiyou-hf@loongson.cn>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
parent 72bcbe216e
commit 2fd914e079
7 changed files with 3379 additions and 0 deletions
@@ -0,0 +1,653 @@
/*
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 * Contributed by Hao Chen <chenhao@loongson.cn>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/vp9dsp.h"
#include "libavutil/loongarch/loongson_intrinsics.h"
#include "vp9dsp_loongarch.h"

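/* Store helpers used throughout this file: LSX_ST_8 writes eight 16-byte
 * vectors to eight consecutive rows using the precomputed stride multiples,
 * while LSX_ST_8X16 writes each vector twice per row (byte offsets 0 and 16)
 * so that a 32-pixel-wide row is covered before advancing one stride. */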
#define LSX_ST_8(_dst0, _dst1, _dst2, _dst3, _dst4,          \
                 _dst5, _dst6, _dst7, _dst, _stride,         \
                 _stride2, _stride3, _stride4)               \
{                                                            \
    __lsx_vst(_dst0, _dst, 0);                               \
    __lsx_vstx(_dst1, _dst, _stride);                        \
    __lsx_vstx(_dst2, _dst, _stride2);                       \
    __lsx_vstx(_dst3, _dst, _stride3);                       \
    _dst += _stride4;                                        \
    __lsx_vst(_dst4, _dst, 0);                               \
    __lsx_vstx(_dst5, _dst, _stride);                        \
    __lsx_vstx(_dst6, _dst, _stride2);                       \
    __lsx_vstx(_dst7, _dst, _stride3);                       \
}

#define LSX_ST_8X16(_dst0, _dst1, _dst2, _dst3, _dst4,       \
                    _dst5, _dst6, _dst7, _dst, _stride)      \
{                                                            \
    __lsx_vst(_dst0, _dst, 0);                               \
    __lsx_vst(_dst0, _dst, 16);                              \
    _dst += _stride;                                         \
    __lsx_vst(_dst1, _dst, 0);                               \
    __lsx_vst(_dst1, _dst, 16);                              \
    _dst += _stride;                                         \
    __lsx_vst(_dst2, _dst, 0);                               \
    __lsx_vst(_dst2, _dst, 16);                              \
    _dst += _stride;                                         \
    __lsx_vst(_dst3, _dst, 0);                               \
    __lsx_vst(_dst3, _dst, 16);                              \
    _dst += _stride;                                         \
    __lsx_vst(_dst4, _dst, 0);                               \
    __lsx_vst(_dst4, _dst, 16);                              \
    _dst += _stride;                                         \
    __lsx_vst(_dst5, _dst, 0);                               \
    __lsx_vst(_dst5, _dst, 16);                              \
    _dst += _stride;                                         \
    __lsx_vst(_dst6, _dst, 0);                               \
    __lsx_vst(_dst6, _dst, 16);                              \
    _dst += _stride;                                         \
    __lsx_vst(_dst7, _dst, 0);                               \
    __lsx_vst(_dst7, _dst, 16);                              \
    _dst += _stride;                                         \
}

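/* Vertical prediction: the row of reconstructed pixels above the block is
 * copied unchanged into every row of the block. */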
void ff_vert_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *left,
                       const uint8_t *src)
{
    __m128i src0;
    ptrdiff_t stride2 = dst_stride << 1;
    ptrdiff_t stride3 = stride2 + dst_stride;
    ptrdiff_t stride4 = stride2 << 1;
    src0 = __lsx_vld(src, 0);
    LSX_ST_8(src0, src0, src0, src0, src0, src0, src0, src0, dst,
             dst_stride, stride2, stride3, stride4);
    dst += stride4;
    LSX_ST_8(src0, src0, src0, src0, src0, src0, src0, src0, dst,
             dst_stride, stride2, stride3, stride4);
}

void ff_vert_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *left,
                       const uint8_t *src)
{
    uint32_t row;
    __m128i src0, src1;

    DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src1);
    for (row = 32; row--;) {
        __lsx_vst(src0, dst, 0);
        __lsx_vst(src1, dst, 16);
        dst += dst_stride;
    }
}

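/* Horizontal prediction: each row of the block is filled with that row's
 * left neighbour.  The left-edge bytes are stored bottom-to-top (FFmpeg's
 * VP9 convention), which is why byte 0 of "src" ends up in the last row
 * and byte 15 (or 31) in the first. */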
void ff_hor_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src,
                      const uint8_t *top)
{
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i src8, src9, src10, src11, src12, src13, src14, src15;
    ptrdiff_t stride2 = dst_stride << 1;
    ptrdiff_t stride3 = stride2 + dst_stride;
    ptrdiff_t stride4 = stride2 << 1;

    src15 = __lsx_vldrepl_b(src, 0);
    src14 = __lsx_vldrepl_b(src, 1);
    src13 = __lsx_vldrepl_b(src, 2);
    src12 = __lsx_vldrepl_b(src, 3);
    src11 = __lsx_vldrepl_b(src, 4);
    src10 = __lsx_vldrepl_b(src, 5);
    src9  = __lsx_vldrepl_b(src, 6);
    src8  = __lsx_vldrepl_b(src, 7);
    src7  = __lsx_vldrepl_b(src, 8);
    src6  = __lsx_vldrepl_b(src, 9);
    src5  = __lsx_vldrepl_b(src, 10);
    src4  = __lsx_vldrepl_b(src, 11);
    src3  = __lsx_vldrepl_b(src, 12);
    src2  = __lsx_vldrepl_b(src, 13);
    src1  = __lsx_vldrepl_b(src, 14);
    src0  = __lsx_vldrepl_b(src, 15);
    LSX_ST_8(src0, src1, src2, src3, src4, src5, src6, src7, dst,
             dst_stride, stride2, stride3, stride4);
    dst += stride4;
    LSX_ST_8(src8, src9, src10, src11, src12, src13, src14, src15, dst,
             dst_stride, stride2, stride3, stride4);
}

void ff_hor_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src,
                      const uint8_t *top)
{
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i src8, src9, src10, src11, src12, src13, src14, src15;
    __m128i src16, src17, src18, src19, src20, src21, src22, src23;
    __m128i src24, src25, src26, src27, src28, src29, src30, src31;

    src31 = __lsx_vldrepl_b(src, 0);
    src30 = __lsx_vldrepl_b(src, 1);
    src29 = __lsx_vldrepl_b(src, 2);
    src28 = __lsx_vldrepl_b(src, 3);
    src27 = __lsx_vldrepl_b(src, 4);
    src26 = __lsx_vldrepl_b(src, 5);
    src25 = __lsx_vldrepl_b(src, 6);
    src24 = __lsx_vldrepl_b(src, 7);
    src23 = __lsx_vldrepl_b(src, 8);
    src22 = __lsx_vldrepl_b(src, 9);
    src21 = __lsx_vldrepl_b(src, 10);
    src20 = __lsx_vldrepl_b(src, 11);
    src19 = __lsx_vldrepl_b(src, 12);
    src18 = __lsx_vldrepl_b(src, 13);
    src17 = __lsx_vldrepl_b(src, 14);
    src16 = __lsx_vldrepl_b(src, 15);
    src15 = __lsx_vldrepl_b(src, 16);
    src14 = __lsx_vldrepl_b(src, 17);
    src13 = __lsx_vldrepl_b(src, 18);
    src12 = __lsx_vldrepl_b(src, 19);
    src11 = __lsx_vldrepl_b(src, 20);
    src10 = __lsx_vldrepl_b(src, 21);
    src9  = __lsx_vldrepl_b(src, 22);
    src8  = __lsx_vldrepl_b(src, 23);
    src7  = __lsx_vldrepl_b(src, 24);
    src6  = __lsx_vldrepl_b(src, 25);
    src5  = __lsx_vldrepl_b(src, 26);
    src4  = __lsx_vldrepl_b(src, 27);
    src3  = __lsx_vldrepl_b(src, 28);
    src2  = __lsx_vldrepl_b(src, 29);
    src1  = __lsx_vldrepl_b(src, 30);
    src0  = __lsx_vldrepl_b(src, 31);
    LSX_ST_8X16(src0, src1, src2, src3, src4, src5, src6, src7,
                dst, dst_stride);
    LSX_ST_8X16(src8, src9, src10, src11, src12, src13, src14, src15,
                dst, dst_stride);
    LSX_ST_8X16(src16, src17, src18, src19, src20, src21, src22, src23,
                dst, dst_stride);
    LSX_ST_8X16(src24, src25, src26, src27, src28, src29, src30, src31,
                dst, dst_stride);
}

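/* DC prediction with both edges available: every pixel is set to the
 * rounded average of the left and top neighbours.  A scalar sketch of the
 * 4x4 case below (for illustration only, not part of the original patch):
 *
 *     sum = 0;
 *     for (i = 0; i < 4; i++)
 *         sum += src_left[i] + src_top[i];
 *     avg = (sum + 4) >> 3;            // matches __lsx_vsrari_w(dst0, 3)
 *     // fill the 4x4 block with avg
 */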
void ff_dc_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src_left,
                   const uint8_t *src_top)
{
    __m128i tmp0, tmp1, dst0;

    tmp0 = __lsx_vldrepl_w(src_top, 0);
    tmp1 = __lsx_vldrepl_w(src_left, 0);
    dst0 = __lsx_vilvl_w(tmp1, tmp0);
    dst0 = __lsx_vhaddw_hu_bu(dst0, dst0);
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
    dst0 = __lsx_vsrari_w(dst0, 3);
    dst0 = __lsx_vshuf4i_b(dst0, 0);
    __lsx_vstelm_w(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_w(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_w(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_w(dst0, dst, 0, 0);
}

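/* DC_LEFT / DC_TOP variants: only one edge is available, so the average is
 * taken over that edge alone and the rounding shift drops to log2(size):
 * 2 for 4x4, 3 for 8x8, 4 for 16x16 and 5 for 32x32. */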
#define INTRA_DC_TL_4X4(dir)                                        \
void ff_dc_##dir##_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride,      \
                           const uint8_t *left,                     \
                           const uint8_t *top)                      \
{                                                                   \
    __m128i tmp0, dst0;                                             \
                                                                    \
    tmp0 = __lsx_vldrepl_w(dir, 0);                                 \
    dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0);                          \
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);                          \
    dst0 = __lsx_vsrari_w(dst0, 2);                                 \
    dst0 = __lsx_vshuf4i_b(dst0, 0);                                \
    __lsx_vstelm_w(dst0, dst, 0, 0);                                \
    dst += dst_stride;                                              \
    __lsx_vstelm_w(dst0, dst, 0, 0);                                \
    dst += dst_stride;                                              \
    __lsx_vstelm_w(dst0, dst, 0, 0);                                \
    dst += dst_stride;                                              \
    __lsx_vstelm_w(dst0, dst, 0, 0);                                \
}
INTRA_DC_TL_4X4(top);
INTRA_DC_TL_4X4(left);

void ff_dc_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src_left,
                   const uint8_t *src_top)
{
    __m128i tmp0, tmp1, dst0;

    tmp0 = __lsx_vldrepl_d(src_top, 0);
    tmp1 = __lsx_vldrepl_d(src_left, 0);
    dst0 = __lsx_vilvl_d(tmp1, tmp0);
    dst0 = __lsx_vhaddw_hu_bu(dst0, dst0);
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
    dst0 = __lsx_vhaddw_qu_du(dst0, dst0);
    dst0 = __lsx_vsrari_w(dst0, 4);
    dst0 = __lsx_vreplvei_b(dst0, 0);
    __lsx_vstelm_d(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(dst0, dst, 0, 0);
}

#define INTRA_DC_TL_8X8(dir)                                        \
void ff_dc_##dir##_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride,      \
                           const uint8_t *left,                     \
                           const uint8_t *top)                      \
{                                                                   \
    __m128i tmp0, dst0;                                             \
                                                                    \
    tmp0 = __lsx_vldrepl_d(dir, 0);                                 \
    dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0);                          \
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);                          \
    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);                          \
    dst0 = __lsx_vsrari_w(dst0, 3);                                 \
    dst0 = __lsx_vreplvei_b(dst0, 0);                               \
    __lsx_vstelm_d(dst0, dst, 0, 0);                                \
    dst += dst_stride;                                              \
    __lsx_vstelm_d(dst0, dst, 0, 0);                                \
    dst += dst_stride;                                              \
    __lsx_vstelm_d(dst0, dst, 0, 0);                                \
    dst += dst_stride;                                              \
    __lsx_vstelm_d(dst0, dst, 0, 0);                                \
    dst += dst_stride;                                              \
    __lsx_vstelm_d(dst0, dst, 0, 0);                                \
    dst += dst_stride;                                              \
    __lsx_vstelm_d(dst0, dst, 0, 0);                                \
    dst += dst_stride;                                              \
    __lsx_vstelm_d(dst0, dst, 0, 0);                                \
    dst += dst_stride;                                              \
    __lsx_vstelm_d(dst0, dst, 0, 0);                                \
}

INTRA_DC_TL_8X8(top);
INTRA_DC_TL_8X8(left);

void ff_dc_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                     const uint8_t *src_left, const uint8_t *src_top)
{
    __m128i tmp0, tmp1, dst0;
    ptrdiff_t stride2 = dst_stride << 1;
    ptrdiff_t stride3 = stride2 + dst_stride;
    ptrdiff_t stride4 = stride2 << 1;

    tmp0 = __lsx_vld(src_top, 0);
    tmp1 = __lsx_vld(src_left, 0);
    DUP2_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp0, tmp1);
    dst0 = __lsx_vadd_h(tmp0, tmp1);
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
    dst0 = __lsx_vhaddw_qu_du(dst0, dst0);
    dst0 = __lsx_vsrari_w(dst0, 5);
    dst0 = __lsx_vreplvei_b(dst0, 0);
    LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,
             dst_stride, stride2, stride3, stride4);
    dst += stride4;
    LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,
             dst_stride, stride2, stride3, stride4);
}

#define INTRA_DC_TL_16X16(dir)                                       \
void ff_dc_##dir##_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,     \
                             const uint8_t *left,                    \
                             const uint8_t *top)                     \
{                                                                    \
    __m128i tmp0, dst0;                                              \
    ptrdiff_t stride2 = dst_stride << 1;                             \
    ptrdiff_t stride3 = stride2 + dst_stride;                        \
    ptrdiff_t stride4 = stride2 << 1;                                \
                                                                     \
    tmp0 = __lsx_vld(dir, 0);                                        \
    dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0);                           \
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);                           \
    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);                           \
    dst0 = __lsx_vhaddw_qu_du(dst0, dst0);                           \
    dst0 = __lsx_vsrari_w(dst0, 4);                                  \
    dst0 = __lsx_vreplvei_b(dst0, 0);                                \
    LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,    \
             dst_stride, stride2, stride3, stride4);                 \
    dst += stride4;                                                  \
    LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,    \
             dst_stride, stride2, stride3, stride4);                 \
}

INTRA_DC_TL_16X16(top);
INTRA_DC_TL_16X16(left);

void ff_dc_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                     const uint8_t *src_left, const uint8_t *src_top)
{
    __m128i tmp0, tmp1, tmp2, tmp3, dst0;

    DUP2_ARG2(__lsx_vld, src_top, 0, src_top, 16, tmp0, tmp1);
    DUP2_ARG2(__lsx_vld, src_left, 0, src_left, 16, tmp2, tmp3);
    DUP4_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp2, tmp2,
              tmp3, tmp3, tmp0, tmp1, tmp2, tmp3);
    DUP2_ARG2(__lsx_vadd_h, tmp0, tmp1, tmp2, tmp3, tmp0, tmp1);
    dst0 = __lsx_vadd_h(tmp0, tmp1);
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
    dst0 = __lsx_vhaddw_qu_du(dst0, dst0);
    dst0 = __lsx_vsrari_w(dst0, 6);
    dst0 = __lsx_vreplvei_b(dst0, 0);
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
                dst, dst_stride);
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
                dst, dst_stride);
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
                dst, dst_stride);
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
                dst, dst_stride);
}

#define INTRA_DC_TL_32X32(dir)                                          \
void ff_dc_##dir##_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,        \
                             const uint8_t *left,                       \
                             const uint8_t *top)                        \
{                                                                       \
    __m128i tmp0, tmp1, dst0;                                           \
                                                                        \
    DUP2_ARG2(__lsx_vld, dir, 0, dir, 16, tmp0, tmp1);                  \
    DUP2_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp0, tmp1);  \
    dst0 = __lsx_vadd_h(tmp0, tmp1);                                    \
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);                              \
    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);                              \
    dst0 = __lsx_vhaddw_qu_du(dst0, dst0);                              \
    dst0 = __lsx_vsrari_w(dst0, 5);                                     \
    dst0 = __lsx_vreplvei_b(dst0, 0);                                   \
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,         \
                dst, dst_stride);                                       \
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,         \
                dst, dst_stride);                                       \
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,         \
                dst, dst_stride);                                       \
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,         \
                dst, dst_stride);                                       \
}

INTRA_DC_TL_32X32(top);
INTRA_DC_TL_32X32(left);

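/* DC_127 / DC_128 / DC_129: neither edge is available, so the whole block
 * is filled with the fixed value (127, 128 or 129) required by those
 * prediction modes. */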
#define INTRA_PREDICT_VALDC_16X16_LSX(val)                                \
void ff_dc_##val##_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,          \
                             const uint8_t *left, const uint8_t *top)     \
{                                                                         \
    __m128i out = __lsx_vldi(val);                                        \
    ptrdiff_t stride2 = dst_stride << 1;                                  \
    ptrdiff_t stride3 = stride2 + dst_stride;                             \
    ptrdiff_t stride4 = stride2 << 1;                                     \
                                                                          \
    LSX_ST_8(out, out, out, out, out, out, out, out, dst,                 \
             dst_stride, stride2, stride3, stride4);                      \
    dst += stride4;                                                       \
    LSX_ST_8(out, out, out, out, out, out, out, out, dst,                 \
             dst_stride, stride2, stride3, stride4);                      \
}

INTRA_PREDICT_VALDC_16X16_LSX(127);
INTRA_PREDICT_VALDC_16X16_LSX(128);
INTRA_PREDICT_VALDC_16X16_LSX(129);

#define INTRA_PREDICT_VALDC_32X32_LSX(val)                                \
void ff_dc_##val##_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,          \
                             const uint8_t *left, const uint8_t *top)     \
{                                                                         \
    __m128i out = __lsx_vldi(val);                                        \
                                                                          \
    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride); \
    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride); \
    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride); \
    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride); \
}

INTRA_PREDICT_VALDC_32X32_LSX(127);
INTRA_PREDICT_VALDC_32X32_LSX(128);
INTRA_PREDICT_VALDC_32X32_LSX(129);

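/* TM (TrueMotion) prediction: each pixel is predicted as
 * clip_u8(left[y] + top[x] - top_left).  The saturating subtract
 * (__lsx_vssub_hu) clamps the result at 0 and __lsx_vsat_hu clamps it at
 * 255 before the halfwords are packed back down to bytes. */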
void ff_tm_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                   const uint8_t *src_left, const uint8_t *src_top_ptr)
{
    uint8_t top_left = src_top_ptr[-1];
    __m128i tmp0, tmp1, tmp2, tmp3, reg0, reg1;
    __m128i src0, src1, src2, src3;
    __m128i dst0, dst1, dst2, dst3;

    reg0 = __lsx_vreplgr2vr_h(top_left);
    reg1 = __lsx_vld(src_top_ptr, 0);
    DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2, src_left,
              3, tmp3, tmp2, tmp1, tmp0);
    DUP4_ARG2(__lsx_vilvl_b, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3, reg1,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vhaddw_hu_bu, src0, src0, src1, src1, src2, src2, src3,
              src3, dst0, dst1, dst2, dst3);
    DUP4_ARG2(__lsx_vssub_hu, dst0, reg0, dst1, reg0, dst2, reg0, dst3, reg0,
              dst0, dst1, dst2, dst3);
    DUP4_ARG2(__lsx_vsat_hu, dst0, 7, dst1, 7, dst2, 7, dst3, 7,
              dst0, dst1, dst2, dst3);
    DUP2_ARG2(__lsx_vpickev_b, dst1, dst0, dst3, dst2, dst0, dst1);
    __lsx_vstelm_w(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_w(dst0, dst, 0, 2);
    dst += dst_stride;
    __lsx_vstelm_w(dst1, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_w(dst1, dst, 0, 2);
}

void ff_tm_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                   const uint8_t *src_left, const uint8_t *src_top_ptr)
{
    uint8_t top_left = src_top_ptr[-1];
    __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i reg0, reg1;

    reg0 = __lsx_vreplgr2vr_h(top_left);
    reg1 = __lsx_vld(src_top_ptr, 0);
    DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2, src_left,
              3, tmp7, tmp6, tmp5, tmp4);
    DUP4_ARG2(__lsx_vldrepl_b, src_left, 4, src_left, 5, src_left, 6, src_left,
              7, tmp3, tmp2, tmp1, tmp0);
    DUP4_ARG2(__lsx_vilvl_b, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3, reg1,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vilvl_b, tmp4, reg1, tmp5, reg1, tmp6, reg1, tmp7, reg1,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vhaddw_hu_bu, src0, src0, src1, src1, src2, src2, src3,
              src3, src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vhaddw_hu_bu, src4, src4, src5, src5, src6, src6, src7,
              src7, src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vpickev_b, src1, src0, src3, src2, src5, src4, src7, src6,
              src0, src1, src2, src3);
    __lsx_vstelm_d(src0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(src0, dst, 0, 1);
    dst += dst_stride;
    __lsx_vstelm_d(src1, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(src1, dst, 0, 1);
    dst += dst_stride;
    __lsx_vstelm_d(src2, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(src2, dst, 0, 1);
    dst += dst_stride;
    __lsx_vstelm_d(src3, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(src3, dst, 0, 1);
}

void ff_tm_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                     const uint8_t *src_left, const uint8_t *src_top_ptr)
{
    uint8_t top_left = src_top_ptr[-1];
    __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    __m128i tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i reg0, reg1;
    ptrdiff_t stride2 = dst_stride << 1;
    ptrdiff_t stride3 = stride2 + dst_stride;
    ptrdiff_t stride4 = stride2 << 1;

    reg0 = __lsx_vreplgr2vr_h(top_left);
    reg1 = __lsx_vld(src_top_ptr, 0);
    DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2, src_left,
              3, tmp15, tmp14, tmp13, tmp12);
    DUP4_ARG2(__lsx_vldrepl_b, src_left, 4, src_left, 5, src_left, 6, src_left,
              7, tmp11, tmp10, tmp9, tmp8);
    DUP4_ARG2(__lsx_vldrepl_b, src_left, 8, src_left, 9, src_left, 10,
              src_left, 11, tmp7, tmp6, tmp5, tmp4);
    DUP4_ARG2(__lsx_vldrepl_b, src_left, 12, src_left, 13, src_left, 14,
              src_left, 15, tmp3, tmp2, tmp1, tmp0);
    DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3,
              reg1, src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3,
              reg1, src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7, src3,
              tmp0, tmp1, tmp2, tmp3);
    DUP4_ARG2(__lsx_vaddwev_h_bu, tmp4, reg1, tmp5, reg1, tmp6, reg1, tmp7,
              reg1, src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vaddwod_h_bu, tmp4, reg1, tmp5, reg1, tmp6, reg1, tmp7,
              reg1, src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7, src3,
              tmp4, tmp5, tmp6, tmp7);
    DUP4_ARG2(__lsx_vaddwev_h_bu, tmp8, reg1, tmp9, reg1, tmp10, reg1, tmp11,
              reg1, src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vaddwod_h_bu, tmp8, reg1, tmp9, reg1, tmp10, reg1, tmp11,
              reg1, src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7, src3,
              tmp8, tmp9, tmp10, tmp11);
    DUP4_ARG2(__lsx_vaddwev_h_bu, tmp12, reg1, tmp13, reg1, tmp14, reg1,
              tmp15, reg1, src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vaddwod_h_bu, tmp12, reg1, tmp13, reg1, tmp14, reg1,
              tmp15, reg1, src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7, src3,
              tmp12, tmp13, tmp14, tmp15);
    LSX_ST_8(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, dst,
             dst_stride, stride2, stride3, stride4);
    dst += stride4;
    LSX_ST_8(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, dst,
             dst_stride, stride2, stride3, stride4);
}

void ff_tm_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                     const uint8_t *src_left, const uint8_t *src_top_ptr)
{
    uint8_t top_left = src_top_ptr[-1];
    uint32_t loop_cnt;
    __m128i tmp0, tmp1, tmp2, tmp3, reg0, reg1, reg2;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;

    reg0 = __lsx_vreplgr2vr_h(top_left);
    DUP2_ARG2(__lsx_vld, src_top_ptr, 0, src_top_ptr, 16, reg1, reg2);

    src_left += 28;
    for (loop_cnt = 8; loop_cnt--;) {
        DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2,
                  src_left, 3, tmp3, tmp2, tmp1, tmp0);
        src_left -= 4;
        DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1,
                  tmp3, reg1, src0, src1, src2, src3);
        DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1,
                  tmp3, reg1, src4, src5, src6, src7);
        DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3,
                  reg0, src0, src1, src2, src3);
        DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7,
                  reg0, src4, src5, src6, src7);
        DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg2, tmp1, reg2, tmp2, reg2,
                  tmp3, reg2, dst0, dst1, dst2, dst3);
        DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg2, tmp1, reg2, tmp2, reg2,
                  tmp3, reg2, dst4, dst5, dst6, dst7);
        DUP4_ARG2(__lsx_vssub_hu, dst0, reg0, dst1, reg0, dst2, reg0, dst3,
                  reg0, dst0, dst1, dst2, dst3);
        DUP4_ARG2(__lsx_vssub_hu, dst4, reg0, dst5, reg0, dst6, reg0, dst7,
                  reg0, dst4, dst5, dst6, dst7);
        DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
                  src0, src1, src2, src3);
        DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
                  src4, src5, src6, src7);
        DUP4_ARG2(__lsx_vsat_hu, dst0, 7, dst1, 7, dst2, 7, dst3, 7,
                  dst0, dst1, dst2, dst3);
        DUP4_ARG2(__lsx_vsat_hu, dst4, 7, dst5, 7, dst6, 7, dst7, 7,
                  dst4, dst5, dst6, dst7);
        DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7,
                  src3, src0, src1, src2, src3);
        DUP4_ARG2(__lsx_vpackev_b, dst4, dst0, dst5, dst1, dst6, dst2, dst7,
                  dst3, dst0, dst1, dst2, dst3);
        __lsx_vst(src0, dst, 0);
        __lsx_vst(dst0, dst, 16);
        dst += dst_stride;
        __lsx_vst(src1, dst, 0);
        __lsx_vst(dst1, dst, 16);
        dst += dst_stride;
        __lsx_vst(src2, dst, 0);
        __lsx_vst(dst2, dst, 16);
        dst += dst_stride;
        __lsx_vst(src3, dst, 0);
        __lsx_vst(dst3, dst, 16);
        dst += dst_stride;
    }
}
File diff suppressed because it is too large
@@ -0,0 +1,97 @@
/*
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 * Contributed by Hao Chen <chenhao@loongson.cn>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/loongarch/cpu.h"
#include "libavutil/attributes.h"
#include "libavcodec/vp9dsp.h"
#include "vp9dsp_loongarch.h"

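/* dsp->mc is indexed as [size idx][filter][put/avg][sub-pel h][sub-pel v]:
 * as the expansions below show, idx1 0..4 selects 64/32/16/8/4-wide blocks,
 * idx2 selects put (0) or avg (1), and idxh/idxv flag fractional motion in
 * each direction. */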
#define init_subpel1(idx1, idx2, idxh, idxv, sz, dir, type)  \
    dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][idxh][idxv] =    \
        ff_##type##_8tap_smooth_##sz##dir##_lsx;              \
    dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][idxh][idxv] =    \
        ff_##type##_8tap_regular_##sz##dir##_lsx;             \
    dsp->mc[idx1][FILTER_8TAP_SHARP  ][idx2][idxh][idxv] =    \
        ff_##type##_8tap_sharp_##sz##dir##_lsx;

#define init_subpel2(idx, idxh, idxv, dir, type)      \
    init_subpel1(0, idx, idxh, idxv, 64, dir, type);  \
    init_subpel1(1, idx, idxh, idxv, 32, dir, type);  \
    init_subpel1(2, idx, idxh, idxv, 16, dir, type);  \
    init_subpel1(3, idx, idxh, idxv,  8, dir, type);  \
    init_subpel1(4, idx, idxh, idxv,  4, dir, type);

#define init_subpel3(idx, type)        \
    init_subpel2(idx, 1, 0, h, type);  \
    init_subpel2(idx, 0, 1, v, type);  \
    init_subpel2(idx, 1, 1, hv, type);

#define init_fpel(idx1, idx2, sz, type)                                    \
    dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][0][0] = ff_##type##sz##_lsx;  \
    dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][0][0] = ff_##type##sz##_lsx;  \
    dsp->mc[idx1][FILTER_8TAP_SHARP  ][idx2][0][0] = ff_##type##sz##_lsx;  \
    dsp->mc[idx1][FILTER_BILINEAR    ][idx2][0][0] = ff_##type##sz##_lsx;

#define init_copy(idx, sz)        \
    init_fpel(idx, 0, sz, copy);  \
    init_fpel(idx, 1, sz, avg);

#define init_intra_pred1_lsx(tx, sz)                            \
    dsp->intra_pred[tx][VERT_PRED]    = ff_vert_##sz##_lsx;     \
    dsp->intra_pred[tx][HOR_PRED]     = ff_hor_##sz##_lsx;      \
    dsp->intra_pred[tx][DC_PRED]      = ff_dc_##sz##_lsx;       \
    dsp->intra_pred[tx][LEFT_DC_PRED] = ff_dc_left_##sz##_lsx;  \
    dsp->intra_pred[tx][TOP_DC_PRED]  = ff_dc_top_##sz##_lsx;   \
    dsp->intra_pred[tx][DC_128_PRED]  = ff_dc_128_##sz##_lsx;   \
    dsp->intra_pred[tx][DC_127_PRED]  = ff_dc_127_##sz##_lsx;   \
    dsp->intra_pred[tx][DC_129_PRED]  = ff_dc_129_##sz##_lsx;   \
    dsp->intra_pred[tx][TM_VP8_PRED]  = ff_tm_##sz##_lsx;

#define init_intra_pred2_lsx(tx, sz)                            \
    dsp->intra_pred[tx][DC_PRED]      = ff_dc_##sz##_lsx;       \
    dsp->intra_pred[tx][LEFT_DC_PRED] = ff_dc_left_##sz##_lsx;  \
    dsp->intra_pred[tx][TOP_DC_PRED]  = ff_dc_top_##sz##_lsx;   \
    dsp->intra_pred[tx][TM_VP8_PRED]  = ff_tm_##sz##_lsx;

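/* Only the 8-bit decoder paths are replaced; 10/12-bit content keeps the
 * generic C implementations. */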
av_cold void ff_vp9dsp_init_loongarch(VP9DSPContext *dsp, int bpp)
{
    int cpu_flags = av_get_cpu_flags();
    if (have_lsx(cpu_flags))
        if (bpp == 8) {
            init_subpel3(0, put);
            init_subpel3(1, avg);
            init_copy(0, 64);
            init_copy(1, 32);
            init_copy(2, 16);
            init_copy(3, 8);
            init_intra_pred1_lsx(TX_16X16, 16x16);
            init_intra_pred1_lsx(TX_32X32, 32x32);
            init_intra_pred2_lsx(TX_4X4, 4x4);
            init_intra_pred2_lsx(TX_8X8, 8x8);
        }
}
#undef init_subpel1
#undef init_subpel2
#undef init_subpel3
#undef init_copy
#undef init_fpel
#undef init_intra_pred1_lsx
#undef init_intra_pred2_lsx
@@ -0,0 +1,144 @@
/*
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 * Contributed by Hao Chen <chenhao@loongson.cn>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_LOONGARCH_VP9DSP_LOONGARCH_H
#define AVCODEC_LOONGARCH_VP9DSP_LOONGARCH_H

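/* Each VP9_8TAP_LOONGARCH_LSX_FUNC expansion declares the put and avg
 * variants of the h, v and hv 8-tap MC functions for one block width;
 * VP9_COPY_LOONGARCH_LSX_FUNC declares the full-pel copy and avg helpers. */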
#define VP9_8TAP_LOONGARCH_LSX_FUNC(SIZE, type, type_idx)                    \
void ff_put_8tap_##type##_##SIZE##h_lsx(uint8_t *dst, ptrdiff_t dststride,   \
                                        const uint8_t *src,                  \
                                        ptrdiff_t srcstride,                 \
                                        int h, int mx, int my);              \
                                                                             \
void ff_put_8tap_##type##_##SIZE##v_lsx(uint8_t *dst, ptrdiff_t dststride,   \
                                        const uint8_t *src,                  \
                                        ptrdiff_t srcstride,                 \
                                        int h, int mx, int my);              \
                                                                             \
void ff_put_8tap_##type##_##SIZE##hv_lsx(uint8_t *dst, ptrdiff_t dststride,  \
                                         const uint8_t *src,                 \
                                         ptrdiff_t srcstride,                \
                                         int h, int mx, int my);             \
                                                                             \
void ff_avg_8tap_##type##_##SIZE##h_lsx(uint8_t *dst, ptrdiff_t dststride,   \
                                        const uint8_t *src,                  \
                                        ptrdiff_t srcstride,                 \
                                        int h, int mx, int my);              \
                                                                             \
void ff_avg_8tap_##type##_##SIZE##v_lsx(uint8_t *dst, ptrdiff_t dststride,   \
                                        const uint8_t *src,                  \
                                        ptrdiff_t srcstride,                 \
                                        int h, int mx, int my);              \
                                                                             \
void ff_avg_8tap_##type##_##SIZE##hv_lsx(uint8_t *dst, ptrdiff_t dststride,  \
                                         const uint8_t *src,                 \
                                         ptrdiff_t srcstride,                \
                                         int h, int mx, int my);

#define VP9_COPY_LOONGARCH_LSX_FUNC(SIZE)                          \
void ff_copy##SIZE##_lsx(uint8_t *dst, ptrdiff_t dststride,        \
                         const uint8_t *src, ptrdiff_t srcstride,  \
                         int h, int mx, int my);                   \
                                                                   \
void ff_avg##SIZE##_lsx(uint8_t *dst, ptrdiff_t dststride,         \
                        const uint8_t *src, ptrdiff_t srcstride,   \
                        int h, int mx, int my);

VP9_8TAP_LOONGARCH_LSX_FUNC(64, regular, FILTER_8TAP_REGULAR);
VP9_8TAP_LOONGARCH_LSX_FUNC(32, regular, FILTER_8TAP_REGULAR);
VP9_8TAP_LOONGARCH_LSX_FUNC(16, regular, FILTER_8TAP_REGULAR);
VP9_8TAP_LOONGARCH_LSX_FUNC(8, regular, FILTER_8TAP_REGULAR);
VP9_8TAP_LOONGARCH_LSX_FUNC(4, regular, FILTER_8TAP_REGULAR);

VP9_8TAP_LOONGARCH_LSX_FUNC(64, sharp, FILTER_8TAP_SHARP);
VP9_8TAP_LOONGARCH_LSX_FUNC(32, sharp, FILTER_8TAP_SHARP);
VP9_8TAP_LOONGARCH_LSX_FUNC(16, sharp, FILTER_8TAP_SHARP);
VP9_8TAP_LOONGARCH_LSX_FUNC(8, sharp, FILTER_8TAP_SHARP);
VP9_8TAP_LOONGARCH_LSX_FUNC(4, sharp, FILTER_8TAP_SHARP);

VP9_8TAP_LOONGARCH_LSX_FUNC(64, smooth, FILTER_8TAP_SMOOTH);
VP9_8TAP_LOONGARCH_LSX_FUNC(32, smooth, FILTER_8TAP_SMOOTH);
VP9_8TAP_LOONGARCH_LSX_FUNC(16, smooth, FILTER_8TAP_SMOOTH);
VP9_8TAP_LOONGARCH_LSX_FUNC(8, smooth, FILTER_8TAP_SMOOTH);
VP9_8TAP_LOONGARCH_LSX_FUNC(4, smooth, FILTER_8TAP_SMOOTH);

VP9_COPY_LOONGARCH_LSX_FUNC(64);
VP9_COPY_LOONGARCH_LSX_FUNC(32);
VP9_COPY_LOONGARCH_LSX_FUNC(16);
VP9_COPY_LOONGARCH_LSX_FUNC(8);

#undef VP9_8TAP_LOONGARCH_LSX_FUNC
#undef VP9_COPY_LOONGARCH_LSX_FUNC

void ff_vert_16x16_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
                       const uint8_t *top);
void ff_vert_32x32_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
                       const uint8_t *top);
void ff_hor_16x16_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
                      const uint8_t *top);
void ff_hor_32x32_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
                      const uint8_t *top);
void ff_dc_4x4_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
                   const uint8_t *top);
void ff_dc_8x8_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
                   const uint8_t *top);
void ff_dc_16x16_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
                     const uint8_t *top);
void ff_dc_32x32_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
                     const uint8_t *top);
void ff_dc_left_4x4_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
                        const uint8_t *top);
void ff_dc_left_8x8_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
                        const uint8_t *top);
void ff_dc_left_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
                          const uint8_t *left, const uint8_t *top);
void ff_dc_left_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
                          const uint8_t *left, const uint8_t *top);
void ff_dc_top_4x4_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
                       const uint8_t *top);
void ff_dc_top_8x8_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
                       const uint8_t *top);
void ff_dc_top_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top);
void ff_dc_top_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top);
void ff_dc_128_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top);
void ff_dc_128_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top);
void ff_dc_127_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top);
void ff_dc_127_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top);
void ff_dc_129_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top);
void ff_dc_129_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top);
void ff_tm_4x4_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
                   const uint8_t *top);
void ff_tm_8x8_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
                   const uint8_t *top);
void ff_tm_16x16_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
                     const uint8_t *top);
void ff_tm_32x32_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
                     const uint8_t *top);

#endif /* AVCODEC_LOONGARCH_VP9DSP_LOONGARCH_H */