@@ -159,300 +159,6 @@ static const uint8_t luma_mask_arr[16 * 8] = {
    out0_m; \
} )
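
/* Horizontal half-pel luma filter for 4-wide blocks: applies the 6-tap
 * H.264 kernel (1, -5, 20, 20, -5, 1) via byte shuffles and dot products,
 * then rounds (SRARI by 5) and saturates to 8 bits, four rows at a time. */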
static void avc_luma_hz_4w_msa(const uint8_t *src, int32_t src_stride,
                               uint8_t *dst, int32_t dst_stride,
                               int32_t height)
{
    uint32_t loop_cnt;
    v16i8 src0, src1, src2, src3;
    v8i16 res0, res1;
    v16u8 out;
    v16i8 mask0, mask1, mask2;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
    v16i8 minus5b = __msa_ldi_b(-5);
    v16i8 plus20b = __msa_ldi_b(20);

    LD_SB3(&luma_mask_arr[48], 16, mask0, mask1, mask2);
    for (loop_cnt = (height >> 2); loop_cnt--;) {
        LD_SB4(src, src_stride, src0, src1, src2, src3);
        src += (4 * src_stride);

        XORI_B4_128_SB(src0, src1, src2, src3);
        VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0, vec1);
        HADD_SB2_SH(vec0, vec1, res0, res1);
        VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2, vec3);
        DPADD_SB2_SH(vec2, vec3, minus5b, minus5b, res0, res1);
        VSHF_B2_SB(src0, src1, src2, src3, mask2, mask2, vec4, vec5);
        DPADD_SB2_SH(vec4, vec5, plus20b, plus20b, res0, res1);
        SRARI_H2_SH(res0, res1, 5);
        SAT_SH2_SH(res0, res1, 7);
        out = PCKEV_XORI128_UB(res0, res1);
        ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
        dst += (4 * dst_stride);
    }
}
static void avc_luma_hz_8w_msa(const uint8_t *src, int32_t src_stride,
                               uint8_t *dst, int32_t dst_stride,
                               int32_t height)
{
    uint32_t loop_cnt;
    v16i8 src0, src1, src2, src3;
    v8i16 res0, res1, res2, res3;
    v16i8 mask0, mask1, mask2;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
    v16i8 vec6, vec7, vec8, vec9, vec10, vec11;
    v16i8 minus5b = __msa_ldi_b(-5);
    v16i8 plus20b = __msa_ldi_b(20);
    v16u8 out0, out1;

    LD_SB3(&luma_mask_arr[0], 16, mask0, mask1, mask2);

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        LD_SB4(src, src_stride, src0, src1, src2, src3);
        src += (4 * src_stride);

        XORI_B4_128_SB(src0, src1, src2, src3);
        VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0, vec1);
        VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2, vec3);
        HADD_SB4_SH(vec0, vec1, vec2, vec3, res0, res1, res2, res3);
        VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec4, vec5);
        VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec6, vec7);
        DPADD_SB4_SH(vec4, vec5, vec6, vec7, minus5b, minus5b, minus5b, minus5b,
                     res0, res1, res2, res3);
        VSHF_B2_SB(src0, src0, src1, src1, mask2, mask2, vec8, vec9);
        VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec10, vec11);
        DPADD_SB4_SH(vec8, vec9, vec10, vec11, plus20b, plus20b, plus20b,
                     plus20b, res0, res1, res2, res3);
        SRARI_H4_SH(res0, res1, res2, res3, 5);
        SAT_SH4_SH(res0, res1, res2, res3, 7);
        out0 = PCKEV_XORI128_UB(res0, res1);
        out1 = PCKEV_XORI128_UB(res2, res3);
        ST8x4_UB(out0, out1, dst, dst_stride);
        dst += (4 * dst_stride);
    }
}
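
/* Horizontal half-pel filter for 16-wide blocks: each row is loaded as two
 * 8-byte halves and filtered the same way, four rows per iteration. */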
static void avc_luma_hz_16w_msa(const uint8_t *src, int32_t src_stride,
                                uint8_t *dst, int32_t dst_stride,
                                int32_t height)
{
    uint32_t loop_cnt;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
    v8i16 res0, res1, res2, res3, res4, res5, res6, res7;
    v16i8 mask0, mask1, mask2;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
    v16i8 vec6, vec7, vec8, vec9, vec10, vec11;
    v16i8 minus5b = __msa_ldi_b(-5);
    v16i8 plus20b = __msa_ldi_b(20);

    LD_SB3(&luma_mask_arr[0], 16, mask0, mask1, mask2);

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        LD_SB2(src, 8, src0, src1);
        src += src_stride;
        LD_SB2(src, 8, src2, src3);
        src += src_stride;

        XORI_B4_128_SB(src0, src1, src2, src3);
        VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0, vec3);
        VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec6, vec9);
        VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec1, vec4);
        VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec7, vec10);
        VSHF_B2_SB(src0, src0, src1, src1, mask2, mask2, vec2, vec5);
        VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec8, vec11);
        HADD_SB4_SH(vec0, vec3, vec6, vec9, res0, res1, res2, res3);
        DPADD_SB4_SH(vec1, vec4, vec7, vec10, minus5b, minus5b, minus5b,
                     minus5b, res0, res1, res2, res3);
        DPADD_SB4_SH(vec2, vec5, vec8, vec11, plus20b, plus20b, plus20b,
                     plus20b, res0, res1, res2, res3);

        LD_SB2(src, 8, src4, src5);
        src += src_stride;
        LD_SB2(src, 8, src6, src7);
        src += src_stride;

        XORI_B4_128_SB(src4, src5, src6, src7);
        VSHF_B2_SB(src4, src4, src5, src5, mask0, mask0, vec0, vec3);
        VSHF_B2_SB(src6, src6, src7, src7, mask0, mask0, vec6, vec9);
        VSHF_B2_SB(src4, src4, src5, src5, mask1, mask1, vec1, vec4);
        VSHF_B2_SB(src6, src6, src7, src7, mask1, mask1, vec7, vec10);
        VSHF_B2_SB(src4, src4, src5, src5, mask2, mask2, vec2, vec5);
        VSHF_B2_SB(src6, src6, src7, src7, mask2, mask2, vec8, vec11);
        HADD_SB4_SH(vec0, vec3, vec6, vec9, res4, res5, res6, res7);
        DPADD_SB4_SH(vec1, vec4, vec7, vec10, minus5b, minus5b, minus5b,
                     minus5b, res4, res5, res6, res7);
        DPADD_SB4_SH(vec2, vec5, vec8, vec11, plus20b, plus20b, plus20b,
                     plus20b, res4, res5, res6, res7);
        SRARI_H4_SH(res0, res1, res2, res3, 5);
        SRARI_H4_SH(res4, res5, res6, res7, 5);
        SAT_SH4_SH(res0, res1, res2, res3, 7);
        SAT_SH4_SH(res4, res5, res6, res7, 7);
        PCKEV_B4_SB(res1, res0, res3, res2, res5, res4, res7, res6,
                    vec0, vec1, vec2, vec3);
        XORI_B4_128_SB(vec0, vec1, vec2, vec3);

        ST_SB4(vec0, vec1, vec2, vec3, dst, dst_stride);
        dst += (4 * dst_stride);
    }
}
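
/* Horizontal quarter-pel for 4-wide blocks: the half-pel result is averaged
 * with the source bytes slid by (2 + hor_offset), i.e. the full-pel column
 * nearest the requested quarter-pel position. */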
static void avc_luma_hz_qrt_4w_msa(const uint8_t *src, int32_t src_stride,
                                   uint8_t *dst, int32_t dst_stride,
                                   int32_t height, uint8_t hor_offset)
{
    uint8_t slide;
    uint32_t loop_cnt;
    v16i8 src0, src1, src2, src3;
    v8i16 res0, res1;
    v16i8 res, mask0, mask1, mask2;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
    v16i8 minus5b = __msa_ldi_b(-5);
    v16i8 plus20b = __msa_ldi_b(20);

    LD_SB3(&luma_mask_arr[48], 16, mask0, mask1, mask2);
    slide = 2 + hor_offset;

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        LD_SB4(src, src_stride, src0, src1, src2, src3);
        src += (4 * src_stride);

        XORI_B4_128_SB(src0, src1, src2, src3);
        VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0, vec1);
        HADD_SB2_SH(vec0, vec1, res0, res1);
        VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2, vec3);
        DPADD_SB2_SH(vec2, vec3, minus5b, minus5b, res0, res1);
        VSHF_B2_SB(src0, src1, src2, src3, mask2, mask2, vec4, vec5);
        DPADD_SB2_SH(vec4, vec5, plus20b, plus20b, res0, res1);
        SRARI_H2_SH(res0, res1, 5);
        SAT_SH2_SH(res0, res1, 7);

        res = __msa_pckev_b((v16i8) res1, (v16i8) res0);
        src0 = __msa_sld_b(src0, src0, slide);
        src1 = __msa_sld_b(src1, src1, slide);
        src2 = __msa_sld_b(src2, src2, slide);
        src3 = __msa_sld_b(src3, src3, slide);
        src0 = (v16i8) __msa_insve_w((v4i32) src0, 1, (v4i32) src1);
        src1 = (v16i8) __msa_insve_w((v4i32) src2, 1, (v4i32) src3);
        src0 = (v16i8) __msa_insve_d((v2i64) src0, 1, (v2i64) src1);
        res = __msa_aver_s_b(res, src0);
        res = (v16i8) __msa_xori_b((v16u8) res, 128);

        ST4x4_UB(res, res, 0, 1, 2, 3, dst, dst_stride);
        dst += (4 * dst_stride);
    }
}
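
/* Horizontal quarter-pel for 8-wide blocks; same filter-then-average scheme
 * as the 4-wide version. */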
static void avc_luma_hz_qrt_8w_msa(const uint8_t *src, int32_t src_stride,
                                   uint8_t *dst, int32_t dst_stride,
                                   int32_t height, uint8_t hor_offset)
{
    uint8_t slide;
    uint32_t loop_cnt;
    v16i8 src0, src1, src2, src3;
    v16i8 tmp0, tmp1;
    v8i16 res0, res1, res2, res3;
    v16i8 mask0, mask1, mask2;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
    v16i8 vec6, vec7, vec8, vec9, vec10, vec11;
    v16i8 minus5b = __msa_ldi_b(-5);
    v16i8 plus20b = __msa_ldi_b(20);

    LD_SB3(&luma_mask_arr[0], 16, mask0, mask1, mask2);
    slide = 2 + hor_offset;

    for (loop_cnt = height >> 2; loop_cnt--;) {
        LD_SB4(src, src_stride, src0, src1, src2, src3);
        src += (4 * src_stride);

        XORI_B4_128_SB(src0, src1, src2, src3);
        VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0, vec1);
        VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2, vec3);
        HADD_SB4_SH(vec0, vec1, vec2, vec3, res0, res1, res2, res3);
        VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec4, vec5);
        VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec6, vec7);
        DPADD_SB4_SH(vec4, vec5, vec6, vec7, minus5b, minus5b, minus5b, minus5b,
                     res0, res1, res2, res3);
        VSHF_B2_SB(src0, src0, src1, src1, mask2, mask2, vec8, vec9);
        VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec10, vec11);
        DPADD_SB4_SH(vec8, vec9, vec10, vec11, plus20b, plus20b, plus20b,
                     plus20b, res0, res1, res2, res3);

        src0 = __msa_sld_b(src0, src0, slide);
        src1 = __msa_sld_b(src1, src1, slide);
        src2 = __msa_sld_b(src2, src2, slide);
        src3 = __msa_sld_b(src3, src3, slide);

        SRARI_H4_SH(res0, res1, res2, res3, 5);
        SAT_SH4_SH(res0, res1, res2, res3, 7);
        PCKEV_B2_SB(res1, res0, res3, res2, tmp0, tmp1);
        PCKEV_D2_SB(src1, src0, src3, src2, src0, src1);

        tmp0 = __msa_aver_s_b(tmp0, src0);
        tmp1 = __msa_aver_s_b(tmp1, src1);

        XORI_B2_128_SB(tmp0, tmp1);
        ST8x4_UB(tmp0, tmp1, dst, dst_stride);

        dst += (4 * dst_stride);
    }
}
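
/* Horizontal quarter-pel for 16-wide blocks: hor_offset picks one of two
 * shuffle masks that align the full-pel samples for the final average. */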
static void avc_luma_hz_qrt_16w_msa(const uint8_t *src, int32_t src_stride,
                                    uint8_t *dst, int32_t dst_stride,
                                    int32_t height, uint8_t hor_offset)
{
    uint32_t loop_cnt;
    v16i8 dst0, dst1;
    v16i8 src0, src1, src2, src3;
    v16i8 mask0, mask1, mask2, vshf;
    v8i16 res0, res1, res2, res3;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
    v16i8 vec6, vec7, vec8, vec9, vec10, vec11;
    v16i8 minus5b = __msa_ldi_b(-5);
    v16i8 plus20b = __msa_ldi_b(20);

    LD_SB3(&luma_mask_arr[0], 16, mask0, mask1, mask2);

    if (hor_offset) {
        vshf = LD_SB(&luma_mask_arr[16 + 96]);
    } else {
        vshf = LD_SB(&luma_mask_arr[96]);
    }

    for (loop_cnt = height >> 1; loop_cnt--;) {
        LD_SB2(src, 8, src0, src1);
        src += src_stride;
        LD_SB2(src, 8, src2, src3);
        src += src_stride;

        XORI_B4_128_SB(src0, src1, src2, src3);
        VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0, vec3);
        VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec6, vec9);
        VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec1, vec4);
        VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec7, vec10);
        VSHF_B2_SB(src0, src0, src1, src1, mask2, mask2, vec2, vec5);
        VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec8, vec11);
        HADD_SB4_SH(vec0, vec3, vec6, vec9, res0, res1, res2, res3);
        DPADD_SB4_SH(vec1, vec4, vec7, vec10, minus5b, minus5b, minus5b,
                     minus5b, res0, res1, res2, res3);
        DPADD_SB4_SH(vec2, vec5, vec8, vec11, plus20b, plus20b, plus20b,
                     plus20b, res0, res1, res2, res3);
        VSHF_B2_SB(src0, src1, src2, src3, vshf, vshf, src0, src2);
        SRARI_H4_SH(res0, res1, res2, res3, 5);
        SAT_SH4_SH(res0, res1, res2, res3, 7);
        PCKEV_B2_SB(res1, res0, res3, res2, dst0, dst1);

        dst0 = __msa_aver_s_b(dst0, src0);
        dst1 = __msa_aver_s_b(dst1, src2);

        XORI_B2_128_SB(dst0, dst1);

        ST_SB2(dst0, dst1, dst, dst_stride);
        dst += (2 * dst_stride);
    }
}
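
/* Vertical half-pel luma filter: the same 6-tap kernel applied down
 * columns using byte interleaves and dot products. */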
static void avc_luma_vt_4w_msa(const uint8_t *src, int32_t src_stride,
                               uint8_t *dst, int32_t dst_stride,
                               int32_t height)
@@ -623,217 +329,6 @@ static void avc_luma_vt_16w_msa(const uint8_t *src, int32_t src_stride,
    }
}
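
/* Vertical quarter-pel for 4-wide blocks: filt0/filt1/filt2 hold the tap
 * pairs (1, -5), (20, 20) and (-5, 1); the half-pel output is averaged
 * with the full-pel rows selected by ver_offset. */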
static void avc_luma_vt_qrt_4w_msa(const uint8_t *src, int32_t src_stride,
                                   uint8_t *dst, int32_t dst_stride,
                                   int32_t height, uint8_t ver_offset)
{
    int32_t loop_cnt;
    int16_t filt_const0 = 0xfb01;
    int16_t filt_const1 = 0x1414;
    int16_t filt_const2 = 0x1fb;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
    v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r, src65_r;
    v16i8 src87_r, src2110, src4332, src6554, src8776;
    v8i16 out10, out32;
    v16i8 filt0, filt1, filt2;
    v16u8 out;

    filt0 = (v16i8) __msa_fill_h(filt_const0);
    filt1 = (v16i8) __msa_fill_h(filt_const1);
    filt2 = (v16i8) __msa_fill_h(filt_const2);

    LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
    src += (5 * src_stride);

    ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3,
               src10_r, src21_r, src32_r, src43_r);
    ILVR_D2_SB(src21_r, src10_r, src43_r, src32_r, src2110, src4332);
    XORI_B2_128_SB(src2110, src4332);

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        LD_SB4(src, src_stride, src5, src6, src7, src8);
        src += (4 * src_stride);

        ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7,
                   src54_r, src65_r, src76_r, src87_r);
        ILVR_D2_SB(src65_r, src54_r, src87_r, src76_r, src6554, src8776);
        XORI_B2_128_SB(src6554, src8776);
        out10 = DPADD_SH3_SH(src2110, src4332, src6554, filt0, filt1, filt2);
        out32 = DPADD_SH3_SH(src4332, src6554, src8776, filt0, filt1, filt2);
        SRARI_H2_SH(out10, out32, 5);
        SAT_SH2_SH(out10, out32, 7);

        out = PCKEV_XORI128_UB(out10, out32);

        if (ver_offset) {
            src32_r = (v16i8) __msa_insve_w((v4i32) src3, 1, (v4i32) src4);
            src54_r = (v16i8) __msa_insve_w((v4i32) src5, 1, (v4i32) src6);
        } else {
            src32_r = (v16i8) __msa_insve_w((v4i32) src2, 1, (v4i32) src3);
            src54_r = (v16i8) __msa_insve_w((v4i32) src4, 1, (v4i32) src5);
        }

        src32_r = (v16i8) __msa_insve_d((v2i64) src32_r, 1, (v2i64) src54_r);
        out = __msa_aver_u_b(out, (v16u8) src32_r);

        ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
        dst += (4 * dst_stride);
        src2110 = src6554;
        src4332 = src8776;
        src2 = src6;
        src3 = src7;
        src4 = src8;
    }
}
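
/* Vertical quarter-pel for 8-wide blocks; only the right-interleaved
 * halves are needed for 8 output columns. */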
static void avc_luma_vt_qrt_8w_msa(const uint8_t *src, int32_t src_stride,
                                   uint8_t *dst, int32_t dst_stride,
                                   int32_t height, uint8_t ver_offset)
{
    int32_t loop_cnt;
    int16_t filt_const0 = 0xfb01;
    int16_t filt_const1 = 0x1414;
    int16_t filt_const2 = 0x1fb;
    v16i8 src0, src1, src2, src3, src4, src7, src8, src9, src10;
    v16i8 src10_r, src32_r, src76_r, src98_r;
    v16i8 src21_r, src43_r, src87_r, src109_r;
    v8i16 out0_r, out1_r, out2_r, out3_r;
    v16i8 res0, res1;
    v16i8 filt0, filt1, filt2;

    filt0 = (v16i8) __msa_fill_h(filt_const0);
    filt1 = (v16i8) __msa_fill_h(filt_const1);
    filt2 = (v16i8) __msa_fill_h(filt_const2);

    LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
    src += (5 * src_stride);

    XORI_B5_128_SB(src0, src1, src2, src3, src4);
    ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3,
               src10_r, src21_r, src32_r, src43_r);

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        LD_SB4(src, src_stride, src7, src8, src9, src10);
        src += (4 * src_stride);

        XORI_B4_128_SB(src7, src8, src9, src10);
        ILVR_B4_SB(src7, src4, src8, src7, src9, src8, src10, src9,
                   src76_r, src87_r, src98_r, src109_r);
        out0_r = DPADD_SH3_SH(src10_r, src32_r, src76_r, filt0, filt1, filt2);
        out1_r = DPADD_SH3_SH(src21_r, src43_r, src87_r, filt0, filt1, filt2);
        out2_r = DPADD_SH3_SH(src32_r, src76_r, src98_r, filt0, filt1, filt2);
        out3_r = DPADD_SH3_SH(src43_r, src87_r, src109_r, filt0, filt1, filt2);
        SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, 5);
        SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7);
        PCKEV_B2_SB(out1_r, out0_r, out3_r, out2_r, res0, res1);

        if (ver_offset) {
            PCKEV_D2_SB(src4, src3, src8, src7, src10_r, src32_r);
        } else {
            PCKEV_D2_SB(src3, src2, src7, src4, src10_r, src32_r);
        }

        res0 = __msa_aver_s_b(res0, (v16i8) src10_r);
        res1 = __msa_aver_s_b(res1, (v16i8) src32_r);

        XORI_B2_128_SB(res0, res1);
        ST8x4_UB(res0, res1, dst, dst_stride);

        dst += (4 * dst_stride);
        src10_r = src76_r;
        src32_r = src98_r;
        src21_r = src87_r;
        src43_r = src109_r;
        src2 = src8;
        src3 = src9;
        src4 = src10;
    }
}
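
/* Vertical quarter-pel for 16-wide blocks: both interleave halves (ILVR
 * and ILVL) are filtered, then averaged with the rows picked by
 * ver_offset. */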
static void avc_luma_vt_qrt_16w_msa(const uint8_t *src, int32_t src_stride,
                                    uint8_t *dst, int32_t dst_stride,
                                    int32_t height, uint8_t ver_offset)
{
    int32_t loop_cnt;
    int16_t filt_const0 = 0xfb01;
    int16_t filt_const1 = 0x1414;
    int16_t filt_const2 = 0x1fb;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
    v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r, src65_r;
    v16i8 src87_r, src10_l, src32_l, src54_l, src76_l, src21_l, src43_l;
    v16i8 src65_l, src87_l;
    v8i16 out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l;
    v16u8 res0, res1, res2, res3;
    v16i8 filt0, filt1, filt2;

    filt0 = (v16i8) __msa_fill_h(filt_const0);
    filt1 = (v16i8) __msa_fill_h(filt_const1);
    filt2 = (v16i8) __msa_fill_h(filt_const2);

    LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
    src += (5 * src_stride);

    XORI_B5_128_SB(src0, src1, src2, src3, src4);
    ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3,
               src10_r, src21_r, src32_r, src43_r);
    ILVL_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3,
               src10_l, src21_l, src32_l, src43_l);

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        LD_SB4(src, src_stride, src5, src6, src7, src8);
        src += (4 * src_stride);

        XORI_B4_128_SB(src5, src6, src7, src8);
        ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7,
                   src54_r, src65_r, src76_r, src87_r);
        ILVL_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7,
                   src54_l, src65_l, src76_l, src87_l);
        out0_r = DPADD_SH3_SH(src10_r, src32_r, src54_r, filt0, filt1, filt2);
        out1_r = DPADD_SH3_SH(src21_r, src43_r, src65_r, filt0, filt1, filt2);
        out2_r = DPADD_SH3_SH(src32_r, src54_r, src76_r, filt0, filt1, filt2);
        out3_r = DPADD_SH3_SH(src43_r, src65_r, src87_r, filt0, filt1, filt2);
        out0_l = DPADD_SH3_SH(src10_l, src32_l, src54_l, filt0, filt1, filt2);
        out1_l = DPADD_SH3_SH(src21_l, src43_l, src65_l, filt0, filt1, filt2);
        out2_l = DPADD_SH3_SH(src32_l, src54_l, src76_l, filt0, filt1, filt2);
        out3_l = DPADD_SH3_SH(src43_l, src65_l, src87_l, filt0, filt1, filt2);
        SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, 5);
        SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7);
        SRARI_H4_SH(out0_l, out1_l, out2_l, out3_l, 5);
        SAT_SH4_SH(out0_l, out1_l, out2_l, out3_l, 7);
        PCKEV_B4_UB(out0_l, out0_r, out1_l, out1_r, out2_l, out2_r, out3_l,
                    out3_r, res0, res1, res2, res3);

        if (ver_offset) {
            res0 = (v16u8) __msa_aver_s_b((v16i8) res0, src3);
            res1 = (v16u8) __msa_aver_s_b((v16i8) res1, src4);
            res2 = (v16u8) __msa_aver_s_b((v16i8) res2, src5);
            res3 = (v16u8) __msa_aver_s_b((v16i8) res3, src6);
        } else {
            res0 = (v16u8) __msa_aver_s_b((v16i8) res0, src2);
            res1 = (v16u8) __msa_aver_s_b((v16i8) res1, src3);
            res2 = (v16u8) __msa_aver_s_b((v16i8) res2, src4);
            res3 = (v16u8) __msa_aver_s_b((v16i8) res3, src5);
        }

        XORI_B4_128_UB(res0, res1, res2, res3);
        ST_UB4(res0, res1, res2, res3, dst, dst_stride);

        dst += (4 * dst_stride);

        src10_r = src54_r;
        src32_r = src76_r;
        src21_r = src65_r;
        src43_r = src87_r;
        src10_l = src54_l;
        src32_l = src76_l;
        src21_l = src65_l;
        src43_l = src87_l;
        src2 = src6;
        src3 = src7;
        src4 = src8;
    }
}
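
/* 2D (horizontal then vertical) 6-tap filter for 4-wide blocks. */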
static void avc_luma_mid_4w_msa(const uint8_t *src, int32_t src_stride,
                                uint8_t *dst, int32_t dst_stride,
                                int32_t height)
@@ -2733,247 +2228,6 @@ static void avc_luma_hv_qrt_and_aver_dst_16x16_msa(const uint8_t *src_x,
    }
}
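
/* 8-byte-wide block copy: rows are loaded as vectors and stored back as
 * 64-bit scalars, with unrolled paths for heights divisible by 12, 8, 4
 * and 2. */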
static void copy_width8_msa(const uint8_t *src, int32_t src_stride,
                            uint8_t *dst, int32_t dst_stride,
                            int32_t height)
{
    int32_t cnt;
    uint64_t out0, out1, out2, out3, out4, out5, out6, out7;
    v16u8 src0, src1, src2, src3, src4, src5, src6, src7;

    if (0 == height % 12) {
        for (cnt = (height / 12); cnt--;) {
            LD_UB8(src, src_stride,
                   src0, src1, src2, src3, src4, src5, src6, src7);
            src += (8 * src_stride);

            out0 = __msa_copy_u_d((v2i64) src0, 0);
            out1 = __msa_copy_u_d((v2i64) src1, 0);
            out2 = __msa_copy_u_d((v2i64) src2, 0);
            out3 = __msa_copy_u_d((v2i64) src3, 0);
            out4 = __msa_copy_u_d((v2i64) src4, 0);
            out5 = __msa_copy_u_d((v2i64) src5, 0);
            out6 = __msa_copy_u_d((v2i64) src6, 0);
            out7 = __msa_copy_u_d((v2i64) src7, 0);

            SD4(out0, out1, out2, out3, dst, dst_stride);
            dst += (4 * dst_stride);
            SD4(out4, out5, out6, out7, dst, dst_stride);
            dst += (4 * dst_stride);

            LD_UB4(src, src_stride, src0, src1, src2, src3);
            src += (4 * src_stride);

            out0 = __msa_copy_u_d((v2i64) src0, 0);
            out1 = __msa_copy_u_d((v2i64) src1, 0);
            out2 = __msa_copy_u_d((v2i64) src2, 0);
            out3 = __msa_copy_u_d((v2i64) src3, 0);

            SD4(out0, out1, out2, out3, dst, dst_stride);
            dst += (4 * dst_stride);
        }
    } else if (0 == height % 8) {
        for (cnt = height >> 3; cnt--;) {
            LD_UB8(src, src_stride,
                   src0, src1, src2, src3, src4, src5, src6, src7);
            src += (8 * src_stride);

            out0 = __msa_copy_u_d((v2i64) src0, 0);
            out1 = __msa_copy_u_d((v2i64) src1, 0);
            out2 = __msa_copy_u_d((v2i64) src2, 0);
            out3 = __msa_copy_u_d((v2i64) src3, 0);
            out4 = __msa_copy_u_d((v2i64) src4, 0);
            out5 = __msa_copy_u_d((v2i64) src5, 0);
            out6 = __msa_copy_u_d((v2i64) src6, 0);
            out7 = __msa_copy_u_d((v2i64) src7, 0);

            SD4(out0, out1, out2, out3, dst, dst_stride);
            dst += (4 * dst_stride);
            SD4(out4, out5, out6, out7, dst, dst_stride);
            dst += (4 * dst_stride);
        }
    } else if (0 == height % 4) {
        for (cnt = (height / 4); cnt--;) {
            LD_UB4(src, src_stride, src0, src1, src2, src3);
            src += (4 * src_stride);
            out0 = __msa_copy_u_d((v2i64) src0, 0);
            out1 = __msa_copy_u_d((v2i64) src1, 0);
            out2 = __msa_copy_u_d((v2i64) src2, 0);
            out3 = __msa_copy_u_d((v2i64) src3, 0);

            SD4(out0, out1, out2, out3, dst, dst_stride);
            dst += (4 * dst_stride);
        }
    } else if (0 == height % 2) {
        for (cnt = (height / 2); cnt--;) {
            LD_UB2(src, src_stride, src0, src1);
            src += (2 * src_stride);
            out0 = __msa_copy_u_d((v2i64) src0, 0);
            out1 = __msa_copy_u_d((v2i64) src1, 0);

            SD(out0, dst);
            dst += dst_stride;
            SD(out1, dst);
            dst += dst_stride;
        }
    }
}
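
/* Copy for widths that are multiples of 16 and heights that are multiples
 * of 8: walks 16-column strips, eight rows at a time. */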
static void copy_16multx8mult_msa(const uint8_t *src, int32_t src_stride,
                                  uint8_t *dst, int32_t dst_stride,
                                  int32_t height, int32_t width)
{
    int32_t cnt, loop_cnt;
    const uint8_t *src_tmp;
    uint8_t *dst_tmp;
    v16u8 src0, src1, src2, src3, src4, src5, src6, src7;

    for (cnt = (width >> 4); cnt--;) {
        src_tmp = src;
        dst_tmp = dst;

        for (loop_cnt = (height >> 3); loop_cnt--;) {
            LD_UB8(src_tmp, src_stride,
                   src0, src1, src2, src3, src4, src5, src6, src7);
            src_tmp += (8 * src_stride);

            ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7,
                   dst_tmp, dst_stride);
            dst_tmp += (8 * dst_stride);
        }

        src += 16;
        dst += 16;
    }
}
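
/* 16-byte-wide block copy, delegating to copy_16multx8mult_msa when the
 * height is a multiple of 8. */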
static void copy_width16_msa(const uint8_t *src, int32_t src_stride,
                             uint8_t *dst, int32_t dst_stride,
                             int32_t height)
{
    int32_t cnt;
    v16u8 src0, src1, src2, src3, src4, src5, src6, src7;

    if (0 == height % 12) {
        for (cnt = (height / 12); cnt--;) {
            LD_UB8(src, src_stride,
                   src0, src1, src2, src3, src4, src5, src6, src7);
            src += (8 * src_stride);
            ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7,
                   dst, dst_stride);
            dst += (8 * dst_stride);

            LD_UB4(src, src_stride, src0, src1, src2, src3);
            src += (4 * src_stride);
            ST_UB4(src0, src1, src2, src3, dst, dst_stride);
            dst += (4 * dst_stride);
        }
    } else if (0 == height % 8) {
        copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 16);
    } else if (0 == height % 4) {
        for (cnt = (height >> 2); cnt--;) {
            LD_UB4(src, src_stride, src0, src1, src2, src3);
            src += (4 * src_stride);

            ST_UB4(src0, src1, src2, src3, dst, dst_stride);
            dst += (4 * dst_stride);
        }
    }
}
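
/* Width-4 src/dst averaging: AVER_UB computes (src + dst + 1) >> 1 per
 * byte, and the low 32 bits of each averaged row are stored. */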
static void avg_width4_msa(const uint8_t *src, int32_t src_stride,
                           uint8_t *dst, int32_t dst_stride,
                           int32_t height)
{
    int32_t cnt;
    uint32_t out0, out1, out2, out3;
    v16u8 src0, src1, src2, src3;
    v16u8 dst0, dst1, dst2, dst3;

    if (0 == (height % 4)) {
        for (cnt = (height / 4); cnt--;) {
            LD_UB4(src, src_stride, src0, src1, src2, src3);
            src += (4 * src_stride);

            LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);

            AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3,
                        dst0, dst1, dst2, dst3);

            out0 = __msa_copy_u_w((v4i32) dst0, 0);
            out1 = __msa_copy_u_w((v4i32) dst1, 0);
            out2 = __msa_copy_u_w((v4i32) dst2, 0);
            out3 = __msa_copy_u_w((v4i32) dst3, 0);
            SW4(out0, out1, out2, out3, dst, dst_stride);
            dst += (4 * dst_stride);
        }
    } else if (0 == (height % 2)) {
        for (cnt = (height / 2); cnt--;) {
            LD_UB2(src, src_stride, src0, src1);
            src += (2 * src_stride);

            LD_UB2(dst, dst_stride, dst0, dst1);

            AVER_UB2_UB(src0, dst0, src1, dst1, dst0, dst1);

            out0 = __msa_copy_u_w((v4i32) dst0, 0);
            out1 = __msa_copy_u_w((v4i32) dst1, 0);
            SW(out0, dst);
            dst += dst_stride;
            SW(out1, dst);
            dst += dst_stride;
        }
    }
}
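
/* Width-8 averaging; stores the low 64 bits of each averaged row. */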
static void avg_width8_msa(const uint8_t *src, int32_t src_stride,
                           uint8_t *dst, int32_t dst_stride,
                           int32_t height)
{
    int32_t cnt;
    uint64_t out0, out1, out2, out3;
    v16u8 src0, src1, src2, src3;
    v16u8 dst0, dst1, dst2, dst3;

    for (cnt = (height / 4); cnt--;) {
        LD_UB4(src, src_stride, src0, src1, src2, src3);
        src += (4 * src_stride);
        LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);

        AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3,
                    dst0, dst1, dst2, dst3);

        out0 = __msa_copy_u_d((v2i64) dst0, 0);
        out1 = __msa_copy_u_d((v2i64) dst1, 0);
        out2 = __msa_copy_u_d((v2i64) dst2, 0);
        out3 = __msa_copy_u_d((v2i64) dst3, 0);
        SD4(out0, out1, out2, out3, dst, dst_stride);
        dst += (4 * dst_stride);
    }
}
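
/* Width-16 averaging, eight rows per iteration. */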
static void avg_width16_msa(const uint8_t *src, int32_t src_stride,
                            uint8_t *dst, int32_t dst_stride,
                            int32_t height)
{
    int32_t cnt;
    v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
    v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;

    for (cnt = (height / 8); cnt--;) {
        LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
        src += (8 * src_stride);
        LD_UB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);

        AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3,
                    dst0, dst1, dst2, dst3);
        AVER_UB4_UB(src4, dst4, src5, dst5, src6, dst6, src7, dst7,
                    dst4, dst5, dst6, dst7);
        ST_UB8(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst, dst_stride);
        dst += (8 * dst_stride);
    }
}
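
/* Full-pel (mc00) 16x16 put, i.e. a plain copy. */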
void ff_put_h264_qpel16_mc00_msa(uint8_t *dst, const uint8_t *src,
                                 ptrdiff_t stride)
{