From a451324dddf5d2ab4bcd6aa0f546596f71bdada3 Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Tue, 12 Aug 2014 18:11:05 -0400 Subject: [PATCH 01/23] vp9: ignore reference segmentation map if error_resilience flag is set. Fixes ffvp9_fails_where_libvpx.succeeds.webm. Bug-Id: ffmpeg/3849. Signed-off-by: Anton Khirnov --- libavcodec/vp9.c | 2 +- libavcodec/vp9block.c | 28 ++++++++++++++++------------ 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/libavcodec/vp9.c b/libavcodec/vp9.c index 32d995f4a1..906218565e 100644 --- a/libavcodec/vp9.c +++ b/libavcodec/vp9.c @@ -64,7 +64,7 @@ static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f) f->mv = (VP9MVRefPair*)f->mv_buf->data; if (s->segmentation.enabled && !s->segmentation.update_map && - !s->keyframe && !s->intraonly) + !s->keyframe && !s->intraonly && !s->errorres) memcpy(f->segmentation_map, s->frames[LAST_FRAME].segmentation_map, sz); return 0; diff --git a/libavcodec/vp9block.c b/libavcodec/vp9block.c index 9b0d836adc..cd40c38989 100644 --- a/libavcodec/vp9block.c +++ b/libavcodec/vp9block.c @@ -70,18 +70,22 @@ static void decode_mode(VP9Context *s, VP9Block *const b) vp56_rac_get_prob_branchy(&s->c, s->prob.segpred[s->above_segpred_ctx[col] + s->left_segpred_ctx[row7]]))) { - uint8_t *refsegmap = s->frames[LAST_FRAME].segmentation_map; - int pred = MAX_SEGMENT - 1; - int x; - - if (!s->last_uses_2pass) - ff_thread_await_progress(&s->frames[LAST_FRAME].tf, row >> 3, 0); - - for (y = 0; y < h4; y++) - for (x = 0; x < w4; x++) - pred = FFMIN(pred, - refsegmap[(y + row) * 8 * s->sb_cols + x + col]); - b->seg_id = pred; + if (!s->errorres) { + uint8_t *refsegmap = s->frames[LAST_FRAME].segmentation_map; + int pred = MAX_SEGMENT - 1; + int x; + + if (!s->last_uses_2pass) + ff_thread_await_progress(&s->frames[LAST_FRAME].tf, row >> 3, 0); + + for (y = 0; y < h4; y++) + for (x = 0; x < w4; x++) + pred = FFMIN(pred, + refsegmap[(y + row) * 8 * s->sb_cols + x + col]); + b->seg_id = pred; + } else { + b->seg_id = 0; + } memset(&s->above_segpred_ctx[col], 1, w4); memset(&s->left_segpred_ctx[row7], 1, h4); From c935b54bd6a12714fc08d88791dadee2ba07176a Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Thu, 17 Sep 2015 11:58:10 -0400 Subject: [PATCH 02/23] checkasm: add VP9 loopfilter tests. The randomize_buffer() implementation assures that "most of the time", we'll do a good mix of wide16/wide8/hev/regular/no filters for complete code coverage. However, this is not mathematically assured because that would make the code either much more complex, or much less random. 
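As a rough scalar sketch of what the setdx()/setsx() macros below do (the helper name here is illustrative, not part of the patch), each sample is drawn within a chosen distance of its neighbour, so whether the E/I/H thresholds end up being exceeded depends on the random draw; setsx() additionally scales that distance for higher bit depths:

    #include "checkasm.h"   /* for rnd(), the RNG used throughout the test */

    /* uniform in [prev - dist, prev + dist], cf. setdx() */
    static int pick_near(int prev, int dist)
    {
        return prev - dist + (rnd() % (2 * dist + 1));
    }
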
Some fixes and improvements by Rodger Combs Signed-off-by: Anton Khirnov --- tests/checkasm/vp9dsp.c | 159 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 159 insertions(+) diff --git a/tests/checkasm/vp9dsp.c b/tests/checkasm/vp9dsp.c index dd37077adf..f0d93725eb 100644 --- a/tests/checkasm/vp9dsp.c +++ b/tests/checkasm/vp9dsp.c @@ -32,6 +32,164 @@ static const uint32_t pixel_mask[3] = { 0xffffffff, 0x03ff03ff, 0x0fff0fff }; #define BIT_DEPTH 8 #define SIZEOF_PIXEL ((BIT_DEPTH + 7) / 8) + +#define setpx(a,b,c) \ + do { \ + if (SIZEOF_PIXEL == 1) { \ + buf0[(a) + (b) * jstride] = av_clip_uint8(c); \ + } else { \ + ((uint16_t *)buf0)[(a) + (b) * jstride] = av_clip_uintp2(c, BIT_DEPTH); \ + } \ + } while (0) +#define setdx(a,b,c,d) setpx(a,b,c-(d)+(rnd()%((d)*2+1))) +#define setsx(a,b,c,d) setdx(a,b,c,(d) << (BIT_DEPTH - 8)) + +static void randomize_loopfilter_buffers(int bidx, int lineoff, int str, + int bit_depth, int dir, + const int *E, const int *F, + const int *H, const int *I, + uint8_t *buf0, uint8_t *buf1) +{ + uint32_t mask = (1 << BIT_DEPTH) - 1; + int off = dir ? lineoff : lineoff * 16; + int istride = dir ? 1 : 16; + int jstride = dir ? str : 1; + int i, j; + for (i = 0; i < 2; i++) /* flat16 */ { + int idx = off + i * istride, p0, q0; + setpx(idx, 0, q0 = rnd() & mask); + setsx(idx, -1, p0 = q0, E[bidx] >> 2); + for (j = 1; j < 8; j++) { + setsx(idx, -1 - j, p0, F[bidx]); + setsx(idx, j, q0, F[bidx]); + } + } + for (i = 2; i < 4; i++) /* flat8 */ { + int idx = off + i * istride, p0, q0; + setpx(idx, 0, q0 = rnd() & mask); + setsx(idx, -1, p0 = q0, E[bidx] >> 2); + for (j = 1; j < 4; j++) { + setsx(idx, -1 - j, p0, F[bidx]); + setsx(idx, j, q0, F[bidx]); + } + for (j = 4; j < 8; j++) { + setpx(idx, -1 - j, rnd() & mask); + setpx(idx, j, rnd() & mask); + } + } + for (i = 4; i < 6; i++) /* regular */ { + int idx = off + i * istride, p2, p1, p0, q0, q1, q2; + setpx(idx, 0, q0 = rnd() & mask); + setsx(idx, 1, q1 = q0, I[bidx]); + setsx(idx, 2, q2 = q1, I[bidx]); + setsx(idx, 3, q2, I[bidx]); + setsx(idx, -1, p0 = q0, E[bidx] >> 2); + setsx(idx, -2, p1 = p0, I[bidx]); + setsx(idx, -3, p2 = p1, I[bidx]); + setsx(idx, -4, p2, I[bidx]); + for (j = 4; j < 8; j++) { + setpx(idx, -1 - j, rnd() & mask); + setpx(idx, j, rnd() & mask); + } + } + for (i = 6; i < 8; i++) /* off */ { + int idx = off + i * istride; + for (j = 0; j < 8; j++) { + setpx(idx, -1 - j, rnd() & mask); + setpx(idx, j, rnd() & mask); + } + } +} + +#define randomize_buffers(bidx, lineoff, str) \ + randomize_loopfilter_buffers(bidx, lineoff, str, BIT_DEPTH, dir, \ + E, F, H, I, buf0, buf1) + +static void check_loopfilter(void) +{ + LOCAL_ALIGNED_32(uint8_t, base0, [32 + 16 * 16 * 2]); + LOCAL_ALIGNED_32(uint8_t, base1, [32 + 16 * 16 * 2]); + VP9DSPContext dsp; + int dir, wd, wd2; + static const char *const dir_name[2] = { "h", "v" }; + static const int E[2] = { 20, 28 }, I[2] = { 10, 16 }; + static const int H[2] = { 7, 11 }, F[2] = { 1, 1 }; + declare_func(void, uint8_t *dst, ptrdiff_t stride, int E, int I, int H); + + ff_vp9dsp_init(&dsp); + + for (dir = 0; dir < 2; dir++) { + uint8_t *buf0, *buf1; + int midoff = (dir ? 8 * 8 : 8) * SIZEOF_PIXEL; + int midoff_aligned = (dir ? 
8 * 8 : 16) * SIZEOF_PIXEL; + + buf0 = base0 + midoff_aligned; + buf1 = base1 + midoff_aligned; + + for (wd = 0; wd < 3; wd++) { + // 4/8/16wd_8px + if (check_func(dsp.loop_filter_8[wd][dir], + "vp9_loop_filter_%s_%d_8", + dir_name[dir], 4 << wd)) { + randomize_buffers(0, 0, 8); + memcpy(buf1 - midoff, buf0 - midoff, + 16 * 8 * SIZEOF_PIXEL); + call_ref(buf0, 16 * SIZEOF_PIXEL >> dir, E[0], I[0], H[0]); + call_new(buf1, 16 * SIZEOF_PIXEL >> dir, E[0], I[0], H[0]); + if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 8 * SIZEOF_PIXEL)) + fail(); + bench_new(buf1, 16 * SIZEOF_PIXEL >> dir, E[0], I[0], H[0]); + } + } + + midoff = (dir ? 16 * 8 : 8) * SIZEOF_PIXEL; + midoff_aligned = (dir ? 16 * 8 : 16) * SIZEOF_PIXEL; + + buf0 = base0 + midoff_aligned; + buf1 = base1 + midoff_aligned; + + // 16wd_16px loopfilter + if (check_func(dsp.loop_filter_16[dir], + "vp9_loop_filter_%s_16_16", + dir_name[dir])) { + randomize_buffers(0, 0, 16); + randomize_buffers(0, 8, 16); + memcpy(buf1 - midoff, buf0 - midoff, 16 * 16 * SIZEOF_PIXEL); + call_ref(buf0, 16 * SIZEOF_PIXEL, E[0], I[0], H[0]); + call_new(buf1, 16 * SIZEOF_PIXEL, E[0], I[0], H[0]); + if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 16 * SIZEOF_PIXEL)) + fail(); + bench_new(buf1, 16 * SIZEOF_PIXEL, E[0], I[0], H[0]); + } + + for (wd = 0; wd < 2; wd++) { + for (wd2 = 0; wd2 < 2; wd2++) { + // mix2 loopfilter + if (check_func(dsp.loop_filter_mix2[wd][wd2][dir], + "vp9_loop_filter_mix2_%s_%d%d_16", + dir_name[dir], 4 << wd, 4 << wd2)) { + randomize_buffers(0, 0, 16); + randomize_buffers(1, 8, 16); + memcpy(buf1 - midoff, buf0 - midoff, 16 * 16 * SIZEOF_PIXEL); +#define M(a) ((a[1] << 8) | a[0]) + call_ref(buf0, 16 * SIZEOF_PIXEL, M(E), M(I), M(H)); + call_new(buf1, 16 * SIZEOF_PIXEL, M(E), M(I), M(H)); + if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 16 * SIZEOF_PIXEL)) + fail(); + bench_new(buf1, 16 * SIZEOF_PIXEL, M(E), M(I), M(H)); +#undef M + } + } + } + } + report("loopfilter"); +} + +#undef setsx +#undef setpx +#undef setdx +#undef randomize_buffers + #define DST_BUF_SIZE (size * size * SIZEOF_PIXEL) #define SRC_BUF_STRIDE 72 #define SRC_BUF_SIZE ((size + 7) * SRC_BUF_STRIDE * SIZEOF_PIXEL) @@ -123,5 +281,6 @@ static void check_mc(void) void checkasm_check_vp9dsp(void) { + check_loopfilter(); check_mc(); } From a692724c587859c8157d1aefcae9fbdb23328c61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20B=C5=93sch?= Date: Sun, 5 Jan 2014 21:00:40 +0100 Subject: [PATCH 03/23] vp9lpf/x86: add x86 SSSE3/AVX SIMD for vp9_loop_filter_[vh]_16_16. 
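The assembly below computes the fm, flat8in, flat8out and hev masks and then selects between the four VP9 loop filters per pixel. In scalar terms the decision is the following (a sketch only; the enum and function names are illustrative, not decoder API):

    enum filter_kind { FILTER_NONE, FILTER_4, FILTER_2, FILTER_6, FILTER_14 };

    static enum filter_kind select_filter(int fm, int flat8out, int flat8in, int hev)
    {
        if (!fm)                 return FILTER_NONE;  /* edge left untouched */
        if (flat8out && flat8in) return FILTER_14;    /* widest smoothing    */
        if (flat8in)             return FILTER_6;
        if (hev)                 return FILTER_2;     /* high edge variance  */
        return FILTER_4;
    }

The SIMD code evaluates the masks once for all 16 pixels of the edge and blends each filter's output with MASK_APPLY instead of branching.
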
Signed-off-by: Anton Khirnov --- libavcodec/x86/Makefile | 3 +- libavcodec/x86/vp9dsp_init.c | 15 + libavcodec/x86/vp9lpf.asm | 601 +++++++++++++++++++++++++++++++++++ 3 files changed, 618 insertions(+), 1 deletion(-) create mode 100644 libavcodec/x86/vp9lpf.asm diff --git a/libavcodec/x86/Makefile b/libavcodec/x86/Makefile index 4aae594bd8..a38535b98f 100644 --- a/libavcodec/x86/Makefile +++ b/libavcodec/x86/Makefile @@ -125,4 +125,5 @@ YASM-OBJS-$(CONFIG_V210_ENCODER) += x86/v210enc.o YASM-OBJS-$(CONFIG_VORBIS_DECODER) += x86/vorbisdsp.o YASM-OBJS-$(CONFIG_VP3_DECODER) += x86/hpeldsp_vp3.o YASM-OBJS-$(CONFIG_VP6_DECODER) += x86/vp6dsp.o -YASM-OBJS-$(CONFIG_VP9_DECODER) += x86/vp9mc.o +YASM-OBJS-$(CONFIG_VP9_DECODER) += x86/vp9mc.o \ + x86/vp9lpf.o diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c index 41fa35a4c3..93453b83e3 100644 --- a/libavcodec/x86/vp9dsp_init.c +++ b/libavcodec/x86/vp9dsp_init.c @@ -217,6 +217,11 @@ filters_8tap_1d_fn2(avg, 32, avx2, ssse3) #undef filters_8tap_1d_fn3 #undef filter_8tap_1d_fn +void ff_vp9_loop_filter_v_16_16_ssse3(uint8_t *dst, ptrdiff_t stride, int E, int I, int H); +void ff_vp9_loop_filter_v_16_16_avx (uint8_t *dst, ptrdiff_t stride, int E, int I, int H); +void ff_vp9_loop_filter_h_16_16_ssse3(uint8_t *dst, ptrdiff_t stride, int E, int I, int H); +void ff_vp9_loop_filter_h_16_16_avx (uint8_t *dst, ptrdiff_t stride, int E, int I, int H); + #endif /* HAVE_YASM */ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) @@ -283,11 +288,21 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) if (EXTERNAL_SSSE3(cpu_flags)) { init_subpel3(0, put, ssse3); init_subpel3(1, avg, ssse3); + + if (ARCH_X86_64) { + dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_ssse3; + dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_ssse3; + } } if (EXTERNAL_AVX(cpu_flags)) { init_fpel(1, 0, 32, put, avx); init_fpel(0, 0, 64, put, avx); + + if (ARCH_X86_64) { + dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_avx; + dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_avx; + } } if (EXTERNAL_AVX2(cpu_flags)) { diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm new file mode 100644 index 0000000000..8568f1d613 --- /dev/null +++ b/libavcodec/x86/vp9lpf.asm @@ -0,0 +1,601 @@ +;****************************************************************************** +;* VP9 loop filter SIMD optimizations +;* +;* Copyright (C) 2013-2014 Clément Bœsch +;* +;* This file is part of Libav. +;* +;* Libav is free software; you can redistribute it and/or +;* modify it under the terms of the GNU Lesser General Public +;* License as published by the Free Software Foundation; either +;* version 2.1 of the License, or (at your option) any later version. +;* +;* Libav is distributed in the hope that it will be useful, +;* but WITHOUT ANY WARRANTY; without even the implied warranty of +;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +;* Lesser General Public License for more details. 
+;* +;* You should have received a copy of the GNU Lesser General Public +;* License along with Libav; if not, write to the Free Software +;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +;****************************************************************************** + + +%include "libavutil/x86/x86util.asm" + +%if ARCH_X86_64 + +SECTION_RODATA + +cextern pb_3 +cextern pb_80 + +pb_4: times 16 db 0x04 +pb_10: times 16 db 0x10 +pb_40: times 16 db 0x40 +pb_81: times 16 db 0x81 +pb_f8: times 16 db 0xf8 +pb_fe: times 16 db 0xfe + +pw_4: times 8 dw 4 +pw_8: times 8 dw 8 + +SECTION .text + +; %1 = abs(%2-%3) +%macro ABSSUB 4 ; dst, src1 (RO), src2 (RO), tmp + psubusb %1, %3, %2 + psubusb %4, %2, %3 + por %1, %4 +%endmacro + +; %1 = %1<=%2 +%macro CMP_LTE 3-4 ; src/dst, cmp, tmp, pb_80 +%if %0 == 4 + pxor %1, %4 +%endif + pcmpgtb %3, %2, %1 ; cmp > src? + pcmpeqb %1, %2 ; cmp == src? XXX: avoid this with a -1/+1 well placed? + por %1, %3 ; cmp >= src? +%endmacro + +; %1 = abs(%2-%3) <= %4 +%macro ABSSUB_CMP 6-7 [pb_80]; dst, src1, src2, cmp, tmp1, tmp2, [pb_80] + ABSSUB %1, %2, %3, %6 ; dst = abs(src1-src2) + CMP_LTE %1, %4, %6, %7 ; dst <= cmp +%endmacro + +%macro MASK_APPLY 4 ; %1=new_data/dst %2=old_data %3=mask %4=tmp + pand %1, %3 ; new &= mask + pandn %4, %3, %2 ; tmp = ~mask & old + por %1, %4 ; new&mask | old&~mask +%endmacro + +%macro FILTER_SUBx2_ADDx2 8 ; %1=dst %2=h/l %3=cache %4=sub1 %5=sub2 %6=add1 %7=add2 %8=rshift + punpck%2bw %3, %4, m0 + psubw %1, %3 + punpck%2bw %3, %5, m0 + psubw %1, %3 + punpck%2bw %3, %6, m0 + paddw %1, %3 + punpck%2bw %3, %7, m0 + paddw %3, %1 + psraw %1, %3, %8 +%endmacro + +%macro FILTER_INIT 8 ; tmp1, tmp2, cacheL, cacheH, dstp, filterid, mask, source + FILTER%6_INIT %1, l, %3 + FILTER%6_INIT %2, h, %4 + packuswb %1, %2 + MASK_APPLY %1, %8, %7, %2 + mova %5, %1 +%endmacro + +%macro FILTER_UPDATE 11-14 ; tmp1, tmp2, cacheL, cacheH, dstp, -, -, +, +, rshift, mask, [source], [preload reg + value] +%if %0 == 13 ; no source + preload + mova %12, %13 +%elif %0 == 14 ; source + preload + mova %13, %14 +%endif + FILTER_SUBx2_ADDx2 %1, l, %3, %6, %7, %8, %9, %10 + FILTER_SUBx2_ADDx2 %2, h, %4, %6, %7, %8, %9, %10 + packuswb %1, %2 +%if %0 == 12 || %0 == 14 + MASK_APPLY %1, %12, %11, %2 +%else + MASK_APPLY %1, %5, %11, %2 +%endif + mova %5, %1 +%endmacro + +%macro SRSHIFT3B_2X 4 ; reg1, reg2, [pb_10], tmp + mova %4, [pb_f8] + pand %1, %4 + pand %2, %4 + psrlq %1, 3 + psrlq %2, 3 + pxor %1, %3 + pxor %2, %3 + psubb %1, %3 + psubb %2, %3 +%endmacro + +%macro EXTRACT_POS_NEG 3 ; i8, neg, pos + pxor %3, %3 + pxor %2, %2 + pcmpgtb %3, %1 ; i8 < 0 mask + psubb %2, %1 ; neg values (only the originally - will be kept) + pand %2, %3 ; negative values of i8 (but stored as +) + pandn %3, %1 ; positive values of i8 +%endmacro + +; clip_u8(u8 + i8) +%macro SIGN_ADD 5 ; dst, u8, i8, tmp1, tmp2 + EXTRACT_POS_NEG %3, %4, %5 + psubusb %1, %2, %4 ; sub the negatives + paddusb %1, %5 ; add the positives +%endmacro + +; clip_u8(u8 - i8) +%macro SIGN_SUB 5 ; dst, u8, i8, tmp1, tmp2 + EXTRACT_POS_NEG %3, %4, %5 + psubusb %1, %2, %5 ; sub the positives + paddusb %1, %4 ; add the negatives +%endmacro + +%macro FILTER6_INIT 3 ; %1=dst %2=h/l %3=cache + punpck%2bw %1, m14, m0 ; p3: B->W + paddw %3, %1, %1 ; p3*2 + paddw %3, %1 ; p3*3 + punpck%2bw %1, m15, m0 ; p2: B->W + paddw %3, %1 ; p3*3 + p2 + paddw %3, %1 ; p3*3 + p2*2 + punpck%2bw %1, m10, m0 ; p1: B->W + paddw %3, %1 ; p3*3 + p2*2 + p1 + punpck%2bw %1, m11, m0 ; p0: B->W + paddw %3, %1 ; p3*3 + p2*2 + 
p1 + p0 + punpck%2bw %1, m12, m0 ; q0: B->W + paddw %3, %1 ; p3*3 + p2*2 + p1 + p0 + q0 + paddw %3, [pw_4] ; p3*3 + p2*2 + p1 + p0 + q0 + 4 + psraw %1, %3, 3 ; (p3*3 + p2*2 + p1 + p0 + q0 + 4) >> 3 +%endmacro + +%macro FILTER14_INIT 3 ; %1=dst %2=h/l %3=cache + punpck%2bw %1, m2, m0 ; p7: B->W + psllw %3, %1, 3 ; p7*8 + psubw %3, %1 ; p7*7 + punpck%2bw %1, m3, m0 ; p6: B->W + paddw %3, %1 ; p7*7 + p6 + paddw %3, %1 ; p7*7 + p6*2 + punpck%2bw %1, m8, m0 ; p5: B->W + paddw %3, %1 ; p7*7 + p6*2 + p5 + punpck%2bw %1, m9, m0 ; p4: B->W + paddw %3, %1 ; p7*7 + p6*2 + p5 + p4 + punpck%2bw %1, m14, m0 ; p3: B->W + paddw %3, %1 ; p7*7 + p6*2 + p5 + p4 + p3 + punpck%2bw %1, m15, m0 ; p2: B->W + paddw %3, %1 ; p7*7 + p6*2 + p5 + .. + p2 + punpck%2bw %1, m10, m0 ; p1: B->W + paddw %3, %1 ; p7*7 + p6*2 + p5 + .. + p1 + punpck%2bw %1, m11, m0 ; p0: B->W + paddw %3, %1 ; p7*7 + p6*2 + p5 + .. + p0 + punpck%2bw %1, m12, m0 ; q0: B->W + paddw %3, %1 ; p7*7 + p6*2 + p5 + .. + p0 + q0 + paddw %3, [pw_8] ; p7*7 + p6*2 + p5 + .. + p0 + q0 + 8 + psraw %1, %3, 4 ; (p7*7 + p6*2 + p5 + .. + p0 + q0 + 8) >> 4 +%endmacro + +%macro TRANSPOSE16x16B 17 + mova %17, m%16 + SBUTTERFLY bw, %1, %2, %16 + SBUTTERFLY bw, %3, %4, %16 + SBUTTERFLY bw, %5, %6, %16 + SBUTTERFLY bw, %7, %8, %16 + SBUTTERFLY bw, %9, %10, %16 + SBUTTERFLY bw, %11, %12, %16 + SBUTTERFLY bw, %13, %14, %16 + mova m%16, %17 + mova %17, m%14 + SBUTTERFLY bw, %15, %16, %14 + SBUTTERFLY wd, %1, %3, %14 + SBUTTERFLY wd, %2, %4, %14 + SBUTTERFLY wd, %5, %7, %14 + SBUTTERFLY wd, %6, %8, %14 + SBUTTERFLY wd, %9, %11, %14 + SBUTTERFLY wd, %10, %12, %14 + SBUTTERFLY wd, %13, %15, %14 + mova m%14, %17 + mova %17, m%12 + SBUTTERFLY wd, %14, %16, %12 + SBUTTERFLY dq, %1, %5, %12 + SBUTTERFLY dq, %2, %6, %12 + SBUTTERFLY dq, %3, %7, %12 + SBUTTERFLY dq, %4, %8, %12 + SBUTTERFLY dq, %9, %13, %12 + SBUTTERFLY dq, %10, %14, %12 + SBUTTERFLY dq, %11, %15, %12 + mova m%12, %17 + mova %17, m%8 + SBUTTERFLY dq, %12, %16, %8 + SBUTTERFLY qdq, %1, %9, %8 + SBUTTERFLY qdq, %2, %10, %8 + SBUTTERFLY qdq, %3, %11, %8 + SBUTTERFLY qdq, %4, %12, %8 + SBUTTERFLY qdq, %5, %13, %8 + SBUTTERFLY qdq, %6, %14, %8 + SBUTTERFLY qdq, %7, %15, %8 + mova m%8, %17 + mova %17, m%1 + SBUTTERFLY qdq, %8, %16, %1 + mova m%1, %17 + SWAP %2, %9 + SWAP %3, %5 + SWAP %4, %13 + SWAP %6, %11 + SWAP %8, %15 + SWAP %12, %14 +%endmacro + +%macro DEFINE_REAL_P7_TO_Q7 0 +%define P7 dst1q + 2*mstrideq +%define P6 dst1q + mstrideq +%define P5 dst1q +%define P4 dst1q + strideq +%define P3 dstq + 4*mstrideq +%define P2 dstq + mstride3q +%define P1 dstq + 2*mstrideq +%define P0 dstq + mstrideq +%define Q0 dstq +%define Q1 dstq + strideq +%define Q2 dstq + 2*strideq +%define Q3 dstq + stride3q +%define Q4 dstq + 4*strideq +%define Q5 dst2q + mstrideq +%define Q6 dst2q +%define Q7 dst2q + strideq +%endmacro + +%macro LPF_16_16 1 + lea mstrideq, [strideq] + neg mstrideq + + lea stride3q, [strideq+2*strideq] + mov mstride3q, stride3q + neg mstride3q + +%ifidn %1, h + lea dstq, [dstq + 8*strideq - 8] ; go from top center (h pos) to center left (v pos) +%endif + + lea dst1q, [dstq + 2*mstride3q] ; dst1q = &dst[stride * -6] + lea dst2q, [dstq + 2* stride3q] ; dst2q = &dst[stride * +6] + + DEFINE_REAL_P7_TO_Q7 + +%ifidn %1, h + movu m0, [P7] + movu m1, [P6] + movu m2, [P5] + movu m3, [P4] + movu m4, [P3] + movu m5, [P2] + movu m6, [P1] + movu m7, [P0] + movu m8, [Q0] + movu m9, [Q1] + movu m10, [Q2] + movu m11, [Q3] + movu m12, [Q4] + movu m13, [Q5] + movu m14, [Q6] + movu m15, [Q7] + TRANSPOSE16x16B 0, 1, 2, 3, 4, 5, 
6, 7, 8, 9, 10, 11, 12, 13, 14, 15, [rsp] + +%define P7 rsp + 0 +%define P6 rsp + 16 +%define P5 rsp + 32 +%define P4 rsp + 48 +%define P3 rsp + 64 +%define P2 rsp + 80 +%define P1 rsp + 96 +%define P0 rsp + 112 +%define Q0 rsp + 128 +%define Q1 rsp + 144 +%define Q2 rsp + 160 +%define Q3 rsp + 176 +%define Q4 rsp + 192 +%define Q5 rsp + 208 +%define Q6 rsp + 224 +%define Q7 rsp + 240 + + mova [P7], m0 + mova [P6], m1 + mova [P5], m2 + mova [P4], m3 + mova [P3], m4 + mova [P2], m5 + mova [P1], m6 + mova [P0], m7 + mova [Q0], m8 + mova [Q1], m9 + mova [Q2], m10 + mova [Q3], m11 + mova [Q4], m12 + mova [Q5], m13 + mova [Q6], m14 + mova [Q7], m15 +%endif + + ; calc fm mask + pxor m0, m0 + movd m2, Id + movd m3, Ed + pshufb m2, m0 ; I I I I ... + pshufb m3, m0 ; E E E E ... + mova m0, [pb_80] + pxor m2, m0 + pxor m3, m0 +%ifidn %1, v + mova m8, [P3] + mova m9, [P2] + mova m10, [P1] + mova m11, [P0] + mova m12, [Q0] + mova m13, [Q1] + mova m14, [Q2] + mova m15, [Q3] +%else + ; In case of horizontal, P3..Q3 are already present in some registers due + ; to the previous transpose, so we just swap registers. + SWAP 8, 4, 12 + SWAP 9, 5, 13 + SWAP 10, 6, 14 + SWAP 11, 7, 15 +%endif + ABSSUB_CMP m5, m8, m9, m2, m6, m7, m0 ; m5 = abs(p3-p2) <= I + ABSSUB_CMP m1, m9, m10, m2, m6, m7, m0 ; m1 = abs(p2-p1) <= I + pand m5, m1 + ABSSUB_CMP m1, m10, m11, m2, m6, m7, m0 ; m1 = abs(p1-p0) <= I + pand m5, m1 + ABSSUB_CMP m1, m12, m13, m2, m6, m7, m0 ; m1 = abs(q1-q0) <= I + pand m5, m1 + ABSSUB_CMP m1, m13, m14, m2, m6, m7, m0 ; m1 = abs(q2-q1) <= I + pand m5, m1 + ABSSUB_CMP m1, m14, m15, m2, m6, m7, m0 ; m1 = abs(q3-q2) <= I + pand m5, m1 + ABSSUB m1, m11, m12, m7 ; abs(p0-q0) + paddusb m1, m1 ; abs(p0-q0) * 2 + ABSSUB m2, m10, m13, m7 ; abs(p1-q1) + pand m2, [pb_fe] ; drop lsb so shift can work + psrlq m2, 1 ; abs(p1-q1)/2 + paddusb m1, m2 ; abs(p0-q0)*2 + abs(p1-q1)/2 + pxor m1, m0 + pcmpgtb m4, m3, m1 ; E > X? + pcmpeqb m3, m1 ; E == X? + por m3, m4 ; E >= X? + pand m3, m5 ; fm final value + + ; (m3: fm, m8..15: p3 p2 p1 p0 q0 q1 q2 q3) + ; calc flat8in and hev masks + mova m6, [pb_81] ; [1 1 1 1 ...] ^ 0x80 + ABSSUB_CMP m2, m8, m11, m6, m4, m5 ; abs(p3 - p0) <= 1 + mova m8, [pb_80] + ABSSUB_CMP m1, m9, m11, m6, m4, m5, m8 ; abs(p2 - p0) <= 1 + pand m2, m1 + ABSSUB m4, m10, m11, m5 ; abs(p1 - p0) + pxor m0, m0 + movd m7, Hd + pshufb m7, m0 ; H H H H ... 
+ pxor m7, m8 + pxor m4, m8 + pcmpgtb m0, m4, m7 ; abs(p1 - p0) > H (1/2 hev condition) + CMP_LTE m4, m6, m5 ; abs(p1 - p0) <= 1 + pand m2, m4 ; (flat8in) + ABSSUB m4, m13, m12, m1 ; abs(q1 - q0) + pxor m4, m8 + pcmpgtb m5, m4, m7 ; abs(q1 - q0) > H (2/2 hev condition) + por m0, m5 ; hev final value + CMP_LTE m4, m6, m5 ; abs(q1 - q0) <= 1 + pand m2, m4 ; (flat8in) + ABSSUB_CMP m1, m14, m12, m6, m4, m5, m8 ; abs(q2 - q0) <= 1 + pand m2, m1 + ABSSUB_CMP m1, m15, m12, m6, m4, m5, m8 ; abs(q3 - q0) <= 1 + pand m2, m1 ; flat8in final value + + ; (m0: hev, m2: flat8in, m3: fm, m6: pb_81, m9..15: p2 p1 p0 q0 q1 q2 q3) + ; calc flat8out mask + mova m8, [P7] + mova m9, [P6] + ABSSUB_CMP m1, m8, m11, m6, m4, m5 ; abs(p7 - p0) <= 1 + ABSSUB_CMP m7, m9, m11, m6, m4, m5 ; abs(p6 - p0) <= 1 + pand m1, m7 + mova m8, [P5] + mova m9, [P4] + ABSSUB_CMP m7, m8, m11, m6, m4, m5 ; abs(p5 - p0) <= 1 + pand m1, m7 + ABSSUB_CMP m7, m9, m11, m6, m4, m5 ; abs(p4 - p0) <= 1 + pand m1, m7 + mova m14, [Q4] + mova m15, [Q5] + ABSSUB_CMP m7, m14, m12, m6, m4, m5 ; abs(q4 - q0) <= 1 + pand m1, m7 + ABSSUB_CMP m7, m15, m12, m6, m4, m5 ; abs(q5 - q0) <= 1 + pand m1, m7 + mova m14, [Q6] + mova m15, [Q7] + ABSSUB_CMP m7, m14, m12, m6, m4, m5 ; abs(q4 - q0) <= 1 + pand m1, m7 + ABSSUB_CMP m7, m15, m12, m6, m4, m5 ; abs(q5 - q0) <= 1 + pand m1, m7 ; flat8out final value + + ; if (fm) { + ; if (out && in) filter_14() + ; else if (in) filter_6() + ; else if (hev) filter_2() + ; else filter_4() + ; } + ; + ; f14: fm & out & in + ; f6: fm & ~f14 & in => fm & ~(out & in) & in => fm & ~out & in + ; f2: fm & ~f14 & ~f6 & hev => fm & ~(out & in) & ~(~out & in) & hev => fm & ~in & hev + ; f4: fm & ~f14 & ~f6 & ~f2 => fm & ~(out & in) & ~(~out & in) & ~(~in & hev) => fm & ~in & ~hev + + ; (m0: hev, m1: flat8out, m2: flat8in, m3: fm, m8..15: p5 p4 p1 p0 q0 q1 q6 q7) + ; filter2() + mova m6, [pb_80] + pxor m15, m12, m6 ; q0 ^ 0x80 + pxor m14, m11, m6 ; p0 ^ 0x80 + psubsb m15, m14 ; (signed) q0 - p0 + pxor m4, m10, m6 ; p1 ^ 0x80 + pxor m5, m13, m6 ; q1 ^ 0x80 + psubsb m4, m5 ; (signed) p1 - q1 + paddsb m4, m15 ; (q0 - p0) + (p1 - q1) + paddsb m4, m15 ; 2*(q0 - p0) + (p1 - q1) + paddsb m4, m15 ; 3*(q0 - p0) + (p1 - q1) + paddsb m6, m4, [pb_4] ; m6: f1 = clip(f + 4, 127) + paddsb m4, [pb_3] ; m4: f2 = clip(f + 3, 127) + mova m14, [pb_10] ; will be reused in filter4() + SRSHIFT3B_2X m6, m4, m14, m7 ; f1 and f2 sign byte shift by 3 + SIGN_SUB m7, m12, m6, m5, m9 ; m7 = q0 - f1 + SIGN_ADD m8, m11, m4, m5, m9 ; m8 = p0 + f2 + pandn m6, m2, m3 ; ~mask(in) & mask(fm) + pand m6, m0 ; (~mask(in) & mask(fm)) & mask(hev) + MASK_APPLY m7, m12, m6, m5 ; m7 = filter2(q0) & mask / we write it in filter4() + MASK_APPLY m8, m11, m6, m5 ; m8 = filter2(p0) & mask / we write it in filter4() + + ; (m0: hev, m1: flat8out, m2: flat8in, m3: fm, m7..m8: q0' p0', m10..13: p1 p0 q0 q1, m14: pb_10, m15: q0-p0) + ; filter4() + mova m4, m15 + paddsb m15, m4 ; 2 * (q0 - p0) + paddsb m15, m4 ; 3 * (q0 - p0) + paddsb m6, m15, [pb_4] ; m6: f1 = clip(f + 4, 127) + paddsb m15, [pb_3] ; m15: f2 = clip(f + 3, 127) + SRSHIFT3B_2X m6, m15, m14, m9 ; f1 and f2 sign byte shift by 3 + pandn m5, m2, m3 ; ~mask(in) & mask(fm) + pandn m0, m5 ; ~mask(hev) & (~mask(in) & mask(fm)) + SIGN_SUB m9, m12, m6, m4, m14 ; q0 - f1 + MASK_APPLY m9, m7, m0, m5 ; m9 = filter4(q0) & mask + mova [Q0], m9 + SIGN_ADD m7, m11, m15, m4, m14 ; p0 + f2 + MASK_APPLY m7, m8, m0, m5 ; m7 = filter4(p0) & mask + mova [P0], m7 + paddb m6, [pb_80] ; + pxor m8, m8 ; f=(f1+1)>>1 + pavgb m6, m8 ; + psubb m6, 
[pb_40] ; + SIGN_ADD m7, m10, m6, m8, m9 ; p1 + f + SIGN_SUB m4, m13, m6, m8, m9 ; q1 - f + MASK_APPLY m7, m10, m0, m14 ; m7 = filter4(p1) + MASK_APPLY m4, m13, m0, m14 ; m4 = filter4(q1) + mova [P1], m7 + mova [Q1], m4 + + ; (m1: flat8out, m2: flat8in, m3: fm, m10..13: p1 p0 q0 q1) + ; filter6() + pxor m0, m0 + pand m2, m3 ; mask(fm) & mask(in) + pandn m3, m1, m2 ; ~mask(out) & (mask(fm) & mask(in)) + mova m14, [P3] + mova m15, [P2] + mova m8, [Q2] + mova m9, [Q3] + FILTER_INIT m4, m5, m6, m7, [P2], 6, m3, m15 ; [p2] + FILTER_UPDATE m6, m7, m4, m5, [P1], m14, m15, m10, m13, 3, m3 ; [p1] -p3 -p2 +p1 +q1 + FILTER_UPDATE m4, m5, m6, m7, [P0], m14, m10, m11, m8, 3, m3 ; [p0] -p3 -p1 +p0 +q2 + FILTER_UPDATE m6, m7, m4, m5, [Q0], m14, m11, m12, m9, 3, m3 ; [q0] -p3 -p0 +q0 +q3 + FILTER_UPDATE m4, m5, m6, m7, [Q1], m15, m12, m13, m9, 3, m3 ; [q1] -p2 -q0 +q1 +q3 + FILTER_UPDATE m6, m7, m4, m5, [Q2], m10, m13, m8, m9, 3, m3, m8 ; [q2] -p1 -q1 +q2 +q3 + + ; (m0: 0, m1: flat8out, m2: fm & flat8in, m8..15: q2 q3 p1 p0 q0 q1 p3 p2) + ; filter14() + ; + ; m2 m3 m8 m9 m14 m15 m10 m11 m12 m13 + ; + ; q2 q3 p3 p2 p1 p0 q0 q1 + ; p6 -7 p7 p6 p5 p4 . . . . . + ; p5 -6 -p7 -p6 +p5 +q1 . . . . + ; p4 -5 -p7 -p5 +p4 +q2 . . . q2 + ; p3 -4 -p7 -p4 +p3 +q3 . . . q3 + ; p2 -3 -p7 -p3 +p2 +q4 . . . q4 + ; p1 -2 -p7 -p2 +p1 +q5 . . . q5 + ; p0 -1 -p7 -p1 +p0 +q6 . . . q6 + ; q0 +0 -p7 -p0 +q0 +q7 . . . q7 + ; q1 +1 -p6 -q0 +q1 +q7 q1 . . . + ; q2 +2 -p5 -q1 +q2 +q7 . q2 . . + ; q3 +3 -p4 -q2 +q3 +q7 . q3 . . + ; q4 +4 -p3 -q3 +q4 +q7 . q4 . . + ; q5 +5 -p2 -q4 +q5 +q7 . q5 . . + ; q6 +6 -p1 -q5 +q6 +q7 . q6 . . + + pand m1, m2 ; mask(out) & (mask(fm) & mask(in)) + mova m2, [P7] + mova m3, [P6] + mova m8, [P5] + mova m9, [P4] + FILTER_INIT m4, m5, m6, m7, [P6], 14, m1, m3 + FILTER_UPDATE m6, m7, m4, m5, [P5], m2, m3, m8, m13, 4, m1, m8 ; [p5] -p7 -p6 +p5 +q1 + FILTER_UPDATE m4, m5, m6, m7, [P4], m2, m8, m9, m13, 4, m1, m9, m13, [Q2] ; [p4] -p7 -p5 +p4 +q2 + FILTER_UPDATE m6, m7, m4, m5, [P3], m2, m9, m14, m13, 4, m1, m14, m13, [Q3] ; [p3] -p7 -p4 +p3 +q3 + FILTER_UPDATE m4, m5, m6, m7, [P2], m2, m14, m15, m13, 4, m1, m13, [Q4] ; [p2] -p7 -p3 +p2 +q4 + FILTER_UPDATE m6, m7, m4, m5, [P1], m2, m15, m10, m13, 4, m1, m13, [Q5] ; [p1] -p7 -p2 +p1 +q5 + FILTER_UPDATE m4, m5, m6, m7, [P0], m2, m10, m11, m13, 4, m1, m13, [Q6] ; [p0] -p7 -p1 +p0 +q6 + FILTER_UPDATE m6, m7, m4, m5, [Q0], m2, m11, m12, m13, 4, m1, m13, [Q7] ; [q0] -p7 -p0 +q0 +q7 + FILTER_UPDATE m4, m5, m6, m7, [Q1], m3, m12, m2, m13, 4, m1, m2, [Q1] ; [q1] -p6 -q0 +q1 +q7 + FILTER_UPDATE m6, m7, m4, m5, [Q2], m8, m2, m3, m13, 4, m1, m3, [Q2] ; [q2] -p5 -q1 +q2 +q7 + FILTER_UPDATE m4, m5, m6, m7, [Q3], m9, m3, m8, m13, 4, m1, m8, m8, [Q3] ; [q3] -p4 -q2 +q3 +q7 + FILTER_UPDATE m6, m7, m4, m5, [Q4], m14, m8, m9, m13, 4, m1, m9, m9, [Q4] ; [q4] -p3 -q3 +q4 +q7 + FILTER_UPDATE m4, m5, m6, m7, [Q5], m15, m9, m14, m13, 4, m1, m14, m14, [Q5] ; [q5] -p2 -q4 +q5 +q7 + FILTER_UPDATE m6, m7, m4, m5, [Q6], m10, m14, m15, m13, 4, m1, m15, m15, [Q6] ; [q6] -p1 -q5 +q6 +q7 + +%ifidn %1, h + mova m0, [P7] + mova m1, [P6] + mova m2, [P5] + mova m3, [P4] + mova m4, [P3] + mova m5, [P2] + mova m6, [P1] + mova m7, [P0] + mova m8, [Q0] + mova m9, [Q1] + mova m10, [Q2] + mova m11, [Q3] + mova m12, [Q4] + mova m13, [Q5] + mova m14, [Q6] + mova m15, [Q7] + TRANSPOSE16x16B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, [rsp] + DEFINE_REAL_P7_TO_Q7 + movu [P7], m0 + movu [P6], m1 + movu [P5], m2 + movu [P4], m3 + movu [P3], m4 + movu [P2], m5 + movu [P1], m6 + movu [P0], m7 + 
movu [Q0], m8 + movu [Q1], m9 + movu [Q2], m10 + movu [Q3], m11 + movu [Q4], m12 + movu [Q5], m13 + movu [Q6], m14 + movu [Q7], m15 +%endif +%endmacro + +%macro LPF_16_16_VH 1 +INIT_XMM %1 +cglobal vp9_loop_filter_v_16_16, 5,10,16, dst, stride, E, I, H, mstride, dst1, dst2, stride3, mstride3 + LPF_16_16 v + RET +cglobal vp9_loop_filter_h_16_16, 5,10,16, 256, dst, stride, E, I, H, mstride, dst1, dst2, stride3, mstride3 + LPF_16_16 h + RET +%endmacro + +LPF_16_16_VH ssse3 +LPF_16_16_VH avx + +%endif ; x86-64 From 1f451eed606b680751e429660cc0945c60d0430c Mon Sep 17 00:00:00 2001 From: James Almer Date: Fri, 17 Jan 2014 03:55:44 -0300 Subject: [PATCH 04/23] vp9lpf/x86: add ff_vp9_loop_filter_[vh]_16_16_sse2(). Similar gains in performance as the SSSE3 version Signed-off-by: James Almer Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9dsp_init.c | 19 +++++++++++++++---- libavcodec/x86/vp9lpf.asm | 14 ++++++++------ 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c index 93453b83e3..139603c82f 100644 --- a/libavcodec/x86/vp9dsp_init.c +++ b/libavcodec/x86/vp9dsp_init.c @@ -217,10 +217,17 @@ filters_8tap_1d_fn2(avg, 32, avx2, ssse3) #undef filters_8tap_1d_fn3 #undef filter_8tap_1d_fn -void ff_vp9_loop_filter_v_16_16_ssse3(uint8_t *dst, ptrdiff_t stride, int E, int I, int H); -void ff_vp9_loop_filter_v_16_16_avx (uint8_t *dst, ptrdiff_t stride, int E, int I, int H); -void ff_vp9_loop_filter_h_16_16_ssse3(uint8_t *dst, ptrdiff_t stride, int E, int I, int H); -void ff_vp9_loop_filter_h_16_16_avx (uint8_t *dst, ptrdiff_t stride, int E, int I, int H); +#define lpf_funcs(size1, size2, opt) \ +void ff_vp9_loop_filter_v_##size1##_##size2##_##opt(uint8_t *dst, ptrdiff_t stride, \ + int E, int I, int H); \ +void ff_vp9_loop_filter_h_##size1##_##size2##_##opt(uint8_t *dst, ptrdiff_t stride, \ + int E, int I, int H) + +lpf_funcs(16, 16, sse2); +lpf_funcs(16, 16, ssse3); +lpf_funcs(16, 16, avx); + +#undef lpf_funcs #endif /* HAVE_YASM */ @@ -283,6 +290,10 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) init_fpel(2, 1, 16, avg, sse2); init_fpel(1, 1, 32, avg, sse2); init_fpel(0, 1, 64, avg, sse2); + if (ARCH_X86_64) { + dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_sse2; + dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_sse2; + } } if (EXTERNAL_SSSE3(cpu_flags)) { diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index 8568f1d613..d4c70f5b9c 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -327,11 +327,11 @@ SECTION .text %endif ; calc fm mask +%if cpuflag(ssse3) pxor m0, m0 - movd m2, Id - movd m3, Ed - pshufb m2, m0 ; I I I I ... - pshufb m3, m0 ; E E E E ... +%endif + SPLATB_REG m2, I, m0 ; I I I I ... + SPLATB_REG m3, E, m0 ; E E E E ... mova m0, [pb_80] pxor m2, m0 pxor m3, m0 @@ -383,9 +383,10 @@ SECTION .text ABSSUB_CMP m1, m9, m11, m6, m4, m5, m8 ; abs(p2 - p0) <= 1 pand m2, m1 ABSSUB m4, m10, m11, m5 ; abs(p1 - p0) +%if cpuflag(ssse3) pxor m0, m0 - movd m7, Hd - pshufb m7, m0 ; H H H H ... +%endif + SPLATB_REG m7, H, m0 ; H H H H ... 
pxor m7, m8 pxor m4, m8 pcmpgtb m0, m4, m7 ; abs(p1 - p0) > H (1/2 hev condition) @@ -595,6 +596,7 @@ cglobal vp9_loop_filter_h_16_16, 5,10,16, 256, dst, stride, E, I, H, mstride, ds RET %endmacro +LPF_16_16_VH sse2 LPF_16_16_VH ssse3 LPF_16_16_VH avx From 6bea478158910b1a5cac4f3fd739cad8ec5740ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20B=C5=93sch?= Date: Sat, 25 Jan 2014 17:38:42 +0100 Subject: [PATCH 05/23] vp9lpf/x86: add ff_vp9_loop_filter_[vh]_88_16_{ssse3,avx}. Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9dsp_init.c | 6 ++ libavcodec/x86/vp9lpf.asm | 184 ++++++++++++++++++++++++++++++----- 2 files changed, 163 insertions(+), 27 deletions(-) diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c index 139603c82f..00a57984fb 100644 --- a/libavcodec/x86/vp9dsp_init.c +++ b/libavcodec/x86/vp9dsp_init.c @@ -226,6 +226,8 @@ void ff_vp9_loop_filter_h_##size1##_##size2##_##opt(uint8_t *dst, ptrdiff_t stri lpf_funcs(16, 16, sse2); lpf_funcs(16, 16, ssse3); lpf_funcs(16, 16, avx); +lpf_funcs(88, 16, ssse3); +lpf_funcs(88, 16, avx); #undef lpf_funcs @@ -301,6 +303,8 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) init_subpel3(1, avg, ssse3); if (ARCH_X86_64) { + dsp->loop_filter_mix2[1][1][0] = ff_vp9_loop_filter_h_88_16_ssse3; + dsp->loop_filter_mix2[1][1][1] = ff_vp9_loop_filter_v_88_16_ssse3; dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_ssse3; dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_ssse3; } @@ -311,6 +315,8 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) init_fpel(0, 0, 64, put, avx); if (ARCH_X86_64) { + dsp->loop_filter_mix2[1][1][0] = ff_vp9_loop_filter_h_88_16_avx; + dsp->loop_filter_mix2[1][1][1] = ff_vp9_loop_filter_v_88_16_avx; dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_avx; dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_avx; } diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index d4c70f5b9c..183f3f6874 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -40,6 +40,11 @@ pb_fe: times 16 db 0xfe pw_4: times 8 dw 4 pw_8: times 8 dw 8 +; with mix functions, two 8-bit thresholds are stored in a 16-bit storage, +; the following mask is used to splat both in the same register +mask_mix: times 8 db 0 + times 8 db 1 + SECTION .text ; %1 = abs(%2-%3) @@ -236,6 +241,38 @@ SECTION .text SWAP %12, %14 %endmacro +; transpose 16 half lines (high part) to 8 full centered lines +%macro TRANSPOSE16x8B 16 + punpcklbw m%1, m%2 + punpcklbw m%3, m%4 + punpcklbw m%5, m%6 + punpcklbw m%7, m%8 + punpcklbw m%9, m%10 + punpcklbw m%11, m%12 + punpcklbw m%13, m%14 + punpcklbw m%15, m%16 + SBUTTERFLY wd, %1, %3, %2 + SBUTTERFLY wd, %5, %7, %2 + SBUTTERFLY wd, %9, %11, %2 + SBUTTERFLY wd, %13, %15, %2 + SBUTTERFLY dq, %1, %5, %2 + SBUTTERFLY dq, %3, %7, %2 + SBUTTERFLY dq, %9, %13, %2 + SBUTTERFLY dq, %11, %15, %2 + SBUTTERFLY qdq, %1, %9, %2 + SBUTTERFLY qdq, %3, %11, %2 + SBUTTERFLY qdq, %5, %13, %2 + SBUTTERFLY qdq, %7, %15, %2 + SWAP %5, %1 + SWAP %6, %9 + SWAP %7, %1 + SWAP %8, %13 + SWAP %9, %3 + SWAP %10, %11 + SWAP %11, %1 + SWAP %12, %15 +%endmacro + %macro DEFINE_REAL_P7_TO_Q7 0 %define P7 dst1q + 2*mstrideq %define P6 dst1q + mstrideq @@ -255,7 +292,7 @@ SECTION .text %define Q7 dst2q + strideq %endmacro -%macro LPF_16_16 1 +%macro LOOPFILTER 2 ; %1=v/h %2=size1 lea mstrideq, [strideq] neg mstrideq @@ -264,7 +301,13 @@ SECTION .text neg mstride3q %ifidn %1, h +%if %2 == 88 +%define movx movh + lea dstq, [dstq + 8*strideq - 4] +%else +%define movx movu lea dstq, [dstq + 8*strideq 
- 8] ; go from top center (h pos) to center left (v pos) +%endif %endif lea dst1q, [dstq + 2*mstride3q] ; dst1q = &dst[stride * -6] @@ -273,24 +316,22 @@ SECTION .text DEFINE_REAL_P7_TO_Q7 %ifidn %1, h - movu m0, [P7] - movu m1, [P6] - movu m2, [P5] - movu m3, [P4] - movu m4, [P3] - movu m5, [P2] - movu m6, [P1] - movu m7, [P0] - movu m8, [Q0] - movu m9, [Q1] - movu m10, [Q2] - movu m11, [Q3] - movu m12, [Q4] - movu m13, [Q5] - movu m14, [Q6] - movu m15, [Q7] - TRANSPOSE16x16B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, [rsp] - + movx m0, [P7] + movx m1, [P6] + movx m2, [P5] + movx m3, [P4] + movx m4, [P3] + movx m5, [P2] + movx m6, [P1] + movx m7, [P0] + movx m8, [Q0] + movx m9, [Q1] + movx m10, [Q2] + movx m11, [Q3] + movx m12, [Q4] + movx m13, [Q5] + movx m14, [Q6] + movx m15, [Q7] %define P7 rsp + 0 %define P6 rsp + 16 %define P5 rsp + 32 @@ -308,10 +349,15 @@ SECTION .text %define Q6 rsp + 224 %define Q7 rsp + 240 +%if %2 != 88 + TRANSPOSE16x16B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, [rsp] mova [P7], m0 mova [P6], m1 mova [P5], m2 mova [P4], m3 +%else + TRANSPOSE16x8B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 +%endif mova [P3], m4 mova [P2], m5 mova [P1], m6 @@ -320,18 +366,28 @@ SECTION .text mova [Q1], m9 mova [Q2], m10 mova [Q3], m11 +%if %2 != 88 mova [Q4], m12 mova [Q5], m13 mova [Q6], m14 mova [Q7], m15 +%endif %endif ; calc fm mask +%if %2 == 16 %if cpuflag(ssse3) pxor m0, m0 %endif SPLATB_REG m2, I, m0 ; I I I I ... SPLATB_REG m3, E, m0 ; E E E E ... +%elif %2 == 88 + mova m0, [mask_mix] + movd m2, Id + movd m3, Ed + pshufb m2, m0 + pshufb m3, m0 +%endif mova m0, [pb_80] pxor m2, m0 pxor m3, m0 @@ -383,10 +439,15 @@ SECTION .text ABSSUB_CMP m1, m9, m11, m6, m4, m5, m8 ; abs(p2 - p0) <= 1 pand m2, m1 ABSSUB m4, m10, m11, m5 ; abs(p1 - p0) +%if %2 != 88 %if cpuflag(ssse3) pxor m0, m0 %endif SPLATB_REG m7, H, m0 ; H H H H ... 
+%else + movd m7, Hd + pshufb m7, [mask_mix] +%endif pxor m7, m8 pxor m4, m8 pcmpgtb m0, m4, m7 ; abs(p1 - p0) > H (1/2 hev condition) @@ -403,6 +464,7 @@ SECTION .text ABSSUB_CMP m1, m15, m12, m6, m4, m5, m8 ; abs(q3 - q0) <= 1 pand m2, m1 ; flat8in final value +%if %2 != 88 ; (m0: hev, m2: flat8in, m3: fm, m6: pb_81, m9..15: p2 p1 p0 q0 q1 q2 q3) ; calc flat8out mask mova m8, [P7] @@ -428,6 +490,7 @@ SECTION .text pand m1, m7 ABSSUB_CMP m7, m15, m12, m6, m4, m5 ; abs(q5 - q0) <= 1 pand m1, m7 ; flat8out final value +%endif ; if (fm) { ; if (out && in) filter_14() @@ -441,7 +504,7 @@ SECTION .text ; f2: fm & ~f14 & ~f6 & hev => fm & ~(out & in) & ~(~out & in) & hev => fm & ~in & hev ; f4: fm & ~f14 & ~f6 & ~f2 => fm & ~(out & in) & ~(~out & in) & ~(~in & hev) => fm & ~in & ~hev - ; (m0: hev, m1: flat8out, m2: flat8in, m3: fm, m8..15: p5 p4 p1 p0 q0 q1 q6 q7) + ; (m0: hev, [m1: flat8out], m2: flat8in, m3: fm, m8..15: p5 p4 p1 p0 q0 q1 q6 q7) ; filter2() mova m6, [pb_80] pxor m15, m12, m6 ; q0 ^ 0x80 @@ -464,7 +527,7 @@ SECTION .text MASK_APPLY m7, m12, m6, m5 ; m7 = filter2(q0) & mask / we write it in filter4() MASK_APPLY m8, m11, m6, m5 ; m8 = filter2(p0) & mask / we write it in filter4() - ; (m0: hev, m1: flat8out, m2: flat8in, m3: fm, m7..m8: q0' p0', m10..13: p1 p0 q0 q1, m14: pb_10, m15: q0-p0) + ; (m0: hev, [m1: flat8out], m2: flat8in, m3: fm, m7..m8: q0' p0', m10..13: p1 p0 q0 q1, m14: pb_10, m15: q0-p0) ; filter4() mova m4, m15 paddsb m15, m4 ; 2 * (q0 - p0) @@ -491,11 +554,15 @@ SECTION .text mova [P1], m7 mova [Q1], m4 - ; (m1: flat8out, m2: flat8in, m3: fm, m10..13: p1 p0 q0 q1) + ; ([m1: flat8out], m2: flat8in, m3: fm, m10..13: p1 p0 q0 q1) ; filter6() pxor m0, m0 +%if %2 == 88 + pand m3, m2 +%else pand m2, m3 ; mask(fm) & mask(in) pandn m3, m1, m2 ; ~mask(out) & (mask(fm) & mask(in)) +%endif mova m14, [P3] mova m15, [P2] mova m8, [Q2] @@ -507,7 +574,7 @@ SECTION .text FILTER_UPDATE m4, m5, m6, m7, [Q1], m15, m12, m13, m9, 3, m3 ; [q1] -p2 -q0 +q1 +q3 FILTER_UPDATE m6, m7, m4, m5, [Q2], m10, m13, m8, m9, 3, m3, m8 ; [q2] -p1 -q1 +q2 +q3 - ; (m0: 0, m1: flat8out, m2: fm & flat8in, m8..15: q2 q3 p1 p0 q0 q1 p3 p2) + ; (m0: 0, [m1: flat8out], m2: fm & flat8in, m8..15: q2 q3 p1 p0 q0 q1 p3 p2) ; filter14() ; ; m2 m3 m8 m9 m14 m15 m10 m11 m12 m13 @@ -528,6 +595,7 @@ SECTION .text ; q5 +5 -p2 -q4 +q5 +q7 . q5 . . ; q6 +6 -p1 -q5 +q6 +q7 . q6 . . +%if %2 != 88 pand m1, m2 ; mask(out) & (mask(fm) & mask(in)) mova m2, [P7] mova m3, [P6] @@ -547,8 +615,10 @@ SECTION .text FILTER_UPDATE m6, m7, m4, m5, [Q4], m14, m8, m9, m13, 4, m1, m9, m9, [Q4] ; [q4] -p3 -q3 +q4 +q7 FILTER_UPDATE m4, m5, m6, m7, [Q5], m15, m9, m14, m13, 4, m1, m14, m14, [Q5] ; [q5] -p2 -q4 +q5 +q7 FILTER_UPDATE m6, m7, m4, m5, [Q6], m10, m14, m15, m13, 4, m1, m15, m15, [Q6] ; [q6] -p1 -q5 +q6 +q7 +%endif %ifidn %1, h +%if %2 != 88 mova m0, [P7] mova m1, [P6] mova m2, [P5] @@ -583,21 +653,81 @@ SECTION .text movu [Q5], m13 movu [Q6], m14 movu [Q7], m15 +%else + ; the following code do a transpose of 8 full lines to 16 half + ; lines (high part). 
It is inlined to avoid the need of a staging area + mova m0, [P3] + mova m1, [P2] + mova m2, [P1] + mova m3, [P0] + mova m4, [Q0] + mova m5, [Q1] + mova m6, [Q2] + mova m7, [Q3] + DEFINE_REAL_P7_TO_Q7 + SBUTTERFLY bw, 0, 1, 8 + SBUTTERFLY bw, 2, 3, 8 + SBUTTERFLY bw, 4, 5, 8 + SBUTTERFLY bw, 6, 7, 8 + SBUTTERFLY wd, 0, 2, 8 + SBUTTERFLY wd, 1, 3, 8 + SBUTTERFLY wd, 4, 6, 8 + SBUTTERFLY wd, 5, 7, 8 + SBUTTERFLY dq, 0, 4, 8 + SBUTTERFLY dq, 1, 5, 8 + SBUTTERFLY dq, 2, 6, 8 + SBUTTERFLY dq, 3, 7, 8 + movh [P7], m0 + punpckhqdq m0, m8 + movh [P6], m0 + movh [Q0], m1 + punpckhqdq m1, m9 + movh [Q1], m1 + movh [P3], m2 + punpckhqdq m2, m10 + movh [P2], m2 + movh [Q4], m3 + punpckhqdq m3, m11 + movh [Q5], m3 + movh [P5], m4 + punpckhqdq m4, m12 + movh [P4], m4 + movh [Q2], m5 + punpckhqdq m5, m13 + movh [Q3], m5 + movh [P1], m6 + punpckhqdq m6, m14 + movh [P0], m6 + movh [Q6], m7 + punpckhqdq m7, m8 + movh [Q7], m7 +%endif %endif + + RET %endmacro %macro LPF_16_16_VH 1 INIT_XMM %1 cglobal vp9_loop_filter_v_16_16, 5,10,16, dst, stride, E, I, H, mstride, dst1, dst2, stride3, mstride3 - LPF_16_16 v - RET + LOOPFILTER v, 16 cglobal vp9_loop_filter_h_16_16, 5,10,16, 256, dst, stride, E, I, H, mstride, dst1, dst2, stride3, mstride3 - LPF_16_16 h - RET + LOOPFILTER h, 16 +%endmacro + +%macro LPF_88_16_VH 1 +INIT_XMM %1 +cglobal vp9_loop_filter_v_88_16, 5,10,16, dst, stride, E, I, H, mstride, dst1, dst2, stride3, mstride3 + LOOPFILTER v, 88 +cglobal vp9_loop_filter_h_88_16, 5,10,16, 256, dst, stride, E, I, H, mstride, dst1, dst2, stride3, mstride3 + LOOPFILTER h, 88 %endmacro LPF_16_16_VH sse2 LPF_16_16_VH ssse3 LPF_16_16_VH avx +LPF_88_16_VH ssse3 +LPF_88_16_VH avx + %endif ; x86-64 From 92d47550ea099fde8c6f4443c94ec768e19ffd26 Mon Sep 17 00:00:00 2001 From: James Almer Date: Tue, 28 Jan 2014 04:59:45 -0300 Subject: [PATCH 06/23] vp9lpf/x86: add an SSE2 version of vp9_loop_filter_[vh]_88_16 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Similar gains as the ssse3 version once again Additional improvements by Clément Bœsch . 
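The SSE2 fallback replaces the pshufb-based splat of the two packed 8-bit thresholds with a punpcklbw/punpcklwd/punpckldq sequence (the new SPLATB_MIX macro). In scalar terms the splat produces the following layout (a sketch; the helper is illustrative, not code from this patch):

    #include <stdint.h>

    /* Low byte of the packed E/I/H argument drives lanes 0..7 (first 8px
     * half of the edge), the high byte drives lanes 8..15 (second half),
     * matching the mask_mix shuffle constant. */
    static void splat_mix(uint8_t out[16], unsigned packed)
    {
        for (int i = 0; i < 8; i++) {
            out[i]     =  packed       & 0xff;
            out[i + 8] = (packed >> 8) & 0xff;
        }
    }
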
Signed-off-by: James Almer Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9dsp_init.c | 3 +++ libavcodec/x86/vp9lpf.asm | 20 +++++++++++++++++--- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c index 00a57984fb..37d53d21b2 100644 --- a/libavcodec/x86/vp9dsp_init.c +++ b/libavcodec/x86/vp9dsp_init.c @@ -226,6 +226,7 @@ void ff_vp9_loop_filter_h_##size1##_##size2##_##opt(uint8_t *dst, ptrdiff_t stri lpf_funcs(16, 16, sse2); lpf_funcs(16, 16, ssse3); lpf_funcs(16, 16, avx); +lpf_funcs(88, 16, sse2); lpf_funcs(88, 16, ssse3); lpf_funcs(88, 16, avx); @@ -293,6 +294,8 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) init_fpel(1, 1, 32, avg, sse2); init_fpel(0, 1, 64, avg, sse2); if (ARCH_X86_64) { + dsp->loop_filter_mix2[1][1][0] = ff_vp9_loop_filter_h_88_16_sse2; + dsp->loop_filter_mix2[1][1][1] = ff_vp9_loop_filter_v_88_16_sse2; dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_sse2; dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_sse2; } diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index 183f3f6874..bde3fcb22b 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -292,6 +292,17 @@ SECTION .text %define Q7 dst2q + strideq %endmacro +; ..............AB -> AAAAAAAABBBBBBBB +%macro SPLATB_MIX 1-2 [mask_mix] +%if cpuflag(ssse3) + pshufb %1, %2 +%else + punpcklbw %1, %1 + punpcklwd %1, %1 + punpckldq %1, %1 +%endif +%endmacro + %macro LOOPFILTER 2 ; %1=v/h %2=size1 lea mstrideq, [strideq] neg mstrideq @@ -382,11 +393,13 @@ SECTION .text SPLATB_REG m2, I, m0 ; I I I I ... SPLATB_REG m3, E, m0 ; E E E E ... %elif %2 == 88 +%if cpuflag(ssse3) mova m0, [mask_mix] +%endif movd m2, Id movd m3, Ed - pshufb m2, m0 - pshufb m3, m0 + SPLATB_MIX m2, m0 + SPLATB_MIX m3, m0 %endif mova m0, [pb_80] pxor m2, m0 @@ -446,7 +459,7 @@ SECTION .text SPLATB_REG m7, H, m0 ; H H H H ... %else movd m7, Hd - pshufb m7, [mask_mix] + SPLATB_MIX m7 %endif pxor m7, m8 pxor m4, m8 @@ -727,6 +740,7 @@ LPF_16_16_VH sse2 LPF_16_16_VH ssse3 LPF_16_16_VH avx +LPF_88_16_VH sse2 LPF_88_16_VH ssse3 LPF_88_16_VH avx From f2e3d706a16d51dfc6862ab0c8798fc42aa500c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20B=C5=93sch?= Date: Thu, 30 Jan 2014 19:01:30 +0100 Subject: [PATCH 07/23] vp9lpf/x86: add ff_vp9_loop_filter_h_{48,84}_16_{sse2,ssse3,avx}(). 
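The mix2 functions handle a 16-pixel edge whose two 8-pixel halves need different filters; for 84/48 one half may use the flat8 (8-wide) filter while the other is limited to the 4-wide filter, which is why flat8in is additionally masked with mask_mix84/mask_mix48 below. The two per-half levels arrive packed byte-wise in each of the E/I/H arguments, as in the checkasm test's M() macro; a sketch (helper name illustrative):

    /* Low byte = level for the first 8 pixels, high byte = level for the
     * second 8, matching ((a[1] << 8) | a[0]) in the test. */
    static int pack_levels(int lvl0, int lvl1)
    {
        return (lvl1 << 8) | lvl0;
    }
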
Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9dsp_init.c | 42 +++++++++++++------------ libavcodec/x86/vp9lpf.asm | 59 +++++++++++++++++++----------------- 2 files changed, 53 insertions(+), 48 deletions(-) diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c index 37d53d21b2..b649372494 100644 --- a/libavcodec/x86/vp9dsp_init.c +++ b/libavcodec/x86/vp9dsp_init.c @@ -226,6 +226,12 @@ void ff_vp9_loop_filter_h_##size1##_##size2##_##opt(uint8_t *dst, ptrdiff_t stri lpf_funcs(16, 16, sse2); lpf_funcs(16, 16, ssse3); lpf_funcs(16, 16, avx); +lpf_funcs(84, 16, sse2); +lpf_funcs(84, 16, ssse3); +lpf_funcs(84, 16, avx); +lpf_funcs(48, 16, sse2); +lpf_funcs(48, 16, ssse3); +lpf_funcs(48, 16, avx); lpf_funcs(88, 16, sse2); lpf_funcs(88, 16, ssse3); lpf_funcs(88, 16, avx); @@ -269,6 +275,19 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) init_subpel3_8to64(idx, type, opt); \ init_subpel2(4, idx, 4, type, opt) +#define init_lpf(opt) do { \ + if (ARCH_X86_64) { \ + dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_##opt; \ + dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_##opt; \ + dsp->loop_filter_mix2[0][1][0] = ff_vp9_loop_filter_h_48_16_##opt; \ + dsp->loop_filter_mix2[0][1][1] = ff_vp9_loop_filter_v_48_16_##opt; \ + dsp->loop_filter_mix2[1][0][0] = ff_vp9_loop_filter_h_84_16_##opt; \ + dsp->loop_filter_mix2[1][0][1] = ff_vp9_loop_filter_v_84_16_##opt; \ + dsp->loop_filter_mix2[1][1][0] = ff_vp9_loop_filter_h_88_16_##opt; \ + dsp->loop_filter_mix2[1][1][1] = ff_vp9_loop_filter_v_88_16_##opt; \ + } \ +} while (0) + if (EXTERNAL_MMX(cpu_flags)) { init_fpel(4, 0, 4, put, mmx); init_fpel(3, 0, 8, put, mmx); @@ -293,36 +312,19 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) init_fpel(2, 1, 16, avg, sse2); init_fpel(1, 1, 32, avg, sse2); init_fpel(0, 1, 64, avg, sse2); - if (ARCH_X86_64) { - dsp->loop_filter_mix2[1][1][0] = ff_vp9_loop_filter_h_88_16_sse2; - dsp->loop_filter_mix2[1][1][1] = ff_vp9_loop_filter_v_88_16_sse2; - dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_sse2; - dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_sse2; - } + init_lpf(sse2); } if (EXTERNAL_SSSE3(cpu_flags)) { init_subpel3(0, put, ssse3); init_subpel3(1, avg, ssse3); - - if (ARCH_X86_64) { - dsp->loop_filter_mix2[1][1][0] = ff_vp9_loop_filter_h_88_16_ssse3; - dsp->loop_filter_mix2[1][1][1] = ff_vp9_loop_filter_v_88_16_ssse3; - dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_ssse3; - dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_ssse3; - } + init_lpf(ssse3); } if (EXTERNAL_AVX(cpu_flags)) { init_fpel(1, 0, 32, put, avx); init_fpel(0, 0, 64, put, avx); - - if (ARCH_X86_64) { - dsp->loop_filter_mix2[1][1][0] = ff_vp9_loop_filter_h_88_16_avx; - dsp->loop_filter_mix2[1][1][1] = ff_vp9_loop_filter_v_88_16_avx; - dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_avx; - dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_avx; - } + init_lpf(avx); } if (EXTERNAL_AVX2(cpu_flags)) { diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index bde3fcb22b..5377d96e7e 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -45,6 +45,11 @@ pw_8: times 8 dw 8 mask_mix: times 8 db 0 times 8 db 1 +mask_mix84: times 8 db 0xff + times 8 db 0x00 +mask_mix48: times 8 db 0x00 + times 8 db 0xff + SECTION .text ; %1 = abs(%2-%3) @@ -312,7 +317,7 @@ SECTION .text neg mstride3q %ifidn %1, h -%if %2 == 88 +%if %2 > 16 %define movx movh lea dstq, [dstq + 8*strideq - 4] %else @@ -360,7 +365,7 @@ SECTION .text %define Q6 rsp + 224 %define Q7 rsp + 240 -%if %2 
!= 88 +%if %2 == 16 TRANSPOSE16x16B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, [rsp] mova [P7], m0 mova [P6], m1 @@ -377,7 +382,7 @@ SECTION .text mova [Q1], m9 mova [Q2], m10 mova [Q3], m11 -%if %2 != 88 +%if %2 == 16 mova [Q4], m12 mova [Q5], m13 mova [Q6], m14 @@ -392,7 +397,7 @@ SECTION .text %endif SPLATB_REG m2, I, m0 ; I I I I ... SPLATB_REG m3, E, m0 ; E E E E ... -%elif %2 == 88 +%else %if cpuflag(ssse3) mova m0, [mask_mix] %endif @@ -452,7 +457,7 @@ SECTION .text ABSSUB_CMP m1, m9, m11, m6, m4, m5, m8 ; abs(p2 - p0) <= 1 pand m2, m1 ABSSUB m4, m10, m11, m5 ; abs(p1 - p0) -%if %2 != 88 +%if %2 == 16 %if cpuflag(ssse3) pxor m0, m0 %endif @@ -476,8 +481,11 @@ SECTION .text pand m2, m1 ABSSUB_CMP m1, m15, m12, m6, m4, m5, m8 ; abs(q3 - q0) <= 1 pand m2, m1 ; flat8in final value +%if %2 == 84 || %2 == 48 + pand m2, [mask_mix%2] +%endif -%if %2 != 88 +%if %2 == 16 ; (m0: hev, m2: flat8in, m3: fm, m6: pb_81, m9..15: p2 p1 p0 q0 q1 q2 q3) ; calc flat8out mask mova m8, [P7] @@ -570,7 +578,7 @@ SECTION .text ; ([m1: flat8out], m2: flat8in, m3: fm, m10..13: p1 p0 q0 q1) ; filter6() pxor m0, m0 -%if %2 == 88 +%if %2 > 16 pand m3, m2 %else pand m2, m3 ; mask(fm) & mask(in) @@ -608,7 +616,7 @@ SECTION .text ; q5 +5 -p2 -q4 +q5 +q7 . q5 . . ; q6 +6 -p1 -q5 +q6 +q7 . q6 . . -%if %2 != 88 +%if %2 == 16 pand m1, m2 ; mask(out) & (mask(fm) & mask(in)) mova m2, [P7] mova m3, [P6] @@ -631,7 +639,7 @@ SECTION .text %endif %ifidn %1, h -%if %2 != 88 +%if %2 == 16 mova m0, [P7] mova m1, [P6] mova m2, [P5] @@ -720,28 +728,23 @@ SECTION .text RET %endmacro -%macro LPF_16_16_VH 1 -INIT_XMM %1 -cglobal vp9_loop_filter_v_16_16, 5,10,16, dst, stride, E, I, H, mstride, dst1, dst2, stride3, mstride3 - LOOPFILTER v, 16 -cglobal vp9_loop_filter_h_16_16, 5,10,16, 256, dst, stride, E, I, H, mstride, dst1, dst2, stride3, mstride3 - LOOPFILTER h, 16 +%macro LPF_16_VH 2 +INIT_XMM %2 +cglobal vp9_loop_filter_v_%1_16, 5,10,16, dst, stride, E, I, H, mstride, dst1, dst2, stride3, mstride3 + LOOPFILTER v, %1 +cglobal vp9_loop_filter_h_%1_16, 5,10,16, 256, dst, stride, E, I, H, mstride, dst1, dst2, stride3, mstride3 + LOOPFILTER h, %1 %endmacro -%macro LPF_88_16_VH 1 -INIT_XMM %1 -cglobal vp9_loop_filter_v_88_16, 5,10,16, dst, stride, E, I, H, mstride, dst1, dst2, stride3, mstride3 - LOOPFILTER v, 88 -cglobal vp9_loop_filter_h_88_16, 5,10,16, 256, dst, stride, E, I, H, mstride, dst1, dst2, stride3, mstride3 - LOOPFILTER h, 88 +%macro LPF_16_VH_ALL_OPTS 1 +LPF_16_VH %1, sse2 +LPF_16_VH %1, ssse3 +LPF_16_VH %1, avx %endmacro -LPF_16_16_VH sse2 -LPF_16_16_VH ssse3 -LPF_16_16_VH avx - -LPF_88_16_VH sse2 -LPF_88_16_VH ssse3 -LPF_88_16_VH avx +LPF_16_VH_ALL_OPTS 16 +LPF_16_VH_ALL_OPTS 48 +LPF_16_VH_ALL_OPTS 84 +LPF_16_VH_ALL_OPTS 88 %endif ; x86-64 From 0ed21bdc9e7f9ef557a7d63fbaa6ce65eb455b9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20B=C5=93sch?= Date: Wed, 5 Feb 2014 07:21:06 +0100 Subject: [PATCH 08/23] vp9lpf/x86: add ff_vp9_loop_filter_[vh]_44_16_{sse2,ssse3,avx}. 
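With both halves limited to the 4-wide filter there is no flat8in/flat8out path at all, so the 44 variant only needs the fm and hev masks and runs filter2()/filter4(). The hev ("high edge variance") test it computes directly is, in scalar form (a sketch, not the decoder's actual helper):

    #include <stdlib.h>

    static int hev_mask(int p1, int p0, int q0, int q1, int H)
    {
        /* either side of the edge deviating by more than H flags hev */
        return abs(p1 - p0) > H || abs(q1 - q0) > H;
    }
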
Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9dsp_init.c | 5 ++ libavcodec/x86/vp9lpf.asm | 121 +++++++++++++++++++++++++++-------- 2 files changed, 99 insertions(+), 27 deletions(-) diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c index b649372494..88267b9fc9 100644 --- a/libavcodec/x86/vp9dsp_init.c +++ b/libavcodec/x86/vp9dsp_init.c @@ -226,6 +226,9 @@ void ff_vp9_loop_filter_h_##size1##_##size2##_##opt(uint8_t *dst, ptrdiff_t stri lpf_funcs(16, 16, sse2); lpf_funcs(16, 16, ssse3); lpf_funcs(16, 16, avx); +lpf_funcs(44, 16, sse2); +lpf_funcs(44, 16, ssse3); +lpf_funcs(44, 16, avx); lpf_funcs(84, 16, sse2); lpf_funcs(84, 16, ssse3); lpf_funcs(84, 16, avx); @@ -279,6 +282,8 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) if (ARCH_X86_64) { \ dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_##opt; \ dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_##opt; \ + dsp->loop_filter_mix2[0][0][0] = ff_vp9_loop_filter_h_44_16_##opt; \ + dsp->loop_filter_mix2[0][0][1] = ff_vp9_loop_filter_v_44_16_##opt; \ dsp->loop_filter_mix2[0][1][0] = ff_vp9_loop_filter_h_48_16_##opt; \ dsp->loop_filter_mix2[0][1][1] = ff_vp9_loop_filter_v_48_16_##opt; \ dsp->loop_filter_mix2[1][0][0] = ff_vp9_loop_filter_h_84_16_##opt; \ diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index 5377d96e7e..6138da101a 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -278,23 +278,23 @@ SECTION .text SWAP %12, %15 %endmacro -%macro DEFINE_REAL_P7_TO_Q7 0 -%define P7 dst1q + 2*mstrideq -%define P6 dst1q + mstrideq -%define P5 dst1q -%define P4 dst1q + strideq -%define P3 dstq + 4*mstrideq -%define P2 dstq + mstride3q -%define P1 dstq + 2*mstrideq -%define P0 dstq + mstrideq -%define Q0 dstq -%define Q1 dstq + strideq -%define Q2 dstq + 2*strideq -%define Q3 dstq + stride3q -%define Q4 dstq + 4*strideq -%define Q5 dst2q + mstrideq -%define Q6 dst2q -%define Q7 dst2q + strideq +%macro DEFINE_REAL_P7_TO_Q7 0-1 0 +%define P7 dst1q + 2*mstrideq + %1 +%define P6 dst1q + mstrideq + %1 +%define P5 dst1q + %1 +%define P4 dst1q + strideq + %1 +%define P3 dstq + 4*mstrideq + %1 +%define P2 dstq + mstride3q + %1 +%define P1 dstq + 2*mstrideq + %1 +%define P0 dstq + mstrideq + %1 +%define Q0 dstq + %1 +%define Q1 dstq + strideq + %1 +%define Q2 dstq + 2*strideq + %1 +%define Q3 dstq + stride3q + %1 +%define Q4 dstq + 4*strideq + %1 +%define Q5 dst2q + mstrideq + %1 +%define Q6 dst2q + %1 +%define Q7 dst2q + strideq + %1 %endmacro ; ..............AB -> AAAAAAAABBBBBBBB @@ -450,8 +450,9 @@ SECTION .text pand m3, m5 ; fm final value ; (m3: fm, m8..15: p3 p2 p1 p0 q0 q1 q2 q3) - ; calc flat8in and hev masks + ; calc flat8in (if not 44_16) and hev masks mova m6, [pb_81] ; [1 1 1 1 ...] 
^ 0x80 +%if %2 != 44 ABSSUB_CMP m2, m8, m11, m6, m4, m5 ; abs(p3 - p0) <= 1 mova m8, [pb_80] ABSSUB_CMP m1, m9, m11, m6, m4, m5, m8 ; abs(p2 - p0) <= 1 @@ -484,6 +485,19 @@ SECTION .text %if %2 == 84 || %2 == 48 pand m2, [mask_mix%2] %endif +%else + mova m6, [pb_80] + movd m7, Hd + SPLATB_MIX m7 + pxor m7, m6 + ABSSUB m4, m10, m11, m1 ; abs(p1 - p0) + pxor m4, m6 + pcmpgtb m0, m4, m7 ; abs(p1 - p0) > H (1/2 hev condition) + ABSSUB m4, m13, m12, m1 ; abs(q1 - q0) + pxor m4, m6 + pcmpgtb m5, m4, m7 ; abs(q1 - q0) > H (2/2 hev condition) + por m0, m5 ; hev final value +%endif %if %2 == 16 ; (m0: hev, m2: flat8in, m3: fm, m6: pb_81, m9..15: p2 p1 p0 q0 q1 q2 q3) @@ -525,9 +539,11 @@ SECTION .text ; f2: fm & ~f14 & ~f6 & hev => fm & ~(out & in) & ~(~out & in) & hev => fm & ~in & hev ; f4: fm & ~f14 & ~f6 & ~f2 => fm & ~(out & in) & ~(~out & in) & ~(~in & hev) => fm & ~in & ~hev - ; (m0: hev, [m1: flat8out], m2: flat8in, m3: fm, m8..15: p5 p4 p1 p0 q0 q1 q6 q7) + ; (m0: hev, [m1: flat8out], [m2: flat8in], m3: fm, m8..15: p5 p4 p1 p0 q0 q1 q6 q7) ; filter2() - mova m6, [pb_80] +%if %2 != 44 + mova m6, [pb_80] ; already in m6 if 44_16 +%endif pxor m15, m12, m6 ; q0 ^ 0x80 pxor m14, m11, m6 ; p0 ^ 0x80 psubsb m15, m14 ; (signed) q0 - p0 @@ -543,12 +559,16 @@ SECTION .text SRSHIFT3B_2X m6, m4, m14, m7 ; f1 and f2 sign byte shift by 3 SIGN_SUB m7, m12, m6, m5, m9 ; m7 = q0 - f1 SIGN_ADD m8, m11, m4, m5, m9 ; m8 = p0 + f2 +%if %2 != 44 pandn m6, m2, m3 ; ~mask(in) & mask(fm) pand m6, m0 ; (~mask(in) & mask(fm)) & mask(hev) +%else + pand m6, m3, m0 +%endif MASK_APPLY m7, m12, m6, m5 ; m7 = filter2(q0) & mask / we write it in filter4() MASK_APPLY m8, m11, m6, m5 ; m8 = filter2(p0) & mask / we write it in filter4() - ; (m0: hev, [m1: flat8out], m2: flat8in, m3: fm, m7..m8: q0' p0', m10..13: p1 p0 q0 q1, m14: pb_10, m15: q0-p0) + ; (m0: hev, [m1: flat8out], [m2: flat8in], m3: fm, m7..m8: q0' p0', m10..13: p1 p0 q0 q1, m14: pb_10, m15: q0-p0) ; filter4() mova m4, m15 paddsb m15, m4 ; 2 * (q0 - p0) @@ -556,14 +576,22 @@ SECTION .text paddsb m6, m15, [pb_4] ; m6: f1 = clip(f + 4, 127) paddsb m15, [pb_3] ; m15: f2 = clip(f + 3, 127) SRSHIFT3B_2X m6, m15, m14, m9 ; f1 and f2 sign byte shift by 3 +%if %2 != 44 +%define p0tmp m7 +%define q0tmp m9 pandn m5, m2, m3 ; ~mask(in) & mask(fm) pandn m0, m5 ; ~mask(hev) & (~mask(in) & mask(fm)) - SIGN_SUB m9, m12, m6, m4, m14 ; q0 - f1 - MASK_APPLY m9, m7, m0, m5 ; m9 = filter4(q0) & mask - mova [Q0], m9 - SIGN_ADD m7, m11, m15, m4, m14 ; p0 + f2 - MASK_APPLY m7, m8, m0, m5 ; m7 = filter4(p0) & mask - mova [P0], m7 +%else +%define p0tmp m1 +%define q0tmp m2 + pandn m0, m3 +%endif + SIGN_SUB q0tmp, m12, m6, m4, m14 ; q0 - f1 + MASK_APPLY q0tmp, m7, m0, m5 ; filter4(q0) & mask + mova [Q0], q0tmp + SIGN_ADD p0tmp, m11, m15, m4, m14 ; p0 + f2 + MASK_APPLY p0tmp, m8, m0, m5 ; filter4(p0) & mask + mova [P0], p0tmp paddb m6, [pb_80] ; pxor m8, m8 ; f=(f1+1)>>1 pavgb m6, m8 ; @@ -577,6 +605,7 @@ SECTION .text ; ([m1: flat8out], m2: flat8in, m3: fm, m10..13: p1 p0 q0 q1) ; filter6() +%if %2 != 44 pxor m0, m0 %if %2 > 16 pand m3, m2 @@ -594,6 +623,7 @@ SECTION .text FILTER_UPDATE m6, m7, m4, m5, [Q0], m14, m11, m12, m9, 3, m3 ; [q0] -p3 -p0 +q0 +q3 FILTER_UPDATE m4, m5, m6, m7, [Q1], m15, m12, m13, m9, 3, m3 ; [q1] -p2 -q0 +q1 +q3 FILTER_UPDATE m6, m7, m4, m5, [Q2], m10, m13, m8, m9, 3, m3, m8 ; [q2] -p1 -q1 +q2 +q3 +%endif ; (m0: 0, [m1: flat8out], m2: fm & flat8in, m8..15: q2 q3 p1 p0 q0 q1 p3 p2) ; filter14() @@ -674,6 +704,42 @@ SECTION .text movu [Q5], m13 movu [Q6], m14 movu 
[Q7], m15 +%elif %2 == 44 + SWAP 0, 7 ; m0 = p1 + SWAP 3, 4 ; m3 = q1 + DEFINE_REAL_P7_TO_Q7 2 + SBUTTERFLY bw, 0, 1, 8 + SBUTTERFLY bw, 2, 3, 8 + SBUTTERFLY wd, 0, 2, 8 + SBUTTERFLY wd, 1, 3, 8 + SBUTTERFLY dq, 0, 4, 8 + SBUTTERFLY dq, 1, 5, 8 + SBUTTERFLY dq, 2, 6, 8 + SBUTTERFLY dq, 3, 7, 8 + movd [P7], m0 + punpckhqdq m0, m8 + movd [P6], m0 + movd [Q0], m1 + punpckhqdq m1, m9 + movd [Q1], m1 + movd [P3], m2 + punpckhqdq m2, m10 + movd [P2], m2 + movd [Q4], m3 + punpckhqdq m3, m11 + movd [Q5], m3 + movd [P5], m4 + punpckhqdq m4, m12 + movd [P4], m4 + movd [Q2], m5 + punpckhqdq m5, m13 + movd [Q3], m5 + movd [P1], m6 + punpckhqdq m6, m14 + movd [P0], m6 + movd [Q6], m7 + punpckhqdq m7, m8 + movd [Q7], m7 %else ; the following code do a transpose of 8 full lines to 16 half ; lines (high part). It is inlined to avoid the need of a staging area @@ -743,6 +809,7 @@ LPF_16_VH %1, avx %endmacro LPF_16_VH_ALL_OPTS 16 +LPF_16_VH_ALL_OPTS 44 LPF_16_VH_ALL_OPTS 48 LPF_16_VH_ALL_OPTS 84 LPF_16_VH_ALL_OPTS 88 From a6e288d62414c25ed173b17b48ddea947bede73e Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Fri, 19 Dec 2014 21:44:57 -0500 Subject: [PATCH 09/23] vp9lpf/x86: save one register in loopfilter surface coverage. Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9lpf.asm | 56 +++++++++++++++++++-------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index 6138da101a..dc22705696 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -279,22 +279,22 @@ SECTION .text %endmacro %macro DEFINE_REAL_P7_TO_Q7 0-1 0 -%define P7 dst1q + 2*mstrideq + %1 -%define P6 dst1q + mstrideq + %1 -%define P5 dst1q + %1 -%define P4 dst1q + strideq + %1 -%define P3 dstq + 4*mstrideq + %1 -%define P2 dstq + mstride3q + %1 -%define P1 dstq + 2*mstrideq + %1 -%define P0 dstq + mstrideq + %1 -%define Q0 dstq + %1 -%define Q1 dstq + strideq + %1 -%define Q2 dstq + 2*strideq + %1 -%define Q3 dstq + stride3q + %1 -%define Q4 dstq + 4*strideq + %1 -%define Q5 dst2q + mstrideq + %1 -%define Q6 dst2q + %1 -%define Q7 dst2q + strideq + %1 +%define P7 dstq + 4*mstrideq + %1 +%define P6 dstq + mstride3q + %1 +%define P5 dstq + 2*mstrideq + %1 +%define P4 dstq + mstrideq + %1 +%define P3 dstq + %1 +%define P2 dstq + strideq + %1 +%define P1 dstq + 2* strideq + %1 +%define P0 dstq + stride3q + %1 +%define Q0 dstq + 4* strideq + %1 +%define Q1 dst2q + mstride3q + %1 +%define Q2 dst2q + 2*mstrideq + %1 +%define Q3 dst2q + mstrideq + %1 +%define Q4 dst2q + %1 +%define Q5 dst2q + strideq + %1 +%define Q6 dst2q + 2* strideq + %1 +%define Q7 dst2q + stride3q + %1 %endmacro ; ..............AB -> AAAAAAAABBBBBBBB @@ -309,26 +309,26 @@ SECTION .text %endmacro %macro LOOPFILTER 2 ; %1=v/h %2=size1 - lea mstrideq, [strideq] - neg mstrideq + mov mstrideq, strideq + neg mstrideq - lea stride3q, [strideq+2*strideq] - mov mstride3q, stride3q - neg mstride3q + lea stride3q, [strideq*3] + lea mstride3q, [mstrideq*3] %ifidn %1, h %if %2 > 16 %define movx movh - lea dstq, [dstq + 8*strideq - 4] + lea dstq, [dstq + 4*strideq - 4] %else %define movx movu - lea dstq, [dstq + 8*strideq - 8] ; go from top center (h pos) to center left (v pos) + lea dstq, [dstq + 4*strideq - 8] ; go from top center (h pos) to center left (v pos) %endif + lea dst2q, [dstq + 8*strideq] +%else + lea dstq, [dstq + 4*mstrideq] + lea dst2q, [dstq + 8*strideq] %endif - lea dst1q, [dstq + 2*mstride3q] ; dst1q = &dst[stride * -6] - lea dst2q, [dstq + 2* stride3q] ; dst2q = 
&dst[stride * +6] - DEFINE_REAL_P7_TO_Q7 %ifidn %1, h @@ -796,9 +796,9 @@ SECTION .text %macro LPF_16_VH 2 INIT_XMM %2 -cglobal vp9_loop_filter_v_%1_16, 5,10,16, dst, stride, E, I, H, mstride, dst1, dst2, stride3, mstride3 +cglobal vp9_loop_filter_v_%1_16, 5,10,16, dst, stride, E, I, H, mstride, dst2, stride3, mstride3 LOOPFILTER v, %1 -cglobal vp9_loop_filter_h_%1_16, 5,10,16, 256, dst, stride, E, I, H, mstride, dst1, dst2, stride3, mstride3 +cglobal vp9_loop_filter_h_%1_16, 5,10,16, 256, dst, stride, E, I, H, mstride, dst2, stride3, mstride3 LOOPFILTER h, %1 %endmacro From 6411c328a233b80faa5aa3ef4266f9a16e499699 Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Fri, 19 Dec 2014 22:09:30 -0500 Subject: [PATCH 10/23] vp9lpf/x86: make cglobal statement more conservative in register allocation. Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9lpf.asm | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index dc22705696..878bc54a28 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -308,7 +308,20 @@ SECTION .text %endif %endmacro -%macro LOOPFILTER 2 ; %1=v/h %2=size1 +%macro LOOPFILTER 3 ; %1=v/h %2=size1 %3=stack +%if UNIX64 +cglobal vp9_loop_filter_%1_%2_16, 5, 9, 16, %3, dst, stride, E, I, H, mstride, dst2, stride3, mstride3 +%else +%if WIN64 +cglobal vp9_loop_filter_%1_%2_16, 4, 8, 16, %3, dst, stride, E, I, mstride, dst2, stride3, mstride3 +%else +cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stride3, mstride3 +%define Ed dword r2m +%define Id dword r3m +%endif +%define Hd dword r4m +%endif + mov mstrideq, strideq neg mstrideq @@ -796,10 +809,8 @@ SECTION .text %macro LPF_16_VH 2 INIT_XMM %2 -cglobal vp9_loop_filter_v_%1_16, 5,10,16, dst, stride, E, I, H, mstride, dst2, stride3, mstride3 - LOOPFILTER v, %1 -cglobal vp9_loop_filter_h_%1_16, 5,10,16, 256, dst, stride, E, I, H, mstride, dst2, stride3, mstride3 - LOOPFILTER h, %1 +LOOPFILTER v, %1, 0 +LOOPFILTER h, %1, 256 %endmacro %macro LPF_16_VH_ALL_OPTS 1 From 6e74e9636b1752e777146421ffa2b2498071e28d Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Fri, 19 Dec 2014 22:18:42 -0500 Subject: [PATCH 11/23] vp9lpf/x86: slightly simplify 44/48/84/88 h stores. 
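The high half of each transposed register is now stored directly instead of being shuffled down first: the 8-wide stores use a movh/movhps pair in place of movh + punpckhqdq + movh, and the 4-wide (44) case peels 4-byte lanes off with psrldq. Roughly, in scalar terms (an illustrative sketch only; the function name and arguments below are made up, not part of this patch):

    #include <stdint.h>
    #include <string.h>

    /* Write both 8-byte halves of a 16-byte transposed row straight to
     * their destination lines, rather than shuffling the high half into
     * the low position before the second store. */
    static void store_row_halves(uint8_t *dst_lo, uint8_t *dst_hi,
                                 const uint8_t row[16])
    {
        memcpy(dst_lo, row,     8);  /* movh   [Pn], mX */
        memcpy(dst_hi, row + 8, 8);  /* movhps [Pm], mX */
    }

This drops one shuffle per pair of stores.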
Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9lpf.asm | 88 ++++++++++++++++++--------------------- 1 file changed, 40 insertions(+), 48 deletions(-) diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index 878bc54a28..d5b3fca990 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -725,34 +725,34 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri SBUTTERFLY bw, 2, 3, 8 SBUTTERFLY wd, 0, 2, 8 SBUTTERFLY wd, 1, 3, 8 - SBUTTERFLY dq, 0, 4, 8 - SBUTTERFLY dq, 1, 5, 8 - SBUTTERFLY dq, 2, 6, 8 - SBUTTERFLY dq, 3, 7, 8 movd [P7], m0 - punpckhqdq m0, m8 - movd [P6], m0 - movd [Q0], m1 - punpckhqdq m1, m9 - movd [Q1], m1 movd [P3], m2 - punpckhqdq m2, m10 - movd [P2], m2 + movd [Q0], m1 movd [Q4], m3 - punpckhqdq m3, m11 + psrldq m0, 4 + psrldq m1, 4 + psrldq m2, 4 + psrldq m3, 4 + movd [P6], m0 + movd [P2], m2 + movd [Q1], m1 movd [Q5], m3 - movd [P5], m4 - punpckhqdq m4, m12 - movd [P4], m4 - movd [Q2], m5 - punpckhqdq m5, m13 - movd [Q3], m5 - movd [P1], m6 - punpckhqdq m6, m14 - movd [P0], m6 - movd [Q6], m7 - punpckhqdq m7, m8 - movd [Q7], m7 + psrldq m0, 4 + psrldq m1, 4 + psrldq m2, 4 + psrldq m3, 4 + movd [P5], m0 + movd [P1], m2 + movd [Q2], m1 + movd [Q6], m3 + psrldq m0, 4 + psrldq m1, 4 + psrldq m2, 4 + psrldq m3, 4 + movd [P4], m0 + movd [P0], m2 + movd [Q3], m1 + movd [Q7], m3 %else ; the following code do a transpose of 8 full lines to 16 half ; lines (high part). It is inlined to avoid the need of a staging area @@ -777,30 +777,22 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri SBUTTERFLY dq, 1, 5, 8 SBUTTERFLY dq, 2, 6, 8 SBUTTERFLY dq, 3, 7, 8 - movh [P7], m0 - punpckhqdq m0, m8 - movh [P6], m0 - movh [Q0], m1 - punpckhqdq m1, m9 - movh [Q1], m1 - movh [P3], m2 - punpckhqdq m2, m10 - movh [P2], m2 - movh [Q4], m3 - punpckhqdq m3, m11 - movh [Q5], m3 - movh [P5], m4 - punpckhqdq m4, m12 - movh [P4], m4 - movh [Q2], m5 - punpckhqdq m5, m13 - movh [Q3], m5 - movh [P1], m6 - punpckhqdq m6, m14 - movh [P0], m6 - movh [Q6], m7 - punpckhqdq m7, m8 - movh [Q7], m7 + movh [P7], m0 + movhps [P6], m0 + movh [Q0], m1 + movhps [Q1], m1 + movh [P3], m2 + movhps [P2], m2 + movh [Q4], m3 + movhps [Q5], m3 + movh [P5], m4 + movhps [P4], m4 + movh [Q2], m5 + movhps [Q3], m5 + movh [P1], m6 + movhps [P0], m6 + movh [Q6], m7 + movhps [Q7], m7 %endif %endif From 683da2788e418877808f1407d68140cafaae8b4f Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Sat, 20 Dec 2014 11:13:06 -0500 Subject: [PATCH 12/23] vp9lpf/x86: remove unused register from ABSSUB_CMP macro. 
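For reference, the per-byte-lane operation the macro implements is simply (scalar sketch only; abssub_cmp and its parameters are illustrative names, not code from this patch):

    #include <stdint.h>
    #include <stdlib.h>

    /* One lane of ABSSUB_CMP: 0xff when |a - b| <= cmp, else 0x00.
     * The SIMD form builds |a - b| from two saturating subtractions
     * ORed together, then does the compare. */
    static uint8_t abssub_cmp(uint8_t a, uint8_t b, uint8_t cmp)
    {
        return abs(a - b) <= cmp ? 0xff : 0x00;
    }

One of the two temporaries was never referenced inside the macro body, so it can be dropped from every call site.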
Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9lpf.asm | 42 +++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index d5b3fca990..7dc40a393c 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -70,9 +70,9 @@ SECTION .text %endmacro ; %1 = abs(%2-%3) <= %4 -%macro ABSSUB_CMP 6-7 [pb_80]; dst, src1, src2, cmp, tmp1, tmp2, [pb_80] - ABSSUB %1, %2, %3, %6 ; dst = abs(src1-src2) - CMP_LTE %1, %4, %6, %7 ; dst <= cmp +%macro ABSSUB_CMP 5-6 [pb_80]; dst, src1, src2, cmp, tmp, [pb_80] + ABSSUB %1, %2, %3, %5 ; dst = abs(src1-src2) + CMP_LTE %1, %4, %5, %6 ; dst <= cmp %endmacro %macro MASK_APPLY 4 ; %1=new_data/dst %2=old_data %3=mask %4=tmp @@ -439,16 +439,16 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri SWAP 10, 6, 14 SWAP 11, 7, 15 %endif - ABSSUB_CMP m5, m8, m9, m2, m6, m7, m0 ; m5 = abs(p3-p2) <= I - ABSSUB_CMP m1, m9, m10, m2, m6, m7, m0 ; m1 = abs(p2-p1) <= I + ABSSUB_CMP m5, m8, m9, m2, m7, m0 ; m5 = abs(p3-p2) <= I + ABSSUB_CMP m1, m9, m10, m2, m7, m0 ; m1 = abs(p2-p1) <= I pand m5, m1 - ABSSUB_CMP m1, m10, m11, m2, m6, m7, m0 ; m1 = abs(p1-p0) <= I + ABSSUB_CMP m1, m10, m11, m2, m7, m0 ; m1 = abs(p1-p0) <= I pand m5, m1 - ABSSUB_CMP m1, m12, m13, m2, m6, m7, m0 ; m1 = abs(q1-q0) <= I + ABSSUB_CMP m1, m12, m13, m2, m7, m0 ; m1 = abs(q1-q0) <= I pand m5, m1 - ABSSUB_CMP m1, m13, m14, m2, m6, m7, m0 ; m1 = abs(q2-q1) <= I + ABSSUB_CMP m1, m13, m14, m2, m7, m0 ; m1 = abs(q2-q1) <= I pand m5, m1 - ABSSUB_CMP m1, m14, m15, m2, m6, m7, m0 ; m1 = abs(q3-q2) <= I + ABSSUB_CMP m1, m14, m15, m2, m7, m0 ; m1 = abs(q3-q2) <= I pand m5, m1 ABSSUB m1, m11, m12, m7 ; abs(p0-q0) paddusb m1, m1 ; abs(p0-q0) * 2 @@ -466,9 +466,9 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri ; calc flat8in (if not 44_16) and hev masks mova m6, [pb_81] ; [1 1 1 1 ...] 
^ 0x80 %if %2 != 44 - ABSSUB_CMP m2, m8, m11, m6, m4, m5 ; abs(p3 - p0) <= 1 + ABSSUB_CMP m2, m8, m11, m6, m5 ; abs(p3 - p0) <= 1 mova m8, [pb_80] - ABSSUB_CMP m1, m9, m11, m6, m4, m5, m8 ; abs(p2 - p0) <= 1 + ABSSUB_CMP m1, m9, m11, m6, m5, m8 ; abs(p2 - p0) <= 1 pand m2, m1 ABSSUB m4, m10, m11, m5 ; abs(p1 - p0) %if %2 == 16 @@ -491,9 +491,9 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri por m0, m5 ; hev final value CMP_LTE m4, m6, m5 ; abs(q1 - q0) <= 1 pand m2, m4 ; (flat8in) - ABSSUB_CMP m1, m14, m12, m6, m4, m5, m8 ; abs(q2 - q0) <= 1 + ABSSUB_CMP m1, m14, m12, m6, m5, m8 ; abs(q2 - q0) <= 1 pand m2, m1 - ABSSUB_CMP m1, m15, m12, m6, m4, m5, m8 ; abs(q3 - q0) <= 1 + ABSSUB_CMP m1, m15, m12, m6, m5, m8 ; abs(q3 - q0) <= 1 pand m2, m1 ; flat8in final value %if %2 == 84 || %2 == 48 pand m2, [mask_mix%2] @@ -517,26 +517,26 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri ; calc flat8out mask mova m8, [P7] mova m9, [P6] - ABSSUB_CMP m1, m8, m11, m6, m4, m5 ; abs(p7 - p0) <= 1 - ABSSUB_CMP m7, m9, m11, m6, m4, m5 ; abs(p6 - p0) <= 1 + ABSSUB_CMP m1, m8, m11, m6, m5 ; abs(p7 - p0) <= 1 + ABSSUB_CMP m7, m9, m11, m6, m5 ; abs(p6 - p0) <= 1 pand m1, m7 mova m8, [P5] mova m9, [P4] - ABSSUB_CMP m7, m8, m11, m6, m4, m5 ; abs(p5 - p0) <= 1 + ABSSUB_CMP m7, m8, m11, m6, m5 ; abs(p5 - p0) <= 1 pand m1, m7 - ABSSUB_CMP m7, m9, m11, m6, m4, m5 ; abs(p4 - p0) <= 1 + ABSSUB_CMP m7, m9, m11, m6, m5 ; abs(p4 - p0) <= 1 pand m1, m7 mova m14, [Q4] mova m15, [Q5] - ABSSUB_CMP m7, m14, m12, m6, m4, m5 ; abs(q4 - q0) <= 1 + ABSSUB_CMP m7, m14, m12, m6, m5 ; abs(q4 - q0) <= 1 pand m1, m7 - ABSSUB_CMP m7, m15, m12, m6, m4, m5 ; abs(q5 - q0) <= 1 + ABSSUB_CMP m7, m15, m12, m6, m5 ; abs(q5 - q0) <= 1 pand m1, m7 mova m14, [Q6] mova m15, [Q7] - ABSSUB_CMP m7, m14, m12, m6, m4, m5 ; abs(q4 - q0) <= 1 + ABSSUB_CMP m7, m14, m12, m6, m5 ; abs(q4 - q0) <= 1 pand m1, m7 - ABSSUB_CMP m7, m15, m12, m6, m4, m5 ; abs(q5 - q0) <= 1 + ABSSUB_CMP m7, m15, m12, m6, m5 ; abs(q5 - q0) <= 1 pand m1, m7 ; flat8out final value %endif From e4961035b288043b2b00bdc2ccbe3c31393e12d5 Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Sun, 21 Dec 2014 19:34:03 -0500 Subject: [PATCH 13/23] vp9lpf/x86: simplify ABSSUM_CMP by inverting the comparison meaning. Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9lpf.asm | 103 +++++++++++++++++++------------------- 1 file changed, 52 insertions(+), 51 deletions(-) diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index 7dc40a393c..5d829959fd 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -36,6 +36,7 @@ pb_40: times 16 db 0x40 pb_81: times 16 db 0x81 pb_f8: times 16 db 0xf8 pb_fe: times 16 db 0xfe +pb_ff: times 16 db 0xff pw_4: times 8 dw 4 pw_8: times 8 dw 8 @@ -59,20 +60,18 @@ SECTION .text por %1, %4 %endmacro -; %1 = %1<=%2 -%macro CMP_LTE 3-4 ; src/dst, cmp, tmp, pb_80 -%if %0 == 4 - pxor %1, %4 +; %1 = %1>%2 +%macro CMP_GT 2-3 ; src/dst, cmp, pb_80 +%if %0 == 3 + pxor %1, %3 %endif - pcmpgtb %3, %2, %1 ; cmp > src? - pcmpeqb %1, %2 ; cmp == src? XXX: avoid this with a -1/+1 well placed? - por %1, %3 ; cmp >= src? 
+ pcmpgtb %1, %2 %endmacro -; %1 = abs(%2-%3) <= %4 -%macro ABSSUB_CMP 5-6 [pb_80]; dst, src1, src2, cmp, tmp, [pb_80] +; %1 = abs(%2-%3) > %4 +%macro ABSSUB_GT 5-6 [pb_80]; dst, src1, src2, cmp, tmp, [pb_80] ABSSUB %1, %2, %3, %5 ; dst = abs(src1-src2) - CMP_LTE %1, %4, %5, %6 ; dst <= cmp + CMP_GT %1, %4, %6 ; dst > cmp %endmacro %macro MASK_APPLY 4 ; %1=new_data/dst %2=old_data %3=mask %4=tmp @@ -439,17 +438,17 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri SWAP 10, 6, 14 SWAP 11, 7, 15 %endif - ABSSUB_CMP m5, m8, m9, m2, m7, m0 ; m5 = abs(p3-p2) <= I - ABSSUB_CMP m1, m9, m10, m2, m7, m0 ; m1 = abs(p2-p1) <= I - pand m5, m1 - ABSSUB_CMP m1, m10, m11, m2, m7, m0 ; m1 = abs(p1-p0) <= I - pand m5, m1 - ABSSUB_CMP m1, m12, m13, m2, m7, m0 ; m1 = abs(q1-q0) <= I - pand m5, m1 - ABSSUB_CMP m1, m13, m14, m2, m7, m0 ; m1 = abs(q2-q1) <= I - pand m5, m1 - ABSSUB_CMP m1, m14, m15, m2, m7, m0 ; m1 = abs(q3-q2) <= I - pand m5, m1 + ABSSUB_GT m5, m8, m9, m2, m7, m0 ; m5 = abs(p3-p2) <= I + ABSSUB_GT m1, m9, m10, m2, m7, m0 ; m1 = abs(p2-p1) <= I + por m5, m1 + ABSSUB_GT m1, m10, m11, m2, m7, m0 ; m1 = abs(p1-p0) <= I + por m5, m1 + ABSSUB_GT m1, m12, m13, m2, m7, m0 ; m1 = abs(q1-q0) <= I + por m5, m1 + ABSSUB_GT m1, m13, m14, m2, m7, m0 ; m1 = abs(q2-q1) <= I + por m5, m1 + ABSSUB_GT m1, m14, m15, m2, m7, m0 ; m1 = abs(q3-q2) <= I + por m5, m1 ABSSUB m1, m11, m12, m7 ; abs(p0-q0) paddusb m1, m1 ; abs(p0-q0) * 2 ABSSUB m2, m10, m13, m7 ; abs(p1-q1) @@ -457,19 +456,19 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri psrlq m2, 1 ; abs(p1-q1)/2 paddusb m1, m2 ; abs(p0-q0)*2 + abs(p1-q1)/2 pxor m1, m0 - pcmpgtb m4, m3, m1 ; E > X? - pcmpeqb m3, m1 ; E == X? - por m3, m4 ; E >= X? - pand m3, m5 ; fm final value + pcmpgtb m1, m3 + por m1, m5 ; fm final value + SWAP 1, 3 + pxor m3, [pb_ff] ; (m3: fm, m8..15: p3 p2 p1 p0 q0 q1 q2 q3) ; calc flat8in (if not 44_16) and hev masks mova m6, [pb_81] ; [1 1 1 1 ...] 
^ 0x80 %if %2 != 44 - ABSSUB_CMP m2, m8, m11, m6, m5 ; abs(p3 - p0) <= 1 + ABSSUB_GT m2, m8, m11, m6, m5 ; abs(p3 - p0) <= 1 mova m8, [pb_80] - ABSSUB_CMP m1, m9, m11, m6, m5, m8 ; abs(p2 - p0) <= 1 - pand m2, m1 + ABSSUB_GT m1, m9, m11, m6, m5, m8 ; abs(p2 - p0) <= 1 + por m2, m1 ABSSUB m4, m10, m11, m5 ; abs(p1 - p0) %if %2 == 16 %if cpuflag(ssse3) @@ -483,18 +482,19 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri pxor m7, m8 pxor m4, m8 pcmpgtb m0, m4, m7 ; abs(p1 - p0) > H (1/2 hev condition) - CMP_LTE m4, m6, m5 ; abs(p1 - p0) <= 1 - pand m2, m4 ; (flat8in) + CMP_GT m4, m6 ; abs(p1 - p0) <= 1 + por m2, m4 ; (flat8in) ABSSUB m4, m13, m12, m1 ; abs(q1 - q0) pxor m4, m8 pcmpgtb m5, m4, m7 ; abs(q1 - q0) > H (2/2 hev condition) por m0, m5 ; hev final value - CMP_LTE m4, m6, m5 ; abs(q1 - q0) <= 1 - pand m2, m4 ; (flat8in) - ABSSUB_CMP m1, m14, m12, m6, m5, m8 ; abs(q2 - q0) <= 1 - pand m2, m1 - ABSSUB_CMP m1, m15, m12, m6, m5, m8 ; abs(q3 - q0) <= 1 - pand m2, m1 ; flat8in final value + CMP_GT m4, m6 ; abs(q1 - q0) <= 1 + por m2, m4 ; (flat8in) + ABSSUB_GT m1, m14, m12, m6, m5, m8 ; abs(q2 - q0) <= 1 + por m2, m1 + ABSSUB_GT m1, m15, m12, m6, m5, m8 ; abs(q3 - q0) <= 1 + por m2, m1 ; flat8in final value + pxor m2, [pb_ff] %if %2 == 84 || %2 == 48 pand m2, [mask_mix%2] %endif @@ -517,27 +517,28 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri ; calc flat8out mask mova m8, [P7] mova m9, [P6] - ABSSUB_CMP m1, m8, m11, m6, m5 ; abs(p7 - p0) <= 1 - ABSSUB_CMP m7, m9, m11, m6, m5 ; abs(p6 - p0) <= 1 - pand m1, m7 + ABSSUB_GT m1, m8, m11, m6, m5 ; abs(p7 - p0) <= 1 + ABSSUB_GT m7, m9, m11, m6, m5 ; abs(p6 - p0) <= 1 + por m1, m7 mova m8, [P5] mova m9, [P4] - ABSSUB_CMP m7, m8, m11, m6, m5 ; abs(p5 - p0) <= 1 - pand m1, m7 - ABSSUB_CMP m7, m9, m11, m6, m5 ; abs(p4 - p0) <= 1 - pand m1, m7 + ABSSUB_GT m7, m8, m11, m6, m5 ; abs(p5 - p0) <= 1 + por m1, m7 + ABSSUB_GT m7, m9, m11, m6, m5 ; abs(p4 - p0) <= 1 + por m1, m7 mova m14, [Q4] mova m15, [Q5] - ABSSUB_CMP m7, m14, m12, m6, m5 ; abs(q4 - q0) <= 1 - pand m1, m7 - ABSSUB_CMP m7, m15, m12, m6, m5 ; abs(q5 - q0) <= 1 - pand m1, m7 + ABSSUB_GT m7, m14, m12, m6, m5 ; abs(q4 - q0) <= 1 + por m1, m7 + ABSSUB_GT m7, m15, m12, m6, m5 ; abs(q5 - q0) <= 1 + por m1, m7 mova m14, [Q6] mova m15, [Q7] - ABSSUB_CMP m7, m14, m12, m6, m5 ; abs(q4 - q0) <= 1 - pand m1, m7 - ABSSUB_CMP m7, m15, m12, m6, m5 ; abs(q5 - q0) <= 1 - pand m1, m7 ; flat8out final value + ABSSUB_GT m7, m14, m12, m6, m5 ; abs(q4 - q0) <= 1 + por m1, m7 + ABSSUB_GT m7, m15, m12, m6, m5 ; abs(q5 - q0) <= 1 + por m1, m7 ; flat8out final value + pxor m1, [pb_ff] %endif ; if (fm) { From 4ce8ba72f9cbdecf3a2ee3533959e097a2095595 Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Wed, 24 Dec 2014 14:22:19 -0500 Subject: [PATCH 14/23] vp9lpf/x86: move variable assigned inside macro branch. The value is not used outside the branch. Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9lpf.asm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index 5d829959fd..0b72facc8d 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -463,8 +463,8 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri ; (m3: fm, m8..15: p3 p2 p1 p0 q0 q1 q2 q3) ; calc flat8in (if not 44_16) and hev masks - mova m6, [pb_81] ; [1 1 1 1 ...] ^ 0x80 %if %2 != 44 + mova m6, [pb_81] ; [1 1 1 1 ...] 
^ 0x80 ABSSUB_GT m2, m8, m11, m6, m5 ; abs(p3 - p0) <= 1 mova m8, [pb_80] ABSSUB_GT m1, m9, m11, m6, m5, m8 ; abs(p2 - p0) <= 1 From c6375a83d1ad512ed24e8fef044f3ba17237e03e Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Wed, 24 Dec 2014 14:17:28 -0500 Subject: [PATCH 15/23] vp9lpf/x86: store unpacked intermediates for filter6/14 on stack. filter16 goes from 508 to 482 (h) or 346 to 314 (v) cycles; filter88 goes from 240 to 238 (h) or 174 to 165 (v) cycles, measured on TOS. Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9lpf.asm | 151 ++++++++++++++++++++------------------ 1 file changed, 79 insertions(+), 72 deletions(-) diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index 0b72facc8d..e3371329a8 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -80,39 +80,42 @@ SECTION .text por %1, %4 ; new&mask | old&~mask %endmacro -%macro FILTER_SUBx2_ADDx2 8 ; %1=dst %2=h/l %3=cache %4=sub1 %5=sub2 %6=add1 %7=add2 %8=rshift - punpck%2bw %3, %4, m0 - psubw %1, %3 - punpck%2bw %3, %5, m0 - psubw %1, %3 - punpck%2bw %3, %6, m0 - paddw %1, %3 - punpck%2bw %3, %7, m0 +%macro FILTER_SUBx2_ADDx2 9-10 "" ; %1=dst %2=h/l %3=cache %4=stack_off %5=sub1 %6=sub2 %7=add1 %8=add2 %9=rshift, [unpack] + psubw %3, [rsp+%4+%5*32] + psubw %3, [rsp+%4+%6*32] + paddw %3, [rsp+%4+%7*32] +%ifnidn %10, "" + punpck%2bw %1, %10, m0 + mova [rsp+%4+%8*32], %1 paddw %3, %1 - psraw %1, %3, %8 +%else + paddw %3, [rsp+%4+%8*32] +%endif + psraw %1, %3, %9 %endmacro -%macro FILTER_INIT 8 ; tmp1, tmp2, cacheL, cacheH, dstp, filterid, mask, source - FILTER%6_INIT %1, l, %3 - FILTER%6_INIT %2, h, %4 +; FIXME interleave l/h better (for instruction pairing) +%macro FILTER_INIT 9 ; tmp1, tmp2, cacheL, cacheH, dstp, stack_off, filterid, mask, source + FILTER%7_INIT %1, l, %3, %6 + 0 + FILTER%7_INIT %2, h, %4, %6 + 16 packuswb %1, %2 - MASK_APPLY %1, %8, %7, %2 + MASK_APPLY %1, %9, %8, %2 mova %5, %1 %endmacro -%macro FILTER_UPDATE 11-14 ; tmp1, tmp2, cacheL, cacheH, dstp, -, -, +, +, rshift, mask, [source], [preload reg + value] -%if %0 == 13 ; no source + preload - mova %12, %13 -%elif %0 == 14 ; source + preload - mova %13, %14 + +%macro FILTER_UPDATE 12-15 "", "" ; tmp1, tmp2, cacheL, cacheH, dstp, stack_off, -, -, +, +, rshift, mask, [source], [unpack] +; FIXME interleave this properly with the subx2/addx2 +%if %0 == 15 + mova %14, %15 %endif - FILTER_SUBx2_ADDx2 %1, l, %3, %6, %7, %8, %9, %10 - FILTER_SUBx2_ADDx2 %2, h, %4, %6, %7, %8, %9, %10 + FILTER_SUBx2_ADDx2 %1, l, %3, %6 + 0, %7, %8, %9, %10, %11, %14 + FILTER_SUBx2_ADDx2 %2, h, %4, %6 + 16, %7, %8, %9, %10, %11, %14 packuswb %1, %2 -%if %0 == 12 || %0 == 14 - MASK_APPLY %1, %12, %11, %2 +%ifnidn %13, "" + MASK_APPLY %1, %13, %12, %2 %else - MASK_APPLY %1, %5, %11, %2 + MASK_APPLY %1, %5, %12, %2 %endif mova %5, %1 %endmacro @@ -152,44 +155,48 @@ SECTION .text paddusb %1, %4 ; add the negatives %endmacro -%macro FILTER6_INIT 3 ; %1=dst %2=h/l %3=cache +%macro FILTER6_INIT 4 ; %1=dst %2=h/l %3=cache, %4=stack_off punpck%2bw %1, m14, m0 ; p3: B->W + mova [rsp+%4+0*32], %1 paddw %3, %1, %1 ; p3*2 paddw %3, %1 ; p3*3 punpck%2bw %1, m15, m0 ; p2: B->W + mova [rsp+%4+1*32], %1 paddw %3, %1 ; p3*3 + p2 paddw %3, %1 ; p3*3 + p2*2 punpck%2bw %1, m10, m0 ; p1: B->W + mova [rsp+%4+2*32], %1 paddw %3, %1 ; p3*3 + p2*2 + p1 punpck%2bw %1, m11, m0 ; p0: B->W + mova [rsp+%4+3*32], %1 paddw %3, %1 ; p3*3 + p2*2 + p1 + p0 punpck%2bw %1, m12, m0 ; q0: B->W + mova [rsp+%4+4*32], %1 paddw %3, %1 ; p3*3 + p2*2 + p1 + p0 + q0 paddw %3, [pw_4] ; 
p3*3 + p2*2 + p1 + p0 + q0 + 4 psraw %1, %3, 3 ; (p3*3 + p2*2 + p1 + p0 + q0 + 4) >> 3 %endmacro -%macro FILTER14_INIT 3 ; %1=dst %2=h/l %3=cache +%macro FILTER14_INIT 4 ; %1=dst %2=h/l %3=cache, %4=stack_off punpck%2bw %1, m2, m0 ; p7: B->W + mova [rsp+%4+ 8*32], %1 psllw %3, %1, 3 ; p7*8 psubw %3, %1 ; p7*7 punpck%2bw %1, m3, m0 ; p6: B->W + mova [rsp+%4+ 9*32], %1 paddw %3, %1 ; p7*7 + p6 paddw %3, %1 ; p7*7 + p6*2 punpck%2bw %1, m8, m0 ; p5: B->W + mova [rsp+%4+10*32], %1 paddw %3, %1 ; p7*7 + p6*2 + p5 punpck%2bw %1, m9, m0 ; p4: B->W + mova [rsp+%4+11*32], %1 paddw %3, %1 ; p7*7 + p6*2 + p5 + p4 - punpck%2bw %1, m14, m0 ; p3: B->W - paddw %3, %1 ; p7*7 + p6*2 + p5 + p4 + p3 - punpck%2bw %1, m15, m0 ; p2: B->W - paddw %3, %1 ; p7*7 + p6*2 + p5 + .. + p2 - punpck%2bw %1, m10, m0 ; p1: B->W - paddw %3, %1 ; p7*7 + p6*2 + p5 + .. + p1 - punpck%2bw %1, m11, m0 ; p0: B->W - paddw %3, %1 ; p7*7 + p6*2 + p5 + .. + p0 - punpck%2bw %1, m12, m0 ; q0: B->W - paddw %3, %1 ; p7*7 + p6*2 + p5 + .. + p0 + q0 + paddw %3, [rsp+%4+ 0*32] ; p7*7 + p6*2 + p5 + p4 + p3 + paddw %3, [rsp+%4+ 1*32] ; p7*7 + p6*2 + p5 + .. + p2 + paddw %3, [rsp+%4+ 2*32] ; p7*7 + p6*2 + p5 + .. + p1 + paddw %3, [rsp+%4+ 3*32] ; p7*7 + p6*2 + p5 + .. + p0 + paddw %3, [rsp+%4+ 4*32] ; p7*7 + p6*2 + p5 + .. + p0 + q0 paddw %3, [pw_8] ; p7*7 + p6*2 + p5 + .. + p0 + q0 + 8 psraw %1, %3, 4 ; (p7*7 + p6*2 + p5 + .. + p0 + q0 + 8) >> 4 %endmacro @@ -307,14 +314,14 @@ SECTION .text %endif %endmacro -%macro LOOPFILTER 3 ; %1=v/h %2=size1 %3=stack +%macro LOOPFILTER 4 ; %1=v/h %2=size1 %3+%4=stack %if UNIX64 -cglobal vp9_loop_filter_%1_%2_16, 5, 9, 16, %3, dst, stride, E, I, H, mstride, dst2, stride3, mstride3 +cglobal vp9_loop_filter_%1_%2_16, 5, 9, 16, %3 + %4, dst, stride, E, I, H, mstride, dst2, stride3, mstride3 %else %if WIN64 -cglobal vp9_loop_filter_%1_%2_16, 4, 8, 16, %3, dst, stride, E, I, mstride, dst2, stride3, mstride3 +cglobal vp9_loop_filter_%1_%2_16, 4, 8, 16, %3 + %4, dst, stride, E, I, mstride, dst2, stride3, mstride3 %else -cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stride3, mstride3 +cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, stride3, mstride3 %define Ed dword r2m %define Id dword r3m %endif @@ -631,12 +638,12 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri mova m15, [P2] mova m8, [Q2] mova m9, [Q3] - FILTER_INIT m4, m5, m6, m7, [P2], 6, m3, m15 ; [p2] - FILTER_UPDATE m6, m7, m4, m5, [P1], m14, m15, m10, m13, 3, m3 ; [p1] -p3 -p2 +p1 +q1 - FILTER_UPDATE m4, m5, m6, m7, [P0], m14, m10, m11, m8, 3, m3 ; [p0] -p3 -p1 +p0 +q2 - FILTER_UPDATE m6, m7, m4, m5, [Q0], m14, m11, m12, m9, 3, m3 ; [q0] -p3 -p0 +q0 +q3 - FILTER_UPDATE m4, m5, m6, m7, [Q1], m15, m12, m13, m9, 3, m3 ; [q1] -p2 -q0 +q1 +q3 - FILTER_UPDATE m6, m7, m4, m5, [Q2], m10, m13, m8, m9, 3, m3, m8 ; [q2] -p1 -q1 +q2 +q3 + FILTER_INIT m4, m5, m6, m7, [P2], %4, 6, m3, m15 ; [p2] + FILTER_UPDATE m4, m5, m6, m7, [P1], %4, 0, 1, 2, 5, 3, m3, "", m13 ; [p1] -p3 -p2 +p1 +q1 + FILTER_UPDATE m4, m5, m6, m7, [P0], %4, 0, 2, 3, 6, 3, m3, "", m8 ; [p0] -p3 -p1 +p0 +q2 + FILTER_UPDATE m4, m5, m6, m7, [Q0], %4, 0, 3, 4, 7, 3, m3, "", m9 ; [q0] -p3 -p0 +q0 +q3 + FILTER_UPDATE m4, m5, m6, m7, [Q1], %4, 1, 4, 5, 7, 3, m3, "" ; [q1] -p2 -q0 +q1 +q3 + FILTER_UPDATE m4, m5, m6, m7, [Q2], %4, 2, 5, 6, 7, 3, m3, m8 ; [q2] -p1 -q1 +q2 +q3 %endif ; (m0: 0, [m1: flat8out], m2: fm & flat8in, m8..15: q2 q3 p1 p0 q0 q1 p3 p2) @@ -666,20 +673,20 @@ cglobal vp9_loop_filter_%1_%2_16, 
2, 6, 16, %3, dst, stride, mstride, dst2, stri mova m3, [P6] mova m8, [P5] mova m9, [P4] - FILTER_INIT m4, m5, m6, m7, [P6], 14, m1, m3 - FILTER_UPDATE m6, m7, m4, m5, [P5], m2, m3, m8, m13, 4, m1, m8 ; [p5] -p7 -p6 +p5 +q1 - FILTER_UPDATE m4, m5, m6, m7, [P4], m2, m8, m9, m13, 4, m1, m9, m13, [Q2] ; [p4] -p7 -p5 +p4 +q2 - FILTER_UPDATE m6, m7, m4, m5, [P3], m2, m9, m14, m13, 4, m1, m14, m13, [Q3] ; [p3] -p7 -p4 +p3 +q3 - FILTER_UPDATE m4, m5, m6, m7, [P2], m2, m14, m15, m13, 4, m1, m13, [Q4] ; [p2] -p7 -p3 +p2 +q4 - FILTER_UPDATE m6, m7, m4, m5, [P1], m2, m15, m10, m13, 4, m1, m13, [Q5] ; [p1] -p7 -p2 +p1 +q5 - FILTER_UPDATE m4, m5, m6, m7, [P0], m2, m10, m11, m13, 4, m1, m13, [Q6] ; [p0] -p7 -p1 +p0 +q6 - FILTER_UPDATE m6, m7, m4, m5, [Q0], m2, m11, m12, m13, 4, m1, m13, [Q7] ; [q0] -p7 -p0 +q0 +q7 - FILTER_UPDATE m4, m5, m6, m7, [Q1], m3, m12, m2, m13, 4, m1, m2, [Q1] ; [q1] -p6 -q0 +q1 +q7 - FILTER_UPDATE m6, m7, m4, m5, [Q2], m8, m2, m3, m13, 4, m1, m3, [Q2] ; [q2] -p5 -q1 +q2 +q7 - FILTER_UPDATE m4, m5, m6, m7, [Q3], m9, m3, m8, m13, 4, m1, m8, m8, [Q3] ; [q3] -p4 -q2 +q3 +q7 - FILTER_UPDATE m6, m7, m4, m5, [Q4], m14, m8, m9, m13, 4, m1, m9, m9, [Q4] ; [q4] -p3 -q3 +q4 +q7 - FILTER_UPDATE m4, m5, m6, m7, [Q5], m15, m9, m14, m13, 4, m1, m14, m14, [Q5] ; [q5] -p2 -q4 +q5 +q7 - FILTER_UPDATE m6, m7, m4, m5, [Q6], m10, m14, m15, m13, 4, m1, m15, m15, [Q6] ; [q6] -p1 -q5 +q6 +q7 + FILTER_INIT m4, m5, m6, m7, [P6], %4, 14, m1, m3 ; [p6] + FILTER_UPDATE m4, m5, m6, m7, [P5], %4, 8, 9, 10, 5, 4, m1, m8 ; [p5] -p7 -p6 +p5 +q1 + FILTER_UPDATE m4, m5, m6, m7, [P4], %4, 8, 10, 11, 6, 4, m1, m9 ; [p4] -p7 -p5 +p4 +q2 + FILTER_UPDATE m4, m5, m6, m7, [P3], %4, 8, 11, 0, 7, 4, m1, m14 ; [p3] -p7 -p4 +p3 +q3 + FILTER_UPDATE m4, m5, m6, m7, [P2], %4, 8, 0, 1, 12, 4, m1, "", m8, [Q4] ; [p2] -p7 -p3 +p2 +q4 + FILTER_UPDATE m4, m5, m6, m7, [P1], %4, 8, 1, 2, 13, 4, m1, "", m9, [Q5] ; [p1] -p7 -p2 +p1 +q5 + FILTER_UPDATE m4, m5, m6, m7, [P0], %4, 8, 2, 3, 14, 4, m1, "", m14, [Q6] ; [p0] -p7 -p1 +p0 +q6 + FILTER_UPDATE m4, m5, m6, m7, [Q0], %4, 8, 3, 4, 15, 4, m1, "", m15, [Q7] ; [q0] -p7 -p0 +q0 +q7 + FILTER_UPDATE m4, m5, m6, m7, [Q1], %4, 9, 4, 5, 15, 4, m1, "" ; [q1] -p6 -q0 +q1 +q7 + FILTER_UPDATE m4, m5, m6, m7, [Q2], %4, 10, 5, 6, 15, 4, m1, "" ; [q2] -p5 -q1 +q2 +q7 + FILTER_UPDATE m4, m5, m6, m7, [Q3], %4, 11, 6, 7, 15, 4, m1, "" ; [q3] -p4 -q2 +q3 +q7 + FILTER_UPDATE m4, m5, m6, m7, [Q4], %4, 0, 7, 12, 15, 4, m1, m8 ; [q4] -p3 -q3 +q4 +q7 + FILTER_UPDATE m4, m5, m6, m7, [Q5], %4, 1, 12, 13, 15, 4, m1, m9 ; [q5] -p2 -q4 +q5 +q7 + FILTER_UPDATE m4, m5, m6, m7, [Q6], %4, 2, 13, 14, 15, 4, m1, m14 ; [q6] -p1 -q5 +q6 +q7 %endif %ifidn %1, h @@ -800,22 +807,22 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri RET %endmacro -%macro LPF_16_VH 2 -INIT_XMM %2 -LOOPFILTER v, %1, 0 -LOOPFILTER h, %1, 256 +%macro LPF_16_VH 3 +INIT_XMM %3 +LOOPFILTER v, %1, %2, 0 +LOOPFILTER h, %1, %2, 256 %endmacro -%macro LPF_16_VH_ALL_OPTS 1 -LPF_16_VH %1, sse2 -LPF_16_VH %1, ssse3 -LPF_16_VH %1, avx +%macro LPF_16_VH_ALL_OPTS 2 +LPF_16_VH %1, %2, sse2 +LPF_16_VH %1, %2, ssse3 +LPF_16_VH %1, %2, avx %endmacro -LPF_16_VH_ALL_OPTS 16 -LPF_16_VH_ALL_OPTS 44 -LPF_16_VH_ALL_OPTS 48 -LPF_16_VH_ALL_OPTS 84 -LPF_16_VH_ALL_OPTS 88 +LPF_16_VH_ALL_OPTS 16, 512 +LPF_16_VH_ALL_OPTS 44, 0 +LPF_16_VH_ALL_OPTS 48, 256 +LPF_16_VH_ALL_OPTS 84, 256 +LPF_16_VH_ALL_OPTS 88, 256 %endif ; x86-64 From 7c62891efedf0102934bc18d62c6561152a4d0bc Mon Sep 17 00:00:00 2001 From: "Ronald S. 
Bultje" Date: Sat, 27 Dec 2014 14:47:07 -0500 Subject: [PATCH 16/23] vp9lpf/x86: save one register in SIGN_ADD/SUB. Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9lpf.asm | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index e3371329a8..c2afc44418 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -142,17 +142,17 @@ SECTION .text %endmacro ; clip_u8(u8 + i8) -%macro SIGN_ADD 5 ; dst, u8, i8, tmp1, tmp2 - EXTRACT_POS_NEG %3, %4, %5 - psubusb %1, %2, %4 ; sub the negatives - paddusb %1, %5 ; add the positives +%macro SIGN_ADD 4 ; dst, u8, i8, tmp1 + EXTRACT_POS_NEG %3, %4, %1 + paddusb %1, %2 ; add the positives + psubusb %1, %4 ; sub the negatives %endmacro ; clip_u8(u8 - i8) -%macro SIGN_SUB 5 ; dst, u8, i8, tmp1, tmp2 - EXTRACT_POS_NEG %3, %4, %5 - psubusb %1, %2, %5 ; sub the positives - paddusb %1, %4 ; add the negatives +%macro SIGN_SUB 4 ; dst, u8, i8, tmp1 + EXTRACT_POS_NEG %3, %1, %4 + paddusb %1, %2 ; add the negatives + psubusb %1, %4 ; sub the positives %endmacro %macro FILTER6_INIT 4 ; %1=dst %2=h/l %3=cache, %4=stack_off @@ -578,8 +578,8 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, paddsb m4, [pb_3] ; m4: f2 = clip(f + 3, 127) mova m14, [pb_10] ; will be reused in filter4() SRSHIFT3B_2X m6, m4, m14, m7 ; f1 and f2 sign byte shift by 3 - SIGN_SUB m7, m12, m6, m5, m9 ; m7 = q0 - f1 - SIGN_ADD m8, m11, m4, m5, m9 ; m8 = p0 + f2 + SIGN_SUB m7, m12, m6, m5 ; m7 = q0 - f1 + SIGN_ADD m8, m11, m4, m5 ; m8 = p0 + f2 %if %2 != 44 pandn m6, m2, m3 ; ~mask(in) & mask(fm) pand m6, m0 ; (~mask(in) & mask(fm)) & mask(hev) @@ -607,18 +607,18 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, %define q0tmp m2 pandn m0, m3 %endif - SIGN_SUB q0tmp, m12, m6, m4, m14 ; q0 - f1 + SIGN_SUB q0tmp, m12, m6, m4 ; q0 - f1 MASK_APPLY q0tmp, m7, m0, m5 ; filter4(q0) & mask mova [Q0], q0tmp - SIGN_ADD p0tmp, m11, m15, m4, m14 ; p0 + f2 + SIGN_ADD p0tmp, m11, m15, m4 ; p0 + f2 MASK_APPLY p0tmp, m8, m0, m5 ; filter4(p0) & mask mova [P0], p0tmp paddb m6, [pb_80] ; pxor m8, m8 ; f=(f1+1)>>1 pavgb m6, m8 ; psubb m6, [pb_40] ; - SIGN_ADD m7, m10, m6, m8, m9 ; p1 + f - SIGN_SUB m4, m13, m6, m8, m9 ; q1 - f + SIGN_ADD m7, m10, m6, m8 ; p1 + f + SIGN_SUB m4, m13, m6, m8 ; q1 - f MASK_APPLY m7, m10, m0, m14 ; m7 = filter4(p1) MASK_APPLY m4, m13, m0, m14 ; m4 = filter4(q1) mova [P1], m7 From be10834bd9dde81fc10568b7da8ffd1493df8589 Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Sat, 27 Dec 2014 15:08:48 -0500 Subject: [PATCH 17/23] vp9lpf/x86: make filter_44_v work on 32-bit. 
Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9dsp_init.c | 4 +- libavcodec/x86/vp9lpf.asm | 155 ++++++++++++++++++++++------------- 2 files changed, 100 insertions(+), 59 deletions(-) diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c index 88267b9fc9..daced21731 100644 --- a/libavcodec/x86/vp9dsp_init.c +++ b/libavcodec/x86/vp9dsp_init.c @@ -283,7 +283,9 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_##opt; \ dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_##opt; \ dsp->loop_filter_mix2[0][0][0] = ff_vp9_loop_filter_h_44_16_##opt; \ - dsp->loop_filter_mix2[0][0][1] = ff_vp9_loop_filter_v_44_16_##opt; \ + } \ + dsp->loop_filter_mix2[0][0][1] = ff_vp9_loop_filter_v_44_16_##opt; \ + if (ARCH_X86_64) { \ dsp->loop_filter_mix2[0][1][0] = ff_vp9_loop_filter_h_48_16_##opt; \ dsp->loop_filter_mix2[0][1][1] = ff_vp9_loop_filter_v_48_16_##opt; \ dsp->loop_filter_mix2[1][0][0] = ff_vp9_loop_filter_h_84_16_##opt; \ diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index c2afc44418..d9a6215aa4 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -2,6 +2,7 @@ ;* VP9 loop filter SIMD optimizations ;* ;* Copyright (C) 2013-2014 Clément Bœsch +;* Copyright (C) 2014 Ronald S. Bultje ;* ;* This file is part of Libav. ;* @@ -23,8 +24,6 @@ %include "libavutil/x86/x86util.asm" -%if ARCH_X86_64 - SECTION_RODATA cextern pb_3 @@ -55,8 +54,15 @@ SECTION .text ; %1 = abs(%2-%3) %macro ABSSUB 4 ; dst, src1 (RO), src2 (RO), tmp +%if ARCH_X86_64 psubusb %1, %3, %2 psubusb %4, %2, %3 +%else + mova %1, %3 + mova %4, %2 + psubusb %1, %2 + psubusb %4, %3 +%endif por %1, %4 %endmacro @@ -428,6 +434,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, mova m0, [pb_80] pxor m2, m0 pxor m3, m0 +%if ARCH_X86_64 %ifidn %1, v mova m8, [P3] mova m9, [P2] @@ -445,20 +452,38 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, SWAP 10, 6, 14 SWAP 11, 7, 15 %endif - ABSSUB_GT m5, m8, m9, m2, m7, m0 ; m5 = abs(p3-p2) <= I - ABSSUB_GT m1, m9, m10, m2, m7, m0 ; m1 = abs(p2-p1) <= I +%define rp3 m8 +%define rp2 m9 +%define rp1 m10 +%define rp0 m11 +%define rq0 m12 +%define rq1 m13 +%define rq2 m14 +%define rq3 m15 +%else +%define rp3 [P3] +%define rp2 [P2] +%define rp1 [P1] +%define rp0 [P0] +%define rq0 [Q0] +%define rq1 [Q1] +%define rq2 [Q2] +%define rq3 [Q3] +%endif + ABSSUB_GT m5, rp3, rp2, m2, m7, m0 ; m5 = abs(p3-p2) <= I + ABSSUB_GT m1, rp2, rp1, m2, m7, m0 ; m1 = abs(p2-p1) <= I por m5, m1 - ABSSUB_GT m1, m10, m11, m2, m7, m0 ; m1 = abs(p1-p0) <= I + ABSSUB_GT m1, rp1, rp0, m2, m7, m0 ; m1 = abs(p1-p0) <= I por m5, m1 - ABSSUB_GT m1, m12, m13, m2, m7, m0 ; m1 = abs(q1-q0) <= I + ABSSUB_GT m1, rq0, rq1, m2, m7, m0 ; m1 = abs(q1-q0) <= I por m5, m1 - ABSSUB_GT m1, m13, m14, m2, m7, m0 ; m1 = abs(q2-q1) <= I + ABSSUB_GT m1, rq1, rq2, m2, m7, m0 ; m1 = abs(q2-q1) <= I por m5, m1 - ABSSUB_GT m1, m14, m15, m2, m7, m0 ; m1 = abs(q3-q2) <= I + ABSSUB_GT m1, rq2, rq3, m2, m7, m0 ; m1 = abs(q3-q2) <= I por m5, m1 - ABSSUB m1, m11, m12, m7 ; abs(p0-q0) + ABSSUB m1, rp0, rq0, m7 ; abs(p0-q0) paddusb m1, m1 ; abs(p0-q0) * 2 - ABSSUB m2, m10, m13, m7 ; abs(p1-q1) + ABSSUB m2, rp1, rq1, m7 ; abs(p1-q1) pand m2, [pb_fe] ; drop lsb so shift can work psrlq m2, 1 ; abs(p1-q1)/2 paddusb m1, m2 ; abs(p0-q0)*2 + abs(p1-q1)/2 @@ -510,10 +535,10 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, movd m7, Hd SPLATB_MIX m7 pxor m7, 
m6 - ABSSUB m4, m10, m11, m1 ; abs(p1 - p0) + ABSSUB m4, rp1, rp0, m1 ; abs(p1 - p0) pxor m4, m6 pcmpgtb m0, m4, m7 ; abs(p1 - p0) > H (1/2 hev condition) - ABSSUB m4, m13, m12, m1 ; abs(q1 - q0) + ABSSUB m4, rq1, rq0, m1 ; abs(q1 - q0) pxor m4, m6 pcmpgtb m5, m4, m7 ; abs(q1 - q0) > H (2/2 hev condition) por m0, m5 ; hev final value @@ -564,66 +589,74 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, ; filter2() %if %2 != 44 mova m6, [pb_80] ; already in m6 if 44_16 + SWAP 2, 15 + SWAP 1, 8 %endif - pxor m15, m12, m6 ; q0 ^ 0x80 - pxor m14, m11, m6 ; p0 ^ 0x80 - psubsb m15, m14 ; (signed) q0 - p0 - pxor m4, m10, m6 ; p1 ^ 0x80 - pxor m5, m13, m6 ; q1 ^ 0x80 + pxor m2, m6, rq0 ; q0 ^ 0x80 + pxor m4, m6, rp0 ; p0 ^ 0x80 + psubsb m2, m4 ; (signed) q0 - p0 + pxor m4, m6, rp1 ; p1 ^ 0x80 + pxor m5, m6, rq1 ; q1 ^ 0x80 psubsb m4, m5 ; (signed) p1 - q1 - paddsb m4, m15 ; (q0 - p0) + (p1 - q1) - paddsb m4, m15 ; 2*(q0 - p0) + (p1 - q1) - paddsb m4, m15 ; 3*(q0 - p0) + (p1 - q1) + paddsb m4, m2 ; (q0 - p0) + (p1 - q1) + paddsb m4, m2 ; 2*(q0 - p0) + (p1 - q1) + paddsb m4, m2 ; 3*(q0 - p0) + (p1 - q1) paddsb m6, m4, [pb_4] ; m6: f1 = clip(f + 4, 127) paddsb m4, [pb_3] ; m4: f2 = clip(f + 3, 127) +%if ARCH_X86_64 mova m14, [pb_10] ; will be reused in filter4() - SRSHIFT3B_2X m6, m4, m14, m7 ; f1 and f2 sign byte shift by 3 - SIGN_SUB m7, m12, m6, m5 ; m7 = q0 - f1 - SIGN_ADD m8, m11, m4, m5 ; m8 = p0 + f2 +%define rb10 m14 +%else +%define rb10 [pb_10] +%endif + SRSHIFT3B_2X m6, m4, rb10, m7 ; f1 and f2 sign byte shift by 3 + SIGN_SUB m7, rq0, m6, m5 ; m7 = q0 - f1 + SIGN_ADD m1, rp0, m4, m5 ; m1 = p0 + f2 %if %2 != 44 - pandn m6, m2, m3 ; ~mask(in) & mask(fm) + pandn m6, m15, m3 ; ~mask(in) & mask(fm) pand m6, m0 ; (~mask(in) & mask(fm)) & mask(hev) %else pand m6, m3, m0 %endif - MASK_APPLY m7, m12, m6, m5 ; m7 = filter2(q0) & mask / we write it in filter4() - MASK_APPLY m8, m11, m6, m5 ; m8 = filter2(p0) & mask / we write it in filter4() + MASK_APPLY m7, rq0, m6, m5 ; m7 = filter2(q0) & mask / we write it in filter4() + MASK_APPLY m1, rp0, m6, m5 ; m1 = filter2(p0) & mask / we write it in filter4() - ; (m0: hev, [m1: flat8out], [m2: flat8in], m3: fm, m7..m8: q0' p0', m10..13: p1 p0 q0 q1, m14: pb_10, m15: q0-p0) + ; (m0: hev, m1: p0', m2: q0-p0, m3: fm, m7: q0', [m8: flat8out], m10..13: p1 p0 q0 q1, m14: pb_10, [m15: flat8in], ) ; filter4() - mova m4, m15 - paddsb m15, m4 ; 2 * (q0 - p0) - paddsb m15, m4 ; 3 * (q0 - p0) - paddsb m6, m15, [pb_4] ; m6: f1 = clip(f + 4, 127) - paddsb m15, [pb_3] ; m15: f2 = clip(f + 3, 127) - SRSHIFT3B_2X m6, m15, m14, m9 ; f1 and f2 sign byte shift by 3 + mova m4, m2 + paddsb m2, m4 ; 2 * (q0 - p0) + paddsb m2, m4 ; 3 * (q0 - p0) + paddsb m6, m2, [pb_4] ; m6: f1 = clip(f + 4, 127) + paddsb m2, [pb_3] ; m2: f2 = clip(f + 3, 127) + SRSHIFT3B_2X m6, m2, rb10, m4 ; f1 and f2 sign byte shift by 3 %if %2 != 44 -%define p0tmp m7 -%define q0tmp m9 - pandn m5, m2, m3 ; ~mask(in) & mask(fm) + pandn m5, m15, m3 ; ~mask(in) & mask(fm) pandn m0, m5 ; ~mask(hev) & (~mask(in) & mask(fm)) %else -%define p0tmp m1 -%define q0tmp m2 pandn m0, m3 %endif - SIGN_SUB q0tmp, m12, m6, m4 ; q0 - f1 - MASK_APPLY q0tmp, m7, m0, m5 ; filter4(q0) & mask - mova [Q0], q0tmp - SIGN_ADD p0tmp, m11, m15, m4 ; p0 + f2 - MASK_APPLY p0tmp, m8, m0, m5 ; filter4(p0) & mask - mova [P0], p0tmp + SIGN_SUB m5, rq0, m6, m4 ; q0 - f1 + MASK_APPLY m5, m7, m0, m4 ; filter4(q0) & mask + mova [Q0], m5 + SIGN_ADD m7, rp0, m2, m4 ; p0 + f2 + MASK_APPLY m7, m1, m0, m4 ; filter4(p0) & 
mask + mova [P0], m7 paddb m6, [pb_80] ; - pxor m8, m8 ; f=(f1+1)>>1 - pavgb m6, m8 ; + pxor m1, m1 ; f=(f1+1)>>1 + pavgb m6, m1 ; psubb m6, [pb_40] ; - SIGN_ADD m7, m10, m6, m8 ; p1 + f - SIGN_SUB m4, m13, m6, m8 ; q1 - f - MASK_APPLY m7, m10, m0, m14 ; m7 = filter4(p1) - MASK_APPLY m4, m13, m0, m14 ; m4 = filter4(q1) - mova [P1], m7 + SIGN_ADD m1, rp1, m6, m2 ; p1 + f + SIGN_SUB m4, rq1, m6, m2 ; q1 - f + MASK_APPLY m1, rp1, m0, m2 ; m1 = filter4(p1) + MASK_APPLY m4, rq1, m0, m2 ; m4 = filter4(q1) + mova [P1], m1 mova [Q1], m4 +%if %2 != 44 +SWAP 1, 8 +SWAP 2, 15 +%endif + ; ([m1: flat8out], m2: flat8in, m3: fm, m10..13: p1 p0 q0 q1) ; filter6() %if %2 != 44 @@ -726,13 +759,15 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, movu [Q6], m14 movu [Q7], m15 %elif %2 == 44 - SWAP 0, 7 ; m0 = p1 + SWAP 0, 1 ; m0 = p1 + SWAP 1, 7 ; m1 = p0 + SWAP 2, 5 ; m2 = q0 SWAP 3, 4 ; m3 = q1 DEFINE_REAL_P7_TO_Q7 2 - SBUTTERFLY bw, 0, 1, 8 - SBUTTERFLY bw, 2, 3, 8 - SBUTTERFLY wd, 0, 2, 8 - SBUTTERFLY wd, 1, 3, 8 + SBUTTERFLY bw, 0, 1, 4 + SBUTTERFLY bw, 2, 3, 4 + SBUTTERFLY wd, 0, 2, 4 + SBUTTERFLY wd, 1, 3, 4 movd [P7], m0 movd [P3], m2 movd [Q0], m1 @@ -810,7 +845,9 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, %macro LPF_16_VH 3 INIT_XMM %3 LOOPFILTER v, %1, %2, 0 +%if ARCH_X86_64 LOOPFILTER h, %1, %2, 256 +%endif %endmacro %macro LPF_16_VH_ALL_OPTS 2 @@ -819,10 +856,12 @@ LPF_16_VH %1, %2, ssse3 LPF_16_VH %1, %2, avx %endmacro +%if ARCH_X86_64 LPF_16_VH_ALL_OPTS 16, 512 +%endif LPF_16_VH_ALL_OPTS 44, 0 +%if ARCH_X86_64 LPF_16_VH_ALL_OPTS 48, 256 LPF_16_VH_ALL_OPTS 84, 256 LPF_16_VH_ALL_OPTS 88, 256 - -%endif ; x86-64 +%endif From 37637e65907b1a8c3731ea69c638792cb2438d0c Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Sat, 27 Dec 2014 15:12:01 -0500 Subject: [PATCH 18/23] vp9lpf/x86: make filter_88_v work on 32-bit. 
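Same idea as for the 44 case, plus the new SCRATCH/UNSCRATCH helpers: on x86-64 they are plain SWAPs, on x86-32 they spill to and reload from a stack slot. For orientation, the flat8in mask the 88 path adds on top of fm is, per byte lane (scalar sketch following the "<= 1" comments; flat8_mask and its parameters are illustrative names):

    #include <stdlib.h>

    /* flat8in: p3..p1 are within 1 of p0 and q1..q3 within 1 of q0, so
     * the flat filter6() output is used instead of the short filter
     * (8-bit case). */
    static int flat8_mask(int p3, int p2, int p1, int p0,
                          int q0, int q1, int q2, int q3)
    {
        return abs(p3 - p0) <= 1 && abs(p2 - p0) <= 1 &&
               abs(p1 - p0) <= 1 && abs(q1 - q0) <= 1 &&
               abs(q2 - q0) <= 1 && abs(q3 - q0) <= 1;
    }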
Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9dsp_init.c | 2 +- libavcodec/x86/vp9lpf.asm | 155 ++++++++++++++++++++++++----------- 2 files changed, 109 insertions(+), 48 deletions(-) diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c index daced21731..523e92c487 100644 --- a/libavcodec/x86/vp9dsp_init.c +++ b/libavcodec/x86/vp9dsp_init.c @@ -291,8 +291,8 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) dsp->loop_filter_mix2[1][0][0] = ff_vp9_loop_filter_h_84_16_##opt; \ dsp->loop_filter_mix2[1][0][1] = ff_vp9_loop_filter_v_84_16_##opt; \ dsp->loop_filter_mix2[1][1][0] = ff_vp9_loop_filter_h_88_16_##opt; \ - dsp->loop_filter_mix2[1][1][1] = ff_vp9_loop_filter_v_88_16_##opt; \ } \ + dsp->loop_filter_mix2[1][1][1] = ff_vp9_loop_filter_v_88_16_##opt; \ } while (0) if (EXTERNAL_MMX(cpu_flags)) { diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index d9a6215aa4..e1c2b7b445 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -52,6 +52,22 @@ mask_mix48: times 8 db 0x00 SECTION .text +%macro SCRATCH 3 +%if ARCH_X86_64 + SWAP %1, %2 +%else + mova [%3], m%1 +%endif +%endmacro + +%macro UNSCRATCH 3 +%if ARCH_X86_64 + SWAP %1, %2 +%else + mova m%1, [%3] +%endif +%endmacro + ; %1 = abs(%2-%3) %macro ABSSUB 4 ; dst, src1 (RO), src2 (RO), tmp %if ARCH_X86_64 @@ -86,12 +102,26 @@ SECTION .text por %1, %4 ; new&mask | old&~mask %endmacro -%macro FILTER_SUBx2_ADDx2 9-10 "" ; %1=dst %2=h/l %3=cache %4=stack_off %5=sub1 %6=sub2 %7=add1 %8=add2 %9=rshift, [unpack] +%macro UNPACK 4 +%if ARCH_X86_64 + punpck%1bw %2, %3, %4 +%else + mova %2, %3 + punpck%1bw %2, %4 +%endif +%endmacro + +%macro FILTER_SUBx2_ADDx2 11 ; %1=dst %2=h/l %3=cache %4=stack_off %5=sub1 %6=sub2 %7=add1 + ; %8=add2 %9=rshift, [unpack], [unpack_is_mem_on_x86_32] psubw %3, [rsp+%4+%5*32] psubw %3, [rsp+%4+%6*32] paddw %3, [rsp+%4+%7*32] %ifnidn %10, "" +%if %11 == 0 punpck%2bw %1, %10, m0 +%else + UNPACK %2, %1, %10, m0 +%endif mova [rsp+%4+%8*32], %1 paddw %3, %1 %else @@ -110,13 +140,14 @@ SECTION .text %endmacro -%macro FILTER_UPDATE 12-15 "", "" ; tmp1, tmp2, cacheL, cacheH, dstp, stack_off, -, -, +, +, rshift, mask, [source], [unpack] +%macro FILTER_UPDATE 12-16 "", "", "", 0 ; tmp1, tmp2, cacheL, cacheH, dstp, stack_off, -, -, +, +, rshift, + ; mask, [source], [unpack + src], [unpack_is_mem_on_x86_32] ; FIXME interleave this properly with the subx2/addx2 -%if %0 == 15 +%ifnidn %15, "" mova %14, %15 %endif - FILTER_SUBx2_ADDx2 %1, l, %3, %6 + 0, %7, %8, %9, %10, %11, %14 - FILTER_SUBx2_ADDx2 %2, h, %4, %6 + 16, %7, %8, %9, %10, %11, %14 + FILTER_SUBx2_ADDx2 %1, l, %3, %6 + 0, %7, %8, %9, %10, %11, %14, %16 + FILTER_SUBx2_ADDx2 %2, h, %4, %6 + 16, %7, %8, %9, %10, %11, %14, %16 packuswb %1, %2 %ifnidn %13, "" MASK_APPLY %1, %13, %12, %2 @@ -162,21 +193,21 @@ SECTION .text %endmacro %macro FILTER6_INIT 4 ; %1=dst %2=h/l %3=cache, %4=stack_off - punpck%2bw %1, m14, m0 ; p3: B->W + UNPACK %2, %1, rp3, m0 ; p3: B->W mova [rsp+%4+0*32], %1 paddw %3, %1, %1 ; p3*2 paddw %3, %1 ; p3*3 - punpck%2bw %1, m15, m0 ; p2: B->W + punpck%2bw %1, m2, m0 ; p2: B->W mova [rsp+%4+1*32], %1 paddw %3, %1 ; p3*3 + p2 paddw %3, %1 ; p3*3 + p2*2 - punpck%2bw %1, m10, m0 ; p1: B->W + UNPACK %2, %1, rp1, m0 ; p1: B->W mova [rsp+%4+2*32], %1 paddw %3, %1 ; p3*3 + p2*2 + p1 - punpck%2bw %1, m11, m0 ; p0: B->W + UNPACK %2, %1, rp0, m0 ; p0: B->W mova [rsp+%4+3*32], %1 paddw %3, %1 ; p3*3 + p2*2 + p1 + p0 - punpck%2bw %1, m12, m0 ; q0: B->W + UNPACK %2, %1, rq0, m0 ; q0: B->W mova [rsp+%4+4*32], %1 
paddw %3, %1 ; p3*3 + p2*2 + p1 + p0 + q0 paddw %3, [pw_4] ; p3*3 + p2*2 + p1 + p0 + q0 + 4 @@ -320,14 +351,14 @@ SECTION .text %endif %endmacro -%macro LOOPFILTER 4 ; %1=v/h %2=size1 %3+%4=stack +%macro LOOPFILTER 5 ; %1=v/h %2=size1 %3+%4=stack, %5=32bit stack only %if UNIX64 cglobal vp9_loop_filter_%1_%2_16, 5, 9, 16, %3 + %4, dst, stride, E, I, H, mstride, dst2, stride3, mstride3 %else %if WIN64 cglobal vp9_loop_filter_%1_%2_16, 4, 8, 16, %3 + %4, dst, stride, E, I, mstride, dst2, stride3, mstride3 %else -cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, stride3, mstride3 +cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, dst2, stride3, mstride3 %define Ed dword r2m %define Id dword r3m %endif @@ -497,11 +528,16 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, ; calc flat8in (if not 44_16) and hev masks %if %2 != 44 mova m6, [pb_81] ; [1 1 1 1 ...] ^ 0x80 - ABSSUB_GT m2, m8, m11, m6, m5 ; abs(p3 - p0) <= 1 + ABSSUB_GT m2, rp3, rp0, m6, m5 ; abs(p3 - p0) <= 1 +%if ARCH_X86_64 mova m8, [pb_80] - ABSSUB_GT m1, m9, m11, m6, m5, m8 ; abs(p2 - p0) <= 1 +%define rb80 m8 +%else +%define rb80 [pb_80] +%endif + ABSSUB_GT m1, rp2, rp0, m6, m5, rb80 ; abs(p2 - p0) <= 1 por m2, m1 - ABSSUB m4, m10, m11, m5 ; abs(p1 - p0) + ABSSUB m4, rp1, rp0, m5 ; abs(p1 - p0) %if %2 == 16 %if cpuflag(ssse3) pxor m0, m0 @@ -511,20 +547,20 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, movd m7, Hd SPLATB_MIX m7 %endif - pxor m7, m8 - pxor m4, m8 + pxor m7, rb80 + pxor m4, rb80 pcmpgtb m0, m4, m7 ; abs(p1 - p0) > H (1/2 hev condition) CMP_GT m4, m6 ; abs(p1 - p0) <= 1 por m2, m4 ; (flat8in) - ABSSUB m4, m13, m12, m1 ; abs(q1 - q0) - pxor m4, m8 + ABSSUB m4, rq1, rq0, m1 ; abs(q1 - q0) + pxor m4, rb80 pcmpgtb m5, m4, m7 ; abs(q1 - q0) > H (2/2 hev condition) por m0, m5 ; hev final value CMP_GT m4, m6 ; abs(q1 - q0) <= 1 por m2, m4 ; (flat8in) - ABSSUB_GT m1, m14, m12, m6, m5, m8 ; abs(q2 - q0) <= 1 + ABSSUB_GT m1, rq2, rq0, m6, m5, rb80 ; abs(q2 - q0) <= 1 por m2, m1 - ABSSUB_GT m1, m15, m12, m6, m5, m8 ; abs(q3 - q0) <= 1 + ABSSUB_GT m1, rq3, rq0, m6, m5, rb80 ; abs(q3 - q0) <= 1 por m2, m1 ; flat8in final value pxor m2, [pb_ff] %if %2 == 84 || %2 == 48 @@ -589,8 +625,10 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, ; filter2() %if %2 != 44 mova m6, [pb_80] ; already in m6 if 44_16 - SWAP 2, 15 + SCRATCH 2, 15, rsp+%3+%4 +%if %2 == 16 SWAP 1, 8 +%endif %endif pxor m2, m6, rq0 ; q0 ^ 0x80 pxor m4, m6, rp0 ; p0 ^ 0x80 @@ -613,7 +651,12 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, SIGN_SUB m7, rq0, m6, m5 ; m7 = q0 - f1 SIGN_ADD m1, rp0, m4, m5 ; m1 = p0 + f2 %if %2 != 44 +%if ARCH_X86_64 pandn m6, m15, m3 ; ~mask(in) & mask(fm) +%else + mova m6, [rsp+%3+%4] + pandn m6, m3 +%endif pand m6, m0 ; (~mask(in) & mask(fm)) & mask(hev) %else pand m6, m3, m0 @@ -630,7 +673,12 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, paddsb m2, [pb_3] ; m2: f2 = clip(f + 3, 127) SRSHIFT3B_2X m6, m2, rb10, m4 ; f1 and f2 sign byte shift by 3 %if %2 != 44 +%if ARCH_X86_64 pandn m5, m15, m3 ; ~mask(in) & mask(fm) +%else + mova m5, [rsp+%3+%4] + pandn m5, m3 +%endif pandn m0, m5 ; ~mask(hev) & (~mask(in) & mask(fm)) %else pandn m0, m3 @@ -652,31 +700,44 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, mova [P1], m1 mova [Q1], m4 -%if %2 != 44 -SWAP 1, 8 -SWAP 2, 15 
-%endif - ; ([m1: flat8out], m2: flat8in, m3: fm, m10..13: p1 p0 q0 q1) ; filter6() %if %2 != 44 pxor m0, m0 %if %2 > 16 - pand m3, m2 +%if ARCH_X86_64 + pand m3, m15 +%else + pand m3, [rsp+%3+%4] +%endif %else - pand m2, m3 ; mask(fm) & mask(in) - pandn m3, m1, m2 ; ~mask(out) & (mask(fm) & mask(in)) + pand m15, m3 ; mask(fm) & mask(in) + pandn m3, m8, m15 ; ~mask(out) & (mask(fm) & mask(in)) %endif +%if ARCH_X86_64 mova m14, [P3] - mova m15, [P2] - mova m8, [Q2] mova m9, [Q3] - FILTER_INIT m4, m5, m6, m7, [P2], %4, 6, m3, m15 ; [p2] - FILTER_UPDATE m4, m5, m6, m7, [P1], %4, 0, 1, 2, 5, 3, m3, "", m13 ; [p1] -p3 -p2 +p1 +q1 - FILTER_UPDATE m4, m5, m6, m7, [P0], %4, 0, 2, 3, 6, 3, m3, "", m8 ; [p0] -p3 -p1 +p0 +q2 - FILTER_UPDATE m4, m5, m6, m7, [Q0], %4, 0, 3, 4, 7, 3, m3, "", m9 ; [q0] -p3 -p0 +q0 +q3 +%define rp3 m14 +%define rq3 m9 +%else +%define rp3 [P3] +%define rq3 [Q3] +%endif + mova m2, [P2] + mova m1, [Q2] + FILTER_INIT m4, m5, m6, m7, [P2], %4, 6, m3, m2 ; [p2] + FILTER_UPDATE m4, m5, m6, m7, [P1], %4, 0, 1, 2, 5, 3, m3, "", rq1, "", 1 ; [p1] -p3 -p2 +p1 +q1 + FILTER_UPDATE m4, m5, m6, m7, [P0], %4, 0, 2, 3, 6, 3, m3, "", m1 ; [p0] -p3 -p1 +p0 +q2 + FILTER_UPDATE m4, m5, m6, m7, [Q0], %4, 0, 3, 4, 7, 3, m3, "", rq3, "", 1 ; [q0] -p3 -p0 +q0 +q3 FILTER_UPDATE m4, m5, m6, m7, [Q1], %4, 1, 4, 5, 7, 3, m3, "" ; [q1] -p2 -q0 +q1 +q3 - FILTER_UPDATE m4, m5, m6, m7, [Q2], %4, 2, 5, 6, 7, 3, m3, m8 ; [q2] -p1 -q1 +q2 +q3 + FILTER_UPDATE m4, m5, m6, m7, [Q2], %4, 2, 5, 6, 7, 3, m3, m1 ; [q2] -p1 -q1 +q2 +q3 +%endif + +%if %2 != 44 +%if %2 == 16 +SWAP 1, 8 +%endif +SWAP 2, 15 %endif ; (m0: 0, [m1: flat8out], m2: fm & flat8in, m8..15: q2 q3 p1 p0 q0 q1 p3 p2) @@ -842,26 +903,26 @@ SWAP 2, 15 RET %endmacro -%macro LPF_16_VH 3 -INIT_XMM %3 -LOOPFILTER v, %1, %2, 0 +%macro LPF_16_VH 4 +INIT_XMM %4 +LOOPFILTER v, %1, %2, 0, %3 %if ARCH_X86_64 -LOOPFILTER h, %1, %2, 256 +LOOPFILTER h, %1, %2, 256, %3 %endif %endmacro -%macro LPF_16_VH_ALL_OPTS 2 -LPF_16_VH %1, %2, sse2 -LPF_16_VH %1, %2, ssse3 -LPF_16_VH %1, %2, avx +%macro LPF_16_VH_ALL_OPTS 2-3 0 +LPF_16_VH %1, %2, %3, sse2 +LPF_16_VH %1, %2, %3, ssse3 +LPF_16_VH %1, %2, %3, avx %endmacro %if ARCH_X86_64 LPF_16_VH_ALL_OPTS 16, 512 %endif -LPF_16_VH_ALL_OPTS 44, 0 +LPF_16_VH_ALL_OPTS 44, 0, 0 %if ARCH_X86_64 LPF_16_VH_ALL_OPTS 48, 256 LPF_16_VH_ALL_OPTS 84, 256 -LPF_16_VH_ALL_OPTS 88, 256 %endif +LPF_16_VH_ALL_OPTS 88, 256, 16 From b905e8d2fe03da1bf34ffa6e04b322f19a479143 Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Fri, 26 Dec 2014 12:10:26 -0500 Subject: [PATCH 19/23] vp9lpf/x86: make filter_48/84_v work on 32-bit. 
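For reference while reading the mixed 48/84 paths, the hev ("high edge variance") mask that the shared code computes is, per byte lane (scalar sketch following the 1/2 and 2/2 hev condition comments; hev_mask and its parameters are illustrative names, not code from this patch):

    #include <stdlib.h>

    /* When hev is set, only p0/q0 are adjusted (using the p1 - q1 term);
     * when it is clear, p1 and q1 are filtered as well. */
    static int hev_mask(int p1, int p0, int q0, int q1, int H)
    {
        return abs(p1 - p0) > H || abs(q1 - q0) > H;
    }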
Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9dsp_init.c | 8 ++++++-- libavcodec/x86/vp9lpf.asm | 8 +++----- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c index 523e92c487..9f090534d3 100644 --- a/libavcodec/x86/vp9dsp_init.c +++ b/libavcodec/x86/vp9dsp_init.c @@ -287,9 +287,13 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) dsp->loop_filter_mix2[0][0][1] = ff_vp9_loop_filter_v_44_16_##opt; \ if (ARCH_X86_64) { \ dsp->loop_filter_mix2[0][1][0] = ff_vp9_loop_filter_h_48_16_##opt; \ - dsp->loop_filter_mix2[0][1][1] = ff_vp9_loop_filter_v_48_16_##opt; \ + } \ + dsp->loop_filter_mix2[0][1][1] = ff_vp9_loop_filter_v_48_16_##opt; \ + if (ARCH_X86_64) { \ dsp->loop_filter_mix2[1][0][0] = ff_vp9_loop_filter_h_84_16_##opt; \ - dsp->loop_filter_mix2[1][0][1] = ff_vp9_loop_filter_v_84_16_##opt; \ + } \ + dsp->loop_filter_mix2[1][0][1] = ff_vp9_loop_filter_v_84_16_##opt; \ + if (ARCH_X86_64) { \ dsp->loop_filter_mix2[1][1][0] = ff_vp9_loop_filter_h_88_16_##opt; \ } \ dsp->loop_filter_mix2[1][1][1] = ff_vp9_loop_filter_v_88_16_##opt; \ diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index e1c2b7b445..150cd7388a 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -920,9 +920,7 @@ LPF_16_VH %1, %2, %3, avx %if ARCH_X86_64 LPF_16_VH_ALL_OPTS 16, 512 %endif -LPF_16_VH_ALL_OPTS 44, 0, 0 -%if ARCH_X86_64 -LPF_16_VH_ALL_OPTS 48, 256 -LPF_16_VH_ALL_OPTS 84, 256 -%endif +LPF_16_VH_ALL_OPTS 44, 0, 0 +LPF_16_VH_ALL_OPTS 48, 256, 16 +LPF_16_VH_ALL_OPTS 84, 256, 16 LPF_16_VH_ALL_OPTS 88, 256, 16 From 5bfa96c4b30d9fdb59a8f2a9d0769a3fa2e622be Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Fri, 26 Dec 2014 14:05:23 -0500 Subject: [PATCH 20/23] vp9lpf/x86: make filter_16_v work on 32-bit. 
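The 16-wide filter additionally needs p4..p7 and q4..q7, which on x86-32 are again read through memory aliases (rp7..rp4, rq4..rq7). The flat8out mask they feed is, per byte lane (scalar sketch; flat8out_mask and the array layout are illustrative, not code from this patch):

    #include <stdlib.h>

    /* flat8out: p4..p7 are within 1 of p0 and q4..q7 within 1 of q0,
     * which enables the full filter14() path (8-bit case).
     * p[0] = p0 ... p[7] = p7, q[0] = q0 ... q[7] = q7. */
    static int flat8out_mask(const int p[8], const int q[8])
    {
        for (int i = 4; i < 8; i++)
            if (abs(p[i] - p[0]) > 1 || abs(q[i] - q[0]) > 1)
                return 0;
        return 1;
    }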
Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9dsp_init.c | 4 +- libavcodec/x86/vp9lpf.asm | 135 +++++++++++++++++++++++++---------- 2 files changed, 99 insertions(+), 40 deletions(-) diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c index 9f090534d3..76bb06e61b 100644 --- a/libavcodec/x86/vp9dsp_init.c +++ b/libavcodec/x86/vp9dsp_init.c @@ -281,7 +281,9 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) #define init_lpf(opt) do { \ if (ARCH_X86_64) { \ dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_##opt; \ - dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_##opt; \ + } \ + dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_##opt; \ + if (ARCH_X86_64) { \ dsp->loop_filter_mix2[0][0][0] = ff_vp9_loop_filter_h_44_16_##opt; \ } \ dsp->loop_filter_mix2[0][0][1] = ff_vp9_loop_filter_v_44_16_##opt; \ diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index 150cd7388a..57536b9d83 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -144,7 +144,9 @@ SECTION .text ; mask, [source], [unpack + src], [unpack_is_mem_on_x86_32] ; FIXME interleave this properly with the subx2/addx2 %ifnidn %15, "" +%if %16 == 0 || ARCH_X86_64 mova %14, %15 +%endif %endif FILTER_SUBx2_ADDx2 %1, l, %3, %6 + 0, %7, %8, %9, %10, %11, %14, %16 FILTER_SUBx2_ADDx2 %2, h, %4, %6 + 16, %7, %8, %9, %10, %11, %14, %16 @@ -197,7 +199,7 @@ SECTION .text mova [rsp+%4+0*32], %1 paddw %3, %1, %1 ; p3*2 paddw %3, %1 ; p3*3 - punpck%2bw %1, m2, m0 ; p2: B->W + punpck%2bw %1, m1, m0 ; p2: B->W mova [rsp+%4+1*32], %1 paddw %3, %1 ; p3*3 + p2 paddw %3, %1 ; p3*3 + p2*2 @@ -223,10 +225,10 @@ SECTION .text mova [rsp+%4+ 9*32], %1 paddw %3, %1 ; p7*7 + p6 paddw %3, %1 ; p7*7 + p6*2 - punpck%2bw %1, m8, m0 ; p5: B->W + UNPACK %2, %1, rp5, m0 ; p5: B->W mova [rsp+%4+10*32], %1 paddw %3, %1 ; p7*7 + p6*2 + p5 - punpck%2bw %1, m9, m0 ; p4: B->W + UNPACK %2, %1, rp4, m0 ; p4: B->W mova [rsp+%4+11*32], %1 paddw %3, %1 ; p7*7 + p6*2 + p5 + p4 paddw %3, [rsp+%4+ 0*32] ; p7*7 + p6*2 + p5 + p4 + p3 @@ -583,28 +585,56 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, %if %2 == 16 ; (m0: hev, m2: flat8in, m3: fm, m6: pb_81, m9..15: p2 p1 p0 q0 q1 q2 q3) ; calc flat8out mask +%if ARCH_X86_64 mova m8, [P7] mova m9, [P6] - ABSSUB_GT m1, m8, m11, m6, m5 ; abs(p7 - p0) <= 1 - ABSSUB_GT m7, m9, m11, m6, m5 ; abs(p6 - p0) <= 1 +%define rp7 m8 +%define rp6 m9 +%else +%define rp7 [P7] +%define rp6 [P6] +%endif + ABSSUB_GT m1, rp7, rp0, m6, m5 ; abs(p7 - p0) <= 1 + ABSSUB_GT m7, rp6, rp0, m6, m5 ; abs(p6 - p0) <= 1 por m1, m7 +%if ARCH_X86_64 mova m8, [P5] mova m9, [P4] - ABSSUB_GT m7, m8, m11, m6, m5 ; abs(p5 - p0) <= 1 +%define rp5 m8 +%define rp4 m9 +%else +%define rp5 [P5] +%define rp4 [P4] +%endif + ABSSUB_GT m7, rp5, rp0, m6, m5 ; abs(p5 - p0) <= 1 por m1, m7 - ABSSUB_GT m7, m9, m11, m6, m5 ; abs(p4 - p0) <= 1 + ABSSUB_GT m7, rp4, rp0, m6, m5 ; abs(p4 - p0) <= 1 por m1, m7 +%if ARCH_X86_64 mova m14, [Q4] mova m15, [Q5] - ABSSUB_GT m7, m14, m12, m6, m5 ; abs(q4 - q0) <= 1 +%define rq4 m14 +%define rq5 m15 +%else +%define rq4 [Q4] +%define rq5 [Q5] +%endif + ABSSUB_GT m7, rq4, rq0, m6, m5 ; abs(q4 - q0) <= 1 por m1, m7 - ABSSUB_GT m7, m15, m12, m6, m5 ; abs(q5 - q0) <= 1 + ABSSUB_GT m7, rq5, rq0, m6, m5 ; abs(q5 - q0) <= 1 por m1, m7 +%if ARCH_X86_64 mova m14, [Q6] mova m15, [Q7] - ABSSUB_GT m7, m14, m12, m6, m5 ; abs(q4 - q0) <= 1 +%define rq6 m14 +%define rq7 m15 +%else +%define rq6 [Q6] +%define rq7 [Q7] +%endif + ABSSUB_GT m7, rq6, rq0, m6, m5 ; 
abs(q4 - q0) <= 1 por m1, m7 - ABSSUB_GT m7, m15, m12, m6, m5 ; abs(q5 - q0) <= 1 + ABSSUB_GT m7, rq7, rq0, m6, m5 ; abs(q5 - q0) <= 1 por m1, m7 ; flat8out final value pxor m1, [pb_ff] %endif @@ -627,7 +657,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, mova m6, [pb_80] ; already in m6 if 44_16 SCRATCH 2, 15, rsp+%3+%4 %if %2 == 16 - SWAP 1, 8 + SCRATCH 1, 8, rsp+%3+%4+16 %endif %endif pxor m2, m6, rq0 ; q0 ^ 0x80 @@ -700,19 +730,24 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, mova [P1], m1 mova [Q1], m4 +%if %2 != 44 + UNSCRATCH 2, 15, rsp+%3+%4 +%endif + ; ([m1: flat8out], m2: flat8in, m3: fm, m10..13: p1 p0 q0 q1) ; filter6() %if %2 != 44 pxor m0, m0 %if %2 > 16 + pand m3, m2 +%else + pand m2, m3 ; mask(fm) & mask(in) %if ARCH_X86_64 - pand m3, m15 + pandn m3, m8, m2 ; ~mask(out) & (mask(fm) & mask(in)) %else - pand m3, [rsp+%3+%4] + mova m3, [rsp+%3+%4+16] + pandn m3, m2 %endif -%else - pand m15, m3 ; mask(fm) & mask(in) - pandn m3, m8, m15 ; ~mask(out) & (mask(fm) & mask(in)) %endif %if ARCH_X86_64 mova m14, [P3] @@ -723,21 +758,18 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, %define rp3 [P3] %define rq3 [Q3] %endif - mova m2, [P2] + mova m1, [P2] + FILTER_INIT m4, m5, m6, m7, [P2], %4, 6, m3, m1 ; [p2] mova m1, [Q2] - FILTER_INIT m4, m5, m6, m7, [P2], %4, 6, m3, m2 ; [p2] FILTER_UPDATE m4, m5, m6, m7, [P1], %4, 0, 1, 2, 5, 3, m3, "", rq1, "", 1 ; [p1] -p3 -p2 +p1 +q1 - FILTER_UPDATE m4, m5, m6, m7, [P0], %4, 0, 2, 3, 6, 3, m3, "", m1 ; [p0] -p3 -p1 +p0 +q2 + FILTER_UPDATE m4, m5, m6, m7, [P0], %4, 0, 2, 3, 6, 3, m3, "", m1 ; [p0] -p3 -p1 +p0 +q2 FILTER_UPDATE m4, m5, m6, m7, [Q0], %4, 0, 3, 4, 7, 3, m3, "", rq3, "", 1 ; [q0] -p3 -p0 +q0 +q3 - FILTER_UPDATE m4, m5, m6, m7, [Q1], %4, 1, 4, 5, 7, 3, m3, "" ; [q1] -p2 -q0 +q1 +q3 - FILTER_UPDATE m4, m5, m6, m7, [Q2], %4, 2, 5, 6, 7, 3, m3, m1 ; [q2] -p1 -q1 +q2 +q3 + FILTER_UPDATE m4, m5, m6, m7, [Q1], %4, 1, 4, 5, 7, 3, m3, "" ; [q1] -p2 -q0 +q1 +q3 + FILTER_UPDATE m4, m5, m6, m7, [Q2], %4, 2, 5, 6, 7, 3, m3, m1 ; [q2] -p1 -q1 +q2 +q3 %endif -%if %2 != 44 %if %2 == 16 -SWAP 1, 8 -%endif -SWAP 2, 15 + UNSCRATCH 1, 8, rsp+%3+%4+16 %endif ; (m0: 0, [m1: flat8out], m2: fm & flat8in, m8..15: q2 q3 p1 p0 q0 q1 p3 p2) @@ -765,22 +797,49 @@ SWAP 2, 15 pand m1, m2 ; mask(out) & (mask(fm) & mask(in)) mova m2, [P7] mova m3, [P6] +%if ARCH_X86_64 mova m8, [P5] mova m9, [P4] +%define rp5 m8 +%define rp4 m9 +%define rp5s m8 +%define rp4s m9 +%define rp3s m14 +%define rq4 m8 +%define rq5 m9 +%define rq6 m14 +%define rq7 m15 +%define rq4s m8 +%define rq5s m9 +%define rq6s m14 +%else +%define rp5 [P5] +%define rp4 [P4] +%define rp5s "" +%define rp4s "" +%define rp3s "" +%define rq4 [Q4] +%define rq5 [Q5] +%define rq6 [Q6] +%define rq7 [Q7] +%define rq4s "" +%define rq5s "" +%define rq6s "" +%endif FILTER_INIT m4, m5, m6, m7, [P6], %4, 14, m1, m3 ; [p6] - FILTER_UPDATE m4, m5, m6, m7, [P5], %4, 8, 9, 10, 5, 4, m1, m8 ; [p5] -p7 -p6 +p5 +q1 - FILTER_UPDATE m4, m5, m6, m7, [P4], %4, 8, 10, 11, 6, 4, m1, m9 ; [p4] -p7 -p5 +p4 +q2 - FILTER_UPDATE m4, m5, m6, m7, [P3], %4, 8, 11, 0, 7, 4, m1, m14 ; [p3] -p7 -p4 +p3 +q3 - FILTER_UPDATE m4, m5, m6, m7, [P2], %4, 8, 0, 1, 12, 4, m1, "", m8, [Q4] ; [p2] -p7 -p3 +p2 +q4 - FILTER_UPDATE m4, m5, m6, m7, [P1], %4, 8, 1, 2, 13, 4, m1, "", m9, [Q5] ; [p1] -p7 -p2 +p1 +q5 - FILTER_UPDATE m4, m5, m6, m7, [P0], %4, 8, 2, 3, 14, 4, m1, "", m14, [Q6] ; [p0] -p7 -p1 +p0 +q6 - FILTER_UPDATE m4, m5, m6, m7, [Q0], 
%4, 8, 3, 4, 15, 4, m1, "", m15, [Q7] ; [q0] -p7 -p0 +q0 +q7 + FILTER_UPDATE m4, m5, m6, m7, [P5], %4, 8, 9, 10, 5, 4, m1, rp5s ; [p5] -p7 -p6 +p5 +q1 + FILTER_UPDATE m4, m5, m6, m7, [P4], %4, 8, 10, 11, 6, 4, m1, rp4s ; [p4] -p7 -p5 +p4 +q2 + FILTER_UPDATE m4, m5, m6, m7, [P3], %4, 8, 11, 0, 7, 4, m1, rp3s ; [p3] -p7 -p4 +p3 +q3 + FILTER_UPDATE m4, m5, m6, m7, [P2], %4, 8, 0, 1, 12, 4, m1, "", rq4, [Q4], 1 ; [p2] -p7 -p3 +p2 +q4 + FILTER_UPDATE m4, m5, m6, m7, [P1], %4, 8, 1, 2, 13, 4, m1, "", rq5, [Q5], 1 ; [p1] -p7 -p2 +p1 +q5 + FILTER_UPDATE m4, m5, m6, m7, [P0], %4, 8, 2, 3, 14, 4, m1, "", rq6, [Q6], 1 ; [p0] -p7 -p1 +p0 +q6 + FILTER_UPDATE m4, m5, m6, m7, [Q0], %4, 8, 3, 4, 15, 4, m1, "", rq7, [Q7], 1 ; [q0] -p7 -p0 +q0 +q7 FILTER_UPDATE m4, m5, m6, m7, [Q1], %4, 9, 4, 5, 15, 4, m1, "" ; [q1] -p6 -q0 +q1 +q7 FILTER_UPDATE m4, m5, m6, m7, [Q2], %4, 10, 5, 6, 15, 4, m1, "" ; [q2] -p5 -q1 +q2 +q7 FILTER_UPDATE m4, m5, m6, m7, [Q3], %4, 11, 6, 7, 15, 4, m1, "" ; [q3] -p4 -q2 +q3 +q7 - FILTER_UPDATE m4, m5, m6, m7, [Q4], %4, 0, 7, 12, 15, 4, m1, m8 ; [q4] -p3 -q3 +q4 +q7 - FILTER_UPDATE m4, m5, m6, m7, [Q5], %4, 1, 12, 13, 15, 4, m1, m9 ; [q5] -p2 -q4 +q5 +q7 - FILTER_UPDATE m4, m5, m6, m7, [Q6], %4, 2, 13, 14, 15, 4, m1, m14 ; [q6] -p1 -q5 +q6 +q7 + FILTER_UPDATE m4, m5, m6, m7, [Q4], %4, 0, 7, 12, 15, 4, m1, rq4s ; [q4] -p3 -q3 +q4 +q7 + FILTER_UPDATE m4, m5, m6, m7, [Q5], %4, 1, 12, 13, 15, 4, m1, rq5s ; [q5] -p2 -q4 +q5 +q7 + FILTER_UPDATE m4, m5, m6, m7, [Q6], %4, 2, 13, 14, 15, 4, m1, rq6s ; [q6] -p1 -q5 +q6 +q7 %endif %ifidn %1, h @@ -917,9 +976,7 @@ LPF_16_VH %1, %2, %3, ssse3 LPF_16_VH %1, %2, %3, avx %endmacro -%if ARCH_X86_64 -LPF_16_VH_ALL_OPTS 16, 512 -%endif +LPF_16_VH_ALL_OPTS 16, 512, 32 LPF_16_VH_ALL_OPTS 44, 0, 0 LPF_16_VH_ALL_OPTS 48, 256, 16 LPF_16_VH_ALL_OPTS 84, 256, 16 From 725a216481c422a71a727771706d6343a0eaeaf8 Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Fri, 26 Dec 2014 14:48:01 -0500 Subject: [PATCH 21/23] vp9lpf/x86: make filter_44_h work on 32-bit. 
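x86-32 only has xmm0-xmm7, so the horizontal 44 path can no longer keep all 16 input lines in registers the way the x86-64 code does: the 8x16 transpose is rebuilt from punpcklbw pairs plus an 8x8 word transpose, and the transposed P3..Q3 lines live in stack scratch space instead of xmm8-xmm15. A rough SSE2-intrinsics sketch of the same unpack-based transpose idea (shown as a plain 8x8 byte transpose for brevity; the function and buffer names are illustrative and not part of this patch):

    #include <stdint.h>
    #include <stddef.h>
    #include <emmintrin.h>  /* SSE2 */

    /* Illustrative 8x8 byte transpose built only from unpack steps, the same
     * building block the asm uses (punpcklbw -> punpcklwd -> punpckldq). */
    static void transpose_8x8_u8(const uint8_t *src, ptrdiff_t sstride,
                                 uint8_t *dst, ptrdiff_t dstride)
    {
        __m128i r[8], t[4], u[4], v[4];
        int i;

        for (i = 0; i < 8; i++)                     /* 8 bytes per input row */
            r[i] = _mm_loadl_epi64((const __m128i *)(src + i * sstride));

        for (i = 0; i < 4; i++)                     /* bytes -> byte pairs   */
            t[i] = _mm_unpacklo_epi8(r[2 * i], r[2 * i + 1]);

        u[0] = _mm_unpacklo_epi16(t[0], t[1]);      /* pairs -> quads        */
        u[1] = _mm_unpackhi_epi16(t[0], t[1]);
        u[2] = _mm_unpacklo_epi16(t[2], t[3]);
        u[3] = _mm_unpackhi_epi16(t[2], t[3]);

        v[0] = _mm_unpacklo_epi32(u[0], u[2]);      /* output rows 0 and 1   */
        v[1] = _mm_unpackhi_epi32(u[0], u[2]);      /* output rows 2 and 3   */
        v[2] = _mm_unpacklo_epi32(u[1], u[3]);      /* output rows 4 and 5   */
        v[3] = _mm_unpackhi_epi32(u[1], u[3]);      /* output rows 6 and 7   */

        for (i = 0; i < 4; i++) {                   /* two rows per register */
            _mm_storel_epi64((__m128i *)(dst + (2 * i)     * dstride), v[i]);
            _mm_storel_epi64((__m128i *)(dst + (2 * i + 1) * dstride),
                             _mm_srli_si128(v[i], 8));
        }
    }

Each v register holds two transposed rows and is written out in two 8-byte halves, which is the same movh/movhps split the asm uses when it spills the transposed lines to the stack.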
Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9dsp_init.c | 4 +- libavcodec/x86/vp9lpf.asm | 140 +++++++++++++++++++---------------- 2 files changed, 78 insertions(+), 66 deletions(-) diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c index 76bb06e61b..6438644867 100644 --- a/libavcodec/x86/vp9dsp_init.c +++ b/libavcodec/x86/vp9dsp_init.c @@ -283,9 +283,7 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_##opt; \ } \ dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_##opt; \ - if (ARCH_X86_64) { \ - dsp->loop_filter_mix2[0][0][0] = ff_vp9_loop_filter_h_44_16_##opt; \ - } \ + dsp->loop_filter_mix2[0][0][0] = ff_vp9_loop_filter_h_44_16_##opt; \ dsp->loop_filter_mix2[0][0][1] = ff_vp9_loop_filter_v_44_16_##opt; \ if (ARCH_X86_64) { \ dsp->loop_filter_mix2[0][1][0] = ff_vp9_loop_filter_h_48_16_##opt; \ diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index 57536b9d83..881bdab8a8 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -291,38 +291,6 @@ SECTION .text SWAP %12, %14 %endmacro -; transpose 16 half lines (high part) to 8 full centered lines -%macro TRANSPOSE16x8B 16 - punpcklbw m%1, m%2 - punpcklbw m%3, m%4 - punpcklbw m%5, m%6 - punpcklbw m%7, m%8 - punpcklbw m%9, m%10 - punpcklbw m%11, m%12 - punpcklbw m%13, m%14 - punpcklbw m%15, m%16 - SBUTTERFLY wd, %1, %3, %2 - SBUTTERFLY wd, %5, %7, %2 - SBUTTERFLY wd, %9, %11, %2 - SBUTTERFLY wd, %13, %15, %2 - SBUTTERFLY dq, %1, %5, %2 - SBUTTERFLY dq, %3, %7, %2 - SBUTTERFLY dq, %9, %13, %2 - SBUTTERFLY dq, %11, %15, %2 - SBUTTERFLY qdq, %1, %9, %2 - SBUTTERFLY qdq, %3, %11, %2 - SBUTTERFLY qdq, %5, %13, %2 - SBUTTERFLY qdq, %7, %15, %2 - SWAP %5, %1 - SWAP %6, %9 - SWAP %7, %1 - SWAP %8, %13 - SWAP %9, %3 - SWAP %10, %11 - SWAP %11, %1 - SWAP %12, %15 -%endmacro - %macro DEFINE_REAL_P7_TO_Q7 0-1 0 %define P7 dstq + 4*mstrideq + %1 %define P6 dstq + mstride3q + %1 @@ -398,6 +366,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, movx m5, [P2] movx m6, [P1] movx m7, [P0] +%if ARCH_X86_64 movx m8, [Q0] movx m9, [Q1] movx m10, [Q2] @@ -406,32 +375,67 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, movx m13, [Q5] movx m14, [Q6] movx m15, [Q7] -%define P7 rsp + 0 -%define P6 rsp + 16 -%define P5 rsp + 32 -%define P4 rsp + 48 -%define P3 rsp + 64 -%define P2 rsp + 80 -%define P1 rsp + 96 -%define P0 rsp + 112 -%define Q0 rsp + 128 -%define Q1 rsp + 144 -%define Q2 rsp + 160 -%define Q3 rsp + 176 +%if %2 == 16 + TRANSPOSE16x16B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, [rsp] +%define P7 rsp + 128 +%define P6 rsp + 144 +%define P5 rsp + 160 +%define P4 rsp + 176 %define Q4 rsp + 192 %define Q5 rsp + 208 %define Q6 rsp + 224 %define Q7 rsp + 240 - -%if %2 == 16 - TRANSPOSE16x16B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, [rsp] mova [P7], m0 mova [P6], m1 mova [P5], m2 mova [P4], m3 %else - TRANSPOSE16x8B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 -%endif + ; 8x16 transpose + punpcklbw m0, m1 + punpcklbw m2, m3 + punpcklbw m4, m5 + punpcklbw m6, m7 + punpcklbw m8, m9 + punpcklbw m10, m11 + punpcklbw m12, m13 + punpcklbw m14, m15 + TRANSPOSE8x8W 0, 2, 4, 6, 8, 10, 12, 14, 15 + SWAP 0, 4 + SWAP 2, 5 + SWAP 0, 6 + SWAP 0, 7 + SWAP 10, 9 + SWAP 12, 10 + SWAP 14, 11 +%endif +%else ; x86-32 + punpcklbw m0, m1 + punpcklbw m2, m3 + punpcklbw m4, m5 + punpcklbw m6, m7 + movx m1, [Q0] + movx m3, [Q1] + movx m5, [Q2] + movx m7, [Q3] + 
punpcklbw m1, m3 + punpcklbw m5, m7 + movx m3, [Q4] + movx m7, [Q5] + punpcklbw m3, m7 + mova [rsp], m3 + movx m3, [Q6] + movx m7, [Q7] + punpcklbw m3, m7 +%endif +%define P3 rsp + 0 +%define P2 rsp + 16 +%define P1 rsp + 32 +%define P0 rsp + 48 +%define Q0 rsp + 64 +%define Q1 rsp + 80 +%define Q2 rsp + 96 +%define Q3 rsp + 112 +%if ARCH_X86_64 mova [P3], m4 mova [P2], m5 mova [P1], m6 @@ -446,7 +450,17 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, mova [Q6], m14 mova [Q7], m15 %endif +%else ; x86-32 + TRANSPOSE8x8W 0, 2, 4, 6, 1, 5, 7, 3, [rsp], [Q0], 1 + mova [P3], m0 + mova [P2], m2 + mova [P1], m4 + mova [P0], m6 + mova [Q1], m5 + mova [Q2], m7 + mova [Q3], m3 %endif +%endif ; %1 == h ; calc fm mask %if %2 == 16 @@ -962,22 +976,22 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, RET %endmacro -%macro LPF_16_VH 4 -INIT_XMM %4 -LOOPFILTER v, %1, %2, 0, %3 -%if ARCH_X86_64 -LOOPFILTER h, %1, %2, 256, %3 +%macro LPF_16_VH 5 +INIT_XMM %5 +LOOPFILTER v, %1, %2, 0, %4 +%if ARCH_X86_64 || %1 == 44 +LOOPFILTER h, %1, %2, %3, %4 %endif %endmacro -%macro LPF_16_VH_ALL_OPTS 2-3 0 -LPF_16_VH %1, %2, %3, sse2 -LPF_16_VH %1, %2, %3, ssse3 -LPF_16_VH %1, %2, %3, avx +%macro LPF_16_VH_ALL_OPTS 4 +LPF_16_VH %1, %2, %3, %4, sse2 +LPF_16_VH %1, %2, %3, %4, ssse3 +LPF_16_VH %1, %2, %3, %4, avx %endmacro -LPF_16_VH_ALL_OPTS 16, 512, 32 -LPF_16_VH_ALL_OPTS 44, 0, 0 -LPF_16_VH_ALL_OPTS 48, 256, 16 -LPF_16_VH_ALL_OPTS 84, 256, 16 -LPF_16_VH_ALL_OPTS 88, 256, 16 +LPF_16_VH_ALL_OPTS 16, 512, 256, 32 +LPF_16_VH_ALL_OPTS 44, 0, 128, 0 +LPF_16_VH_ALL_OPTS 48, 256, 128, 16 +LPF_16_VH_ALL_OPTS 84, 256, 128, 16 +LPF_16_VH_ALL_OPTS 88, 256, 128, 16 From 8915320db94c9b3ceb97d6ad92addda690af8c18 Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Fri, 26 Dec 2014 15:15:50 -0500 Subject: [PATCH 22/23] vp9lpf/x86: make filter_48/84/88_h work on 32-bit. 
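With this change the 48/84/88 horizontal variants no longer need xmm8 as scratch for the output interleave: the SBUTTERFLY chain spills through the transposed stack slots instead, so the ARCH_X86_64 guards around the corresponding function pointers in vp9dsp_init.c can be dropped. For reference, a minimal sketch of how these entry points are reached through the dsp table; the header path is an assumption and the E/I/H packing is not shown, but the table indices and the (dst, stride, E, I, H) signature follow the assignments and the checkasm test in this series:

    #include <stdint.h>
    #include <stddef.h>
    #include "libavcodec/vp9dsp.h"   /* assumed location of VP9DSPContext */

    /* Run the "88" mix2 loop filter (8-tap on both adjacent edges) in its
     * horizontal form through the dispatch table. */
    static void loop_filter_h_88(VP9DSPContext *dsp, uint8_t *dst,
                                 ptrdiff_t stride, int E, int I, int H)
    {
        /* The first two indices pick the 44/48/84/88 variant, the last one
         * picks h (0) or v (1), matching
         * loop_filter_mix2[1][1][0] = ff_vp9_loop_filter_h_88_16_<opt>. */
        dsp->loop_filter_mix2[1][1][0](dst, stride, E, I, H);
    }

On x86-32 this slot now points at the SIMD version once ff_vp9dsp_init_x86() has run, instead of staying on the C fallback.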
Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9dsp_init.c | 12 ++----- libavcodec/x86/vp9lpf.asm | 62 ++++++++++++++++++++++++++---------- 2 files changed, 48 insertions(+), 26 deletions(-) diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c index 6438644867..76ea48f457 100644 --- a/libavcodec/x86/vp9dsp_init.c +++ b/libavcodec/x86/vp9dsp_init.c @@ -285,17 +285,11 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_##opt; \ dsp->loop_filter_mix2[0][0][0] = ff_vp9_loop_filter_h_44_16_##opt; \ dsp->loop_filter_mix2[0][0][1] = ff_vp9_loop_filter_v_44_16_##opt; \ - if (ARCH_X86_64) { \ - dsp->loop_filter_mix2[0][1][0] = ff_vp9_loop_filter_h_48_16_##opt; \ - } \ + dsp->loop_filter_mix2[0][1][0] = ff_vp9_loop_filter_h_48_16_##opt; \ dsp->loop_filter_mix2[0][1][1] = ff_vp9_loop_filter_v_48_16_##opt; \ - if (ARCH_X86_64) { \ - dsp->loop_filter_mix2[1][0][0] = ff_vp9_loop_filter_h_84_16_##opt; \ - } \ + dsp->loop_filter_mix2[1][0][0] = ff_vp9_loop_filter_h_84_16_##opt; \ dsp->loop_filter_mix2[1][0][1] = ff_vp9_loop_filter_v_84_16_##opt; \ - if (ARCH_X86_64) { \ - dsp->loop_filter_mix2[1][1][0] = ff_vp9_loop_filter_h_88_16_##opt; \ - } \ + dsp->loop_filter_mix2[1][1][0] = ff_vp9_loop_filter_h_88_16_##opt; \ dsp->loop_filter_mix2[1][1][1] = ff_vp9_loop_filter_v_88_16_##opt; \ } while (0) diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index 881bdab8a8..c20eeb89bf 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -939,9 +939,12 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, mova m3, [P0] mova m4, [Q0] mova m5, [Q1] +%if ARCH_X86_64 mova m6, [Q2] +%endif mova m7, [Q3] DEFINE_REAL_P7_TO_Q7 +%if ARCH_X86_64 SBUTTERFLY bw, 0, 1, 8 SBUTTERFLY bw, 2, 3, 8 SBUTTERFLY bw, 4, 5, 8 @@ -954,22 +957,47 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, SBUTTERFLY dq, 1, 5, 8 SBUTTERFLY dq, 2, 6, 8 SBUTTERFLY dq, 3, 7, 8 - movh [P7], m0 - movhps [P6], m0 - movh [Q0], m1 - movhps [Q1], m1 - movh [P3], m2 - movhps [P2], m2 - movh [Q4], m3 - movhps [Q5], m3 - movh [P5], m4 - movhps [P4], m4 - movh [Q2], m5 - movhps [Q3], m5 - movh [P1], m6 - movhps [P0], m6 - movh [Q6], m7 - movhps [Q7], m7 +%else + SBUTTERFLY bw, 0, 1, 6 + mova [rsp+64], m1 + mova m6, [rsp+96] + SBUTTERFLY bw, 2, 3, 1 + SBUTTERFLY bw, 4, 5, 1 + SBUTTERFLY bw, 6, 7, 1 + SBUTTERFLY wd, 0, 2, 1 + mova [rsp+96], m2 + mova m1, [rsp+64] + SBUTTERFLY wd, 1, 3, 2 + SBUTTERFLY wd, 4, 6, 2 + SBUTTERFLY wd, 5, 7, 2 + SBUTTERFLY dq, 0, 4, 2 + SBUTTERFLY dq, 1, 5, 2 + movh [Q0], m1 + movhps [Q1], m1 + mova m2, [rsp+96] + SBUTTERFLY dq, 2, 6, 1 + SBUTTERFLY dq, 3, 7, 1 +%endif + SWAP 3, 6 + SWAP 1, 4 + movh [P7], m0 + movhps [P6], m0 + movh [P5], m1 + movhps [P4], m1 + movh [P3], m2 + movhps [P2], m2 + movh [P1], m3 + movhps [P0], m3 +%if ARCH_X86_64 + movh [Q0], m4 + movhps [Q1], m4 +%endif + movh [Q2], m5 + movhps [Q3], m5 + movh [Q4], m6 + movhps [Q5], m6 + movh [Q6], m7 + movhps [Q7], m7 %endif %endif @@ -979,7 +1007,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, %macro LPF_16_VH 5 INIT_XMM %5 LOOPFILTER v, %1, %2, 0, %4 -%if ARCH_X86_64 || %1 == 44 +%if ARCH_X86_64 || %1 != 16 LOOPFILTER h, %1, %2, %3, %4 %endif %endmacro From 715f139c9bd407ef7f4d1f564ad683140ec61e6d Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Fri, 26 Dec 2014 17:50:38 -0500 Subject: [PATCH 23/23] vp9lpf/x86: make filter_16_h work on 32-bit. 
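The 16_h case has to transpose all 16 input lines, which cannot fit in the 8 XMM registers available on x86-32, so the transpose is split into two passes: the new TRANSPOSE8x8B macro handles 8 source lines at a time, and DEFINE_TRANSPOSED_P7_TO_Q7 re-points P7..Q7 at the stack buffer (with an optional byte offset) so the second pass fills the other 8-byte half of each transposed line. A scalar model of that two-pass split, just to show the data movement; the buffer layout and names are illustrative only:

    #include <stdint.h>
    #include <stddef.h>

    /* Transpose a 16x16 block of bytes in two passes of 8 source rows each,
     * so that only 8 rows ever need to be held live at a time. */
    static void transpose_16x16_in_two_passes(const uint8_t *src,
                                              ptrdiff_t stride,
                                              uint8_t scratch[16][16])
    {
        int half, i, j;

        for (half = 0; half < 2; half++) {          /* rows 0..7, then 8..15 */
            for (i = 0; i < 8; i++) {
                int row = 8 * half + i;
                for (j = 0; j < 16; j++)
                    /* pass 0 writes bytes 0..7 of every transposed line,
                     * pass 1 writes bytes 8..15 */
                    scratch[j][row] = src[row * stride + j];
            }
        }
    }

The asm performs the same split with unpack/SBUTTERFLY stages instead of scalar loops, juggling a couple of the eight lines through the extra memory operands that TRANSPOSE8x8B takes as arguments.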
Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9dsp_init.c | 4 +- libavcodec/x86/vp9lpf.asm | 191 ++++++++++++++++++++++++++++------- 2 files changed, 154 insertions(+), 41 deletions(-) diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c index 76ea48f457..3b9e1bb0ca 100644 --- a/libavcodec/x86/vp9dsp_init.c +++ b/libavcodec/x86/vp9dsp_init.c @@ -279,9 +279,7 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) init_subpel2(4, idx, 4, type, opt) #define init_lpf(opt) do { \ - if (ARCH_X86_64) { \ - dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_##opt; \ - } \ + dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_##opt; \ dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_##opt; \ dsp->loop_filter_mix2[0][0][0] = ff_vp9_loop_filter_h_44_16_##opt; \ dsp->loop_filter_mix2[0][0][1] = ff_vp9_loop_filter_v_44_16_##opt; \ diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index c20eeb89bf..54f20fe090 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -291,6 +291,30 @@ SECTION .text SWAP %12, %14 %endmacro +%macro TRANSPOSE8x8B 13 + SBUTTERFLY bw, %1, %2, %7 + movdq%10 m%7, %9 + movdqa %11, m%2 + SBUTTERFLY bw, %3, %4, %2 + SBUTTERFLY bw, %5, %6, %2 + SBUTTERFLY bw, %7, %8, %2 + SBUTTERFLY wd, %1, %3, %2 + movdqa m%2, %11 + movdqa %11, m%3 + SBUTTERFLY wd, %2, %4, %3 + SBUTTERFLY wd, %5, %7, %3 + SBUTTERFLY wd, %6, %8, %3 + SBUTTERFLY dq, %1, %5, %3 + SBUTTERFLY dq, %2, %6, %3 + movdqa m%3, %11 + movh %12, m%2 + movhps %13, m%2 + SBUTTERFLY dq, %3, %7, %2 + SBUTTERFLY dq, %4, %8, %2 + SWAP %2, %5 + SWAP %4, %7 +%endmacro + %macro DEFINE_REAL_P7_TO_Q7 0-1 0 %define P7 dstq + 4*mstrideq + %1 %define P6 dstq + mstride3q + %1 @@ -310,6 +334,25 @@ SECTION .text %define Q7 dst2q + stride3q + %1 %endmacro +%macro DEFINE_TRANSPOSED_P7_TO_Q7 0-1 0 +%define P3 rsp + 0 + %1 +%define P2 rsp + 16 + %1 +%define P1 rsp + 32 + %1 +%define P0 rsp + 48 + %1 +%define Q0 rsp + 64 + %1 +%define Q1 rsp + 80 + %1 +%define Q2 rsp + 96 + %1 +%define Q3 rsp + 112 + %1 +%define P7 rsp + 128 + %1 +%define P6 rsp + 144 + %1 +%define P5 rsp + 160 + %1 +%define P4 rsp + 176 + %1 +%define Q4 rsp + 192 + %1 +%define Q5 rsp + 208 + %1 +%define Q6 rsp + 224 + %1 +%define Q7 rsp + 240 + %1 +%endmacro + ; ..............AB -> AAAAAAAABBBBBBBB %macro SPLATB_MIX 1-2 [mask_mix] %if cpuflag(ssse3) @@ -364,7 +407,9 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, movx m3, [P4] movx m4, [P3] movx m5, [P2] +%if ARCH_X86_64 || %2 != 16 movx m6, [P1] +%endif movx m7, [P0] %if ARCH_X86_64 movx m8, [Q0] @@ -375,21 +420,14 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, movx m13, [Q5] movx m14, [Q6] movx m15, [Q7] + DEFINE_TRANSPOSED_P7_TO_Q7 %if %2 == 16 TRANSPOSE16x16B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, [rsp] -%define P7 rsp + 128 -%define P6 rsp + 144 -%define P5 rsp + 160 -%define P4 rsp + 176 -%define Q4 rsp + 192 -%define Q5 rsp + 208 -%define Q6 rsp + 224 -%define Q7 rsp + 240 mova [P7], m0 mova [P6], m1 mova [P5], m2 mova [P4], m3 -%else +%else ; %2 == 44/48/84/88 ; 8x16 transpose punpcklbw m0, m1 punpcklbw m2, m3 @@ -407,8 +445,65 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, SWAP 10, 9 SWAP 12, 10 SWAP 14, 11 -%endif +%endif ; %2 + mova [P3], m4 + mova [P2], m5 + mova [P1], m6 + mova [P0], m7 + mova [Q0], m8 + mova [Q1], m9 + mova [Q2], m10 + mova [Q3], m11 +%if %2 == 16 + mova [Q4], m12 + mova [Q5], m13 + mova [Q6], m14 + mova [Q7], m15 +%endif 
; %2 %else ; x86-32 +%if %2 == 16 + TRANSPOSE8x8B 0, 1, 2, 3, 4, 5, 6, 7, [P1], u, [rsp+%3+%4], [rsp+64], [rsp+80] + DEFINE_TRANSPOSED_P7_TO_Q7 + movh [P7], m0 + movh [P5], m1 + movh [P3], m2 + movh [P1], m3 + movh [Q2], m5 + movh [Q4], m6 + movh [Q6], m7 + movhps [P6], m0 + movhps [P4], m1 + movhps [P2], m2 + movhps [P0], m3 + movhps [Q3], m5 + movhps [Q5], m6 + movhps [Q7], m7 + DEFINE_REAL_P7_TO_Q7 + movx m0, [Q0] + movx m1, [Q1] + movx m2, [Q2] + movx m3, [Q3] + movx m4, [Q4] + movx m5, [Q5] + movx m7, [Q7] + TRANSPOSE8x8B 0, 1, 2, 3, 4, 5, 6, 7, [Q6], u, [rsp+%3+%4], [rsp+72], [rsp+88] + DEFINE_TRANSPOSED_P7_TO_Q7 8 + movh [P7], m0 + movh [P5], m1 + movh [P3], m2 + movh [P1], m3 + movh [Q2], m5 + movh [Q4], m6 + movh [Q6], m7 + movhps [P6], m0 + movhps [P4], m1 + movhps [P2], m2 + movhps [P0], m3 + movhps [Q3], m5 + movhps [Q5], m6 + movhps [Q7], m7 + DEFINE_TRANSPOSED_P7_TO_Q7 +%else ; %2 == 44/48/84/88 punpcklbw m0, m1 punpcklbw m2, m3 punpcklbw m4, m5 @@ -426,31 +521,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, movx m3, [Q6] movx m7, [Q7] punpcklbw m3, m7 -%endif -%define P3 rsp + 0 -%define P2 rsp + 16 -%define P1 rsp + 32 -%define P0 rsp + 48 -%define Q0 rsp + 64 -%define Q1 rsp + 80 -%define Q2 rsp + 96 -%define Q3 rsp + 112 -%if ARCH_X86_64 - mova [P3], m4 - mova [P2], m5 - mova [P1], m6 - mova [P0], m7 - mova [Q0], m8 - mova [Q1], m9 - mova [Q2], m10 - mova [Q3], m11 -%if %2 == 16 - mova [Q4], m12 - mova [Q5], m13 - mova [Q6], m14 - mova [Q7], m15 -%endif -%else ; x86-32 + DEFINE_TRANSPOSED_P7_TO_Q7 TRANSPOSE8x8W 0, 2, 4, 6, 1, 5, 7, 3, [rsp], [Q0], 1 mova [P3], m0 mova [P2], m2 @@ -459,7 +530,8 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, mova [Q1], m5 mova [Q2], m7 mova [Q3], m3 -%endif +%endif ; %2 +%endif ; x86-32/64 %endif ; %1 == h ; calc fm mask @@ -864,8 +936,11 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, mova m3, [P4] mova m4, [P3] mova m5, [P2] +%if ARCH_X86_64 mova m6, [P1] +%endif mova m7, [P0] +%if ARCH_X86_64 mova m8, [Q0] mova m9, [Q1] mova m10, [Q2] @@ -892,6 +967,48 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, movu [Q5], m13 movu [Q6], m14 movu [Q7], m15 +%else + DEFINE_REAL_P7_TO_Q7 + TRANSPOSE8x8B 0, 1, 2, 3, 4, 5, 6, 7, [rsp+32], a, [rsp+%3+%4], [Q0], [Q1] + movh [P7], m0 + movh [P5], m1 + movh [P3], m2 + movh [P1], m3 + movh [Q2], m5 + movh [Q4], m6 + movh [Q6], m7 + movhps [P6], m0 + movhps [P4], m1 + movhps [P2], m2 + movhps [P0], m3 + movhps [Q3], m5 + movhps [Q5], m6 + movhps [Q7], m7 + DEFINE_TRANSPOSED_P7_TO_Q7 + mova m0, [Q0] + mova m1, [Q1] + mova m2, [Q2] + mova m3, [Q3] + mova m4, [Q4] + mova m5, [Q5] + mova m7, [Q7] + DEFINE_REAL_P7_TO_Q7 8 + TRANSPOSE8x8B 0, 1, 2, 3, 4, 5, 6, 7, [rsp+224], a, [rsp+%3+%4], [Q0], [Q1] + movh [P7], m0 + movh [P5], m1 + movh [P3], m2 + movh [P1], m3 + movh [Q2], m5 + movh [Q4], m6 + movh [Q6], m7 + movhps [P6], m0 + movhps [P4], m1 + movhps [P2], m2 + movhps [P0], m3 + movhps [Q3], m5 + movhps [Q5], m6 + movhps [Q7], m7 +%endif %elif %2 == 44 SWAP 0, 1 ; m0 = p1 SWAP 1, 7 ; m1 = p0 @@ -1007,9 +1124,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, %macro LPF_16_VH 5 INIT_XMM %5 LOOPFILTER v, %1, %2, 0, %4 -%if ARCH_X86_64 || %1 != 16 LOOPFILTER h, %1, %2, %3, %4 -%endif %endmacro %macro LPF_16_VH_ALL_OPTS 4