From 5cbeefc79eea34aa856c6e4e728ae8a0fc247eed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martin=20Storsj=C3=B6?= Date: Fri, 22 Mar 2024 13:41:45 +0200 Subject: [PATCH] aarch64: hevc: Produce plain neon versions of qpel_hv MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As the plain neon qpel_h functions process two rows at a time, we need to allocate storage for h+8 rows instead of h+7. Because storage is allocated for h+8 rows, incrementing the stack pointer by a fixed amount at the end would not restore it to the right position. Instead, store the intended final stack pointer value in register x14, which we save on the stack. AWS Graviton 3: put_hevc_qpel_hv4_8_c: 386.0 put_hevc_qpel_hv4_8_neon: 125.7 put_hevc_qpel_hv4_8_i8mm: 83.2 put_hevc_qpel_hv6_8_c: 749.0 put_hevc_qpel_hv6_8_neon: 207.0 put_hevc_qpel_hv6_8_i8mm: 166.0 put_hevc_qpel_hv8_8_c: 1305.2 put_hevc_qpel_hv8_8_neon: 216.5 put_hevc_qpel_hv8_8_i8mm: 213.0 put_hevc_qpel_hv12_8_c: 2570.5 put_hevc_qpel_hv12_8_neon: 480.0 put_hevc_qpel_hv12_8_i8mm: 398.2 put_hevc_qpel_hv16_8_c: 4158.7 put_hevc_qpel_hv16_8_neon: 659.7 put_hevc_qpel_hv16_8_i8mm: 593.5 put_hevc_qpel_hv24_8_c: 8626.7 put_hevc_qpel_hv24_8_neon: 1653.5 put_hevc_qpel_hv24_8_i8mm: 1398.7 put_hevc_qpel_hv32_8_c: 14646.0 put_hevc_qpel_hv32_8_neon: 2566.2 put_hevc_qpel_hv32_8_i8mm: 2287.5 put_hevc_qpel_hv48_8_c: 31072.5 put_hevc_qpel_hv48_8_neon: 6228.5 put_hevc_qpel_hv48_8_i8mm: 5291.0 put_hevc_qpel_hv64_8_c: 53847.2 put_hevc_qpel_hv64_8_neon: 9856.7 put_hevc_qpel_hv64_8_i8mm: 8831.0 Signed-off-by: Martin Storsjö --- libavcodec/aarch64/hevcdsp_init_aarch64.c | 6 + libavcodec/aarch64/hevcdsp_qpel_neon.S | 166 +++++++++++++--------- 2 files changed, 104 insertions(+), 68 deletions(-) diff --git a/libavcodec/aarch64/hevcdsp_init_aarch64.c b/libavcodec/aarch64/hevcdsp_init_aarch64.c index ea0d26c019..105c26017b 100644 --- a/libavcodec/aarch64/hevcdsp_init_aarch64.c +++ b/libavcodec/aarch64/hevcdsp_init_aarch64.c @@ -265,6 +265,10 @@ NEON8_FNPROTO(qpel_v,
(int16_t *dst, const uint8_t *src, ptrdiff_t srcstride, int height, intptr_t mx, intptr_t my, int width),); +NEON8_FNPROTO(qpel_hv, (int16_t *dst, + const uint8_t *src, ptrdiff_t srcstride, + int height, intptr_t mx, intptr_t my, int width),); + NEON8_FNPROTO(qpel_hv, (int16_t *dst, const uint8_t *src, ptrdiff_t srcstride, int height, intptr_t mx, intptr_t my, int width), _i8mm); @@ -436,6 +440,8 @@ av_cold void ff_hevc_dsp_init_aarch64(HEVCDSPContext *c, const int bit_depth) NEON8_FNASSIGN_SHARED_32(c->put_hevc_qpel_uni_w, 0, 1, qpel_uni_w_h,); + NEON8_FNASSIGN(c->put_hevc_qpel, 1, 1, qpel_hv,); + if (have_i8mm(cpu_flags)) { NEON8_FNASSIGN(c->put_hevc_epel, 0, 1, epel_h, _i8mm); NEON8_FNASSIGN(c->put_hevc_epel, 1, 1, epel_hv, _i8mm); diff --git a/libavcodec/aarch64/hevcdsp_qpel_neon.S b/libavcodec/aarch64/hevcdsp_qpel_neon.S index ad568e415b..7bffb991a7 100644 --- a/libavcodec/aarch64/hevcdsp_qpel_neon.S +++ b/libavcodec/aarch64/hevcdsp_qpel_neon.S @@ -3804,7 +3804,8 @@ function hevc_put_hevc_qpel_hv4_8_end_neon .endm 1: calc_all .purgem calc -2: ret +2: mov sp, x14 + ret endfunc function hevc_put_hevc_qpel_hv6_8_end_neon @@ -3831,7 +3832,8 @@ function hevc_put_hevc_qpel_hv6_8_end_neon .endm 1: calc_all .purgem calc -2: ret +2: mov sp, x14 + ret endfunc function hevc_put_hevc_qpel_hv8_8_end_neon @@ -3857,7 +3859,8 @@ function hevc_put_hevc_qpel_hv8_8_end_neon .endm 1: calc_all .purgem calc -2: ret +2: mov sp, x14 + ret endfunc function hevc_put_hevc_qpel_hv12_8_end_neon @@ -3882,7 +3885,8 @@ function hevc_put_hevc_qpel_hv12_8_end_neon .endm 1: calc_all2 .purgem calc -2: ret +2: mov sp, x14 + ret endfunc function hevc_put_hevc_qpel_hv16_8_end_neon @@ -3906,7 +3910,8 @@ function hevc_put_hevc_qpel_hv16_8_end_neon .endm 1: calc_all2 .purgem calc -2: ret +2: mov sp, x14 + ret endfunc function hevc_put_hevc_qpel_hv32_8_end_neon @@ -3937,162 +3942,187 @@ function hevc_put_hevc_qpel_hv32_8_end_neon add sp, sp, #32 subs w6, w6, #16 b.hi 0b - add w10, w3, #6 - add sp, sp, 
#64 // discard rest of first line - lsl x10, x10, #7 - add sp, sp, x10 // tmp_array without first line + mov sp, x14 ret endfunc -#if HAVE_I8MM -ENABLE_I8MM -function ff_hevc_put_hevc_qpel_hv4_8_neon_i8mm, export=1 - add w10, w3, #7 +.macro qpel_hv suffix +function ff_hevc_put_hevc_qpel_hv4_8_\suffix, export=1 + add w10, w3, #8 mov x7, #128 lsl x10, x10, #7 + mov x14, sp sub sp, sp, x10 // tmp_array - stp x5, x30, [sp, #-32]! - stp x0, x3, [sp, #16] - add x0, sp, #32 + stp x5, x30, [sp, #-48]! + stp x0, x3, [sp, #16] + str x14, [sp, #32] + add x0, sp, #48 sub x1, x1, x2, lsl #1 add x3, x3, #7 sub x1, x1, x2 - bl X(ff_hevc_put_hevc_qpel_h4_8_neon_i8mm) - ldp x0, x3, [sp, #16] - ldp x5, x30, [sp], #32 + bl X(ff_hevc_put_hevc_qpel_h4_8_\suffix) + ldr x14, [sp, #32] + ldp x0, x3, [sp, #16] + ldp x5, x30, [sp], #48 b hevc_put_hevc_qpel_hv4_8_end_neon endfunc -function ff_hevc_put_hevc_qpel_hv6_8_neon_i8mm, export=1 - add w10, w3, #7 +function ff_hevc_put_hevc_qpel_hv6_8_\suffix, export=1 + add w10, w3, #8 mov x7, #128 lsl x10, x10, #7 + mov x14, sp sub sp, sp, x10 // tmp_array - stp x5, x30, [sp, #-32]! - stp x0, x3, [sp, #16] - add x0, sp, #32 + stp x5, x30, [sp, #-48]! + stp x0, x3, [sp, #16] + str x14, [sp, #32] + add x0, sp, #48 sub x1, x1, x2, lsl #1 add x3, x3, #7 sub x1, x1, x2 - bl X(ff_hevc_put_hevc_qpel_h6_8_neon_i8mm) - ldp x0, x3, [sp, #16] - ldp x5, x30, [sp], #32 + bl X(ff_hevc_put_hevc_qpel_h6_8_\suffix) + ldr x14, [sp, #32] + ldp x0, x3, [sp, #16] + ldp x5, x30, [sp], #48 b hevc_put_hevc_qpel_hv6_8_end_neon endfunc -function ff_hevc_put_hevc_qpel_hv8_8_neon_i8mm, export=1 - add w10, w3, #7 +function ff_hevc_put_hevc_qpel_hv8_8_\suffix, export=1 + add w10, w3, #8 lsl x10, x10, #7 sub x1, x1, x2, lsl #1 + mov x14, sp sub sp, sp, x10 // tmp_array - stp x5, x30, [sp, #-32]! - stp x0, x3, [sp, #16] - add x0, sp, #32 + stp x5, x30, [sp, #-48]! 
+ stp x0, x3, [sp, #16] + str x14, [sp, #32] + add x0, sp, #48 add x3, x3, #7 sub x1, x1, x2 - bl X(ff_hevc_put_hevc_qpel_h8_8_neon_i8mm) - ldp x0, x3, [sp, #16] - ldp x5, x30, [sp], #32 + bl X(ff_hevc_put_hevc_qpel_h8_8_\suffix) + ldr x14, [sp, #32] + ldp x0, x3, [sp, #16] + ldp x5, x30, [sp], #48 b hevc_put_hevc_qpel_hv8_8_end_neon endfunc -function ff_hevc_put_hevc_qpel_hv12_8_neon_i8mm, export=1 - add w10, w3, #7 +function ff_hevc_put_hevc_qpel_hv12_8_\suffix, export=1 + add w10, w3, #8 lsl x10, x10, #7 sub x1, x1, x2, lsl #1 + mov x14, sp sub sp, sp, x10 // tmp_array - stp x5, x30, [sp, #-32]! - stp x0, x3, [sp, #16] - add x0, sp, #32 + stp x5, x30, [sp, #-48]! + stp x0, x3, [sp, #16] + str x14, [sp, #32] + add x0, sp, #48 add x3, x3, #7 sub x1, x1, x2 - bl X(ff_hevc_put_hevc_qpel_h12_8_neon_i8mm) - ldp x0, x3, [sp, #16] - ldp x5, x30, [sp], #32 + mov w6, #12 + bl X(ff_hevc_put_hevc_qpel_h12_8_\suffix) + ldr x14, [sp, #32] + ldp x0, x3, [sp, #16] + ldp x5, x30, [sp], #48 b hevc_put_hevc_qpel_hv12_8_end_neon endfunc -function ff_hevc_put_hevc_qpel_hv16_8_neon_i8mm, export=1 - add w10, w3, #7 +function ff_hevc_put_hevc_qpel_hv16_8_\suffix, export=1 + add w10, w3, #8 lsl x10, x10, #7 sub x1, x1, x2, lsl #1 + mov x14, sp sub sp, sp, x10 // tmp_array - stp x5, x30, [sp, #-32]! - stp x0, x3, [sp, #16] + stp x5, x30, [sp, #-48]! + stp x0, x3, [sp, #16] + str x14, [sp, #32] add x3, x3, #7 - add x0, sp, #32 + add x0, sp, #48 sub x1, x1, x2 - bl X(ff_hevc_put_hevc_qpel_h16_8_neon_i8mm) - ldp x0, x3, [sp, #16] - ldp x5, x30, [sp], #32 + bl X(ff_hevc_put_hevc_qpel_h16_8_\suffix) + ldr x14, [sp, #32] + ldp x0, x3, [sp, #16] + ldp x5, x30, [sp], #48 b hevc_put_hevc_qpel_hv16_8_end_neon endfunc -function ff_hevc_put_hevc_qpel_hv24_8_neon_i8mm, export=1 +function ff_hevc_put_hevc_qpel_hv24_8_\suffix, export=1 stp x4, x5, [sp, #-64]! 
stp x2, x3, [sp, #16] stp x0, x1, [sp, #32] str x30, [sp, #48] - bl X(ff_hevc_put_hevc_qpel_hv12_8_neon_i8mm) + bl X(ff_hevc_put_hevc_qpel_hv12_8_\suffix) ldp x0, x1, [sp, #32] ldp x2, x3, [sp, #16] ldp x4, x5, [sp], #48 add x1, x1, #12 add x0, x0, #24 - bl X(ff_hevc_put_hevc_qpel_hv12_8_neon_i8mm) + bl X(ff_hevc_put_hevc_qpel_hv12_8_\suffix) ldr x30, [sp], #16 ret endfunc -function ff_hevc_put_hevc_qpel_hv32_8_neon_i8mm, export=1 - add w10, w3, #7 +function ff_hevc_put_hevc_qpel_hv32_8_\suffix, export=1 + add w10, w3, #8 sub x1, x1, x2, lsl #1 lsl x10, x10, #7 sub x1, x1, x2 + mov x14, sp sub sp, sp, x10 // tmp_array - stp x5, x30, [sp, #-32]! - stp x0, x3, [sp, #16] + stp x5, x30, [sp, #-48]! + stp x0, x3, [sp, #16] + str x14, [sp, #32] add x3, x3, #7 - add x0, sp, #32 - bl X(ff_hevc_put_hevc_qpel_h32_8_neon_i8mm) - ldp x0, x3, [sp, #16] - ldp x5, x30, [sp], #32 + add x0, sp, #48 + mov w6, #32 + bl X(ff_hevc_put_hevc_qpel_h32_8_\suffix) + ldr x14, [sp, #32] + ldp x0, x3, [sp, #16] + ldp x5, x30, [sp], #48 b hevc_put_hevc_qpel_hv32_8_end_neon endfunc -function ff_hevc_put_hevc_qpel_hv48_8_neon_i8mm, export=1 +function ff_hevc_put_hevc_qpel_hv48_8_\suffix, export=1 stp x4, x5, [sp, #-64]! stp x2, x3, [sp, #16] stp x0, x1, [sp, #32] str x30, [sp, #48] - bl X(ff_hevc_put_hevc_qpel_hv24_8_neon_i8mm) + bl X(ff_hevc_put_hevc_qpel_hv24_8_\suffix) ldp x0, x1, [sp, #32] ldp x2, x3, [sp, #16] ldp x4, x5, [sp], #48 add x1, x1, #24 add x0, x0, #48 - bl X(ff_hevc_put_hevc_qpel_hv24_8_neon_i8mm) + bl X(ff_hevc_put_hevc_qpel_hv24_8_\suffix) ldr x30, [sp], #16 ret endfunc -function ff_hevc_put_hevc_qpel_hv64_8_neon_i8mm, export=1 +function ff_hevc_put_hevc_qpel_hv64_8_\suffix, export=1 stp x4, x5, [sp, #-64]! 
stp x2, x3, [sp, #16] stp x0, x1, [sp, #32] str x30, [sp, #48] mov x6, #32 - bl X(ff_hevc_put_hevc_qpel_hv32_8_neon_i8mm) + bl X(ff_hevc_put_hevc_qpel_hv32_8_\suffix) ldp x0, x1, [sp, #32] ldp x2, x3, [sp, #16] ldp x4, x5, [sp], #48 add x1, x1, #32 add x0, x0, #64 mov x6, #32 - bl X(ff_hevc_put_hevc_qpel_hv32_8_neon_i8mm) + bl X(ff_hevc_put_hevc_qpel_hv32_8_\suffix) ldr x30, [sp], #16 ret endfunc +.endm + +qpel_hv neon + +#if HAVE_I8MM +ENABLE_I8MM + +qpel_hv neon_i8mm + DISABLE_I8MM #endif