lavc/aarch64: add hevc sao edge 16x16

bench on AWS Graviton:

hevc_sao_edge_16x16_8_c: 1857.0
hevc_sao_edge_16x16_8_neon: 211.0
hevc_sao_edge_32x32_8_c: 7802.2
hevc_sao_edge_32x32_8_neon: 808.2
hevc_sao_edge_48x48_8_c: 16764.2
hevc_sao_edge_48x48_8_neon: 1796.5
hevc_sao_edge_64x64_8_c: 32647.5
hevc_sao_edge_64x64_8_neon: 3118.5

Signed-off-by: J. Dekker <jdek@itanimul.li>
commit 92f67e4017
parent d957ee34a6

 libavcodec/aarch64/hevcdsp_init_aarch64.c |  8
 libavcodec/aarch64/hevcdsp_sao_neon.S     | 66

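For context, the NEON path above measures roughly 8.8x faster than the C reference at 16x16 and about 10.5x at 64x64. The scalar algorithm it mirrors is sketched below; this is an illustrative reference for reading the assembly, not the FFmpeg C template itself, so the function name, clip helper and SAO_STRIDE_SRC macro are invented here. It assumes 8-bit samples and the fixed 192-byte source line stride that the assembly also relies on.

#include <stddef.h>
#include <stdint.h>

#define SAO_STRIDE_SRC 192   /* padded source line size assumed by the asm */

static uint8_t clip_u8(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

/* sign(a - b) as -1, 0 or +1 */
static int sao_cmp(int a, int b)
{
    return (a > b) - (a < b);
}

/* src must point into a padded buffer so the neighbour reads stay in bounds */
static void sao_edge_filter_8_ref(uint8_t *dst, const uint8_t *src,
                                  ptrdiff_t stride_dst,
                                  const int16_t *sao_offset_val,
                                  int eo, int width, int height)
{
    /* byte offset to one neighbour per edge-offset class; the opposite
     * neighbour is its mirror image (same values as .Lsao_edge_pos) */
    static const int pos[4] = {
        1,                    /* horizontal */
        SAO_STRIDE_SRC,       /* vertical   */
        SAO_STRIDE_SRC + 1,   /* 45 degree  */
        SAO_STRIDE_SRC - 1,   /* 135 degree */
    };
    /* HEVC mapping from (diff0 + diff1 + 2) to an offset index; the NEON
     * code pre-permutes sao_offset_val into this order instead */
    static const uint8_t edge_idx[5] = { 1, 2, 0, 3, 4 };

    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            int cur   = src[x];
            int diff0 = sao_cmp(cur, src[x - pos[eo]]);  /* vs. previous neighbour */
            int diff1 = sao_cmp(cur, src[x + pos[eo]]);  /* vs. next neighbour     */
            dst[x] = clip_u8(cur + sao_offset_val[edge_idx[diff0 + diff1 + 2]]);
        }
        src += SAO_STRIDE_SRC;
        dst += stride_dst;
    }
}

The four edge-offset classes differ only in which neighbour pair is compared, which is what the .Lsao_edge_pos table in the new assembly encodes as byte offsets.
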
libavcodec/aarch64/hevcdsp_init_aarch64.c
@@ -57,8 +57,8 @@ void ff_hevc_sao_band_filter_8x8_8_neon(uint8_t *_dst, uint8_t *_src,
ptrdiff_t stride_dst, ptrdiff_t stride_src,
int16_t *sao_offset_val, int sao_left_class,
int width, int height);
void ff_hevc_sao_edge_filter_16x16_8_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride_dst,
int16_t *sao_offset_val, int eo, int width, int height);
av_cold void ff_hevc_dsp_init_aarch64(HEVCDSPContext *c, const int bit_depth)
{
@@ -80,6 +80,10 @@ av_cold void ff_hevc_dsp_init_aarch64(HEVCDSPContext *c, const int bit_depth)
c->sao_band_filter[2] =
c->sao_band_filter[3] =
c->sao_band_filter[4] = ff_hevc_sao_band_filter_8x8_8_neon;
c->sao_edge_filter[1] =
c->sao_edge_filter[2] =
c->sao_edge_filter[3] =
c->sao_edge_filter[4] = ff_hevc_sao_edge_filter_16x16_8_neon;
}
if (bit_depth == 10) {
c->add_residual[0] = ff_hevc_add_residual_4x4_10_neon;

libavcodec/aarch64/hevcdsp_sao_neon.S
@@ -74,3 +74,69 @@ function ff_hevc_sao_band_filter_8x8_8_neon, export=1
bne 1b
ret
endfunc
// ASSUMES STRIDE_SRC = 192
.Lsao_edge_pos:
.word 1 // horizontal
.word 192 // vertical
.word 192 + 1 // 45 degree
.word 192 - 1 // 135 degree
// ff_hevc_sao_edge_filter_16x16_8_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride_dst,
// int16_t *sao_offset_val, int eo, int width, int height)
function ff_hevc_sao_edge_filter_16x16_8_neon, export=1
adr x7, .Lsao_edge_pos
ld1 {v3.8h}, [x3] // load sao_offset_val
add w5, w5, #0xF // round width up to a multiple of 16
bic w5, w5, #0xF
ldr w4, [x7, w4, uxtw #2] // sao_edge_pos[eo]
mov v3.h[7], v3.h[0] // reorder to [1,2,0,3,4]
mov v3.h[0], v3.h[1]
mov v3.h[1], v3.h[2]
mov v3.h[2], v3.h[7]
// split 16bit values into two tables
uzp2 v1.16b, v3.16b, v3.16b // sao_offset_val -> upper
uzp1 v0.16b, v3.16b, v3.16b // sao_offset_val -> lower
movi v2.16b, #2
mov x15, #192
// strides between end of line and next src/dst
sub x15, x15, x5 // stride_src - width
sub x16, x2, x5 // stride_dst - width
mov x11, x1 // copy base src
1: // new line
mov x14, x5 // copy width
sub x12, x11, x4 // src_a (prev) = src - sao_edge_pos
add x13, x11, x4 // src_b (next) = src + sao_edge_pos
2: // process 16 bytes
ld1 {v3.16b}, [x11], #16 // load src
ld1 {v4.16b}, [x12], #16 // load src_a (prev)
ld1 {v5.16b}, [x13], #16 // load src_b (next)
cmhi v16.16b, v4.16b, v3.16b // (prev > cur)
cmhi v17.16b, v3.16b, v4.16b // (cur > prev)
cmhi v18.16b, v5.16b, v3.16b // (next > cur)
cmhi v19.16b, v3.16b, v5.16b // (cur > next)
sub v20.16b, v16.16b, v17.16b // diff0 = CMP(cur, prev) = (cur > prev) - (cur < prev)
sub v21.16b, v18.16b, v19.16b // diff1 = CMP(cur, next) = (cur > next) - (cur < next)
add v20.16b, v20.16b, v21.16b // diff = diff0 + diff1
add v20.16b, v20.16b, v2.16b // offset_val = diff + 2
tbl v16.16b, {v0.16b}, v20.16b
tbl v17.16b, {v1.16b}, v20.16b
uxtl v20.8h, v3.8b // src[0:7]
uxtl2 v21.8h, v3.16b // src[8:15]
zip1 v18.16b, v16.16b, v17.16b // sao_offset_val for src[0:7]
zip2 v19.16b, v16.16b, v17.16b // sao_offset_val for src[8:15]
sqadd v20.8h, v18.8h, v20.8h // + sao_offset_val
sqadd v21.8h, v19.8h, v21.8h
sqxtun v3.8b, v20.8h
sqxtun2 v3.16b, v21.8h
st1 {v3.16b}, [x0], #16
subs x14, x14, #16 // filtered 16 bytes
b.ne 2b // do we have width to filter?
// no width to filter, setup next line
add x11, x11, x15 // stride src to next line
add x0, x0, x16 // stride dst to next line
subs w6, w6, #1 // filtered line
b.ne 1b // do we have lines to process?
// no lines to filter
ret
endfunc
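
Relative to the scalar sketch above: the v3.h[] shuffle at the top of the function pre-permutes the five sao_offset_val entries into edge_idx order [1,2,0,3,4], so the per-pixel lookup becomes a single tbl on diff + 2. The uzp1/uzp2 pair splits the 16-bit offsets into separate low-byte and high-byte tables because tbl indexes bytes, and zip1/zip2 re-interleave the looked-up bytes into 16-bit offsets before the saturating add. Width is rounded up to a multiple of 16 so the inner loop only ever handles whole 16-byte blocks, and the init code points all four sao_edge_filter slots at this one routine, which covers the larger block sizes simply by iterating over width in 16-byte chunks and over height one row at a time.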
