lavc/pixblockdsp: aligned R-V V 8-bit functions

If the scan lines are aligned, we can load each row as a single 64-bit
value, thus avoiding a segmented load. The 8-to-16-bit conversion, or the
subtraction, can then be factored into one widening vector operation over
the whole block.

In principle, the same optimisation should be possible for high bit depth,
but it would require 128-bit elements, for which no FFmpeg CPU flag exists.
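
As a reference for what the aligned 8-bit path computes, here is a minimal
scalar C sketch (hypothetical helper name, not the FFmpeg implementation):
each of the eight rows is fetched as one aligned 64-bit word, which is what
the strided vlse64.v does, and all 64 bytes are then widened to 16-bit
coefficients in a single pass, which is what the lone vwcvtu.x.x.v does.
RISC-V is little-endian, so unpacking the word with byte shifts matches the
load's byte order.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Reference model only (hypothetical name): an 8x8 block of 8-bit pixels
 * widened to 16-bit coefficients, one 64-bit load per row. */
static void get_pixels_8_ref(int16_t *block, const uint8_t *pixels,
                             ptrdiff_t stride)
{
    for (int y = 0; y < 8; y++) {
        uint64_t row;

        memcpy(&row, pixels, sizeof(row)); /* one aligned 64-bit row load */
        for (int x = 0; x < 8; x++)        /* little-endian byte order */
            block[x] = (uint8_t)(row >> (8 * x));
        pixels += stride;
        block  += 8;
    }
}

The same structure, with a widening unsigned subtraction in place of the
conversion, models the new ff_diff_pixels_rvv; a matching sketch follows the
diff below.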
Branch: release/7.0
Author: Rémi Denis-Courmont
Parent: 722765687b
Commit: 300ee8b02d

Changed files:
  libavcodec/riscv/pixblockdsp_init.c | +11
  libavcodec/riscv/pixblockdsp_rvv.S  | +21
libavcodec/riscv/pixblockdsp_init.c:

@@ -32,10 +32,14 @@ void ff_get_pixels_8_rvi(int16_t *block, const uint8_t *pixels,
 void ff_get_pixels_16_rvi(int16_t *block, const uint8_t *pixels,
                           ptrdiff_t stride);
+void ff_get_pixels_8_rvv(int16_t *block, const uint8_t *pixels,
+                         ptrdiff_t stride);
 void ff_get_pixels_unaligned_8_rvv(int16_t *block, const uint8_t *pixels,
                                    ptrdiff_t stride);
 void ff_get_pixels_unaligned_16_rvv(int16_t *block, const uint8_t *pixels,
                                     ptrdiff_t stride);
+void ff_diff_pixels_rvv(int16_t *block, const uint8_t *s1,
+                        const uint8_t *s2, ptrdiff_t stride);
 void ff_diff_pixels_unaligned_rvv(int16_t *block, const uint8_t *s1,
                                   const uint8_t *s2, ptrdiff_t stride);
@@ -64,6 +68,13 @@ av_cold void ff_pixblockdsp_init_riscv(PixblockDSPContext *c,
         c->diff_pixels = ff_diff_pixels_unaligned_rvv;
         c->diff_pixels_unaligned = ff_diff_pixels_unaligned_rvv;
+
+        if (cpu_flags & AV_CPU_FLAG_RVV_I64) {
+            if (!high_bit_depth)
+                c->get_pixels = ff_get_pixels_8_rvv;
+
+            c->diff_pixels = ff_diff_pixels_rvv;
+        }
     }
 #endif
 }

libavcodec/riscv/pixblockdsp_rvv.S:

@@ -20,6 +20,16 @@
 #include "libavutil/riscv/asm.S"

+func ff_get_pixels_8_rvv, zve64x
+        vsetivli     zero, 8, e8, mf2, ta, ma
+        li           t0, 8 * 8
+        vlse64.v     v16, (a1), a2
+        vsetvli      zero, t0, e8, m4, ta, ma
+        vwcvtu.x.x.v v8, v16
+        vse16.v      v8, (a0)
+        ret
+endfunc
+
 func ff_get_pixels_unaligned_8_rvv, zve32x
         vsetivli    zero, 8, e8, mf2, ta, ma
         vlsseg8e8.v v16, (a1), a2
@@ -42,6 +52,17 @@ func ff_get_pixels_unaligned_16_rvv, zve32x
         ret
 endfunc

+func ff_diff_pixels_rvv, zve64x
+        vsetivli     zero, 8, e8, mf2, ta, ma
+        li           t0, 8 * 8
+        vlse64.v     v16, (a1), a3
+        vlse64.v     v24, (a2), a3
+        vsetvli      zero, t0, e8, m4, ta, ma
+        vwsubu.vv    v8, v16, v24
+        vse16.v      v8, (a0)
+        ret
+endfunc
+
 func ff_diff_pixels_unaligned_rvv, zve32x
         vsetivli    zero, 8, e8, mf2, ta, ma
         vlsseg8e8.v v16, (a1), a3

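For completeness, a matching scalar sketch of ff_diff_pixels_rvv (again only
a reference model with a hypothetical name, not the FFmpeg code): both source
blocks are read one aligned 64-bit row at a time, the role of the two strided
vlse64.v loads, and the byte-wise differences are widened to 16 bits in one
vwsubu.vv pass. Because the 64-bit element loads need Zve64x, the init code
above gates this path behind AV_CPU_FLAG_RVV_I64.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Reference model only (hypothetical name): 16-bit differences of two
 * aligned 8x8 blocks of 8-bit pixels, one 64-bit load per row and source. */
static void diff_pixels_ref(int16_t *block, const uint8_t *s1,
                            const uint8_t *s2, ptrdiff_t stride)
{
    for (int y = 0; y < 8; y++) {
        uint64_t r1, r2;

        memcpy(&r1, s1, sizeof(r1));   /* one aligned 64-bit load per row */
        memcpy(&r2, s2, sizeof(r2));
        for (int x = 0; x < 8; x++)    /* little-endian byte order */
            block[x] = (uint8_t)(r1 >> (8 * x)) - (uint8_t)(r2 >> (8 * x));
        s1 += stride;
        s2 += stride;
        block += 8;
    }
}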