arm/aarch64: Improve scheduling in the avg form of h264_qpel

Don't use the loaded registers directly, avoiding stalls on in-order
cores. Use vrhadd.u8 with q registers where easily possible.

Signed-off-by: Martin Storsjö <martin@martin.st>
pull/370/head
Martin Storsjö 3 years ago
parent d04c005021
commit 2d5a7f6d00
  1. 60
      libavcodec/aarch64/h264qpel_neon.S
  2. 57
      libavcodec/arm/h264qpel_neon.S

@@ -169,8 +169,8 @@ function \type\()_h264_qpel8_h_lowpass_neon
lowpass_8 v28, v29, v16, v17, v28, v16
.ifc \type,avg
ld1 {v2.8B}, [x0], x3
urhadd v28.8B, v28.8B, v2.8B
ld1 {v3.8B}, [x0]
urhadd v28.8B, v28.8B, v2.8B
urhadd v16.8B, v16.8B, v3.8B
sub x0, x0, x3
.endif
@@ -210,8 +210,8 @@ function \type\()_h264_qpel8_h_lowpass_l2_neon
urhadd v27.8B, v27.8B, v29.8B
.ifc \type,avg
ld1 {v2.8B}, [x0], x2
urhadd v26.8B, v26.8B, v2.8B
ld1 {v3.8B}, [x0]
urhadd v26.8B, v26.8B, v2.8B
urhadd v27.8B, v27.8B, v3.8B
sub x0, x0, x2
.endif
@@ -281,20 +281,20 @@ function \type\()_h264_qpel8_v_lowpass_neon
.ifc \type,avg
ld1 {v24.8B}, [x0], x2
urhadd v16.8B, v16.8B, v24.8B
ld1 {v25.8B}, [x0], x2
urhadd v17.8B, v17.8B, v25.8B
ld1 {v26.8B}, [x0], x2
urhadd v18.8B, v18.8B, v26.8B
urhadd v16.8B, v16.8B, v24.8B
ld1 {v27.8B}, [x0], x2
urhadd v19.8B, v19.8B, v27.8B
urhadd v17.8B, v17.8B, v25.8B
ld1 {v28.8B}, [x0], x2
urhadd v20.8B, v20.8B, v28.8B
urhadd v18.8B, v18.8B, v26.8B
ld1 {v29.8B}, [x0], x2
urhadd v21.8B, v21.8B, v29.8B
urhadd v19.8B, v19.8B, v27.8B
ld1 {v30.8B}, [x0], x2
urhadd v22.8B, v22.8B, v30.8B
urhadd v20.8B, v20.8B, v28.8B
ld1 {v31.8B}, [x0], x2
urhadd v21.8B, v21.8B, v29.8B
urhadd v22.8B, v22.8B, v30.8B
urhadd v23.8B, v23.8B, v31.8B
sub x0, x0, x2, lsl #3
.endif
@@ -375,20 +375,20 @@ function \type\()_h264_qpel8_v_lowpass_l2_neon
.ifc \type,avg
ld1 {v24.8B}, [x0], x3
urhadd v16.8B, v16.8B, v24.8B
ld1 {v25.8B}, [x0], x3
urhadd v17.8B, v17.8B, v25.8B
ld1 {v26.8B}, [x0], x3
urhadd v18.8B, v18.8B, v26.8B
urhadd v16.8B, v16.8B, v24.8B
ld1 {v27.8B}, [x0], x3
urhadd v19.8B, v19.8B, v27.8B
urhadd v17.8B, v17.8B, v25.8B
ld1 {v28.8B}, [x0], x3
urhadd v20.8B, v20.8B, v28.8B
urhadd v18.8B, v18.8B, v26.8B
ld1 {v29.8B}, [x0], x3
urhadd v21.8B, v21.8B, v29.8B
urhadd v19.8B, v19.8B, v27.8B
ld1 {v30.8B}, [x0], x3
urhadd v22.8B, v22.8B, v30.8B
urhadd v20.8B, v20.8B, v28.8B
ld1 {v31.8B}, [x0], x3
urhadd v21.8B, v21.8B, v29.8B
urhadd v22.8B, v22.8B, v30.8B
urhadd v23.8B, v23.8B, v31.8B
sub x0, x0, x3, lsl #3
.endif
@@ -458,20 +458,20 @@ function \type\()_h264_qpel8_hv_lowpass_neon
bl put_h264_qpel8_hv_lowpass_neon_top
.ifc \type,avg
ld1 {v0.8B}, [x0], x2
urhadd v16.8B, v16.8B, v0.8B
ld1 {v1.8B}, [x0], x2
urhadd v17.8B, v17.8B, v1.8B
ld1 {v2.8B}, [x0], x2
urhadd v18.8B, v18.8B, v2.8B
urhadd v16.8B, v16.8B, v0.8B
ld1 {v3.8B}, [x0], x2
urhadd v19.8B, v19.8B, v3.8B
urhadd v17.8B, v17.8B, v1.8B
ld1 {v4.8B}, [x0], x2
urhadd v20.8B, v20.8B, v4.8B
urhadd v18.8B, v18.8B, v2.8B
ld1 {v5.8B}, [x0], x2
urhadd v21.8B, v21.8B, v5.8B
urhadd v19.8B, v19.8B, v3.8B
ld1 {v6.8B}, [x0], x2
urhadd v22.8B, v22.8B, v6.8B
urhadd v20.8B, v20.8B, v4.8B
ld1 {v7.8B}, [x0], x2
urhadd v21.8B, v21.8B, v5.8B
urhadd v22.8B, v22.8B, v6.8B
urhadd v23.8B, v23.8B, v7.8B
sub x0, x0, x2, lsl #3
.endif
@@ -511,20 +511,20 @@ function \type\()_h264_qpel8_hv_lowpass_l2_neon
urhadd v7.8B, v7.8B, v23.8B
.ifc \type,avg
ld1 {v16.8B}, [x0], x3
urhadd v0.8B, v0.8B, v16.8B
ld1 {v17.8B}, [x0], x3
urhadd v1.8B, v1.8B, v17.8B
ld1 {v18.8B}, [x0], x3
urhadd v2.8B, v2.8B, v18.8B
urhadd v0.8B, v0.8B, v16.8B
ld1 {v19.8B}, [x0], x3
urhadd v3.8B, v3.8B, v19.8B
urhadd v1.8B, v1.8B, v17.8B
ld1 {v20.8B}, [x0], x3
urhadd v4.8B, v4.8B, v20.8B
urhadd v2.8B, v2.8B, v18.8B
ld1 {v21.8B}, [x0], x3
urhadd v5.8B, v5.8B, v21.8B
urhadd v3.8B, v3.8B, v19.8B
ld1 {v22.8B}, [x0], x3
urhadd v6.8B, v6.8B, v22.8B
urhadd v4.8B, v4.8B, v20.8B
ld1 {v23.8B}, [x0], x3
urhadd v5.8B, v5.8B, v21.8B
urhadd v6.8B, v6.8B, v22.8B
urhadd v7.8B, v7.8B, v23.8B
sub x0, x0, x3, lsl #3
.endif

@@ -156,8 +156,8 @@ function \type\()_h264_qpel8_h_lowpass_neon
lowpass_8 d0, d1, d16, d17, d0, d16
.ifc \type,avg
vld1.8 {d2}, [r0,:64], r3
vrhadd.u8 d0, d0, d2
vld1.8 {d3}, [r0,:64]
vrhadd.u8 d0, d0, d2
vrhadd.u8 d16, d16, d3
sub r0, r0, r3
.endif
@@ -196,9 +196,8 @@ function \type\()_h264_qpel8_h_lowpass_l2_neon
vrhadd.u8 q0, q0, q14
.ifc \type,avg
vld1.8 {d2}, [r0,:64], r2
vrhadd.u8 d0, d0, d2
vld1.8 {d3}, [r0,:64]
vrhadd.u8 d1, d1, d3
vrhadd.u8 q0, q0, q1
sub r0, r0, r2
.endif
vst1.8 {d0}, [r0,:64], r2
@@ -266,20 +265,20 @@ function \type\()_h264_qpel8_v_lowpass_neon
.ifc \type,avg
vld1.8 {d9}, [r0,:64], r2
vrhadd.u8 d8, d8, d9
vld1.8 {d11}, [r0,:64], r2
vrhadd.u8 d10, d10, d11
vld1.8 {d13}, [r0,:64], r2
vrhadd.u8 d12, d12, d13
vrhadd.u8 d8, d8, d9
vld1.8 {d15}, [r0,:64], r2
vrhadd.u8 d14, d14, d15
vrhadd.u8 d10, d10, d11
vld1.8 {d23}, [r0,:64], r2
vrhadd.u8 d22, d22, d23
vrhadd.u8 d12, d12, d13
vld1.8 {d25}, [r0,:64], r2
vrhadd.u8 d24, d24, d25
vrhadd.u8 d14, d14, d15
vld1.8 {d27}, [r0,:64], r2
vrhadd.u8 d26, d26, d27
vrhadd.u8 d22, d22, d23
vld1.8 {d29}, [r0,:64], r2
vrhadd.u8 d24, d24, d25
vrhadd.u8 d26, d26, d27
vrhadd.u8 d28, d28, d29
sub r0, r0, r2, lsl #3
.endif
@@ -355,20 +354,20 @@ function \type\()_h264_qpel8_v_lowpass_l2_neon
.ifc \type,avg
vld1.8 {d16}, [r0,:64], r3
vrhadd.u8 d0, d0, d16
vld1.8 {d17}, [r0,:64], r3
vrhadd.u8 d1, d1, d17
vrhadd.u8 d0, d0, d16
vld1.8 {d16}, [r0,:64], r3
vrhadd.u8 d2, d2, d16
vrhadd.u8 d1, d1, d17
vld1.8 {d17}, [r0,:64], r3
vrhadd.u8 d3, d3, d17
vrhadd.u8 d2, d2, d16
vld1.8 {d16}, [r0,:64], r3
vrhadd.u8 d4, d4, d16
vrhadd.u8 d3, d3, d17
vld1.8 {d17}, [r0,:64], r3
vrhadd.u8 d5, d5, d17
vrhadd.u8 d4, d4, d16
vld1.8 {d16}, [r0,:64], r3
vrhadd.u8 d10, d10, d16
vrhadd.u8 d5, d5, d17
vld1.8 {d17}, [r0,:64], r3
vrhadd.u8 d10, d10, d16
vrhadd.u8 d11, d11, d17
sub r0, r0, r3, lsl #3
.endif
@@ -461,21 +460,17 @@ function \type\()_h264_qpel8_hv_lowpass_neon
bl put_h264_qpel8_hv_lowpass_neon_top
.ifc \type,avg
vld1.8 {d0}, [r0,:64], r2
vrhadd.u8 d12, d12, d0
vld1.8 {d1}, [r0,:64], r2
vrhadd.u8 d13, d13, d1
vld1.8 {d2}, [r0,:64], r2
vrhadd.u8 d14, d14, d2
vld1.8 {d3}, [r0,:64], r2
vrhadd.u8 d15, d15, d3
vrhadd.u8 q6, q6, q0
vld1.8 {d4}, [r0,:64], r2
vrhadd.u8 d8, d8, d4
vld1.8 {d5}, [r0,:64], r2
vrhadd.u8 d9, d9, d5
vrhadd.u8 q7, q7, q1
vld1.8 {d6}, [r0,:64], r2
vrhadd.u8 d10, d10, d6
vld1.8 {d7}, [r0,:64], r2
vrhadd.u8 d11, d11, d7
vrhadd.u8 q4, q4, q2
vrhadd.u8 q5, q5, q3
sub r0, r0, r2, lsl #3
.endif
@@ -511,21 +506,17 @@ function \type\()_h264_qpel8_hv_lowpass_l2_neon
vrhadd.u8 q3, q3, q5
.ifc \type,avg
vld1.8 {d16}, [r0,:64], r3
vrhadd.u8 d0, d0, d16
vld1.8 {d17}, [r0,:64], r3
vrhadd.u8 d1, d1, d17
vld1.8 {d18}, [r0,:64], r3
vrhadd.u8 d2, d2, d18
vld1.8 {d19}, [r0,:64], r3
vrhadd.u8 d3, d3, d19
vrhadd.u8 q0, q0, q8
vld1.8 {d20}, [r0,:64], r3
vrhadd.u8 d4, d4, d20
vld1.8 {d21}, [r0,:64], r3
vrhadd.u8 d5, d5, d21
vrhadd.u8 q1, q1, q9
vld1.8 {d22}, [r0,:64], r3
vrhadd.u8 d6, d6, d22
vld1.8 {d23}, [r0,:64], r3
vrhadd.u8 d7, d7, d23
vrhadd.u8 q2, q2, q10
vrhadd.u8 q3, q3, q11
sub r0, r0, r3, lsl #3
.endif
vst1.8 {d0}, [r0,:64], r3

Loading…
Cancel
Save