/*
 * VC1 NEON optimisations
 *
 * Copyright (c) 2010 Rob Clark <rob@ti.com>
 * Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"
#include "neon.S"
#include "config.h"
@ Transpose rows into columns of a matrix of 16-bit elements. For 4x4, pass
@ double-word registers, for 8x4, pass quad-word registers.
.macro transpose16 r0, r1, r2, r3
@ At this point:
@ row[0] r0
@ row[1] r1
@ row[2] r2
@ row[3] r3
        vtrn.16         \r0,  \r1               @ first and second row
        vtrn.16         \r2,  \r3               @ third and fourth row
        vtrn.32         \r0,  \r2               @ first and third row
        vtrn.32         \r1,  \r3               @ second and fourth row
@ At this point, if registers are quad-word:
@ column[0] d0
@ column[1] d2
@ column[2] d4
@ column[3] d6
@ column[4] d1
@ column[5] d3
@ column[6] d5
@ column[7] d7
@ At this point, if registers are double-word:
@ column[0] d0
@ column[1] d1
@ column[2] d2
@ column[3] d3
.endm
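@ A rough C model of what the vtrn-based transpose above does (illustration
@ only, not part of the build; the function names are made up for this sketch):
@
@   #include <stdint.h>
@
@   /* vtrn.16: swap the odd 16-bit lanes of a with the even lanes of b */
@   static void vtrn16(int16_t a[4], int16_t b[4])
@   {
@       for (int i = 0; i < 4; i += 2) {
@           int16_t t = a[i + 1]; a[i + 1] = b[i]; b[i] = t;
@       }
@   }
@
@   /* vtrn.32: swap the high 32-bit lane of a with the low 32-bit lane of b */
@   static void vtrn32(int16_t a[4], int16_t b[4])
@   {
@       for (int i = 0; i < 2; i++) {
@           int16_t t = a[2 + i]; a[2 + i] = b[i]; b[i] = t;
@       }
@   }
@
@   /* transpose16 r0, r1, r2, r3 (double-word case) is then: */
@   static void transpose4x4(int16_t r0[4], int16_t r1[4],
@                            int16_t r2[4], int16_t r3[4])
@   {
@       vtrn16(r0, r1);  vtrn16(r2, r3);    /* pairwise 2x2 transposes     */
@       vtrn32(r0, r2);  vtrn32(r1, r3);    /* then 2x2 transpose of pairs */
@   }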
@ ff_vc1_inv_trans_{4,8}x{4,8}_neon and overflow: The input values in the file
@ are supposed to be in a specific range as to allow for 16-bit math without
@ causing overflows, but sometimes the input values are just big enough to
@ barely cause overflow in vadd instructions like:
@
@ vadd.i16 q0, q8, q10
@ vshr.s16 q0, q0, #\rshift
@
@ To prevent these borderline cases from overflowing, we just need one more
@ bit of precision, which is accomplished by replacing the sequence above with:
@
@ vhadd.s16 q0, q8, q10
@ vshr.s16 q0, q0, #(\rshift -1)
@
@ This works because vhadd is a single instruction that adds, then shifts to
@ the right once, all before writing the result to the destination register.
@
@ Even with this workaround, there were still some files that caused overflows
@ in ff_vc1_inv_trans_8x8_neon. See the comments in ff_vc1_inv_trans_8x8_neon
@ for the additional workaround.
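@ A minimal C model of the replacement described above (illustration only):
@ an arithmetic right shift by rshift can be split into a halving step plus a
@ shift by rshift-1, and vhadd.s16 performs the halving on an internal 17-bit
@ sum, so the borderline 16-bit overflow never happens.
@
@   #include <stdint.h>
@
@   static int16_t add_then_shift(int16_t a, int16_t b, int rshift)
@   {
@       /* vadd.i16 + vshr.s16: the 16-bit sum a + b can wrap */
@       return (int16_t)((int16_t)(a + b) >> rshift);
@   }
@
@   static int16_t halving_add_then_shift(int16_t a, int16_t b, int rshift)
@   {
@       /* vhadd.s16 + vshr.s16 #(rshift - 1): same result, no wrap */
@       int32_t h = ((int32_t)a + b) >> 1;      /* exact model of vhadd.s16 */
@       return (int16_t)(h >> (rshift - 1));
@   }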
@ Takes 4 columns of 8 values each and operates on it. Modeled after the first
@ for loop in vc1_inv_trans_4x8_c.
@ Input columns: q0 q1 q2 q3
@ Output columns: q0 q1 q2 q3
@ Trashes: r12 q8 q9 q10 q11 q12 q13
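@ For reference, the per-column 4-point transform this macro implements, as a
@ C sketch reconstructed from the comments below (vc1_inv4 is a made-up name,
@ not the FFmpeg reference; the NEON code folds the first shift into vhadd):
@
@   static void vc1_inv4(int16_t dst[4], const int16_t src[4],
@                        int add, int rshift)
@   {
@       int t1 = 17 * (src[0] + src[2]) + add;
@       int t2 = 17 * (src[0] - src[2]) + add;
@       int t3 = 22 * src[1] + 10 * src[3];
@       int t4 = 22 * src[3] - 10 * src[1];
@       dst[0] = (t1 + t3) >> rshift;
@       dst[1] = (t2 - t4) >> rshift;
@       dst[2] = (t2 + t4) >> rshift;
@       dst[3] = (t1 - t3) >> rshift;
@   }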
.macro vc1_inv_trans_4x8_helper add rshift
@ Compute temp1, temp2 and setup scalar #17, #22, #10
        vadd.i16        q12, q0,  q2            @ temp1 = src[0] + src[2]
        movw            r12, #17
        vsub.i16        q13, q0,  q2            @ temp2 = src[0] - src[2]
        movt            r12, #22
        vmov.32         d0[0],  r12
        movw            r12, #10
        vmov.16         d1[0],  r12
        vmov.i16        q8,  #\add              @ t1 will accumulate here
        vmov.i16        q9,  #\add              @ t2 will accumulate here
        vmul.i16        q10, q1,  d0[1]         @ t3 = 22 * (src[1])
        vmul.i16        q11, q3,  d0[1]         @ t4 = 22 * (src[3])
        vmla.i16        q8,  q12, d0[0]         @ t1 = 17 * (temp1) + 4
        vmla.i16        q9,  q13, d0[0]         @ t2 = 17 * (temp2) + 4
        vmla.i16        q10, q3,  d1[0]         @ t3 += 10 * src[3]
        vmls.i16        q11, q1,  d1[0]         @ t4 -= 10 * src[1]
        vhadd.s16       q0,  q8,  q10           @ dst[0] = (t1 + t3) >> 1
        vhsub.s16       q3,  q8,  q10           @ dst[3] = (t1 - t3) >> 1
        vhsub.s16       q1,  q9,  q11           @ dst[1] = (t2 - t4) >> 1
        vhadd.s16       q2,  q9,  q11           @ dst[2] = (t2 + t4) >> 1
@ Halving add/sub above already did one shift
        vshr.s16        q0,  q0,  #(\rshift - 1)        @ dst[0] >>= (rshift - 1)
        vshr.s16        q3,  q3,  #(\rshift - 1)        @ dst[3] >>= (rshift - 1)
        vshr.s16        q1,  q1,  #(\rshift - 1)        @ dst[1] >>= (rshift - 1)
        vshr.s16        q2,  q2,  #(\rshift - 1)        @ dst[2] >>= (rshift - 1)
.endm
@ Takes 8 columns of 4 values each and operates on it. Modeled after the second
@ for loop in vc1_inv_trans_4x8_c.
@ Input columns: d0 d2 d4 d6 d1 d3 d5 d7
@ Output columns: d16 d17 d18 d19 d21 d20 d23 d22
@ Trashes all NEON registers (and r12) except for: q4 q5 q6 q7
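@ For reference, the 8-point transform this macro applies to each row, as a C
@ sketch reconstructed from the comments below (vc1_inv8 is a made-up name;
@ the macro reads its eight inputs with a stride of 8, hence src[0], src[8],
@ ..., src[56] in the comments, and add1 is 1 only when add1beforeshift=1):
@
@   static void vc1_inv8(int16_t dst[8], const int16_t s[8],
@                        int add, int add1, int rshift)
@   {
@       int t1 = 12 * (s[0] + s[4]) + add;
@       int t2 = 12 * (s[0] - s[4]) + add;
@       int t3 = 16 * s[2] +  6 * s[6];
@       int t4 =  6 * s[2] - 16 * s[6];
@       int t5 = t1 + t3, t6 = t2 + t4, t7 = t2 - t4, t8 = t1 - t3;
@
@       t1 = 16 * s[1] + 15 * s[3] +  9 * s[5] +  4 * s[7];
@       t2 = 15 * s[1] -  4 * s[3] - 16 * s[5] -  9 * s[7];
@       t3 =  9 * s[1] - 16 * s[3] +  4 * s[5] + 15 * s[7];
@       t4 =  4 * s[1] -  9 * s[3] + 15 * s[5] - 16 * s[7];
@
@       dst[0] = (t5 + t1) >> rshift;
@       dst[1] = (t6 + t2) >> rshift;
@       dst[2] = (t7 + t3) >> rshift;
@       dst[3] = (t8 + t4) >> rshift;
@       dst[4] = (t8 - t4 + add1) >> rshift;
@       dst[5] = (t7 - t3 + add1) >> rshift;
@       dst[6] = (t6 - t2 + add1) >> rshift;
@       dst[7] = (t5 - t1 + add1) >> rshift;
@   }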
.macro vc1_inv_trans_8x4_helper add add1beforeshift rshift
@ At this point:
@ src[0] d0 overwritten later
@ src[8] d2
@ src[16] d4 overwritten later
@ src[24] d6
@ src[32] d1 overwritten later
@ src[40] d3
@ src[48] d5 overwritten later
@ src[56] d7
        movw            r12, #12
        vmov.i16        q14, #\add              @ t1|t2 will accumulate here
        movt            r12, #6
        vadd.i16        d20, d0,  d1            @ temp1 = src[0] + src[32]
        vsub.i16        d21, d0,  d1            @ temp2 = src[0] - src[32]
        vmov.i32        d0[0],  r12             @ 16-bit: d0[0] = #12, d0[1] = #6
        vshl.i16        q15, q2,  #4            @ t3|t4 = 16 * (src[16]|src[48])
        vswp            d4,  d5                 @ q2 = src[48]|src[16]
        vmla.i16        q14, q10, d0[0]         @ t1|t2 = 12 * (temp1|temp2) + 64
        movw            r12, #15
        movt            r12, #9
        vmov.i32        d0[1],  r12             @ 16-bit: d0[2] = #15, d0[3] = #9
        vneg.s16        d31, d31                @ t4 = -t4
        vmla.i16        q15, q2,  d0[1]         @ t3|t4 += 6 * (src[48]|src[16])
@ At this point:
@ d0[2] #15
@ d0[3] #9
@ q1 src[8]|src[40]
@ q3 src[24]|src[56]
@ q14 old t1|t2
@ q15 old t3|t4
        vshl.i16        q8,  q1,  #4            @ t1|t2 = 16 * (src[8]|src[40])
        vswp            d2,  d3                 @ q1 = src[40]|src[8]
        vshl.i16        q12, q3,  #4            @ temp3a|temp4a = 16 * src[24]|src[56]
        vswp            d6,  d7                 @ q3 = src[56]|src[24]
        vshl.i16        q13, q1,  #2            @ temp3b|temp4b = 4 * (src[40]|src[8])
        vshl.i16        q2,  q3,  #2            @ temp1|temp2 = 4 * (src[56]|src[24])
        vswp            d3,  d6                 @ q1 = src[40]|src[56], q3 = src[8]|src[24]
        vsub.i16        q9,  q13, q12           @ t3|t4 = - (temp3a|temp4a) + (temp3b|temp4b)
        vadd.i16        q8,  q8,  q2            @ t1|t2 += temp1|temp2
        vmul.i16        q12, q3,  d0[3]         @ temp3|temp4 = 9 * src[8]|src[24]
        vmla.i16        q8,  q1,  d0[3]         @ t1|t2 += 9 * (src[40]|src[56])
        vswp            d6,  d7                 @ q3 = src[24]|src[8]
        vswp            d2,  d3                 @ q1 = src[56]|src[40]
        vsub.i16        q11, q14, q15           @ t8|t7 = old t1|t2 - old t3|t4
        vadd.i16        q10, q14, q15           @ t5|t6 = old t1|t2 + old t3|t4
.if \add1beforeshift
        vmov.i16        q15, #1
.endif
        vadd.i16        d18, d18, d24           @ t3 += temp3
        vsub.i16        d19, d19, d25           @ t4 -= temp4
        vswp            d22, d23                @ q11 = t7|t8
        vneg.s16        d17, d17                @ t2 = -t2
        vmla.i16        q9,  q1,  d0[2]         @ t3|t4 += 15 * src[56]|src[40]
        vmla.i16        q8,  q3,  d0[2]         @ t1|t2 += 15 * src[24]|src[8]
@ At this point:
@ t1 d16
@ t2 d17
@ t3 d18
@ t4 d19
@ t5 d20
@ t6 d21
@ t7 d22
@ t8 d23
@ #1 q15
.if \add1beforeshift
        vadd.i16        q3,  q15, q10           @ line[7,6] = t5|t6 + 1
        vadd.i16        q2,  q15, q11           @ line[5,4] = t7|t8 + 1
.endif
@ Sometimes this overflows, so to get one additional bit of precision, use
@ a single instruction that both adds and shifts right (halving).
        vhadd.s16       q1,  q9,  q11           @ line[2,3] = (t3|t4 + t7|t8) >> 1
        vhadd.s16       q0,  q8,  q10           @ line[0,1] = (t1|t2 + t5|t6) >> 1
.if \add1beforeshift
        vhsub.s16       q2,  q2,  q9            @ line[5,4] = (t7|t8 - t3|t4 + 1) >> 1
        vhsub.s16       q3,  q3,  q8            @ line[7,6] = (t5|t6 - t1|t2 + 1) >> 1
.else
        vhsub.s16       q2,  q11, q9            @ line[5,4] = (t7|t8 - t3|t4) >> 1
        vhsub.s16       q3,  q10, q8            @ line[7,6] = (t5|t6 - t1|t2) >> 1
.endif
        vshr.s16        q9,  q1,  #(\rshift - 1)        @ one shift is already done by vhadd/vhsub above
        vshr.s16        q8,  q0,  #(\rshift - 1)
        vshr.s16        q10, q2,  #(\rshift - 1)
        vshr.s16        q11, q3,  #(\rshift - 1)
@ At this point:
@ dst[0] d16
@ dst[1] d17
@ dst[2] d18
@ dst[3] d19
@ dst[4] d21
@ dst[5] d20
@ dst[6] d23
@ dst[7] d22
.endm
@ This is modeled after the first and second for loop in vc1_inv_trans_8x8_c.
@ Input columns: q8, q9, q10, q11, q12, q13, q14, q15
@ Output columns: q8, q9, q10, q11, q12, q13, q14, q15
@ Trashes all NEON registers (and r12) except for: q4 q5 q6 q7
.macro vc1_inv_trans_8x8_helper add add1beforeshift rshift
@ This actually computes half of t1, t2, t3, t4, as explained below
@ near `tNhalf`.
        vmov.i16        q0,  #(6 / 2)           @ q0 = #6/2
        vshl.i16        q1,  q10, #3            @ t3 = 16/2 * src[16]
        vshl.i16        q3,  q14, #3            @ temp4 = 16/2 * src[48]
        vmul.i16        q2,  q10, q0            @ t4 = 6/2 * src[16]
        vmla.i16        q1,  q14, q0            @ t3 += 6/2 * src[48]
@ unused: q0, q10, q14
        vmov.i16        q0,  #(12 / 2)          @ q0 = #12/2
        vadd.i16        q10, q8,  q12           @ temp1 = src[0] + src[32]
        vsub.i16        q14, q8,  q12           @ temp2 = src[0] - src[32]
@ unused: q8, q12
        vmov.i16        q8,  #(\add / 2)        @ t1 will accumulate here
        vmov.i16        q12, #(\add / 2)        @ t2 will accumulate here
        movw            r12, #15
        vsub.i16        q2,  q2,  q3            @ t4 = 6/2 * src[16] - 16/2 * src[48]
        movt            r12, #9
@ unused: q3
        vmla.i16        q8,  q10, q0            @ t1 = 12/2 * temp1 + add
        vmla.i16        q12, q14, q0            @ t2 = 12/2 * temp2 + add
        vmov.i32        d0[0],  r12
@ unused: q3, q10, q14
@ At this point:
@ q0 d0=#15|#9
@ q1 old t3
@ q2 old t4
@ q3
@ q8 old t1
@ q9 src[8]
@ q10
@ q11 src[24]
@ q12 old t2
@ q13 src[40]
@ q14
@ q15 src[56]
@ unused: q3, q10, q14
        movw            r12, #16
        vshl.i16        q3,  q9,  #4            @ t1 = 16 * src[8]
        movt            r12, #4
        vshl.i16        q10, q9,  #2            @ t4 = 4 * src[8]
        vmov.i32        d1[0],  r12
        vmul.i16        q14, q9,  d0[0]         @ t2 = 15 * src[8]
        vmul.i16        q9,  q9,  d0[1]         @ t3 = 9 * src[8]
@ unused: none
        vmla.i16        q3,  q11, d0[0]         @ t1 += 15 * src[24]
        vmls.i16        q10, q11, d0[1]         @ t4 -= 9 * src[24]
        vmls.i16        q14, q11, d1[1]         @ t2 -= 4 * src[24]
        vmls.i16        q9,  q11, d1[0]         @ t3 -= 16 * src[24]
@ unused: q11
        vmla.i16        q3,  q13, d0[1]         @ t1 += 9 * src[40]
        vmla.i16        q10, q13, d0[0]         @ t4 += 15 * src[40]
        vmls.i16        q14, q13, d1[0]         @ t2 -= 16 * src[40]
        vmla.i16        q9,  q13, d1[1]         @ t3 += 4 * src[40]
@ unused: q11, q13
@ Compute t5, t6, t7, t8 from old t1, t2, t3, t4. Actually, it computes
@ half of t5, t6, t7, t8 since t1, t2, t3, t4 are halved.
        vadd.i16        q11, q8,  q1            @ t5 = t1 + t3
        vsub.i16        q1,  q8,  q1            @ t8 = t1 - t3
        vadd.i16        q13, q12, q2            @ t6 = t2 + t4
        vsub.i16        q2,  q12, q2            @ t7 = t2 - t4
@ unused: q8, q12
.if \add1beforeshift
        vmov.i16        q12, #1
.endif
@ unused: q8
        vmla.i16        q3,  q15, d1[1]         @ t1 += 4 * src[56]
        vmls.i16        q14, q15, d0[1]         @ t2 -= 9 * src[56]
        vmla.i16        q9,  q15, d0[0]         @ t3 += 15 * src[56]
        vmls.i16        q10, q15, d1[0]         @ t4 -= 16 * src[56]
@ unused: q0, q8, q15
@ At this point:
@ t1 q3
@ t2 q14
@ t3 q9
@ t4 q10
@ t5half q11
@ t6half q13
@ t7half q2
@ t8half q1
@ #1 q12
@
@ tNhalf is half of the value of tN (as described in vc1_inv_trans_8x8_c).
@ This is done because sometimes files have input that causes tN + tM to
@ overflow. To avoid this overflow, we compute tNhalf, then compute
@ tNhalf + tM (which doesn't overflow), and then we use vhadd to compute
@ (tNhalf + (tNhalf + tM)) >> 1 which does not overflow because it is
@ one instruction.
@ For each pair of tN and tM, do:
@ lineA = t5half + t1
@ if add1beforeshift: t1 -= 1
@ lineA = (t5half + lineA) >> 1
@ lineB = t5half - t1
@ lineB = (t5half + lineB) >> 1
@ lineA >>= rshift - 1
@ lineB >>= rshift - 1
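@ In C terms, each lineA/lineB pair above is computed like this (sketch only;
@ the int32_t halving models what a single vhadd.s16 does internally):
@
@   static void line_pair(int16_t *lineA, int16_t *lineB,
@                         int16_t tNhalf, int16_t tM,
@                         int rshift, int add1beforeshift)
@   {
@       int16_t a = tNhalf + tM;                /* safe: tNhalf is halved   */
@       if (add1beforeshift)
@           tM -= 1;
@       *lineA = (int16_t)((((int32_t)tNhalf + a) >> 1) >> (rshift - 1));
@       int16_t b = tNhalf - tM;
@       *lineB = (int16_t)((((int32_t)tNhalf + b) >> 1) >> (rshift - 1));
@   }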
        vadd.i16        q8,  q11, q3            @ q8 = t5half + t1
.if \add1beforeshift
        vsub.i16        q3,  q3,  q12           @ q3 = t1 - 1
.endif
        vadd.i16        q0,  q13, q14           @ q0 = t6half + t2
.if \add1beforeshift
        vsub.i16        q14, q14, q12           @ q14 = t2 - 1
.endif
        vadd.i16        q15, q2,  q9            @ q15 = t7half + t3
.if \add1beforeshift
        vsub.i16        q9,  q9,  q12           @ q9 = t3 - 1
.endif
@ unused: none
        vhadd.s16       q8,  q11, q8            @ q8 = (t5half + t5half + t1) >> 1
        vsub.i16        q3,  q11, q3            @ q3 = t5half - t1 + 1
        vhadd.s16       q0,  q13, q0            @ q0 = (t6half + t6half + t2) >> 1
        vsub.i16        q14, q13, q14           @ q14 = t6half - t2 + 1
        vhadd.s16       q15, q2,  q15           @ q15 = (t7half + t7half + t3) >> 1
        vsub.i16        q9,  q2,  q9            @ q9 = t7half - t3 + 1
        vhadd.s16       q3,  q11, q3            @ q3 = (t5half + t5half - t1 + 1) >> 1
@ unused: q11
        vadd.i16        q11, q1,  q10           @ q11 = t8half + t4
.if \add1beforeshift
        vsub.i16        q10, q10, q12           @ q10 = t4 - 1
.endif
@ unused: q12
        vhadd.s16       q14, q13, q14           @ q14 = (t6half + t6half - t2 + 1) >> 1
@ unused: q12, q13
        vhadd.s16       q13, q2,  q9            @ q13 = (t7half + t7half - t3 + 1) >> 1
@ unused: q12, q2, q9
        vsub.i16        q10, q1,  q10           @ q10 = t8half - t4 + 1
        vhadd.s16       q11, q1,  q11           @ q11 = (t8half + t8half + t4) >> 1
        vshr.s16        q8,  q8,  #(\rshift - 1)        @ q8 = line[0]
        vhadd.s16       q12, q1,  q10           @ q12 = (t8half + t8half - t4 + 1) >> 1
        vshr.s16        q9,  q0,  #(\rshift - 1)        @ q9 = line[1]
        vshr.s16        q10, q15, #(\rshift - 1)        @ q10 = line[2]
        vshr.s16        q11, q11, #(\rshift - 1)        @ q11 = line[3]
        vshr.s16        q12, q12, #(\rshift - 1)        @ q12 = line[4]
        vshr.s16        q13, q13, #(\rshift - 1)        @ q13 = line[5]
        vshr.s16        q14, q14, #(\rshift - 1)        @ q14 = line[6]
        vshr.s16        q15, q3,  #(\rshift - 1)        @ q15 = line[7]
.endm
@ (int16_t *block [r0])
function ff_vc1_inv_trans_8x8_neon, export=1
        vld1.64         {q8-q9},   [r0,:128]!
        vld1.64         {q10-q11}, [r0,:128]!
        vld1.64         {q12-q13}, [r0,:128]!
        vld1.64         {q14-q15}, [r0,:128]
        sub             r0,  r0,  #(16 * 2 * 3) @ restore r0
@ At this point:
@ src[0] q8
@ src[8] q9
@ src[16] q10
@ src[24] q11
@ src[32] q12
@ src[40] q13
@ src[48] q14
@ src[56] q15
        vc1_inv_trans_8x8_helper add=4, add1beforeshift=0, rshift=3
@ Transpose result matrix of 8x8
        swap4           d17, d19, d21, d23, d24, d26, d28, d30
        transpose16_4x4 q8,  q9,  q10, q11, q12, q13, q14, q15
        vc1_inv_trans_8x8_helper add=64, add1beforeshift=1, rshift=7
        vst1.64         {q8-q9},   [r0,:128]!
        vst1.64         {q10-q11}, [r0,:128]!
        vst1.64         {q12-q13}, [r0,:128]!
        vst1.64         {q14-q15}, [r0,:128]
        bx              lr
endfunc
@ (uint8_t *dest [r0], ptrdiff_t stride [r1], int16_t *block [r2])
function ff_vc1_inv_trans_8x4_neon, export=1
        vld1.64         {q0-q1}, [r2,:128]!     @ load 8 * 4 * 2 = 64 bytes / 16 bytes per quad = 4 quad registers
        vld1.64         {q2-q3}, [r2,:128]
        transpose16     q0,  q1,  q2,  q3       @ transpose rows to columns
@ At this point:
@ src[0] d0
@ src[1] d2
@ src[2] d4
@ src[3] d6
@ src[4] d1
@ src[5] d3
@ src[6] d5
@ src[7] d7
        vc1_inv_trans_8x4_helper add=4, add1beforeshift=0, rshift=3
@ Move output to more standardized registers
        vmov            d0,  d16
        vmov            d2,  d17
        vmov            d4,  d18
        vmov            d6,  d19
        vmov            d1,  d21
        vmov            d3,  d20
        vmov            d5,  d23
        vmov            d7,  d22
@ At this point:
@ dst[0] d0
@ dst[1] d2
@ dst[2] d4
@ dst[3] d6
@ dst[4] d1
@ dst[5] d3
@ dst[6] d5
@ dst[7] d7
        transpose16     q0,  q1,  q2,  q3       @ turn columns into rows
@ At this point:
@ row[0] q0
@ row[1] q1
@ row[2] q2
@ row[3] q3
        vc1_inv_trans_4x8_helper add=64, rshift=7
@ At this point:
@ line[0].l d0
@ line[0].h d1
@ line[1].l d2
@ line[1].h d3
@ line[2].l d4
@ line[2].h d5
@ line[3].l d6
@ line[3].h d7
@ unused registers: q12, q13, q14, q15
        vld1.64         {d28}, [r0,:64], r1     @ read dest
        vld1.64         {d29}, [r0,:64], r1
        vld1.64         {d30}, [r0,:64], r1
        vld1.64         {d31}, [r0,:64], r1
        sub             r0,  r0,  r1,  lsl #2   @ restore original r0 value
        vaddw.u8        q0,  q0,  d28           @ line[0] += dest[0]
        vaddw.u8        q1,  q1,  d29           @ line[1] += dest[1]
        vaddw.u8        q2,  q2,  d30           @ line[2] += dest[2]
        vaddw.u8        q3,  q3,  d31           @ line[3] += dest[3]
        vqmovun.s16     d0,  q0                 @ line[0]
        vqmovun.s16     d1,  q1                 @ line[1]
        vqmovun.s16     d2,  q2                 @ line[2]
        vqmovun.s16     d3,  q3                 @ line[3]
        vst1.64         {d0}, [r0,:64], r1      @ write dest
        vst1.64         {d1}, [r0,:64], r1
        vst1.64         {d2}, [r0,:64], r1
        vst1.64         {d3}, [r0,:64]
        bx              lr
endfunc
@ (uint8_t *dest [r0], ptrdiff_t stride [r1], int16_t *block [r2])
function ff_vc1_inv_trans_4x8_neon, export=1
        mov             r12, #(8 * 2)           @ 8 elements per line, each element 2 bytes
        vld4.16         {d0[],  d2[],  d4[],  d6[]},  [r2,:64], r12     @ read each column into a q register
        vld4.16         {d0[1], d2[1], d4[1], d6[1]}, [r2,:64], r12
        vld4.16         {d0[2], d2[2], d4[2], d6[2]}, [r2,:64], r12
        vld4.16         {d0[3], d2[3], d4[3], d6[3]}, [r2,:64], r12
        vld4.16         {d1[],  d3[],  d5[],  d7[]},  [r2,:64], r12
        vld4.16         {d1[1], d3[1], d5[1], d7[1]}, [r2,:64], r12
        vld4.16         {d1[2], d3[2], d5[2], d7[2]}, [r2,:64], r12
        vld4.16         {d1[3], d3[3], d5[3], d7[3]}, [r2,:64]
        vc1_inv_trans_4x8_helper add=4, rshift=3
@ At this point:
@ dst[0] = q0
@ dst[1] = q1
@ dst[2] = q2
@ dst[3] = q3
        transpose16     q0,  q1,  q2,  q3       @ Transpose rows (registers) into columns
        vc1_inv_trans_8x4_helper add=64, add1beforeshift=1, rshift=7
        vld1.32         {d28[]},  [r0,:32], r1  @ read dest
        vld1.32         {d28[1]}, [r0,:32], r1
        vld1.32         {d29[]},  [r0,:32], r1
        vld1.32         {d29[1]}, [r0,:32], r1
        vld1.32         {d30[]},  [r0,:32], r1
        vld1.32         {d30[0]}, [r0,:32], r1
        vld1.32         {d31[]},  [r0,:32], r1
        vld1.32         {d31[0]}, [r0,:32], r1
        sub             r0,  r0,  r1,  lsl #3   @ restore original r0 value
        vaddw.u8        q8,  q8,  d28           @ line[0,1] += dest[0,1]
        vaddw.u8        q9,  q9,  d29           @ line[2,3] += dest[2,3]
        vaddw.u8        q10, q10, d30           @ line[5,4] += dest[5,4]
        vaddw.u8        q11, q11, d31           @ line[7,6] += dest[7,6]
        vqmovun.s16     d16, q8                 @ clip(line[0,1])
        vqmovun.s16     d18, q9                 @ clip(line[2,3])
        vqmovun.s16     d20, q10                @ clip(line[5,4])
        vqmovun.s16     d22, q11                @ clip(line[7,6])
        vst1.32         {d16[0]}, [r0,:32], r1  @ write dest
        vst1.32         {d16[1]}, [r0,:32], r1
        vst1.32         {d18[0]}, [r0,:32], r1
        vst1.32         {d18[1]}, [r0,:32], r1
        vst1.32         {d20[1]}, [r0,:32], r1
        vst1.32         {d20[0]}, [r0,:32], r1
        vst1.32         {d22[1]}, [r0,:32], r1
        vst1.32         {d22[0]}, [r0,:32]
        bx              lr
endfunc
@ Setup constants in registers which are used by vc1_inv_trans_4x4_helper
.macro vc1_inv_trans_4x4_helper_setup
        vmov.i16        q13, #17
        vmov.i16        q14, #22
        vmov.i16        d30, #10                @ only need double-word, not quad-word
.endm
@ This is modeled after the first for loop in vc1_inv_trans_4x4_c.
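@ This is the same 4-point transform as the vc1_inv4() sketch further up, but
@ computed on two packed halves at once: q2 holds t1 (lanes 0-3) and t2
@ (lanes 4-7), q3 holds t3 and t4, so a single vadd/vsub yields dst[0],dst[2]
@ and dst[3],dst[1].  No halving is needed because 4x4 inputs are small enough.
@ Rough C model of that pairing (made-up name, illustration only):
@
@   static void vc1_inv4_pair(int16_t sum[8], int16_t dif[8],
@                             const int16_t t12[8], const int16_t t34[8],
@                             int rshift)
@   {
@       for (int i = 0; i < 8; i++) {
@           sum[i] = (t12[i] + t34[i]) >> rshift;   /* dst[0] | dst[2] */
@           dif[i] = (t12[i] - t34[i]) >> rshift;   /* dst[3] | dst[1] */
@       }
@   }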
.macro vc1_inv_trans_4x4_helper add rshift
        vmov.i16        q2,  #\add              @ t1|t2 will accumulate here
        vadd.i16        d16, d0,  d1            @ temp1 = src[0] + src[2]
        vsub.i16        d17, d0,  d1            @ temp2 = src[0] - src[2]
        vmul.i16        q3,  q14, q1            @ t3|t4 = 22 * (src[1]|src[3])
        vmla.i16        q2,  q13, q8            @ t1|t2 = 17 * (temp1|temp2) + add
        vmla.i16        d6,  d30, d3            @ t3 += 10 * src[3]
        vmls.i16        d7,  d30, d2            @ t4 -= 10 * src[1]
        vadd.i16        q0,  q2,  q3            @ dst[0,2] = (t1|t2 + t3|t4)
        vsub.i16        q1,  q2,  q3            @ dst[3,1] = (t1|t2 - t3|t4)
        vshr.s16        q0,  q0,  #\rshift      @ dst[0,2] >>= rshift
        vshr.s16        q1,  q1,  #\rshift      @ dst[3,1] >>= rshift
.endm
@ (uint8_t *dest [r0], ptrdiff_t stride [r1], int16_t *block [r2])
function ff_vc1_inv_trans_4x4_neon, export=1
        mov             r12, #(8 * 2)           @ 8 elements per line, each element 2 bytes
        vld4.16         {d0[],  d1[],  d2[],  d3[]},  [r2,:64], r12     @ read each column into a register
        vld4.16         {d0[1], d1[1], d2[1], d3[1]}, [r2,:64], r12
        vld4.16         {d0[2], d1[2], d2[2], d3[2]}, [r2,:64], r12
        vld4.16         {d0[3], d1[3], d2[3], d3[3]}, [r2,:64]
        vswp            d1,  d2                 @ so that we can later access column 1 and column 3 as a single q1 register
        vc1_inv_trans_4x4_helper_setup
@ At this point:
@ src[0] = d0
@ src[1] = d2
@ src[2] = d1
@ src[3] = d3
        vc1_inv_trans_4x4_helper add=4, rshift=3        @ compute t1, t2, t3, t4 and combine them into dst[0-3]
@ At this point:
@ dst[0] = d0
@ dst[1] = d3
@ dst[2] = d1
@ dst[3] = d2
        transpose16     d0,  d3,  d1,  d2       @ Transpose rows (registers) into columns
@ At this point:
@ src[0] = d0
@ src[8] = d3
@ src[16] = d1
@ src[24] = d2
        vswp            d2,  d3                 @ so that we can later access column 1 and column 3 in order as a single q1 register
@ At this point:
@ src[0] = d0
@ src[8] = d2
@ src[16] = d1
@ src[24] = d3
        vc1_inv_trans_4x4_helper add=64, rshift=7       @ compute t1, t2, t3, t4 and combine them into dst[0-3]
@ At this point:
@ line[0] = d0
@ line[1] = d3
@ line[2] = d1
@ line[3] = d2
        vld1.32         {d18[]},  [r0,:32], r1  @ read dest
        vld1.32         {d19[]},  [r0,:32], r1
        vld1.32         {d18[1]}, [r0,:32], r1
        vld1.32         {d19[0]}, [r0,:32], r1
        sub             r0,  r0,  r1,  lsl #2   @ restore original r0 value
        vaddw.u8        q0,  q0,  d18           @ line[0,2] += dest[0,2]
        vaddw.u8        q1,  q1,  d19           @ line[3,1] += dest[3,1]
        vqmovun.s16     d0,  q0                 @ clip(line[0,2])
        vqmovun.s16     d1,  q1                 @ clip(line[3,1])
        vst1.32         {d0[0]}, [r0,:32], r1   @ write dest
        vst1.32         {d1[1]}, [r0,:32], r1
        vst1.32         {d0[1]}, [r0,:32], r1
        vst1.32         {d1[0]}, [r0,:32]
        bx              lr
endfunc
@ The absolute value of multiplication constants from vc1_mspel_filter and vc1_mspel_{ver,hor}_filter_16bits.
@ The sign is embedded in the code below that carries out the multiplication (mspel_filter{,.16}).
#define MSPEL_MODE_1_MUL_CONSTANTS  4, 53, 18, 3
#define MSPEL_MODE_2_MUL_CONSTANTS  1, 9, 9, 1
#define MSPEL_MODE_3_MUL_CONSTANTS  3, 18, 53, 4
@ These constants are from reading the source code of vc1_mspel_mc and determining the value that
@ is added to `rnd` to result in the variable `r`, and the value of the variable `shift`.
#define MSPEL_MODES_11_ADDSHIFT_CONSTANTS   15, 5
#define MSPEL_MODES_12_ADDSHIFT_CONSTANTS   3, 3
#define MSPEL_MODES_13_ADDSHIFT_CONSTANTS   15, 5
#define MSPEL_MODES_21_ADDSHIFT_CONSTANTS   MSPEL_MODES_12_ADDSHIFT_CONSTANTS
#define MSPEL_MODES_22_ADDSHIFT_CONSTANTS   0, 1
#define MSPEL_MODES_23_ADDSHIFT_CONSTANTS   3, 3
#define MSPEL_MODES_31_ADDSHIFT_CONSTANTS   MSPEL_MODES_13_ADDSHIFT_CONSTANTS
#define MSPEL_MODES_32_ADDSHIFT_CONSTANTS   MSPEL_MODES_23_ADDSHIFT_CONSTANTS
#define MSPEL_MODES_33_ADDSHIFT_CONSTANTS   15, 5
@ The addition and shift constants from vc1_mspel_filter.
#define MSPEL_MODE_1_ADDSHIFT_CONSTANTS     32, 6
#define MSPEL_MODE_2_ADDSHIFT_CONSTANTS     8, 4
#define MSPEL_MODE_3_ADDSHIFT_CONSTANTS     32, 6
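@ Putting the two tables together, one output pixel of the quarter-pel filter
@ looks like this in C (a sketch with the sign pattern that mspel_filter below
@ hard-codes; mspel_tap is a made-up name, not the FFmpeg reference):
@
@   #include <stddef.h>
@   #include <stdint.h>
@
@   static uint8_t mspel_tap(const uint8_t *src, ptrdiff_t stride,
@                            int a, int b, int c, int d, int add, int shift)
@   {
@       int acc = add - a * src[-stride] + b * src[0]
@                     + c * src[ stride] - d * src[2 * stride];
@       acc >>= shift;                          /* vqshrun.s16 also clips:  */
@       return acc < 0 ? 0 : acc > 255 ? 255 : (uint8_t)acc;
@   }
@
@ Mode 2 has a == d == 1 and b == c == 9, which is why mspel_constants and
@ mspel_filter special-case filter_a == 1, filter_d == 1 and filter_b == filter_c.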
@ Setup constants in registers for a subsequent use of mspel_filter{,.16}.
.macro mspel_constants typesize reg_a reg_b reg_c reg_d filter_a filter_b filter_c filter_d reg_add filter_add_register
@ Typesize should be i8 or i16.
@ Only set the register if the value is not 1 and unique
.if \filter_a != 1
        vmov.\typesize  \reg_a, #\filter_a      @ reg_a = filter_a
.endif
        vmov.\typesize  \reg_b, #\filter_b      @ reg_b = filter_b
.if \filter_b != \filter_c
        vmov.\typesize  \reg_c, #\filter_c      @ reg_c = filter_c
.endif
.if \filter_d != 1
        vmov.\typesize  \reg_d, #\filter_d      @ reg_d = filter_d
.endif
@ vdup to double the size of typesize
.ifc \typesize,i8
        vdup.16         \reg_add, \filter_add_register @ reg_add = filter_add_register
.else
        vdup.32         \reg_add, \filter_add_register @ reg_add = filter_add_register
.endif
.endm
@ After mspel_constants has been used, do the filtering.
.macro mspel_filter acc dest src0 src1 src2 src3 filter_a filter_b filter_c filter_d reg_a reg_b reg_c reg_d reg_add filter_shift narrow=1
.if \filter_a != 1
@ If filter_a != 1, then we need a move and subtract instruction
        vmov            \acc,   \reg_add                @ acc = reg_add
        vmlsl.u8        \acc,   \reg_a,  \src0          @ acc -= filter_a * src[-stride]
.else
@ If filter_a is 1, then just subtract without an extra move
        vsubw.u8        \acc,   \reg_add, \src0         @ acc = reg_add - src[-stride] @ since filter_a == 1
.endif
        vmlal.u8        \acc,   \reg_b,  \src1          @ acc += filter_b * src[0]
.if \filter_b != \filter_c
        vmlal.u8        \acc,   \reg_c,  \src2          @ acc += filter_c * src[stride]
.else
@ If filter_b is the same as filter_c, use the same reg_b register
        vmlal.u8        \acc,   \reg_b,  \src2          @ acc += filter_c * src[stride] @ where filter_c == filter_b
.endif
.if \filter_d != 1
@ If filter_d != 1, then do a multiply accumulate
        vmlsl.u8        \acc,   \reg_d,  \src3          @ acc -= filter_d * src[stride * 2]
.else
@ If filter_d is 1, then just do a subtract
        vsubw.u8        \acc,   \acc,    \src3          @ acc -= src[stride * 2] @ since filter_d == 1
.endif
.if \narrow
        vqshrun.s16     \dest,  \acc,    #\filter_shift @ dest = clip_uint8(acc >> filter_shift)
.else
        vshr.s16        \dest,  \acc,    #\filter_shift @ dest = acc >> filter_shift
.endif
.endm
@ This is similar to mspel_filter, but the input is 16-bit instead of 8-bit and narrow=0 is not supported.
.macro mspel_filter.16 acc0 acc1 acc0_0 acc0_1 dest src0 src1 src2 src3 src4 src5 src6 src7 filter_a filter_b filter_c filter_d reg_a reg_b reg_c reg_d reg_add filter_shift
.if \filter_a != 1
        vmov            \acc0,  \reg_add
        vmov            \acc1,  \reg_add
        vmlsl.s16       \acc0,  \reg_a,  \src0
        vmlsl.s16       \acc1,  \reg_a,  \src1
.else
        vsubw.s16       \acc0,  \reg_add, \src0
        vsubw.s16       \acc1,  \reg_add, \src1
.endif
        vmlal.s16       \acc0,  \reg_b,  \src2
        vmlal.s16       \acc1,  \reg_b,  \src3
.if \filter_b != \filter_c
        vmlal.s16       \acc0,  \reg_c,  \src4
        vmlal.s16       \acc1,  \reg_c,  \src5
.else
        vmlal.s16       \acc0,  \reg_b,  \src4
        vmlal.s16       \acc1,  \reg_b,  \src5
.endif
.if \filter_d != 1
        vmlsl.s16       \acc0,  \reg_d,  \src6
        vmlsl.s16       \acc1,  \reg_d,  \src7
.else
        vsubw.s16       \acc0,  \acc0,   \src6
        vsubw.s16       \acc1,  \acc1,   \src7
.endif
@ Use acc0_0 and acc0_1 as temp space
        vqshrun.s32     \acc0_0, \acc0,  #\filter_shift @ Shift and narrow with saturation from s32 to u16
        vqshrun.s32     \acc0_1, \acc1,  #\filter_shift
        vqmovn.u16      \dest,  \acc0                   @ Narrow with saturation from u16 to u8
.endm
@ Register usage for put_vc1_mspel_mc functions. Registers marked 'hv' are only used in put_vc1_mspel_mc_hv.
@
@ r0 adjusted dst
@ r1 adjusted src
@ r2 stride
@ r3 adjusted rnd
@ r4 [hv] tmp
@ r11 [hv] sp saved
@ r12 loop counter
@ d0 src[-stride]
@ d1 src[0]
@ d2 src[stride]
@ d3 src[stride * 2]
@ q0 [hv] src[-stride]
@ q1 [hv] src[0]
@ q2 [hv] src[stride]
@ q3 [hv] src[stride * 2]
@ d21 often result from mspel_filter
@ q11 accumulator 0
@ q12 [hv] accumulator 1
@ q13 accumulator initial value
@ d28 filter_a
@ d29 filter_b
@ d30 filter_c
@ d31 filter_d
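@ The generated ff_put_vc1_mspel_mc\hmode\()\vmode\()_neon functions below
@ follow this two-pass shape (rough sketch only; vert_tap()/horiz_tap() are
@ hypothetical stand-ins for what mspel_filter and mspel_filter.16 compute):
@
@   int16_t vert_tap(const uint8_t *p, ptrdiff_t stride, int add, int shift);
@   uint8_t horiz_tap(const int16_t *p, ptrdiff_t stride, int add, int shift);
@
@   static void put_mspel_hv_shape(uint8_t *dst, const uint8_t *src,
@                                  ptrdiff_t stride, int rnd,
@                                  int filter_add, int filter_shift)
@   {
@       int16_t tmp[8 * 16];            /* 8 rows of 16 (11 actually used)  */
@       src -= 1 + stride;              /* sub r1, r1, #1 / sub r1, r1, r2  */
@       for (int y = 0; y < 8; y++)     /* vertical pass, narrow=0          */
@           for (int x = 0; x < 11; x++)
@               tmp[y * 16 + x] = vert_tap(src + y * stride + x, stride,
@                                          filter_add + rnd, filter_shift);
@       for (int y = 0; y < 8; y++)     /* horizontal pass, shift is 7;     */
@           for (int x = 0; x < 8; x++) /* 64 - rnd comes from the rsb      */
@               dst[y * stride + x] = horiz_tap(tmp + y * 16 + x, 1,
@                                               64 - rnd, 7);
@   }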
@ (uint8_t *dst [r0], const uint8_t *src [r1], ptrdiff_t stride [r2], int rnd [r3])
.macro put_vc1_mspel_mc_hv hmode vmode filter_h_a filter_h_b filter_h_c filter_h_d filter_v_a filter_v_b filter_v_c filter_v_d filter_add filter_shift
function ff_put_vc1_mspel_mc\hmode\()\vmode\()_neon, export=1
        push            {r4, r11, lr}
        mov             r11, sp                 @ r11 = stack pointer before realignment
A       bic             sp,  sp,  #15           @ sp = round down to multiple of 16 bytes
T       bic             r4,  r11, #15
T       mov             sp,  r4
        sub             sp,  sp,  #(8*2*16)     @ make space for 8 rows * 2 byte per element * 16 elements per row (to fit 11 actual elements per row)
        mov             r4,  sp                 @ r4 = int16_t tmp[8 * 16]
        sub             r1,  r1,  #1            @ src -= 1
.if \filter_add != 0
        add             r3,  r3,  #\filter_add  @ r3 = filter_add + rnd
.endif
        mov             r12, #8                 @ loop counter
        sub             r1,  r1,  r2            @ r1 = &src[-stride] @ slide back
@ Do vertical filtering from src into tmp
        mspel_constants i8, d28, d29, d30, d31, \filter_v_a, \filter_v_b, \filter_v_c, \filter_v_d, q13, r3
        vld1.64         {d0,d1}, [r1], r2
        vld1.64         {d2,d3}, [r1], r2
        vld1.64         {d4,d5}, [r1], r2
1:
        subs            r12, r12, #4
        vld1.64         {d6,d7}, [r1], r2
        mspel_filter    q11, q11, d0, d2, d4, d6, \filter_v_a, \filter_v_b, \filter_v_c, \filter_v_d, d28, d29, d30, d31, q13, \filter_shift, narrow=0
        mspel_filter    q12, q12, d1, d3, d5, d7, \filter_v_a, \filter_v_b, \filter_v_c, \filter_v_d, d28, d29, d30, d31, q13, \filter_shift, narrow=0
        vst1.64         {q11,q12}, [r4,:128]!   @ store and increment
        vld1.64         {d0,d1}, [r1], r2
        mspel_filter    q11, q11, d2, d4, d6, d0, \filter_v_a, \filter_v_b, \filter_v_c, \filter_v_d, d28, d29, d30, d31, q13, \filter_shift, narrow=0
        mspel_filter    q12, q12, d3, d5, d7, d1, \filter_v_a, \filter_v_b, \filter_v_c, \filter_v_d, d28, d29, d30, d31, q13, \filter_shift, narrow=0
        vst1.64         {q11,q12}, [r4,:128]!   @ store and increment
        vld1.64         {d2,d3}, [r1], r2
        mspel_filter    q11, q11, d4, d6, d0, d2, \filter_v_a, \filter_v_b, \filter_v_c, \filter_v_d, d28, d29, d30, d31, q13, \filter_shift, narrow=0
        mspel_filter    q12, q12, d5, d7, d1, d3, \filter_v_a, \filter_v_b, \filter_v_c, \filter_v_d, d28, d29, d30, d31, q13, \filter_shift, narrow=0
        vst1.64         {q11,q12}, [r4,:128]!   @ store and increment
        vld1.64         {d4,d5}, [r1], r2
        mspel_filter    q11, q11, d6, d0, d2, d4, \filter_v_a, \filter_v_b, \filter_v_c, \filter_v_d, d28, d29, d30, d31, q13, \filter_shift, narrow=0
        mspel_filter    q12, q12, d7, d1, d3, d5, \filter_v_a, \filter_v_b, \filter_v_c, \filter_v_d, d28, d29, d30, d31, q13, \filter_shift, narrow=0
        vst1.64         {q11,q12}, [r4,:128]!   @ store and increment
        bne             1b
        rsb             r3,  r3,  #(64 + \filter_add)   @ r3 = (64 + filter_add) - r3
        mov             r12, #8                 @ loop counter
        mov             r4,  sp                 @ r4 = tmp
@ Do horizontal filtering from temp to dst
        mspel_constants i16, d28, d29, d30, d31, \filter_h_a, \filter_h_b, \filter_h_c, \filter_h_d, q13, r3
2:
        subs            r12, r12, #1
        vld1.64         {q0,q1}, [r4,:128]!     @ read one line of tmp
        vext.16         q2,  q0,  q1,  #2
        vext.16         q3,  q0,  q1,  #3
        vext.16         q1,  q0,  q1,  #1       @ do last because it writes to q1 which is read by the other vext instructions
        mspel_filter.16 q11, q12, d22, d23, d21, d0, d1, d2, d3, d4, d5, d6, d7, \filter_h_a, \filter_h_b, \filter_h_c, \filter_h_d, d28, d29, d30, d31, q13, 7
        vst1.64         {d21}, [r0,:64], r2     @ store and increment dst
        bne             2b
        mov             sp,  r11
        pop             {r4, r11, pc}
endfunc
.endm
@ Use C preprocessor and assembler macros to expand to functions for horizontal and vertical filtering.
#define PUT_VC1_MSPEL_MC_HV(hmode, vmode)   \
    put_vc1_mspel_mc_hv hmode, vmode,       \
        MSPEL_MODE_ ## hmode ## _MUL_CONSTANTS,     \
        MSPEL_MODE_ ## vmode ## _MUL_CONSTANTS,     \
        MSPEL_MODES_ ## hmode ## vmode ## _ADDSHIFT_CONSTANTS

PUT_VC1_MSPEL_MC_HV(1, 1)
PUT_VC1_MSPEL_MC_HV(1, 2)
PUT_VC1_MSPEL_MC_HV(1, 3)
PUT_VC1_MSPEL_MC_HV(2, 1)
PUT_VC1_MSPEL_MC_HV(2, 2)
PUT_VC1_MSPEL_MC_HV(2, 3)
PUT_VC1_MSPEL_MC_HV(3, 1)
PUT_VC1_MSPEL_MC_HV(3, 2)
PUT_VC1_MSPEL_MC_HV(3, 3)
#undef PUT_VC1_MSPEL_MC_HV
.macro put_vc1_mspel_mc_h_only hmode filter_a filter_b filter_c filter_d filter_add filter_shift
function ff_put_vc1_mspel_mc\hmode\()0_neon, export=1
        rsb             r3,  r3,  #\filter_add  @ r3 = filter_add - r = filter_add - rnd
        mov             r12, #8                 @ loop counter
        sub             r1,  r1,  #1            @ slide back, using immediate
        mspel_constants i8, d28, d29, d30, d31, \filter_a, \filter_b, \filter_c, \filter_d, q13, r3
1:
        subs            r12, r12, #1
        vld1.64         {d0,d1}, [r1], r2       @ read 16 bytes even though we only need 11, also src += stride
        vext.8          d2,  d0,  d1,  #2
        vext.8          d3,  d0,  d1,  #3
        vext.8          d1,  d0,  d1,  #1       @ do last because it writes to d1 which is read by the other vext instructions
        mspel_filter    q11, d21, d0, d1, d2, d3, \filter_a, \filter_b, \filter_c, \filter_d, d28, d29, d30, d31, q13, \filter_shift
        vst1.64         {d21}, [r0,:64], r2     @ store and increment dst
        bne             1b
        bx              lr
endfunc
.endm
@ Use C preprocessor and assembler macros to expand to functions for horizontal only filtering.
#define PUT_VC1_MSPEL_MC_H_ONLY(hmode)  \
    put_vc1_mspel_mc_h_only hmode, MSPEL_MODE_ ## hmode ## _MUL_CONSTANTS, MSPEL_MODE_ ## hmode ## _ADDSHIFT_CONSTANTS

PUT_VC1_MSPEL_MC_H_ONLY(1)
PUT_VC1_MSPEL_MC_H_ONLY(2)
PUT_VC1_MSPEL_MC_H_ONLY(3)
#undef PUT_VC1_MSPEL_MC_H_ONLY
@ (uint8_t *dst [r0], const uint8_t *src [r1], ptrdiff_t stride [r2], int rnd [r3])
.macro put_vc1_mspel_mc_v_only vmode filter_a filter_b filter_c filter_d filter_add filter_shift
function ff_put_vc1_mspel_mc0\vmode\()_neon, export=1
        add             r3,  r3,  #\filter_add - 1      @ r3 = filter_add - r = filter_add - (1 - rnd) = filter_add - 1 + rnd
        mov             r12, #8                 @ loop counter
        sub             r1,  r1,  r2            @ r1 = &src[-stride] @ slide back
        mspel_constants i8, d28, d29, d30, d31, \filter_a, \filter_b, \filter_c, \filter_d, q13, r3
        vld1.64         {d0}, [r1], r2          @ d0 = src[-stride]
        vld1.64         {d1}, [r1], r2          @ d1 = src[0]
        vld1.64         {d2}, [r1], r2          @ d2 = src[stride]
1:
        subs            r12, r12, #4
        vld1.64         {d3}, [r1], r2          @ d3 = src[stride * 2]
        mspel_filter    q11, d21, d0, d1, d2, d3, \filter_a, \filter_b, \filter_c, \filter_d, d28, d29, d30, d31, q13, \filter_shift
        vst1.64         {d21}, [r0,:64], r2     @ store and increment dst
        vld1.64         {d0}, [r1], r2          @ d0 = next line
        mspel_filter    q11, d21, d1, d2, d3, d0, \filter_a, \filter_b, \filter_c, \filter_d, d28, d29, d30, d31, q13, \filter_shift
        vst1.64         {d21}, [r0,:64], r2     @ store and increment dst
        vld1.64         {d1}, [r1], r2          @ d1 = next line
        mspel_filter    q11, d21, d2, d3, d0, d1, \filter_a, \filter_b, \filter_c, \filter_d, d28, d29, d30, d31, q13, \filter_shift
        vst1.64         {d21}, [r0,:64], r2     @ store and increment dst
        vld1.64         {d2}, [r1], r2          @ d2 = next line
        mspel_filter    q11, d21, d3, d0, d1, d2, \filter_a, \filter_b, \filter_c, \filter_d, d28, d29, d30, d31, q13, \filter_shift
        vst1.64         {d21}, [r0,:64], r2     @ store and increment dst
        bne             1b
        bx              lr
endfunc
.endm
@ Use C preprocessor and assembler macros to expand to functions for vertical only filtering.
#define PUT_VC1_MSPEL_MC_V_ONLY(vmode)  \
    put_vc1_mspel_mc_v_only vmode, MSPEL_MODE_ ## vmode ## _MUL_CONSTANTS, MSPEL_MODE_ ## vmode ## _ADDSHIFT_CONSTANTS

PUT_VC1_MSPEL_MC_V_ONLY(1)
PUT_VC1_MSPEL_MC_V_ONLY(2)
PUT_VC1_MSPEL_MC_V_ONLY(3)
#undef PUT_VC1_MSPEL_MC_V_ONLY
function ff_put_pixels8x8_neon, export=1
        vld1.64         {d0}, [r1], r2
        vld1.64         {d1}, [r1], r2
        vld1.64         {d2}, [r1], r2
        vld1.64         {d3}, [r1], r2
        vld1.64         {d4}, [r1], r2
        vld1.64         {d5}, [r1], r2
        vld1.64         {d6}, [r1], r2
        vld1.64         {d7}, [r1]
        vst1.64         {d0}, [r0,:64], r2
        vst1.64         {d1}, [r0,:64], r2
        vst1.64         {d2}, [r0,:64], r2
        vst1.64         {d3}, [r0,:64], r2
        vst1.64         {d4}, [r0,:64], r2
        vst1.64         {d5}, [r0,:64], r2
        vst1.64         {d6}, [r0,:64], r2
        vst1.64         {d7}, [r0,:64]
        bx              lr
endfunc
function ff_vc1_inv_trans_8x8_dc_neon, export=1
        ldrsh           r2,  [r2]               @ int dc = block[0];
        vld1.64         {d0},  [r0,:64], r1
        vld1.64         {d1},  [r0,:64], r1
        vld1.64         {d4},  [r0,:64], r1
        vld1.64         {d5},  [r0,:64], r1
        add             r2,  r2,  r2,  lsl #1   @ dc = (3 * dc + 1) >> 1;
        vld1.64         {d6},  [r0,:64], r1
        add             r2,  r2,  #1
        vld1.64         {d7},  [r0,:64], r1
        vld1.64         {d16}, [r0,:64], r1
        vld1.64         {d17}, [r0,:64], r1
        asr             r2,  r2,  #1
        sub             r0,  r0,  r1,  lsl #3   @ restore r0 to original value
        add             r2,  r2,  r2,  lsl #1   @ dc = (3 * dc + 16) >> 5;
        add             r2,  r2,  #16
        asr             r2,  r2,  #5
        vdup.16         q1,  r2                 @ dc
        vaddw.u8        q9,  q1,  d0
        vaddw.u8        q10, q1,  d1
        vaddw.u8        q11, q1,  d4
        vaddw.u8        q12, q1,  d5
        vqmovun.s16     d0,  q9
        vqmovun.s16     d1,  q10
        vqmovun.s16     d4,  q11
        vst1.64         {d0},  [r0,:64], r1
        vqmovun.s16     d5,  q12
        vst1.64         {d1},  [r0,:64], r1
        vaddw.u8        q13, q1,  d6
        vst1.64         {d4},  [r0,:64], r1
        vaddw.u8        q14, q1,  d7
        vst1.64         {d5},  [r0,:64], r1
        vaddw.u8        q15, q1,  d16
        vaddw.u8        q1,  q1,  d17           @ this destroys q1
        vqmovun.s16     d6,  q13
        vqmovun.s16     d7,  q14
        vqmovun.s16     d16, q15
        vqmovun.s16     d17, q1
        vst1.64         {d6},  [r0,:64], r1
        vst1.64         {d7},  [r0,:64], r1
        vst1.64         {d16}, [r0,:64], r1
        vst1.64         {d17}, [r0,:64]
        bx              lr
endfunc
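@ In C terms the 8x8 DC-only path above is just the following (sketch based on
@ the inline comments; the 8x4, 4x8 and 4x4 variants differ only in the two dc
@ scalings and the block size):
@
@   static void vc1_dc8x8_shape(uint8_t *dest, ptrdiff_t stride, int16_t *block)
@   {
@       int dc = block[0];
@       dc = (3 * dc +  1) >> 1;
@       dc = (3 * dc + 16) >> 5;
@       for (int y = 0; y < 8; y++)
@           for (int x = 0; x < 8; x++) {
@               int v = dest[y * stride + x] + dc;  /* vaddw.u8 + vqmovun   */
@               dest[y * stride + x] = v < 0 ? 0 : v > 255 ? 255 : v;
@           }
@   }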
function ff_vc1_inv_trans_8x4_dc_neon, export=1
        ldrsh           r2,  [r2]               @ int dc = block[0];
        vld1.64         {d0},  [r0,:64], r1
        vld1.64         {d1},  [r0,:64], r1
        vld1.64         {d4},  [r0,:64], r1
        vld1.64         {d5},  [r0,:64], r1
        add             r2,  r2,  r2,  lsl #1   @ dc = (3 * dc + 1) >> 1;
        sub             r0,  r0,  r1,  lsl #2   @ restore r0 to original value
        add             r2,  r2,  #1
        asr             r2,  r2,  #1
        add             r2,  r2,  r2,  lsl #4   @ dc = (17 * dc + 64) >> 7;
        add             r2,  r2,  #64
        asr             r2,  r2,  #7
        vdup.16         q1,  r2                 @ dc
        vaddw.u8        q3,  q1,  d0
        vaddw.u8        q8,  q1,  d1
        vaddw.u8        q9,  q1,  d4
        vaddw.u8        q10, q1,  d5
        vqmovun.s16     d0,  q3
        vqmovun.s16     d1,  q8
        vqmovun.s16     d4,  q9
        vst1.64         {d0},  [r0,:64], r1
        vqmovun.s16     d5,  q10
        vst1.64         {d1},  [r0,:64], r1
        vst1.64         {d4},  [r0,:64], r1
        vst1.64         {d5},  [r0,:64]
        bx              lr
endfunc
function ff_vc1_inv_trans_4x8_dc_neon, export=1
        ldrsh           r2,  [r2]               @ int dc = block[0];
        vld1.32         {d0[]},  [r0,:32], r1
        vld1.32         {d1[]},  [r0,:32], r1
        vld1.32         {d0[1]}, [r0,:32], r1
        vld1.32         {d1[1]}, [r0,:32], r1
        add             r2,  r2,  r2,  lsl #4   @ dc = (17 * dc + 4) >> 3;
        vld1.32         {d4[]},  [r0,:32], r1
        add             r2,  r2,  #4
        vld1.32         {d5[]},  [r0,:32], r1
        vld1.32         {d4[1]}, [r0,:32], r1
        asr             r2,  r2,  #3
        vld1.32         {d5[1]}, [r0,:32], r1
        add             r2,  r2,  r2,  lsl #1   @ dc = (12 * dc + 64) >> 7;
        sub             r0,  r0,  r1,  lsl #3   @ restore r0 to original value
        lsl             r2,  r2,  #2
        add             r2,  r2,  #64
        asr             r2,  r2,  #7
        vdup.16         q1,  r2                 @ dc
        vaddw.u8        q3,  q1,  d0
        vaddw.u8        q8,  q1,  d1
        vaddw.u8        q9,  q1,  d4
        vaddw.u8        q10, q1,  d5
        vqmovun.s16     d0,  q3
        vst1.32         {d0[0]}, [r0,:32], r1
        vqmovun.s16     d1,  q8
        vst1.32         {d1[0]}, [r0,:32], r1
        vqmovun.s16     d4,  q9
        vst1.32         {d0[1]}, [r0,:32], r1
        vqmovun.s16     d5,  q10
        vst1.32         {d1[1]}, [r0,:32], r1
        vst1.32         {d4[0]}, [r0,:32], r1
        vst1.32         {d5[0]}, [r0,:32], r1
        vst1.32         {d4[1]}, [r0,:32], r1
        vst1.32         {d5[1]}, [r0,:32]
        bx              lr
endfunc
function ff_vc1_inv_trans_4x4_dc_neon, export=1
        ldrsh           r2,  [r2]               @ int dc = block[0];
        vld1.32         {d0[]},  [r0,:32], r1
        vld1.32         {d1[]},  [r0,:32], r1
        vld1.32         {d0[1]}, [r0,:32], r1
        vld1.32         {d1[1]}, [r0,:32], r1
        add             r2,  r2,  r2,  lsl #4   @ dc = (17 * dc + 4) >> 3;
        sub             r0,  r0,  r1,  lsl #2   @ restore r0 to original value
        add             r2,  r2,  #4
        asr             r2,  r2,  #3
        add             r2,  r2,  r2,  lsl #4   @ dc = (17 * dc + 64) >> 7;
        add             r2,  r2,  #64
        asr             r2,  r2,  #7
        vdup.16         q1,  r2                 @ dc
        vaddw.u8        q2,  q1,  d0
        vaddw.u8        q3,  q1,  d1
        vqmovun.s16     d0,  q2
        vst1.32         {d0[0]}, [r0,:32], r1
        vqmovun.s16     d1,  q3
        vst1.32         {d1[0]}, [r0,:32], r1
        vst1.32         {d0[1]}, [r0,:32], r1
        vst1.32         {d1[1]}, [r0,:32]
        bx              lr
endfunc
@ VC-1 in-loop deblocking filter for 4 pixel pairs at boundary of vertically-neighbouring blocks
@ On entry:
@ r0 -> top-left pel of lower block
@ r1 = row stride, bytes
@ r2 = PQUANT bitstream parameter
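@ What the code below computes for each pixel pair, as a C sketch
@ reconstructed from the instruction comments (vc1_filter_pair_sketch is a
@ made-up name; pix points at P5, the first row of the lower block; FFMIN,
@ FFABS and av_clip_uint8 are from libavutil/common.h).  Note that in the
@ NEON version the whole group of 4 (or 8) pairs is skipped when the single
@ pair whose flags get moved to a GP register is not filtered.
@
@   static void vc1_filter_pair_sketch(uint8_t *pix, ptrdiff_t stride, int pq)
@   {
@       int P1 = pix[-4 * stride], P2 = pix[-3 * stride];
@       int P3 = pix[-2 * stride], P4 = pix[-1 * stride];
@       int P5 = pix[0],           P6 = pix[1 * stride];
@       int P7 = pix[2 * stride],  P8 = pix[3 * stride];
@       int a1 = FFABS((2 * P1 - 5 * P2 + 5 * P3 - 2 * P4 + 4) >> 3);
@       int a2 = FFABS((2 * P5 - 5 * P6 + 5 * P7 - 2 * P8 + 4) >> 3);
@       int a0 = (2 * P3 - 5 * P4 + 5 * P5 - 2 * P6 + 4) >> 3;
@       int a3 = FFMIN(a1, a2);
@       int clip = FFABS(P4 - P5) >> 1;
@
@       if (clip == 0 || FFABS(a0) >= pq || a3 >= FFABS(a0))
@           return;                                 /* pair left unfiltered */
@
@       int d = FFMIN((5 * (FFABS(a0) - a3)) >> 3, clip);
@       int f = (P4 - P5 < 0 ? -1 : 0) - (a0 < 0 ? -1 : 0); /* clip_sign - a0_sign */
@       pix[-stride] = av_clip_uint8(P4 - d * f);
@       pix[0]       = av_clip_uint8(P5 + d * f);
@   }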
function ff_vc1_v_loop_filter4_neon, export=1
        sub             r3,  r0,  r1,  lsl #2
        vldr            d0,  .Lcoeffs
        vld1.32         {d1[0]},  [r0], r1      @ P5
        vld1.32         {d2[0]},  [r3], r1      @ P1
        vld1.32         {d3[0]},  [r3], r1      @ P2
        vld1.32         {d4[0]},  [r0], r1      @ P6
        vld1.32         {d5[0]},  [r3], r1      @ P3
        vld1.32         {d6[0]},  [r0], r1      @ P7
        vld1.32         {d7[0]},  [r3]          @ P4
        vld1.32         {d16[0]}, [r0]          @ P8
        vshll.u8        q9,  d1,  #1            @ 2*P5
        vdup.16         d17, r2                 @ pq
        vshll.u8        q10, d2,  #1            @ 2*P1
        vmovl.u8        q11, d3                 @ P2
        vmovl.u8        q1,  d4                 @ P6
        vmovl.u8        q12, d5                 @ P3
        vmls.i16        d20, d22, d0[1]         @ 2*P1-5*P2
        vmovl.u8        q11, d6                 @ P7
        vmls.i16        d18, d2,  d0[1]         @ 2*P5-5*P6
        vshll.u8        q2,  d5,  #1            @ 2*P3
        vmovl.u8        q3,  d7                 @ P4
        vmla.i16        d18, d22, d0[1]         @ 2*P5-5*P6+5*P7
        vmovl.u8        q11, d16                @ P8
        vmla.u16        d20, d24, d0[1]         @ 2*P1-5*P2+5*P3
        vmovl.u8        q12, d1                 @ P5
        vmls.u16        d4,  d6,  d0[1]         @ 2*P3-5*P4
        vmls.u16        d18, d22, d0[0]         @ 2*P5-5*P6+5*P7-2*P8
        vsub.i16        d1,  d6,  d24           @ P4-P5
        vmls.i16        d20, d6,  d0[0]         @ 2*P1-5*P2+5*P3-2*P4
        vmla.i16        d4,  d24, d0[1]         @ 2*P3-5*P4+5*P5
        vmls.i16        d4,  d2,  d0[0]         @ 2*P3-5*P4+5*P5-2*P6
        vabs.s16        d2,  d1
        vrshr.s16       d3,  d18, #3
        vrshr.s16       d5,  d20, #3
        vshr.s16        d2,  d2,  #1            @ clip
        vrshr.s16       d4,  d4,  #3
        vabs.s16        d3,  d3                 @ a2
        vshr.s16        d1,  d1,  #8            @ clip_sign
        vabs.s16        d5,  d5                 @ a1
        vceq.i16        d7,  d2,  #0            @ test clip == 0
        vabs.s16        d16, d4                 @ a0
        vshr.s16        d4,  d4,  #8            @ a0_sign
        vcge.s16        d18, d5,  d3            @ test a1 >= a2
        vcge.s16        d17, d16, d17           @ test a0 >= pq
        vbsl            d18, d3,  d5            @ a3
        vsub.i16        d1,  d1,  d4            @ clip_sign - a0_sign
        vorr            d3,  d7,  d17           @ test clip == 0 || a0 >= pq
        vqsub.u16       d4,  d16, d18           @ a0 >= a3 ? a0-a3 : 0 (a0 > a3 in all cases where filtering is enabled, so makes more sense to subtract this way round than the opposite and then taking the abs)
        vcge.s16        d5,  d18, d16           @ test a3 >= a0
        vmul.i16        d0,  d4,  d0[1]         @ a0 >= a3 ? 5*(a0-a3) : 0
        vorr            d4,  d3,  d5            @ test clip == 0 || a0 >= pq || a3 >= a0
        vmov.32         r0,  d4[1]              @ move to gp reg
        vshr.u16        d0,  d0,  #3            @ a0 >= a3 ? (5*(a0-a3))>>3 : 0
        vcge.s16        d4,  d0,  d2
        tst             r0,  #1
        bne             1f                      @ none of the 4 pixel pairs should be updated if this one is not filtered
        vbsl            d4,  d2,  d0            @ FFMIN(d, clip)
        vbic            d0,  d4,  d3            @ set each d to zero if it should not be filtered because clip == 0 || a0 >= pq (a3 > a0 case already zeroed by saturating sub)
        vmls.i16        d6,  d0,  d1            @ invert d depending on clip_sign & a0_sign, or zero it if they match, and accumulate into P4
        vmla.i16        d24, d0,  d1            @ invert d depending on clip_sign & a0_sign, or zero it if they match, and accumulate into P5
        vqmovun.s16     d0,  q3
        vqmovun.s16     d1,  q12
        vst1.32         {d0[0]}, [r3], r1
        vst1.32         {d1[0]}, [r3]
1:      bx              lr
endfunc
@ VC-1 in-loop deblocking filter for 4 pixel pairs at boundary of horizontally-neighbouring blocks
@ On entry:
@ r0 -> top-left pel of right block
@ r1 = row stride, bytes
@ r2 = PQUANT bitstream parameter
function ff_vc1_h_loop_filter4_neon, export=1
        sub             r3,  r0,  #4            @ where to start reading
        vldr            d0,  .Lcoeffs
        vld1.32         {d2}, [r3], r1
        sub             r0,  r0,  #1            @ where to start writing
        vld1.32         {d4}, [r3], r1
        vld1.32         {d3}, [r3], r1
        vld1.32         {d5}, [r3]
        vdup.16         d1,  r2                 @ pq
        vtrn.8          q1,  q2
        vtrn.16         d2,  d3                 @ P1, P5, P3, P7
        vtrn.16         d4,  d5                 @ P2, P6, P4, P8
        vshll.u8        q3,  d2,  #1            @ 2*P1, 2*P5
        vmovl.u8        q8,  d4                 @ P2, P6
        vmovl.u8        q9,  d3                 @ P3, P7
        vmovl.u8        q2,  d5                 @ P4, P8
        vmls.i16        q3,  q8,  d0[1]         @ 2*P1-5*P2, 2*P5-5*P6
        vshll.u8        q10, d3,  #1            @ 2*P3, 2*P7
        vmovl.u8        q1,  d2                 @ P1, P5
        vmla.i16        q3,  q9,  d0[1]         @ 2*P1-5*P2+5*P3, 2*P5-5*P6+5*P7
        vmls.i16        q3,  q2,  d0[0]         @ 2*P1-5*P2+5*P3-2*P4, 2*P5-5*P6+5*P7-2*P8
        vmov            d2,  d3                 @ needs to be in an even-numbered vector for when we come to narrow it later
        vmls.i16        d20, d4,  d0[1]         @ 2*P3-5*P4
        vmla.i16        d20, d3,  d0[1]         @ 2*P3-5*P4+5*P5
        vsub.i16        d3,  d4,  d2            @ P4-P5
        vmls.i16        d20, d17, d0[0]         @ 2*P3-5*P4+5*P5-2*P6
        vrshr.s16       q3,  q3,  #3
        vabs.s16        d5,  d3
        vshr.s16        d3,  d3,  #8            @ clip_sign
        vrshr.s16       d16, d20, #3
        vabs.s16        q3,  q3                 @ a1, a2
        vshr.s16        d5,  d5,  #1            @ clip
        vabs.s16        d17, d16                @ a0
        vceq.i16        d18, d5,  #0            @ test clip == 0
        vshr.s16        d16, d16, #8            @ a0_sign
        vcge.s16        d19, d6,  d7            @ test a1 >= a2
        vcge.s16        d1,  d17, d1            @ test a0 >= pq
        vsub.i16        d16, d3,  d16           @ clip_sign - a0_sign
        vbsl            d19, d7,  d6            @ a3
        vorr            d1,  d18, d1            @ test clip == 0 || a0 >= pq
        vqsub.u16       d3,  d17, d19           @ a0 >= a3 ? a0-a3 : 0 (a0 > a3 in all cases where filtering is enabled, so makes more sense to subtract this way round than the opposite and then taking the abs)
        vcge.s16        d6,  d19, d17           @ test a3 >= a0
        vmul.i16        d0,  d3,  d0[1]         @ a0 >= a3 ? 5*(a0-a3) : 0
        vorr            d3,  d1,  d6            @ test clip == 0 || a0 >= pq || a3 >= a0
        vmov.32         r2,  d3[1]              @ move to gp reg
        vshr.u16        d0,  d0,  #3            @ a0 >= a3 ? (5*(a0-a3))>>3 : 0
        vcge.s16        d3,  d0,  d5
        tst             r2,  #1
        bne             1f                      @ none of the 4 pixel pairs should be updated if this one is not filtered
        vbsl            d3,  d5,  d0            @ FFMIN(d, clip)
        vbic            d0,  d3,  d1            @ set each d to zero if it should not be filtered because clip == 0 || a0 >= pq (a3 > a0 case already zeroed by saturating sub)
        vmla.i16        d2,  d0,  d16           @ invert d depending on clip_sign & a0_sign, or zero it if they match, and accumulate into P5
        vmls.i16        d4,  d0,  d16           @ invert d depending on clip_sign & a0_sign, or zero it if they match, and accumulate into P4
        vqmovun.s16     d1,  q1
        vqmovun.s16     d0,  q2
        vst2.8          {d0[0], d1[0]}, [r0], r1
        vst2.8          {d0[1], d1[1]}, [r0], r1
        vst2.8          {d0[2], d1[2]}, [r0], r1
        vst2.8          {d0[3], d1[3]}, [r0]
1:      bx              lr
endfunc
@ VC-1 in-loop deblocking filter for 8 pixel pairs at boundary of vertically-neighbouring blocks
@ On entry:
@ r0 -> top-left pel of lower block
@ r1 = row stride, bytes
@ r2 = PQUANT bitstream parameter
function ff_vc1_v_loop_filter8_neon, export=1
        sub             r3,  r0,  r1,  lsl #2
        vldr            d0,  .Lcoeffs
        vld1.32         {d1},  [r0:64], r1      @ P5
        vld1.32         {d2},  [r3:64], r1      @ P1
        vld1.32         {d3},  [r3:64], r1      @ P2
        vld1.32         {d4},  [r0:64], r1      @ P6
        vld1.32         {d5},  [r3:64], r1      @ P3
        vld1.32         {d6},  [r0:64], r1      @ P7
        vshll.u8        q8,  d1,  #1            @ 2*P5
        vshll.u8        q9,  d2,  #1            @ 2*P1
        vld1.32         {d7},  [r3:64]          @ P4
        vmovl.u8        q1,  d3                 @ P2
        vld1.32         {d20}, [r0:64]          @ P8
        vmovl.u8        q11, d4                 @ P6
        vdup.16         q12, r2                 @ pq
        vmovl.u8        q13, d5                 @ P3
        vmls.i16        q9,  q1,  d0[1]         @ 2*P1-5*P2
        vmovl.u8        q1,  d6                 @ P7
        vshll.u8        q2,  d5,  #1            @ 2*P3
        vmls.i16        q8,  q11, d0[1]         @ 2*P5-5*P6
        vmovl.u8        q3,  d7                 @ P4
        vmovl.u8        q10, d20                @ P8
        vmla.i16        q8,  q1,  d0[1]         @ 2*P5-5*P6+5*P7
        vmovl.u8        q1,  d1                 @ P5
        vmla.i16        q9,  q13, d0[1]         @ 2*P1-5*P2+5*P3
        vsub.i16        q13, q3,  q1            @ P4-P5
        vmls.i16        q2,  q3,  d0[1]         @ 2*P3-5*P4
        vmls.i16        q8,  q10, d0[0]         @ 2*P5-5*P6+5*P7-2*P8
        vabs.s16        q10, q13
        vshr.s16        q13, q13, #8            @ clip_sign
        vmls.i16        q9,  q3,  d0[0]         @ 2*P1-5*P2+5*P3-2*P4
        vshr.s16        q10, q10, #1            @ clip
        vmla.i16        q2,  q1,  d0[1]         @ 2*P3-5*P4+5*P5
        vrshr.s16       q8,  q8,  #3
        vmls.i16        q2,  q11, d0[0]         @ 2*P3-5*P4+5*P5-2*P6
        vceq.i16        q11, q10, #0            @ test clip == 0
        vrshr.s16       q9,  q9,  #3
        vabs.s16        q8,  q8                 @ a2
        vabs.s16        q9,  q9                 @ a1
        vrshr.s16       q2,  q2,  #3
        vcge.s16        q14, q9,  q8            @ test a1 >= a2
        vabs.s16        q15, q2                 @ a0
        vshr.s16        q2,  q2,  #8            @ a0_sign
        vbsl            q14, q8,  q9            @ a3
        vcge.s16        q8,  q15, q12           @ test a0 >= pq
        vsub.i16        q2,  q13, q2            @ clip_sign - a0_sign
        vqsub.u16       q9,  q15, q14           @ a0 >= a3 ? a0-a3 : 0 (a0 > a3 in all cases where filtering is enabled, so makes more sense to subtract this way round than the opposite and then taking the abs)
        vcge.s16        q12, q14, q15           @ test a3 >= a0
        vorr            q8,  q11, q8            @ test clip == 0 || a0 >= pq
        vmul.i16        q0,  q9,  d0[1]         @ a0 >= a3 ? 5*(a0-a3) : 0
        vorr            q9,  q8,  q12           @ test clip == 0 || a0 >= pq || a3 >= a0
        vshl.i64        q11, q9,  #16
        vmov.32         r0,  d18[1]             @ move to gp reg
        vshr.u16        q0,  q0,  #3            @ a0 >= a3 ? (5*(a0-a3))>>3 : 0
        vmov.32         r2,  d19[1]
        vshr.s64        q9,  q11, #48
        vcge.s16        q11, q0,  q10
        vorr            q8,  q8,  q9
        and             r0,  r0,  r2
        vbsl            q11, q10, q0            @ FFMIN(d, clip)
        tst             r0,  #1
        bne             1f                      @ none of the 8 pixel pairs should be updated in this case
        vbic            q0,  q11, q8            @ set each d to zero if it should not be filtered
        vmls.i16        q3,  q0,  q2            @ invert d depending on clip_sign & a0_sign, or zero it if they match, and accumulate into P4
        vmla.i16        q1,  q0,  q2            @ invert d depending on clip_sign & a0_sign, or zero it if they match, and accumulate into P5
        vqmovun.s16     d0,  q3
        vqmovun.s16     d1,  q1
        vst1.32         {d0}, [r3:64], r1
        vst1.32         {d1}, [r3:64]
1:      bx              lr
endfunc
.align 5
.Lcoeffs:
.quad 0x00050002
@ VC-1 in-loop deblocking filter for 8 pixel pairs at boundary of horizontally-neighbouring blocks
@ On entry:
@ r0 -> top-left pel of right block
@ r1 = row stride, bytes
@ r2 = PQUANT bitstream parameter
function ff_vc1_h_loop_filter8_neon, export=1
        push            {lr}
        sub             r3,  r0,  #4            @ where to start reading
        vldr            d0,  .Lcoeffs
        vld1.32         {d2},  [r3], r1         @ P1[0], P2[0]...
        sub             r0,  r0,  #1            @ where to start writing
        vld1.32         {d4},  [r3], r1
        add             r12, r0,  r1,  lsl #2
        vld1.32         {d3},  [r3], r1
        vld1.32         {d5},  [r3], r1
        vld1.32         {d6},  [r3], r1
        vld1.32         {d16}, [r3], r1
        vld1.32         {d7},  [r3], r1
        vld1.32         {d17}, [r3]
        vtrn.8          q1,  q2                 @ P1[0], P1[1], P3[0]... P1[2], P1[3], P3[2]... P2[0], P2[1], P4[0]... P2[2], P2[3], P4[2]...
        vdup.16         q9,  r2                 @ pq
        vtrn.16         d2,  d3                 @ P1[0], P1[1], P1[2], P1[3], P5[0]... P3[0], P3[1], P3[2], P3[3], P7[0]...
        vtrn.16         d4,  d5                 @ P2[0], P2[1], P2[2], P2[3], P6[0]... P4[0], P4[1], P4[2], P4[3], P8[0]...
        vtrn.8          q3,  q8                 @ P1[4], P1[5], P3[4]... P1[6], P1[7], P3[6]... P2[4], P2[5], P4[4]... P2[6], P2[7], P4[6]...
        vtrn.16         d6,  d7                 @ P1[4], P1[5], P1[6], P1[7], P5[4]... P3[4], P3[5], P3[6], P3[7], P7[4]...
        vtrn.16         d16, d17                @ P2[4], P2[5], P2[6], P2[7], P6[4]... P4[4], P4[5], P4[6], P4[7], P8[4]...
        vtrn.32         d2,  d6                 @ P1, P5
        vtrn.32         d4,  d16                @ P2, P6
        vtrn.32         d3,  d7                 @ P3, P7
        vtrn.32         d5,  d17                @ P4, P8
        vshll.u8        q10, d2,  #1            @ 2*P1
        vshll.u8        q11, d6,  #1            @ 2*P5
        vmovl.u8        q12, d4                 @ P2
        vmovl.u8        q13, d16                @ P6
        vmovl.u8        q14, d3                 @ P3
        vmls.i16        q10, q12, d0[1]         @ 2*P1-5*P2
        vmovl.u8        q12, d7                 @ P7
        vshll.u8        q1,  d3,  #1            @ 2*P3
        vmls.i16        q11, q13, d0[1]         @ 2*P5-5*P6
        vmovl.u8        q2,  d5                 @ P4
        vmovl.u8        q8,  d17                @ P8
        vmla.i16        q11, q12, d0[1]         @ 2*P5-5*P6+5*P7
        vmovl.u8        q3,  d6                 @ P5
        vmla.i16        q10, q14, d0[1]         @ 2*P1-5*P2+5*P3
        vsub.i16        q12, q2,  q3            @ P4-P5
        vmls.i16        q1,  q2,  d0[1]         @ 2*P3-5*P4
        vmls.i16        q11, q8,  d0[0]         @ 2*P5-5*P6+5*P7-2*P8
        vabs.s16        q8,  q12
        vshr.s16        q12, q12, #8            @ clip_sign
        vmls.i16        q10, q2,  d0[0]         @ 2*P1-5*P2+5*P3-2*P4
        vshr.s16        q8,  q8,  #1            @ clip
        vmla.i16        q1,  q3,  d0[1]         @ 2*P3-5*P4+5*P5
        vrshr.s16       q11, q11, #3
        vmls.i16        q1,  q13, d0[0]         @ 2*P3-5*P4+5*P5-2*P6
        vceq.i16        q13, q8,  #0            @ test clip == 0
        vrshr.s16       q10, q10, #3
        vabs.s16        q11, q11                @ a2
        vabs.s16        q10, q10                @ a1
        vrshr.s16       q1,  q1,  #3
        vcge.s16        q14, q10, q11           @ test a1 >= a2
        vabs.s16        q15, q1                 @ a0
        vshr.s16        q1,  q1,  #8            @ a0_sign
        vbsl            q14, q11, q10           @ a3
        vcge.s16        q9,  q15, q9            @ test a0 >= pq
        vsub.i16        q1,  q12, q1            @ clip_sign - a0_sign
        vqsub.u16       q10, q15, q14           @ a0 >= a3 ? a0-a3 : 0 (a0 > a3 in all cases where filtering is enabled, so makes more sense to subtract this way round than the opposite and then taking the abs)
        vcge.s16        q11, q14, q15           @ test a3 >= a0
        vorr            q9,  q13, q9            @ test clip == 0 || a0 >= pq
        vmul.i16        q0,  q10, d0[1]         @ a0 >= a3 ? 5*(a0-a3) : 0
        vorr            q10, q9,  q11           @ test clip == 0 || a0 >= pq || a3 >= a0
        vmov.32         r2,  d20[1]             @ move to gp reg
        vshr.u16        q0,  q0,  #3            @ a0 >= a3 ? (5*(a0-a3))>>3 : 0
        vmov.32         r3,  d21[1]
        vcge.s16        q10, q0,  q8
        and             r14, r2,  r3
        vbsl            q10, q8,  q0            @ FFMIN(d, clip)
        tst             r14, #1
        bne             2f                      @ none of the 8 pixel pairs should be updated in this case
        vbic            q0,  q10, q9            @ set each d to zero if it should not be filtered because clip == 0 || a0 >= pq (a3 > a0 case already zeroed by saturating sub)
        vmla.i16        q3,  q0,  q1            @ invert d depending on clip_sign & a0_sign, or zero it if they match, and accumulate into P5
        vmls.i16        q2,  q0,  q1            @ invert d depending on clip_sign & a0_sign, or zero it if they match, and accumulate into P4
        vqmovun.s16     d1,  q3
        vqmovun.s16     d0,  q2
        tst             r2,  #1
        bne             1f                      @ none of the first 4 pixel pairs should be updated if so
        vst2.8          {d0[0], d1[0]}, [r0], r1
        vst2.8          {d0[1], d1[1]}, [r0], r1
        vst2.8          {d0[2], d1[2]}, [r0], r1
        vst2.8          {d0[3], d1[3]}, [r0]
1:      tst             r3,  #1
        bne             2f                      @ none of the second 4 pixel pairs should be updated if so
        vst2.8          {d0[4], d1[4]}, [r12], r1
        vst2.8          {d0[5], d1[5]}, [r12], r1
        vst2.8          {d0[6], d1[6]}, [r12], r1
        vst2.8          {d0[7], d1[7]}, [r12]
2:      pop             {pc}
endfunc
@ VC-1 in-loop deblocking filter for 16 pixel pairs at boundary of vertically-neighbouring blocks
@ On entry:
@ r0 -> top-left pel of lower block
@ r1 = row stride, bytes
@ r2 = PQUANT bitstream parameter
function ff_vc1_v_loop_filter16_neon, export=1
        vpush           {d8-d15}
        sub             r3,  r0,  r1,  lsl #2
        vldr            d0,  .Lcoeffs
        vld1.64         {q1},  [r0:128], r1     @ P5
        vld1.64         {q2},  [r3:128], r1     @ P1
        vld1.64         {q3},  [r3:128], r1     @ P2
        vld1.64         {q4},  [r0:128], r1     @ P6
        vld1.64         {q5},  [r3:128], r1     @ P3
        vld1.64         {q6},  [r0:128], r1     @ P7
        vshll.u8        q7,  d2,  #1            @ 2*P5[0..7]
        vshll.u8        q8,  d4,  #1            @ 2*P1[0..7]
        vld1.64         {q9},  [r3:128]         @ P4
        vmovl.u8        q10, d6                 @ P2[0..7]
        vld1.64         {q11}, [r0:128]         @ P8
        vmovl.u8        q12, d8                 @ P6[0..7]
        vdup.16         q13, r2                 @ pq
        vshll.u8        q2,  d5,  #1            @ 2*P1[8..15]
        vmls.i16        q8,  q10, d0[1]         @ 2*P1[0..7]-5*P2[0..7]
        vshll.u8        q10, d3,  #1            @ 2*P5[8..15]
        vmovl.u8        q3,  d7                 @ P2[8..15]
        vmls.i16        q7,  q12, d0[1]         @ 2*P5[0..7]-5*P6[0..7]
        vmovl.u8        q4,  d9                 @ P6[8..15]
        vmovl.u8        q14, d10                @ P3[0..7]
        vmovl.u8        q15, d12                @ P7[0..7]
        vmls.i16        q2,  q3,  d0[1]         @ 2*P1[8..15]-5*P2[8..15]
        vshll.u8        q3,  d10, #1            @ 2*P3[0..7]
        vmls.i16        q10, q4,  d0[1]         @ 2*P5[8..15]-5*P6[8..15]
        vmovl.u8        q6,  d13                @ P7[8..15]
        vmla.i16        q8,  q14, d0[1]         @ 2*P1[0..7]-5*P2[0..7]+5*P3[0..7]
        vmovl.u8        q14, d18                @ P4[0..7]
        vmovl.u8        q9,  d19                @ P4[8..15]
        vmla.i16        q7,  q15, d0[1]         @ 2*P5[0..7]-5*P6[0..7]+5*P7[0..7]
        vmovl.u8        q15, d11                @ P3[8..15]
        vshll.u8        q5,  d11, #1            @ 2*P3[8..15]
        vmls.i16        q3,  q14, d0[1]         @ 2*P3[0..7]-5*P4[0..7]
        vmla.i16        q2,  q15, d0[1]         @ 2*P1[8..15]-5*P2[8..15]+5*P3[8..15]
        vmovl.u8        q15, d22                @ P8[0..7]
        vmovl.u8        q11, d23                @ P8[8..15]
        vmla.i16        q10, q6,  d0[1]         @ 2*P5[8..15]-5*P6[8..15]+5*P7[8..15]
        vmovl.u8        q6,  d2                 @ P5[0..7]
        vmovl.u8        q1,  d3                 @ P5[8..15]
        vmls.i16        q5,  q9,  d0[1]         @ 2*P3[8..15]-5*P4[8..15]
        vmls.i16        q8,  q14, d0[0]         @ 2*P1[0..7]-5*P2[0..7]+5*P3[0..7]-2*P4[0..7]
        vmls.i16        q7,  q15, d0[0]         @ 2*P5[0..7]-5*P6[0..7]+5*P7[0..7]-2*P8[0..7]
        vsub.i16        q15, q14, q6            @ P4[0..7]-P5[0..7]
        vmla.i16        q3,  q6,  d0[1]         @ 2*P3[0..7]-5*P4[0..7]+5*P5[0..7]
        vrshr.s16       q8,  q8,  #3
        vmls.i16        q2,  q9,  d0[0]         @ 2*P1[8..15]-5*P2[8..15]+5*P3[8..15]-2*P4[8..15]
        vrshr.s16       q7,  q7,  #3
        vmls.i16        q10, q11, d0[0]         @ 2*P5[8..15]-5*P6[8..15]+5*P7[8..15]-2*P8[8..15]
        vabs.s16        q11, q15
        vabs.s16        q8,  q8                 @ a1[0..7]
        vmla.i16        q5,  q1,  d0[1]         @ 2*P3[8..15]-5*P4[8..15]+5*P5[8..15]
        vshr.s16        q15, q15, #8            @ clip_sign[0..7]
        vrshr.s16       q2,  q2,  #3
        vmls.i16        q3,  q12, d0[0]         @ 2*P3[0..7]-5*P4[0..7]+5*P5[0..7]-2*P6[0..7]
        vabs.s16        q7,  q7                 @ a2[0..7]
        vrshr.s16       q10, q10, #3
        vsub.i16        q12, q9,  q1            @ P4[8..15]-P5[8..15]
        vshr.s16        q11, q11, #1            @ clip[0..7]
        vmls.i16        q5,  q4,  d0[0]         @ 2*P3[8..15]-5*P4[8..15]+5*P5[8..15]-2*P6[8..15]
        vcge.s16        q4,  q8,  q7            @ test a1[0..7] >= a2[0..7]
        vabs.s16        q2,  q2                 @ a1[8..15]
        vrshr.s16       q3,  q3,  #3
        vabs.s16        q10, q10                @ a2[8..15]
        vbsl            q4,  q7,  q8            @ a3[0..7]
        vabs.s16        q7,  q12
        vshr.s16        q8,  q12, #8            @ clip_sign[8..15]
        vrshr.s16       q5,  q5,  #3
        vcge.s16    q12, q2, q10  @ test a1[8..15] >= a2[8..15]
        vshr.s16    q7, q7, #1  @ clip[8..15]
        vbsl        q12, q10, q2  @ a3[8..15]
        vabs.s16    q2, q3  @ a0[0..7]
        vceq.i16    q10, q11, #0  @ test clip[0..7] == 0
        vshr.s16    q3, q3, #8  @ a0_sign[0..7]
        vsub.i16    q3, q15, q3  @ clip_sign[0..7] - a0_sign[0..7]
        vcge.s16    q15, q2, q13  @ test a0[0..7] >= pq
        vorr        q10, q10, q15  @ test clip[0..7] == 0 || a0[0..7] >= pq
        vqsub.u16   q15, q2, q4  @ a0[0..7] >= a3[0..7] ? a0[0..7]-a3[0..7] : 0 (a0 > a3 in all cases where filtering is enabled, so makes more sense to subtract this way round than the opposite and then taking the abs)
        vcge.s16    q2, q4, q2  @ test a3[0..7] >= a0[0..7]
        vabs.s16    q4, q5  @ a0[8..15]
        vshr.s16    q5, q5, #8  @ a0_sign[8..15]
        vmul.i16    q15, q15, d0[1]  @ a0[0..7] >= a3[0..7] ? 5*(a0[0..7]-a3[0..7]) : 0
        vcge.s16    q13, q4, q13  @ test a0[8..15] >= pq
        vorr        q2, q10, q2  @ test clip[0..7] == 0 || a0[0..7] >= pq || a3[0..7] >= a0[0..7]
        vsub.i16    q5, q8, q5  @ clip_sign[8..15] - a0_sign[8..15]
        vceq.i16    q8, q7, #0  @ test clip[8..15] == 0
        vshr.u16    q15, q15, #3  @ a0[0..7] >= a3[0..7] ? (5*(a0[0..7]-a3[0..7]))>>3 : 0
        vmov.32     r0, d4[1]  @ move to gp reg
        vorr        q8, q8, q13  @ test clip[8..15] == 0 || a0[8..15] >= pq
        vqsub.u16   q13, q4, q12  @ a0[8..15] >= a3[8..15] ? a0[8..15]-a3[8..15] : 0 (a0 > a3 in all cases where filtering is enabled, so makes more sense to subtract this way round than the opposite and then taking the abs)
        vmov.32     r2, d5[1]
        vcge.s16    q4, q12, q4  @ test a3[8..15] >= a0[8..15]
        vshl.i64    q2, q2, #16
        vcge.s16    q12, q15, q11
        vmul.i16    q0, q13, d0[1]  @ a0[8..15] >= a3[8..15] ? 5*(a0[8..15]-a3[8..15]) : 0
        vorr        q4, q8, q4  @ test clip[8..15] == 0 || a0[8..15] >= pq || a3[8..15] >= a0[8..15]
        vshr.s64    q2, q2, #48
        and         r0, r0, r2
        vbsl        q12, q11, q15  @ FFMIN(d[0..7], clip[0..7])
        vshl.i64    q11, q4, #16
        vmov.32     r2, d8[1]
        vshr.u16    q0, q0, #3  @ a0[8..15] >= a3[8..15] ? (5*(a0[8..15]-a3[8..15]))>>3 : 0
        vorr        q2, q10, q2
        vmov.32     r12, d9[1]
        vshr.s64    q4, q11, #48
        vcge.s16    q10, q0, q7
        vbic        q2, q12, q2  @ set each d[0..7] to zero if it should not be filtered because clip[0..7] == 0 || a0[0..7] >= pq (a3 > a0 case already zeroed by saturating sub)
        vorr        q4, q8, q4
        and         r2, r2, r12
        vbsl        q10, q7, q0  @ FFMIN(d[8..15], clip[8..15])
        vmls.i16    q14, q2, q3  @ invert d[0..7] depending on clip_sign[0..7] & a0_sign[0..7], or zero it if they match, and accumulate into P4[0..7]
        and         r0, r0, r2
        vbic        q0, q10, q4  @ set each d[8..15] to zero if it should not be filtered because clip[8..15] == 0 || a0[8..15] >= pq (a3 > a0 case already zeroed by saturating sub)
        tst         r0, #1
        bne         1f  @ none of the 16 pixel pairs should be updated in this case
        vmla.i16    q6, q2, q3  @ invert d[0..7] depending on clip_sign[0..7] & a0_sign[0..7], or zero it if they match, and accumulate into P5[0..7]
        vmls.i16    q9, q0, q5  @ invert d[8..15] depending on clip_sign[8..15] & a0_sign[8..15], or zero it if they match, and accumulate into P4[8..15]
        vqmovun.s16 d4, q14
        vmla.i16    q1, q0, q5  @ invert d[8..15] depending on clip_sign[8..15] & a0_sign[8..15], or zero it if they match, and accumulate into P5[8..15]
        vqmovun.s16 d0, q6
        vqmovun.s16 d5, q9
        vqmovun.s16 d1, q1
        vst1.64     {q2}, [r3 :128], r1
        vst1.64     {q0}, [r3 :128]
1:      vpop        {d8-d15}
        bx          lr
endfunc
@ VC-1 in-loop deblocking filter for 16 pixel pairs at boundary of horizontally-neighbouring blocks
@ On entry:
@ r0 -> top-left pel of right block
@ r1 = row stride, bytes
@ r2 = PQUANT bitstream parameter
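@
@ The per-row arithmetic is the same as in the C sketch given before
@ ff_vc1_v_loop_filter16_neon; only the addressing differs. For row y the
@ eight samples lie along the row (a hypothetical scalar model, with r0 as
@ passed in):
@
@     uint8_t *p = r0 + y * stride;   /* r0 -> top-left pel of right block */
@     int P1 = p[-4], P2 = p[-3], P3 = p[-2], P4 = p[-1];
@     int P5 = p[ 0], P6 = p[ 1], P7 = p[ 2], P8 = p[ 3];
@
@ Hence the loads below start 4 bytes left of the boundary, and the
@ vtrn.8/vtrn.16/vtrn.32 ladder transposes the 16x8 byte tile so that each Pn
@ for all 16 rows lands in one register, letting the vertical-filter maths be
@ reused unchanged; only the filtered P4/P5 pairs are written back, one row at
@ a time, via vst2.8 lane stores.
@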
function ff_vc1_h_loop_filter16_neon, export=1
        push        {r4-r6,lr}
        vpush       {d8-d15}
        sub         r3, r0, #4  @ where to start reading
        vldr        d0, .Lcoeffs
        vld1.32     {d2}, [r3], r1  @ P1[0], P2[0]...
        sub         r0, r0, #1  @ where to start writing
        vld1.32     {d3}, [r3], r1
        add         r4, r0, r1, lsl #2
        vld1.32     {d10}, [r3], r1
        vld1.32     {d11}, [r3], r1
        vld1.32     {d16}, [r3], r1
        vld1.32     {d4}, [r3], r1
        vld1.32     {d8}, [r3], r1
        vtrn.8      d2, d3  @ P1[0], P1[1], P3[0]... P2[0], P2[1], P4[0]...
        vld1.32     {d14}, [r3], r1
        vld1.32     {d5}, [r3], r1
        vtrn.8      d10, d11  @ P1[2], P1[3], P3[2]... P2[2], P2[3], P4[2]...
        vld1.32     {d6}, [r3], r1
        vld1.32     {d12}, [r3], r1
        vtrn.8      d16, d4  @ P1[4], P1[5], P3[4]... P2[4], P2[5], P4[4]...
        vld1.32     {d13}, [r3], r1
        vtrn.16     d2, d10  @ P1[0], P1[1], P1[2], P1[3], P5[0]... P3[0], P3[1], P3[2], P3[3], P7[0]...
        vld1.32     {d1}, [r3], r1
        vtrn.8      d8, d14  @ P1[6], P1[7], P3[6]... P2[6], P2[7], P4[6]...
        vld1.32     {d7}, [r3], r1
        vtrn.16     d3, d11  @ P2[0], P2[1], P2[2], P2[3], P6[0]... P4[0], P4[1], P4[2], P4[3], P8[0]...
        vld1.32     {d9}, [r3], r1
        vtrn.8      d5, d6  @ P1[8], P1[9], P3[8]... P2[8], P2[9], P4[8]...
        vld1.32     {d15}, [r3]
        vtrn.16     d16, d8  @ P1[4], P1[5], P1[6], P1[7], P5[4]... P3[4], P3[5], P3[6], P3[7], P7[4]...
        vtrn.16     d4, d14  @ P2[4], P2[5], P2[6], P2[7], P6[4]... P4[4], P4[5], P4[6], P4[7], P8[4]...
        vtrn.8      d12, d13  @ P1[10], P1[11], P3[10]... P2[10], P2[11], P4[10]...
        vdup.16     q9, r2  @ pq
        vtrn.8      d1, d7  @ P1[12], P1[13], P3[12]... P2[12], P2[13], P4[12]...
        vtrn.32     d2, d16  @ P1[0..7], P5[0..7]
        vtrn.16     d5, d12  @ P1[8], P1[9], P1[10], P1[11], P5[8]... P3[8], P3[9], P3[10], P3[11], P7[8]...
        vtrn.16     d6, d13  @ P2[8], P2[9], P2[10], P2[11], P6[8]... P4[8], P4[9], P4[10], P4[11], P8[8]...
        vtrn.8      d9, d15  @ P1[14], P1[15], P3[14]... P2[14], P2[15], P4[14]...
        vtrn.32     d3, d4  @ P2[0..7], P6[0..7]
        vshll.u8    q10, d2, #1  @ 2*P1[0..7]
        vtrn.32     d10, d8  @ P3[0..7], P7[0..7]
        vshll.u8    q11, d16, #1  @ 2*P5[0..7]
        vtrn.32     d11, d14  @ P4[0..7], P8[0..7]
        vtrn.16     d1, d9  @ P1[12], P1[13], P1[14], P1[15], P5[12]... P3[12], P3[13], P3[14], P3[15], P7[12]...
        vtrn.16     d7, d15  @ P2[12], P2[13], P2[14], P2[15], P6[12]... P4[12], P4[13], P4[14], P4[15], P8[12]...
        vmovl.u8    q1, d3  @ P2[0..7]
        vmovl.u8    q12, d4  @ P6[0..7]
        vtrn.32     d5, d1  @ P1[8..15], P5[8..15]
        vtrn.32     d6, d7  @ P2[8..15], P6[8..15]
        vtrn.32     d12, d9  @ P3[8..15], P7[8..15]
        vtrn.32     d13, d15  @ P4[8..15], P8[8..15]
        vmls.i16    q10, q1, d0[1]  @ 2*P1[0..7]-5*P2[0..7]
        vmovl.u8    q1, d10  @ P3[0..7]
        vshll.u8    q2, d5, #1  @ 2*P1[8..15]
        vshll.u8    q13, d1, #1  @ 2*P5[8..15]
        vmls.i16    q11, q12, d0[1]  @ 2*P5[0..7]-5*P6[0..7]
        vmovl.u8    q14, d6  @ P2[8..15]
        vmovl.u8    q3, d7  @ P6[8..15]
        vmovl.u8    q15, d8  @ P7[0..7]
        vmla.i16    q10, q1, d0[1]  @ 2*P1[0..7]-5*P2[0..7]+5*P3[0..7]
        vmovl.u8    q1, d12  @ P3[8..15]
        vmls.i16    q2, q14, d0[1]  @ 2*P1[8..15]-5*P2[8..15]
        vmovl.u8    q4, d9  @ P7[8..15]
        vshll.u8    q14, d10, #1  @ 2*P3[0..7]
        vmls.i16    q13, q3, d0[1]  @ 2*P5[8..15]-5*P6[8..15]
        vmovl.u8    q5, d11  @ P4[0..7]
        vmla.i16    q11, q15, d0[1]  @ 2*P5[0..7]-5*P6[0..7]+5*P7[0..7]
        vshll.u8    q15, d12, #1  @ 2*P3[8..15]
        vmovl.u8    q6, d13  @ P4[8..15]
        vmla.i16    q2, q1, d0[1]  @ 2*P1[8..15]-5*P2[8..15]+5*P3[8..15]
        vmovl.u8    q1, d14  @ P8[0..7]
        vmovl.u8    q7, d15  @ P8[8..15]
        vmla.i16    q13, q4, d0[1]  @ 2*P5[8..15]-5*P6[8..15]+5*P7[8..15]
        vmovl.u8    q4, d16  @ P5[0..7]
        vmovl.u8    q8, d1  @ P5[8..15]
        vmls.i16    q14, q5, d0[1]  @ 2*P3[0..7]-5*P4[0..7]
        vmls.i16    q15, q6, d0[1]  @ 2*P3[8..15]-5*P4[8..15]
        vmls.i16    q10, q5, d0[0]  @ 2*P1[0..7]-5*P2[0..7]+5*P3[0..7]-2*P4[0..7]
        vmls.i16    q11, q1, d0[0]  @ 2*P5[0..7]-5*P6[0..7]+5*P7[0..7]-2*P8[0..7]
        vsub.i16    q1, q5, q4  @ P4[0..7]-P5[0..7]
        vmls.i16    q2, q6, d0[0]  @ 2*P1[8..15]-5*P2[8..15]+5*P3[8..15]-2*P4[8..15]
        vrshr.s16   q10, q10, #3
        vmls.i16    q13, q7, d0[0]  @ 2*P5[8..15]-5*P6[8..15]+5*P7[8..15]-2*P8[8..15]
        vsub.i16    q7, q6, q8  @ P4[8..15]-P5[8..15]
        vrshr.s16   q11, q11, #3
        vmla.s16    q14, q4, d0[1]  @ 2*P3[0..7]-5*P4[0..7]+5*P5[0..7]
        vrshr.s16   q2, q2, #3
        vmla.i16    q15, q8, d0[1]  @ 2*P3[8..15]-5*P4[8..15]+5*P5[8..15]
        vabs.s16    q10, q10  @ a1[0..7]
        vrshr.s16   q13, q13, #3
        vmls.i16    q15, q3, d0[0]  @ 2*P3[8..15]-5*P4[8..15]+5*P5[8..15]-2*P6[8..15]
        vabs.s16    q3, q11  @ a2[0..7]
        vabs.s16    q2, q2  @ a1[8..15]
        vmls.i16    q14, q12, d0[0]  @ 2*P3[0..7]-5*P4[0..7]+5*P5[0..7]-2*P6[0..7]
        vabs.s16    q11, q1
        vabs.s16    q12, q13  @ a2[8..15]
        vcge.s16    q13, q10, q3  @ test a1[0..7] >= a2[0..7]
        vshr.s16    q1, q1, #8  @ clip_sign[0..7]
        vrshr.s16   q15, q15, #3
        vshr.s16    q11, q11, #1  @ clip[0..7]
        vrshr.s16   q14, q14, #3
        vbsl        q13, q3, q10  @ a3[0..7]
        vcge.s16    q3, q2, q12  @ test a1[8..15] >= a2[8..15]
        vabs.s16    q10, q15  @ a0[8..15]
        vshr.s16    q15, q15, #8  @ a0_sign[8..15]
        vbsl        q3, q12, q2  @ a3[8..15]
        vabs.s16    q2, q14  @ a0[0..7]
        vabs.s16    q12, q7
        vshr.s16    q7, q7, #8  @ clip_sign[8..15]
        vshr.s16    q14, q14, #8  @ a0_sign[0..7]
        vshr.s16    q12, q12, #1  @ clip[8..15]
        vsub.i16    q7, q7, q15  @ clip_sign[8..15] - a0_sign[8..15]
        vqsub.u16   q15, q10, q3  @ a0[8..15] >= a3[8..15] ? a0[8..15]-a3[8..15] : 0 (a0 > a3 in all cases where filtering is enabled, so makes more sense to subtract this way round than the opposite and then taking the abs)
        vcge.s16    q3, q3, q10  @ test a3[8..15] >= a0[8..15]
        vcge.s16    q10, q10, q9  @ test a0[8..15] >= pq
        vcge.s16    q9, q2, q9  @ test a0[0..7] >= pq
        vsub.i16    q1, q1, q14  @ clip_sign[0..7] - a0_sign[0..7]
        vqsub.u16   q14, q2, q13  @ a0[0..7] >= a3[0..7] ? a0[0..7]-a3[0..7] : 0 (a0 > a3 in all cases where filtering is enabled, so makes more sense to subtract this way round than the opposite and then taking the abs)
        vcge.s16    q2, q13, q2  @ test a3[0..7] >= a0[0..7]
        vmul.i16    q13, q15, d0[1]  @ a0[8..15] >= a3[8..15] ? 5*(a0[8..15]-a3[8..15]) : 0
        vceq.i16    q15, q11, #0  @ test clip[0..7] == 0
        vmul.i16    q0, q14, d0[1]  @ a0[0..7] >= a3[0..7] ? 5*(a0[0..7]-a3[0..7]) : 0
        vorr        q9, q15, q9  @ test clip[0..7] == 0 || a0[0..7] >= pq
        vceq.i16    q14, q12, #0  @ test clip[8..15] == 0
        vshr.u16    q13, q13, #3  @ a0[8..15] >= a3[8..15] ? (5*(a0[8..15]-a3[8..15]))>>3 : 0
        vorr        q2, q9, q2  @ test clip[0..7] == 0 || a0[0..7] >= pq || a3[0..7] >= a0[0..7]
        vshr.u16    q0, q0, #3  @ a0[0..7] >= a3[0..7] ? (5*(a0[0..7]-a3[0..7]))>>3 : 0
        vorr        q10, q14, q10  @ test clip[8..15] == 0 || a0[8..15] >= pq
        vcge.s16    q14, q13, q12
        vmov.32     r2, d4[1]  @ move to gp reg
        vorr        q3, q10, q3  @ test clip[8..15] == 0 || a0[8..15] >= pq || a3[8..15] >= a0[8..15]
        vmov.32     r3, d5[1]
        vcge.s16    q2, q0, q11
        vbsl        q14, q12, q13  @ FFMIN(d[8..15], clip[8..15])
        vbsl        q2, q11, q0  @ FFMIN(d[0..7], clip[0..7])
        vmov.32     r5, d6[1]
        vbic        q0, q14, q10  @ set each d[8..15] to zero if it should not be filtered because clip[8..15] == 0 || a0[8..15] >= pq (a3 > a0 case already zeroed by saturating sub)
        vmov.32     r6, d7[1]
        and         r12, r2, r3
        vbic        q2, q2, q9  @ set each d[0..7] to zero if it should not be filtered because clip[0..7] == 0 || a0[0..7] >= pq (a3 > a0 case already zeroed by saturating sub)
        vmls.i16    q6, q0, q7  @ invert d[8..15] depending on clip_sign[8..15] & a0_sign[8..15], or zero it if they match, and accumulate into P4
        vmls.i16    q5, q2, q1  @ invert d[0..7] depending on clip_sign[0..7] & a0_sign[0..7], or zero it if they match, and accumulate into P4
        and         r14, r5, r6
        vmla.i16    q4, q2, q1  @ invert d[0..7] depending on clip_sign[0..7] & a0_sign[0..7], or zero it if they match, and accumulate into P5
        and         r12, r12, r14
        vqmovun.s16 d4, q6
        vmla.i16    q8, q0, q7  @ invert d[8..15] depending on clip_sign[8..15] & a0_sign[8..15], or zero it if they match, and accumulate into P5
        tst         r12, #1
        bne         4f  @ none of the 16 pixel pairs should be updated in this case
        vqmovun.s16 d2, q5
        vqmovun.s16 d3, q4
        vqmovun.s16 d5, q8
        tst         r2, #1
        bne         1f
        vst2.8      {d2[0], d3[0]}, [r0], r1
        vst2.8      {d2[1], d3[1]}, [r0], r1
        vst2.8      {d2[2], d3[2]}, [r0], r1
        vst2.8      {d2[3], d3[3]}, [r0]
1:      add         r0, r4, r1, lsl #2
        tst         r3, #1
        bne         2f
        vst2.8      {d2[4], d3[4]}, [r4], r1
        vst2.8      {d2[5], d3[5]}, [r4], r1
        vst2.8      {d2[6], d3[6]}, [r4], r1
        vst2.8      {d2[7], d3[7]}, [r4]
2:      add         r4, r0, r1, lsl #2
        tst         r5, #1
        bne         3f
        vst2.8      {d4[0], d5[0]}, [r0], r1
        vst2.8      {d4[1], d5[1]}, [r0], r1
        vst2.8      {d4[2], d5[2]}, [r0], r1
        vst2.8      {d4[3], d5[3]}, [r0]
3:      tst         r6, #1
        bne         4f
        vst2.8      {d4[4], d5[4]}, [r4], r1
        vst2.8      {d4[5], d5[5]}, [r4], r1
        vst2.8      {d4[6], d5[6]}, [r4], r1
        vst2.8      {d4[7], d5[7]}, [r4]
4:      vpop        {d8-d15}
        pop         {r4-r6,pc}
endfunc