@@ -113,6 +113,116 @@ func ff_vc1_inv_trans_4x4_dc_rvv, zve32x
        ret
endfunc
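
# One-dimensional 8-point VC-1 inverse transform over eight row vectors.
# Operates in place on v0-v7 and clobbers much of v8-v31. The caller
# passes its return address in t0 (hence .variant_cc below), so both
# transform passes can reuse this helper without spilling ra.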
.variant_cc ff_vc1_inv_trans_8_rvv
func ff_vc1_inv_trans_8_rvv, zve32x
        li      t4, 12
        vsll.vi v18, v6, 4
        li      t2, 6
        vmul.vx v8, v0, t4
        li      t5, 15
        vmul.vx v10, v4, t4
        li      t3, 9
        # t[2..5] = [6 9 12 15]
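        # The scalar constants above drive the even half (rows 0, 2, 4, 6):
        #   t1 = 12*(x0 + x4)        t3 = 16*x2 +  6*x6
        #   t2 = 12*(x0 - x4)        t4 =  6*x2 - 16*x6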
        vsll.vi v12, v2, 4
        vmul.vx v14, v6, t2
        vmul.vx v16, v2, t2
        vadd.vv v26, v12, v14  # t3
        vadd.vv v24, v8, v10   # t1
        vsub.vv v25, v8, v10   # t2
        vsub.vv v27, v16, v18  # t4
        vadd.vv v28, v24, v26  # t5
        vsub.vv v31, v24, v26  # t8
        vadd.vv v29, v25, v27  # t6
        vsub.vv v30, v25, v27  # t7
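        # Odd half (rows 1, 3, 5, 7): the {16, 15, 9, 4} coefficient set,
        # rotated with sign changes per output:
        #   t1 = 16*x1 + 15*x3 +  9*x5 +  4*x7
        #   t2 = 15*x1 -  4*x3 - 16*x5 -  9*x7
        #   t3 =  9*x1 - 16*x3 +  4*x5 + 15*x7
        #   t4 =  4*x1 -  9*x3 + 15*x5 - 16*x7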
        vsll.vi v20, v1, 4
        vmul.vx v21, v3, t5
        vmul.vx v22, v5, t3
        vsll.vi v23, v7, 2
        vadd.vv v20, v20, v21
        vadd.vv v22, v22, v23
        vsll.vi v21, v3, 2
        vadd.vv v16, v20, v22  # t1
        vmul.vx v20, v1, t5
        vsll.vi v22, v5, 4
        vmul.vx v23, v7, t3
        vsub.vv v20, v20, v21
        vadd.vv v22, v22, v23
        vsll.vi v21, v3, 4
        vsub.vv v17, v20, v22  # t2
        vmul.vx v20, v1, t3
        vsll.vi v22, v5, 2
        vmul.vx v23, v7, t5
        vsub.vv v20, v20, v21
        vadd.vv v22, v22, v23
        vmul.vx v21, v3, t3
        vadd.vv v18, v20, v22  # t3
        vsll.vi v20, v1, 2
        vmul.vx v22, v5, t5
        vsll.vi v23, v7, 4
        vsub.vv v20, v20, v21
        vsub.vv v22, v22, v23
        vadd.vv v0, v28, v16
        vadd.vv v19, v20, v22  # t4
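        # Output butterfly: y[i] = (t5..t8)[i] + (t1..t4)[i] for the top
        # half, y[7-i] = (t5..t8)[i] - (t1..t4)[i] for the bottom half.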
        vadd.vv v1, v29, v17
        vadd.vv v2, v30, v18
        vadd.vv v3, v31, v19
        vsub.vv v4, v31, v19
        vsub.vv v5, v30, v18
        vsub.vv v6, v29, v17
        vsub.vv v7, v28, v16
        jr      t0
endfunc
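
# Full 8x8 inverse transform: one pass over the rows, a transpose through
# memory via the segmented store, then one pass over the columns.
# csrwi vxrm, 0 selects round-to-nearest-up, so each vssra.vi below adds
# 2^(shift-1) before shifting. A hypothetical C-side call, assuming the
# usual vc1_inv_trans_8x8 prototype: ff_vc1_inv_trans_8x8_rvv(block),
# with int16_t block[64] holding row-major coefficients (a0).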
func ff_vc1_inv_trans_8x8_rvv, zve32x
        csrwi   vxrm, 0
        vsetivli zero, 8, e16, m1, ta, ma
        addi    a1, a0, 1 * 8 * 2
        vle16.v v0, (a0)
        addi    a2, a0, 2 * 8 * 2
        vle16.v v1, (a1)
        addi    a3, a0, 3 * 8 * 2
        vle16.v v2, (a2)
        addi    a4, a0, 4 * 8 * 2
        vle16.v v3, (a3)
        addi    a5, a0, 5 * 8 * 2
        vle16.v v4, (a4)
        addi    a6, a0, 6 * 8 * 2
        vle16.v v5, (a5)
        addi    a7, a0, 7 * 8 * 2
        vle16.v v6, (a6)
        vle16.v v7, (a7)
        jal     t0, ff_vc1_inv_trans_8_rvv
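        # First (row) pass done: rescale with (x + 4) >> 3, then transpose
        # by storing the eight rows element-interleaved and reloading them.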
        .irp    n,0,1,2,3,4,5,6,7
        vssra.vi v\n, v\n, 3
        .endr
        vsseg8e16.v v0, (a0)
        .irp    n,0,1,2,3,4,5,6,7
        vle16.v v\n, (a\n)
        .endr
        jal     t0, ff_vc1_inv_trans_8_rvv
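        # Second (column) pass: outputs round as (x + 64) >> 7, with an
        # extra +1 folded into rows 4-7 first, matching the asymmetric
        # rounding of the scalar VC-1 inverse transform.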
        vadd.vi v4, v4, 1
        vadd.vi v5, v5, 1
        vssra.vi v4, v4, 7
        vssra.vi v5, v5, 7
        vse16.v v4, (a4)
        vadd.vi v6, v6, 1
        vse16.v v5, (a5)
        vadd.vi v7, v7, 1
        vssra.vi v6, v6, 7
        vssra.vi v7, v7, 7
        vse16.v v6, (a6)
        vssra.vi v0, v0, 7
        vse16.v v7, (a7)
        vssra.vi v1, v1, 7
        vse16.v v0, (a0)
        vssra.vi v2, v2, 7
        vse16.v v1, (a1)
        vssra.vi v3, v3, 7
        vse16.v v2, (a2)
        vse16.v v3, (a3)
        ret
endfunc
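
# Presumably a helper for the mspel (quarter-pel) motion-compensation
# routines that follow: \op expands to vle8/vse8, so one macro body covers
# both the load and store variants; \pos is the current byte pointer and
# a2 looks to be the line stride.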
.macro mspel_op op pos n1 n2
        add     t1, \pos, a2
        v\op\()e8.v v\n1, (\pos)