@@ -304,16 +304,23 @@ static void vc1_inv_trans_8x4_altivec(uint8_t *dest, int stride, int16_t *block)
     src2 = vec_pack(s2, sA);
     src3 = vec_pack(s3, sB);
 
+#if HAVE_BIGENDIAN
     p0 = vec_lvsl (0, dest);
     p1 = vec_lvsl (stride, dest);
     p = vec_splat_u8 (-1);
     perm0 = vec_mergeh (p, p0);
     perm1 = vec_mergeh (p, p1);
+#define GET_TMP2(dst, p)        \
+    tmp = vec_ld (0, dst);      \
+    tmp2 = (vector signed short)vec_perm (tmp, vec_splat_u8(0), p);
+#else
+#define GET_TMP2(dst,p)         \
+    tmp = vec_vsx_ld (0, dst);  \
+    tmp2 = (vector signed short)vec_mergeh (tmp, vec_splat_u8(0));
+#endif
 
 #define ADD(dest,src,perm)                                              \
-    /* *(uint64_t *)&tmp = *(uint64_t *)dest; */                        \
-    tmp = vec_ld (0, dest);                                             \
-    tmp2 = (vector signed short)vec_perm (tmp, vec_splat_u8(0), perm);  \
+    GET_TMP2(dest, perm);                                               \
     tmp3 = vec_adds (tmp2, src);                                        \
     tmp = vec_packsu (tmp3, tmp3);                                      \
     vec_ste ((vector unsigned int)tmp, 0, (unsigned int *)dest);        \
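Note (reviewer sketch, not part of the patch): on both endiannesses GET_TMP2
just zero-extends 8 pixel bytes into 16-bit lanes; big-endian does it with a
vec_lvsl-derived permute against a zero vector, little-endian with an
element-order-correct vec_vsx_ld followed by vec_mergeh against zeros. A
minimal scalar equivalent of what one ADD invocation computes (hypothetical
helper name, plain C so it runs anywhere):

#include <stdint.h>

/* Add an 8-element 16-bit residual row to 8 pixel bytes, clamping to
 * [0, 255] -- the scalar meaning of GET_TMP2 + vec_adds + vec_packsu +
 * the two 4-byte vec_ste stores. */
static void add_row_scalar(uint8_t *dest, const int16_t *src)
{
    for (int i = 0; i < 8; i++) {
        int v = dest[i] + src[i];                 /* tmp2 + src            */
        dest[i] = v < 0 ? 0 : v > 255 ? 255 : v;  /* vec_packsu saturation */
    }
}

The intermediate vec_adds saturation at +/-32767 never changes the final
byte, since any sum above 255 clamps to 255 either way.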