@@ -39,24 +39,22 @@ static void get_pixels_altivec(int16_t *restrict block, const uint8_t *pixels,
                                int line_size)
 {
     int i;
-    vector unsigned char perm = vec_lvsl(0, pixels);
-    const vector unsigned char zero =
-        (const vector unsigned char) vec_splat_u8(0);
+    vec_u8 perm = vec_lvsl(0, pixels);
+    const vec_u8 zero = (const vec_u8)vec_splat_u8(0);
 
     for (i = 0; i < 8; i++) {
         /* Read potentially unaligned pixels.
          * We're reading 16 pixels, and actually only want 8,
          * but we simply ignore the extras. */
-        vector unsigned char pixl = vec_ld(0, pixels);
-        vector unsigned char pixr = vec_ld(7, pixels);
-        vector unsigned char bytes = vec_perm(pixl, pixr, perm);
+        vec_u8 pixl = vec_ld(0, pixels);
+        vec_u8 pixr = vec_ld(7, pixels);
+        vec_u8 bytes = vec_perm(pixl, pixr, perm);
 
         // Convert the bytes into shorts.
-        vector signed short shorts = (vector signed short) vec_mergeh(zero,
-                                                                      bytes);
+        vec_s16 shorts = (vec_s16)vec_mergeh(zero, bytes);
 
         // Save the data to the block, we assume the block is 16-byte aligned.
-        vec_st(shorts, i * 16, (vector signed short *) block);
+        vec_st(shorts, i * 16, (vec_s16 *)block);
 
         pixels += line_size;
     }
@@ -66,22 +64,21 @@ static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1,
                                 const uint8_t *s2, int stride)
 {
     int i;
-    vector unsigned char perm1 = vec_lvsl(0, s1);
-    vector unsigned char perm2 = vec_lvsl(0, s2);
-    const vector unsigned char zero =
-        (const vector unsigned char) vec_splat_u8(0);
-    vector signed short shorts1, shorts2;
+    vec_u8 perm1 = vec_lvsl(0, s1);
+    vec_u8 perm2 = vec_lvsl(0, s2);
+    const vec_u8 zero = (const vec_u8)vec_splat_u8(0);
+    vec_s16 shorts1, shorts2;
 
     for (i = 0; i < 4; i++) {
         /* Read potentially unaligned pixels.
          * We're reading 16 pixels, and actually only want 8,
          * but we simply ignore the extras. */
-        vector unsigned char pixl = vec_ld(0, s1);
-        vector unsigned char pixr = vec_ld(15, s1);
-        vector unsigned char bytes = vec_perm(pixl, pixr, perm1);
+        vec_u8 pixl = vec_ld(0, s1);
+        vec_u8 pixr = vec_ld(15, s1);
+        vec_u8 bytes = vec_perm(pixl, pixr, perm1);
 
         // Convert the bytes into shorts.
-        shorts1 = (vector signed short) vec_mergeh(zero, bytes);
+        shorts1 = (vec_s16)vec_mergeh(zero, bytes);
 
         // Do the same for the second block of pixels.
         pixl = vec_ld(0, s2);
@@ -89,13 +86,13 @@ static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1,
         bytes = vec_perm(pixl, pixr, perm2);
 
         // Convert the bytes into shorts.
-        shorts2 = (vector signed short) vec_mergeh(zero, bytes);
+        shorts2 = (vec_s16)vec_mergeh(zero, bytes);
 
         // Do the subtraction.
         shorts1 = vec_sub(shorts1, shorts2);
 
         // Save the data to the block, we assume the block is 16-byte aligned.
-        vec_st(shorts1, 0, (vector signed short *) block);
+        vec_st(shorts1, 0, (vec_s16 *)block);
 
         s1 += stride;
         s2 += stride;
@@ -112,7 +109,7 @@ static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1,
         bytes = vec_perm(pixl, pixr, perm1);
 
         // Convert the bytes into shorts.
-        shorts1 = (vector signed short) vec_mergeh(zero, bytes);
+        shorts1 = (vec_s16)vec_mergeh(zero, bytes);
 
         // Do the same for the second block of pixels.
         pixl = vec_ld(0, s2);
@@ -120,13 +117,13 @@ static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1,
         bytes = vec_perm(pixl, pixr, perm2);
 
         // Convert the bytes into shorts.
-        shorts2 = (vector signed short) vec_mergeh(zero, bytes);
+        shorts2 = (vec_s16)vec_mergeh(zero, bytes);
 
         // Do the subtraction.
         shorts1 = vec_sub(shorts1, shorts2);
 
         // Save the data to the block, we assume the block is 16-byte aligned.
-        vec_st(shorts1, 0, (vector signed short *) block);
+        vec_st(shorts1, 0, (vec_s16 *)block);
 
         s1 += stride;
         s2 += stride;
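The hunks above only rename the vector types (vector unsigned char becomes vec_u8, vector signed short becomes vec_s16); the load/widen/store idiom itself is unchanged. Below is a minimal, self-contained sketch of that idiom, assuming vec_u8/vec_s16 are typedefs for vector unsigned char and vector signed short (as in FFmpeg's libavutil/ppc/util_altivec.h) and using a hypothetical helper name. It illustrates the classic big-endian AltiVec pattern and is not part of this patch.

#include <altivec.h>
#include <stdint.h>

/* Illustrative stand-ins for the typedefs assumed by the patch. */
typedef vector unsigned char vec_u8;
typedef vector signed short  vec_s16;

/* Hypothetical helper: read 8 possibly unaligned bytes from src,
 * zero-extend them to 16 bits, and store them to a 16-byte-aligned dst. */
static void widen_row_altivec(int16_t *dst, const uint8_t *src)
{
    vec_u8 perm       = vec_lvsl(0, src);       /* permute vector encoding src's misalignment */
    const vec_u8 zero = (const vec_u8) vec_splat_u8(0);

    vec_u8 pixl  = vec_ld(0, src);              /* aligned quadword containing src[0] */
    vec_u8 pixr  = vec_ld(7, src);              /* aligned quadword containing src[7] */
    vec_u8 bytes = vec_perm(pixl, pixr, perm);  /* realigned bytes; only the first 8 are used */

    /* Interleaving zero bytes with the pixel bytes zero-extends them
     * to shorts (big-endian element order assumed). */
    vec_s16 shorts = (vec_s16) vec_mergeh(zero, bytes);

    vec_st(shorts, 0, (vec_s16 *) dst);         /* dst must be 16-byte aligned */
}

In these terms, get_pixels_altivec() is this helper applied to eight consecutive rows of the source, while diff_pixels_altivec() widens a row from each of s1 and s2 the same way and stores vec_sub(shorts1, shorts2) instead of the raw values.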