@@ -235,7 +235,7 @@ static void vc1_h_loop_filter16_c(uint8_t *src, int stride, int pq)
 }
 
 /* Do inverse transform on 8x8 block */
-static void vc1_inv_trans_8x8_dc_c(uint8_t *dest, int linesize, int16_t *block)
+static void vc1_inv_trans_8x8_dc_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
 {
     int i;
     int dc = block[0];
@@ -252,7 +252,7 @@ static void vc1_inv_trans_8x8_dc_c(uint8_t *dest, int linesize, int16_t *block)
         dest[5] = av_clip_uint8(dest[5] + dc);
         dest[6] = av_clip_uint8(dest[6] + dc);
         dest[7] = av_clip_uint8(dest[7] + dc);
-        dest += linesize;
+        dest += stride;
     }
 }
 
@@ -326,7 +326,7 @@ static void vc1_inv_trans_8x8_c(int16_t block[64])
 }
 
 /* Do inverse transform on 8x4 part of block */
-static void vc1_inv_trans_8x4_dc_c(uint8_t *dest, int linesize, int16_t *block)
+static void vc1_inv_trans_8x4_dc_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
 {
     int i;
     int dc = block[0];
@@ -343,11 +343,11 @@ static void vc1_inv_trans_8x4_dc_c(uint8_t *dest, int linesize, int16_t *block)
         dest[5] = av_clip_uint8(dest[5] + dc);
         dest[6] = av_clip_uint8(dest[6] + dc);
         dest[7] = av_clip_uint8(dest[7] + dc);
-        dest += linesize;
+        dest += stride;
     }
 }
 
-static void vc1_inv_trans_8x4_c(uint8_t *dest, int linesize, int16_t *block)
+static void vc1_inv_trans_8x4_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
 {
     int i;
     register int t1, t2, t3, t4, t5, t6, t7, t8;
@@ -392,10 +392,10 @@ static void vc1_inv_trans_8x4_c(uint8_t *dest, int linesize, int16_t *block)
         t3 = 22 * src[ 8] + 10 * src[24];
         t4 = 22 * src[24] - 10 * src[ 8];
 
-        dest[0 * linesize] = av_clip_uint8(dest[0 * linesize] + ((t1 + t3) >> 7));
-        dest[1 * linesize] = av_clip_uint8(dest[1 * linesize] + ((t2 - t4) >> 7));
-        dest[2 * linesize] = av_clip_uint8(dest[2 * linesize] + ((t2 + t4) >> 7));
-        dest[3 * linesize] = av_clip_uint8(dest[3 * linesize] + ((t1 - t3) >> 7));
+        dest[0 * stride] = av_clip_uint8(dest[0 * stride] + ((t1 + t3) >> 7));
+        dest[1 * stride] = av_clip_uint8(dest[1 * stride] + ((t2 - t4) >> 7));
+        dest[2 * stride] = av_clip_uint8(dest[2 * stride] + ((t2 + t4) >> 7));
+        dest[3 * stride] = av_clip_uint8(dest[3 * stride] + ((t1 - t3) >> 7));
 
         src++;
         dest++;
@@ -403,7 +403,7 @@ static void vc1_inv_trans_8x4_c(uint8_t *dest, int linesize, int16_t *block)
 }
 
 /* Do inverse transform on 4x8 parts of block */
-static void vc1_inv_trans_4x8_dc_c(uint8_t *dest, int linesize, int16_t *block)
+static void vc1_inv_trans_4x8_dc_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
 {
     int i;
     int dc = block[0];
@@ -416,11 +416,11 @@ static void vc1_inv_trans_4x8_dc_c(uint8_t *dest, int linesize, int16_t *block)
         dest[1] = av_clip_uint8(dest[1] + dc);
         dest[2] = av_clip_uint8(dest[2] + dc);
         dest[3] = av_clip_uint8(dest[3] + dc);
-        dest += linesize;
+        dest += stride;
     }
 }
 
-static void vc1_inv_trans_4x8_c(uint8_t *dest, int linesize, int16_t *block)
+static void vc1_inv_trans_4x8_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
 {
     int i;
     register int t1, t2, t3, t4, t5, t6, t7, t8;
@@ -461,14 +461,14 @@ static void vc1_inv_trans_4x8_c(uint8_t *dest, int linesize, int16_t *block)
         t3 = 9 * src[ 8] - 16 * src[24] + 4 * src[40] + 15 * src[56];
         t4 = 4 * src[ 8] - 9 * src[24] + 15 * src[40] - 16 * src[56];
 
-        dest[0 * linesize] = av_clip_uint8(dest[0 * linesize] + ((t5 + t1) >> 7));
-        dest[1 * linesize] = av_clip_uint8(dest[1 * linesize] + ((t6 + t2) >> 7));
-        dest[2 * linesize] = av_clip_uint8(dest[2 * linesize] + ((t7 + t3) >> 7));
-        dest[3 * linesize] = av_clip_uint8(dest[3 * linesize] + ((t8 + t4) >> 7));
-        dest[4 * linesize] = av_clip_uint8(dest[4 * linesize] + ((t8 - t4 + 1) >> 7));
-        dest[5 * linesize] = av_clip_uint8(dest[5 * linesize] + ((t7 - t3 + 1) >> 7));
-        dest[6 * linesize] = av_clip_uint8(dest[6 * linesize] + ((t6 - t2 + 1) >> 7));
-        dest[7 * linesize] = av_clip_uint8(dest[7 * linesize] + ((t5 - t1 + 1) >> 7));
+        dest[0 * stride] = av_clip_uint8(dest[0 * stride] + ((t5 + t1) >> 7));
+        dest[1 * stride] = av_clip_uint8(dest[1 * stride] + ((t6 + t2) >> 7));
+        dest[2 * stride] = av_clip_uint8(dest[2 * stride] + ((t7 + t3) >> 7));
+        dest[3 * stride] = av_clip_uint8(dest[3 * stride] + ((t8 + t4) >> 7));
+        dest[4 * stride] = av_clip_uint8(dest[4 * stride] + ((t8 - t4 + 1) >> 7));
+        dest[5 * stride] = av_clip_uint8(dest[5 * stride] + ((t7 - t3 + 1) >> 7));
+        dest[6 * stride] = av_clip_uint8(dest[6 * stride] + ((t6 - t2 + 1) >> 7));
+        dest[7 * stride] = av_clip_uint8(dest[7 * stride] + ((t5 - t1 + 1) >> 7));
 
         src++;
         dest++;
@@ -476,7 +476,7 @@ static void vc1_inv_trans_4x8_c(uint8_t *dest, int linesize, int16_t *block)
 }
 
 /* Do inverse transform on 4x4 part of block */
-static void vc1_inv_trans_4x4_dc_c(uint8_t *dest, int linesize, int16_t *block)
+static void vc1_inv_trans_4x4_dc_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
 {
     int i;
     int dc = block[0];
@@ -489,11 +489,11 @@ static void vc1_inv_trans_4x4_dc_c(uint8_t *dest, int linesize, int16_t *block)
         dest[1] = av_clip_uint8(dest[1] + dc);
         dest[2] = av_clip_uint8(dest[2] + dc);
         dest[3] = av_clip_uint8(dest[3] + dc);
-        dest += linesize;
+        dest += stride;
     }
 }
 
-static void vc1_inv_trans_4x4_c(uint8_t *dest, int linesize, int16_t *block)
+static void vc1_inv_trans_4x4_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
 {
     int i;
     register int t1, t2, t3, t4;
@@ -523,10 +523,10 @@ static void vc1_inv_trans_4x4_c(uint8_t *dest, int linesize, int16_t *block)
         t3 = 22 * src[8] + 10 * src[24];
         t4 = 22 * src[24] - 10 * src[8];
 
-        dest[0 * linesize] = av_clip_uint8(dest[0 * linesize] + ((t1 + t3) >> 7));
-        dest[1 * linesize] = av_clip_uint8(dest[1 * linesize] + ((t2 - t4) >> 7));
-        dest[2 * linesize] = av_clip_uint8(dest[2 * linesize] + ((t2 + t4) >> 7));
-        dest[3 * linesize] = av_clip_uint8(dest[3 * linesize] + ((t1 - t3) >> 7));
+        dest[0 * stride] = av_clip_uint8(dest[0 * stride] + ((t1 + t3) >> 7));
+        dest[1 * stride] = av_clip_uint8(dest[1 * stride] + ((t2 - t4) >> 7));
+        dest[2 * stride] = av_clip_uint8(dest[2 * stride] + ((t2 + t4) >> 7));
+        dest[3 * stride] = av_clip_uint8(dest[3 * stride] + ((t1 - t3) >> 7));
 
         src++;
         dest++;