removed unused code - began to merge Alpha-specific code - added multiply-accumulate (MAC16/MUL16) macros for suitable CPUs

Originally committed as revision 667 to svn://svn.ffmpeg.org/ffmpeg/trunk
Fabrice Bellard 23 years ago
parent 16300e23d5
commit 412ba501b1
  1 changed file: libavcodec/simple_idct.c (449 changes)

@@ -47,61 +47,29 @@
#define COL_SHIFT 20 // 6
#endif
/* 8x8 Matrix used to do a trivial (slow) 8 point IDCT */
static int coeff[64]={
W4, W4, W4, W4, W4, W4, W4, W4,
W1, W3, W5, W7,-W7,-W5,-W3,-W1,
W2, W6,-W6,-W2,-W2,-W6, W6, W2,
W3,-W7,-W1,-W5, W5, W1, W7,-W3,
W4,-W4,-W4, W4, W4,-W4,-W4, W4,
W5,-W1, W7, W3,-W3,-W7, W1,-W5,
W6,-W2, W2,-W6,-W6, W2,-W2, W6,
W7,-W5, W3,-W1, W1,-W3, W5,-W7
};
static inline int idctRowCondZ (int16_t * row)
{
int a0, a1, a2, a3, b0, b1, b2, b3;
#ifdef ARCH_ALPHA
#define FAST_64BIT
#endif
if( !( ((uint32_t*)row)[0]|((uint32_t*)row)[1] |((uint32_t*)row)[2] |((uint32_t*)row)[3])) {
/* row[0] = row[1] = row[2] = row[3] = row[4] =
row[5] = row[6] = row[7] = 0;*/
return 0;
}
#if defined(ARCH_POWERPC_405)
if(!( ((uint32_t*)row)[2] |((uint32_t*)row)[3] )){
a0 = W4*row[0] + W2*row[2] + (1<<(ROW_SHIFT-1));
a1 = W4*row[0] + W6*row[2] + (1<<(ROW_SHIFT-1));
a2 = W4*row[0] - W6*row[2] + (1<<(ROW_SHIFT-1));
a3 = W4*row[0] - W2*row[2] + (1<<(ROW_SHIFT-1));
b0 = W1*row[1] + W3*row[3];
b1 = W3*row[1] - W7*row[3];
b2 = W5*row[1] - W1*row[3];
b3 = W7*row[1] - W5*row[3];
}else{
a0 = W4*row[0] + W2*row[2] + W4*row[4] + W6*row[6] + (1<<(ROW_SHIFT-1));
a1 = W4*row[0] + W6*row[2] - W4*row[4] - W2*row[6] + (1<<(ROW_SHIFT-1));
a2 = W4*row[0] - W6*row[2] - W4*row[4] + W2*row[6] + (1<<(ROW_SHIFT-1));
a3 = W4*row[0] - W2*row[2] + W4*row[4] - W6*row[6] + (1<<(ROW_SHIFT-1));
b0 = W1*row[1] + W3*row[3] + W5*row[5] + W7*row[7];
b1 = W3*row[1] - W7*row[3] - W1*row[5] - W5*row[7];
b2 = W5*row[1] - W1*row[3] + W7*row[5] + W3*row[7];
b3 = W7*row[1] - W5*row[3] + W3*row[5] - W1*row[7];
}
/* signed 16x16 -> 32 multiply add accumulate */
#define MAC16(rt, ra, rb) \
asm ("maclhw %0, %2, %3" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb));
row[0] = (a0 + b0) >> ROW_SHIFT;
row[1] = (a1 + b1) >> ROW_SHIFT;
row[2] = (a2 + b2) >> ROW_SHIFT;
row[3] = (a3 + b3) >> ROW_SHIFT;
row[4] = (a3 - b3) >> ROW_SHIFT;
row[5] = (a2 - b2) >> ROW_SHIFT;
row[6] = (a1 - b1) >> ROW_SHIFT;
row[7] = (a0 - b0) >> ROW_SHIFT;
/* signed 16x16 -> 32 multiply */
#define MUL16(rt, ra, rb) \
asm ("mullhw %0, %1, %2" : "=r" (rt) : "r" (ra), "r" (rb));
return 1;
}
#else
/* signed 16x16 -> 32 multiply add accumulate */
#define MAC16(rt, ra, rb) rt += (ra) * (rb)
/* signed 16x16 -> 32 multiply */
#define MUL16(rt, ra, rb) rt = (ra) * (rb)
#endif
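
The PPC 405 versions of MAC16 and MUL16 above map directly to the maclhw/mullhw halfword multiply-accumulate instructions used in the inline asm; elsewhere the generic fallbacks expand to plain integer arithmetic, so the same source builds on CPUs without such an instruction. A minimal, self-contained sketch of how the generic macros behave (the weights and row values here are made up for illustration and are not taken from this file):

#include <stdint.h>
#include <stdio.h>

/* Generic fallbacks, as defined in the diff above. */
#define MAC16(rt, ra, rb) rt += (ra) * (rb)
#define MUL16(rt, ra, rb) rt = (ra) * (rb)

int main(void)
{
    int16_t row1 = 100, row3 = -50;  /* hypothetical row entries */
    int w1 = 22725, w3 = 19266;      /* placeholder weights for the example */
    int b0;
    MUL16(b0, w1, row1);             /* b0  = w1 * row1 */
    MAC16(b0, w3, row3);             /* b0 += w3 * row3 */
    printf("b0 = %d\n", b0);         /* 22725*100 + 19266*(-50) = 1309200 */
    return 0;
}
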
#ifdef ARCH_ALPHA
/* 0: all entries 0, 1: only first entry nonzero, 2: otherwise */
@@ -129,10 +97,10 @@ static inline int idctRowCondDC(int16_t *row)
}
}
a0 = W4 * row[0];
a1 = W4 * row[0];
a2 = W4 * row[0];
a3 = W4 * row[0];
a0 = (W4 * row[0]) + (1 << (ROW_SHIFT - 1));
a1 = a0;
a2 = a0;
a3 = a0;
if (row[2]) {
a0 += W2 * row[2];
@@ -155,11 +123,6 @@ static inline int idctRowCondDC(int16_t *row)
a3 -= W6 * row[6];
}
a0 += 1 << (ROW_SHIFT - 1);
a1 += 1 << (ROW_SHIFT - 1);
a2 += 1 << (ROW_SHIFT - 1);
a3 += 1 << (ROW_SHIFT - 1);
if (row[1]) {
b0 = W1 * row[1];
b1 = W3 * row[1];
@@ -205,38 +168,86 @@ static inline int idctRowCondDC(int16_t *row)
return 2;
}
#else /* not ARCH_ALPHA */
static inline int idctRowCondDC (int16_t * row)
static inline void idctRowCondDC (int16_t * row)
{
int a0, a1, a2, a3, b0, b1, b2, b3;
#ifdef FAST_64BIT
uint64_t temp;
#else
uint32_t temp;
#endif
if( !( ((uint32_t*)row)[1] |((uint32_t*)row)[2] |((uint32_t*)row)[3]| row[1])) {
// row[0] = row[1] = row[2] = row[3] = row[4] = row[5] = row[6] = row[7] = row[0]<<3;
uint16_t temp= row[0]<<3;
((uint32_t*)row)[0]=((uint32_t*)row)[1]=
((uint32_t*)row)[2]=((uint32_t*)row)[3]= temp + (temp<<16);
return 0;
#ifdef FAST_64BIT
#ifdef WORDS_BIGENDIAN
#define ROW0_MASK 0xffff000000000000LL
#else
#define ROW0_MASK 0xffffLL
#endif
if ( ((((uint64_t *)row)[0] & ~ROW0_MASK) |
((uint64_t *)row)[1]) == 0) {
temp = (row[0] << 3) & 0xffff;
temp += temp << 16;
temp += temp << 32;
((uint64_t *)row)[0] = temp;
((uint64_t *)row)[1] = temp;
return;
}
#else
if (!(((uint32_t*)row)[1] |
((uint32_t*)row)[2] |
((uint32_t*)row)[3] |
row[1])) {
temp = (row[0] << 3) & 0xffff;
temp += temp << 16;
((uint32_t*)row)[0]=((uint32_t*)row)[1] =
((uint32_t*)row)[2]=((uint32_t*)row)[3] = temp;
return;
}
#endif
a0 = (W4 * row[0]) + (1 << (ROW_SHIFT - 1));
a1 = a0;
a2 = a0;
a3 = a0;
if(!( ((uint32_t*)row)[2] |((uint32_t*)row)[3] )){
a0 = W4*row[0] + W2*row[2] + (1<<(ROW_SHIFT-1));
a1 = W4*row[0] + W6*row[2] + (1<<(ROW_SHIFT-1));
a2 = W4*row[0] - W6*row[2] + (1<<(ROW_SHIFT-1));
a3 = W4*row[0] - W2*row[2] + (1<<(ROW_SHIFT-1));
b0 = W1*row[1] + W3*row[3];
b1 = W3*row[1] - W7*row[3];
b2 = W5*row[1] - W1*row[3];
b3 = W7*row[1] - W5*row[3];
}else{
a0 = W4*row[0] + W2*row[2] + W4*row[4] + W6*row[6] + (1<<(ROW_SHIFT-1));
a1 = W4*row[0] + W6*row[2] - W4*row[4] - W2*row[6] + (1<<(ROW_SHIFT-1));
a2 = W4*row[0] - W6*row[2] - W4*row[4] + W2*row[6] + (1<<(ROW_SHIFT-1));
a3 = W4*row[0] - W2*row[2] + W4*row[4] - W6*row[6] + (1<<(ROW_SHIFT-1));
b0 = W1*row[1] + W3*row[3] + W5*row[5] + W7*row[7];
b1 = W3*row[1] - W7*row[3] - W1*row[5] - W5*row[7];
b2 = W5*row[1] - W1*row[3] + W7*row[5] + W3*row[7];
b3 = W7*row[1] - W5*row[3] + W3*row[5] - W1*row[7];
/* no need to optimize : gcc does it */
a0 += W2 * row[2];
a1 += W6 * row[2];
a2 -= W6 * row[2];
a3 -= W2 * row[2];
MUL16(b0, W1, row[1]);
MAC16(b0, W3, row[3]);
MUL16(b1, W3, row[1]);
MAC16(b1, -W7, row[3]);
MUL16(b2, W5, row[1]);
MAC16(b2, -W1, row[3]);
MUL16(b3, W7, row[1]);
MAC16(b3, -W5, row[3]);
#ifdef FAST_64BIT
temp = ((uint64_t*)row)[1];
#else
temp = ((uint32_t*)row)[2] | ((uint32_t*)row)[3];
#endif
if (temp != 0) {
a0 += W4*row[4] + W6*row[6];
a1 += - W4*row[4] - W2*row[6];
a2 += - W4*row[4] + W2*row[6];
a3 += W4*row[4] - W6*row[6];
MAC16(b0, W5, row[5]);
MAC16(b0, W7, row[7]);
MAC16(b1, -W1, row[5]);
MAC16(b1, -W5, row[7]);
MAC16(b2, W7, row[5]);
MAC16(b2, W3, row[7]);
MAC16(b3, W3, row[5]);
MAC16(b3, -W1, row[7]);
}
row[0] = (a0 + b0) >> ROW_SHIFT;
@@ -247,146 +258,33 @@ static inline int idctRowCondDC (int16_t * row)
row[5] = (a2 - b2) >> ROW_SHIFT;
row[3] = (a3 + b3) >> ROW_SHIFT;
row[4] = (a3 - b3) >> ROW_SHIFT;
return 1;
}
#endif /* not ARCH_ALPHA */
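
Two shortcuts in the non-Alpha row function above avoid work on typical sparse rows: if all AC coefficients are zero, the row is simply filled with row[0] << 3 (two 64-bit or four 32-bit stores), and the later temp test skips the second batch of MAC16/MUL16 terms whenever row[4..7] are zero. A standalone sketch of the all-zero splat, assuming a little-endian machine as in the non-WORDS_BIGENDIAN branch (illustration only; it reuses the same pointer casts as the original):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int16_t row[8] = { 5, 0, 0, 0, 0, 0, 0, 0 };   /* hypothetical DC-only row */
    uint64_t temp;
    int i;
    temp = (uint64_t)((row[0] << 3) & 0xffff);
    temp += temp << 16;                            /* replicate the halfword... */
    temp += temp << 32;                            /* ...into all four positions */
    ((uint64_t *)row)[0] = temp;
    ((uint64_t *)row)[1] = temp;
    for (i = 0; i < 8; i++)
        printf("%d ", row[i]);                     /* 40 40 40 40 40 40 40 40 on little-endian */
    printf("\n");
    return 0;
}
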
static inline void idctCol (int16_t * col)
{
/*
if( !(col[8*1] | col[8*2] |col[8*3] |col[8*4] |col[8*5] |col[8*6] | col[8*7])) {
col[8*0] = col[8*1] = col[8*2] = col[8*3] = col[8*4] =
col[8*5] = col[8*6] = col[8*7] = col[8*0]<<3;
return;
}*/
int a0, a1, a2, a3, b0, b1, b2, b3;
col[0] += (1<<(COL_SHIFT-1))/W4;
a0 = W4*col[8*0] + W2*col[8*2] + W4*col[8*4] + W6*col[8*6];
a1 = W4*col[8*0] + W6*col[8*2] - W4*col[8*4] - W2*col[8*6];
a2 = W4*col[8*0] - W6*col[8*2] - W4*col[8*4] + W2*col[8*6];
a3 = W4*col[8*0] - W2*col[8*2] + W4*col[8*4] - W6*col[8*6];
b0 = W1*col[8*1] + W3*col[8*3] + W5*col[8*5] + W7*col[8*7];
b1 = W3*col[8*1] - W7*col[8*3] - W1*col[8*5] - W5*col[8*7];
b2 = W5*col[8*1] - W1*col[8*3] + W7*col[8*5] + W3*col[8*7];
b3 = W7*col[8*1] - W5*col[8*3] + W3*col[8*5] - W1*col[8*7];
col[8*0] = (a0 + b0) >> COL_SHIFT;
col[8*7] = (a0 - b0) >> COL_SHIFT;
col[8*1] = (a1 + b1) >> COL_SHIFT;
col[8*6] = (a1 - b1) >> COL_SHIFT;
col[8*2] = (a2 + b2) >> COL_SHIFT;
col[8*5] = (a2 - b2) >> COL_SHIFT;
col[8*3] = (a3 + b3) >> COL_SHIFT;
col[8*4] = (a3 - b3) >> COL_SHIFT;
}
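
The bias added to col[0] at the start of idctCol folds the column rounding constant into the DC term: since col[0] is multiplied by W4 in each of a0..a3, pre-adding (1 << (COL_SHIFT - 1)) / W4 is, up to the truncation of the integer division, the same as adding 1 << (COL_SHIFT - 1) after the multiply, which is also what the comment in idctSparseCol below alludes to. A small sketch of the equivalence (the W4 value here is a placeholder, not necessarily the one defined in this file):

#include <stdio.h>

int main(void)
{
    const int W4 = 16383;          /* placeholder weight, for illustration only */
    const int COL_SHIFT = 20;      /* value from the hunk at the top of this diff */
    int dc = 7;                    /* hypothetical DC coefficient */
    int biased  = W4 * (dc + (1 << (COL_SHIFT - 1)) / W4);
    int rounded = W4 * dc + (1 << (COL_SHIFT - 1));
    /* The two differ only by the remainder lost in the integer division. */
    printf("biased=%d rounded=%d diff=%d\n", biased, rounded, rounded - biased);
    return 0;
}
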
static inline void idctSparseCol (int16_t * col)
{
int a0, a1, a2, a3, b0, b1, b2, b3;
col[0] += (1<<(COL_SHIFT-1))/W4;
a0 = W4*col[8*0];
a1 = W4*col[8*0];
a2 = W4*col[8*0];
a3 = W4*col[8*0];
if(col[8*2]){
/* XXX: I did that only to give same values as previous code */
a0 = W4 * (col[8*0] + ((1<<(COL_SHIFT-1))/W4));
a1 = a0;
a2 = a0;
a3 = a0;
a0 += + W2*col[8*2];
a1 += + W6*col[8*2];
a2 += - W6*col[8*2];
a3 += - W2*col[8*2];
}
if(col[8*4]){
a0 += + W4*col[8*4];
a1 += - W4*col[8*4];
a2 += - W4*col[8*4];
a3 += + W4*col[8*4];
}
if(col[8*6]){
a0 += + W6*col[8*6];
a1 += - W2*col[8*6];
a2 += + W2*col[8*6];
a3 += - W6*col[8*6];
}
MUL16(b0, W1, col[8*1]);
MUL16(b1, W3, col[8*1]);
MUL16(b2, W5, col[8*1]);
MUL16(b3, W7, col[8*1]);
if(col[8*1]){
b0 = W1*col[8*1];
b1 = W3*col[8*1];
b2 = W5*col[8*1];
b3 = W7*col[8*1];
}else{
b0 =
b1 =
b2 =
b3 = 0;
}
if(col[8*3]){
b0 += + W3*col[8*3];
b1 += - W7*col[8*3];
b2 += - W1*col[8*3];
b3 += - W5*col[8*3];
}
if(col[8*5]){
b0 += + W5*col[8*5];
b1 += - W1*col[8*5];
b2 += + W7*col[8*5];
b3 += + W3*col[8*5];
}
if(col[8*7]){
b0 += + W7*col[8*7];
b1 += - W5*col[8*7];
b2 += + W3*col[8*7];
b3 += - W1*col[8*7];
}
#ifndef ARCH_ALPHA
if(!(b0|b1|b2|b3)){
col[8*0] = (a0) >> COL_SHIFT;
col[8*7] = (a0) >> COL_SHIFT;
col[8*1] = (a1) >> COL_SHIFT;
col[8*6] = (a1) >> COL_SHIFT;
col[8*2] = (a2) >> COL_SHIFT;
col[8*5] = (a2) >> COL_SHIFT;
col[8*3] = (a3) >> COL_SHIFT;
col[8*4] = (a3) >> COL_SHIFT;
}else{
#endif
col[8*0] = (a0 + b0) >> COL_SHIFT;
col[8*7] = (a0 - b0) >> COL_SHIFT;
col[8*1] = (a1 + b1) >> COL_SHIFT;
col[8*6] = (a1 - b1) >> COL_SHIFT;
col[8*2] = (a2 + b2) >> COL_SHIFT;
col[8*5] = (a2 - b2) >> COL_SHIFT;
col[8*3] = (a3 + b3) >> COL_SHIFT;
col[8*4] = (a3 - b3) >> COL_SHIFT;
#ifndef ARCH_ALPHA
}
#endif
}
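
idctSparseCol only multiplies by the column entries that are actually nonzero, which pays off because quantised 8x8 blocks are usually sparse. A tiny sketch of the same idea in isolation (hypothetical column data and placeholder weights):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int16_t col[8] = { 12, 0, -3, 0, 0, 0, 0, 0 };  /* hypothetical sparse column */
    int w[8] = { 16, 15, 14, 12, 10, 8, 5, 3 };     /* placeholder weights */
    int acc = 0, muls = 0, i;
    for (i = 0; i < 8; i++) {
        if (col[i]) {                               /* skip zero coefficients */
            acc += w[i] * col[i];
            muls++;
        }
    }
    printf("acc=%d with %d multiplies instead of 8\n", acc, muls);
    return 0;
}
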
static inline void idctSparse2Col (int16_t * col)
{
int a0, a1, a2, a3, b0, b1, b2, b3;
col[0] += (1<<(COL_SHIFT-1))/W4;
a0 = W4*col[8*0];
a1 = W4*col[8*0];
a2 = W4*col[8*0];
a3 = W4*col[8*0];
if(col[8*2]){
a0 += + W2*col[8*2];
a1 += + W6*col[8*2];
a2 += - W6*col[8*2];
a3 += - W2*col[8*2];
}
MAC16(b0, + W3, col[8*3]);
MAC16(b1, - W7, col[8*3]);
MAC16(b2, - W1, col[8*3]);
MAC16(b3, - W5, col[8*3]);
if(col[8*4]){
a0 += + W4*col[8*4];
@@ -395,6 +293,13 @@ static inline void idctSparse2Col (int16_t * col)
a3 += + W4*col[8*4];
}
if (col[8*5]) {
MAC16(b0, + W5, col[8*5]);
MAC16(b1, - W1, col[8*5]);
MAC16(b2, + W7, col[8*5]);
MAC16(b3, + W3, col[8*5]);
}
if(col[8*6]){
a0 += + W6*col[8*6];
a1 += - W2*col[8*6];
@@ -402,37 +307,11 @@ static inline void idctSparse2Col (int16_t * col)
a3 += - W6*col[8*6];
}
if(col[8*1] || 1){
b0 = W1*col[8*1];
b1 = W3*col[8*1];
b2 = W5*col[8*1];
b3 = W7*col[8*1];
}else{
b0 =
b1 =
b2 =
b3 = 0;
}
if(col[8*3]){
b0 += + W3*col[8*3];
b1 += - W7*col[8*3];
b2 += - W1*col[8*3];
b3 += - W5*col[8*3];
}
if(col[8*5]){
b0 += + W5*col[8*5];
b1 += - W1*col[8*5];
b2 += + W7*col[8*5];
b3 += + W3*col[8*5];
}
if(col[8*7]){
b0 += + W7*col[8*7];
b1 += - W5*col[8*7];
b2 += + W3*col[8*7];
b3 += - W1*col[8*7];
if (col[8*7]) {
MAC16(b0, + W7, col[8*7]);
MAC16(b1, - W5, col[8*7]);
MAC16(b2, + W3, col[8*7]);
MAC16(b3, - W1, col[8*7]);
}
col[8*0] = (a0 + b0) >> COL_SHIFT;
@@ -472,82 +351,11 @@ static inline void idctCol2(int16_t *col)
lcol[12] = l; lcol[13] = r;
lcol[14] = l; lcol[15] = r;
}
#endif
void simple_idct (short *block)
{
int i;
#if 0
int nonZero[8];
int buffer[64];
int nNonZero=0;
idctRowCondDC(block);
for(i=1; i<8; i++)
{
nonZero[nNonZero]=i;
nNonZero+= idctRowCondZ(block + i*8);
}
if(nNonZero==0)
{
for(i=0; i<8; i++)
{
block[i ]=
block[i+8 ]=
block[i+16]=
block[i+24]=
block[i+32]=
block[i+40]=
block[i+48]=
block[i+56]= (W4*block[i] + (1<<(COL_SHIFT-1))) >> COL_SHIFT;
}
}
else if(nNonZero==1)
{
int index= nonZero[0]*8;
for(i=0; i<8; i++)
{
int bias= W4*block[i] + (1<<(COL_SHIFT-1));
int c= block[i + index];
block[i ]= (c*coeff[index ] + bias) >> COL_SHIFT;
block[i+8 ]= (c*coeff[index+1] + bias) >> COL_SHIFT;
block[i+16]= (c*coeff[index+2] + bias) >> COL_SHIFT;
block[i+24]= (c*coeff[index+3] + bias) >> COL_SHIFT;
block[i+32]= (c*coeff[index+4] + bias) >> COL_SHIFT;
block[i+40]= (c*coeff[index+5] + bias) >> COL_SHIFT;
block[i+48]= (c*coeff[index+6] + bias) >> COL_SHIFT;
block[i+56]= (c*coeff[index+7] + bias) >> COL_SHIFT;
}
}
/* else if(nNonZero==2)
{
int index1= nonZero[0]*8;
int index2= nonZero[1]*8;
for(i=0; i<8; i++)
{
int bias= W4*block[i] + (1<<(COL_SHIFT-1));
int c1= block[i + index1];
int c2= block[i + index2];
block[i ]= (c1*coeff[index1 ] + c2*coeff[index2 ] + bias) >> COL_SHIFT;
block[i+8 ]= (c1*coeff[index1+1] + c2*coeff[index2+1] + bias) >> COL_SHIFT;
block[i+16]= (c1*coeff[index1+2] + c2*coeff[index2+2] + bias) >> COL_SHIFT;
block[i+24]= (c1*coeff[index1+3] + c2*coeff[index2+3] + bias) >> COL_SHIFT;
block[i+32]= (c1*coeff[index1+4] + c2*coeff[index2+4] + bias) >> COL_SHIFT;
block[i+40]= (c1*coeff[index1+5] + c2*coeff[index2+5] + bias) >> COL_SHIFT;
block[i+48]= (c1*coeff[index1+6] + c2*coeff[index2+6] + bias) >> COL_SHIFT;
block[i+56]= (c1*coeff[index1+7] + c2*coeff[index2+7] + bias) >> COL_SHIFT;
}
}*/
else
{
for(i=0; i<8; i++)
idctSparse2Col(block + i);
}
#elif defined(ARCH_ALPHA)
int rowsZero = 1; /* all rows except row 0 zero */
int rowsConstant = 1; /* all rows consist of a constant value */
@@ -579,13 +387,20 @@ void simple_idct (short *block)
for (i = 0; i < 8; i++)
idctSparseCol(block + i);
}
}
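
On Alpha, idctRowCondDC reports 0 (all entries zero), 1 (only the first entry nonzero) or 2 (anything else), and the caller folds these into the rowsZero/rowsConstant flags above to pick a cheaper column pass; the accumulation itself lies outside the hunks shown here. A hypothetical sketch of how such a return code could be folded in (helper name invented for illustration):

/* Hypothetical helper, not part of this commit: fold one row's
   classification into the flags used by the Alpha simple_idct above. */
static void fold_row_kind(int kind, int *rowsZero, int *rowsConstant)
{
    if (kind != 0)        /* the row is not entirely zero */
        *rowsZero = 0;
    if (kind == 2)        /* the row does not reduce to a constant value */
        *rowsConstant = 0;
}
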
#else
void simple_idct (short *block)
{
int i;
for(i=0; i<8; i++)
idctRowCondDC(block + i*8);
for(i=0; i<8; i++)
idctSparseCol(block + i);
#endif
}
#endif
#undef COL_SHIFT
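
For reference, the entry point transforms a 64-element block of 16-bit coefficients in place, rows first and then columns. A minimal caller sketch (coefficient values are arbitrary):

#include <stdio.h>

void simple_idct(short *block);   /* provided by simple_idct.c */

int main(void)
{
    short block[64] = { 0 };
    int i;
    block[0] = 64;                /* hypothetical DC-only block */
    simple_idct(block);           /* 8x8 inverse DCT, in place */
    for (i = 0; i < 8; i++)
        printf("%d ", block[i]);  /* first row of the spatial-domain result */
    printf("\n");
    return 0;
}
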
