Use common define for x86_32 and x86_64.

Originally committed as revision 20592 to svn://svn.mplayerhq.hu/mplayer/trunk/libswscale
Ref:    pull/126/head
Author: Diego Biurrun, 18 years ago
Parent: 3cd52279c9
Commit: 3d6a30d948
Changed files:
 libswscale/rgb2rgb.c          |  6
 libswscale/swscale-example.c  |  6
 libswscale/swscale.c          | 24
 libswscale/swscale_template.c |  4
 libswscale/yuv2rgb.c          |  2
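Background for the hunks below: the change assumes that the build defines ARCH_X86 on both 32-bit and 64-bit x86, keeping ARCH_X86_32/ARCH_X86_64 for word-size-specific code only, so the "|| defined(ARCH_X86_64)" half of every guard is redundant. A minimal sketch of that convention (the -D flags and the tiny test program are illustrative assumptions, not the actual configure output):

/* arch_common.c - illustrative sketch of the "common define" convention.
 * Assumed build flags (not taken from the real configure script):
 *   32-bit x86 build:  cc -DARCH_X86 -DARCH_X86_32 arch_common.c
 *   64-bit x86 build:  cc -DARCH_X86 -DARCH_X86_64 arch_common.c
 */
#include <stdio.h>

int main(void)
{
#if defined(ARCH_X86)
    /* covers both word sizes - the only test the guards below need */
    puts("x86 path: MMX/SSE asm may be compiled in");
#else
    puts("generic C path");
#endif

#if defined(ARCH_X86_64)
    /* still available where 64-bit-only code is genuinely needed */
    puts("x86_64-specific path");
#endif
    return 0;
}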

libswscale/rgb2rgb.c
@@ -88,7 +88,7 @@ void (*yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *sr
                     long srcStride1, long srcStride2,
                     long srcStride3, long dstStride);
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 static const uint64_t mmx_null __attribute__((aligned(8))) = 0x0000000000000000ULL;
 static const uint64_t mmx_one __attribute__((aligned(8))) = 0xFFFFFFFFFFFFFFFFULL;
 static const uint64_t mask32b attribute_used __attribute__((aligned(8))) = 0x000000FF000000FFULL;
@@ -150,7 +150,7 @@ static uint64_t __attribute__((aligned(8))) dither8[2]={
 0x0602060206020602LL,
 0x0004000400040004LL,};
 #endif
-#endif /* defined(ARCH_X86) || defined(ARCH_X86_64) */
+#endif /* defined(ARCH_X86) */
 #define RGB2YUV_SHIFT 8
 #define BY ((int)( 0.098*(1<<RGB2YUV_SHIFT)+0.5))
@@ -172,7 +172,7 @@ static uint64_t __attribute__((aligned(8))) dither8[2]={
 #define RENAME(a) a ## _C
 #include "rgb2rgb_template.c"
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 //MMX versions
 #undef RENAME
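Aside: the RENAME machinery in the last hunk is the usual "compile one template several times under different suffixes" trick; rgb2rgb_template.c is included once for the plain C build and, judging by the //MMX versions marker, again for the MMX build. A stripped-down, single-file sketch of the pattern, using hypothetical names (ROW_TEMPLATE, scale_row) rather than the real template contents:

/* rename_sketch.c - token-pasting template trick, condensed to one file.
 * The template body here is a placeholder, not code from rgb2rgb_template.c. */
#include <stdio.h>

/* what would normally live in a shared *_template.c file */
#define ROW_TEMPLATE                                      \
    static void RENAME(scale_row)(const int *src, int n)  \
    {                                                     \
        printf("%s: %d pixels\n", __func__, n);           \
        (void)src;                                        \
    }

#define RENAME(a) a ## _C        /* plain C instantiation */
ROW_TEMPLATE
#undef RENAME

#define RENAME(a) a ## _MMX      /* "MMX" instantiation (another C copy here) */
ROW_TEMPLATE
#undef RENAME

int main(void)
{
    int line[4] = {0, 1, 2, 3};
    scale_row_C(line, 4);    /* expands from scale_row ## _C  */
    scale_row_MMX(line, 4);  /* expands from scale_row ## _MMX */
    return 0;
}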

libswscale/swscale-example.c
@@ -119,7 +119,7 @@ static int doTest(uint8_t *ref[3], int refStride[3], int w, int h, int srcFormat
 sws_scale(dstContext, src, srcStride, 0, srcH, dst, dstStride);
 sws_scale(outContext, dst, dstStride, 0, dstH, out, refStride);
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 asm volatile ("emms\n\t");
 #endif
@@ -212,14 +212,14 @@ int main(int argc, char **argv){
 rgb_data[ x + y*4*W]= random();
 }
 }
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 sws_rgb2rgb_init(SWS_CPU_CAPS_MMX*0);
 #else
 sws_rgb2rgb_init(0);
 #endif
 sws_scale(sws, rgb_src, rgb_stride, 0, H , src, stride);
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 asm volatile ("emms\n\t");
 #endif
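Note on the guarded "emms": the MMX registers alias the x87 floating-point register stack, so an MMX code path must execute emms before normal FPU code runs again, which is what these #if defined(ARCH_X86) wrappers protect. A minimal sketch of the pattern, assuming a GCC-style compiler and using a placeholder do_mmx_work() rather than real swscale code:

/* emms_sketch.c - why MMX code paths are followed by "emms".
 * Compiles to the plain C path unless ARCH_X86 and HAVE_MMX are defined. */
#include <stdio.h>

static void do_mmx_work(void)
{
#if defined(ARCH_X86) && defined(HAVE_MMX)
    /* real code would run pmaddwd/packuswb/... on mm0-mm7 here */
    asm volatile ("pxor %%mm0, %%mm0" ::: "mm0");
#endif
}

int main(void)
{
    do_mmx_work();

#if defined(ARCH_X86) && defined(HAVE_MMX)
    /* MMX state aliases the x87 stack: reset it before any FP use,
     * mirroring the asm volatile ("emms\n\t") calls in the diff */
    asm volatile ("emms");
#endif

    printf("%f\n", 1.0 / 3.0);  /* the FPU is safe to use again */
    return 0;
}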

libswscale/swscale.c
@@ -149,7 +149,7 @@ add BGR4 output support
 write special BGR->BGR scaler
 */
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 static uint64_t attribute_used __attribute__((aligned(8))) bF8= 0xF8F8F8F8F8F8F8F8LL;
 static uint64_t attribute_used __attribute__((aligned(8))) bFC= 0xFCFCFCFCFCFCFCFCLL;
 static uint64_t __attribute__((aligned(8))) w10= 0x0010001000100010LL;
@@ -195,7 +195,7 @@ static const uint64_t bgr2VCoeff attribute_used __attribute__((aligned(8))) = 0
 static const uint64_t bgr2YOffset attribute_used __attribute__((aligned(8))) = 0x1010101010101010ULL;
 static const uint64_t bgr2UVOffset attribute_used __attribute__((aligned(8)))= 0x8080808080808080ULL;
 static const uint64_t w1111 attribute_used __attribute__((aligned(8))) = 0x0001000100010001ULL;
-#endif /* defined(ARCH_X86) || defined(ARCH_X86_64) */
+#endif /* defined(ARCH_X86) */
 // clipping helper table for C implementations:
 static unsigned char clip_table[768];
@@ -290,7 +290,7 @@ char *sws_format_name(enum PixelFormat format)
 }
 }
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 void in_asm_used_var_warning_killer()
 {
 volatile int i= bF8+bFC+w10+
@@ -813,7 +813,7 @@ static inline void yuv2packedXinC(SwsContext *c, int16_t *lumFilter, int16_t **l
 #endif //HAVE_ALTIVEC
 #endif //ARCH_POWERPC
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 #if (defined (HAVE_MMX) && !defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
 #define COMPILE_MMX
@@ -850,7 +850,7 @@ static inline void yuv2packedXinC(SwsContext *c, int16_t *lumFilter, int16_t **l
 #endif
 #endif //ARCH_POWERPC
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 //X86 versions
 /*
@@ -917,7 +917,7 @@ static inline int initFilter(int16_t **outFilter, int16_t **filterPos, int *outF
 int minFilterSize;
 double *filter=NULL;
 double *filter2=NULL;
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 if(flags & SWS_CPU_CAPS_MMX)
 asm volatile("emms\n\t"::: "memory"); //FIXME this shouldnt be required but it IS (even for non mmx versions)
 #endif
@@ -1471,7 +1471,7 @@ static void globalInit(void){
 static SwsFunc getSwsFunc(int flags){
 #ifdef RUNTIME_CPUDETECT
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 // ordered per speed fasterst first
 if(flags & SWS_CPU_CAPS_MMX2)
 return swScale_MMX2;
@@ -1490,7 +1490,7 @@ static SwsFunc getSwsFunc(int flags){
 return swScale_C;
 #endif
 return swScale_C;
-#endif /* defined(ARCH_X86) || defined(ARCH_X86_64) */
+#endif /* defined(ARCH_X86) */
 #else //RUNTIME_CPUDETECT
 #ifdef HAVE_MMX2
 return swScale_MMX2;
@@ -1943,7 +1943,7 @@ SwsContext *sws_getContext(int srcW, int srcH, int srcFormat, int dstW, int dstH
 int unscaled, needsDither;
 int srcRange, dstRange;
 SwsFilter dummyFilter= {NULL, NULL, NULL, NULL};
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 if(flags & SWS_CPU_CAPS_MMX)
 asm volatile("emms\n\t"::: "memory");
 #endif
@@ -2365,7 +2365,7 @@ SwsContext *sws_getContext(int srcW, int srcH, int srcFormat, int dstW, int dstH
 }
 else
 {
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 MSG_V("SwScaler: using X86-Asm scaler for horizontal scaling\n");
 #else
 if(flags & SWS_FAST_BILINEAR)
@@ -2802,7 +2802,7 @@ void sws_freeContext(SwsContext *c){
 av_free(c->hChrFilterPos);
 c->hChrFilterPos = NULL;
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 #ifdef MAP_ANONYMOUS
 if(c->funnyYCode) munmap(c->funnyYCode, MAX_FUNNY_CODE_SIZE);
 if(c->funnyUVCode) munmap(c->funnyUVCode, MAX_FUNNY_CODE_SIZE);
@@ -2812,7 +2812,7 @@ void sws_freeContext(SwsContext *c){
 #endif
 c->funnyYCode=NULL;
 c->funnyUVCode=NULL;
-#endif /* defined(ARCH_X86) || defined(ARCH_X86_64) */
+#endif /* defined(ARCH_X86) */
 av_free(c->lumMmx2Filter);
 c->lumMmx2Filter=NULL;
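The -1471/-1490 hunks touch getSwsFunc(), which picks one of several identically-shaped scaler implementations at run time from the capability flags and falls back to the portable C version. A condensed sketch of that dispatch shape, with stub scalers and illustrative flag values (not the real SWS_CPU_CAPS_* constants):

/* dispatch_sketch.c - shape of the runtime CPU dispatch in getSwsFunc(). */
#include <stdio.h>

#define CAPS_MMX   0x1   /* illustrative values, not libswscale's */
#define CAPS_MMX2  0x2

typedef void (*ScaleFunc)(void);

static void scale_C(void)    { puts("plain C scaler"); }
static void scale_MMX(void)  { puts("MMX scaler"); }
static void scale_MMX2(void) { puts("MMX2 scaler"); }

/* ordered fastest first, like the real getSwsFunc() */
static ScaleFunc get_scale_func(int flags)
{
#if defined(ARCH_X86)
    if (flags & CAPS_MMX2) return scale_MMX2;
    if (flags & CAPS_MMX)  return scale_MMX;
#endif
    (void)flags;            /* silences unused warning on non-x86 builds */
    return scale_C;
}

int main(void)
{
    get_scale_func(CAPS_MMX2)();  /* MMX2 on x86 builds, C elsewhere */
    get_scale_func(0)();          /* always the C fallback */
    return 0;
}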

libswscale/swscale_template.c
@@ -2489,7 +2489,7 @@ static inline void RENAME(hyscale)(uint16_t *dst, long dstWidth, uint8_t *src, i
 }
 else // Fast Bilinear upscale / crap downscale
 {
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 #ifdef HAVE_MMX2
 int i;
 #if defined(PIC)
@@ -2689,7 +2689,7 @@ inline static void RENAME(hcscale)(uint16_t *dst, long dstWidth, uint8_t *src1,
 }
 else // Fast Bilinear upscale / crap downscale
 {
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 #ifdef HAVE_MMX2
 int i;
 #if defined(PIC)

libswscale/yuv2rgb.c
@@ -197,7 +197,7 @@ static uint64_t __attribute__((aligned(8))) dither8[2]={
 #define RENAME(a) a ## _MMX2
 #include "yuv2rgb_template.c"
-#endif /* defined(ARCH_X86) || defined(ARCH_X86_64) */
+#endif /* defined(ARCH_X86) */
 const int32_t Inverse_Table_6_9[8][4] = {
 {117504, 138453, 13954, 34903}, /* no sequence_display_extension */
