Use common define for x86_32 and x86_64.

Originally committed as revision 6859 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diego Biurrun
parent a63e5f1cce
commit 3cd52279c9
libavcodec/bitstream.h    | 12
libavcodec/msmpeg4.c      |  2
libavutil/bswap.h         |  4
libavutil/common.h        |  2
libavutil/internal.h      |  4
libpostproc/postprocess.c | 10

6 files changed, 34 lines

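The change below collapses every "defined(ARCH_X86) || defined(ARCH_X86_64)"
test into a single "defined(ARCH_X86)", which presumes the build system now
sets ARCH_X86 on both 32-bit and 64-bit x86, while ARCH_X86_64 remains
available where 64-bit-only paths need it (see the read_time() hunk in
libavutil/common.h). A hypothetical sketch of such a common define — the
real definitions come from configure, not from this code:

    /* hypothetical sketch only; FFmpeg's real defines come from configure */
    #if defined(__i386__) || defined(__x86_64__)
    #    define ARCH_X86 1       /* common to x86_32 and x86_64 */
    #endif
    #if defined(__x86_64__)
    #    define ARCH_X86_64 1    /* kept for 64-bit-only code paths */
    #endif
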
diff --git a/libavcodec/bitstream.h b/libavcodec/bitstream.h
@@ -47,7 +47,7 @@
 extern const uint8_t ff_reverse[256];

-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 // avoid +32 for shift optimization (gcc should do that ...)
 static inline int32_t NEG_SSR32( int32_t a, int8_t s){
     asm ("sarl %1, %0\n\t"
@@ -171,7 +171,7 @@ typedef struct RL_VLC_ELEM {
 #endif

 /* used to avoid missaligned exceptions on some archs (alpha, ...) */
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 # define unaligned16(a) (*(const uint16_t*)(a))
 # define unaligned32(a) (*(const uint32_t*)(a))
 # define unaligned64(a) (*(const uint64_t*)(a))
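
These macros dereference unaligned pointers directly, which only x86's
tolerant load hardware allows. On strict-alignment targets the usual
portable equivalent is a memcpy-based accessor, sketched here with a
hypothetical name:

    #include <stdint.h>
    #include <string.h>

    /* safe counterpart to unaligned32(): memcpy compiles down to a plain
       load wherever that is legal */
    static inline uint32_t unaligned32_safe(const void *a)
    {
        uint32_t v;
        memcpy(&v, a, sizeof(v));
        return v;
    }
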
@@ -200,7 +200,7 @@ unaligned(16)
 unaligned(32)
 unaligned(64)
 #undef unaligned
-#endif /* defined(ARCH_X86) || defined(ARCH_X86_64) */
+#endif /* defined(ARCH_X86) */

 #ifndef ALT_BITSTREAM_WRITER
 static inline void put_bits(PutBitContext *s, int n, unsigned int value)
@@ -247,7 +247,7 @@ static inline void put_bits(PutBitContext *s, int n, unsigned int value)
 static inline void put_bits(PutBitContext *s, int n, unsigned int value)
 {
 # ifdef ALIGNED_BITSTREAM_WRITER
-# if defined(ARCH_X86) || defined(ARCH_X86_64)
+# if defined(ARCH_X86)
     asm volatile(
         "movl %0, %%ecx \n\t"
         "xorl %%eax, %%eax \n\t"
@@ -278,7 +278,7 @@ static inline void put_bits(PutBitContext *s, int n, unsigned int value)
     s->index= index;
 # endif
 # else //ALIGNED_BITSTREAM_WRITER
-# if defined(ARCH_X86) || defined(ARCH_X86_64)
+# if defined(ARCH_X86)
     asm volatile(
         "movl $7, %%ecx \n\t"
         "andl %0, %%ecx \n\t"
@@ -580,7 +580,7 @@ static inline void skip_bits_long(GetBitContext *s, int n){
         name##_bit_count-= 32;\
     }\

-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 # define SKIP_CACHE(name, gb, num)\
         asm(\
             "shldl %2, %1, %0 \n\t"\

diff --git a/libavcodec/msmpeg4.c b/libavcodec/msmpeg4.c
@@ -659,7 +659,7 @@ static inline int msmpeg4_pred_dc(MpegEncContext * s, int n,
        necessitate to modify mpegvideo.c. The problem comes from the
        fact they decided to store the quantized DC (which would lead
        to problems if Q could vary !) */
-#if (defined(ARCH_X86) || defined(ARCH_X86_64)) && !defined PIC
+#if (defined(ARCH_X86)) && !defined PIC
     asm volatile(
         "movl %3, %%eax \n\t"
         "shrl $1, %%eax \n\t"

diff --git a/libavutil/bswap.h b/libavutil/bswap.h
@@ -36,7 +36,7 @@
 # define LEGACY_REGS "=q"
 #endif

-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 static always_inline uint16_t bswap_16(uint16_t x)
 {
     __asm("rorw $8, %0" :
@@ -149,7 +149,7 @@ static inline uint64_t bswap_64(uint64_t x)
     return r.ll;
 #endif
 }
-#endif /* defined(ARCH_X86) || defined(ARCH_X86_64) */
+#endif /* defined(ARCH_X86) */

 #endif /* !HAVE_BYTESWAP_H */
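
For reference, portable fallbacks equivalent to the rorw-based asm above
(sketches, not the exact non-x86 code in bswap.h):

    #include <stdint.h>

    static inline uint16_t bswap_16_c(uint16_t x)
    {
        return (uint16_t)((x >> 8) | (x << 8));
    }

    static inline uint32_t bswap_32_c(uint32_t x)
    {
        x = ((x << 8) & 0xFF00FF00u) | ((x >> 8) & 0x00FF00FFu);
        return (x >> 16) | (x << 16);
    }
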

diff --git a/libavutil/common.h b/libavutil/common.h
@@ -334,7 +334,7 @@ static inline int ff_get_fourcc(const char *s){
     }\
 }

-#if defined(ARCH_X86) || defined(ARCH_X86_64) || defined(ARCH_POWERPC)
+#if defined(ARCH_X86) || defined(ARCH_POWERPC)
 #if defined(ARCH_X86_64)
 static inline uint64_t read_time(void)
 {
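
The nested ARCH_X86_64 test survives the cleanup because rdtsc returns its
64-bit count split across edx:eax, which a 64-bit build must recombine by
hand. A hedged sketch (not the exact FFmpeg asm):

    #include <stdint.h>

    static inline uint64_t read_time_sketch(void)
    {
        uint32_t lo, hi;
        __asm__ volatile ("rdtsc" : "=a" (lo), "=d" (hi));
        return ((uint64_t)hi << 32) | lo;
    }
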

diff --git a/libavutil/internal.h b/libavutil/internal.h
@@ -105,7 +105,7 @@
 extern const uint32_t inverse[256];

-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 # define FASTDIV(a,b) \
     ({\
         int ret,dmy;\
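
The idea behind FASTDIV, in a portable sketch (assuming inverse[] holds
32.32 fixed-point reciprocals, as the table name suggests): replace the
division by a multiply and a high-word extraction.

    #include <stdint.h>

    /* hypothetical portable counterpart; valid for 1 <= b < 256 */
    static inline uint32_t fastdiv_c(uint32_t a, uint32_t b,
                                     const uint32_t inverse[256])
    {
        return (uint32_t)(((uint64_t)a * inverse[b]) >> 32);
    }
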
@@ -154,7 +154,7 @@ static inline int ff_sqrt(int a)
     return ret;
 }

-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 #define MASK_ABS(mask, level)\
     asm volatile(\
         "cdq \n\t"\

diff --git a/libpostproc/postprocess.c b/libpostproc/postprocess.c
@@ -105,7 +105,7 @@ try to unroll inner for(x=0 ... loop to avoid these damn if(x ... checks
 #define TEMP_STRIDE 8
 //#define NUM_BLOCKS_AT_ONCE 16 //not used yet

-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 static uint64_t __attribute__((aligned(8))) attribute_used w05= 0x0005000500050005LL;
 static uint64_t __attribute__((aligned(8))) attribute_used w04= 0x0004000400040004LL;
 static uint64_t __attribute__((aligned(8))) attribute_used w20= 0x0020002000200020LL;
@@ -156,7 +156,7 @@ static const char *replaceTable[]=
 };

-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 static inline void prefetchnta(void *p)
 {
     asm volatile( "prefetchnta (%0)\n\t"
@@ -581,7 +581,7 @@ static always_inline void do_a_deblock_C(uint8_t *src, int step, int stride, PPC
 #endif //HAVE_ALTIVEC
 #endif //ARCH_POWERPC

-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
 #if (defined (HAVE_MMX) && !defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
 #define COMPILE_MMX
@@ -594,7 +594,7 @@ static always_inline void do_a_deblock_C(uint8_t *src, int step, int stride, PPC
 #if (defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
 #define COMPILE_3DNOW
 #endif
-#endif /* defined(ARCH_X86) || defined(ARCH_X86_64) */
+#endif /* defined(ARCH_X86) */

 #undef HAVE_MMX
 #undef HAVE_MMX2
@@ -662,7 +662,7 @@ static inline void postProcess(uint8_t src[], int srcStride, uint8_t dst[], int
 // difference wouldnt be messureable here but its much better because
 // someone might exchange the cpu whithout restarting mplayer ;)
 #ifdef RUNTIME_CPUDETECT
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
         // ordered per speed fasterst first
         if(c->cpuCaps & PP_CPU_CAPS_MMX2)
                 postProcess_MMX2(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
