Merge commit 'f8bbebecfd7ea3dceb7c96f931beca33f80a3490'

* commit 'f8bbebecfd7ea3dceb7c96f931beca33f80a3490':
  x86: motion_est: K&R formatting cosmetics

Conflicts:
	libavcodec/x86/motion_est.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit 925ce6faf4
Michael Niedermayer, 11 years ago
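
The change being merged is purely cosmetic: inline-asm operand lists are folded onto the closing parenthesis instead of leaving ");" on its own line, casts gain a space ("(x86_reg) stride" rather than "(x86_reg)stride"), the asm strings move off the "__asm__ volatile(" line, over-long signatures are wrapped, and blank continuation lines separate the functions inside the PIX_SAD macro. The following is a minimal sketch of the convention, not code from the patch; add_asm() is a made-up helper, and it assumes a GCC-compatible compiler targeting x86:

    #include <stdio.h>

    /* Hypothetical helper, for illustration of the K&R asm layout only. */
    static inline int add_asm(int a, int b)
    {
        int ret = a;
        __asm__ volatile (
            "add %1, %0 \n\t"
            : "+r" (ret)
            : "r" ((int) b));  /* space after the cast; ")" joins the last operand line */
        return ret;
    }

    int main(void)
    {
        printf("%d\n", add_asm(2, 3));  /* prints 5 */
        return 0;
    }

The same layout rules are applied mechanically to every asm block in the diff below.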
      libavcodec/x86/motion_est.c

@@ -71,8 +71,7 @@ static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 "add %3, %%"REG_a" \n\t"
 " js 1b \n\t"
 : "+a" (len)
-: "r" (blk1 - len), "r" (blk2 - len), "r" ((x86_reg)stride)
-);
+: "r" (blk1 - len), "r" (blk2 - len), "r" ((x86_reg) stride));
 }
 
 static inline void sad8_1_mmxext(uint8_t *blk1, uint8_t *blk2,
@@ -92,8 +91,7 @@ static inline void sad8_1_mmxext(uint8_t *blk1, uint8_t *blk2,
 "sub $2, %0 \n\t"
 " jg 1b \n\t"
 : "+r" (h), "+r" (blk1), "+r" (blk2)
-: "r" ((x86_reg)stride)
-);
+: "r" ((x86_reg) stride));
 }
 
 static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
@@ -117,8 +115,7 @@ static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
 "paddw %%xmm0, %%xmm2 \n\t"
 "movd %%xmm2, %3 \n\t"
 : "+r" (h), "+r" (blk1), "+r" (blk2), "=r" (ret)
-: "r" ((x86_reg)stride)
-);
+: "r" ((x86_reg) stride));
 return ret;
 }
 
@@ -141,8 +138,7 @@ static inline void sad8_x2a_mmxext(uint8_t *blk1, uint8_t *blk2,
 "sub $2, %0 \n\t"
 " jg 1b \n\t"
 : "+r" (h), "+r" (blk1), "+r" (blk2)
-: "r" ((x86_reg)stride)
-);
+: "r" ((x86_reg) stride));
 }
 
 static inline void sad8_y2a_mmxext(uint8_t *blk1, uint8_t *blk2,
@@ -167,8 +163,7 @@ static inline void sad8_y2a_mmxext(uint8_t *blk1, uint8_t *blk2,
 "sub $2, %0 \n\t"
 " jg 1b \n\t"
 : "+r" (h), "+r" (blk1), "+r" (blk2)
-: "r" ((x86_reg)stride)
-);
+: "r" ((x86_reg) stride));
 }
 
 static inline void sad8_4_mmxext(uint8_t *blk1, uint8_t *blk2,
@@ -198,11 +193,11 @@ static inline void sad8_4_mmxext(uint8_t *blk1, uint8_t *blk2,
 "sub $2, %0 \n\t"
 " jg 1b \n\t"
 : "+r" (h), "+r" (blk1), "+r" (blk2)
-: "r" ((x86_reg)stride)
-);
+: "r" ((x86_reg) stride));
 }
 
-static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h)
+static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2,
+int stride, int h)
 {
 x86_reg len = -(x86_reg)stride * h;
 __asm__ volatile (
@@ -236,8 +231,8 @@ static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int
 "add %4, %%"REG_a" \n\t"
 " js 1b \n\t"
 : "+a" (len)
-: "r" (blk1a - len), "r" (blk1b -len), "r" (blk2 - len), "r" ((x86_reg)stride)
-);
+: "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len),
+"r" ((x86_reg) stride));
 }
 
 static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
@@ -289,8 +284,8 @@ static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 "add %4, %%"REG_a" \n\t"
 " js 1b \n\t"
 : "+a" (len)
-: "r" (blk1 - len), "r" (blk1 -len + stride), "r" (blk2 - len), "r" ((x86_reg)stride)
-);
+: "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len),
+"r" ((x86_reg) stride));
 }
 
 static inline int sum_mmx(void)
@@ -304,8 +299,7 @@ static inline int sum_mmx(void)
 "psrlq $16, %%mm6 \n\t"
 "paddw %%mm0, %%mm6 \n\t"
 "movd %%mm6, %0 \n\t"
-: "=r" (ret)
-);
+: "=r" (ret));
 return ret & 0xFFFF;
 }
 
@@ -314,8 +308,7 @@ static inline int sum_mmxext(void)
 int ret;
 __asm__ volatile (
 "movd %%mm6, %0 \n\t"
-: "=r" (ret)
-);
+: "=r" (ret));
 return ret;
 }
 
@@ -323,55 +316,63 @@ static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
 sad8_2_mmx(blk1, blk1 + 1, blk2, stride, h);
 }
 
 static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
 sad8_2_mmx(blk1, blk1 + stride, blk2, stride, h);
 }
 
 #define PIX_SAD(suf) \
-static int sad8_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
+static int sad8_ ## suf(void *v, uint8_t *blk2, \
+uint8_t *blk1, int stride, int h) \
 { \
 av_assert2(h == 8); \
-__asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
-"pxor %%mm6, %%mm6 \n\t":);\
+__asm__ volatile ( \
+"pxor %%mm7, %%mm7 \n\t" \
+"pxor %%mm6, %%mm6 \n\t" \
+:); \
 \
 sad8_1_ ## suf(blk1, blk2, stride, 8); \
 \
 return sum_ ## suf(); \
 } \
-static int sad8_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
+\
+static int sad8_x2_ ## suf(void *v, uint8_t *blk2, \
+uint8_t *blk1, int stride, int h) \
 { \
 av_assert2(h == 8); \
-__asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
+__asm__ volatile ( \
+"pxor %%mm7, %%mm7 \n\t" \
 "pxor %%mm6, %%mm6 \n\t" \
 "movq %0, %%mm5 \n\t" \
-:: "m"(round_tab[1]) \
-);\
+:: "m" (round_tab[1])); \
 \
 sad8_x2a_ ## suf(blk1, blk2, stride, 8); \
 \
 return sum_ ## suf(); \
 } \
 \
-static int sad8_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
+static int sad8_y2_ ## suf(void *v, uint8_t *blk2, \
+uint8_t *blk1, int stride, int h) \
 { \
 av_assert2(h == 8); \
-__asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
+__asm__ volatile ( \
+"pxor %%mm7, %%mm7 \n\t" \
 "pxor %%mm6, %%mm6 \n\t" \
 "movq %0, %%mm5 \n\t" \
-:: "m"(round_tab[1]) \
-);\
+:: "m" (round_tab[1])); \
 \
 sad8_y2a_ ## suf(blk1, blk2, stride, 8); \
 \
 return sum_ ## suf(); \
 } \
 \
-static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
+static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, \
+uint8_t *blk1, int stride, int h) \
 { \
 av_assert2(h == 8); \
-__asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
+__asm__ volatile ( \
+"pxor %%mm7, %%mm7 \n\t" \
 "pxor %%mm6, %%mm6 \n\t" \
 ::); \
 \
@@ -380,45 +381,55 @@ static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, i
 return sum_ ## suf(); \
 } \
 \
-static int sad16_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
+static int sad16_ ## suf(void *v, uint8_t *blk2, \
+uint8_t *blk1, int stride, int h) \
 { \
-__asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
-"pxor %%mm6, %%mm6 \n\t":);\
+__asm__ volatile ( \
+"pxor %%mm7, %%mm7 \n\t" \
+"pxor %%mm6, %%mm6 \n\t" \
+:); \
 \
 sad8_1_ ## suf(blk1, blk2, stride, h); \
 sad8_1_ ## suf(blk1 + 8, blk2 + 8, stride, h); \
 \
 return sum_ ## suf(); \
 } \
-static int sad16_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
+\
+static int sad16_x2_ ## suf(void *v, uint8_t *blk2, \
+uint8_t *blk1, int stride, int h) \
 { \
-__asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
+__asm__ volatile ( \
+"pxor %%mm7, %%mm7 \n\t" \
 "pxor %%mm6, %%mm6 \n\t" \
 "movq %0, %%mm5 \n\t" \
-:: "m"(round_tab[1]) \
-);\
+:: "m" (round_tab[1])); \
 \
 sad8_x2a_ ## suf(blk1, blk2, stride, h); \
 sad8_x2a_ ## suf(blk1 + 8, blk2 + 8, stride, h); \
 \
 return sum_ ## suf(); \
 } \
-static int sad16_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
+\
+static int sad16_y2_ ## suf(void *v, uint8_t *blk2, \
+uint8_t *blk1, int stride, int h) \
 { \
-__asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
+__asm__ volatile ( \
+"pxor %%mm7, %%mm7 \n\t" \
 "pxor %%mm6, %%mm6 \n\t" \
 "movq %0, %%mm5 \n\t" \
-:: "m"(round_tab[1]) \
-);\
+:: "m" (round_tab[1])); \
 \
 sad8_y2a_ ## suf(blk1, blk2, stride, h); \
 sad8_y2a_ ## suf(blk1 + 8, blk2 + 8, stride, h); \
 \
 return sum_ ## suf(); \
 } \
-static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
+\
+static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, \
+uint8_t *blk1, int stride, int h) \
 { \
-__asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
+__asm__ volatile ( \
+"pxor %%mm7, %%mm7 \n\t" \
 "pxor %%mm6, %%mm6 \n\t" \
 ::); \
\
