swresample: change COMMON_CORE_INT16 asm from SSSE3 to SSE2

pshufd+paddd is slightly faster than phaddd.
The real gain is on pre-SSSE3 processors such as the AMD K8 and K10, which
get a big performance boost compared to the MMXEXT version.
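
For illustration, the same two reduction strategies written with intrinsics
(a standalone sketch, not part of this commit; the function names are made up):

    /* Horizontal sum of the four 32-bit lanes of an XMM register.
     * hsum_sse2() mirrors the new pshufd+paddd sequence;
     * hsum_ssse3() mirrors the phaddd pair it replaces. */
    #include <emmintrin.h>   /* SSE2 */
    #include <tmmintrin.h>   /* SSSE3, old variant only */

    static int hsum_sse2(__m128i v)              /* lanes [d3 d2 d1 d0] */
    {
        __m128i t = _mm_shuffle_epi32(v, 0x0E);  /* lane0=d2, lane1=d3 */
        v = _mm_add_epi32(v, t);                 /* lane0=d0+d2, lane1=d1+d3 */
        t = _mm_shuffle_epi32(v, 0x01);          /* lane0=d1+d3 */
        v = _mm_add_epi32(v, t);                 /* lane0=d0+d1+d2+d3 */
        return _mm_cvtsi128_si32(v);             /* extract lane 0 */
    }

    static int hsum_ssse3(__m128i v)
    {
        v = _mm_hadd_epi32(v, v);                /* [d2+d3 d0+d1 d2+d3 d0+d1] */
        v = _mm_hadd_epi32(v, v);                /* every lane = d0+d1+d2+d3 */
        return _mm_cvtsi128_si32(v);
    }

The K8 and K10 lack SSSE3 entirely, so before this change they fell back to
the MMXEXT version; with SSE2 they can use the full-width XMM path.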

Signed-off-by: James Almer <jamrial@gmail.com>
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
 libswresample/resample.c          | 10 +++++-----
 libswresample/resample_template.c |  8 ++++----
 libswresample/x86/resample_mmx.h  | 10 ++++++----
 3 files changed, 15 insertions(+), 13 deletions(-)

--- a/libswresample/resample.c
+++ b/libswresample/resample.c
@@ -302,10 +302,10 @@ static int set_compensation(ResampleContext *c, int sample_delta, int compensati
 #include "resample_template.c"
 #undef TEMPLATE_RESAMPLE_S16_MMX2
-#if HAVE_SSSE3_INLINE
-#define TEMPLATE_RESAMPLE_S16_SSSE3
+#if HAVE_SSE2_INLINE
+#define TEMPLATE_RESAMPLE_S16_SSE2
 #include "resample_template.c"
-#undef TEMPLATE_RESAMPLE_S16_SSSE3
+#undef TEMPLATE_RESAMPLE_S16_SSE2
 #endif
 #endif // HAVE_MMXEXT_INLINE
@@ -317,8 +317,8 @@ static int multiple_resample(ResampleContext *c, AudioData *dst, int dst_size, A
     for(i=0; i<dst->ch_count; i++){
 #if HAVE_MMXEXT_INLINE
-#if HAVE_SSSE3_INLINE
-        if(c->format == AV_SAMPLE_FMT_S16P && (mm_flags&AV_CPU_FLAG_SSSE3)) ret= swri_resample_int16_ssse3(c, (int16_t*)dst->ch[i], (const int16_t*)src->ch[i], consumed, src_size, dst_size, i+1==dst->ch_count);
+#if HAVE_SSE2_INLINE
+        if(c->format == AV_SAMPLE_FMT_S16P && (mm_flags&AV_CPU_FLAG_SSE2)) ret= swri_resample_int16_sse2 (c, (int16_t*)dst->ch[i], (const int16_t*)src->ch[i], consumed, src_size, dst_size, i+1==dst->ch_count);
         else
 #endif
         if(c->format == AV_SAMPLE_FMT_S16P && (mm_flags&AV_CPU_FLAG_MMX2 )){
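
The hunk above keeps libswresample's usual dispatch pattern: test the runtime
CPU flags and fall through to the next-widest implementation. A minimal sketch
of that pattern (pick_resampler() and the c_fallback parameter are illustrative;
av_get_cpu_flags() and the flag constants are real libavutil API, and the two
swri_ functions are the ones declared in libswresample/x86/resample_mmx.h):

    #include <stdint.h>
    #include "libavutil/cpu.h"

    struct ResampleContext;
    typedef int (*resample_s16_fn)(struct ResampleContext *c, int16_t *dst,
                                   const int16_t *src, int *consumed,
                                   int src_size, int dst_size, int update_ctx);

    static resample_s16_fn pick_resampler(resample_s16_fn c_fallback)
    {
        int mm_flags = av_get_cpu_flags();
    #if HAVE_SSE2_INLINE
        if (mm_flags & AV_CPU_FLAG_SSE2)
            return swri_resample_int16_sse2;   /* widest path the CPU supports */
    #endif
    #if HAVE_MMXEXT_INLINE
        if (mm_flags & AV_CPU_FLAG_MMX2)
            return swri_resample_int16_mmx2;
    #endif
        return c_fallback;                     /* portable C version */
    }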

--- a/libswresample/resample_template.c
+++ b/libswresample/resample_template.c
@@ -57,7 +57,7 @@
 #elif defined(TEMPLATE_RESAMPLE_S16) \
    || defined(TEMPLATE_RESAMPLE_S16_MMX2) \
-   || defined(TEMPLATE_RESAMPLE_S16_SSSE3)
+   || defined(TEMPLATE_RESAMPLE_S16_SSE2)
 # define FILTER_SHIFT 15
 # define DELEM int16_t
@@ -74,9 +74,9 @@
 # elif defined(TEMPLATE_RESAMPLE_S16_MMX2)
 # define COMMON_CORE COMMON_CORE_INT16_MMX2
 # define RENAME(N) N ## _int16_mmx2
-# elif defined(TEMPLATE_RESAMPLE_S16_SSSE3)
-# define COMMON_CORE COMMON_CORE_INT16_SSSE3
-# define RENAME(N) N ## _int16_ssse3
+# elif defined(TEMPLATE_RESAMPLE_S16_SSE2)
+# define COMMON_CORE COMMON_CORE_INT16_SSE2
+# define RENAME(N) N ## _int16_sse2
 # endif
 #endif

--- a/libswresample/x86/resample_mmx.h
+++ b/libswresample/x86/resample_mmx.h
@@ -23,7 +23,7 @@
 #include "libswresample/swresample_internal.h"
 int swri_resample_int16_mmx2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
-int swri_resample_int16_ssse3(struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
+int swri_resample_int16_sse2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
 DECLARE_ALIGNED(16, const uint64_t, ff_resample_int16_rounder)[2] = { 0x0000000000004000ULL, 0x0000000000000000ULL};
@@ -48,7 +48,7 @@ __asm__ volatile(\
     "r" (dst+dst_index)\
 );
-#define COMMON_CORE_INT16_SSSE3 \
+#define COMMON_CORE_INT16_SSE2 \
     x86_reg len= -2*c->filter_length;\
 __asm__ volatile(\
     "movdqa "MANGLE(ff_resample_int16_rounder)", %%xmm0 \n\t"\
@@ -58,8 +58,10 @@ __asm__ volatile(\
     "paddd %%xmm1, %%xmm0 \n\t"\
     "add $16, %0 \n\t"\
     " js 1b \n\t"\
-    "phaddd %%xmm0, %%xmm0 \n\t"\
-    "phaddd %%xmm0, %%xmm0 \n\t"\
+    "pshufd $0x0E, %%xmm0, %%xmm1 \n\t"\
+    "paddd %%xmm1, %%xmm0 \n\t"\
+    "pshufd $0x01, %%xmm0, %%xmm1 \n\t"\
+    "paddd %%xmm1, %%xmm0 \n\t"\
     "psrad $15, %%xmm0 \n\t"\
     "packssdw %%xmm0, %%xmm0 \n\t"\
     "movd %%xmm0, (%3) \n\t"\
