|
|
|
@@ -24,6 +24,7 @@
|
|
|
|
|
|
|
|
|
/*
 * SIMD resampling entry points (implemented in x86 asm / inline asm).
 * NOTE(review): presumably each one FIR-resamples src into dst using the
 * polyphase filters stored in c, writes the number of input samples eaten
 * to *consumed, and returns the number of output samples produced (with
 * update_ctx controlling whether the context's fractional position is
 * advanced) — confirm against the generic C implementation.
 */
int swri_resample_int16_mmx2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);

int swri_resample_int16_sse2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);

int swri_resample_float_sse (struct ResampleContext *c, float *dst, const float *src, int *consumed, int src_size, int dst_size, int update_ctx);
|
|
|
|
|
|
|
|
|
DECLARE_ALIGNED(16, const uint64_t, ff_resample_int16_rounder)[2] = { 0x0000000000004000ULL, 0x0000000000000000ULL}; |
|
|
|
|
|
|
|
|
@@ -72,3 +73,25 @@ __asm__ volatile(\
|
|
|
|
"r" (dst+dst_index)\
|
|
|
|
|
NAMED_CONSTRAINTS_ADD(ff_resample_int16_rounder)\
|
|
|
|
|
); |
|
|
|
|
|
|
|
|
|
/*
 * Inner FIR dot-product for the float/SSE resampler:
 * dst[dst_index] = sum over i of src[sample_index+i] * filter[i].
 * Expects c, src, sample_index, filter, dst and dst_index to be in scope
 * at the expansion site.
 * NOTE(review): the loop consumes 4 taps (16 bytes) per iteration and exits
 * on the sign flag, so it presumably requires c->filter_length to be a
 * multiple of 4 — confirm how the filter length is padded by the caller.
 */
#define COMMON_CORE_FLT_SSE \
    /* len runs upward from -4*filter_length bytes to 0; operands %1 and %2 \
     * are pre-biased by -len below, so (%reg, %0) addresses the start of \
     * the arrays on the first iteration and no separate counter is needed. */\
    x86_reg len= -4*c->filter_length;\
__asm__ volatile(\
    "xorps %%xmm0, %%xmm0 \n\t"      /* zero the accumulator */\
    "1: \n\t"\
    "movups (%1, %0), %%xmm1 \n\t"   /* load 4 input floats (unaligned) */\
    "mulps (%2, %0), %%xmm1 \n\t"    /* multiply by 4 filter taps */\
    "addps %%xmm1, %%xmm0 \n\t"      /* accumulate 4 partial products */\
    "add $16, %0 \n\t"               /* advance 16 bytes = 4 floats */\
    " js 1b \n\t"                    /* loop while the byte offset < 0 */\
    /* horizontal sum of the four lanes of xmm0: */\
    "movhlps %%xmm0, %%xmm1 \n\t"    /* xmm1[0..1] = xmm0[2..3] */\
    "addps %%xmm1, %%xmm0 \n\t"      /* lanes 0,1 now hold pairwise sums */\
    "movss %%xmm0, %%xmm1 \n\t"      /* save lane 0 */\
    "shufps $1, %%xmm0, %%xmm0 \n\t" /* move lane 1 down into lane 0 */\
    "addps %%xmm1, %%xmm0 \n\t"      /* lane 0 = total dot product */\
    "movss %%xmm0, (%3) \n\t"        /* store the scalar result to dst */\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
);
|
|
|
|