x86/float_dsp: port vector_fmul_window to yasm

Signed-off-by: James Almer <jamrial@gmail.com>
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
Authored by James Almer 11 years ago, committed by Michael Niedermayer
parent d4be3a8d63
commit dcaf9660b6

 libavutil/x86/float_dsp.asm    | 55 lines changed
 libavutil/x86/float_dsp_init.c | 81 lines changed

@@ -191,6 +191,61 @@ INIT_YMM avx
 VECTOR_DMUL_SCALAR
 %endif
 
+;-----------------------------------------------------------------------------
+; vector_fmul_window(float *dst, const float *src0,
+;                    const float *src1, const float *win, int len);
+;-----------------------------------------------------------------------------
+%macro VECTOR_FMUL_WINDOW 0
+cglobal vector_fmul_window, 5, 6, 6, dst, src0, src1, win, len, len1
+    shl     lend, 2
+    lea    len1q, [lenq - mmsize]
+    add    src0q, lenq
+    add     dstq, lenq
+    add     winq, lenq
+    neg     lenq
+.loop:
+    mova      m0, [winq  + lenq]
+    mova      m4, [src0q + lenq]
+%if cpuflag(sse)
+    mova      m1, [winq  + len1q]
+    mova      m5, [src1q + len1q]
+    shufps    m1, m1, 0x1b
+    shufps    m5, m5, 0x1b
+    mova      m2, m0
+    mova      m3, m1
+    mulps     m2, m4
+    mulps     m3, m5
+    mulps     m1, m4
+    mulps     m0, m5
+    addps     m2, m3
+    subps     m1, m0
+    shufps    m2, m2, 0x1b
+%else
+    pswapd    m1, [winq  + len1q]
+    pswapd    m5, [src1q + len1q]
+    mova      m2, m0
+    mova      m3, m1
+    pfmul     m2, m4
+    pfmul     m3, m5
+    pfmul     m1, m4
+    pfmul     m0, m5
+    pfadd     m2, m3
+    pfsub     m1, m0
+    pswapd    m2, m2
+%endif
+    mova      [dstq + lenq], m1
+    mova      [dstq + len1q], m2
+    sub    len1q, mmsize
+    add     lenq, mmsize
+    jl .loop
+    REP_RET
+%endmacro
+
+INIT_MMX 3dnowext
+VECTOR_FMUL_WINDOW
+INIT_XMM sse
+VECTOR_FMUL_WINDOW
+
 ;-----------------------------------------------------------------------------
 ; vector_fmul_add(float *dst, const float *src0, const float *src1,
 ;                 const float *src2, int len)

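For context, the assembly hunk above implements the same overlap-add windowing as the generic C version in libavutil/float_dsp.c. A minimal scalar sketch of that operation (the function and variable names here are illustrative, not taken from the tree):

    /* Multiply two half-length inputs by a window and overlap-add them into
     * dst.  dst, win and src0 are advanced by len so the loop can run on a
     * negative index, mirroring the pointer setup in the assembly
     * (add src0q/dstq/winq, lenq; neg lenq). */
    static void vector_fmul_window_ref(float *dst, const float *src0,
                                       const float *src1, const float *win,
                                       int len)
    {
        int i, j;

        dst  += len;
        win  += len;
        src0 += len;

        for (i = -len, j = len - 1; i < 0; i++, j--) {
            float s0 = src0[i];   /* forward walk over src0    */
            float s1 = src1[j];   /* reverse walk over src1    */
            float wi = win[i];    /* second half of the window */
            float wj = win[j];    /* first half of the window  */

            dst[i] = s0 * wj - s1 * wi;   /* stored from m1 in the asm */
            dst[j] = s0 * wi + s1 * wj;   /* stored from m2 in the asm */
        }
    }

One SIMD iteration computes several of these i/j pairs at once, which is why the second-half operands are loaded through len1q and reversed in-register with shufps 0x1b (pswapd in the 3DNow!Ext version).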
@@ -44,6 +44,11 @@ void ff_vector_dmul_scalar_sse2(double *dst, const double *src,
 void ff_vector_dmul_scalar_avx(double *dst, const double *src,
                                double mul, int len);
 
+void ff_vector_fmul_window_3dnowext(float *dst, const float *src0,
+                                    const float *src1, const float *win, int len);
+void ff_vector_fmul_window_sse(float *dst, const float *src0,
+                               const float *src1, const float *win, int len);
+
 void ff_vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
                             const float *src2, int len);
 void ff_vector_fmul_add_avx(float *dst, const float *src0, const float *src1,
@@ -60,88 +65,18 @@ float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);
 
 void ff_butterflies_float_sse(float *src0, float *src1, int len);
 
-#if HAVE_6REGS && HAVE_INLINE_ASM
-static void vector_fmul_window_3dnowext(float *dst, const float *src0,
-                                        const float *src1, const float *win,
-                                        int len)
-{
-    x86_reg i = -len * 4;
-    x86_reg j =  len * 4 - 8;
-    __asm__ volatile (
-        "1:                             \n"
-        "pswapd (%5, %1), %%mm1         \n"
-        "movq   (%5, %0), %%mm0         \n"
-        "pswapd (%4, %1), %%mm5         \n"
-        "movq   (%3, %0), %%mm4         \n"
-        "movq      %%mm0, %%mm2         \n"
-        "movq      %%mm1, %%mm3         \n"
-        "pfmul     %%mm4, %%mm2         \n" // src0[len + i] * win[len + i]
-        "pfmul     %%mm5, %%mm3         \n" // src1[j]       * win[len + j]
-        "pfmul     %%mm4, %%mm1         \n" // src0[len + i] * win[len + j]
-        "pfmul     %%mm5, %%mm0         \n" // src1[j]       * win[len + i]
-        "pfadd     %%mm3, %%mm2         \n"
-        "pfsub     %%mm0, %%mm1         \n"
-        "pswapd    %%mm2, %%mm2         \n"
-        "movq      %%mm1, (%2, %0)      \n"
-        "movq      %%mm2, (%2, %1)      \n"
-        "sub $8, %1                     \n"
-        "add $8, %0                     \n"
-        "jl 1b                          \n"
-        "femms                          \n"
-        : "+r"(i), "+r"(j)
-        : "r"(dst + len), "r"(src0 + len), "r"(src1), "r"(win + len)
-    );
-}
-
-static void vector_fmul_window_sse(float *dst, const float *src0,
-                                   const float *src1, const float *win, int len)
-{
-    x86_reg i = -len * 4;
-    x86_reg j =  len * 4 - 16;
-    __asm__ volatile (
-        "1:                             \n"
-        "movaps (%5, %1), %%xmm1        \n"
-        "movaps (%5, %0), %%xmm0        \n"
-        "movaps (%4, %1), %%xmm5        \n"
-        "movaps (%3, %0), %%xmm4        \n"
-        "shufps $0x1b, %%xmm1, %%xmm1   \n"
-        "shufps $0x1b, %%xmm5, %%xmm5   \n"
-        "movaps %%xmm0, %%xmm2          \n"
-        "movaps %%xmm1, %%xmm3          \n"
-        "mulps  %%xmm4, %%xmm2          \n" // src0[len + i] * win[len + i]
-        "mulps  %%xmm5, %%xmm3          \n" // src1[j]       * win[len + j]
-        "mulps  %%xmm4, %%xmm1          \n" // src0[len + i] * win[len + j]
-        "mulps  %%xmm5, %%xmm0          \n" // src1[j]       * win[len + i]
-        "addps  %%xmm3, %%xmm2          \n"
-        "subps  %%xmm0, %%xmm1          \n"
-        "shufps $0x1b, %%xmm2, %%xmm2   \n"
-        "movaps %%xmm1, (%2, %0)        \n"
-        "movaps %%xmm2, (%2, %1)        \n"
-        "sub $16, %1                    \n"
-        "add $16, %0                    \n"
-        "jl 1b                          \n"
-        : "+r"(i), "+r"(j)
-        : "r"(dst + len), "r"(src0 + len), "r"(src1), "r"(win + len)
-    );
-}
-#endif /* HAVE_6REGS && HAVE_INLINE_ASM */
-
 av_cold void ff_float_dsp_init_x86(AVFloatDSPContext *fdsp)
 {
     int cpu_flags = av_get_cpu_flags();
 
-#if HAVE_6REGS && HAVE_INLINE_ASM
-    if (INLINE_AMD3DNOWEXT(cpu_flags)) {
-        fdsp->vector_fmul_window = vector_fmul_window_3dnowext;
-    }
-    if (INLINE_SSE(cpu_flags)) {
-        fdsp->vector_fmul_window = vector_fmul_window_sse;
+    if (EXTERNAL_AMD3DNOWEXT(cpu_flags)) {
+        fdsp->vector_fmul_window = ff_vector_fmul_window_3dnowext;
     }
-#endif
     if (EXTERNAL_SSE(cpu_flags)) {
         fdsp->vector_fmul         = ff_vector_fmul_sse;
         fdsp->vector_fmac_scalar  = ff_vector_fmac_scalar_sse;
         fdsp->vector_fmul_scalar  = ff_vector_fmul_scalar_sse;
+        fdsp->vector_fmul_window  = ff_vector_fmul_window_sse;
         fdsp->vector_fmul_add     = ff_vector_fmul_add_sse;
         fdsp->vector_fmul_reverse = ff_vector_fmul_reverse_sse;
        fdsp->scalarproduct_float = ff_scalarproduct_float_sse;

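The init hunk above only repoints the AVFloatDSPContext members at the new yasm symbols, so callers are unaffected: they keep dispatching through the context. A hedged usage sketch, assuming the avpriv_float_dsp_init() entry point of this libavutil version (the wrapper function and argument names are illustrative):

    #include "libavutil/float_dsp.h"

    /* Overlap-add one block, e.g. during MDCT reconstruction.  Whichever of
     * the C, 3DNow!Ext or SSE implementations ff_float_dsp_init_x86()
     * selected at init time is reached through the same pointer. */
    static void overlap_add_block(AVFloatDSPContext *dsp, float *out,
                                  const float *saved, const float *cur,
                                  const float *half_window, int len)
    {
        /* The SSE version uses aligned loads (mova/movaps), so all four
         * buffers should be 16-byte aligned and len a multiple of 4. */
        dsp->vector_fmul_window(out, saved, cur, half_window, len);
    }

    /* Somewhere during codec init:
     *     AVFloatDSPContext fdsp;
     *     avpriv_float_dsp_init(&fdsp, 0);   // 0: bit-exact not required
     */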