3DNow! & Extended 3DNow! versions of FFT

Patch by Zuxy Meng, zuxy <<dot>> meng <<at>> gmail <<dot>> com
Minor non-functional diff-related fixes by me.

Originally committed as revision 5125 to svn://svn.ffmpeg.org/ffmpeg/trunk
Author:    Zuxy Meng
Committer: Corey Hickey
commit 82eb4b0f1b (parent 8e32161943)
 configure                  |  18
 libavcodec/Makefile        |   6
 libavcodec/dsputil.h       |   2
 libavcodec/fft.c           |  24
 libavcodec/i386/fft_3dn.c  | 136
 libavcodec/i386/fft_3dn2.c | 136
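
For orientation: both new files vectorize the same scalar radix-2 butterfly that the generic C code in libavcodec/fft.c performs. A minimal sketch of that operation (FFTComplex is a re/im pair of floats, as in dsputil.h; the helper name is mine):

    typedef struct FFTComplex { float re, im; } FFTComplex;

    /* one butterfly with twiddle factor w: t = w*q; q = p - t; p = p + t */
    static void butterfly(FFTComplex *p, FFTComplex *q, FFTComplex w)
    {
        float tre = w.re * q->re - w.im * q->im;  /* Re(w*q) */
        float tim = w.im * q->re + w.re * q->im;  /* Im(w*q) */
        q->re = p->re - tre;  q->im = p->im - tim;
        p->re = p->re + tre;  p->im = p->im + tim;
    }

Each __m64 register holds one such complex value, so the 3DNow! code below retires two butterflies per inner-loop iteration.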

configure

@@ -1110,6 +1110,19 @@ if $cc -msse -o $TMPO $TMPC 2> /dev/null ; then
 builtin_vector=yes
 fi
+
+# test for mm3dnow.h
+cat > $TMPC << EOF
+#include <mm3dnow.h>
+int main(void) {
+return 0;
+}
+EOF
+
+mm3dnow=no
+if $cc -march=athlon -o $TMPO $TMPC 2> /dev/null ; then
+mm3dnow=yes
+fi
 
 # Probe for -Wdeclaration-after-statement
 if test "$cc" = "gcc"; then
 cat > $TMPC << EOF
@@ -1328,6 +1341,7 @@ echo "broken inttypes.h $emu_fast_int"
 if test $cpu = "x86" -o $cpu = "x86_64"; then
 echo "MMX enabled $mmx"
 echo "Vector Builtins $builtin_vector"
+echo "3DNow! Builtins $mm3dnow"
 fi
 if test $cpu = "armv4l"; then
 echo "IWMMXT enabled $iwmmxt"
@@ -1508,6 +1522,10 @@ if test "$builtin_vector" = "yes" ; then
 echo "TARGET_BUILTIN_VECTOR=yes" >> config.mak
 echo "#define HAVE_BUILTIN_VECTOR 1" >> $TMPH
 fi
+if test "$mm3dnow" = "yes" ; then
+echo "TARGET_BUILTIN_3DNOW=yes" >> config.mak
+echo "#define HAVE_MM3DNOW 1" >> $TMPH
+fi
 if test "$iwmmxt" = "yes" ; then
 echo "TARGET_IWMMXT=yes" >> config.mak
 echo "#define HAVE_IWMMXT 1" >> $TMPH

libavcodec/Makefile

@@ -330,7 +330,7 @@ OBJS += i386/fdct_mmx.o i386/cputest.o \
         i386/dsputil_mmx.o i386/mpegvideo_mmx.o \
         i386/idct_mmx.o i386/motion_est_mmx.o \
         i386/simple_idct_mmx.o i386/fft_sse.o i386/vp3dsp_mmx.o \
-        i386/vp3dsp_sse2.o
+        i386/vp3dsp_sse2.o i386/fft_3dn.o i386/fft_3dn2.o
 ifeq ($(CONFIG_GPL),yes)
 OBJS += i386/idct_mmx_xvid.o
 endif
@@ -338,6 +338,10 @@ ifdef TARGET_BUILTIN_VECTOR
 i386/fft_sse.o: CFLAGS+= -msse
 depend: CFLAGS+= -msse
 endif
+ifdef TARGET_BUILTIN_3DNOW
+i386/fft_3dn.o: CFLAGS+= -m3dnow
+i386/fft_3dn2.o: CFLAGS+= -march=athlon
+endif
 endif
# armv4l specific stuff

libavcodec/dsputil.h

@@ -564,6 +564,8 @@ int ff_fft_init(FFTContext *s, int nbits, int inverse);
 void ff_fft_permute(FFTContext *s, FFTComplex *z);
 void ff_fft_calc_c(FFTContext *s, FFTComplex *z);
 void ff_fft_calc_sse(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_3dn(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_3dn2(FFTContext *s, FFTComplex *z);
 void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z);
 
 static inline void ff_fft_calc(FFTContext *s, FFTComplex *z)

libavcodec/fft.c

@@ -57,12 +57,12 @@ int ff_fft_init(FFTContext *s, int nbits, int inverse)
     s->exptab1 = NULL;
 
     /* compute constant table for HAVE_SSE version */
-#if (defined(HAVE_MMX) && defined(HAVE_BUILTIN_VECTOR)) || defined(HAVE_ALTIVEC)
+#if (defined(HAVE_MMX) && (defined(HAVE_BUILTIN_VECTOR) || defined(HAVE_MM3DNOW))) || defined(HAVE_ALTIVEC)
     {
         int has_vectors = 0;
 
 #if defined(HAVE_MMX)
-        has_vectors = mm_support() & MM_SSE;
+        has_vectors = mm_support() & (MM_3DNOW | MM_3DNOWEXT | MM_SSE | MM_SSE2);
 #endif
 #if defined(HAVE_ALTIVEC) && !defined(ALTIVEC_USE_REFERENCE_C_CODE)
         has_vectors = mm_support() & MM_ALTIVEC;
@@ -94,8 +94,24 @@ int ff_fft_init(FFTContext *s, int nbits, int inverse)
             } while (nblocks != 0);
             av_freep(&s->exptab);
 #if defined(HAVE_MMX)
-            s->fft_calc = ff_fft_calc_sse;
-#else
+#ifdef HAVE_MM3DNOW
+            if (has_vectors & MM_3DNOWEXT)
+                /* 3DNowEx for Athlon(XP) */
+                s->fft_calc = ff_fft_calc_3dn2;
+            else if (has_vectors & MM_3DNOW)
+                /* 3DNow! for K6-2/3 */
+                s->fft_calc = ff_fft_calc_3dn;
+#endif
+#ifdef HAVE_BUILTIN_VECTOR
+            if (has_vectors & MM_SSE2)
+                /* SSE for P4/K8 */
+                s->fft_calc = ff_fft_calc_sse;
+            else if ((has_vectors & MM_SSE) &&
+                     s->fft_calc == ff_fft_calc_c)
+                /* SSE for P3 */
+                s->fft_calc = ff_fft_calc_sse;
+#endif
+#else /* HAVE_MMX */
             s->fft_calc = ff_fft_calc_altivec;
 #endif
         }
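
The net effect of the fft.c hunk above is a fixed priority among the SIMD implementations. A condensed sketch of the selection order (flag and function names are taken from the diff; the standalone variables are mine):

    /* sketch: how ff_fft_init picks an implementation after this change */
    void (*fft_calc)(FFTContext *, FFTComplex *) = ff_fft_calc_c;
    int flags = mm_support();

    if (flags & MM_3DNOWEXT)         /* Athlon, Athlon XP */
        fft_calc = ff_fft_calc_3dn2;
    else if (flags & MM_3DNOW)       /* K6-2, K6-III */
        fft_calc = ff_fft_calc_3dn;

    if (flags & MM_SSE2)             /* P4, K8: SSE2 wins over extended 3DNow! */
        fft_calc = ff_fft_calc_sse;
    else if ((flags & MM_SSE) && fft_calc == ff_fft_calc_c)
        fft_calc = ff_fft_calc_sse;  /* P3: SSE only if no 3DNow! path was taken */

So an Athlon XP (SSE plus extended 3DNow!) takes the 3dn2 path, while a K8, which also reports SSE2, takes the SSE path.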

libavcodec/i386/fft_3dn.c (new file)

@@ -0,0 +1,136 @@
/*
 * FFT/MDCT transform with 3DNow! optimizations
 * Copyright (c) 2006 Zuxy MENG Jie.
 * Based on fft_sse.c copyright (c) 2002 Fabrice Bellard.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "../dsputil.h"
#include <math.h>

#ifdef HAVE_MM3DNOW

#include <mm3dnow.h>

static const int p1m1[2] __attribute__((aligned(8))) =
    { 0, 1 << 31 };

static const int m1p1[2] __attribute__((aligned(8))) =
    { 1 << 31, 0 };

void ff_fft_calc_3dn(FFTContext *s, FFTComplex *z)
{
    int ln = s->nbits;
    int j, np, np2;
    int nblocks, nloops;
    register FFTComplex *p, *q;
    FFTComplex *cptr, *cptr1;
    int k;

    np = 1 << ln;
    /* FEMMS not a must here but recommended by AMD */
    _m_femms();

    {
        __m64 *r, a0, a1, b0, b1, tmp, c;

        r = (__m64 *)&z[0];
        if (s->inverse)
            c = *(__m64 *)m1p1;
        else
            c = *(__m64 *)p1m1;

        j = (np >> 2);
        do {
            /* do the pass 0 butterfly */
            a0 = _m_pfadd(r[0], r[1]);
            a1 = _m_pfsub(r[0], r[1]);

            /* do the pass 0 butterfly */
            b0 = _m_pfadd(r[2], r[3]);
            b1 = _m_pfsub(r[2], r[3]);

            /* multiply third by -i */
            tmp = _m_punpckhdq(b1, b1);
            b1 = _m_punpckldq(b1, b1);
            b1 = _m_punpckldq(tmp, b1);
            b1 = _m_pxor(b1, c);

            /* do the pass 1 butterfly */
            r[0] = _m_pfadd(a0, b0);
            r[1] = _m_pfadd(a1, b1);
            r[2] = _m_pfsub(a0, b0);
            r[3] = _m_pfsub(a1, b1);
            r += 4;
        } while (--j != 0);
    }

    /* pass 2 .. ln-1 */
    nblocks = np >> 3;
    nloops = 1 << 2;
    np2 = np >> 1;
    cptr1 = s->exptab1;
    do {
        p = z;
        q = z + nloops;
        j = nblocks;
        do {
            cptr = cptr1;
            k = nloops >> 1;
            do {
                __m64 a0, a1, b0, b1, c0, c1, t10, t11, t20, t21;

                a0 = *(__m64 *)&p[0];
                a1 = *(__m64 *)&p[1];
                b0 = *(__m64 *)&q[0];
                b1 = *(__m64 *)&q[1];

                /* complex mul */
                c0 = *(__m64 *)&cptr[0];
                c1 = *(__m64 *)&cptr[1];
                /* cre*re cim*re */
                t10 = _m_pfmul(c0, _m_punpckldq(b0, b0));
                t11 = _m_pfmul(c1, _m_punpckldq(b1, b1));
                c0 = *(__m64 *)&cptr[2];
                c1 = *(__m64 *)&cptr[3];
                /* -cim*im cre*im */
                t20 = _m_pfmul(c0, _m_punpckhdq(b0, b0));
                t21 = _m_pfmul(c1, _m_punpckhdq(b1, b1));
                b0 = _m_pfadd(t10, t20);
                b1 = _m_pfadd(t11, t21);

                /* butterfly */
                *(__m64 *)&p[0] = _m_pfadd(a0, b0);
                *(__m64 *)&p[1] = _m_pfadd(a1, b1);
                *(__m64 *)&q[0] = _m_pfsub(a0, b0);
                *(__m64 *)&q[1] = _m_pfsub(a1, b1);

                p += 2;
                q += 2;
                cptr += 4;
            } while (--k);

            p += nloops;
            q += nloops;
        } while (--j);

        cptr1 += nloops * 2;
        nblocks = nblocks >> 1;
        nloops = nloops << 1;
    } while (nblocks != 0);

    _m_femms();
}
#endif
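
A note on the "multiply third by -i" step in the pass 0 loop above: the three punpck instructions rebuild b1 with its two halves swapped, (re, im) -> (im, re), and the pxor against p1m1 (forward) or m1p1 (inverse) then flips exactly one IEEE-754 sign bit, so the whole rotation by -i (or +i) costs no floating-point operations. A scalar sketch of the forward case (helper names are mine):

    #include <stdint.h>
    #include <string.h>

    /* negate a float by XORing its sign bit, as pxor with {0, 1 << 31} does */
    static float flip_sign(float x)
    {
        uint32_t u;
        memcpy(&u, &x, sizeof u);
        u ^= 1u << 31;
        memcpy(&x, &u, sizeof x);
        return x;
    }

    /* forward FFT: (re + i*im) * -i == im - i*re, i.e. swap halves, negate im */
    static void mul_by_minus_i(float *re, float *im)
    {
        float t = *re;
        *re = *im;
        *im = flip_sign(t);
    }

The inverse transform multiplies by +i instead, which is why m1p1 places the sign mask on the low (real) half.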

libavcodec/i386/fft_3dn2.c (new file)

@@ -0,0 +1,136 @@
/*
 * FFT/MDCT transform with Extended 3DNow! optimizations
 * Copyright (c) 2006 Zuxy MENG Jie.
 * Based on fft_sse.c copyright (c) 2002 Fabrice Bellard.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "../dsputil.h"
#include <math.h>

#ifdef HAVE_MM3DNOW

#include <mm3dnow.h>

static const int p1m1[2] __attribute__((aligned(8))) =
    { 0, 1 << 31 };

static const int m1p1[2] __attribute__((aligned(8))) =
    { 1 << 31, 0 };

void ff_fft_calc_3dn2(FFTContext *s, FFTComplex *z)
{
    int ln = s->nbits;
    int j, np, np2;
    int nblocks, nloops;
    register FFTComplex *p, *q;
    FFTComplex *cptr, *cptr1;
    int k;

    np = 1 << ln;
    /* FEMMS is not a must here but recommended by AMD */
    _m_femms();

    {
        __m64 *r, a0, a1, b0, b1, c;

        r = (__m64 *)&z[0];
        if (s->inverse)
            c = *(__m64 *)m1p1;
        else
            c = *(__m64 *)p1m1;

        j = (np >> 2);
        do {
            /* do the pass 0 butterfly */
            a0 = _m_pfadd(r[0], r[1]);
            a1 = _m_pfsub(r[0], r[1]);

            /* do the pass 0 butterfly */
            b0 = _m_pfadd(r[2], r[3]);
            b1 = _m_pfsub(r[2], r[3]);

            /* multiply third by -i */
            b1 = _m_pswapd(b1);
            b1 = _m_pxor(b1, c);

            r[0] = _m_pfadd(a0, b0);
            r[1] = _m_pfadd(a1, b1);
            r[2] = _m_pfsub(a0, b0);
            r[3] = _m_pfsub(a1, b1);
            r += 4;
        } while (--j != 0);
    }

    /* pass 2 .. ln-1 */
    nblocks = np >> 3;
    nloops = 1 << 2;
    np2 = np >> 1;
    cptr1 = s->exptab1;
    do {
        p = z;
        q = z + nloops;
        j = nblocks;
        do {
            cptr = cptr1;
            k = nloops >> 1;
            do {
                __m64 a0, a1, b0, b1, c0, c1, t10, t11, t20, t21;

                a0 = *(__m64 *)&p[0];
                a1 = *(__m64 *)&p[1];
                b0 = *(__m64 *)&q[0];
                b1 = *(__m64 *)&q[1];

                /* complex mul */
                c0 = *(__m64 *)&cptr[0];
                c1 = *(__m64 *)&cptr[1];
                /* cre*re cim*im */
                t10 = _m_pfmul(c0, b0);
                t11 = _m_pfmul(c1, b1);
                /* no need to access cptr[2] & cptr[3] */
                c0 = _m_pswapd(c0);
                c1 = _m_pswapd(c1);
                /* cim*re cre*im */
                t20 = _m_pfmul(c0, b0);
                t21 = _m_pfmul(c1, b1);
                /* cre*re-cim*im cim*re+cre*im */
                b0 = _m_pfpnacc(t10, t20);
                b1 = _m_pfpnacc(t11, t21);

                /* butterfly */
                *(__m64 *)&p[0] = _m_pfadd(a0, b0);
                *(__m64 *)&p[1] = _m_pfadd(a1, b1);
                *(__m64 *)&q[0] = _m_pfsub(a0, b0);
                *(__m64 *)&q[1] = _m_pfsub(a1, b1);

                p += 2;
                q += 2;
                cptr += 4;
            } while (--k);

            p += nloops;
            q += nloops;
        } while (--j);

        cptr1 += nloops * 2;
        nblocks = nblocks >> 1;
        nloops = nloops << 1;
    } while (nblocks != 0);

    _m_femms();
}
#endif
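
The extended 3DNow! version saves work in two places. pswapd replaces the three-punpck half-swap, and pfpnacc fuses the final subtract/add of the complex multiply: pfpnacc(a, b) yields (a[0] - a[1], b[0] + b[1]). A full complex product therefore needs only two pfmul, one pswapd and one pfpnacc, and the negated-twiddle copies in cptr[2]/cptr[3] that the plain 3DNow! path reads are never touched. A scalar sketch of what the sequence computes (FFTComplex as in the orientation sketch near the top; the helper name is mine):

    /* t1 = (c.re*z.re, c.im*z.im)           pfmul
     * t2 = (c.im*z.re, c.re*z.im)           pfmul after pswapd on c
     * r  = (t1[0] - t1[1], t2[0] + t2[1])   pfpnacc -- the product c*z */
    static FFTComplex cmul(FFTComplex c, FFTComplex z)
    {
        FFTComplex r;
        r.re = c.re * z.re - c.im * z.im;
        r.im = c.im * z.re + c.re * z.im;
        return r;
    }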