x86: xvid_idct: port SSE2 iDCT to yasm

The main differences are properly renaming the labels and letting yasm
select the GPRs used for skipping 1D transforms.

Previous-version-reviewed-by: James Almer <jamrial@gmail.com>
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
commit 2999bd7da2
parent 8f8c31f4c7
Author:    Christophe Gisquet
Committer: Michael Niedermayer
 libavcodec/x86/Makefile        |   4
 libavcodec/x86/dct-test.c      |   4
 libavcodec/x86/xvididct.asm    | 379
 libavcodec/x86/xvididct_init.c |  18
 libavcodec/x86/xvididct_sse2.c | 406
 5 files changed
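
Illustration (not part of the patch): the inline-asm version pinned its scratch
registers to %eax/%ecx/%edx/%esi and jumped to numeric "1f"-style targets; with
x86inc's cglobal the GPR count is declared up front, scratch registers are
referenced as r1d..r4d and assigned by the assembler, and jumps go to named
local labels. A minimal, hypothetical helper in the same style (ff_row_is_zero_sse2
does not exist in FFmpeg; it only mirrors the zero-row test used in the port below):

%include "libavutil/x86/x86util.asm"

SECTION_RODATA
pb_127: times 16 db 127

SECTION .text

INIT_XMM sse2
; int ff_row_is_zero_sse2(const int16_t *block)
; Returns 1 if the first row (8 coefficients) of the block is all zero.
cglobal row_is_zero, 1, 2, 1, block   ; 1 arg, 2 GPRs, 1 XMM register
    movdqa      m0, [r0]              ; r0 = block pointer (16-byte aligned)
    paddusb     m0, [pb_127]          ; any nonzero byte saturates to >= 0x80
    pmovmskb    r1d, m0               ; r1d: scratch GPR picked by x86inc
    test        r1d, r1d
    jz          .zero                 ; named local label instead of "jz 1f"
    xor         eax, eax
    RET
.zero:
    mov         eax, 1
    RET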

libavcodec/x86/Makefile
@@ -73,8 +73,7 @@ MMX-OBJS-$(CONFIG_FDCTDSP)             += x86/fdct.o
 MMX-OBJS-$(CONFIG_IDCTDSP)             += x86/simple_idct.o

 # decoders/encoders
-MMX-OBJS-$(CONFIG_MPEG4_DECODER)       += x86/xvididct_mmx.o    \
-                                          x86/xvididct_sse2.o
+MMX-OBJS-$(CONFIG_MPEG4_DECODER)       += x86/xvididct_mmx.o
 MMX-OBJS-$(CONFIG_SNOW_DECODER)        += x86/snowdsp.o
 MMX-OBJS-$(CONFIG_SNOW_ENCODER)        += x86/snowdsp.o
 MMX-OBJS-$(CONFIG_VC1_DECODER)         += x86/vc1dsp_mmx.o
@@ -141,6 +140,7 @@ YASM-OBJS-$(CONFIG_HEVC_DECODER)       += x86/hevc_mc.o         \
                                           x86/hevc_res_add.o    \
                                           x86/hevc_sao.o
 YASM-OBJS-$(CONFIG_MLP_DECODER)        += x86/mlpdsp.o
+YASM-OBJS-$(CONFIG_MPEG4_DECODER)      += x86/xvididct.o
 YASM-OBJS-$(CONFIG_PNG_DECODER)        += x86/pngdsp.o
 YASM-OBJS-$(CONFIG_PRORES_DECODER)     += x86/proresdsp.o
 YASM-OBJS-$(CONFIG_PRORES_LGPL_DECODER) += x86/proresdsp.o

libavcodec/x86/dct-test.c
@@ -67,9 +67,9 @@ static const struct algo idct_tab_arch[] = {
 #if HAVE_MMXEXT_INLINE
     { "XVID-MMXEXT",  ff_xvid_idct_mmxext,      FF_IDCT_PERM_NONE,      AV_CPU_FLAG_MMXEXT, 1 },
 #endif
-#if HAVE_SSE2_INLINE
+#if HAVE_SSE2_EXTERNAL
     { "XVID-SSE2",    ff_xvid_idct_sse2,        FF_IDCT_PERM_SSE2,      AV_CPU_FLAG_SSE2,   1 },
-#if ARCH_X86_64 && HAVE_YASM
+#if ARCH_X86_64
     { "PR-SSE2",      ff_prores_idct_put_10_sse2_wrap, FF_IDCT_PERM_TRANSPOSE, AV_CPU_FLAG_SSE2, 1 },
 #endif
 #endif

libavcodec/x86/xvididct.asm (new file)
@@ -0,0 +1,379 @@
; XVID MPEG-4 VIDEO CODEC
; - SSE2 inverse discrete cosine transform -
;
; Copyright(C) 2003 Pascal Massimino <skal@planet-d.net>
;
; Conversion to gcc syntax with modifications
; by Alexander Strange <astrange@ithinksw.com>
;
; Originally from dct/x86_asm/fdct_sse2_skal.asm in Xvid.
;
; This file is part of FFmpeg.
;
; Vertical pass is an implementation of the scheme:
; Loeffler C., Ligtenberg A., and Moschytz C.S.:
; Practical Fast 1D DCT Algorithm with Eleven Multiplications,
; Proc. ICASSP 1989, 988-991.
;
; Horizontal pass is a double 4x4 vector/matrix multiplication,
; (see also Intel's Application Note 922:
; http://developer.intel.com/vtune/cbts/strmsimd/922down.htm
; Copyright (C) 1999 Intel Corporation)
;
; More details at http://skal.planet-d.net/coding/dct.html
;
; FFmpeg is free software; you can redistribute it and/or
; modify it under the terms of the GNU Lesser General Public
; License as published by the Free Software Foundation; either
; version 2.1 of the License, or (at your option) any later version.
;
; FFmpeg is distributed in the hope that it will be useful,
; but WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; Lesser General Public License for more details.
;
; You should have received a copy of the GNU Lesser General Public License
; along with FFmpeg; if not, write to the Free Software Foundation,
; Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
tan1: times 8 dw 13036
tan2: times 8 dw 27146
tan3: times 8 dw 43790
sqrt2: times 8 dw 23170
iTab1: dw 0x4000, 0x539f, 0xc000, 0xac61, 0x4000, 0xdd5d, 0x4000, 0xdd5d
dw 0x4000, 0x22a3, 0x4000, 0x22a3, 0xc000, 0x539f, 0x4000, 0xac61
dw 0x3249, 0x11a8, 0x4b42, 0xee58, 0x11a8, 0x4b42, 0x11a8, 0xcdb7
dw 0x58c5, 0x4b42, 0xa73b, 0xcdb7, 0x3249, 0xa73b, 0x4b42, 0xa73b
iTab2: dw 0x58c5, 0x73fc, 0xa73b, 0x8c04, 0x58c5, 0xcff5, 0x58c5, 0xcff5
dw 0x58c5, 0x300b, 0x58c5, 0x300b, 0xa73b, 0x73fc, 0x58c5, 0x8c04
dw 0x45bf, 0x187e, 0x6862, 0xe782, 0x187e, 0x6862, 0x187e, 0xba41
dw 0x7b21, 0x6862, 0x84df, 0xba41, 0x45bf, 0x84df, 0x6862, 0x84df
iTab3: dw 0x539f, 0x6d41, 0xac61, 0x92bf, 0x539f, 0xd2bf, 0x539f, 0xd2bf
dw 0x539f, 0x2d41, 0x539f, 0x2d41, 0xac61, 0x6d41, 0x539f, 0x92bf
dw 0x41b3, 0x1712, 0x6254, 0xe8ee, 0x1712, 0x6254, 0x1712, 0xbe4d
dw 0x73fc, 0x6254, 0x8c04, 0xbe4d, 0x41b3, 0x8c04, 0x6254, 0x8c04
iTab4: dw 0x4b42, 0x6254, 0xb4be, 0x9dac, 0x4b42, 0xd746, 0x4b42, 0xd746
dw 0x4b42, 0x28ba, 0x4b42, 0x28ba, 0xb4be, 0x6254, 0x4b42, 0x9dac
dw 0x3b21, 0x14c3, 0x587e, 0xeb3d, 0x14c3, 0x587e, 0x14c3, 0xc4df
dw 0x6862, 0x587e, 0x979e, 0xc4df, 0x3b21, 0x979e, 0x587e, 0x979e
walkenIdctRounders: times 4 dd 65536
times 4 dd 3597
times 4 dd 2260
times 4 dd 1203
times 4 dd 120
times 4 dd 512
pb_127: times 8 db 127
SECTION .text
; Temporary storage before the column pass
%define ROW1 xmm6
%define ROW3 xmm4
%define ROW5 xmm5
%define ROW7 xmm7
%macro CLEAR_ODD 1
pxor %1, %1
%endmacro
%macro PUT_ODD 1
pshufhw %1, xmm2, 0x1B
%endmacro
%macro MOV32 2
%if ARCH_X86_32
movdqa %2, %1
%endif
%endmacro
%macro CLEAR_EVEN 1
%if ARCH_X86_64
CLEAR_ODD %1
%endif
%endmacro
%macro PUT_EVEN 1
%if ARCH_X86_64
PUT_ODD %1
%else
pshufhw xmm2, xmm2, 0x1B
movdqa %1, xmm2
%endif
%endmacro
%if ARCH_X86_64
%define ROW0 xmm8
%define REG0 ROW0
%define ROW2 xmm9
%define REG2 ROW2
%define ROW4 xmm10
%define REG4 ROW4
%define ROW6 xmm11
%define REG6 ROW6
%define XMMS xmm12
%define SREG2 REG2
%define TAN3 xmm13
%define TAN1 xmm14
%else
%define ROW0 [r0 + 0*16]
%define REG0 xmm4
%define ROW2 [r0 + 2*16]
%define REG2 xmm4
%define ROW4 [r0 + 4*16]
%define REG4 xmm6
%define ROW6 [r0 + 6*16]
%define REG6 xmm6
%define XMMS xmm2
%define SREG2 xmm7
%define TAN3 xmm0
%define TAN1 xmm2
%endif
%macro JZ 2
test %1, %1
jz .%2
%endmacro
%macro JNZ 2
test %1, %1
jnz .%2
%endmacro
%macro TEST_ONE_ROW 4 ; src, reg, clear, arg
%3 %4
movq mm1, [%1]
por mm1, [%1 + 8]
paddusb mm1, mm0
pmovmskb %2, mm1
%endmacro
;row1, row2, reg1, reg2, clear1, arg1, clear2, arg2
%macro TEST_TWO_ROWS 8
%5 %6
%7 %8
movq mm1, [%1 + 0]
por mm1, [%1 + 8]
movq mm2, [%2 + 0]
por mm2, [%2 + 8]
paddusb mm1, mm0
paddusb mm2, mm0
pmovmskb %3, mm1
pmovmskb %4, mm2
%endmacro
; IDCT pass on rows.
%macro iMTX_MULT 4-5 ; src, table, put, arg, rounder
movdqa xmm3, [%1]
movdqa xmm0, xmm3
pshufd xmm1, xmm3, 0x11 ; 4602
punpcklqdq xmm0, xmm0 ; 0246
pmaddwd xmm0, [%2]
pmaddwd xmm1, [%2+16]
pshufd xmm2, xmm3, 0xBB ; 5713
punpckhqdq xmm3, xmm3 ; 1357
pmaddwd xmm2, [%2+32]
pmaddwd xmm3, [%2+48]
paddd xmm0, xmm1
paddd xmm2, xmm3
%if %0 == 5
paddd xmm0, [walkenIdctRounders+%5]
%endif
movdqa xmm3, xmm2
paddd xmm2, xmm0
psubd xmm0, xmm3
psrad xmm2, 11
psrad xmm0, 11
packssdw xmm2, xmm0
%3 %4
%endmacro
%macro iLLM_HEAD 0
movdqa TAN3, [tan3]
movdqa TAN1, [tan1]
%endmacro
; IDCT pass on columns.
%macro iLLM_PASS 1 ;dct
movdqa xmm1, TAN3
movdqa xmm3, TAN1
pmulhw TAN3, xmm4
pmulhw xmm1, xmm5
paddsw TAN3, xmm4
paddsw xmm1, xmm5
psubsw TAN3, xmm5
paddsw xmm1, xmm4
pmulhw xmm3, xmm7
pmulhw TAN1, xmm6
paddsw xmm3, xmm6
psubsw TAN1, xmm7
movdqa xmm7, xmm3
movdqa xmm6, TAN1
psubsw xmm3, xmm1
psubsw TAN1, TAN3
paddsw xmm1, xmm7
paddsw TAN3, xmm6
movdqa xmm6, xmm3
psubsw xmm3, TAN3
paddsw TAN3, xmm6
movdqa xmm4, [sqrt2]
pmulhw xmm3, xmm4
pmulhw TAN3, xmm4
paddsw TAN3, TAN3
paddsw xmm3, xmm3
movdqa xmm7, [tan2]
MOV32 ROW2, REG2
MOV32 ROW6, REG6
movdqa xmm5, xmm7
pmulhw xmm7, REG6
pmulhw xmm5, REG2
paddsw xmm7, REG2
psubsw xmm5, REG6
MOV32 ROW0, REG0
MOV32 ROW4, REG4
MOV32 TAN1, [r0]
movdqa XMMS, REG0
psubsw REG0, REG4
paddsw REG4, XMMS
movdqa XMMS, REG4
psubsw REG4, xmm7
paddsw xmm7, XMMS
movdqa XMMS, REG0
psubsw REG0, xmm5
paddsw xmm5, XMMS
movdqa XMMS, xmm5
psubsw xmm5, TAN3
paddsw TAN3, XMMS
movdqa XMMS, REG0
psubsw REG0, xmm3
paddsw xmm3, XMMS
MOV32 [r0], TAN1
psraw xmm5, 6
psraw REG0, 6
psraw TAN3, 6
psraw xmm3, 6
movdqa [%1+1*16], TAN3
movdqa [%1+2*16], xmm3
movdqa [%1+5*16], REG0
movdqa [%1+6*16], xmm5
movdqa xmm0, xmm7
movdqa xmm4, REG4
psubsw xmm7, xmm1
psubsw REG4, TAN1
paddsw xmm1, xmm0
paddsw TAN1, xmm4
psraw xmm1, 6
psraw xmm7, 6
psraw TAN1, 6
psraw REG4, 6
movdqa [%1+0*16], xmm1
movdqa [%1+3*16], TAN1
movdqa [%1+4*16], REG4
movdqa [%1+7*16], xmm7
%endmacro
; IDCT pass on columns, assuming rows 4-7 are zero
%macro iLLM_PASS_SPARSE 1 ;dct
pmulhw TAN3, xmm4
paddsw TAN3, xmm4
movdqa xmm3, xmm6
pmulhw TAN1, xmm6
movdqa xmm1, xmm4
psubsw xmm3, xmm1
paddsw xmm1, xmm6
movdqa xmm6, TAN1
psubsw TAN1, TAN3
paddsw TAN3, xmm6
movdqa xmm6, xmm3
psubsw xmm3, TAN3
paddsw TAN3, xmm6
movdqa xmm4, [sqrt2]
pmulhw xmm3, xmm4
pmulhw TAN3, xmm4
paddsw TAN3, TAN3
paddsw xmm3, xmm3
movdqa xmm5, [tan2]
MOV32 ROW2, SREG2
pmulhw xmm5, SREG2
MOV32 ROW0, REG0
movdqa xmm6, REG0
psubsw xmm6, SREG2
paddsw SREG2, REG0
MOV32 TAN1, [r0]
movdqa XMMS, REG0
psubsw REG0, xmm5
paddsw xmm5, XMMS
movdqa XMMS, xmm5
psubsw xmm5, TAN3
paddsw TAN3, XMMS
movdqa XMMS, REG0
psubsw REG0, xmm3
paddsw xmm3, XMMS
MOV32 [r0], TAN1
psraw xmm5, 6
psraw REG0, 6
psraw TAN3, 6
psraw xmm3, 6
movdqa [%1+1*16], TAN3
movdqa [%1+2*16], xmm3
movdqa [%1+5*16], REG0
movdqa [%1+6*16], xmm5
movdqa xmm0, SREG2
movdqa xmm4, xmm6
psubsw SREG2, xmm1
psubsw xmm6, TAN1
paddsw xmm1, xmm0
paddsw TAN1, xmm4
psraw xmm1, 6
psraw SREG2, 6
psraw TAN1, 6
psraw xmm6, 6
movdqa [%1+0*16], xmm1
movdqa [%1+3*16], TAN1
movdqa [%1+4*16], xmm6
movdqa [%1+7*16], SREG2
%endmacro
INIT_XMM sse2
cglobal xvid_idct, 1, 5, 8+7*ARCH_X86_64, block
movq mm0, [pb_127]
iMTX_MULT r0 + 0*16, iTab1, PUT_EVEN, ROW0, 0*16
iMTX_MULT r0 + 1*16, iTab2, PUT_ODD, ROW1, 1*16
iMTX_MULT r0 + 2*16, iTab3, PUT_EVEN, ROW2, 2*16
TEST_TWO_ROWS r0 + 3*16, r0 + 4*16, r1d, r2d, CLEAR_ODD, ROW3, CLEAR_EVEN, ROW4 ; a, c
JZ r1d, col1
iMTX_MULT r0 + 3*16, iTab4, PUT_ODD, ROW3, 3*16
.col1:
TEST_TWO_ROWS r0 + 5*16, r0 + 6*16, r1d, r3d, CLEAR_ODD, ROW5, CLEAR_EVEN, ROW6 ; a, d
TEST_ONE_ROW r0 + 7*16, r4d, CLEAR_ODD, ROW7 ; esi
iLLM_HEAD
JNZ r2d, 2
JNZ r1d, 3
JNZ r3d, 4
JNZ r4d, 5
iLLM_PASS_SPARSE r0
jmp .6
.2:
iMTX_MULT r0 + 4*16, iTab1, PUT_EVEN, ROW4
.3:
iMTX_MULT r0 + 5*16, iTab4, PUT_ODD, ROW5, 4*16
JZ r3d, col2
.4:
iMTX_MULT r0 + 6*16, iTab3, PUT_EVEN, ROW6, 5*16
.col2:
JZ r4d, col3
.5:
iMTX_MULT r0 + 7*16, iTab2, PUT_ODD, ROW7, 5*16
.col3:
%if ARCH_X86_32
iLLM_HEAD
%endif
iLLM_PASS r0
.6:
RET

libavcodec/x86/xvididct_init.c
@@ -26,6 +26,18 @@
 #include "idctdsp.h"
 #include "xvididct.h"

+static void xvid_idct_sse2_put(uint8_t *dest, int line_size, short *block)
+{
+    ff_xvid_idct_sse2(block);
+    ff_put_pixels_clamped(block, dest, line_size);
+}
+
+static void xvid_idct_sse2_add(uint8_t *dest, int line_size, short *block)
+{
+    ff_xvid_idct_sse2(block);
+    ff_add_pixels_clamped(block, dest, line_size);
+}
+
 av_cold void ff_xvid_idct_init_x86(IDCTDSPContext *c, AVCodecContext *avctx,
                                    unsigned high_bit_depth)
 {
@@ -50,9 +62,9 @@ av_cold void ff_xvid_idct_init_x86(IDCTDSPContext *c, AVCodecContext *avctx,
         c->perm_type = FF_IDCT_PERM_NONE;
     }

-    if (INLINE_SSE2(cpu_flags)) {
-        c->idct_put  = ff_xvid_idct_sse2_put;
-        c->idct_add  = ff_xvid_idct_sse2_add;
+    if (EXTERNAL_SSE2(cpu_flags)) {
+        c->idct_put  = xvid_idct_sse2_put;
+        c->idct_add  = xvid_idct_sse2_add;
         c->idct      = ff_xvid_idct_sse2;
         c->perm_type = FF_IDCT_PERM_SSE2;
     }

libavcodec/x86/xvididct_sse2.c (deleted)
@@ -1,406 +0,0 @@
/*
* XVID MPEG-4 VIDEO CODEC
* - SSE2 inverse discrete cosine transform -
*
* Copyright(C) 2003 Pascal Massimino <skal@planet-d.net>
*
* Conversion to gcc syntax with modifications
* by Alexander Strange <astrange@ithinksw.com>
*
* Originally from dct/x86_asm/fdct_sse2_skal.asm in Xvid.
*
* This file is part of FFmpeg.
*
* Vertical pass is an implementation of the scheme:
* Loeffler C., Ligtenberg A., and Moschytz C.S.:
* Practical Fast 1D DCT Algorithm with Eleven Multiplications,
* Proc. ICASSP 1989, 988-991.
*
* Horizontal pass is a double 4x4 vector/matrix multiplication,
* (see also Intel's Application Note 922:
* http://developer.intel.com/vtune/cbts/strmsimd/922down.htm
* Copyright (C) 1999 Intel Corporation)
*
* More details at http://skal.planet-d.net/coding/dct.html
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with FFmpeg; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/idctdsp.h"
#include "idctdsp.h"
#include "xvididct.h"
#if HAVE_SSE2_INLINE
/**
* @file
* @brief SSE2 IDCT compatible with the Xvid IDCT
*/
#define X8(x) x, x, x, x, x, x, x, x
DECLARE_ASM_CONST(16, int16_t, tan1)[] = { X8(13036) }; // tan( pi/16)
DECLARE_ASM_CONST(16, int16_t, tan2)[] = { X8(27146) }; // tan(2pi/16) = sqrt(2)-1
DECLARE_ASM_CONST(16, int16_t, tan3)[] = { X8(43790) }; // tan(3pi/16)-1
DECLARE_ASM_CONST(16, int16_t, sqrt2)[] = { X8(23170) }; // 0.5/sqrt(2)
DECLARE_ASM_CONST(8, uint8_t, m127)[] = { X8(127) };
DECLARE_ASM_CONST(16, int16_t, iTab1)[] = {
0x4000, 0x539f, 0xc000, 0xac61, 0x4000, 0xdd5d, 0x4000, 0xdd5d,
0x4000, 0x22a3, 0x4000, 0x22a3, 0xc000, 0x539f, 0x4000, 0xac61,
0x3249, 0x11a8, 0x4b42, 0xee58, 0x11a8, 0x4b42, 0x11a8, 0xcdb7,
0x58c5, 0x4b42, 0xa73b, 0xcdb7, 0x3249, 0xa73b, 0x4b42, 0xa73b
};
DECLARE_ASM_CONST(16, int16_t, iTab2)[] = {
0x58c5, 0x73fc, 0xa73b, 0x8c04, 0x58c5, 0xcff5, 0x58c5, 0xcff5,
0x58c5, 0x300b, 0x58c5, 0x300b, 0xa73b, 0x73fc, 0x58c5, 0x8c04,
0x45bf, 0x187e, 0x6862, 0xe782, 0x187e, 0x6862, 0x187e, 0xba41,
0x7b21, 0x6862, 0x84df, 0xba41, 0x45bf, 0x84df, 0x6862, 0x84df
};
DECLARE_ASM_CONST(16, int16_t, iTab3)[] = {
0x539f, 0x6d41, 0xac61, 0x92bf, 0x539f, 0xd2bf, 0x539f, 0xd2bf,
0x539f, 0x2d41, 0x539f, 0x2d41, 0xac61, 0x6d41, 0x539f, 0x92bf,
0x41b3, 0x1712, 0x6254, 0xe8ee, 0x1712, 0x6254, 0x1712, 0xbe4d,
0x73fc, 0x6254, 0x8c04, 0xbe4d, 0x41b3, 0x8c04, 0x6254, 0x8c04
};
DECLARE_ASM_CONST(16, int16_t, iTab4)[] = {
0x4b42, 0x6254, 0xb4be, 0x9dac, 0x4b42, 0xd746, 0x4b42, 0xd746,
0x4b42, 0x28ba, 0x4b42, 0x28ba, 0xb4be, 0x6254, 0x4b42, 0x9dac,
0x3b21, 0x14c3, 0x587e, 0xeb3d, 0x14c3, 0x587e, 0x14c3, 0xc4df,
0x6862, 0x587e, 0x979e, 0xc4df, 0x3b21, 0x979e, 0x587e, 0x979e
};
DECLARE_ASM_CONST(16, int32_t, walkenIdctRounders)[] = {
65536, 65536, 65536, 65536,
3597, 3597, 3597, 3597,
2260, 2260, 2260, 2260,
1203, 1203, 1203, 1203,
120, 120, 120, 120,
512, 512, 512, 512
};
// Temporary storage before the column pass
#define ROW1 "%%xmm6"
#define ROW3 "%%xmm4"
#define ROW5 "%%xmm5"
#define ROW7 "%%xmm7"
#define CLEAR_ODD(r) "pxor "r","r" \n\t"
#define PUT_ODD(dst) "pshufhw $0x1B, %%xmm2, "dst" \n\t"
#if ARCH_X86_64
# define ROW0 "%%xmm8"
# define REG0 ROW0
# define ROW2 "%%xmm9"
# define REG2 ROW2
# define ROW4 "%%xmm10"
# define REG4 ROW4
# define ROW6 "%%xmm11"
# define REG6 ROW6
# define CLEAR_EVEN(r) CLEAR_ODD(r)
# define PUT_EVEN(dst) PUT_ODD(dst)
# define XMMS "%%xmm12"
# define MOV_32_ONLY "#"
# define SREG2 REG2
# define TAN3 "%%xmm13"
# define TAN1 "%%xmm14"
#else
# define ROW0 "(%0)"
# define REG0 "%%xmm4"
# define ROW2 "2*16(%0)"
# define REG2 "%%xmm4"
# define ROW4 "4*16(%0)"
# define REG4 "%%xmm6"
# define ROW6 "6*16(%0)"
# define REG6 "%%xmm6"
# define CLEAR_EVEN(r)
# define PUT_EVEN(dst) \
"pshufhw $0x1B, %%xmm2, %%xmm2 \n\t" \
"movdqa %%xmm2, "dst" \n\t"
# define XMMS "%%xmm2"
# define MOV_32_ONLY "movdqa "
# define SREG2 "%%xmm7"
# define TAN3 "%%xmm0"
# define TAN1 "%%xmm2"
#endif
#define ROUND(x) "paddd "x
#define JZ(reg, to) \
"testl "reg","reg" \n\t" \
"jz "to" \n\t"
#define JNZ(reg, to) \
"testl "reg","reg" \n\t" \
"jnz "to" \n\t"
#define TEST_ONE_ROW(src, reg, clear) \
clear \
"movq "src", %%mm1 \n\t" \
"por 8+"src", %%mm1 \n\t" \
"paddusb %%mm0, %%mm1 \n\t" \
"pmovmskb %%mm1, "reg" \n\t"
#define TEST_TWO_ROWS(row1, row2, reg1, reg2, clear1, clear2) \
clear1 \
clear2 \
"movq "row1", %%mm1 \n\t" \
"por 8+"row1", %%mm1 \n\t" \
"movq "row2", %%mm2 \n\t" \
"por 8+"row2", %%mm2 \n\t" \
"paddusb %%mm0, %%mm1 \n\t" \
"paddusb %%mm0, %%mm2 \n\t" \
"pmovmskb %%mm1, "reg1" \n\t" \
"pmovmskb %%mm2, "reg2" \n\t"
/// IDCT pass on rows.
#define iMTX_MULT(src, table, rounder, put) \
"movdqa "src", %%xmm3 \n\t" \
"movdqa %%xmm3, %%xmm0 \n\t" \
"pshufd $0x11, %%xmm3, %%xmm1 \n\t" /* 4602 */ \
"punpcklqdq %%xmm0, %%xmm0 \n\t" /* 0246 */ \
"pmaddwd "table", %%xmm0 \n\t" \
"pmaddwd 16+"table", %%xmm1 \n\t" \
"pshufd $0xBB, %%xmm3, %%xmm2 \n\t" /* 5713 */ \
"punpckhqdq %%xmm3, %%xmm3 \n\t" /* 1357 */ \
"pmaddwd 32+"table", %%xmm2 \n\t" \
"pmaddwd 48+"table", %%xmm3 \n\t" \
"paddd %%xmm1, %%xmm0 \n\t" \
"paddd %%xmm3, %%xmm2 \n\t" \
rounder", %%xmm0 \n\t" \
"movdqa %%xmm2, %%xmm3 \n\t" \
"paddd %%xmm0, %%xmm2 \n\t" \
"psubd %%xmm3, %%xmm0 \n\t" \
"psrad $11, %%xmm2 \n\t" \
"psrad $11, %%xmm0 \n\t" \
"packssdw %%xmm0, %%xmm2 \n\t" \
put \
"1: \n\t"
#define iLLM_HEAD \
"movdqa "MANGLE(tan3)", "TAN3" \n\t" \
"movdqa "MANGLE(tan1)", "TAN1" \n\t" \
/// IDCT pass on columns.
#define iLLM_PASS(dct) \
"movdqa "TAN3", %%xmm1 \n\t" \
"movdqa "TAN1", %%xmm3 \n\t" \
"pmulhw %%xmm4, "TAN3" \n\t" \
"pmulhw %%xmm5, %%xmm1 \n\t" \
"paddsw %%xmm4, "TAN3" \n\t" \
"paddsw %%xmm5, %%xmm1 \n\t" \
"psubsw %%xmm5, "TAN3" \n\t" \
"paddsw %%xmm4, %%xmm1 \n\t" \
"pmulhw %%xmm7, %%xmm3 \n\t" \
"pmulhw %%xmm6, "TAN1" \n\t" \
"paddsw %%xmm6, %%xmm3 \n\t" \
"psubsw %%xmm7, "TAN1" \n\t" \
"movdqa %%xmm3, %%xmm7 \n\t" \
"movdqa "TAN1", %%xmm6 \n\t" \
"psubsw %%xmm1, %%xmm3 \n\t" \
"psubsw "TAN3", "TAN1" \n\t" \
"paddsw %%xmm7, %%xmm1 \n\t" \
"paddsw %%xmm6, "TAN3" \n\t" \
"movdqa %%xmm3, %%xmm6 \n\t" \
"psubsw "TAN3", %%xmm3 \n\t" \
"paddsw %%xmm6, "TAN3" \n\t" \
"movdqa "MANGLE(sqrt2)", %%xmm4 \n\t" \
"pmulhw %%xmm4, %%xmm3 \n\t" \
"pmulhw %%xmm4, "TAN3" \n\t" \
"paddsw "TAN3", "TAN3" \n\t" \
"paddsw %%xmm3, %%xmm3 \n\t" \
"movdqa "MANGLE(tan2)", %%xmm7 \n\t" \
MOV_32_ONLY ROW2", "REG2" \n\t" \
MOV_32_ONLY ROW6", "REG6" \n\t" \
"movdqa %%xmm7, %%xmm5 \n\t" \
"pmulhw "REG6", %%xmm7 \n\t" \
"pmulhw "REG2", %%xmm5 \n\t" \
"paddsw "REG2", %%xmm7 \n\t" \
"psubsw "REG6", %%xmm5 \n\t" \
MOV_32_ONLY ROW0", "REG0" \n\t" \
MOV_32_ONLY ROW4", "REG4" \n\t" \
MOV_32_ONLY" "TAN1", (%0) \n\t" \
"movdqa "REG0", "XMMS" \n\t" \
"psubsw "REG4", "REG0" \n\t" \
"paddsw "XMMS", "REG4" \n\t" \
"movdqa "REG4", "XMMS" \n\t" \
"psubsw %%xmm7, "REG4" \n\t" \
"paddsw "XMMS", %%xmm7 \n\t" \
"movdqa "REG0", "XMMS" \n\t" \
"psubsw %%xmm5, "REG0" \n\t" \
"paddsw "XMMS", %%xmm5 \n\t" \
"movdqa %%xmm5, "XMMS" \n\t" \
"psubsw "TAN3", %%xmm5 \n\t" \
"paddsw "XMMS", "TAN3" \n\t" \
"movdqa "REG0", "XMMS" \n\t" \
"psubsw %%xmm3, "REG0" \n\t" \
"paddsw "XMMS", %%xmm3 \n\t" \
MOV_32_ONLY" (%0), "TAN1" \n\t" \
"psraw $6, %%xmm5 \n\t" \
"psraw $6, "REG0" \n\t" \
"psraw $6, "TAN3" \n\t" \
"psraw $6, %%xmm3 \n\t" \
"movdqa "TAN3", 1*16("dct") \n\t" \
"movdqa %%xmm3, 2*16("dct") \n\t" \
"movdqa "REG0", 5*16("dct") \n\t" \
"movdqa %%xmm5, 6*16("dct") \n\t" \
"movdqa %%xmm7, %%xmm0 \n\t" \
"movdqa "REG4", %%xmm4 \n\t" \
"psubsw %%xmm1, %%xmm7 \n\t" \
"psubsw "TAN1", "REG4" \n\t" \
"paddsw %%xmm0, %%xmm1 \n\t" \
"paddsw %%xmm4, "TAN1" \n\t" \
"psraw $6, %%xmm1 \n\t" \
"psraw $6, %%xmm7 \n\t" \
"psraw $6, "TAN1" \n\t" \
"psraw $6, "REG4" \n\t" \
"movdqa %%xmm1, ("dct") \n\t" \
"movdqa "TAN1", 3*16("dct") \n\t" \
"movdqa "REG4", 4*16("dct") \n\t" \
"movdqa %%xmm7, 7*16("dct") \n\t"
/// IDCT pass on columns, assuming rows 4-7 are zero.
#define iLLM_PASS_SPARSE(dct) \
"pmulhw %%xmm4, "TAN3" \n\t" \
"paddsw %%xmm4, "TAN3" \n\t" \
"movdqa %%xmm6, %%xmm3 \n\t" \
"pmulhw %%xmm6, "TAN1" \n\t" \
"movdqa %%xmm4, %%xmm1 \n\t" \
"psubsw %%xmm1, %%xmm3 \n\t" \
"paddsw %%xmm6, %%xmm1 \n\t" \
"movdqa "TAN1", %%xmm6 \n\t" \
"psubsw "TAN3", "TAN1" \n\t" \
"paddsw %%xmm6, "TAN3" \n\t" \
"movdqa %%xmm3, %%xmm6 \n\t" \
"psubsw "TAN3", %%xmm3 \n\t" \
"paddsw %%xmm6, "TAN3" \n\t" \
"movdqa "MANGLE(sqrt2)", %%xmm4 \n\t" \
"pmulhw %%xmm4, %%xmm3 \n\t" \
"pmulhw %%xmm4, "TAN3" \n\t" \
"paddsw "TAN3", "TAN3" \n\t" \
"paddsw %%xmm3, %%xmm3 \n\t" \
"movdqa "MANGLE(tan2)", %%xmm5 \n\t" \
MOV_32_ONLY ROW2", "SREG2" \n\t" \
"pmulhw "SREG2", %%xmm5 \n\t" \
MOV_32_ONLY ROW0", "REG0" \n\t" \
"movdqa "REG0", %%xmm6 \n\t" \
"psubsw "SREG2", %%xmm6 \n\t" \
"paddsw "REG0", "SREG2" \n\t" \
MOV_32_ONLY" "TAN1", (%0) \n\t" \
"movdqa "REG0", "XMMS" \n\t" \
"psubsw %%xmm5, "REG0" \n\t" \
"paddsw "XMMS", %%xmm5 \n\t" \
"movdqa %%xmm5, "XMMS" \n\t" \
"psubsw "TAN3", %%xmm5 \n\t" \
"paddsw "XMMS", "TAN3" \n\t" \
"movdqa "REG0", "XMMS" \n\t" \
"psubsw %%xmm3, "REG0" \n\t" \
"paddsw "XMMS", %%xmm3 \n\t" \
MOV_32_ONLY" (%0), "TAN1" \n\t" \
"psraw $6, %%xmm5 \n\t" \
"psraw $6, "REG0" \n\t" \
"psraw $6, "TAN3" \n\t" \
"psraw $6, %%xmm3 \n\t" \
"movdqa "TAN3", 1*16("dct") \n\t" \
"movdqa %%xmm3, 2*16("dct") \n\t" \
"movdqa "REG0", 5*16("dct") \n\t" \
"movdqa %%xmm5, 6*16("dct") \n\t" \
"movdqa "SREG2", %%xmm0 \n\t" \
"movdqa %%xmm6, %%xmm4 \n\t" \
"psubsw %%xmm1, "SREG2" \n\t" \
"psubsw "TAN1", %%xmm6 \n\t" \
"paddsw %%xmm0, %%xmm1 \n\t" \
"paddsw %%xmm4, "TAN1" \n\t" \
"psraw $6, %%xmm1 \n\t" \
"psraw $6, "SREG2" \n\t" \
"psraw $6, "TAN1" \n\t" \
"psraw $6, %%xmm6 \n\t" \
"movdqa %%xmm1, ("dct") \n\t" \
"movdqa "TAN1", 3*16("dct") \n\t" \
"movdqa %%xmm6, 4*16("dct") \n\t" \
"movdqa "SREG2", 7*16("dct") \n\t"
av_extern_inline void ff_xvid_idct_sse2(short *block)
{
__asm__ volatile (
"movq "MANGLE (m127) ", %%mm0 \n\t"
iMTX_MULT("(%0)", MANGLE(iTab1), ROUND(MANGLE(walkenIdctRounders)), PUT_EVEN(ROW0))
iMTX_MULT("1*16(%0)", MANGLE(iTab2), ROUND("1*16+"MANGLE(walkenIdctRounders)), PUT_ODD(ROW1))
iMTX_MULT("2*16(%0)", MANGLE(iTab3), ROUND("2*16+"MANGLE(walkenIdctRounders)), PUT_EVEN(ROW2))
TEST_TWO_ROWS("3*16(%0)", "4*16(%0)", "%%eax", "%%ecx", CLEAR_ODD(ROW3), CLEAR_EVEN(ROW4))
JZ("%%eax", "1f")
iMTX_MULT("3*16(%0)", MANGLE(iTab4), ROUND("3*16+"MANGLE(walkenIdctRounders)), PUT_ODD(ROW3))
TEST_TWO_ROWS("5*16(%0)", "6*16(%0)", "%%eax", "%%edx", CLEAR_ODD(ROW5), CLEAR_EVEN(ROW6))
TEST_ONE_ROW("7*16(%0)", "%%esi", CLEAR_ODD(ROW7))
iLLM_HEAD
".p2align 4 \n\t"
JNZ("%%ecx", "2f")
JNZ("%%eax", "3f")
JNZ("%%edx", "4f")
JNZ("%%esi", "5f")
iLLM_PASS_SPARSE("%0")
"jmp 6f \n\t"
"2: \n\t"
iMTX_MULT("4*16(%0)", MANGLE(iTab1), "#", PUT_EVEN(ROW4))
"3: \n\t"
iMTX_MULT("5*16(%0)", MANGLE(iTab4), ROUND("4*16+"MANGLE(walkenIdctRounders)), PUT_ODD(ROW5))
JZ("%%edx", "1f")
"4: \n\t"
iMTX_MULT("6*16(%0)", MANGLE(iTab3), ROUND("5*16+"MANGLE(walkenIdctRounders)), PUT_EVEN(ROW6))
JZ("%%esi", "1f")
"5: \n\t"
iMTX_MULT("7*16(%0)", MANGLE(iTab2), ROUND("5*16+"MANGLE(walkenIdctRounders)), PUT_ODD(ROW7))
#if ARCH_X86_32
iLLM_HEAD
#endif
iLLM_PASS("%0")
"6: \n\t"
: "+r" (block)
: NAMED_CONSTRAINTS_ARRAY(m127,iTab1,walkenIdctRounders,iTab2,iTab3,iTab4,tan3,tan1,tan2,sqrt2)
: XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm4", "%xmm5", "%xmm6", "%xmm7", )
#if ARCH_X86_64
XMM_CLOBBERS("%xmm8", "%xmm9", "%xmm10", "%xmm11",
"%xmm12", "%xmm13", "%xmm14", )
#endif
"%eax", "%ecx", "%edx", "%esi", "memory");
}
void ff_xvid_idct_sse2_put(uint8_t *dest, int line_size, short *block)
{
ff_xvid_idct_sse2(block);
ff_put_pixels_clamped(block, dest, line_size);
}
void ff_xvid_idct_sse2_add(uint8_t *dest, int line_size, short *block)
{
ff_xvid_idct_sse2(block);
ff_add_pixels_clamped(block, dest, line_size);
}
#endif /* HAVE_SSE2_INLINE */