avcodec/x86/huffyuvdsp: Remove obsolete MMX functions

The only systems which benefit from these are truly
ancient 32-bit x86s, as all other systems use at least the SSE2 versions
(this includes all x64 CPUs, which is why this code is restricted
to x86-32).

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
release/5.1
Andreas Rheinhardt 2 years ago
parent 6a551f1405
commit 4b6ffc2880
  1. libavcodec/x86/huffyuvdsp.asm (23 lines changed)
  2. libavcodec/x86/huffyuvdsp_init.c (8 lines changed)

@@ -32,24 +32,15 @@ SECTION .text
%macro ADD_INT16 0
cglobal add_int16, 4,4,5, dst, src, mask, w, tmp
%if mmsize > 8
test srcq, mmsize-1
jnz .unaligned
test dstq, mmsize-1
jnz .unaligned
%endif
INT16_LOOP a, add
%if mmsize > 8
.unaligned:
INT16_LOOP u, add
%endif
%endmacro
%if ARCH_X86_32
INIT_MMX mmx
ADD_INT16
%endif
INIT_XMM sse2
ADD_INT16
@@ -60,7 +51,7 @@ ADD_INT16
; void add_hfyu_left_pred_bgr32(uint8_t *dst, const uint8_t *src,
; intptr_t w, uint8_t *left)
%macro LEFT_BGR32 0
INIT_XMM sse2
cglobal add_hfyu_left_pred_bgr32, 4,4,3, dst, src, w, left
shl wq, 2
movd m0, [leftq]
@@ -71,17 +62,12 @@ cglobal add_hfyu_left_pred_bgr32, 4,4,3, dst, src, w, left
.loop:
movu m1, [srcq+wq]
mova m2, m1
%if mmsize == 8
punpckhdq m0, m0
%endif
LSHIFT m1, 4
paddb m1, m2
%if mmsize == 16
pshufd m0, m0, q3333
mova m2, m1
LSHIFT m1, 8
paddb m1, m2
%endif
paddb m0, m1
movu [dstq+wq], m0
add wq, mmsize
@@ -89,14 +75,7 @@ cglobal add_hfyu_left_pred_bgr32, 4,4,3, dst, src, w, left
movd m0, [dstq-4]
movd [leftq], m0
REP_RET
%endmacro
%if ARCH_X86_32
INIT_MMX mmx
LEFT_BGR32
%endif
INIT_XMM sse2
LEFT_BGR32
; void add_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int mask, int w, int *left, int *left_top)
INIT_MMX mmxext

@@ -26,12 +26,9 @@
#include "libavutil/x86/cpu.h"
#include "libavcodec/huffyuvdsp.h"
void ff_add_int16_mmx(uint16_t *dst, const uint16_t *src, unsigned mask, int w);
void ff_add_int16_sse2(uint16_t *dst, const uint16_t *src, unsigned mask, int w);
void ff_add_int16_avx2(uint16_t *dst, const uint16_t *src, unsigned mask, int w);
void ff_add_hfyu_left_pred_bgr32_mmx(uint8_t *dst, const uint8_t *src,
intptr_t w, uint8_t *left);
void ff_add_hfyu_left_pred_bgr32_sse2(uint8_t *dst, const uint8_t *src,
intptr_t w, uint8_t *left);
void ff_add_hfyu_median_pred_int16_mmxext(uint16_t *dst, const uint16_t *top, const uint16_t *diff, unsigned mask, int w, int *left, int *left_top);
@@ -41,11 +38,6 @@ av_cold void ff_huffyuvdsp_init_x86(HuffYUVDSPContext *c, enum AVPixelFormat pix
int cpu_flags = av_get_cpu_flags();
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(pix_fmt);
if (ARCH_X86_32 && EXTERNAL_MMX(cpu_flags)) {
c->add_hfyu_left_pred_bgr32 = ff_add_hfyu_left_pred_bgr32_mmx;
c->add_int16 = ff_add_int16_mmx;
}
if (EXTERNAL_MMXEXT(cpu_flags) && pix_desc && pix_desc->comp[0].depth<16) {
c->add_hfyu_median_pred_int16 = ff_add_hfyu_median_pred_int16_mmxext;
}

Loading…
Cancel
Save