dsputil: Separate h264chroma

Diego Biurrun 12 years ago
parent 293065bdb5
commit 79dad2a932
40 changed files (changed line count in parentheses):
 1. configure (2)
 2. libavcodec/Makefile (1)
 3. libavcodec/arm/Makefile (4)
 4. libavcodec/arm/dsputil_init_neon.c (18)
 5. libavcodec/arm/h264chroma_init_arm.c (51)
 6. libavcodec/cavs.c (27)
 7. libavcodec/cavs.h (2)
 8. libavcodec/dsputil.c (7)
 9. libavcodec/dsputil.h (7)
10. libavcodec/dsputil_template.c (118)
11. libavcodec/h264.c (3)
12. libavcodec/h264.h (2)
13. libavcodec/h264_mb_template.c (12)
14. libavcodec/h264chroma.c (64)
15. libavcodec/h264chroma.h (38)
16. libavcodec/h264chroma_template.c (142)
17. libavcodec/ppc/Makefile (1)
18. libavcodec/ppc/dsputil_altivec.h (2)
19. libavcodec/ppc/dsputil_ppc.c (2)
20. libavcodec/ppc/h264_qpel.c (20)
21. libavcodec/ppc/h264_qpel_template.c (268)
22. libavcodec/ppc/h264chroma_init.c (64)
23. libavcodec/ppc/h264chroma_template.c (289)
24. libavcodec/ppc/vc1dsp_altivec.c (4)
25. libavcodec/rv30dsp.c (11)
26. libavcodec/rv34dsp.h (1)
27. libavcodec/sh4/Makefile (2)
28. libavcodec/sh4/dsputil_align.c (8)
29. libavcodec/sh4/h264chroma_init.c (132)
30. libavcodec/sh4/qpel.c (91)
31. libavcodec/vc1.h (2)
32. libavcodec/vc1dec.c (24)
33. libavcodec/vc1dsp.c (3)
34. libavcodec/vc1dsp.h (1)
35. libavcodec/vp56.c (3)
36. libavcodec/vp56.h (2)
37. libavcodec/vp6.c (6)
38. libavcodec/x86/Makefile (3)
39. libavcodec/x86/dsputil_mmx.c (98)
40. libavcodec/x86/h264chroma_init.c (116)

configure

@@ -1638,7 +1638,7 @@ wmv3_vdpau_decoder_select="vc1_vdpau_decoder"
wmv3_vdpau_hwaccel_select="vc1_vdpau_hwaccel"
# parsers
h264_parser_select="error_resilience golomb h264dsp h264pred h264qpel mpegvideo"
h264_parser_select="error_resilience golomb h264chroma h264dsp h264pred h264qpel mpegvideo"
mpeg4video_parser_select="error_resilience mpegvideo"
mpegvideo_parser_select="error_resilience mpegvideo"
vc1_parser_select="error_resilience mpegvideo"

@@ -44,6 +44,7 @@ FFT-OBJS-$(CONFIG_HARDCODED_TABLES) += cos_tables.o cos_fixed_tables.o
OBJS-$(CONFIG_FFT) += avfft.o fft_fixed.o fft_float.o \
$(FFT-OBJS-yes)
OBJS-$(CONFIG_GOLOMB) += golomb.o
OBJS-$(CONFIG_H264CHROMA) += h264chroma.o
OBJS-$(CONFIG_H264DSP) += h264dsp.o h264idct.o
OBJS-$(CONFIG_H264PRED) += h264pred.o
OBJS-$(CONFIG_H264QPEL) += h264qpel.o

@@ -26,6 +26,7 @@ ARMV6-OBJS-$(CONFIG_VP8_DECODER) += arm/vp8_armv6.o \
arm/vp8dsp_init_armv6.o \
arm/vp8dsp_armv6.o
OBJS-$(CONFIG_H264CHROMA) += arm/h264chroma_init_arm.o
OBJS-$(CONFIG_H264DSP) += arm/h264dsp_init_arm.o
OBJS-$(CONFIG_H264PRED) += arm/h264pred_init_arm.o
OBJS-$(CONFIG_H264QPEL) += arm/h264qpel_init_arm.o
@@ -67,9 +68,9 @@ NEON-OBJS-$(CONFIG_MDCT) += arm/mdct_neon.o \
NEON-OBJS-$(CONFIG_RDFT) += arm/rdft_neon.o \
NEON-OBJS-$(CONFIG_H264CHROMA) += arm/h264cmc_neon.o
NEON-OBJS-$(CONFIG_H264DSP) += arm/h264dsp_neon.o \
arm/h264idct_neon.o \
arm/h264cmc_neon.o \
NEON-OBJS-$(CONFIG_H264PRED) += arm/h264pred_neon.o \
@@ -87,7 +88,6 @@ NEON-OBJS-$(CONFIG_MPEGVIDEO) += arm/mpegvideo_neon.o
NEON-OBJS-$(CONFIG_RV30_DECODER) += arm/rv34dsp_neon.o
NEON-OBJS-$(CONFIG_RV40_DECODER) += arm/rv34dsp_neon.o \
arm/rv40dsp_neon.o \
arm/h264cmc_neon.o \
NEON-OBJS-$(CONFIG_VORBIS_DECODER) += arm/vorbisdsp_neon.o

@@ -64,14 +64,6 @@ void ff_add_pixels_clamped_neon(const int16_t *, uint8_t *, int);
void ff_put_pixels_clamped_neon(const int16_t *, uint8_t *, int);
void ff_put_signed_pixels_clamped_neon(const int16_t *, uint8_t *, int);
void ff_put_h264_chroma_mc8_neon(uint8_t *, uint8_t *, int, int, int, int);
void ff_put_h264_chroma_mc4_neon(uint8_t *, uint8_t *, int, int, int, int);
void ff_put_h264_chroma_mc2_neon(uint8_t *, uint8_t *, int, int, int, int);
void ff_avg_h264_chroma_mc8_neon(uint8_t *, uint8_t *, int, int, int, int);
void ff_avg_h264_chroma_mc4_neon(uint8_t *, uint8_t *, int, int, int, int);
void ff_avg_h264_chroma_mc2_neon(uint8_t *, uint8_t *, int, int, int, int);
void ff_vector_clipf_neon(float *dst, const float *src, float min, float max,
int len);
void ff_vector_clip_int32_neon(int32_t *dst, const int32_t *src, int32_t min,
@@ -139,16 +131,6 @@ av_cold void ff_dsputil_init_neon(DSPContext *c, AVCodecContext *avctx)
c->put_pixels_clamped = ff_put_pixels_clamped_neon;
c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_neon;
if (CONFIG_H264_DECODER && !high_bit_depth) {
c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_neon;
c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_neon;
c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_neon;
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_neon;
c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_neon;
c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_neon;
}
c->vector_clipf = ff_vector_clipf_neon;
c->vector_clip_int32 = ff_vector_clip_int32_neon;

@@ -0,0 +1,51 @@
/*
* ARM NEON optimised H.264 chroma functions
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/arm/cpu.h"
#include "libavcodec/h264chroma.h"
void ff_put_h264_chroma_mc8_neon(uint8_t *, uint8_t *, int, int, int, int);
void ff_put_h264_chroma_mc4_neon(uint8_t *, uint8_t *, int, int, int, int);
void ff_put_h264_chroma_mc2_neon(uint8_t *, uint8_t *, int, int, int, int);
void ff_avg_h264_chroma_mc8_neon(uint8_t *, uint8_t *, int, int, int, int);
void ff_avg_h264_chroma_mc4_neon(uint8_t *, uint8_t *, int, int, int, int);
void ff_avg_h264_chroma_mc2_neon(uint8_t *, uint8_t *, int, int, int, int);
av_cold void ff_h264chroma_init_arm(H264ChromaContext *c, int bit_depth)
{
const int high_bit_depth = bit_depth > 8;
int cpu_flags = av_get_cpu_flags();
if (have_neon(cpu_flags) && !high_bit_depth) {
c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_neon;
c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_neon;
c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_neon;
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_neon;
c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_neon;
c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_neon;
}
}

@@ -28,6 +28,7 @@
#include "avcodec.h"
#include "get_bits.h"
#include "golomb.h"
#include "h264chroma.h"
#include "mathops.h"
#include "cavs.h"
@@ -464,30 +465,35 @@ void ff_cavs_inter(AVSContext *h, enum cavs_mb mb_type) {
if(ff_cavs_partition_flags[mb_type] == 0){ // 16x16
mc_part_std(h, 8, 0, h->cy, h->cu, h->cv, 0, 0,
h->cdsp.put_cavs_qpel_pixels_tab[0],
h->dsp.put_h264_chroma_pixels_tab[0],
h->h264chroma.put_h264_chroma_pixels_tab[0],
h->cdsp.avg_cavs_qpel_pixels_tab[0],
h->dsp.avg_h264_chroma_pixels_tab[0],&h->mv[MV_FWD_X0]);
h->h264chroma.avg_h264_chroma_pixels_tab[0],
&h->mv[MV_FWD_X0]);
}else{
mc_part_std(h, 4, 0, h->cy, h->cu, h->cv, 0, 0,
h->cdsp.put_cavs_qpel_pixels_tab[1],
h->dsp.put_h264_chroma_pixels_tab[1],
h->h264chroma.put_h264_chroma_pixels_tab[1],
h->cdsp.avg_cavs_qpel_pixels_tab[1],
h->dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X0]);
h->h264chroma.avg_h264_chroma_pixels_tab[1],
&h->mv[MV_FWD_X0]);
mc_part_std(h, 4, 0, h->cy, h->cu, h->cv, 4, 0,
h->cdsp.put_cavs_qpel_pixels_tab[1],
h->dsp.put_h264_chroma_pixels_tab[1],
h->h264chroma.put_h264_chroma_pixels_tab[1],
h->cdsp.avg_cavs_qpel_pixels_tab[1],
h->dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X1]);
h->h264chroma.avg_h264_chroma_pixels_tab[1],
&h->mv[MV_FWD_X1]);
mc_part_std(h, 4, 0, h->cy, h->cu, h->cv, 0, 4,
h->cdsp.put_cavs_qpel_pixels_tab[1],
h->dsp.put_h264_chroma_pixels_tab[1],
h->h264chroma.put_h264_chroma_pixels_tab[1],
h->cdsp.avg_cavs_qpel_pixels_tab[1],
h->dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X2]);
h->h264chroma.avg_h264_chroma_pixels_tab[1],
&h->mv[MV_FWD_X2]);
mc_part_std(h, 4, 0, h->cy, h->cu, h->cv, 4, 4,
h->cdsp.put_cavs_qpel_pixels_tab[1],
h->dsp.put_h264_chroma_pixels_tab[1],
h->h264chroma.put_h264_chroma_pixels_tab[1],
h->cdsp.avg_cavs_qpel_pixels_tab[1],
h->dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X3]);
h->h264chroma.avg_h264_chroma_pixels_tab[1],
&h->mv[MV_FWD_X3]);
}
}
@@ -720,6 +726,7 @@ av_cold int ff_cavs_init(AVCodecContext *avctx) {
AVSContext *h = avctx->priv_data;
ff_dsputil_init(&h->dsp, avctx);
ff_h264chroma_init(&h->h264chroma, 8);
ff_videodsp_init(&h->vdsp, 8);
ff_cavsdsp_init(&h->cdsp, avctx);
ff_init_scantable_permutation(h->dsp.idct_permutation,

@@ -24,6 +24,7 @@
#include "cavsdsp.h"
#include "dsputil.h"
#include "h264chroma.h"
#include "get_bits.h"
#include "videodsp.h"
@@ -161,6 +162,7 @@ typedef struct AVSFrame {
typedef struct AVSContext {
AVCodecContext *avctx;
DSPContext dsp;
H264ChromaContext h264chroma;
VideoDSPContext vdsp;
CAVSDSPContext cdsp;
GetBitContext gb;

@@ -2719,13 +2719,6 @@ av_cold void ff_dsputil_init(DSPContext* c, AVCodecContext *avctx)
c->clear_blocks = FUNCC(clear_blocks ## dct , depth);\
c->add_pixels8 = FUNCC(add_pixels8 ## dct , depth);\
c->add_pixels4 = FUNCC(add_pixels4 ## dct , depth);\
\
c->put_h264_chroma_pixels_tab[0] = FUNCC(put_h264_chroma_mc8 , depth);\
c->put_h264_chroma_pixels_tab[1] = FUNCC(put_h264_chroma_mc4 , depth);\
c->put_h264_chroma_pixels_tab[2] = FUNCC(put_h264_chroma_mc2 , depth);\
c->avg_h264_chroma_pixels_tab[0] = FUNCC(avg_h264_chroma_mc8 , depth);\
c->avg_h264_chroma_pixels_tab[1] = FUNCC(avg_h264_chroma_mc4 , depth);\
c->avg_h264_chroma_pixels_tab[2] = FUNCC(avg_h264_chroma_mc2 , depth)
switch (avctx->bits_per_raw_sample) {
case 9:

@@ -140,7 +140,6 @@ void clear_blocks_c(int16_t *blocks);
typedef void (*op_pixels_func)(uint8_t *block/*align width (8 or 16)*/, const uint8_t *pixels/*align 1*/, ptrdiff_t line_size, int h);
typedef void (*tpel_mc_func)(uint8_t *block/*align width (8 or 16)*/, const uint8_t *pixels/*align 1*/, int line_size, int w, int h);
typedef void (*qpel_mc_func)(uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride);
typedef void (*h264_chroma_mc_func)(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int srcStride, int h, int x, int y);
typedef void (*op_fill_func)(uint8_t *block/*align width (8 or 16)*/, uint8_t value, int line_size, int h);
@@ -308,12 +307,6 @@ typedef struct DSPContext {
qpel_mc_func put_no_rnd_qpel_pixels_tab[2][16];
qpel_mc_func put_mspel_pixels_tab[8];
/**
* h264 Chroma MC
*/
h264_chroma_mc_func put_h264_chroma_pixels_tab[3];
h264_chroma_mc_func avg_h264_chroma_pixels_tab[3];
me_cmp_func pix_abs[2][4];
/* huffyuv specific */

@@ -463,124 +463,6 @@ PIXOP2(put, op_put)
#undef op_avg
#undef op_put
#define H264_CHROMA_MC(OPNAME, OP)\
static void FUNCC(OPNAME ## h264_chroma_mc2)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
int i;\
stride /= sizeof(pixel);\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
if(D){\
for(i=0; i<h; i++){\
OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
dst+= stride;\
src+= stride;\
}\
}else{\
const int E= B+C;\
const int step= C ? stride : 1;\
for(i=0; i<h; i++){\
OP(dst[0], (A*src[0] + E*src[step+0]));\
OP(dst[1], (A*src[1] + E*src[step+1]));\
dst+= stride;\
src+= stride;\
}\
}\
}\
\
static void FUNCC(OPNAME ## h264_chroma_mc4)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
int i;\
stride /= sizeof(pixel);\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
if(D){\
for(i=0; i<h; i++){\
OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
dst+= stride;\
src+= stride;\
}\
}else{\
const int E= B+C;\
const int step= C ? stride : 1;\
for(i=0; i<h; i++){\
OP(dst[0], (A*src[0] + E*src[step+0]));\
OP(dst[1], (A*src[1] + E*src[step+1]));\
OP(dst[2], (A*src[2] + E*src[step+2]));\
OP(dst[3], (A*src[3] + E*src[step+3]));\
dst+= stride;\
src+= stride;\
}\
}\
}\
\
static void FUNCC(OPNAME ## h264_chroma_mc8)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
int i;\
stride /= sizeof(pixel);\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
if(D){\
for(i=0; i<h; i++){\
OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5]));\
OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6]));\
OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7]));\
OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8]));\
dst+= stride;\
src+= stride;\
}\
}else{\
const int E= B+C;\
const int step= C ? stride : 1;\
for(i=0; i<h; i++){\
OP(dst[0], (A*src[0] + E*src[step+0]));\
OP(dst[1], (A*src[1] + E*src[step+1]));\
OP(dst[2], (A*src[2] + E*src[step+2]));\
OP(dst[3], (A*src[3] + E*src[step+3]));\
OP(dst[4], (A*src[4] + E*src[step+4]));\
OP(dst[5], (A*src[5] + E*src[step+5]));\
OP(dst[6], (A*src[6] + E*src[step+6]));\
OP(dst[7], (A*src[7] + E*src[step+7]));\
dst+= stride;\
src+= stride;\
}\
}\
}
#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
#define op_put(a, b) a = (((b) + 32)>>6)
H264_CHROMA_MC(put_ , op_put)
H264_CHROMA_MC(avg_ , op_avg)
#undef op_avg
#undef op_put
void FUNCC(ff_put_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
FUNCC(put_pixels8)(dst, src, stride, 8);
}

@@ -34,6 +34,7 @@
#include "mpegvideo.h"
#include "h264.h"
#include "h264data.h"
#include "h264chroma.h"
#include "h264_mvpred.h"
#include "golomb.h"
#include "mathops.h"
@@ -976,6 +977,7 @@ static av_cold void common_init(H264Context *h)
s->codec_id = s->avctx->codec->id;
ff_h264dsp_init(&h->h264dsp, 8, 1);
ff_h264chroma_init(&h->h264chroma, h->sps.bit_depth_chroma);
ff_h264qpel_init(&h->h264qpel, 8);
ff_h264_pred_init(&h->hpc, s->codec_id, 8, 1);
@@ -2445,6 +2447,7 @@ static int h264_set_parameter_from_sps(H264Context *h)
ff_h264dsp_init(&h->h264dsp, h->sps.bit_depth_luma,
h->sps.chroma_format_idc);
ff_h264chroma_init(&h->h264chroma, h->sps.bit_depth_chroma);
ff_h264qpel_init(&h->h264qpel, h->sps.bit_depth_luma);
ff_h264_pred_init(&h->hpc, s->codec_id, h->sps.bit_depth_luma,
h->sps.chroma_format_idc);

@@ -31,6 +31,7 @@
#include "libavutil/intreadwrite.h"
#include "cabac.h"
#include "mpegvideo.h"
#include "h264chroma.h"
#include "h264dsp.h"
#include "h264pred.h"
#include "h264qpel.h"
@@ -254,6 +255,7 @@ typedef struct MMCO {
typedef struct H264Context {
MpegEncContext s;
H264DSPContext h264dsp;
H264ChromaContext h264chroma;
H264QpelContext h264qpel;
int pixel_shift; ///< 0 for 8-bit H264, 1 for high-bit-depth H264
int chroma_qp[2]; // QPc

@@ -176,14 +176,14 @@ static av_noinline void FUNC(hl_decode_mb)(H264Context *h)
} else if (is_h264) {
if (chroma422) {
FUNC(hl_motion_422)(h, dest_y, dest_cb, dest_cr,
s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab,
s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab,
s->me.qpel_put, h->h264chroma.put_h264_chroma_pixels_tab,
s->me.qpel_avg, h->h264chroma.avg_h264_chroma_pixels_tab,
h->h264dsp.weight_h264_pixels_tab,
h->h264dsp.biweight_h264_pixels_tab);
} else {
FUNC(hl_motion_420)(h, dest_y, dest_cb, dest_cr,
s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab,
s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab,
s->me.qpel_put, h->h264chroma.put_h264_chroma_pixels_tab,
s->me.qpel_avg, h->h264chroma.avg_h264_chroma_pixels_tab,
h->h264dsp.weight_h264_pixels_tab,
h->h264dsp.biweight_h264_pixels_tab);
}
@@ -360,8 +360,8 @@ static av_noinline void FUNC(hl_decode_mb_444)(H264Context *h)
linesize, 0, 1, SIMPLE, PIXEL_SHIFT);
} else {
FUNC(hl_motion_444)(h, dest[0], dest[1], dest[2],
s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab,
s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab,
s->me.qpel_put, h->h264chroma.put_h264_chroma_pixels_tab,
s->me.qpel_avg, h->h264chroma.avg_h264_chroma_pixels_tab,
h->h264dsp.weight_h264_pixels_tab,
h->h264dsp.biweight_h264_pixels_tab);
}

@@ -0,0 +1,64 @@
/*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "h264chroma.h"
#define BIT_DEPTH 8
#include "h264chroma_template.c"
#undef BIT_DEPTH
#define BIT_DEPTH 9
#include "h264chroma_template.c"
#undef BIT_DEPTH
#define BIT_DEPTH 10
#include "h264chroma_template.c"
#undef BIT_DEPTH
#define SET_CHROMA(depth) \
c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_ ## depth ## _c; \
c->put_h264_chroma_pixels_tab[1] = put_h264_chroma_mc4_ ## depth ## _c; \
c->put_h264_chroma_pixels_tab[2] = put_h264_chroma_mc2_ ## depth ## _c; \
c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_ ## depth ## _c; \
c->avg_h264_chroma_pixels_tab[1] = avg_h264_chroma_mc4_ ## depth ## _c; \
c->avg_h264_chroma_pixels_tab[2] = avg_h264_chroma_mc2_ ## depth ## _c; \
void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
{
switch (bit_depth) {
case 10:
SET_CHROMA(10);
break;
case 9:
SET_CHROMA(9);
break;
default:
SET_CHROMA(8);
break;
}
if (ARCH_ARM)
ff_h264chroma_init_arm(c, bit_depth);
if (ARCH_PPC)
ff_h264chroma_init_ppc(c, bit_depth);
if (ARCH_SH4)
ff_h264chroma_init_sh4(c, bit_depth);
if (ARCH_X86)
ff_h264chroma_init_x86(c, bit_depth);
}

@@ -0,0 +1,38 @@
/*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_H264CHROMA_H
#define AVCODEC_H264CHROMA_H
#include <stdint.h>
typedef void (*h264_chroma_mc_func)(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int srcStride, int h, int x, int y);
typedef struct H264ChromaContext {
h264_chroma_mc_func put_h264_chroma_pixels_tab[3];
h264_chroma_mc_func avg_h264_chroma_pixels_tab[3];
} H264ChromaContext;
void ff_h264chroma_init(H264ChromaContext *c, int bit_depth);
void ff_h264chroma_init_arm(H264ChromaContext *c, int bit_depth);
void ff_h264chroma_init_ppc(H264ChromaContext *c, int bit_depth);
void ff_h264chroma_init_sh4(H264ChromaContext *c, int bit_depth);
void ff_h264chroma_init_x86(H264ChromaContext *c, int bit_depth);
#endif /* AVCODEC_H264CHROMA_H */
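For orientation, the new context is meant to be filled once with ff_h264chroma_init() and then used through its function-pointer tables: index 0/1/2 selects 8-, 4- or 2-pixel-wide blocks, h is the block height, and x/y are the 1/8-pel chroma motion-vector fractions (0..7). A minimal caller sketch, assuming an in-tree build where the header is reachable under libavcodec/; the function and buffer names here are made up for illustration:

#include <stdint.h>
#include "libavcodec/h264chroma.h"

/* Sketch only: copy one 8x8 chroma block at subpel offset (mx, my), both in 0..7. */
static void copy_chroma_8x8(uint8_t *dst, uint8_t *src, int stride, int mx, int my)
{
    H264ChromaContext c;

    ff_h264chroma_init(&c, 8);          /* 8-bit chroma; also wires up arch-specific versions */
    /* tab[0] handles 8-pixel-wide blocks, tab[1] 4-wide, tab[2] 2-wide */
    c.put_h264_chroma_pixels_tab[0](dst, src, stride, 8, mx, my);
}

This mirrors what cavs.c and vc1dec.c do after this change, except that those decoders keep the H264ChromaContext in their own state instead of on the stack.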

@@ -0,0 +1,142 @@
/*
* Copyright (c) 2000, 2001 Fabrice Bellard
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <assert.h>
#include "bit_depth_template.c"
#define H264_CHROMA_MC(OPNAME, OP)\
static void FUNCC(OPNAME ## h264_chroma_mc2)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
int i;\
stride /= sizeof(pixel);\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
if(D){\
for(i=0; i<h; i++){\
OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
dst+= stride;\
src+= stride;\
}\
}else{\
const int E= B+C;\
const int step= C ? stride : 1;\
for(i=0; i<h; i++){\
OP(dst[0], (A*src[0] + E*src[step+0]));\
OP(dst[1], (A*src[1] + E*src[step+1]));\
dst+= stride;\
src+= stride;\
}\
}\
}\
\
static void FUNCC(OPNAME ## h264_chroma_mc4)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
int i;\
stride /= sizeof(pixel);\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
if(D){\
for(i=0; i<h; i++){\
OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
dst+= stride;\
src+= stride;\
}\
}else{\
const int E= B+C;\
const int step= C ? stride : 1;\
for(i=0; i<h; i++){\
OP(dst[0], (A*src[0] + E*src[step+0]));\
OP(dst[1], (A*src[1] + E*src[step+1]));\
OP(dst[2], (A*src[2] + E*src[step+2]));\
OP(dst[3], (A*src[3] + E*src[step+3]));\
dst+= stride;\
src+= stride;\
}\
}\
}\
\
static void FUNCC(OPNAME ## h264_chroma_mc8)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
int i;\
stride /= sizeof(pixel);\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
if(D){\
for(i=0; i<h; i++){\
OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5]));\
OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6]));\
OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7]));\
OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8]));\
dst+= stride;\
src+= stride;\
}\
}else{\
const int E= B+C;\
const int step= C ? stride : 1;\
for(i=0; i<h; i++){\
OP(dst[0], (A*src[0] + E*src[step+0]));\
OP(dst[1], (A*src[1] + E*src[step+1]));\
OP(dst[2], (A*src[2] + E*src[step+2]));\
OP(dst[3], (A*src[3] + E*src[step+3]));\
OP(dst[4], (A*src[4] + E*src[step+4]));\
OP(dst[5], (A*src[5] + E*src[step+5]));\
OP(dst[6], (A*src[6] + E*src[step+6]));\
OP(dst[7], (A*src[7] + E*src[step+7]));\
dst+= stride;\
src+= stride;\
}\
}\
}
#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
#define op_put(a, b) a = (((b) + 32)>>6)
H264_CHROMA_MC(put_ , op_put)
H264_CHROMA_MC(avg_ , op_avg)
#undef op_avg
#undef op_put
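The template is the standard 1/8-pel bilinear chroma interpolator: with A = (8-x)(8-y), B = x(8-y), C = (8-x)y and D = xy the weights always sum to 64 (for x = 3, y = 2 they are 30, 18, 10 and 6), so op_put rounds with +32 and >>6, while op_avg additionally averages against the existing destination pixel. A scalar one-pixel sketch of the same arithmetic, with illustrative names that are not part of the codebase:

#include <stdint.h>

/* One bilinearly interpolated chroma sample at fraction (x, y), x and y in 0..7. */
static uint8_t chroma_sample(const uint8_t *src, int stride, int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B =      x  * (8 - y);
    const int C = (8 - x) *      y;
    const int D =      x  *      y;      /* A + B + C + D == 64 */

    return (A * src[0]      + B * src[1] +
            C * src[stride] + D * src[stride + 1] + 32) >> 6;
}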

@@ -1,6 +1,7 @@
OBJS += ppc/dsputil_ppc.o \
ppc/videodsp_ppc.o \
OBJS-$(CONFIG_H264CHROMA) += ppc/h264chroma_init.o
OBJS-$(CONFIG_H264QPEL) += ppc/h264_qpel.o
OBJS-$(CONFIG_VORBIS_DECODER) += ppc/vorbisdsp_altivec.o
OBJS-$(CONFIG_VP3DSP) += ppc/vp3dsp_altivec.o

@@ -36,8 +36,6 @@ void ff_gmc1_altivec(uint8_t *dst, uint8_t *src, int stride, int h,
void ff_idct_put_altivec(uint8_t *dest, int line_size, int16_t *block);
void ff_idct_add_altivec(uint8_t *dest, int line_size, int16_t *block);
void ff_dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx);
void ff_dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx);
void ff_float_init_altivec(DSPContext* c, AVCodecContext *avctx);
void ff_int_init_altivec(DSPContext* c, AVCodecContext *avctx);

@@ -157,8 +157,6 @@ av_cold void ff_dsputil_init_ppc(DSPContext *c, AVCodecContext *avctx)
}
#if HAVE_ALTIVEC
if(CONFIG_H264_DECODER) ff_dsputil_h264_init_ppc(c, avctx);
if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
ff_dsputil_init_altivec(c, avctx);
ff_int_init_altivec(c, avctx);

@@ -33,8 +33,6 @@
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)
#define OP_U8_ALTIVEC PUT_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec put_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num altivec_put_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec put_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num altivec_put_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec put_h264_qpel16_v_lowpass_altivec
@@ -43,8 +41,6 @@
#define PREFIX_h264_qpel16_hv_lowpass_num altivec_put_h264_qpel16_hv_lowpass_num
#include "h264_qpel_template.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
@@ -53,8 +49,6 @@
#undef PREFIX_h264_qpel16_hv_lowpass_num
#define OP_U8_ALTIVEC AVG_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec avg_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num altivec_avg_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec avg_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num altivec_avg_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec avg_h264_qpel16_v_lowpass_altivec
@@ -63,8 +57,6 @@
#define PREFIX_h264_qpel16_hv_lowpass_num altivec_avg_h264_qpel16_hv_lowpass_num
#include "h264_qpel_template.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
@@ -273,18 +265,6 @@ static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
H264_MC(put_, 16, altivec)
H264_MC(avg_, 16, altivec)
void ff_dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx)
{
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
if (!high_bit_depth) {
c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
}
}
}
#endif /* HAVE_ALTIVEC */
av_cold void ff_h264qpel_init_ppc(H264QpelContext *c, int bit_depth)

@@ -26,274 +26,6 @@
#define ASSERT_ALIGNED(ptr) ;
#endif
/* this code assume that stride % 16 == 0 */
#define CHROMA_MC8_ALTIVEC_CORE(BIAS1, BIAS2) \
vsrc2ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc2uc);\
vsrc3ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc3uc);\
\
psum = vec_mladd(vA, vsrc0ssH, BIAS1);\
psum = vec_mladd(vB, vsrc1ssH, psum);\
psum = vec_mladd(vC, vsrc2ssH, psum);\
psum = vec_mladd(vD, vsrc3ssH, psum);\
psum = BIAS2(psum);\
psum = vec_sr(psum, v6us);\
\
vdst = vec_ld(0, dst);\
ppsum = (vec_u8)vec_pack(psum, psum);\
vfdst = vec_perm(vdst, ppsum, fperm);\
\
OP_U8_ALTIVEC(fsum, vfdst, vdst);\
\
vec_st(fsum, 0, dst);\
\
vsrc0ssH = vsrc2ssH;\
vsrc1ssH = vsrc3ssH;\
\
dst += stride;\
src += stride;
#define CHROMA_MC8_ALTIVEC_CORE_SIMPLE \
\
vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc0uc);\
vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc1uc);\
\
psum = vec_mladd(vA, vsrc0ssH, v32ss);\
psum = vec_mladd(vE, vsrc1ssH, psum);\
psum = vec_sr(psum, v6us);\
\
vdst = vec_ld(0, dst);\
ppsum = (vec_u8)vec_pack(psum, psum);\
vfdst = vec_perm(vdst, ppsum, fperm);\
\
OP_U8_ALTIVEC(fsum, vfdst, vdst);\
\
vec_st(fsum, 0, dst);\
\
dst += stride;\
src += stride;
#define noop(a) a
#define add28(a) vec_add(v28ss, a)
#ifdef PREFIX_h264_chroma_mc8_altivec
static void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
int stride, int h, int x, int y) {
DECLARE_ALIGNED(16, signed int, ABCD)[4] =
{((8 - x) * (8 - y)),
(( x) * (8 - y)),
((8 - x) * ( y)),
(( x) * ( y))};
register int i;
vec_u8 fperm;
const vec_s32 vABCD = vec_ld(0, ABCD);
const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
LOAD_ZERO;
const vec_s16 v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
const vec_u16 v6us = vec_splat_u16(6);
register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
vec_u8 vsrcAuc, av_uninit(vsrcBuc), vsrcperm0, vsrcperm1;
vec_u8 vsrc0uc, vsrc1uc;
vec_s16 vsrc0ssH, vsrc1ssH;
vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
vec_s16 vsrc2ssH, vsrc3ssH, psum;
vec_u8 vdst, ppsum, vfdst, fsum;
if (((unsigned long)dst) % 16 == 0) {
fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
0x14, 0x15, 0x16, 0x17,
0x08, 0x09, 0x0A, 0x0B,
0x0C, 0x0D, 0x0E, 0x0F};
} else {
fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
0x04, 0x05, 0x06, 0x07,
0x18, 0x19, 0x1A, 0x1B,
0x1C, 0x1D, 0x1E, 0x1F};
}
vsrcAuc = vec_ld(0, src);
if (loadSecond)
vsrcBuc = vec_ld(16, src);
vsrcperm0 = vec_lvsl(0, src);
vsrcperm1 = vec_lvsl(1, src);
vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
if (reallyBadAlign)
vsrc1uc = vsrcBuc;
else
vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc0uc);
vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc1uc);
if (ABCD[3]) {
if (!loadSecond) {// -> !reallyBadAlign
for (i = 0 ; i < h ; i++) {
vsrcCuc = vec_ld(stride + 0, src);
vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
CHROMA_MC8_ALTIVEC_CORE(v32ss, noop)
}
} else {
vec_u8 vsrcDuc;
for (i = 0 ; i < h ; i++) {
vsrcCuc = vec_ld(stride + 0, src);
vsrcDuc = vec_ld(stride + 16, src);
vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
if (reallyBadAlign)
vsrc3uc = vsrcDuc;
else
vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
CHROMA_MC8_ALTIVEC_CORE(v32ss, noop)
}
}
} else {
const vec_s16 vE = vec_add(vB, vC);
if (ABCD[2]) { // x == 0 B == 0
if (!loadSecond) {// -> !reallyBadAlign
for (i = 0 ; i < h ; i++) {
vsrcCuc = vec_ld(stride + 0, src);
vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
CHROMA_MC8_ALTIVEC_CORE_SIMPLE
vsrc0uc = vsrc1uc;
}
} else {
vec_u8 vsrcDuc;
for (i = 0 ; i < h ; i++) {
vsrcCuc = vec_ld(stride + 0, src);
vsrcDuc = vec_ld(stride + 15, src);
vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
CHROMA_MC8_ALTIVEC_CORE_SIMPLE
vsrc0uc = vsrc1uc;
}
}
} else { // y == 0 C == 0
if (!loadSecond) {// -> !reallyBadAlign
for (i = 0 ; i < h ; i++) {
vsrcCuc = vec_ld(0, src);
vsrc0uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
CHROMA_MC8_ALTIVEC_CORE_SIMPLE
}
} else {
vec_u8 vsrcDuc;
for (i = 0 ; i < h ; i++) {
vsrcCuc = vec_ld(0, src);
vsrcDuc = vec_ld(15, src);
vsrc0uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
if (reallyBadAlign)
vsrc1uc = vsrcDuc;
else
vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
CHROMA_MC8_ALTIVEC_CORE_SIMPLE
}
}
}
}
}
#endif
/* this code assume that stride % 16 == 0 */
#ifdef PREFIX_no_rnd_vc1_chroma_mc8_altivec
static void PREFIX_no_rnd_vc1_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
DECLARE_ALIGNED(16, signed int, ABCD)[4] =
{((8 - x) * (8 - y)),
(( x) * (8 - y)),
((8 - x) * ( y)),
(( x) * ( y))};
register int i;
vec_u8 fperm;
const vec_s32 vABCD = vec_ld(0, ABCD);
const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
LOAD_ZERO;
const vec_s16 v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
const vec_u16 v6us = vec_splat_u16(6);
register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
vec_u8 vsrcAuc, av_uninit(vsrcBuc), vsrcperm0, vsrcperm1;
vec_u8 vsrc0uc, vsrc1uc;
vec_s16 vsrc0ssH, vsrc1ssH;
vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
vec_s16 vsrc2ssH, vsrc3ssH, psum;
vec_u8 vdst, ppsum, vfdst, fsum;
if (((unsigned long)dst) % 16 == 0) {
fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
0x14, 0x15, 0x16, 0x17,
0x08, 0x09, 0x0A, 0x0B,
0x0C, 0x0D, 0x0E, 0x0F};
} else {
fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
0x04, 0x05, 0x06, 0x07,
0x18, 0x19, 0x1A, 0x1B,
0x1C, 0x1D, 0x1E, 0x1F};
}
vsrcAuc = vec_ld(0, src);
if (loadSecond)
vsrcBuc = vec_ld(16, src);
vsrcperm0 = vec_lvsl(0, src);
vsrcperm1 = vec_lvsl(1, src);
vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
if (reallyBadAlign)
vsrc1uc = vsrcBuc;
else
vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc0uc);
vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc1uc);
if (!loadSecond) {// -> !reallyBadAlign
for (i = 0 ; i < h ; i++) {
vsrcCuc = vec_ld(stride + 0, src);
vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28)
}
} else {
vec_u8 vsrcDuc;
for (i = 0 ; i < h ; i++) {
vsrcCuc = vec_ld(stride + 0, src);
vsrcDuc = vec_ld(stride + 16, src);
vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
if (reallyBadAlign)
vsrc3uc = vsrcDuc;
else
vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28)
}
}
}
#endif
#undef noop
#undef add28
#undef CHROMA_MC8_ALTIVEC_CORE
/* this code assume stride % 16 == 0 */
#ifdef PREFIX_h264_qpel16_h_lowpass_altivec
static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {

@@ -0,0 +1,64 @@
/*
* Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "libavutil/attributes.h"
#include "libavcodec/h264chroma.h"
#if HAVE_ALTIVEC
#include "libavutil/cpu.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
#include "dsputil_altivec.h"
#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)
#define OP_U8_ALTIVEC PUT_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec put_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num altivec_put_h264_chroma_mc8_num
#include "h264chroma_template.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#define OP_U8_ALTIVEC AVG_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec avg_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num altivec_avg_h264_chroma_mc8_num
#include "h264chroma_template.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#endif /* HAVE_ALTIVEC */
av_cold void ff_h264chroma_init_ppc(H264ChromaContext *c, int bit_depth)
{
#if HAVE_ALTIVEC
const int high_bit_depth = bit_depth > 8;
if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
if (!high_bit_depth) {
c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
}
}
#endif /* HAVE_ALTIVEC */
}

@@ -0,0 +1,289 @@
/*
* Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/mem.h"
/* this code assume that stride % 16 == 0 */
#define CHROMA_MC8_ALTIVEC_CORE(BIAS1, BIAS2) \
vsrc2ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc2uc);\
vsrc3ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc3uc);\
\
psum = vec_mladd(vA, vsrc0ssH, BIAS1);\
psum = vec_mladd(vB, vsrc1ssH, psum);\
psum = vec_mladd(vC, vsrc2ssH, psum);\
psum = vec_mladd(vD, vsrc3ssH, psum);\
psum = BIAS2(psum);\
psum = vec_sr(psum, v6us);\
\
vdst = vec_ld(0, dst);\
ppsum = (vec_u8)vec_pack(psum, psum);\
vfdst = vec_perm(vdst, ppsum, fperm);\
\
OP_U8_ALTIVEC(fsum, vfdst, vdst);\
\
vec_st(fsum, 0, dst);\
\
vsrc0ssH = vsrc2ssH;\
vsrc1ssH = vsrc3ssH;\
\
dst += stride;\
src += stride;
#define CHROMA_MC8_ALTIVEC_CORE_SIMPLE \
\
vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc0uc);\
vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc1uc);\
\
psum = vec_mladd(vA, vsrc0ssH, v32ss);\
psum = vec_mladd(vE, vsrc1ssH, psum);\
psum = vec_sr(psum, v6us);\
\
vdst = vec_ld(0, dst);\
ppsum = (vec_u8)vec_pack(psum, psum);\
vfdst = vec_perm(vdst, ppsum, fperm);\
\
OP_U8_ALTIVEC(fsum, vfdst, vdst);\
\
vec_st(fsum, 0, dst);\
\
dst += stride;\
src += stride;
#define noop(a) a
#define add28(a) vec_add(v28ss, a)
#ifdef PREFIX_h264_chroma_mc8_altivec
static void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
int stride, int h, int x, int y) {
DECLARE_ALIGNED(16, signed int, ABCD)[4] =
{((8 - x) * (8 - y)),
(( x) * (8 - y)),
((8 - x) * ( y)),
(( x) * ( y))};
register int i;
vec_u8 fperm;
const vec_s32 vABCD = vec_ld(0, ABCD);
const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
LOAD_ZERO;
const vec_s16 v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
const vec_u16 v6us = vec_splat_u16(6);
register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
vec_u8 vsrcAuc, av_uninit(vsrcBuc), vsrcperm0, vsrcperm1;
vec_u8 vsrc0uc, vsrc1uc;
vec_s16 vsrc0ssH, vsrc1ssH;
vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
vec_s16 vsrc2ssH, vsrc3ssH, psum;
vec_u8 vdst, ppsum, vfdst, fsum;
if (((unsigned long)dst) % 16 == 0) {
fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
0x14, 0x15, 0x16, 0x17,
0x08, 0x09, 0x0A, 0x0B,
0x0C, 0x0D, 0x0E, 0x0F};
} else {
fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
0x04, 0x05, 0x06, 0x07,
0x18, 0x19, 0x1A, 0x1B,
0x1C, 0x1D, 0x1E, 0x1F};
}
vsrcAuc = vec_ld(0, src);
if (loadSecond)
vsrcBuc = vec_ld(16, src);
vsrcperm0 = vec_lvsl(0, src);
vsrcperm1 = vec_lvsl(1, src);
vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
if (reallyBadAlign)
vsrc1uc = vsrcBuc;
else
vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc0uc);
vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc1uc);
if (ABCD[3]) {
if (!loadSecond) {// -> !reallyBadAlign
for (i = 0 ; i < h ; i++) {
vsrcCuc = vec_ld(stride + 0, src);
vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
CHROMA_MC8_ALTIVEC_CORE(v32ss, noop)
}
} else {
vec_u8 vsrcDuc;
for (i = 0 ; i < h ; i++) {
vsrcCuc = vec_ld(stride + 0, src);
vsrcDuc = vec_ld(stride + 16, src);
vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
if (reallyBadAlign)
vsrc3uc = vsrcDuc;
else
vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
CHROMA_MC8_ALTIVEC_CORE(v32ss, noop)
}
}
} else {
const vec_s16 vE = vec_add(vB, vC);
if (ABCD[2]) { // x == 0 B == 0
if (!loadSecond) {// -> !reallyBadAlign
for (i = 0 ; i < h ; i++) {
vsrcCuc = vec_ld(stride + 0, src);
vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
CHROMA_MC8_ALTIVEC_CORE_SIMPLE
vsrc0uc = vsrc1uc;
}
} else {
vec_u8 vsrcDuc;
for (i = 0 ; i < h ; i++) {
vsrcCuc = vec_ld(stride + 0, src);
vsrcDuc = vec_ld(stride + 15, src);
vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
CHROMA_MC8_ALTIVEC_CORE_SIMPLE
vsrc0uc = vsrc1uc;
}
}
} else { // y == 0 C == 0
if (!loadSecond) {// -> !reallyBadAlign
for (i = 0 ; i < h ; i++) {
vsrcCuc = vec_ld(0, src);
vsrc0uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
CHROMA_MC8_ALTIVEC_CORE_SIMPLE
}
} else {
vec_u8 vsrcDuc;
for (i = 0 ; i < h ; i++) {
vsrcCuc = vec_ld(0, src);
vsrcDuc = vec_ld(15, src);
vsrc0uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
if (reallyBadAlign)
vsrc1uc = vsrcDuc;
else
vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
CHROMA_MC8_ALTIVEC_CORE_SIMPLE
}
}
}
}
}
#endif
/* this code assume that stride % 16 == 0 */
#ifdef PREFIX_no_rnd_vc1_chroma_mc8_altivec
static void PREFIX_no_rnd_vc1_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
DECLARE_ALIGNED(16, signed int, ABCD)[4] =
{((8 - x) * (8 - y)),
(( x) * (8 - y)),
((8 - x) * ( y)),
(( x) * ( y))};
register int i;
vec_u8 fperm;
const vec_s32 vABCD = vec_ld(0, ABCD);
const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
LOAD_ZERO;
const vec_s16 v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
const vec_u16 v6us = vec_splat_u16(6);
register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
vec_u8 vsrcAuc, av_uninit(vsrcBuc), vsrcperm0, vsrcperm1;
vec_u8 vsrc0uc, vsrc1uc;
vec_s16 vsrc0ssH, vsrc1ssH;
vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
vec_s16 vsrc2ssH, vsrc3ssH, psum;
vec_u8 vdst, ppsum, vfdst, fsum;
if (((unsigned long)dst) % 16 == 0) {
fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
0x14, 0x15, 0x16, 0x17,
0x08, 0x09, 0x0A, 0x0B,
0x0C, 0x0D, 0x0E, 0x0F};
} else {
fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
0x04, 0x05, 0x06, 0x07,
0x18, 0x19, 0x1A, 0x1B,
0x1C, 0x1D, 0x1E, 0x1F};
}
vsrcAuc = vec_ld(0, src);
if (loadSecond)
vsrcBuc = vec_ld(16, src);
vsrcperm0 = vec_lvsl(0, src);
vsrcperm1 = vec_lvsl(1, src);
vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
if (reallyBadAlign)
vsrc1uc = vsrcBuc;
else
vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc0uc);
vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc1uc);
if (!loadSecond) {// -> !reallyBadAlign
for (i = 0 ; i < h ; i++) {
vsrcCuc = vec_ld(stride + 0, src);
vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28)
}
} else {
vec_u8 vsrcDuc;
for (i = 0 ; i < h ; i++) {
vsrcCuc = vec_ld(stride + 0, src);
vsrcDuc = vec_ld(stride + 16, src);
vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
if (reallyBadAlign)
vsrc3uc = vsrcDuc;
else
vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28)
}
}
}
#endif
#undef noop
#undef add28
#undef CHROMA_MC8_ALTIVEC_CORE

@@ -326,13 +326,13 @@ static void vc1_inv_trans_8x4_altivec(uint8_t *dest, int stride, int16_t *block)
#define OP_U8_ALTIVEC PUT_OP_U8_ALTIVEC
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec put_no_rnd_vc1_chroma_mc8_altivec
#include "h264_qpel_template.c"
#include "h264chroma_template.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec
#define OP_U8_ALTIVEC AVG_OP_U8_ALTIVEC
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec avg_no_rnd_vc1_chroma_mc8_altivec
#include "h264_qpel_template.c"
#include "h264chroma_template.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec

@@ -26,6 +26,7 @@
#include "avcodec.h"
#include "dsputil.h"
#include "h264chroma.h"
#include "h264qpel.h"
#include "rv34dsp.h"
@@ -254,9 +255,11 @@ RV30_MC(avg_, 8)
RV30_MC(avg_, 16)
av_cold void ff_rv30dsp_init(RV34DSPContext *c, DSPContext* dsp) {
H264ChromaContext h264chroma;
H264QpelContext qpel;
ff_rv34dsp_init(c, dsp);
ff_h264chroma_init(&h264chroma, 8);
ff_h264qpel_init(&qpel, 8);
c->put_pixels_tab[0][ 0] = qpel.put_h264_qpel_pixels_tab[0][0];
@@ -296,8 +299,8 @@ av_cold void ff_rv30dsp_init(RV34DSPContext *c, DSPContext* dsp) {
c->avg_pixels_tab[1][ 9] = avg_rv30_tpel8_mc12_c;
c->avg_pixels_tab[1][10] = avg_rv30_tpel8_mc22_c;
c->put_chroma_pixels_tab[0] = dsp->put_h264_chroma_pixels_tab[0];
c->put_chroma_pixels_tab[1] = dsp->put_h264_chroma_pixels_tab[1];
c->avg_chroma_pixels_tab[0] = dsp->avg_h264_chroma_pixels_tab[0];
c->avg_chroma_pixels_tab[1] = dsp->avg_h264_chroma_pixels_tab[1];
c->put_chroma_pixels_tab[0] = h264chroma.put_h264_chroma_pixels_tab[0];
c->put_chroma_pixels_tab[1] = h264chroma.put_h264_chroma_pixels_tab[1];
c->avg_chroma_pixels_tab[0] = h264chroma.avg_h264_chroma_pixels_tab[0];
c->avg_chroma_pixels_tab[1] = h264chroma.avg_h264_chroma_pixels_tab[1];
}

@@ -28,6 +28,7 @@
#define AVCODEC_RV34DSP_H
#include "dsputil.h"
#include "h264chroma.h"
typedef void (*rv40_weight_func)(uint8_t *dst/*align width (8 or 16)*/,
uint8_t *src1/*align width (8 or 16)*/,

@@ -1,3 +1,5 @@
OBJS += sh4/dsputil_align.o \
sh4/dsputil_sh4.o \
sh4/idct_sh4.o \
OBJS-$(CONFIG_H264CHROMA) += sh4/h264chroma_init.o \

@@ -369,14 +369,6 @@ av_cold void ff_dsputil_init_align(DSPContext *c, AVCodecContext *avctx)
/* dspfunc(avg_no_rnd_qpel, 1, 8); */
#undef dspfunc
if (!high_bit_depth) {
c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_sh4;
c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_sh4;
c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_sh4;
c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_sh4;
c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_sh4;
c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_sh4;
}
c->put_mspel_pixels_tab[0]= put_mspel8_mc00_sh4;
c->put_mspel_pixels_tab[1]= put_mspel8_mc10_sh4;

@@ -0,0 +1,132 @@
/*
* aligned/packed access motion
*
* Copyright (c) 2001-2003 BERO <bero@geocities.co.jp>
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <assert.h>
#include <stdint.h>
#include "libavutil/attributes.h"
#include "libavcodec/h264chroma.h"
#define H264_CHROMA_MC(OPNAME, OP)\
static void OPNAME ## h264_chroma_mc2_sh4(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
do {\
int t0,t1,t2,t3; \
uint8_t *s0 = src; \
uint8_t *s1 = src+stride; \
t0 = *s0++; t2 = *s1++; \
t1 = *s0++; t3 = *s1++; \
OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
dst+= stride;\
src+= stride;\
}while(--h);\
}\
\
static void OPNAME ## h264_chroma_mc4_sh4(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
do {\
int t0,t1,t2,t3; \
uint8_t *s0 = src; \
uint8_t *s1 = src+stride; \
t0 = *s0++; t2 = *s1++; \
t1 = *s0++; t3 = *s1++; \
OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
t1 = *s0++; t3 = *s1++; \
OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
dst+= stride;\
src+= stride;\
}while(--h);\
}\
\
static void OPNAME ## h264_chroma_mc8_sh4(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
do {\
int t0,t1,t2,t3; \
uint8_t *s0 = src; \
uint8_t *s1 = src+stride; \
t0 = *s0++; t2 = *s1++; \
t1 = *s0++; t3 = *s1++; \
OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
t1 = *s0++; t3 = *s1++; \
OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
t1 = *s0++; t3 = *s1++; \
OP(dst[4], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[5], (A*t1 + B*t0 + C*t3 + D*t2));\
t1 = *s0++; t3 = *s1++; \
OP(dst[6], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[7], (A*t1 + B*t0 + C*t3 + D*t2));\
dst+= stride;\
src+= stride;\
}while(--h);\
}
#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
#define op_put(a, b) a = (((b) + 32)>>6)
H264_CHROMA_MC(put_ , op_put)
H264_CHROMA_MC(avg_ , op_avg)
#undef op_avg
#undef op_put
av_cold void ff_h264chroma_init_sh4(H264ChromaContext *c, int bit_depth)
{
const int high_bit_depth = bit_depth > 8;
if (!high_bit_depth) {
c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_sh4;
c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_sh4;
c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_sh4;
c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_sh4;
c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_sh4;
c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_sh4;
}
}

@@ -359,97 +359,6 @@ static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y
}while(--h);
}
#define H264_CHROMA_MC(OPNAME, OP)\
static void OPNAME ## h264_chroma_mc2_sh4(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
do {\
int t0,t1,t2,t3; \
uint8_t *s0 = src; \
uint8_t *s1 = src+stride; \
t0 = *s0++; t2 = *s1++; \
t1 = *s0++; t3 = *s1++; \
OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
dst+= stride;\
src+= stride;\
}while(--h);\
}\
\
static void OPNAME ## h264_chroma_mc4_sh4(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
do {\
int t0,t1,t2,t3; \
uint8_t *s0 = src; \
uint8_t *s1 = src+stride; \
t0 = *s0++; t2 = *s1++; \
t1 = *s0++; t3 = *s1++; \
OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
t1 = *s0++; t3 = *s1++; \
OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
dst+= stride;\
src+= stride;\
}while(--h);\
}\
\
static void OPNAME ## h264_chroma_mc8_sh4(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
do {\
int t0,t1,t2,t3; \
uint8_t *s0 = src; \
uint8_t *s1 = src+stride; \
t0 = *s0++; t2 = *s1++; \
t1 = *s0++; t3 = *s1++; \
OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
t1 = *s0++; t3 = *s1++; \
OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
t1 = *s0++; t3 = *s1++; \
OP(dst[4], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[5], (A*t1 + B*t0 + C*t3 + D*t2));\
t1 = *s0++; t3 = *s1++; \
OP(dst[6], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[7], (A*t1 + B*t0 + C*t3 + D*t2));\
dst+= stride;\
src+= stride;\
}while(--h);\
}
#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
#define op_put(a, b) a = (((b) + 32)>>6)
H264_CHROMA_MC(put_ , op_put)
H264_CHROMA_MC(avg_ , op_avg)
#undef op_avg
#undef op_put
#define QPEL_MC(r, OPNAME, RND, OP) \
static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\

@@ -24,6 +24,7 @@
#define AVCODEC_VC1_H
#include "avcodec.h"
#include "h264chroma.h"
#include "mpegvideo.h"
#include "intrax8.h"
#include "vc1dsp.h"
@@ -181,6 +182,7 @@ enum FrameCodingMode {
typedef struct VC1Context{
MpegEncContext s;
IntraX8Context x8;
H264ChromaContext h264chroma;
VC1DSPContext vc1dsp;
int bits;

@@ -31,6 +31,7 @@
#include "avcodec.h"
#include "mpegvideo.h"
#include "h263.h"
#include "h264chroma.h"
#include "vc1.h"
#include "vc1data.h"
#include "vc1acdata.h"
@@ -331,6 +332,7 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
{
MpegEncContext *s = &v->s;
DSPContext *dsp = &v->s.dsp;
H264ChromaContext *h264chroma = &v->h264chroma;
uint8_t *srcY, *srcU, *srcV;
int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
int off, off_uv;
@@ -519,8 +521,8 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
uvmx = (uvmx & 3) << 1;
uvmy = (uvmy & 3) << 1;
if (!v->rnd) {
dsp->put_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
dsp->put_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
} else {
v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
@@ -769,7 +771,7 @@ static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag,
static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
{
MpegEncContext *s = &v->s;
DSPContext *dsp = &v->s.dsp;
H264ChromaContext *h264chroma = &v->h264chroma;
uint8_t *srcU, *srcV;
int uvmx, uvmy, uvsrc_x, uvsrc_y;
int k, tx = 0, ty = 0;
@@ -915,8 +917,8 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
uvmx = (uvmx & 3) << 1;
uvmy = (uvmy & 3) << 1;
if (!v->rnd) {
dsp->put_h264_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
dsp->put_h264_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
} else {
v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
@ -928,7 +930,7 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
static void vc1_mc_4mv_chroma4(VC1Context *v)
{
MpegEncContext *s = &v->s;
DSPContext *dsp = &v->s.dsp;
H264ChromaContext *h264chroma = &v->h264chroma;
uint8_t *srcU, *srcV;
int uvsrc_x, uvsrc_y;
int uvmx_field[4], uvmy_field[4];
@ -1000,8 +1002,8 @@ static void vc1_mc_4mv_chroma4(VC1Context *v)
}
}
if (!v->rnd) {
dsp->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
dsp->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
h264chroma->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
h264chroma->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
} else {
v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
@ -1828,6 +1830,7 @@ static void vc1_interp_mc(VC1Context *v)
{
MpegEncContext *s = &v->s;
DSPContext *dsp = &v->s.dsp;
H264ChromaContext *h264chroma = &v->h264chroma;
uint8_t *srcY, *srcU, *srcV;
int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
int off, off_uv;
@ -1957,8 +1960,8 @@ static void vc1_interp_mc(VC1Context *v)
uvmx = (uvmx & 3) << 1;
uvmy = (uvmy & 3) << 1;
if (!v->rnd) {
dsp->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
dsp->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
} else {
v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
@ -5164,6 +5167,7 @@ static av_cold int vc1_decode_init(AVCodecContext *avctx)
if (ff_vc1_init_common(v) < 0)
return -1;
ff_h264chroma_init(&v->h264chroma, 8);
ff_vc1dsp_init(&v->vc1dsp);
if (avctx->codec_id == AV_CODEC_ID_WMV3 || avctx->codec_id == AV_CODEC_ID_WMV3IMAGE) {
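Aside (not part of the patch): the pattern shown here for VC-1 is the same one the VP5/6 hunks further down follow. The decoder context gains an H264ChromaContext member, it is filled once at init time with ff_h264chroma_init(), and chroma motion compensation then goes through the put/avg function tables instead of the old DSPContext pointers. A hedged sketch of that integration, with the decoder names being placeholders:

#include "h264chroma.h"

typedef struct MyDecoderContext {          /* hypothetical decoder context */
    H264ChromaContext h264chroma;
    /* ... other decoder state ... */
} MyDecoderContext;

static int mydecoder_init(MyDecoderContext *ctx)
{
    ff_h264chroma_init(&ctx->h264chroma, 8);   /* 8 = bits per sample */
    return 0;
}

static void mydecoder_mc_chroma(MyDecoderContext *ctx, uint8_t *dst,
                                uint8_t *src, int uvlinesize,
                                int h, int uvmx, int uvmy)
{
    /* tab[0] handles 8-pixel-wide blocks, tab[1] 4-wide, tab[2] 2-wide;
     * (uvmx, uvmy) are the eighth-pel fractional offsets, h the height. */
    ctx->h264chroma.put_h264_chroma_pixels_tab[0](dst, src, uvlinesize,
                                                  h, uvmx, uvmy);
}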

@ -25,8 +25,9 @@
*
*/
#include "vc1dsp.h"
#include "libavutil/common.h"
#include "h264chroma.h"
#include "vc1dsp.h"
/** Apply overlap transform to horizontal edge

@ -29,6 +29,7 @@
#define AVCODEC_VC1DSP_H
#include "dsputil.h"
#include "h264chroma.h"
typedef struct VC1DSPContext {
/* vc1 functions */

@ -26,7 +26,7 @@
#include "avcodec.h"
#include "bytestream.h"
#include "internal.h"
#include "h264chroma.h"
#include "vp56.h"
#include "vp56data.h"
@ -674,6 +674,7 @@ av_cold void ff_vp56_init(AVCodecContext *avctx, int flip, int has_alpha)
avctx->pix_fmt = has_alpha ? AV_PIX_FMT_YUVA420P : AV_PIX_FMT_YUV420P;
ff_dsputil_init(&s->dsp, avctx);
ff_h264chroma_init(&s->h264chroma, 8);
ff_videodsp_init(&s->vdsp, 8);
ff_vp3dsp_init(&s->vp3dsp, avctx->flags);
ff_vp56dsp_init(&s->vp56dsp, avctx->codec->id);

@ -30,6 +30,7 @@
#include "dsputil.h"
#include "get_bits.h"
#include "bytestream.h"
#include "h264chroma.h"
#include "videodsp.h"
#include "vp3dsp.h"
#include "vp56dsp.h"
@ -95,6 +96,7 @@ typedef struct VP56Model {
struct vp56_context {
AVCodecContext *avctx;
DSPContext dsp;
H264ChromaContext h264chroma;
VideoDSPContext vdsp;
VP3DSPContext vp3dsp;
VP56DSPContext vp56dsp;

@ -536,8 +536,8 @@ static void vp6_filter_diag2(VP56Context *s, uint8_t *dst, uint8_t *src,
int stride, int h_weight, int v_weight)
{
uint8_t *tmp = s->edge_emu_buffer+16;
s->dsp.put_h264_chroma_pixels_tab[0](tmp, src, stride, 9, h_weight, 0);
s->dsp.put_h264_chroma_pixels_tab[0](dst, tmp, stride, 8, 0, v_weight);
s->h264chroma.put_h264_chroma_pixels_tab[0](tmp, src, stride, 9, h_weight, 0);
s->h264chroma.put_h264_chroma_pixels_tab[0](dst, tmp, stride, 8, 0, v_weight);
}
static void vp6_filter(VP56Context *s, uint8_t *dst, uint8_t *src,
@ -583,7 +583,7 @@ static void vp6_filter(VP56Context *s, uint8_t *dst, uint8_t *src,
}
} else {
if (!x8 || !y8) {
s->dsp.put_h264_chroma_pixels_tab[0](dst, src+offset1, stride, 8, x8, y8);
s->h264chroma.put_h264_chroma_pixels_tab[0](dst, src + offset1, stride, 8, x8, y8);
} else {
vp6_filter_diag2(s, dst, src+offset1 + ((mv.x^mv.y)>>31), stride, x8, y8);
}
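One detail of vp6_filter_diag2 above that is easy to miss: the diagonal case is built from two 1-D passes of the same bilinear routine, a horizontal-only pass (y = 0) into a temporary buffer followed by a vertical-only pass (x = 0) into the destination. The first pass covers 9 rows rather than 8 because the vertical pass blends each of its 8 output rows with the row below it, so it needs one extra source row; the two-pass form differs from a single 2-D pass only in intermediate rounding. A sketch of that composition (the function name and tmp buffer are mine; tmp must hold at least 9 rows of stride bytes):

#include <stdint.h>
#include "h264chroma.h"

/* Illustrative only: diagonal (x8, y8) chroma MC as two separable passes,
 * mirroring what vp6_filter_diag2() does with s->edge_emu_buffer. */
static void filter_diag2_sketch(H264ChromaContext *c, uint8_t *dst,
                                uint8_t *src, uint8_t *tmp,
                                int stride, int x8, int y8)
{
    /* Horizontal pass only: 9 rows, so the next pass has its bottom row. */
    c->put_h264_chroma_pixels_tab[0](tmp, src, stride, 9, x8, 0);
    /* Vertical pass only: 8 output rows, each reading tmp rows i and i+1. */
    c->put_h264_chroma_pixels_tab[0](dst, tmp, stride, 8, 0, y8);
}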

@ -43,7 +43,8 @@ YASM-OBJS-$(CONFIG_AC3DSP) += x86/ac3dsp.o
YASM-OBJS-$(CONFIG_DCT) += x86/dct32.o
YASM-OBJS-$(CONFIG_ENCODERS) += x86/dsputilenc.o
YASM-OBJS-$(CONFIG_FFT) += x86/fft.o
YASM-OBJS-$(CONFIG_H264CHROMA) += x86/h264_chromamc.o \
YASM-OBJS-$(CONFIG_H264CHROMA) += x86/h264chroma_init.o \
x86/h264_chromamc.o \
x86/h264_chromamc_10bit.o
YASM-OBJS-$(CONFIG_H264DSP) += x86/h264_deblock.o \
x86/h264_deblock_10bit.o \

@ -1460,49 +1460,6 @@ void ff_put_pixels16_sse2(uint8_t *block, const uint8_t *pixels,
void ff_avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_h264_chroma_mc8_rnd_mmx (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc8_rnd_mmxext(uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc8_rnd_3dnow(uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_put_h264_chroma_mc4_mmx (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc4_mmxext (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_put_h264_chroma_mc2_mmxext (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc2_mmxext (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_put_h264_chroma_mc8_rnd_ssse3(uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_put_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc8_rnd_ssse3(uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
#define CHROMA_MC(OP, NUM, DEPTH, OPT) \
void ff_ ## OP ## _h264_chroma_mc ## NUM ## _ ## DEPTH ## _ ## OPT \
(uint8_t *dst, uint8_t *src, \
int stride, int h, int x, int y);
CHROMA_MC(put, 2, 10, mmxext)
CHROMA_MC(avg, 2, 10, mmxext)
CHROMA_MC(put, 4, 10, mmxext)
CHROMA_MC(avg, 4, 10, mmxext)
CHROMA_MC(put, 8, 10, sse2)
CHROMA_MC(avg, 8, 10, sse2)
CHROMA_MC(put, 8, 10, avx)
CHROMA_MC(avg, 8, 10, avx)
#if HAVE_INLINE_ASM
/* CAVS-specific */
@ -1704,11 +1661,6 @@ static av_cold void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx,
#endif /* HAVE_INLINE_ASM */
#if HAVE_YASM
if (!high_bit_depth && CONFIG_H264CHROMA) {
c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_rnd_mmx;
c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_mmx;
}
c->vector_clip_int32 = ff_vector_clip_int32_mmx;
#endif
@ -1773,19 +1725,6 @@ static av_cold void dsputil_init_mmxext(DSPContext *c, AVCodecContext *avctx,
c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_exact_mmxext;
}
if (!high_bit_depth && CONFIG_H264CHROMA) {
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_rnd_mmxext;
c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_mmxext;
c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_mmxext;
c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_mmxext;
}
if (bit_depth == 10 && CONFIG_H264CHROMA) {
c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_10_mmxext;
c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_10_mmxext;
c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_10_mmxext;
c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_10_mmxext;
}
/* slower than cmov version on AMD */
if (!(mm_flags & AV_CPU_FLAG_3DNOW))
c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmxext;
@ -1838,11 +1777,6 @@ static av_cold void dsputil_init_3dnow(DSPContext *c, AVCodecContext *avctx,
c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_exact_3dnow;
c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_exact_3dnow;
}
if (!high_bit_depth && CONFIG_H264CHROMA) {
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_rnd_3dnow;
c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_3dnow;
}
#endif /* HAVE_YASM */
}
@ -1889,13 +1823,6 @@ static av_cold void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
}
}
if (bit_depth == 10) {
if (CONFIG_H264CHROMA) {
c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_sse2;
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_sse2;
}
}
c->scalarproduct_int16 = ff_scalarproduct_int16_sse2;
c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
if (mm_flags & AV_CPU_FLAG_ATOM) {
@ -1916,14 +1843,6 @@ static av_cold void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx,
int mm_flags)
{
#if HAVE_SSSE3_EXTERNAL
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
if (!high_bit_depth && CONFIG_H264CHROMA) {
c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_rnd_ssse3;
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_rnd_ssse3;
c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_ssse3;
c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_ssse3;
}
c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
if (mm_flags & AV_CPU_FLAG_SSE4) // not really sse4, just slow on Conroe
c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
@ -1946,20 +1865,6 @@ static av_cold void dsputil_init_sse4(DSPContext *c, AVCodecContext *avctx,
#endif /* HAVE_SSE4_EXTERNAL */
}
static av_cold void dsputil_init_avx(DSPContext *c, AVCodecContext *avctx, int mm_flags)
{
#if HAVE_AVX_EXTERNAL
const int bit_depth = avctx->bits_per_raw_sample;
if (bit_depth == 10) {
if (CONFIG_H264CHROMA) {
c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_avx;
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_avx;
}
}
#endif /* HAVE_AVX_EXTERNAL */
}
av_cold void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
{
int mm_flags = av_get_cpu_flags();
@ -1990,9 +1895,6 @@ av_cold void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
if (mm_flags & AV_CPU_FLAG_SSE4)
dsputil_init_sse4(c, avctx, mm_flags);
if (mm_flags & AV_CPU_FLAG_AVX)
dsputil_init_avx(c, avctx, mm_flags);
if (CONFIG_ENCODERS)
ff_dsputilenc_init_mmx(c, avctx);
}

@ -0,0 +1,116 @@
/*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include "config.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/h264chroma.h"
void ff_put_h264_chroma_mc8_rnd_mmx (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc8_rnd_mmxext(uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc8_rnd_3dnow(uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_put_h264_chroma_mc4_mmx (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc4_mmxext (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_put_h264_chroma_mc2_mmxext (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc2_mmxext (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_put_h264_chroma_mc8_rnd_ssse3(uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_put_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc8_rnd_ssse3(uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
#define CHROMA_MC(OP, NUM, DEPTH, OPT) \
void ff_ ## OP ## _h264_chroma_mc ## NUM ## _ ## DEPTH ## _ ## OPT \
(uint8_t *dst, uint8_t *src, \
int stride, int h, int x, int y);
CHROMA_MC(put, 2, 10, mmxext)
CHROMA_MC(avg, 2, 10, mmxext)
CHROMA_MC(put, 4, 10, mmxext)
CHROMA_MC(avg, 4, 10, mmxext)
CHROMA_MC(put, 8, 10, sse2)
CHROMA_MC(avg, 8, 10, sse2)
CHROMA_MC(put, 8, 10, avx)
CHROMA_MC(avg, 8, 10, avx)
void ff_h264chroma_init_x86(H264ChromaContext *c, int bit_depth)
{
int high_bit_depth = bit_depth > 8;
int mm_flags = av_get_cpu_flags();
if (EXTERNAL_MMX(mm_flags) && !high_bit_depth) {
c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_rnd_mmx;
c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_mmx;
}
if (EXTERNAL_AMD3DNOW(mm_flags) && !high_bit_depth) {
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_rnd_3dnow;
c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_3dnow;
}
if (EXTERNAL_MMXEXT(mm_flags) && !high_bit_depth) {
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_rnd_mmxext;
c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_mmxext;
c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_mmxext;
c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_mmxext;
}
if (EXTERNAL_MMXEXT(mm_flags) && bit_depth == 10) {
c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_10_mmxext;
c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_10_mmxext;
c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_10_mmxext;
c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_10_mmxext;
}
if (EXTERNAL_SSE2(mm_flags) && bit_depth == 10) {
c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_sse2;
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_sse2;
}
if (EXTERNAL_SSSE3(mm_flags) && !high_bit_depth) {
c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_rnd_ssse3;
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_rnd_ssse3;
c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_ssse3;
c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_ssse3;
}
if (EXTERNAL_AVX(mm_flags) && bit_depth == 10) {
// AVX implies !cache64.
// TODO: Port cache(32|64) detection from x264.
c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_avx;
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_avx;
}
}
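A note on the ordering in ff_h264chroma_init_x86() above: the checks run roughly from baseline to the most capable extensions, and each matching tier simply overwrites the pointers installed by an earlier one, so the fastest available implementation wins (SSSE3 replaces the MMX/MMXEXT 8-bit entries, AVX replaces the SSE2 10-bit ones). The generic ff_h264chroma_init() presumably installs the C templates first and then hands the context to the per-arch initializers. The override pattern in isolation, as a small self-contained illustration (not Libav code):

#include <stdio.h>

typedef void (*chroma_fn)(void);

static void mc8_c(void)     { puts("mc8: C");     }
static void mc8_mmx(void)   { puts("mc8: MMX");   }
static void mc8_ssse3(void) { puts("mc8: SSSE3"); }

int main(void)
{
    /* Stand-ins for the av_get_cpu_flags() checks. */
    int have_mmx = 1, have_ssse3 = 1;

    /* Baseline C routine first, then CPU-specific overrides in order. */
    chroma_fn put_tab[3] = { mc8_c, mc8_c, mc8_c };

    if (have_mmx)
        put_tab[0] = mc8_mmx;
    if (have_ssse3)
        put_tab[0] = mc8_ssse3;   /* later, faster tier overrides MMX */

    put_tab[0]();                 /* prints "mc8: SSSE3" */
    return 0;
}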