Merge remote-tracking branch 'qatar/master'

* qatar/master: (27 commits)
  ppc: Add ff_ prefix to nonstatic symbols
  sh4: Add ff_ prefix to nonstatic symbols
  mpegvideo: Add ff_ prefix to nonstatic functions
  rtjpeg: Add ff_ prefix to nonstatic symbols
  rv: Add ff_ prefix to nonstatic symbols
  vp56: Add ff_ prefix to nonstatic symbols
  vorbis: Add ff_ prefix to nonstatic symbols
  msmpeg4: Add ff_ prefix to nonstatic symbols
  vc1: Add ff_ prefix to nonstatic symbols
  msmpeg4: Add ff_ prefixes to nonstatic symbols
  snow: Add ff_ prefix to nonstatic symbols
  mpeg12: Add ff_ prefix to nonstatic symbols
  mpeg4: Add ff_ prefixes to nonstatic symbols
  lagarith: Add ff_ prefix to lag_rac_init
  libavcodec: Add ff_ prefix to j_rev_dct*
  dsputil: Add ff_ prefix to inv_zigzag_direct16
  libavcodec: Prefix fdct_ifast, fdct_ifast248
  dsputil: Add ff_ prefix to the dsputil*_init* functions
  libavcodec: Add ff_ prefix to some nonstatic symbols
  vlc/rl: Add ff_ prefix to the nonstatic symbols
  ...

Conflicts:
	libavcodec/Makefile
	libavcodec/allcodecs.c
	libavcodec/dnxhddec.c
	libavcodec/ffv1.c
	libavcodec/h263.h
	libavcodec/h263dec.c
	libavcodec/h264.c
	libavcodec/mpegvideo.c
	libavcodec/mpegvideo_enc.c
	libavcodec/nuv.c
	libavcodec/ppc/dsputil_ppc.c
	libavcodec/proresdsp.c
	libavcodec/svq3.c
	libavcodec/version.h
	libavformat/dv.h
	libavformat/dvenc.c
	libavformat/matroskadec.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit 6cb2085278
Changed files (lines changed per file):

 Changelog | 1
 libavcodec/4xm.c | 4
 libavcodec/Makefile | 1
 libavcodec/aacdec.c | 2
 libavcodec/aacenc.c | 2
 libavcodec/ac3dec.c | 2
 libavcodec/ac3enc.c | 2
 libavcodec/allcodecs.c | 1
 libavcodec/alpha/dsputil_alpha.c | 2
 libavcodec/alpha/mpegvideo_alpha.c | 2
 libavcodec/alsdec.c | 2
 libavcodec/apedec.c | 2
 libavcodec/arm/dsputil_init_arm.c | 2
 libavcodec/arm/mpegvideo_arm.c | 6
 libavcodec/arm/mpegvideo_arm.h | 4
 libavcodec/arm/mpegvideo_armv5te.c | 2
 libavcodec/arm/mpegvideo_iwmmxt.c | 2
 libavcodec/asv1.c | 2
 libavcodec/atrac.c | 4
 libavcodec/atrac.h | 4
 libavcodec/atrac1.c | 8
 libavcodec/atrac3.c | 10
 libavcodec/bfin/dsputil_bfin.c | 2
 libavcodec/bfin/mpegvideo_bfin.c | 2
 libavcodec/bink.c | 2
 libavcodec/binkaudio.c | 2
 libavcodec/bitstream.c | 6
 libavcodec/cavs.c | 2
 libavcodec/cavsdec.c | 2
 libavcodec/cook.c | 6
 libavcodec/dca.c | 2
 libavcodec/dct-test.c | 4
 libavcodec/dnxhddec.c | 16
 libavcodec/dnxhdenc.c | 4
 libavcodec/dsputil.c | 55
 libavcodec/dsputil.h | 33
 libavcodec/dv.c | 4
 libavcodec/eamad.c | 2
 libavcodec/eatgq.c | 2
 libavcodec/eatqi.c | 2
 libavcodec/error_resilience.c | 2
 libavcodec/faxcompr.c | 10
 libavcodec/ffv1.c | 2
 libavcodec/flvenc.c | 6
 libavcodec/fraps.c | 6
 libavcodec/get_bits.h | 12
 libavcodec/h261dec.c | 18
 libavcodec/h261enc.c | 8
 libavcodec/h263.c | 8
 libavcodec/h263.h | 42
 libavcodec/h263data.h | 24
 libavcodec/h263dec.c | 32
 libavcodec/h264.c | 24
 libavcodec/huffman.c | 2
 libavcodec/huffyuv.c | 14
 libavcodec/imc.c | 2
 libavcodec/indeo3.c | 2
 libavcodec/indeo5.c | 2
 libavcodec/intelh263dec.c | 4
 libavcodec/interplayvideo.c | 2
 libavcodec/intrax8.c | 8
 libavcodec/ituh263dec.c | 86
 libavcodec/ituh263enc.c | 46
 libavcodec/ivi_common.c | 4
 libavcodec/jfdctfst.c | 4
 libavcodec/jrevdct.c | 8
 libavcodec/jvdec.c | 2
 libavcodec/lagarith.c | 4
 libavcodec/lagarithrac.c | 2
 libavcodec/lagarithrac.h | 2
 libavcodec/ljpegenc.c | 4
 libavcodec/mdec.c | 2
 libavcodec/mimic.c | 4
 libavcodec/mips/dsputil_mmi.c | 2
 libavcodec/mips/mpegvideo_mmi.c | 2
 libavcodec/mjpegdec.c | 12
 libavcodec/mjpegenc.c | 12
 libavcodec/mlpdec.c | 2
 libavcodec/motion-test.c | 4
 libavcodec/motionpixels.c | 4
 libavcodec/mpc7.c | 2
 libavcodec/mpc8.c | 6
 libavcodec/mpeg12.c | 24
 libavcodec/mpeg12enc.c | 18
 libavcodec/mpeg4data.h | 10
 libavcodec/mpeg4video.h | 24
 libavcodec/mpeg4videodec.c | 88
 libavcodec/mpeg4videoenc.c | 22
 libavcodec/mpegvideo.c | 50
 libavcodec/mpegvideo.h | 62
 libavcodec/mpegvideo_common.h | 10
 libavcodec/mpegvideo_enc.c | 94
 libavcodec/msmpeg4.c | 150
 libavcodec/msmpeg4data.c | 40
 libavcodec/msmpeg4data.h | 26
 libavcodec/nellymoserdec.c | 2
 libavcodec/nellymoserenc.c | 2
 libavcodec/nuv.c | 10
 libavcodec/pngenc.c | 2
 libavcodec/ppc/dsputil_altivec.c | 12
 (some files were not shown because too many files have changed in this diff)
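
Every change in this merge follows the same mechanical pattern: a nonstatic (externally visible) symbol gains an ff_ prefix, and where the old name must stay callable during the transition, a thin deprecated wrapper forwards to the new one. A minimal sketch of that convention, using the dsputil names that appear in the dsputil.c/dsputil.h hunks further down:

    /* dsputil.h: new name plus deprecated compatibility declaration */
    void ff_dsputil_init(DSPContext *c, AVCodecContext *avctx);
    attribute_deprecated void dsputil_init(DSPContext *c, AVCodecContext *avctx);

    /* dsputil.c: old entry point simply forwards to the prefixed function */
    av_cold void dsputil_init(DSPContext *c, AVCodecContext *avctx)
    {
        ff_dsputil_init(c, avctx);
    }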

@ -5,6 +5,7 @@ version next:
- v408 Quicktime and Microsoft AYUV Uncompressed 4:4:4:4 encoder and decoder
- setfield filter
- CDXL demuxer and decoder
- Apple ProRes encoder
version 0.10:

@ -925,7 +925,7 @@ static av_cold void common_init(AVCodecContext *avctx)
{
FourXContext * const f = avctx->priv_data;
dsputil_init(&f->dsp, avctx);
ff_dsputil_init(&f->dsp, avctx);
f->avctx = avctx;
}
@ -969,7 +969,7 @@ static av_cold int decode_end(AVCodecContext *avctx)
av_freep(&f->cfrm[i].data);
f->cfrm[i].allocated_size = 0;
}
free_vlc(&f->pre_vlc);
ff_free_vlc(&f->pre_vlc);
if (f->current_picture.data[0])
avctx->release_buffer(avctx, &f->current_picture);
if (f->last_picture.data[0])

@ -333,6 +333,7 @@ OBJS-$(CONFIG_PRORES_DECODER) += proresdec2.o
OBJS-$(CONFIG_PRORES_LGPL_DECODER) += proresdec_lgpl.o proresdsp.o
OBJS-$(CONFIG_PRORES_ENCODER) += proresenc_anatoliy.o
OBJS-$(CONFIG_PRORES_ANATOLIY_ENCODER) += proresenc_anatoliy.o
OBJS-$(CONFIG_PRORES_KOSTYA_ENCODER) += proresenc_kostya.o proresdata.o proresdsp.o
OBJS-$(CONFIG_PTX_DECODER) += ptx.o
OBJS-$(CONFIG_QCELP_DECODER) += qcelpdec.o celp_math.o \
celp_filters.o acelp_vectors.o \

@ -825,7 +825,7 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
ff_aac_sbr_init();
dsputil_init(&ac->dsp, avctx);
ff_dsputil_init(&ac->dsp, avctx);
ff_fmt_convert_init(&ac->fmt_conv, avctx);
ac->random_state = 0x1f2e3d4c;

@ -669,7 +669,7 @@ static av_cold int dsp_init(AVCodecContext *avctx, AACEncContext *s)
{
int ret = 0;
dsputil_init(&s->dsp, avctx);
ff_dsputil_init(&s->dsp, avctx);
// window init
ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);

@ -167,7 +167,7 @@ static av_cold int ac3_decode_init(AVCodecContext *avctx)
ff_mdct_init(&s->imdct_256, 8, 1, 1.0);
ff_mdct_init(&s->imdct_512, 9, 1, 1.0);
ff_kbd_window_init(s->window, 5.0, 256);
dsputil_init(&s->dsp, avctx);
ff_dsputil_init(&s->dsp, avctx);
ff_ac3dsp_init(&s->ac3dsp, avctx->flags & CODEC_FLAG_BITEXACT);
ff_fmt_convert_init(&s->fmt_conv, avctx);
av_lfg_init(&s->dith_state, 0);

@ -2467,7 +2467,7 @@ av_cold int ff_ac3_encode_init(AVCodecContext *avctx)
if (!avctx->coded_frame)
goto init_fail;
dsputil_init(&s->dsp, avctx);
ff_dsputil_init(&s->dsp, avctx);
ff_ac3dsp_init(&s->ac3dsp, avctx->flags & CODEC_FLAG_BITEXACT);
dprint_options(s);

@ -182,6 +182,7 @@ void avcodec_register_all(void)
REGISTER_ENCDEC (PPM, ppm);
REGISTER_ENCDEC (PRORES, prores);
REGISTER_ENCODER (PRORES_ANATOLIY, prores_anatoliy);
REGISTER_ENCODER (PRORES_KOSTYA, prores_kostya);
REGISTER_DECODER (PRORES_LGPL, prores_lgpl);
REGISTER_DECODER (PTX, ptx);
REGISTER_DECODER (QDRAW, qdraw);

@ -268,7 +268,7 @@ static void put_pixels16_axp_asm(uint8_t *block, const uint8_t *pixels,
put_pixels_axp_asm(block + 8, pixels + 8, line_size, h);
}
void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx)
void ff_dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx)
{
const int high_bit_depth = avctx->bits_per_raw_sample > 8;

@ -103,7 +103,7 @@ static void dct_unquantize_h263_inter_axp(MpegEncContext *s, DCTELEM *block,
dct_unquantize_h263_axp(block, n_coeffs, qscale, (qscale - 1) | 1);
}
void MPV_common_init_axp(MpegEncContext *s)
void ff_MPV_common_init_axp(MpegEncContext *s)
{
s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_axp;
s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_axp;

@ -1724,7 +1724,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
}
}
dsputil_init(&ctx->dsp, avctx);
ff_dsputil_init(&ctx->dsp, avctx);
avcodec_get_frame_defaults(&ctx->frame);
avctx->coded_frame = &ctx->frame;

@ -231,7 +231,7 @@ static av_cold int ape_decode_init(AVCodecContext *avctx)
filter_alloc_fail);
}
dsputil_init(&s->dsp, avctx);
ff_dsputil_init(&s->dsp, avctx);
avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
avcodec_get_frame_defaults(&s->frame);

@ -73,7 +73,7 @@ static void simple_idct_arm_add(uint8_t *dest, int line_size, DCTELEM *block)
ff_add_pixels_clamped(block, dest, line_size);
}
void dsputil_init_arm(DSPContext* c, AVCodecContext *avctx)
void ff_dsputil_init_arm(DSPContext* c, AVCodecContext *avctx)
{
const int high_bit_depth = avctx->bits_per_raw_sample > 8;

@ -38,17 +38,17 @@ void ff_dct_unquantize_h263_inter_neon(MpegEncContext *s, DCTELEM *block,
void ff_dct_unquantize_h263_intra_neon(MpegEncContext *s, DCTELEM *block,
int n, int qscale);
void MPV_common_init_arm(MpegEncContext *s)
void ff_MPV_common_init_arm(MpegEncContext *s)
{
/* IWMMXT support is a superset of armv5te, so
* allow optimized functions for armv5te unless
* a better iwmmxt function exists
*/
#if HAVE_ARMV5TE
MPV_common_init_armv5te(s);
ff_MPV_common_init_armv5te(s);
#endif
#if HAVE_IWMMXT
MPV_common_init_iwmmxt(s);
ff_MPV_common_init_iwmmxt(s);
#endif
if (HAVE_NEON) {

@ -21,7 +21,7 @@
#include "libavcodec/mpegvideo.h"
void MPV_common_init_iwmmxt(MpegEncContext *s);
void MPV_common_init_armv5te(MpegEncContext *s);
void ff_MPV_common_init_iwmmxt(MpegEncContext *s);
void ff_MPV_common_init_armv5te(MpegEncContext *s);
#endif /* AVCODEC_ARM_MPEGVIDEO_H */

@ -94,7 +94,7 @@ static void dct_unquantize_h263_inter_armv5te(MpegEncContext *s,
ff_dct_unquantize_h263_armv5te(block, qmul, qadd, nCoeffs + 1);
}
void MPV_common_init_armv5te(MpegEncContext *s)
void ff_MPV_common_init_armv5te(MpegEncContext *s)
{
s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_armv5te;
s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_armv5te;

@ -93,7 +93,7 @@ static void dct_unquantize_h263_intra_iwmmxt(MpegEncContext *s,
block_orig[0] = level;
}
void MPV_common_init_iwmmxt(MpegEncContext *s)
void ff_MPV_common_init_iwmmxt(MpegEncContext *s)
{
if (!(mm_flags & AV_CPU_FLAG_IWMMXT)) return;

@ -519,7 +519,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
static av_cold void common_init(AVCodecContext *avctx){
ASV1Context * const a = avctx->priv_data;
dsputil_init(&a->dsp, avctx);
ff_dsputil_init(&a->dsp, avctx);
a->mb_width = (avctx->width + 15) / 16;
a->mb_height = (avctx->height + 15) / 16;

@ -48,7 +48,7 @@ static const float qmf_48tap_half[24] = {
* Generate common tables
*/
void atrac_generate_tables(void)
void ff_atrac_generate_tables(void)
{
int i;
float s;
@ -79,7 +79,7 @@ void atrac_generate_tables(void)
*/
void atrac_iqmf (float *inlo, float *inhi, unsigned int nIn, float *pOut, float *delayBuf, float *temp)
void ff_atrac_iqmf (float *inlo, float *inhi, unsigned int nIn, float *pOut, float *delayBuf, float *temp)
{
int i, j;
float *p1, *p3;

@ -30,7 +30,7 @@
extern float ff_atrac_sf_table[64];
void atrac_generate_tables(void);
void atrac_iqmf (float *inlo, float *inhi, unsigned int nIn, float *pOut, float *delayBuf, float *temp);
void ff_atrac_generate_tables(void);
void ff_atrac_iqmf (float *inlo, float *inhi, unsigned int nIn, float *pOut, float *delayBuf, float *temp);
#endif /* AVCODEC_ATRAC_H */

@ -262,14 +262,14 @@ static void at1_subband_synthesis(AT1Ctx *q, AT1SUCtx* su, float *pOut)
float iqmf_temp[512 + 46];
/* combine low and middle bands */
atrac_iqmf(q->bands[0], q->bands[1], 128, temp, su->fst_qmf_delay, iqmf_temp);
ff_atrac_iqmf(q->bands[0], q->bands[1], 128, temp, su->fst_qmf_delay, iqmf_temp);
/* delay the signal of the high band by 23 samples */
memcpy( su->last_qmf_delay, &su->last_qmf_delay[256], sizeof(float) * 23);
memcpy(&su->last_qmf_delay[23], q->bands[2], sizeof(float) * 256);
/* combine (low + middle) and high bands */
atrac_iqmf(temp, su->last_qmf_delay, 256, pOut, su->snd_qmf_delay, iqmf_temp);
ff_atrac_iqmf(temp, su->last_qmf_delay, 256, pOut, su->snd_qmf_delay, iqmf_temp);
}
@ -378,9 +378,9 @@ static av_cold int atrac1_decode_init(AVCodecContext *avctx)
ff_init_ff_sine_windows(5);
atrac_generate_tables();
ff_atrac_generate_tables();
dsputil_init(&q->dsp, avctx);
ff_dsputil_init(&q->dsp, avctx);
ff_fmt_convert_init(&q->fmt_conv, avctx);
q->bands[0] = q->low;

@ -814,9 +814,9 @@ static int decodeFrame(ATRAC3Context *q, const uint8_t* databuf,
p2= p1+256;
p3= p2+256;
p4= p3+256;
atrac_iqmf (p1, p2, 256, p1, q->pUnits[i].delayBuf1, q->tempBuf);
atrac_iqmf (p4, p3, 256, p3, q->pUnits[i].delayBuf2, q->tempBuf);
atrac_iqmf (p1, p3, 512, p1, q->pUnits[i].delayBuf3, q->tempBuf);
ff_atrac_iqmf (p1, p2, 256, p1, q->pUnits[i].delayBuf1, q->tempBuf);
ff_atrac_iqmf (p4, p3, 256, p3, q->pUnits[i].delayBuf2, q->tempBuf);
ff_atrac_iqmf (p1, p3, 512, p1, q->pUnits[i].delayBuf3, q->tempBuf);
}
return 0;
@ -1016,7 +1016,7 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx)
return ret;
}
atrac_generate_tables();
ff_atrac_generate_tables();
/* Generate gain tables. */
for (i=0 ; i<16 ; i++)
@ -1039,7 +1039,7 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx)
q->matrix_coeff_index_next[i] = 3;
}
dsputil_init(&dsp, avctx);
ff_dsputil_init(&dsp, avctx);
ff_fmt_convert_init(&q->fmt_conv, avctx);
q->pUnits = av_mallocz(sizeof(channel_unit)*q->channels);

@ -195,7 +195,7 @@ static int bfin_pix_abs8_xy2 (void *c, uint8_t *blk1, uint8_t *blk2, int line_si
*/
void dsputil_init_bfin( DSPContext* c, AVCodecContext *avctx )
void ff_dsputil_init_bfin( DSPContext* c, AVCodecContext *avctx )
{
const int high_bit_depth = avctx->bits_per_raw_sample > 8;

@ -146,7 +146,7 @@ static int dct_quantize_bfin (MpegEncContext *s,
return last_non_zero;
}
void MPV_common_init_bfin (MpegEncContext *s)
void ff_MPV_common_init_bfin (MpegEncContext *s)
{
/* s->dct_quantize= dct_quantize_bfin; */
}

@ -1298,7 +1298,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
avctx->pix_fmt = c->has_alpha ? PIX_FMT_YUVA420P : PIX_FMT_YUV420P;
avctx->idct_algo = FF_IDCT_BINK;
dsputil_init(&c->dsp, avctx);
ff_dsputil_init(&c->dsp, avctx);
ff_binkdsp_init(&c->bdsp);
init_bundles(c);

@ -79,7 +79,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
int i;
int frame_len_bits;
dsputil_init(&s->dsp, avctx);
ff_dsputil_init(&s->dsp, avctx);
ff_fmt_convert_init(&s->fmt_conv, avctx);
/* determine frame length */

@ -253,9 +253,9 @@ static int build_table(VLC *vlc, int table_nb_bits, int nb_codes,
(byte/word/long) to store the 'bits', 'codes', and 'symbols' tables.
'use_static' should be set to 1 for tables, which should be freed
with av_free_static(), 0 if free_vlc() will be used.
with av_free_static(), 0 if ff_free_vlc() will be used.
*/
int init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
int ff_init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
const void *bits, int bits_wrap, int bits_size,
const void *codes, int codes_wrap, int codes_size,
const void *symbols, int symbols_wrap, int symbols_size,
@ -318,7 +318,7 @@ int init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
}
void free_vlc(VLC *vlc)
void ff_free_vlc(VLC *vlc)
{
av_freep(&vlc->table);
}
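
For the renamed VLC helpers (init_vlc_sparse -> ff_init_vlc_sparse, free_vlc -> ff_free_vlc) callers keep the same build/free pairing; only the names change. A hedged usage sketch, where example_vlc, lens, codes and NB_CODES are placeholders and not taken from this diff:

    /* illustrative only: lens[] holds code lengths, codes[] holds code values */
    static VLC example_vlc;

    static av_cold void example_build(void)
    {
        ff_init_vlc_sparse(&example_vlc, 9, NB_CODES,
                           lens,  1, 1,  /* lengths table, wrap, element size */
                           codes, 2, 2,  /* codes table,   wrap, element size */
                           NULL,  0, 0,  /* no explicit symbol remapping table */
                           0);
    }

    static void example_close(void)
    {
        ff_free_vlc(&example_vlc); /* skip for tables built with INIT_VLC_USE_NEW_STATIC */
    }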

@ -671,7 +671,7 @@ av_cold int ff_cavs_init(AVCodecContext *avctx) {
AVSContext *h = avctx->priv_data;
MpegEncContext * const s = &h->s;
MPV_decode_defaults(s);
ff_MPV_decode_defaults(s);
ff_cavsdsp_init(&h->cdsp, avctx);
s->avctx = avctx;

@ -470,7 +470,7 @@ static int decode_pic(AVSContext *h) {
if (!s->context_initialized) {
s->avctx->idct_algo = FF_IDCT_CAVS;
if (MPV_common_init(s) < 0)
if (ff_MPV_common_init(s) < 0)
return -1;
ff_init_scantable(s->dsp.idct_permutation,&h->scantable,ff_zigzag_direct);
}

@ -321,11 +321,11 @@ static av_cold int cook_decode_close(AVCodecContext *avctx)
/* Free the VLC tables. */
for (i = 0; i < 13; i++)
free_vlc(&q->envelope_quant_index[i]);
ff_free_vlc(&q->envelope_quant_index[i]);
for (i = 0; i < 7; i++)
free_vlc(&q->sqvh[i]);
ff_free_vlc(&q->sqvh[i]);
for (i = 0; i < q->num_subpackets; i++)
free_vlc(&q->subpacket[i].ccpl);
ff_free_vlc(&q->subpacket[i].ccpl);
av_log(avctx, AV_LOG_DEBUG, "Memory deallocated.\n");

@ -1926,7 +1926,7 @@ static av_cold int dca_decode_init(AVCodecContext *avctx)
s->avctx = avctx;
dca_init_vlcs();
dsputil_init(&s->dsp, avctx);
ff_dsputil_init(&s->dsp, avctx);
ff_mdct_init(&s->imdct, 6, 1, 1.0);
ff_synth_filter_init(&s->synth);
ff_dcadsp_init(&s->dcadsp);

@ -85,7 +85,7 @@ static int cpu_flags;
static const struct algo fdct_tab[] = {
{ "REF-DBL", ff_ref_fdct, NO_PERM },
{ "FAAN", ff_faandct, FAAN_SCALE },
{ "IJG-AAN-INT", fdct_ifast, SCALE_PERM },
{ "IJG-AAN-INT", ff_fdct_ifast, SCALE_PERM },
{ "IJG-LLM-INT", ff_jpeg_fdct_islow_8, NO_PERM },
#if HAVE_MMX
@ -124,7 +124,7 @@ static void ff_prores_idct_put_10_sse2_wrap(uint16_t *dst){
static const struct algo idct_tab[] = {
{ "FAANI", ff_faanidct, NO_PERM },
{ "REF-DBL", ff_ref_idct, NO_PERM },
{ "INT", j_rev_dct, MMX_PERM },
{ "INT", ff_j_rev_dct, MMX_PERM },
{ "SIMPLE-C", ff_simple_idct_8, NO_PERM },
#if HAVE_MMX

@ -84,9 +84,9 @@ static int dnxhd_init_vlc(DNXHDContext *ctx, int cid)
}
ctx->cid_table = &ff_dnxhd_cid_table[index];
free_vlc(&ctx->ac_vlc);
free_vlc(&ctx->dc_vlc);
free_vlc(&ctx->run_vlc);
ff_free_vlc(&ctx->ac_vlc);
ff_free_vlc(&ctx->dc_vlc);
ff_free_vlc(&ctx->run_vlc);
init_vlc(&ctx->ac_vlc, DNXHD_VLC_BITS, 257,
ctx->cid_table->ac_bits, 1, 1,
@ -132,7 +132,7 @@ static int dnxhd_decode_header(DNXHDContext *ctx, const uint8_t *buf, int buf_si
ctx->avctx->pix_fmt = PIX_FMT_YUV422P10;
ctx->avctx->bits_per_raw_sample = 10;
if (ctx->bit_depth != 10) {
dsputil_init(&ctx->dsp, ctx->avctx);
ff_dsputil_init(&ctx->dsp, ctx->avctx);
ctx->bit_depth = 10;
ctx->decode_dct_block = dnxhd_decode_dct_block_10;
}
@ -140,7 +140,7 @@ static int dnxhd_decode_header(DNXHDContext *ctx, const uint8_t *buf, int buf_si
ctx->avctx->pix_fmt = PIX_FMT_YUV422P;
ctx->avctx->bits_per_raw_sample = 8;
if (ctx->bit_depth != 8) {
dsputil_init(&ctx->dsp, ctx->avctx);
ff_dsputil_init(&ctx->dsp, ctx->avctx);
ctx->bit_depth = 8;
ctx->decode_dct_block = dnxhd_decode_dct_block_8;
}
@ -414,9 +414,9 @@ static av_cold int dnxhd_decode_close(AVCodecContext *avctx)
if (ctx->picture.data[0])
ff_thread_release_buffer(avctx, &ctx->picture);
free_vlc(&ctx->ac_vlc);
free_vlc(&ctx->dc_vlc);
free_vlc(&ctx->run_vlc);
ff_free_vlc(&ctx->ac_vlc);
ff_free_vlc(&ctx->dc_vlc);
ff_free_vlc(&ctx->run_vlc);
return 0;
}

@ -267,10 +267,10 @@ static int dnxhd_encode_init(AVCodecContext *avctx)
avctx->bits_per_raw_sample = ctx->cid_table->bit_depth;
dsputil_init(&ctx->m.dsp, avctx);
ff_dsputil_init(&ctx->m.dsp, avctx);
ff_dct_common_init(&ctx->m);
if (!ctx->m.dct_quantize)
ctx->m.dct_quantize = dct_quantize_c;
ctx->m.dct_quantize = ff_dct_quantize_c;
if (ctx->cid_table->bit_depth == 10) {
ctx->m.dct_quantize = dnxhd_10bit_dct_quantize;

@ -83,7 +83,7 @@ const uint8_t ff_zigzag248_direct[64] = {
};
/* not permutated inverse zigzag_direct + 1 for MMX quantizer */
DECLARE_ALIGNED(16, uint16_t, inv_zigzag_direct16)[64];
DECLARE_ALIGNED(16, uint16_t, ff_inv_zigzag_direct16)[64];
const uint8_t ff_alternate_horizontal_scan[64] = {
0, 1, 2, 3, 8, 9, 16, 17,
@ -2746,34 +2746,34 @@ static void ff_wmv2_idct_add_c(uint8_t *dest, int line_size, DCTELEM *block)
}
static void ff_jref_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
j_rev_dct (block);
ff_j_rev_dct (block);
ff_put_pixels_clamped_c(block, dest, line_size);
}
static void ff_jref_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
j_rev_dct (block);
ff_j_rev_dct (block);
ff_add_pixels_clamped_c(block, dest, line_size);
}
static void ff_jref_idct4_put(uint8_t *dest, int line_size, DCTELEM *block)
{
j_rev_dct4 (block);
ff_j_rev_dct4 (block);
put_pixels_clamped4_c(block, dest, line_size);
}
static void ff_jref_idct4_add(uint8_t *dest, int line_size, DCTELEM *block)
{
j_rev_dct4 (block);
ff_j_rev_dct4 (block);
add_pixels_clamped4_c(block, dest, line_size);
}
static void ff_jref_idct2_put(uint8_t *dest, int line_size, DCTELEM *block)
{
j_rev_dct2 (block);
ff_j_rev_dct2 (block);
put_pixels_clamped2_c(block, dest, line_size);
}
static void ff_jref_idct2_add(uint8_t *dest, int line_size, DCTELEM *block)
{
j_rev_dct2 (block);
ff_j_rev_dct2 (block);
add_pixels_clamped2_c(block, dest, line_size);
}
@ -2793,7 +2793,7 @@ static void ff_jref_idct1_add(uint8_t *dest, int line_size, DCTELEM *block)
static void just_return(void *mem av_unused, int stride av_unused, int h av_unused) { return; }
/* init static data */
av_cold void dsputil_static_init(void)
av_cold void ff_dsputil_static_init(void)
{
int i;
@ -2807,7 +2807,7 @@ av_cold void dsputil_static_init(void)
ff_squareTbl[i] = (i - 256) * (i - 256);
}
for(i=0; i<64; i++) inv_zigzag_direct16[ff_zigzag_direct[i]]= i+1;
for(i=0; i<64; i++) ff_inv_zigzag_direct16[ff_zigzag_direct[i]]= i+1;
}
int ff_check_alignment(void){
@ -2830,7 +2830,7 @@ int ff_check_alignment(void){
return 0;
}
av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
av_cold void ff_dsputil_init(DSPContext* c, AVCodecContext *avctx)
{
int i;
@ -2842,8 +2842,8 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
c->fdct248 = ff_fdct248_islow_10;
} else {
if(avctx->dct_algo==FF_DCT_FASTINT) {
c->fdct = fdct_ifast;
c->fdct248 = fdct_ifast248;
c->fdct = ff_fdct_ifast;
c->fdct248 = ff_fdct_ifast248;
}
else if(avctx->dct_algo==FF_DCT_FAAN) {
c->fdct = ff_faandct;
@ -2859,17 +2859,17 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
if(avctx->lowres==1){
c->idct_put= ff_jref_idct4_put;
c->idct_add= ff_jref_idct4_add;
c->idct = j_rev_dct4;
c->idct = ff_j_rev_dct4;
c->idct_permutation_type= FF_NO_IDCT_PERM;
}else if(avctx->lowres==2){
c->idct_put= ff_jref_idct2_put;
c->idct_add= ff_jref_idct2_add;
c->idct = j_rev_dct2;
c->idct = ff_j_rev_dct2;
c->idct_permutation_type= FF_NO_IDCT_PERM;
}else if(avctx->lowres==3){
c->idct_put= ff_jref_idct1_put;
c->idct_add= ff_jref_idct1_add;
c->idct = j_rev_dct1;
c->idct = ff_j_rev_dct1;
c->idct_permutation_type= FF_NO_IDCT_PERM;
}else{
if (avctx->bits_per_raw_sample == 10) {
@ -2881,7 +2881,7 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
if(avctx->idct_algo==FF_IDCT_INT){
c->idct_put= ff_jref_idct_put;
c->idct_add= ff_jref_idct_add;
c->idct = j_rev_dct;
c->idct = ff_j_rev_dct;
c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
}else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER ) &&
avctx->idct_algo==FF_IDCT_VP3){
@ -3062,7 +3062,7 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
c->add_8x8basis= add_8x8basis_c;
#if CONFIG_VORBIS_DECODER
c->vorbis_inverse_coupling = vorbis_inverse_coupling;
c->vorbis_inverse_coupling = ff_vorbis_inverse_coupling;
#endif
#if CONFIG_AC3_DECODER
c->ac3_downmix = ff_ac3_downmix_c;
@ -3182,14 +3182,14 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
}
if (HAVE_MMX) dsputil_init_mmx (c, avctx);
if (ARCH_ARM) dsputil_init_arm (c, avctx);
if (HAVE_VIS) dsputil_init_vis (c, avctx);
if (ARCH_ALPHA) dsputil_init_alpha (c, avctx);
if (ARCH_PPC) dsputil_init_ppc (c, avctx);
if (HAVE_MMI) dsputil_init_mmi (c, avctx);
if (ARCH_SH4) dsputil_init_sh4 (c, avctx);
if (ARCH_BFIN) dsputil_init_bfin (c, avctx);
if (HAVE_MMX) ff_dsputil_init_mmx (c, avctx);
if (ARCH_ARM) ff_dsputil_init_arm (c, avctx);
if (HAVE_VIS) ff_dsputil_init_vis (c, avctx);
if (ARCH_ALPHA) ff_dsputil_init_alpha (c, avctx);
if (ARCH_PPC) ff_dsputil_init_ppc (c, avctx);
if (HAVE_MMI) ff_dsputil_init_mmi (c, avctx);
if (ARCH_SH4) ff_dsputil_init_sh4 (c, avctx);
if (ARCH_BFIN) ff_dsputil_init_bfin (c, avctx);
for(i=0; i<64; i++){
if(!c->put_2tap_qpel_pixels_tab[0][i])
@ -3201,3 +3201,8 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
ff_init_scantable_permutation(c->idct_permutation,
c->idct_permutation_type);
}
av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
{
ff_dsputil_init(c, avctx);
}

@ -38,17 +38,17 @@
/* dct code */
typedef short DCTELEM;
void fdct_ifast (DCTELEM *data);
void fdct_ifast248 (DCTELEM *data);
void ff_fdct_ifast (DCTELEM *data);
void ff_fdct_ifast248 (DCTELEM *data);
void ff_jpeg_fdct_islow_8(DCTELEM *data);
void ff_jpeg_fdct_islow_10(DCTELEM *data);
void ff_fdct248_islow_8(DCTELEM *data);
void ff_fdct248_islow_10(DCTELEM *data);
void j_rev_dct (DCTELEM *data);
void j_rev_dct4 (DCTELEM *data);
void j_rev_dct2 (DCTELEM *data);
void j_rev_dct1 (DCTELEM *data);
void ff_j_rev_dct (DCTELEM *data);
void ff_j_rev_dct4 (DCTELEM *data);
void ff_j_rev_dct2 (DCTELEM *data);
void ff_j_rev_dct1 (DCTELEM *data);
void ff_wmv2_idct_c(DCTELEM *data);
void ff_fdct_mmx(DCTELEM *block);
@ -581,8 +581,9 @@ typedef struct DSPContext {
op_fill_func fill_block_tab[2];
} DSPContext;
void dsputil_static_init(void);
void dsputil_init(DSPContext* p, AVCodecContext *avctx);
void ff_dsputil_static_init(void);
void ff_dsputil_init(DSPContext* p, AVCodecContext *avctx);
attribute_deprecated void dsputil_init(DSPContext* c, AVCodecContext *avctx);
int ff_check_alignment(void);
@ -641,14 +642,14 @@ static inline int get_penalty_factor(int lambda, int lambda2, int type){
}
}
void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx);
void dsputil_init_arm(DSPContext* c, AVCodecContext *avctx);
void dsputil_init_bfin(DSPContext* c, AVCodecContext *avctx);
void dsputil_init_mmi(DSPContext* c, AVCodecContext *avctx);
void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx);
void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx);
void dsputil_init_sh4(DSPContext* c, AVCodecContext *avctx);
void dsputil_init_vis(DSPContext* c, AVCodecContext *avctx);
void ff_dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx);
void ff_dsputil_init_arm(DSPContext* c, AVCodecContext *avctx);
void ff_dsputil_init_bfin(DSPContext* c, AVCodecContext *avctx);
void ff_dsputil_init_mmi(DSPContext* c, AVCodecContext *avctx);
void ff_dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx);
void ff_dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx);
void ff_dsputil_init_sh4(DSPContext* c, AVCodecContext *avctx);
void ff_dsputil_init_vis(DSPContext* c, AVCodecContext *avctx);
void ff_dsputil_init_dwt(DSPContext *c);
void ff_intrax8dsp_init(DSPContext* c, AVCodecContext *avctx);

@ -313,13 +313,13 @@ static av_cold int dvvideo_init(AVCodecContext *avctx)
dv_rl_vlc[i].level = level;
dv_rl_vlc[i].run = run;
}
free_vlc(&dv_vlc);
ff_free_vlc(&dv_vlc);
dv_vlc_map_tableinit();
}
/* Generic DSP setup */
dsputil_init(&dsp, avctx);
ff_dsputil_init(&dsp, avctx);
ff_set_cmp(&dsp, dsp.ildct_cmp, avctx->ildct_cmp);
s->get_pixels = dsp.get_pixels;
s->ildct_cmp = dsp.ildct_cmp[5];

@ -65,7 +65,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
avctx->pix_fmt = PIX_FMT_YUV420P;
if (avctx->idct_algo == FF_IDCT_AUTO)
avctx->idct_algo = FF_IDCT_EA;
dsputil_init(&s->dsp, avctx);
ff_dsputil_init(&s->dsp, avctx);
ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
ff_mpeg12_init_vlcs();
return 0;

@ -50,7 +50,7 @@ static av_cold int tgq_decode_init(AVCodecContext *avctx){
s->avctx = avctx;
if(avctx->idct_algo==FF_IDCT_AUTO)
avctx->idct_algo=FF_IDCT_EA;
dsputil_init(&s->dsp, avctx);
ff_dsputil_init(&s->dsp, avctx);
ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);
avctx->time_base = (AVRational){1, 15};
avctx->pix_fmt = PIX_FMT_YUV420P;

@ -48,7 +48,7 @@ static av_cold int tqi_decode_init(AVCodecContext *avctx)
s->avctx = avctx;
if(avctx->idct_algo==FF_IDCT_AUTO)
avctx->idct_algo=FF_IDCT_EA;
dsputil_init(&s->dsp, avctx);
ff_dsputil_init(&s->dsp, avctx);
ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
s->qscale = 1;
avctx->time_base = (AVRational){1, 15};

@ -69,7 +69,7 @@ static void decode_mb(MpegEncContext *s, int ref)
ff_h264_hl_decode_mb(h);
} else {
assert(ref == 0);
MPV_decode_mb(s, s->block);
ff_MPV_decode_mb(s, s->block);
}
}

@ -110,11 +110,11 @@ av_cold void ff_ccitt_unpack_init(void)
ccitt_vlc[1].table = code_table2;
ccitt_vlc[1].table_allocated = 648;
for(i = 0; i < 2; i++){
init_vlc_sparse(&ccitt_vlc[i], 9, CCITT_SYMS,
ccitt_codes_lens[i], 1, 1,
ccitt_codes_bits[i], 1, 1,
ccitt_syms, 2, 2,
INIT_VLC_USE_NEW_STATIC);
ff_init_vlc_sparse(&ccitt_vlc[i], 9, CCITT_SYMS,
ccitt_codes_lens[i], 1, 1,
ccitt_codes_bits[i], 1, 1,
ccitt_syms, 2, 2,
INIT_VLC_USE_NEW_STATIC);
}
INIT_VLC_STATIC(&ccitt_group3_2d_vlc, 9, 11,
ccitt_group3_2d_lens, 1, 1,

@ -674,7 +674,7 @@ static av_cold int common_init(AVCodecContext *avctx){
avcodec_get_frame_defaults(&s->picture);
dsputil_init(&s->dsp, avctx);
ff_dsputil_init(&s->dsp, avctx);
s->width = avctx->width;
s->height= avctx->height;

@ -89,9 +89,9 @@ AVCodec ff_flv_encoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_FLV1,
.priv_data_size = sizeof(MpegEncContext),
.init = MPV_encode_init,
.encode = MPV_encode_picture,
.close = MPV_encode_end,
.init = ff_MPV_encode_init,
.encode = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("Flash Video (FLV) / Sorenson Spark / Sorenson H.263"),
};

@ -67,7 +67,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
s->avctx = avctx;
s->tmpbuf = NULL;
dsputil_init(&s->dsp, avctx);
ff_dsputil_init(&s->dsp, avctx);
return 0;
}
@ -114,13 +114,13 @@ static int fraps2_decode_plane(FrapsContext *s, uint8_t *dst, int stride, int w,
if(j) dst[i] += dst[i - stride];
else if(Uoff) dst[i] += 0x80;
if (get_bits_left(&gb) < 0) {
free_vlc(&vlc);
ff_free_vlc(&vlc);
return AVERROR_INVALIDDATA;
}
}
dst += stride;
}
free_vlc(&vlc);
ff_free_vlc(&vlc);
return 0;
}

@ -360,19 +360,19 @@ static inline void align_get_bits(GetBitContext *s)
bits, bits_wrap, bits_size, \
codes, codes_wrap, codes_size, \
flags) \
init_vlc_sparse(vlc, nb_bits, nb_codes, \
bits, bits_wrap, bits_size, \
codes, codes_wrap, codes_size, \
NULL, 0, 0, flags)
ff_init_vlc_sparse(vlc, nb_bits, nb_codes, \
bits, bits_wrap, bits_size, \
codes, codes_wrap, codes_size, \
NULL, 0, 0, flags)
int init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
int ff_init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
const void *bits, int bits_wrap, int bits_size,
const void *codes, int codes_wrap, int codes_size,
const void *symbols, int symbols_wrap, int symbols_size,
int flags);
#define INIT_VLC_LE 2
#define INIT_VLC_USE_NEW_STATIC 4
void free_vlc(VLC *vlc);
void ff_free_vlc(VLC *vlc);
#define INIT_VLC_STATIC(vlc, bits, a,b,c,d,e,f,g, static_size) do { \
static VLC_TYPE table[static_size][2]; \

@ -66,7 +66,7 @@ static av_cold void h261_decode_init_vlc(H261Context *h){
INIT_VLC_STATIC(&h261_cbp_vlc, H261_CBP_VLC_BITS, 63,
&h261_cbp_tab[0][1], 2, 1,
&h261_cbp_tab[0][0], 2, 1, 512);
init_rl(&h261_rl_tcoeff, ff_h261_rl_table_store);
ff_init_rl(&h261_rl_tcoeff, ff_h261_rl_table_store);
INIT_VLC_RL(h261_rl_tcoeff, 552);
}
}
@ -76,7 +76,7 @@ static av_cold int h261_decode_init(AVCodecContext *avctx){
MpegEncContext * const s = &h->s;
// set defaults
MPV_decode_defaults(s);
ff_MPV_decode_defaults(s);
s->avctx = avctx;
s->width = s->avctx->coded_width;
@ -221,7 +221,7 @@ static int h261_decode_mb_skipped(H261Context *h, int mba1, int mba2 )
s->mb_skipped = 1;
h->mtype &= ~MB_TYPE_H261_FIL;
MPV_decode_mb(s, s->block);
ff_MPV_decode_mb(s, s->block);
}
return 0;
@ -349,7 +349,7 @@ intra:
s->block_last_index[i]= -1;
}
MPV_decode_mb(s, s->block);
ff_MPV_decode_mb(s, s->block);
return SLICE_OK;
}
@ -565,7 +565,7 @@ retry:
init_get_bits(&s->gb, buf, buf_size*8);
if(!s->context_initialized){
if (MPV_common_init(s) < 0) //we need the idct permutaton for reading a custom matrix
if (ff_MPV_common_init(s) < 0) //we need the idct permutaton for reading a custom matrix
return -1;
}
@ -588,7 +588,7 @@ retry:
if (s->width != avctx->coded_width || s->height != avctx->coded_height){
ParseContext pc= s->parse_context; //FIXME move this demuxing hack to libavformat
s->parse_context.buffer=0;
MPV_common_end(s);
ff_MPV_common_end(s);
s->parse_context= pc;
}
if (!s->context_initialized) {
@ -606,7 +606,7 @@ retry:
|| avctx->skip_frame >= AVDISCARD_ALL)
return get_consumed_bytes(s, buf_size);
if(MPV_frame_start(s, avctx) < 0)
if(ff_MPV_frame_start(s, avctx) < 0)
return -1;
ff_er_frame_start(s);
@ -620,7 +620,7 @@ retry:
break;
h261_decode_gob(h);
}
MPV_frame_end(s);
ff_MPV_frame_end(s);
assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
assert(s->current_picture.f.pict_type == s->pict_type);
@ -637,7 +637,7 @@ static av_cold int h261_decode_end(AVCodecContext *avctx)
H261Context *h= avctx->priv_data;
MpegEncContext *s = &h->s;
MPV_common_end(s);
ff_MPV_common_end(s);
return 0;
}

@ -240,7 +240,7 @@ void ff_h261_encode_init(MpegEncContext *s){
if (!done) {
done = 1;
init_rl(&h261_rl_tcoeff, ff_h261_rl_table_store);
ff_init_rl(&h261_rl_tcoeff, ff_h261_rl_table_store);
}
s->min_qcoeff= -127;
@ -326,9 +326,9 @@ AVCodec ff_h261_encoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_H261,
.priv_data_size = sizeof(H261Context),
.init = MPV_encode_init,
.encode = MPV_encode_picture,
.close = MPV_encode_end,
.init = ff_MPV_encode_init,
.encode = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("H.261"),
};

@ -98,7 +98,7 @@ void ff_h263_update_motion_val(MpegEncContext * s){
}
}
int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
int ff_h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
{
int x, y, wrap, a, c, pred_dc;
int16_t *dc_val;
@ -226,7 +226,7 @@ void ff_h263_loop_filter(MpegEncContext * s){
}
}
void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
void ff_h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
{
int x, y, wrap, a, c, pred_dc, scale, i;
int16_t *dc_val, *ac_val, *ac_val1;
@ -313,8 +313,8 @@ void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
ac_val1[8 + i] = block[s->dsp.idct_permutation[i ]];
}
int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
int *px, int *py)
int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
int *px, int *py)
{
int wrap;
int16_t *A, *B, *C, (*mot_val)[2];

@ -38,16 +38,16 @@
extern const AVRational ff_h263_pixel_aspect[16];
extern const uint8_t ff_h263_cbpy_tab[16][2];
extern const uint8_t cbpc_b_tab[4][2];
extern const uint8_t ff_cbpc_b_tab[4][2];
extern const uint8_t mvtab[33][2];
extern const uint8_t ff_mvtab[33][2];
extern const uint8_t ff_h263_intra_MCBPC_code[9];
extern const uint8_t ff_h263_intra_MCBPC_bits[9];
extern const uint8_t ff_h263_inter_MCBPC_code[28];
extern const uint8_t ff_h263_inter_MCBPC_bits[28];
extern const uint8_t h263_mbtype_b_tab[15][2];
extern const uint8_t ff_h263_mbtype_b_tab[15][2];
extern VLC ff_h263_intra_MCBPC_vlc;
extern VLC ff_h263_inter_MCBPC_vlc;
@ -55,41 +55,41 @@ extern VLC ff_h263_cbpy_vlc;
extern RLTable ff_h263_rl_inter;
extern RLTable rl_intra_aic;
extern RLTable ff_rl_intra_aic;
extern const uint16_t h263_format[8][2];
extern const uint8_t modified_quant_tab[2][32];
extern const uint16_t ff_h263_format[8][2];
extern const uint8_t ff_modified_quant_tab[2][32];
extern const uint16_t ff_mba_max[6];
extern const uint8_t ff_mba_length[7];
extern uint8_t ff_h263_static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3];
int h263_decode_motion(MpegEncContext * s, int pred, int f_code);
int ff_h263_decode_motion(MpegEncContext * s, int pred, int f_code);
av_const int ff_h263_aspect_to_info(AVRational aspect);
int ff_h263_decode_init(AVCodecContext *avctx);
int ff_h263_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt);
int ff_h263_decode_end(AVCodecContext *avctx);
void h263_encode_mb(MpegEncContext *s,
DCTELEM block[6][64],
int motion_x, int motion_y);
void h263_encode_picture_header(MpegEncContext *s, int picture_number);
void h263_encode_gob_header(MpegEncContext * s, int mb_line);
int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
int *px, int *py);
void h263_encode_init(MpegEncContext *s);
void h263_decode_init_vlc(MpegEncContext *s);
int h263_decode_picture_header(MpegEncContext *s);
void ff_h263_encode_mb(MpegEncContext *s,
DCTELEM block[6][64],
int motion_x, int motion_y);
void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number);
void ff_h263_encode_gob_header(MpegEncContext * s, int mb_line);
int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
int *px, int *py);
void ff_h263_encode_init(MpegEncContext *s);
void ff_h263_decode_init_vlc(MpegEncContext *s);
int ff_h263_decode_picture_header(MpegEncContext *s);
int ff_h263_decode_gob_header(MpegEncContext *s);
void ff_h263_update_motion_val(MpegEncContext * s);
void ff_h263_loop_filter(MpegEncContext * s);
int ff_h263_decode_mba(MpegEncContext *s);
void ff_h263_encode_mba(MpegEncContext *s);
void ff_init_qscale_tab(MpegEncContext *s);
int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr);
void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n);
int ff_h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr);
void ff_h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n);
/**
@ -119,7 +119,7 @@ static inline int h263_get_motion_length(MpegEncContext * s, int val, int f_code
int l, bit_size, code;
if (val == 0) {
return mvtab[0][1];
return ff_mvtab[0][1];
} else {
bit_size = f_code - 1;
/* modulo encoding */
@ -128,7 +128,7 @@ static inline int h263_get_motion_length(MpegEncContext * s, int val, int f_code
val--;
code = (val >> bit_size) + 1;
return mvtab[code][1] + 1 + bit_size;
return ff_mvtab[code][1] + 1 + bit_size;
}
}

@ -57,7 +57,7 @@ const uint8_t ff_h263_inter_MCBPC_bits[28] = {
11, 13, 13, 13,/* inter4Q*/
};
const uint8_t h263_mbtype_b_tab[15][2] = {
const uint8_t ff_h263_mbtype_b_tab[15][2] = {
{1, 1},
{3, 3},
{1, 5},
@ -75,7 +75,7 @@ const uint8_t h263_mbtype_b_tab[15][2] = {
{1, 8},
};
const uint8_t cbpc_b_tab[4][2] = {
const uint8_t ff_cbpc_b_tab[4][2] = {
{0, 1},
{2, 2},
{7, 3},
@ -88,7 +88,7 @@ const uint8_t ff_h263_cbpy_tab[16][2] =
{2,5}, {3,6}, {5,4}, {10,4}, {4,4}, {8,4}, {6,4}, {3,2}
};
const uint8_t mvtab[33][2] =
const uint8_t ff_mvtab[33][2] =
{
{1,1}, {1,2}, {1,3}, {1,4}, {3,6}, {5,7}, {4,7}, {3,7},
{11,9}, {10,9}, {9,9}, {17,10}, {16,10}, {15,10}, {14,10}, {13,10},
@ -98,7 +98,7 @@ const uint8_t mvtab[33][2] =
};
/* third non intra table */
const uint16_t inter_vlc[103][2] = {
const uint16_t ff_inter_vlc[103][2] = {
{ 0x2, 2 },{ 0xf, 4 },{ 0x15, 6 },{ 0x17, 7 },
{ 0x1f, 8 },{ 0x25, 9 },{ 0x24, 9 },{ 0x21, 10 },
{ 0x20, 10 },{ 0x7, 11 },{ 0x6, 11 },{ 0x20, 11 },
@ -127,7 +127,7 @@ const uint16_t inter_vlc[103][2] = {
{ 0x5e, 12 },{ 0x5f, 12 },{ 0x3, 7 },
};
const int8_t inter_level[102] = {
const int8_t ff_inter_level[102] = {
1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 1, 2, 3, 4,
5, 6, 1, 2, 3, 4, 1, 2,
@ -143,7 +143,7 @@ const int8_t inter_level[102] = {
1, 1, 1, 1, 1, 1,
};
const int8_t inter_run[102] = {
const int8_t ff_inter_run[102] = {
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1,
1, 1, 2, 2, 2, 2, 3, 3,
@ -162,9 +162,9 @@ const int8_t inter_run[102] = {
RLTable ff_h263_rl_inter = {
102,
58,
inter_vlc,
inter_run,
inter_level,
ff_inter_vlc,
ff_inter_run,
ff_inter_level,
};
static const uint16_t intra_vlc_aic[103][2] = {
@ -228,7 +228,7 @@ static const int8_t intra_level_aic[102] = {
1, 1, 1, 1, 1, 1,
};
RLTable rl_intra_aic = {
RLTable ff_rl_intra_aic = {
102,
58,
intra_vlc_aic,
@ -236,7 +236,7 @@ RLTable rl_intra_aic = {
intra_level_aic,
};
const uint16_t h263_format[8][2] = {
const uint16_t ff_h263_format[8][2] = {
{ 0, 0 },
{ 128, 96 },
{ 176, 144 },
@ -250,7 +250,7 @@ const uint8_t ff_aic_dc_scale_table[32]={
0, 2, 4, 6, 8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62
};
const uint8_t modified_quant_tab[2][32]={
const uint8_t ff_modified_quant_tab[2][32]={
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
{
0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9,10,11,12,13,14,15,16,17,18,18,19,20,21,22,23,24,25,26,27,28

@ -56,7 +56,7 @@ av_cold int ff_h263_decode_init(AVCodecContext *avctx)
s->workaround_bugs= avctx->workaround_bugs;
// set defaults
MPV_decode_defaults(s);
ff_MPV_decode_defaults(s);
s->quant_precision=5;
s->decode_mb= ff_h263_decode_mb;
s->low_delay= 1;
@ -112,10 +112,10 @@ av_cold int ff_h263_decode_init(AVCodecContext *avctx)
/* for h263, we allocate the images after having read the header */
if (avctx->codec->id != CODEC_ID_H263 && avctx->codec->id != CODEC_ID_MPEG4)
if (MPV_common_init(s) < 0)
if (ff_MPV_common_init(s) < 0)
return -1;
h263_decode_init_vlc(s);
ff_h263_decode_init_vlc(s);
return 0;
}
@ -124,7 +124,7 @@ av_cold int ff_h263_decode_end(AVCodecContext *avctx)
{
MpegEncContext *s = avctx->priv_data;
MPV_common_end(s);
ff_MPV_common_end(s);
return 0;
}
@ -222,7 +222,7 @@ static int decode_slice(MpegEncContext *s){
if(ret<0){
const int xy= s->mb_x + s->mb_y*s->mb_stride;
if(ret==SLICE_END){
MPV_decode_mb(s, s->block);
ff_MPV_decode_mb(s, s->block);
if(s->loop_filter)
ff_h263_loop_filter(s);
@ -234,7 +234,7 @@ static int decode_slice(MpegEncContext *s){
if(++s->mb_x >= s->mb_width){
s->mb_x=0;
ff_draw_horiz_band(s, s->mb_y*mb_size, mb_size);
MPV_report_decode_progress(s);
ff_MPV_report_decode_progress(s);
s->mb_y++;
}
return 0;
@ -249,13 +249,13 @@ static int decode_slice(MpegEncContext *s){
return -1;
}
MPV_decode_mb(s, s->block);
ff_MPV_decode_mb(s, s->block);
if(s->loop_filter)
ff_h263_loop_filter(s);
}
ff_draw_horiz_band(s, s->mb_y*mb_size, mb_size);
MPV_report_decode_progress(s);
ff_MPV_report_decode_progress(s);
s->mb_x= 0;
}
@ -404,7 +404,7 @@ retry:
s->bitstream_buffer_size=0;
if (!s->context_initialized) {
if (MPV_common_init(s) < 0) //we need the idct permutaton for reading a custom matrix
if (ff_MPV_common_init(s) < 0) //we need the idct permutaton for reading a custom matrix
return -1;
}
@ -421,7 +421,7 @@ retry:
if (CONFIG_WMV2_DECODER && s->msmpeg4_version==5) {
ret= ff_wmv2_decode_picture_header(s);
} else if (CONFIG_MSMPEG4_DECODER && s->msmpeg4_version) {
ret = msmpeg4_decode_picture_header(s);
ret = ff_msmpeg4_decode_picture_header(s);
} else if (CONFIG_MPEG4_DECODER && s->h263_pred) {
if(s->avctx->extradata_size && s->picture_number==0){
GetBitContext gb;
@ -435,7 +435,7 @@ retry:
} else if (CONFIG_FLV_DECODER && s->h263_flv) {
ret = ff_flv_decode_picture_header(s);
} else {
ret = h263_decode_picture_header(s);
ret = ff_h263_decode_picture_header(s);
}
if(ret==FRAME_SKIPPED) return get_consumed_bytes(s, buf_size);
@ -591,7 +591,7 @@ retry:
}
s->parse_context.buffer=0;
MPV_common_end(s);
ff_MPV_common_end(s);
s->parse_context= pc;
}
if (!s->context_initialized) {
@ -632,7 +632,7 @@ retry:
s->me.qpel_avg= s->dsp.avg_qpel_pixels_tab;
}
if(MPV_frame_start(s, avctx) < 0)
if(ff_MPV_frame_start(s, avctx) < 0)
return -1;
if (!s->divx_packed) ff_thread_finish_setup(avctx);
@ -650,7 +650,7 @@ retry:
ff_er_frame_start(s);
//the second part of the wmv2 header contains the MB skip bits which are stored in current_picture->mb_type
//which is not available before MPV_frame_start()
//which is not available before ff_MPV_frame_start()
if (CONFIG_WMV2_DECODER && s->msmpeg4_version==5){
ret = ff_wmv2_decode_secondary_picture_header(s);
if(ret<0) return ret;
@ -681,7 +681,7 @@ retry:
}
if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type==AV_PICTURE_TYPE_I)
if(!CONFIG_MSMPEG4_DECODER || msmpeg4_decode_ext_header(s, buf_size) < 0){
if(!CONFIG_MSMPEG4_DECODER || ff_msmpeg4_decode_ext_header(s, buf_size) < 0){
s->error_status_table[s->mb_num-1]= ER_MB_ERROR;
}
@ -722,7 +722,7 @@ intrax8_decoded:
return -1;
}
MPV_frame_end(s);
ff_MPV_frame_end(s);
assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
assert(s->current_picture.f.pict_type == s->pict_type);

@ -1051,7 +1051,7 @@ static av_cold void common_init(H264Context *h){
s->unrestricted_mv=1;
s->dsp.dct_bits = 16;
dsputil_init(&s->dsp, s->avctx); // needed so that idct permutation is known early
ff_dsputil_init(&s->dsp, s->avctx); // needed so that idct permutation is known early
memset(h->pps.scaling_matrix4, 16, 6*16*sizeof(uint8_t));
memset(h->pps.scaling_matrix8, 16, 2*64*sizeof(uint8_t));
@ -1117,7 +1117,7 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx){
MpegEncContext * const s = &h->s;
int i;
MPV_decode_defaults(s);
ff_MPV_decode_defaults(s);
s->avctx = avctx;
common_init(h);
@ -1303,11 +1303,11 @@ int ff_h264_frame_start(H264Context *h){
int i;
const int pixel_shift = h->pixel_shift;
if(MPV_frame_start(s, s->avctx) < 0)
if(ff_MPV_frame_start(s, s->avctx) < 0)
return -1;
ff_er_frame_start(s);
/*
* MPV_frame_start uses pict_type to derive key_frame.
* ff_MPV_frame_start uses pict_type to derive key_frame.
* This is incorrect for H.264; IDR markings must be used.
* Zero here; IDR markings per slice in frame or fields are ORed in later.
* See decode_nal_units().
@ -1342,7 +1342,7 @@ int ff_h264_frame_start(H264Context *h){
// We mark the current picture as non-reference after allocating it, so
// that if we break out due to an error it can be released automatically
// in the next MPV_frame_start().
// in the next ff_MPV_frame_start().
// SVQ3 as well as most other codecs have only last/next/current and thus
// get released even with set reference, besides SVQ3 and others do not
// mark frames as reference later "naturally".
@ -2549,7 +2549,7 @@ static int field_end(H264Context *h, int in_setup){
if (!FIELD_PICTURE)
ff_er_frame_end(s);
MPV_frame_end(s);
ff_MPV_frame_end(s);
h->current_slice=0;
@ -2612,7 +2612,7 @@ int ff_h264_get_profile(SPS *sps)
/**
* Decode a slice header.
* This will also call MPV_common_init() and frame_start() as needed.
* This will also call ff_MPV_common_init() and frame_start() as needed.
*
* @param h h264context
* @param h0 h264 master context (differs from 'h' when doing sliced based parallel decoding)
@ -2720,7 +2720,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
}
free_tables(h, 0);
flush_dpb(s->avctx);
MPV_common_end(s);
ff_MPV_common_end(s);
h->list_count = 0;
}
if (!s->context_initialized) {
@ -2745,7 +2745,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
ff_h264dsp_init(&h->h264dsp, h->sps.bit_depth_luma, h->sps.chroma_format_idc);
ff_h264_pred_init(&h->hpc, s->codec_id, h->sps.bit_depth_luma, h->sps.chroma_format_idc);
s->dsp.dct_bits = h->sps.bit_depth_luma > 8 ? 32 : 16;
dsputil_init(&s->dsp, s->avctx);
ff_dsputil_init(&s->dsp, s->avctx);
} else {
av_log(s->avctx, AV_LOG_ERROR, "Unsupported bit depth: %d chroma_idc: %d\n",
h->sps.bit_depth_luma, h->sps.chroma_format_idc);
@ -2816,8 +2816,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
s->avctx->hwaccel = ff_find_hwaccel(s->avctx->codec->id, s->avctx->pix_fmt);
if (MPV_common_init(s) < 0) {
av_log(h->s.avctx, AV_LOG_ERROR, "MPV_common_init() failed.\n");
if (ff_MPV_common_init(s) < 0) {
av_log(h->s.avctx, AV_LOG_ERROR, "ff_MPV_common_init() failed.\n");
return -1;
}
s->first_field = 0;
@ -4169,7 +4169,7 @@ av_cold int ff_h264_decode_end(AVCodecContext *avctx)
ff_h264_remove_all_refs(h);
ff_h264_free_context(h);
MPV_common_end(s);
ff_MPV_common_end(s);
// memset(h, 0, sizeof(H264Context));

@ -61,7 +61,7 @@ static int build_huff_tree(VLC *vlc, Node *nodes, int head, int flags)
int pos = 0;
get_tree_codes(bits, lens, xlat, nodes, head, 0, 0, &pos, no_zero_count);
return init_vlc_sparse(vlc, 9, pos, lens, 2, 2, bits, 4, 4, xlat, 1, 1, 0);
return ff_init_vlc_sparse(vlc, 9, pos, lens, 2, 2, bits, 4, 4, xlat, 1, 1, 0);
}

@ -322,8 +322,8 @@ static void generate_joint_tables(HYuvContext *s){
i++;
}
}
free_vlc(&s->vlc[3+p]);
init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
ff_free_vlc(&s->vlc[3+p]);
ff_init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
}
}else{
uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
@ -363,7 +363,7 @@ static void generate_joint_tables(HYuvContext *s){
}
}
}
free_vlc(&s->vlc[3]);
ff_free_vlc(&s->vlc[3]);
init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
}
}
@ -380,7 +380,7 @@ static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
if(generate_bits_table(s->bits[i], s->len[i])<0){
return -1;
}
free_vlc(&s->vlc[i]);
ff_free_vlc(&s->vlc[i]);
init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
}
@ -412,7 +412,7 @@ static int read_old_huffman_tables(HYuvContext *s){
memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
for(i=0; i<3; i++){
free_vlc(&s->vlc[i]);
ff_free_vlc(&s->vlc[i]);
init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
}
@ -443,7 +443,7 @@ static av_cold int common_init(AVCodecContext *avctx){
s->avctx= avctx;
s->flags= avctx->flags;
dsputil_init(&s->dsp, avctx);
ff_dsputil_init(&s->dsp, avctx);
s->width= avctx->width;
s->height= avctx->height;
@ -1253,7 +1253,7 @@ static av_cold int decode_end(AVCodecContext *avctx)
av_freep(&s->bitstream_buffer);
for(i=0; i<6; i++){
free_vlc(&s->vlc[i]);
ff_free_vlc(&s->vlc[i]);
}
return 0;

@ -166,7 +166,7 @@ static av_cold int imc_decode_init(AVCodecContext * avctx)
av_log(avctx, AV_LOG_INFO, "FFT init failed\n");
return ret;
}
dsputil_init(&q->dsp, avctx);
ff_dsputil_init(&q->dsp, avctx);
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
avctx->channel_layout = AV_CH_LAYOUT_MONO;

@ -995,7 +995,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
build_requant_tab();
dsputil_init(&ctx->dsp, avctx);
ff_dsputil_init(&ctx->dsp, avctx);
return allocate_frame_buffers(ctx, avctx);
}

@ -816,7 +816,7 @@ static av_cold int decode_close(AVCodecContext *avctx)
ff_ivi_free_buffers(&ctx->planes[0]);
if (ctx->mb_vlc.cust_tab.table)
free_vlc(&ctx->mb_vlc.cust_tab);
ff_free_vlc(&ctx->mb_vlc.cust_tab);
if (ctx->frame.data[0])
avctx->release_buffer(avctx, &ctx->frame);

@ -65,8 +65,8 @@ int ff_intel_h263_decode_picture_header(MpegEncContext *s)
s->pb_frame = get_bits1(&s->gb);
if (format < 6) {
s->width = h263_format[format][0];
s->height = h263_format[format][1];
s->width = ff_h263_format[format][0];
s->height = ff_h263_format[format][1];
s->avctx->sample_aspect_ratio.num = 12;
s->avctx->sample_aspect_ratio.den = 11;
} else {

@ -1017,7 +1017,7 @@ static av_cold int ipvideo_decode_init(AVCodecContext *avctx)
s->is_16bpp = avctx->bits_per_coded_sample == 16;
avctx->pix_fmt = s->is_16bpp ? PIX_FMT_RGB555 : PIX_FMT_PAL8;
dsputil_init(&s->dsp, avctx);
ff_dsputil_init(&s->dsp, avctx);
avcodec_get_frame_defaults(&s->second_last_frame);
avcodec_get_frame_defaults(&s->last_frame);

@ -696,9 +696,9 @@ av_cold void ff_intrax8_common_init(IntraX8Context * w, MpegEncContext * const s
assert(s->mb_width>0);
w->prediction_table=av_mallocz(s->mb_width*2*2);//two rows, 2 blocks per cannon mb
ff_init_scantable(s->dsp.idct_permutation, &w->scantable[0], wmv1_scantable[0]);
ff_init_scantable(s->dsp.idct_permutation, &w->scantable[1], wmv1_scantable[2]);
ff_init_scantable(s->dsp.idct_permutation, &w->scantable[2], wmv1_scantable[3]);
ff_init_scantable(s->dsp.idct_permutation, &w->scantable[0], ff_wmv1_scantable[0]);
ff_init_scantable(s->dsp.idct_permutation, &w->scantable[1], ff_wmv1_scantable[2]);
ff_init_scantable(s->dsp.idct_permutation, &w->scantable[2], ff_wmv1_scantable[3]);
}
/**
@ -721,7 +721,7 @@ av_cold void ff_intrax8_common_end(IntraX8Context * w)
* @param dquant doubled quantizer, it would be odd in case of VC-1 halfpq==1.
* @param quant_offset offset away from zero
*/
//FIXME extern uint8_t wmv3_dc_scale_table[32];
//FIXME extern uint8_t ff_wmv3_dc_scale_table[32];
int ff_intrax8_decode_picture(IntraX8Context * const w, int dquant, int quant_offset){
MpegEncContext * const s= w->s;
int mb_xy;

@ -103,7 +103,7 @@ static VLC cbpc_b_vlc;
/* init vlcs */
/* XXX: find a better solution to handle static init */
void h263_decode_init_vlc(MpegEncContext *s)
void ff_h263_decode_init_vlc(MpegEncContext *s)
{
static int done = 0;
@ -120,18 +120,18 @@ void h263_decode_init_vlc(MpegEncContext *s)
&ff_h263_cbpy_tab[0][1], 2, 1,
&ff_h263_cbpy_tab[0][0], 2, 1, 64);
INIT_VLC_STATIC(&mv_vlc, MV_VLC_BITS, 33,
&mvtab[0][1], 2, 1,
&mvtab[0][0], 2, 1, 538);
init_rl(&ff_h263_rl_inter, ff_h263_static_rl_table_store[0]);
init_rl(&rl_intra_aic, ff_h263_static_rl_table_store[1]);
&ff_mvtab[0][1], 2, 1,
&ff_mvtab[0][0], 2, 1, 538);
ff_init_rl(&ff_h263_rl_inter, ff_h263_static_rl_table_store[0]);
ff_init_rl(&ff_rl_intra_aic, ff_h263_static_rl_table_store[1]);
INIT_VLC_RL(ff_h263_rl_inter, 554);
INIT_VLC_RL(rl_intra_aic, 554);
INIT_VLC_RL(ff_rl_intra_aic, 554);
INIT_VLC_STATIC(&h263_mbtype_b_vlc, H263_MBTYPE_B_VLC_BITS, 15,
&h263_mbtype_b_tab[0][1], 2, 1,
&h263_mbtype_b_tab[0][0], 2, 1, 80);
&ff_h263_mbtype_b_tab[0][1], 2, 1,
&ff_h263_mbtype_b_tab[0][0], 2, 1, 80);
INIT_VLC_STATIC(&cbpc_b_vlc, CBPC_B_VLC_BITS, 4,
&cbpc_b_tab[0][1], 2, 1,
&cbpc_b_tab[0][0], 2, 1, 8);
&ff_cbpc_b_tab[0][1], 2, 1,
&ff_cbpc_b_tab[0][0], 2, 1, 8);
}
}
@ -240,7 +240,7 @@ int ff_h263_resync(MpegEncContext *s){
if(show_bits(&s->gb, 16)==0){
pos= get_bits_count(&s->gb);
if(CONFIG_MPEG4_DECODER && s->codec_id==CODEC_ID_MPEG4)
ret= mpeg4_decode_video_packet_header(s);
ret= ff_mpeg4_decode_video_packet_header(s);
else
ret= h263_decode_gob_header(s);
if(ret>=0)
@ -257,7 +257,7 @@ int ff_h263_resync(MpegEncContext *s){
pos= get_bits_count(&s->gb);
if(CONFIG_MPEG4_DECODER && s->codec_id==CODEC_ID_MPEG4)
ret= mpeg4_decode_video_packet_header(s);
ret= ff_mpeg4_decode_video_packet_header(s);
else
ret= h263_decode_gob_header(s);
if(ret>=0)
@ -271,7 +271,7 @@ int ff_h263_resync(MpegEncContext *s){
return -1;
}
int h263_decode_motion(MpegEncContext * s, int pred, int f_code)
int ff_h263_decode_motion(MpegEncContext * s, int pred, int f_code)
{
int code, val, sign, shift;
code = get_vlc2(&s->gb, mv_vlc.table, MV_VLC_BITS, 2);
@ -381,16 +381,16 @@ static void preview_obmc(MpegEncContext *s){
if ((cbpc & 16) == 0) {
s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */
mot_val= h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
mot_val= ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
if (s->umvplus)
mx = h263p_decode_umotion(s, pred_x);
else
mx = h263_decode_motion(s, pred_x, 1);
mx = ff_h263_decode_motion(s, pred_x, 1);
if (s->umvplus)
my = h263p_decode_umotion(s, pred_y);
else
my = h263_decode_motion(s, pred_y, 1);
my = ff_h263_decode_motion(s, pred_y, 1);
mot_val[0 ]= mot_val[2 ]=
mot_val[0+stride]= mot_val[2+stride]= mx;
@ -399,16 +399,16 @@ static void preview_obmc(MpegEncContext *s){
} else {
s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
for(i=0;i<4;i++) {
mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
if (s->umvplus)
mx = h263p_decode_umotion(s, pred_x);
else
mx = h263_decode_motion(s, pred_x, 1);
mx = ff_h263_decode_motion(s, pred_x, 1);
if (s->umvplus)
my = h263p_decode_umotion(s, pred_y);
else
my = h263_decode_motion(s, pred_y, 1);
my = ff_h263_decode_motion(s, pred_y, 1);
if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1)
skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */
mot_val[0] = mx;
@ -432,7 +432,7 @@ static void h263_decode_dquant(MpegEncContext *s){
if(s->modified_quant){
if(get_bits1(&s->gb))
s->qscale= modified_quant_tab[get_bits1(&s->gb)][ s->qscale ];
s->qscale= ff_modified_quant_tab[get_bits1(&s->gb)][ s->qscale ];
else
s->qscale= get_bits(&s->gb, 5);
}else
@ -450,7 +450,7 @@ static int h263_decode_block(MpegEncContext * s, DCTELEM * block,
scan_table = s->intra_scantable.permutated;
if (s->h263_aic && s->mb_intra) {
rl = &rl_intra_aic;
rl = &ff_rl_intra_aic;
i = 0;
if (s->ac_pred) {
if (s->h263_aic_dir)
@ -467,7 +467,7 @@ static int h263_decode_block(MpegEncContext * s, DCTELEM * block,
component = (n <= 3 ? 0 : n - 4 + 1);
level = s->last_dc[component];
if (s->rv10_first_dc_coded[component]) {
diff = rv_decode_dc(s, n);
diff = ff_rv_decode_dc(s, n);
if (diff == 0xffff)
return -1;
level += diff;
@ -539,7 +539,7 @@ retry:
if (i >= 64){
if(s->alt_inter_vlc && rl == &ff_h263_rl_inter && !s->mb_intra){
//Looks like a hack but no, it's the way it is supposed to work ...
rl = &rl_intra_aic;
rl = &ff_rl_intra_aic;
i = 0;
s->gb= gb;
s->dsp.clear_block(block);
@ -556,7 +556,7 @@ retry:
}
not_coded:
if (s->mb_intra && s->h263_aic) {
h263_pred_acdc(s, block, n);
ff_h263_pred_acdc(s, block, n);
i = 63;
}
s->block_last_index[n] = i;
@ -655,11 +655,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */
s->mv_type = MV_TYPE_16X16;
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
if (s->umvplus)
mx = h263p_decode_umotion(s, pred_x);
else
mx = h263_decode_motion(s, pred_x, 1);
mx = ff_h263_decode_motion(s, pred_x, 1);
if (mx >= 0xffff)
return -1;
@ -667,7 +667,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
if (s->umvplus)
my = h263p_decode_umotion(s, pred_y);
else
my = h263_decode_motion(s, pred_y, 1);
my = ff_h263_decode_motion(s, pred_y, 1);
if (my >= 0xffff)
return -1;
@ -680,18 +680,18 @@ int ff_h263_decode_mb(MpegEncContext *s,
s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
s->mv_type = MV_TYPE_8X8;
for(i=0;i<4;i++) {
mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
if (s->umvplus)
mx = h263p_decode_umotion(s, pred_x);
else
mx = h263_decode_motion(s, pred_x, 1);
mx = ff_h263_decode_motion(s, pred_x, 1);
if (mx >= 0xffff)
return -1;
if (s->umvplus)
my = h263p_decode_umotion(s, pred_y);
else
my = h263_decode_motion(s, pred_y, 1);
my = ff_h263_decode_motion(s, pred_y, 1);
if (my >= 0xffff)
return -1;
s->mv[0][i][0] = mx;
@ -763,11 +763,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
//FIXME UMV
if(USES_LIST(mb_type, 0)){
int16_t *mot_val= h263_pred_motion(s, 0, 0, &mx, &my);
int16_t *mot_val= ff_h263_pred_motion(s, 0, 0, &mx, &my);
s->mv_dir = MV_DIR_FORWARD;
mx = h263_decode_motion(s, mx, 1);
my = h263_decode_motion(s, my, 1);
mx = ff_h263_decode_motion(s, mx, 1);
my = ff_h263_decode_motion(s, my, 1);
s->mv[0][0][0] = mx;
s->mv[0][0][1] = my;
@ -776,11 +776,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
}
if(USES_LIST(mb_type, 1)){
int16_t *mot_val= h263_pred_motion(s, 0, 1, &mx, &my);
int16_t *mot_val= ff_h263_pred_motion(s, 0, 1, &mx, &my);
s->mv_dir |= MV_DIR_BACKWARD;
mx = h263_decode_motion(s, mx, 1);
my = h263_decode_motion(s, my, 1);
mx = ff_h263_decode_motion(s, mx, 1);
my = ff_h263_decode_motion(s, my, 1);
s->mv[1][0][0] = mx;
s->mv[1][0][1] = my;
@ -831,8 +831,8 @@ intra:
}
while(pb_mv_count--){
h263_decode_motion(s, 0, 1);
h263_decode_motion(s, 0, 1);
ff_h263_decode_motion(s, 0, 1);
ff_h263_decode_motion(s, 0, 1);
}
/* decode each block */
@ -866,7 +866,7 @@ end:
}
/* most is hardcoded. should extend to handle all h263 streams */
int h263_decode_picture_header(MpegEncContext *s)
int ff_h263_decode_picture_header(MpegEncContext *s)
{
int format, width, height, i;
uint32_t startcode;
@ -918,8 +918,8 @@ int h263_decode_picture_header(MpegEncContext *s)
if (format != 7 && format != 6) {
s->h263_plus = 0;
/* H.263v1 */
width = h263_format[format][0];
height = h263_format[format][1];
width = ff_h263_format[format][0];
height = ff_h263_format[format][1];
if (!width)
return -1;
@ -1026,8 +1026,8 @@ int h263_decode_picture_header(MpegEncContext *s)
s->avctx->sample_aspect_ratio= ff_h263_pixel_aspect[s->aspect_ratio_info];
}
} else {
width = h263_format[format][0];
height = h263_format[format][1];
width = ff_h263_format[format][0];
height = ff_h263_format[format][1];
s->avctx->sample_aspect_ratio= (AVRational){12,11};
}
if ((width == 0) || (height == 0))

@ -102,7 +102,7 @@ av_const int ff_h263_aspect_to_info(AVRational aspect){
return FF_ASPECT_EXTENDED;
}
void h263_encode_picture_header(MpegEncContext * s, int picture_number)
void ff_h263_encode_picture_header(MpegEncContext * s, int picture_number)
{
int format, coded_frame_rate, coded_frame_rate_base, i, temp_ref;
int best_clock_code=1;
@ -141,7 +141,7 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number)
put_bits(&s->pb, 1, 0); /* camera off */
put_bits(&s->pb, 1, 0); /* freeze picture release off */
format = ff_match_2uint16(h263_format, FF_ARRAY_ELEMS(h263_format), s->width, s->height);
format = ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format), s->width, s->height);
if (!s->h263_plus) {
/* H.263v1 */
put_bits(&s->pb, 3, format);
@ -247,7 +247,7 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number)
/**
* Encode a group of blocks header.
*/
void h263_encode_gob_header(MpegEncContext * s, int mb_line)
void ff_h263_encode_gob_header(MpegEncContext * s, int mb_line)
{
put_bits(&s->pb, 17, 1); /* GBSC */
@ -333,7 +333,7 @@ static void h263_encode_block(MpegEncContext * s, DCTELEM * block, int n)
} else {
i = 0;
if (s->h263_aic && s->mb_intra)
rl = &rl_intra_aic;
rl = &ff_rl_intra_aic;
if(s->alt_inter_vlc && !s->mb_intra){
int aic_vlc_bits=0;
@ -353,14 +353,14 @@ static void h263_encode_block(MpegEncContext * s, DCTELEM * block, int n)
if(level<0) level= -level;
code = get_rl_index(rl, last, run, level);
aic_code = get_rl_index(&rl_intra_aic, last, run, level);
aic_code = get_rl_index(&ff_rl_intra_aic, last, run, level);
inter_vlc_bits += rl->table_vlc[code][1]+1;
aic_vlc_bits += rl_intra_aic.table_vlc[aic_code][1]+1;
aic_vlc_bits += ff_rl_intra_aic.table_vlc[aic_code][1]+1;
if (code == rl->n) {
inter_vlc_bits += 1+6+8-1;
}
if (aic_code == rl_intra_aic.n) {
if (aic_code == ff_rl_intra_aic.n) {
aic_vlc_bits += 1+6+8-1;
wrong_pos += run + 1;
}else
@ -370,7 +370,7 @@ static void h263_encode_block(MpegEncContext * s, DCTELEM * block, int n)
}
i = 0;
if(aic_vlc_bits < inter_vlc_bits && wrong_pos > 63)
rl = &rl_intra_aic;
rl = &ff_rl_intra_aic;
}
}
@ -454,9 +454,9 @@ static void h263p_encode_umotion(MpegEncContext * s, int val)
}
}
void h263_encode_mb(MpegEncContext * s,
DCTELEM block[6][64],
int motion_x, int motion_y)
void ff_h263_encode_mb(MpegEncContext * s,
DCTELEM block[6][64],
int motion_x, int motion_y)
{
int cbpc, cbpy, i, cbp, pred_x, pred_y;
int16_t pred_dc;
@ -500,7 +500,7 @@ void h263_encode_mb(MpegEncContext * s,
}
/* motion vectors: 16x16 mode */
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
if (!s->umvplus) {
ff_h263_encode_motion_vector(s, motion_x - pred_x,
@ -527,7 +527,7 @@ void h263_encode_mb(MpegEncContext * s,
for(i=0; i<4; i++){
/* motion vectors: 8x8 mode*/
h263_pred_motion(s, i, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
motion_x = s->current_picture.f.motion_val[0][s->block_index[i]][0];
motion_y = s->current_picture.f.motion_val[0][s->block_index[i]][1];
@ -561,7 +561,7 @@ void h263_encode_mb(MpegEncContext * s,
if(i<4) scale= s->y_dc_scale;
else scale= s->c_dc_scale;
pred_dc = h263_pred_dc(s, i, &dc_ptr[i]);
pred_dc = ff_h263_pred_dc(s, i, &dc_ptr[i]);
level -= pred_dc;
/* Quant */
if (level >= 0)
@ -662,7 +662,7 @@ void ff_h263_encode_motion(MpegEncContext * s, int val, int f_code)
if (val == 0) {
/* zero vector */
code = 0;
put_bits(&s->pb, mvtab[code][1], mvtab[code][0]);
put_bits(&s->pb, ff_mvtab[code][1], ff_mvtab[code][0]);
} else {
bit_size = f_code - 1;
range = 1 << bit_size;
@ -676,7 +676,7 @@ void ff_h263_encode_motion(MpegEncContext * s, int val, int f_code)
code = (val >> bit_size) + 1;
bits = val & (range - 1);
put_bits(&s->pb, mvtab[code][1] + 1, (mvtab[code][0] << 1) | sign);
put_bits(&s->pb, ff_mvtab[code][1] + 1, (ff_mvtab[code][0] << 1) | sign);
if (bit_size > 0) {
put_bits(&s->pb, bit_size, bits);
}
@ -692,7 +692,7 @@ static void init_mv_penalty_and_fcode(MpegEncContext *s)
for(mv=-MAX_MV; mv<=MAX_MV; mv++){
int len;
if(mv==0) len= mvtab[0][1];
if(mv==0) len= ff_mvtab[0][1];
else{
int val, bit_size, code;
@ -704,9 +704,9 @@ static void init_mv_penalty_and_fcode(MpegEncContext *s)
val--;
code = (val >> bit_size) + 1;
if(code<33){
len= mvtab[code][1] + 1 + bit_size;
len= ff_mvtab[code][1] + 1 + bit_size;
}else{
len= mvtab[32][1] + av_log2(code>>5) + 2 + bit_size;
len= ff_mvtab[32][1] + av_log2(code>>5) + 2 + bit_size;
}
}
@ -768,17 +768,17 @@ static void init_uni_h263_rl_tab(RLTable *rl, uint32_t *bits_tab, uint8_t *len_t
}
}
void h263_encode_init(MpegEncContext *s)
void ff_h263_encode_init(MpegEncContext *s)
{
static int done = 0;
if (!done) {
done = 1;
init_rl(&ff_h263_rl_inter, ff_h263_static_rl_table_store[0]);
init_rl(&rl_intra_aic, ff_h263_static_rl_table_store[1]);
ff_init_rl(&ff_h263_rl_inter, ff_h263_static_rl_table_store[0]);
ff_init_rl(&ff_rl_intra_aic, ff_h263_static_rl_table_store[1]);
init_uni_h263_rl_tab(&rl_intra_aic, NULL, uni_h263_intra_aic_rl_len);
init_uni_h263_rl_tab(&ff_rl_intra_aic, NULL, uni_h263_intra_aic_rl_len);
init_uni_h263_rl_tab(&ff_h263_rl_inter , NULL, uni_h263_inter_rl_len);
init_mv_penalty_and_fcode(s);
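Since this view interleaves the pre- and post-rename lines without +/- markers, here is the post-merge shape of the table setup in ff_h263_encode_init() as a reading aid; only names visible in the hunk are used, and the rest of the function body is omitted.

/* Post-merge sketch of the one-time init guarded by a static flag;
 * truncated where the hunk above is truncated. */
void ff_h263_encode_init(MpegEncContext *s)
{
    static int done = 0;

    if (!done) {
        done = 1;

        /* build both run-level tables once into static storage */
        ff_init_rl(&ff_h263_rl_inter, ff_h263_static_rl_table_store[0]);
        ff_init_rl(&ff_rl_intra_aic,  ff_h263_static_rl_table_store[1]);

        init_uni_h263_rl_tab(&ff_rl_intra_aic,  NULL, uni_h263_intra_aic_rl_len);
        init_uni_h263_rl_tab(&ff_h263_rl_inter, NULL, uni_h263_inter_rl_len);

        init_mv_penalty_and_fcode(s);
    }
    /* ... remainder of the function lies outside this hunk ... */
}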

@ -132,7 +132,7 @@ int ff_ivi_dec_huff_desc(GetBitContext *gb, int desc_coded, int which_tab,
ff_ivi_huff_desc_copy(&huff_tab->cust_desc, &new_huff);
if (huff_tab->cust_tab.table)
free_vlc(&huff_tab->cust_tab);
ff_free_vlc(&huff_tab->cust_tab);
result = ff_ivi_create_huff_from_desc(&huff_tab->cust_desc,
&huff_tab->cust_tab, 0);
if (result) {
@ -237,7 +237,7 @@ void av_cold ff_ivi_free_buffers(IVIPlaneDesc *planes)
av_freep(&planes[p].bands[b].bufs[2]);
if (planes[p].bands[b].blk_vlc.cust_tab.table)
free_vlc(&planes[p].bands[b].blk_vlc.cust_tab);
ff_free_vlc(&planes[p].bands[b].blk_vlc.cust_tab);
for (t = 0; t < planes[p].bands[b].num_tiles; t++)
av_freep(&planes[p].bands[b].tiles[t].mbs);
av_freep(&planes[p].bands[b].tiles);

@ -205,7 +205,7 @@ static av_always_inline void row_fdct(DCTELEM * data){
*/
GLOBAL(void)
fdct_ifast (DCTELEM * data)
ff_fdct_ifast (DCTELEM * data)
{
int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
int tmp10, tmp11, tmp12, tmp13;
@ -271,7 +271,7 @@ fdct_ifast (DCTELEM * data)
*/
GLOBAL(void)
fdct_ifast248 (DCTELEM * data)
ff_fdct_ifast248 (DCTELEM * data)
{
int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
int tmp10, tmp11, tmp12, tmp13;

@ -207,7 +207,7 @@ ones here or successive P-frames will drift too much with Reference frame coding
* Perform the inverse DCT on one block of coefficients.
*/
void j_rev_dct(DCTBLOCK data)
void ff_j_rev_dct(DCTBLOCK data)
{
int32_t tmp0, tmp1, tmp2, tmp3;
int32_t tmp10, tmp11, tmp12, tmp13;
@ -945,7 +945,7 @@ void j_rev_dct(DCTBLOCK data)
#define DCTSIZE 4
#define DCTSTRIDE 8
void j_rev_dct4(DCTBLOCK data)
void ff_j_rev_dct4(DCTBLOCK data)
{
int32_t tmp0, tmp1, tmp2, tmp3;
int32_t tmp10, tmp11, tmp12, tmp13;
@ -1132,7 +1132,7 @@ void j_rev_dct4(DCTBLOCK data)
}
}
void j_rev_dct2(DCTBLOCK data){
void ff_j_rev_dct2(DCTBLOCK data){
int d00, d01, d10, d11;
data[0] += 4;
@ -1147,7 +1147,7 @@ void j_rev_dct2(DCTBLOCK data){
data[1+1*DCTSTRIDE]= (d01 - d11)>>3;
}
void j_rev_dct1(DCTBLOCK data){
void ff_j_rev_dct1(DCTBLOCK data){
data[0] = (data[0] + 4)>>3;
}

@ -41,7 +41,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
{
JvContext *s = avctx->priv_data;
avctx->pix_fmt = PIX_FMT_PAL8;
dsputil_init(&s->dsp, avctx);
ff_dsputil_init(&s->dsp, avctx);
return 0;
}
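The same dsputil_init() -> ff_dsputil_init() substitution recurs in most decoder init functions below (lagarith, mdec, mimic, mjpegdec, mlp, motionpixels, mpc7, mpc8, mpegvideo). A minimal sketch of the pattern; HypoDecContext and hypo_decode_init are placeholder names, while the DSPContext field and the renamed call are what the hunks actually show.

/* Placeholder decoder context: only the DSPContext member matters here. */
typedef struct HypoDecContext {
    DSPContext dsp;
} HypoDecContext;

static av_cold int hypo_decode_init(AVCodecContext *avctx)
{
    HypoDecContext *s = avctx->priv_data;

    ff_dsputil_init(&s->dsp, avctx);   /* renamed from dsputil_init() */
    return 0;
}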

@ -383,7 +383,7 @@ static int lag_decode_arith_plane(LagarithContext *l, uint8_t *dst,
if (lag_read_prob_header(&rac, &gb) < 0)
return -1;
lag_rac_init(&rac, &gb, length - stride);
ff_lag_rac_init(&rac, &gb, length - stride);
for (i = 0; i < height; i++)
read += lag_decode_line(l, &rac, dst + (i * stride), width,
@ -572,7 +572,7 @@ static av_cold int lag_decode_init(AVCodecContext *avctx)
LagarithContext *l = avctx->priv_data;
l->avctx = avctx;
dsputil_init(&l->dsp, avctx);
ff_dsputil_init(&l->dsp, avctx);
return 0;
}

@ -30,7 +30,7 @@
#include "get_bits.h"
#include "lagarithrac.h"
void lag_rac_init(lag_rac *l, GetBitContext *gb, int length)
void ff_lag_rac_init(lag_rac *l, GetBitContext *gb, int length)
{
int i, j;

@ -51,7 +51,7 @@ typedef struct lag_rac {
uint8_t range_hash[256]; /**< Hash table mapping upper byte to approximate symbol. */
} lag_rac;
void lag_rac_init(lag_rac *l, GetBitContext *gb, int length);
void ff_lag_rac_init(lag_rac *l, GetBitContext *gb, int length);
/* TODO: Optimize */
static inline void lag_rac_refill(lag_rac *l)

@ -199,8 +199,8 @@ AVCodec ff_ljpeg_encoder = { //FIXME avoid MPV_* lossless JPEG should not need t
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_LJPEG,
.priv_data_size = sizeof(MpegEncContext),
.init = MPV_encode_init,
.init = ff_MPV_encode_init,
.encode = encode_picture_lossless,
.close = MPV_encode_end,
.close = ff_MPV_encode_end,
.long_name = NULL_IF_CONFIG_SMALL("Lossless JPEG"),
};

@ -214,7 +214,7 @@ static int decode_frame(AVCodecContext *avctx,
static av_cold void mdec_common_init(AVCodecContext *avctx){
MDECContext * const a = avctx->priv_data;
dsputil_init(&a->dsp, avctx);
ff_dsputil_init(&a->dsp, avctx);
a->mb_width = (avctx->coded_width + 15) / 16;
a->mb_height = (avctx->coded_height + 15) / 16;

@ -121,7 +121,7 @@ static av_cold int mimic_decode_init(AVCodecContext *avctx)
av_log(avctx, AV_LOG_ERROR, "error initializing vlc table\n");
return -1;
}
dsputil_init(&ctx->dsp, avctx);
ff_dsputil_init(&ctx->dsp, avctx);
ff_init_scantable(ctx->dsp.idct_permutation, &ctx->scantable, col_zag);
return 0;
@ -411,7 +411,7 @@ static av_cold int mimic_decode_end(AVCodecContext *avctx)
for(i = 0; i < 16; i++)
if(ctx->buf_ptrs[i].data[0])
ff_thread_release_buffer(avctx, &ctx->buf_ptrs[i]);
free_vlc(&ctx->vlc);
ff_free_vlc(&ctx->vlc);
return 0;
}

@ -135,7 +135,7 @@ static void put_pixels16_mmi(uint8_t *block, const uint8_t *pixels, int line_siz
}
void dsputil_init_mmi(DSPContext* c, AVCodecContext *avctx)
void ff_dsputil_init_mmi(DSPContext* c, AVCodecContext *avctx)
{
const int idct_algo= avctx->idct_algo;
const int high_bit_depth = avctx->bits_per_raw_sample > 8;

@ -80,7 +80,7 @@ static void dct_unquantize_h263_mmi(MpegEncContext *s,
}
void MPV_common_init_mmi(MpegEncContext *s)
void ff_MPV_common_init_mmi(MpegEncContext *s)
{
s->dct_unquantize_h263_intra =
s->dct_unquantize_h263_inter = dct_unquantize_h263_mmi;

@ -63,8 +63,8 @@ static int build_vlc(VLC *vlc, const uint8_t *bits_table,
if (is_ac)
huff_sym[0] = 16 * 256;
return init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1,
huff_code, 2, 2, huff_sym, 2, 2, use_static);
return ff_init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1,
huff_code, 2, 2, huff_sym, 2, 2, use_static);
}
static void build_basic_mjpeg_vlc(MJpegDecodeContext *s)
@ -92,7 +92,7 @@ av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
avcodec_get_frame_defaults(&s->picture);
s->avctx = avctx;
dsputil_init(&s->dsp, avctx);
ff_dsputil_init(&s->dsp, avctx);
ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);
s->buffer_size = 0;
s->buffer = NULL;
@ -193,7 +193,7 @@ int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
len -= n;
/* build VLC and flush previous vlc if present */
free_vlc(&s->vlcs[class][index]);
ff_free_vlc(&s->vlcs[class][index]);
av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
class, index, code_max + 1);
if (build_vlc(&s->vlcs[class][index], bits_table, val_table,
@ -201,7 +201,7 @@ int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
return -1;
if (class > 0) {
free_vlc(&s->vlcs[2][index]);
ff_free_vlc(&s->vlcs[2][index]);
if (build_vlc(&s->vlcs[2][index], bits_table, val_table,
code_max + 1, 0, 0) < 0)
return -1;
@ -1767,7 +1767,7 @@ av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
for (i = 0; i < 3; i++) {
for (j = 0; j < 4; j++)
free_vlc(&s->vlcs[i][j]);
ff_free_vlc(&s->vlcs[i][j]);
}
for (i = 0; i < MAX_COMPONENTS; i++) {
av_freep(&s->blocks[i]);
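Every former free_vlc() call in this merge likewise becomes ff_free_vlc(); a reduced teardown sketch, with the vlcs[3][4] layout and loop bounds taken from ff_mjpeg_decode_end() in the hunk above and everything else trimmed (hypo_decode_end is a placeholder name).

static av_cold int hypo_decode_end(AVCodecContext *avctx)
{
    MJpegDecodeContext *s = avctx->priv_data;
    int i, j;

    /* release every built VLC with the renamed helper before other frees */
    for (i = 0; i < 3; i++)
        for (j = 0; j < 4; j++)
            ff_free_vlc(&s->vlcs[i][j]);   /* renamed from free_vlc() */
    return 0;
}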

@ -469,7 +469,7 @@ static int amv_encode_picture(AVCodecContext *avctx,
pic->data[i] += (pic->linesize[i] * (s->mjpeg_vsample[i] * (8 * s->mb_height -((s->height/V_MAX)&7)) - 1 ));
pic->linesize[i] *= -1;
}
return MPV_encode_picture(avctx,buf, buf_size, pic);
return ff_MPV_encode_picture(avctx,buf, buf_size, pic);
}
AVCodec ff_mjpeg_encoder = {
@ -477,9 +477,9 @@ AVCodec ff_mjpeg_encoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_MJPEG,
.priv_data_size = sizeof(MpegEncContext),
.init = MPV_encode_init,
.encode = MPV_encode_picture,
.close = MPV_encode_end,
.init = ff_MPV_encode_init,
.encode = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
};
@ -489,9 +489,9 @@ AVCodec ff_amv_encoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_AMV,
.priv_data_size = sizeof(MpegEncContext),
.init = MPV_encode_init,
.init = ff_MPV_encode_init,
.encode = amv_encode_picture,
.close = MPV_encode_end,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("AMV Video"),
};

@ -241,7 +241,7 @@ static av_cold int mlp_decode_init(AVCodecContext *avctx)
m->avctx = avctx;
for (substr = 0; substr < MAX_SUBSTREAMS; substr++)
m->substream[substr].lossless_check_data = 0xffffffff;
dsputil_init(&m->dsp, avctx);
ff_dsputil_init(&m->dsp, avctx);
avcodec_get_frame_defaults(&m->frame);
avctx->coded_frame = &m->frame;

@ -144,11 +144,11 @@ int main(int argc, char **argv)
ctx = avcodec_alloc_context3(NULL);
ctx->dsp_mask = AV_CPU_FLAG_FORCE;
dsputil_init(&cctx, ctx);
ff_dsputil_init(&cctx, ctx);
for (c = 0; c < flags_size; c++) {
int x;
ctx->dsp_mask = AV_CPU_FLAG_FORCE | flags[c];
dsputil_init(&mmxctx, ctx);
ff_dsputil_init(&mmxctx, ctx);
for (x = 0; x < 2; x++) {
printf("%s for %dx%d pixels\n", c ? "mmx2" : "mmx",

@ -57,7 +57,7 @@ static av_cold int mp_decode_init(AVCodecContext *avctx)
motionpixels_tableinit();
mp->avctx = avctx;
dsputil_init(&mp->dsp, avctx);
ff_dsputil_init(&mp->dsp, avctx);
mp->changes_map = av_mallocz(avctx->width * h4);
mp->offset_bits_len = av_log2(avctx->width * avctx->height) + 1;
mp->vpt = av_mallocz(avctx->height * sizeof(YuvPixel));
@ -287,7 +287,7 @@ static int mp_decode_frame(AVCodecContext *avctx,
if (init_vlc(&mp->vlc, mp->max_codes_bits, mp->codes_count, &mp->codes[0].size, sizeof(HuffCode), 1, &mp->codes[0].code, sizeof(HuffCode), 4, 0))
goto end;
mp_decode_frame_helper(mp, &gb);
free_vlc(&mp->vlc);
ff_free_vlc(&mp->vlc);
end:
*data_size = sizeof(AVFrame);

@ -74,7 +74,7 @@ static av_cold int mpc7_decode_init(AVCodecContext * avctx)
}
memset(c->oldDSCF, 0, sizeof(c->oldDSCF));
av_lfg_init(&c->rnd, 0xDEADBEEF);
dsputil_init(&c->dsp, avctx);
ff_dsputil_init(&c->dsp, avctx);
ff_mpadsp_init(&c->mpadsp);
c->dsp.bswap_buf((uint32_t*)buf, (const uint32_t*)avctx->extradata, 4);
ff_mpc_init();

@ -118,7 +118,7 @@ static av_cold int mpc8_decode_init(AVCodecContext * avctx)
}
memset(c->oldDSCF, 0, sizeof(c->oldDSCF));
av_lfg_init(&c->rnd, 0xDEADBEEF);
dsputil_init(&c->dsp, avctx);
ff_dsputil_init(&c->dsp, avctx);
ff_mpadsp_init(&c->mpadsp);
ff_mpc_init();
@ -184,13 +184,13 @@ static av_cold int mpc8_decode_init(AVCodecContext * avctx)
q3_vlc[0].table = q3_0_table;
q3_vlc[0].table_allocated = 512;
init_vlc_sparse(&q3_vlc[0], MPC8_Q3_BITS, MPC8_Q3_SIZE,
ff_init_vlc_sparse(&q3_vlc[0], MPC8_Q3_BITS, MPC8_Q3_SIZE,
mpc8_q3_bits, 1, 1,
mpc8_q3_codes, 1, 1,
mpc8_q3_syms, 1, 1, INIT_VLC_USE_NEW_STATIC);
q3_vlc[1].table = q3_1_table;
q3_vlc[1].table_allocated = 516;
init_vlc_sparse(&q3_vlc[1], MPC8_Q4_BITS, MPC8_Q4_SIZE,
ff_init_vlc_sparse(&q3_vlc[1], MPC8_Q4_BITS, MPC8_Q4_SIZE,
mpc8_q4_bits, 1, 1,
mpc8_q4_codes, 1, 1,
mpc8_q4_syms, 1, 1, INIT_VLC_USE_NEW_STATIC);

@ -697,8 +697,8 @@ av_cold void ff_mpeg12_init_vlcs(void)
INIT_VLC_STATIC(&mb_btype_vlc, MB_BTYPE_VLC_BITS, 11,
&table_mb_btype[0][1], 2, 1,
&table_mb_btype[0][0], 2, 1, 64);
init_rl(&ff_rl_mpeg1, ff_mpeg12_static_rl_table_store[0]);
init_rl(&ff_rl_mpeg2, ff_mpeg12_static_rl_table_store[1]);
ff_init_rl(&ff_rl_mpeg1, ff_mpeg12_static_rl_table_store[0]);
ff_init_rl(&ff_rl_mpeg2, ff_mpeg12_static_rl_table_store[1]);
INIT_2D_VLC_RL(ff_rl_mpeg1, 680);
INIT_2D_VLC_RL(ff_rl_mpeg2, 674);
@ -1126,7 +1126,7 @@ static av_cold int mpeg_decode_init(AVCodecContext *avctx)
for (i = 0; i < 64; i++)
s2->dsp.idct_permutation[i]=i;
MPV_decode_defaults(s2);
ff_MPV_decode_defaults(s2);
s->mpeg_enc_ctx.avctx = avctx;
s->mpeg_enc_ctx.flags = avctx->flags;
@ -1258,7 +1258,7 @@ static int mpeg_decode_postinit(AVCodecContext *avctx)
if (s1->mpeg_enc_ctx_allocated) {
ParseContext pc = s->parse_context;
s->parse_context.buffer = 0;
MPV_common_end(s);
ff_MPV_common_end(s);
s->parse_context = pc;
}
@ -1336,7 +1336,7 @@ static int mpeg_decode_postinit(AVCodecContext *avctx)
* if DCT permutation is changed. */
memcpy(old_permutation, s->dsp.idct_permutation, 64 * sizeof(uint8_t));
if (MPV_common_init(s) < 0)
if (ff_MPV_common_init(s) < 0)
return -2;
quant_matrix_rebuild(s->intra_matrix, old_permutation, s->dsp.idct_permutation);
@ -1600,7 +1600,7 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
/* start frame decoding */
if (s->first_field || s->picture_structure == PICT_FRAME) {
if (MPV_frame_start(s, avctx) < 0)
if (ff_MPV_frame_start(s, avctx) < 0)
return -1;
ff_er_frame_start(s);
@ -1790,13 +1790,13 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift;
s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift;
MPV_decode_mb(s, s->block);
ff_MPV_decode_mb(s, s->block);
if (++s->mb_x >= s->mb_width) {
const int mb_size = 16 >> s->avctx->lowres;
ff_draw_horiz_band(s, mb_size*(s->mb_y >> field_pic), mb_size);
MPV_report_decode_progress(s);
ff_MPV_report_decode_progress(s);
s->mb_x = 0;
s->mb_y += 1 << field_pic;
@ -1949,7 +1949,7 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict)
ff_er_frame_end(s);
MPV_frame_end(s);
ff_MPV_frame_end(s);
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
*pict = *(AVFrame*)s->current_picture_ptr;
@ -2060,7 +2060,7 @@ static int vcr2_init_sequence(AVCodecContext *avctx)
/* start new MPEG-1 context decoding */
s->out_format = FMT_MPEG1;
if (s1->mpeg_enc_ctx_allocated) {
MPV_common_end(s);
ff_MPV_common_end(s);
}
s->width = avctx->coded_width;
s->height = avctx->coded_height;
@ -2074,7 +2074,7 @@ static int vcr2_init_sequence(AVCodecContext *avctx)
if (avctx->idct_algo == FF_IDCT_AUTO)
avctx->idct_algo = FF_IDCT_SIMPLE;
if (MPV_common_init(s) < 0)
if (ff_MPV_common_init(s) < 0)
return -1;
s1->mpeg_enc_ctx_allocated = 1;
@ -2541,7 +2541,7 @@ static int mpeg_decode_end(AVCodecContext *avctx)
Mpeg1Context *s = avctx->priv_data;
if (s->mpeg_enc_ctx_allocated)
MPV_common_end(&s->mpeg_enc_ctx);
ff_MPV_common_end(&s->mpeg_enc_ctx);
return 0;
}

@ -132,7 +132,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
{
MpegEncContext *s = avctx->priv_data;
if(MPV_encode_init(avctx) < 0)
if(ff_MPV_encode_init(avctx) < 0)
return -1;
if(find_frame_rate_index(s) < 0){
@ -341,7 +341,7 @@ void ff_mpeg1_encode_slice_header(MpegEncContext *s){
put_bits(&s->pb, 1, 0); /* slice extra information */
}
void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
{
mpeg1_encode_sequence_header(s);
@ -666,7 +666,7 @@ static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s,
}
}
void mpeg1_encode_mb(MpegEncContext *s, DCTELEM block[6][64], int motion_x, int motion_y)
void ff_mpeg1_encode_mb(MpegEncContext *s, DCTELEM block[6][64], int motion_x, int motion_y)
{
if (s->chroma_format == CHROMA_420) mpeg1_encode_mb_internal(s, block, motion_x, motion_y, 6);
else mpeg1_encode_mb_internal(s, block, motion_x, motion_y, 8);
@ -725,8 +725,8 @@ void ff_mpeg1_encode_init(MpegEncContext *s)
int i;
done=1;
init_rl(&ff_rl_mpeg1, ff_mpeg12_static_rl_table_store[0]);
init_rl(&ff_rl_mpeg2, ff_mpeg12_static_rl_table_store[1]);
ff_init_rl(&ff_rl_mpeg1, ff_mpeg12_static_rl_table_store[0]);
ff_init_rl(&ff_rl_mpeg2, ff_mpeg12_static_rl_table_store[1]);
for(i=0; i<64; i++)
{
@ -966,8 +966,8 @@ AVCodec ff_mpeg1video_encoder = {
.id = CODEC_ID_MPEG1VIDEO,
.priv_data_size = sizeof(MpegEncContext),
.init = encode_init,
.encode = MPV_encode_picture,
.close = MPV_encode_end,
.encode = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.supported_framerates= avpriv_frame_rate_tab+1,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.capabilities= CODEC_CAP_DELAY,
@ -981,8 +981,8 @@ AVCodec ff_mpeg2video_encoder = {
.id = CODEC_ID_MPEG2VIDEO,
.priv_data_size = sizeof(MpegEncContext),
.init = encode_init,
.encode = MPV_encode_picture,
.close = MPV_encode_end,
.encode = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.supported_framerates= avpriv_frame_rate_tab+1,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE},
.capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,

@ -211,7 +211,7 @@ static const int8_t inter_rvlc_level[169]={
1, 1,
};
RLTable rvlc_rl_inter = {
RLTable ff_rvlc_rl_inter = {
169,
103,
inter_rvlc,
@ -315,7 +315,7 @@ static const int8_t intra_rvlc_level[169]={
1, 1,
};
RLTable rvlc_rl_intra = {
RLTable ff_rvlc_rl_intra = {
169,
103,
intra_rvlc,
@ -323,13 +323,13 @@ RLTable rvlc_rl_intra = {
intra_rvlc_level,
};
const uint16_t sprite_trajectory_tab[15][2] = {
const uint16_t ff_sprite_trajectory_tab[15][2] = {
{0x00, 2}, {0x02, 3}, {0x03, 3}, {0x04, 3}, {0x05, 3}, {0x06, 3},
{0x0E, 4}, {0x1E, 5}, {0x3E, 6}, {0x7E, 7}, {0xFE, 8},
{0x1FE, 9},{0x3FE, 10},{0x7FE, 11},{0xFFE, 12},
};
const uint8_t mb_type_b_tab[4][2] = {
const uint8_t ff_mb_type_b_tab[4][2] = {
{1, 1}, {1, 2}, {1, 3}, {1, 4},
};
@ -369,7 +369,7 @@ const uint16_t ff_mpeg4_resync_prefix[8]={
0x7F00, 0x7E00, 0x7C00, 0x7800, 0x7000, 0x6000, 0x4000, 0x0000
};
const uint8_t mpeg4_dc_threshold[8]={
const uint8_t ff_mpeg4_dc_threshold[8]={
99, 13, 15, 17, 19, 21, 23, 0
};

@ -66,11 +66,11 @@ extern const uint16_t ff_mpeg4_intra_vlc[103][2];
extern RLTable ff_mpeg4_rl_intra;
/* Note this is identical to the intra rvlc except that it is reordered. */
extern RLTable rvlc_rl_inter;
extern RLTable rvlc_rl_intra;
extern RLTable ff_rvlc_rl_inter;
extern RLTable ff_rvlc_rl_intra;
extern const uint16_t sprite_trajectory_tab[15][2];
extern const uint8_t mb_type_b_tab[4][2];
extern const uint16_t ff_sprite_trajectory_tab[15][2];
extern const uint8_t ff_mb_type_b_tab[4][2];
/* these matrixes will be permuted for the idct */
extern const int16_t ff_mpeg4_default_intra_matrix[64];
@ -80,15 +80,15 @@ extern const uint8_t ff_mpeg4_y_dc_scale_table[32];
extern const uint8_t ff_mpeg4_c_dc_scale_table[32];
extern const uint16_t ff_mpeg4_resync_prefix[8];
extern const uint8_t mpeg4_dc_threshold[8];
extern const uint8_t ff_mpeg4_dc_threshold[8];
void mpeg4_encode_mb(MpegEncContext *s,
DCTELEM block[6][64],
int motion_x, int motion_y);
void mpeg4_pred_ac(MpegEncContext * s, DCTELEM *block, int n,
int dir);
void ff_mpeg4_encode_mb(MpegEncContext *s,
DCTELEM block[6][64],
int motion_x, int motion_y);
void ff_mpeg4_pred_ac(MpegEncContext * s, DCTELEM *block, int n,
int dir);
void ff_set_mpeg4_time(MpegEncContext * s);
void mpeg4_encode_picture_header(MpegEncContext *s, int picture_number);
void ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number);
int ff_mpeg4_decode_picture_header(MpegEncContext * s, GetBitContext *gb);
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s);
@ -99,7 +99,7 @@ void ff_mpeg4_merge_partitions(MpegEncContext *s);
void ff_clean_mpeg4_qscales(MpegEncContext *s);
int ff_mpeg4_decode_partitions(MpegEncContext *s);
int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s);
int mpeg4_decode_video_packet_header(MpegEncContext *s);
int ff_mpeg4_decode_video_packet_header(MpegEncContext *s);
void ff_mpeg4_init_direct_mv(MpegEncContext *s);
/**

@ -53,8 +53,8 @@ static const int mb_type_b_map[4]= {
* @param n block index (0-3 are luma, 4-5 are chroma)
* @param dir the ac prediction direction
*/
void mpeg4_pred_ac(MpegEncContext * s, DCTELEM *block, int n,
int dir)
void ff_mpeg4_pred_ac(MpegEncContext * s, DCTELEM *block, int n,
int dir)
{
int i;
int16_t *ac_val, *ac_val1;
@ -358,7 +358,7 @@ static int mpeg4_decode_sprite_trajectory(MpegEncContext * s, GetBitContext *gb)
* Decode the next video packet.
* @return <0 if something went wrong
*/
int mpeg4_decode_video_packet_header(MpegEncContext *s)
int ff_mpeg4_decode_video_packet_header(MpegEncContext *s)
{
int mb_num_bits= av_log2(s->mb_num - 1) + 1;
int header_extension=0, mb_num, len;
@ -654,13 +654,13 @@ try_again:
if ((cbpc & 16) == 0) {
/* 16x16 motion prediction */
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
if(!s->mcsel){
mx = h263_decode_motion(s, pred_x, s->f_code);
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
if (mx >= 0xffff)
return -1;
my = h263_decode_motion(s, pred_y, s->f_code);
my = ff_h263_decode_motion(s, pred_y, s->f_code);
if (my >= 0xffff)
return -1;
s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
@ -678,12 +678,12 @@ try_again:
int i;
s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
for(i=0;i<4;i++) {
int16_t *mot_val= h263_pred_motion(s, i, 0, &pred_x, &pred_y);
mx = h263_decode_motion(s, pred_x, s->f_code);
int16_t *mot_val= ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
if (mx >= 0xffff)
return -1;
my = h263_decode_motion(s, pred_y, s->f_code);
my = ff_h263_decode_motion(s, pred_y, s->f_code);
if (my >= 0xffff)
return -1;
mot_val[0] = mx;
@ -878,8 +878,8 @@ static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
goto not_coded;
if(rvlc){
rl = &rvlc_rl_intra;
rl_vlc = rvlc_rl_intra.rl_vlc[0];
rl = &ff_rvlc_rl_intra;
rl_vlc = ff_rvlc_rl_intra.rl_vlc[0];
}else{
rl = &ff_mpeg4_rl_intra;
rl_vlc = ff_mpeg4_rl_intra.rl_vlc[0];
@ -900,7 +900,7 @@ static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
s->block_last_index[n] = i;
return 0;
}
if(rvlc) rl = &rvlc_rl_inter;
if(rvlc) rl = &ff_rvlc_rl_inter;
else rl = &ff_h263_rl_inter;
scan_table = s->intra_scantable.permutated;
@ -909,7 +909,7 @@ static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
qmul=1;
qadd=0;
if(rvlc){
rl_vlc = rvlc_rl_inter.rl_vlc[0];
rl_vlc = ff_rvlc_rl_inter.rl_vlc[0];
}else{
rl_vlc = ff_h263_rl_inter.rl_vlc[0];
}
@ -917,7 +917,7 @@ static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
qmul = s->qscale << 1;
qadd = (s->qscale - 1) | 1;
if(rvlc){
rl_vlc = rvlc_rl_inter.rl_vlc[s->qscale];
rl_vlc = ff_rvlc_rl_inter.rl_vlc[s->qscale];
}else{
rl_vlc = ff_h263_rl_inter.rl_vlc[s->qscale];
}
@ -1076,7 +1076,7 @@ static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
i -= i>>31; //if(i == -1) i=0;
}
mpeg4_pred_ac(s, block, n, dc_pred_dir);
ff_mpeg4_pred_ac(s, block, n, dc_pred_dir);
if (s->ac_pred) {
i = 63; /* XXX: not optimal */
}
@ -1248,14 +1248,14 @@ static int mpeg4_decode_mb(MpegEncContext *s,
s->field_select[0][0]= get_bits1(&s->gb);
s->field_select[0][1]= get_bits1(&s->gb);
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
for(i=0; i<2; i++){
mx = h263_decode_motion(s, pred_x, s->f_code);
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
if (mx >= 0xffff)
return -1;
my = h263_decode_motion(s, pred_y/2, s->f_code);
my = ff_h263_decode_motion(s, pred_y/2, s->f_code);
if (my >= 0xffff)
return -1;
@ -1266,13 +1266,13 @@ static int mpeg4_decode_mb(MpegEncContext *s,
s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */
s->mv_type = MV_TYPE_16X16;
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
mx = h263_decode_motion(s, pred_x, s->f_code);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
if (mx >= 0xffff)
return -1;
my = h263_decode_motion(s, pred_y, s->f_code);
my = ff_h263_decode_motion(s, pred_y, s->f_code);
if (my >= 0xffff)
return -1;
@ -1283,12 +1283,12 @@ static int mpeg4_decode_mb(MpegEncContext *s,
s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
s->mv_type = MV_TYPE_8X8;
for(i=0;i<4;i++) {
mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
mx = h263_decode_motion(s, pred_x, s->f_code);
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
if (mx >= 0xffff)
return -1;
my = h263_decode_motion(s, pred_y, s->f_code);
my = ff_h263_decode_motion(s, pred_y, s->f_code);
if (my >= 0xffff)
return -1;
s->mv[0][i][0] = mx;
@ -1384,8 +1384,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
if(USES_LIST(mb_type, 0)){
s->mv_dir = MV_DIR_FORWARD;
mx = h263_decode_motion(s, s->last_mv[0][0][0], s->f_code);
my = h263_decode_motion(s, s->last_mv[0][0][1], s->f_code);
mx = ff_h263_decode_motion(s, s->last_mv[0][0][0], s->f_code);
my = ff_h263_decode_motion(s, s->last_mv[0][0][1], s->f_code);
s->last_mv[0][1][0]= s->last_mv[0][0][0]= s->mv[0][0][0] = mx;
s->last_mv[0][1][1]= s->last_mv[0][0][1]= s->mv[0][0][1] = my;
}
@ -1393,8 +1393,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
if(USES_LIST(mb_type, 1)){
s->mv_dir |= MV_DIR_BACKWARD;
mx = h263_decode_motion(s, s->last_mv[1][0][0], s->b_code);
my = h263_decode_motion(s, s->last_mv[1][0][1], s->b_code);
mx = ff_h263_decode_motion(s, s->last_mv[1][0][0], s->b_code);
my = ff_h263_decode_motion(s, s->last_mv[1][0][1], s->b_code);
s->last_mv[1][1][0]= s->last_mv[1][0][0]= s->mv[1][0][0] = mx;
s->last_mv[1][1][1]= s->last_mv[1][0][1]= s->mv[1][0][1] = my;
}
@ -1405,8 +1405,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
s->mv_dir = MV_DIR_FORWARD;
for(i=0; i<2; i++){
mx = h263_decode_motion(s, s->last_mv[0][i][0] , s->f_code);
my = h263_decode_motion(s, s->last_mv[0][i][1]/2, s->f_code);
mx = ff_h263_decode_motion(s, s->last_mv[0][i][0] , s->f_code);
my = ff_h263_decode_motion(s, s->last_mv[0][i][1]/2, s->f_code);
s->last_mv[0][i][0]= s->mv[0][i][0] = mx;
s->last_mv[0][i][1]= (s->mv[0][i][1] = my)*2;
}
@ -1416,8 +1416,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
s->mv_dir |= MV_DIR_BACKWARD;
for(i=0; i<2; i++){
mx = h263_decode_motion(s, s->last_mv[1][i][0] , s->b_code);
my = h263_decode_motion(s, s->last_mv[1][i][1]/2, s->b_code);
mx = ff_h263_decode_motion(s, s->last_mv[1][i][0] , s->b_code);
my = ff_h263_decode_motion(s, s->last_mv[1][i][1]/2, s->b_code);
s->last_mv[1][i][0]= s->mv[1][i][0] = mx;
s->last_mv[1][i][1]= (s->mv[1][i][1] = my)*2;
}
@ -1429,8 +1429,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
if(IS_SKIP(mb_type))
mx=my=0;
else{
mx = h263_decode_motion(s, 0, 1);
my = h263_decode_motion(s, 0, 1);
mx = ff_h263_decode_motion(s, 0, 1);
my = ff_h263_decode_motion(s, 0, 1);
}
s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
@ -2035,7 +2035,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
if(s->pict_type == AV_PICTURE_TYPE_B)
skip_bits_long(gb, s->cplx_estimation_trash_b);
s->intra_dc_threshold= mpeg4_dc_threshold[ get_bits(gb, 3) ];
s->intra_dc_threshold= ff_mpeg4_dc_threshold[ get_bits(gb, 3) ];
if(!s->progressive_sequence){
s->top_field_first= get_bits1(gb);
s->alternate_scan= get_bits1(gb);
@ -2237,12 +2237,12 @@ static av_cold int decode_init(AVCodecContext *avctx)
if (!done) {
done = 1;
init_rl(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]);
init_rl(&rvlc_rl_inter, ff_mpeg4_static_rl_table_store[1]);
init_rl(&rvlc_rl_intra, ff_mpeg4_static_rl_table_store[2]);
ff_init_rl(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]);
ff_init_rl(&ff_rvlc_rl_inter, ff_mpeg4_static_rl_table_store[1]);
ff_init_rl(&ff_rvlc_rl_intra, ff_mpeg4_static_rl_table_store[2]);
INIT_VLC_RL(ff_mpeg4_rl_intra, 554);
INIT_VLC_RL(rvlc_rl_inter, 1072);
INIT_VLC_RL(rvlc_rl_intra, 1072);
INIT_VLC_RL(ff_rvlc_rl_inter, 1072);
INIT_VLC_RL(ff_rvlc_rl_intra, 1072);
INIT_VLC_STATIC(&dc_lum, DC_VLC_BITS, 10 /* 13 */,
&ff_mpeg4_DCtab_lum[0][1], 2, 1,
&ff_mpeg4_DCtab_lum[0][0], 2, 1, 512);
@ -2250,11 +2250,11 @@ static av_cold int decode_init(AVCodecContext *avctx)
&ff_mpeg4_DCtab_chrom[0][1], 2, 1,
&ff_mpeg4_DCtab_chrom[0][0], 2, 1, 512);
INIT_VLC_STATIC(&sprite_trajectory, SPRITE_TRAJ_VLC_BITS, 15,
&sprite_trajectory_tab[0][1], 4, 2,
&sprite_trajectory_tab[0][0], 4, 2, 128);
&ff_sprite_trajectory_tab[0][1], 4, 2,
&ff_sprite_trajectory_tab[0][0], 4, 2, 128);
INIT_VLC_STATIC(&mb_type_b_vlc, MB_TYPE_B_VLC_BITS, 4,
&mb_type_b_tab[0][1], 2, 1,
&mb_type_b_tab[0][0], 2, 1, 16);
&ff_mb_type_b_tab[0][1], 2, 1,
&ff_mb_type_b_tab[0][0], 2, 1, 16);
}
s->h263_pred = 1;
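The table renames above (ff_sprite_trajectory_tab, ff_mb_type_b_tab) feed directly into this VLC setup. As a reading aid, the same INIT_VLC_STATIC call annotated with what each argument addresses; the {code, length} row layout is inferred from the ff_sprite_trajectory_tab definition in the mpeg4data.h hunk.

/* Each table row is {code, length}: the macro takes the length column first,
 * then the code column, each with its row stride and element size in bytes. */
INIT_VLC_STATIC(&sprite_trajectory, SPRITE_TRAJ_VLC_BITS, 15,   /* 15 symbols  */
                &ff_sprite_trajectory_tab[0][1], 4, 2,          /* lengths     */
                &ff_sprite_trajectory_tab[0][0], 4, 2,          /* codes       */
                128);                                           /* static size */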

@ -468,9 +468,9 @@ static inline int get_b_cbp(MpegEncContext * s, DCTELEM block[6][64],
//FIXME this is duplicated to h263.c
static const int dquant_code[5]= {1,0,9,2,3};
void mpeg4_encode_mb(MpegEncContext * s,
DCTELEM block[6][64],
int motion_x, int motion_y)
void ff_mpeg4_encode_mb(MpegEncContext * s,
DCTELEM block[6][64],
int motion_x, int motion_y)
{
int cbpc, cbpy, pred_x, pred_y;
PutBitContext * const pb2 = s->data_partitioning ? &s->pb2 : &s->pb;
@ -705,7 +705,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
}
/* motion vectors: 16x16 mode */
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_encode_motion_vector(s, motion_x - pred_x,
motion_y - pred_y, s->f_code);
@ -729,7 +729,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
}
/* motion vectors: 16x8 interlaced mode */
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
pred_y /=2;
put_bits(&s->pb, 1, s->field_select[0][0]);
@ -757,7 +757,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
for(i=0; i<4; i++){
/* motion vectors: 8x8 mode*/
h263_pred_motion(s, i, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
ff_h263_encode_motion_vector(s, s->current_picture.f.motion_val[0][ s->block_index[i] ][0] - pred_x,
s->current_picture.f.motion_val[0][ s->block_index[i] ][1] - pred_y, s->f_code);
@ -1038,7 +1038,7 @@ static void mpeg4_encode_vol_header(MpegEncContext * s, int vo_number, int vol_n
}
/* write mpeg4 VOP header */
void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
void ff_mpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
{
int time_incr;
int time_div, time_mod;
@ -1232,7 +1232,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
int ret;
static int done = 0;
if((ret=MPV_encode_init(avctx)) < 0)
if((ret=ff_MPV_encode_init(avctx)) < 0)
return ret;
if (!done) {
@ -1240,7 +1240,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
init_uni_dc_tab();
init_rl(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]);
ff_init_rl(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]);
init_uni_mpeg4_rl_tab(&ff_mpeg4_rl_intra, uni_mpeg4_intra_rl_bits, uni_mpeg4_intra_rl_len);
init_uni_mpeg4_rl_tab(&ff_h263_rl_inter, uni_mpeg4_inter_rl_bits, uni_mpeg4_inter_rl_len);
@ -1346,8 +1346,8 @@ AVCodec ff_mpeg4_encoder = {
.id = CODEC_ID_MPEG4,
.priv_data_size = sizeof(MpegEncContext),
.init = encode_init,
.encode = MPV_encode_picture,
.close = MPV_encode_end,
.encode = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
.long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),

@ -176,7 +176,7 @@ const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
/* init common dct for both encoder and decoder */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
dsputil_init(&s->dsp, s->avctx);
ff_dsputil_init(&s->dsp, s->avctx);
s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
@ -188,17 +188,17 @@ av_cold int ff_dct_common_init(MpegEncContext *s)
s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
#if HAVE_MMX
MPV_common_init_mmx(s);
ff_MPV_common_init_mmx(s);
#elif ARCH_ALPHA
MPV_common_init_axp(s);
ff_MPV_common_init_axp(s);
#elif HAVE_MMI
MPV_common_init_mmi(s);
ff_MPV_common_init_mmi(s);
#elif ARCH_ARM
MPV_common_init_arm(s);
ff_MPV_common_init_arm(s);
#elif HAVE_ALTIVEC
MPV_common_init_altivec(s);
ff_MPV_common_init_altivec(s);
#elif ARCH_BFIN
MPV_common_init_bfin(s);
ff_MPV_common_init_bfin(s);
#endif
/* load & permutate scantables
@ -458,7 +458,7 @@ static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
return 0;
fail:
return -1; // free() through MPV_common_end()
return -1; // free() through ff_MPV_common_end()
}
static void free_duplicate_context(MpegEncContext *s)
@ -544,7 +544,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
if (s1->context_initialized){
s->picture_range_start += MAX_PICTURE_COUNT;
s->picture_range_end += MAX_PICTURE_COUNT;
MPV_common_init(s);
ff_MPV_common_init(s);
}
}
@ -618,7 +618,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
* The changed fields will not depend upon the
* prior state of the MpegEncContext.
*/
void MPV_common_defaults(MpegEncContext *s)
void ff_MPV_common_defaults(MpegEncContext *s)
{
s->y_dc_scale_table =
s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
@ -647,16 +647,16 @@ void MPV_common_defaults(MpegEncContext *s)
* the changed fields will not depend upon
* the prior state of the MpegEncContext.
*/
void MPV_decode_defaults(MpegEncContext *s)
void ff_MPV_decode_defaults(MpegEncContext *s)
{
MPV_common_defaults(s);
ff_MPV_common_defaults(s);
}
/**
* init common structure for both encoder and decoder.
* this assumes that some variables like width/height are already set
*/
av_cold int MPV_common_init(MpegEncContext *s)
av_cold int ff_MPV_common_init(MpegEncContext *s)
{
int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
int nb_slices = (HAVE_THREADS &&
@ -862,12 +862,12 @@ av_cold int MPV_common_init(MpegEncContext *s)
return 0;
fail:
MPV_common_end(s);
ff_MPV_common_end(s);
return -1;
}
/* init common structure for both encoder and decoder */
void MPV_common_end(MpegEncContext *s)
void ff_MPV_common_end(MpegEncContext *s)
{
int i, j, k;
@ -956,8 +956,8 @@ void MPV_common_end(MpegEncContext *s)
avcodec_default_free_buffers(s->avctx);
}
void init_rl(RLTable *rl,
uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
void ff_init_rl(RLTable *rl,
uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
{
int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
uint8_t index_run[MAX_RUN + 1];
@ -1008,7 +1008,7 @@ void init_rl(RLTable *rl,
}
}
void init_vlc_rl(RLTable *rl)
void ff_init_vlc_rl(RLTable *rl)
{
int i, q;
@ -1125,7 +1125,7 @@ static void update_noise_reduction(MpegEncContext *s)
* generic function for encode/decode called after coding/decoding
* the header and before a frame is coded/decoded.
*/
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
int i;
Picture *pic;
@ -1328,7 +1328,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
/* generic function for encode/decode called after a
* frame has been coded/decoded. */
void MPV_frame_end(MpegEncContext *s)
void ff_MPV_frame_end(MpegEncContext *s)
{
int i;
/* redraw edges for the frame if decoding didn't complete */
@ -2156,7 +2156,7 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
/**
* find the lowest MB row referenced in the MVs
*/
int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
{
int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
int my, off, i, mvs;
@ -2346,10 +2346,10 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
if (s->mv_dir & MV_DIR_FORWARD) {
ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
ff_thread_await_progress((AVFrame*)s->last_picture_ptr, ff_MPV_lowest_referenced_row(s, 0), 0);
}
if (s->mv_dir & MV_DIR_BACKWARD) {
ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
ff_thread_await_progress((AVFrame*)s->next_picture_ptr, ff_MPV_lowest_referenced_row(s, 1), 0);
}
}
@ -2500,7 +2500,7 @@ skip_idct:
}
}
void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
#if !CONFIG_SMALL
if(s->out_format == FMT_MPEG1) {
if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
@ -2864,7 +2864,7 @@ void ff_set_qscale(MpegEncContext * s, int qscale)
s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
}
void MPV_report_decode_progress(MpegEncContext *s)
void ff_MPV_report_decode_progress(MpegEncContext *s)
{
if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);

@ -694,21 +694,21 @@ typedef struct MpegEncContext {
&new_ctx->picture[pic - old_ctx->picture] : pic - (Picture*)old_ctx + (Picture*)new_ctx)\
: NULL)
void MPV_decode_defaults(MpegEncContext *s);
int MPV_common_init(MpegEncContext *s);
void MPV_common_end(MpegEncContext *s);
void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]);
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx);
void MPV_frame_end(MpegEncContext *s);
int MPV_encode_init(AVCodecContext *avctx);
int MPV_encode_end(AVCodecContext *avctx);
int MPV_encode_picture(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data);
void MPV_common_init_mmx(MpegEncContext *s);
void MPV_common_init_axp(MpegEncContext *s);
void MPV_common_init_mmi(MpegEncContext *s);
void MPV_common_init_arm(MpegEncContext *s);
void MPV_common_init_altivec(MpegEncContext *s);
void MPV_common_init_bfin(MpegEncContext *s);
void ff_MPV_decode_defaults(MpegEncContext *s);
int ff_MPV_common_init(MpegEncContext *s);
void ff_MPV_common_end(MpegEncContext *s);
void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]);
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx);
void ff_MPV_frame_end(MpegEncContext *s);
int ff_MPV_encode_init(AVCodecContext *avctx);
int ff_MPV_encode_end(AVCodecContext *avctx);
int ff_MPV_encode_picture(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data);
void ff_MPV_common_init_mmx(MpegEncContext *s);
void ff_MPV_common_init_axp(MpegEncContext *s);
void ff_MPV_common_init_mmi(MpegEncContext *s);
void ff_MPV_common_init_arm(MpegEncContext *s);
void ff_MPV_common_init_altivec(MpegEncContext *s);
void ff_MPV_common_init_bfin(MpegEncContext *s);
void ff_clean_intra_table_entries(MpegEncContext *s);
void ff_draw_horiz_band(MpegEncContext *s, int y, int h);
void ff_mpeg_flush(AVCodecContext *avctx);
@ -718,8 +718,8 @@ void ff_release_unused_pictures(MpegEncContext *s, int remove_current);
int ff_find_unused_picture(MpegEncContext *s, int shared);
void ff_denoise_dct(MpegEncContext *s, DCTELEM *block);
void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src);
int MPV_lowest_referenced_row(MpegEncContext *s, int dir);
void MPV_report_decode_progress(MpegEncContext *s);
int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir);
void ff_MPV_report_decode_progress(MpegEncContext *s);
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src);
const uint8_t *avpriv_mpv_find_start_code(const uint8_t *p, const uint8_t *end, uint32_t *state);
void ff_set_qscale(MpegEncContext * s, int qscale);
@ -796,10 +796,10 @@ int ff_get_mb_score(MpegEncContext * s, int mx, int my, int src_index,
extern const uint8_t ff_mpeg1_dc_scale_table[128];
extern const uint8_t * const ff_mpeg2_dc_scale_table[4];
void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number);
void mpeg1_encode_mb(MpegEncContext *s,
DCTELEM block[6][64],
int motion_x, int motion_y);
void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number);
void ff_mpeg1_encode_mb(MpegEncContext *s,
DCTELEM block[6][64],
int motion_x, int motion_y);
void ff_mpeg1_encode_init(MpegEncContext *s);
void ff_mpeg1_encode_slice_header(MpegEncContext *s);
void ff_mpeg1_clean_buffers(MpegEncContext *s);
@ -821,19 +821,19 @@ int ff_h261_get_picture_format(int width, int height);
/* rv10.c */
void rv10_encode_picture_header(MpegEncContext *s, int picture_number);
int rv_decode_dc(MpegEncContext *s, int n);
void rv20_encode_picture_header(MpegEncContext *s, int picture_number);
void ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number);
int ff_rv_decode_dc(MpegEncContext *s, int n);
void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number);
/* msmpeg4.c */
void msmpeg4_encode_picture_header(MpegEncContext * s, int picture_number);
void msmpeg4_encode_ext_header(MpegEncContext * s);
void msmpeg4_encode_mb(MpegEncContext * s,
DCTELEM block[6][64],
int motion_x, int motion_y);
int msmpeg4_decode_picture_header(MpegEncContext * s);
int msmpeg4_decode_ext_header(MpegEncContext * s, int buf_size);
void ff_msmpeg4_encode_picture_header(MpegEncContext * s, int picture_number);
void ff_msmpeg4_encode_ext_header(MpegEncContext * s);
void ff_msmpeg4_encode_mb(MpegEncContext * s,
DCTELEM block[6][64],
int motion_x, int motion_y);
int ff_msmpeg4_decode_picture_header(MpegEncContext * s);
int ff_msmpeg4_decode_ext_header(MpegEncContext * s, int buf_size);
int ff_msmpeg4_decode_init(AVCodecContext *avctx);
void ff_msmpeg4_encode_init(MpegEncContext *s);
int ff_wmv2_decode_picture_header(MpegEncContext * s);
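For code built against these headers, the prototype block above is the checklist for updating call sites. A hypothetical init/teardown pair sketching the renamed ff_MPV_* entry points; hypo_init and hypo_end are invented names, and the call sequence mirrors mpeg_decode_init(), vcr2_init_sequence() and mpeg_decode_end() in the mpeg12.c hunks.

static av_cold int hypo_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;

    ff_MPV_decode_defaults(s);        /* was MPV_decode_defaults() */
    s->avctx  = avctx;
    s->width  = avctx->coded_width;
    s->height = avctx->coded_height;

    if (ff_MPV_common_init(s) < 0)    /* was MPV_common_init() */
        return -1;
    return 0;
}

static av_cold int hypo_end(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;

    ff_MPV_common_end(s);             /* was MPV_common_end() */
    return 0;
}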

@ -39,19 +39,13 @@
#include "faandct.h"
#include <limits.h>
int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
/**
* Allocate a Picture.
* The pixels are allocated/set by calling get_buffer() if shared = 0.
*/
int alloc_picture(MpegEncContext *s, Picture *pic, int shared);
int ff_dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
/**
* Set the given MpegEncContext to common defaults (same for encoding and decoding).
* The changed fields will not depend upon the prior state of the MpegEncContext.
*/
void MPV_common_defaults(MpegEncContext *s);
void ff_MPV_common_defaults(MpegEncContext *s);
static inline void gmc1_motion(MpegEncContext *s,
uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,

@ -90,7 +90,7 @@ void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
(qscale * quant_matrix[j]));
}
} else if (dsp->fdct == fdct_ifast
} else if (dsp->fdct == ff_fdct_ifast
#ifndef FAAN_POSTSCALE
|| dsp->fdct == ff_faandct
#endif
@ -132,7 +132,7 @@ void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
for (i = intra; i < 64; i++) {
int64_t max = 8191;
if (dsp->fdct == fdct_ifast
if (dsp->fdct == ff_fdct_ifast
#ifndef FAAN_POSTSCALE
|| dsp->fdct == ff_faandct
#endif
@ -264,7 +264,7 @@ static void update_duplicate_context_after_me(MpegEncContext *dst,
static void MPV_encode_defaults(MpegEncContext *s)
{
int i;
MPV_common_defaults(s);
ff_MPV_common_defaults(s);
for (i = -16; i < 16; i++) {
default_fcode_tab[i + MAX_MV] = 1;
@ -274,7 +274,7 @@ static void MPV_encode_defaults(MpegEncContext *s)
}
/* init video encoder */
av_cold int MPV_encode_init(AVCodecContext *avctx)
av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
{
MpegEncContext *s = avctx->priv_data;
int i;
@ -665,7 +665,7 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
case CODEC_ID_H263:
if (!CONFIG_H263_ENCODER)
return -1;
if (ff_match_2uint16(h263_format, FF_ARRAY_ELEMS(h263_format),
if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
s->width, s->height) == 8) {
av_log(avctx, AV_LOG_ERROR,
"The specified picture size of %dx%d is not valid for "
@ -771,11 +771,11 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
s->alternate_scan);
/* init */
if (MPV_common_init(s) < 0)
if (ff_MPV_common_init(s) < 0)
return -1;
if (!s->dct_quantize)
s->dct_quantize = dct_quantize_c;
s->dct_quantize = ff_dct_quantize_c;
if (!s->denoise_dct)
s->denoise_dct = denoise_dct_c;
s->fast_dct_quantize = s->dct_quantize;
@ -793,7 +793,7 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
ff_h261_encode_init(s);
if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
h263_encode_init(s);
ff_h263_encode_init(s);
if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
ff_msmpeg4_encode_init(s);
if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
@ -838,13 +838,13 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
return 0;
}
av_cold int MPV_encode_end(AVCodecContext *avctx)
av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
{
MpegEncContext *s = avctx->priv_data;
ff_rate_control_uninit(s);
MPV_common_end(s);
ff_MPV_common_end(s);
if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
s->out_format == FMT_MJPEG)
ff_mjpeg_encode_close(s);
@ -1384,8 +1384,8 @@ no_output_pic:
return 0;
}
int MPV_encode_picture(AVCodecContext *avctx,
unsigned char *buf, int buf_size, void *data)
int ff_MPV_encode_picture(AVCodecContext *avctx,
unsigned char *buf, int buf_size, void *data)
{
MpegEncContext *s = avctx->priv_data;
AVFrame *pic_arg = data;
@ -1417,7 +1417,7 @@ int MPV_encode_picture(AVCodecContext *avctx,
//emms_c();
//printf("qs:%f %f %d\n", s->new_picture.quality,
// s->current_picture.quality, s->qscale);
MPV_frame_start(s, avctx);
ff_MPV_frame_start(s, avctx);
vbv_retry:
if (encode_picture(s, s->picture_number) < 0)
return -1;
@ -1432,7 +1432,7 @@ vbv_retry:
avctx->p_count = s->mb_num - s->i_count - s->skip_count;
avctx->skip_count = s->skip_count;
MPV_frame_end(s);
ff_MPV_frame_end(s);
if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
ff_mjpeg_encode_picture_trailer(s);
@ -1987,7 +1987,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
}
// non c quantize code returns incorrect block_last_index FIXME
if (s->alternate_scan && s->dct_quantize != dct_quantize_c) {
if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
for (i = 0; i < mb_block_count; i++) {
int j;
if (s->block_last_index[i] > 0) {
@ -2005,17 +2005,17 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
case CODEC_ID_MPEG1VIDEO:
case CODEC_ID_MPEG2VIDEO:
if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
mpeg1_encode_mb(s, s->block, motion_x, motion_y);
ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
break;
case CODEC_ID_MPEG4:
if (CONFIG_MPEG4_ENCODER)
mpeg4_encode_mb(s, s->block, motion_x, motion_y);
ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
break;
case CODEC_ID_MSMPEG4V2:
case CODEC_ID_MSMPEG4V3:
case CODEC_ID_WMV1:
if (CONFIG_MSMPEG4_ENCODER)
msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
break;
case CODEC_ID_WMV2:
if (CONFIG_WMV2_ENCODER)
@ -2031,7 +2031,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
case CODEC_ID_RV10:
case CODEC_ID_RV20:
if (CONFIG_H263_ENCODER)
h263_encode_mb(s, s->block, motion_x, motion_y);
ff_h263_encode_mb(s, s->block, motion_x, motion_y);
break;
case CODEC_ID_MJPEG:
case CODEC_ID_AMV:
@ -2149,7 +2149,7 @@ static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegE
}
if(s->avctx->mb_decision == FF_MB_DECISION_RD){
MPV_decode_mb(s, s->block);
ff_MPV_decode_mb(s, s->block);
score *= s->lambda2;
score += sse_mb(s) << FF_LAMBDA_SHIFT;
@ -2467,7 +2467,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
case CODEC_ID_H263:
case CODEC_ID_H263P:
if (CONFIG_H263_ENCODER)
h263_encode_gob_header(s, mb_y);
ff_h263_encode_gob_header(s, mb_y);
break;
}
@ -2760,7 +2760,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
}
if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
MPV_decode_mb(s, s->block);
ff_MPV_decode_mb(s, s->block);
} else {
int motion_x = 0, motion_y = 0;
s->mv_type=MV_TYPE_16X16;
@ -2880,7 +2880,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
ff_h263_update_motion_val(s);
MPV_decode_mb(s, s->block);
ff_MPV_decode_mb(s, s->block);
}
/* clean the MV table in IPS frames for direct mode in B frames */
@ -2916,7 +2916,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
//not beautiful here but we must write it before flushing so it has to be here
if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
msmpeg4_encode_ext_header(s);
ff_msmpeg4_encode_ext_header(s);
write_slice_end(s);
@ -3233,21 +3233,21 @@ static int encode_picture(MpegEncContext *s, int picture_number)
if (CONFIG_WMV2_ENCODER && s->codec_id == CODEC_ID_WMV2)
ff_wmv2_encode_picture_header(s, picture_number);
else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
msmpeg4_encode_picture_header(s, picture_number);
ff_msmpeg4_encode_picture_header(s, picture_number);
else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
mpeg4_encode_picture_header(s, picture_number);
ff_mpeg4_encode_picture_header(s, picture_number);
else if (CONFIG_RV10_ENCODER && s->codec_id == CODEC_ID_RV10)
rv10_encode_picture_header(s, picture_number);
ff_rv10_encode_picture_header(s, picture_number);
else if (CONFIG_RV20_ENCODER && s->codec_id == CODEC_ID_RV20)
rv20_encode_picture_header(s, picture_number);
ff_rv20_encode_picture_header(s, picture_number);
else if (CONFIG_FLV_ENCODER && s->codec_id == CODEC_ID_FLV1)
ff_flv_encode_picture_header(s, picture_number);
else if (CONFIG_H263_ENCODER)
h263_encode_picture_header(s, picture_number);
ff_h263_encode_picture_header(s, picture_number);
break;
case FMT_MPEG1:
if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
mpeg1_encode_picture_header(s, picture_number);
ff_mpeg1_encode_picture_header(s, picture_number);
break;
case FMT_H264:
break;
@ -3413,7 +3413,7 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
int dct_coeff= FFABS(block[ scantable[i] ]);
int best_score=256*256*256*120;
if ( s->dsp.fdct == fdct_ifast
if ( s->dsp.fdct == ff_fdct_ifast
#ifndef FAAN_POSTSCALE
|| s->dsp.fdct == ff_faandct
#endif
@ -3983,7 +3983,7 @@ STOP_TIMER("iterative search")
return last_non_zero;
}
int dct_quantize_c(MpegEncContext *s,
int ff_dct_quantize_c(MpegEncContext *s,
DCTELEM *block, int n,
int qscale, int *overflow)
{
@ -4083,9 +4083,9 @@ AVCodec ff_h263_encoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_H263,
.priv_data_size = sizeof(MpegEncContext),
.init = MPV_encode_init,
.encode = MPV_encode_picture,
.close = MPV_encode_end,
.init = ff_MPV_encode_init,
.encode = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
.priv_class = &h263_class,
@ -4110,9 +4110,9 @@ AVCodec ff_h263p_encoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_H263P,
.priv_data_size = sizeof(MpegEncContext),
.init = MPV_encode_init,
.encode = MPV_encode_picture,
.close = MPV_encode_end,
.init = ff_MPV_encode_init,
.encode = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.capabilities = CODEC_CAP_SLICE_THREADS,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
@ -4124,9 +4124,9 @@ AVCodec ff_msmpeg4v2_encoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_MSMPEG4V2,
.priv_data_size = sizeof(MpegEncContext),
.init = MPV_encode_init,
.encode = MPV_encode_picture,
.close = MPV_encode_end,
.init = ff_MPV_encode_init,
.encode = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
};
@ -4136,9 +4136,9 @@ AVCodec ff_msmpeg4v3_encoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_MSMPEG4V3,
.priv_data_size = sizeof(MpegEncContext),
.init = MPV_encode_init,
.encode = MPV_encode_picture,
.close = MPV_encode_end,
.init = ff_MPV_encode_init,
.encode = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
};
@ -4148,9 +4148,9 @@ AVCodec ff_wmv1_encoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_WMV1,
.priv_data_size = sizeof(MpegEncContext),
.init = MPV_encode_init,
.encode = MPV_encode_picture,
.close = MPV_encode_end,
.init = ff_MPV_encode_init,
.encode = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
};
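The encoder entries above differ only in their codec ids, pixel formats and capability flags; all of them now reuse the renamed ff_MPV_* entry points from mpegvideo_enc.c. A minimal sketch of that shared wiring, using only the fields visible in the hunks above ("ff_example_encoder" is a placeholder name, not part of the diff):

/* Illustrative sketch only: the registration pattern shared by ff_h263_encoder,
 * ff_h263p_encoder, ff_msmpeg4v2_encoder, ff_msmpeg4v3_encoder and ff_wmv1_encoder. */
AVCodec ff_example_encoder = {
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_WMV1,            /* any MpegEncContext-based codec id */
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_MPV_encode_init,       /* was MPV_encode_init */
    .encode         = ff_MPV_encode_picture,    /* was MPV_encode_picture */
    .close          = ff_MPV_encode_end,        /* was MPV_encode_end */
    .pix_fmts       = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
};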

@ -60,7 +60,7 @@ static uint32_t v2_dc_lum_table[512][2];
static uint32_t v2_dc_chroma_table[512][2];
/* vc1 externs */
extern const uint8_t wmv3_dc_scale_table[32];
extern const uint8_t ff_wmv3_dc_scale_table[32];
#include "msmpeg4data.h"
@ -138,8 +138,8 @@ static av_cold void common_init(MpegEncContext * s)
break;
case 3:
if(s->workaround_bugs){
s->y_dc_scale_table= old_ff_y_dc_scale_table;
s->c_dc_scale_table= wmv1_c_dc_scale_table;
s->y_dc_scale_table= ff_old_ff_y_dc_scale_table;
s->c_dc_scale_table= ff_wmv1_c_dc_scale_table;
} else{
s->y_dc_scale_table= ff_mpeg4_y_dc_scale_table;
s->c_dc_scale_table= ff_mpeg4_c_dc_scale_table;
@ -147,13 +147,13 @@ static av_cold void common_init(MpegEncContext * s)
break;
case 4:
case 5:
s->y_dc_scale_table= wmv1_y_dc_scale_table;
s->c_dc_scale_table= wmv1_c_dc_scale_table;
s->y_dc_scale_table= ff_wmv1_y_dc_scale_table;
s->c_dc_scale_table= ff_wmv1_c_dc_scale_table;
break;
#if CONFIG_VC1_DECODER
case 6:
s->y_dc_scale_table= wmv3_dc_scale_table;
s->c_dc_scale_table= wmv3_dc_scale_table;
s->y_dc_scale_table= ff_wmv3_dc_scale_table;
s->c_dc_scale_table= ff_wmv3_dc_scale_table;
break;
#endif
@ -161,10 +161,10 @@ static av_cold void common_init(MpegEncContext * s)
if(s->msmpeg4_version>=4){
ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , wmv1_scantable[1]);
ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, wmv1_scantable[2]);
ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, wmv1_scantable[3]);
ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , wmv1_scantable[0]);
ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_wmv1_scantable[1]);
ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_wmv1_scantable[2]);
ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_wmv1_scantable[3]);
ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_wmv1_scantable[0]);
}
//Note the default tables are set in common_init in mpegvideo.c
@ -259,10 +259,10 @@ av_cold void ff_msmpeg4_encode_init(MpegEncContext *s)
if (!init_done) {
/* init various encoding tables */
init_done = 1;
init_mv_table(&mv_tables[0]);
init_mv_table(&mv_tables[1]);
init_mv_table(&ff_mv_tables[0]);
init_mv_table(&ff_mv_tables[1]);
for(i=0;i<NB_RL_TABLES;i++)
init_rl(&rl_table[i], static_rl_table_store[i]);
ff_init_rl(&ff_rl_table[i], static_rl_table_store[i]);
for(i=0; i<NB_RL_TABLES; i++){
int level;
@ -271,7 +271,7 @@ av_cold void ff_msmpeg4_encode_init(MpegEncContext *s)
for(run=0; run<=MAX_RUN; run++){
int last;
for(last=0; last<2; last++){
rl_length[i][level][run][last]= get_size_of_code(s, &rl_table[ i], last, run, level, 0);
rl_length[i][level][run][last]= get_size_of_code(s, &ff_rl_table[ i], last, run, level, 0);
}
}
}
@ -347,7 +347,7 @@ static void find_best_tables(MpegEncContext * s)
}
/* write MSMPEG4 compatible frame header */
void msmpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
void ff_msmpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
{
find_best_tables(s);
@ -373,7 +373,7 @@ void msmpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
put_bits(&s->pb, 5, 0x16 + s->mb_height/s->slice_height);
if(s->msmpeg4_version==4){
msmpeg4_encode_ext_header(s);
ff_msmpeg4_encode_ext_header(s);
if(s->bit_rate>MBAC_BITRATE)
put_bits(&s->pb, 1, s->per_mb_rl_table);
}
@ -406,7 +406,7 @@ void msmpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
s->esc3_run_length= 0;
}
void msmpeg4_encode_ext_header(MpegEncContext * s)
void ff_msmpeg4_encode_ext_header(MpegEncContext * s)
{
put_bits(&s->pb, 5, s->avctx->time_base.den / s->avctx->time_base.num); //yes 29.97 -> 29
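The "29.97 -> 29" remark above refers to the integer truncation of the frame rate before it is written as a 5-bit field. A self-contained illustration with an assumed NTSC-style time base (values are hypothetical, for illustration only):

/* hypothetical illustration of the truncation noted in the comment above */
#include <stdio.h>

int main(void)
{
    int num = 1001, den = 30000;  /* assumed time base: 29.97 fps */
    int fps = den / num;          /* integer division: 30000 / 1001 == 29 */
    printf("%d\n", fps);          /* the value the code writes with put_bits(.., 5, ..) */
    return 0;
}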
@ -474,7 +474,7 @@ void ff_msmpeg4_encode_motion(MpegEncContext * s,
(unsigned)my >= 64)
av_log(s->avctx, AV_LOG_ERROR, "error mx=%d my=%d\n", mx, my);
#endif
mv = &mv_tables[s->mv_table_index];
mv = &ff_mv_tables[s->mv_table_index];
code = mv->table_mv_index[(mx << 6) | my];
put_bits(&s->pb,
@ -507,7 +507,7 @@ static void msmpeg4v2_encode_motion(MpegEncContext * s, int val)
if (val == 0) {
/* zero vector */
code = 0;
put_bits(&s->pb, mvtab[code][1], mvtab[code][0]);
put_bits(&s->pb, ff_mvtab[code][1], ff_mvtab[code][0]);
} else {
bit_size = s->f_code - 1;
range = 1 << bit_size;
@ -526,16 +526,16 @@ static void msmpeg4v2_encode_motion(MpegEncContext * s, int val)
code = (val >> bit_size) + 1;
bits = val & (range - 1);
put_bits(&s->pb, mvtab[code][1] + 1, (mvtab[code][0] << 1) | sign);
put_bits(&s->pb, ff_mvtab[code][1] + 1, (ff_mvtab[code][0] << 1) | sign);
if (bit_size > 0) {
put_bits(&s->pb, bit_size, bits);
}
}
}
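For reference, the code/bits split performed by msmpeg4v2_encode_motion() above can be reproduced standalone. A minimal sketch, assuming val is positive and already folded into range (the folding itself lies outside the hunk shown) and using hypothetical input values:

/* Illustrative sketch of the split used by msmpeg4v2_encode_motion() above. */
#include <stdio.h>

int main(void)
{
    int f_code   = 3;                      /* hypothetical f_code */
    int val      = 21;                     /* hypothetical folded motion value */
    int bit_size = f_code - 1;
    int range    = 1 << bit_size;
    int code     = (val >> bit_size) + 1;  /* selects the ff_mvtab entry */
    int bits     = val & (range - 1);      /* residual emitted with bit_size extra bits */

    printf("code=%d bits=%d range=%d\n", code, bits, range); /* code=6 bits=1 range=4 */
    return 0;
}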
void msmpeg4_encode_mb(MpegEncContext * s,
DCTELEM block[6][64],
int motion_x, int motion_y)
void ff_msmpeg4_encode_mb(MpegEncContext * s,
DCTELEM block[6][64],
int motion_x, int motion_y)
{
int cbp, coded_cbp, i;
int pred_x, pred_y;
@ -564,8 +564,8 @@ void msmpeg4_encode_mb(MpegEncContext * s,
if(s->msmpeg4_version<=2){
put_bits(&s->pb,
v2_mb_type[cbp&3][1],
v2_mb_type[cbp&3][0]);
ff_v2_mb_type[cbp&3][1],
ff_v2_mb_type[cbp&3][0]);
if((cbp&3) != 3) coded_cbp= cbp ^ 0x3C;
else coded_cbp= cbp;
@ -575,18 +575,18 @@ void msmpeg4_encode_mb(MpegEncContext * s,
s->misc_bits += get_bits_diff(s);
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
msmpeg4v2_encode_motion(s, motion_x - pred_x);
msmpeg4v2_encode_motion(s, motion_y - pred_y);
}else{
put_bits(&s->pb,
table_mb_non_intra[cbp + 64][1],
table_mb_non_intra[cbp + 64][0]);
ff_table_mb_non_intra[cbp + 64][1],
ff_table_mb_non_intra[cbp + 64][0]);
s->misc_bits += get_bits_diff(s);
/* motion vector */
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_msmpeg4_encode_motion(s, motion_x - pred_x,
motion_y - pred_y);
}
@ -617,13 +617,13 @@ void msmpeg4_encode_mb(MpegEncContext * s,
if(s->msmpeg4_version<=2){
if (s->pict_type == AV_PICTURE_TYPE_I) {
put_bits(&s->pb,
v2_intra_cbpc[cbp&3][1], v2_intra_cbpc[cbp&3][0]);
ff_v2_intra_cbpc[cbp&3][1], ff_v2_intra_cbpc[cbp&3][0]);
} else {
if (s->use_skip_mb_code)
put_bits(&s->pb, 1, 0); /* mb coded */
put_bits(&s->pb,
v2_mb_type[(cbp&3) + 4][1],
v2_mb_type[(cbp&3) + 4][0]);
ff_v2_mb_type[(cbp&3) + 4][1],
ff_v2_mb_type[(cbp&3) + 4][0]);
}
put_bits(&s->pb, 1, 0); /* no AC prediction yet */
put_bits(&s->pb,
@ -637,13 +637,13 @@ void msmpeg4_encode_mb(MpegEncContext * s,
if (s->use_skip_mb_code)
put_bits(&s->pb, 1, 0); /* mb coded */
put_bits(&s->pb,
table_mb_non_intra[cbp][1],
table_mb_non_intra[cbp][0]);
ff_table_mb_non_intra[cbp][1],
ff_table_mb_non_intra[cbp][0]);
}
put_bits(&s->pb, 1, 0); /* no AC prediction yet */
if(s->inter_intra_pred){
s->h263_aic_dir=0;
put_bits(&s->pb, table_inter_intra[s->h263_aic_dir][1], table_inter_intra[s->h263_aic_dir][0]);
put_bits(&s->pb, ff_table_inter_intra[s->h263_aic_dir][1], ff_table_inter_intra[s->h263_aic_dir][0]);
}
}
s->misc_bits += get_bits_diff(s);
@ -927,15 +927,15 @@ void ff_msmpeg4_encode_block(MpegEncContext * s, DCTELEM * block, int n)
msmpeg4_encode_dc(s, block[0], n, &dc_pred_dir);
i = 1;
if (n < 4) {
rl = &rl_table[s->rl_table_index];
rl = &ff_rl_table[s->rl_table_index];
} else {
rl = &rl_table[3 + s->rl_chroma_table_index];
rl = &ff_rl_table[3 + s->rl_chroma_table_index];
}
run_diff = s->msmpeg4_version>=4;
scantable= s->intra_scantable.permutated;
} else {
i = 0;
rl = &rl_table[3 + s->rl_table_index];
rl = &ff_rl_table[3 + s->rl_table_index];
if(s->msmpeg4_version<=2)
run_diff = 0;
else
@ -1134,7 +1134,7 @@ static int msmpeg4v12_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
cbp|= cbpy<<2;
if(s->msmpeg4_version==1 || (cbp&3) != 3) cbp^= 0x3C;
h263_pred_motion(s, 0, 0, &mx, &my);
ff_h263_pred_motion(s, 0, 0, &mx, &my);
mx= msmpeg4v2_decode_motion(s, mx, 1);
my= msmpeg4v2_decode_motion(s, my, 1);
@ -1220,7 +1220,7 @@ static int msmpeg4v34_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
s->rl_table_index = decode012(&s->gb);
s->rl_chroma_table_index = s->rl_table_index;
}
h263_pred_motion(s, 0, 0, &mx, &my);
ff_h263_pred_motion(s, 0, 0, &mx, &my);
if (ff_msmpeg4_decode_motion(s, &mx, &my) < 0)
return -1;
s->mv_dir = MV_DIR_FORWARD;
@ -1271,20 +1271,20 @@ av_cold int ff_msmpeg4_decode_init(AVCodecContext *avctx)
done = 1;
for(i=0;i<NB_RL_TABLES;i++) {
init_rl(&rl_table[i], static_rl_table_store[i]);
ff_init_rl(&ff_rl_table[i], static_rl_table_store[i]);
}
INIT_VLC_RL(rl_table[0], 642);
INIT_VLC_RL(rl_table[1], 1104);
INIT_VLC_RL(rl_table[2], 554);
INIT_VLC_RL(rl_table[3], 940);
INIT_VLC_RL(rl_table[4], 962);
INIT_VLC_RL(rl_table[5], 554);
mv = &mv_tables[0];
INIT_VLC_RL(ff_rl_table[0], 642);
INIT_VLC_RL(ff_rl_table[1], 1104);
INIT_VLC_RL(ff_rl_table[2], 554);
INIT_VLC_RL(ff_rl_table[3], 940);
INIT_VLC_RL(ff_rl_table[4], 962);
INIT_VLC_RL(ff_rl_table[5], 554);
mv = &ff_mv_tables[0];
INIT_VLC_STATIC(&mv->vlc, MV_VLC_BITS, mv->n + 1,
mv->table_mv_bits, 1, 1,
mv->table_mv_code, 2, 2, 3714);
mv = &mv_tables[1];
mv = &ff_mv_tables[1];
INIT_VLC_STATIC(&mv->vlc, MV_VLC_BITS, mv->n + 1,
mv->table_mv_bits, 1, 1,
mv->table_mv_code, 2, 2, 2694);
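As a reading aid for the calls above (the interpretation assumes the usual init_vlc() convention of (table, wrap, size) triples and is not part of the diff):

/* INIT_VLC_STATIC(&mv->vlc,                destination VLC
 *                 MV_VLC_BITS,             number of lookup bits
 *                 mv->n + 1,               number of codes
 *                 mv->table_mv_bits, 1, 1, code lengths: wrap 1, element size 1 (uint8_t)
 *                 mv->table_mv_code, 2, 2, codewords:    wrap 2, element size 2 (uint16_t)
 *                 2694);                   number of entries in the static VLC table */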
@ -1310,35 +1310,35 @@ av_cold int ff_msmpeg4_decode_init(AVCodecContext *avctx)
&v2_dc_chroma_table[0][0], 8, 4, 1506);
INIT_VLC_STATIC(&v2_intra_cbpc_vlc, V2_INTRA_CBPC_VLC_BITS, 4,
&v2_intra_cbpc[0][1], 2, 1,
&v2_intra_cbpc[0][0], 2, 1, 8);
&ff_v2_intra_cbpc[0][1], 2, 1,
&ff_v2_intra_cbpc[0][0], 2, 1, 8);
INIT_VLC_STATIC(&v2_mb_type_vlc, V2_MB_TYPE_VLC_BITS, 8,
&v2_mb_type[0][1], 2, 1,
&v2_mb_type[0][0], 2, 1, 128);
&ff_v2_mb_type[0][1], 2, 1,
&ff_v2_mb_type[0][0], 2, 1, 128);
INIT_VLC_STATIC(&v2_mv_vlc, V2_MV_VLC_BITS, 33,
&mvtab[0][1], 2, 1,
&mvtab[0][0], 2, 1, 538);
&ff_mvtab[0][1], 2, 1,
&ff_mvtab[0][0], 2, 1, 538);
INIT_VLC_STATIC(&ff_mb_non_intra_vlc[0], MB_NON_INTRA_VLC_BITS, 128,
&wmv2_inter_table[0][0][1], 8, 4,
&wmv2_inter_table[0][0][0], 8, 4, 1636);
&ff_wmv2_inter_table[0][0][1], 8, 4,
&ff_wmv2_inter_table[0][0][0], 8, 4, 1636);
INIT_VLC_STATIC(&ff_mb_non_intra_vlc[1], MB_NON_INTRA_VLC_BITS, 128,
&wmv2_inter_table[1][0][1], 8, 4,
&wmv2_inter_table[1][0][0], 8, 4, 2648);
&ff_wmv2_inter_table[1][0][1], 8, 4,
&ff_wmv2_inter_table[1][0][0], 8, 4, 2648);
INIT_VLC_STATIC(&ff_mb_non_intra_vlc[2], MB_NON_INTRA_VLC_BITS, 128,
&wmv2_inter_table[2][0][1], 8, 4,
&wmv2_inter_table[2][0][0], 8, 4, 1532);
&ff_wmv2_inter_table[2][0][1], 8, 4,
&ff_wmv2_inter_table[2][0][0], 8, 4, 1532);
INIT_VLC_STATIC(&ff_mb_non_intra_vlc[3], MB_NON_INTRA_VLC_BITS, 128,
&wmv2_inter_table[3][0][1], 8, 4,
&wmv2_inter_table[3][0][0], 8, 4, 2488);
&ff_wmv2_inter_table[3][0][1], 8, 4,
&ff_wmv2_inter_table[3][0][0], 8, 4, 2488);
INIT_VLC_STATIC(&ff_msmp4_mb_i_vlc, MB_INTRA_VLC_BITS, 64,
&ff_msmp4_mb_i_table[0][1], 4, 2,
&ff_msmp4_mb_i_table[0][0], 4, 2, 536);
INIT_VLC_STATIC(&ff_inter_intra_vlc, INTER_INTRA_VLC_BITS, 4,
&table_inter_intra[0][1], 2, 1,
&table_inter_intra[0][0], 2, 1, 8);
&ff_table_inter_intra[0][1], 2, 1,
&ff_table_inter_intra[0][0], 2, 1, 8);
}
switch(s->msmpeg4_version){
@ -1363,7 +1363,7 @@ av_cold int ff_msmpeg4_decode_init(AVCodecContext *avctx)
return 0;
}
int msmpeg4_decode_picture_header(MpegEncContext * s)
int ff_msmpeg4_decode_picture_header(MpegEncContext * s)
{
int code;
@ -1430,7 +1430,7 @@ int msmpeg4_decode_picture_header(MpegEncContext * s)
s->dc_table_index = get_bits1(&s->gb);
break;
case 4:
msmpeg4_decode_ext_header(s, (2+5+5+17+7)/8);
ff_msmpeg4_decode_ext_header(s, (2+5+5+17+7)/8);
if(s->bit_rate > MBAC_BITRATE) s->per_mb_rl_table= get_bits1(&s->gb);
else s->per_mb_rl_table= 0;
@ -1517,7 +1517,7 @@ int msmpeg4_decode_picture_header(MpegEncContext * s)
return 0;
}
int msmpeg4_decode_ext_header(MpegEncContext * s, int buf_size)
int ff_msmpeg4_decode_ext_header(MpegEncContext * s, int buf_size)
{
int left= buf_size*8 - get_bits_count(&s->gb);
int length= s->msmpeg4_version>=3 ? 17 : 16;
@ -1627,13 +1627,13 @@ int ff_msmpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
else return -1;
}
if (n < 4) {
rl = &rl_table[s->rl_table_index];
rl = &ff_rl_table[s->rl_table_index];
if(level > 256*s->y_dc_scale){
av_log(s->avctx, AV_LOG_ERROR, "dc overflow+ L qscale: %d//\n", s->qscale);
if(!s->inter_intra_pred) return -1;
}
} else {
rl = &rl_table[3 + s->rl_chroma_table_index];
rl = &ff_rl_table[3 + s->rl_chroma_table_index];
if(level > 256*s->c_dc_scale){
av_log(s->avctx, AV_LOG_ERROR, "dc overflow+ C qscale: %d//\n", s->qscale);
if(!s->inter_intra_pred) return -1;
@ -1659,7 +1659,7 @@ int ff_msmpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
qmul = s->qscale << 1;
qadd = (s->qscale - 1) | 1;
i = -1;
rl = &rl_table[3 + s->rl_table_index];
rl = &ff_rl_table[3 + s->rl_table_index];
if(s->msmpeg4_version==2)
run_diff = 0;
@ -1829,7 +1829,7 @@ int ff_msmpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
}
not_coded:
if (s->mb_intra) {
mpeg4_pred_ac(s, block, n, dc_pred_dir);
ff_mpeg4_pred_ac(s, block, n, dc_pred_dir);
if (s->ac_pred) {
i = 63; /* XXX: not optimal */
}
@ -1846,7 +1846,7 @@ int ff_msmpeg4_decode_motion(MpegEncContext * s,
MVTable *mv;
int code, mx, my;
mv = &mv_tables[s->mv_table_index];
mv = &ff_mv_tables[s->mv_table_index];
code = get_vlc2(&s->gb, mv->vlc.table, MV_VLC_BITS, 2);
if (code < 0){

@ -54,7 +54,7 @@ const uint16_t ff_msmp4_mb_i_table[64][2] = {
};
/* non intra picture macroblock coded block pattern + mb type */
const uint32_t table_mb_non_intra[128][2] = {
const uint32_t ff_table_mb_non_intra[128][2] = {
{ 0x40, 7 },{ 0x13c9, 13 },{ 0x9fd, 12 },{ 0x1fc, 15 },
{ 0x9fc, 12 },{ 0xa83, 18 },{ 0x12d34, 17 },{ 0x83bc, 16 },
{ 0x83a, 12 },{ 0x7f8, 17 },{ 0x3fd, 16 },{ 0x3ff, 16 },
@ -592,15 +592,15 @@ static const int8_t table4_run[168] = {
29, 30, 31, 32, 33, 34, 35, 36,
};
extern const uint16_t inter_vlc[103][2];
extern const int8_t inter_level[102];
extern const int8_t inter_run[102];
extern const uint16_t ff_inter_vlc[103][2];
extern const int8_t ff_inter_level[102];
extern const int8_t ff_inter_run[102];
extern const uint16_t ff_mpeg4_intra_vlc[103][2];
extern const int8_t ff_mpeg4_intra_level[102];
extern const int8_t ff_mpeg4_intra_run[102];
RLTable rl_table[NB_RL_TABLES] = {
RLTable ff_rl_table[NB_RL_TABLES] = {
/* intra luminance tables */
/* low motion */
{
@ -647,9 +647,9 @@ RLTable rl_table[NB_RL_TABLES] = {
{
102,
58,
inter_vlc,
inter_run,
inter_level,
ff_inter_vlc,
ff_inter_run,
ff_inter_level,
},
};
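For orientation, the initializer shown above fills the leading RLTable fields in declaration order; the field names below are taken from rl.h and are noted here only as a reading aid:

/* {
 *     102,             n:     number of entries of ff_inter_vlc minus one
 *     58,              last:  number of values with last == 0
 *     ff_inter_vlc,    table_vlc
 *     ff_inter_run,    table_run
 *     ff_inter_level,  table_level
 * } */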
@ -1784,7 +1784,7 @@ static const uint8_t table1_mvy[1099] = {
34, 28, 21,
};
MVTable mv_tables[2] = {
MVTable ff_mv_tables[2] = {
{
1099,
table0_mv_code,
@ -1801,30 +1801,30 @@ MVTable mv_tables[2] = {
}
};
const uint8_t v2_mb_type[8][2] = {
const uint8_t ff_v2_mb_type[8][2] = {
{1, 1}, {0 , 2}, {3 , 3}, {9 , 5},
{5, 4}, {0x21, 7}, {0x20, 7}, {0x11, 6},
};
const uint8_t v2_intra_cbpc[4][2] = {
const uint8_t ff_v2_intra_cbpc[4][2] = {
{1, 1}, {0, 3}, {1, 3}, {1, 2},
};
const uint8_t wmv1_y_dc_scale_table[32]={
const uint8_t ff_wmv1_y_dc_scale_table[32]={
// 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
0, 8, 8, 8, 8, 8, 9, 9,10,10,11,11,12,12,13,13,14,14,15,15,16,16,17,17,18,18,19,19,20,20,21,21
};
const uint8_t wmv1_c_dc_scale_table[32]={
const uint8_t ff_wmv1_c_dc_scale_table[32]={
// 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
0, 8, 8, 8, 8, 9, 9,10,10,11,11,12,12,13,13,14,14,15,15,16,16,17,17,18,18,19,19,20,20,21,21,22
};
const uint8_t old_ff_y_dc_scale_table[32]={
const uint8_t ff_old_ff_y_dc_scale_table[32]={
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
0, 8, 8, 8, 8,10,12,14,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39
};
const uint8_t wmv1_scantable[WMV1_SCANTABLE_COUNT][64]={
const uint8_t ff_wmv1_scantable[WMV1_SCANTABLE_COUNT][64]={
{
0x00, 0x08, 0x01, 0x02, 0x09, 0x10, 0x18, 0x11,
0x0A, 0x03, 0x04, 0x0B, 0x12, 0x19, 0x20, 0x28,
@ -1867,7 +1867,7 @@ const uint8_t wmv1_scantable[WMV1_SCANTABLE_COUNT][64]={
}
};
const uint8_t table_inter_intra[4][2]={
const uint8_t ff_table_inter_intra[4][2]={
{0,1} /*Luma-Left Chroma-Left*/,
{2,2} /*Luma-Top Chroma-Left*/,
{6,3} /*luma-Left Chroma-Top */,
@ -1979,21 +1979,21 @@ static const uint32_t table_mb_non_intra4[128][2] = {
{0x000011, 5}, {0x0001AC, 9}, {0x0000F3, 8}, {0x000439, 11},
};
const uint32_t (* const wmv2_inter_table[WMV2_INTER_CBP_TABLE_COUNT])[2]={
const uint32_t (* const ff_wmv2_inter_table[WMV2_INTER_CBP_TABLE_COUNT])[2]={
table_mb_non_intra2,
table_mb_non_intra3,
table_mb_non_intra4,
table_mb_non_intra,
ff_table_mb_non_intra,
};
const uint8_t wmv2_scantableA[64]={
const uint8_t ff_wmv2_scantableA[64]={
0x00, 0x01, 0x02, 0x08, 0x03, 0x09, 0x0A, 0x10,
0x04, 0x0B, 0x11, 0x18, 0x12, 0x0C, 0x05, 0x13,
0x19, 0x0D, 0x14, 0x1A, 0x1B, 0x06, 0x15, 0x1C,
0x0E, 0x16, 0x1D, 0x07, 0x1E, 0x0F, 0x17, 0x1F,
};
const uint8_t wmv2_scantableB[64]={
const uint8_t ff_wmv2_scantableB[64]={
0x00, 0x08, 0x01, 0x10, 0x09, 0x18, 0x11, 0x02,
0x20, 0x0A, 0x19, 0x28, 0x12, 0x30, 0x21, 0x1A,
0x38, 0x29, 0x22, 0x03, 0x31, 0x39, 0x0B, 0x2A,

@ -54,23 +54,23 @@ extern const uint16_t ff_msmp4_mb_i_table[64][2];
#define WMV1_SCANTABLE_COUNT 4
extern const uint8_t wmv1_scantable[WMV1_SCANTABLE_COUNT][64];
extern const uint8_t ff_wmv1_scantable[WMV1_SCANTABLE_COUNT][64];
#define NB_RL_TABLES 6
extern RLTable rl_table[NB_RL_TABLES];
extern RLTable ff_rl_table[NB_RL_TABLES];
extern const uint8_t wmv1_y_dc_scale_table[32];
extern const uint8_t wmv1_c_dc_scale_table[32];
extern const uint8_t old_ff_y_dc_scale_table[32];
extern const uint8_t ff_wmv1_y_dc_scale_table[32];
extern const uint8_t ff_wmv1_c_dc_scale_table[32];
extern const uint8_t ff_old_ff_y_dc_scale_table[32];
extern MVTable mv_tables[2];
extern MVTable ff_mv_tables[2];
extern const uint8_t v2_mb_type[8][2];
extern const uint8_t v2_intra_cbpc[4][2];
extern const uint8_t ff_v2_mb_type[8][2];
extern const uint8_t ff_v2_intra_cbpc[4][2];
extern const uint32_t table_mb_non_intra[128][2];
extern const uint8_t table_inter_intra[4][2];
extern const uint32_t ff_table_mb_non_intra[128][2];
extern const uint8_t ff_table_inter_intra[4][2];
extern const uint32_t ff_table0_dc_lum[120][2];
extern const uint32_t ff_table1_dc_lum[120][2];
@ -78,9 +78,9 @@ extern const uint32_t ff_table0_dc_chroma[120][2];
extern const uint32_t ff_table1_dc_chroma[120][2];
#define WMV2_INTER_CBP_TABLE_COUNT 4
extern const uint32_t (* const wmv2_inter_table[WMV2_INTER_CBP_TABLE_COUNT])[2];
extern const uint32_t (* const ff_wmv2_inter_table[WMV2_INTER_CBP_TABLE_COUNT])[2];
extern const uint8_t wmv2_scantableA[64];
extern const uint8_t wmv2_scantableB[64];
extern const uint8_t ff_wmv2_scantableA[64];
extern const uint8_t ff_wmv2_scantableB[64];
#endif /* AVCODEC_MSMPEG4DATA_H */
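After the rename, code outside msmpeg4data.c reaches these tables only through the ff_-prefixed externs declared above. A minimal, hypothetical consumer (both helpers are made up for illustration and presuppose the libavcodec build environment):

#include <stdint.h>
#include "msmpeg4data.h"

/* hypothetical helpers, not part of the diff */
static const uint8_t *pick_scantable(int idx)
{
    return ff_wmv1_scantable[idx & (WMV1_SCANTABLE_COUNT - 1)];
}

static const uint32_t (*pick_cbp_table(int idx))[2]
{
    return ff_wmv2_inter_table[idx & (WMV2_INTER_CBP_TABLE_COUNT - 1)];
}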

@ -122,7 +122,7 @@ static av_cold int decode_init(AVCodecContext * avctx) {
av_lfg_init(&s->random_state, 0);
ff_mdct_init(&s->imdct_ctx, 8, 1, 1.0);
dsputil_init(&s->dsp, avctx);
ff_dsputil_init(&s->dsp, avctx);
if (avctx->request_sample_fmt == AV_SAMPLE_FMT_FLT) {
s->scale_bias = 1.0/(32768*8);

@ -148,7 +148,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
avctx->frame_size = NELLY_SAMPLES;
s->avctx = avctx;
ff_mdct_init(&s->mdct_ctx, 8, 0, 32768.0);
dsputil_init(&s->dsp, avctx);
ff_dsputil_init(&s->dsp, avctx);
/* Generate overlap window */
ff_sine_window_init(ff_sine_128, 128);

@ -125,10 +125,10 @@ static int codec_reinit(AVCodecContext *avctx, int width, int height, int qualit
av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
return AVERROR(ENOMEM);
}
rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq);
ff_rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq);
return 1;
} else if (quality != c->quality)
rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq);
ff_rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq);
return 0;
}
@ -160,7 +160,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
ret = get_quant(avctx, c, buf, buf_size);
if (ret < 0)
return ret;
rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq);
ff_rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq);
return orig_size;
}
@ -238,7 +238,7 @@ retry:
}
case NUV_RTJPEG_IN_LZO:
case NUV_RTJPEG: {
rtjpeg_decode_frame_yuv420(&c->rtj, &c->pic, buf, buf_size);
ff_rtjpeg_decode_frame_yuv420(&c->rtj, &c->pic, buf, buf_size);
break;
}
case NUV_BLACK: {
@ -272,7 +272,7 @@ static av_cold int decode_init(AVCodecContext *avctx) {
c->codec_frameheader = avctx->codec_tag == MKTAG('R', 'J', 'P', 'G');
if (avctx->extradata_size)
get_quant(avctx, c, avctx->extradata, avctx->extradata_size);
dsputil_init(&c->dsp, avctx);
ff_dsputil_init(&c->dsp, avctx);
if (codec_reinit(avctx, avctx->width, avctx->height, -1) < 0)
return 1;
return 0;

@ -410,7 +410,7 @@ static av_cold int png_enc_init(AVCodecContext *avctx){
avcodec_get_frame_defaults(&s->picture);
avctx->coded_frame= &s->picture;
dsputil_init(&s->dsp, avctx);
ff_dsputil_init(&s->dsp, avctx);
s->filter_type = av_clip(avctx->prediction_method, PNG_FILTER_VALUE_NONE, PNG_FILTER_VALUE_MIXED);
if(avctx->pix_fmt == PIX_FMT_MONOBLACK)

@ -609,7 +609,7 @@ static void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
}
/* next one assumes that ((line_size % 16) == 0) */
void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
void ff_put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
register vector unsigned char pixelsv1, pixelsv2;
register vector unsigned char pixelsv1B, pixelsv2B;
@ -651,7 +651,7 @@ void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size,
/* next one assumes that ((line_size % 16) == 0) */
#define op_avg(a,b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
void ff_avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
register vector unsigned char perm = vec_lvsl(0, pixels);
@ -1371,7 +1371,7 @@ static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int l
}
}
void dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx)
void ff_dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx)
{
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
@ -1391,10 +1391,10 @@ void dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx)
if (!high_bit_depth) {
c->get_pixels = get_pixels_altivec;
c->clear_block = clear_block_altivec;
c->put_pixels_tab[0][0] = put_pixels16_altivec;
c->put_pixels_tab[0][0] = ff_put_pixels16_altivec;
/* the two functions do the same thing, so use the same code */
c->put_no_rnd_pixels_tab[0][0] = put_pixels16_altivec;
c->avg_pixels_tab[0][0] = avg_pixels16_altivec;
c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_altivec;
c->avg_pixels_tab[0][0] = ff_avg_pixels16_altivec;
c->avg_pixels_tab[1][0] = avg_pixels8_altivec;
c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec;
c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec;
