Merge remote-tracking branch 'qatar/master'

* qatar/master: (71 commits)
  movenc: Allow writing to a non-seekable output if using empty moov
  movenc: Support adding isml (smooth streaming live) metadata
  libavcodec: Don't crash in avcodec_encode_audio if time_base isn't set
  sunrast: Document the different Sun Raster file format types.
  sunrast: Add a check for experimental type.
  libspeexenc: use AVSampleFormat instead of deprecated/removed SampleFormat
  lavf: remove disabled FF_API_SET_PTS_INFO cruft
  lavf: remove disabled FF_API_OLD_INTERRUPT_CB cruft
  lavf: remove disabled FF_API_REORDER_PRIVATE cruft
  lavf: remove disabled FF_API_SEEK_PUBLIC cruft
  lavf: remove disabled FF_API_STREAM_COPY cruft
  lavf: remove disabled FF_API_PRELOAD cruft
  lavf: remove disabled FF_API_NEW_STREAM cruft
  lavf: remove disabled FF_API_RTSP_URL_OPTIONS cruft
  lavf: remove disabled FF_API_MUXRATE cruft
  lavf: remove disabled FF_API_FILESIZE cruft
  lavf: remove disabled FF_API_TIMESTAMP cruft
  lavf: remove disabled FF_API_LOOP_OUTPUT cruft
  lavf: remove disabled FF_API_LOOP_INPUT cruft
  lavf: remove disabled FF_API_AVSTREAM_QUALITY cruft
  ...

Conflicts:
	doc/APIchanges
	libavcodec/8bps.c
	libavcodec/avcodec.h
	libavcodec/libx264.c
	libavcodec/mjpegbdec.c
	libavcodec/options.c
	libavcodec/sunrast.c
	libavcodec/utils.c
	libavcodec/version.h
	libavcodec/x86/h264_deblock.asm
	libavdevice/libdc1394.c
	libavdevice/v4l2.c
	libavformat/avformat.h
	libavformat/avio.c
	libavformat/avio.h
	libavformat/aviobuf.c
	libavformat/dv.c
	libavformat/mov.c
	libavformat/utils.c
	libavformat/version.h
	libavformat/wtv.c
	libavutil/Makefile
	libavutil/file.c
	libswscale/x86/input.asm
	libswscale/x86/swscale_mmx.c
	libswscale/x86/swscale_template.c
	tests/ref/lavf/ffm

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit e37f161e66
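
Most of the "remove disabled FF_API_* cruft" commits in the list above delete compatibility code whose version guard had already been switched off. As a rough, hedged sketch of that pattern (FF_API_FOO, the function name and the cutoff version are placeholders, not names from this tree):

    /* version.h: the guard is tied to the next major version bump */
    #ifndef FF_API_FOO
    #define FF_API_FOO (LIBAVCODEC_VERSION_MAJOR < 54)
    #endif

    /* public header: the deprecated API only exists while the guard is on */
    #if FF_API_FOO
    attribute_deprecated int avcodec_do_foo_old(void);
    #endif

Once the major version passes the cutoff, the guard evaluates to 0 and the dead #if blocks — like the FF_API_* sections removed from avcodec.h further down — can be deleted outright.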
Changed files (lines changed per file; the list is truncated):
 cmdutils.c | 27
 configure | 15
 doc/APIchanges | 4
 ffmpeg.c | 17
 ffplay.c | 2
 libavcodec/8bps.c | 251
 libavcodec/Makefile | 2
 libavcodec/ac3dec.c | 5
 libavcodec/audioconvert.c | 34
 libavcodec/audioconvert.h | 40
 libavcodec/avcodec.h | 507
 libavcodec/flacenc.c | 68
 libavcodec/frwu.c | 2
 libavcodec/g723_1.c | 4
 libavcodec/h264.c | 2
 libavcodec/imgconvert.c | 56
 libavcodec/libaacplus.c | 2
 libavcodec/libmp3lame.c | 3
 libavcodec/libspeexenc.c | 2
 libavcodec/libx264.c | 65
 libavcodec/libxavs.c | 36
 libavcodec/mjpegbdec.c | 6
 libavcodec/mjpegdec.c | 4
 libavcodec/mpeg12enc.c | 7
 libavcodec/mpeg4videodec.c | 2
 libavcodec/mpegaudiodec.c | 21
 libavcodec/mpegaudiodec_float.c | 16
 libavcodec/mpegvideo_enc.c | 62
 libavcodec/opt.h | 34
 libavcodec/options.c | 123
 libavcodec/sunrast.c | 21
 libavcodec/tiffenc.c | 16
 libavcodec/utils.c | 47
 libavcodec/vc1dec.c | 1
 libavcodec/version.h | 79
 libavcodec/x86/ac3dsp.asm | 10
 libavcodec/x86/dct32_sse.asm | 4
 libavcodec/x86/diracdsp_yasm.asm | 4
 libavcodec/x86/dsputil_yasm.asm | 54
 libavcodec/x86/dsputilenc_yasm.asm | 4
 libavcodec/x86/fft_mmx.asm | 28
 libavcodec/x86/fmtconvert.asm | 10
 libavcodec/x86/h264_chromamc.asm | 16
 libavcodec/x86/h264_chromamc_10bit.asm | 4
 libavcodec/x86/h264_deblock.asm | 30
 libavcodec/x86/h264_deblock_10bit.asm | 12
 libavcodec/x86/h264_idct.asm | 60
 libavcodec/x86/h264_idct_10bit.asm | 26
 libavcodec/x86/h264_intrapred.asm | 20
 libavcodec/x86/h264_intrapred_10bit.asm | 26
 libavcodec/x86/h264_qpel_10bit.asm | 8
 libavcodec/x86/h264_weight.asm | 4
 libavcodec/x86/h264_weight_10bit.asm | 2
 libavcodec/x86/imdct36_sse.asm | 8
 libavcodec/x86/proresdsp.asm | 2
 libavcodec/x86/vp3dsp.asm | 18
 libavcodec/x86/vp56dsp.asm | 2
 libavdevice/alsa-audio-dec.c | 3
 libavdevice/bktr.c | 2
 libavdevice/dshow.c | 6
 libavdevice/dv1394.c | 2
 libavdevice/fbdev.c | 3
 libavdevice/jack_audio.c | 2
 libavdevice/lavfi.c | 4
 libavdevice/libcdio.c | 2
 libavdevice/libdc1394.c | 414
 libavdevice/openal-dec.c | 2
 libavdevice/oss_audio.c | 2
 libavdevice/pulse.c | 3
 libavdevice/sndio_dec.c | 3
 libavdevice/v4l2.c | 6
 libavdevice/vfwcap.c | 2
 libavdevice/x11grab.c | 2
 libavfilter/libmpcodecs/mp_image.h | 2
 libavformat/4xm.c | 3
 libavformat/aacdec.c | 3
 libavformat/act.c | 3
 libavformat/adxdec.c | 2
 libavformat/aea.c | 3
 libavformat/aiffdec.c | 5
 libavformat/amr.c | 3
 libavformat/anm.c | 3
 libavformat/apc.c | 2
 libavformat/ape.c | 2
 libavformat/applehttp.c | 2
 libavformat/asfdec.c | 2
 libavformat/assdec.c | 2
 libavformat/au.c | 3
 libavformat/avformat.h | 391
 libavformat/avidec.c | 4
 libavformat/avio.c | 132
 libavformat/avio.h | 270
 libavformat/aviobuf.c | 284
 libavformat/avisynth.c | 2
 libavformat/avs.c | 2
 libavformat/bethsoftvid.c | 3
 libavformat/bfi.c | 2
 libavformat/bink.c | 2
 libavformat/bintext.c | 45
 libavformat/bit.c | 2
 (Some files were not shown because too many files have changed in this diff.)

@ -250,14 +250,12 @@ int parse_option(void *optctx, const char *opt, const char *arg,
if (!po->name && opt[0] == 'n' && opt[1] == 'o') {
/* handle 'no' bool option */
po = find_option(options, opt + 2);
if (!(po->name && (po->flags & OPT_BOOL)))
goto unknown_opt;
bool_val = 0;
if ((po->name && (po->flags & OPT_BOOL)))
bool_val = 0;
}
if (!po->name)
po = find_option(options, "default");
if (!po->name) {
unknown_opt:
av_log(NULL, AV_LOG_ERROR, "Unrecognized option '%s'\n", opt);
return AVERROR(EINVAL);
}
@ -852,21 +850,16 @@ int opt_bsfs(const char *opt, const char *arg)
int opt_protocols(const char *opt, const char *arg)
{
URLProtocol *up=NULL;
void *opaque = NULL;
const char *name;
printf("Supported file protocols:\n"
"I.. = Input supported\n"
".O. = Output supported\n"
"..S = Seek supported\n"
"FLAGS NAME\n"
"----- \n");
while((up = av_protocol_next(up)))
printf("%c%c%c %s\n",
up->url_read ? 'I' : '.',
up->url_write ? 'O' : '.',
up->url_seek ? 'S' : '.',
up->name);
return 0;
"Input:\n");
while ((name = avio_enum_protocols(&opaque, 0)))
printf("%s\n", name);
printf("Output:\n");
while ((name = avio_enum_protocols(&opaque, 1)))
printf("%s\n", name);
}
int opt_filters(const char *opt, const char *arg)
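
The cmdutils.c hunk above swaps direct iteration over URLProtocol for the public avio_enum_protocols() enumerator. A minimal sketch of that call pattern, with error handling trimmed (list_protocols is an illustrative name):

    #include <stdio.h>
    #include <libavformat/avio.h>

    /* Print either the input (output=0) or output (output=1) protocols. */
    static void list_protocols(int output)
    {
        void *opaque = NULL;
        const char *name;
        while ((name = avio_enum_protocols(&opaque, output)))
            printf("%s\n", name);
    }

The opaque pointer holds the iterator state; passing it back in on each call advances the enumeration.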

configure

@ -563,7 +563,8 @@ print_config_mak(){
}
print_config_asm(){
enabled $1 && echo "%define $2"
enabled $1 && v=1 || v=0
echo "%define $2 $v"
}
print_config(){
@ -1190,6 +1191,8 @@ HAVE_LIST="
isatty
kbhit
ldbrx
libdc1394_1
libdc1394_2
llrint
llrintf
local_aligned_16
@ -3130,7 +3133,6 @@ enabled libass && require_pkg_config libass ass/ass.h ass_library_init
enabled libcelt && require libcelt celt/celt.h celt_decode -lcelt0 &&
{ check_lib celt/celt.h celt_decoder_create_custom -lcelt0 ||
die "ERROR: libcelt version must be >= 0.11.0."; }
enabled libdc1394 && require_pkg_config libdc1394-2 dc1394/dc1394.h dc1394_new
enabled libdirac && require_pkg_config dirac \
"libdirac_decoder/dirac_parser.h libdirac_encoder/dirac_encoder.h" \
"dirac_decoder_init dirac_encoder_init"
@ -3178,6 +3180,15 @@ enabled openssl && { check_lib openssl/ssl.h SSL_library_init -lssl -lcrypto
check_lib openssl/ssl.h SSL_library_init -lssl -lcrypto -lws2_32 -lgdi32 ||
die "ERROR: openssl not found"; }
# libdc1394 check
if enabled libdc1394; then
{ check_lib dc1394/dc1394.h dc1394_new -ldc1394 -lraw1394 &&
enable libdc1394_2; } ||
{ check_lib libdc1394/dc1394_control.h dc1394_create_handle -ldc1394_control -lraw1394 &&
enable libdc1394_1; } ||
die "ERROR: No version of libdc1394 found "
fi
SDL_CONFIG="${cross_prefix}sdl-config"
if check_pkg_config sdl SDL_version.h SDL_Linked_Version; then
check_cpp_condition SDL.h "(SDL_MAJOR_VERSION<<16 | SDL_MINOR_VERSION<<8 | SDL_PATCHLEVEL) >= 0x010201" $sdl_cflags &&

@ -2,10 +2,10 @@ Never assume the API of libav* to be stable unless at least 1 month has passed
since the last major version increase.
The last version increases were:
libavcodec: 2011-04-18
libavcodec: 2012-01-27
libavdevice: 2011-04-18
libavfilter: 2011-04-18
libavformat: 2011-04-18
libavformat: 2012-01-27
libpostproc: 2011-04-18
libswscale: 2011-06-20
libavutil: 2011-04-18

@ -133,8 +133,6 @@ static int video_discard = 0;
static int same_quant = 0;
static int do_deinterlace = 0;
static int intra_dc_precision = 8;
static int loop_input = 0;
static int loop_output = AVFMT_NOOUTPUTLOOP;
static int qp_hist = 0;
static int intra_only = 0;
static const char *video_codec_name = NULL;
@ -3690,14 +3688,6 @@ static int opt_input_file(OptionsContext *o, const char *opt, const char *filena
ic->flags |= AVFMT_FLAG_NONBLOCK;
ic->interrupt_callback = int_cb;
if (loop_input) {
av_log(NULL, AV_LOG_WARNING,
"-loop_input is deprecated, use -loop 1\n"
"Note, both loop options only work with -f image2\n"
);
ic->loop_input = loop_input;
}
/* open the input file with generic avformat function */
err = avformat_open_input(&ic, filename, file_iformat, &format_opts);
if (err < 0) {
@ -4478,11 +4468,6 @@ static void opt_output_file(void *optctx, const char *filename)
}
oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
if (loop_output >= 0) {
av_log(NULL, AV_LOG_WARNING, "-loop_output is deprecated, use -loop\n");
oc->loop_output = loop_output;
}
/* copy metadata */
for (i = 0; i < o->nb_metadata_map; i++) {
char *p;
@ -4980,8 +4965,6 @@ static const OptionDef options[] = {
{ "hex", OPT_BOOL | OPT_EXPERT, {(void*)&do_hex_dump},
"when dumping packets, also dump the payload" },
{ "re", OPT_BOOL | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(rate_emu)}, "read input at native frame rate", "" },
{ "loop_input", OPT_BOOL | OPT_EXPERT, {(void*)&loop_input}, "deprecated, use -loop" },
{ "loop_output", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&loop_output}, "deprecated, use -loop", "" },
{ "target", HAS_ARG | OPT_FUNC2, {(void*)opt_target}, "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" },
{ "vsync", HAS_ARG | OPT_EXPERT, {(void*)opt_vsync}, "video sync method", "" },
{ "async", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&audio_sync_method}, "audio sync method", "" },

@ -1545,7 +1545,7 @@ static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
AVFilterContext *ctx = codec->opaque;
AVFilterBufferRef *ref;
int perms = AV_PERM_WRITE;
int i, w, h, stride[4];
int i, w, h, stride[AV_NUM_DATA_POINTERS];
unsigned edge;
int pixel_size;

@ -38,20 +38,20 @@
#include "avcodec.h"
static const enum PixelFormat pixfmt_rgb24[] = {PIX_FMT_BGR24, PIX_FMT_RGB32, PIX_FMT_NONE};
static const enum PixelFormat pixfmt_rgb24[] = {
PIX_FMT_BGR24, PIX_FMT_RGB32, PIX_FMT_NONE };
/*
* Decoder context
*/
typedef struct EightBpsContext {
AVCodecContext *avctx;
AVFrame pic;
AVCodecContext *avctx;
AVFrame pic;
unsigned char planes;
unsigned char planemap[4];
unsigned char planes;
unsigned char planemap[4];
uint32_t pal[256];
uint32_t pal[256];
} EightBpsContext;
@ -60,87 +60,90 @@ typedef struct EightBpsContext {
* Decode a frame
*
*/
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
static int decode_frame(AVCodecContext *avctx, void *data,
int *data_size, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
EightBpsContext * const c = avctx->priv_data;
const unsigned char *encoded = buf;
unsigned char *pixptr, *pixptr_end;
unsigned int height = avctx->height; // Real image height
unsigned int dlen, p, row;
const unsigned char *lp, *dp;
unsigned char count;
unsigned int planes = c->planes;
unsigned char *planemap = c->planemap;
if(c->pic.data[0])
avctx->release_buffer(avctx, &c->pic);
c->pic.reference = 0;
c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
if(avctx->get_buffer(avctx, &c->pic) < 0){
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
/* Set data pointer after line lengths */
dp = encoded + planes * (height << 1);
for (p = 0; p < planes; p++) {
/* Lines length pointer for this plane */
lp = encoded + p * (height << 1);
/* Decode a plane */
for(row = 0; row < height; row++) {
pixptr = c->pic.data[0] + row * c->pic.linesize[0] + planemap[p];
pixptr_end = pixptr + c->pic.linesize[0];
dlen = av_be2ne16(*(const unsigned short *)(lp+row*2));
/* Decode a row of this plane */
while(dlen > 0) {
if(dp + 1 >= buf+buf_size) return -1;
if ((count = *dp++) <= 127) {
count++;
dlen -= count + 1;
if (pixptr + count * planes > pixptr_end)
break;
if(dp + count > buf+buf_size) return -1;
while(count--) {
*pixptr = *dp++;
pixptr += planes;
}
} else {
count = 257 - count;
if (pixptr + count * planes > pixptr_end)
break;
while(count--) {
*pixptr = *dp;
pixptr += planes;
}
dp++;
dlen -= 2;
}
}
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
EightBpsContext * const c = avctx->priv_data;
const unsigned char *encoded = buf;
unsigned char *pixptr, *pixptr_end;
unsigned int height = avctx->height; // Real image height
unsigned int dlen, p, row;
const unsigned char *lp, *dp;
unsigned char count;
unsigned int planes = c->planes;
unsigned char *planemap = c->planemap;
if (c->pic.data[0])
avctx->release_buffer(avctx, &c->pic);
c->pic.reference = 0;
c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
if (avctx->get_buffer(avctx, &c->pic) < 0){
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
/* Set data pointer after line lengths */
dp = encoded + planes * (height << 1);
for (p = 0; p < planes; p++) {
/* Lines length pointer for this plane */
lp = encoded + p * (height << 1);
/* Decode a plane */
for (row = 0; row < height; row++) {
pixptr = c->pic.data[0] + row * c->pic.linesize[0] + planemap[p];
pixptr_end = pixptr + c->pic.linesize[0];
dlen = av_be2ne16(*(const unsigned short *)(lp + row * 2));
/* Decode a row of this plane */
while (dlen > 0) {
if (dp + 1 >= buf + buf_size)
return -1;
if ((count = *dp++) <= 127) {
count++;
dlen -= count + 1;
if (pixptr + count * planes > pixptr_end)
break;
if (dp + count > buf + buf_size)
return -1;
while (count--) {
*pixptr = *dp++;
pixptr += planes;
}
} else {
count = 257 - count;
if (pixptr + count * planes > pixptr_end)
break;
while (count--) {
*pixptr = *dp;
pixptr += planes;
}
dp++;
dlen -= 2;
}
}
}
if (avctx->bits_per_coded_sample <= 8) {
const uint8_t *pal = av_packet_get_side_data(avpkt,
AV_PKT_DATA_PALETTE,
NULL);
if (pal) {
c->pic.palette_has_changed = 1;
memcpy(c->pal, pal, AVPALETTE_SIZE);
}
memcpy (c->pic.data[1], c->pal, AVPALETTE_SIZE);
}
if (avctx->bits_per_coded_sample <= 8) {
const uint8_t *pal = av_packet_get_side_data(avpkt,
AV_PKT_DATA_PALETTE,
NULL);
if (pal) {
c->pic.palette_has_changed = 1;
memcpy(c->pal, pal, AVPALETTE_SIZE);
}
*data_size = sizeof(AVFrame);
*(AVFrame*)data = c->pic;
memcpy (c->pic.data[1], c->pal, AVPALETTE_SIZE);
}
*data_size = sizeof(AVFrame);
*(AVFrame*)data = c->pic;
/* always report that the buffer was completely consumed */
return buf_size;
/* always report that the buffer was completely consumed */
return buf_size;
}
@ -151,47 +154,47 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
*/
static av_cold int decode_init(AVCodecContext *avctx)
{
EightBpsContext * const c = avctx->priv_data;
c->avctx = avctx;
avcodec_get_frame_defaults(&c->pic);
c->pic.data[0] = NULL;
switch (avctx->bits_per_coded_sample) {
case 8:
avctx->pix_fmt = PIX_FMT_PAL8;
c->planes = 1;
c->planemap[0] = 0; // 1st plane is palette indexes
break;
case 24:
avctx->pix_fmt = avctx->get_format(avctx, pixfmt_rgb24);
c->planes = 3;
c->planemap[0] = 2; // 1st plane is red
c->planemap[1] = 1; // 2nd plane is green
c->planemap[2] = 0; // 3rd plane is blue
break;
case 32:
avctx->pix_fmt = PIX_FMT_RGB32;
c->planes = 4;
EightBpsContext * const c = avctx->priv_data;
c->avctx = avctx;
c->pic.data[0] = NULL;
avcodec_get_frame_defaults(&c->pic);
switch (avctx->bits_per_coded_sample) {
case 8:
avctx->pix_fmt = PIX_FMT_PAL8;
c->planes = 1;
c->planemap[0] = 0; // 1st plane is palette indexes
break;
case 24:
avctx->pix_fmt = avctx->get_format(avctx, pixfmt_rgb24);
c->planes = 3;
c->planemap[0] = 2; // 1st plane is red
c->planemap[1] = 1; // 2nd plane is green
c->planemap[2] = 0; // 3rd plane is blue
break;
case 32:
avctx->pix_fmt = PIX_FMT_RGB32;
c->planes = 4;
#if HAVE_BIGENDIAN
c->planemap[0] = 1; // 1st plane is red
c->planemap[1] = 2; // 2nd plane is green
c->planemap[2] = 3; // 3rd plane is blue
c->planemap[3] = 0; // 4th plane is alpha
c->planemap[0] = 1; // 1st plane is red
c->planemap[1] = 2; // 2nd plane is green
c->planemap[2] = 3; // 3rd plane is blue
c->planemap[3] = 0; // 4th plane is alpha
#else
c->planemap[0] = 2; // 1st plane is red
c->planemap[1] = 1; // 2nd plane is green
c->planemap[2] = 0; // 3rd plane is blue
c->planemap[3] = 3; // 4th plane is alpha
c->planemap[0] = 2; // 1st plane is red
c->planemap[1] = 1; // 2nd plane is green
c->planemap[2] = 0; // 3rd plane is blue
c->planemap[3] = 3; // 4th plane is alpha
#endif
break;
default:
av_log(avctx, AV_LOG_ERROR, "Error: Unsupported color depth: %u.\n", avctx->bits_per_coded_sample);
return -1;
}
return 0;
break;
default:
av_log(avctx, AV_LOG_ERROR, "Error: Unsupported color depth: %u.\n",
avctx->bits_per_coded_sample);
return -1;
}
return 0;
}
@ -204,12 +207,12 @@ static av_cold int decode_init(AVCodecContext *avctx)
*/
static av_cold int decode_end(AVCodecContext *avctx)
{
EightBpsContext * const c = avctx->priv_data;
EightBpsContext * const c = avctx->priv_data;
if (c->pic.data[0])
avctx->release_buffer(avctx, &c->pic);
if (c->pic.data[0])
avctx->release_buffer(avctx, &c->pic);
return 0;
return 0;
}
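
The reindented 8bps decoder above takes its palette from AV_PKT_DATA_PALETTE packet side data via av_packet_get_side_data(). As a hedged sketch of the producing side (attach_palette and pal are illustrative names, not code from this commit):

    #include <string.h>
    #include <libavcodec/avcodec.h>

    /* Attach an ARGB palette (AVPALETTE_SIZE bytes) to a packet so a
     * decoder can pick it up with av_packet_get_side_data(). */
    static int attach_palette(AVPacket *pkt, const uint8_t *pal)
    {
        uint8_t *side = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
                                                AVPALETTE_SIZE);
        if (!side)
            return AVERROR(ENOMEM);
        memcpy(side, pal, AVPALETTE_SIZE);
        return 0;
    }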

@ -3,7 +3,7 @@ include $(SUBDIR)../config.mak
NAME = avcodec
FFLIBS = avutil
HEADERS = avcodec.h avfft.h dxva2.h opt.h vaapi.h vda.h vdpau.h version.h xvmc.h
HEADERS = avcodec.h avfft.h dxva2.h vaapi.h vda.h vdpau.h version.h xvmc.h
OBJS = allcodecs.o \
audioconvert.o \

@ -162,11 +162,6 @@ static av_cold int ac3_decode_init(AVCodecContext *avctx)
AC3DecodeContext *s = avctx->priv_data;
s->avctx = avctx;
#if FF_API_DRC_SCALE
if (avctx->drc_scale)
s->drc_scale = avctx->drc_scale;
#endif
ff_ac3_common_init();
ac3_tables_init();
ff_mdct_init(&s->imdct_256, 8, 1, 1.0);

@ -31,23 +31,6 @@
#include "avcodec.h"
#include "audioconvert.h"
#if FF_API_OLD_SAMPLE_FMT
const char *avcodec_get_sample_fmt_name(int sample_fmt)
{
return av_get_sample_fmt_name(sample_fmt);
}
enum AVSampleFormat avcodec_get_sample_fmt(const char* name)
{
return av_get_sample_fmt(name);
}
void avcodec_sample_fmt_string (char *buf, int buf_size, int sample_fmt)
{
av_get_sample_fmt_string(buf, buf_size, sample_fmt);
}
#endif
uint64_t avcodec_guess_channel_layout(int nb_channels, enum CodecID codec_id, const char *fmt_name)
{
switch(nb_channels) {
@ -62,23 +45,6 @@ uint64_t avcodec_guess_channel_layout(int nb_channels, enum CodecID codec_id, co
}
}
#if FF_API_OLD_AUDIOCONVERT
int64_t avcodec_get_channel_layout(const char *name)
{
return av_get_channel_layout(name);
}
void avcodec_get_channel_layout_string(char *buf, int buf_size, int nb_channels, int64_t channel_layout)
{
av_get_channel_layout_string(buf, buf_size, nb_channels, channel_layout);
}
int avcodec_channel_layout_num_channels(int64_t channel_layout)
{
return av_get_channel_layout_nb_channels(channel_layout);
}
#endif
struct AVAudioConvert {
int in_channels, out_channels;
int fmt_pair;
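
With the avcodec_get_sample_fmt_name()/avcodec_get_channel_layout*() wrappers removed above, callers use the libavutil functions the wrappers were forwarding to. A small sketch of the direct calls (describe_audio is an illustrative name):

    #include <stdio.h>
    #include <libavutil/samplefmt.h>
    #include <libavutil/audioconvert.h>

    static void describe_audio(void)
    {
        char layout[64];
        /* formerly avcodec_get_sample_fmt_name() */
        printf("format: %s\n", av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
        /* formerly avcodec_get_channel_layout_string() */
        av_get_channel_layout_string(layout, sizeof(layout), 2, AV_CH_LAYOUT_STEREO);
        /* formerly avcodec_channel_layout_num_channels() */
        printf("layout: %s (%d channels)\n", layout,
               av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO));
    }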

@ -33,46 +33,6 @@
#include "avcodec.h"
#include "libavutil/audioconvert.h"
#if FF_API_OLD_SAMPLE_FMT
/**
* @deprecated Use av_get_sample_fmt_string() instead.
*/
attribute_deprecated
void avcodec_sample_fmt_string(char *buf, int buf_size, int sample_fmt);
/**
* @deprecated Use av_get_sample_fmt_name() instead.
*/
attribute_deprecated
const char *avcodec_get_sample_fmt_name(int sample_fmt);
/**
* @deprecated Use av_get_sample_fmt() instead.
*/
attribute_deprecated
enum AVSampleFormat avcodec_get_sample_fmt(const char* name);
#endif
#if FF_API_OLD_AUDIOCONVERT
/**
* @deprecated Use av_get_channel_layout() instead.
*/
attribute_deprecated
int64_t avcodec_get_channel_layout(const char *name);
/**
* @deprecated Use av_get_channel_layout_string() instead.
*/
attribute_deprecated
void avcodec_get_channel_layout_string(char *buf, int buf_size, int nb_channels, int64_t channel_layout);
/**
* @deprecated Use av_get_channel_layout_nb_channels() instead.
*/
attribute_deprecated
int avcodec_channel_layout_num_channels(int64_t channel_layout);
#endif
/**
* Guess the channel layout
* @param nb_channels

@ -34,6 +34,7 @@
#include "libavutil/log.h"
#include "libavutil/pixfmt.h"
#include "libavutil/rational.h"
#include "libavutil/audioconvert.h"
#include "libavcodec/version.h"
/**
@ -355,10 +356,6 @@ enum CodecID {
CODEC_ID_MACE3,
CODEC_ID_MACE6,
CODEC_ID_VMDAUDIO,
#if LIBAVCODEC_VERSION_MAJOR == 53
CODEC_ID_SONIC,
CODEC_ID_SONIC_LS,
#endif
CODEC_ID_FLAC,
CODEC_ID_MP3ADU,
CODEC_ID_MP3ON4,
@ -411,6 +408,8 @@ enum CodecID {
CODEC_ID_G723_1= 0x15801,
CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'),
CODEC_ID_8SVX_RAW = MKBETAG('8','S','V','X'),
CODEC_ID_SONIC = MKBETAG('S','O','N','C'),
CODEC_ID_SONIC_LS = MKBETAG('S','O','N','L'),
/* subtitle codecs */
CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
@ -441,66 +440,6 @@ enum CodecID {
CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information.
};
#if FF_API_OLD_SAMPLE_FMT
#define SampleFormat AVSampleFormat
#define SAMPLE_FMT_NONE AV_SAMPLE_FMT_NONE
#define SAMPLE_FMT_U8 AV_SAMPLE_FMT_U8
#define SAMPLE_FMT_S16 AV_SAMPLE_FMT_S16
#define SAMPLE_FMT_S32 AV_SAMPLE_FMT_S32
#define SAMPLE_FMT_FLT AV_SAMPLE_FMT_FLT
#define SAMPLE_FMT_DBL AV_SAMPLE_FMT_DBL
#define SAMPLE_FMT_NB AV_SAMPLE_FMT_NB
#endif
#if FF_API_OLD_AUDIOCONVERT
#include "libavutil/audioconvert.h"
/* Audio channel masks */
#define CH_FRONT_LEFT AV_CH_FRONT_LEFT
#define CH_FRONT_RIGHT AV_CH_FRONT_RIGHT
#define CH_FRONT_CENTER AV_CH_FRONT_CENTER
#define CH_LOW_FREQUENCY AV_CH_LOW_FREQUENCY
#define CH_BACK_LEFT AV_CH_BACK_LEFT
#define CH_BACK_RIGHT AV_CH_BACK_RIGHT
#define CH_FRONT_LEFT_OF_CENTER AV_CH_FRONT_LEFT_OF_CENTER
#define CH_FRONT_RIGHT_OF_CENTER AV_CH_FRONT_RIGHT_OF_CENTER
#define CH_BACK_CENTER AV_CH_BACK_CENTER
#define CH_SIDE_LEFT AV_CH_SIDE_LEFT
#define CH_SIDE_RIGHT AV_CH_SIDE_RIGHT
#define CH_TOP_CENTER AV_CH_TOP_CENTER
#define CH_TOP_FRONT_LEFT AV_CH_TOP_FRONT_LEFT
#define CH_TOP_FRONT_CENTER AV_CH_TOP_FRONT_CENTER
#define CH_TOP_FRONT_RIGHT AV_CH_TOP_FRONT_RIGHT
#define CH_TOP_BACK_LEFT AV_CH_TOP_BACK_LEFT
#define CH_TOP_BACK_CENTER AV_CH_TOP_BACK_CENTER
#define CH_TOP_BACK_RIGHT AV_CH_TOP_BACK_RIGHT
#define CH_STEREO_LEFT AV_CH_STEREO_LEFT
#define CH_STEREO_RIGHT AV_CH_STEREO_RIGHT
/** Channel mask value used for AVCodecContext.request_channel_layout
to indicate that the user requests the channel order of the decoder output
to be the native codec channel order. */
#define CH_LAYOUT_NATIVE AV_CH_LAYOUT_NATIVE
/* Audio channel convenience macros */
#define CH_LAYOUT_MONO AV_CH_LAYOUT_MONO
#define CH_LAYOUT_STEREO AV_CH_LAYOUT_STEREO
#define CH_LAYOUT_2_1 AV_CH_LAYOUT_2_1
#define CH_LAYOUT_SURROUND AV_CH_LAYOUT_SURROUND
#define CH_LAYOUT_4POINT0 AV_CH_LAYOUT_4POINT0
#define CH_LAYOUT_2_2 AV_CH_LAYOUT_2_2
#define CH_LAYOUT_QUAD AV_CH_LAYOUT_QUAD
#define CH_LAYOUT_5POINT0 AV_CH_LAYOUT_5POINT0
#define CH_LAYOUT_5POINT1 AV_CH_LAYOUT_5POINT1
#define CH_LAYOUT_5POINT0_BACK AV_CH_LAYOUT_5POINT0_BACK
#define CH_LAYOUT_5POINT1_BACK AV_CH_LAYOUT_5POINT1_BACK
#define CH_LAYOUT_7POINT0 AV_CH_LAYOUT_7POINT0
#define CH_LAYOUT_7POINT1 AV_CH_LAYOUT_7POINT1
#define CH_LAYOUT_7POINT1_WIDE AV_CH_LAYOUT_7POINT1_WIDE
#define CH_LAYOUT_STEREO_DOWNMIX AV_CH_LAYOUT_STEREO_DOWNMIX
#endif
#if FF_API_OLD_DECODE_AUDIO
/* in bytes */
#define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio
@ -604,20 +543,6 @@ enum AVChromaLocation{
AVCHROMA_LOC_NB , ///< Not part of ABI
};
#if FF_API_FLAC_GLOBAL_OPTS
/**
* LPC analysis type
*/
enum AVLPCType {
AV_LPC_TYPE_DEFAULT = -1, ///< use the codec default LPC type
AV_LPC_TYPE_NONE = 0, ///< do not use LPC prediction or use all zero coefficients
AV_LPC_TYPE_FIXED = 1, ///< fixed LPC coefficients
AV_LPC_TYPE_LEVINSON = 2, ///< Levinson-Durbin recursion
AV_LPC_TYPE_CHOLESKY = 3, ///< Cholesky factorization
AV_LPC_TYPE_NB , ///< Not part of ABI
};
#endif
enum AVAudioServiceType {
AV_AUDIO_SERVICE_TYPE_MAIN = 0,
AV_AUDIO_SERVICE_TYPE_EFFECTS = 1,
@ -679,51 +604,10 @@ typedef struct RcOverride{
#define CODEC_FLAG2_STRICT_GOP 0x00000002 ///< Strictly enforce GOP size.
#define CODEC_FLAG2_NO_OUTPUT 0x00000004 ///< Skip bitstream encoding.
#define CODEC_FLAG2_LOCAL_HEADER 0x00000008 ///< Place global headers at every keyframe instead of in extradata.
#define CODEC_FLAG2_DROP_FRAME_TIMECODE 0x00002000 ///< timecode is in drop frame format. DEPRECATED!!!!
#define CODEC_FLAG2_SKIP_RD 0x00004000 ///< RD optimal MB level residual skipping
#define CODEC_FLAG2_CHUNKS 0x00008000 ///< Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries.
#define CODEC_FLAG2_SHOW_ALL 0x00400000 ///< Show all frames before the first keyframe
/**
* @defgroup deprecated_flags Deprecated codec flags
* Use corresponding private codec options instead.
* @{
*/
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
#define CODEC_FLAG_OBMC 0x00000001 ///< OBMC
#define CODEC_FLAG_H263P_AIV 0x00000008 ///< H.263 alternative inter VLC
#define CODEC_FLAG_PART 0x0080 ///< Use data partitioning.
#define CODEC_FLAG_ALT_SCAN 0x00100000 ///< Use alternate scan.
#define CODEC_FLAG_H263P_UMV 0x02000000 ///< unlimited motion vector
#define CODEC_FLAG_H263P_SLICE_STRUCT 0x10000000
#define CODEC_FLAG_SVCD_SCAN_OFFSET 0x40000000 ///< Will reserve space for SVCD scan offset user data.
#define CODEC_FLAG2_INTRA_VLC 0x00000800 ///< Use MPEG-2 intra VLC table.
#define CODEC_FLAG2_DROP_FRAME_TIMECODE 0x00002000 ///< timecode is in drop frame format.
#define CODEC_FLAG2_NON_LINEAR_QUANT 0x00010000 ///< Use MPEG-2 nonlinear quantizer.
#endif
#if FF_API_MJPEG_GLOBAL_OPTS
#define CODEC_FLAG_EXTERN_HUFF 0x1000 ///< Use external Huffman table (for MJPEG).
#endif
#if FF_API_X264_GLOBAL_OPTS
#define CODEC_FLAG2_BPYRAMID 0x00000010 ///< H.264 allow B-frames to be used as references.
#define CODEC_FLAG2_WPRED 0x00000020 ///< H.264 weighted biprediction for B-frames
#define CODEC_FLAG2_MIXED_REFS 0x00000040 ///< H.264 one reference per partition, as opposed to one reference per macroblock
#define CODEC_FLAG2_8X8DCT 0x00000080 ///< H.264 high profile 8x8 transform
#define CODEC_FLAG2_FASTPSKIP 0x00000100 ///< H.264 fast pskip
#define CODEC_FLAG2_AUD 0x00000200 ///< H.264 access unit delimiters
#define CODEC_FLAG2_BRDO 0x00000400 ///< B-frame rate-distortion optimization
#define CODEC_FLAG2_MBTREE 0x00040000 ///< Use macroblock tree ratecontrol (x264 only)
#define CODEC_FLAG2_PSY 0x00080000 ///< Use psycho visual optimizations.
#define CODEC_FLAG2_SSIM 0x00100000 ///< Compute SSIM during encoding, error[] values are undefined.
#define CODEC_FLAG2_INTRA_REFRESH 0x00200000 ///< Use periodic insertion of intra blocks instead of keyframes.
#endif
#if FF_API_SNOW_GLOBAL_OPTS
#define CODEC_FLAG2_MEMC_ONLY 0x00001000 ///< Only do ME/MC (I frames -> ref, P frame -> ME+MC).
#endif
#if FF_API_LAME_GLOBAL_OPTS
#define CODEC_FLAG2_BIT_RESERVOIR 0x00020000 ///< Use a bit reservoir when encoding if possible
#endif
/**
* @}
*/
/* Unsupported options :
* Syntax Arithmetic coding (SAC)
@ -739,10 +623,6 @@ typedef struct RcOverride{
* assume the buffer was allocated by avcodec_default_get_buffer.
*/
#define CODEC_CAP_DR1 0x0002
#if FF_API_PARSE_FRAME
/* If 'parse_only' field is true, then avcodec_parse_frame() can be used. */
#define CODEC_CAP_PARSE_ONLY 0x0004
#endif
#define CODEC_CAP_TRUNCATED 0x0008
/* Codec can export data for HW decoding (XvMC). */
#define CODEC_CAP_HWACCEL 0x0010
@ -894,17 +774,6 @@ typedef struct AVPanScan{
#define FF_BUFFER_TYPE_SHARED 4 ///< Buffer from somewhere else; don't deallocate image (data/base), all other tables are not shared.
#define FF_BUFFER_TYPE_COPY 8 ///< Just a (modified) copy of some other buffer, don't deallocate anything.
#if FF_API_OLD_FF_PICT_TYPES
/* DEPRECATED, directly use the AV_PICTURE_TYPE_* enum values */
#define FF_I_TYPE AV_PICTURE_TYPE_I ///< Intra
#define FF_P_TYPE AV_PICTURE_TYPE_P ///< Predicted
#define FF_B_TYPE AV_PICTURE_TYPE_B ///< Bi-dir predicted
#define FF_S_TYPE AV_PICTURE_TYPE_S ///< S(GMC)-VOP MPEG4
#define FF_SI_TYPE AV_PICTURE_TYPE_SI ///< Switching Intra
#define FF_SP_TYPE AV_PICTURE_TYPE_SP ///< Switching Predicted
#define FF_BI_TYPE AV_PICTURE_TYPE_BI
#endif
#define FF_BUFFER_HINTS_VALID 0x01 // Buffer hints value is meaningful (if 0 ignore).
#define FF_BUFFER_HINTS_READABLE 0x02 // Codec will read from buffer.
#define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content.
@ -1014,11 +883,7 @@ enum AVSideDataParamChangeFlags {
* sizeof(AVFrame) must not be used outside libavcodec.
*/
typedef struct AVFrame {
#if FF_API_DATA_POINTERS
#define AV_NUM_DATA_POINTERS 4
#else
#define AV_NUM_DATA_POINTERS 8
#endif
/**
* pointer to the picture/channel planes.
* This might be different from the first allocated byte
@ -1087,13 +952,6 @@ typedef struct AVFrame {
*/
int quality;
#if FF_API_AVFRAME_AGE
/**
* @deprecated unused
*/
attribute_deprecated int age;
#endif
/**
* is this picture used as reference
* The values for this are the same as the MpegEncContext.picture_structure
@ -1707,21 +1565,6 @@ typedef struct AVCodecContext {
*/
float b_quant_offset;
#if FF_API_ER
/**
* Error recognition; higher values will detect more errors but may
* misdetect some more or less valid parts as errors.
* - encoding: unused
* - decoding: Set by user.
*/
attribute_deprecated int error_recognition;
#define FF_ER_CAREFUL 1
#define FF_ER_COMPLIANT 2
#define FF_ER_AGGRESSIVE 3
#define FF_ER_VERY_AGGRESSIVE 4
#define FF_ER_EXPLODE 5
#endif /* FF_API_ER */
/**
* Called at the beginning of each frame to get a buffer for it.
*
@ -1804,16 +1647,6 @@ typedef struct AVCodecContext {
*/
int block_align;
#if FF_API_PARSE_FRAME
/**
* If true, only parsing is done. The frame data is returned.
* Only MPEG audio decoders support this now.
* - encoding: unused
* - decoding: Set by user
*/
attribute_deprecated int parse_only;
#endif
/**
* 0-> h263 quant 1-> mpeg quant
* - encoding: Set by user.
@ -2237,22 +2070,6 @@ typedef struct AVCodecContext {
*/
int color_table_id;
#if FF_API_INTERNAL_CONTEXT
/**
* internal_buffer count
* Don't touch, used by libavcodec default_get_buffer().
* @deprecated this field was moved to an internal context
*/
attribute_deprecated int internal_buffer_count;
/**
* internal_buffers
* Don't touch, used by libavcodec default_get_buffer().
* @deprecated this field was moved to an internal context
*/
attribute_deprecated void *internal_buffer;
#endif
/**
* Global quality for codecs which cannot change it per frame.
* This should be proportional to MPEG-1/2/4 qscale.
@ -2359,15 +2176,6 @@ typedef struct AVCodecContext {
*/
int lmax;
#if FF_API_PALETTE_CONTROL
/**
* palette control structure
* - encoding: ??? (no palette-enabled encoder yet)
* - decoding: Set by user.
*/
struct AVPaletteControl *palctrl;
#endif
/**
* noise reduction strength
* - encoding: Set by user.
@ -2417,19 +2225,6 @@ typedef struct AVCodecContext {
*/
int error_rate;
#if FF_API_ANTIALIAS_ALGO
/**
* MP3 antialias algorithm, see FF_AA_* below.
* - encoding: unused
* - decoding: Set by user.
*/
attribute_deprecated int antialias_algo;
#define FF_AA_AUTO 0
#define FF_AA_FASTINT 1 //not implemented yet
#define FF_AA_INT 2
#define FF_AA_FLOAT 3
#endif
/**
* quantizer noise shaping
* - encoding: Set by user.
@ -2688,24 +2483,6 @@ typedef struct AVCodecContext {
*/
int brd_scale;
#if FF_API_X264_GLOBAL_OPTS
/**
* constant rate factor - quality-based VBR - values ~correspond to qps
* - encoding: Set by user.
* - decoding: unused
* @deprecated use 'crf' libx264 private option
*/
attribute_deprecated float crf;
/**
* constant quantization parameter rate control method
* - encoding: Set by user.
* - decoding: unused
* @deprecated use 'cqp' libx264 private option
*/
attribute_deprecated int cqp;
#endif
/**
* minimum GOP size
* - encoding: Set by user.
@ -2727,15 +2504,6 @@ typedef struct AVCodecContext {
*/
int chromaoffset;
#if FF_API_X264_GLOBAL_OPTS
/**
* Influence how often B-frames are used.
* - encoding: Set by user.
* - decoding: unused
*/
attribute_deprecated int bframebias;
#endif
/**
* trellis RD quantization
* - encoding: Set by user.
@ -2743,50 +2511,6 @@ typedef struct AVCodecContext {
*/
int trellis;
#if FF_API_X264_GLOBAL_OPTS
/**
* Reduce fluctuations in qp (before curve compression).
* - encoding: Set by user.
* - decoding: unused
*/
attribute_deprecated float complexityblur;
/**
* in-loop deblocking filter alphac0 parameter
* alpha is in the range -6...6
* - encoding: Set by user.
* - decoding: unused
*/
attribute_deprecated int deblockalpha;
/**
* in-loop deblocking filter beta parameter
* beta is in the range -6...6
* - encoding: Set by user.
* - decoding: unused
*/
attribute_deprecated int deblockbeta;
/**
* macroblock subpartition sizes to consider - p8x8, p4x4, b8x8, i8x8, i4x4
* - encoding: Set by user.
* - decoding: unused
*/
attribute_deprecated int partitions;
#define X264_PART_I4X4 0x001 /* Analyze i4x4 */
#define X264_PART_I8X8 0x002 /* Analyze i8x8 (requires 8x8 transform) */
#define X264_PART_P8X8 0x010 /* Analyze p16x8, p8x16 and p8x8 */
#define X264_PART_P4X4 0x020 /* Analyze p8x4, p4x8, p4x4 */
#define X264_PART_B8X8 0x100 /* Analyze b16x8, b8x16 and b8x8 */
/**
* direct MV prediction mode - 0 (none), 1 (spatial), 2 (temporal), 3 (auto)
* - encoding: Set by user.
* - decoding: unused
*/
attribute_deprecated int directpred;
#endif
/**
* Audio cutoff bandwidth (0 means "automatic")
* - encoding: Set by user.
@ -2835,43 +2559,6 @@ typedef struct AVCodecContext {
*/
int max_prediction_order;
#if FF_API_FLAC_GLOBAL_OPTS
/**
* @name FLAC options
* @deprecated Use FLAC encoder private options instead.
* @{
*/
/**
* LPC coefficient precision - used by FLAC encoder
* - encoding: Set by user.
* - decoding: unused
*/
attribute_deprecated int lpc_coeff_precision;
/**
* search method for selecting prediction order
* - encoding: Set by user.
* - decoding: unused
*/
attribute_deprecated int prediction_order_method;
/**
* - encoding: Set by user.
* - decoding: unused
*/
attribute_deprecated int min_partition_order;
/**
* - encoding: Set by user.
* - decoding: unused
*/
attribute_deprecated int max_partition_order;
/**
* @}
*/
#endif
/**
* GOP timecode frame start number
* - encoding: Set by user, in non drop frame format
@ -2889,17 +2576,6 @@ typedef struct AVCodecContext {
int request_channels;
#endif
#if FF_API_DRC_SCALE
/**
* Percentage of dynamic range compression to be applied by the decoder.
* The default value is 1.0, corresponding to full compression.
* - encoding: unused
* - decoding: Set by user.
* @deprecated use AC3 decoder private option instead.
*/
attribute_deprecated float drc_scale;
#endif
/**
* opaque 64bit number (generally a PTS) that will be reordered and
* output in AVFrame.reordered_opaque
@ -3027,87 +2703,8 @@ typedef struct AVCodecContext {
*/
int (*execute2)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count);
#if FF_API_X264_GLOBAL_OPTS
/**
* explicit P-frame weighted prediction analysis method
* 0: off
* 1: fast blind weighting (one reference duplicate with -1 offset)
* 2: smart weighting (full fade detection analysis)
* - encoding: Set by user.
* - decoding: unused
*/
attribute_deprecated int weighted_p_pred;
/**
* AQ mode
* 0: Disabled
* 1: Variance AQ (complexity mask)
* 2: Auto-variance AQ (experimental)
* - encoding: Set by user
* - decoding: unused
*/
attribute_deprecated int aq_mode;
/**
* AQ strength
* Reduces blocking and blurring in flat and textured areas.
* - encoding: Set by user
* - decoding: unused
*/
attribute_deprecated float aq_strength;
/**
* PSY RD
* Strength of psychovisual optimization
* - encoding: Set by user
* - decoding: unused
*/
attribute_deprecated float psy_rd;
/**
* PSY trellis
* Strength of psychovisual optimization
* - encoding: Set by user
* - decoding: unused
*/
attribute_deprecated float psy_trellis;
/**
* RC lookahead
* Number of frames for frametype and ratecontrol lookahead
* - encoding: Set by user
* - decoding: unused
*/
attribute_deprecated int rc_lookahead;
/**
* Constant rate factor maximum
* With CRF encoding mode and VBV restrictions enabled, prevents quality from being worse
* than crf_max, even if doing so would violate VBV restrictions.
* - encoding: Set by user.
* - decoding: unused
*/
attribute_deprecated float crf_max;
#endif
int log_level_offset;
#if FF_API_FLAC_GLOBAL_OPTS
/**
* Determine which LPC analysis algorithm to use.
* - encoding: Set by user
* - decoding: unused
*/
attribute_deprecated enum AVLPCType lpc_type;
/**
* Number of passes to use for Cholesky factorization during LPC analysis
* - encoding: Set by user
* - decoding: unused
*/
attribute_deprecated int lpc_passes;
#endif
/**
* Number of slices.
* Indicates number of picture subdivisions. Used for parallelized
@ -3137,19 +2734,6 @@ typedef struct AVCodecContext {
*/
AVPacket *pkt;
#if FF_API_INTERNAL_CONTEXT
/**
* Whether this is a copy of the context which had init() called on it.
* This is used by multithreading - shared tables and picture pointers
* should be freed from the original context only.
* - encoding: Set by libavcodec.
* - decoding: Set by libavcodec.
*
* @deprecated this field has been moved to an internal context
*/
attribute_deprecated int is_copy;
#endif
/**
* Which multithreading methods to use.
* Use of FF_THREAD_FRAME will increase decoding delay by one frame per thread,
@ -3441,29 +3025,6 @@ typedef struct AVPicture {
#define AVPALETTE_SIZE 1024
#define AVPALETTE_COUNT 256
#if FF_API_PALETTE_CONTROL
/**
* AVPaletteControl
* This structure defines a method for communicating palette changes
* between and demuxer and a decoder.
*
* @deprecated Use AVPacket to send palette changes instead.
* This is totally broken.
*/
typedef struct AVPaletteControl {
/* Demuxer sets this to 1 to indicate the palette has changed;
* decoder resets to 0. */
int palette_changed;
/* 4-byte ARGB palette entries, stored in native byte order; note that
* the individual palette components should be on a 8-bit scale; if
* the palette data comes from an IBM VGA native format, the component
* data is probably 6 bits in size and needs to be scaled. */
unsigned int palette[AVPALETTE_COUNT];
} AVPaletteControl attribute_deprecated;
#endif
enum AVSubtitleType {
SUBTITLE_NONE,
@ -3764,17 +3325,6 @@ void avcodec_get_chroma_sub_sample(enum PixelFormat pix_fmt, int *h_shift, int *
*/
const char *avcodec_get_name(enum CodecID id);
#if FF_API_GET_PIX_FMT_NAME
/**
* Return the short name for a pixel format.
*
* \see av_get_pix_fmt(), av_get_pix_fmt_string().
* @deprecated Deprecated in favor of av_get_pix_fmt_name().
*/
attribute_deprecated
const char *avcodec_get_pix_fmt_name(enum PixelFormat pix_fmt);
#endif
void avcodec_set_dimensions(AVCodecContext *s, int width, int height);
/**
@ -3881,18 +3431,6 @@ enum PixelFormat avcodec_find_best_pix_fmt(int64_t pix_fmt_mask, enum PixelForma
enum PixelFormat avcodec_find_best_pix_fmt2(enum PixelFormat dst_pix_fmt1, enum PixelFormat dst_pix_fmt2,
enum PixelFormat src_pix_fmt, int has_alpha, int *loss_ptr);
#if FF_API_GET_ALPHA_INFO
#define FF_ALPHA_TRANSP 0x0001 /* image has some totally transparent pixels */
#define FF_ALPHA_SEMI_TRANSP 0x0002 /* image has some transparent pixels */
/**
* Tell if an image really has transparent alpha values.
* @return ored mask of FF_ALPHA_xxx constants
*/
attribute_deprecated
int img_get_alpha_info(const AVPicture *src,
enum PixelFormat pix_fmt, int width, int height);
#endif
/* deinterlace a picture */
/* deinterlace - if not supported return -1 */
@ -3923,15 +3461,6 @@ const char *avcodec_configuration(void);
*/
const char *avcodec_license(void);
#if FF_API_AVCODEC_INIT
/**
* @deprecated this function is called automatically from avcodec_register()
* and avcodec_register_all(), there is no need to call it manually
*/
attribute_deprecated
void avcodec_init(void);
#endif
/**
* Register the codec codec and initialize libavcodec.
*
@ -4108,14 +3637,6 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat * fmt);
#if FF_API_THREAD_INIT
/**
* @deprecated Set s->thread_count before calling avcodec_open2() instead of calling this.
*/
attribute_deprecated
int avcodec_thread_init(AVCodecContext *s, int thread_count);
#endif
int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size);
int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count);
//FIXME func typedef
@ -4486,18 +4007,6 @@ void avcodec_default_free_buffers(AVCodecContext *s);
/* misc useful functions */
#if FF_API_OLD_FF_PICT_TYPES
/**
* Return a single letter to describe the given picture type pict_type.
*
* @param[in] pict_type the picture type
* @return A single character representing the picture type.
* @deprecated Use av_get_picture_type_char() instead.
*/
attribute_deprecated
char av_get_pict_type_char(int pict_type);
#endif
/**
* Return codec bits per sample.
*
@ -4506,14 +4015,6 @@ char av_get_pict_type_char(int pict_type);
*/
int av_get_bits_per_sample(enum CodecID codec_id);
#if FF_API_OLD_SAMPLE_FMT
/**
* @deprecated Use av_get_bytes_per_sample() instead.
*/
attribute_deprecated
int av_get_bits_per_sample_format(enum AVSampleFormat sample_fmt);
#endif
/* frame parsing */
typedef struct AVCodecParserContext {
void *priv_data;

@ -295,29 +295,6 @@ static av_cold int flac_encode_init(AVCodecContext *avctx)
if (s->options.max_partition_order < 0)
s->options.max_partition_order = ((int[]){ 2, 2, 3, 3, 3, 8, 8, 8, 8, 8, 8, 8, 8})[level];
/* set compression option overrides from AVCodecContext */
#if FF_API_FLAC_GLOBAL_OPTS
if (avctx->lpc_type > FF_LPC_TYPE_DEFAULT) {
if (avctx->lpc_type > FF_LPC_TYPE_CHOLESKY) {
av_log(avctx, AV_LOG_ERROR, "unknown lpc type: %d\n", avctx->lpc_type);
return -1;
}
s->options.lpc_type = avctx->lpc_type;
if (s->options.lpc_type == FF_LPC_TYPE_CHOLESKY) {
if (avctx->lpc_passes < 0) {
// default number of passes for Cholesky
s->options.lpc_passes = 2;
} else if (avctx->lpc_passes == 0) {
av_log(avctx, AV_LOG_ERROR, "invalid number of lpc passes: %d\n",
avctx->lpc_passes);
return -1;
} else {
s->options.lpc_passes = avctx->lpc_passes;
}
}
}
#endif
if (s->options.lpc_type == FF_LPC_TYPE_NONE) {
s->options.min_prediction_order = 0;
} else if (avctx->min_prediction_order >= 0) {
@ -358,39 +335,6 @@ static av_cold int flac_encode_init(AVCodecContext *avctx)
return -1;
}
#if FF_API_FLAC_GLOBAL_OPTS
if (avctx->prediction_order_method >= 0) {
if (avctx->prediction_order_method > ORDER_METHOD_LOG) {
av_log(avctx, AV_LOG_ERROR, "invalid prediction order method: %d\n",
avctx->prediction_order_method);
return -1;
}
s->options.prediction_order_method = avctx->prediction_order_method;
}
if (avctx->min_partition_order >= 0) {
if (avctx->min_partition_order > MAX_PARTITION_ORDER) {
av_log(avctx, AV_LOG_ERROR, "invalid min partition order: %d\n",
avctx->min_partition_order);
return -1;
}
s->options.min_partition_order = avctx->min_partition_order;
}
if (avctx->max_partition_order >= 0) {
if (avctx->max_partition_order > MAX_PARTITION_ORDER) {
av_log(avctx, AV_LOG_ERROR, "invalid max partition order: %d\n",
avctx->max_partition_order);
return -1;
}
s->options.max_partition_order = avctx->max_partition_order;
}
if (s->options.max_partition_order < s->options.min_partition_order) {
av_log(avctx, AV_LOG_ERROR, "invalid partition orders: min=%d max=%d\n",
s->options.min_partition_order, s->options.max_partition_order);
return -1;
}
#endif
if (avctx->frame_size > 0) {
if (avctx->frame_size < FLAC_MIN_BLOCKSIZE ||
avctx->frame_size > FLAC_MAX_BLOCKSIZE) {
@ -403,18 +347,6 @@ static av_cold int flac_encode_init(AVCodecContext *avctx)
}
s->max_blocksize = s->avctx->frame_size;
#if FF_API_FLAC_GLOBAL_OPTS
/* set LPC precision */
if (avctx->lpc_coeff_precision > 0) {
if (avctx->lpc_coeff_precision > MAX_LPC_PRECISION) {
av_log(avctx, AV_LOG_ERROR, "invalid lpc coeff precision: %d\n",
avctx->lpc_coeff_precision);
return -1;
}
s->options.lpc_coeff_precision = avctx->lpc_coeff_precision;
}
#endif
/* set maximum encoded frame size in verbatim mode */
s->max_framesize = ff_flac_get_max_frame_size(s->avctx->frame_size,
s->channels, 16);

@ -33,6 +33,8 @@ static av_cold int decode_init(AVCodecContext *avctx)
avctx->pix_fmt = PIX_FMT_UYVY422;
avctx->coded_frame = avcodec_alloc_frame();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
return 0;
}

@ -2224,7 +2224,7 @@ AVCodec ff_g723_1_encoder = {
.init = g723_1_encode_init,
.encode = g723_1_encode_frame,
.long_name = NULL_IF_CONFIG_SMALL("G.723.1"),
.sample_fmts = (const enum SampleFormat[]){AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE},
.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE},
};
#endif

@ -3695,7 +3695,7 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg){
tprintf(s->avctx, "slice end %d %d\n", get_bits_count(&s->gb), s->gb.size_in_bits);
if( get_bits_count(&s->gb) == s->gb.size_in_bits
|| get_bits_count(&s->gb) < s->gb.size_in_bits && s->avctx->error_recognition < FF_ER_AGGRESSIVE) {
|| get_bits_count(&s->gb) < s->gb.size_in_bits && !(s->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END&part_mask);
return 0;

@ -289,13 +289,6 @@ void avcodec_get_chroma_sub_sample(enum PixelFormat pix_fmt, int *h_shift, int *
*v_shift = av_pix_fmt_descriptors[pix_fmt].log2_chroma_h;
}
#if FF_API_GET_PIX_FMT_NAME
const char *avcodec_get_pix_fmt_name(enum PixelFormat pix_fmt)
{
return av_get_pix_fmt_name(pix_fmt);
}
#endif
int ff_is_hwaccel_pix_fmt(enum PixelFormat pix_fmt)
{
return av_pix_fmt_descriptors[pix_fmt].flags & PIX_FMT_HWACCEL;
@ -753,55 +746,6 @@ int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width,
return 0;
}
#if FF_API_GET_ALPHA_INFO
/* NOTE: we scan all the pixels to have an exact information */
static int get_alpha_info_pal8(const AVPicture *src, int width, int height)
{
const unsigned char *p;
int src_wrap, ret, x, y;
unsigned int a;
uint32_t *palette = (uint32_t *)src->data[1];
p = src->data[0];
src_wrap = src->linesize[0] - width;
ret = 0;
for(y=0;y<height;y++) {
for(x=0;x<width;x++) {
a = palette[p[0]] >> 24;
if (a == 0x00) {
ret |= FF_ALPHA_TRANSP;
} else if (a != 0xff) {
ret |= FF_ALPHA_SEMI_TRANSP;
}
p++;
}
p += src_wrap;
}
return ret;
}
int img_get_alpha_info(const AVPicture *src,
enum PixelFormat pix_fmt, int width, int height)
{
const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
int ret;
/* no alpha can be represented in format */
if (!pf->is_alpha)
return 0;
switch(pix_fmt) {
case PIX_FMT_PAL8:
ret = get_alpha_info_pal8(src, width, height);
break;
default:
/* we do not know, so everything is indicated */
ret = FF_ALPHA_TRANSP | FF_ALPHA_SEMI_TRANSP;
break;
}
return ret;
}
#endif
#if !(HAVE_MMX && HAVE_YASM)
/* filter parameters: [-1 4 2 4 -1] // 8 */
static void deinterlace_line_c(uint8_t *dst,

@ -129,6 +129,6 @@ AVCodec ff_libaacplus_encoder = {
.init = aacPlus_encode_init,
.encode = aacPlus_encode_frame,
.close = aacPlus_encode_close,
.sample_fmts = (const enum SampleFormat[]){AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE},
.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("libaacplus AAC+ (Advanced Audio Codec with SBR+PS)"),
};

@ -75,9 +75,6 @@ static av_cold int MP3lame_encode_init(AVCodecContext *avctx)
lame_set_VBR_quality(s->gfp, avctx->global_quality / (float)FF_QP2LAMBDA);
}
lame_set_bWriteVbrTag(s->gfp,0);
#if FF_API_LAME_GLOBAL_OPTS
s->reservoir = avctx->flags2 & CODEC_FLAG2_BIT_RESERVOIR;
#endif
lame_set_disable_reservoir(s->gfp, !s->reservoir);
if (lame_init_params(s->gfp) < 0)
goto err_close;

@ -319,7 +319,7 @@ AVCodec ff_libspeex_encoder = {
.encode = encode_frame,
.close = encode_close,
.capabilities = CODEC_CAP_DELAY,
.sample_fmts = (const enum SampleFormat[]){ AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE },
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE },
.long_name = NULL_IF_CONFIG_SMALL("libspeex Speex"),
.priv_class = &class,
.defaults = defaults,

@ -314,17 +314,6 @@ static av_cold int X264_init(AVCodecContext *avctx)
if (avctx->flags & CODEC_FLAG_PASS2) {
x4->params.rc.b_stat_read = 1;
} else {
#if FF_API_X264_GLOBAL_OPTS
if (avctx->crf) {
x4->params.rc.i_rc_method = X264_RC_CRF;
x4->params.rc.f_rf_constant = avctx->crf;
x4->params.rc.f_rf_constant_max = avctx->crf_max;
} else if (avctx->cqp > -1) {
x4->params.rc.i_rc_method = X264_RC_CQP;
x4->params.rc.i_qp_constant = avctx->cqp;
}
#endif
if (x4->crf >= 0) {
x4->params.rc.i_rc_method = X264_RC_CRF;
x4->params.rc.f_rf_constant = x4->crf;
@ -360,55 +349,6 @@ static av_cold int X264_init(AVCodecContext *avctx)
}
}
#if FF_API_X264_GLOBAL_OPTS
if (avctx->aq_mode >= 0)
x4->params.rc.i_aq_mode = avctx->aq_mode;
if (avctx->aq_strength >= 0)
x4->params.rc.f_aq_strength = avctx->aq_strength;
if (avctx->psy_rd >= 0)
x4->params.analyse.f_psy_rd = avctx->psy_rd;
if (avctx->psy_trellis >= 0)
x4->params.analyse.f_psy_trellis = avctx->psy_trellis;
if (avctx->rc_lookahead >= 0)
x4->params.rc.i_lookahead = avctx->rc_lookahead;
if (avctx->weighted_p_pred >= 0)
x4->params.analyse.i_weighted_pred = avctx->weighted_p_pred;
if (avctx->bframebias)
x4->params.i_bframe_bias = avctx->bframebias;
if (avctx->deblockalpha)
x4->params.i_deblocking_filter_alphac0 = avctx->deblockalpha;
if (avctx->deblockbeta)
x4->params.i_deblocking_filter_beta = avctx->deblockbeta;
if (avctx->complexityblur >= 0)
x4->params.rc.f_complexity_blur = avctx->complexityblur;
if (avctx->directpred >= 0)
x4->params.analyse.i_direct_mv_pred = avctx->directpred;
if (avctx->partitions) {
if (avctx->partitions & X264_PART_I4X4)
x4->params.analyse.inter |= X264_ANALYSE_I4x4;
if (avctx->partitions & X264_PART_I8X8)
x4->params.analyse.inter |= X264_ANALYSE_I8x8;
if (avctx->partitions & X264_PART_P8X8)
x4->params.analyse.inter |= X264_ANALYSE_PSUB16x16;
if (avctx->partitions & X264_PART_P4X4)
x4->params.analyse.inter |= X264_ANALYSE_PSUB8x8;
if (avctx->partitions & X264_PART_B8X8)
x4->params.analyse.inter |= X264_ANALYSE_BSUB16x16;
}
if (avctx->flags2) {
x4->params.analyse.b_ssim = avctx->flags2 & CODEC_FLAG2_SSIM;
x4->params.b_intra_refresh = avctx->flags2 & CODEC_FLAG2_INTRA_REFRESH;
x4->params.i_bframe_pyramid = avctx->flags2 & CODEC_FLAG2_BPYRAMID ? X264_B_PYRAMID_NORMAL : X264_B_PYRAMID_NONE;
x4->params.analyse.b_weighted_bipred = avctx->flags2 & CODEC_FLAG2_WPRED;
x4->params.analyse.b_mixed_references = avctx->flags2 & CODEC_FLAG2_MIXED_REFS;
x4->params.analyse.b_transform_8x8 = avctx->flags2 & CODEC_FLAG2_8X8DCT;
x4->params.analyse.b_fast_pskip = avctx->flags2 & CODEC_FLAG2_FASTPSKIP;
x4->params.b_aud = avctx->flags2 & CODEC_FLAG2_AUD;
x4->params.analyse.b_psy = avctx->flags2 & CODEC_FLAG2_PSY;
x4->params.rc.b_mb_tree = !!(avctx->flags2 & CODEC_FLAG2_MBTREE);
}
#endif
if (avctx->me_method == ME_EPZS)
x4->params.analyse.i_me_method = X264_ME_DIA;
else if (avctx->me_method == ME_HEX)
@ -536,9 +476,6 @@ static av_cold int X264_init(AVCodecContext *avctx)
avctx->max_b_frames = 0;
avctx->bit_rate = x4->params.rc.i_bitrate*1000;
#if FF_API_X264_GLOBAL_OPTS
avctx->crf = x4->params.rc.f_rf_constant;
#endif
x4->enc = x264_encoder_open(&x4->params);
if (!x4->enc)
@ -675,7 +612,7 @@ static const AVCodecDefault x264_defaults[] = {
{ "qdiff", "-1" },
{ "qblur", "-1" },
{ "qcomp", "-1" },
{ "rc_lookahead", "-1" },
// { "rc_lookahead", "-1" },
{ "refs", "-1" },
{ "sc_threshold", "-1" },
{ "trellis", "-1" },
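
The FF_API_X264_GLOBAL_OPTS fields dropped above (crf, cqp, aq_mode, psy_rd, ...) are superseded by libx264's private options, as the deprecation notes removed from avcodec.h already suggested. A hedged sketch of setting them through the AVOptions API (the values are arbitrary examples):

    #include <libavcodec/avcodec.h>
    #include <libavutil/opt.h>

    /* avctx is assumed to have been allocated for the libx264 encoder,
     * so its priv_data exposes the encoder's private options. */
    static int set_x264_quality(AVCodecContext *avctx)
    {
        int ret = av_opt_set(avctx->priv_data, "crf", "23", 0);
        if (ret < 0)
            return ret;
        return av_opt_set(avctx->priv_data, "aq-mode", "1", 0);
    }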

@ -203,16 +203,6 @@ static av_cold int XAVS_init(AVCodecContext *avctx)
if (avctx->flags & CODEC_FLAG_PASS2) {
x4->params.rc.b_stat_read = 1;
} else {
#if FF_API_X264_GLOBAL_OPTS
if (avctx->crf) {
x4->params.rc.i_rc_method = XAVS_RC_CRF;
x4->params.rc.f_rf_constant = avctx->crf;
} else if (avctx->cqp > -1) {
x4->params.rc.i_rc_method = XAVS_RC_CQP;
x4->params.rc.i_qp_constant = avctx->cqp;
}
#endif
if (x4->crf >= 0) {
x4->params.rc.i_rc_method = XAVS_RC_CRF;
x4->params.rc.f_rf_constant = x4->crf;
@ -222,32 +212,6 @@ static av_cold int XAVS_init(AVCodecContext *avctx)
}
}
#if FF_API_X264_GLOBAL_OPTS
if (avctx->bframebias)
x4->params.i_bframe_bias = avctx->bframebias;
if (avctx->deblockalpha)
x4->params.i_deblocking_filter_alphac0 = avctx->deblockalpha;
if (avctx->deblockbeta)
x4->params.i_deblocking_filter_beta = avctx->deblockbeta;
if (avctx->complexityblur >= 0)
x4->params.rc.f_complexity_blur = avctx->complexityblur;
if (avctx->directpred >= 0)
x4->params.analyse.i_direct_mv_pred = avctx->directpred;
if (avctx->partitions) {
if (avctx->partitions & XAVS_PART_I8X8)
x4->params.analyse.inter |= XAVS_ANALYSE_I8x8;
if (avctx->partitions & XAVS_PART_P8X8)
x4->params.analyse.inter |= XAVS_ANALYSE_PSUB16x16;
if (avctx->partitions & XAVS_PART_B8X8)
x4->params.analyse.inter |= XAVS_ANALYSE_BSUB16x16;
}
x4->params.rc.b_mb_tree = !!(avctx->flags2 & CODEC_FLAG2_MBTREE);
x4->params.b_aud = avctx->flags2 & CODEC_FLAG2_AUD;
x4->params.analyse.b_mixed_references = avctx->flags2 & CODEC_FLAG2_MIXED_REFS;
x4->params.analyse.b_fast_pskip = avctx->flags2 & CODEC_FLAG2_FASTPSKIP;
x4->params.analyse.b_weighted_bipred = avctx->flags2 & CODEC_FLAG2_WPRED;
#endif
if (x4->aud >= 0)
x4->params.b_aud = x4->aud;
if (x4->mbtree >= 0)

@ -59,6 +59,9 @@ read_header:
s->restart_count = 0;
s->mjpb_skiptosod = 0;
if (buf_end - buf_ptr >= 1 << 28)
return AVERROR_INVALIDDATA;
init_get_bits(&hgb, buf_ptr, /*buf_size*/(buf_end - buf_ptr)*8);
skip_bits(&hgb, 32); /* reserved zeros */
@ -111,7 +114,8 @@ read_header:
av_log(avctx, AV_LOG_DEBUG, "sod offs: 0x%x\n", sod_offs);
if (sos_offs)
{
init_get_bits(&s->gb, buf_ptr+sos_offs, FFMIN(field_size, buf_end - (buf_ptr+sos_offs))*8);
init_get_bits(&s->gb, buf_ptr + sos_offs,
8 * FFMIN(field_size, buf_end - buf_ptr - sos_offs));
s->mjpb_skiptosod = (sod_offs - sos_offs - show_bits(&s->gb, 16));
s->start_code = SOS;
if (ff_mjpeg_decode_sos(s, NULL, NULL) < 0 &&

@ -103,10 +103,6 @@ av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
build_basic_mjpeg_vlc(s);
#if FF_API_MJPEG_GLOBAL_OPTS
if (avctx->flags & CODEC_FLAG_EXTERN_HUFF)
s->extern_huff = 1;
#endif
if (s->extern_huff) {
av_log(avctx, AV_LOG_INFO, "mjpeg: using external huffman table\n");
init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8);

@ -135,13 +135,6 @@ static av_cold int encode_init(AVCodecContext *avctx)
if(MPV_encode_init(avctx) < 0)
return -1;
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
if (avctx->flags2 & CODEC_FLAG2_DROP_FRAME_TIMECODE)
s->drop_frame_timecode = 1;
if (avctx->flags & CODEC_FLAG_SVCD_SCAN_OFFSET)
s->scan_offset = 1;
#endif
if(find_frame_rate_index(s) < 0){
if(s->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL){
av_log(avctx, AV_LOG_ERROR, "MPEG1/2 does not support %d/%d fps\n", avctx->time_base.den, avctx->time_base.num);

@ -1493,7 +1493,7 @@ end:
if(s->codec_id==CODEC_ID_MPEG4){
int next= mpeg4_is_resync(s);
if(next) {
if (s->mb_x + s->mb_y*s->mb_width + 1 > next && s->avctx->error_recognition >= FF_ER_AGGRESSIVE) {
if (s->mb_x + s->mb_y*s->mb_width + 1 > next && (s->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
return -1;
} else if (s->mb_x + s->mb_y*s->mb_width + 1 >= next)
return SLICE_END;

@ -1727,11 +1727,6 @@ static int decode_frame_adu(AVCodecContext *avctx, void *data,
s->frame_size = len;
#if FF_API_PARSE_FRAME
if (avctx->parse_only)
out_size = buf_size;
else
#endif
out_size = mp_decode_frame(s, NULL, buf, buf_size);
*got_frame_ptr = 1;
@ -1979,11 +1974,7 @@ AVCodec ff_mp1_decoder = {
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
.decode = decode_frame,
#if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
#else
.capabilities = CODEC_CAP_DR1,
#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP1 (MPEG audio layer 1)"),
};
@ -1996,11 +1987,7 @@ AVCodec ff_mp2_decoder = {
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
.decode = decode_frame,
#if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
#else
.capabilities = CODEC_CAP_DR1,
#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"),
};
@ -2013,11 +2000,7 @@ AVCodec ff_mp3_decoder = {
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
.decode = decode_frame,
#if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
#else
.capabilities = CODEC_CAP_DR1,
#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"),
};
@ -2030,11 +2013,7 @@ AVCodec ff_mp3adu_decoder = {
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
.decode = decode_frame_adu,
#if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
#else
.capabilities = CODEC_CAP_DR1,
#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"),
};

@ -30,11 +30,7 @@ AVCodec ff_mp1float_decoder = {
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
.decode = decode_frame,
#if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
#else
.capabilities = CODEC_CAP_DR1,
#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP1 (MPEG audio layer 1)"),
};
@ -47,11 +43,7 @@ AVCodec ff_mp2float_decoder = {
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
.decode = decode_frame,
#if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
#else
.capabilities = CODEC_CAP_DR1,
#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"),
};
@ -64,11 +56,7 @@ AVCodec ff_mp3float_decoder = {
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
.decode = decode_frame,
#if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
#else
.capabilities = CODEC_CAP_DR1,
#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"),
};
@ -81,11 +69,7 @@ AVCodec ff_mp3adufloat_decoder = {
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
.decode = decode_frame_adu,
#if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
#else
.capabilities = CODEC_CAP_DR1,
#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"),
};
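With FF_API_PARSE_FRAME disabled for major 54, CODEC_CAP_PARSE_ONLY and avctx->parse_only drop out of every MPEG audio decoder above, leaving plain CODEC_CAP_DR1. Splitting a raw stream into frames is now strictly the parser's job; a rough caller-side sketch (error handling omitted, the decode call left as a comment):

#include "libavcodec/avcodec.h"

static void feed_mp3_stream(AVCodecContext *avctx, const uint8_t *in, int in_size)
{
    AVCodecParserContext *pc = av_parser_init(CODEC_ID_MP3);
    while (in_size > 0) {
        uint8_t *frame;
        int frame_size;
        int used = av_parser_parse2(pc, avctx, &frame, &frame_size,
                                    in, in_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
        in      += used;
        in_size -= used;
        if (frame_size > 0) {
            /* frame/frame_size hold one complete MPEG audio frame;
             * hand it to avcodec_decode_audio4() */
        }
    }
    av_parser_close(pc);
}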

@ -353,10 +353,6 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
s->luma_elim_threshold = avctx->luma_elim_threshold;
s->chroma_elim_threshold = avctx->chroma_elim_threshold;
s->strict_std_compliance = avctx->strict_std_compliance;
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
if (avctx->flags & CODEC_FLAG_PART)
s->data_partitioning = 1;
#endif
s->quarter_sample = (avctx->flags & CODEC_FLAG_QPEL) != 0;
s->mpeg_quant = avctx->mpeg_quant;
s->rtp_mode = !!avctx->rtp_payload_size;
@ -385,12 +381,6 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
!s->fixed_qscale;
s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
s->alternate_scan = !!(s->flags & CODEC_FLAG_ALT_SCAN);
s->intra_vlc_format = !!(s->flags2 & CODEC_FLAG2_INTRA_VLC);
s->q_scale_type = !!(s->flags2 & CODEC_FLAG2_NON_LINEAR_QUANT);
s->obmc = !!(s->flags & CODEC_FLAG_OBMC);
#endif
if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
@ -458,27 +448,11 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
return -1;
}
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
if (s->obmc && s->codec_id != CODEC_ID_H263 &&
s->codec_id != CODEC_ID_H263P) {
av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with H263(+)\n");
return -1;
}
#endif
if (s->quarter_sample && s->codec_id != CODEC_ID_MPEG4) {
av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
return -1;
}
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
if (s->data_partitioning && s->codec_id != CODEC_ID_MPEG4) {
av_log(avctx, AV_LOG_ERROR,
"data partitioning not supported by codec\n");
return -1;
}
#endif
if (s->max_b_frames &&
s->codec_id != CODEC_ID_MPEG4 &&
s->codec_id != CODEC_ID_MPEG1VIDEO &&
@ -499,11 +473,7 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
}
if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
| CODEC_FLAG_ALT_SCAN
#endif
)) &&
if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG2VIDEO) {
av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
return -1;
@ -535,15 +505,6 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
return -1;
}
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
if ((s->flags2 & CODEC_FLAG2_INTRA_VLC) &&
s->codec_id != CODEC_ID_MPEG2VIDEO) {
av_log(avctx, AV_LOG_ERROR,
"intra vlc table not supported by codec\n");
return -1;
}
#endif
if (s->flags & CODEC_FLAG_LOW_DELAY) {
if (s->codec_id != CODEC_ID_MPEG2VIDEO) {
av_log(avctx, AV_LOG_ERROR,
@ -558,13 +519,6 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
}
if (s->q_scale_type == 1) {
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
if (s->codec_id != CODEC_ID_MPEG2VIDEO) {
av_log(avctx, AV_LOG_ERROR,
"non linear quant is only available for mpeg2\n");
return -1;
}
#endif
if (avctx->qmax > 12) {
av_log(avctx, AV_LOG_ERROR,
"non linear quant only supports qmax <= 12 currently\n");
@ -576,11 +530,7 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
s->codec_id != CODEC_ID_MPEG4 &&
s->codec_id != CODEC_ID_MPEG1VIDEO &&
s->codec_id != CODEC_ID_MPEG2VIDEO &&
(s->codec_id != CODEC_ID_H263P
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
|| !(s->flags & CODEC_FLAG_H263P_SLICE_STRUCT)
#endif
)) {
(s->codec_id != CODEC_ID_H263P)) {
av_log(avctx, AV_LOG_ERROR,
"multi threaded encoding not supported by codec\n");
return -1;
@ -727,14 +677,6 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
s->out_format = FMT_H263;
s->h263_plus = 1;
/* Fx */
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
if (avctx->flags & CODEC_FLAG_H263P_UMV)
s->umvplus = 1;
if (avctx->flags & CODEC_FLAG_H263P_AIV)
s->alt_inter_vlc = 1;
if (avctx->flags & CODEC_FLAG_H263P_SLICE_STRUCT)
s->h263_slice_structured = 1;
#endif
s->h263_aic = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
s->modified_quant = s->h263_aic;
s->loop_filter = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;

@ -1,34 +0,0 @@
/*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* This header is provided for compatibility only and will be removed
* on next major bump
*/
#ifndef AVCODEC_OPT_H
#define AVCODEC_OPT_H
#include "libavcodec/version.h"
#if FF_API_OPT_H
#include "libavutil/opt.h"
#endif
#endif /* AVCODEC_OPT_H */
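The whole compatibility header above is deleted; code that still does #include "libavcodec/opt.h" should include libavutil/opt.h directly and use the AVOptions API from there. A small sketch ("refs" and "flags2" are generic AVCodecContext options that appear later in this diff):

#include "libavutil/opt.h"       /* previously pulled in indirectly by libavcodec/opt.h */
#include "libavcodec/avcodec.h"

static void tune(AVCodecContext *avctx)
{
    av_opt_set_int(avctx, "refs", 4, 0);            /* plain integer option */
    av_opt_set(avctx, "flags2", "+skiprd", 0);      /* named flag from the flags2 unit */
}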

@ -80,23 +80,14 @@ static const AVOption options[]={
{"bt", "set video bitrate tolerance (in bits/s)", OFFSET(bit_rate_tolerance), AV_OPT_TYPE_INT, {.dbl = AV_CODEC_DEFAULT_BITRATE*20 }, 1, INT_MAX, V|E},
{"flags", NULL, OFFSET(flags), AV_OPT_TYPE_FLAGS, {.dbl = DEFAULT }, 0, UINT_MAX, V|A|E|D, "flags"},
{"mv4", "use four motion vector by macroblock (mpeg4)", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_4MV }, INT_MIN, INT_MAX, V|E, "flags"},
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
{"obmc", "use overlapped block motion compensation (h263+)", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_OBMC }, INT_MIN, INT_MAX, V|E, "flags"},
#endif
{"qpel", "use 1/4 pel motion compensation", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_QPEL }, INT_MIN, INT_MAX, V|E, "flags"},
{"loop", "use loop filter", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_LOOP_FILTER }, INT_MIN, INT_MAX, V|E, "flags"},
{"qscale", "use fixed qscale", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_QSCALE }, INT_MIN, INT_MAX, 0, "flags"},
{"gmc", "use gmc", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_GMC }, INT_MIN, INT_MAX, V|E, "flags"},
{"mv0", "always try a mb with mv=<0,0>", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_MV0 }, INT_MIN, INT_MAX, V|E, "flags"},
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
{"part", "use data partitioning", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_PART }, INT_MIN, INT_MAX, V|E, "flags"},
#endif
{"input_preserved", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_INPUT_PRESERVED }, INT_MIN, INT_MAX, 0, "flags"},
{"pass1", "use internal 2pass ratecontrol in first pass mode", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_PASS1 }, INT_MIN, INT_MAX, 0, "flags"},
{"pass2", "use internal 2pass ratecontrol in second pass mode", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_PASS2 }, INT_MIN, INT_MAX, 0, "flags"},
#if FF_API_MJPEG_GLOBAL_OPTS
{"extern_huff", "use external huffman table (for mjpeg)", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_EXTERN_HUFF }, INT_MIN, INT_MAX, 0, "flags"},
#endif
{"gray", "only decode/encode grayscale", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_GRAY }, INT_MIN, INT_MAX, V|E|D, "flags"},
{"emu_edge", "don't draw edges", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_EMU_EDGE }, INT_MIN, INT_MAX, 0, "flags"},
{"psnr", "error[?] variables will be set during encoding", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_PSNR }, INT_MIN, INT_MAX, V|E, "flags"},
@ -104,25 +95,12 @@ static const AVOption options[]={
{"naq", "normalize adaptive quantization", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_NORMALIZE_AQP }, INT_MIN, INT_MAX, V|E, "flags"},
{"ildct", "use interlaced dct", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_INTERLACED_DCT }, INT_MIN, INT_MAX, V|E, "flags"},
{"low_delay", "force low delay", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_LOW_DELAY }, INT_MIN, INT_MAX, V|D|E, "flags"},
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
{"alt", "enable alternate scantable (mpeg2/mpeg4)", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_ALT_SCAN }, INT_MIN, INT_MAX, V|E, "flags"},
#endif
{"global_header", "place global headers in extradata instead of every keyframe", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_GLOBAL_HEADER }, INT_MIN, INT_MAX, V|A|E, "flags"},
{"bitexact", "use only bitexact stuff (except (i)dct)", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_BITEXACT }, INT_MIN, INT_MAX, A|V|S|D|E, "flags"},
{"aic", "h263 advanced intra coding / mpeg4 ac prediction", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_AC_PRED }, INT_MIN, INT_MAX, V|E, "flags"},
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
{"umv", "use unlimited motion vectors", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_H263P_UMV }, INT_MIN, INT_MAX, V|E, "flags"},
#endif
{"cbp", "use rate distortion optimization for cbp", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_CBP_RD }, INT_MIN, INT_MAX, V|E, "flags"},
{"qprd", "use rate distortion optimization for qp selection", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_QP_RD }, INT_MIN, INT_MAX, V|E, "flags"},
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
{"aiv", "h263 alternative inter vlc", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_H263P_AIV }, INT_MIN, INT_MAX, V|E, "flags"},
{"slice", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_H263P_SLICE_STRUCT }, INT_MIN, INT_MAX, V|E, "flags"},
#endif
{"ilme", "interlaced motion estimation", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_INTERLACED_ME }, INT_MIN, INT_MAX, V|E, "flags"},
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
{"scan_offset", "will reserve space for svcd scan offset user data", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_SVCD_SCAN_OFFSET }, INT_MIN, INT_MAX, V|E, "flags"},
#endif
{"cgop", "closed gop", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_CLOSED_GOP }, INT_MIN, INT_MAX, V|E, "flags"},
{"fast", "allow non spec compliant speedup tricks", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_FAST }, INT_MIN, INT_MAX, V|E, "flags2"},
{"sgop", "strictly enforce gop size", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_STRICT_GOP }, INT_MIN, INT_MAX, V|E, "flags2"},
@ -161,9 +139,6 @@ static const AVOption options[]={
{"b_qfactor", "qp factor between p and b frames", OFFSET(b_quant_factor), AV_OPT_TYPE_FLOAT, {.dbl = 1.25 }, -FLT_MAX, FLT_MAX, V|E},
{"rc_strategy", "ratecontrol method", OFFSET(rc_strategy), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
{"b_strategy", "strategy to choose between I/P/B-frames", OFFSET(b_frame_strategy), AV_OPT_TYPE_INT, {.dbl = 0 }, INT_MIN, INT_MAX, V|E},
#if FF_API_X264_GLOBAL_OPTS
{"wpredp", "weighted prediction analysis method", OFFSET(weighted_p_pred), AV_OPT_TYPE_INT, {.dbl = -1 }, INT_MIN, INT_MAX, V|E},
#endif
{"ps", "rtp payload size in bytes", OFFSET(rtp_payload_size), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
{"mv_bits", NULL, OFFSET(mv_bits), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
{"header_bits", NULL, OFFSET(header_bits), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
@ -201,14 +176,6 @@ static const AVOption options[]={
{"unofficial", "allow unofficial extensions", 0, AV_OPT_TYPE_CONST, {.dbl = FF_COMPLIANCE_UNOFFICIAL }, INT_MIN, INT_MAX, V|D|E, "strict"},
{"experimental", "allow non standardized experimental things", 0, AV_OPT_TYPE_CONST, {.dbl = FF_COMPLIANCE_EXPERIMENTAL }, INT_MIN, INT_MAX, V|D|E, "strict"},
{"b_qoffset", "qp offset between P and B frames", OFFSET(b_quant_offset), AV_OPT_TYPE_FLOAT, {.dbl = 1.25 }, -FLT_MAX, FLT_MAX, V|E},
#if FF_API_ER
{"er", "set error detection aggressivity", OFFSET(error_recognition), AV_OPT_TYPE_INT, {.dbl = FF_ER_CAREFUL }, INT_MIN, INT_MAX, A|V|D, "er"},
{"careful", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = FF_ER_CAREFUL }, INT_MIN, INT_MAX, V|D, "er"},
{"compliant", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = FF_ER_COMPLIANT }, INT_MIN, INT_MAX, V|D, "er"},
{"aggressive", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = FF_ER_AGGRESSIVE }, INT_MIN, INT_MAX, V|D, "er"},
{"very_aggressive", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = FF_ER_VERY_AGGRESSIVE }, INT_MIN, INT_MAX, V|D, "er"},
{"explode", "abort decoding on error recognition", 0, AV_OPT_TYPE_CONST, {.dbl = FF_ER_EXPLODE }, INT_MIN, INT_MAX, V|D, "er"},
#endif /* FF_API_ER */
{"err_detect", "set error detection flags", OFFSET(err_recognition), AV_OPT_TYPE_FLAGS, {.dbl = AV_EF_CRCCHECK }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
{"crccheck", "verify embedded CRCs", 0, AV_OPT_TYPE_CONST, {.dbl = AV_EF_CRCCHECK }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
{"bitstream", "detect bitstream specification deviations", 0, AV_OPT_TYPE_CONST, {.dbl = AV_EF_BITSTREAM }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
@ -219,9 +186,6 @@ static const AVOption options[]={
{"aggressive", "consider things that a sane encoder shouldnt do as an error", 0, AV_OPT_TYPE_CONST, {.dbl = AV_EF_AGGRESSIVE }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
{"has_b_frames", NULL, OFFSET(has_b_frames), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
{"block_align", NULL, OFFSET(block_align), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
#if FF_API_PARSE_FRAME
{"parse_only", NULL, OFFSET(parse_only), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
#endif
{"mpeg_quant", "use MPEG quantizers instead of H.263", OFFSET(mpeg_quant), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
{"stats_out", NULL, OFFSET(stats_out), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX},
{"stats_in", NULL, OFFSET(stats_in), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX},
@ -356,25 +320,8 @@ static const AVOption options[]={
{"nr", "noise reduction", OFFSET(noise_reduction), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
{"rc_init_occupancy", "number of bits which should be loaded into the rc buffer before decoding starts", OFFSET(rc_initial_buffer_occupancy), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
{"inter_threshold", NULL, OFFSET(inter_threshold), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
#if FF_API_X264_GLOBAL_OPTS
#define X264_DEFAULTS CODEC_FLAG2_FASTPSKIP|CODEC_FLAG2_PSY|CODEC_FLAG2_MBTREE
#else
#define X264_DEFAULTS 0
#endif
#if FF_API_LAME_GLOBAL_OPTS
#define LAME_DEFAULTS CODEC_FLAG2_BIT_RESERVOIR
#else
#define LAME_DEFAULTS 0
#endif
{"flags2", NULL, OFFSET(flags2), AV_OPT_TYPE_FLAGS, {.dbl = X264_DEFAULTS|LAME_DEFAULTS }, 0, UINT_MAX, V|A|E|D, "flags2"},
{"flags2", NULL, OFFSET(flags2), AV_OPT_TYPE_FLAGS, {.dbl = DEFAULT}, 0, UINT_MAX, V|A|E|D, "flags2"},
{"error", NULL, OFFSET(error_rate), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
#if FF_API_ANTIALIAS_ALGO
{"antialias", "MP3 antialias algorithm", OFFSET(antialias_algo), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|D, "aa"},
{"auto", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = FF_AA_AUTO }, INT_MIN, INT_MAX, V|D, "aa"},
{"fastint", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = FF_AA_FASTINT }, INT_MIN, INT_MAX, V|D, "aa"},
{"int", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = FF_AA_INT }, INT_MIN, INT_MAX, V|D, "aa"},
{"float", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = FF_AA_FLOAT }, INT_MIN, INT_MAX, V|D, "aa"},
#endif
{"qns", "quantizer noise shaping", OFFSET(quantizer_noise_shaping), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
{"threads", NULL, OFFSET(thread_count), AV_OPT_TYPE_INT, {.dbl = 1 }, 0, INT_MAX, V|E|D, "threads"},
{"auto", "detect a good number of threads", 0, AV_OPT_TYPE_CONST, {.dbl = 0 }, INT_MIN, INT_MAX, V|E|D, "threads"},
@ -417,70 +364,21 @@ static const AVOption options[]={
{"all" , NULL, 0, AV_OPT_TYPE_CONST, {.dbl = AVDISCARD_ALL }, INT_MIN, INT_MAX, V|D, "avdiscard"},
{"bidir_refine", "refine the two motion vectors used in bidirectional macroblocks", OFFSET(bidir_refine), AV_OPT_TYPE_INT, {.dbl = 1 }, 0, 4, V|E},
{"brd_scale", "downscales frames for dynamic B-frame decision", OFFSET(brd_scale), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, 0, 10, V|E},
#if FF_API_X264_GLOBAL_OPTS
{"crf", "enables constant quality mode, and selects the quality (x264/VP8)", OFFSET(crf), AV_OPT_TYPE_FLOAT, {.dbl = DEFAULT }, 0, 63, V|E},
{"cqp", "constant quantization parameter rate control method", OFFSET(cqp), AV_OPT_TYPE_INT, {.dbl = -1 }, INT_MIN, INT_MAX, V|E},
#endif
{"keyint_min", "minimum interval between IDR-frames", OFFSET(keyint_min), AV_OPT_TYPE_INT, {.dbl = 25 }, INT_MIN, INT_MAX, V|E},
{"refs", "reference frames to consider for motion compensation", OFFSET(refs), AV_OPT_TYPE_INT, {.dbl = 1 }, INT_MIN, INT_MAX, V|E},
{"chromaoffset", "chroma qp offset from luma", OFFSET(chromaoffset), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
#if FF_API_X264_GLOBAL_OPTS
{"bframebias", "influences how often B-frames are used", OFFSET(bframebias), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
#endif
{"trellis", "rate-distortion optimal quantization", OFFSET(trellis), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|A|E},
#if FF_API_X264_GLOBAL_OPTS
{"directpred", "direct mv prediction mode - 0 (none), 1 (spatial), 2 (temporal), 3 (auto)", OFFSET(directpred), AV_OPT_TYPE_INT, {.dbl = -1 }, INT_MIN, INT_MAX, V|E},
{"bpyramid", "allows B-frames to be used as references for predicting", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_BPYRAMID }, INT_MIN, INT_MAX, V|E, "flags2"},
{"wpred", "weighted biprediction for b-frames (H.264)", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_WPRED }, INT_MIN, INT_MAX, V|E, "flags2"},
{"mixed_refs", "one reference per partition, as opposed to one reference per macroblock", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_MIXED_REFS }, INT_MIN, INT_MAX, V|E, "flags2"},
{"dct8x8", "high profile 8x8 transform (H.264)", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_8X8DCT }, INT_MIN, INT_MAX, V|E, "flags2"},
{"fastpskip", "fast pskip (H.264)", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_FASTPSKIP }, INT_MIN, INT_MAX, V|E, "flags2"},
{"aud", "access unit delimiters (H.264)", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_AUD }, INT_MIN, INT_MAX, V|E, "flags2"},
#endif
{"skiprd", "RD optimal MB level residual skipping", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_SKIP_RD }, INT_MIN, INT_MAX, V|E, "flags2"},
#if FF_API_X264_GLOBAL_OPTS
{"complexityblur", "reduce fluctuations in qp (before curve compression)", OFFSET(complexityblur), AV_OPT_TYPE_FLOAT, {.dbl = -1 }, -1, FLT_MAX, V|E},
{"deblockalpha", "in-loop deblocking filter alphac0 parameter", OFFSET(deblockalpha), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, -6, 6, V|E},
{"deblockbeta", "in-loop deblocking filter beta parameter", OFFSET(deblockbeta), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, -6, 6, V|E},
{"partitions", "macroblock subpartition sizes to consider", OFFSET(partitions), AV_OPT_TYPE_FLAGS, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E, "partitions"},
{"parti4x4", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = X264_PART_I4X4 }, INT_MIN, INT_MAX, V|E, "partitions"},
{"parti8x8", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = X264_PART_I8X8 }, INT_MIN, INT_MAX, V|E, "partitions"},
{"partp4x4", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = X264_PART_P4X4 }, INT_MIN, INT_MAX, V|E, "partitions"},
{"partp8x8", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = X264_PART_P8X8 }, INT_MIN, INT_MAX, V|E, "partitions"},
{"partb8x8", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = X264_PART_B8X8 }, INT_MIN, INT_MAX, V|E, "partitions"},
#endif
{"sc_factor", "multiplied by qscale for each frame and added to scene_change_score", OFFSET(scenechange_factor), AV_OPT_TYPE_INT, {.dbl = 6 }, 0, INT_MAX, V|E},
{"mv0_threshold", NULL, OFFSET(mv0_threshold), AV_OPT_TYPE_INT, {.dbl = 256 }, 0, INT_MAX, V|E},
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
{"ivlc", "intra vlc table", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_INTRA_VLC }, INT_MIN, INT_MAX, V|E, "flags2"},
#endif
{"b_sensitivity", "adjusts sensitivity of b_frame_strategy 1", OFFSET(b_sensitivity), AV_OPT_TYPE_INT, {.dbl = 40 }, 1, INT_MAX, V|E},
{"compression_level", NULL, OFFSET(compression_level), AV_OPT_TYPE_INT, {.dbl = FF_COMPRESSION_DEFAULT }, INT_MIN, INT_MAX, V|A|E},
{"min_prediction_order", NULL, OFFSET(min_prediction_order), AV_OPT_TYPE_INT, {.dbl = -1 }, INT_MIN, INT_MAX, A|E},
{"max_prediction_order", NULL, OFFSET(max_prediction_order), AV_OPT_TYPE_INT, {.dbl = -1 }, INT_MIN, INT_MAX, A|E},
#if FF_API_FLAC_GLOBAL_OPTS
{"lpc_coeff_precision", "deprecated, use flac-specific options", OFFSET(lpc_coeff_precision), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, 0, INT_MAX, A|E},
{"prediction_order_method", "deprecated, use flac-specific options", OFFSET(prediction_order_method), AV_OPT_TYPE_INT, {.dbl = -1 }, INT_MIN, INT_MAX, A|E},
{"min_partition_order", "deprecated, use flac-specific options", OFFSET(min_partition_order), AV_OPT_TYPE_INT, {.dbl = -1 }, INT_MIN, INT_MAX, A|E},
{"max_partition_order", "deprecated, use flac-specific options", OFFSET(max_partition_order), AV_OPT_TYPE_INT, {.dbl = -1 }, INT_MIN, INT_MAX, A|E},
#endif
{"timecode_frame_start", "GOP timecode frame start number, in non drop frame format", OFFSET(timecode_frame_start), AV_OPT_TYPE_INT64, {.dbl = 0 }, 0, INT64_MAX, V|E},
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
{"drop_frame_timecode", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_DROP_FRAME_TIMECODE }, INT_MIN, INT_MAX, V|E, "flags2"},
{"non_linear_q", "use non linear quantizer", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_NON_LINEAR_QUANT }, INT_MIN, INT_MAX, V|E, "flags2"},
#endif
#if FF_API_REQUEST_CHANNELS
{"request_channels", "set desired number of audio channels", OFFSET(request_channels), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, 0, INT_MAX, A|D},
#endif
#if FF_API_DRC_SCALE
{"drc_scale", "percentage of dynamic range compression to apply", OFFSET(drc_scale), AV_OPT_TYPE_FLOAT, {.dbl = 0.0 }, 0.0, 1.0, A|D},
#endif
#if FF_API_LAME_GLOBAL_OPTS
{"reservoir", "use bit reservoir", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_BIT_RESERVOIR }, INT_MIN, INT_MAX, A|E, "flags2"},
#endif
#if FF_API_X264_GLOBAL_OPTS
{"mbtree", "use macroblock tree ratecontrol (x264 only)", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_MBTREE }, INT_MIN, INT_MAX, V|E, "flags2"},
#endif
{"bits_per_raw_sample", NULL, OFFSET(bits_per_raw_sample), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
{"channel_layout", NULL, OFFSET(channel_layout), AV_OPT_TYPE_INT64, {.dbl = DEFAULT }, 0, INT64_MAX, A|E|D, "channel_layout"},
{"request_channel_layout", NULL, OFFSET(request_channel_layout), AV_OPT_TYPE_INT64, {.dbl = DEFAULT }, 0, INT64_MAX, A|D, "request_channel_layout"},
@ -492,26 +390,7 @@ static const AVOption options[]={
{"colorspace", NULL, OFFSET(colorspace), AV_OPT_TYPE_INT, {.dbl = AVCOL_SPC_UNSPECIFIED }, 1, AVCOL_SPC_NB-1, V|E|D},
{"color_range", NULL, OFFSET(color_range), AV_OPT_TYPE_INT, {.dbl = AVCOL_RANGE_UNSPECIFIED }, 0, AVCOL_RANGE_NB-1, V|E|D},
{"chroma_sample_location", NULL, OFFSET(chroma_sample_location), AV_OPT_TYPE_INT, {.dbl = AVCHROMA_LOC_UNSPECIFIED }, 0, AVCHROMA_LOC_NB-1, V|E|D},
#if FF_API_X264_GLOBAL_OPTS
{"psy", "use psycho visual optimization", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_PSY }, INT_MIN, INT_MAX, V|E, "flags2"},
{"psy_rd", "specify psycho visual strength", OFFSET(psy_rd), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1, FLT_MAX, V|E},
{"psy_trellis", "specify psycho visual trellis", OFFSET(psy_trellis), AV_OPT_TYPE_FLOAT, {.dbl = -1 }, -1, FLT_MAX, V|E},
{"aq_mode", "specify aq method", OFFSET(aq_mode), AV_OPT_TYPE_INT, {.dbl = -1 }, -1, INT_MAX, V|E},
{"aq_strength", "specify aq strength", OFFSET(aq_strength), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1, FLT_MAX, V|E},
{"rc_lookahead", "specify number of frames to look ahead for frametype", OFFSET(rc_lookahead), AV_OPT_TYPE_INT, {.dbl = -1 }, -1, INT_MAX, V|E},
{"ssim", "ssim will be calculated during encoding", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_SSIM }, INT_MIN, INT_MAX, V|E, "flags2"},
{"intra_refresh", "use periodic insertion of intra blocks instead of keyframes", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_INTRA_REFRESH }, INT_MIN, INT_MAX, V|E, "flags2"},
{"crf_max", "in crf mode, prevents vbv from lowering quality beyond this point", OFFSET(crf_max), AV_OPT_TYPE_FLOAT, {.dbl = DEFAULT }, 0, 51, V|E},
#endif
{"log_level_offset", "set the log level offset", OFFSET(log_level_offset), AV_OPT_TYPE_INT, {.dbl = 0 }, INT_MIN, INT_MAX },
#if FF_API_FLAC_GLOBAL_OPTS
{"lpc_type", "deprecated, use flac-specific options", OFFSET(lpc_type), AV_OPT_TYPE_INT, {.dbl = AV_LPC_TYPE_DEFAULT }, AV_LPC_TYPE_DEFAULT, AV_LPC_TYPE_NB-1, A|E},
{"none", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = AV_LPC_TYPE_NONE }, INT_MIN, INT_MAX, A|E, "lpc_type"},
{"fixed", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = AV_LPC_TYPE_FIXED }, INT_MIN, INT_MAX, A|E, "lpc_type"},
{"levinson", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = AV_LPC_TYPE_LEVINSON }, INT_MIN, INT_MAX, A|E, "lpc_type"},
{"cholesky", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = AV_LPC_TYPE_CHOLESKY }, INT_MIN, INT_MAX, A|E, "lpc_type"},
{"lpc_passes", "deprecated, use flac-specific options", OFFSET(lpc_passes), AV_OPT_TYPE_INT, {.dbl = -1 }, INT_MIN, INT_MAX, A|E},
#endif
{"slices", "number of slices, used in parallelized encoding", OFFSET(slices), AV_OPT_TYPE_INT, {.dbl = 0 }, 0, INT_MAX, V|E},
{"thread_type", "select multithreading type", OFFSET(thread_type), AV_OPT_TYPE_FLAGS, {.dbl = FF_THREAD_SLICE|FF_THREAD_FRAME }, 0, INT_MAX, V|E|D, "thread_type"},
{"slice", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = FF_THREAD_SLICE }, INT_MIN, INT_MAX, V|E|D, "thread_type"},

@ -23,13 +23,30 @@
#include "libavutil/imgutils.h"
#include "avcodec.h"
/* The Old and Standard format types indicate that the image data is
* uncompressed. There is no difference between the two formats. */
#define RT_OLD 0
#define RT_STANDARD 1
/* The Byte-Encoded format type indicates that the image data is compressed
* using a run-length encoding scheme. */
#define RT_BYTE_ENCODED 2
/* The RGB format type indicates that the image is uncompressed with reverse
* component order from Old and Standard (RGB vs BGR). */
#define RT_FORMAT_RGB 3
/* The TIFF and IFF format types indicate that the raster file was originally
* converted from either of these file formats. We do not have any samples or
* documentation of the format details. */
#define RT_FORMAT_TIFF 4
#define RT_FORMAT_IFF 5
/* The Experimental format type is implementation-specific and is generally an
* indication that the image file does not conform to the Sun Raster file
* format specification. */
#define RT_EXPERIMENTAL 0xffff
typedef struct SUNRASTContext {
AVFrame picture;
} SUNRASTContext;
@ -70,6 +87,10 @@ static int sunrast_decode_frame(AVCodecContext *avctx, void *data,
maplength = AV_RB32(buf+28);
buf += 32;
if (type == RT_EXPERIMENTAL) {
av_log(avctx, AV_LOG_ERROR, "unsupported (compression) type\n");
return -1;
}
if (type > RT_FORMAT_IFF) {
av_log(avctx, AV_LOG_ERROR, "invalid (compression) type\n");
return -1;
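The new comments spell out every value of the Sun Raster type field, and the two added checks reject the experimental and out-of-range values up front. Decoding then reduces to a three-way split; a sketch of that dispatch (header offsets follow the standard 32-byte ras header, and this is an illustration, not the decoder's exact code):

unsigned type = AV_RB32(buf + 20);   /* ras_type: sixth 32-bit big-endian header field */
int rle;
switch (type) {
case RT_OLD:
case RT_STANDARD:
case RT_FORMAT_RGB:      /* uncompressed; RGB only reverses component order vs. BGR */
    rle = 0;
    break;
case RT_BYTE_ENCODED:    /* run-length encoded image data */
    rle = 1;
    break;
default:                 /* RT_FORMAT_TIFF / RT_FORMAT_IFF / RT_EXPERIMENTAL / unknown */
    return -1;
}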

@ -233,22 +233,6 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf,
p->key_frame = 1;
avctx->coded_frame= &s->picture;
#if FF_API_TIFFENC_COMPLEVEL
if (avctx->compression_level != FF_COMPRESSION_DEFAULT)
av_log(avctx, AV_LOG_WARNING, "Using compression_level to set compression "
"algorithm is deprecated. Please use the compression_algo private "
"option instead.\n");
if (avctx->compression_level == 0) {
s->compr = TIFF_RAW;
} else if(avctx->compression_level == 2) {
s->compr = TIFF_LZW;
#if CONFIG_ZLIB
} else if ((avctx->compression_level >= 3)) {
s->compr = TIFF_DEFLATE;
#endif
}
#endif
s->width = avctx->width;
s->height = avctx->height;
s->subsampling[0] = 1;

@ -107,10 +107,7 @@ AVCodec *av_codec_next(AVCodec *c){
else return first_avcodec;
}
#if !FF_API_AVCODEC_INIT
static
#endif
void avcodec_init(void)
static void avcodec_init(void)
{
static int initialized = 0;
@ -799,23 +796,13 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVD
goto free_and_end;
}
avctx->frame_number = 0;
#if FF_API_ER
av_log(avctx, AV_LOG_DEBUG, "err{or,}_recognition separate: %d; %X\n",
avctx->error_recognition, avctx->err_recognition);
switch(avctx->error_recognition){
case FF_ER_EXPLODE : avctx->err_recognition |= AV_EF_EXPLODE | AV_EF_COMPLIANT | AV_EF_CAREFUL;
break;
case FF_ER_VERY_AGGRESSIVE:
case FF_ER_AGGRESSIVE : avctx->err_recognition |= AV_EF_AGGRESSIVE;
case FF_ER_COMPLIANT : avctx->err_recognition |= AV_EF_COMPLIANT;
case FF_ER_CAREFUL : avctx->err_recognition |= AV_EF_CAREFUL;
if (avctx->codec_type == AVMEDIA_TYPE_AUDIO &&
(!avctx->time_base.num || !avctx->time_base.den)) {
avctx->time_base.num = 1;
avctx->time_base.den = avctx->sample_rate;
}
av_log(avctx, AV_LOG_DEBUG, "err{or,}_recognition combined: %d; %X\n",
avctx->error_recognition, avctx->err_recognition);
#endif
if (!HAVE_THREADS)
av_log(avctx, AV_LOG_WARNING, "Warning: not compiled with thread support, using thread emulation\n");
@ -1683,10 +1670,10 @@ const char *av_get_profile_name(const AVCodec *codec, int profile)
unsigned avcodec_version( void )
{
av_assert0(CODEC_ID_V410==164);
// av_assert0(CODEC_ID_V410==164);
av_assert0(CODEC_ID_PCM_S8_PLANAR==65563);
av_assert0(CODEC_ID_ADPCM_G722==69660);
av_assert0(CODEC_ID_BMV_AUDIO==86071);
// av_assert0(CODEC_ID_BMV_AUDIO==86071);
av_assert0(CODEC_ID_SRT==94216);
av_assert0(LIBAVCODEC_VERSION_MICRO >= 100);
@ -1766,12 +1753,6 @@ void avcodec_default_free_buffers(AVCodecContext *avctx)
}
}
#if FF_API_OLD_FF_PICT_TYPES
char av_get_pict_type_char(int pict_type){
return av_get_picture_type_char(pict_type);
}
#endif
int av_get_bits_per_sample(enum CodecID codec_id){
switch(codec_id){
case CODEC_ID_ADPCM_SBPRO_2:
@ -1821,12 +1802,6 @@ int av_get_bits_per_sample(enum CodecID codec_id){
}
}
#if FF_API_OLD_SAMPLE_FMT
int av_get_bits_per_sample_format(enum AVSampleFormat sample_fmt) {
return av_get_bytes_per_sample(sample_fmt) << 3;
}
#endif
#if !HAVE_THREADS
int ff_thread_init(AVCodecContext *s){
return -1;
@ -1982,14 +1957,6 @@ void ff_thread_await_progress(AVFrame *f, int progress, int field)
#endif
#if FF_API_THREAD_INIT
int avcodec_thread_init(AVCodecContext *s, int thread_count)
{
s->thread_count = thread_count;
return ff_thread_init(s);
}
#endif
enum AVMediaType avcodec_get_type(enum CodecID codec_id)
{
AVCodec *c= avcodec_find_decoder(codec_id);
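avcodec_thread_init() disappears along with FF_API_THREAD_INIT; threading is configured on the context before avcodec_open2(). Minimal sketch:

avctx->thread_count = 0;                               /* 0 == "auto": let libavcodec pick */
avctx->thread_type  = FF_THREAD_FRAME | FF_THREAD_SLICE;
/* then avcodec_open2(avctx, codec, NULL); replaces avcodec_thread_init(avctx, n) */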

@ -3954,6 +3954,7 @@ static int vc1_decode_p_mb_intfr(VC1Context *v)
vc1_mc_4mv_chroma4(v);
} else {
mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
dmv_x = dmv_y = 0;
if (mvbp) {
get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
}

@ -20,8 +20,8 @@
#ifndef AVCODEC_VERSION_H
#define AVCODEC_VERSION_H
#define LIBAVCODEC_VERSION_MAJOR 53
#define LIBAVCODEC_VERSION_MINOR 60
#define LIBAVCODEC_VERSION_MAJOR 54
#define LIBAVCODEC_VERSION_MINOR 0
#define LIBAVCODEC_VERSION_MICRO 100
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
@ -38,91 +38,22 @@
* Those FF_API_* defines are not part of public API.
* They may change, break or disappear at any time.
*/
#ifndef FF_API_PALETTE_CONTROL
#define FF_API_PALETTE_CONTROL (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_OLD_SAMPLE_FMT
#define FF_API_OLD_SAMPLE_FMT (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_OLD_AUDIOCONVERT
#define FF_API_OLD_AUDIOCONVERT (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_ANTIALIAS_ALGO
#define FF_API_ANTIALIAS_ALGO (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_REQUEST_CHANNELS
#define FF_API_REQUEST_CHANNELS (LIBAVCODEC_VERSION_MAJOR < 55)
#endif
#ifndef FF_API_OPT_H
#define FF_API_OPT_H (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_THREAD_INIT
#define FF_API_THREAD_INIT (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_OLD_FF_PICT_TYPES
#define FF_API_OLD_FF_PICT_TYPES (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_FLAC_GLOBAL_OPTS
#define FF_API_FLAC_GLOBAL_OPTS (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_GET_PIX_FMT_NAME
#define FF_API_GET_PIX_FMT_NAME (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_ALLOC_CONTEXT
#define FF_API_ALLOC_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 54)
#define FF_API_ALLOC_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 55)
#endif
#ifndef FF_API_AVCODEC_OPEN
#define FF_API_AVCODEC_OPEN (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_DRC_SCALE
#define FF_API_DRC_SCALE (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_ER
#define FF_API_ER (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_AVCODEC_INIT
#define FF_API_AVCODEC_INIT (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_X264_GLOBAL_OPTS
#define FF_API_X264_GLOBAL_OPTS (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_MPEGVIDEO_GLOBAL_OPTS
#define FF_API_MPEGVIDEO_GLOBAL_OPTS (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_LAME_GLOBAL_OPTS
#define FF_API_LAME_GLOBAL_OPTS (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_SNOW_GLOBAL_OPTS
#define FF_API_SNOW_GLOBAL_OPTS (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_MJPEG_GLOBAL_OPTS
#define FF_API_MJPEG_GLOBAL_OPTS (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_GET_ALPHA_INFO
#define FF_API_GET_ALPHA_INFO (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_PARSE_FRAME
#define FF_API_PARSE_FRAME (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_INTERNAL_CONTEXT
#define FF_API_INTERNAL_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_TIFFENC_COMPLEVEL
#define FF_API_TIFFENC_COMPLEVEL (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_DATA_POINTERS
#define FF_API_DATA_POINTERS (LIBAVCODEC_VERSION_MAJOR < 54)
#define FF_API_AVCODEC_OPEN (LIBAVCODEC_VERSION_MAJOR < 55)
#endif
#ifndef FF_API_OLD_DECODE_AUDIO
#define FF_API_OLD_DECODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 55)
#endif
#ifndef FF_API_OLD_TIMECODE
#define FF_API_OLD_TIMECODE (LIBAVCODEC_VERSION_MAJOR < 54)
#define FF_API_OLD_TIMECODE (LIBAVCODEC_VERSION_MAJOR < 55)
#endif
#ifndef FF_API_AVFRAME_AGE
#define FF_API_AVFRAME_AGE (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_OLD_ENCODE_AUDIO
#define FF_API_OLD_ENCODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 55)
#endif
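Every removal in this merge follows the FF_API_* convention that version.h defines: a guard macro tied to the major version wraps the deprecated symbol until the bump drops it. The pattern, with placeholder names (FF_API_FOO and avcodec_do_foo are not real API):

#ifndef FF_API_FOO
#define FF_API_FOO (LIBAVCODEC_VERSION_MAJOR < 55)   /* evaluates to 0 once the major hits 55 */
#endif

#if FF_API_FOO
attribute_deprecated int avcodec_do_foo(void);       /* old entry point, compiled out after the bump */
#endif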

@ -69,12 +69,12 @@ cglobal ac3_exponent_min_%1, 3,4,2, exp, reuse_blks, expn, offset
%define LOOP_ALIGN
INIT_MMX
AC3_EXPONENT_MIN mmx
%ifdef HAVE_MMX2
%if HAVE_MMX2
%define PMINUB PMINUB_MMXEXT
%define LOOP_ALIGN ALIGN 16
AC3_EXPONENT_MIN mmxext
%endif
%ifdef HAVE_SSE
%if HAVE_SSE
INIT_XMM
AC3_EXPONENT_MIN sse2
%endif
@ -367,7 +367,7 @@ cglobal ac3_compute_mantissa_size_sse2, 1,2,4, mant_cnt, sum
pabsd %1, %1
%endmacro
%ifdef HAVE_AMD3DNOW
%if HAVE_AMD3DNOW
INIT_MMX
cglobal ac3_extract_exponents_3dnow, 3,3,0, exp, coef, len
add expq, lenq
@ -439,11 +439,11 @@ cglobal ac3_extract_exponents_%1, 3,3,5, exp, coef, len
REP_RET
%endmacro
%ifdef HAVE_SSE
%if HAVE_SSE
INIT_XMM
%define PABSD PABSD_MMX
AC3_EXTRACT_EXPONENTS sse2
%ifdef HAVE_SSSE3
%if HAVE_SSSE3
%define PABSD PABSD_SSSE3
AC3_EXTRACT_EXPONENTS ssse3
%endif
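Every yasm change from here down is the same substitution: the HAVE_* and ARCH_* symbols are now always defined, to 0 or 1, so the defined-ness tests (%ifdef, %ifndef, %elifdef) become value tests (%if, %if X == 0, %elif). The same distinction in C preprocessor terms, as a sketch:

#define HAVE_MMX2 0      /* the build system now always defines the symbol, to 0 or 1 */

#ifdef HAVE_MMX2         /* old test: true even when the value is 0 */
int wrong_path;
#endif

#if HAVE_MMX2            /* new test: true only when the value is non-zero */
int right_path;
#endif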

@ -211,7 +211,7 @@ ps_p1p1m1m1: dd 0, 0, 0x80000000, 0x80000000, 0, 0, 0x80000000, 0x80000000
INIT_YMM
SECTION_TEXT
%ifdef HAVE_AVX
%if HAVE_AVX
; void ff_dct32_float_avx(FFTSample *out, const FFTSample *in)
cglobal dct32_float_avx, 2,3,8, out, in, tmp
; pass 1
@ -289,7 +289,7 @@ INIT_XMM
%define BUTTERFLY BUTTERFLY_SSE
%define BUTTERFLY0 BUTTERFLY0_SSE
%ifdef ARCH_X86_64
%if ARCH_X86_64
%define SPILL SWAP
%define UNSPILL SWAP

@ -135,7 +135,7 @@ cglobal put_signed_rect_clamped_%1, 5,7,3, dst, dst_stride, src, src_stride, w,
add wd, (mmsize-1)
and wd, ~(mmsize-1)
%ifdef ARCH_X86_64
%if ARCH_X86_64
mov r10d, r5m
mov r11d, wd
%define wspill r11d
@ -176,7 +176,7 @@ cglobal add_rect_clamped_%1, 7,7,3, dst, src, stride, idwt, idwt_stride, w, h
add wd, (mmsize-1)
and wd, ~(mmsize-1)
%ifdef ARCH_X86_64
%if ARCH_X86_64
mov r11d, wd
%define wspill r11d
%else

@ -138,7 +138,7 @@ align 16
%endif
%define t0 [v1q + orderq]
%define t1 [v1q + orderq + mmsize]
%ifdef ARCH_X86_64
%if ARCH_X86_64
mova m8, t0
mova m9, t1
%define t0 m8
@ -474,7 +474,7 @@ cglobal scalarproduct_float_sse, 3,3,2, v1, v2, offset
movss xmm1, xmm0
shufps xmm0, xmm0, 1
addss xmm0, xmm1
%ifndef ARCH_X86_64
%if ARCH_X86_64 == 0
movd r0m, xmm0
fld dword r0m
%endif
@ -498,7 +498,7 @@ cglobal scalarproduct_float_sse, 3,3,2, v1, v2, offset
; function implementations. Fast are fixed-width, slow is variable-width
%macro EMU_EDGE_FUNC 0
%ifdef ARCH_X86_64
%if ARCH_X86_64
%define w_reg r10
cglobal emu_edge_core, 6, 7, 1
mov r11, r5 ; save block_h
@ -513,14 +513,14 @@ cglobal emu_edge_core, 2, 7, 0
mov w_reg, r7m
sub w_reg, r6m ; w = start_x - end_x
sub r5, r4
%ifdef ARCH_X86_64
%if ARCH_X86_64
sub r4, r3
%else
sub r4, dword r3m
%endif
cmp w_reg, 22
jg .slow_v_extend_loop
%ifdef ARCH_X86_32
%if ARCH_X86_32
mov r2, r2m ; linesize
%endif
sal w_reg, 7 ; w * 128
@ -536,7 +536,7 @@ cglobal emu_edge_core, 2, 7, 0
; horizontal extend (left/right)
mov w_reg, r6m ; start_x
sub r0, w_reg
%ifdef ARCH_X86_64
%if ARCH_X86_64
mov r3, r0 ; backup of buf+block_h*linesize
mov r5, r11
%else
@ -564,7 +564,7 @@ cglobal emu_edge_core, 2, 7, 0
; now r3(64)/r0(32)=buf,r2=linesize,r11/r5=block_h,r6/r3=val, r10/r6=end_x, r1=block_w
.right_extend:
%ifdef ARCH_X86_32
%if ARCH_X86_32
mov r0, r0m
mov r5, r5m
%endif
@ -589,13 +589,13 @@ cglobal emu_edge_core, 2, 7, 0
.h_extend_end:
RET
%ifdef ARCH_X86_64
%if ARCH_X86_64
%define vall al
%define valh ah
%define valw ax
%define valw2 r10w
%define valw3 r3w
%ifdef WIN64
%if WIN64
%define valw4 r4w
%else ; unix64
%define valw4 r3w
@ -643,7 +643,7 @@ cglobal emu_edge_core, 2, 7, 0
%endrep ; %2/16
%endif
%ifdef ARCH_X86_64
%if ARCH_X86_64
%if (%2-%%src_off) == 8
mov rax, [r1+%%src_off]
%assign %%src_off %%src_off+8
@ -692,7 +692,7 @@ cglobal emu_edge_core, 2, 7, 0
%endrep ; %2/16
%endif
%ifdef ARCH_X86_64
%if ARCH_X86_64
%if (%2-%%dst_off) == 8
mov [r0+%%dst_off], rax
%assign %%dst_off %%dst_off+8
@ -740,7 +740,7 @@ cglobal emu_edge_core, 2, 7, 0
ALIGN 128
.emuedge_v_extend_ %+ %%n:
; extend pixels above body
%ifdef ARCH_X86_64
%if ARCH_X86_64
test r3 , r3 ; if (!start_y)
jz .emuedge_copy_body_ %+ %%n %+ _loop ; goto body
%else ; ARCH_X86_32
@ -751,7 +751,7 @@ ALIGN 128
.emuedge_extend_top_ %+ %%n %+ _loop: ; do {
WRITE_NUM_BYTES top, %%n ; write bytes
add r0 , r2 ; dst += linesize
%ifdef ARCH_X86_64
%if ARCH_X86_64
dec r3d
%else ; ARCH_X86_32
dec dword r3m
@ -779,7 +779,7 @@ ALIGN 128
jnz .emuedge_extend_bottom_ %+ %%n %+ _loop ; } while (--block_h)
.emuedge_v_extend_end_ %+ %%n:
%ifdef ARCH_X86_64
%if ARCH_X86_64
ret
%else ; ARCH_X86_32
rep ret
@ -841,7 +841,7 @@ ALIGN 64
WRITE_V_PIXEL %%n, r0 ; write pixels
dec r5
jnz .emuedge_extend_left_ %+ %%n ; } while (--block_h)
%ifdef ARCH_X86_64
%if ARCH_X86_64
ret
%else ; ARCH_X86_32
rep ret
@ -856,7 +856,7 @@ ALIGN 64
%rep 11
ALIGN 64
.emuedge_extend_right_ %+ %%n: ; do {
%ifdef ARCH_X86_64
%if ARCH_X86_64
sub r3, r2 ; dst -= linesize
READ_V_PIXEL %%n, [r3+w_reg-1] ; read pixels
WRITE_V_PIXEL %%n, r3+r4-%%n ; write pixels
@ -868,7 +868,7 @@ ALIGN 64
dec r5
%endif ; ARCH_X86_64/32
jnz .emuedge_extend_right_ %+ %%n ; } while (--block_h)
%ifdef ARCH_X86_64
%if ARCH_X86_64
ret
%else ; ARCH_X86_32
rep ret
@ -876,7 +876,7 @@ ALIGN 64
%assign %%n %%n+2
%endrep
%ifdef ARCH_X86_32
%if ARCH_X86_32
%define stack_offset 0x10
%endif
%endmacro ; RIGHT_EXTEND
@ -916,7 +916,7 @@ ALIGN 64
V_COPY_NPX %1, mm0, movq, 8, 0xFFFFFFF8
%else ; sse
V_COPY_NPX %1, xmm0, movups, 16, 0xFFFFFFF0
%ifdef ARCH_X86_64
%if ARCH_X86_64
%define linesize r2
V_COPY_NPX %1, rax , mov, 8
%else ; ARCH_X86_32
@ -940,7 +940,7 @@ ALIGN 64
.slow_v_extend_loop:
; r0=buf,r1=src,r2(64)/r2m(32)=linesize,r3(64)/r3m(32)=start_x,r4=end_y,r5=block_h
; r11(64)/r3(later-64)/r2(32)=cnt_reg,r6(64)/r3(32)=val_reg,r10(64)/r6(32)=w=end_x-start_x
%ifdef ARCH_X86_64
%if ARCH_X86_64
push r11 ; save old value of block_h
test r3, r3
%define cnt_reg r11
@ -956,18 +956,18 @@ ALIGN 64
.do_body_copy:
V_COPY_ROW body, r4
%ifdef ARCH_X86_64
%if ARCH_X86_64
pop r11 ; restore old value of block_h
%define cnt_reg r3
%endif
test r5, r5
%ifdef ARCH_X86_64
%if ARCH_X86_64
jz .v_extend_end
%else
jz .skip_bottom_extend
%endif
V_COPY_ROW bottom, r5
%ifdef ARCH_X86_32
%if ARCH_X86_32
.skip_bottom_extend:
mov r2, r2m
%endif
@ -996,7 +996,7 @@ ALIGN 64
.left_extend_loop_end:
dec r5
jnz .slow_left_extend_loop
%ifdef ARCH_X86_32
%if ARCH_X86_32
mov r2, r2m
%endif
jmp .right_extend
@ -1006,7 +1006,7 @@ ALIGN 64
.slow_right_extend_loop:
; r3(64)/r0(32)=buf+block_h*linesize,r2=linesize,r4=block_w,r11(64)/r5(32)=block_h,
; r10(64)/r6(32)=end_x,r6/r3=val,r1=cntr
%ifdef ARCH_X86_64
%if ARCH_X86_64
%define buf_reg r3
%define bh_reg r11
%else
@ -1047,7 +1047,7 @@ SLOW_RIGHT_EXTEND
%endmacro
emu_edge sse
%ifdef ARCH_X86_32
%if ARCH_X86_32
emu_edge mmx
%endif
@ -1138,7 +1138,7 @@ VECTOR_CLIP_INT32 6, 1, 0, 0
%macro BUTTERFLIES_FLOAT_INTERLEAVE 0
cglobal butterflies_float_interleave, 4,4,3, dst, src0, src1, len
%ifdef ARCH_X86_64
%if ARCH_X86_64
movsxd lenq, lend
%endif
test lenq, lenq

@ -245,7 +245,7 @@ hadamard8x8_diff_%1:
lea r0, [r3*3]
DIFF_PIXELS_8 r1, r2, 0, r3, r0, rsp+gprsize
HADAMARD8
%ifdef ARCH_X86_64
%if ARCH_X86_64
TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
%else
TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [rsp+gprsize], [rsp+mmsize+gprsize]
@ -270,7 +270,7 @@ HADAMARD8_DIFF_MMX mmx2
INIT_XMM
%define ABS2 ABS2_MMX2
%ifdef ARCH_X86_64
%if ARCH_X86_64
%define ABS_SUM_8x8 ABS_SUM_8x8_64
%else
%define ABS_SUM_8x8 ABS_SUM_8x8_32

@ -30,7 +30,7 @@
%include "libavutil/x86/x86inc.asm"
%ifdef ARCH_X86_64
%if ARCH_X86_64
%define pointer resq
%else
%define pointer resd
@ -73,7 +73,7 @@ cextern cos_ %+ i
%assign i i<<1
%endrep
%ifdef ARCH_X86_64
%if ARCH_X86_64
%define pointer dq
%else
%define pointer dd
@ -299,7 +299,7 @@ IF%1 mova Z(1), m5
INIT_YMM
%ifdef HAVE_AVX
%if HAVE_AVX
align 16
fft8_avx:
mova m0, Z(0)
@ -535,7 +535,7 @@ DEFINE_ARGS z, w, n, o1, o3
INIT_YMM
%ifdef HAVE_AVX
%if HAVE_AVX
%macro INTERL_AVX 5
vunpckhps %3, %2, %1
vunpcklps %2, %2, %1
@ -639,7 +639,7 @@ cglobal fft_dispatch%3%2, 2,5,8, z, nbits
RET
%endmacro ; DECL_FFT
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_YMM
DECL_FFT 6, _avx
DECL_FFT 6, _avx, _interleave
@ -751,7 +751,7 @@ INIT_XMM
%macro DECL_IMDCT 2
cglobal imdct_half%1, 3,7,8; FFTContext *s, FFTSample *output, const FFTSample *input
%ifdef ARCH_X86_64
%if ARCH_X86_64
%define rrevtab r10
%define rtcos r11
%define rtsin r12
@ -770,24 +770,24 @@ cglobal imdct_half%1, 3,7,8; FFTContext *s, FFTSample *output, const FFTSample *
mov rtsin, [r0+FFTContext.tsin]
add rtcos, r3
add rtsin, r3
%ifndef ARCH_X86_64
%if ARCH_X86_64 == 0
push rtcos
push rtsin
%endif
shr r3, 1
mov rrevtab, [r0+FFTContext.revtab]
add rrevtab, r3
%ifndef ARCH_X86_64
%if ARCH_X86_64 == 0
push rrevtab
%endif
sub r3, 4
%ifdef ARCH_X86_64
%if ARCH_X86_64
xor r4, r4
sub r4, r3
%endif
.pre:
%ifndef ARCH_X86_64
%if ARCH_X86_64 == 0
;unspill
xor r4, r4
sub r4, r3
@ -796,7 +796,7 @@ cglobal imdct_half%1, 3,7,8; FFTContext *s, FFTSample *output, const FFTSample *
%endif
PREROTATER r4, r3, r2, rtcos, rtsin
%ifdef ARCH_X86_64
%if ARCH_X86_64
movzx r5, word [rrevtab+r4-4]
movzx r6, word [rrevtab+r4-2]
movzx r13, word [rrevtab+r3]
@ -830,7 +830,7 @@ cglobal imdct_half%1, 3,7,8; FFTContext *s, FFTSample *output, const FFTSample *
mov r0d, [r5+FFTContext.mdctsize]
add r6, r0
shr r0, 1
%ifndef ARCH_X86_64
%if ARCH_X86_64 == 0
%define rtcos r2
%define rtsin r3
mov rtcos, [esp+8]
@ -840,7 +840,7 @@ cglobal imdct_half%1, 3,7,8; FFTContext *s, FFTSample *output, const FFTSample *
mov r1, -mmsize
sub r1, r0
%2 r0, r1, r6, rtcos, rtsin
%ifdef ARCH_X86_64
%if ARCH_X86_64
pop r14
pop r13
pop r12
@ -857,6 +857,6 @@ DECL_IMDCT _sse, POSROTATESHUF
INIT_YMM
%ifdef HAVE_AVX
%if HAVE_AVX
DECL_IMDCT _avx, POSROTATESHUF_AVX
%endif

@ -28,14 +28,14 @@ SECTION_TEXT
; void int32_to_float_fmul_scalar(float *dst, const int *src, float mul, int len);
;---------------------------------------------------------------------------------
%macro INT32_TO_FLOAT_FMUL_SCALAR 2
%ifdef UNIX64
%if UNIX64
cglobal int32_to_float_fmul_scalar_%1, 3,3,%2, dst, src, len
%else
cglobal int32_to_float_fmul_scalar_%1, 4,4,%2, dst, src, mul, len
%endif
%ifdef WIN64
%if WIN64
SWAP 0, 2
%elifdef ARCH_X86_32
%elif ARCH_X86_32
movss m0, mulm
%endif
SPLATD m0
@ -180,7 +180,7 @@ FLOAT_TO_INT16_INTERLEAVE2 sse2
%macro FLOAT_TO_INT16_INTERLEAVE6 1
; void float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len)
cglobal float_to_int16_interleave6_%1, 2,7,0, dst, src, src1, src2, src3, src4, src5
%ifdef ARCH_X86_64
%if ARCH_X86_64
%define lend r10d
mov lend, r2d
%else
@ -241,7 +241,7 @@ FLOAT_TO_INT16_INTERLEAVE6 3dn2
%macro FLOAT_INTERLEAVE6 2
cglobal float_interleave6_%1, 2,7,%2, dst, src, src1, src2, src3, src4, src5
%ifdef ARCH_X86_64
%if ARCH_X86_64
%define lend r10d
mov lend, r2d
%else

@ -94,7 +94,7 @@ SECTION .text
; put/avg_h264_chroma_mc8_mmx_*(uint8_t *dst /*align 8*/, uint8_t *src /*align 1*/,
; int stride, int h, int mx, int my)
cglobal %1_%2_chroma_mc8_%3, 6, 7, 0
%ifdef ARCH_X86_64
%if ARCH_X86_64
movsxd r2, r2d
%endif
mov r6d, r5d
@ -113,7 +113,7 @@ cglobal %1_%2_chroma_mc8_%3, 6, 7, 0
%define rnd_1d_rv40 rnd_rv40_1d_tbl
%define rnd_2d_rv40 rnd_rv40_2d_tbl
%endif
%ifdef ARCH_X86_64
%if ARCH_X86_64
mov r10, r5
and r10, 6 ; &~1 for mx/my=[0,7]
lea r10, [r10*4+r4]
@ -147,7 +147,7 @@ cglobal %1_%2_chroma_mc8_%3, 6, 7, 0
%ifdef PIC
lea r11, [rnd_rv40_1d_tbl]
%endif
%ifndef ARCH_X86_64
%if ARCH_X86_64 == 0
mov r5, r0m
%endif
%endif
@ -198,7 +198,7 @@ cglobal %1_%2_chroma_mc8_%3, 6, 7, 0
%ifdef PIC
lea r11, [rnd_rv40_2d_tbl]
%endif
%ifndef ARCH_X86_64
%if ARCH_X86_64 == 0
mov r5, r0m
%endif
%endif
@ -279,7 +279,7 @@ cglobal %1_%2_chroma_mc8_%3, 6, 7, 0
%macro chroma_mc4_mmx_func 3
cglobal %1_%2_chroma_mc4_%3, 6, 6, 0
%ifdef ARCH_X86_64
%if ARCH_X86_64
movsxd r2, r2d
%endif
pxor m7, m7
@ -364,7 +364,7 @@ cglobal %1_%2_chroma_mc4_%3, 6, 6, 0
%macro chroma_mc2_mmx_func 3
cglobal %1_%2_chroma_mc2_%3, 6, 7, 0
%ifdef ARCH_X86_64
%if ARCH_X86_64
movsxd r2, r2d
%endif
@ -452,7 +452,7 @@ chroma_mc4_mmx_func avg, rv40, 3dnow
%macro chroma_mc8_ssse3_func 3
cglobal %1_%2_chroma_mc8_%3, 6, 7, 8
%ifdef ARCH_X86_64
%if ARCH_X86_64
movsxd r2, r2d
%endif
mov r6d, r5d
@ -600,7 +600,7 @@ cglobal %1_%2_chroma_mc8_%3, 6, 7, 8
%macro chroma_mc4_ssse3_func 3
cglobal %1_%2_chroma_mc4_%3, 6, 7, 0
%ifdef ARCH_X86_64
%if ARCH_X86_64
movsxd r2, r2d
%endif
mov r6, r4

@ -252,7 +252,7 @@ cglobal %1_h264_chroma_mc2_10_%2, 6,7
%define CHROMAMC_AVG NOTHING
INIT_XMM
CHROMA_MC8 put, sse2
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_AVX
CHROMA_MC8 put, avx
%endif
@ -264,7 +264,7 @@ CHROMA_MC2 put, mmxext
%define PAVG pavgw
INIT_XMM
CHROMA_MC8 avg, sse2
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_AVX
CHROMA_MC8 avg, avx
%endif

@ -200,7 +200,7 @@ cextern pb_A1
; out: %4 = |%1-%2|>%3
; clobbers: %5
%macro DIFF_GT2 5
%ifdef ARCH_X86_64
%if ARCH_X86_64
psubusb %5, %2, %1
psubusb %4, %1, %2
%else
@ -278,7 +278,7 @@ cextern pb_A1
mova %4, %2
%endmacro
%ifdef ARCH_X86_64
%if ARCH_X86_64
;-----------------------------------------------------------------------------
; void deblock_v_luma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
@ -333,7 +333,7 @@ cglobal deblock_h_luma_8_%1, 5,7
lea r11, [r10+r10*2]
lea r6, [r0-4]
lea r5, [r0-4+r11]
%ifdef WIN64
%if WIN64
sub rsp, 0x98
%define pix_tmp rsp+0x30
%else
@ -352,7 +352,7 @@ cglobal deblock_h_luma_8_%1, 5,7
; don't backup r6, r5, r10, r11 because deblock_v_luma_sse2 doesn't use them
lea r0, [pix_tmp+0x30]
mov r1d, 0x10
%ifdef WIN64
%if WIN64
mov [rsp+0x20], r4
%endif
call deblock_v_luma_8_%1
@ -376,7 +376,7 @@ cglobal deblock_h_luma_8_%1, 5,7
movq m3, [pix_tmp+0x40]
TRANSPOSE8x4B_STORE PASS8ROWS(r6, r5, r10, r11)
%ifdef WIN64
%if WIN64
add rsp, 0x98
%else
add rsp, 0x68
@ -517,7 +517,7 @@ DEBLOCK_LUMA avx, v, 16
%macro LUMA_INTRA_P012 4 ; p0..p3 in memory
%ifdef ARCH_X86_64
%if ARCH_X86_64
pavgb t0, p2, p1
pavgb t1, p0, q0
%else
@ -528,7 +528,7 @@ DEBLOCK_LUMA avx, v, 16
%endif
pavgb t0, t1 ; ((p2+p1+1)/2 + (p0+q0+1)/2 + 1)/2
mova t5, t1
%ifdef ARCH_X86_64
%if ARCH_X86_64
paddb t2, p2, p1
paddb t3, p0, q0
%else
@ -546,7 +546,7 @@ DEBLOCK_LUMA avx, v, 16
pand t2, mpb_1
psubb t0, t2 ; p1' = (p2+p1+p0+q0+2)/4;
%ifdef ARCH_X86_64
%if ARCH_X86_64
pavgb t1, p2, q1
psubb t2, p2, q1
%else
@ -621,7 +621,7 @@ DEBLOCK_LUMA avx, v, 16
%define t1 m5
%define t2 m6
%define t3 m7
%ifdef ARCH_X86_64
%if ARCH_X86_64
%define p2 m8
%define q2 m9
%define t4 m10
@ -648,7 +648,7 @@ DEBLOCK_LUMA avx, v, 16
; void deblock_v_luma_intra( uint8_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_%2_luma_intra_8_%1, 4,6,16
%ifndef ARCH_X86_64
%if ARCH_X86_64 == 0
sub esp, 0x60
%endif
lea r4, [r1*4]
@ -663,7 +663,7 @@ cglobal deblock_%2_luma_intra_8_%1, 4,6,16
mova p0, [r4+r5]
mova q0, [r0]
mova q1, [r0+r1]
%ifdef ARCH_X86_64
%if ARCH_X86_64
pxor mpb_0, mpb_0
mova mpb_1, [pb_1]
LOAD_MASK r2d, r3d, t5 ; m5=beta-1, t5=alpha-1, m7=mask0
@ -699,13 +699,13 @@ cglobal deblock_%2_luma_intra_8_%1, 4,6,16
LUMA_INTRA_SWAP_PQ
LUMA_INTRA_P012 [r0], [r0+r1], [r0+2*r1], [r0+r5]
.end:
%ifndef ARCH_X86_64
%if ARCH_X86_64 == 0
add esp, 0x60
%endif
RET
INIT_MMX
%ifdef ARCH_X86_64
%if ARCH_X86_64
;-----------------------------------------------------------------------------
; void deblock_h_luma_intra( uint8_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
@ -785,7 +785,7 @@ DEBLOCK_LUMA_INTRA sse2, v
INIT_AVX
DEBLOCK_LUMA_INTRA avx , v
%endif
%ifndef ARCH_X86_64
%if ARCH_X86_64 == 0
INIT_MMX
DEBLOCK_LUMA_INTRA mmxext, v8
%endif
@ -830,7 +830,7 @@ cglobal deblock_v_chroma_8_mmxext, 5,6
; void ff_deblock_h_chroma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
cglobal deblock_h_chroma_8_mmxext, 5,7
%ifdef ARCH_X86_64
%if ARCH_X86_64
%define buf0 [rsp-24]
%define buf1 [rsp-16]
%else

@ -302,7 +302,7 @@ cglobal deblock_h_luma_10_%1, 5,6,8*(mmsize/16)
%endmacro
INIT_XMM
%ifdef ARCH_X86_64
%if ARCH_X86_64
; in: m0=p1, m1=p0, m2=q0, m3=q1, m8=p2, m9=q2
; m12=alpha, m13=beta
; out: m0=p1', m3=q1', m1=p0', m2=q0'
@ -437,7 +437,7 @@ DEBLOCK_LUMA_64 avx
; %1=p0 %2=p1 %3=p2 %4=p3 %5=q0 %6=q1 %7=mask0
; %8=mask1p %9=2 %10=p0' %11=p1' %12=p2'
%macro LUMA_INTRA_P012 12 ; p0..p3 in memory
%ifdef ARCH_X86_64
%if ARCH_X86_64
paddw t0, %3, %2
mova t2, %4
paddw t2, %3
@ -503,7 +503,7 @@ DEBLOCK_LUMA_64 avx
LOAD_AB t0, t1, r2d, r3d
mova %1, t0
LOAD_MASK m0, m1, m2, m3, %1, t1, t0, t2, t3
%ifdef ARCH_X86_64
%if ARCH_X86_64
mova %2, t0 ; mask0
psrlw t3, %1, 2
%else
@ -600,7 +600,7 @@ DEBLOCK_LUMA_64 avx
%endif
%endmacro
%ifdef ARCH_X86_64
%if ARCH_X86_64
;-----------------------------------------------------------------------------
; void deblock_v_luma_intra( uint16_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
@ -796,7 +796,7 @@ cglobal deblock_h_luma_intra_10_%1, 4,7,8*(mmsize/16)
RET
%endmacro
%ifndef ARCH_X86_64
%if ARCH_X86_64 == 0
INIT_MMX
DEBLOCK_LUMA mmxext
DEBLOCK_LUMA_INTRA mmxext
@ -913,7 +913,7 @@ cglobal deblock_v_chroma_intra_10_%1, 4,6-(mmsize/16),8*(mmsize/16)
%endif
%endmacro
%ifndef ARCH_X86_64
%if ARCH_X86_64 == 0
INIT_MMX
DEBLOCK_CHROMA mmxext
%endif

@ -198,14 +198,14 @@ cglobal h264_idct8_add_8_mmx, 3, 4, 0
; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
%macro IDCT8_ADD_SSE 4
IDCT8_1D_FULL %2
%ifdef ARCH_X86_64
%if ARCH_X86_64
TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
%else
TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [%2], [%2+16]
%endif
paddw m0, [pw_32]
%ifndef ARCH_X86_64
%if ARCH_X86_64 == 0
mova [%2 ], m0
mova [%2+16], m4
IDCT8_1D [%2], [%2+ 16]
@ -225,7 +225,7 @@ cglobal h264_idct8_add_8_mmx, 3, 4, 0
STORE_DIFF m1, m6, m7, [%1+%3 ]
STORE_DIFF m2, m6, m7, [%1+%3*2]
STORE_DIFF m3, m6, m7, [%1+%4 ]
%ifndef ARCH_X86_64
%if ARCH_X86_64 == 0
mova m0, [%2 ]
mova m1, [%2+16]
%else
@ -371,7 +371,7 @@ cglobal h264_idct_add16_8_mmx2, 5, 7, 0
test r6, r6
jz .no_dc
DC_ADD_MMX2_INIT r2, r3, r6
%ifdef ARCH_X86_64
%if ARCH_X86_64
%define dst_reg r10
%define dst_regd r10d
%else
@ -381,7 +381,7 @@ cglobal h264_idct_add16_8_mmx2, 5, 7, 0
mov dst_regd, dword [r1+r5*4]
lea dst_reg, [r0+dst_reg]
DC_ADD_MMX2_OP movh, dst_reg, r3, r6
%ifndef ARCH_X86_64
%if ARCH_X86_64 == 0
mov r1, r1m
%endif
inc r5
@ -448,7 +448,7 @@ cglobal h264_idct_add16intra_8_mmx2, 5, 7, 0
test r6, r6
jz .skipblock
DC_ADD_MMX2_INIT r2, r3, r6
%ifdef ARCH_X86_64
%if ARCH_X86_64
%define dst_reg r10
%define dst_regd r10d
%else
@ -458,7 +458,7 @@ cglobal h264_idct_add16intra_8_mmx2, 5, 7, 0
mov dst_regd, dword [r1+r5*4]
add dst_reg, r0
DC_ADD_MMX2_OP movh, dst_reg, r3, r6
%ifndef ARCH_X86_64
%if ARCH_X86_64 == 0
mov r1, r1m
%endif
.skipblock
@ -489,7 +489,7 @@ cglobal h264_idct8_add4_8_mmx2, 5, 7, 0
test r6, r6
jz .no_dc
DC_ADD_MMX2_INIT r2, r3, r6
%ifdef ARCH_X86_64
%if ARCH_X86_64
%define dst_reg r10
%define dst_regd r10d
%else
@ -501,7 +501,7 @@ cglobal h264_idct8_add4_8_mmx2, 5, 7, 0
DC_ADD_MMX2_OP mova, dst_reg, r3, r6
lea dst_reg, [dst_reg+r3*4]
DC_ADD_MMX2_OP mova, dst_reg, r3, r6
%ifndef ARCH_X86_64
%if ARCH_X86_64 == 0
mov r1, r1m
%endif
add r5, 4
@ -550,7 +550,7 @@ cglobal h264_idct8_add4_8_sse2, 5, 7, 10
jz .no_dc
INIT_MMX
DC_ADD_MMX2_INIT r2, r3, r6
%ifdef ARCH_X86_64
%if ARCH_X86_64
%define dst_reg r10
%define dst_regd r10d
%else
@ -562,7 +562,7 @@ INIT_MMX
DC_ADD_MMX2_OP mova, dst_reg, r3, r6
lea dst_reg, [dst_reg+r3*4]
DC_ADD_MMX2_OP mova, dst_reg, r3, r6
%ifndef ARCH_X86_64
%if ARCH_X86_64 == 0
mov r1, r1m
%endif
add r5, 4
@ -575,7 +575,7 @@ INIT_XMM
mov dst_regd, dword [r1+r5*4]
add dst_reg, r0
IDCT8_ADD_SSE dst_reg, r2, r3, r6
%ifndef ARCH_X86_64
%if ARCH_X86_64 == 0
mov r1, r1m
%endif
.skipblock
@ -593,7 +593,7 @@ h264_idct_add8_mmx_plane:
or r6w, word [r2]
test r6, r6
jz .skipblock
%ifdef ARCH_X86_64
%if ARCH_X86_64
mov r0d, dword [r1+r5*4]
add r0, [r10]
%else
@ -617,13 +617,13 @@ cglobal h264_idct_add8_8_mmx, 5, 7, 0
%ifdef PIC
lea r11, [scan8_mem]
%endif
%ifdef ARCH_X86_64
%if ARCH_X86_64
mov r10, r0
%endif
call h264_idct_add8_mmx_plane
mov r5, 32
add r2, 384
%ifdef ARCH_X86_64
%if ARCH_X86_64
add r10, gprsize
%else
add r0mp, gprsize
@ -637,7 +637,7 @@ h264_idct_add8_mmx2_plane
movzx r6, byte [r4+r6]
test r6, r6
jz .try_dc
%ifdef ARCH_X86_64
%if ARCH_X86_64
mov r0d, dword [r1+r5*4]
add r0, [r10]
%else
@ -656,7 +656,7 @@ h264_idct_add8_mmx2_plane
test r6, r6
jz .skipblock
DC_ADD_MMX2_INIT r2, r3, r6
%ifdef ARCH_X86_64
%if ARCH_X86_64
mov r0d, dword [r1+r5*4]
add r0, [r10]
%else
@ -677,7 +677,7 @@ h264_idct_add8_mmx2_plane
cglobal h264_idct_add8_8_mmx2, 5, 7, 0
mov r5, 16
add r2, 512
%ifdef ARCH_X86_64
%if ARCH_X86_64
mov r10, r0
%endif
%ifdef PIC
@ -686,7 +686,7 @@ cglobal h264_idct_add8_8_mmx2, 5, 7, 0
call h264_idct_add8_mmx2_plane
mov r5, 32
add r2, 384
%ifdef ARCH_X86_64
%if ARCH_X86_64
add r10, gprsize
%else
add r0mp, gprsize
@ -738,7 +738,7 @@ x264_add8x4_idct_sse2:
test r0, r0
jz .cycle%1end
mov r0d, dword [r1+%1*8]
%ifdef ARCH_X86_64
%if ARCH_X86_64
add r0, r10
%else
add r0, r0m
@ -753,7 +753,7 @@ x264_add8x4_idct_sse2:
; ff_h264_idct_add16_sse2(uint8_t *dst, const int *block_offset,
; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
cglobal h264_idct_add16_8_sse2, 5, 5, 8
%ifdef ARCH_X86_64
%if ARCH_X86_64
mov r10, r0
%endif
; unrolling of the loop leads to an average performance gain of
@ -773,7 +773,7 @@ cglobal h264_idct_add16_8_sse2, 5, 5, 8
test r0, r0
jz .try%1dc
mov r0d, dword [r1+%1*8]
%ifdef ARCH_X86_64
%if ARCH_X86_64
add r0, r10
%else
add r0, r0m
@ -785,7 +785,7 @@ cglobal h264_idct_add16_8_sse2, 5, 5, 8
or r0w, word [r2+32]
jz .cycle%1end
mov r0d, dword [r1+%1*8]
%ifdef ARCH_X86_64
%if ARCH_X86_64
add r0, r10
%else
add r0, r0m
@ -800,7 +800,7 @@ cglobal h264_idct_add16_8_sse2, 5, 5, 8
; ff_h264_idct_add16intra_sse2(uint8_t *dst, const int *block_offset,
; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
cglobal h264_idct_add16intra_8_sse2, 5, 7, 8
%ifdef ARCH_X86_64
%if ARCH_X86_64
mov r10, r0
%endif
add16intra_sse2_cycle 0, 0xc
@ -817,7 +817,7 @@ cglobal h264_idct_add16intra_8_sse2, 5, 7, 8
movzx r0, word [r4+%2]
test r0, r0
jz .try%1dc
%ifdef ARCH_X86_64
%if ARCH_X86_64
mov r0d, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
add r0, [r10]
%else
@ -831,7 +831,7 @@ cglobal h264_idct_add16intra_8_sse2, 5, 7, 8
movsx r0, word [r2 ]
or r0w, word [r2+32]
jz .cycle%1end
%ifdef ARCH_X86_64
%if ARCH_X86_64
mov r0d, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
add r0, [r10]
%else
@ -852,12 +852,12 @@ cglobal h264_idct_add16intra_8_sse2, 5, 7, 8
; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
cglobal h264_idct_add8_8_sse2, 5, 7, 8
add r2, 512
%ifdef ARCH_X86_64
%if ARCH_X86_64
mov r10, r0
%endif
add8_sse2_cycle 0, 0x34
add8_sse2_cycle 1, 0x3c
%ifdef ARCH_X86_64
%if ARCH_X86_64
add r10, gprsize
%else
add r0mp, gprsize
@ -977,11 +977,11 @@ cglobal h264_luma_dc_dequant_idct_%1, 3,4,%2
WALSH4_1D 0,1,2,3,4
; shift, tmp, output, qmul
%ifdef WIN64
%if WIN64
DECLARE_REG_TMP 0,3,1,2
; we can't avoid this, because r0 is the shift register (ecx) on win64
xchg r0, t2
%elifdef ARCH_X86_64
%elif ARCH_X86_64
DECLARE_REG_TMP 3,1,0,2
%else
DECLARE_REG_TMP 1,3,0,2
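Editor's note on the assembly hunks above: the switch from %ifdef ARCH_X86_64 to %if ARCH_X86_64 (and likewise for HAVE_AVX, WIN64 and UNIX64, with %ifndef becoming %if ... == 0 and %elifdef becoming %elif) appears to follow from the build system now always defining these symbols with a value of 0 or 1, so only a value test picks the right path. A minimal, hypothetical C sketch of the same preprocessor distinction; the program and its strings are made up for illustration and are not part of the diff:

    #include <stdio.h>

    #define ARCH_X86_64 0   /* stand-in for a symbol the build always defines, possibly as 0 */

    int main(void)
    {
    #ifdef ARCH_X86_64      /* taken whenever the symbol exists, even with value 0 */
        printf("ifdef branch taken although the value is 0\n");
    #endif
    #if ARCH_X86_64         /* taken only when the value is non-zero */
        printf("if branch taken: value is non-zero\n");
    #endif
        return 0;
    }

Compiled as-is, only the first message prints, which is exactly the trap the %if form avoids in the assembly.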

@ -98,7 +98,7 @@ cglobal h264_idct_add_10_%1, 3,3
INIT_XMM
IDCT_ADD_10 sse2
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_AVX
IDCT_ADD_10 avx
%endif
@ -128,7 +128,7 @@ add4x4_idct_%1:
INIT_XMM
ALIGN 16
ADD4x4IDCT sse2
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_AVX
ALIGN 16
ADD4x4IDCT avx
@ -168,7 +168,7 @@ cglobal h264_idct_add16_10_%1, 5,6
INIT_XMM
IDCT_ADD16_10 sse2
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_AVX
IDCT_ADD16_10 avx
%endif
@ -234,7 +234,7 @@ cglobal h264_idct8_dc_add_10_%1,3,3,7
INIT_XMM
IDCT8_DC_ADD sse2
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_AVX
IDCT8_DC_ADD avx
%endif
@ -305,7 +305,7 @@ cglobal h264_idct_add16intra_10_%1,5,7,8
INIT_XMM
IDCT_ADD16INTRA_10 sse2
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_AVX
IDCT_ADD16INTRA_10 avx
%endif
@ -316,7 +316,7 @@ IDCT_ADD16INTRA_10 avx
;-----------------------------------------------------------------------------
%macro IDCT_ADD8 1
cglobal h264_idct_add8_10_%1,5,7
%ifdef ARCH_X86_64
%if ARCH_X86_64
mov r10, r0
%endif
add r2, 1024
@ -324,7 +324,7 @@ cglobal h264_idct_add8_10_%1,5,7
ADD16_OP_INTRA %1, 16, 4+ 6*8
ADD16_OP_INTRA %1, 18, 4+ 7*8
add r2, 1024-128*2
%ifdef ARCH_X86_64
%if ARCH_X86_64
mov r0, [r10+gprsize]
%else
mov r0, r0m
@ -342,7 +342,7 @@ cglobal h264_idct_add8_10_%1,5,7
INIT_XMM
IDCT_ADD8 sse2
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_AVX
IDCT_ADD8 avx
%endif
@ -411,7 +411,7 @@ IDCT_ADD8 avx
; %1=int16_t *block, %2=int16_t *dstblock
%macro IDCT8_ADD_SSE_START 2
IDCT8_1D_FULL %1
%ifdef ARCH_X86_64
%if ARCH_X86_64
TRANSPOSE4x4D 0,1,2,3,8
mova [%2 ], m0
TRANSPOSE4x4D 4,5,6,7,8
@ -452,7 +452,7 @@ IDCT_ADD8 avx
%macro IDCT8_ADD 1
cglobal h264_idct8_add_10_%1, 3,4,16
%ifndef UNIX64
%if UNIX64 == 0
%assign pad 16-gprsize-(stack_offset&15)
sub rsp, pad
call h264_idct8_add1_10_%1
@ -467,7 +467,7 @@ h264_idct8_add1_10_%1:
sub rsp, pad
add dword [r1], 32
%ifdef ARCH_X86_64
%if ARCH_X86_64
IDCT8_ADD_SSE_START r1, rsp
SWAP 1, 9
SWAP 2, 10
@ -519,7 +519,7 @@ h264_idct8_add1_10_%1:
INIT_XMM
IDCT8_ADD sse2
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_AVX
IDCT8_ADD avx
%endif
@ -559,7 +559,7 @@ cglobal h264_idct8_add4_10_%1, 0,7,16
INIT_XMM
IDCT8_ADD4 sse2
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_AVX
IDCT8_ADD4 avx
%endif

@ -348,7 +348,7 @@ cglobal pred16x16_plane_%3_%1, 2, 7, %2
lea r3, [r0+r2*4-1]
add r4, r2
%ifdef ARCH_X86_64
%if ARCH_X86_64
%define e_reg r11
%else
%define e_reg r0
@ -369,7 +369,7 @@ cglobal pred16x16_plane_%3_%1, 2, 7, %2
lea r5, [r5+r6*4]
movzx e_reg, byte [r3 ]
%ifdef ARCH_X86_64
%if ARCH_X86_64
movzx r10, byte [r4+r2 ]
sub r10, e_reg
%else
@ -385,7 +385,7 @@ cglobal pred16x16_plane_%3_%1, 2, 7, %2
movzx r4, byte [e_reg+r2 ]
movzx r6, byte [r3 ]
sub r6, r4
%ifdef ARCH_X86_64
%if ARCH_X86_64
lea r6, [r10+r6*2]
lea r5, [r5+r6*2]
add r5, r6
@ -395,7 +395,7 @@ cglobal pred16x16_plane_%3_%1, 2, 7, %2
%endif
movzx r4, byte [e_reg ]
%ifdef ARCH_X86_64
%if ARCH_X86_64
movzx r10, byte [r3 +r2 ]
sub r10, r4
sub r5, r10
@ -409,7 +409,7 @@ cglobal pred16x16_plane_%3_%1, 2, 7, %2
movzx r4, byte [e_reg+r1 ]
movzx r6, byte [r3 +r2*2]
sub r6, r4
%ifdef ARCH_X86_64
%if ARCH_X86_64
add r6, r10
%endif
lea r5, [r5+r6*8]
@ -420,7 +420,7 @@ cglobal pred16x16_plane_%3_%1, 2, 7, %2
lea r5, [r5+r6*4]
add r5, r6 ; sum of V coefficients
%ifndef ARCH_X86_64
%if ARCH_X86_64 == 0
mov r0, r0m
%endif
@ -641,7 +641,7 @@ cglobal pred8x8_plane_%1, 2, 7, %2
lea r3, [r0 -1]
add r4, r2
%ifdef ARCH_X86_64
%if ARCH_X86_64
%define e_reg r11
%else
%define e_reg r0
@ -652,7 +652,7 @@ cglobal pred8x8_plane_%1, 2, 7, %2
sub r5, e_reg
movzx e_reg, byte [r3 ]
%ifdef ARCH_X86_64
%if ARCH_X86_64
movzx r10, byte [r4+r2 ]
sub r10, e_reg
sub r5, r10
@ -666,7 +666,7 @@ cglobal pred8x8_plane_%1, 2, 7, %2
movzx e_reg, byte [r3+r1 ]
movzx r6, byte [r4+r2*2 ]
sub r6, e_reg
%ifdef ARCH_X86_64
%if ARCH_X86_64
add r6, r10
%endif
lea r5, [r5+r6*4]
@ -680,7 +680,7 @@ cglobal pred8x8_plane_%1, 2, 7, %2
lea r5, [r5+r6*8]
sar r5, 5
%ifndef ARCH_X86_64
%if ARCH_X86_64 == 0
mov r0, r0m
%endif

@ -84,7 +84,7 @@ INIT_XMM
PRED4x4_DR sse2
%define PALIGNR PALIGNR_SSSE3
PRED4x4_DR ssse3
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_AVX
PRED4x4_DR avx
%endif
@ -124,7 +124,7 @@ INIT_XMM
PRED4x4_VR sse2
%define PALIGNR PALIGNR_SSSE3
PRED4x4_VR ssse3
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_AVX
PRED4x4_VR avx
%endif
@ -167,7 +167,7 @@ INIT_XMM
PRED4x4_HD sse2
%define PALIGNR PALIGNR_SSSE3
PRED4x4_HD ssse3
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_AVX
PRED4x4_HD avx
%endif
@ -238,7 +238,7 @@ cglobal pred4x4_down_left_10_%1, 3,3
INIT_XMM
PRED4x4_DL sse2
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_AVX
PRED4x4_DL avx
%endif
@ -267,7 +267,7 @@ cglobal pred4x4_vertical_left_10_%1, 3,3
INIT_XMM
PRED4x4_VL sse2
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_AVX
PRED4x4_VL avx
%endif
@ -577,7 +577,7 @@ cglobal pred8x8l_top_dc_10_%1, 4,4,6
INIT_XMM
PRED8x8L_TOP_DC sse2
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_AVX
PRED8x8L_TOP_DC avx
%endif
@ -636,7 +636,7 @@ cglobal pred8x8l_dc_10_%1, 4,6,6
INIT_XMM
PRED8x8L_DC sse2
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_AVX
PRED8x8L_DC avx
%endif
@ -671,7 +671,7 @@ cglobal pred8x8l_vertical_10_%1, 4,4,6
INIT_XMM
PRED8x8L_VERTICAL sse2
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_AVX
PRED8x8L_VERTICAL avx
%endif
@ -728,7 +728,7 @@ INIT_XMM
PRED8x8L_HORIZONTAL sse2
%define PALIGNR PALIGNR_SSSE3
PRED8x8L_HORIZONTAL ssse3
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_AVX
PRED8x8L_HORIZONTAL avx
%endif
@ -797,7 +797,7 @@ INIT_XMM
PRED8x8L_DOWN_LEFT sse2
%define PALIGNR PALIGNR_SSSE3
PRED8x8L_DOWN_LEFT ssse3
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_AVX
PRED8x8L_DOWN_LEFT avx
%endif
@ -872,7 +872,7 @@ INIT_XMM
PRED8x8L_DOWN_RIGHT sse2
%define PALIGNR PALIGNR_SSSE3
PRED8x8L_DOWN_RIGHT ssse3
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_AVX
PRED8x8L_DOWN_RIGHT avx
%endif
@ -943,7 +943,7 @@ INIT_XMM
PRED8x8L_VERTICAL_RIGHT sse2
%define PALIGNR PALIGNR_SSSE3
PRED8x8L_VERTICAL_RIGHT ssse3
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_AVX
PRED8x8L_VERTICAL_RIGHT avx
%endif
@ -1005,7 +1005,7 @@ INIT_XMM
PRED8x8L_HORIZONTAL_UP sse2
%define PALIGNR PALIGNR_SSSE3
PRED8x8L_HORIZONTAL_UP ssse3
%ifdef HAVE_AVX
%if HAVE_AVX
INIT_AVX
PRED8x8L_HORIZONTAL_UP avx
%endif

@ -111,7 +111,7 @@ INIT_XMM
%endmacro
%macro MCAxA 8
%ifdef ARCH_X86_64
%if ARCH_X86_64
%ifnidn %1,mmxext
MCAxA_OP %1,%2,%3,%4,%5,%6,%7,%8
%endif
@ -122,7 +122,7 @@ MCAxA_OP %1,%2,%3,%4,%5,%6,%7,%8
%macro MCAxA_OP 8
cglobal %2_h264_qpel%5_%3_10_%1, %6,%7,%8
%ifdef ARCH_X86_32
%if ARCH_X86_32
call stub_%2_h264_qpel%4_%3_10_%1
mov r0, r0m
mov r1, r1m
@ -152,7 +152,7 @@ cglobal %2_h264_qpel%5_%3_10_%1, %6,%7,%8
call stub_%2_h264_qpel%4_%3_10_%1
lea r0, [r10+r2*%4+%4*2]
lea r1, [r11+r2*%4+%4*2]
%ifndef UNIX64 ; fall through to function
%if UNIX64 == 0 ; fall through to function
call stub_%2_h264_qpel%4_%3_10_%1
RET
%endif
@ -165,7 +165,7 @@ cglobal %2_h264_qpel%5_%3_10_%1, %6,%7,%8
MCAxA %1, %2, %3, %4, i, %5,%6,%7
cglobal %2_h264_qpel%4_%3_10_%1, %5,%6,%7
%ifndef UNIX64 ; no prologue or epilogue for UNIX64
%if UNIX64 == 0 ; no prologue or epilogue for UNIX64
call stub_%2_h264_qpel%4_%3_10_%1
RET
%endif

@ -126,7 +126,7 @@ INIT_XMM
WEIGHT_FUNC_HALF_MM 8, 8, sse2
%macro BIWEIGHT_SETUP 0
%ifdef ARCH_X86_64
%if ARCH_X86_64
%define off_regd r11d
%else
%define off_regd r3d
@ -244,7 +244,7 @@ INIT_XMM
BIWEIGHT_FUNC_HALF_MM 8, 8, sse2
%macro BIWEIGHT_SSSE3_SETUP 0
%ifdef ARCH_X86_64
%if ARCH_X86_64
%define off_regd r11d
%else
%define off_regd r3d

@ -152,7 +152,7 @@ WEIGHT_FUNC_HALF_MM sse4
; void h264_biweight(uint8_t *dst, uint8_t *src, int stride, int height,
; int log2_denom, int weightd, int weights, int offset);
;-----------------------------------------------------------------------------
%ifdef ARCH_X86_32
%if ARCH_X86_32
DECLARE_REG_TMP 3
%else
DECLARE_REG_TMP 10

@ -219,13 +219,13 @@ cglobal imdct36_float, 4,4,9, out, buf, in, win
subps m5, m0, m3
%ifdef ARCH_X86_64
%if ARCH_X86_64
SWAP m5, m8
%endif
mulps m7, m2, [ps_val1]
%ifdef ARCH_X86_64
%if ARCH_X86_64
mulps m5, m8, [ps_val2]
%else
mulps m5, m5, [ps_val2]
@ -235,7 +235,7 @@ cglobal imdct36_float, 4,4,9, out, buf, in, win
mulps m5, m6, [ps_val1]
subps m7, m7, m5
%ifdef ARCH_X86_64
%if ARCH_X86_64
SWAP m5, m8
%else
subps m5, m0, m3
@ -378,7 +378,7 @@ DEFINE_IMDCT
INIT_XMM sse
%ifdef ARCH_X86_64
%if ARCH_X86_64
%define SPILL SWAP
%define UNSPILL SWAP
%define SPILLED(x) m %+ x

@ -33,7 +33,7 @@
%define W6sh2 8867 ; W6 = 35468 = 8867<<2
%define W7sh2 4520 ; W7 = 18081 = 4520<<2 + 1
%ifdef ARCH_X86_64
%if ARCH_X86_64
SECTION_RODATA

@ -106,7 +106,7 @@ SECTION .text
INIT_MMX
cglobal vp3_v_loop_filter_mmx2, 3, 4
%ifdef ARCH_X86_64
%if ARCH_X86_64
movsxd r1, r1d
%endif
mov r3, r1
@ -123,7 +123,7 @@ cglobal vp3_v_loop_filter_mmx2, 3, 4
RET
cglobal vp3_h_loop_filter_mmx2, 3, 4
%ifdef ARCH_X86_64
%if ARCH_X86_64
movsxd r1, r1d
%endif
lea r3, [r1*3]
@ -510,7 +510,7 @@ cglobal vp3_h_loop_filter_mmx2, 3, 4
%define SHIFT(x)
%define ADD(x)
VP3_1D_IDCT_SSE2
%ifdef ARCH_X86_64
%if ARCH_X86_64
TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
%else
TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [%1], [%1+16]
@ -530,7 +530,7 @@ cglobal vp3_idct_%1, 1, 1, %2
cglobal vp3_idct_put_%1, 3, %3, %2
VP3_IDCT_%1 r2
%ifdef ARCH_X86_64
%if ARCH_X86_64
mov r3, r2
mov r2, r1
mov r1, r0
@ -540,7 +540,7 @@ cglobal vp3_idct_put_%1, 3, %3, %2
mov r1m, r0
mov r2m, r1
%endif
%ifdef WIN64
%if WIN64
call put_signed_pixels_clamped_mmx
RET
%else
@ -549,7 +549,7 @@ cglobal vp3_idct_put_%1, 3, %3, %2
cglobal vp3_idct_add_%1, 3, %3, %2
VP3_IDCT_%1 r2
%ifdef ARCH_X86_64
%if ARCH_X86_64
mov r3, r2
mov r2, r1
mov r1, r0
@ -559,7 +559,7 @@ cglobal vp3_idct_add_%1, 3, %3, %2
mov r1m, r0
mov r2m, r1
%endif
%ifdef WIN64
%if WIN64
call add_pixels_clamped_mmx
RET
%else
@ -567,7 +567,7 @@ cglobal vp3_idct_add_%1, 3, %3, %2
%endif
%endmacro
%ifdef ARCH_X86_64
%if ARCH_X86_64
%define REGS 4
%else
%define REGS 3
@ -599,7 +599,7 @@ vp3_idct_funcs sse2, 9, REGS
INIT_MMX
cglobal vp3_idct_dc_add_mmx2, 3, 4
%ifdef ARCH_X86_64
%if ARCH_X86_64
movsxd r1, r1d
%endif
lea r3, [r1*3]

@ -127,7 +127,7 @@ cglobal vp6_filter_diag4_%1, 5, 7, %2
sub rsp, 8*15
movq m6, [pw_64]
%endif
%ifdef ARCH_X86_64
%if ARCH_X86_64
movsxd r2, r2d
%endif

@ -53,8 +53,7 @@
#include "avdevice.h"
#include "alsa-audio.h"
static av_cold int audio_read_header(AVFormatContext *s1,
AVFormatParameters *ap)
static av_cold int audio_read_header(AVFormatContext *s1)
{
AlsaData *s = s1->priv_data;
AVStream *st;
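Editor's note: this hunk and the device/demuxer hunks that follow drop the AVFormatParameters argument from the read_header() callbacks; what that struct used to carry now arrives through per-demuxer private options. A minimal sketch of the new callback shape, assuming a hypothetical demuxer (example_read_header is a made-up name, not part of this merge):

    #include <errno.h>
    #include <libavformat/avformat.h>

    /* Sketch only: a read_header callback under the new one-argument signature.
     * It would be referenced from an AVInputFormat's .read_header field. */
    int example_read_header(AVFormatContext *s)
    {
        AVStream *st = avformat_new_stream(s, NULL);  /* replaces av_new_stream() */
        if (!st)
            return AVERROR(ENOMEM);
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        /* values formerly passed in AVFormatParameters are read from the
         * demuxer's private options (an AVDictionary supplied at open time) */
        return 0;
    }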

@ -243,7 +243,7 @@ static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
return video_buf_size;
}
static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
static int grab_read_header(AVFormatContext *s1)
{
VideoData *s = s1->priv_data;
AVStream *st;

@ -649,7 +649,7 @@ static enum CodecID waveform_codec_id(enum AVSampleFormat sample_fmt)
}
}
static enum SampleFormat sample_fmt_bits_per_sample(int bits)
static enum AVSampleFormat sample_fmt_bits_per_sample(int bits)
{
switch (bits) {
case 8: return AV_SAMPLE_FMT_U8;
@ -660,7 +660,7 @@ static enum SampleFormat sample_fmt_bits_per_sample(int bits)
}
static int
dshow_add_device(AVFormatContext *avctx, AVFormatParameters *ap,
dshow_add_device(AVFormatContext *avctx,
enum dshowDeviceType devtype)
{
struct dshow_ctx *ctx = avctx->priv_data;
@ -784,7 +784,7 @@ static int parse_device_name(AVFormatContext *avctx)
return ret;
}
static int dshow_read_header(AVFormatContext *avctx, AVFormatParameters *ap)
static int dshow_read_header(AVFormatContext *avctx)
{
struct dshow_ctx *ctx = avctx->priv_data;
IGraphBuilder *graph = NULL;

@ -81,7 +81,7 @@ static int dv1394_start(struct dv1394_data *dv)
return 0;
}
static int dv1394_read_header(AVFormatContext * context, AVFormatParameters * ap)
static int dv1394_read_header(AVFormatContext * context)
{
struct dv1394_data *dv = context->priv_data;

@ -95,8 +95,7 @@ typedef struct {
uint8_t *data; ///< framebuffer data
} FBDevContext;
av_cold static int fbdev_read_header(AVFormatContext *avctx,
AVFormatParameters *ap)
av_cold static int fbdev_read_header(AVFormatContext *avctx)
{
FBDevContext *fbdev = avctx->priv_data;
AVStream *st = NULL;

@ -222,7 +222,7 @@ static void stop_jack(JackData *self)
ff_timefilter_destroy(self->timefilter);
}
static int audio_read_header(AVFormatContext *context, AVFormatParameters *params)
static int audio_read_header(AVFormatContext *context)
{
JackData *self = context->priv_data;
AVStream *stream;

@ -32,6 +32,7 @@
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/audioconvert.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"
@ -78,8 +79,7 @@ av_cold static int lavfi_read_close(AVFormatContext *avctx)
return 0;
}
av_cold static int lavfi_read_header(AVFormatContext *avctx,
AVFormatParameters *ap)
av_cold static int lavfi_read_header(AVFormatContext *avctx)
{
LavfiContext *lavfi = avctx->priv_data;
AVFilterInOut *input_links = NULL, *output_links = NULL, *inout;

@ -47,7 +47,7 @@ typedef struct CDIOContext {
int paranoia_mode;
} CDIOContext;
static av_cold int read_header(AVFormatContext *ctx, AVFormatParameters *ap)
static av_cold int read_header(AVFormatContext *ctx)
{
CDIOContext *s = ctx->priv_data;
AVStream *st;

@ -2,7 +2,6 @@
* IIDC1394 grab interface (uses libdc1394 and libraw1394)
* Copyright (c) 2004 Roman Shaposhnik
* Copyright (c) 2008 Alessandro Sappia
* Copyright (c) 2011 Martin Lambers
*
* This file is part of FFmpeg.
*
@ -22,26 +21,46 @@
*/
#include "config.h"
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "libavutil/log.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "avdevice.h"
#include <stdlib.h>
#include <string.h>
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#if HAVE_LIBDC1394_2
#include <dc1394/dc1394.h>
#elif HAVE_LIBDC1394_1
#include <libraw1394/raw1394.h>
#include <libdc1394/dc1394_control.h>
#define DC1394_VIDEO_MODE_320x240_YUV422 MODE_320x240_YUV422
#define DC1394_VIDEO_MODE_640x480_YUV411 MODE_640x480_YUV411
#define DC1394_VIDEO_MODE_640x480_YUV422 MODE_640x480_YUV422
#define DC1394_FRAMERATE_1_875 FRAMERATE_1_875
#define DC1394_FRAMERATE_3_75 FRAMERATE_3_75
#define DC1394_FRAMERATE_7_5 FRAMERATE_7_5
#define DC1394_FRAMERATE_15 FRAMERATE_15
#define DC1394_FRAMERATE_30 FRAMERATE_30
#define DC1394_FRAMERATE_60 FRAMERATE_60
#define DC1394_FRAMERATE_120 FRAMERATE_120
#define DC1394_FRAMERATE_240 FRAMERATE_240
#endif
#undef free
typedef struct dc1394_data {
AVClass *class;
#if HAVE_LIBDC1394_1
raw1394handle_t handle;
dc1394_cameracapture camera;
int channel;
#elif HAVE_LIBDC1394_2
dc1394_t *d;
dc1394camera_t *camera;
dc1394video_frame_t *frame;
#endif
int current_frame;
int frame_rate; /**< frames per 1000 seconds (fps * 1000) */
char *video_size; /**< String describing video size, set by a private option. */
@ -51,21 +70,16 @@ typedef struct dc1394_data {
AVPacket packet;
} dc1394_data;
/* The list of color codings that we support.
* We assume big endian for the dc1394 16bit modes: libdc1394 never sets the
* flag little_endian in dc1394video_frame_t. */
struct dc1394_color_coding {
int pix_fmt;
int score;
uint32_t coding;
} dc1394_color_codings[] = {
{ PIX_FMT_GRAY16BE, 1000, DC1394_COLOR_CODING_MONO16 },
{ PIX_FMT_RGB48BE, 1100, DC1394_COLOR_CODING_RGB16 },
{ PIX_FMT_GRAY8, 1200, DC1394_COLOR_CODING_MONO8 },
{ PIX_FMT_RGB24, 1300, DC1394_COLOR_CODING_RGB8 },
{ PIX_FMT_UYYVYY411, 1400, DC1394_COLOR_CODING_YUV411 },
{ PIX_FMT_UYVY422, 1500, DC1394_COLOR_CODING_YUV422 },
{ PIX_FMT_NONE, 0, 0 } /* gotta be the last one */
struct dc1394_frame_format {
int width;
int height;
enum PixelFormat pix_fmt;
int frame_size_id;
} dc1394_frame_formats[] = {
{ 320, 240, PIX_FMT_UYVY422, DC1394_VIDEO_MODE_320x240_YUV422 },
{ 640, 480, PIX_FMT_UYYVYY411, DC1394_VIDEO_MODE_640x480_YUV411 },
{ 640, 480, PIX_FMT_UYVY422, DC1394_VIDEO_MODE_640x480_YUV422 },
{ 0, 0, 0, 0 } /* gotta be the last one */
};
struct dc1394_frame_rate {
@ -86,6 +100,9 @@ struct dc1394_frame_rate {
#define OFFSET(x) offsetof(dc1394_data, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
#if HAVE_LIBDC1394_1
{ "channel", "", offsetof(dc1394_data, channel), AV_OPT_TYPE_INT, {.dbl = 0}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
#endif
{ "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), AV_OPT_TYPE_STRING, {.str = "qvga"}, 0, 0, DEC },
{ "pixel_format", "", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = "uyvy422"}, 0, 0, DEC },
{ "framerate", "", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = "ntsc"}, 0, 0, DEC },
@ -99,183 +116,202 @@ static const AVClass libdc1394_class = {
.version = LIBAVUTIL_VERSION_INT,
};
static int dc1394_read_header(AVFormatContext *c, AVFormatParameters * ap)
static inline int dc1394_read_common(AVFormatContext *c,
struct dc1394_frame_format **select_fmt, struct dc1394_frame_rate **select_fps)
{
dc1394_data* dc1394 = c->priv_data;
AVStream *vst;
const struct dc1394_color_coding *cc;
const struct dc1394_frame_rate *fr;
dc1394camera_list_t *list;
dc1394video_modes_t video_modes;
dc1394video_mode_t video_mode;
dc1394framerates_t frame_rates;
dc1394framerate_t frame_rate;
uint32_t dc1394_width, dc1394_height, dc1394_color_coding;
int rate, best_rate;
int score, max_score;
int final_width, final_height, final_pix_fmt, final_frame_rate;
int res, i, j;
int ret=-1;
/* Now let us prep the hardware. */
dc1394->d = dc1394_new();
dc1394_camera_enumerate (dc1394->d, &list);
if ( !list || list->num == 0) {
av_log(c, AV_LOG_ERROR, "Unable to look for an IIDC camera\n\n");
AVStream* vst;
struct dc1394_frame_format *fmt;
struct dc1394_frame_rate *fps;
enum PixelFormat pix_fmt;
int width, height;
AVRational framerate;
int ret = 0;
if ((pix_fmt = av_get_pix_fmt(dc1394->pixel_format)) == PIX_FMT_NONE) {
av_log(c, AV_LOG_ERROR, "No such pixel format: %s.\n", dc1394->pixel_format);
ret = AVERROR(EINVAL);
goto out;
}
/* FIXME: To select a specific camera I need to search in list its guid */
dc1394->camera = dc1394_camera_new (dc1394->d, list->ids[0].guid);
if (list->num > 1) {
av_log(c, AV_LOG_INFO, "Working with the first camera found\n");
if ((ret = av_parse_video_size(&width, &height, dc1394->video_size)) < 0) {
av_log(c, AV_LOG_ERROR, "Could not parse video size '%s'.\n", dc1394->video_size);
goto out;
}
/* Freeing list of cameras */
dc1394_camera_free_list (list);
/* Get the list of video modes supported by the camera. */
res = dc1394_video_get_supported_modes (dc1394->camera, &video_modes);
if (res != DC1394_SUCCESS) {
av_log(c, AV_LOG_ERROR, "Could not get video formats.\n");
goto out_camera;
if ((ret = av_parse_video_rate(&framerate, dc1394->framerate)) < 0) {
av_log(c, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", dc1394->framerate);
goto out;
}
dc1394->frame_rate = av_rescale(1000, framerate.num, framerate.den);
if (dc1394->pixel_format) {
if ((ap->pix_fmt = av_get_pix_fmt(dc1394->pixel_format)) == PIX_FMT_NONE) {
av_log(c, AV_LOG_ERROR, "No such pixel format: %s.\n", dc1394->pixel_format);
ret = AVERROR(EINVAL);
goto out;
}
}
for (fmt = dc1394_frame_formats; fmt->width; fmt++)
if (fmt->pix_fmt == pix_fmt && fmt->width == width && fmt->height == height)
break;
if (dc1394->video_size) {
if ((ret = av_parse_video_size(&ap->width, &ap->height, dc1394->video_size)) < 0) {
av_log(c, AV_LOG_ERROR, "Couldn't parse video size.\n");
goto out;
}
}
for (fps = dc1394_frame_rates; fps->frame_rate; fps++)
if (fps->frame_rate == dc1394->frame_rate)
break;
/* Choose the best mode. */
rate = (ap->time_base.num ? av_rescale(1000, ap->time_base.den, ap->time_base.num) : -1);
max_score = -1;
for (i = 0; i < video_modes.num; i++) {
if (video_modes.modes[i] == DC1394_VIDEO_MODE_EXIF
|| (video_modes.modes[i] >= DC1394_VIDEO_MODE_FORMAT7_MIN
&& video_modes.modes[i] <= DC1394_VIDEO_MODE_FORMAT7_MAX)) {
/* These modes are currently not supported as they would require
* much more work. For the remaining modes, the functions
* dc1394_get_image_size_from_video_mode and
* dc1394_get_color_coding_from_video_mode do not need to query the
* camera, and thus cannot fail. */
continue;
}
dc1394_get_color_coding_from_video_mode (NULL, video_modes.modes[i],
&dc1394_color_coding);
for (cc = dc1394_color_codings; cc->pix_fmt != PIX_FMT_NONE; cc++)
if (cc->coding == dc1394_color_coding)
break;
if (cc->pix_fmt == PIX_FMT_NONE) {
/* We currently cannot handle this color coding. */
continue;
}
/* Here we know that the mode is supported. Get its frame size and the list
* of frame rates supported by the camera for this mode. This list is sorted
* in ascending order according to libdc1394 example programs. */
dc1394_get_image_size_from_video_mode (NULL, video_modes.modes[i],
&dc1394_width, &dc1394_height);
res = dc1394_video_get_supported_framerates (dc1394->camera, video_modes.modes[i],
&frame_rates);
if (res != DC1394_SUCCESS || frame_rates.num == 0) {
av_log(c, AV_LOG_ERROR, "Cannot get frame rates for video mode.\n");
goto out_camera;
}
/* Choose the best frame rate. */
best_rate = -1;
for (j = 0; j < frame_rates.num; j++) {
for (fr = dc1394_frame_rates; fr->frame_rate; fr++) {
if (fr->frame_rate_id == frame_rates.framerates[j]) {
break;
}
}
if (!fr->frame_rate) {
/* This frame rate is not supported. */
continue;
}
best_rate = fr->frame_rate;
frame_rate = fr->frame_rate_id;
if (ap->time_base.num && rate == fr->frame_rate) {
/* This is the requested frame rate. */
break;
}
}
if (best_rate == -1) {
/* No supported rate found. */
continue;
}
/* Here we know that both the mode and the rate are supported. Compute score. */
if (ap->width && ap->height
&& (dc1394_width == ap->width && dc1394_height == ap->height)) {
score = 110000;
} else {
score = dc1394_width * 10; // 1600 - 16000
}
if (ap->pix_fmt == cc->pix_fmt) {
score += 90000;
} else {
score += cc->score; // 1000 - 1500
}
if (ap->time_base.num && rate == best_rate) {
score += 70000;
} else {
score += best_rate / 1000; // 1 - 240
}
if (score > max_score) {
video_mode = video_modes.modes[i];
final_width = dc1394_width;
final_height = dc1394_height;
final_pix_fmt = cc->pix_fmt;
final_frame_rate = best_rate;
max_score = score;
}
}
if (max_score == -1) {
av_log(c, AV_LOG_ERROR, "No suitable video mode / frame rate available.\n");
goto out_camera;
}
if (ap->width && ap->height && !(ap->width == final_width && ap->height == final_height)) {
av_log(c, AV_LOG_WARNING, "Requested frame size is not available, using fallback.\n");
}
if (ap->pix_fmt != PIX_FMT_NONE && ap->pix_fmt != final_pix_fmt) {
av_log(c, AV_LOG_WARNING, "Requested pixel format is not supported, using fallback.\n");
}
if (ap->time_base.num && rate != final_frame_rate) {
av_log(c, AV_LOG_WARNING, "Requested frame rate is not available, using fallback.\n");
if (!fps->frame_rate || !fmt->width) {
av_log(c, AV_LOG_ERROR, "Can't find matching camera format for %s, %dx%d@%d:1000fps\n", av_get_pix_fmt_name(pix_fmt),
width, height, dc1394->frame_rate);
ret = AVERROR(EINVAL);
goto out;
}
/* create a video stream */
vst = avformat_new_stream(c, NULL);
if (!vst)
goto out_camera;
if (!vst) {
ret = AVERROR(ENOMEM);
goto out;
}
avpriv_set_pts_info(vst, 64, 1, 1000);
vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
vst->codec->codec_id = CODEC_ID_RAWVIDEO;
vst->codec->time_base.den = final_frame_rate;
vst->codec->time_base.num = 1000;
vst->codec->width = final_width;
vst->codec->height = final_height;
vst->codec->pix_fmt = final_pix_fmt;
vst->codec->time_base.den = framerate.num;
vst->codec->time_base.num = framerate.den;
vst->codec->width = fmt->width;
vst->codec->height = fmt->height;
vst->codec->pix_fmt = fmt->pix_fmt;
/* packet init */
av_init_packet(&dc1394->packet);
dc1394->packet.size = avpicture_get_size(final_pix_fmt, final_width, final_height);
dc1394->packet.size = avpicture_get_size(fmt->pix_fmt, fmt->width, fmt->height);
dc1394->packet.stream_index = vst->index;
dc1394->packet.flags |= AV_PKT_FLAG_KEY;
dc1394->current_frame = 0;
dc1394->frame_rate = final_frame_rate;
vst->codec->bit_rate = av_rescale(dc1394->packet.size * 8, final_frame_rate, 1000);
vst->codec->bit_rate = av_rescale(dc1394->packet.size * 8, fps->frame_rate, 1000);
*select_fps = fps;
*select_fmt = fmt;
out:
return ret;
}
#if HAVE_LIBDC1394_1
static int dc1394_v1_read_header(AVFormatContext *c)
{
dc1394_data* dc1394 = c->priv_data;
AVStream* vst;
nodeid_t* camera_nodes;
int res;
struct dc1394_frame_format *fmt = NULL;
struct dc1394_frame_rate *fps = NULL;
if (dc1394_read_common(c, &fmt, &fps) != 0)
return -1;
/* Now let us prep the hardware. */
dc1394->handle = dc1394_create_handle(0); /* FIXME: gotta have ap->port */
if (!dc1394->handle) {
av_log(c, AV_LOG_ERROR, "Can't acquire dc1394 handle on port %d\n", 0 /* ap->port */);
goto out;
}
camera_nodes = dc1394_get_camera_nodes(dc1394->handle, &res, 1);
if (!camera_nodes || camera_nodes[dc1394->channel] == DC1394_NO_CAMERA) {
av_log(c, AV_LOG_ERROR, "There's no IIDC camera on the channel %d\n", dc1394->channel);
goto out_handle;
}
res = dc1394_dma_setup_capture(dc1394->handle, camera_nodes[dc1394->channel],
0,
FORMAT_VGA_NONCOMPRESSED,
fmt->frame_size_id,
SPEED_400,
fps->frame_rate_id, 8, 1,
c->filename,
&dc1394->camera);
dc1394_free_camera_nodes(camera_nodes);
if (res != DC1394_SUCCESS) {
av_log(c, AV_LOG_ERROR, "Can't prepare camera for the DMA capture\n");
goto out_handle;
}
res = dc1394_start_iso_transmission(dc1394->handle, dc1394->camera.node);
if (res != DC1394_SUCCESS) {
av_log(c, AV_LOG_ERROR, "Can't start isochronous transmission\n");
goto out_handle_dma;
}
return 0;
out_handle_dma:
dc1394_dma_unlisten(dc1394->handle, &dc1394->camera);
dc1394_dma_release_camera(dc1394->handle, &dc1394->camera);
out_handle:
dc1394_destroy_handle(dc1394->handle);
out:
return -1;
}
static int dc1394_v1_read_packet(AVFormatContext *c, AVPacket *pkt)
{
struct dc1394_data *dc1394 = c->priv_data;
int res;
/* discard stale frame */
if (dc1394->current_frame++) {
if (dc1394_dma_done_with_buffer(&dc1394->camera) != DC1394_SUCCESS)
av_log(c, AV_LOG_ERROR, "failed to release %d frame\n", dc1394->current_frame);
}
res = dc1394_dma_single_capture(&dc1394->camera);
if (res == DC1394_SUCCESS) {
dc1394->packet.data = (uint8_t *)(dc1394->camera.capture_buffer);
dc1394->packet.pts = (dc1394->current_frame * 1000000) / dc1394->frame_rate;
res = dc1394->packet.size;
} else {
av_log(c, AV_LOG_ERROR, "DMA capture failed\n");
dc1394->packet.data = NULL;
res = -1;
}
*pkt = dc1394->packet;
return res;
}
static int dc1394_v1_close(AVFormatContext * context)
{
struct dc1394_data *dc1394 = context->priv_data;
dc1394_stop_iso_transmission(dc1394->handle, dc1394->camera.node);
dc1394_dma_unlisten(dc1394->handle, &dc1394->camera);
dc1394_dma_release_camera(dc1394->handle, &dc1394->camera);
dc1394_destroy_handle(dc1394->handle);
return 0;
}
#elif HAVE_LIBDC1394_2
static int dc1394_v2_read_header(AVFormatContext *c)
{
dc1394_data* dc1394 = c->priv_data;
dc1394camera_list_t *list;
int res, i;
struct dc1394_frame_format *fmt = NULL;
struct dc1394_frame_rate *fps = NULL;
if (dc1394_read_common(c, &fmt, &fps) != 0)
return -1;
/* Now let us prep the hardware. */
dc1394->d = dc1394_new();
dc1394_camera_enumerate (dc1394->d, &list);
if ( !list || list->num == 0) {
av_log(c, AV_LOG_ERROR, "Unable to look for an IIDC camera\n\n");
goto out;
}
/* FIXME: To select a specific camera I need to search in list its guid */
dc1394->camera = dc1394_camera_new (dc1394->d, list->ids[0].guid);
if (list->num > 1) {
av_log(c, AV_LOG_INFO, "Working with the first camera found\n");
}
/* Freeing list of cameras */
dc1394_camera_free_list (list);
/* Select MAX Speed possible from the cam */
if (dc1394->camera->bmode_capable>0) {
@ -293,13 +329,13 @@ static int dc1394_read_header(AVFormatContext *c, AVFormatParameters * ap)
goto out_camera;
}
if (dc1394_video_set_mode(dc1394->camera, video_mode) != DC1394_SUCCESS) {
if (dc1394_video_set_mode(dc1394->camera, fmt->frame_size_id) != DC1394_SUCCESS) {
av_log(c, AV_LOG_ERROR, "Couldn't set video format\n");
goto out_camera;
}
if (dc1394_video_set_framerate(dc1394->camera, frame_rate) != DC1394_SUCCESS) {
av_log(c, AV_LOG_ERROR, "Could not set framerate %d.\n", final_frame_rate);
if (dc1394_video_set_framerate(dc1394->camera,fps->frame_rate_id) != DC1394_SUCCESS) {
av_log(c, AV_LOG_ERROR, "Couldn't set framerate %d \n",fps->frame_rate);
goto out_camera;
}
if (dc1394_capture_setup(dc1394->camera, 10, DC1394_CAPTURE_FLAGS_DEFAULT)!=DC1394_SUCCESS) {
@ -319,10 +355,10 @@ out_camera:
dc1394_camera_free (dc1394->camera);
out:
dc1394_free(dc1394->d);
return ret;
return -1;
}
static int dc1394_read_packet(AVFormatContext *c, AVPacket *pkt)
static int dc1394_v2_read_packet(AVFormatContext *c, AVPacket *pkt)
{
struct dc1394_data *dc1394 = c->priv_data;
int res;
@ -348,7 +384,7 @@ static int dc1394_read_packet(AVFormatContext *c, AVPacket *pkt)
return res;
}
static int dc1394_close(AVFormatContext * context)
static int dc1394_v2_close(AVFormatContext * context)
{
struct dc1394_data *dc1394 = context->priv_data;
@ -362,11 +398,25 @@ static int dc1394_close(AVFormatContext * context)
AVInputFormat ff_libdc1394_demuxer = {
.name = "libdc1394",
.long_name = NULL_IF_CONFIG_SMALL("dc1394 A/V grab"),
.long_name = NULL_IF_CONFIG_SMALL("dc1394 v.2 A/V grab"),
.priv_data_size = sizeof(struct dc1394_data),
.read_header = dc1394_v2_read_header,
.read_packet = dc1394_v2_read_packet,
.read_close = dc1394_v2_close,
.flags = AVFMT_NOFILE,
.priv_class = &libdc1394_class,
};
#endif
#if HAVE_LIBDC1394_1
AVInputFormat ff_libdc1394_demuxer = {
.name = "libdc1394",
.long_name = NULL_IF_CONFIG_SMALL("dc1394 v.1 A/V grab"),
.priv_data_size = sizeof(struct dc1394_data),
.read_header = dc1394_read_header,
.read_packet = dc1394_read_packet,
.read_close = dc1394_close,
.read_header = dc1394_v1_read_header,
.read_packet = dc1394_v1_read_packet,
.read_close = dc1394_v1_close,
.flags = AVFMT_NOFILE,
.priv_class = &libdc1394_class,
};
#endif
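Editor's note: in the rewritten libdc1394 demuxer above, dc1394_read_common() derives the capture mode from the private video_size, pixel_format and framerate options using av_parse_video_size() and av_parse_video_rate() rather than AVFormatParameters. A minimal sketch of that parsing step under the same assumptions; parse_capture_options is a made-up helper name:

    #include <errno.h>
    #include <libavutil/error.h>
    #include <libavutil/mathematics.h>
    #include <libavutil/parseutils.h>
    #include <libavutil/pixdesc.h>
    #include <libavutil/pixfmt.h>

    /* Parse the option strings into width/height and a frame rate expressed as
     * frames per 1000 seconds, mirroring dc1394_data.frame_rate in the diff. */
    int parse_capture_options(const char *size_str, const char *rate_str,
                              const char *pix_fmt_name,
                              int *width, int *height, int *frame_rate)
    {
        AVRational framerate;
        enum PixelFormat pix_fmt = av_get_pix_fmt(pix_fmt_name);
        int ret;

        if (pix_fmt == PIX_FMT_NONE)
            return AVERROR(EINVAL);
        if ((ret = av_parse_video_size(width, height, size_str)) < 0)
            return ret;
        if ((ret = av_parse_video_rate(&framerate, rate_str)) < 0)
            return ret;
        *frame_rate = av_rescale(1000, framerate.num, framerate.den);
        return 0;
    }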

@ -117,7 +117,7 @@ static inline void print_al_capture_devices(void *log_ctx)
av_log(log_ctx, AV_LOG_INFO, " %s\n", devices);
}
static int read_header(AVFormatContext *ctx, AVFormatParameters *ap)
static int read_header(AVFormatContext *ctx)
{
al_data *ad = ctx->priv_data;
static const ALCenum sample_formats[2][2] = {

@ -204,7 +204,7 @@ static int audio_write_trailer(AVFormatContext *s1)
/* grab support */
static int audio_read_header(AVFormatContext *s1, AVFormatParameters *ap)
static int audio_read_header(AVFormatContext *s1)
{
AudioData *s = s1->priv_data;
AVStream *st;

@ -66,8 +66,7 @@ static pa_sample_format_t codec_id_to_pulse_format(int codec_id) {
}
}
static av_cold int pulse_read_header(AVFormatContext *s,
AVFormatParameters *ap)
static av_cold int pulse_read_header(AVFormatContext *s)
{
PulseData *pd = s->priv_data;
AVStream *st;

@ -28,8 +28,7 @@
#include "sndio_common.h"
static av_cold int audio_read_header(AVFormatContext *s1,
AVFormatParameters *ap)
static av_cold int audio_read_header(AVFormatContext *s1)
{
SndioData *s = s1->priv_data;
AVStream *st;

@ -555,7 +555,7 @@ static void mmap_close(struct video_data *s)
av_free(s->buf_len);
}
static int v4l2_set_parameters(AVFormatContext *s1, AVFormatParameters *ap)
static int v4l2_set_parameters(AVFormatContext *s1)
{
struct video_data *s = s1->priv_data;
struct v4l2_input input = { 0 };
@ -683,7 +683,7 @@ static uint32_t device_try_init(AVFormatContext *s1,
return desired_format;
}
static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
static int v4l2_read_header(AVFormatContext *s1)
{
struct video_data *s = s1->priv_data;
AVStream *st;
@ -770,7 +770,7 @@ static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
s->frame_format = desired_format;
if ((res = v4l2_set_parameters(s1, ap)) < 0)
if ((res = v4l2_set_parameters(s1)) < 0)
goto out;
st->codec->pix_fmt = fmt_v4l2ff(desired_format, codec_id);

@ -236,7 +236,7 @@ static int vfw_read_close(AVFormatContext *s)
return 0;
}
static int vfw_read_header(AVFormatContext *s, AVFormatParameters *ap)
static int vfw_read_header(AVFormatContext *s)
{
struct vfw_ctx *ctx = s->priv_data;
AVCodecContext *codec;

@ -154,7 +154,7 @@ x11grab_region_win_init(struct x11_grab *s)
* </ul>
*/
static int
x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
x11grab_read_header(AVFormatContext *s1)
{
struct x11_grab *x11grab = s1->priv_data;
Display *dpy;

@ -35,7 +35,7 @@
#undef printf
#undef strncpy
#define ASMALIGN(ZEROBITS) ".p2align " #ZEROBITS "\n\t"
#define CODEC_FLAG2_MEMC_ONLY 0x00001000 ///< Only do ME/MC (I frames -> ref, P frame -> ME+MC).
//--------- codec's requirements (filled by the codec/vf) ---------

@ -91,8 +91,7 @@ static int fourxm_probe(AVProbeData *p)
return AVPROBE_SCORE_MAX;
}
static int fourxm_read_header(AVFormatContext *s,
AVFormatParameters *ap)
static int fourxm_read_header(AVFormatContext *s)
{
AVIOContext *pb = s->pb;
unsigned int fourcc_tag;

@ -62,8 +62,7 @@ static int adts_aac_probe(AVProbeData *p)
else return 0;
}
static int adts_aac_read_header(AVFormatContext *s,
AVFormatParameters *ap)
static int adts_aac_read_header(AVFormatContext *s)
{
AVStream *st;

@ -60,8 +60,7 @@ static int probe(AVProbeData *p)
return AVPROBE_SCORE_MAX;
}
static int read_header(AVFormatContext *s,
AVFormatParameters *ap)
static int read_header(AVFormatContext *s)
{
ACTContext* ctx = s->priv_data;
AVIOContext *pb = s->pb;

@ -62,7 +62,7 @@ static int adx_read_packet(AVFormatContext *s, AVPacket *pkt)
return 0;
}
static int adx_read_header(AVFormatContext *s, AVFormatParameters *ap)
static int adx_read_header(AVFormatContext *s)
{
ADXDemuxerContext *c = s->priv_data;
AVCodecContext *avctx;

@ -54,8 +54,7 @@ static int aea_read_probe(AVProbeData *p)
return 0;
}
static int aea_read_header(AVFormatContext *s,
AVFormatParameters *ap)
static int aea_read_header(AVFormatContext *s)
{
AVStream *st = avformat_new_stream(s, NULL);
if (!st)

@ -79,7 +79,7 @@ static void get_meta(AVFormatContext *s, const char *key, int size)
}
size += (size&1)-res;
str[res] = 0;
av_dict_set(&s->metadata, key, str, AV_METADATA_DONT_STRDUP_VAL);
av_dict_set(&s->metadata, key, str, AV_DICT_DONT_STRDUP_VAL);
}else
size+= size&1;
@ -176,8 +176,7 @@ static int aiff_probe(AVProbeData *p)
}
/* aiff input */
static int aiff_read_header(AVFormatContext *s,
AVFormatParameters *ap)
static int aiff_read_header(AVFormatContext *s)
{
int size, filesize;
int64_t offset = 0;

@ -76,8 +76,7 @@ static int amr_probe(AVProbeData *p)
}
/* amr input */
static int amr_read_header(AVFormatContext *s,
AVFormatParameters *ap)
static int amr_read_header(AVFormatContext *s)
{
AVIOContext *pb = s->pb;
AVStream *st;

@ -76,8 +76,7 @@ static int find_record(const AnmDemuxContext *anm, int record)
return AVERROR_INVALIDDATA;
}
static int read_header(AVFormatContext *s,
AVFormatParameters *ap)
static int read_header(AVFormatContext *s)
{
AnmDemuxContext *anm = s->priv_data;
AVIOContext *pb = s->pb;

@ -30,7 +30,7 @@ static int apc_probe(AVProbeData *p)
return 0;
}
static int apc_read_header(AVFormatContext *s, AVFormatParameters *ap)
static int apc_read_header(AVFormatContext *s)
{
AVIOContext *pb = s->pb;
AVStream *st;

@ -152,7 +152,7 @@ static void ape_dumpinfo(AVFormatContext * s, APEContext * ape_ctx)
#endif
}
static int ape_read_header(AVFormatContext * s, AVFormatParameters * ap)
static int ape_read_header(AVFormatContext * s)
{
AVIOContext *pb = s->pb;
APEContext *ape = s->priv_data;

@ -445,7 +445,7 @@ reload:
goto restart;
}
static int applehttp_read_header(AVFormatContext *s, AVFormatParameters *ap)
static int applehttp_read_header(AVFormatContext *s)
{
AppleHTTPContext *c = s->priv_data;
int ret = 0, i, j, stream_offset = 0;

@ -576,7 +576,7 @@ static int asf_read_marker(AVFormatContext *s, int64_t size)
return 0;
}
static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
static int asf_read_header(AVFormatContext *s)
{
ASFContext *asf = s->priv_data;
ff_asf_guid g;

@ -73,7 +73,7 @@ static int event_cmp(uint8_t **a, uint8_t **b)
return get_pts(*a) - get_pts(*b);
}
static int read_header(AVFormatContext *s, AVFormatParameters *ap)
static int read_header(AVFormatContext *s)
{
int i, len, header_remaining;
ASSContext *ass = s->priv_data;

@ -118,8 +118,7 @@ static int au_probe(AVProbeData *p)
}
/* au input */
static int au_read_header(AVFormatContext *s,
AVFormatParameters *ap)
static int au_read_header(AVFormatContext *s)
{
int size, bps, data_size = 0;
unsigned int tag;

@ -220,74 +220,6 @@ struct AVFormatContext;
* @}
*/
#if FF_API_OLD_METADATA2
/**
* @defgroup old_metadata Old metadata API
* The following functions are deprecated, use
* their equivalents from libavutil/dict.h instead.
* @{
*/
#define AV_METADATA_MATCH_CASE AV_DICT_MATCH_CASE
#define AV_METADATA_IGNORE_SUFFIX AV_DICT_IGNORE_SUFFIX
#define AV_METADATA_DONT_STRDUP_KEY AV_DICT_DONT_STRDUP_KEY
#define AV_METADATA_DONT_STRDUP_VAL AV_DICT_DONT_STRDUP_VAL
#define AV_METADATA_DONT_OVERWRITE AV_DICT_DONT_OVERWRITE
typedef attribute_deprecated AVDictionary AVMetadata;
typedef attribute_deprecated AVDictionaryEntry AVMetadataTag;
typedef struct AVMetadataConv AVMetadataConv;
/**
* Get a metadata element with matching key.
*
* @param prev Set to the previous matching element to find the next.
* If set to NULL the first matching element is returned.
* @param flags Allows case as well as suffix-insensitive comparisons.
* @return Found tag or NULL, changing key or value leads to undefined behavior.
*/
attribute_deprecated AVDictionaryEntry *
av_metadata_get(AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags);
/**
* Set the given tag in *pm, overwriting an existing tag.
*
* @param pm pointer to a pointer to a metadata struct. If *pm is NULL
* a metadata struct is allocated and put in *pm.
* @param key tag key to add to *pm (will be av_strduped depending on flags)
* @param value tag value to add to *pm (will be av_strduped depending on flags).
* Passing a NULL value will cause an existing tag to be deleted.
* @return >= 0 on success otherwise an error code <0
*/
attribute_deprecated int av_metadata_set2(AVDictionary **pm, const char *key, const char *value, int flags);
/**
* This function is provided for compatibility reason and currently does nothing.
*/
attribute_deprecated void av_metadata_conv(struct AVFormatContext *ctx, const AVMetadataConv *d_conv,
const AVMetadataConv *s_conv);
/**
* Copy metadata from one AVDictionary struct into another.
* @param dst pointer to a pointer to a AVDictionary struct. If *dst is NULL,
* this function will allocate a struct for you and put it in *dst
* @param src pointer to source AVDictionary struct
* @param flags flags to use when setting metadata in *dst
* @note metadata is read using the AV_DICT_IGNORE_SUFFIX flag
*/
attribute_deprecated void av_metadata_copy(AVDictionary **dst, AVDictionary *src, int flags);
/**
* Free all the memory allocated for an AVDictionary struct.
*/
attribute_deprecated void av_metadata_free(AVDictionary **m);
/**
* @}
*/
#endif
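Editor's note: the FF_API_OLD_METADATA2 block removed above was a thin wrapper around the AVDictionary API; callers move to libavutil/dict.h, as the earlier aiff hunk's AV_METADATA_DONT_STRDUP_VAL to AV_DICT_DONT_STRDUP_VAL change also shows. A minimal sketch of the replacement calls (set_and_read_title is a made-up name; in real code the dictionary is typically an AVFormatContext's or AVStream's metadata field):

    #include <errno.h>
    #include <libavutil/dict.h>
    #include <libavutil/error.h>

    int set_and_read_title(void)
    {
        AVDictionary *m = NULL;
        AVDictionaryEntry *tag;
        int ret;

        ret = av_dict_set(&m, "title", "example", 0);  /* replaces av_metadata_set2() */
        if (ret < 0)
            return ret;
        tag = av_dict_get(m, "title", NULL, 0);        /* replaces av_metadata_get() */
        ret = tag ? 0 : AVERROR(EINVAL);
        av_dict_free(&m);                              /* replaces av_metadata_free() */
        return ret;
    }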
/* packet functions */
@ -345,25 +277,6 @@ typedef struct AVProbeData {
#define AVPROBE_SCORE_MAX 100 ///< maximum score, half of that is used for file-extension-based detection
#define AVPROBE_PADDING_SIZE 32 ///< extra allocated bytes at the end of the probe buffer
typedef struct AVFormatParameters {
#if FF_API_FORMAT_PARAMETERS
attribute_deprecated AVRational time_base;
attribute_deprecated int sample_rate;
attribute_deprecated int channels;
attribute_deprecated int width;
attribute_deprecated int height;
attribute_deprecated enum PixelFormat pix_fmt;
attribute_deprecated int channel; /**< Used to select DV channel. */
attribute_deprecated const char *standard; /**< deprecated, use demuxer-specific options instead. */
attribute_deprecated unsigned int mpeg2ts_raw:1; /**< deprecated, use mpegtsraw demuxer */
/**< deprecated, use mpegtsraw demuxer-specific options instead */
attribute_deprecated unsigned int mpeg2ts_compute_pcr:1;
attribute_deprecated unsigned int initial_pause:1; /**< Do not begin to play the stream
immediately (RTSP only). */
attribute_deprecated unsigned int prealloced_context:1;
#endif
} AVFormatParameters;
/// Demuxer will use avio_open, no opened file should be provided by the caller.
#define AVFMT_NOFILE 0x0001
#define AVFMT_NEEDNUMBER 0x0002 /**< Needs '%d' in filename. */
@ -422,9 +335,6 @@ typedef struct AVOutputFormat {
* AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH
*/
int flags;
void *dummy;
int (*interleave_packet)(struct AVFormatContext *, AVPacket *out,
AVPacket *in, int flush);
@ -436,10 +346,6 @@ typedef struct AVOutputFormat {
enum CodecID subtitle_codec; /**< default subtitle codec */
#if FF_API_OLD_METADATA2
const AVMetadataConv *metadata_conv;
#endif
const AVClass *priv_class; ///< AVClass for the private context
/**
@ -496,8 +402,7 @@ typedef struct AVInputFormat {
* additional parameters. Only used in raw format right
* now. 'av_new_stream' should be called to create new streams.
*/
int (*read_header)(struct AVFormatContext *,
AVFormatParameters *ap);
int (*read_header)(struct AVFormatContext *);
/**
* Read one packet and put it in 'pkt'. pts and flags are also
@ -575,10 +480,6 @@ typedef struct AVInputFormat {
*/
int (*read_seek2)(struct AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags);
#if FF_API_OLD_METADATA2
const AVMetadataConv *metadata_conv;
#endif
const AVClass *priv_class; ///< AVClass for the private context
/* private fields */
@ -649,11 +550,6 @@ typedef struct AVStream {
AVRational r_frame_rate;
void *priv_data;
#if FF_API_REORDER_PRIVATE
/* internal data used in av_find_stream_info() */
int64_t first_dts;
#endif
/**
* encoding: pts generation when outputting stream
*/
@ -667,24 +563,8 @@ typedef struct AVStream {
* encoding: set by libavformat in av_write_header
*/
AVRational time_base;
#if FF_API_REORDER_PRIVATE
int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */
#endif
#if FF_API_STREAM_COPY
/* ffmpeg.c private use */
attribute_deprecated int stream_copy; /**< If set, just copy stream. */
#endif
enum AVDiscard discard; ///< Selects which packets can be discarded at will and do not need to be demuxed.
#if FF_API_AVSTREAM_QUALITY
//FIXME move stuff to a flags field?
/**
* Quality, as it has been removed from AVCodecContext and put in AVVideoFrame.
* MN: dunno if that is the right place for it
*/
attribute_deprecated float quality;
#endif
/**
* Decoding: pts of the first frame of the stream in presentation order, in stream time base.
* Only set this if you are absolutely 100% sure that the value you set
@ -702,31 +582,10 @@ typedef struct AVStream {
*/
int64_t duration;
#if FF_API_REORDER_PRIVATE
/* av_read_frame() support */
enum AVStreamParseType need_parsing;
struct AVCodecParserContext *parser;
int64_t cur_dts;
int last_IP_duration;
int64_t last_IP_pts;
/* av_seek_frame() support */
AVIndexEntry *index_entries; /**< Only used if the format does not
support seeking natively. */
int nb_index_entries;
unsigned int index_entries_allocated_size;
#endif
int64_t nb_frames; ///< number of frames in this stream if known or 0
int disposition; /**< AV_DISPOSITION_* bit field */
#if FF_API_REORDER_PRIVATE
AVProbeData probe_data;
#define MAX_REORDER_DELAY 16
int64_t pts_buffer[MAX_REORDER_DELAY+1];
#endif
/**
* sample aspect ratio (0 if unknown)
* - encoding: Set by user.
@ -736,38 +595,6 @@ typedef struct AVStream {
AVDictionary *metadata;
#if FF_API_REORDER_PRIVATE
/* Intended mostly for av_read_frame() support. Not supposed to be used by */
/* external applications; try to use something else if at all possible. */
const uint8_t *cur_ptr;
int cur_len;
AVPacket cur_pkt;
// Timestamp generation support:
/**
* Timestamp corresponding to the last dts sync point.
*
* Initialized when AVCodecParserContext.dts_sync_point >= 0 and
* a DTS is received from the underlying container. Otherwise set to
* AV_NOPTS_VALUE by default.
*/
int64_t reference_dts;
/**
* Number of packets to buffer for codec probing
* NOT PART OF PUBLIC API
*/
#define MAX_PROBE_PACKETS 2500
int probe_packets;
/**
* last packet in packet_buffer for this stream when muxing.
* Used internally, NOT PART OF PUBLIC API, do not read or
* write from outside of libav*
*/
struct AVPacketList *last_in_packet_buffer;
#endif
/**
* Average framerate
*/
@ -808,13 +635,6 @@ typedef struct AVStream {
int64_t codec_info_duration;
int nb_decoded_frames;
} *info;
/**
* flag to indicate that probing is requested
* NOT PART OF PUBLIC API
*/
int request_probe;
#if !FF_API_REORDER_PRIVATE
const uint8_t *cur_ptr;
int cur_len;
AVPacket cur_pkt;
@ -856,7 +676,12 @@ typedef struct AVStream {
unsigned int index_entries_allocated_size;
int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */
#endif
/**
* flag to indicate that probing is requested
* NOT PART OF PUBLIC API
*/
int request_probe;
} AVStream;
#define AV_PROGRAM_RUNNING 1
@ -947,23 +772,7 @@ typedef struct AVFormatContext {
char filename[1024]; /**< input or output filename */
/* stream info */
#if FF_API_TIMESTAMP
/**
* @deprecated use 'creation_time' metadata tag instead
*/
attribute_deprecated int64_t timestamp;
#endif
int ctx_flags; /**< Format-specific flags, see AVFMTCTX_xx */
#if FF_API_REORDER_PRIVATE
/* private data for pts handling (do not modify directly). */
/**
* This buffer is only needed when packets were already buffered but
* not decoded, for example to get the codec parameters in MPEG
* streams.
*/
struct AVPacketList *packet_buffer;
#endif
/**
* Decoding: position of the first frame of the component, in
@ -980,13 +789,6 @@ typedef struct AVFormatContext {
*/
int64_t duration;
#if FF_API_FILESIZE
/**
* decoding: total file size, 0 if unknown
*/
attribute_deprecated int64_t file_size;
#endif
/**
* Decoding: total stream bitrate in bit/s, 0 if not
* available. Never set it directly if the file_size and the
@ -994,37 +796,9 @@ typedef struct AVFormatContext {
*/
int bit_rate;
#if FF_API_REORDER_PRIVATE
/* av_read_frame() support */
AVStream *cur_st;
/* av_seek_frame() support */
int64_t data_offset; /**< offset of the first packet */
#endif
#if FF_API_MUXRATE
/**
* use mpeg muxer private options instead
*/
attribute_deprecated int mux_rate;
#endif
unsigned int packet_size;
#if FF_API_PRELOAD
attribute_deprecated int preload;
#endif
int max_delay;
#if FF_API_LOOP_OUTPUT
#define AVFMT_NOOUTPUTLOOP -1
#define AVFMT_INFINITEOUTPUTLOOP 0
/**
* number of times to loop output in formats that support it
*
* @deprecated use the 'loop' private option in the gif muxer.
*/
attribute_deprecated int loop_output;
#endif
int flags;
#define AVFMT_FLAG_GENPTS 0x0001 ///< Generate missing pts even if it requires parsing future frames.
#define AVFMT_FLAG_IGNIDX 0x0002 ///< Ignore index.
@ -1032,9 +806,6 @@ typedef struct AVFormatContext {
#define AVFMT_FLAG_IGNDTS 0x0008 ///< Ignore DTS on frames that contain both DTS & PTS
#define AVFMT_FLAG_NOFILLIN 0x0010 ///< Do not infer any values from other values, just return what is stored in the container
#define AVFMT_FLAG_NOPARSE 0x0020 ///< Do not use AVParsers, you also must set AVFMT_FLAG_NOFILLIN as the fillin code works on frames and no parsing -> no frames. Also seeking to frames can not work if parsing to find frame boundaries has been disabled
#if FF_API_FLAG_RTP_HINT
#define AVFMT_FLAG_RTP_HINT 0x0040 ///< Deprecated, use the -movflags rtphint muxer specific AVOption instead
#endif
#define AVFMT_FLAG_CUSTOM_IO 0x0080 ///< The caller has supplied a custom AVIOContext, don't avio_close() it.
#define AVFMT_FLAG_DISCARD_CORRUPT 0x0100 ///< Discard frames marked corrupted
#define AVFMT_FLAG_MP4A_LATM 0x8000 ///< Enable RTP MP4A-LATM payload
@ -1042,13 +813,6 @@ typedef struct AVFormatContext {
#define AVFMT_FLAG_PRIV_OPT 0x20000 ///< Enable use of private options by delaying codec open (this could be made default once all code is converted)
#define AVFMT_FLAG_KEEP_SIDE_DATA 0x40000 ///< Dont merge side data but keep it seperate.
#if FF_API_LOOP_INPUT
/**
* @deprecated, use the 'loop' img2 demuxer private option.
*/
attribute_deprecated int loop_input;
#endif
/**
* decoding: size of data to probe; encoding: unused.
*/
@ -1111,30 +875,8 @@ typedef struct AVFormatContext {
int debug;
#define FF_FDEBUG_TS 0x0001
#if FF_API_REORDER_PRIVATE
/**
* Raw packets from the demuxer, prior to parsing and decoding.
* This buffer is used for buffering packets until the codec can
* be identified, as parsing cannot be done without knowing the
* codec.
*/
struct AVPacketList *raw_packet_buffer;
struct AVPacketList *raw_packet_buffer_end;
struct AVPacketList *packet_buffer_end;
#endif
AVDictionary *metadata;
#if FF_API_REORDER_PRIVATE
/**
* Remaining size available for raw_packet_buffer, in bytes.
* NOT PART OF PUBLIC API
*/
#define RAW_PACKET_BUFFER_SIZE 2500000
int raw_packet_buffer_remaining_size;
#endif
/**
* Start time of the stream in real world time, in microseconds
* since the unix epoch (00:00 1st January 1970). That is, pts=0
@ -1205,7 +947,6 @@ typedef struct AVFormatContext {
* New public fields should be added right above.
*****************************************************************
*/
#if !FF_API_REORDER_PRIVATE
/**
* Raw packets from the demuxer, prior to parsing and decoding.
* This buffer is used for buffering packets until the codec can
@ -1233,7 +974,6 @@ typedef struct AVFormatContext {
/* av_seek_frame() support */
int64_t data_offset; /**< offset of the first packet */
#endif
} AVFormatContext;
typedef struct AVPacketList {
@ -1354,17 +1094,12 @@ AVProgram *av_new_program(AVFormatContext *s, int id);
*/
#if FF_API_GUESS_IMG2_CODEC
attribute_deprecated enum CodecID av_guess_image2_codec(const char *filename);
#endif
#if FF_API_PKT_DUMP
attribute_deprecated void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload);
attribute_deprecated void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt,
int dump_payload);
#endif
#if FF_API_ALLOC_OUTPUT_CONTEXT
/**
* @deprecated deprecated in favor of avformat_alloc_output_context2()
@ -1453,36 +1188,6 @@ int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
const char *filename, void *logctx,
unsigned int offset, unsigned int max_probe_size);
#if FF_API_FORMAT_PARAMETERS
/**
* Allocate all the structures needed to read an input stream.
* This does not open the needed codecs for decoding the stream[s].
* @deprecated use avformat_open_input instead.
*/
attribute_deprecated int av_open_input_stream(AVFormatContext **ic_ptr,
AVIOContext *pb, const char *filename,
AVInputFormat *fmt, AVFormatParameters *ap);
/**
* Open a media file as input. The codecs are not opened. Only the file
* header (if present) is read.
*
* @param ic_ptr The opened media file handle is put here.
* @param filename filename to open
* @param fmt If non-NULL, force the file format to use.
* @param buf_size optional buffer size (zero if default is OK)
* @param ap Additional parameters needed when opening the file
* (NULL if default).
* @return 0 if OK, AVERROR_xxx otherwise
*
* @deprecated use avformat_open_input instead.
*/
attribute_deprecated int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
AVInputFormat *fmt,
int buf_size,
AVFormatParameters *ap);
#endif
/**
* Open an input stream and read the header. The codecs are not opened.
* The stream must be closed with av_close_input_file().
@ -1504,7 +1209,8 @@ attribute_deprecated int av_open_input_file(AVFormatContext **ic_ptr, const char
*/
int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options);
int av_demuxer_open(AVFormatContext *ic, AVFormatParameters *ap);
attribute_deprecated
int av_demuxer_open(AVFormatContext *ic);
#if FF_API_FORMAT_PARAMETERS
/**
@ -1685,16 +1391,6 @@ int av_read_play(AVFormatContext *s);
*/
int av_read_pause(AVFormatContext *s);
#if FF_API_FORMAT_PARAMETERS
/**
* Free a AVFormatContext allocated by av_open_input_stream.
* @param s context to free
* @deprecated use av_close_input_file()
*/
attribute_deprecated
void av_close_input_stream(AVFormatContext *s);
#endif
#if FF_API_CLOSE_INPUT_FILE
/**
* @deprecated use avformat_close_input()
@ -1744,28 +1440,6 @@ void av_set_pts_info(AVStream *s, int pts_wrap_bits,
#define AVSEEK_FLAG_ANY 4 ///< seek to any frame, even non-keyframes
#define AVSEEK_FLAG_FRAME 8 ///< seeking based on frame number
#if FF_API_SEEK_PUBLIC
attribute_deprecated
int av_seek_frame_binary(AVFormatContext *s, int stream_index,
int64_t target_ts, int flags);
attribute_deprecated
void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp);
attribute_deprecated
int64_t av_gen_search(AVFormatContext *s, int stream_index,
int64_t target_ts, int64_t pos_min,
int64_t pos_max, int64_t pos_limit,
int64_t ts_min, int64_t ts_max,
int flags, int64_t *ts_ret,
int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ));
#endif
#if FF_API_FORMAT_PARAMETERS
/**
* @deprecated pass the options to avformat_write_header directly.
*/
attribute_deprecated int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap);
#endif
/**
* @addtogroup lavf_encoding
* @{
@ -1787,21 +1461,6 @@ attribute_deprecated int av_set_parameters(AVFormatContext *s, AVFormatParameter
*/
int avformat_write_header(AVFormatContext *s, AVDictionary **options);
#if FF_API_FORMAT_PARAMETERS
/**
* Allocate the stream private data and write the stream header to an
* output media file.
* @note: this sets stream time-bases, if possible to stream->codec->time_base
* but for some formats it might also be some other time base
*
* @param s media file handle
* @return 0 if OK, AVERROR_xxx on error
*
* @deprecated use avformat_write_header.
*/
attribute_deprecated int av_write_header(AVFormatContext *s);
#endif
/**
* Write a packet to an output media file.
*
@ -2047,45 +1706,17 @@ void av_url_split(char *proto, int proto_size,
char *path, int path_size,
const char *url);
#if FF_API_DUMP_FORMAT
/**
* @deprecated Deprecated in favor of av_dump_format().
*/
attribute_deprecated void dump_format(AVFormatContext *ic,
int index,
const char *url,
int is_output);
#endif
void av_dump_format(AVFormatContext *ic,
int index,
const char *url,
int is_output);
#if FF_API_PARSE_DATE
/**
* Parse datestr and return a corresponding number of microseconds.
*
* @param datestr String representing a date or a duration.
* See av_parse_time() for the syntax of the provided string.
* @deprecated in favor of av_parse_time()
*/
attribute_deprecated
int64_t parse_date(const char *datestr, int duration);
#endif
/**
* Get the current time in microseconds.
*/
int64_t av_gettime(void);
#if FF_API_FIND_INFO_TAG
/**
* @deprecated use av_find_info_tag in libavutil instead.
*/
attribute_deprecated int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info);
#endif
/**
* Return in 'buf' the path with '%d' replaced by a number.
*
@ -2125,10 +1756,6 @@ int av_filename_number_test(const char *filename);
*/
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size);
#if FF_API_SDP_CREATE
attribute_deprecated int avf_sdp_create(AVFormatContext *ac[], int n_files, char *buff, int size);
#endif
/**
* Return a positive value if the given filename has one of the given
* extensions, 0 otherwise.

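A minimal caller-side sketch (not part of this diff) of the migration the avformat.h hunks above imply: settings that used to be filled into AVFormatParameters via av_set_parameters() now travel in an AVDictionary, and av_write_header(oc) becomes avformat_write_header(oc, &opts). The option name "some_muxer_option" is hypothetical.

#include <libavformat/avformat.h>

static int write_header_new_style(AVFormatContext *oc)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "some_muxer_option", "value", 0); /* illustrative only */
    ret = avformat_write_header(oc, &opts);              /* replaces av_write_header(oc) */
    av_dict_free(&opts);                                  /* discard any unconsumed options */
    return ret;
}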
@ -351,7 +351,7 @@ static void avi_read_nikon(AVFormatContext *s, uint64_t end)
}
}
static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
static int avi_read_header(AVFormatContext *s)
{
AVIContext *avi = s->priv_data;
AVIOContext *pb = s->pb;
@ -719,7 +719,7 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
case MKTAG('i', 'n', 'd', 'x'):
i= avio_tell(pb);
if(pb->seekable && !(s->flags & AVFMT_FLAG_IGNIDX) && avi->use_odml &&
read_braindead_odml_indx(s, 0) < 0 && s->error_recognition >= FF_ER_EXPLODE)
read_braindead_odml_indx(s, 0) < 0 && (s->error_recognition & AV_EF_EXPLODE))
goto fail;
avio_seek(pb, i+size, SEEK_SET);
break;

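The avidec.c hunk above reflects error_recognition turning from an ordered FF_ER_* level into an AV_EF_* bitmask; a minimal sketch, assuming the caller configures the context before avformat_open_input(), of how a demuxing client opts into hard failures:

#include <libavformat/avformat.h>

/* Sketch: AV_EF_EXPLODE is now a bit flag, ORed into error_recognition
 * rather than compared against an FF_ER_* threshold. */
static AVFormatContext *alloc_strict_context(void)
{
    AVFormatContext *ic = avformat_alloc_context();
    if (ic)
        ic->error_recognition |= AV_EF_EXPLODE; /* e.g. fail on a broken ODML index */
    return ic;
}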
@ -84,16 +84,6 @@ const AVClass ffurl_context_class = {
/*@}*/
#if FF_API_OLD_INTERRUPT_CB
static int default_interrupt_cb(void);
int (*url_interrupt_cb)(void) = default_interrupt_cb;
#endif
URLProtocol *av_protocol_next(URLProtocol *p)
{
return ffurl_protocol_next(p);
}
const char *avio_enum_protocols(void **opaque, int output)
{
URLProtocol **p = opaque;
@ -190,9 +180,7 @@ static int url_alloc_for_protocol (URLContext **puc, struct URLProtocol *up,
int ffurl_connect(URLContext* uc, AVDictionary **options)
{
int err =
#if !FF_API_OLD_AVIO
uc->prot->url_open2 ? uc->prot->url_open2(uc, uc->filename, uc->flags, options) :
#endif
uc->prot->url_open(uc, uc->filename, uc->flags);
if (err)
return err;
@ -205,81 +193,6 @@ int ffurl_connect(URLContext* uc, AVDictionary **options)
return 0;
}
#if FF_API_OLD_AVIO
int url_open_protocol (URLContext **puc, struct URLProtocol *up,
const char *filename, int flags)
{
int ret;
ret = url_alloc_for_protocol(puc, up, filename, flags, NULL);
if (ret)
goto fail;
ret = ffurl_connect(*puc, NULL);
if (!ret)
return 0;
fail:
ffurl_close(*puc);
*puc = NULL;
return ret;
}
int url_alloc(URLContext **puc, const char *filename, int flags)
{
return ffurl_alloc(puc, filename, flags, NULL);
}
int url_connect(URLContext* uc)
{
return ffurl_connect(uc, NULL);
}
int url_open(URLContext **puc, const char *filename, int flags)
{
return ffurl_open(puc, filename, flags, NULL, NULL);
}
int url_read(URLContext *h, unsigned char *buf, int size)
{
return ffurl_read(h, buf, size);
}
int url_read_complete(URLContext *h, unsigned char *buf, int size)
{
return ffurl_read_complete(h, buf, size);
}
int url_write(URLContext *h, const unsigned char *buf, int size)
{
return ffurl_write(h, buf, size);
}
int64_t url_seek(URLContext *h, int64_t pos, int whence)
{
return ffurl_seek(h, pos, whence);
}
int url_close(URLContext *h)
{
return ffurl_close(h);
}
int64_t url_filesize(URLContext *h)
{
return ffurl_size(h);
}
int url_get_file_handle(URLContext *h)
{
return ffurl_get_file_handle(h);
}
int url_get_max_packet_size(URLContext *h)
{
return h->max_packet_size;
}
void url_get_filename(URLContext *h, char *buf, int buf_size)
{
av_strlcpy(buf, h->filename, buf_size);
}
void url_set_interrupt_cb(URLInterruptCB *interrupt_cb)
{
avio_set_interrupt_cb(interrupt_cb);
}
int av_register_protocol2(URLProtocol *protocol, int size)
{
return ffurl_register_protocol(protocol, size);
}
#endif
#define URL_SCHEME_CHARS \
"abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
@ -422,17 +335,6 @@ int ffurl_close(URLContext *h)
return ret;
}
#if FF_API_OLD_AVIO
int url_exist(const char *filename)
{
URLContext *h;
if (ffurl_open(&h, filename, AVIO_FLAG_READ, NULL, NULL) < 0)
return 0;
ffurl_close(h);
return 1;
}
#endif
int avio_check(const char *url, int flags)
{
URLContext *h;
@ -474,45 +376,11 @@ int ffurl_get_file_handle(URLContext *h)
return h->prot->url_get_file_handle(h);
}
#if FF_API_OLD_INTERRUPT_CB
static int default_interrupt_cb(void)
{
return 0;
}
void avio_set_interrupt_cb(int (*interrupt_cb)(void))
{
if (!interrupt_cb)
interrupt_cb = default_interrupt_cb;
url_interrupt_cb = interrupt_cb;
}
#endif
int ff_check_interrupt(AVIOInterruptCB *cb)
{
int ret;
if (cb && cb->callback && (ret = cb->callback(cb->opaque)))
return ret;
#if FF_API_OLD_INTERRUPT_CB
return url_interrupt_cb();
#else
return 0;
#endif
}
#if FF_API_OLD_AVIO
int av_url_read_pause(URLContext *h, int pause)
{
if (!h->prot->url_read_pause)
return AVERROR(ENOSYS);
return h->prot->url_read_pause(h, pause);
}
int64_t av_url_read_seek(URLContext *h,
int stream_index, int64_t timestamp, int flags)
{
if (!h->prot->url_read_seek)
return AVERROR(ENOSYS);
return h->prot->url_read_seek(h, stream_index, timestamp, flags);
}
#endif

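A hedged sketch of the replacement for the url_exist() helper removed from avio.c above: avio_check() reports the available AVIO_FLAG_* access flags or a negative AVERROR, so an existence probe becomes roughly the following (resource_exists is an illustrative name, and the equivalence is only approximate since url_exist() actually opened the resource):

#include <libavformat/avio.h>

/* Sketch: roughly equivalent to the removed url_exist(url).
 * A non-negative return from avio_check() means the resource could be
 * probed; a negative AVERROR (e.g. for ENOENT) means it is not reachable. */
static int resource_exists(const char *url)
{
    return avio_check(url, 0) >= 0;
}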
@ -66,7 +66,6 @@ typedef struct {
* function pointers specified in avio_alloc_context()
*/
typedef struct {
#if !FF_API_OLD_AVIO
/**
* A class for private options.
*
@ -80,7 +79,6 @@ typedef struct {
* to any av_opt_* functions in that case.
*/
AVClass *av_class;
#endif
unsigned char *buffer; /**< Start of the buffer. */
int buffer_size; /**< Maximum buffer size */
unsigned char *buf_ptr; /**< Current position in the buffer */
@ -97,9 +95,6 @@ typedef struct {
int must_flush; /**< true if the next seek should flush */
int eof_reached; /**< true if eof reached */
int write_flag; /**< true if open for writing */
#if FF_API_OLD_AVIO
attribute_deprecated int is_streamed;
#endif
int max_packet_size;
unsigned long checksum;
unsigned char *checksum_ptr;
@ -130,259 +125,6 @@ typedef struct {
/* unbuffered I/O */
#if FF_API_OLD_AVIO
/**
* URL Context.
* New fields can be added to the end with minor version bumps.
* Removal, reordering and changes to existing fields require a major
* version bump.
* sizeof(URLContext) must not be used outside libav*.
* @deprecated This struct will be made private
*/
typedef struct URLContext {
const AVClass *av_class; ///< information for av_log(). Set by url_open().
struct URLProtocol *prot;
int flags;
int is_streamed; /**< true if streamed (no seek possible), default = false */
int max_packet_size; /**< if non-zero, the stream is packetized with this max packet size */
void *priv_data;
char *filename; /**< specified URL */
int is_connected;
AVIOInterruptCB interrupt_callback;
} URLContext;
#define URL_PROTOCOL_FLAG_NESTED_SCHEME 1 /*< The protocol name can be the first part of a nested protocol scheme */
#define URL_PROTOCOL_FLAG_NETWORK 2 /*< The protocol uses network */
/**
* @deprecated This struct is to be made private. Use the higher-level
* AVIOContext-based API instead.
*/
typedef struct URLProtocol {
const char *name;
int (*url_open)(URLContext *h, const char *url, int flags);
int (*url_read)(URLContext *h, unsigned char *buf, int size);
int (*url_write)(URLContext *h, const unsigned char *buf, int size);
int64_t (*url_seek)(URLContext *h, int64_t pos, int whence);
int (*url_close)(URLContext *h);
struct URLProtocol *next;
int (*url_read_pause)(URLContext *h, int pause);
int64_t (*url_read_seek)(URLContext *h, int stream_index,
int64_t timestamp, int flags);
int (*url_get_file_handle)(URLContext *h);
int priv_data_size;
const AVClass *priv_data_class;
int flags;
int (*url_check)(URLContext *h, int mask);
} URLProtocol;
typedef struct URLPollEntry {
URLContext *handle;
int events;
int revents;
} URLPollEntry;
/* not implemented */
attribute_deprecated int url_poll(URLPollEntry *poll_table, int n, int timeout);
/**
* @name URL open modes
* The flags argument to url_open and cousins must be one of the following
* constants, optionally ORed with other flags.
* @{
*/
#define URL_RDONLY 1 /**< read-only */
#define URL_WRONLY 2 /**< write-only */
#define URL_RDWR (URL_RDONLY|URL_WRONLY) /**< read-write */
/**
* @}
*/
/**
* Use non-blocking mode.
* If this flag is set, operations on the context will return
* AVERROR(EAGAIN) if they can not be performed immediately.
* If this flag is not set, operations on the context will never return
* AVERROR(EAGAIN).
* Note that this flag does not affect the opening/connecting of the
* context. Connecting a protocol will always block if necessary (e.g. on
* network protocols) but never hang (e.g. on busy devices).
* Warning: non-blocking protocol support is work-in-progress; this flag may be
* silently ignored.
*/
#define URL_FLAG_NONBLOCK 8
typedef int URLInterruptCB(void);
extern URLInterruptCB *url_interrupt_cb;
/**
* @defgroup old_url_funcs Old url_* functions
* The following functions are deprecated. Use the buffered API based on #AVIOContext instead.
* @{
* @ingroup lavf_io
*/
attribute_deprecated int url_open_protocol (URLContext **puc, struct URLProtocol *up,
const char *url, int flags);
attribute_deprecated int url_alloc(URLContext **h, const char *url, int flags);
attribute_deprecated int url_connect(URLContext *h);
attribute_deprecated int url_open(URLContext **h, const char *url, int flags);
attribute_deprecated int url_read(URLContext *h, unsigned char *buf, int size);
attribute_deprecated int url_read_complete(URLContext *h, unsigned char *buf, int size);
attribute_deprecated int url_write(URLContext *h, const unsigned char *buf, int size);
attribute_deprecated int64_t url_seek(URLContext *h, int64_t pos, int whence);
attribute_deprecated int url_close(URLContext *h);
attribute_deprecated int64_t url_filesize(URLContext *h);
attribute_deprecated int url_get_file_handle(URLContext *h);
attribute_deprecated int url_get_max_packet_size(URLContext *h);
attribute_deprecated void url_get_filename(URLContext *h, char *buf, int buf_size);
attribute_deprecated int av_url_read_pause(URLContext *h, int pause);
attribute_deprecated int64_t av_url_read_seek(URLContext *h, int stream_index,
int64_t timestamp, int flags);
attribute_deprecated void url_set_interrupt_cb(int (*interrupt_cb)(void));
/**
* Return the next registered protocol after the given protocol (the first if
* NULL is given), or NULL if protocol is the last one.
*/
URLProtocol *av_protocol_next(URLProtocol *p);
/**
* Register the URLProtocol protocol.
*
* @param size the size of the URLProtocol struct referenced
*/
attribute_deprecated int av_register_protocol2(URLProtocol *protocol, int size);
/**
* @}
*/
typedef attribute_deprecated AVIOContext ByteIOContext;
attribute_deprecated int init_put_byte(AVIOContext *s,
unsigned char *buffer,
int buffer_size,
int write_flag,
void *opaque,
int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
int64_t (*seek)(void *opaque, int64_t offset, int whence));
attribute_deprecated AVIOContext *av_alloc_put_byte(
unsigned char *buffer,
int buffer_size,
int write_flag,
void *opaque,
int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
int64_t (*seek)(void *opaque, int64_t offset, int whence));
/**
* @defgroup old_avio_funcs Old put_/get_*() functions
* The following functions are deprecated. Use the "avio_"-prefixed functions instead.
* @{
* @ingroup lavf_io
*/
attribute_deprecated int get_buffer(AVIOContext *s, unsigned char *buf, int size);
attribute_deprecated int get_partial_buffer(AVIOContext *s, unsigned char *buf, int size);
attribute_deprecated int get_byte(AVIOContext *s);
attribute_deprecated unsigned int get_le16(AVIOContext *s);
attribute_deprecated unsigned int get_le24(AVIOContext *s);
attribute_deprecated unsigned int get_le32(AVIOContext *s);
attribute_deprecated uint64_t get_le64(AVIOContext *s);
attribute_deprecated unsigned int get_be16(AVIOContext *s);
attribute_deprecated unsigned int get_be24(AVIOContext *s);
attribute_deprecated unsigned int get_be32(AVIOContext *s);
attribute_deprecated uint64_t get_be64(AVIOContext *s);
attribute_deprecated void put_byte(AVIOContext *s, int b);
attribute_deprecated void put_nbyte(AVIOContext *s, int b, int count);
attribute_deprecated void put_buffer(AVIOContext *s, const unsigned char *buf, int size);
attribute_deprecated void put_le64(AVIOContext *s, uint64_t val);
attribute_deprecated void put_be64(AVIOContext *s, uint64_t val);
attribute_deprecated void put_le32(AVIOContext *s, unsigned int val);
attribute_deprecated void put_be32(AVIOContext *s, unsigned int val);
attribute_deprecated void put_le24(AVIOContext *s, unsigned int val);
attribute_deprecated void put_be24(AVIOContext *s, unsigned int val);
attribute_deprecated void put_le16(AVIOContext *s, unsigned int val);
attribute_deprecated void put_be16(AVIOContext *s, unsigned int val);
attribute_deprecated void put_tag(AVIOContext *s, const char *tag);
/**
* @}
*/
attribute_deprecated int av_url_read_fpause(AVIOContext *h, int pause);
attribute_deprecated int64_t av_url_read_fseek (AVIOContext *h, int stream_index,
int64_t timestamp, int flags);
/**
* @defgroup old_url_f_funcs Old url_f* functions
* The following functions are deprecated. Use the "avio_"-prefixed functions instead.
* @{
* @ingroup lavf_io
*/
attribute_deprecated int url_fopen( AVIOContext **s, const char *url, int flags);
attribute_deprecated int url_fclose(AVIOContext *s);
attribute_deprecated int64_t url_fseek(AVIOContext *s, int64_t offset, int whence);
attribute_deprecated int url_fskip(AVIOContext *s, int64_t offset);
attribute_deprecated int64_t url_ftell(AVIOContext *s);
attribute_deprecated int64_t url_fsize(AVIOContext *s);
#define URL_EOF (-1)
attribute_deprecated int url_fgetc(AVIOContext *s);
attribute_deprecated int url_setbufsize(AVIOContext *s, int buf_size);
attribute_deprecated int url_fprintf(AVIOContext *s, const char *fmt, ...) av_printf_format(2, 3);
attribute_deprecated void put_flush_packet(AVIOContext *s);
attribute_deprecated int url_open_dyn_buf(AVIOContext **s);
attribute_deprecated int url_open_dyn_packet_buf(AVIOContext **s, int max_packet_size);
attribute_deprecated int url_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer);
attribute_deprecated int url_fdopen(AVIOContext **s, URLContext *h);
/**
* @}
*/
attribute_deprecated int url_ferror(AVIOContext *s);
attribute_deprecated int udp_set_remote_url(URLContext *h, const char *uri);
attribute_deprecated int udp_get_local_port(URLContext *h);
attribute_deprecated void init_checksum(AVIOContext *s,
unsigned long (*update_checksum)(unsigned long c, const uint8_t *p, unsigned int len),
unsigned long checksum);
attribute_deprecated unsigned long get_checksum(AVIOContext *s);
attribute_deprecated void put_strz(AVIOContext *s, const char *buf);
/** @note Unlike fgets, the EOL character is not returned and a whole
line is parsed. Returns NULL if the first character read was EOF. */
attribute_deprecated char *url_fgets(AVIOContext *s, char *buf, int buf_size);
/**
* @deprecated use avio_get_str instead
*/
attribute_deprecated char *get_strz(AVIOContext *s, char *buf, int maxlen);
/**
* @deprecated Use AVIOContext.seekable field directly.
*/
attribute_deprecated static inline int url_is_streamed(AVIOContext *s)
{
return !s->seekable;
}
attribute_deprecated URLContext *url_fileno(AVIOContext *s);
/**
* @deprecated use AVIOContext.max_packet_size directly.
*/
attribute_deprecated int url_fget_max_packet_size(AVIOContext *s);
attribute_deprecated int url_open_buf(AVIOContext **s, uint8_t *buf, int buf_size, int flags);
/** return the written or read size */
attribute_deprecated int url_close_buf(AVIOContext *s);
/**
* Return a non-zero value if the resource indicated by url
* exists, 0 otherwise.
* @deprecated Use avio_check instead.
*/
attribute_deprecated int url_exist(const char *url);
#endif // FF_API_OLD_AVIO
/**
* Return AVIO_FLAG_* access flags corresponding to the access permissions
* of the resource in url, or a negative value corresponding to an
@ -397,18 +139,6 @@ attribute_deprecated int url_exist(const char *url);
*/
int avio_check(const char *url, int flags);
#if FF_API_OLD_INTERRUPT_CB
/**
* The callback is called in blocking functions to test regularly whether
* asynchronous interruption is needed; in that case the interrupted
* function returns AVERROR_EXIT. 'NULL' means no interrupt callback is
* given.
* @deprecated Use interrupt_callback in AVFormatContext/avio_open2
* instead.
*/
attribute_deprecated void avio_set_interrupt_cb(int (*interrupt_cb)(void));
#endif
/**
* Allocate and initialize an AVIOContext for buffered I/O. It must be later
* freed with av_free().

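As the deprecation note for avio_set_interrupt_cb() above says, interruption now hangs off the context instead of a global callback. A minimal sketch of the AVIOInterruptCB wiring, assuming the caller owns an abort flag; decode_interrupt_cb, open_input_interruptible and the int flag are illustrative names, not part of this diff:

#include <libavformat/avformat.h>

/* Sketch: the global url_interrupt_cb is replaced by a per-context callback.
 * Returning non-zero makes blocking I/O bail out with AVERROR_EXIT. */
static int decode_interrupt_cb(void *opaque)
{
    int *abort_requested = opaque; /* hypothetical flag owned by the caller */
    return *abort_requested;
}

static int open_input_interruptible(AVFormatContext **ic, const char *url,
                                    int *abort_requested)
{
    *ic = avformat_alloc_context();
    if (!*ic)
        return AVERROR(ENOMEM);
    (*ic)->interrupt_callback.callback = decode_interrupt_cb;
    (*ic)->interrupt_callback.opaque   = abort_requested;
    return avformat_open_input(ic, url, NULL, NULL);
}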
@ -40,7 +40,6 @@
*/
#define SHORT_SEEK_THRESHOLD 4096
#if !FF_API_OLD_AVIO
static void *ffio_url_child_next(void *obj, void *prev)
{
AVIOContext *s = obj;
@ -64,7 +63,7 @@ const AVClass ffio_url_class = {
.child_next = ffio_url_child_next,
.child_class_next = ffio_url_child_class_next,
};
#endif
static void fill_buffer(AVIOContext *s);
static int url_resetbuf(AVIOContext *s, int flags);
@ -89,9 +88,6 @@ int ffio_init_context(AVIOContext *s,
s->must_flush = 0;
s->eof_reached = 0;
s->error = 0;
#if FF_API_OLD_AVIO
s->is_streamed = 0;
#endif
s->seekable = AVIO_SEEKABLE_NORMAL;
s->max_packet_size = 0;
s->update_checksum= NULL;
@ -104,33 +100,6 @@ int ffio_init_context(AVIOContext *s,
return 0;
}
#if FF_API_OLD_AVIO
int init_put_byte(AVIOContext *s,
unsigned char *buffer,
int buffer_size,
int write_flag,
void *opaque,
int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
int64_t (*seek)(void *opaque, int64_t offset, int whence))
{
return ffio_init_context(s, buffer, buffer_size, write_flag, opaque,
read_packet, write_packet, seek);
}
AVIOContext *av_alloc_put_byte(
unsigned char *buffer,
int buffer_size,
int write_flag,
void *opaque,
int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
int64_t (*seek)(void *opaque, int64_t offset, int whence))
{
return avio_alloc_context(buffer, buffer_size, write_flag, opaque,
read_packet, write_packet, seek);
}
#endif
AVIOContext *avio_alloc_context(
unsigned char *buffer,
int buffer_size,
@ -270,19 +239,6 @@ int64_t avio_skip(AVIOContext *s, int64_t offset)
return avio_seek(s, offset, SEEK_CUR);
}
#if FF_API_OLD_AVIO
int url_fskip(AVIOContext *s, int64_t offset)
{
int64_t ret = avio_seek(s, offset, SEEK_CUR);
return ret < 0 ? ret : 0;
}
int64_t url_ftell(AVIOContext *s)
{
return avio_seek(s, 0, SEEK_CUR);
}
#endif
int64_t avio_size(AVIOContext *s)
{
int64_t size;
@ -313,15 +269,6 @@ int url_feof(AVIOContext *s)
return s->eof_reached;
}
#if FF_API_OLD_AVIO
int url_ferror(AVIOContext *s)
{
if(!s)
return 0;
return s->error;
}
#endif
void avio_wl32(AVIOContext *s, unsigned int val)
{
avio_w8(s, val);
@ -338,143 +285,6 @@ void avio_wb32(AVIOContext *s, unsigned int val)
avio_w8(s, val);
}
#if FF_API_OLD_AVIO
void put_strz(AVIOContext *s, const char *str)
{
avio_put_str(s, str);
}
#define GET(name, type) \
type get_be ##name(AVIOContext *s) \
{\
return avio_rb ##name(s);\
}\
type get_le ##name(AVIOContext *s) \
{\
return avio_rl ##name(s);\
}
GET(16, unsigned int)
GET(24, unsigned int)
GET(32, unsigned int)
GET(64, uint64_t)
#undef GET
#define PUT(name, type ) \
void put_le ##name(AVIOContext *s, type val)\
{\
avio_wl ##name(s, val);\
}\
void put_be ##name(AVIOContext *s, type val)\
{\
avio_wb ##name(s, val);\
}
PUT(16, unsigned int)
PUT(24, unsigned int)
PUT(32, unsigned int)
PUT(64, uint64_t)
#undef PUT
int get_byte(AVIOContext *s)
{
return avio_r8(s);
}
int get_buffer(AVIOContext *s, unsigned char *buf, int size)
{
return avio_read(s, buf, size);
}
int get_partial_buffer(AVIOContext *s, unsigned char *buf, int size)
{
return ffio_read_partial(s, buf, size);
}
void put_byte(AVIOContext *s, int val)
{
avio_w8(s, val);
}
void put_buffer(AVIOContext *s, const unsigned char *buf, int size)
{
avio_write(s, buf, size);
}
void put_nbyte(AVIOContext *s, int b, int count)
{
ffio_fill(s, b, count);
}
int url_fopen(AVIOContext **s, const char *filename, int flags)
{
return avio_open(s, filename, flags);
}
int url_fclose(AVIOContext *s)
{
return avio_close(s);
}
int64_t url_fseek(AVIOContext *s, int64_t offset, int whence)
{
return avio_seek(s, offset, whence);
}
int64_t url_fsize(AVIOContext *s)
{
return avio_size(s);
}
int url_setbufsize(AVIOContext *s, int buf_size)
{
return ffio_set_buf_size(s, buf_size);
}
int url_fprintf(AVIOContext *s, const char *fmt, ...)
{
va_list ap;
char buf[4096];
int ret;
va_start(ap, fmt);
ret = vsnprintf(buf, sizeof(buf), fmt, ap);
va_end(ap);
avio_write(s, buf, strlen(buf));
return ret;
}
void put_flush_packet(AVIOContext *s)
{
avio_flush(s);
}
int av_url_read_fpause(AVIOContext *s, int pause)
{
return avio_pause(s, pause);
}
int64_t av_url_read_fseek(AVIOContext *s, int stream_index,
int64_t timestamp, int flags)
{
return avio_seek_time(s, stream_index, timestamp, flags);
}
void init_checksum(AVIOContext *s,
unsigned long (*update_checksum)(unsigned long c, const uint8_t *p, unsigned int len),
unsigned long checksum)
{
ffio_init_checksum(s, update_checksum, checksum);
}
unsigned long get_checksum(AVIOContext *s)
{
return ffio_get_checksum(s);
}
int url_open_dyn_buf(AVIOContext **s)
{
return avio_open_dyn_buf(s);
}
int url_open_dyn_packet_buf(AVIOContext **s, int max_packet_size)
{
return ffio_open_dyn_packet_buf(s, max_packet_size);
}
int url_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer)
{
return avio_close_dyn_buf(s, pbuffer);
}
int url_fdopen(AVIOContext **s, URLContext *h)
{
return ffio_fdopen(s, h);
}
#endif
int avio_put_str(AVIOContext *s, const char *str)
{
int len = 1;
@ -557,15 +367,6 @@ void avio_wb24(AVIOContext *s, unsigned int val)
avio_w8(s, val);
}
#if FF_API_OLD_AVIO
void put_tag(AVIOContext *s, const char *tag)
{
while (*tag) {
avio_w8(s, *tag++);
}
}
#endif
/* Input stream */
static void fill_buffer(AVIOContext *s)
@ -647,17 +448,6 @@ int avio_r8(AVIOContext *s)
return 0;
}
#if FF_API_OLD_AVIO
int url_fgetc(AVIOContext *s)
{
if (s->buf_ptr >= s->buf_end)
fill_buffer(s);
if (s->buf_ptr < s->buf_end)
return *s->buf_ptr++;
return URL_EOF;
}
#endif
int avio_read(AVIOContext *s, unsigned char *buf, int size)
{
int len, size1;
@ -783,14 +573,6 @@ unsigned int avio_rb32(AVIOContext *s)
return val;
}
#if FF_API_OLD_AVIO
char *get_strz(AVIOContext *s, char *buf, int maxlen)
{
avio_get_str(s, INT_MAX, buf, maxlen);
return buf;
}
#endif
int ff_get_line(AVIOContext *s, char *buf, int maxlen)
{
int i = 0;
@ -888,19 +670,13 @@ int ffio_fdopen(AVIOContext **s, URLContext *h)
av_free(buffer);
return AVERROR(ENOMEM);
}
#if FF_API_OLD_AVIO
(*s)->is_streamed = h->is_streamed;
#endif
(*s)->seekable = h->is_streamed ? 0 : AVIO_SEEKABLE_NORMAL;
(*s)->max_packet_size = max_packet_size;
if(h->prot) {
(*s)->read_pause = (int (*)(void *, int))h->prot->url_read_pause;
(*s)->read_seek = (int64_t (*)(void *, int, int64_t, int))h->prot->url_read_seek;
}
#if !FF_API_OLD_AVIO
(*s)->av_class = &ffio_url_class;
#endif
return 0;
}
@ -1003,13 +779,6 @@ int avio_close(AVIOContext *s)
return ffurl_close(h);
}
#if FF_API_OLD_AVIO
URLContext *url_fileno(AVIOContext *s)
{
return s->opaque;
}
#endif
int avio_printf(AVIOContext *s, const char *fmt, ...)
{
va_list ap;
@ -1023,34 +792,6 @@ int avio_printf(AVIOContext *s, const char *fmt, ...)
return ret;
}
#if FF_API_OLD_AVIO
char *url_fgets(AVIOContext *s, char *buf, int buf_size)
{
int c;
char *q;
c = avio_r8(s);
if (url_feof(s))
return NULL;
q = buf;
for(;;) {
if (url_feof(s) || c == '\n')
break;
if ((q - buf) < buf_size - 1)
*q++ = c;
c = avio_r8(s);
}
if (buf_size > 0)
*q = '\0';
return buf;
}
int url_fget_max_packet_size(AVIOContext *s)
{
return s->max_packet_size;
}
#endif
int avio_pause(AVIOContext *s, int pause)
{
if (!s->read_pause)
@ -1078,29 +819,6 @@ int64_t avio_seek_time(AVIOContext *s, int stream_index,
return ret;
}
/* buffer handling */
#if FF_API_OLD_AVIO
int url_open_buf(AVIOContext **s, uint8_t *buf, int buf_size, int flags)
{
int ret;
*s = av_mallocz(sizeof(AVIOContext));
if(!*s)
return AVERROR(ENOMEM);
ret = ffio_init_context(*s, buf, buf_size,
flags & AVIO_FLAG_WRITE,
NULL, NULL, NULL, NULL);
if(ret != 0)
av_freep(s);
return ret;
}
int url_close_buf(AVIOContext *s)
{
avio_flush(s);
return s->buf_ptr - s->buffer;
}
#endif
/* output in a dynamic buffer */
typedef struct DynBuffer {

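A hedged sketch of the avio_alloc_context() path that replaces the init_put_byte()/av_alloc_put_byte() wrappers removed from aviobuf.c above: a read-only AVIOContext over an in-memory buffer. The MemReader type, the 4096-byte avio buffer and the callback name are illustrative choices only.

#include <string.h>
#include <libavformat/avio.h>
#include <libavutil/mem.h>

typedef struct MemReader { const uint8_t *data; int size, pos; } MemReader;

/* Read callback handed to avio_alloc_context(); copies up to buf_size bytes. */
static int mem_read(void *opaque, uint8_t *buf, int buf_size)
{
    MemReader *r = opaque;
    int n = FFMIN(buf_size, r->size - r->pos);
    if (n <= 0)
        return 0;                 /* no more data */
    memcpy(buf, r->data + r->pos, n);
    r->pos += n;
    return n;
}

static AVIOContext *open_mem_reader(MemReader *r)
{
    unsigned char *buf = av_malloc(4096);
    if (!buf)
        return NULL;
    /* write_flag = 0, no write or seek callbacks */
    return avio_alloc_context(buf, 4096, 0, r, mem_read, NULL, NULL);
}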
@ -41,7 +41,7 @@ typedef struct {
int next_stream;
} AVISynthContext;
static int avisynth_read_header(AVFormatContext *s, AVFormatParameters *ap)
static int avisynth_read_header(AVFormatContext *s)
{
AVISynthContext *avs = s->priv_data;
HRESULT res;

@ -55,7 +55,7 @@ static int avs_probe(AVProbeData * p)
return 0;
}
static int avs_read_header(AVFormatContext * s, AVFormatParameters * ap)
static int avs_read_header(AVFormatContext * s)
{
AvsFormat *avs = s->priv_data;

@ -57,8 +57,7 @@ static int vid_probe(AVProbeData *p)
return AVPROBE_SCORE_MAX;
}
static int vid_read_header(AVFormatContext *s,
AVFormatParameters *ap)
static int vid_read_header(AVFormatContext *s)
{
BVID_DemuxContext *vid = s->priv_data;
AVIOContext *pb = s->pb;

@ -47,7 +47,7 @@ static int bfi_probe(AVProbeData * p)
return 0;
}
static int bfi_read_header(AVFormatContext * s, AVFormatParameters * ap)
static int bfi_read_header(AVFormatContext * s)
{
BFIContext *bfi = s->priv_data;
AVIOContext *pb = s->pb;

@ -68,7 +68,7 @@ static int probe(AVProbeData *p)
return 0;
}
static int read_header(AVFormatContext *s, AVFormatParameters *ap)
static int read_header(AVFormatContext *s)
{
BinkDemuxContext *bink = s->priv_data;
AVIOContext *pb = s->pb;

@ -99,8 +99,7 @@ static void predict_width(AVCodecContext *avctx, uint64_t fsize, int got_width)
avctx->width = fsize > 4000 ? (160<<3) : (80<<3);
}
static AVStream * init_stream(AVFormatContext *s,
AVFormatParameters *ap)
static AVStream * init_stream(AVFormatContext *s)
{
BinDemuxContext *bin = s->priv_data;
AVStream *st = avformat_new_stream(s, NULL);
@ -109,27 +108,26 @@ static AVStream * init_stream(AVFormatContext *s,
st->codec->codec_tag = 0;
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
if (!ap->time_base.num) {
// if (!ap->time_base.num) {
avpriv_set_pts_info(st, 60, 1, 25);
} else {
avpriv_set_pts_info(st, 60, ap->time_base.num, ap->time_base.den);
}
// } else {
// avpriv_set_pts_info(st, 60, ap->time_base.num, ap->time_base.den);
// }
/* simulate tty display speed */
bin->chars_per_frame = FFMAX(av_q2d(st->time_base) * (ap->sample_rate ? ap->sample_rate : LINE_RATE), 1);
bin->chars_per_frame = FFMAX(av_q2d(st->time_base) * (/*ap->sample_rate ? ap->sample_rate :*/ LINE_RATE), 1);
st->codec->width = ap->width ? ap->width : (80<<3);
st->codec->height = ap->height ? ap->height : (25<<4);
st->codec->width = /*ap->width ? ap->width :*/ (80<<3);
st->codec->height = /*ap->height ? ap->height :*/ (25<<4);
return st;
}
static int bintext_read_header(AVFormatContext *s,
AVFormatParameters *ap)
static int bintext_read_header(AVFormatContext *s)
{
BinDemuxContext *bin = s->priv_data;
AVIOContext *pb = s->pb;
AVStream *st = init_stream(s, ap);
AVStream *st = init_stream(s);
if (!st)
return AVERROR(ENOMEM);
st->codec->codec_id = CODEC_ID_BINTEXT;
@ -146,9 +144,9 @@ static int bintext_read_header(AVFormatContext *s,
bin->fsize = avio_size(pb);
if (ff_sauce_read(s, &bin->fsize, &got_width, 0) < 0)
next_tag_read(s, &bin->fsize);
if (!ap->width)
// if (!ap->width)
predict_width(st->codec, bin->fsize, got_width);
if (!ap->height)
// if (!ap->height)
calculate_height(st->codec, bin->fsize);
avio_seek(pb, 0, SEEK_SET);
}
@ -168,14 +166,13 @@ static int xbin_probe(AVProbeData *p)
return 0;
}
static int xbin_read_header(AVFormatContext *s,
AVFormatParameters *ap)
static int xbin_read_header(AVFormatContext *s)
{
BinDemuxContext *bin = s->priv_data;
AVIOContext *pb = s->pb;
char fontheight, flags;
AVStream *st = init_stream(s, ap);
AVStream *st = init_stream(s);
if (!st)
return AVERROR(ENOMEM);
@ -212,8 +209,7 @@ static int xbin_read_header(AVFormatContext *s,
#endif /* CONFIG_XBIN_DEMUXER */
#if CONFIG_ADF_DEMUXER
static int adf_read_header(AVFormatContext *s,
AVFormatParameters *ap)
static int adf_read_header(AVFormatContext *s)
{
BinDemuxContext *bin = s->priv_data;
AVIOContext *pb = s->pb;
@ -222,7 +218,7 @@ static int adf_read_header(AVFormatContext *s,
if (avio_r8(pb) != 1)
return AVERROR_INVALIDDATA;
st = init_stream(s, ap);
st = init_stream(s);
if (!st)
return AVERROR(ENOMEM);
st->codec->codec_id = CODEC_ID_BINTEXT;
@ -247,7 +243,7 @@ static int adf_read_header(AVFormatContext *s,
bin->fsize = avio_size(pb) - 1 - 192 - 4096;
st->codec->width = 80<<3;
ff_sauce_read(s, &bin->fsize, &got_width, 0);
if (!ap->height)
// if (!ap->height)
calculate_height(st->codec, bin->fsize);
avio_seek(pb, 1 + 192 + 4096, SEEK_SET);
}
@ -269,8 +265,7 @@ static int idf_probe(AVProbeData *p)
return 0;
}
static int idf_read_header(AVFormatContext *s,
AVFormatParameters *ap)
static int idf_read_header(AVFormatContext *s)
{
BinDemuxContext *bin = s->priv_data;
AVIOContext *pb = s->pb;
@ -280,7 +275,7 @@ static int idf_read_header(AVFormatContext *s,
if (!pb->seekable)
return AVERROR(EIO);
st = init_stream(s, ap);
st = init_stream(s);
if (!st)
return AVERROR(ENOMEM);
st->codec->codec_id = CODEC_ID_IDF;
@ -301,7 +296,7 @@ static int idf_read_header(AVFormatContext *s,
bin->fsize = avio_size(pb) - 12 - 4096 - 48;
ff_sauce_read(s, &bin->fsize, &got_width, 0);
if (!ap->height)
// if (!ap->height)
calculate_height(st->codec, bin->fsize);
avio_seek(pb, 12, SEEK_SET);
return 0;

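The demuxer read_header() callbacks in the hunks above lose their AVFormatParameters argument; on the caller side the equivalent hints now travel as options to avformat_open_input(). A hedged sketch follows; "video_size" and "framerate" are private options that only some demuxers expose, used here purely as an illustration.

#include <libavformat/avformat.h>

static int open_with_hints(AVFormatContext **ic, const char *url)
{
    AVDictionary *opts = NULL;
    int ret;

    *ic = NULL; /* let avformat_open_input() allocate the context */
    av_dict_set(&opts, "video_size", "640x480", 0);
    av_dict_set(&opts, "framerate",  "25",      0);
    ret = avformat_open_input(ic, url, NULL, &opts);
    av_dict_free(&opts); /* entries left in opts were not recognised by the demuxer */
    return ret;
}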
@ -47,7 +47,7 @@ static int probe(AVProbeData *p)
return AVPROBE_SCORE_MAX/2;
}
static int read_header(AVFormatContext *s, AVFormatParameters *ap)
static int read_header(AVFormatContext *s)
{
AVStream* st;

Some files were not shown because too many files have changed in this diff