Merge commit 'def97856de6021965db86c25a732d78689bd6bb0'

* commit 'def97856de6021965db86c25a732d78689bd6bb0':
  lavc: AV-prefix all codec capabilities

Conflicts:
	cmdutils.c
	ffmpeg.c
	ffplay.c
	libavcodec/8svx.c
	libavcodec/aacenc.c
	libavcodec/ac3dec.c
	libavcodec/adpcm.c
	libavcodec/alac.c
	libavcodec/atrac3plusdec.c
	libavcodec/bink.c
	libavcodec/dnxhddec.c
	libavcodec/dvdec.c
	libavcodec/dvenc.c
	libavcodec/ffv1dec.c
	libavcodec/ffv1enc.c
	libavcodec/fic.c
	libavcodec/flacdec.c
	libavcodec/flacenc.c
	libavcodec/flvdec.c
	libavcodec/fraps.c
	libavcodec/frwu.c
	libavcodec/gifdec.c
	libavcodec/h261dec.c
	libavcodec/hevc.c
	libavcodec/iff.c
	libavcodec/imc.c
	libavcodec/libopenjpegdec.c
	libavcodec/libvo-aacenc.c
	libavcodec/libvorbisenc.c
	libavcodec/libvpxdec.c
	libavcodec/libvpxenc.c
	libavcodec/libx264.c
	libavcodec/mjpegbdec.c
	libavcodec/mjpegdec.c
	libavcodec/mpegaudiodec_float.c
	libavcodec/msmpeg4dec.c
	libavcodec/mxpegdec.c
	libavcodec/nvenc_h264.c
	libavcodec/nvenc_hevc.c
	libavcodec/pngdec.c
	libavcodec/qpeg.c
	libavcodec/ra288.c
	libavcodec/rv10.c
	libavcodec/s302m.c
	libavcodec/sp5xdec.c
	libavcodec/takdec.c
	libavcodec/tiff.c
	libavcodec/tta.c
	libavcodec/utils.c
	libavcodec/v210dec.c
	libavcodec/vp6.c
	libavcodec/vp9.c
	libavcodec/wavpack.c
	libavcodec/yop.c

Merged-by: Michael Niedermayer <michael@niedermayer.cc>
commit 444e9874a7 (pull/161/head)
 cmdutils.c                        | 22
 doc/examples/decoding_encoding.c  |  2
 doc/examples/muxing.c             |  2
 doc/examples/transcoding.c        |  2
 doc/multithreading.txt            |  2
 ffmpeg.c                          |  8
 ffmpeg_filter.c                   |  4
 ffplay.c                          |  2
 libavcodec/012v.c                 |  2
 libavcodec/4xm.c                  |  2
 libavcodec/8bps.c                 |  2
 libavcodec/8svx.c                 |  4
 libavcodec/a64multienc.c          |  4
 libavcodec/aacdec.c               |  4
 libavcodec/aacdec_fixed.c         |  2
 libavcodec/aacenc.c               |  4
 libavcodec/aasc.c                 |  2
 libavcodec/ac3dec_fixed.c         |  2
 libavcodec/ac3dec_float.c         |  4
 libavcodec/adpcm.c                |  2
 libavcodec/adxdec.c               |  2
 libavcodec/aic.c                  |  2
 libavcodec/alac.c                 |  2
 libavcodec/alacenc.c              |  2
 libavcodec/aliaspixdec.c          |  2
 libavcodec/alsdec.c               |  2
 libavcodec/amrnbdec.c             |  2
 libavcodec/amrwbdec.c             |  2
 libavcodec/anm.c                  |  2
 libavcodec/ansi.c                 |  2
 libavcodec/apedec.c               |  3
 libavcodec/asvdec.c               |  4
 libavcodec/atrac1.c               |  2
 libavcodec/atrac3.c               |  2
 libavcodec/atrac3plusdec.c        |  2
 libavcodec/aura.c                 |  2
 libavcodec/avcodec.h              | 134
 libavcodec/avrndec.c              |  2
 libavcodec/avs.c                  |  2
 libavcodec/avuidec.c              |  2
 libavcodec/avuienc.c              |  2
 libavcodec/bethsoftvideo.c        |  2
 libavcodec/bfi.c                  |  2
 libavcodec/bink.c                 |  2
 libavcodec/binkaudio.c            |  4
 libavcodec/bintext.c              |  6
 libavcodec/bmp.c                  |  2
 libavcodec/bmvaudio.c             |  2
 libavcodec/bmvvideo.c             |  2
 libavcodec/brenderpix.c           |  2
 libavcodec/c93.c                  |  2
 libavcodec/cavsdec.c              |  2
 libavcodec/cdgraphics.c           |  2
 libavcodec/cdxl.c                 |  2
 libavcodec/cinepak.c              |  2
 libavcodec/cljrdec.c              |  2
 libavcodec/cllc.c                 |  2
 libavcodec/cngdec.c               |  2
 libavcodec/cook.c                 |  2
 libavcodec/cpia.c                 |  2
 libavcodec/crystalhd.c            | 12
 libavcodec/cscd.c                 |  2
 libavcodec/cyuv.c                 |  4
 libavcodec/dcadec.c               |  2
 libavcodec/dcaenc.c               |  2
 libavcodec/dds.c                  |  2
 libavcodec/dfa.c                  |  2
 libavcodec/diracdec.c             |  2
 libavcodec/dnxhddec.c             |  2
 libavcodec/dnxhdenc.c             |  2
 libavcodec/dpcm.c                 |  2
 libavcodec/dpx.c                  |  2
 libavcodec/dsicinaudio.c          |  2
 libavcodec/dsicinvideo.c          |  2
 libavcodec/dss_sp.c               |  2
 libavcodec/dvdec.c                |  2
 libavcodec/dvenc.c                |  2
 libavcodec/dxa.c                  |  2
 libavcodec/dxtory.c               |  2
 libavcodec/eacmv.c                |  2
 libavcodec/eamad.c                |  2
 libavcodec/eatgq.c                |  2
 libavcodec/eatgv.c                |  2
 libavcodec/eatqi.c                |  2
 libavcodec/error_resilience.c     |  2
 libavcodec/escape124.c            |  2
 libavcodec/escape130.c            |  2
 libavcodec/evrcdec.c              |  2
 libavcodec/exr.c                  |  4
 libavcodec/ffv1dec.c              |  4
 libavcodec/ffv1enc.c              |  2
 libavcodec/ffwavesynth.c          |  2
 libavcodec/fic.c                  |  2
 libavcodec/flacdec.c              |  2
 libavcodec/flacenc.c              |  2
 libavcodec/flashsv.c              |  4
 libavcodec/flicvideo.c            |  2
 libavcodec/flvdec.c               |  2
 libavcodec/frame_thread_encoder.c |  2
 libavcodec/fraps.c                |  2
 (Some files were not shown because too many files have changed in this diff.)

diff --git a/cmdutils.c b/cmdutils.c
@@ -1324,12 +1324,12 @@ static void print_codec(const AVCodec *c)
     if (c->type == AVMEDIA_TYPE_VIDEO ||
         c->type == AVMEDIA_TYPE_AUDIO) {
         printf(" Threading capabilities: ");
-        switch (c->capabilities & (CODEC_CAP_FRAME_THREADS |
-                                   CODEC_CAP_SLICE_THREADS)) {
-        case CODEC_CAP_FRAME_THREADS |
-             CODEC_CAP_SLICE_THREADS: printf("frame and slice"); break;
-        case CODEC_CAP_FRAME_THREADS: printf("frame"); break;
-        case CODEC_CAP_SLICE_THREADS: printf("slice"); break;
+        switch (c->capabilities & (AV_CODEC_CAP_FRAME_THREADS |
+                                   AV_CODEC_CAP_SLICE_THREADS)) {
+        case AV_CODEC_CAP_FRAME_THREADS |
+             AV_CODEC_CAP_SLICE_THREADS: printf("frame and slice"); break;
+        case AV_CODEC_CAP_FRAME_THREADS: printf("frame"); break;
+        case AV_CODEC_CAP_SLICE_THREADS: printf("slice"); break;
         default: printf("no"); break;
         }
         printf("\n");
@@ -1503,11 +1503,11 @@ static void print_codecs(int encoder)
         while ((codec = next_codec_for_id(desc->id, codec, encoder))) {
             printf(" %c", get_media_type_char(desc->type));
-            printf((codec->capabilities & CODEC_CAP_FRAME_THREADS) ? "F" : ".");
-            printf((codec->capabilities & CODEC_CAP_SLICE_THREADS) ? "S" : ".");
-            printf((codec->capabilities & CODEC_CAP_EXPERIMENTAL) ? "X" : ".");
-            printf((codec->capabilities & CODEC_CAP_DRAW_HORIZ_BAND)?"B" : ".");
-            printf((codec->capabilities & CODEC_CAP_DR1) ? "D" : ".");
+            printf((codec->capabilities & AV_CODEC_CAP_FRAME_THREADS) ? "F" : ".");
+            printf((codec->capabilities & AV_CODEC_CAP_SLICE_THREADS) ? "S" : ".");
+            printf((codec->capabilities & AV_CODEC_CAP_EXPERIMENTAL) ? "X" : ".");
+            printf((codec->capabilities & AV_CODEC_CAP_DRAW_HORIZ_BAND)?"B" : ".");
+            printf((codec->capabilities & AV_CODEC_CAP_DR1) ? "D" : ".");
             printf(" %-20s %s", codec->name, codec->long_name ? codec->long_name : "");
             if (strcmp(codec->name, desc->name))

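The two cmdutils.c hunks above only rename the macros; the capability bits themselves are unchanged. As a stand-alone illustration (not part of this commit; the choice of H.264 and the printed labels are purely illustrative), querying those bits through the public API of this libavcodec generation looks roughly like this:

    #include <stdio.h>
    #include <libavcodec/avcodec.h>

    int main(void)
    {
        avcodec_register_all();               /* still required in this API generation */
        AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);
        if (!codec)
            return 1;
        /* Same bits as before the rename, only the macro names gained the AV_ prefix. */
        printf("frame threads: %c\n", (codec->capabilities & AV_CODEC_CAP_FRAME_THREADS)   ? 'F' : '.');
        printf("slice threads: %c\n", (codec->capabilities & AV_CODEC_CAP_SLICE_THREADS)   ? 'S' : '.');
        printf("experimental:  %c\n", (codec->capabilities & AV_CODEC_CAP_EXPERIMENTAL)    ? 'X' : '.');
        printf("draw band:     %c\n", (codec->capabilities & AV_CODEC_CAP_DRAW_HORIZ_BAND) ? 'B' : '.');
        printf("direct render: %c\n", (codec->capabilities & AV_CODEC_CAP_DR1)             ? 'D' : '.');
        return 0;
    }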
diff --git a/doc/examples/decoding_encoding.c b/doc/examples/decoding_encoding.c
@@ -561,7 +561,7 @@ static void video_decode_example(const char *outfilename, const char *filename)
         exit(1);
     }
-    if(codec->capabilities&CODEC_CAP_TRUNCATED)
+    if (codec->capabilities & AV_CODEC_CAP_TRUNCATED)
         c->flags |= AV_CODEC_FLAG_TRUNCATED; // we do not send complete frames
     /* For some codecs, such as msmpeg4 and mpeg4, width and height

diff --git a/doc/examples/muxing.c b/doc/examples/muxing.c
@@ -230,7 +230,7 @@ static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, A
     /* increment frequency by 110 Hz per second */
     ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
-    if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
+    if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
         nb_samples = 10000;
     else
         nb_samples = c->frame_size;

diff --git a/doc/examples/transcoding.c b/doc/examples/transcoding.c
@@ -449,7 +449,7 @@ static int flush_encoder(unsigned int stream_index)
     int got_frame;
     if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
-          CODEC_CAP_DELAY))
+          AV_CODEC_CAP_DELAY))
         return 0;
     while (1) {

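The transcoding.c hunk above guards the encoder drain loop with the renamed AV_CODEC_CAP_DELAY. A self-contained sketch of that drain pattern with the encode API of this period (avcodec_encode_video2; the FILE-based output and minimal error handling are simplifications, not part of the commit):

    #include <stdio.h>
    #include <libavcodec/avcodec.h>

    /* Send NULL frames until a delayed encoder stops producing packets. */
    static int drain_encoder(AVCodecContext *enc, FILE *out)
    {
        if (!(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
            return 0;                  /* nothing is buffered inside the encoder */
        for (;;) {
            AVPacket pkt;
            int got_packet = 0;
            av_init_packet(&pkt);
            pkt.data = NULL;
            pkt.size = 0;
            if (avcodec_encode_video2(enc, &pkt, NULL, &got_packet) < 0)
                return -1;
            if (!got_packet)
                return 0;              /* encoder fully drained */
            fwrite(pkt.data, 1, pkt.size, out);
            av_free_packet(&pkt);
        }
    }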
diff --git a/doc/multithreading.txt b/doc/multithreading.txt
@@ -54,7 +54,7 @@ thread.
 If the codec allocates writable tables in its init(), add an init_thread_copy()
 which re-allocates them for other threads.
-Add CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little
+Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little
 speed gain at this point but it should work.
 If there are inter-frame dependencies, so the codec calls

diff --git a/ffmpeg.c b/ffmpeg.c
@@ -1198,7 +1198,7 @@ static void do_video_out(AVFormatContext *s,
                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
     }
-    if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
+    if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
         pkt.pts = ost->sync_opts;
     av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
@@ -1377,7 +1377,7 @@ static int reap_filters(int flush)
             do_video_out(of->ctx, ost, filtered_frame, float_pts);
             break;
         case AVMEDIA_TYPE_AUDIO:
-            if (!(enc->codec->capabilities & CODEC_CAP_PARAM_CHANGE) &&
+            if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                 enc->channels != av_frame_get_channels(filtered_frame)) {
                 av_log(NULL, AV_LOG_ERROR,
                        "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
@@ -2297,7 +2297,7 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt)
         ist->dts = ist->next_dts;
         if (avpkt.size && avpkt.size != pkt->size &&
-            !(ist->dec->capabilities & CODEC_CAP_SUBFRAMES)) {
+            !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
             av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
                    "Multiple frames in a packet from stream %d\n", pkt->stream_index);
             ist->showed_multi_packet_warning = 1;
@@ -2592,7 +2592,7 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len)
             return ret;
     }
     if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
-        !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
+        !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
         av_buffersink_set_frame_size(ost->filter->filter,
                                      ost->enc_ctx->frame_size);
     assert_avoptions(ost->encoder_opts);

diff --git a/ffmpeg_filter.c b/ffmpeg_filter.c
@@ -85,7 +85,7 @@ void choose_sample_fmt(AVStream *st, AVCodec *codec)
                 break;
         }
         if (*p == -1) {
-            if((codec->capabilities & CODEC_CAP_LOSSLESS) && av_get_sample_fmt_name(st->codec->sample_fmt) > av_get_sample_fmt_name(codec->sample_fmts[0]))
+            if((codec->capabilities & AV_CODEC_CAP_LOSSLESS) && av_get_sample_fmt_name(st->codec->sample_fmt) > av_get_sample_fmt_name(codec->sample_fmts[0]))
                 av_log(NULL, AV_LOG_ERROR, "Conversion will not be lossless.\n");
             if(av_get_sample_fmt_name(st->codec->sample_fmt))
                 av_log(NULL, AV_LOG_WARNING,
@@ -1036,7 +1036,7 @@ int configure_filtergraph(FilterGraph *fg)
         OutputStream *ost = fg->outputs[i]->ost;
         if (ost &&
             ost->enc->type == AVMEDIA_TYPE_AUDIO &&
-            !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
+            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
             av_buffersink_set_frame_size(ost->filter->filter,
                                          ost->enc_ctx->frame_size);
     }

diff --git a/ffplay.c b/ffplay.c
@@ -2698,7 +2698,7 @@ static int stream_component_open(VideoState *is, int stream_index)
     if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
     if (fast)
         avctx->flags2 |= AV_CODEC_FLAG2_FAST;
-    if(codec->capabilities & CODEC_CAP_DR1)
+    if(codec->capabilities & AV_CODEC_CAP_DR1)
         avctx->flags |= CODEC_FLAG_EMU_EDGE;
     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);

diff --git a/libavcodec/012v.c b/libavcodec/012v.c
@@ -151,5 +151,5 @@ AVCodec ff_zero12v_decoder = {
     .id = AV_CODEC_ID_012V,
     .init = zero12v_decode_init,
     .decode = zero12v_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/4xm.c b/libavcodec/4xm.c
@@ -1026,5 +1026,5 @@ AVCodec ff_fourxm_decoder = {
     .init = decode_init,
     .close = decode_end,
     .decode = decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/8bps.c b/libavcodec/8bps.c
@@ -184,5 +184,5 @@ AVCodec ff_eightbps_decoder = {
     .priv_data_size = sizeof(EightBpsContext),
     .init = decode_init,
     .decode = decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/8svx.c b/libavcodec/8svx.c
@@ -194,7 +194,7 @@ AVCodec ff_eightsvx_fib_decoder = {
     .init = eightsvx_decode_init,
     .decode = eightsvx_decode_frame,
     .close = eightsvx_decode_close,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
     .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
                                                    AV_SAMPLE_FMT_NONE },
 };
@@ -209,7 +209,7 @@ AVCodec ff_eightsvx_exp_decoder = {
     .init = eightsvx_decode_init,
     .decode = eightsvx_decode_frame,
     .close = eightsvx_decode_close,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
     .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
                                                    AV_SAMPLE_FMT_NONE },
 };

diff --git a/libavcodec/a64multienc.c b/libavcodec/a64multienc.c
@@ -406,7 +406,7 @@ AVCodec ff_a64multi_encoder = {
     .encode2 = a64multi_encode_frame,
     .close = a64multi_close_encoder,
     .pix_fmts = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
-    .capabilities = CODEC_CAP_DELAY,
+    .capabilities = AV_CODEC_CAP_DELAY,
 };
 #endif
 #if CONFIG_A64MULTI5_ENCODER
@@ -420,6 +420,6 @@ AVCodec ff_a64multi5_encoder = {
     .encode2 = a64multi_encode_frame,
     .close = a64multi_close_encoder,
     .pix_fmts = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
-    .capabilities = CODEC_CAP_DELAY,
+    .capabilities = AV_CODEC_CAP_DELAY,
 };
 #endif

diff --git a/libavcodec/aacdec.c b/libavcodec/aacdec.c
@@ -550,7 +550,7 @@ AVCodec ff_aac_decoder = {
     .sample_fmts = (const enum AVSampleFormat[]) {
         AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE
     },
-    .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_CHANNEL_CONF | AV_CODEC_CAP_DR1,
     .channel_layouts = aac_channel_layout,
     .flush = flush,
     .priv_class = &aac_decoder_class,
@@ -574,7 +574,7 @@ AVCodec ff_aac_latm_decoder = {
     .sample_fmts = (const enum AVSampleFormat[]) {
         AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE
     },
-    .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_CHANNEL_CONF | AV_CODEC_CAP_DR1,
     .channel_layouts = aac_channel_layout,
     .flush = flush,
     .profiles = profiles,

diff --git a/libavcodec/aacdec_fixed.c b/libavcodec/aacdec_fixed.c
@@ -437,7 +437,7 @@ AVCodec ff_aac_fixed_decoder = {
     .sample_fmts = (const enum AVSampleFormat[]) {
         AV_SAMPLE_FMT_S32P, AV_SAMPLE_FMT_NONE
     },
-    .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_CHANNEL_CONF | AV_CODEC_CAP_DR1,
     .channel_layouts = aac_channel_layout,
     .flush = flush,
 };

diff --git a/libavcodec/aacenc.c b/libavcodec/aacenc.c
@@ -943,8 +943,8 @@ AVCodec ff_aac_encoder = {
     .encode2 = aac_encode_frame,
     .close = aac_encode_end,
     .supported_samplerates = mpeg4audio_sample_rates,
-    .capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY |
-                    CODEC_CAP_EXPERIMENTAL,
+    .capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_DELAY |
+                    AV_CODEC_CAP_EXPERIMENTAL,
     .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLTP,
                                                   AV_SAMPLE_FMT_NONE },
     .priv_class = &aacenc_class,

diff --git a/libavcodec/aasc.c b/libavcodec/aasc.c
@@ -158,5 +158,5 @@ AVCodec ff_aasc_decoder = {
     .init = aasc_decode_init,
     .close = aasc_decode_end,
     .decode = aasc_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/ac3dec_fixed.c b/libavcodec/ac3dec_fixed.c
@@ -188,7 +188,7 @@ AVCodec ff_ac3_fixed_decoder = {
     .init = ac3_decode_init,
     .close = ac3_decode_end,
     .decode = ac3_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
     .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
                                                    AV_SAMPLE_FMT_NONE },

diff --git a/libavcodec/ac3dec_float.c b/libavcodec/ac3dec_float.c
@@ -60,7 +60,7 @@ AVCodec ff_ac3_decoder = {
     .init = ac3_decode_init,
     .close = ac3_decode_end,
     .decode = ac3_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
     .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
                                                    AV_SAMPLE_FMT_NONE },
@@ -83,7 +83,7 @@ AVCodec ff_eac3_decoder = {
     .init = ac3_decode_init,
     .close = ac3_decode_end,
     .decode = ac3_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52B (AC-3, E-AC-3)"),
     .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
                                                    AV_SAMPLE_FMT_NONE },

diff --git a/libavcodec/adpcm.c b/libavcodec/adpcm.c
@@ -1591,7 +1591,7 @@ AVCodec ff_ ## name_ ## _decoder = { \
     .init = adpcm_decode_init, \
     .decode = adpcm_decode_frame, \
     .flush = adpcm_flush, \
-    .capabilities = CODEC_CAP_DR1, \
+    .capabilities = AV_CODEC_CAP_DR1, \
     .sample_fmts = sample_fmts_, \
 }

diff --git a/libavcodec/adxdec.c b/libavcodec/adxdec.c
@@ -183,7 +183,7 @@ AVCodec ff_adpcm_adx_decoder = {
     .init = adx_decode_init,
     .decode = adx_decode_frame,
     .flush = adx_decode_flush,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
     .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
                                                    AV_SAMPLE_FMT_NONE },
 };

diff --git a/libavcodec/aic.c b/libavcodec/aic.c
@@ -488,5 +488,5 @@ AVCodec ff_aic_decoder = {
     .init = aic_decode_init,
     .close = aic_decode_close,
     .decode = aic_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/alac.c b/libavcodec/alac.c
@@ -674,6 +674,6 @@ AVCodec ff_alac_decoder = {
     .close = alac_decode_close,
     .decode = alac_decode_frame,
     .init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy),
-    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
+    .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
     .priv_class = &alac_class
 };

diff --git a/libavcodec/alacenc.c b/libavcodec/alacenc.c
@@ -653,7 +653,7 @@ AVCodec ff_alac_encoder = {
     .init = alac_encode_init,
     .encode2 = alac_encode_frame,
     .close = alac_encode_close,
-    .capabilities = CODEC_CAP_SMALL_LAST_FRAME,
+    .capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME,
     .channel_layouts = ff_alac_channel_layouts,
     .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S32P,
                                                   AV_SAMPLE_FMT_S16P,

diff --git a/libavcodec/aliaspixdec.c b/libavcodec/aliaspixdec.c
@@ -124,5 +124,5 @@ AVCodec ff_alias_pix_decoder = {
     .type = AVMEDIA_TYPE_VIDEO,
     .id = AV_CODEC_ID_ALIAS_PIX,
     .decode = decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/alsdec.c b/libavcodec/alsdec.c
@@ -1853,5 +1853,5 @@ AVCodec ff_als_decoder = {
     .close = decode_end,
     .decode = decode_frame,
     .flush = flush,
-    .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/amrnbdec.c b/libavcodec/amrnbdec.c
@@ -1088,7 +1088,7 @@ AVCodec ff_amrnb_decoder = {
     .priv_data_size = sizeof(AMRContext),
     .init = amrnb_decode_init,
     .decode = amrnb_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
     .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT,
                                                   AV_SAMPLE_FMT_NONE },
 };

diff --git a/libavcodec/amrwbdec.c b/libavcodec/amrwbdec.c
@@ -1273,7 +1273,7 @@ AVCodec ff_amrwb_decoder = {
     .priv_data_size = sizeof(AMRWBContext),
     .init = amrwb_decode_init,
     .decode = amrwb_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
     .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT,
                                                   AV_SAMPLE_FMT_NONE },
 };

diff --git a/libavcodec/anm.c b/libavcodec/anm.c
@@ -198,5 +198,5 @@ AVCodec ff_anm_decoder = {
     .init = decode_init,
     .close = decode_end,
     .decode = decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/ansi.c b/libavcodec/ansi.c
@@ -478,5 +478,5 @@ AVCodec ff_ansi_decoder = {
     .init = decode_init,
     .close = decode_close,
     .decode = decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/apedec.c b/libavcodec/apedec.c
@@ -1570,7 +1570,8 @@ AVCodec ff_ape_decoder = {
     .init = ape_decode_init,
     .close = ape_decode_close,
     .decode = ape_decode_frame,
-    .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY | CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DELAY |
+                    AV_CODEC_CAP_DR1,
     .flush = ape_flush,
     .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
                                                    AV_SAMPLE_FMT_S16P,

diff --git a/libavcodec/asvdec.c b/libavcodec/asvdec.c
@@ -322,7 +322,7 @@ AVCodec ff_asv1_decoder = {
     .init = decode_init,
     .close = decode_end,
     .decode = decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };
 #endif
@@ -336,6 +336,6 @@ AVCodec ff_asv2_decoder = {
     .init = decode_init,
     .close = decode_end,
     .decode = decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };
 #endif

diff --git a/libavcodec/atrac1.c b/libavcodec/atrac1.c
@@ -386,7 +386,7 @@ AVCodec ff_atrac1_decoder = {
     .init = atrac1_decode_init,
     .close = atrac1_decode_end,
     .decode = atrac1_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
     .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
                                                    AV_SAMPLE_FMT_NONE },
 };

diff --git a/libavcodec/atrac3.c b/libavcodec/atrac3.c
@@ -934,7 +934,7 @@ AVCodec ff_atrac3_decoder = {
     .init = atrac3_decode_init,
     .close = atrac3_decode_close,
     .decode = atrac3_decode_frame,
-    .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
     .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
                                                    AV_SAMPLE_FMT_NONE },
 };

diff --git a/libavcodec/atrac3plusdec.c b/libavcodec/atrac3plusdec.c
@@ -392,7 +392,7 @@ AVCodec ff_atrac3p_decoder = {
     .long_name = NULL_IF_CONFIG_SMALL("ATRAC3+ (Adaptive TRansform Acoustic Coding 3+)"),
     .type = AVMEDIA_TYPE_AUDIO,
     .id = AV_CODEC_ID_ATRAC3P,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
     .priv_data_size = sizeof(ATRAC3PContext),
     .init = atrac3p_decode_init,
     .close = atrac3p_decode_close,

diff --git a/libavcodec/aura.c b/libavcodec/aura.c
@@ -104,5 +104,5 @@ AVCodec ff_aura2_decoder = {
     .id = AV_CODEC_ID_AURA2,
     .init = aura_decode_init,
     .decode = aura_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
@@ -826,6 +826,112 @@ typedef struct RcOverride{
  */
 #define AV_CODEC_FLAG2_SKIP_MANUAL 0x20000000
+/* Unsupported options :
+ * Syntax Arithmetic coding (SAC)
+ * Reference Picture Selection
+ * Independent Segment Decoding */
+/* /Fx */
+/* codec capabilities */
+/**
+ * Decoder can use draw_horiz_band callback.
+ */
+#define AV_CODEC_CAP_DRAW_HORIZ_BAND (1 << 0)
+/**
+ * Codec uses get_buffer() for allocating buffers and supports custom allocators.
+ * If not set, it might not use get_buffer() at all or use operations that
+ * assume the buffer was allocated by avcodec_default_get_buffer.
+ */
+#define AV_CODEC_CAP_DR1 (1 << 1)
+#define AV_CODEC_CAP_TRUNCATED (1 << 3)
+/**
+ * Encoder or decoder requires flushing with NULL input at the end in order to
+ * give the complete and correct output.
+ *
+ * NOTE: If this flag is not set, the codec is guaranteed to never be fed with
+ *       with NULL data. The user can still send NULL data to the public encode
+ *       or decode function, but libavcodec will not pass it along to the codec
+ *       unless this flag is set.
+ *
+ * Decoders:
+ * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL,
+ * avpkt->size=0 at the end to get the delayed data until the decoder no longer
+ * returns frames.
+ *
+ * Encoders:
+ * The encoder needs to be fed with NULL data at the end of encoding until the
+ * encoder no longer returns data.
+ *
+ * NOTE: For encoders implementing the AVCodec.encode2() function, setting this
+ *       flag also means that the encoder must set the pts and duration for
+ *       each output packet. If this flag is not set, the pts and duration will
+ *       be determined by libavcodec from the input frame.
+ */
+#define AV_CODEC_CAP_DELAY (1 << 5)
+/**
+ * Codec can be fed a final frame with a smaller size.
+ * This can be used to prevent truncation of the last audio samples.
+ */
+#define AV_CODEC_CAP_SMALL_LAST_FRAME (1 << 6)
+#if FF_API_CAP_VDPAU
+/**
+ * Codec can export data for HW decoding (VDPAU).
+ */
+#define AV_CODEC_CAP_HWACCEL_VDPAU (1 << 7)
+#endif
+/**
+ * Codec can output multiple frames per AVPacket
+ * Normally demuxers return one frame at a time, demuxers which do not do
+ * are connected to a parser to split what they return into proper frames.
+ * This flag is reserved to the very rare category of codecs which have a
+ * bitstream that cannot be split into frames without timeconsuming
+ * operations like full decoding. Demuxers carring such bitstreams thus
+ * may return multiple frames in a packet. This has many disadvantages like
+ * prohibiting stream copy in many cases thus it should only be considered
+ * as a last resort.
+ */
+#define AV_CODEC_CAP_SUBFRAMES (1 << 8)
+/**
+ * Codec is experimental and is thus avoided in favor of non experimental
+ * encoders
+ */
+#define AV_CODEC_CAP_EXPERIMENTAL (1 << 9)
+/**
+ * Codec should fill in channel configuration and samplerate instead of container
+ */
+#define AV_CODEC_CAP_CHANNEL_CONF (1 << 10)
+/**
+ * Codec supports frame-level multithreading.
+ */
+#define AV_CODEC_CAP_FRAME_THREADS (1 << 12)
+/**
+ * Codec supports slice-based (or partition-based) multithreading.
+ */
+#define AV_CODEC_CAP_SLICE_THREADS (1 << 13)
+/**
+ * Codec supports changed parameters at any point.
+ */
+#define AV_CODEC_CAP_PARAM_CHANGE (1 << 14)
+/**
+ * Codec supports avctx->thread_count == 0 (auto).
+ */
+#define AV_CODEC_CAP_AUTO_THREADS (1 << 15)
+/**
+ * Audio encoder supports receiving a different number of samples in each call.
+ */
+#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE (1 << 16)
+/**
+ * Codec is intra only.
+ */
+#define AV_CODEC_CAP_INTRA_ONLY 0x40000000
+/**
+ * Codec is lossless.
+ */
+#define AV_CODEC_CAP_LOSSLESS 0x80000000
 /**
  * Allow decoders to produce frames with data planes that are not aligned
  * to CPU requirements (e.g. due to cropping).
@@ -2156,7 +2262,7 @@ typedef struct AVCodecContext {
  *
  * - encoding: set by libavcodec in avcodec_open2(). Each submitted frame
  *   except the last must contain exactly frame_size samples per channel.
- *   May be 0 when the codec has CODEC_CAP_VARIABLE_FRAME_SIZE set, then the
+ *   May be 0 when the codec has AV_CODEC_CAP_VARIABLE_FRAME_SIZE set, then the
  *   frame size is not restricted.
  * - decoding: may be set by some decoders to indicate constant frame size
  */
@@ -2358,7 +2464,7 @@ typedef struct AVCodecContext {
  * buffers than buf[] can hold. extended_buf will be freed in
  * av_frame_unref().
 *
- * If CODEC_CAP_DR1 is not set then get_buffer2() must call
+ * If AV_CODEC_CAP_DR1 is not set then get_buffer2() must call
 * avcodec_default_get_buffer2() instead of providing buffers allocated by
 * some other means.
 *
@@ -3369,7 +3475,7 @@ typedef struct AVCodec {
     enum AVCodecID id;
     /**
      * Codec capabilities.
-     * see CODEC_CAP_*
+     * see AV_CODEC_CAP_*
      */
     int capabilities;
     const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0}
@@ -4154,7 +4260,7 @@ attribute_deprecated int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame
 /**
  * The default callback for AVCodecContext.get_buffer2(). It is made public so
  * it can be called by custom get_buffer2() implementations for decoders without
- * CODEC_CAP_DR1 set.
+ * AV_CODEC_CAP_DR1 set.
  */
 int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags);
@@ -4178,7 +4284,7 @@ unsigned avcodec_get_edge_width(void);
 * buffer that is acceptable for the codec if you do not use any horizontal
 * padding.
 *
- * May only be used if a codec with CODEC_CAP_DR1 has been opened.
+ * May only be used if a codec with AV_CODEC_CAP_DR1 has been opened.
 */
 void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height);
@@ -4187,7 +4293,7 @@ void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height);
 * buffer that is acceptable for the codec if you also ensure that all
 * line sizes are a multiple of the respective linesize_align[i].
 *
- * May only be used if a codec with CODEC_CAP_DR1 has been opened.
+ * May only be used if a codec with AV_CODEC_CAP_DR1 has been opened.
 */
 void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
                                int linesize_align[AV_NUM_DATA_POINTERS]);
@@ -4285,13 +4391,13 @@ attribute_deprecated int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *s
 * needs to be fed to the decoder with remaining data until it is completely
 * consumed or an error occurs.
 *
- * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input
+ * Some decoders (those marked with AV_CODEC_CAP_DELAY) have a delay between input
 * and output. This means that for some packets they will not immediately
 * produce decoded output and need to be flushed at the end of decoding to get
 * all the decoded data. Flushing is done by calling this function with packets
 * with avpkt->data set to NULL and avpkt->size set to 0 until it stops
 * returning samples. It is safe to flush even those decoders that are not
- * marked with CODEC_CAP_DELAY, then no samples will be returned.
+ * marked with AV_CODEC_CAP_DELAY, then no samples will be returned.
 *
 * @warning The input buffer, avpkt->data must be FF_INPUT_BUFFER_PADDING_SIZE
 * larger than the actual read bytes because some optimized bitstream
@@ -4316,7 +4422,7 @@ attribute_deprecated int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *s
 * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is
 *                           non-zero. Note that this field being set to zero
 *                           does not mean that an error has occurred. For
- *                           decoders with CODEC_CAP_DELAY set, no given decode
+ *                           decoders with AV_CODEC_CAP_DELAY set, no given decode
 *                           call is guaranteed to produce a frame.
 * @param[in] avpkt The input AVPacket containing the input buffer.
 *                  At least avpkt->data and avpkt->size should be set. Some
@@ -4340,7 +4446,7 @@ int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame,
 * @warning The end of the input buffer buf should be set to 0 to ensure that
 *          no overreading happens for damaged MPEG streams.
 *
- * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay
+ * @note Codecs which have the AV_CODEC_CAP_DELAY capability set have a delay
 *       between input and output, these need to be fed with avpkt->data=NULL,
 *       avpkt->size=0 at the end to return the remaining frames.
 *
@@ -4380,7 +4486,7 @@ int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
 * Return a negative value on error, otherwise return the number of bytes used.
 * If no subtitle could be decompressed, got_sub_ptr is zero.
 * Otherwise, the subtitle is stored in *sub.
- * Note that CODEC_CAP_DR1 is not available for subtitle codecs. This is for
+ * Note that AV_CODEC_CAP_DR1 is not available for subtitle codecs. This is for
 * simplicity, because the performance difference is expect to be negligible
 * and reusing a get_buffer written for video codecs would probably perform badly
 * due to a potentially very different allocation pattern.
@@ -4742,8 +4848,8 @@ int attribute_deprecated avcodec_encode_audio(AVCodecContext *avctx,
 *                  called to free the user supplied buffer).
 * @param[in] frame AVFrame containing the raw audio data to be encoded.
 *                  May be NULL when flushing an encoder that has the
- *                  CODEC_CAP_DELAY capability set.
- *                  If CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame
+ *                  AV_CODEC_CAP_DELAY capability set.
+ *                  If AV_CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame
 *                  can have any number of samples.
 *                  If it is not set, frame->nb_samples must be equal to
 *                  avctx->frame_size for all frames except the last.
@@ -4804,7 +4910,7 @@ int avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size,
 *                  called to free the user supplied buffer).
 * @param[in] frame AVFrame containing the raw video data to be encoded.
 *                  May be NULL when flushing an encoder that has the
- *                  CODEC_CAP_DELAY capability set.
+ *                  AV_CODEC_CAP_DELAY capability set.
 * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the
 *                            output packet is non-empty, and to 0 if it is
 *                            empty. If the function returns an error, the

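The avcodec.h hunks above carry the capability documentation over to the AV_-prefixed names. The AV_CODEC_CAP_DELAY contract they describe (feed avpkt->data=NULL, avpkt->size=0 until no more frames come back) looks roughly as follows with the decode API of this period; this is only a sketch, and what to do with each delayed frame is left to the caller:

    #include <libavcodec/avcodec.h>

    /* Flush a decoder as described for AV_CODEC_CAP_DELAY; per the docs this is
     * safe even for decoders without the flag, which simply return no frames. */
    static int drain_decoder(AVCodecContext *dec, AVFrame *frame)
    {
        AVPacket pkt;
        int got_frame = 1;
        av_init_packet(&pkt);
        pkt.data = NULL;               /* NULL/0 packet signals flushing */
        pkt.size = 0;
        while (got_frame) {
            if (avcodec_decode_video2(dec, frame, &got_frame, &pkt) < 0)
                return -1;
            /* if (got_frame) { ...consume the delayed frame... } */
        }
        return 0;
    }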
diff --git a/libavcodec/avrndec.c b/libavcodec/avrndec.c
@@ -168,6 +168,6 @@ AVCodec ff_avrn_decoder = {
     .init = init,
     .close = end,
     .decode = decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
     .max_lowres = 3,
 };

diff --git a/libavcodec/avs.c b/libavcodec/avs.c
@@ -185,5 +185,5 @@ AVCodec ff_avs_decoder = {
     .init = avs_decode_init,
     .decode = avs_decode_frame,
     .close = avs_decode_end,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/avuidec.c b/libavcodec/avuidec.c
@@ -126,5 +126,5 @@ AVCodec ff_avui_decoder = {
     .id = AV_CODEC_ID_AVUI,
     .init = avui_decode_init,
     .decode = avui_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/avuienc.c b/libavcodec/avuienc.c
@@ -101,6 +101,6 @@ AVCodec ff_avui_encoder = {
     .id = AV_CODEC_ID_AVUI,
     .init = avui_encode_init,
     .encode2 = avui_encode_frame,
-    .capabilities = CODEC_CAP_EXPERIMENTAL,
+    .capabilities = AV_CODEC_CAP_EXPERIMENTAL,
     .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_UYVY422, AV_PIX_FMT_NONE },
 };

diff --git a/libavcodec/bethsoftvideo.c b/libavcodec/bethsoftvideo.c
@@ -162,5 +162,5 @@ AVCodec ff_bethsoftvid_decoder = {
     .init = bethsoftvid_decode_init,
     .close = bethsoftvid_decode_end,
     .decode = bethsoftvid_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/bfi.c b/libavcodec/bfi.c
@@ -184,5 +184,5 @@ AVCodec ff_bfi_decoder = {
     .init = bfi_decode_init,
     .close = bfi_decode_close,
     .decode = bfi_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/bink.c b/libavcodec/bink.c
@@ -1354,5 +1354,5 @@ AVCodec ff_bink_decoder = {
     .close = decode_end,
     .decode = decode_frame,
     .flush = flush,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/binkaudio.c b/libavcodec/binkaudio.c
@@ -343,7 +343,7 @@ AVCodec ff_binkaudio_rdft_decoder = {
     .init = decode_init,
     .close = decode_end,
     .decode = decode_frame,
-    .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1,
 };
 AVCodec ff_binkaudio_dct_decoder = {
@@ -355,5 +355,5 @@ AVCodec ff_binkaudio_dct_decoder = {
     .init = decode_init,
     .close = decode_end,
     .decode = decode_frame,
-    .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/bintext.c b/libavcodec/bintext.c
@@ -227,7 +227,7 @@ AVCodec ff_bintext_decoder = {
     .init = decode_init,
     .close = decode_end,
     .decode = decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };
 #endif
 #if CONFIG_XBIN_DECODER
@@ -240,7 +240,7 @@ AVCodec ff_xbin_decoder = {
     .init = decode_init,
     .close = decode_end,
     .decode = decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };
 #endif
 #if CONFIG_IDF_DECODER
@@ -253,6 +253,6 @@ AVCodec ff_idf_decoder = {
     .init = decode_init,
     .close = decode_end,
     .decode = decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };
 #endif

diff --git a/libavcodec/bmp.c b/libavcodec/bmp.c
@@ -349,5 +349,5 @@ AVCodec ff_bmp_decoder = {
     .type = AVMEDIA_TYPE_VIDEO,
     .id = AV_CODEC_ID_BMP,
     .decode = bmp_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/bmvaudio.c b/libavcodec/bmvaudio.c
@@ -85,5 +85,5 @@ AVCodec ff_bmv_audio_decoder = {
     .id = AV_CODEC_ID_BMV_AUDIO,
     .init = bmv_aud_decode_init,
     .decode = bmv_aud_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/bmvvideo.c b/libavcodec/bmvvideo.c
@@ -294,5 +294,5 @@ AVCodec ff_bmv_video_decoder = {
     .priv_data_size = sizeof(BMVDecContext),
     .init = decode_init,
     .decode = decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/brenderpix.c b/libavcodec/brenderpix.c
@@ -288,5 +288,5 @@ AVCodec ff_brender_pix_decoder = {
     .type = AVMEDIA_TYPE_VIDEO,
     .id = AV_CODEC_ID_BRENDER_PIX,
     .decode = pix_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/c93.c b/libavcodec/c93.c
@@ -268,5 +268,5 @@ AVCodec ff_c93_decoder = {
     .init = decode_init,
     .close = decode_end,
     .decode = decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/cavsdec.c b/libavcodec/cavsdec.c
@@ -1262,6 +1262,6 @@ AVCodec ff_cavs_decoder = {
     .init = ff_cavs_init,
     .close = ff_cavs_end,
     .decode = cavs_decode_frame,
-    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
+    .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
     .flush = cavs_flush,
 };

diff --git a/libavcodec/cdgraphics.c b/libavcodec/cdgraphics.c
@@ -376,5 +376,5 @@ AVCodec ff_cdgraphics_decoder = {
     .init = cdg_decode_init,
     .close = cdg_decode_end,
     .decode = cdg_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/cdxl.c b/libavcodec/cdxl.c
@@ -305,5 +305,5 @@ AVCodec ff_cdxl_decoder = {
     .init = cdxl_decode_init,
     .close = cdxl_decode_end,
     .decode = cdxl_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/cinepak.c b/libavcodec/cinepak.c
@@ -484,5 +484,5 @@ AVCodec ff_cinepak_decoder = {
     .init = cinepak_decode_init,
     .close = cinepak_decode_end,
     .decode = cinepak_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/cljrdec.c b/libavcodec/cljrdec.c
@@ -89,6 +89,6 @@ AVCodec ff_cljr_decoder = {
     .id = AV_CODEC_ID_CLJR,
     .init = decode_init,
     .decode = decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/cllc.c b/libavcodec/cllc.c
@@ -495,5 +495,5 @@ AVCodec ff_cllc_decoder = {
     .init = cllc_decode_init,
     .decode = cllc_decode_frame,
     .close = cllc_decode_close,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/cngdec.c b/libavcodec/cngdec.c
@@ -167,5 +167,5 @@ AVCodec ff_comfortnoise_decoder = {
     .close = cng_decode_close,
     .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
                                                   AV_SAMPLE_FMT_NONE },
-    .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/cook.c b/libavcodec/cook.c
@@ -1282,7 +1282,7 @@ AVCodec ff_cook_decoder = {
     .init = cook_decode_init,
     .close = cook_decode_close,
     .decode = cook_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
     .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
                                                    AV_SAMPLE_FMT_NONE },
 };

diff --git a/libavcodec/cpia.c b/libavcodec/cpia.c
@@ -229,5 +229,5 @@ AVCodec ff_cpia_decoder = {
     .init = cpia_decode_init,
     .close = cpia_decode_end,
     .decode = cpia_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/crystalhd.c b/libavcodec/crystalhd.c
@@ -1098,7 +1098,7 @@ AVCodec ff_h264_crystalhd_decoder = {
     .init = init,
     .close = uninit,
     .decode = decode,
-    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
+    .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
     .flush = flush,
     .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
     .priv_class = &h264_class,
@@ -1122,7 +1122,7 @@ AVCodec ff_mpeg2_crystalhd_decoder = {
     .init = init,
     .close = uninit,
     .decode = decode,
-    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
+    .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
     .flush = flush,
     .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
     .priv_class = &mpeg2_class,
@@ -1146,7 +1146,7 @@ AVCodec ff_mpeg4_crystalhd_decoder = {
     .init = init,
     .close = uninit,
     .decode = decode,
-    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
+    .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
     .flush = flush,
     .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
     .priv_class = &mpeg4_class,
@@ -1170,7 +1170,7 @@ AVCodec ff_msmpeg4_crystalhd_decoder = {
     .init = init,
     .close = uninit,
     .decode = decode,
-    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_EXPERIMENTAL,
+    .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_EXPERIMENTAL,
     .flush = flush,
     .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
     .priv_class = &msmpeg4_class,
@@ -1194,7 +1194,7 @@ AVCodec ff_vc1_crystalhd_decoder = {
     .init = init,
     .close = uninit,
     .decode = decode,
-    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
+    .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
     .flush = flush,
     .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
     .priv_class = &vc1_class,
@@ -1218,7 +1218,7 @@ AVCodec ff_wmv3_crystalhd_decoder = {
     .init = init,
     .close = uninit,
     .decode = decode,
-    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
+    .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
     .flush = flush,
     .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
     .priv_class = &wmv3_class,

diff --git a/libavcodec/cscd.c b/libavcodec/cscd.c
@@ -166,5 +166,5 @@ AVCodec ff_cscd_decoder = {
     .init = decode_init,
     .close = decode_end,
     .decode = decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/cyuv.c b/libavcodec/cyuv.c
@@ -184,7 +184,7 @@ AVCodec ff_aura_decoder = {
     .priv_data_size = sizeof(CyuvDecodeContext),
     .init = cyuv_decode_init,
     .decode = cyuv_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };
 #endif
@@ -197,6 +197,6 @@ AVCodec ff_cyuv_decoder = {
     .priv_data_size = sizeof(CyuvDecodeContext),
     .init = cyuv_decode_init,
     .decode = cyuv_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };
 #endif

diff --git a/libavcodec/dcadec.c b/libavcodec/dcadec.c
@@ -2068,7 +2068,7 @@ AVCodec ff_dca_decoder = {
     .init = dca_decode_init,
     .decode = dca_decode_frame,
     .close = dca_decode_end,
-    .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_CHANNEL_CONF | AV_CODEC_CAP_DR1,
     .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
                                                    AV_SAMPLE_FMT_NONE },
     .profiles = NULL_IF_CONFIG_SMALL(profiles),

diff --git a/libavcodec/dcaenc.c b/libavcodec/dcaenc.c
@@ -960,7 +960,7 @@ AVCodec ff_dca_encoder = {
     .priv_data_size = sizeof(DCAEncContext),
     .init = encode_init,
     .encode2 = encode_frame,
-    .capabilities = CODEC_CAP_EXPERIMENTAL,
+    .capabilities = AV_CODEC_CAP_EXPERIMENTAL,
     .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S32,
                                                   AV_SAMPLE_FMT_NONE },
     .supported_samplerates = sample_rates,

diff --git a/libavcodec/dds.c b/libavcodec/dds.c
@@ -694,6 +694,6 @@ AVCodec ff_dds_decoder = {
     .id = AV_CODEC_ID_DDS,
     .decode = dds_decode,
     .priv_data_size = sizeof(DDSContext),
-    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_SLICE_THREADS,
+    .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE
 };

diff --git a/libavcodec/dfa.c b/libavcodec/dfa.c
@@ -418,5 +418,5 @@ AVCodec ff_dfa_decoder = {
     .init = dfa_decode_init,
     .close = dfa_decode_end,
     .decode = dfa_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

diff --git a/libavcodec/diracdec.c b/libavcodec/diracdec.c
@@ -2040,6 +2040,6 @@ AVCodec ff_dirac_decoder = {
     .init = dirac_decode_init,
     .close = dirac_decode_end,
     .decode = dirac_decode_frame,
-    .capabilities = CODEC_CAP_DELAY,
+    .capabilities = AV_CODEC_CAP_DELAY,
     .flush = dirac_decode_flush,
 };

diff --git a/libavcodec/dnxhddec.c b/libavcodec/dnxhddec.c
@@ -515,5 +515,5 @@ AVCodec ff_dnxhd_decoder = {
     .init = dnxhd_decode_init,
     .close = dnxhd_decode_close,
     .decode = dnxhd_decode_frame,
-    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
+    .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
 };

diff --git a/libavcodec/dnxhdenc.c b/libavcodec/dnxhdenc.c
@@ -1165,7 +1165,7 @@ AVCodec ff_dnxhd_encoder = {
     .init = dnxhd_encode_init,
     .encode2 = dnxhd_encode_picture,
     .close = dnxhd_encode_end,
-    .capabilities = CODEC_CAP_SLICE_THREADS,
+    .capabilities = AV_CODEC_CAP_SLICE_THREADS,
     .pix_fmts = (const enum AVPixelFormat[]) {
         AV_PIX_FMT_YUV422P,
         AV_PIX_FMT_YUV422P10,

diff --git a/libavcodec/dpcm.c b/libavcodec/dpcm.c
@@ -334,7 +334,7 @@ AVCodec ff_ ## name_ ## _decoder = { \
     .priv_data_size = sizeof(DPCMContext), \
     .init = dpcm_decode_init, \
     .decode = dpcm_decode_frame, \
-    .capabilities = CODEC_CAP_DR1, \
+    .capabilities = AV_CODEC_CAP_DR1, \
 }
 DPCM_DECODER(AV_CODEC_ID_INTERPLAY_DPCM, interplay_dpcm, "DPCM Interplay");

diff --git a/libavcodec/dpx.c b/libavcodec/dpx.c
@@ -392,5 +392,5 @@ AVCodec ff_dpx_decoder = {
     .type = AVMEDIA_TYPE_VIDEO,
     .id = AV_CODEC_ID_DPX,
     .decode = decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

@ -129,5 +129,5 @@ AVCodec ff_dsicinaudio_decoder = {
.priv_data_size = sizeof(CinAudioContext), .priv_data_size = sizeof(CinAudioContext),
.init = cinaudio_decode_init, .init = cinaudio_decode_init,
.decode = cinaudio_decode_frame, .decode = cinaudio_decode_frame,
.capabilities = CODEC_CAP_DR1, .capabilities = AV_CODEC_CAP_DR1,
}; };

@ -313,5 +313,5 @@ AVCodec ff_dsicinvideo_decoder = {
.init = cinvideo_decode_init, .init = cinvideo_decode_init,
.close = cinvideo_decode_end, .close = cinvideo_decode_end,
.decode = cinvideo_decode_frame, .decode = cinvideo_decode_frame,
.capabilities = CODEC_CAP_DR1, .capabilities = AV_CODEC_CAP_DR1,
}; };

@ -783,5 +783,5 @@ AVCodec ff_dss_sp_decoder = {
.priv_data_size = sizeof(DssSpContext), .priv_data_size = sizeof(DssSpContext),
.init = dss_sp_decode_init, .init = dss_sp_decode_init,
.decode = dss_sp_decode_frame, .decode = dss_sp_decode_frame,
.capabilities = CODEC_CAP_DR1, .capabilities = AV_CODEC_CAP_DR1,
}; };

@ -568,6 +568,6 @@ AVCodec ff_dvvideo_decoder = {
.priv_data_size = sizeof(DVVideoContext), .priv_data_size = sizeof(DVVideoContext),
.init = dvvideo_decode_init, .init = dvvideo_decode_init,
.decode = dvvideo_decode_frame, .decode = dvvideo_decode_frame,
.capabilities = CODEC_CAP_DR1 | CODEC_CAP_SLICE_THREADS, .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS,
.max_lowres = 3, .max_lowres = 3,
}; };

@ -751,7 +751,7 @@ AVCodec ff_dvvideo_encoder = {
.priv_data_size = sizeof(DVVideoContext), .priv_data_size = sizeof(DVVideoContext),
.init = dvvideo_encode_init, .init = dvvideo_encode_init,
.encode2 = dvvideo_encode_frame, .encode2 = dvvideo_encode_frame,
.capabilities = CODEC_CAP_SLICE_THREADS | CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY, .capabilities = AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
.pix_fmts = (const enum AVPixelFormat[]) { .pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE

@@ -370,5 +370,5 @@ AVCodec ff_dxa_decoder = {
     .init = decode_init,
     .close = decode_end,
     .decode = decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

@@ -752,5 +752,5 @@ AVCodec ff_dxtory_decoder = {
     .type = AVMEDIA_TYPE_VIDEO,
     .id = AV_CODEC_ID_DXTORY,
     .decode = decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

@@ -242,5 +242,5 @@ AVCodec ff_eacmv_decoder = {
     .init = cmv_decode_init,
     .close = cmv_decode_end,
     .decode = cmv_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

@@ -348,5 +348,5 @@ AVCodec ff_eamad_decoder = {
     .init = decode_init,
     .close = decode_end,
     .decode = decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

@@ -249,5 +249,5 @@ AVCodec ff_eatgq_decoder = {
     .priv_data_size = sizeof(TgqContext),
     .init = tgq_decode_init,
     .decode = tgq_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

@@ -364,5 +364,5 @@ AVCodec ff_eatgv_decoder = {
     .init = tgv_decode_init,
     .close = tgv_decode_end,
     .decode = tgv_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

@@ -162,5 +162,5 @@ AVCodec ff_eatqi_decoder = {
     .init = tqi_decode_init,
     .close = tqi_decode_end,
     .decode = tqi_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

@@ -777,7 +777,7 @@ void ff_er_frame_start(ERContext *s)
 static int er_supported(ERContext *s)
 {
     if(s->avctx->hwaccel && s->avctx->hwaccel->decode_slice ||
-       s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
+       s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU ||
        !s->cur_pic.f ||
        s->cur_pic.field_picture
     )
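
For context, the renamed capability bits are part of the public API, so applications query them the same way the error-resilience code above does. A minimal sketch, assuming a FLAC decoder build; the codec ID and the flags tested here are purely illustrative and not taken from this diff (avcodec_register_all() is still required on libavcodec of this vintage):

#include <stdio.h>
#include <libavcodec/avcodec.h>

int main(void)
{
    avcodec_register_all();                    /* still needed before lookup in this era of lavc */
    AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_FLAC);
    if (!codec)
        return 1;
    if (codec->capabilities & AV_CODEC_CAP_DR1)
        printf("%s: supports direct rendering (DR1)\n", codec->name);
    if (codec->capabilities & AV_CODEC_CAP_FRAME_THREADS)
        printf("%s: supports frame threading\n", codec->name);
    return 0;
}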

@@ -373,5 +373,5 @@ AVCodec ff_escape124_decoder = {
     .init = escape124_decode_init,
     .close = escape124_decode_close,
     .decode = escape124_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

@@ -356,5 +356,5 @@ AVCodec ff_escape130_decoder = {
     .init = escape130_decode_init,
     .close = escape130_decode_close,
     .decode = escape130_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

@@ -935,7 +935,7 @@ AVCodec ff_evrc_decoder = {
     .id = AV_CODEC_ID_EVRC,
     .init = evrc_decode_init,
     .decode = evrc_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
     .priv_data_size = sizeof(EVRCContext),
     .priv_class = &evrcdec_class,
 };

@@ -1447,7 +1447,7 @@ AVCodec ff_exr_decoder = {
     .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
     .close = decode_end,
     .decode = decode_frame,
-    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS |
-                    CODEC_CAP_SLICE_THREADS,
+    .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
+                    AV_CODEC_CAP_SLICE_THREADS,
     .priv_class = &exr_class,
 };

@@ -1110,6 +1110,6 @@ AVCodec ff_ffv1_decoder = {
     .decode = decode_frame,
     .init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy),
     .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
-    .capabilities = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/ |
-                    CODEC_CAP_FRAME_THREADS | CODEC_CAP_SLICE_THREADS,
+    .capabilities = AV_CODEC_CAP_DR1 /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/ |
+                    AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS,
 };

@@ -1369,7 +1369,7 @@ AVCodec ff_ffv1_encoder = {
     .init = encode_init,
     .encode2 = encode_frame,
     .close = encode_close,
-    .capabilities = CODEC_CAP_SLICE_THREADS | CODEC_CAP_DELAY,
+    .capabilities = AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_DELAY,
     .pix_fmts = (const enum AVPixelFormat[]) {
         AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV444P,
         AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV411P,

@@ -477,5 +477,5 @@ AVCodec ff_ffwavesynth_decoder = {
     .init = wavesynth_init,
     .close = wavesynth_close,
     .decode = wavesynth_decode,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

@@ -476,6 +476,6 @@ AVCodec ff_fic_decoder = {
     .init = fic_decode_init,
     .decode = fic_decode_frame,
     .close = fic_decode_close,
-    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_SLICE_THREADS,
+    .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS,
     .priv_class = &fic_decoder_class,
 };

@@ -665,7 +665,7 @@ AVCodec ff_flac_decoder = {
     .close = flac_decode_close,
     .decode = flac_decode_frame,
     .init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy),
-    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
+    .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
     .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16,
                                                    AV_SAMPLE_FMT_S16P,
                                                    AV_SAMPLE_FMT_S32,

@@ -1475,7 +1475,7 @@ AVCodec ff_flac_encoder = {
     .init = flac_encode_init,
     .encode2 = flac_encode_frame,
     .close = flac_encode_close,
-    .capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY | CODEC_CAP_LOSSLESS,
+    .capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_LOSSLESS,
     .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
                                                   AV_SAMPLE_FMT_S32,
                                                   AV_SAMPLE_FMT_NONE },

@@ -509,7 +509,7 @@ AVCodec ff_flashsv_decoder = {
     .init = flashsv_decode_init,
     .close = flashsv_decode_end,
     .decode = flashsv_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
     .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_BGR24, AV_PIX_FMT_NONE },
 };
 #endif /* CONFIG_FLASHSV_DECODER */
@@ -572,7 +572,7 @@ AVCodec ff_flashsv2_decoder = {
     .init = flashsv2_decode_init,
     .close = flashsv2_decode_end,
     .decode = flashsv_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
     .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_BGR24, AV_PIX_FMT_NONE },
 };
 #endif /* CONFIG_FLASHSV2_DECODER */

@@ -814,5 +814,5 @@ AVCodec ff_flic_decoder = {
     .init = flic_decode_init,
     .close = flic_decode_end,
     .decode = flic_decode_frame,
-    .capabilities = CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DR1,
 };

@@ -122,7 +122,7 @@ AVCodec ff_flv_decoder = {
     .init = ff_h263_decode_init,
     .close = ff_h263_decode_end,
     .decode = ff_h263_decode_frame,
-    .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
+    .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1,
     .max_lowres = 3,
     .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
                                                AV_PIX_FMT_NONE },

@@ -122,7 +122,7 @@ int ff_frame_thread_encoder_init(AVCodecContext *avctx, AVDictionary *options){
     if( !(avctx->thread_type & FF_THREAD_FRAME)
-       || !(avctx->codec->capabilities & CODEC_CAP_INTRA_ONLY))
+       || !(avctx->codec->capabilities & AV_CODEC_CAP_INTRA_ONLY))
         return 0;
     if( !avctx->thread_count
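
On the caller side, the same renamed flag can gate whether frame-threaded encoding is worth requesting at all. This is a hypothetical helper, not code from the tree; it only mirrors the check above using the public FF_THREAD_* macros and avcodec_open2():

#include <libavcodec/avcodec.h>

/* Hypothetical sketch: ask for frame threads only when the encoder
 * declares itself intra-only, matching the library-side test above. */
static int open_encoder_threaded(AVCodecContext *enc, const AVCodec *codec, int threads)
{
    if (codec->capabilities & AV_CODEC_CAP_INTRA_ONLY) {
        enc->thread_type  = FF_THREAD_FRAME;   /* frame threading is viable for intra-only encoders */
        enc->thread_count = threads;
    } else {
        enc->thread_type  = FF_THREAD_SLICE;   /* otherwise fall back to slice threading, if the codec supports it */
    }
    return avcodec_open2(enc, codec, NULL);
}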

@@ -322,5 +322,5 @@ AVCodec ff_fraps_decoder = {
     .init = decode_init,
     .close = decode_end,
     .decode = decode_frame,
-    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
+    .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
 };

Some files were not shown because too many files have changed in this diff.
