lavc: deprecate AV_CODEC_CAP_SUBFRAMES

There is nothing meaningful the caller can do with it.
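
Note for applications that still test this capability: it remains defined while FF_API_SUBFRAMES is true (lavc major version 60) and disappears at the next major bump, so any remaining use can be guarded with a plain #ifdef on the flag. A minimal, illustrative sketch (not part of this commit; the helper name is made up):

    #include <libavcodec/avcodec.h>

    /* Illustrative application-side helper, not part of this commit:
     * AV_CODEC_CAP_SUBFRAMES stays defined while FF_API_SUBFRAMES is true
     * and is removed once LIBAVCODEC_VERSION_MAJOR reaches 61. */
    static int may_output_subframes(const AVCodec *codec)
    {
    #ifdef AV_CODEC_CAP_SUBFRAMES
        return !!(codec->capabilities & AV_CODEC_CAP_SUBFRAMES);
    #else
        (void)codec;
        return 0; /* flag gone: nothing meaningful left to check */
    #endif
    }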
commit 8b20d0dcb5
parent 51d6ae099f
Author: Anton Khirnov
20 changed files:

 doc/APIchanges              |  3
 libavcodec/alsdec.c         |  6
 libavcodec/apac.c           |  6
 libavcodec/apedec.c         |  6
 libavcodec/atrac3.c         | 12
 libavcodec/atrac9dec.c      |  6
 libavcodec/bonk.c           |  6
 libavcodec/codec.h          |  3
 libavcodec/ftr.c            |  6
 libavcodec/g723_1dec.c      |  6
 libavcodec/g729dec.c        | 12
 libavcodec/libspeexdec.c    |  6
 libavcodec/misc4.c          |  5
 libavcodec/shorten.c        |  6
 libavcodec/tests/avcodec.c  |  3
 libavcodec/version_major.h  |  1
 libavcodec/wavarc.c         |  2
 libavcodec/wmalosslessdec.c |  6
 libavcodec/wmaprodec.c      | 18
 libavcodec/wmavoice.c       |  6

--- a/doc/APIchanges
+++ b/doc/APIchanges
@@ -2,6 +2,9 @@ The last version increases of all libraries were on 2023-02-09
 
 API changes, most recent first:
 
+2023-05-xx - xxxxxxxxxx - lavc 60 - codec.h
+  Deprecate AV_CODEC_CAP_SUBFRAMES without replacement.
+
 2023-05-xx - xxxxxxxxxx - lavc 60.11.100 - codec_par.h
   Add AVCodecParameters.framerate.

--- a/libavcodec/alsdec.c
+++ b/libavcodec/alsdec.c
@@ -2190,6 +2190,10 @@ const FFCodec ff_als_decoder = {
     .close          = decode_end,
     FF_CODEC_DECODE_CB(decode_frame),
     .flush          = flush,
-    .p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
+    .p.capabilities =
+#if FF_API_SUBFRAMES
+                      AV_CODEC_CAP_SUBFRAMES |
+#endif
+                      AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
     .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
 };

--- a/libavcodec/apac.c
+++ b/libavcodec/apac.c
@@ -269,8 +269,10 @@ const FFCodec ff_apac_decoder = {
     FF_CODEC_DECODE_CB(apac_decode),
     .close          = apac_close,
     .p.capabilities = AV_CODEC_CAP_DELAY |
-                      AV_CODEC_CAP_DR1 |
-                      AV_CODEC_CAP_SUBFRAMES,
+#if FF_API_SUBFRAMES
+                      AV_CODEC_CAP_SUBFRAMES |
+#endif
+                      AV_CODEC_CAP_DR1,
     .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
     .p.sample_fmts  = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
                                                       AV_SAMPLE_FMT_S16P,

--- a/libavcodec/apedec.c
+++ b/libavcodec/apedec.c
@@ -1680,7 +1680,11 @@ const FFCodec ff_ape_decoder = {
     .init           = ape_decode_init,
     .close          = ape_decode_close,
     FF_CODEC_DECODE_CB(ape_decode_frame),
-    .p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DELAY |
+    .p.capabilities =
+#if FF_API_SUBFRAMES
+                      AV_CODEC_CAP_SUBFRAMES |
+#endif
+                      AV_CODEC_CAP_DELAY |
                       AV_CODEC_CAP_DR1,
     .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
     .flush          = ape_flush,

--- a/libavcodec/atrac3.c
+++ b/libavcodec/atrac3.c
@@ -1026,7 +1026,11 @@ const FFCodec ff_atrac3_decoder = {
     .init           = atrac3_decode_init,
     .close          = atrac3_decode_close,
     FF_CODEC_DECODE_CB(atrac3_decode_frame),
-    .p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
+    .p.capabilities =
+#if FF_API_SUBFRAMES
+                      AV_CODEC_CAP_SUBFRAMES |
+#endif
+                      AV_CODEC_CAP_DR1,
     .p.sample_fmts  = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
                                                       AV_SAMPLE_FMT_NONE },
     .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
@@ -1041,7 +1045,11 @@ const FFCodec ff_atrac3al_decoder = {
     .init           = atrac3_decode_init,
     .close          = atrac3_decode_close,
     FF_CODEC_DECODE_CB(atrac3al_decode_frame),
-    .p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
+    .p.capabilities =
+#if FF_API_SUBFRAMES
+                      AV_CODEC_CAP_SUBFRAMES |
+#endif
+                      AV_CODEC_CAP_DR1,
     .p.sample_fmts  = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
                                                       AV_SAMPLE_FMT_NONE },
     .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,

--- a/libavcodec/atrac9dec.c
+++ b/libavcodec/atrac9dec.c
@@ -1003,5 +1003,9 @@ const FFCodec ff_atrac9_decoder = {
     FF_CODEC_DECODE_CB(atrac9_decode_frame),
     .flush          = atrac9_decode_flush,
     .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
-    .p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
+    .p.capabilities =
+#if FF_API_SUBFRAMES
+                      AV_CODEC_CAP_SUBFRAMES |
+#endif
+                      AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
 };

--- a/libavcodec/bonk.c
+++ b/libavcodec/bonk.c
@@ -428,8 +428,10 @@ const FFCodec ff_bonk_decoder = {
     FF_CODEC_DECODE_CB(bonk_decode),
     .close          = bonk_close,
     .p.capabilities = AV_CODEC_CAP_DELAY |
-                      AV_CODEC_CAP_DR1 |
-                      AV_CODEC_CAP_SUBFRAMES,
+#if FF_API_SUBFRAMES
+                      AV_CODEC_CAP_SUBFRAMES |
+#endif
+                      AV_CODEC_CAP_DR1,
     .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
     .p.sample_fmts  = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
                                                       AV_SAMPLE_FMT_NONE },

--- a/libavcodec/codec.h
+++ b/libavcodec/codec.h
@@ -80,6 +80,7 @@
  */
 #define AV_CODEC_CAP_SMALL_LAST_FRAME    (1 <<  6)
+#if FF_API_SUBFRAMES
 /**
  * Codec can output multiple frames per AVPacket
  * Normally demuxers return one frame at a time, demuxers which do not do
@@ -92,6 +93,8 @@
  * as a last resort.
  */
 #define AV_CODEC_CAP_SUBFRAMES           (1 <<  8)
+#endif
+
 /**
  * Codec is experimental and is thus avoided in favor of non experimental
  * encoders

--- a/libavcodec/ftr.c
+++ b/libavcodec/ftr.c
@@ -203,6 +203,10 @@ const FFCodec ff_ftr_decoder = {
     .close          = ftr_close,
     .flush          = ftr_flush,
     .priv_data_size = sizeof(FTRContext),
-    .p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
+    .p.capabilities =
+#if FF_API_SUBFRAMES
+                      AV_CODEC_CAP_SUBFRAMES |
+#endif
+                      AV_CODEC_CAP_DR1,
     .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
 };

--- a/libavcodec/g723_1dec.c
+++ b/libavcodec/g723_1dec.c
@@ -1118,6 +1118,10 @@ const FFCodec ff_g723_1_decoder = {
     .priv_data_size = sizeof(G723_1_Context),
     .init           = g723_1_decode_init,
     FF_CODEC_DECODE_CB(g723_1_decode_frame),
-    .p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
+    .p.capabilities =
+#if FF_API_SUBFRAMES
+                      AV_CODEC_CAP_SUBFRAMES |
+#endif
+                      AV_CODEC_CAP_DR1,
     .p.priv_class   = &g723_1dec_class,
 };

--- a/libavcodec/g729dec.c
+++ b/libavcodec/g729dec.c
@@ -760,7 +760,11 @@ const FFCodec ff_g729_decoder = {
     .init           = decoder_init,
     FF_CODEC_DECODE_CB(decode_frame),
     .close          = decode_close,
-    .p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
+    .p.capabilities =
+#if FF_API_SUBFRAMES
+                      AV_CODEC_CAP_SUBFRAMES |
+#endif
+                      AV_CODEC_CAP_DR1,
 };
 
 const FFCodec ff_acelp_kelvin_decoder = {
@@ -772,5 +776,9 @@ const FFCodec ff_acelp_kelvin_decoder = {
     .init           = decoder_init,
     FF_CODEC_DECODE_CB(decode_frame),
     .close          = decode_close,
-    .p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
+    .p.capabilities =
+#if FF_API_SUBFRAMES
+                      AV_CODEC_CAP_SUBFRAMES |
+#endif
+                      AV_CODEC_CAP_DR1,
 };

--- a/libavcodec/libspeexdec.c
+++ b/libavcodec/libspeexdec.c
@@ -195,7 +195,11 @@ const FFCodec ff_libspeex_decoder = {
     CODEC_LONG_NAME("libspeex Speex"),
     .p.type         = AVMEDIA_TYPE_AUDIO,
     .p.id           = AV_CODEC_ID_SPEEX,
-    .p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
+    .p.capabilities =
+#if FF_API_SUBFRAMES
+                      AV_CODEC_CAP_SUBFRAMES |
+#endif
+                      AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
     .p.wrapper_name = "libspeex",
     .caps_internal  = FF_CODEC_CAP_NOT_INIT_THREADSAFE,
     .priv_data_size = sizeof(LibSpeexContext),

--- a/libavcodec/misc4.c
+++ b/libavcodec/misc4.c
@@ -179,7 +179,10 @@ const FFCodec ff_misc4_decoder = {
     .priv_data_size = sizeof(MISC4Context),
     .init           = misc4_init,
     FF_CODEC_DECODE_CB(misc4_decode),
-    .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SUBFRAMES |
+    .p.capabilities = AV_CODEC_CAP_DR1 |
+#if FF_API_SUBFRAMES
+                      AV_CODEC_CAP_SUBFRAMES |
+#endif
                       AV_CODEC_CAP_CHANNEL_CONF,
     .p.sample_fmts  = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16,
                                                       AV_SAMPLE_FMT_NONE },

--- a/libavcodec/shorten.c
+++ b/libavcodec/shorten.c
@@ -814,8 +814,10 @@ const FFCodec ff_shorten_decoder = {
     FF_CODEC_DECODE_CB(shorten_decode_frame),
     .p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
                       AV_CODEC_CAP_DELAY |
-                      AV_CODEC_CAP_DR1 |
-                      AV_CODEC_CAP_SUBFRAMES ,
+#if FF_API_SUBFRAMES
+                      AV_CODEC_CAP_SUBFRAMES |
+#endif
+                      AV_CODEC_CAP_DR1,
     .p.sample_fmts  = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
                                                       AV_SAMPLE_FMT_U8P,
                                                       AV_SAMPLE_FMT_NONE },

--- a/libavcodec/tests/avcodec.c
+++ b/libavcodec/tests/avcodec.c
@@ -149,8 +149,7 @@ int main(void){
                                    FF_CODEC_CAP_SETS_FRAME_PROPS) ||
                 codec->capabilities & (AV_CODEC_CAP_AVOID_PROBING |
                                        AV_CODEC_CAP_CHANNEL_CONF |
-                                       AV_CODEC_CAP_DRAW_HORIZ_BAND |
-                                       AV_CODEC_CAP_SUBFRAMES))
+                                       AV_CODEC_CAP_DRAW_HORIZ_BAND))
                 ERR("Encoder %s has decoder-only capabilities set\n");
             if (codec->capabilities & AV_CODEC_CAP_FRAME_THREADS &&
                 codec->capabilities & AV_CODEC_CAP_ENCODER_FLUSH)

--- a/libavcodec/version_major.h
+++ b/libavcodec/version_major.h
@@ -46,6 +46,7 @@
 #define FF_API_VT_HWACCEL_CONTEXT  (LIBAVCODEC_VERSION_MAJOR < 61)
 #define FF_API_AVCTX_FRAME_NUMBER  (LIBAVCODEC_VERSION_MAJOR < 61)
 #define FF_API_SLICE_OFFSET        (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_SUBFRAMES           (LIBAVCODEC_VERSION_MAJOR < 61)
 
 // reminder to remove CrystalHD decoders on next major bump
 #define FF_CODEC_CRYSTAL_HD        (LIBAVCODEC_VERSION_MAJOR < 61)
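
The FF_API_SUBFRAMES macro added above follows the usual lavc deprecation pattern: it stays true for the current major version, so the flag and every decoder entry that still sets it keep compiling, and all of the #if FF_API_SUBFRAMES blocks in this commit drop out together once LIBAVCODEC_VERSION_MAJOR reaches 61. A condensed sketch of that gating, using a hypothetical flag purely for illustration:

    #include <libavcodec/version_major.h>

    /* Hypothetical capability bit, shown only to illustrate the FF_API_*
     * gating used throughout this commit: the definition and every use of
     * it sit behind the same macro, so both compile out at the bump. */
    #define FF_API_EXAMPLE        (LIBAVCODEC_VERSION_MAJOR < 61)
    #if FF_API_EXAMPLE
    #define AV_CODEC_CAP_EXAMPLE  (1 << 30)
    #endif

    static const int example_capabilities =
    #if FF_API_EXAMPLE
                          AV_CODEC_CAP_EXAMPLE |
    #endif
                          0;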

--- a/libavcodec/wavarc.c
+++ b/libavcodec/wavarc.c
@@ -499,7 +499,9 @@ const FFCodec ff_wavarc_decoder = {
     FF_CODEC_DECODE_CB(wavarc_decode),
     .close          = wavarc_close,
     .p.capabilities = AV_CODEC_CAP_DR1 |
+#if FF_API_SUBFRAMES
                       AV_CODEC_CAP_SUBFRAMES |
+#endif
                       AV_CODEC_CAP_DELAY,
     .p.sample_fmts  = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
                                                       AV_SAMPLE_FMT_S16P,

--- a/libavcodec/wmalosslessdec.c
+++ b/libavcodec/wmalosslessdec.c
@@ -1334,7 +1334,11 @@ const FFCodec ff_wmalossless_decoder = {
     .close          = decode_close,
     FF_CODEC_DECODE_CB(decode_packet),
     .flush          = flush,
-    .p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
+    .p.capabilities =
+#if FF_API_SUBFRAMES
+                      AV_CODEC_CAP_SUBFRAMES |
+#endif
+                      AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
     .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
     .p.sample_fmts  = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
                                                       AV_SAMPLE_FMT_S32P,

--- a/libavcodec/wmaprodec.c
+++ b/libavcodec/wmaprodec.c
@@ -2094,7 +2094,11 @@ const FFCodec ff_wmapro_decoder = {
     .init           = wmapro_decode_init,
     .close          = wmapro_decode_end,
     FF_CODEC_DECODE_CB(wmapro_decode_packet),
-    .p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
+    .p.capabilities =
+#if FF_API_SUBFRAMES
+                      AV_CODEC_CAP_SUBFRAMES |
+#endif
+                      AV_CODEC_CAP_DR1,
     .flush          = wmapro_flush,
     .p.sample_fmts  = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
                                                       AV_SAMPLE_FMT_NONE },
@@ -2111,7 +2115,11 @@ const FFCodec ff_xma1_decoder = {
     .close          = xma_decode_end,
     FF_CODEC_DECODE_CB(xma_decode_packet),
     .flush          = xma_flush,
-    .p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
+    .p.capabilities =
+#if FF_API_SUBFRAMES
+                      AV_CODEC_CAP_SUBFRAMES |
+#endif
+                      AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
     .p.sample_fmts  = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
                                                       AV_SAMPLE_FMT_NONE },
     .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
@@ -2127,7 +2135,11 @@ const FFCodec ff_xma2_decoder = {
     .close          = xma_decode_end,
     FF_CODEC_DECODE_CB(xma_decode_packet),
     .flush          = xma_flush,
-    .p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
+    .p.capabilities =
+#if FF_API_SUBFRAMES
+                      AV_CODEC_CAP_SUBFRAMES |
+#endif
+                      AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
     .p.sample_fmts  = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
                                                       AV_SAMPLE_FMT_NONE },
     .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,

--- a/libavcodec/wmavoice.c
+++ b/libavcodec/wmavoice.c
@@ -2004,7 +2004,11 @@ const FFCodec ff_wmavoice_decoder = {
     .init           = wmavoice_decode_init,
     .close          = wmavoice_decode_end,
     FF_CODEC_DECODE_CB(wmavoice_decode_packet),
-    .p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
+    .p.capabilities =
+#if FF_API_SUBFRAMES
+                      AV_CODEC_CAP_SUBFRAMES |
+#endif
+                      AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
     .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
     .flush          = wmavoice_flush,
 };
