avcodec/codec_internal: Use union for FFCodec decode/encode callbacks

This is possible because every FFCodec has to implement exactly
one of these callbacks. Doing so decreases sizeof(FFCodec) and
therefore the size of the binary.
Note that for position-independent code the decrease is in
.data.rel.ro, so it also translates into reduced memory
consumption.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
Branch: release/5.1
Author: Andreas Rheinhardt
Parent: ce7dbd0481
Commit: 4243da4ff4
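The per-codec edits below are mechanical: every ".decode", ".decode_sub", ".receive_frame", ".encode2", ".encode_sub" or ".receive_packet" assignment is replaced by the matching FF_CODEC_*_CB() macro, which fills both the new cb_type field and the corresponding member of the cb union. The following is a small self-contained C program (a toy illustration, not FFmpeg code; all names in it are made up) that shows why folding mutually exclusive callback pointers into a union shrinks the struct and therefore the read-only codec tables:

/* Toy illustration of the technique used by this commit.
 * Compile with: cc -std=c11 union_demo.c && ./a.out */
#include <stdio.h>

typedef struct Packet Packet;   /* opaque stand-ins for AVPacket/AVFrame */
typedef struct Frame  Frame;

/* Old layout: one pointer per callback, although a codec uses only one. */
typedef struct OldCodec {
    int caps_internal;
    int (*decode)(Frame *out, const Packet *in);
    int (*encode)(Packet *out, const Frame *in);
    int (*receive_frame)(Frame *out);
    int (*receive_packet)(Packet *out);
} OldCodec;

/* New layout: the callbacks share storage; cb_type says which one is valid. */
enum CbType { CB_TYPE_DECODE, CB_TYPE_ENCODE, CB_TYPE_RECEIVE_FRAME, CB_TYPE_RECEIVE_PACKET };

typedef struct NewCodec {
    unsigned caps_internal:29;
    unsigned cb_type:3;
    union {
        int (*decode)(Frame *out, const Packet *in);
        int (*encode)(Packet *out, const Frame *in);
        int (*receive_frame)(Frame *out);
        int (*receive_packet)(Packet *out);
    } cb;
} NewCodec;

/* Designated-initializer helper in the spirit of FF_CODEC_DECODE_CB(). */
#define DECODE_CB(func) .cb_type = CB_TYPE_DECODE, .cb.decode = (func)

static int my_decode(Frame *out, const Packet *in) { (void)out; (void)in; return 0; }

static const NewCodec example = {
    .caps_internal = 1,
    DECODE_CB(my_decode),   /* sets cb_type and cb.decode in one go */
};

int main(void)
{
    /* Const tables of such structs end up in .rodata (or .data.rel.ro for
     * PIC builds), so a smaller struct means a smaller binary. */
    printf("old: %zu bytes, new: %zu bytes\n", sizeof(OldCodec), sizeof(NewCodec));
    return example.cb_type == CB_TYPE_DECODE ? 0 : 1;
}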
Changed files (number of changed diff lines in parentheses):

  libavcodec/012v.c (2)
  libavcodec/4xm.c (2)
  libavcodec/8bps.c (2)
  libavcodec/8svx.c (4)
  libavcodec/a64multienc.c (4)
  libavcodec/aacdec.c (4)
  libavcodec/aacdec_fixed.c (2)
  libavcodec/aacenc.c (2)
  libavcodec/aasc.c (2)
  libavcodec/ac3dec_fixed.c (2)
  libavcodec/ac3dec_float.c (4)
  libavcodec/ac3enc_fixed.c (2)
  libavcodec/ac3enc_float.c (2)
  libavcodec/adpcm.c (2)
  libavcodec/adpcmenc.c (2)
  libavcodec/adxdec.c (2)
  libavcodec/adxenc.c (2)
  libavcodec/agm.c (2)
  libavcodec/aic.c (2)
  libavcodec/alac.c (2)
  libavcodec/alacenc.c (2)
  libavcodec/aliaspixdec.c (2)
  libavcodec/aliaspixenc.c (2)
  libavcodec/alsdec.c (2)
  libavcodec/amfenc_h264.c (2)
  libavcodec/amfenc_hevc.c (2)
  libavcodec/amrnbdec.c (2)
  libavcodec/amrwbdec.c (2)
  libavcodec/anm.c (2)
  libavcodec/ansi.c (2)
  libavcodec/apedec.c (2)
  libavcodec/aptxdec.c (4)
  libavcodec/aptxenc.c (4)
  libavcodec/arbc.c (2)
  libavcodec/argo.c (2)
  libavcodec/assdec.c (4)
  libavcodec/assenc.c (4)
  libavcodec/asvdec.c (4)
  libavcodec/asvenc.c (4)
  libavcodec/atrac1.c (2)
  libavcodec/atrac3.c (4)
  libavcodec/atrac3plusdec.c (4)
  libavcodec/atrac9dec.c (2)
  libavcodec/audiotoolboxdec.c (2)
  libavcodec/audiotoolboxenc.c (2)
  libavcodec/aura.c (2)
  libavcodec/av1dec.c (2)
  libavcodec/avrndec.c (2)
  libavcodec/avs.c (2)
  libavcodec/avuidec.c (2)
  libavcodec/avuienc.c (2)
  libavcodec/bethsoftvideo.c (2)
  libavcodec/bfi.c (2)
  libavcodec/bink.c (2)
  libavcodec/binkaudio.c (4)
  libavcodec/bintext.c (6)
  libavcodec/bitpacked_dec.c (2)
  libavcodec/bitpacked_enc.c (2)
  libavcodec/bmp.c (2)
  libavcodec/bmpenc.c (2)
  libavcodec/bmvaudio.c (2)
  libavcodec/bmvvideo.c (2)
  libavcodec/brenderpix.c (2)
  libavcodec/c93.c (2)
  libavcodec/cavsdec.c (2)
  libavcodec/ccaption_dec.c (2)
  libavcodec/cdgraphics.c (2)
  libavcodec/cdtoons.c (2)
  libavcodec/cdxl.c (2)
  libavcodec/cfhd.c (2)
  libavcodec/cfhdenc.c (2)
  libavcodec/cinepak.c (2)
  libavcodec/cinepakenc.c (2)
  libavcodec/clearvideo.c (2)
  libavcodec/cljrdec.c (2)
  libavcodec/cljrenc.c (2)
  libavcodec/cllc.c (2)
  libavcodec/cngdec.c (2)
  libavcodec/cngenc.c (2)
  libavcodec/codec_internal.h (153)
  libavcodec/cook.c (2)
  libavcodec/cpia.c (2)
  libavcodec/cri.c (2)
  libavcodec/crystalhd.c (2)
  libavcodec/cscd.c (2)
  libavcodec/cuviddec.c (2)
  libavcodec/cyuv.c (4)
  libavcodec/dcadec.c (2)
  libavcodec/dcaenc.c (2)
  libavcodec/dds.c (2)
  libavcodec/decode.c (8)
  libavcodec/dfa.c (2)
  libavcodec/dfpwmdec.c (2)
  libavcodec/dfpwmenc.c (2)
  libavcodec/diracdec.c (2)
  libavcodec/dnxhddec.c (2)
  libavcodec/dnxhdenc.c (2)
  libavcodec/dolby_e.c (2)
  libavcodec/dpcm.c (2)
  libavcodec/dpx.c (2)

  (Some files were not shown because too many files have changed in this diff.)

@@ -150,7 +150,7 @@ const FFCodec ff_zero12v_decoder = {
     .p.type = AVMEDIA_TYPE_VIDEO,
     .p.id = AV_CODEC_ID_012V,
     .init = zero12v_decode_init,
-    .decode = zero12v_decode_frame,
+    FF_CODEC_DECODE_CB(zero12v_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };

@@ -1034,7 +1034,7 @@ const FFCodec ff_fourxm_decoder = {
     .priv_data_size = sizeof(FourXContext),
     .init = decode_init,
     .close = decode_end,
-    .decode = decode_frame,
+    FF_CODEC_DECODE_CB(decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
 };

@@ -180,7 +180,7 @@ const FFCodec ff_eightbps_decoder = {
     .p.id = AV_CODEC_ID_8BPS,
     .priv_data_size = sizeof(EightBpsContext),
     .init = decode_init,
-    .decode = decode_frame,
+    FF_CODEC_DECODE_CB(decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };

@@ -194,7 +194,7 @@ const FFCodec ff_eightsvx_fib_decoder = {
     .p.id = AV_CODEC_ID_8SVX_FIB,
     .priv_data_size = sizeof (EightSvxContext),
     .init = eightsvx_decode_init,
-    .decode = eightsvx_decode_frame,
+    FF_CODEC_DECODE_CB(eightsvx_decode_frame),
     .close = eightsvx_decode_close,
     .p.capabilities = AV_CODEC_CAP_DR1,
     .p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
@@ -210,7 +210,7 @@ const FFCodec ff_eightsvx_exp_decoder = {
     .p.id = AV_CODEC_ID_8SVX_EXP,
     .priv_data_size = sizeof (EightSvxContext),
     .init = eightsvx_decode_init,
-    .decode = eightsvx_decode_frame,
+    FF_CODEC_DECODE_CB(eightsvx_decode_frame),
     .close = eightsvx_decode_close,
     .p.capabilities = AV_CODEC_CAP_DR1,
     .p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,

@@ -401,7 +401,7 @@ const FFCodec ff_a64multi_encoder = {
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
     .priv_data_size = sizeof(A64Context),
     .init = a64multi_encode_init,
-    .encode2 = a64multi_encode_frame,
+    FF_CODEC_ENCODE_CB(a64multi_encode_frame),
     .close = a64multi_close_encoder,
     .p.pix_fmts = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
     .caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_INIT_THREADSAFE,
@@ -416,7 +416,7 @@ const FFCodec ff_a64multi5_encoder = {
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
     .priv_data_size = sizeof(A64Context),
     .init = a64multi_encode_init,
-    .encode2 = a64multi_encode_frame,
+    FF_CODEC_ENCODE_CB(a64multi_encode_frame),
     .close = a64multi_close_encoder,
     .p.pix_fmts = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
     .caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_INIT_THREADSAFE,

@@ -560,7 +560,7 @@ const FFCodec ff_aac_decoder = {
     .priv_data_size = sizeof(AACContext),
     .init = aac_decode_init,
     .close = aac_decode_close,
-    .decode = aac_decode_frame,
+    FF_CODEC_DECODE_CB(aac_decode_frame),
     .p.sample_fmts = (const enum AVSampleFormat[]) {
         AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE
     },
@@ -588,7 +588,7 @@ const FFCodec ff_aac_latm_decoder = {
     .priv_data_size = sizeof(struct LATMContext),
     .init = latm_decode_init,
     .close = aac_decode_close,
-    .decode = latm_decode_frame,
+    FF_CODEC_DECODE_CB(latm_decode_frame),
     .p.sample_fmts = (const enum AVSampleFormat[]) {
         AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE
     },

@@ -458,7 +458,7 @@ const FFCodec ff_aac_fixed_decoder = {
     .priv_data_size = sizeof(AACContext),
     .init = aac_decode_init,
     .close = aac_decode_close,
-    .decode = aac_decode_frame,
+    FF_CODEC_DECODE_CB(aac_decode_frame),
     .p.sample_fmts = (const enum AVSampleFormat[]) {
         AV_SAMPLE_FMT_S32P, AV_SAMPLE_FMT_NONE
     },

@@ -1144,7 +1144,7 @@ const FFCodec ff_aac_encoder = {
     .p.id = AV_CODEC_ID_AAC,
     .priv_data_size = sizeof(AACEncContext),
     .init = aac_encode_init,
-    .encode2 = aac_encode_frame,
+    FF_CODEC_ENCODE_CB(aac_encode_frame),
     .close = aac_encode_end,
     .defaults = aac_encode_defaults,
     .p.supported_samplerates = ff_mpeg4audio_sample_rates,

@@ -157,7 +157,7 @@ const FFCodec ff_aasc_decoder = {
     .priv_data_size = sizeof(AascContext),
     .init = aasc_decode_init,
     .close = aasc_decode_end,
-    .decode = aasc_decode_frame,
+    FF_CODEC_DECODE_CB(aasc_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };

@@ -178,7 +178,7 @@ const FFCodec ff_ac3_fixed_decoder = {
     .priv_data_size = sizeof (AC3DecodeContext),
     .init = ac3_decode_init,
     .close = ac3_decode_end,
-    .decode = ac3_decode_frame,
+    FF_CODEC_DECODE_CB(ac3_decode_frame),
     .p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
                       AV_CODEC_CAP_DR1,
     .p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,

@@ -66,7 +66,7 @@ const FFCodec ff_ac3_decoder = {
     .priv_data_size = sizeof (AC3DecodeContext),
     .init = ac3_decode_init,
     .close = ac3_decode_end,
-    .decode = ac3_decode_frame,
+    FF_CODEC_DECODE_CB(ac3_decode_frame),
     .p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
                       AV_CODEC_CAP_DR1,
     .p.long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
@@ -84,7 +84,7 @@ const FFCodec ff_eac3_decoder = {
     .priv_data_size = sizeof (AC3DecodeContext),
     .init = ac3_decode_init,
     .close = ac3_decode_end,
-    .decode = ac3_decode_frame,
+    FF_CODEC_DECODE_CB(ac3_decode_frame),
     .p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
                       AV_CODEC_CAP_DR1,
     .p.long_name = NULL_IF_CONFIG_SMALL("ATSC A/52B (AC-3, E-AC-3)"),

@@ -128,7 +128,7 @@ const FFCodec ff_ac3_fixed_encoder = {
     .p.capabilities = AV_CODEC_CAP_DR1,
     .priv_data_size = sizeof(AC3EncodeContext),
     .init = ac3_fixed_encode_init,
-    .encode2 = ff_ac3_fixed_encode_frame,
+    FF_CODEC_ENCODE_CB(ff_ac3_fixed_encode_frame),
     .close = ff_ac3_encode_close,
     .p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S32P,
                                                     AV_SAMPLE_FMT_NONE },

@@ -132,7 +132,7 @@ const FFCodec ff_ac3_encoder = {
     .p.capabilities = AV_CODEC_CAP_DR1,
     .priv_data_size = sizeof(AC3EncodeContext),
     .init = ff_ac3_float_encode_init,
-    .encode2 = ff_ac3_float_encode_frame,
+    FF_CODEC_ENCODE_CB(ff_ac3_float_encode_frame),
     .close = ff_ac3_encode_close,
     .p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLTP,
                                                     AV_SAMPLE_FMT_NONE },

@@ -2294,7 +2294,7 @@ const FFCodec ff_ ## name_ ## _decoder = { \
     .p.sample_fmts = sample_fmts_, \
     .priv_data_size = sizeof(ADPCMDecodeContext), \
     .init = adpcm_decode_init, \
-    .decode = adpcm_decode_frame, \
+    FF_CODEC_DECODE_CB(adpcm_decode_frame), \
     .flush = adpcm_flush, \
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE, \
 };

@@ -1007,7 +1007,7 @@ const FFCodec ff_ ## name_ ## _encoder = { \
     .p.priv_class = &adpcm_encoder_class, \
     .priv_data_size = sizeof(ADPCMEncodeContext), \
     .init = adpcm_encode_init, \
-    .encode2 = adpcm_encode_frame, \
+    FF_CODEC_ENCODE_CB(adpcm_encode_frame), \
     .close = adpcm_encode_close, \
     .caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_INIT_THREADSAFE, \
 };

@@ -197,7 +197,7 @@ const FFCodec ff_adpcm_adx_decoder = {
     .p.id = AV_CODEC_ID_ADPCM_ADX,
     .priv_data_size = sizeof(ADXContext),
     .init = adx_decode_init,
-    .decode = adx_decode_frame,
+    FF_CODEC_DECODE_CB(adx_decode_frame),
     .flush = adx_decode_flush,
     .p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
                       AV_CODEC_CAP_DR1,

@@ -197,7 +197,7 @@ const FFCodec ff_adpcm_adx_encoder = {
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
     .priv_data_size = sizeof(ADXContext),
     .init = adx_encode_init,
-    .encode2 = adx_encode_frame,
+    FF_CODEC_ENCODE_CB(adx_encode_frame),
     .p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16,
                                                      AV_SAMPLE_FMT_NONE },
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,

@@ -1294,7 +1294,7 @@ const FFCodec ff_agm_decoder = {
     .priv_data_size = sizeof(AGMContext),
     .init = decode_init,
     .close = decode_close,
-    .decode = decode_frame,
+    FF_CODEC_DECODE_CB(decode_frame),
     .flush = decode_flush,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP |

@@ -503,7 +503,7 @@ const FFCodec ff_aic_decoder = {
     .priv_data_size = sizeof(AICContext),
     .init = aic_decode_init,
     .close = aic_decode_close,
-    .decode = aic_decode_frame,
+    FF_CODEC_DECODE_CB(aic_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };

@@ -618,7 +618,7 @@ const FFCodec ff_alac_decoder = {
     .priv_data_size = sizeof(ALACContext),
     .init = alac_decode_init,
     .close = alac_decode_close,
-    .decode = alac_decode_frame,
+    FF_CODEC_DECODE_CB(alac_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_CHANNEL_CONF,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
     .p.priv_class = &alac_class

@@ -657,7 +657,7 @@ const FFCodec ff_alac_encoder = {
     .priv_data_size = sizeof(AlacEncodeContext),
     .p.priv_class = &alacenc_class,
     .init = alac_encode_init,
-    .encode2 = alac_encode_frame,
+    FF_CODEC_ENCODE_CB(alac_encode_frame),
     .close = alac_encode_close,
     .p.capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME,
 #if FF_API_OLD_CHANNEL_LAYOUT

@@ -127,5 +127,5 @@ const FFCodec ff_alias_pix_decoder = {
     .p.type = AVMEDIA_TYPE_VIDEO,
     .p.id = AV_CODEC_ID_ALIAS_PIX,
     .p.capabilities = AV_CODEC_CAP_DR1,
-    .decode = decode_frame,
+    FF_CODEC_DECODE_CB(decode_frame),
 };

@@ -106,7 +106,7 @@ const FFCodec ff_alias_pix_encoder = {
     .p.long_name = NULL_IF_CONFIG_SMALL("Alias/Wavefront PIX image"),
     .p.type = AVMEDIA_TYPE_VIDEO,
     .p.id = AV_CODEC_ID_ALIAS_PIX,
-    .encode2 = encode_frame,
+    FF_CODEC_ENCODE_CB(encode_frame),
     .p.pix_fmts = (const enum AVPixelFormat[]) {
         AV_PIX_FMT_BGR24, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
     },

@@ -2183,7 +2183,7 @@ const FFCodec ff_als_decoder = {
     .priv_data_size = sizeof(ALSDecContext),
     .init = decode_init,
     .close = decode_end,
-    .decode = decode_frame,
+    FF_CODEC_DECODE_CB(decode_frame),
     .flush = flush,
     .p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,

@@ -384,7 +384,7 @@ const FFCodec ff_h264_amf_encoder = {
     .p.type = AVMEDIA_TYPE_VIDEO,
     .p.id = AV_CODEC_ID_H264,
     .init = amf_encode_init_h264,
-    .receive_packet = ff_amf_receive_packet,
+    FF_CODEC_RECEIVE_PACKET_CB(ff_amf_receive_packet),
     .close = ff_amf_encode_close,
     .priv_data_size = sizeof(AmfContext),
     .p.priv_class = &h264_amf_class,

@@ -316,7 +316,7 @@ const FFCodec ff_hevc_amf_encoder = {
     .p.type = AVMEDIA_TYPE_VIDEO,
     .p.id = AV_CODEC_ID_HEVC,
     .init = amf_encode_init_hevc,
-    .receive_packet = ff_amf_receive_packet,
+    FF_CODEC_RECEIVE_PACKET_CB(ff_amf_receive_packet),
     .close = ff_amf_encode_close,
     .priv_data_size = sizeof(AmfContext),
     .p.priv_class = &hevc_amf_class,

@@ -1103,7 +1103,7 @@ const FFCodec ff_amrnb_decoder = {
     .p.id = AV_CODEC_ID_AMR_NB,
     .priv_data_size = sizeof(AMRChannelsContext),
     .init = amrnb_decode_init,
-    .decode = amrnb_decode_frame,
+    FF_CODEC_DECODE_CB(amrnb_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
     .p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLTP,
                                                     AV_SAMPLE_FMT_NONE },

@@ -1299,7 +1299,7 @@ const FFCodec ff_amrwb_decoder = {
     .p.id = AV_CODEC_ID_AMR_WB,
     .priv_data_size = sizeof(AMRWBChannelsContext),
     .init = amrwb_decode_init,
-    .decode = amrwb_decode_frame,
+    FF_CODEC_DECODE_CB(amrwb_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
     .p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT,
                                                     AV_SAMPLE_FMT_NONE },

@@ -197,7 +197,7 @@ const FFCodec ff_anm_decoder = {
     .priv_data_size = sizeof(AnmContext),
     .init = decode_init,
     .close = decode_end,
-    .decode = decode_frame,
+    FF_CODEC_DECODE_CB(decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };

@@ -488,7 +488,7 @@ const FFCodec ff_ansi_decoder = {
     .priv_data_size = sizeof(AnsiContext),
     .init = decode_init,
     .close = decode_close,
-    .decode = decode_frame,
+    FF_CODEC_DECODE_CB(decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
     .defaults = ansi_defaults,

@@ -1666,7 +1666,7 @@ const FFCodec ff_ape_decoder = {
     .priv_data_size = sizeof(APEContext),
     .init = ape_decode_init,
     .close = ape_decode_close,
-    .decode = ape_decode_frame,
+    FF_CODEC_DECODE_CB(ape_decode_frame),
     .p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DELAY |
                       AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,

@@ -181,7 +181,7 @@ const FFCodec ff_aptx_decoder = {
     .p.id = AV_CODEC_ID_APTX,
     .priv_data_size = sizeof(AptXContext),
     .init = ff_aptx_init,
-    .decode = aptx_decode_frame,
+    FF_CODEC_DECODE_CB(aptx_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 #if FF_API_OLD_CHANNEL_LAYOUT
@@ -201,7 +201,7 @@ const FFCodec ff_aptx_hd_decoder = {
     .p.id = AV_CODEC_ID_APTX_HD,
     .priv_data_size = sizeof(AptXContext),
     .init = ff_aptx_init,
-    .decode = aptx_decode_frame,
+    FF_CODEC_DECODE_CB(aptx_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 #if FF_API_OLD_CHANNEL_LAYOUT

@@ -253,7 +253,7 @@ const FFCodec ff_aptx_encoder = {
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SMALL_LAST_FRAME,
     .priv_data_size = sizeof(AptXContext),
     .init = ff_aptx_init,
-    .encode2 = aptx_encode_frame,
+    FF_CODEC_ENCODE_CB(aptx_encode_frame),
     .close = aptx_close,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 #if FF_API_OLD_CHANNEL_LAYOUT
@@ -275,7 +275,7 @@ const FFCodec ff_aptx_hd_encoder = {
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SMALL_LAST_FRAME,
     .priv_data_size = sizeof(AptXContext),
     .init = ff_aptx_init,
-    .encode2 = aptx_encode_frame,
+    FF_CODEC_ENCODE_CB(aptx_encode_frame),
     .close = aptx_close,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 #if FF_API_OLD_CHANNEL_LAYOUT

@@ -218,7 +218,7 @@ const FFCodec ff_arbc_decoder = {
     .p.id = AV_CODEC_ID_ARBC,
     .priv_data_size = sizeof(ARBCContext),
     .init = decode_init,
-    .decode = decode_frame,
+    FF_CODEC_DECODE_CB(decode_frame),
     .flush = decode_flush,
     .close = decode_close,
     .p.capabilities = AV_CODEC_CAP_DR1,

@@ -740,7 +740,7 @@ const FFCodec ff_argo_decoder = {
     .p.id = AV_CODEC_ID_ARGO,
     .priv_data_size = sizeof(ArgoContext),
     .init = decode_init,
-    .decode = decode_frame,
+    FF_CODEC_DECODE_CB(decode_frame),
     .flush = decode_flush,
     .close = decode_close,
     .p.capabilities = AV_CODEC_CAP_DR1,

@@ -68,7 +68,7 @@ const FFCodec ff_ssa_decoder = {
     .p.type = AVMEDIA_TYPE_SUBTITLE,
     .p.id = AV_CODEC_ID_ASS,
     .init = ass_decode_init,
-    .decode_sub = ass_decode_frame,
+    FF_CODEC_DECODE_SUB_CB(ass_decode_frame),
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };
 #endif
@@ -80,7 +80,7 @@ const FFCodec ff_ass_decoder = {
     .p.type = AVMEDIA_TYPE_SUBTITLE,
     .p.id = AV_CODEC_ID_ASS,
     .init = ass_decode_init,
-    .decode_sub = ass_decode_frame,
+    FF_CODEC_DECODE_SUB_CB(ass_decode_frame),
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };
 #endif

@@ -75,7 +75,7 @@ const FFCodec ff_ssa_encoder = {
     .p.type = AVMEDIA_TYPE_SUBTITLE,
     .p.id = AV_CODEC_ID_ASS,
     .init = ass_encode_init,
-    .encode_sub = ass_encode_frame,
+    FF_CODEC_ENCODE_SUB_CB(ass_encode_frame),
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };
 #endif
@@ -87,7 +87,7 @@ const FFCodec ff_ass_encoder = {
     .p.type = AVMEDIA_TYPE_SUBTITLE,
     .p.id = AV_CODEC_ID_ASS,
     .init = ass_encode_init,
-    .encode_sub = ass_encode_frame,
+    FF_CODEC_ENCODE_SUB_CB(ass_encode_frame),
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };
 #endif

@@ -336,7 +336,7 @@ const FFCodec ff_asv1_decoder = {
     .priv_data_size = sizeof(ASV1Context),
     .init = decode_init,
     .close = decode_end,
-    .decode = decode_frame,
+    FF_CODEC_DECODE_CB(decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };
@@ -350,7 +350,7 @@ const FFCodec ff_asv2_decoder = {
     .p.id = AV_CODEC_ID_ASV2,
     .priv_data_size = sizeof(ASV1Context),
     .init = decode_init,
-    .decode = decode_frame,
+    FF_CODEC_DECODE_CB(decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };

@@ -350,7 +350,7 @@ const FFCodec ff_asv1_encoder = {
     .p.id = AV_CODEC_ID_ASV1,
     .priv_data_size = sizeof(ASV1Context),
     .init = encode_init,
-    .encode2 = encode_frame,
+    FF_CODEC_ENCODE_CB(encode_frame),
     .p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
                                                  AV_PIX_FMT_NONE },
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -365,7 +365,7 @@ const FFCodec ff_asv2_encoder = {
     .p.id = AV_CODEC_ID_ASV2,
     .priv_data_size = sizeof(ASV1Context),
     .init = encode_init,
-    .encode2 = encode_frame,
+    FF_CODEC_ENCODE_CB(encode_frame),
     .p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
                                                  AV_PIX_FMT_NONE },
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,

@@ -392,7 +392,7 @@ const FFCodec ff_atrac1_decoder = {
     .priv_data_size = sizeof(AT1Ctx),
     .init = atrac1_decode_init,
     .close = atrac1_decode_end,
-    .decode = atrac1_decode_frame,
+    FF_CODEC_DECODE_CB(atrac1_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
                                                      AV_SAMPLE_FMT_NONE },

@@ -1023,7 +1023,7 @@ const FFCodec ff_atrac3_decoder = {
     .priv_data_size = sizeof(ATRAC3Context),
     .init = atrac3_decode_init,
     .close = atrac3_decode_close,
-    .decode = atrac3_decode_frame,
+    FF_CODEC_DECODE_CB(atrac3_decode_frame),
     .p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
     .p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
                                                      AV_SAMPLE_FMT_NONE },
@@ -1038,7 +1038,7 @@ const FFCodec ff_atrac3al_decoder = {
     .priv_data_size = sizeof(ATRAC3Context),
     .init = atrac3_decode_init,
     .close = atrac3_decode_close,
-    .decode = atrac3al_decode_frame,
+    FF_CODEC_DECODE_CB(atrac3al_decode_frame),
     .p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
     .p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
                                                      AV_SAMPLE_FMT_NONE },

@@ -400,7 +400,7 @@ const FFCodec ff_atrac3p_decoder = {
     .priv_data_size = sizeof(ATRAC3PContext),
     .init = atrac3p_decode_init,
     .close = atrac3p_decode_close,
-    .decode = atrac3p_decode_frame,
+    FF_CODEC_DECODE_CB(atrac3p_decode_frame),
 };
 
 const FFCodec ff_atrac3pal_decoder = {
@@ -413,5 +413,5 @@ const FFCodec ff_atrac3pal_decoder = {
     .priv_data_size = sizeof(ATRAC3PContext),
     .init = atrac3p_decode_init,
     .close = atrac3p_decode_close,
-    .decode = atrac3p_decode_frame,
+    FF_CODEC_DECODE_CB(atrac3p_decode_frame),
 };

@@ -995,7 +995,7 @@ const FFCodec ff_atrac9_decoder = {
     .priv_data_size = sizeof(ATRAC9Context),
     .init = atrac9_decode_init,
     .close = atrac9_decode_close,
-    .decode = atrac9_decode_frame,
+    FF_CODEC_DECODE_CB(atrac9_decode_frame),
     .flush = atrac9_decode_flush,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
     .p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,

@@ -592,7 +592,7 @@ static av_cold int ffat_close_decoder(AVCodecContext *avctx)
     .priv_data_size = sizeof(ATDecodeContext), \
     .init = ffat_init_decoder, \
     .close = ffat_close_decoder, \
-    .decode = ffat_decode, \
+    FF_CODEC_DECODE_CB(ffat_decode), \
     .flush = ffat_decode_flush, \
     .p.priv_class = &ffat_##NAME##_dec_class, \
     .bsfs = bsf_name, \

@@ -621,7 +621,7 @@ static const AVOption options[] = {
     .priv_data_size = sizeof(ATDecodeContext), \
     .init = ffat_init_encoder, \
     .close = ffat_close_encoder, \
-    .encode2 = ffat_encode, \
+    FF_CODEC_ENCODE_CB(ffat_encode), \
     .flush = ffat_encode_flush, \
     .p.priv_class = &ffat_##NAME##_enc_class, \
     .p.capabilities = AV_CODEC_CAP_DELAY | \

@@ -102,7 +102,7 @@ const FFCodec ff_aura2_decoder = {
     .p.type = AVMEDIA_TYPE_VIDEO,
     .p.id = AV_CODEC_ID_AURA2,
     .init = aura_decode_init,
-    .decode = aura_decode_frame,
+    FF_CODEC_DECODE_CB(aura_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };

@@ -1246,7 +1246,7 @@ const FFCodec ff_av1_decoder = {
     .priv_data_size = sizeof(AV1DecContext),
     .init = av1_decode_init,
     .close = av1_decode_free,
-    .decode = av1_decode_frame,
+    FF_CODEC_DECODE_CB(av1_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_AVOID_PROBING,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP |

@@ -96,7 +96,7 @@ const FFCodec ff_avrn_decoder = {
     .p.id = AV_CODEC_ID_AVRN,
     .priv_data_size = sizeof(AVRnContext),
     .init = init,
-    .decode = decode_frame,
+    FF_CODEC_DECODE_CB(decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
 };

@@ -182,7 +182,7 @@ const FFCodec ff_avs_decoder = {
     .p.id = AV_CODEC_ID_AVS,
     .priv_data_size = sizeof(AvsContext),
     .init = avs_decode_init,
-    .decode = avs_decode_frame,
+    FF_CODEC_DECODE_CB(avs_decode_frame),
     .close = avs_decode_end,
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,

@@ -126,6 +126,6 @@ const FFCodec ff_avui_decoder = {
     .p.id = AV_CODEC_ID_AVUI,
     .p.capabilities = AV_CODEC_CAP_DR1,
     .init = avui_decode_init,
-    .decode = avui_decode_frame,
+    FF_CODEC_DECODE_CB(avui_decode_frame),
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };

@@ -99,6 +99,6 @@ const FFCodec ff_avui_encoder = {
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_EXPERIMENTAL,
     .p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_UYVY422, AV_PIX_FMT_NONE },
     .init = avui_encode_init,
-    .encode2 = avui_encode_frame,
+    FF_CODEC_ENCODE_CB(avui_encode_frame),
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };

@@ -166,7 +166,7 @@ const FFCodec ff_bethsoftvid_decoder = {
     .priv_data_size = sizeof(BethsoftvidContext),
     .init = bethsoftvid_decode_init,
     .close = bethsoftvid_decode_end,
-    .decode = bethsoftvid_decode_frame,
+    FF_CODEC_DECODE_CB(bethsoftvid_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };

@@ -183,7 +183,7 @@ const FFCodec ff_bfi_decoder = {
     .priv_data_size = sizeof(BFIContext),
     .init = bfi_decode_init,
     .close = bfi_decode_close,
-    .decode = bfi_decode_frame,
+    FF_CODEC_DECODE_CB(bfi_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };

@@ -1427,7 +1427,7 @@ const FFCodec ff_bink_decoder = {
     .priv_data_size = sizeof(BinkContext),
     .init = decode_init,
     .close = decode_end,
-    .decode = decode_frame,
+    FF_CODEC_DECODE_CB(decode_frame),
     .flush = flush,
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,

@@ -372,7 +372,7 @@ const FFCodec ff_binkaudio_rdft_decoder = {
     .init = decode_init,
     .flush = decode_flush,
     .close = decode_end,
-    .receive_frame = binkaudio_receive_frame,
+    FF_CODEC_RECEIVE_FRAME_CB(binkaudio_receive_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
 };
@@ -386,7 +386,7 @@ const FFCodec ff_binkaudio_dct_decoder = {
     .init = decode_init,
     .flush = decode_flush,
     .close = decode_end,
-    .receive_frame = binkaudio_receive_frame,
+    FF_CODEC_RECEIVE_FRAME_CB(binkaudio_receive_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
 };

@@ -224,7 +224,7 @@ const FFCodec ff_bintext_decoder = {
     .p.id = AV_CODEC_ID_BINTEXT,
     .priv_data_size = sizeof(XbinContext),
     .init = decode_init,
-    .decode = decode_frame,
+    FF_CODEC_DECODE_CB(decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };
@@ -237,7 +237,7 @@ const FFCodec ff_xbin_decoder = {
     .p.id = AV_CODEC_ID_XBIN,
     .priv_data_size = sizeof(XbinContext),
     .init = decode_init,
-    .decode = decode_frame,
+    FF_CODEC_DECODE_CB(decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };
@@ -250,7 +250,7 @@ const FFCodec ff_idf_decoder = {
     .p.id = AV_CODEC_ID_IDF,
     .priv_data_size = sizeof(XbinContext),
     .init = decode_init,
-    .decode = decode_frame,
+    FF_CODEC_DECODE_CB(decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };

@@ -149,7 +149,7 @@ const FFCodec ff_bitpacked_decoder = {
     .p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
     .priv_data_size = sizeof(struct BitpackedContext),
     .init = bitpacked_init_decoder,
-    .decode = bitpacked_decode,
+    FF_CODEC_DECODE_CB(bitpacked_decode),
     .codec_tags = (const uint32_t []){
         MKTAG('U', 'Y', 'V', 'Y'),
         FF_CODEC_TAGS_END,

@@ -112,7 +112,7 @@ const FFCodec ff_bitpacked_encoder = {
     .priv_data_size = sizeof(struct BitpackedContext),
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
     .init = encode_init,
-    .encode2 = encode_frame,
+    FF_CODEC_ENCODE_CB(encode_frame),
     .p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV422P10,
                                                 AV_PIX_FMT_NONE },
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,

@@ -370,5 +370,5 @@ const FFCodec ff_bmp_decoder = {
     .p.type = AVMEDIA_TYPE_VIDEO,
     .p.id = AV_CODEC_ID_BMP,
     .p.capabilities = AV_CODEC_CAP_DR1,
-    .decode = bmp_decode_frame,
+    FF_CODEC_DECODE_CB(bmp_decode_frame),
 };

@@ -162,7 +162,7 @@ const FFCodec ff_bmp_encoder = {
     .p.id = AV_CODEC_ID_BMP,
     .p.capabilities = AV_CODEC_CAP_DR1,
     .init = bmp_encode_init,
-    .encode2 = bmp_encode_frame,
+    FF_CODEC_ENCODE_CB(bmp_encode_frame),
     .p.pix_fmts = (const enum AVPixelFormat[]){
         AV_PIX_FMT_BGRA, AV_PIX_FMT_BGR24,
         AV_PIX_FMT_RGB565, AV_PIX_FMT_RGB555, AV_PIX_FMT_RGB444,

@@ -84,7 +84,7 @@ const FFCodec ff_bmv_audio_decoder = {
     .p.type = AVMEDIA_TYPE_AUDIO,
     .p.id = AV_CODEC_ID_BMV_AUDIO,
     .init = bmv_aud_decode_init,
-    .decode = bmv_aud_decode_frame,
+    FF_CODEC_DECODE_CB(bmv_aud_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };

@@ -292,7 +292,7 @@ const FFCodec ff_bmv_video_decoder = {
     .p.id = AV_CODEC_ID_BMV_VIDEO,
     .priv_data_size = sizeof(BMVDecContext),
     .init = decode_init,
-    .decode = decode_frame,
+    FF_CODEC_DECODE_CB(decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };

@@ -290,5 +290,5 @@ const FFCodec ff_brender_pix_decoder = {
     .p.type = AVMEDIA_TYPE_VIDEO,
     .p.id = AV_CODEC_ID_BRENDER_PIX,
     .p.capabilities = AV_CODEC_CAP_DR1,
-    .decode = pix_decode_frame,
+    FF_CODEC_DECODE_CB(pix_decode_frame),
 };

@@ -266,7 +266,7 @@ const FFCodec ff_c93_decoder = {
     .priv_data_size = sizeof(C93DecoderContext),
     .init = decode_init,
     .close = decode_end,
-    .decode = decode_frame,
+    FF_CODEC_DECODE_CB(decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
 };

@@ -1318,7 +1318,7 @@ const FFCodec ff_cavs_decoder = {
     .priv_data_size = sizeof(AVSContext),
     .init = ff_cavs_init,
     .close = ff_cavs_end,
-    .decode = cavs_decode_frame,
+    FF_CODEC_DECODE_CB(cavs_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
     .flush = cavs_flush,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,

@@ -955,6 +955,6 @@ const FFCodec ff_ccaption_decoder = {
     .init = init_decoder,
     .close = close_decoder,
     .flush = flush_decoder,
-    .decode_sub = decode,
+    FF_CODEC_DECODE_SUB_CB(decode),
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };

@@ -395,7 +395,7 @@ const FFCodec ff_cdgraphics_decoder = {
     .priv_data_size = sizeof(CDGraphicsContext),
     .init = cdg_decode_init,
     .close = cdg_decode_end,
-    .decode = cdg_decode_frame,
+    FF_CODEC_DECODE_CB(cdg_decode_frame),
     .flush = cdg_decode_flush,
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,

@@ -451,7 +451,7 @@ const FFCodec ff_cdtoons_decoder = {
     .priv_data_size = sizeof(CDToonsContext),
     .init = cdtoons_decode_init,
     .close = cdtoons_decode_end,
-    .decode = cdtoons_decode_frame,
+    FF_CODEC_DECODE_CB(cdtoons_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .flush = cdtoons_flush,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,

@@ -344,7 +344,7 @@ const FFCodec ff_cdxl_decoder = {
     .priv_data_size = sizeof(CDXLVideoContext),
     .init = cdxl_decode_init,
     .close = cdxl_decode_end,
-    .decode = cdxl_decode_frame,
+    FF_CODEC_DECODE_CB(cdxl_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };

@@ -1463,7 +1463,7 @@ const FFCodec ff_cfhd_decoder = {
     .priv_data_size = sizeof(CFHDContext),
     .init = cfhd_init,
     .close = cfhd_close,
-    .decode = cfhd_decode,
+    FF_CODEC_DECODE_CB(cfhd_decode),
     .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,

@@ -854,7 +854,7 @@ const FFCodec ff_cfhd_encoder = {
     .p.priv_class = &cfhd_class,
     .init = cfhd_encode_init,
     .close = cfhd_encode_close,
-    .encode2 = cfhd_encode_frame,
+    FF_CODEC_ENCODE_CB(cfhd_encode_frame),
     .p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
     .p.pix_fmts = (const enum AVPixelFormat[]) {
         AV_PIX_FMT_YUV422P10,

@@ -514,7 +514,7 @@ const FFCodec ff_cinepak_decoder = {
     .priv_data_size = sizeof(CinepakContext),
     .init = cinepak_decode_init,
     .close = cinepak_decode_end,
-    .decode = cinepak_decode_frame,
+    FF_CODEC_DECODE_CB(cinepak_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };

@@ -1201,7 +1201,7 @@ const FFCodec ff_cinepak_encoder = {
     .p.id = AV_CODEC_ID_CINEPAK,
     .priv_data_size = sizeof(CinepakEncContext),
     .init = cinepak_encode_init,
-    .encode2 = cinepak_encode_frame,
+    FF_CODEC_ENCODE_CB(cinepak_encode_frame),
     .close = cinepak_encode_end,
     .p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_RGB24, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE },
     .p.priv_class = &cinepak_class,

@@ -775,7 +775,7 @@ const FFCodec ff_clearvideo_decoder = {
     .priv_data_size = sizeof(CLVContext),
     .init = clv_decode_init,
     .close = clv_decode_end,
-    .decode = clv_decode_frame,
+    FF_CODEC_DECODE_CB(clv_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
 };

@@ -87,7 +87,7 @@ const FFCodec ff_cljr_decoder = {
     .p.type = AVMEDIA_TYPE_VIDEO,
     .p.id = AV_CODEC_ID_CLJR,
     .init = decode_init,
-    .decode = decode_frame,
+    FF_CODEC_DECODE_CB(decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };

@@ -115,7 +115,7 @@ const FFCodec ff_cljr_encoder = {
     .p.id = AV_CODEC_ID_CLJR,
     .p.capabilities = AV_CODEC_CAP_DR1,
     .priv_data_size = sizeof(CLJRContext),
-    .encode2 = encode_frame,
+    FF_CODEC_ENCODE_CB(encode_frame),
     .p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV411P,
                                                  AV_PIX_FMT_NONE },
     .p.priv_class = &cljr_class,

@@ -498,7 +498,7 @@ const FFCodec ff_cllc_decoder = {
     .p.id = AV_CODEC_ID_CLLC,
     .priv_data_size = sizeof(CLLCContext),
     .init = cllc_decode_init,
-    .decode = cllc_decode_frame,
+    FF_CODEC_DECODE_CB(cllc_decode_frame),
     .close = cllc_decode_close,
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,

@@ -169,7 +169,7 @@ const FFCodec ff_comfortnoise_decoder = {
     .p.id = AV_CODEC_ID_COMFORT_NOISE,
     .priv_data_size = sizeof(CNGContext),
     .init = cng_decode_init,
-    .decode = cng_decode_frame,
+    FF_CODEC_DECODE_CB(cng_decode_frame),
     .flush = cng_decode_flush,
     .close = cng_decode_close,
     .p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,

@@ -104,7 +104,7 @@ const FFCodec ff_comfortnoise_encoder = {
     .p.capabilities = AV_CODEC_CAP_DR1,
     .priv_data_size = sizeof(CNGContext),
     .init = cng_encode_init,
-    .encode2 = cng_encode_frame,
+    FF_CODEC_ENCODE_CB(cng_encode_frame),
     .close = cng_encode_close,
     .p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
                                                     AV_SAMPLE_FMT_NONE },

@@ -88,6 +88,27 @@ struct AVCodecContext;
 struct AVSubtitle;
 struct AVPacket;
 
+enum FFCodecType {
+    /* The codec is a decoder using the decode callback;
+     * audio and video codecs only. */
+    FF_CODEC_CB_TYPE_DECODE,
+    /* The codec is a decoder using the decode_sub callback;
+     * subtitle codecs only. */
+    FF_CODEC_CB_TYPE_DECODE_SUB,
+    /* The codec is a decoder using the receive_frame callback;
+     * audio and video codecs only. */
+    FF_CODEC_CB_TYPE_RECEIVE_FRAME,
+    /* The codec is an encoder using the encode callback;
+     * audio and video codecs only. */
+    FF_CODEC_CB_TYPE_ENCODE,
+    /* The codec is an encoder using the encode_sub callback;
+     * subtitle codecs only. */
+    FF_CODEC_CB_TYPE_ENCODE_SUB,
+    /* The codec is an encoder using the receive_packet callback;
+     * audio and video codecs only. */
+    FF_CODEC_CB_TYPE_RECEIVE_PACKET,
+};
+
 typedef struct FFCodec {
     /**
      * The public AVCodec. See codec.h for it.
@@ -97,7 +118,14 @@ typedef struct FFCodec {
     /**
      * Internal codec capabilities FF_CODEC_CAP_*.
      */
-    int caps_internal;
+    unsigned caps_internal:29;
+
+    /**
+     * This field determines the type of the codec (decoder/encoder)
+     * and also the exact callback cb implemented by the codec.
+     * cb_type uses enum FFCodecType values.
+     */
+    unsigned cb_type:3;
 
     int priv_data_size;
     /**
@@ -133,53 +161,69 @@ typedef struct FFCodec {
     void (*init_static_data)(struct FFCodec *codec);
 
     int (*init)(struct AVCodecContext *);
-    int (*encode_sub)(struct AVCodecContext *, uint8_t *buf, int buf_size,
-                      const struct AVSubtitle *sub);
-    /**
-     * Encode data to an AVPacket.
-     *
-     * @param avctx codec context
-     * @param avpkt output AVPacket
-     * @param[in] frame AVFrame containing the raw data to be encoded
-     * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a
-     *             non-empty packet was returned in avpkt.
-     * @return 0 on success, negative error code on failure
-     */
-    int (*encode2)(struct AVCodecContext *avctx, struct AVPacket *avpkt,
-                   const struct AVFrame *frame, int *got_packet_ptr);
-    /**
-     * Decode to an AVFrame.
-     *
-     * @param avctx codec context
-     * @param frame AVFrame for output
-     * @param[out] got_frame_ptr decoder sets to 0 or 1 to indicate that a
-     *             non-empty frame was returned in outdata.
-     * @param[in] avpkt AVPacket containing the data to be decoded
-     * @return amount of bytes read from the packet on success, negative error
-     *         code on failure
-     */
-    int (*decode)(struct AVCodecContext *avctx, struct AVFrame *frame,
-                  int *got_frame_ptr, struct AVPacket *avpkt);
-    /**
-     * Decode subtitle data. Same as decode except that it uses
-     * a struct AVSubtitle structure for output.
-     */
-    int (*decode_sub)(struct AVCodecContext *avctx, struct AVSubtitle *sub,
+    union {
+        /**
+         * Decode to an AVFrame.
+         * cb is in this state if cb_type is FF_CODEC_CB_TYPE_DECODE.
+         *
+         * @param avctx codec context
+         * @param[out] frame AVFrame for output
+         * @param[out] got_frame_ptr decoder sets to 0 or 1 to indicate that
+         *             a non-empty frame was returned in frame.
+         * @param[in] avpkt AVPacket containing the data to be decoded
+         * @return amount of bytes read from the packet on success,
+         *         negative error code on failure
+         */
+        int (*decode)(struct AVCodecContext *avctx, struct AVFrame *frame,
                       int *got_frame_ptr, struct AVPacket *avpkt);
+        /**
+         * Decode subtitle data to an AVSubtitle.
+         * cb is in this state if cb_type is FF_CODEC_CB_TYPE_DECODE_SUB.
+         *
+         * Apart from that this is like the decode callback.
+         */
+        int (*decode_sub)(struct AVCodecContext *avctx, struct AVSubtitle *sub,
+                          int *got_frame_ptr, struct AVPacket *avpkt);
+        /**
+         * Decode API with decoupled packet/frame dataflow.
+         * cb is in this state if cb_type is FF_CODEC_CB_TYPE_RECEIVE_FRAME.
+         *
+         * This function is called to get one output frame. It should call
+         * ff_decode_get_packet() to obtain input data.
+         */
+        int (*receive_frame)(struct AVCodecContext *avctx, struct AVFrame *frame);
+        /**
+         * Encode data to an AVPacket.
+         * cb is in this state if cb_type is FF_CODEC_CB_TYPE_ENCODE
+         *
+         * @param avctx codec context
+         * @param[out] avpkt output AVPacket
+         * @param[in] frame AVFrame containing the input to be encoded
+         * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a
+         *             non-empty packet was returned in avpkt.
+         * @return 0 on success, negative error code on failure
+         */
+        int (*encode)(struct AVCodecContext *avctx, struct AVPacket *avpkt,
+                      const struct AVFrame *frame, int *got_packet_ptr);
+        /**
+         * Encode subtitles to a raw buffer.
+         * cb is in this state if cb_type is FF_CODEC_CB_TYPE_ENCODE_SUB.
+         */
+        int (*encode_sub)(struct AVCodecContext *avctx, uint8_t *buf,
+                          int buf_size, const struct AVSubtitle *sub);
+        /**
+         * Encode API with decoupled frame/packet dataflow.
+         * cb is in this state if cb_type is FF_CODEC_CB_TYPE_RECEIVE_PACKET.
+         *
+         * This function is called to get one output packet.
+         * It should call ff_encode_get_frame() to obtain input data.
+         */
+        int (*receive_packet)(struct AVCodecContext *avctx, struct AVPacket *avpkt);
+    } cb;
     int (*close)(struct AVCodecContext *);
-    /**
-     * Encode API with decoupled frame/packet dataflow. This function is called
-     * to get one output packet. It should call ff_encode_get_frame() to obtain
-     * input data.
-     */
-    int (*receive_packet)(struct AVCodecContext *avctx, struct AVPacket *avpkt);
-    /**
-     * Decode API with decoupled packet/frame dataflow. This function is called
-     * to get one output frame. It should call ff_decode_get_packet() to obtain
-     * input data.
-     */
-    int (*receive_frame)(struct AVCodecContext *avctx, struct AVFrame *frame);
     /**
      * Flush buffers.
      * Will be called when seeking
@@ -207,6 +251,25 @@ typedef struct FFCodec {
     const uint32_t *codec_tags;
 } FFCodec;
 
+#define FF_CODEC_DECODE_CB(func) \
+    .cb_type = FF_CODEC_CB_TYPE_DECODE, \
+    .cb.decode = (func)
+#define FF_CODEC_DECODE_SUB_CB(func) \
+    .cb_type = FF_CODEC_CB_TYPE_DECODE_SUB, \
+    .cb.decode_sub = (func)
+#define FF_CODEC_RECEIVE_FRAME_CB(func) \
+    .cb_type = FF_CODEC_CB_TYPE_RECEIVE_FRAME, \
+    .cb.receive_frame = (func)
+#define FF_CODEC_ENCODE_CB(func) \
+    .cb_type = FF_CODEC_CB_TYPE_ENCODE, \
+    .cb.encode = (func)
+#define FF_CODEC_ENCODE_SUB_CB(func) \
+    .cb_type = FF_CODEC_CB_TYPE_ENCODE_SUB, \
+    .cb.encode_sub = (func)
+#define FF_CODEC_RECEIVE_PACKET_CB(func) \
+    .cb_type = FF_CODEC_CB_TYPE_RECEIVE_PACKET, \
+    .cb.receive_packet = (func)
+
 static av_always_inline const FFCodec *ffcodec(const AVCodec *codec)
 {
     return (const FFCodec*)codec;

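As an illustration of the initializer pattern used in the per-codec hunks below: each FF_CODEC_*_CB() macro expands to two designated initializers, setting cb_type and the matching member of the cb union in one go, so a codec still names exactly one callback. A minimal sketch follows; the "foo" names, FooContext and the callback functions are hypothetical, only the macro and the FFCodec/AVCodec fields come from the header above.

/* Hypothetical decoder definition; "foo" is a placeholder codec. */
const FFCodec ff_foo_decoder = {
    .p.name         = "foo",
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_NONE,       /* placeholder id */
    .priv_data_size = sizeof(FooContext),     /* hypothetical context struct */
    .init           = foo_decode_init,        /* hypothetical callbacks */
    FF_CODEC_DECODE_CB(foo_decode_frame),     /* sets .cb_type and .cb.decode */
    .close          = foo_decode_close,
    .p.capabilities = AV_CODEC_CAP_DR1,
};

Because the union members share storage, cb_type is what records which member is valid, and the macros make it hard to set a callback without also setting the matching cb_type.
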
@@ -1304,7 +1304,7 @@ const FFCodec ff_cook_decoder = {
     .priv_data_size = sizeof(COOKContext),
     .init           = cook_decode_init,
     .close          = cook_decode_close,
-    .decode         = cook_decode_frame,
+    FF_CODEC_DECODE_CB(cook_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .p.sample_fmts  = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
                                                       AV_SAMPLE_FMT_NONE },

@@ -230,7 +230,7 @@ const FFCodec ff_cpia_decoder = {
     .priv_data_size = sizeof(CpiaContext),
     .init           = cpia_decode_init,
     .close          = cpia_decode_end,
-    .decode         = cpia_decode_frame,
+    FF_CODEC_DECODE_CB(cpia_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
 };

@@ -429,7 +429,7 @@ const FFCodec ff_cri_decoder = {
     .p.id           = AV_CODEC_ID_CRI,
     .priv_data_size = sizeof(CRIContext),
     .init           = cri_decode_init,
-    .decode         = cri_decode_frame,
+    FF_CODEC_DECODE_CB(cri_decode_frame),
     .close          = cri_decode_close,
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
     .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,

@@ -783,7 +783,7 @@ static int crystalhd_receive_frame(AVCodecContext *avctx, AVFrame *frame)
     .p.priv_class   = &x##_crystalhd_class, \
     .init           = init, \
     .close          = uninit, \
-    .receive_frame  = crystalhd_receive_frame, \
+    FF_CODEC_RECEIVE_FRAME_CB(crystalhd_receive_frame), \
     .flush          = flush, \
     .bsfs           = bsf_name, \
     .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HARDWARE, \

@@ -175,7 +175,7 @@ const FFCodec ff_cscd_decoder = {
     .priv_data_size = sizeof(CamStudioContext),
     .init           = decode_init,
     .close          = decode_end,
-    .decode         = decode_frame,
+    FF_CODEC_DECODE_CB(decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
 };

@@ -1112,7 +1112,7 @@ static const AVCodecHWConfigInternal *const cuvid_hw_configs[] = {
     .p.priv_class   = &x##_cuvid_class, \
     .init           = cuvid_decode_init, \
     .close          = cuvid_decode_end, \
-    .receive_frame  = cuvid_output_frame, \
+    FF_CODEC_RECEIVE_FRAME_CB(cuvid_output_frame), \
     .flush          = cuvid_flush, \
     .bsfs           = bsf_name, \
     .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HARDWARE, \

@@ -184,7 +184,7 @@ const FFCodec ff_aura_decoder = {
     .p.id           = AV_CODEC_ID_AURA,
     .priv_data_size = sizeof(CyuvDecodeContext),
     .init           = cyuv_decode_init,
-    .decode         = cyuv_decode_frame,
+    FF_CODEC_DECODE_CB(cyuv_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
 };
@@ -198,7 +198,7 @@ const FFCodec ff_cyuv_decoder = {
     .p.id           = AV_CODEC_ID_CYUV,
     .priv_data_size = sizeof(CyuvDecodeContext),
     .init           = cyuv_decode_init,
-    .decode         = cyuv_decode_frame,
+    FF_CODEC_DECODE_CB(cyuv_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
 };

@@ -417,7 +417,7 @@ const FFCodec ff_dca_decoder = {
     .p.id           = AV_CODEC_ID_DTS,
     .priv_data_size = sizeof(DCAContext),
     .init           = dcadec_init,
-    .decode         = dcadec_decode_frame,
+    FF_CODEC_DECODE_CB(dcadec_decode_frame),
     .close          = dcadec_close,
     .flush          = dcadec_flush,
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,

@@ -1249,7 +1249,7 @@ const FFCodec ff_dca_encoder = {
     .priv_data_size = sizeof(DCAEncContext),
     .init           = encode_init,
     .close          = encode_close,
-    .encode2        = encode_frame,
+    FF_CODEC_ENCODE_CB(encode_frame),
     .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
     .p.sample_fmts  = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S32,
                                                      AV_SAMPLE_FMT_NONE },

@@ -753,7 +753,7 @@ const FFCodec ff_dds_decoder = {
     .p.long_name    = NULL_IF_CONFIG_SMALL("DirectDraw Surface image decoder"),
     .p.type         = AVMEDIA_TYPE_VIDEO,
     .p.id           = AV_CODEC_ID_DDS,
-    .decode         = dds_decode,
+    FF_CODEC_DECODE_CB(dds_decode),
     .priv_data_size = sizeof(DDSContext),
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS,
     .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE

@@ -322,7 +322,7 @@ static inline int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame,
     if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) {
         ret = ff_thread_decode_frame(avctx, frame, &got_frame, pkt);
     } else {
-        ret = codec->decode(avctx, frame, &got_frame, pkt);
+        ret = codec->cb.decode(avctx, frame, &got_frame, pkt);
 
         if (!(codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
             frame->pkt_dts = pkt->dts;
@@ -546,8 +546,8 @@ static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
 
     av_assert0(!frame->buf[0]);
 
-    if (codec->receive_frame) {
-        ret = codec->receive_frame(avctx, frame);
+    if (codec->cb_type == FF_CODEC_CB_TYPE_RECEIVE_FRAME) {
+        ret = codec->cb.receive_frame(avctx, frame);
         if (ret != AVERROR(EAGAIN))
             av_packet_unref(avci->last_pkt_props);
     } else
@@ -862,7 +862,7 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
         if (avctx->pkt_timebase.num && avpkt->pts != AV_NOPTS_VALUE)
             sub->pts = av_rescale_q(avpkt->pts,
                                     avctx->pkt_timebase, AV_TIME_BASE_Q);
-        ret = ffcodec(avctx->codec)->decode_sub(avctx, sub, got_sub_ptr, pkt);
+        ret = ffcodec(avctx->codec)->cb.decode_sub(avctx, sub, got_sub_ptr, pkt);
         if (pkt == avci->buffer_pkt) // did we recode?
             av_packet_unref(avci->buffer_pkt);
         if (ret < 0) {

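The decode.c hunks above show the dispatch side of the change: callers no longer test individual function pointers but branch on cb_type and then read the corresponding union member. A hedged sketch of that pattern, not the literal FFmpeg code (call_decoder is a made-up helper name; the FFCodec fields and AVERROR_BUG are real):

static int call_decoder(AVCodecContext *avctx, const FFCodec *codec,
                        AVFrame *frame, int *got_frame, AVPacket *pkt)
{
    /* Select the union member that cb_type says is valid. */
    switch (codec->cb_type) {
    case FF_CODEC_CB_TYPE_RECEIVE_FRAME:
        return codec->cb.receive_frame(avctx, frame);
    case FF_CODEC_CB_TYPE_DECODE:
        return codec->cb.decode(avctx, frame, got_frame, pkt);
    default:
        return AVERROR_BUG; /* encoders and subtitle decoders are dispatched elsewhere */
    }
}
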
@@ -429,7 +429,7 @@ const FFCodec ff_dfa_decoder = {
     .priv_data_size = sizeof(DfaContext),
     .init           = dfa_decode_init,
     .close          = dfa_decode_end,
-    .decode         = dfa_decode_frame,
+    FF_CODEC_DECODE_CB(dfa_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
 };

@@ -131,7 +131,7 @@ const FFCodec ff_dfpwm_decoder = {
     .p.id           = AV_CODEC_ID_DFPWM,
     .priv_data_size = sizeof(DFPWMState),
     .init           = dfpwm_dec_init,
-    .decode         = dfpwm_dec_frame,
+    FF_CODEC_DECODE_CB(dfpwm_dec_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
     .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
 };

@@ -114,7 +114,7 @@ const FFCodec ff_dfpwm_encoder = {
     .p.id           = AV_CODEC_ID_DFPWM,
     .priv_data_size = sizeof(DFPWMState),
     .init           = dfpwm_enc_init,
-    .encode2        = dfpwm_enc_frame,
+    FF_CODEC_ENCODE_CB(dfpwm_enc_frame),
     .p.sample_fmts  = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_NONE},
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_VARIABLE_FRAME_SIZE,
     .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,

@@ -2364,7 +2364,7 @@ const FFCodec ff_dirac_decoder = {
     .priv_data_size = sizeof(DiracContext),
     .init           = dirac_decode_init,
     .close          = dirac_decode_end,
-    .decode         = dirac_decode_frame,
+    FF_CODEC_DECODE_CB(dirac_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_DR1,
     .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
     .flush          = dirac_decode_flush,

@@ -732,7 +732,7 @@ const FFCodec ff_dnxhd_decoder = {
     .priv_data_size = sizeof(DNXHDContext),
     .init           = dnxhd_decode_init,
     .close          = dnxhd_decode_close,
-    .decode         = dnxhd_decode_frame,
+    FF_CODEC_DECODE_CB(dnxhd_decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
                       AV_CODEC_CAP_SLICE_THREADS,
     .p.profiles     = NULL_IF_CONFIG_SMALL(ff_dnxhd_profiles),

@@ -1361,7 +1361,7 @@ const FFCodec ff_dnxhd_encoder = {
                       AV_CODEC_CAP_SLICE_THREADS,
     .priv_data_size = sizeof(DNXHDEncContext),
     .init           = dnxhd_encode_init,
-    .encode2        = dnxhd_encode_picture,
+    FF_CODEC_ENCODE_CB(dnxhd_encode_picture),
     .close          = dnxhd_encode_end,
     .p.pix_fmts     = (const enum AVPixelFormat[]) {
         AV_PIX_FMT_YUV422P,

@@ -1305,7 +1305,7 @@ const FFCodec ff_dolby_e_decoder = {
     .priv_data_size = sizeof(DBEDecodeContext),
     .p.priv_class   = &dolby_e_decoder_class,
     .init           = dolby_e_init,
-    .decode         = dolby_e_decode_frame,
+    FF_CODEC_DECODE_CB(dolby_e_decode_frame),
     .close          = dolby_e_close,
     .flush          = dolby_e_flush,
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,

@@ -418,7 +418,7 @@ const FFCodec ff_ ## name_ ## _decoder = { \
     .p.capabilities = AV_CODEC_CAP_DR1, \
     .priv_data_size = sizeof(DPCMContext), \
     .init           = dpcm_decode_init, \
-    .decode         = dpcm_decode_frame, \
+    FF_CODEC_DECODE_CB(dpcm_decode_frame), \
     .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE, \
 }

@@ -765,6 +765,6 @@ const FFCodec ff_dpx_decoder = {
     .p.long_name    = NULL_IF_CONFIG_SMALL("DPX (Digital Picture Exchange) image"),
     .p.type         = AVMEDIA_TYPE_VIDEO,
     .p.id           = AV_CODEC_ID_DPX,
-    .decode         = decode_frame,
+    FF_CODEC_DECODE_CB(decode_frame),
     .p.capabilities = AV_CODEC_CAP_DR1,
 };

Some files were not shown because too many files have changed in this diff.