pthread_frame: merge the functionality for normal decoder init and init_thread_copy

The current design, where
- proper init is called for the first per-thread context
- the first thread's private data is copied into the private data of all
  the other threads
- a "fixup" function is called for all the other threads to e.g.
  re-allocate dynamically allocated data
is very fragile and hard to follow, so it is abandoned. Instead, the
same init function is used to init each per-thread context. Where
necessary, AVCodecInternal.is_copy can be used to differentiate between
the first thread and the other ones (e.g. for decoding the extradata
just once).
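
In the new scheme a decoder's init looks roughly like the sketch below (FooContext, foo_build_vlcs() and foo_parse_extradata() are hypothetical placeholders, not names from this commit): every per-thread context runs the full init and allocates its own dynamic data, and only the work that must happen exactly once is guarded by is_copy.

#include "avcodec.h"
#include "internal.h"

static av_cold int foo_decode_init(AVCodecContext *avctx)
{
    FooContext *s = avctx->priv_data;
    int ret;

    s->avctx = avctx;

    /* every per-thread context builds its own tables/buffers, so no
     * separate init_thread_copy() "fixup" pass is needed */
    ret = foo_build_vlcs(s);
    if (ret < 0)
        return ret;

    /* work that must run only once (e.g. parsing extradata) is skipped
     * for the worker-thread copies */
    if (!avctx->internal->is_copy && avctx->extradata_size > 0 && avctx->extradata) {
        ret = foo_parse_extradata(avctx, s);
        if (ret < 0)
            return ret;
    }

    return 0;
}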
pull/336/head
Anton Khirnov 8 years ago
parent 665e5b0fba
commit 1f4cf92cfb
  1. doc/multithreading.txt (3)
  2. libavcodec/aic.c (1)
  3. libavcodec/alac.c (10)
  4. libavcodec/avcodec.h (6)
  5. libavcodec/cfhd.c (6)
  6. libavcodec/cllc.c (14)
  7. libavcodec/dnxhddec.c (16)
  8. libavcodec/exr.c (15)
  9. libavcodec/ffv1dec.c (29)
  10. libavcodec/flacdec.c (14)
  11. libavcodec/h264dec.c (38)
  12. libavcodec/hevcdec.c (29)
  13. libavcodec/hqx.c (3)
  14. libavcodec/huffyuvdec.c (32)
  15. libavcodec/lagarith.c (11)
  16. libavcodec/lcldec.c (9)
  17. libavcodec/magicyuv.c (16)
  18. libavcodec/mdec.c (12)
  19. libavcodec/mimic.c (22)
  20. libavcodec/mpeg4videodec.c (10)
  21. libavcodec/pixlet.c (16)
  22. libavcodec/pngdec.c (6)
  23. libavcodec/proresdec2.c (12)
  24. libavcodec/pthread_frame.c (31)
  25. libavcodec/rv30.c (1)
  26. libavcodec/rv34.c (28)
  27. libavcodec/rv34.h (1)
  28. libavcodec/rv40.c (1)
  29. libavcodec/sheervideo.c (14)
  30. libavcodec/takdec.c (8)
  31. libavcodec/tiff.c (1)
  32. libavcodec/tta.c (8)
  33. libavcodec/vble.c (1)
  34. libavcodec/vp3.c (45)
  35. libavcodec/vp8.c (16)
  36. libavcodec/vp9.c (6)
  37. libavcodec/wavpack.c (15)
  38. libavcodec/ylc.c (19)

@ -51,9 +51,6 @@ the decode process starts. Call ff_thread_finish_setup() afterwards. If
some code can't be moved, have update_thread_context() run it in the next
thread.
If the codec allocates writable tables in its init(), add an init_thread_copy()
which re-allocates them for other threads.
Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little
speed gain at this point but it should work.
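
With init_thread_copy() gone, enabling frame threading for a codec comes down to the capability flag plus, where needed, update_thread_context(). A minimal registration sketch, modeled on the decoders touched in this commit (the "foo" names are placeholders):

AVCodec ff_foo_decoder = {
    .name                  = "foo",
    .long_name             = NULL_IF_CONFIG_SMALL("Foo (placeholder)"),
    .type                  = AVMEDIA_TYPE_VIDEO,
    .id                    = AV_CODEC_ID_FOO, /* placeholder id */
    .priv_data_size        = sizeof(FooContext),
    .init                  = foo_decode_init,
    .close                 = foo_decode_close,
    .decode                = foo_decode_frame,
    .update_thread_context = ONLY_IF_THREADS_ENABLED(foo_update_thread_context),
    .capabilities          = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
};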

@ -504,6 +504,5 @@ AVCodec ff_aic_decoder = {
.close = aic_decode_close,
.decode = aic_decode_frame,
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(aic_decode_init),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};

@ -601,15 +601,6 @@ static av_cold int alac_decode_init(AVCodecContext * avctx)
return 0;
}
#if HAVE_THREADS
static int init_thread_copy(AVCodecContext *avctx)
{
ALACContext *alac = avctx->priv_data;
alac->avctx = avctx;
return allocate_buffers(alac);
}
#endif
static const AVOption options[] = {
{ "extra_bits_bug", "Force non-standard decoding process",
offsetof(ALACContext, extra_bit_bug), AV_OPT_TYPE_BOOL, { .i64 = 0 },
@ -633,7 +624,6 @@ AVCodec ff_alac_decoder = {
.init = alac_decode_init,
.close = alac_decode_close,
.decode = alac_decode_frame,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy),
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.priv_class = &alac_class
};

@ -2634,12 +2634,6 @@ typedef struct AVCodec {
* @name Frame-level threading support functions
* @{
*/
/**
* If defined, called on thread contexts when they are created.
* If the codec allocates writable tables in init(), re-allocate them here.
* priv_data will be set to a copy of the original.
*/
int (*init_thread_copy)(AVCodecContext *);
/**
* Copy necessary context variables from a previous thread context to the current one.
* If not defined, the next thread will start automatically; otherwise, the codec

@ -1039,10 +1039,8 @@ static av_cold int cfhd_close(AVCodecContext *avctx)
free_buffers(s);
if (!avctx->internal->is_copy) {
ff_free_vlc(&s->vlc_9);
ff_free_vlc(&s->vlc_18);
}
ff_free_vlc(&s->vlc_9);
ff_free_vlc(&s->vlc_18);
return 0;
}

@ -483,19 +483,6 @@ static int cllc_decode_frame(AVCodecContext *avctx, void *data,
return avpkt->size;
}
#if HAVE_THREADS
static int cllc_init_thread_copy(AVCodecContext *avctx)
{
CLLCContext *ctx = avctx->priv_data;
ctx->avctx = avctx;
ctx->swapped_buf = NULL;
ctx->swapped_buf_size = 0;
return 0;
}
#endif
static av_cold int cllc_decode_close(AVCodecContext *avctx)
{
CLLCContext *ctx = avctx->priv_data;
@ -526,7 +513,6 @@ AVCodec ff_cllc_decoder = {
.id = AV_CODEC_ID_CLLC,
.priv_data_size = sizeof(CLLCContext),
.init = cllc_decode_init,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(cllc_init_thread_copy),
.decode = cllc_decode_frame,
.close = cllc_decode_close,
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,

@ -144,21 +144,6 @@ static int dnxhd_init_vlc(DNXHDContext *ctx, uint32_t cid, int bitdepth)
return 0;
}
static av_cold int dnxhd_decode_init_thread_copy(AVCodecContext *avctx)
{
DNXHDContext *ctx = avctx->priv_data;
ctx->avctx = avctx;
// make sure VLC tables will be loaded when cid is parsed
ctx->cid = -1;
ctx->rows = av_mallocz_array(avctx->thread_count, sizeof(RowContext));
if (!ctx->rows)
return AVERROR(ENOMEM);
return 0;
}
static int dnxhd_get_profile(int cid)
{
switch(cid) {
@ -740,6 +725,5 @@ AVCodec ff_dnxhd_decoder = {
.decode = dnxhd_decode_frame,
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
AV_CODEC_CAP_SLICE_THREADS,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(dnxhd_decode_init_thread_copy),
.profiles = NULL_IF_CONFIG_SMALL(ff_dnxhd_profiles),
};

@ -1863,20 +1863,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
return 0;
}
#if HAVE_THREADS
static int decode_init_thread_copy(AVCodecContext *avctx)
{
EXRContext *s = avctx->priv_data;
// allocate thread data, used for non EXR_RAW compression types
s->thread_data = av_mallocz_array(avctx->thread_count, sizeof(EXRThreadData));
if (!s->thread_data)
return AVERROR_INVALIDDATA;
return 0;
}
#endif
static av_cold int decode_end(AVCodecContext *avctx)
{
EXRContext *s = avctx->priv_data;
@ -1956,7 +1942,6 @@ AVCodec ff_exr_decoder = {
.id = AV_CODEC_ID_EXR,
.priv_data_size = sizeof(EXRContext),
.init = decode_init,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
.close = decode_end,
.decode = decode_frame,
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |

@ -977,34 +977,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
return buf_size;
}
#if HAVE_THREADS
static int init_thread_copy(AVCodecContext *avctx)
{
FFV1Context *f = avctx->priv_data;
int i, ret;
f->picture.f = NULL;
f->last_picture.f = NULL;
f->sample_buffer = NULL;
f->max_slice_count = 0;
f->slice_count = 0;
for (i = 0; i < f->quant_table_count; i++) {
av_assert0(f->version > 1);
f->initial_states[i] = av_memdup(f->initial_states[i],
f->context_count[i] * sizeof(*f->initial_states[i]));
}
f->picture.f = av_frame_alloc();
f->last_picture.f = av_frame_alloc();
if ((ret = ff_ffv1_init_slice_contexts(f)) < 0)
return ret;
return 0;
}
#endif
static void copy_fields(FFV1Context *fsdst, FFV1Context *fssrc, FFV1Context *fsrc)
{
fsdst->version = fsrc->version;
@ -1088,7 +1060,6 @@ AVCodec ff_ffv1_decoder = {
.init = decode_init,
.close = ff_ffv1_close,
.decode = decode_frame,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy),
.update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
.capabilities = AV_CODEC_CAP_DR1 /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/ |
AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS,

@ -639,19 +639,6 @@ static int flac_decode_frame(AVCodecContext *avctx, void *data,
return bytes_read;
}
#if HAVE_THREADS
static int init_thread_copy(AVCodecContext *avctx)
{
FLACContext *s = avctx->priv_data;
s->decoded_buffer = NULL;
s->decoded_buffer_size = 0;
s->avctx = avctx;
if (s->flac_stream_info.max_blocksize)
return allocate_buffers(s);
return 0;
}
#endif
static av_cold int flac_decode_close(AVCodecContext *avctx)
{
FLACContext *s = avctx->priv_data;
@ -682,7 +669,6 @@ AVCodec ff_flac_decoder = {
.init = flac_decode_init,
.close = flac_decode_close,
.decode = flac_decode_frame,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy),
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_S16P,

@ -409,13 +409,15 @@ static av_cold int h264_decode_init(AVCodecContext *avctx)
}
avctx->ticks_per_frame = 2;
if (avctx->extradata_size > 0 && avctx->extradata) {
ret = ff_h264_decode_extradata(avctx->extradata, avctx->extradata_size,
&h->ps, &h->is_avc, &h->nal_length_size,
avctx->err_recognition, avctx);
if (ret < 0) {
h264_decode_end(avctx);
return ret;
if (!avctx->internal->is_copy) {
if (avctx->extradata_size > 0 && avctx->extradata) {
ret = ff_h264_decode_extradata(avctx->extradata, avctx->extradata_size,
&h->ps, &h->is_avc, &h->nal_length_size,
avctx->err_recognition, avctx);
if (ret < 0) {
h264_decode_end(avctx);
return ret;
}
}
}
@ -438,27 +440,6 @@ static av_cold int h264_decode_init(AVCodecContext *avctx)
return 0;
}
#if HAVE_THREADS
static int decode_init_thread_copy(AVCodecContext *avctx)
{
H264Context *h = avctx->priv_data;
int ret;
if (!avctx->internal->is_copy)
return 0;
memset(h, 0, sizeof(*h));
ret = h264_init_context(avctx, h);
if (ret < 0)
return ret;
h->context_initialized = 0;
return 0;
}
#endif
/**
* instantaneous decoder refresh.
*/
@ -1081,7 +1062,6 @@ AVCodec ff_h264_decoder = {
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_EXPORTS_CROPPING |
FF_CODEC_CAP_ALLOCATE_PROGRESS,
.flush = flush_dpb,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
.update_thread_context = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context),
.profiles = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
.priv_class = &h264_class,

@ -3506,11 +3506,13 @@ static av_cold int hevc_decode_init(AVCodecContext *avctx)
else
s->threads_number = 1;
if (avctx->extradata_size > 0 && avctx->extradata) {
ret = hevc_decode_extradata(s, avctx->extradata, avctx->extradata_size, 1);
if (ret < 0) {
hevc_decode_free(avctx);
return ret;
if (!avctx->internal->is_copy) {
if (avctx->extradata_size > 0 && avctx->extradata) {
ret = hevc_decode_extradata(s, avctx->extradata, avctx->extradata_size, 1);
if (ret < 0) {
hevc_decode_free(avctx);
return ret;
}
}
}
@ -3522,22 +3524,6 @@ static av_cold int hevc_decode_init(AVCodecContext *avctx)
return 0;
}
#if HAVE_THREADS
static av_cold int hevc_init_thread_copy(AVCodecContext *avctx)
{
HEVCContext *s = avctx->priv_data;
int ret;
memset(s, 0, sizeof(*s));
ret = hevc_init_context(avctx);
if (ret < 0)
return ret;
return 0;
}
#endif
static void hevc_decode_flush(AVCodecContext *avctx)
{
HEVCContext *s = avctx->priv_data;
@ -3577,7 +3563,6 @@ AVCodec ff_hevc_decoder = {
.decode = hevc_decode_frame,
.flush = hevc_decode_flush,
.update_thread_context = ONLY_IF_THREADS_ENABLED(hevc_update_thread_context),
.init_thread_copy = ONLY_IF_THREADS_ENABLED(hevc_init_thread_copy),
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_EXPORTS_CROPPING |

@ -520,9 +520,6 @@ static av_cold int hqx_decode_close(AVCodecContext *avctx)
int i;
HQXContext *ctx = avctx->priv_data;
if (avctx->internal->is_copy)
return 0;
ff_free_vlc(&ctx->cbp_vlc);
for (i = 0; i < 3; i++) {
ff_free_vlc(&ctx->dc_vlc[i]);

@ -570,35 +570,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
return ret;
}
#if HAVE_THREADS
static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
{
HYuvContext *s = avctx->priv_data;
int i, ret;
s->avctx = avctx;
if ((ret = ff_huffyuv_alloc_temp(s)) < 0) {
ff_huffyuv_common_end(s);
return ret;
}
for (i = 0; i < 8; i++)
s->vlc[i].table = NULL;
if (s->version >= 2) {
if ((ret = read_huffman_tables(s, avctx->extradata + 4,
avctx->extradata_size)) < 0)
return ret;
} else {
if ((ret = read_old_huffman_tables(s)) < 0)
return ret;
}
return 0;
}
#endif
/** Subset of GET_VLC for use in hand-roller VLC code */
#define VLC_INTERN(dst, table, gb, name, bits, max_depth) \
code = table[index][0]; \
@ -1302,7 +1273,6 @@ AVCodec ff_huffyuv_decoder = {
.decode = decode_frame,
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
AV_CODEC_CAP_FRAME_THREADS,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
};
#if CONFIG_FFVHUFF_DECODER
@ -1317,7 +1287,6 @@ AVCodec ff_ffvhuff_decoder = {
.decode = decode_frame,
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
AV_CODEC_CAP_FRAME_THREADS,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
};
#endif /* CONFIG_FFVHUFF_DECODER */
@ -1333,6 +1302,5 @@ AVCodec ff_hymt_decoder = {
.decode = decode_frame,
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
AV_CODEC_CAP_FRAME_THREADS,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
};
#endif /* CONFIG_HYMT_DECODER */

@ -712,16 +712,6 @@ static av_cold int lag_decode_init(AVCodecContext *avctx)
return 0;
}
#if HAVE_THREADS
static av_cold int lag_decode_init_thread_copy(AVCodecContext *avctx)
{
LagarithContext *l = avctx->priv_data;
l->avctx = avctx;
return 0;
}
#endif
AVCodec ff_lagarith_decoder = {
.name = "lagarith",
.long_name = NULL_IF_CONFIG_SMALL("Lagarith lossless"),
@ -729,7 +719,6 @@ AVCodec ff_lagarith_decoder = {
.id = AV_CODEC_ID_LAGARITH,
.priv_data_size = sizeof(LagarithContext),
.init = lag_decode_init,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(lag_decode_init_thread_copy),
.decode = lag_decode_frame,
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
};

@ -622,13 +622,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
return 0;
}
#if HAVE_THREADS
static int init_thread_copy(AVCodecContext *avctx)
{
return decode_init(avctx);
}
#endif
static av_cold int decode_end(AVCodecContext *avctx)
{
LclDecContext * const c = avctx->priv_data;
@ -650,7 +643,6 @@ AVCodec ff_mszh_decoder = {
.id = AV_CODEC_ID_MSZH,
.priv_data_size = sizeof(LclDecContext),
.init = decode_init,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy),
.close = decode_end,
.decode = decode_frame,
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
@ -666,7 +658,6 @@ AVCodec ff_zlib_decoder = {
.id = AV_CODEC_ID_ZLIB,
.priv_data_size = sizeof(LclDecContext),
.init = decode_init,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy),
.close = decode_end,
.decode = decode_frame,
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,

@ -749,21 +749,6 @@ static int magy_decode_frame(AVCodecContext *avctx, void *data,
return avpkt->size;
}
#if HAVE_THREADS
static int magy_init_thread_copy(AVCodecContext *avctx)
{
MagicYUVContext *s = avctx->priv_data;
int i;
for (i = 0; i < FF_ARRAY_ELEMS(s->slices); i++) {
s->slices[i] = NULL;
s->slices_size[i] = 0;
}
return 0;
}
#endif
static av_cold int magy_decode_init(AVCodecContext *avctx)
{
MagicYUVContext *s = avctx->priv_data;
@ -792,7 +777,6 @@ AVCodec ff_magicyuv_decoder = {
.id = AV_CODEC_ID_MAGICYUV,
.priv_data_size = sizeof(MagicYUVContext),
.init = magy_decode_init,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(magy_init_thread_copy),
.close = magy_decode_end,
.decode = magy_decode_frame,
.capabilities = AV_CODEC_CAP_DR1 |

@ -240,17 +240,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
return 0;
}
#if HAVE_THREADS
static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
{
MDECContext * const a = avctx->priv_data;
a->avctx = avctx;
return 0;
}
#endif
static av_cold int decode_end(AVCodecContext *avctx)
{
MDECContext * const a = avctx->priv_data;
@ -271,5 +260,4 @@ AVCodec ff_mdec_decoder = {
.close = decode_end,
.decode = decode_frame,
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy)
};

@ -128,8 +128,7 @@ static av_cold int mimic_decode_end(AVCodecContext *avctx)
av_frame_free(&ctx->frames[i].f);
}
if (!avctx->internal->is_copy)
ff_free_vlc(&ctx->vlc);
ff_free_vlc(&ctx->vlc);
return 0;
}
@ -449,24 +448,6 @@ static int mimic_decode_frame(AVCodecContext *avctx, void *data,
return buf_size;
}
#if HAVE_THREADS
static av_cold int mimic_init_thread_copy(AVCodecContext *avctx)
{
MimicContext *ctx = avctx->priv_data;
int i;
for (i = 0; i < FF_ARRAY_ELEMS(ctx->frames); i++) {
ctx->frames[i].f = av_frame_alloc();
if (!ctx->frames[i].f) {
mimic_decode_end(avctx);
return AVERROR(ENOMEM);
}
}
return 0;
}
#endif
AVCodec ff_mimic_decoder = {
.name = "mimic",
.long_name = NULL_IF_CONFIG_SMALL("Mimic"),
@ -478,6 +459,5 @@ AVCodec ff_mimic_decoder = {
.decode = mimic_decode_frame,
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.update_thread_context = ONLY_IF_THREADS_ENABLED(mimic_decode_update_thread_context),
.init_thread_copy = ONLY_IF_THREADS_ENABLED(mimic_init_thread_copy),
.caps_internal = FF_CODEC_CAP_ALLOCATE_PROGRESS,
};

@ -3559,13 +3559,11 @@ static av_cold int decode_end(AVCodecContext *avctx)
Mpeg4DecContext *ctx = avctx->priv_data;
int i;
if (!avctx->internal->is_copy) {
for (i = 0; i < 12; i++)
ff_free_vlc(&ctx->studio_intra_tab[i]);
for (i = 0; i < 12; i++)
ff_free_vlc(&ctx->studio_intra_tab[i]);
ff_free_vlc(&ctx->studio_luma_dc);
ff_free_vlc(&ctx->studio_chroma_dc);
}
ff_free_vlc(&ctx->studio_luma_dc);
ff_free_vlc(&ctx->studio_chroma_dc);
return ff_h263_decode_end(avctx);
}

@ -675,28 +675,12 @@ static int pixlet_decode_frame(AVCodecContext *avctx, void *data,
return pktsize;
}
#if HAVE_THREADS
static int pixlet_init_thread_copy(AVCodecContext *avctx)
{
PixletContext *ctx = avctx->priv_data;
ctx->filter[0] = NULL;
ctx->filter[1] = NULL;
ctx->prediction = NULL;
ctx->w = 0;
ctx->h = 0;
return 0;
}
#endif /* HAVE_THREADS */
AVCodec ff_pixlet_decoder = {
.name = "pixlet",
.long_name = NULL_IF_CONFIG_SMALL("Apple Pixlet"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_PIXLET,
.init = pixlet_init,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(pixlet_init_thread_copy),
.close = pixlet_close,
.decode = pixlet_decode_frame,
.priv_data_size = sizeof(PixletContext),

@ -1771,9 +1771,7 @@ static av_cold int png_dec_init(AVCodecContext *avctx)
return AVERROR(ENOMEM);
}
if (!avctx->internal->is_copy) {
ff_pngdsp_init(&s->dsp);
}
ff_pngdsp_init(&s->dsp);
return 0;
}
@ -1808,7 +1806,6 @@ AVCodec ff_apng_decoder = {
.init = png_dec_init,
.close = png_dec_end,
.decode = decode_frame_apng,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(png_dec_init),
.update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
@ -1826,7 +1823,6 @@ AVCodec ff_png_decoder = {
.init = png_dec_init,
.close = png_dec_end,
.decode = decode_frame_png,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(png_dec_init),
.update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
.caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM | FF_CODEC_CAP_INIT_THREADSAFE |

@ -807,17 +807,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
return avpkt->size;
}
#if HAVE_THREADS
static int decode_init_thread_copy(AVCodecContext *avctx)
{
ProresContext *ctx = avctx->priv_data;
ctx->slices = NULL;
return 0;
}
#endif
static av_cold int decode_close(AVCodecContext *avctx)
{
ProresContext *ctx = avctx->priv_data;
@ -834,7 +823,6 @@ AVCodec ff_prores_decoder = {
.id = AV_CODEC_ID_PRORES,
.priv_data_size = sizeof(ProresContext),
.init = decode_init,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
.close = decode_close,
.decode = decode_frame,
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,

@ -703,7 +703,10 @@ void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
av_freep(&p->released_buffers);
if (i && p->avctx) {
if (codec->priv_class)
av_opt_free(p->avctx->priv_data);
av_freep(&p->avctx->priv_data);
av_freep(&p->avctx->slice_offset);
}
@ -809,28 +812,30 @@ int ff_frame_thread_init(AVCodecContext *avctx)
copy->internal->thread_ctx = p;
copy->internal->last_pkt_props = &p->avpkt;
if (!i) {
src = copy;
if (codec->init)
err = codec->init(copy);
update_context_from_thread(avctx, copy, 1);
} else {
copy->priv_data = av_malloc(codec->priv_data_size);
if (i) {
copy->priv_data = av_mallocz(codec->priv_data_size);
if (!copy->priv_data) {
err = AVERROR(ENOMEM);
goto error;
}
memcpy(copy->priv_data, src->priv_data, codec->priv_data_size);
copy->internal->is_copy = 1;
if (codec->init_thread_copy)
err = codec->init_thread_copy(copy);
if (codec->priv_class) {
*(const AVClass **)copy->priv_data = codec->priv_class;
err = av_opt_copy(copy->priv_data, src->priv_data);
if (err < 0)
goto error;
}
copy->internal->is_copy = 1;
}
if (codec->init)
err = codec->init(copy);
if (err) goto error;
if (!i)
update_context_from_thread(avctx, copy, 1);
atomic_init(&p->debug_threads, (copy->debug & FF_DEBUG_THREADS) != 0);
err = AVERROR(pthread_create(&p->thread, NULL, frame_worker_thread, p));
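
Taken together, the per-thread setup in ff_frame_thread_init() now follows roughly this shape (a sketch assembled from the hunk above, with explanatory comments added):

if (i) {
    /* worker copies get freshly zeroed private data */
    copy->priv_data = av_mallocz(codec->priv_data_size);
    if (!copy->priv_data) {
        err = AVERROR(ENOMEM);
        goto error;
    }

    /* options are carried over from the first context instead of a raw
     * memcpy of priv_data */
    if (codec->priv_class) {
        *(const AVClass **)copy->priv_data = codec->priv_class;
        err = av_opt_copy(copy->priv_data, src->priv_data);
        if (err < 0)
            goto error;
    }
    copy->internal->is_copy = 1;
}

/* the same init runs for the first context and for every copy */
if (codec->init)
    err = codec->init(copy);
if (err)
    goto error;

if (!i)
    update_context_from_thread(avctx, copy, 1);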

@ -304,7 +304,6 @@ AVCodec ff_rv30_decoder = {
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_NONE
},
.init_thread_copy = ONLY_IF_THREADS_ENABLED(ff_rv34_decode_init_thread_copy),
.update_thread_context = ONLY_IF_THREADS_ENABLED(ff_rv34_decode_update_thread_context),
.caps_internal = FF_CODEC_CAP_ALLOCATE_PROGRESS,
};

@ -1529,34 +1529,6 @@ av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
return 0;
}
int ff_rv34_decode_init_thread_copy(AVCodecContext *avctx)
{
int err;
RV34DecContext *r = avctx->priv_data;
r->s.avctx = avctx;
if (avctx->internal->is_copy) {
r->tmp_b_block_base = NULL;
r->cbp_chroma = NULL;
r->cbp_luma = NULL;
r->deblock_coefs = NULL;
r->intra_types_hist = NULL;
r->mb_type = NULL;
ff_mpv_idct_init(&r->s);
if ((err = ff_mpv_common_init(&r->s)) < 0)
return err;
if ((err = rv34_decoder_alloc(r)) < 0) {
ff_mpv_common_end(&r->s);
return err;
}
}
return 0;
}
int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
RV34DecContext *r = dst->priv_data, *r1 = src->priv_data;

@ -136,7 +136,6 @@ int ff_rv34_get_start_offset(GetBitContext *gb, int blocks);
int ff_rv34_decode_init(AVCodecContext *avctx);
int ff_rv34_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt);
int ff_rv34_decode_end(AVCodecContext *avctx);
int ff_rv34_decode_init_thread_copy(AVCodecContext *avctx);
int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src);
#endif /* AVCODEC_RV34_H */

@ -583,7 +583,6 @@ AVCodec ff_rv40_decoder = {
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_NONE
},
.init_thread_copy = ONLY_IF_THREADS_ENABLED(ff_rv34_decode_init_thread_copy),
.update_thread_context = ONLY_IF_THREADS_ENABLED(ff_rv34_decode_update_thread_context),
.caps_internal = FF_CODEC_CAP_ALLOCATE_PROGRESS,
};

@ -2063,19 +2063,6 @@ static int decode_frame(AVCodecContext *avctx,
return avpkt->size;
}
#if HAVE_THREADS
static int decode_init_thread_copy(AVCodecContext *avctx)
{
SheerVideoContext *s = avctx->priv_data;
s->format = 0;
memset(&s->vlc[0], 0, sizeof(s->vlc[0]));
memset(&s->vlc[1], 0, sizeof(s->vlc[1]));
return 0;
}
#endif
static av_cold int decode_end(AVCodecContext *avctx)
{
SheerVideoContext *s = avctx->priv_data;
@ -2092,7 +2079,6 @@ AVCodec ff_sheervideo_decoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_SHEERVIDEO,
.priv_data_size = sizeof(SheerVideoContext),
.init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
.close = decode_end,
.decode = decode_frame,
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,

@ -915,13 +915,6 @@ static int tak_decode_frame(AVCodecContext *avctx, void *data,
}
#if HAVE_THREADS
static int init_thread_copy(AVCodecContext *avctx)
{
TAKDecContext *s = avctx->priv_data;
s->avctx = avctx;
return 0;
}
static int update_thread_context(AVCodecContext *dst,
const AVCodecContext *src)
{
@ -953,7 +946,6 @@ AVCodec ff_tak_decoder = {
.init = tak_decode_init,
.close = tak_decode_close,
.decode = tak_decode_frame,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy),
.update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,

@ -2165,7 +2165,6 @@ AVCodec ff_tiff_decoder = {
.init = tiff_init,
.close = tiff_end,
.decode = decode_frame,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(tiff_init),
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
.priv_class = &tiff_decoder_class,

@ -389,13 +389,6 @@ error:
return ret;
}
static int init_thread_copy(AVCodecContext *avctx)
{
TTAContext *s = avctx->priv_data;
s->avctx = avctx;
return allocate_buffers(avctx);
}
static av_cold int tta_decode_close(AVCodecContext *avctx) {
TTAContext *s = avctx->priv_data;
@ -430,7 +423,6 @@ AVCodec ff_tta_decoder = {
.init = tta_decode_init,
.close = tta_decode_close,
.decode = tta_decode_frame,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy),
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.priv_class = &tta_decoder_class,
};

@ -214,6 +214,5 @@ AVCodec ff_vble_decoder = {
.close = vble_decode_close,
.decode = vble_decode_frame,
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(vble_decode_init),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};

@ -347,9 +347,6 @@ static av_cold int vp3_decode_end(AVCodecContext *avctx)
av_frame_free(&s->last_frame.f);
av_frame_free(&s->golden_frame.f);
if (avctx->internal->is_copy)
return 0;
for (i = 0; i < 16; i++) {
ff_free_vlc(&s->dc_vlc[i]);
ff_free_vlc(&s->ac_vlc_1[i]);
@ -2601,23 +2598,6 @@ static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *
}
if (s != s1) {
if (!s->current_frame.f)
return AVERROR(ENOMEM);
// init tables if the first frame hasn't been decoded
if (!s->current_frame.f->data[0]) {
int y_fragment_count, c_fragment_count;
s->avctx = dst;
err = allocate_tables(dst);
if (err)
return err;
y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
memcpy(s->motion_val[0], s1->motion_val[0],
y_fragment_count * sizeof(*s->motion_val[0]));
memcpy(s->motion_val[1], s1->motion_val[1],
c_fragment_count * sizeof(*s->motion_val[1]));
}
// copy previous frame data
if ((err = ref_frames(s, s1)) < 0)
return err;
@ -2927,28 +2907,6 @@ static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb)
return 0;
}
#if HAVE_THREADS
static int vp3_init_thread_copy(AVCodecContext *avctx)
{
Vp3DecodeContext *s = avctx->priv_data;
s->superblock_coding = NULL;
s->all_fragments = NULL;
s->coded_fragment_list[0] = NULL;
s-> kf_coded_fragment_list= NULL;
s->nkf_coded_fragment_list= NULL;
s->dct_tokens_base = NULL;
s->superblock_fragments = NULL;
s->macroblock_coding = NULL;
s->motion_val[0] = NULL;
s->motion_val[1] = NULL;
s->edge_emu_buffer = NULL;
s->dc_pred_row = NULL;
return init_frames(s);
}
#endif
#if CONFIG_THEORA_DECODER
static const enum AVPixelFormat theora_pix_fmts[4] = {
AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P
@ -3262,7 +3220,6 @@ AVCodec ff_theora_decoder = {
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
AV_CODEC_CAP_FRAME_THREADS,
.flush = vp3_decode_flush,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
.update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context),
.caps_internal = FF_CODEC_CAP_EXPORTS_CROPPING | FF_CODEC_CAP_ALLOCATE_PROGRESS,
};
@ -3280,7 +3237,6 @@ AVCodec ff_vp3_decoder = {
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
AV_CODEC_CAP_FRAME_THREADS,
.flush = vp3_decode_flush,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
.update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context),
.caps_internal = FF_CODEC_CAP_ALLOCATE_PROGRESS,
};
@ -3298,7 +3254,6 @@ AVCodec ff_vp4_decoder = {
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
AV_CODEC_CAP_FRAME_THREADS,
.flush = vp3_decode_flush,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
.update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context),
.caps_internal = FF_CODEC_CAP_ALLOCATE_PROGRESS,
};

@ -2894,21 +2894,6 @@ av_cold int ff_vp8_decode_init(AVCodecContext *avctx)
#if CONFIG_VP8_DECODER
#if HAVE_THREADS
static av_cold int vp8_decode_init_thread_copy(AVCodecContext *avctx)
{
VP8Context *s = avctx->priv_data;
int ret;
s->avctx = avctx;
if ((ret = vp8_init_frames(s)) < 0) {
ff_vp8_decode_free(avctx);
return ret;
}
return 0;
}
#define REBASE(pic) ((pic) ? (pic) - &s_src->frames[0] + &s->frames[0] : NULL)
static int vp8_decode_update_thread_context(AVCodecContext *dst,
@ -2976,7 +2961,6 @@ AVCodec ff_vp8_decoder = {
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
AV_CODEC_CAP_SLICE_THREADS,
.flush = vp8_decode_flush,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(vp8_decode_init_thread_copy),
.update_thread_context = ONLY_IF_THREADS_ENABLED(vp8_decode_update_thread_context),
.hw_configs = (const AVCodecHWConfigInternal*[]) {
#if CONFIG_VP8_VAAPI_HWACCEL

@ -1748,11 +1748,6 @@ static av_cold int vp9_decode_init(AVCodecContext *avctx)
}
#if HAVE_THREADS
static av_cold int vp9_decode_init_thread_copy(AVCodecContext *avctx)
{
return init_frames(avctx);
}
static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
int i, ret;
@ -1812,7 +1807,6 @@ AVCodec ff_vp9_decoder = {
.caps_internal = FF_CODEC_CAP_SLICE_THREAD_HAS_MF |
FF_CODEC_CAP_ALLOCATE_PROGRESS,
.flush = vp9_decode_flush,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(vp9_decode_init_thread_copy),
.update_thread_context = ONLY_IF_THREADS_ENABLED(vp9_decode_update_thread_context),
.profiles = NULL_IF_CONFIG_SMALL(ff_vp9_profiles),
.bsfs = "vp9_superframe_split",

@ -1009,20 +1009,6 @@ static int wv_dsd_reset(WavpackContext *s, int channels)
}
#if HAVE_THREADS
static int init_thread_copy(AVCodecContext *avctx)
{
WavpackContext *s = avctx->priv_data;
s->avctx = avctx;
s->curr_frame.f = av_frame_alloc();
s->prev_frame.f = av_frame_alloc();
if (!s->curr_frame.f || !s->prev_frame.f)
return AVERROR(ENOMEM);
return 0;
}
static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
WavpackContext *fsrc = src->priv_data;
@ -1714,7 +1700,6 @@ AVCodec ff_wavpack_decoder = {
.close = wavpack_decode_end,
.decode = wavpack_decode_frame,
.flush = wavpack_decode_flush,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy),
.update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
AV_CODEC_CAP_SLICE_THREADS,

@ -453,24 +453,6 @@ static int decode_frame(AVCodecContext *avctx,
return avpkt->size;
}
#if HAVE_THREADS
static int init_thread_copy(AVCodecContext *avctx)
{
YLCContext *s = avctx->priv_data;
memset(&s->vlc[0], 0, sizeof(VLC));
memset(&s->vlc[1], 0, sizeof(VLC));
memset(&s->vlc[2], 0, sizeof(VLC));
memset(&s->vlc[3], 0, sizeof(VLC));
s->table_bits = NULL;
s->table_bits_size = 0;
s->bitstream_bits = NULL;
s->bitstream_bits_size = 0;
return 0;
}
#endif
static av_cold int decode_end(AVCodecContext *avctx)
{
YLCContext *s = avctx->priv_data;
@ -494,7 +476,6 @@ AVCodec ff_ylc_decoder = {
.id = AV_CODEC_ID_YLC,
.priv_data_size = sizeof(YLCContext),
.init = decode_init,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy),
.close = decode_end,
.decode = decode_frame,
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
