avcodec/mpegpicture: Make MPVPicture refcounted

Up until now, an initialized MpegEncContext had an array of
MPVPictures (way more than were ever needed) and the MPVPicture*
contained in the MPVWorkPictures as well as in the input_picture
and reordered_input_picture arrays (for the encoder) pointed
into this array. Several of these pointers could point to the
same slot, and because there was no reference counting involved,
one had to check for aliasing before unreferencing.
Furthermore, given that these were not ownership pointers,
they were often simply reset without unreferencing the slot
(as happened e.g. in the RV30 and RV40 decoders), or they were
moved without resetting the src pointer (as happened in the
encoders, where the entries in the input_picture and
reordered_input_picture arrays were not reset).
Actually releasing these pictures was instead performed by
looping over the whole array and checking which of the entries
needed to be kept. Given that the array had far too many slots
(36), this meant that more than 30 MPVPictures were unnecessarily
unreferenced in every ff_mpv_frame_start(); something similar
happened in the encoder.
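
For illustration, this is the aliasing check and full-array sweep
that the old code needed on every frame (a condensed excerpt of the
code removed from ff_mpv_frame_start() below):

    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_pic.ptr &&
        s->last_pic.ptr != s->next_pic.ptr &&       // aliasing check before unref
        s->last_pic.ptr->f->buf[0])
        ff_mpeg_unref_picture(s->last_pic.ptr);

    /* release non reference/forgotten frames */
    for (int i = 0; i < MAX_PICTURE_COUNT; i++)     // MAX_PICTURE_COUNT was 36
        if (!s->picture[i].reference ||
            (&s->picture[i] != s->last_pic.ptr &&
             &s->picture[i] != s->next_pic.ptr))
            ff_mpeg_unref_picture(&s->picture[i]);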

This commit changes this by making the MPVPictures refcounted
via the RefStruct API. The MPVPictures themselves are part of a
pool, so that this does not entail constant allocations; in fact,
the number of allocations actually goes down, because the earlier
code used such a large array of MPVPictures (36 entries) and
allocated an AVFrame for every one of them on every
ff_mpv_common_init(). The pool is only freed when closing the
codec, so that reinitializations don't lead to new allocations
(this also avoids having to sync the pool in update_thread_context).
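
A minimal sketch of the resulting lifecycle, using only the helpers
added in this commit (error paths shortened):

    FFRefStructPool *pool = ff_mpv_alloc_pic_pool(); // once, at codec init
    MPVPicture *pic = ff_refstruct_pool_get(pool);   // per picture; reuses a pool entry
    if (!pic)
        return AVERROR(ENOMEM);
    /* ... decode/encode into pic ... */
    ff_refstruct_unref(&pic);         // drop the reference; entry returns to the pool
    ff_refstruct_pool_uninit(&pool);  // only when closing the codec

The pool's callbacks (mpv_pic_init(), mpv_pic_reset() and
mpv_pic_free() below) allocate the AVFrame shell once per entry and
clean the entry each time its last reference is dropped.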

Making MPVPictures refcounted also has another key benefit:
it makes it possible to share them directly across threads
(when using frame-threaded decoding), eliminating ugly code
with underlying av_frame_ref()'s; sharing these pictures
can no longer fail.
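
Concretely, propagating a picture into another thread's context now
amounts to taking another reference; this is the new
ff_mpv_replace_picture() from the diff below:

    void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
    {
        av_assert1(dst != src);
        ff_refstruct_replace(&dst->ptr, src->ptr); // acquire new ref, drop the old one
        memcpy(dst, src, sizeof(*dst));            // copy cached fields; dst->ptr
                                                   // already equals src->ptr
    }

In ff_mpeg_update_thread_context() this replaces the old
UPDATE_PICTURE macro, which had to translate array indices between
the two contexts and re-reference frames via ff_mpeg_ref_picture(),
which could fail.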

The pool is allocated in ff_mpv_decode_init() for decoders,
which can therefore fail now. This, together with the fact that
the pool is not unreferenced in ff_mpv_common_end(), also made it
necessary to mark several mpegvideo decoders with the
FF_CODEC_CAP_INIT_CLEANUP flag.

This also means that there is no good reason any more for
ff_mpv_common_frame_size_change() to exist.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
Branch: release/7.1
Author: Andreas Rheinhardt
parent 99d26939af
commit 9ce56f91c0
 libavcodec/flvdec.c        |   3
 libavcodec/h261dec.c       |   5
 libavcodec/h263dec.c       |  14
 libavcodec/intelh263dec.c  |   3
 libavcodec/mjpegenc.c      |   2
 libavcodec/mpeg12dec.c     |  11
 libavcodec/mpeg4videodec.c |   3
 libavcodec/mpegpicture.c   | 157
 libavcodec/mpegpicture.h   |  14
 libavcodec/mpegvideo.c     |  21
 libavcodec/mpegvideo.h     |   2
 libavcodec/mpegvideo_dec.c |  72
 libavcodec/mpegvideo_enc.c |  74
 libavcodec/mpegvideodec.h  |   4
 libavcodec/msmpeg4dec.c    |  12
 libavcodec/rv10.c          |   8
 libavcodec/rv34.c          |   3
 libavcodec/svq1enc.c       |   3
 libavcodec/vc1dec.c        |   8
 19 files changed

@@ -121,6 +121,7 @@ const FFCodec ff_flv_decoder = {
     FF_CODEC_DECODE_CB(ff_h263_decode_frame),
     .close          = ff_mpv_decode_close,
     .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1,
-    .caps_internal  = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
+    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP |
+                      FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
     .p.max_lowres   = 3,
 };

@@ -84,10 +84,13 @@ static av_cold int h261_decode_init(AVCodecContext *avctx)
     static AVOnce init_static_once = AV_ONCE_INIT;
     H261DecContext *const h = avctx->priv_data;
     MpegEncContext *const s = &h->s;
+    int ret;
 
     s->private_ctx = &h->common;
     // set defaults
-    ff_mpv_decode_init(s, avctx);
+    ret = ff_mpv_decode_init(s, avctx);
+    if (ret < 0)
+        return ret;
 
     s->out_format = FMT_H261;
     s->low_delay  = 1;

@@ -95,7 +95,9 @@ av_cold int ff_h263_decode_init(AVCodecContext *avctx)
     s->out_format = FMT_H263;
 
     // set defaults
-    ff_mpv_decode_init(s, avctx);
+    ret = ff_mpv_decode_init(s, avctx);
+    if (ret < 0)
+        return ret;
 
     s->quant_precision = 5;
     s->decode_mb       = ff_h263_decode_mb;
@@ -427,7 +429,7 @@ int ff_h263_decode_frame(AVCodecContext *avctx, AVFrame *pict,
     if (s->low_delay == 0 && s->next_pic.ptr) {
         if ((ret = av_frame_ref(pict, s->next_pic.ptr->f)) < 0)
             return ret;
-        s->next_pic.ptr = NULL;
+        ff_mpv_unref_picture(&s->next_pic);
 
         *got_frame = 1;
     } else if (s->skipped_last_frame && s->cur_pic.ptr) {
@@ -439,7 +441,7 @@ int ff_h263_decode_frame(AVCodecContext *avctx, AVFrame *pict,
          * returned picture would be reused */
         if ((ret = ff_decode_frame_props(avctx, pict)) < 0)
             return ret;
-        s->cur_pic.ptr = NULL;
+        ff_mpv_unref_picture(&s->cur_pic);
 
         *got_frame = 1;
     }
@@ -698,7 +700,8 @@ const FFCodec ff_h263_decoder = {
     .close          = ff_mpv_decode_close,
     .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
                       AV_CODEC_CAP_DELAY,
-    .caps_internal  = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
+    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP |
+                      FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
     .flush          = ff_mpeg_flush,
     .p.max_lowres   = 3,
     .hw_configs     = h263_hw_config_list,
@@ -715,7 +718,8 @@ const FFCodec ff_h263p_decoder = {
     .close          = ff_mpv_decode_close,
     .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
                       AV_CODEC_CAP_DELAY,
-    .caps_internal  = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
+    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP |
+                      FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
     .flush          = ff_mpeg_flush,
     .p.max_lowres   = 3,
     .hw_configs     = h263_hw_config_list,

@@ -135,5 +135,6 @@ const FFCodec ff_h263i_decoder = {
     FF_CODEC_DECODE_CB(ff_h263_decode_frame),
     .close          = ff_mpv_decode_close,
     .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1,
-    .caps_internal  = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
+    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP |
+                      FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
 };

@@ -80,7 +80,7 @@ static av_cold void init_uni_ac_vlc(const uint8_t huff_size_ac[256],
 static void mjpeg_encode_picture_header(MpegEncContext *s)
 {
-    ff_mjpeg_encode_picture_header(s->avctx, &s->pb, s->picture->f, s->mjpeg_ctx,
+    ff_mjpeg_encode_picture_header(s->avctx, &s->pb, s->cur_pic.ptr->f, s->mjpeg_ctx,
                                    s->intra_scantable.permutated, 0,
                                    s->intra_matrix, s->chroma_intra_matrix,
                                    s->slice_context_count > 1);

@@ -793,13 +793,16 @@ static av_cold int mpeg_decode_init(AVCodecContext *avctx)
 {
     Mpeg1Context *s = avctx->priv_data;
     MpegEncContext *s2 = &s->mpeg_enc_ctx;
+    int ret;
 
     s2->out_format = FMT_MPEG1;
     if (   avctx->codec_tag != AV_RL32("VCR2")
         && avctx->codec_tag != AV_RL32("BW10"))
         avctx->coded_width = avctx->coded_height = 0; // do not trust dimensions from input
-    ff_mpv_decode_init(s2, avctx);
+    ret = ff_mpv_decode_init(s2, avctx);
+    if (ret < 0)
+        return ret;
 
     ff_mpeg12_init_vlcs();
@@ -2529,7 +2532,7 @@ static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture,
             if (ret < 0)
                 return ret;
 
-            s2->next_pic.ptr = NULL;
+            ff_mpv_unref_picture(&s2->next_pic);
 
             *got_output = 1;
         }
@@ -2552,14 +2555,14 @@ static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture,
         }
         s->extradata_decoded = 1;
         if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) {
-            s2->cur_pic.ptr = NULL;
+            ff_mpv_unref_picture(&s2->cur_pic);
             return ret;
         }
     }
 
     ret = decode_chunks(avctx, picture, got_output, buf, buf_size);
     if (ret<0 || *got_output) {
-        s2->cur_pic.ptr = NULL;
+        ff_mpv_unref_picture(&s2->cur_pic);
 
         if (s->timecode_frame_start != -1 && *got_output) {
             char tcbuf[AV_TIMECODE_STR_SIZE];

@@ -3866,7 +3866,8 @@ const FFCodec ff_mpeg4_decoder = {
     .close          = ff_mpv_decode_close,
     .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
                       AV_CODEC_CAP_DELAY | AV_CODEC_CAP_FRAME_THREADS,
-    .caps_internal  = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
+    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP |
+                      FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
     .flush          = ff_mpeg_flush,
     .p.max_lowres   = 3,
     .p.profiles     = NULL_IF_CONFIG_SMALL(ff_mpeg4_video_profiles),

@@ -30,8 +30,14 @@
 #include "refstruct.h"
 #include "threadframe.h"
 
-static void av_noinline free_picture_tables(MPVPicture *pic)
+static void mpv_pic_reset(FFRefStructOpaque unused, void *obj)
 {
+    MPVPicture *pic = obj;
+
+    ff_thread_release_ext_buffer(&pic->tf);
+    ff_refstruct_unref(&pic->hwaccel_picture_private);
+
     ff_refstruct_unref(&pic->mbskip_table);
     ff_refstruct_unref(&pic->qscale_table_base);
     ff_refstruct_unref(&pic->mb_type_base);
@@ -39,16 +45,53 @@ static void av_noinline free_picture_tables(MPVPicture *pic)
     for (int i = 0; i < 2; i++) {
         ff_refstruct_unref(&pic->motion_val_base[i]);
         ff_refstruct_unref(&pic->ref_index[i]);
+        pic->motion_val[i] = NULL;
     }
+    pic->mb_type      = NULL;
+    pic->qscale_table = NULL;
 
     pic->mb_stride =
     pic->mb_width  =
     pic->mb_height = 0;
+
+    pic->dummy                  = 0;
+    pic->field_picture          = 0;
+    pic->b_frame_score          = 0;
+    pic->reference              = 0;
+    pic->shared                 = 0;
+    pic->display_picture_number = 0;
+    pic->coded_picture_number   = 0;
 }
+
+static int av_cold mpv_pic_init(FFRefStructOpaque unused, void *obj)
+{
+    MPVPicture *pic = obj;
+
+    pic->f = av_frame_alloc();
+    if (!pic->f)
+        return AVERROR(ENOMEM);
+    pic->tf.f = pic->f;
+    return 0;
+}
+
+static void av_cold mpv_pic_free(FFRefStructOpaque unused, void *obj)
+{
+    MPVPicture *pic = obj;
+
+    av_frame_free(&pic->f);
+}
+
+av_cold FFRefStructPool *ff_mpv_alloc_pic_pool(void)
+{
+    return ff_refstruct_pool_alloc_ext(sizeof(MPVPicture), 0, NULL,
+                                       mpv_pic_init, mpv_pic_reset, mpv_pic_free, NULL);
+}
 
 void ff_mpv_unref_picture(MPVWorkPicture *pic)
 {
-    if (pic->ptr)
-        ff_mpeg_unref_picture(pic->ptr);
+    ff_refstruct_unref(&pic->ptr);
 
     memset(pic, 0, sizeof(*pic));
 }
@@ -71,16 +114,18 @@ static void set_workpic_from_pic(MPVWorkPicture *wpic, const MPVPicture *pic)
 void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
 {
     av_assert1(dst != src);
+    ff_refstruct_replace(&dst->ptr, src->ptr);
     memcpy(dst, src, sizeof(*dst));
 }
 
 void ff_mpv_workpic_from_pic(MPVWorkPicture *wpic, MPVPicture *pic)
 {
+    ff_refstruct_replace(&wpic->ptr, pic);
     if (!pic) {
         memset(wpic, 0, sizeof(*wpic));
         return;
     }
-    wpic->ptr = pic;
     set_workpic_from_pic(wpic, pic);
 }
@@ -212,107 +257,3 @@ fail:
     av_log(avctx, AV_LOG_ERROR, "Error allocating picture accessories.\n");
     return ret;
 }
-
-/**
- * Deallocate a picture; frees the picture tables in case they
- * need to be reallocated anyway.
- */
-void ff_mpeg_unref_picture(MPVPicture *pic)
-{
-    pic->tf.f = pic->f;
-    ff_thread_release_ext_buffer(&pic->tf);
-
-    ff_refstruct_unref(&pic->hwaccel_picture_private);
-
-    free_picture_tables(pic);
-
-    pic->dummy                  = 0;
-    pic->field_picture          = 0;
-    pic->b_frame_score          = 0;
-    pic->reference              = 0;
-    pic->shared                 = 0;
-    pic->display_picture_number = 0;
-    pic->coded_picture_number   = 0;
-}
-
-static void update_picture_tables(MPVPicture *dst, const MPVPicture *src)
-{
-    ff_refstruct_replace(&dst->mbskip_table, src->mbskip_table);
-    ff_refstruct_replace(&dst->qscale_table_base, src->qscale_table_base);
-    ff_refstruct_replace(&dst->mb_type_base, src->mb_type_base);
-    for (int i = 0; i < 2; i++) {
-        ff_refstruct_replace(&dst->motion_val_base[i], src->motion_val_base[i]);
-        ff_refstruct_replace(&dst->ref_index[i], src->ref_index[i]);
-    }
-
-    dst->qscale_table = src->qscale_table;
-    dst->mb_type = src->mb_type;
-    for (int i = 0; i < 2; i++)
-        dst->motion_val[i] = src->motion_val[i];
-
-    dst->mb_width  = src->mb_width;
-    dst->mb_height = src->mb_height;
-    dst->mb_stride = src->mb_stride;
-}
-
-int ff_mpeg_ref_picture(MPVPicture *dst, MPVPicture *src)
-{
-    int ret;
-
-    av_assert0(!dst->f->buf[0]);
-    av_assert0(src->f->buf[0]);
-
-    src->tf.f = src->f;
-    dst->tf.f = dst->f;
-    ret = ff_thread_ref_frame(&dst->tf, &src->tf);
-    if (ret < 0)
-        goto fail;
-
-    update_picture_tables(dst, src);
-
-    ff_refstruct_replace(&dst->hwaccel_picture_private,
-                         src->hwaccel_picture_private);
-
-    dst->dummy                  = src->dummy;
-    dst->field_picture          = src->field_picture;
-    dst->b_frame_score          = src->b_frame_score;
-    dst->reference              = src->reference;
-    dst->shared                 = src->shared;
-    dst->display_picture_number = src->display_picture_number;
-    dst->coded_picture_number   = src->coded_picture_number;
-
-    return 0;
-fail:
-    ff_mpeg_unref_picture(dst);
-    return ret;
-}
-
-int ff_find_unused_picture(AVCodecContext *avctx, MPVPicture *picture, int shared)
-{
-    for (int i = 0; i < MAX_PICTURE_COUNT; i++)
-        if (!picture[i].f->buf[0])
-            return i;
-
-    av_log(avctx, AV_LOG_FATAL,
-           "Internal error, picture buffer overflow\n");
-    /* We could return -1, but the codec would crash trying to draw into a
-     * non-existing frame anyway. This is safer than waiting for a random crash.
-     * Also the return of this is never useful, an encoder must only allocate
-     * as much as allowed in the specification. This has no relationship to how
-     * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
-     * enough for such valid streams).
-     * Plus, a decoder has to check stream validity and remove frames if too
-     * many reference frames are around. Waiting for "OOM" is not correct at
-     * all. Similarly, missing reference frames have to be replaced by
-     * interpolated/MC frames, anything else is a bug in the codec ...
-     */
-    abort();
-    return -1;
-}
-
-void av_cold ff_mpv_picture_free(MPVPicture *pic)
-{
-    ff_mpeg_unref_picture(pic);
-    av_frame_free(&pic->f);
-}

@@ -29,7 +29,6 @@
 #include "threadframe.h"
 
 #define MPV_MAX_PLANES 3
-#define MAX_PICTURE_COUNT 36
 #define EDGE_WIDTH 16
 
 typedef struct ScratchpadContext {
@@ -94,7 +93,7 @@ typedef struct MPVWorkPicture {
     uint8_t *data[MPV_MAX_PLANES];
     ptrdiff_t linesize[MPV_MAX_PLANES];
 
-    MPVPicture *ptr;
+    MPVPicture *ptr;            ///< RefStruct reference
 
     int8_t *qscale_table;
@@ -109,6 +108,11 @@ typedef struct MPVWorkPicture {
     int reference;
 } MPVWorkPicture;
 
+/**
+ * Allocate a pool of MPVPictures.
+ */
+struct FFRefStructPool *ff_mpv_alloc_pic_pool(void);
+
 /**
  * Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself)
  * and set the MPVWorkPicture's fields.
@@ -129,14 +133,8 @@ int ff_mpv_pic_check_linesize(void *logctx, const struct AVFrame *f,
 int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me,
                             ScratchpadContext *sc, int linesize);
 
-int ff_mpeg_ref_picture(MPVPicture *dst, MPVPicture *src);
 void ff_mpv_unref_picture(MPVWorkPicture *pic);
 void ff_mpv_workpic_from_pic(MPVWorkPicture *wpic, MPVPicture *pic);
 void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src);
-void ff_mpeg_unref_picture(MPVPicture *picture);
-void ff_mpv_picture_free(MPVPicture *pic);
-int ff_find_unused_picture(AVCodecContext *avctx, MPVPicture *picture, int shared);
 
 #endif /* AVCODEC_MPEGPICTURE_H */

@@ -701,7 +701,6 @@ static void clear_context(MpegEncContext *s)
     s->bitstream_buffer = NULL;
     s->allocated_bitstream_buffer_size = 0;
 
-    s->picture = NULL;
     s->p_field_mv_table_base = NULL;
     for (int i = 0; i < 2; i++)
         for (int j = 0; j < 2; j++)
@@ -726,10 +725,10 @@ static void clear_context(MpegEncContext *s)
  */
 av_cold int ff_mpv_common_init(MpegEncContext *s)
 {
-    int i, ret;
     int nb_slices = (HAVE_THREADS &&
                      s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                     s->avctx->thread_count : 1;
+    int ret;
 
     clear_context(s);
@@ -755,14 +754,6 @@ av_cold int ff_mpv_common_init(MpegEncContext *s)
     if (ret)
         return ret;
 
-    if (!FF_ALLOCZ_TYPED_ARRAY(s->picture, MAX_PICTURE_COUNT))
-        return AVERROR(ENOMEM);
-    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
-        s->picture[i].f = av_frame_alloc();
-        if (!s->picture[i].f)
-            goto fail_nomem;
-    }
-
     if ((ret = ff_mpv_init_context_frame(s)))
         goto fail;
@@ -789,8 +780,6 @@ av_cold int ff_mpv_common_init(MpegEncContext *s)
 //  }
 
     return 0;
-fail_nomem:
-    ret = AVERROR(ENOMEM);
 fail:
     ff_mpv_common_end(s);
     return ret;
@@ -830,11 +819,9 @@ void ff_mpv_common_end(MpegEncContext *s)
     av_freep(&s->bitstream_buffer);
     s->allocated_bitstream_buffer_size = 0;
 
-    if (s->picture) {
-        for (int i = 0; i < MAX_PICTURE_COUNT; i++)
-            ff_mpv_picture_free(&s->picture[i]);
-    }
-    av_freep(&s->picture);
+    ff_mpv_unref_picture(&s->last_pic);
+    ff_mpv_unref_picture(&s->cur_pic);
+    ff_mpv_unref_picture(&s->next_pic);
 
     s->context_initialized = 0;
     s->context_reinit      = 0;

@@ -128,7 +128,7 @@ typedef struct MpegEncContext {
     int mb_num;           ///< number of MBs of a picture
     ptrdiff_t linesize;   ///< line size, in bytes, may be different from width
     ptrdiff_t uvlinesize; ///< line size, for chroma in bytes, may be different from width
-    MPVPicture *picture;  ///< main picture buffer
+    struct FFRefStructPool *picture_pool; ///< Pool for MPVPictures
     MPVPicture **input_picture; ///< next pictures on display order for encoding
     MPVPicture **reordered_input_picture; ///< pointer to the next pictures in coded order for encoding

@@ -38,11 +38,12 @@
 #include "mpegvideo.h"
 #include "mpegvideodec.h"
 #include "mpeg4videodec.h"
+#include "refstruct.h"
 #include "thread.h"
 #include "threadframe.h"
 #include "wmv2dec.h"
 
-void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
+int ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
 {
     ff_mpv_common_defaults(s);
@@ -57,6 +58,14 @@ void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
     ff_mpv_idct_init(s);
     ff_h264chroma_init(&s->h264chroma, 8); //for lowres
 
+    if (!s->picture_pool && // VC-1 can call this multiple times
+        ff_thread_sync_ref(avctx, offsetof(MpegEncContext, picture_pool))) {
+        s->picture_pool = ff_mpv_alloc_pic_pool();
+        if (!s->picture_pool)
+            return AVERROR(ENOMEM);
+    }
+
+    return 0;
 }
@@ -103,26 +112,9 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
     s->coded_picture_number = s1->coded_picture_number;
     s->picture_number       = s1->picture_number;
 
-    av_assert0(!s->picture || s->picture != s1->picture);
-    if (s->picture)
-        for (int i = 0; i < MAX_PICTURE_COUNT; i++) {
-            ff_mpeg_unref_picture(&s->picture[i]);
-            if (s1->picture && s1->picture[i].f->buf[0] &&
-                (ret = ff_mpeg_ref_picture(&s->picture[i], &s1->picture[i])) < 0)
-                return ret;
-        }
-
-#define UPDATE_PICTURE(pic)\
-do {\
-    if (s->picture && s1->picture && s1->pic.ptr && s1->pic.ptr->f->buf[0]) {\
-        ff_mpv_workpic_from_pic(&s->pic, &s->picture[s1->pic.ptr - s1->picture]);\
-    } else\
-        ff_mpv_unref_picture(&s->pic);\
-} while (0)
-
-    UPDATE_PICTURE(cur_pic);
-    UPDATE_PICTURE(last_pic);
-    UPDATE_PICTURE(next_pic);
+    ff_mpv_replace_picture(&s->cur_pic,  &s1->cur_pic);
+    ff_mpv_replace_picture(&s->last_pic, &s1->last_pic);
+    ff_mpv_replace_picture(&s->next_pic, &s1->next_pic);
 
     s->linesize   = s1->linesize;
     s->uvlinesize = s1->uvlinesize;
@@ -177,6 +169,7 @@ int ff_mpv_decode_close(AVCodecContext *avctx)
 {
     MpegEncContext *s = avctx->priv_data;
 
+    ff_refstruct_pool_uninit(&s->picture_pool);
     ff_mpv_common_end(s);
     return 0;
 }
@@ -190,9 +183,9 @@ int ff_mpv_common_frame_size_change(MpegEncContext *s)
     ff_mpv_free_context_frame(s);
 
-    s->last_pic.ptr =
-    s->next_pic.ptr =
-    s->cur_pic.ptr  = NULL;
+    ff_mpv_unref_picture(&s->last_pic);
+    ff_mpv_unref_picture(&s->next_pic);
+    ff_mpv_unref_picture(&s->cur_pic);
 
     if ((s->width || s->height) &&
         (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
@@ -228,14 +221,12 @@ int ff_mpv_common_frame_size_change(MpegEncContext *s)
 static int alloc_picture(MpegEncContext *s, MPVWorkPicture *dst, int reference)
 {
     AVCodecContext *avctx = s->avctx;
-    int idx = ff_find_unused_picture(s->avctx, s->picture, 0);
-    MPVPicture *pic;
+    MPVPicture *pic = ff_refstruct_pool_get(s->picture_pool);
     int ret;
 
-    if (idx < 0)
-        return idx;
+    if (!pic)
+        return AVERROR(ENOMEM);
 
-    pic = &s->picture[idx];
     dst->ptr = pic;
     pic->tf.f = pic->f;
@@ -368,22 +359,7 @@ int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
         return AVERROR_BUG;
     }
 
-    /* mark & release old frames */
-    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_pic.ptr &&
-        s->last_pic.ptr != s->next_pic.ptr &&
-        s->last_pic.ptr->f->buf[0]) {
-        ff_mpeg_unref_picture(s->last_pic.ptr);
-    }
-
-    /* release non reference/forgotten frames */
-    for (int i = 0; i < MAX_PICTURE_COUNT; i++) {
-        if (!s->picture[i].reference ||
-            (&s->picture[i] != s->last_pic.ptr &&
-             &s->picture[i] != s->next_pic.ptr)) {
-            ff_mpeg_unref_picture(&s->picture[i]);
-        }
-    }
+    ff_mpv_unref_picture(&s->cur_pic);
     ret = alloc_picture(s, &s->cur_pic,
                         s->pict_type != AV_PICTURE_TYPE_B && !s->droppable);
     if (ret < 0)
@@ -495,12 +471,6 @@ void ff_mpeg_flush(AVCodecContext *avctx)
 {
     MpegEncContext *const s = avctx->priv_data;
 
-    if (!s->picture)
-        return;
-
-    for (int i = 0; i < MAX_PICTURE_COUNT; i++)
-        ff_mpeg_unref_picture(&s->picture[i]);
-
     ff_mpv_unref_picture(&s->cur_pic);
     ff_mpv_unref_picture(&s->last_pic);
     ff_mpv_unref_picture(&s->next_pic);

@@ -75,6 +75,7 @@
 #include "wmv2enc.h"
 #include "rv10enc.h"
 #include "packet_internal.h"
+#include "refstruct.h"
 
 #include <limits.h>
 #include "sp5x.h"
@@ -821,7 +822,8 @@ av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
         !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
         !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_B_FRAMES + 1) ||
         !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_B_FRAMES + 1) ||
-        !(s->new_pic = av_frame_alloc()))
+        !(s->new_pic = av_frame_alloc()) ||
+        !(s->picture_pool = ff_mpv_alloc_pic_pool()))
         return AVERROR(ENOMEM);
 
     /* Allocate MV tables; the MV and MB tables will be copied
@@ -992,7 +994,14 @@ av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
     ff_rate_control_uninit(&s->rc_context);
 
     ff_mpv_common_end(s);
+    ff_refstruct_pool_uninit(&s->picture_pool);
+
+    if (s->input_picture && s->reordered_input_picture) {
+        for (int i = 0; i < MAX_B_FRAMES + 1; i++) {
+            ff_refstruct_unref(&s->input_picture[i]);
+            ff_refstruct_unref(&s->reordered_input_picture[i]);
+        }
+    }
 
     for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
         av_frame_free(&s->tmp_frames[i]);
@@ -1131,12 +1140,14 @@ static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
 {
     MPVPicture *pic = NULL;
     int64_t pts;
-    int i, display_picture_number = 0, ret;
+    int display_picture_number = 0, ret;
     int encoding_delay = s->max_b_frames ? s->max_b_frames
                                          : (s->low_delay ? 0 : 1);
     int flush_offset = 1;
    int direct = 1;
 
+    av_assert1(!s->input_picture[0]);
+
     if (pic_arg) {
         pts = pic_arg->pts;
         display_picture_number = s->input_picture_number++;
@@ -1182,16 +1193,13 @@ static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
             ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
                     pic_arg->linesize[1], s->linesize, s->uvlinesize);
 
-        i = ff_find_unused_picture(s->avctx, s->picture, direct);
-        if (i < 0)
-            return i;
-
-        pic = &s->picture[i];
-        pic->reference = 3;
+        pic = ff_refstruct_pool_get(s->picture_pool);
+        if (!pic)
+            return AVERROR(ENOMEM);
 
         if (direct) {
             if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
-                return ret;
+                goto fail;
             pic->shared = 1;
         } else {
            ret = prepare_picture(s, pic->f, pic_arg);
@@ -1241,17 +1249,17 @@ static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
         pic->display_picture_number = display_picture_number;
         pic->f->pts = pts; // we set this here to avoid modifying pic_arg
-    } else {
-        /* Flushing: When we have not received enough input frames,
-         * ensure s->input_picture[0] contains the first picture */
+    } else if (!s->reordered_input_picture[1]) {
+        /* Flushing: When the above check is true, the encoder is about to run
+         * out of frames to encode. Check if there are input_pictures left;
+         * if so, ensure s->input_picture[0] contains the first picture.
+         * A flush_offset != 1 will only happen if we did not receive enough
+         * input frames. */
         for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
             if (s->input_picture[flush_offset])
                 break;
 
-        if (flush_offset <= 1)
-            flush_offset = 1;
-        else
-            encoding_delay = encoding_delay - flush_offset + 1;
+        encoding_delay -= flush_offset - 1;
     }
 
     /* shift buffer entries */
@@ -1262,7 +1270,7 @@ static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
     return 0;
 fail:
-    ff_mpeg_unref_picture(pic);
+    ff_refstruct_unref(&pic);
     return ret;
 }
@@ -1475,8 +1483,10 @@ fail:
 /**
  * Determines whether an input picture is discarded or not
  * and if not determines the length of the next chain of B frames
- * and puts these pictures (including the P frame) into
+ * and moves these pictures (including the P frame) into
  * reordered_input_picture.
+ * input_picture[0] is always NULL when exiting this function, even on error;
+ * reordered_input_picture[0] is always NULL when exiting this function on error.
  */
 static int set_bframe_chain_length(MpegEncContext *s)
 {
@@ -1490,7 +1500,7 @@ static int set_bframe_chain_length(MpegEncContext *s)
             s->next_pic.ptr &&
             skip_check(s, s->input_picture[0], s->next_pic.ptr)) {
             // FIXME check that the gop check above is +-1 correct
-            ff_mpeg_unref_picture(s->input_picture[0]);
+            ff_refstruct_unref(&s->input_picture[0]);
 
             ff_vbv_update(s, 0);
@@ -1501,6 +1511,7 @@ static int set_bframe_chain_length(MpegEncContext *s)
     if (/*s->picture_in_gop_number >= s->gop_size ||*/
         !s->next_pic.ptr || s->intra_only) {
         s->reordered_input_picture[0] = s->input_picture[0];
+        s->input_picture[0] = NULL;
         s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
         s->reordered_input_picture[0]->coded_picture_number =
             s->coded_picture_number++;
@@ -1555,7 +1566,7 @@ static int set_bframe_chain_length(MpegEncContext *s)
         } else if (s->b_frame_strategy == 2) {
             b_frames = estimate_best_b_count(s);
             if (b_frames < 0) {
-                ff_mpeg_unref_picture(s->input_picture[0]);
+                ff_refstruct_unref(&s->input_picture[0]);
                 return b_frames;
             }
         }
@@ -1589,12 +1600,14 @@ static int set_bframe_chain_length(MpegEncContext *s)
                 b_frames--;
 
     s->reordered_input_picture[0] = s->input_picture[b_frames];
+    s->input_picture[b_frames] = NULL;
     if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
        s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
    s->reordered_input_picture[0]->coded_picture_number =
        s->coded_picture_number++;
    for (int i = 0; i < b_frames; i++) {
        s->reordered_input_picture[i + 1] = s->input_picture[i];
+        s->input_picture[i] = NULL;
        s->reordered_input_picture[i + 1]->f->pict_type =
            AV_PICTURE_TYPE_B;
        s->reordered_input_picture[i + 1]->coded_picture_number =
@@ -1609,11 +1622,14 @@ static int select_input_picture(MpegEncContext *s)
 {
     int ret;
 
+    av_assert1(!s->reordered_input_picture[0]);
+
     for (int i = 1; i <= MAX_B_FRAMES; i++)
         s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
     s->reordered_input_picture[MAX_B_FRAMES] = NULL;
 
     ret = set_bframe_chain_length(s);
+    av_assert1(!s->input_picture[0]);
     if (ret < 0)
         return ret;
@@ -1643,6 +1659,7 @@ static int select_input_picture(MpegEncContext *s)
             }
         }
 
         s->cur_pic.ptr = s->reordered_input_picture[0];
+        s->reordered_input_picture[0] = NULL;
         av_assert1(s->mb_width  == s->buffer_pools.alloc_mb_width);
         av_assert1(s->mb_height == s->buffer_pools.alloc_mb_height);
         av_assert1(s->mb_stride == s->buffer_pools.alloc_mb_stride);
@@ -1657,7 +1674,7 @@ static int select_input_picture(MpegEncContext *s)
     }
     return 0;
 fail:
-    ff_mpeg_unref_picture(s->reordered_input_picture[0]);
+    ff_refstruct_unref(&s->reordered_input_picture[0]);
     return ret;
 }
@@ -1720,13 +1737,6 @@ static void update_noise_reduction(MpegEncContext *s)
 static void frame_start(MpegEncContext *s)
 {
-    /* mark & release old frames */
-    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_pic.ptr &&
-        s->last_pic.ptr != s->next_pic.ptr &&
-        s->last_pic.ptr->f->buf[0]) {
-        ff_mpv_unref_picture(&s->last_pic);
-    }
-
     s->cur_pic.ptr->f->pict_type = s->pict_type;
 
     if (s->pict_type != AV_PICTURE_TYPE_B) {
@@ -1747,6 +1757,8 @@ int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
     int i, stuffing_count, ret;
     int context_count = s->slice_context_count;
 
+    ff_mpv_unref_picture(&s->cur_pic);
+
     s->vbv_ignore_qmax = 0;
 
     s->picture_in_gop_number++;
@@ -1973,11 +1985,7 @@ vbv_retry:
             s->frame_bits = 0;
         }
 
-        /* release non-reference frames */
-        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
-            if (!s->picture[i].reference)
-                ff_mpeg_unref_picture(&s->picture[i]);
-        }
+        ff_mpv_unref_picture(&s->cur_pic);
 
         av_assert1((s->frame_bits & 7) == 0);

@@ -44,8 +44,10 @@
 /**
  * Initialize the given MpegEncContext for decoding.
  * the changed fields will not depend upon
  * the prior state of the MpegEncContext.
+ *
+ * Also initialize the picture pool.
  */
-void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx);
+int ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx);
 
 int ff_mpv_common_frame_size_change(MpegEncContext *s);

@@ -852,7 +852,8 @@ const FFCodec ff_msmpeg4v1_decoder = {
     FF_CODEC_DECODE_CB(ff_h263_decode_frame),
     .close          = ff_mpv_decode_close,
     .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1,
-    .caps_internal  = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
+    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP |
+                      FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
     .p.max_lowres   = 3,
 };
@@ -866,7 +867,8 @@ const FFCodec ff_msmpeg4v2_decoder = {
     FF_CODEC_DECODE_CB(ff_h263_decode_frame),
     .close          = ff_mpv_decode_close,
     .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1,
-    .caps_internal  = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
+    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP |
+                      FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
     .p.max_lowres   = 3,
 };
@@ -880,7 +882,8 @@ const FFCodec ff_msmpeg4v3_decoder = {
     FF_CODEC_DECODE_CB(ff_h263_decode_frame),
     .close          = ff_mpv_decode_close,
     .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1,
-    .caps_internal  = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
+    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP |
+                      FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
     .p.max_lowres   = 3,
 };
@@ -894,6 +897,7 @@ const FFCodec ff_wmv1_decoder = {
     FF_CODEC_DECODE_CB(ff_h263_decode_frame),
     .close          = ff_mpv_decode_close,
     .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1,
-    .caps_internal  = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
+    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP |
+                      FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
     .p.max_lowres   = 3,
 };

@@ -364,7 +364,9 @@ static av_cold int rv10_decode_init(AVCodecContext *avctx)
                                    avctx->coded_height, 0, avctx)) < 0)
         return ret;
 
-    ff_mpv_decode_init(s, avctx);
+    ret = ff_mpv_decode_init(s, avctx);
+    if (ret < 0)
+        return ret;
 
     s->out_format = FMT_H263;
@@ -645,7 +647,7 @@ static int rv10_decode_frame(AVCodecContext *avctx, AVFrame *pict,
         }
 
         // so we can detect if frame_end was not called (find some nicer solution...)
-        s->cur_pic.ptr = NULL;
+        ff_mpv_unref_picture(&s->cur_pic);
     }
 
     return avpkt->size;
@@ -662,6 +664,7 @@ const FFCodec ff_rv10_decoder = {
     .close          = ff_mpv_decode_close,
     .p.capabilities = AV_CODEC_CAP_DR1,
     .p.max_lowres   = 3,
+    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
 };
 
 const FFCodec ff_rv20_decoder = {
@@ -676,4 +679,5 @@ const FFCodec ff_rv20_decoder = {
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
     .flush          = ff_mpeg_flush,
     .p.max_lowres   = 3,
+    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
 };

@@ -1510,7 +1510,9 @@ av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
     MpegEncContext *s = &r->s;
     int ret;
 
-    ff_mpv_decode_init(s, avctx);
+    ret = ff_mpv_decode_init(s, avctx);
+    if (ret < 0)
+        return ret;
 
     s->out_format = FMT_H263;
     avctx->pix_fmt = AV_PIX_FMT_YUV420P;
@@ -1632,7 +1634,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx, AVFrame *pict,
         if (s->next_pic.ptr) {
             if ((ret = av_frame_ref(pict, s->next_pic.ptr->f)) < 0)
                 return ret;
-            s->next_pic.ptr = NULL;
+            ff_mpv_unref_picture(&s->next_pic);
 
             *got_picture_ptr = 1;
         }

@@ -60,7 +60,6 @@ typedef struct SVQ1EncContext {
      * else, the idea is to make the motion estimation eventually independent
      * of MpegEncContext, so this will be removed then. */
     MpegEncContext m;
-    MPVPicture cur_pic, last_pic;
     AVCodecContext *avctx;
     MECmpContext mecc;
     HpelDSPContext hdsp;
@@ -327,8 +326,6 @@ static int svq1_encode_plane(SVQ1EncContext *s, int plane,
     if (s->pict_type == AV_PICTURE_TYPE_P) {
         s->m.avctx = s->avctx;
-        s->m.cur_pic.ptr  = &s->cur_pic;
-        s->m.last_pic.ptr = &s->last_pic;
         s->m.last_pic.data[0] = ref_plane;
         s->m.linesize =
         s->m.last_pic.linesize[0] =

@@ -461,7 +461,9 @@ av_cold int ff_vc1_decode_init(AVCodecContext *avctx)
     if (ret < 0)
         return ret;
 
-    ff_mpv_decode_init(s, avctx);
+    ret = ff_mpv_decode_init(s, avctx);
+    if (ret < 0)
+        return ret;
 
     avctx->pix_fmt = vc1_get_format(avctx);
@@ -846,7 +848,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, AVFrame *pict,
     if (s->low_delay == 0 && s->next_pic.ptr) {
         if ((ret = av_frame_ref(pict, s->next_pic.ptr->f)) < 0)
             return ret;
-        s->next_pic.ptr = NULL;
+        ff_mpv_unref_picture(&s->next_pic);
 
         *got_frame = 1;
     }
@@ -997,7 +999,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, AVFrame *pict,
     if (s->context_initialized &&
         (s->width  != avctx->coded_width ||
          s->height != avctx->coded_height)) {
-        ff_vc1_decode_end(avctx);
+        vc1_decode_reset(avctx);
     }
 
     if (!s->context_initialized) {
