mpegvideo: move frame size dependent memory management to separate functions

This is a preparation for supporting frame size changes during
frame-based multithreading.
Janne Grunau 13 years ago
parent aeeb782c2a
commit 1b3439b305
Changed files (1): libavcodec/mpegvideo.c, 285 lines changed

@@ -654,51 +654,12 @@ void ff_MPV_decode_defaults(MpegEncContext *s)
 }
 
 /**
- * init common structure for both encoder and decoder.
- * this assumes that some variables like width/height are already set
+ * Initialize and allocates MpegEncContext fields dependent on the resolution.
  */
-av_cold int ff_MPV_common_init(MpegEncContext *s)
+static int init_context_frame(MpegEncContext *s)
 {
     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
-    int nb_slices = (HAVE_THREADS &&
-                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
-                    s->avctx->thread_count : 1;
-
-    if (s->encoding && s->avctx->slices)
-        nb_slices = s->avctx->slices;
-
-    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
-        s->mb_height = (s->height + 31) / 32 * 2;
-    else if (s->codec_id != AV_CODEC_ID_H264)
-        s->mb_height = (s->height + 15) / 16;
-
-    if (s->avctx->pix_fmt == PIX_FMT_NONE) {
-        av_log(s->avctx, AV_LOG_ERROR,
-               "decoding to PIX_FMT_NONE is not supported.\n");
-        return -1;
-    }
-
-    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
-        int max_slices;
-        if (s->mb_height)
-            max_slices = FFMIN(MAX_THREADS, s->mb_height);
-        else
-            max_slices = MAX_THREADS;
-        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
-               " reducing to %d\n", nb_slices, max_slices);
-        nb_slices = max_slices;
-    }
-
-    if ((s->width || s->height) &&
-        av_image_check_size(s->width, s->height, 0, s->avctx))
-        return -1;
-
-    ff_dct_common_init(s);
-
-    s->flags  = s->avctx->flags;
-    s->flags2 = s->avctx->flags2;
-
-    if (s->width && s->height) {
+
     s->mb_width   = (s->width + 15) / 16;
     s->mb_stride  = s->mb_width + 1;
     s->b8_stride  = s->mb_width * 2 + 1;
@@ -706,10 +667,6 @@ av_cold int ff_MPV_common_init(MpegEncContext *s)
     mb_array_size = s->mb_height * s->mb_stride;
     mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
 
-    /* set chroma shifts */
-    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
-                                  &s->chroma_y_shift);
-
     /* set default edge pos, will be overriden
      * in decode_header if needed */
     s->h_edge_pos = s->mb_width * 16;
@@ -728,13 +685,6 @@ av_cold int ff_MPV_common_init(MpegEncContext *s)
     c_size  = s->mb_stride * (s->mb_height + 1);
     yc_size = y_size + 2 * c_size;
 
-    /* convert fourcc to upper case */
-    s->codec_tag        = avpriv_toupper4(s->avctx->codec_tag);
-    s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
-
-    s->avctx->coded_frame = &s->current_picture.f;
-
     FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
                       fail); // error ressilience code looks cleaner with this
     for (y = 0; y < s->mb_height; y++)
@@ -758,25 +708,14 @@ av_cold int ff_MPV_common_init(MpegEncContext *s)
                           mv_table_size * 2 * sizeof(int16_t), fail);
         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
                           mv_table_size * 2 * sizeof(int16_t), fail);
-        s->p_mv_table            = s->p_mv_table_base +
-                                   s->mb_stride + 1;
-        s->b_forw_mv_table       = s->b_forw_mv_table_base +
-                                   s->mb_stride + 1;
-        s->b_back_mv_table       = s->b_back_mv_table_base +
-                                   s->mb_stride + 1;
+        s->p_mv_table            = s->p_mv_table_base + s->mb_stride + 1;
+        s->b_forw_mv_table       = s->b_forw_mv_table_base + s->mb_stride + 1;
+        s->b_back_mv_table       = s->b_back_mv_table_base + s->mb_stride + 1;
         s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
                                    s->mb_stride + 1;
         s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
                                    s->mb_stride + 1;
-        s->b_direct_mv_table     = s->b_direct_mv_table_base +
-                                   s->mb_stride + 1;
-
-        if (s->msmpeg4_version) {
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
-                              2 * 2 * (MAX_LEVEL + 1) *
-                              (MAX_RUN + 1) * 2 * sizeof(int), fail);
-        }
-        FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
+        s->b_direct_mv_table     = s->b_direct_mv_table_base + s->mb_stride + 1;
 
         /* Allocate MB type table */
         FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
@@ -785,39 +724,13 @@ av_cold int ff_MPV_common_init(MpegEncContext *s)
         FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
                           sizeof(int), fail);
 
-        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
-                          64 * 32 * sizeof(int), fail);
-        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
-                          64 * 32 * sizeof(int), fail);
-        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
-                          64 * 32 * 2 * sizeof(uint16_t), fail);
-        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
-                          64 * 32 * 2 * sizeof(uint16_t), fail);
-        FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
-                          MAX_PICTURE_COUNT * sizeof(Picture *), fail);
-        FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
-                          MAX_PICTURE_COUNT * sizeof(Picture *), fail);
-
-        if (s->avctx->noise_reduction) {
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
-                              2 * 64 * sizeof(uint16_t), fail);
-        }
-
         FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
                          mb_array_size * sizeof(float), fail);
         FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
                          mb_array_size * sizeof(float), fail);
-        }
-    }
-
-    s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
-    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
-                      s->picture_count * sizeof(Picture), fail);
-    for (i = 0; i < s->picture_count; i++) {
-        avcodec_get_frame_defaults(&s->picture[i].f);
     }
-
-    if (s->width && s->height) {
     FF_ALLOC_OR_GOTO(s->avctx, s->er_temp_buffer,
                      mb_array_size * sizeof(uint8_t), fail);
     FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
@@ -838,17 +751,14 @@ av_cold int ff_MPV_common_init(MpegEncContext *s)
                                                 s->mb_stride + 1;
                 }
                 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
-                                  mb_array_size * 2 * sizeof(uint8_t),
-                                  fail);
+                                  mb_array_size * 2 * sizeof(uint8_t), fail);
                 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
-                                  mv_table_size * 2 * sizeof(int16_t),
-                                  fail);
+                                  mv_table_size * 2 * sizeof(int16_t), fail);
                 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
                                             + s->mb_stride + 1;
             }
             FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
-                              mb_array_size * 2 * sizeof(uint8_t),
-                              fail);
+                              mb_array_size * 2 * sizeof(uint8_t), fail);
         }
     }
     if (s->out_format == FMT_H263) {
@@ -883,7 +793,6 @@ av_cold int ff_MPV_common_init(MpegEncContext *s)
     FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
     // Note the + 1 is for a quicker mpeg4 slice_end detection
 
-    s->parse_context.state = -1;
     if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
         s->avctx->debug_mv) {
         s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
@@ -893,6 +802,109 @@ av_cold int ff_MPV_common_init(MpegEncContext *s)
         s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
             2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
     }
+
+    return 0;
+fail:
+    return AVERROR(ENOMEM);
+}
+
+/**
+ * init common structure for both encoder and decoder.
+ * this assumes that some variables like width/height are already set
+ */
+av_cold int ff_MPV_common_init(MpegEncContext *s)
+{
+    int i, err;
+    int nb_slices = (HAVE_THREADS &&
+                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
+                    s->avctx->thread_count : 1;
+
+    if (s->encoding && s->avctx->slices)
+        nb_slices = s->avctx->slices;
+
+    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
+        s->mb_height = (s->height + 31) / 32 * 2;
+    else if (s->codec_id != AV_CODEC_ID_H264)
+        s->mb_height = (s->height + 15) / 16;
+
+    if (s->avctx->pix_fmt == PIX_FMT_NONE) {
+        av_log(s->avctx, AV_LOG_ERROR,
+               "decoding to PIX_FMT_NONE is not supported.\n");
+        return -1;
+    }
+
+    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
+        int max_slices;
+        if (s->mb_height)
+            max_slices = FFMIN(MAX_THREADS, s->mb_height);
+        else
+            max_slices = MAX_THREADS;
+        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
+               " reducing to %d\n", nb_slices, max_slices);
+        nb_slices = max_slices;
+    }
+
+    if ((s->width || s->height) &&
+        av_image_check_size(s->width, s->height, 0, s->avctx))
+        return -1;
+
+    ff_dct_common_init(s);
+
+    s->flags  = s->avctx->flags;
+    s->flags2 = s->avctx->flags2;
+
+    if (s->width && s->height) {
+        /* set chroma shifts */
+        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
+                                      &s->chroma_y_shift);
+
+        /* convert fourcc to upper case */
+        s->codec_tag        = avpriv_toupper4(s->avctx->codec_tag);
+        s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
+
+        s->avctx->coded_frame = &s->current_picture.f;
+
+        if (s->encoding) {
+            if (s->msmpeg4_version) {
+                FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
+                                  2 * 2 * (MAX_LEVEL + 1) *
+                                  (MAX_RUN + 1) * 2 * sizeof(int), fail);
+            }
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
+
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
+                              64 * 32 * sizeof(int), fail);
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
+                              64 * 32 * sizeof(int), fail);
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
+                              64 * 32 * 2 * sizeof(uint16_t), fail);
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
+                              64 * 32 * 2 * sizeof(uint16_t), fail);
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
+                              MAX_PICTURE_COUNT * sizeof(Picture *), fail);
+            FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
+                              MAX_PICTURE_COUNT * sizeof(Picture *), fail);
+
+            if (s->avctx->noise_reduction) {
+                FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
+                                  2 * 64 * sizeof(uint16_t), fail);
+            }
+        }
+    }
+
+    s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
+    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
+                      s->picture_count * sizeof(Picture), fail);
+    for (i = 0; i < s->picture_count; i++) {
+        avcodec_get_frame_defaults(&s->picture[i].f);
+    }
+
+    if (s->width && s->height) {
+        if ((err = init_context_frame(s)))
+            goto fail;
+
+        s->parse_context.state = -1;
     }
 
     s->context_initialized = 1;
@@ -928,24 +940,15 @@ av_cold int ff_MPV_common_init(MpegEncContext *s)
     return -1;
 }
 
-/* init common structure for both encoder and decoder */
-void ff_MPV_common_end(MpegEncContext *s)
+/**
+ * Frees and resets MpegEncContext fields depending on the resolution.
+ * Is used during resolution changes to avoid a full reinitialization of the
+ * codec.
+ */
+static int free_context_frame(MpegEncContext *s)
 {
     int i, j, k;
 
-    if (s->slice_context_count > 1) {
-        for (i = 0; i < s->slice_context_count; i++) {
-            free_duplicate_context(s->thread_context[i]);
-        }
-        for (i = 1; i < s->slice_context_count; i++) {
-            av_freep(&s->thread_context[i]);
-        }
-        s->slice_context_count = 1;
-    } else free_duplicate_context(s);
-
-    av_freep(&s->parse_context.buffer);
-    s->parse_context.buffer_size = 0;
-
     av_freep(&s->mb_type);
     av_freep(&s->p_mv_table_base);
     av_freep(&s->b_forw_mv_table_base);
@@ -979,15 +982,49 @@ void ff_MPV_common_end(MpegEncContext *s)
     av_freep(&s->pred_dir_table);
     av_freep(&s->mbskip_table);
 
-    av_freep(&s->bitstream_buffer);
-    s->allocated_bitstream_buffer_size = 0;
-
-    av_freep(&s->avctx->stats_out);
-    av_freep(&s->ac_stats);
-
     av_freep(&s->error_status_table);
     av_freep(&s->er_temp_buffer);
     av_freep(&s->mb_index2xy);
     av_freep(&s->lambda_table);
+    av_freep(&s->cplx_tab);
+    av_freep(&s->bits_tab);
+
+    s->linesize = s->uvlinesize = 0;
+
+    for (i = 0; i < 3; i++)
+        av_freep(&s->visualization_buffer[i]);
+
+    if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
+        avcodec_default_free_buffers(s->avctx);
+
+    return 0;
+}
+
+/* init common structure for both encoder and decoder */
+void ff_MPV_common_end(MpegEncContext *s)
+{
+    int i;
+
+    if (s->slice_context_count > 1) {
+        for (i = 0; i < s->slice_context_count; i++) {
+            free_duplicate_context(s->thread_context[i]);
+        }
+        for (i = 1; i < s->slice_context_count; i++) {
+            av_freep(&s->thread_context[i]);
+        }
+        s->slice_context_count = 1;
+    } else free_duplicate_context(s);
+
+    av_freep(&s->parse_context.buffer);
+    s->parse_context.buffer_size = 0;
+
+    av_freep(&s->bitstream_buffer);
+    s->allocated_bitstream_buffer_size = 0;
+
+    av_freep(&s->avctx->stats_out);
+    av_freep(&s->ac_stats);
+
     av_freep(&s->q_intra_matrix);
     av_freep(&s->q_inter_matrix);
     av_freep(&s->q_intra_matrix16);
@@ -995,8 +1032,8 @@ void ff_MPV_common_end(MpegEncContext *s)
     av_freep(&s->input_picture);
     av_freep(&s->reordered_input_picture);
     av_freep(&s->dct_offset);
-    av_freep(&s->cplx_tab);
-    av_freep(&s->bits_tab);
+
+    free_context_frame(s);
 
     if (s->picture && !s->avctx->internal->is_copy) {
         for (i = 0; i < s->picture_count; i++) {
@@ -1009,12 +1046,6 @@ void ff_MPV_common_end(MpegEncContext *s)
     s->next_picture_ptr    =
     s->current_picture_ptr = NULL;
     s->linesize = s->uvlinesize = 0;
-
-    for (i = 0; i < 3; i++)
-        av_freep(&s->visualization_buffer[i]);
-
-    if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
-        avcodec_default_free_buffers(s->avctx);
 }
 
 void ff_init_rl(RLTable *rl,
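
The commit message describes this split as preparation for frame size changes under frame-based multithreading. As a rough illustration of how the two new helpers are meant to fit together, here is a sketch of a mid-stream resolution change. It is not part of this commit: the function name is hypothetical, the macroblock-height rounding is simplified to the progressive case, and the caller is assumed to have already validated the new dimensions (for example with av_image_check_size()).

/* Sketch only (not from the commit above): reinitialize the frame size
 * dependent state of an already opened MpegEncContext after the coded
 * resolution has changed, instead of a full ff_MPV_common_end() /
 * ff_MPV_common_init() cycle. */
static int frame_size_change_sketch(MpegEncContext *s, int width, int height)
{
    int err;

    /* Release every table that was sized for the old resolution. */
    free_context_frame(s);

    s->width  = width;
    s->height = height;

    /* Simplified: progressive rounding only. ff_MPV_common_init() rounds
     * interlaced MPEG-2 up to a multiple of two macroblock rows instead. */
    s->mb_height = (s->height + 15) / 16;

    /* init_context_frame() recomputes mb_width and the strides from the new
     * width and reallocates all per-macroblock tables for the new size. */
    if ((err = init_context_frame(s)))
        return err;

    return 0;
}

The point of the refactoring is exactly that such a path only touches the per-resolution tables, while the slice/picture bookkeeping and the encoder-only state that remain in ff_MPV_common_init() are left alone.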
