|
|
|
@ -1426,9 +1426,6 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx) |
|
|
|
|
|
|
|
|
|
h->avctx = avctx; |
|
|
|
|
|
|
|
|
|
h->width = h->avctx->width; |
|
|
|
|
h->height = h->avctx->height; |
|
|
|
|
|
|
|
|
|
h->bit_depth_luma = 8; |
|
|
|
|
h->chroma_format_idc = 1; |
|
|
|
|
|
|
|
|
@ -2978,14 +2975,49 @@ static enum AVPixelFormat get_pixel_format(H264Context *h) |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
/* export coded and cropped frame dimensions to AVCodecContext */ |
|
|
|
|
static int init_dimensions(H264Context *h) |
|
|
|
|
{ |
|
|
|
|
int width = h->width - (h->sps.crop_right + h->sps.crop_left); |
|
|
|
|
int height = h->height - (h->sps.crop_top + h->sps.crop_bottom); |
|
|
|
|
|
|
|
|
|
/* handle container cropping */ |
|
|
|
|
if (!h->sps.crop && |
|
|
|
|
FFALIGN(h->avctx->width, 16) == h->width && |
|
|
|
|
FFALIGN(h->avctx->height, 16) == h->height) { |
|
|
|
|
width = h->avctx->width; |
|
|
|
|
height = h->avctx->height; |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
if (width <= 0 || height <= 0) { |
|
|
|
|
av_log(h->avctx, AV_LOG_ERROR, "Invalid cropped dimensions: %dx%d.\n", |
|
|
|
|
width, height); |
|
|
|
|
if (h->avctx->err_recognition & AV_EF_EXPLODE) |
|
|
|
|
return AVERROR_INVALIDDATA; |
|
|
|
|
|
|
|
|
|
av_log(h->avctx, AV_LOG_WARNING, "Ignoring cropping information.\n"); |
|
|
|
|
h->sps.crop_bottom = h->sps.crop_top = h->sps.crop_right = h->sps.crop_left = 0; |
|
|
|
|
h->sps.crop = 0; |
|
|
|
|
|
|
|
|
|
width = h->width; |
|
|
|
|
height = h->height; |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
h->avctx->coded_width = h->width; |
|
|
|
|
h->avctx->coded_height = h->height; |
|
|
|
|
h->avctx->width = width; |
|
|
|
|
h->avctx->height = height; |
|
|
|
|
|
|
|
|
|
return 0; |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
static int h264_slice_header_init(H264Context *h, int reinit) |
|
|
|
|
{ |
|
|
|
|
int nb_slices = (HAVE_THREADS && |
|
|
|
|
h->avctx->active_thread_type & FF_THREAD_SLICE) ? |
|
|
|
|
h->avctx->thread_count : 1; |
|
|
|
|
int i; |
|
|
|
|
int i, ret; |
|
|
|
|
|
|
|
|
|
avcodec_set_dimensions(h->avctx, h->width, h->height); |
|
|
|
|
h->avctx->sample_aspect_ratio = h->sps.sar; |
|
|
|
|
av_assert0(h->avctx->sample_aspect_ratio.den); |
|
|
|
|
av_pix_fmt_get_chroma_sub_sample(h->avctx->pix_fmt, |
|
|
|
@ -3196,17 +3228,12 @@ static int decode_slice_header(H264Context *h, H264Context *h0) |
|
|
|
|
|
|
|
|
|
h->chroma_y_shift = h->sps.chroma_format_idc <= 1; // 400 uses yuv420p
|
|
|
|
|
|
|
|
|
|
h->width = 16 * h->mb_width - (2 >> CHROMA444(h)) * FFMIN(h->sps.crop_right, (8 << CHROMA444(h)) - 1); |
|
|
|
|
if (h->sps.frame_mbs_only_flag) |
|
|
|
|
h->height = 16 * h->mb_height - (1 << h->chroma_y_shift) * FFMIN(h->sps.crop_bottom, (16 >> h->chroma_y_shift) - 1); |
|
|
|
|
else |
|
|
|
|
h->height = 16 * h->mb_height - (2 << h->chroma_y_shift) * FFMIN(h->sps.crop_bottom, (16 >> h->chroma_y_shift) - 1); |
|
|
|
|
h->width = 16 * h->mb_width; |
|
|
|
|
h->height = 16 * h->mb_height; |
|
|
|
|
|
|
|
|
|
if (FFALIGN(h->avctx->width, 16) == h->width && |
|
|
|
|
FFALIGN(h->avctx->height, 16) == h->height) { |
|
|
|
|
h->width = h->avctx->width; |
|
|
|
|
h->height = h->avctx->height; |
|
|
|
|
} |
|
|
|
|
ret = init_dimensions(h); |
|
|
|
|
if (ret < 0) |
|
|
|
|
return ret; |
|
|
|
|
|
|
|
|
|
if (h->sps.video_signal_type_present_flag) { |
|
|
|
|
h->avctx->color_range = h->sps.full_range ? AVCOL_RANGE_JPEG |
|
|
|
@ -3221,8 +3248,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0) |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
if (h->context_initialized && |
|
|
|
|
(h->width != h->avctx->width || |
|
|
|
|
h->height != h->avctx->height || |
|
|
|
|
(h->width != h->avctx->coded_width || |
|
|
|
|
h->height != h->avctx->coded_height || |
|
|
|
|
needs_reinit)) { |
|
|
|
|
|
|
|
|
|
if (h != h0) { |
|
|
|
@ -4611,6 +4638,26 @@ static int get_consumed_bytes(int pos, int buf_size) |
|
|
|
|
return pos; |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
/* Reference src into dst, then shift dst's plane pointers so that it
 * exposes only the cropped picture area signalled in the SPS. */
static int output_frame(H264Context *h, AVFrame *dst, AVFrame *src)
{
    int plane;
    int ret = av_frame_ref(dst, src);
    if (ret < 0)
        return ret;

    /* nothing to do when the stream carries no cropping information */
    if (!h->sps.crop)
        return 0;

    for (plane = 0; plane < 3; plane++) {
        /* luma (plane 0) is never subsampled; chroma planes use the
         * stream's subsampling shifts */
        int hshift = plane ? h->chroma_x_shift : 0;
        int vshift = plane ? h->chroma_y_shift : 0;
        int offset = ((h->sps.crop_left >> hshift) << h->pixel_shift) +
                     (h->sps.crop_top >> vshift) * dst->linesize[plane];
        dst->data[plane] += offset;
    }
    return 0;
}
|
|
|
|
|
|
|
|
|
static int decode_frame(AVCodecContext *avctx, void *data, |
|
|
|
|
int *got_frame, AVPacket *avpkt) |
|
|
|
|
{ |
|
|
|
@ -4648,7 +4695,8 @@ out: |
|
|
|
|
h->delayed_pic[i] = h->delayed_pic[i + 1]; |
|
|
|
|
|
|
|
|
|
if (out) { |
|
|
|
|
if ((ret = av_frame_ref(pict, &out->f)) < 0) |
|
|
|
|
ret = output_frame(h, pict, &out->f); |
|
|
|
|
if (ret < 0) |
|
|
|
|
return ret; |
|
|
|
|
*got_frame = 1; |
|
|
|
|
} |
|
|
|
@ -4683,7 +4731,8 @@ out: |
|
|
|
|
/* Wait for second field. */ |
|
|
|
|
*got_frame = 0; |
|
|
|
|
} else { |
|
|
|
|
if ((ret = av_frame_ref(pict, &h->next_output_pic->f)) < 0) |
|
|
|
|
ret = output_frame(h, pict, &h->next_output_pic->f); |
|
|
|
|
if (ret < 0) |
|
|
|
|
return ret; |
|
|
|
|
*got_frame = 1; |
|
|
|
|
} |
|
|
|
|