@@ -163,7 +163,7 @@ static int decode_pal_v2(MSS12Context *ctx, const uint8_t *buf, int buf_size)
 
     ncol = *buf++;
     if (ncol > ctx->free_colours || buf_size < 2 + ncol * 3)
-        return -1;
+        return AVERROR_INVALIDDATA;
     for (i = 0; i < ncol; i++)
         *pal++ = AV_RB24(buf + 3 * i);
 
@@ -189,7 +189,7 @@ static int decode_555(GetByteContext *gB, uint16_t *dst, int stride,
         READ_PAIR(y, endy)
 
         if (endx >= w || endy >= h || x > endx || y > endy)
-            return -1;
+            return AVERROR_INVALIDDATA;
         dst += x + stride * y;
         w = endx - x + 1;
         h = endy - y + 1;
@@ -373,13 +373,14 @@ static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
     VC1Context *v = avctx->priv_data;
     MpegEncContext *s = &v->s;
     AVFrame *f;
+    int ret;
 
     ff_mpeg_flush(avctx);
 
     if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
         int i = ff_find_unused_picture(s, 0);
         if (i < 0)
-            return -1;
+            return i;
         s->current_picture_ptr = &s->picture[i];
     }
 
@@ -399,10 +400,10 @@ static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
 
     avctx->pix_fmt = AV_PIX_FMT_YUV420P;
 
-    if (ff_MPV_frame_start(s, avctx) < 0) {
+    if ((ret = ff_MPV_frame_start(s, avctx)) < 0) {
         av_log(v->s.avctx, AV_LOG_ERROR, "ff_MPV_frame_start error\n");
         avctx->pix_fmt = AV_PIX_FMT_RGB24;
-        return -1;
+        return ret;
     }
 
     ff_er_frame_start(s);
@@ -617,7 +618,7 @@ static int mss2_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                               ctx->last_pic.linesize[0] * (avctx->height - 1);
         } else {
             av_log(avctx, AV_LOG_ERROR, "Missing keyframe\n");
-            return -1;
+            return AVERROR_INVALIDDATA;
         }
     } else {
         if (ctx->last_pic.data[0])
@@ -753,6 +754,7 @@ static int mss2_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 static av_cold int wmv9_init(AVCodecContext *avctx)
 {
     VC1Context *v = avctx->priv_data;
+    int ret;
 
     v->s.avctx = avctx;
     avctx->flags |= CODEC_FLAG_EMU_EDGE;
@@ -761,8 +763,8 @@ static av_cold int wmv9_init(AVCodecContext *avctx)
     if (avctx->idct_algo == FF_IDCT_AUTO)
         avctx->idct_algo = FF_IDCT_WMV2;
 
-    if (ff_vc1_init_common(v) < 0)
-        return -1;
+    if ((ret = ff_vc1_init_common(v)) < 0)
+        return ret;
     ff_vc1dsp_init(&v->vc1dsp);
 
     v->profile = PROFILE_MAIN;
@@ -802,9 +804,9 @@ static av_cold int wmv9_init(AVCodecContext *avctx)
 
     ff_vc1_init_transposed_scantables(v);
 
-    if (ff_msmpeg4_decode_init(avctx) < 0 ||
-        ff_vc1_decode_init_alloc_tables(v) < 0)
-        return -1;
+    if ((ret = ff_msmpeg4_decode_init(avctx)) < 0 ||
+        (ret = ff_vc1_decode_init_alloc_tables(v)) < 0)
+        return ret;
 
     /* error concealment */
     v->s.me.qpel_put = v->s.dsp.put_qpel_pixels_tab;