diff --git a/libavcodec/flicvideo.c b/libavcodec/flicvideo.c
index 76a7976a7d..a2d59e86c3 100644
--- a/libavcodec/flicvideo.c
+++ b/libavcodec/flicvideo.c
@@ -71,7 +71,7 @@
 
 typedef struct FlicDecodeContext {
     AVCodecContext *avctx;
-    AVFrame frame;
+    AVFrame *frame;
 
     unsigned int palette[256];
     int new_palette;
@@ -141,7 +141,10 @@ static av_cold int flic_decode_init(AVCodecContext *avctx)
                   return AVERROR_INVALIDDATA;
     }
 
-    avcodec_get_frame_defaults(&s->frame);
+    s->frame = av_frame_alloc();
+    if (!s->frame)
+        return AVERROR(ENOMEM);
+
     s->new_palette = 0;
 
     return 0;
@@ -185,11 +188,11 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
     bytestream2_init(&g2, buf, buf_size);
 
-    if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0)
+    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
         return ret;
 
-    pixels = s->frame.data[0];
-    pixel_limit = s->avctx->height * s->frame.linesize[0];
+    pixels = s->frame->data[0];
+    pixel_limit = s->avctx->height * s->frame->linesize[0];
 
     if (buf_size < 16 || buf_size > INT_MAX - (3 * 256 + FF_INPUT_BUFFER_PADDING_SIZE))
         return AVERROR_INVALIDDATA;
     frame_size = bytestream2_get_le32(&g2);
@@ -273,12 +276,12 @@
                 if ((line_packets & 0xC000) == 0xC000) {
                     // line skip opcode
                     line_packets = -line_packets;
-                    y_ptr += line_packets * s->frame.linesize[0];
+                    y_ptr += line_packets * s->frame->linesize[0];
                 } else if ((line_packets & 0xC000) == 0x4000) {
                     av_log(avctx, AV_LOG_ERROR, "Undefined opcode (%x) in DELTA_FLI\n", line_packets);
                 } else if ((line_packets & 0xC000) == 0x8000) {
                     // "last byte" opcode
-                    pixel_ptr= y_ptr + s->frame.linesize[0] - 1;
+                    pixel_ptr= y_ptr + s->frame->linesize[0] - 1;
                     CHECK_PIXEL_PTR(0);
                     pixels[pixel_ptr] = line_packets & 0xff;
                 } else {
@@ -313,7 +316,7 @@
                         }
                     }
 
-                    y_ptr += s->frame.linesize[0];
+                    y_ptr += s->frame->linesize[0];
                 }
             }
             break;
@@ -322,7 +325,7 @@
             /* line compressed */
             starting_line = bytestream2_get_le16(&g2);
             y_ptr = 0;
-            y_ptr += starting_line * s->frame.linesize[0];
+            y_ptr += starting_line * s->frame->linesize[0];
 
             compressed_lines = bytestream2_get_le16(&g2);
             while (compressed_lines > 0) {
@@ -359,7 +362,7 @@
                     }
                 }
 
-                y_ptr += s->frame.linesize[0];
+                y_ptr += s->frame->linesize[0];
                 compressed_lines--;
             }
             break;
@@ -367,7 +370,7 @@
         case FLI_BLACK:
             /* set the whole frame to color 0 (which is usually black) */
             memset(pixels, 0,
-                   s->frame.linesize[0] * s->avctx->height);
+                   s->frame->linesize[0] * s->avctx->height);
             break;
 
         case FLI_BRUN:
@@ -414,7 +417,7 @@
                     }
                 }
 
-                y_ptr += s->frame.linesize[0];
+                y_ptr += s->frame->linesize[0];
             }
             break;
 
@@ -425,8 +428,8 @@
                        "has incorrect size, skipping chunk\n", chunk_size - 6);
                 bytestream2_skip(&g2, chunk_size - 6);
             } else {
-                for (y_ptr = 0; y_ptr < s->frame.linesize[0] * s->avctx->height;
-                     y_ptr += s->frame.linesize[0]) {
+                for (y_ptr = 0; y_ptr < s->frame->linesize[0] * s->avctx->height;
+                     y_ptr += s->frame->linesize[0]) {
                     bytestream2_get_buffer(&g2, &pixels[y_ptr],
                                            s->avctx->width);
                 }
@@ -457,13 +460,13 @@
                buf_size - bytestream2_get_bytes_left(&g2));
 
     /* make the palette available on the way out */
-    memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
+    memcpy(s->frame->data[1], s->palette, AVPALETTE_SIZE);
     if (s->new_palette) {
-        s->frame.palette_has_changed = 1;
+        s->frame->palette_has_changed = 1;
         s->new_palette = 0;
     }
 
-    if ((ret = av_frame_ref(data, &s->frame)) < 0)
+    if ((ret = av_frame_ref(data, s->frame)) < 0)
         return ret;
 
     *got_frame = 1;
@@ -504,11 +507,11 @@ static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
 
     bytestream2_init(&g2, buf, buf_size);
 
-    if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0)
+    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
         return ret;
 
-    pixels = s->frame.data[0];
-    pixel_limit = s->avctx->height * s->frame.linesize[0];
+    pixels = s->frame->data[0];
+    pixel_limit = s->avctx->height * s->frame->linesize[0];
 
     frame_size = bytestream2_get_le32(&g2);
     bytestream2_skip(&g2, 2);  /* skip the magic number */
@@ -556,7 +559,7 @@
                 line_packets = bytestream2_get_le16(&g2);
                 if (line_packets < 0) {
                     line_packets = -line_packets;
-                    y_ptr += line_packets * s->frame.linesize[0];
+                    y_ptr += line_packets * s->frame->linesize[0];
                 } else {
                     compressed_lines--;
                     pixel_ptr = y_ptr;
@@ -589,7 +592,7 @@
                         }
                     }
 
-                    y_ptr += s->frame.linesize[0];
+                    y_ptr += s->frame->linesize[0];
                 }
             }
             break;
@@ -602,7 +605,7 @@
         case FLI_BLACK:
             /* set the whole frame to 0x0000 which is black in both 15Bpp and 16Bpp modes. */
             memset(pixels, 0x0000,
-                   s->frame.linesize[0] * s->avctx->height);
+                   s->frame->linesize[0] * s->avctx->height);
             break;
 
         case FLI_BRUN:
@@ -657,7 +660,7 @@
                     pixel_ptr += 2;
                 }
 #endif
-                y_ptr += s->frame.linesize[0];
+                y_ptr += s->frame->linesize[0];
             }
             break;
 
@@ -701,7 +704,7 @@
                     }
                 }
 
-                y_ptr += s->frame.linesize[0];
+                y_ptr += s->frame->linesize[0];
             }
             break;
 
@@ -714,8 +717,8 @@
                 bytestream2_skip(&g2, chunk_size - 6);
             } else {
-                for (y_ptr = 0; y_ptr < s->frame.linesize[0] * s->avctx->height;
-                     y_ptr += s->frame.linesize[0]) {
+                for (y_ptr = 0; y_ptr < s->frame->linesize[0] * s->avctx->height;
+                     y_ptr += s->frame->linesize[0]) {
 
                     pixel_countdown = s->avctx->width;
                     pixel_ptr = 0;
 
@@ -748,7 +751,7 @@
         av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \
                "and final chunk ptr = %d\n", buf_size, bytestream2_tell(&g2));
 
-    if ((ret = av_frame_ref(data, &s->frame)) < 0)
+    if ((ret = av_frame_ref(data, s->frame)) < 0)
         return ret;
 
     *got_frame = 1;
@@ -797,7 +800,7 @@ static av_cold int flic_decode_end(AVCodecContext *avctx)
 {
     FlicDecodeContext *s = avctx->priv_data;
 
-    av_frame_unref(&s->frame);
+    av_frame_free(&s->frame);
 
     return 0;
 }
diff --git a/libavcodec/mmvideo.c b/libavcodec/mmvideo.c
index ea182a0107..ab59b58781 100644
--- a/libavcodec/mmvideo.c
+++ b/libavcodec/mmvideo.c
@@ -48,7 +48,7 @@
 
 typedef struct MmContext {
     AVCodecContext *avctx;
-    AVFrame frame;
+    AVFrame *frame;
     int palette[AVPALETTE_COUNT];
     GetByteContext gb;
 } MmContext;
@@ -61,7 +61,9 @@ static av_cold int mm_decode_init(AVCodecContext *avctx)
 
     avctx->pix_fmt = AV_PIX_FMT_PAL8;
 
-    avcodec_get_frame_defaults(&s->frame);
+    s->frame = av_frame_alloc();
+    if (!s->frame)
+        return AVERROR(ENOMEM);
 
     return 0;
 }
@@ -108,9 +110,9 @@ static int mm_decode_intra(MmContext * s, int half_horiz, int half_vert)
             return AVERROR_INVALIDDATA;
 
         if (color) {
-            memset(s->frame.data[0] + y*s->frame.linesize[0] + x, color, run_length);
+            memset(s->frame->data[0] + y*s->frame->linesize[0] + x, color, run_length);
             if (half_vert)
-                memset(s->frame.data[0] + (y+1)*s->frame.linesize[0] + x, color, run_length);
+                memset(s->frame->data[0] + (y+1)*s->frame->linesize[0] + x, color, run_length);
         }
         x+= run_length;
 
@@ -159,13 +161,13 @@ static int mm_decode_inter(MmContext * s, int half_horiz, int half_vert)
                 return AVERROR_INVALIDDATA;
             if (replace) {
                 int color = bytestream2_get_byte(&data_ptr);
-                s->frame.data[0][y*s->frame.linesize[0] + x] = color;
+                s->frame->data[0][y*s->frame->linesize[0] + x] = color;
                 if (half_horiz)
-                    s->frame.data[0][y*s->frame.linesize[0] + x + 1] = color;
+                    s->frame->data[0][y*s->frame->linesize[0] + x + 1] = color;
                 if (half_vert) {
-                    s->frame.data[0][(y+1)*s->frame.linesize[0] + x] = color;
+                    s->frame->data[0][(y+1)*s->frame->linesize[0] + x] = color;
                     if (half_horiz)
-                        s->frame.data[0][(y+1)*s->frame.linesize[0] + x + 1] = color;
+                        s->frame->data[0][(y+1)*s->frame->linesize[0] + x + 1] = color;
                 }
             }
             x += 1 + half_horiz;
@@ -194,7 +196,7 @@
     buf_size -= MM_PREAMBLE_SIZE;
     bytestream2_init(&s->gb, buf, buf_size);
 
-    if ((res = ff_reget_buffer(avctx, &s->frame)) < 0)
+    if ((res = ff_reget_buffer(avctx, s->frame)) < 0)
         return res;
 
     switch(type) {
@@ -212,9 +214,9 @@
     if (res < 0)
         return res;
 
-    memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
+    memcpy(s->frame->data[1], s->palette, AVPALETTE_SIZE);
 
-    if ((res = av_frame_ref(data, &s->frame)) < 0)
+    if ((res = av_frame_ref(data, s->frame)) < 0)
         return res;
 
     *got_frame = 1;
@@ -226,7 +228,7 @@ static av_cold int mm_decode_end(AVCodecContext *avctx)
 {
     MmContext *s = avctx->priv_data;
 
-    av_frame_unref(&s->frame);
+    av_frame_free(&s->frame);
 
     return 0;
 }
diff --git a/libavcodec/smacker.c b/libavcodec/smacker.c
index 3cdb0d58b3..717e9ea033 100644
--- a/libavcodec/smacker.c
+++ b/libavcodec/smacker.c
@@ -584,6 +584,10 @@ static av_cold int decode_init(AVCodecContext *avctx)
 
     avctx->pix_fmt = AV_PIX_FMT_PAL8;
 
+    c->pic = av_frame_alloc();
+    if (!c->pic)
+        return AVERROR(ENOMEM);
+
     /* decode huffman trees from extradata */
     if(avctx->extradata_size < 16){
         av_log(avctx, AV_LOG_ERROR, "Extradata missing!\n");
@@ -596,10 +600,6 @@
         return ret;
     }
 
-    c->pic = av_frame_alloc();
-    if (!c->pic)
-        return AVERROR(ENOMEM);
-
     return 0;
 }
 
diff --git a/libavcodec/zmbvenc.c b/libavcodec/zmbvenc.c
index 2907c99abc..28dbe20f06 100644
--- a/libavcodec/zmbvenc.c
+++ b/libavcodec/zmbvenc.c
@@ -44,6 +44,7 @@
  */
 typedef struct ZmbvEncContext {
     AVCodecContext *avctx;
+    int range;
     uint8_t *comp_buf, *work_buf;
     uint8_t pal[768];
@@ -119,7 +120,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                         const AVFrame *pict, int *got_packet)
 {
     ZmbvEncContext * const c = avctx->priv_data;
-    AVFrame * const p = (AVFrame *)pict;
+    const AVFrame * const p = pict;
     uint8_t *src, *prev, *buf;
     uint32_t *palptr;
     int keyframe, chpal;
@@ -132,8 +133,8 @@
     c->curfrm++;
     if(c->curfrm == c->keyint)
         c->curfrm = 0;
-    p->pict_type= keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
-    p->key_frame= keyframe;
+    avctx->coded_frame->pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
+    avctx->coded_frame->key_frame = keyframe;
     chpal = !keyframe && memcmp(p->data[1], c->pal2, 1024);
 
     palptr = (uint32_t*)p->data[1];
@@ -248,6 +249,20 @@
     return 0;
 }
 
+static av_cold int encode_end(AVCodecContext *avctx)
+{
+    ZmbvEncContext * const c = avctx->priv_data;
+
+    av_freep(&c->comp_buf);
+    av_freep(&c->work_buf);
+
+    deflateEnd(&c->zstream);
+    av_freep(&c->prev);
+
+    av_frame_free(&avctx->coded_frame);
+
+    return 0;
+}
 
 /**
  * Init zmbv encoder
@@ -309,23 +324,11 @@
         return -1;
     }
 
-    return 0;
-}
-
-
-
-/**
- * Uninit zmbv encoder
- */
-static av_cold int encode_end(AVCodecContext *avctx)
-{
-    ZmbvEncContext * const c = avctx->priv_data;
-
-    av_freep(&c->comp_buf);
-    av_freep(&c->work_buf);
-
-    deflateEnd(&c->zstream);
-    av_freep(&c->prev);
+    avctx->coded_frame = av_frame_alloc();
+    if (!avctx->coded_frame) {
+        encode_end(avctx);
+        return AVERROR(ENOMEM);
+    }
 
     return 0;
 }
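
Note (not part of the patch itself): the three decoders above converge on the same AVFrame lifecycle. Since sizeof(AVFrame) is no longer part of the public ABI, a codec context can no longer embed an AVFrame by value; it holds an AVFrame* allocated once with av_frame_alloc() in init, reuses it across calls via ff_reget_buffer() (so delta frames can build on the previous picture), hands the caller its own reference with av_frame_ref(), and releases everything with av_frame_free() on close. zmbvenc, being an encoder, instead allocates avctx->coded_frame and frees it in encode_end(). The sketch below is a minimal illustration of the decoder-side pattern only; MyContext and the my_* names are hypothetical and do not appear in the patch.

/* Minimal sketch of the decoder-side AVFrame lifecycle used in this patch.
 * "MyContext" and the my_* functions are illustrative names only. */
#include "avcodec.h"
#include "internal.h"

typedef struct MyContext {
    AVFrame *frame;   /* owned by the decoder, reused across decode calls */
} MyContext;

static av_cold int my_decode_init(AVCodecContext *avctx)
{
    MyContext *s = avctx->priv_data;

    s->frame = av_frame_alloc();   /* heap allocation replaces the embedded AVFrame */
    if (!s->frame)
        return AVERROR(ENOMEM);
    return 0;
}

static int my_decode_frame(AVCodecContext *avctx, void *data,
                           int *got_frame, AVPacket *avpkt)
{
    MyContext *s = avctx->priv_data;
    int ret;

    /* get a writable buffer, preserving the previous contents where possible */
    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
        return ret;

    /* ... decode avpkt->data into s->frame->data[] here ... */

    /* give the caller its own reference; s->frame stays valid for the next call */
    if ((ret = av_frame_ref(data, s->frame)) < 0)
        return ret;
    *got_frame = 1;

    return avpkt->size;
}

static av_cold int my_decode_end(AVCodecContext *avctx)
{
    MyContext *s = avctx->priv_data;

    av_frame_free(&s->frame);   /* unreferences the data and frees the AVFrame */
    return 0;
}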