@@ -30,7 +30,7 @@
 
 typedef struct QpegContext{
     AVCodecContext *avctx;
-    AVFrame pic;
+    AVFrame *pic;
    uint8_t *refdata;
    uint32_t pal[256];
    GetByteContext buffer;
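The context now owns its frame through a pointer, so the frame's storage is managed with the refcounted AVFrame API (av_frame_alloc()/av_frame_free()) instead of an embedded struct reset by avcodec_get_frame_defaults(); the "AVFrame * const p = a->pic;" initialization in the next hunk follows directly from this. As a reminder of that lifecycle, a minimal sketch (not part of the patch; the function name is invented):

#include <libavutil/error.h>
#include <libavutil/frame.h>

/* Minimal sketch of the lifecycle the new AVFrame *pic member relies on. */
static int frame_lifecycle_sketch(void)
{
    AVFrame *pic = av_frame_alloc();   /* allocate the frame struct itself */
    if (!pic)
        return AVERROR(ENOMEM);

    /* ... a decoder fills pic via ff_reget_buffer()/ff_get_buffer() ... */

    av_frame_free(&pic);               /* unrefs any data, frees, sets pic to NULL */
    return 0;
}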
@@ -250,7 +250,7 @@ static int decode_frame(AVCodecContext *avctx,
 {
     uint8_t ctable[128];
     QpegContext * const a = avctx->priv_data;
-    AVFrame * const p = &a->pic;
+    AVFrame * const p = a->pic;
    uint8_t* outdata;
    int delta, ret;
    const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
@@ -265,26 +265,26 @@ static int decode_frame(AVCodecContext *avctx,
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return ret;
     }
-    outdata = a->pic.data[0];
+    outdata = p->data[0];
    bytestream2_skip(&a->buffer, 4);
    bytestream2_get_buffer(&a->buffer, ctable, 128);
    bytestream2_skip(&a->buffer, 1);
 
    delta = bytestream2_get_byte(&a->buffer);
    if(delta == 0x10) {
-        qpeg_decode_intra(a, outdata, a->pic.linesize[0], avctx->width, avctx->height);
+        qpeg_decode_intra(a, outdata, p->linesize[0], avctx->width, avctx->height);
    } else {
-        qpeg_decode_inter(a, outdata, a->pic.linesize[0], avctx->width, avctx->height, delta, ctable, a->refdata);
+        qpeg_decode_inter(a, outdata, p->linesize[0], avctx->width, avctx->height, delta, ctable, a->refdata);
    }
 
    /* make the palette available on the way out */
    if (pal) {
-        a->pic.palette_has_changed = 1;
+        p->palette_has_changed = 1;
        memcpy(a->pal, pal, AVPALETTE_SIZE);
    }
-    memcpy(a->pic.data[1], a->pal, AVPALETTE_SIZE);
+    memcpy(p->data[1], a->pal, AVPALETTE_SIZE);
 
-    if ((ret = av_frame_ref(data, &a->pic)) < 0)
+    if ((ret = av_frame_ref(data, p)) < 0)
        return ret;
 
    *got_frame = 1;
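Inside decode_frame() every access now goes through the local pointer p, and the finished picture reaches the caller via av_frame_ref() rather than by copying the embedded frame. The output pattern, pulled out as a standalone sketch (the helper name is invented; it is written as it would sit inside libavcodec, and it assumes the two-argument ff_reget_buffer() that existed when this patch was made):

#include "avcodec.h"
#include "internal.h"
#include "libavutil/frame.h"

/* Sketch of the keep-own-frame / hand-out-a-reference pattern used above. */
static int output_frame_sketch(AVCodecContext *avctx, AVFrame *pic, AVFrame *out)
{
    int ret;

    /* Reuse the decoder's persistent frame; its previous contents stay
     * valid, which the inter (delta) frames depend on. */
    if ((ret = ff_reget_buffer(avctx, pic)) < 0)
        return ret;

    /* ... decode into pic->data[0], write the palette into pic->data[1] ... */

    /* The caller gets its own reference; the decoder keeps pic around
     * for the next packet instead of giving up ownership. */
    if ((ret = av_frame_ref(out, pic)) < 0)
        return ret;

    return 0;
}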
@@ -292,25 +292,29 @@ static int decode_frame(AVCodecContext *avctx,
     return avpkt->size;
 }
 
-static av_cold int decode_init(AVCodecContext *avctx){
+static av_cold int decode_end(AVCodecContext *avctx)
+{
     QpegContext * const a = avctx->priv_data;
 
-    a->avctx = avctx;
-    avctx->pix_fmt= AV_PIX_FMT_PAL8;
-    a->refdata = av_malloc(avctx->width * avctx->height);
-
-    avcodec_get_frame_defaults(&a->pic);
+    av_frame_free(&a->pic);
 
+    av_free(a->refdata);
    return 0;
 }
 
-static av_cold int decode_end(AVCodecContext *avctx){
+static av_cold int decode_init(AVCodecContext *avctx){
     QpegContext * const a = avctx->priv_data;
-    AVFrame * const p = &a->pic;
 
-    av_frame_unref(p);
+    a->avctx = avctx;
+    avctx->pix_fmt= AV_PIX_FMT_PAL8;
+    a->refdata = av_malloc(avctx->width * avctx->height);
+
+    a->pic = av_frame_alloc();
+    if (!a->pic) {
+        decode_end(avctx);
+        return AVERROR(ENOMEM);
+    }
 
-    av_free(a->refdata);
    return 0;
 }
 
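decode_end() also moves above decode_init(), which lets the new error path call it without a forward declaration when av_frame_alloc() fails. On the other side of the API, the reference handed out through av_frame_ref() belongs to the caller and must be released there; a hypothetical caller of the same era (names invented, using avcodec_decode_video2()) might look like:

#include <libavcodec/avcodec.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>

/* Hypothetical caller-side sketch: the caller owns the reference it gets. */
static int decode_one_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    AVFrame *frame = av_frame_alloc();
    int got_frame = 0, ret;

    if (!frame)
        return AVERROR(ENOMEM);

    ret = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
    if (ret >= 0 && got_frame) {
        /* PAL8: frame->data[0] holds the indices, frame->data[1] the palette. */
    }

    /* Drops only this reference; the decoder's internal a->pic keeps the
     * previous picture alive for inter-frame decoding. */
    av_frame_free(&frame);
    return ret;
}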