Video-1, Apple RPZA, Cinepak, Westwood IMA ADPCM

Originally committed as revision 2324 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent f2f6134b9e
commit 2fdf638b0c
18 changed files with 2140 additions and 3 deletions
@@ -0,0 +1,456 @@
/*
 * Cinepak Video Decoder
 * Copyright (C) 2003 the ffmpeg project
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/**
 * @file cinepak.c
 * Cinepak video decoder
 * by Ewald Snel <ewald@rambo.its.tudelft.nl>
 * For more information on the Cinepak algorithm, visit:
 *   http://www.csse.monash.edu.au/~timf/
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "common.h"
#include "avcodec.h"
#include "dsputil.h"

#define PALETTE_COUNT 256

#define BE_16(x)  ((((uint8_t*)(x))[0] << 8) | ((uint8_t*)(x))[1])
#define BE_32(x)  ((((uint8_t*)(x))[0] << 24) | \
                   (((uint8_t*)(x))[1] << 16) | \
                   (((uint8_t*)(x))[2] << 8) | \
                    ((uint8_t*)(x))[3])

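/* Illustrative sketch (editor's addition, not part of this commit): how the
 * BE_16/BE_32 macros above pull big-endian values out of an arbitrary byte
 * pointer. The sample bytes are made up for demonstration only. */
static void be_macro_example(void)
{
    static uint8_t sample[4] = { 0x12, 0x34, 0x56, 0x78 };
    unsigned int v16 = BE_16(sample);   /* reads the first 2 bytes -> 0x1234 */
    unsigned int v32 = BE_32(sample);   /* reads all 4 bytes -> 0x12345678 */
    (void)v16;
    (void)v32;
}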
||||
typedef struct { |
||||
uint8_t y0, y1, y2, y3; |
||||
uint8_t u, v; |
||||
} cvid_codebook_t; |
||||
|
||||
#define MAX_STRIPS 32 |
||||
|
||||
typedef struct { |
||||
uint16_t id; |
||||
uint16_t x1, y1; |
||||
uint16_t x2, y2; |
||||
cvid_codebook_t v4_codebook[256]; |
||||
cvid_codebook_t v1_codebook[256]; |
||||
} cvid_strip_t; |
||||
|
||||
typedef struct CinepakContext { |
||||
|
||||
AVCodecContext *avctx; |
||||
DSPContext dsp; |
||||
AVFrame frame; |
||||
AVFrame prev_frame; |
||||
|
||||
unsigned char *data; |
||||
int size; |
||||
|
||||
unsigned char palette[PALETTE_COUNT * 4]; |
||||
int palette_video; |
||||
cvid_strip_t strips[MAX_STRIPS]; |
||||
|
||||
} CinepakContext; |
||||
|
||||
static void cinepak_decode_codebook (cvid_codebook_t *codebook, |
||||
int chunk_id, int size, uint8_t *data) |
||||
{ |
||||
uint8_t *eod = (data + size); |
||||
uint32_t flag, mask; |
||||
int i, n; |
||||
|
||||
/* check if this chunk contains 4- or 6-element vectors */ |
||||
n = (chunk_id & 0x0400) ? 4 : 6; |
||||
flag = 0; |
||||
mask = 0; |
||||
|
||||
for (i=0; i < 256; i++) { |
||||
if ((chunk_id & 0x0100) && !(mask >>= 1)) { |
||||
if ((data + 4) > eod) |
||||
break; |
||||
|
||||
flag = BE_32 (data); |
||||
data += 4; |
||||
mask = 0x80000000; |
||||
} |
||||
|
||||
if (!(chunk_id & 0x0100) || (flag & mask)) { |
||||
if ((data + n) > eod) |
||||
break; |
||||
|
||||
if (n == 6) { |
||||
codebook[i].y0 = *data++; |
||||
codebook[i].y1 = *data++; |
||||
codebook[i].y2 = *data++; |
||||
codebook[i].y3 = *data++; |
||||
codebook[i].u = 128 + *data++; |
||||
codebook[i].v = 128 + *data++; |
||||
            } else {
                /* this codebook type indicates either greyscale or
                 * palettized video; if palettized, U & V components will
                 * not be used so it is safe to set them to 128 for the
                 * benefit of greyscale rendering in YUV420P */
||||
codebook[i].y0 = *data++; |
||||
codebook[i].y1 = *data++; |
||||
codebook[i].y2 = *data++; |
||||
codebook[i].y3 = *data++; |
||||
codebook[i].u = 128; |
||||
codebook[i].v = 128; |
||||
} |
||||
} |
||||
} |
||||
} |
||||
|
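/* Illustrative sketch (editor's addition, not part of this commit): the
 * flag/mask pattern used by cinepak_decode_codebook() above and
 * cinepak_decode_vectors() below. For "update" chunk types (chunk_id bit
 * 0x0100 set), a big-endian 32-bit flag word is read once per group of 32
 * entries and its bits are consumed MSB-first; a set bit means the current
 * codebook entry (or block) carries new data in the stream. The helper
 * below restates that test for one entry within a 32-entry group. */
static int cinepak_flag_bit_example(uint8_t *data, int index_in_group)
{
    uint32_t flags = BE_32(data);                    /* one 32-entry flag word */
    uint32_t mask  = 0x80000000 >> (index_in_group & 31);
    return (flags & mask) != 0;                      /* 1 = entry is updated */
}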
||||
static int cinepak_decode_vectors (CinepakContext *s, cvid_strip_t *strip, |
||||
int chunk_id, int size, uint8_t *data) |
||||
{ |
||||
uint8_t *eod = (data + size); |
||||
uint32_t flag, mask; |
||||
cvid_codebook_t *codebook; |
||||
unsigned int i, j, x, y; |
||||
uint32_t iy[4]; |
||||
uint32_t iu[2]; |
||||
uint32_t iv[2]; |
||||
|
||||
flag = 0; |
||||
mask = 0; |
||||
|
||||
for (y=strip->y1; y < strip->y2; y+=4) { |
||||
|
||||
iy[0] = strip->x1 + (y * s->frame.linesize[0]); |
||||
iy[1] = iy[0] + s->frame.linesize[0]; |
||||
iy[2] = iy[1] + s->frame.linesize[0]; |
||||
iy[3] = iy[2] + s->frame.linesize[0]; |
||||
iu[0] = (strip->x1/2) + ((y/2) * s->frame.linesize[1]); |
||||
iu[1] = iu[0] + s->frame.linesize[1]; |
||||
iv[0] = (strip->x1/2) + ((y/2) * s->frame.linesize[2]); |
||||
iv[1] = iv[0] + s->frame.linesize[2]; |
||||
|
||||
for (x=strip->x1; x < strip->x2; x+=4) { |
||||
if ((chunk_id & 0x0100) && !(mask >>= 1)) { |
||||
if ((data + 4) > eod) |
||||
return -1; |
||||
|
||||
flag = BE_32 (data); |
||||
data += 4; |
||||
mask = 0x80000000; |
||||
} |
||||
|
||||
if (!(chunk_id & 0x0100) || (flag & mask)) { |
||||
if (!(chunk_id & 0x0200) && !(mask >>= 1)) { |
||||
if ((data + 4) > eod) |
||||
return -1; |
||||
|
||||
flag = BE_32 (data); |
||||
data += 4; |
||||
mask = 0x80000000; |
||||
} |
||||
|
||||
if ((chunk_id & 0x0200) || (~flag & mask)) { |
||||
if (data >= eod) |
||||
return -1; |
||||
|
||||
codebook = &strip->v1_codebook[*data++]; |
||||
s->frame.data[0][iy[0] + 0] = codebook->y0; |
||||
s->frame.data[0][iy[0] + 1] = codebook->y0; |
||||
s->frame.data[0][iy[1] + 0] = codebook->y0; |
||||
s->frame.data[0][iy[1] + 1] = codebook->y0; |
||||
if (!s->palette_video) { |
||||
s->frame.data[1][iu[0]] = codebook->u; |
||||
s->frame.data[2][iv[0]] = codebook->v; |
||||
} |
||||
|
||||
s->frame.data[0][iy[0] + 2] = codebook->y0; |
||||
s->frame.data[0][iy[0] + 3] = codebook->y0; |
||||
s->frame.data[0][iy[1] + 2] = codebook->y0; |
||||
s->frame.data[0][iy[1] + 3] = codebook->y0; |
||||
if (!s->palette_video) { |
||||
s->frame.data[1][iu[0] + 1] = codebook->u; |
||||
s->frame.data[2][iv[0] + 1] = codebook->v; |
||||
} |
||||
|
||||
s->frame.data[0][iy[2] + 0] = codebook->y0; |
||||
s->frame.data[0][iy[2] + 1] = codebook->y0; |
||||
s->frame.data[0][iy[3] + 0] = codebook->y0; |
||||
s->frame.data[0][iy[3] + 1] = codebook->y0; |
||||
if (!s->palette_video) { |
||||
s->frame.data[1][iu[1]] = codebook->u; |
||||
s->frame.data[2][iv[1]] = codebook->v; |
||||
} |
||||
|
||||
s->frame.data[0][iy[2] + 2] = codebook->y0; |
||||
s->frame.data[0][iy[2] + 3] = codebook->y0; |
||||
s->frame.data[0][iy[3] + 2] = codebook->y0; |
||||
s->frame.data[0][iy[3] + 3] = codebook->y0; |
||||
if (!s->palette_video) { |
||||
s->frame.data[1][iu[1] + 1] = codebook->u; |
||||
s->frame.data[2][iv[1] + 1] = codebook->v; |
||||
} |
||||
|
||||
} else if (flag & mask) { |
||||
if ((data + 4) > eod) |
||||
return -1; |
||||
|
||||
codebook = &strip->v4_codebook[*data++]; |
||||
s->frame.data[0][iy[0] + 0] = codebook->y0; |
||||
s->frame.data[0][iy[0] + 1] = codebook->y1; |
||||
s->frame.data[0][iy[1] + 0] = codebook->y2; |
||||
s->frame.data[0][iy[1] + 1] = codebook->y3; |
||||
if (!s->palette_video) { |
||||
s->frame.data[1][iu[0]] = codebook->u; |
||||
s->frame.data[2][iv[0]] = codebook->v; |
||||
} |
||||
|
||||
codebook = &strip->v4_codebook[*data++]; |
||||
s->frame.data[0][iy[0] + 2] = codebook->y0; |
||||
s->frame.data[0][iy[0] + 3] = codebook->y1; |
||||
s->frame.data[0][iy[1] + 2] = codebook->y2; |
||||
s->frame.data[0][iy[1] + 3] = codebook->y3; |
||||
if (!s->palette_video) { |
||||
s->frame.data[1][iu[0] + 1] = codebook->u; |
||||
s->frame.data[2][iv[0] + 1] = codebook->v; |
||||
} |
||||
|
||||
codebook = &strip->v4_codebook[*data++]; |
||||
s->frame.data[0][iy[2] + 0] = codebook->y0; |
||||
s->frame.data[0][iy[2] + 1] = codebook->y1; |
||||
s->frame.data[0][iy[3] + 0] = codebook->y2; |
||||
s->frame.data[0][iy[3] + 1] = codebook->y3; |
||||
if (!s->palette_video) { |
||||
s->frame.data[1][iu[1]] = codebook->u; |
||||
s->frame.data[2][iv[1]] = codebook->v; |
||||
} |
||||
|
||||
codebook = &strip->v4_codebook[*data++]; |
||||
s->frame.data[0][iy[2] + 2] = codebook->y0; |
||||
s->frame.data[0][iy[2] + 3] = codebook->y1; |
||||
s->frame.data[0][iy[3] + 2] = codebook->y2; |
||||
s->frame.data[0][iy[3] + 3] = codebook->y3; |
||||
if (!s->palette_video) { |
||||
s->frame.data[1][iu[1] + 1] = codebook->u; |
||||
s->frame.data[2][iv[1] + 1] = codebook->v; |
||||
} |
||||
|
||||
} |
||||
} else { |
||||
/* copy from the previous frame */ |
||||
for (i = 0; i < 4; i++) { |
||||
for (j = 0; j < 4; j++) { |
||||
s->frame.data[0][iy[i] + j] = |
||||
s->prev_frame.data[0][iy[i] + j]; |
||||
} |
||||
} |
||||
for (i = 0; i < 2; i++) { |
||||
for (j = 0; j < 2; j++) { |
||||
s->frame.data[1][iu[i] + j] = |
||||
s->prev_frame.data[1][iu[i] + j]; |
||||
s->frame.data[2][iv[i] + j] = |
||||
s->prev_frame.data[2][iv[i] + j]; |
||||
} |
||||
} |
||||
} |
||||
|
||||
iy[0] += 4; iy[1] += 4; |
||||
iy[2] += 4; iy[3] += 4; |
||||
iu[0] += 2; iu[1] += 2; |
||||
iv[0] += 2; iv[1] += 2; |
||||
} |
||||
} |
||||
|
||||
return 0; |
||||
} |
||||
|
||||
static int cinepak_decode_strip (CinepakContext *s, |
||||
cvid_strip_t *strip, uint8_t *data, int size) |
||||
{ |
||||
uint8_t *eod = (data + size); |
||||
int chunk_id, chunk_size; |
||||
|
||||
/* coordinate sanity checks */ |
||||
if (strip->x1 >= s->avctx->width || strip->x2 > s->avctx->width || |
||||
strip->y1 >= s->avctx->height || strip->y2 > s->avctx->height || |
||||
strip->x1 >= strip->x2 || strip->y1 >= strip->y2) |
||||
return -1; |
||||
|
||||
while ((data + 4) <= eod) { |
||||
chunk_id = BE_16 (&data[0]); |
||||
chunk_size = BE_16 (&data[2]) - 4; |
||||
data += 4; |
||||
chunk_size = ((data + chunk_size) > eod) ? (eod - data) : chunk_size; |
||||
|
||||
        switch (chunk_id) {

        case 0x2000:
        case 0x2100:
        case 0x2400:
        case 0x2500:
            /* load or update this strip's V4 codebook */
            cinepak_decode_codebook (strip->v4_codebook, chunk_id,
                chunk_size, data);
            break;

        case 0x2200:
        case 0x2300:
        case 0x2600:
        case 0x2700:
            /* load or update this strip's V1 codebook */
            cinepak_decode_codebook (strip->v1_codebook, chunk_id,
                chunk_size, data);
            break;

        case 0x3000:
        case 0x3100:
        case 0x3200:
            /* vector chunk: render the strip and finish */
            return cinepak_decode_vectors (s, strip, chunk_id,
                chunk_size, data);
        }
||||
|
||||
data += chunk_size; |
||||
} |
||||
|
||||
return -1; |
||||
} |
||||
|
||||
static int cinepak_decode (CinepakContext *s) |
||||
{ |
||||
uint8_t *eod = (s->data + s->size); |
||||
int i, result, strip_size, frame_flags, num_strips; |
||||
int y0 = 0; |
||||
|
||||
if (s->size < 10) |
||||
return -1; |
||||
|
||||
frame_flags = s->data[0]; |
||||
num_strips = BE_16 (&s->data[8]); |
||||
s->data += 10; |
||||
|
||||
if (num_strips > MAX_STRIPS) |
||||
num_strips = MAX_STRIPS; |
||||
|
||||
for (i=0; i < num_strips; i++) { |
||||
if ((s->data + 12) > eod) |
||||
return -1; |
||||
|
||||
s->strips[i].id = BE_16 (s->data); |
||||
s->strips[i].y1 = y0; |
||||
s->strips[i].x1 = 0; |
||||
s->strips[i].y2 = y0 + BE_16 (&s->data[8]); |
||||
s->strips[i].x2 = s->avctx->width; |
||||
|
||||
strip_size = BE_16 (&s->data[2]) - 12; |
||||
s->data += 12; |
||||
strip_size = ((s->data + strip_size) > eod) ? (eod - s->data) : strip_size; |
||||
|
||||
if ((i > 0) && !(frame_flags & 0x01)) { |
||||
memcpy (s->strips[i].v4_codebook, s->strips[i-1].v4_codebook, |
||||
sizeof(s->strips[i].v4_codebook)); |
||||
memcpy (s->strips[i].v1_codebook, s->strips[i-1].v1_codebook, |
||||
sizeof(s->strips[i].v1_codebook)); |
||||
} |
||||
|
||||
result = cinepak_decode_strip (s, &s->strips[i], s->data, strip_size); |
||||
|
||||
if (result != 0) |
||||
return result; |
||||
|
||||
s->data += strip_size; |
||||
y0 = s->strips[i].y2; |
||||
} |
||||
return 0; |
||||
} |
||||
|
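/* Illustrative notes (editor's addition, not part of this commit), inferred
 * only from the reads performed in cinepak_decode() above; offsets the code
 * does not interpret are left undocumented. The enum names are hypothetical. */
enum {
    CVID_FRAME_HEADER_SIZE = 10,  /* data[0] = flags (bit 0 clear: strips reuse
                                   * the previous strip's codebooks),
                                   * BE_16(&data[8]) = strip count */
    CVID_STRIP_HEADER_SIZE = 12   /* BE_16(data) = strip id,
                                   * BE_16(&data[2]) = strip size,
                                   * BE_16(&data[8]) = strip height */
};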
||||
static int cinepak_decode_init(AVCodecContext *avctx) |
||||
{ |
||||
CinepakContext *s = (CinepakContext *)avctx->priv_data; |
||||
/*
|
||||
int i; |
||||
unsigned char r, g, b; |
||||
unsigned char *raw_palette; |
||||
unsigned int *palette32; |
||||
*/ |
||||
|
||||
s->avctx = avctx; |
||||
|
||||
// check for paletted data
|
||||
s->palette_video = 0; |
||||
|
||||
|
||||
avctx->pix_fmt = PIX_FMT_YUV420P; |
||||
avctx->has_b_frames = 0; |
||||
dsputil_init(&s->dsp, avctx); |
||||
|
||||
s->frame.data[0] = s->prev_frame.data[0] = NULL; |
||||
|
||||
return 0; |
||||
} |
||||
|
||||
static int cinepak_decode_frame(AVCodecContext *avctx, |
||||
void *data, int *data_size, |
||||
uint8_t *buf, int buf_size) |
||||
{ |
||||
CinepakContext *s = (CinepakContext *)avctx->priv_data; |
||||
|
||||
s->data = buf; |
||||
s->size = buf_size; |
||||
|
||||
if (avctx->get_buffer(avctx, &s->frame)) { |
||||
printf (" Cinepak: get_buffer() failed\n"); |
||||
return -1; |
||||
} |
||||
|
||||
cinepak_decode(s); |
||||
|
||||
if (s->prev_frame.data[0]) |
||||
avctx->release_buffer(avctx, &s->prev_frame); |
||||
|
||||
/* shuffle frames */ |
||||
s->prev_frame = s->frame; |
||||
|
||||
*data_size = sizeof(AVFrame); |
||||
*(AVFrame*)data = s->frame; |
||||
|
||||
/* report that the buffer was completely consumed */ |
||||
return buf_size; |
||||
} |
||||
|
||||
static int cinepak_decode_end(AVCodecContext *avctx) |
||||
{ |
||||
CinepakContext *s = (CinepakContext *)avctx->priv_data; |
||||
|
||||
if (s->prev_frame.data[0]) |
||||
avctx->release_buffer(avctx, &s->prev_frame); |
||||
|
||||
return 0; |
||||
} |
||||
|
||||
AVCodec cinepak_decoder = { |
||||
"cinepak", |
||||
CODEC_TYPE_VIDEO, |
||||
CODEC_ID_CINEPAK, |
||||
sizeof(CinepakContext), |
||||
cinepak_decode_init, |
||||
NULL, |
||||
cinepak_decode_end, |
||||
cinepak_decode_frame, |
||||
CODEC_CAP_DR1, |
||||
}; |
@@ -0,0 +1,219 @@
/*
 * Microsoft RLE Video Decoder
 * Copyright (C) 2003 the ffmpeg project
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/**
 * @file msrle.c
 * MS RLE Video Decoder by Mike Melanson (melanson@pcisys.net)
 * For more information about the MS RLE format, visit:
 *   http://www.pcisys.net/~melanson/codecs/
 *
 * The MS RLE decoder outputs PAL8 colorspace data.
 *
 * Note that this decoder expects the palette colors from the end of the
 * BITMAPINFO header passed through extradata.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "common.h"
#include "avcodec.h"
#include "dsputil.h"
||||
|
||||
typedef struct MsrleContext { |
||||
AVCodecContext *avctx; |
||||
AVFrame frame; |
||||
AVFrame prev_frame; |
||||
|
||||
unsigned char *buf; |
||||
int size; |
||||
|
||||
unsigned int palette[256]; |
||||
} MsrleContext; |
||||
|
||||
#define FETCH_NEXT_STREAM_BYTE() \
    if (stream_ptr >= s->size) \
    { \
        printf(" MS RLE: stream ptr just went out of bounds (1)\n"); \
        return; \
    } \
    stream_byte = s->buf[stream_ptr++];
||||
|
||||
static void msrle_decode_pal8(MsrleContext *s) |
||||
{ |
||||
int stream_ptr = 0; |
||||
unsigned char rle_code; |
||||
unsigned char extra_byte; |
||||
unsigned char stream_byte; |
||||
int pixel_ptr = 0; |
||||
int row_dec = s->frame.linesize[0]; |
||||
int row_ptr = (s->avctx->height - 1) * row_dec; |
||||
int frame_size = row_dec * s->avctx->height; |
||||
|
||||
while (row_ptr >= 0) { |
||||
FETCH_NEXT_STREAM_BYTE(); |
||||
rle_code = stream_byte; |
||||
if (rle_code == 0) { |
||||
/* fetch the next byte to see how to handle escape code */ |
||||
FETCH_NEXT_STREAM_BYTE(); |
||||
if (stream_byte == 0) { |
||||
/* line is done, goto the next one */ |
||||
row_ptr -= row_dec; |
||||
pixel_ptr = 0; |
||||
} else if (stream_byte == 1) { |
||||
/* decode is done */ |
||||
return; |
||||
} else if (stream_byte == 2) { |
||||
/* reposition frame decode coordinates */ |
||||
FETCH_NEXT_STREAM_BYTE(); |
||||
pixel_ptr += stream_byte; |
||||
FETCH_NEXT_STREAM_BYTE(); |
||||
row_ptr -= stream_byte * row_dec; |
||||
} else { |
||||
/* copy pixels from encoded stream */ |
||||
if ((row_ptr + pixel_ptr + stream_byte > frame_size) || |
||||
(row_ptr < 0)) { |
||||
printf(" MS RLE: frame ptr just went out of bounds (1)\n"); |
||||
return; |
||||
} |
||||
|
||||
rle_code = stream_byte; |
||||
extra_byte = stream_byte & 0x01; |
||||
if (stream_ptr + rle_code + extra_byte > s->size) { |
||||
printf(" MS RLE: stream ptr just went out of bounds (2)\n"); |
||||
return; |
||||
} |
||||
|
||||
while (rle_code--) { |
||||
FETCH_NEXT_STREAM_BYTE(); |
||||
s->frame.data[0][row_ptr + pixel_ptr] = stream_byte; |
||||
pixel_ptr++; |
||||
} |
||||
|
||||
/* if the RLE code is odd, skip a byte in the stream */ |
||||
if (extra_byte) |
||||
stream_ptr++; |
||||
} |
||||
} else { |
||||
/* decode a run of data */ |
||||
if ((row_ptr + pixel_ptr + stream_byte > frame_size) || |
||||
(row_ptr < 0)) { |
||||
printf(" MS RLE: frame ptr just went out of bounds (2)\n"); |
||||
return; |
||||
} |
||||
|
||||
FETCH_NEXT_STREAM_BYTE(); |
||||
|
||||
while(rle_code--) { |
||||
s->frame.data[0][row_ptr + pixel_ptr] = stream_byte; |
||||
pixel_ptr++; |
||||
} |
||||
} |
||||
} |
||||
|
||||
/* make the palette available */ |
||||
memcpy(s->frame.data[1], s->palette, 256 * 4); |
||||
|
||||
/* one last sanity check on the way out */ |
||||
if (stream_ptr < s->size) |
||||
printf(" MS RLE: ended frame decode with bytes left over (%d < %d)\n", |
||||
stream_ptr, s->size); |
||||
} |
||||
|
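/* Illustrative sample (editor's addition, not part of this commit): a
 * hand-assembled MS RLE stream exercising the cases handled by
 * msrle_decode_pal8() above. The byte values are made up.
 *   0x03 0x07                 run: 3 pixels of palette index 0x07
 *   0x00 0x04 0x01..0x04      escape: copy 4 literal indices (even count, no pad)
 *   0x00 0x00                 escape 0: end of line
 *   0x00 0x01                 escape 1: end of RLE data */
static const unsigned char msrle_example_stream[] = {
    0x03, 0x07,
    0x00, 0x04, 0x01, 0x02, 0x03, 0x04,
    0x00, 0x00,
    0x00, 0x01
};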
||||
static int msrle_decode_init(AVCodecContext *avctx) |
||||
{ |
||||
MsrleContext *s = (MsrleContext *)avctx->priv_data; |
||||
int i, j; |
||||
unsigned char *palette; |
||||
|
||||
s->avctx = avctx; |
||||
|
||||
avctx->pix_fmt = PIX_FMT_PAL8; |
||||
avctx->has_b_frames = 0; |
||||
s->frame.data[0] = s->prev_frame.data[0] = NULL; |
||||
|
||||
    /* convert palette (extradata stores entries as B, G, R, 0) */
    palette = (unsigned char *)s->avctx->extradata;
    memset (s->palette, 0, 256 * 4);
    for (i = 0, j = 0; i < s->avctx->extradata_size / 4; i++, j += 4)
        s->palette[i] =
            (palette[j + 2] << 16) |
            (palette[j + 1] << 8) |
            (palette[j + 0] << 0);
||||
|
||||
return 0; |
||||
} |
||||
|
||||
static int msrle_decode_frame(AVCodecContext *avctx, |
||||
void *data, int *data_size, |
||||
uint8_t *buf, int buf_size) |
||||
{ |
||||
MsrleContext *s = (MsrleContext *)avctx->priv_data; |
||||
|
||||
s->buf = buf; |
||||
s->size = buf_size; |
||||
|
||||
if (avctx->get_buffer(avctx, &s->frame)) { |
||||
printf (" MS RLE: get_buffer() failed\n"); |
||||
return -1; |
||||
} |
||||
|
||||
/* grossly inefficient, but...oh well */ |
||||
memcpy(s->frame.data[0], s->prev_frame.data[0],
|
||||
s->frame.linesize[0] * s->avctx->height); |
||||
|
||||
msrle_decode_pal8(s); |
||||
|
||||
    if (s->prev_frame.data[0])
        avctx->release_buffer(avctx, &s->prev_frame);
||||
|
||||
/* shuffle frames */ |
||||
s->prev_frame = s->frame; |
||||
|
||||
*data_size = sizeof(AVFrame); |
||||
*(AVFrame*)data = s->frame; |
||||
|
||||
/* report that the buffer was completely consumed */ |
||||
return buf_size; |
||||
} |
||||
|
||||
static int msrle_decode_end(AVCodecContext *avctx) |
||||
{ |
||||
MsrleContext *s = (MsrleContext *)avctx->priv_data; |
||||
|
||||
/* release the last frame */ |
||||
if (s->prev_frame.data[0]) |
||||
avctx->release_buffer(avctx, &s->prev_frame); |
||||
|
||||
return 0; |
||||
} |
||||
|
||||
AVCodec msrle_decoder = { |
||||
"msrle", |
||||
CODEC_TYPE_VIDEO, |
||||
CODEC_ID_MSRLE, |
||||
sizeof(MsrleContext), |
||||
msrle_decode_init, |
||||
NULL, |
||||
msrle_decode_end, |
||||
msrle_decode_frame, |
||||
CODEC_CAP_DR1, |
||||
}; |
@@ -0,0 +1,378 @@
/*
 * Microsoft Video-1 Decoder
 * Copyright (C) 2003 the ffmpeg project
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/**
 * @file msvideo1.c
 * Microsoft Video-1 Decoder by Mike Melanson (melanson@pcisys.net)
 * For more information about the MS Video-1 format, visit:
 *   http://www.pcisys.net/~melanson/codecs/
 *
 * This decoder outputs either PAL8 or RGB555 data, depending on whether
 * an RGB palette was passed in through extradata; if extradata is present,
 * the output is PAL8, otherwise RGB555.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "common.h"
#include "avcodec.h"
#include "dsputil.h"
||||
|
||||
#define PALETTE_COUNT 256
#define LE_16(x)  ((((uint8_t*)(x))[1] << 8) | ((uint8_t*)(x))[0])
#define CHECK_STREAM_PTR(n) \
    if ((stream_ptr + n) > s->size) { \
        printf (" MS Video-1 warning: stream_ptr out of bounds (%d >= %d)\n", \
            stream_ptr + n, s->size); \
        return; \
    }

#define COPY_PREV_BLOCK() \
    pixel_ptr = block_ptr; \
    for (pixel_y = 0; pixel_y < 4; pixel_y++) { \
        for (pixel_x = 0; pixel_x < 4; pixel_x++, pixel_ptr++) \
            pixels[pixel_ptr] = prev_pixels[pixel_ptr]; \
        pixel_ptr -= row_dec; \
    }
||||
|
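/* Illustrative summary (editor's addition, not part of this commit) of how
 * the decode loops below classify each block code pair, where byte_a is
 * fetched first and byte_b second:
 *   byte_a == 0 && byte_b == 0, no blocks left -> end of decode
 *   (byte_b & 0xFC) == 0x84                    -> skip-block code
 *   byte_b <  0x80                             -> 2-color block (8-bit mode),
 *                                                 2- or 8-color block (16-bit mode)
 *   byte_b >= 0x90 (8-bit mode only)           -> 8-color block
 *   anything else                              -> 1-color block
 * A hypothetical skip code, for reference: */
static const unsigned char msvideo1_example_skip_code[2] = { 0x03, 0x84 };
    /* byte_a = 0x03, byte_b = 0x84: the current block plus
     * ((0x84 - 0x84) << 8) + 0x03 - 1 = 2 further blocks are copied
     * from the previous frame */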
||||
typedef struct Msvideo1Context { |
||||
|
||||
AVCodecContext *avctx; |
||||
DSPContext dsp; |
||||
AVFrame frame; |
||||
AVFrame prev_frame; |
||||
|
||||
unsigned char *buf; |
||||
int size; |
||||
|
||||
int mode_8bit; /* if it's not 8-bit, it's 16-bit */ |
||||
unsigned char palette[PALETTE_COUNT * 4]; |
||||
|
||||
} Msvideo1Context; |
||||
|
||||
static int msvideo1_decode_init(AVCodecContext *avctx) |
||||
{ |
||||
Msvideo1Context *s = (Msvideo1Context *)avctx->priv_data; |
||||
int i; |
||||
unsigned char r, g, b; |
||||
unsigned char *raw_palette; |
||||
unsigned int *palette32; |
||||
|
||||
s->avctx = avctx; |
||||
|
||||
/* figure out the colorspace based on the presence of a palette in
|
||||
* extradata */ |
||||
if (s->avctx->extradata_size) { |
||||
s->mode_8bit = 1; |
||||
/* load up the palette */ |
||||
palette32 = (unsigned int *)s->palette; |
||||
raw_palette = (unsigned char *)s->avctx->extradata; |
||||
for (i = 0; i < s->avctx->extradata_size / 4; i++) { |
||||
b = *raw_palette++; |
||||
g = *raw_palette++; |
||||
r = *raw_palette++; |
||||
raw_palette++; |
||||
palette32[i] = (r << 16) | (g << 8) | (b); |
||||
} |
||||
avctx->pix_fmt = PIX_FMT_PAL8; |
||||
} else { |
||||
s->mode_8bit = 0; |
||||
avctx->pix_fmt = PIX_FMT_RGB555; |
||||
} |
||||
|
||||
avctx->has_b_frames = 0; |
||||
dsputil_init(&s->dsp, avctx); |
||||
|
||||
s->frame.data[0] = s->prev_frame.data[0] = NULL; |
||||
|
||||
return 0; |
||||
} |
||||
|
||||
static void msvideo1_decode_8bit(Msvideo1Context *s) |
||||
{ |
||||
int block_ptr, pixel_ptr; |
||||
int total_blocks; |
||||
int pixel_x, pixel_y; /* pixel width and height iterators */ |
||||
int block_x, block_y; /* block width and height iterators */ |
||||
int blocks_wide, blocks_high; /* width and height in 4x4 blocks */ |
||||
int block_inc; |
||||
int row_dec; |
||||
|
||||
/* decoding parameters */ |
||||
int stream_ptr; |
||||
unsigned char byte_a, byte_b; |
||||
unsigned short flags; |
||||
int skip_blocks; |
||||
unsigned char colors[8]; |
||||
unsigned char *pixels = s->frame.data[0]; |
||||
unsigned char *prev_pixels = s->prev_frame.data[0]; |
||||
int stride = s->frame.linesize[0]; |
||||
|
||||
stream_ptr = 0; |
||||
skip_blocks = 0; |
||||
blocks_wide = s->avctx->width / 4; |
||||
blocks_high = s->avctx->height / 4; |
||||
total_blocks = blocks_wide * blocks_high; |
||||
block_inc = 4; |
||||
row_dec = stride + 4; |
||||
|
||||
for (block_y = blocks_high; block_y > 0; block_y--) { |
||||
block_ptr = ((block_y * 4) - 1) * stride; |
||||
for (block_x = blocks_wide; block_x > 0; block_x--) { |
||||
/* check if this block should be skipped */ |
||||
if (skip_blocks) { |
||||
COPY_PREV_BLOCK(); |
||||
block_ptr += block_inc; |
||||
skip_blocks--; |
||||
total_blocks--; |
||||
continue; |
||||
} |
||||
|
||||
pixel_ptr = block_ptr; |
||||
|
||||
/* get the next two bytes in the encoded data stream */ |
||||
CHECK_STREAM_PTR(2); |
||||
byte_a = s->buf[stream_ptr++]; |
||||
byte_b = s->buf[stream_ptr++]; |
||||
|
||||
/* check if the decode is finished */ |
||||
if ((byte_a == 0) && (byte_b == 0) && (total_blocks == 0)) |
||||
return; |
||||
else if ((byte_b & 0xFC) == 0x84) { |
||||
/* skip code, but don't count the current block */ |
||||
skip_blocks = ((byte_b - 0x84) << 8) + byte_a - 1; |
||||
COPY_PREV_BLOCK(); |
||||
} else if (byte_b < 0x80) { |
||||
/* 2-color encoding */ |
||||
flags = (byte_b << 8) | byte_a; |
||||
|
||||
CHECK_STREAM_PTR(2); |
||||
colors[0] = s->buf[stream_ptr++]; |
||||
colors[1] = s->buf[stream_ptr++]; |
||||
|
||||
for (pixel_y = 0; pixel_y < 4; pixel_y++) { |
||||
for (pixel_x = 0; pixel_x < 4; pixel_x++, flags >>= 1) |
||||
pixels[pixel_ptr++] = colors[(flags & 0x1) ^ 1]; |
||||
pixel_ptr -= row_dec; |
||||
} |
||||
} else if (byte_b >= 0x90) { |
||||
/* 8-color encoding */ |
||||
flags = (byte_b << 8) | byte_a; |
||||
|
||||
CHECK_STREAM_PTR(8); |
||||
memcpy(colors, &s->buf[stream_ptr], 8); |
||||
stream_ptr += 8; |
||||
|
||||
                for (pixel_y = 0; pixel_y < 4; pixel_y++) {
                    for (pixel_x = 0; pixel_x < 4; pixel_x++, flags >>= 1)
                        pixels[pixel_ptr++] =
                            colors[((pixel_y & 0x2) << 1) +
                                (pixel_x & 0x2) + ((flags & 0x1) ^ 1)];
                    pixel_ptr -= row_dec;
                }
||||
} else { |
||||
/* 1-color encoding */ |
||||
colors[0] = byte_a; |
||||
|
||||
for (pixel_y = 0; pixel_y < 4; pixel_y++) { |
||||
for (pixel_x = 0; pixel_x < 4; pixel_x++) |
||||
pixels[pixel_ptr++] = colors[0]; |
||||
pixel_ptr -= row_dec; |
||||
} |
||||
} |
||||
|
||||
block_ptr += block_inc; |
||||
total_blocks--; |
||||
} |
||||
} |
||||
|
||||
/* make the palette available on the way out */ |
||||
if (s->avctx->pix_fmt == PIX_FMT_PAL8) |
||||
memcpy(s->frame.data[1], s->palette, PALETTE_COUNT * 4); |
||||
} |
||||
|
||||
static void msvideo1_decode_16bit(Msvideo1Context *s) |
||||
{ |
||||
int block_ptr, pixel_ptr; |
||||
int total_blocks; |
||||
int pixel_x, pixel_y; /* pixel width and height iterators */ |
||||
int block_x, block_y; /* block width and height iterators */ |
||||
int blocks_wide, blocks_high; /* width and height in 4x4 blocks */ |
||||
int block_inc; |
||||
int row_dec; |
||||
|
||||
/* decoding parameters */ |
||||
int stream_ptr; |
||||
unsigned char byte_a, byte_b; |
||||
unsigned short flags; |
||||
int skip_blocks; |
||||
unsigned short colors[8]; |
||||
unsigned short *pixels = (unsigned short *)s->frame.data[0]; |
||||
unsigned short *prev_pixels = (unsigned short *)s->prev_frame.data[0]; |
||||
int stride = s->frame.linesize[0] / 2; |
||||
|
||||
stream_ptr = 0; |
||||
skip_blocks = 0; |
||||
blocks_wide = s->avctx->width / 4; |
||||
blocks_high = s->avctx->height / 4; |
||||
total_blocks = blocks_wide * blocks_high; |
||||
block_inc = 4; |
||||
row_dec = stride + 4; |
||||
|
||||
for (block_y = blocks_high; block_y > 0; block_y--) { |
||||
block_ptr = ((block_y * 4) - 1) * stride; |
||||
for (block_x = blocks_wide; block_x > 0; block_x--) { |
||||
/* check if this block should be skipped */ |
||||
if (skip_blocks) { |
||||
COPY_PREV_BLOCK(); |
||||
block_ptr += block_inc; |
||||
skip_blocks--; |
||||
total_blocks--; |
||||
continue; |
||||
} |
||||
|
||||
pixel_ptr = block_ptr; |
||||
|
||||
/* get the next two bytes in the encoded data stream */ |
||||
CHECK_STREAM_PTR(2); |
||||
byte_a = s->buf[stream_ptr++]; |
||||
byte_b = s->buf[stream_ptr++]; |
||||
|
||||
/* check if the decode is finished */ |
||||
if ((byte_a == 0) && (byte_b == 0) && (total_blocks == 0)) { |
||||
return; |
||||
} else if ((byte_b & 0xFC) == 0x84) { |
||||
/* skip code, but don't count the current block */ |
||||
skip_blocks = ((byte_b - 0x84) << 8) + byte_a - 1; |
||||
COPY_PREV_BLOCK(); |
||||
} else if (byte_b < 0x80) { |
||||
/* 2- or 8-color encoding modes */ |
||||
flags = (byte_b << 8) | byte_a; |
||||
|
||||
CHECK_STREAM_PTR(4); |
||||
colors[0] = LE_16(&s->buf[stream_ptr]); |
||||
stream_ptr += 2; |
||||
colors[1] = LE_16(&s->buf[stream_ptr]); |
||||
stream_ptr += 2; |
||||
|
||||
if (colors[0] & 0x8000) { |
||||
/* 8-color encoding */ |
||||
CHECK_STREAM_PTR(12); |
||||
colors[2] = LE_16(&s->buf[stream_ptr]); |
||||
stream_ptr += 2; |
||||
colors[3] = LE_16(&s->buf[stream_ptr]); |
||||
stream_ptr += 2; |
||||
colors[4] = LE_16(&s->buf[stream_ptr]); |
||||
stream_ptr += 2; |
||||
colors[5] = LE_16(&s->buf[stream_ptr]); |
||||
stream_ptr += 2; |
||||
colors[6] = LE_16(&s->buf[stream_ptr]); |
||||
stream_ptr += 2; |
||||
colors[7] = LE_16(&s->buf[stream_ptr]); |
||||
stream_ptr += 2; |
||||
|
||||
for (pixel_y = 0; pixel_y < 4; pixel_y++) { |
||||
for (pixel_x = 0; pixel_x < 4; pixel_x++, flags >>= 1) |
||||
pixels[pixel_ptr++] =
|
||||
colors[((pixel_y & 0x2) << 1) +
|
||||
(pixel_x & 0x2) + ((flags & 0x1) ^ 1)]; |
||||
pixel_ptr -= row_dec; |
||||
} |
||||
} else { |
||||
/* 2-color encoding */ |
||||
for (pixel_y = 0; pixel_y < 4; pixel_y++) { |
||||
for (pixel_x = 0; pixel_x < 4; pixel_x++, flags >>= 1) |
||||
pixels[pixel_ptr++] = colors[(flags & 0x1) ^ 1]; |
||||
pixel_ptr -= row_dec; |
||||
} |
||||
} |
||||
} else { |
||||
/* otherwise, it's a 1-color block */ |
||||
colors[0] = (byte_b << 8) | byte_a; |
||||
|
||||
for (pixel_y = 0; pixel_y < 4; pixel_y++) { |
||||
for (pixel_x = 0; pixel_x < 4; pixel_x++) |
||||
pixels[pixel_ptr++] = colors[0]; |
||||
pixel_ptr -= row_dec; |
||||
} |
||||
} |
||||
|
||||
block_ptr += block_inc; |
||||
total_blocks--; |
||||
} |
||||
} |
||||
} |
||||
|
||||
static int msvideo1_decode_frame(AVCodecContext *avctx, |
||||
void *data, int *data_size, |
||||
uint8_t *buf, int buf_size) |
||||
{ |
||||
Msvideo1Context *s = (Msvideo1Context *)avctx->priv_data; |
||||
|
||||
s->buf = buf; |
||||
s->size = buf_size; |
||||
|
||||
if (avctx->get_buffer(avctx, &s->frame)) { |
||||
printf (" MS Video-1 Video: get_buffer() failed\n"); |
||||
return -1; |
||||
} |
||||
|
||||
if (s->mode_8bit) |
||||
msvideo1_decode_8bit(s); |
||||
else |
||||
msvideo1_decode_16bit(s); |
||||
|
||||
if (s->prev_frame.data[0]) |
||||
avctx->release_buffer(avctx, &s->prev_frame); |
||||
|
||||
/* shuffle frames */ |
||||
s->prev_frame = s->frame; |
||||
|
||||
*data_size = sizeof(AVFrame); |
||||
*(AVFrame*)data = s->frame; |
||||
|
||||
/* report that the buffer was completely consumed */ |
||||
return buf_size; |
||||
} |
||||
|
||||
static int msvideo1_decode_end(AVCodecContext *avctx) |
||||
{ |
||||
Msvideo1Context *s = (Msvideo1Context *)avctx->priv_data; |
||||
|
||||
if (s->prev_frame.data[0]) |
||||
avctx->release_buffer(avctx, &s->prev_frame); |
||||
|
||||
return 0; |
||||
} |
||||
|
||||
AVCodec msvideo1_decoder = { |
||||
"msvideo1", |
||||
CODEC_TYPE_VIDEO, |
||||
CODEC_ID_MSVIDEO1, |
||||
sizeof(Msvideo1Context), |
||||
msvideo1_decode_init, |
||||
NULL, |
||||
msvideo1_decode_end, |
||||
msvideo1_decode_frame, |
||||
CODEC_CAP_DR1, |
||||
}; |
@@ -0,0 +1,310 @@
/*
 * QuickTime Video (RPZA) Decoder
 * Copyright (C) 2003 the ffmpeg project
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/**
 * @file rpza.c
 * QT RPZA Video Decoder by Roberto Togni <rtogni@bresciaonline.it>
 * For more information about the RPZA format, visit:
 *   http://www.pcisys.net/~melanson/codecs/
 *
 * The RPZA decoder outputs RGB555 colorspace data.
 *
 * Note that this decoder reads big endian RGB555 pixel values from the
 * bytestream, arranges them in the host's endian order, and outputs
 * them to the final rendered map in the same host endian order. This is
 * intended behavior as the ffmpeg documentation states that RGB555 pixels
 * shall be stored in native CPU endianness.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "common.h"
#include "avcodec.h"
#include "dsputil.h"
||||
|
||||
typedef struct RpzaContext { |
||||
|
||||
AVCodecContext *avctx; |
||||
DSPContext dsp; |
||||
AVFrame frame; |
||||
AVFrame prev_frame; |
||||
|
||||
unsigned char *buf; |
||||
int size; |
||||
|
||||
} RpzaContext; |
||||
|
||||
#define BE_16(x)  ((((uint8_t*)(x))[0] << 8) | ((uint8_t*)(x))[1])
#define BE_32(x)  ((((uint8_t*)(x))[0] << 24) | \
                   (((uint8_t*)(x))[1] << 16) | \
                   (((uint8_t*)(x))[2] << 8) | \
                    ((uint8_t*)(x))[3])

#define ADVANCE_BLOCK() \
{ \
    pixel_ptr += 4; \
    if (pixel_ptr >= width) \
    { \
        pixel_ptr = 0; \
        row_ptr += stride * 4; \
    } \
    total_blocks--; \
    if (total_blocks < 0) \
    { \
        printf("warning: block counter just went negative (this should not happen)\n"); \
        return; \
    } \
}
||||
|
||||
static void rpza_decode_stream(RpzaContext *s) |
||||
{ |
||||
int width = s->avctx->width; |
||||
int stride = s->frame.linesize[0] / 2; |
||||
int row_inc = stride - 4; |
||||
int stream_ptr = 0; |
||||
int chunk_size; |
||||
unsigned char opcode; |
||||
int n_blocks; |
||||
unsigned short colorA = 0, colorB; |
||||
unsigned short color4[4]; |
||||
unsigned char index, idx; |
||||
unsigned short ta, tb; |
||||
unsigned short *pixels = (unsigned short *)s->frame.data[0]; |
||||
unsigned short *prev_pixels = (unsigned short *)s->prev_frame.data[0]; |
||||
|
||||
int row_ptr = 0; |
||||
int pixel_ptr = 0; |
||||
int block_ptr; |
||||
int pixel_x, pixel_y; |
||||
int total_blocks; |
||||
|
||||
    /* First byte is always 0xe1. Warn if it's different */
    if (s->buf[stream_ptr] != 0xe1)
        printf("First chunk byte is 0x%02x instead of 0xe1\n",
            s->buf[stream_ptr]);

    /* Get chunk size, ignoring the first byte */
    chunk_size = BE_32(&s->buf[stream_ptr]) & 0x00FFFFFF;
    stream_ptr += 4;

    /* If length mismatch use size from MOV file and try to decode anyway */
    if (chunk_size != s->size)
        printf("MOV chunk size != encoded chunk size; using MOV chunk size\n");

    chunk_size = s->size;
||||
|
||||
/* Number of 4x4 blocks in frame. */ |
||||
total_blocks = (s->avctx->width * s->avctx->height) / (4 * 4); |
||||
|
||||
/* Process chunk data */ |
||||
while (stream_ptr < chunk_size) { |
||||
opcode = s->buf[stream_ptr++]; /* Get opcode */ |
||||
|
||||
n_blocks = (opcode & 0x1f) + 1; /* Extract block counter from opcode */ |
||||
|
||||
/* If opcode MSbit is 0, we need more data to decide what to do */ |
||||
if ((opcode & 0x80) == 0) { |
||||
colorA = (opcode << 8) | (s->buf[stream_ptr++]); |
||||
opcode = 0; |
||||
if ((s->buf[stream_ptr] & 0x80) != 0) { |
||||
/* Must behave as opcode 110xxxxx, using colorA computed
|
||||
* above. Use fake opcode 0x20 to enter switch block at
|
||||
* the right place */ |
||||
opcode = 0x20; |
||||
n_blocks = 1; |
||||
} |
||||
} |
||||
|
||||
switch (opcode & 0xe0) { |
||||
|
||||
/* Skip blocks */ |
||||
case 0x80: |
||||
while (n_blocks--) { |
||||
block_ptr = row_ptr + pixel_ptr; |
||||
for (pixel_y = 0; pixel_y < 4; pixel_y++) { |
||||
for (pixel_x = 0; pixel_x < 4; pixel_x++){ |
||||
pixels[block_ptr] = prev_pixels[block_ptr]; |
||||
block_ptr++; |
||||
} |
||||
block_ptr += row_inc; |
||||
} |
||||
ADVANCE_BLOCK(); |
||||
} |
||||
break; |
||||
|
||||
/* Fill blocks with one color */ |
||||
case 0xa0: |
||||
colorA = BE_16 (&s->buf[stream_ptr]); |
||||
stream_ptr += 2; |
||||
while (n_blocks--) { |
||||
block_ptr = row_ptr + pixel_ptr; |
||||
for (pixel_y = 0; pixel_y < 4; pixel_y++) { |
||||
for (pixel_x = 0; pixel_x < 4; pixel_x++){ |
||||
pixels[block_ptr] = colorA; |
||||
block_ptr++; |
||||
} |
||||
block_ptr += row_inc; |
||||
} |
||||
ADVANCE_BLOCK(); |
||||
} |
||||
break; |
||||
|
||||
/* Fill blocks with 4 colors */ |
||||
case 0xc0: |
||||
colorA = BE_16 (&s->buf[stream_ptr]); |
||||
stream_ptr += 2; |
||||
case 0x20: |
||||
colorB = BE_16 (&s->buf[stream_ptr]); |
||||
stream_ptr += 2; |
||||
|
||||
/* sort out the colors */ |
||||
color4[0] = colorB; |
||||
color4[1] = 0; |
||||
color4[2] = 0; |
||||
color4[3] = colorA; |
||||
|
||||
/* red components */ |
||||
ta = (colorA >> 10) & 0x1F; |
||||
tb = (colorB >> 10) & 0x1F; |
||||
color4[1] |= ((11 * ta + 21 * tb) >> 5) << 10; |
||||
color4[2] |= ((21 * ta + 11 * tb) >> 5) << 10; |
||||
|
||||
/* green components */ |
||||
ta = (colorA >> 5) & 0x1F; |
||||
tb = (colorB >> 5) & 0x1F; |
||||
color4[1] |= ((11 * ta + 21 * tb) >> 5) << 5; |
||||
color4[2] |= ((21 * ta + 11 * tb) >> 5) << 5; |
||||
|
||||
/* blue components */ |
||||
ta = colorA & 0x1F; |
||||
tb = colorB & 0x1F; |
||||
color4[1] |= ((11 * ta + 21 * tb) >> 5); |
||||
color4[2] |= ((21 * ta + 11 * tb) >> 5); |
||||
|
||||
while (n_blocks--) { |
||||
block_ptr = row_ptr + pixel_ptr; |
||||
for (pixel_y = 0; pixel_y < 4; pixel_y++) { |
||||
index = s->buf[stream_ptr++]; |
||||
for (pixel_x = 0; pixel_x < 4; pixel_x++){ |
||||
idx = (index >> (2 * (3 - pixel_x))) & 0x03; |
||||
pixels[block_ptr] = color4[idx]; |
||||
block_ptr++; |
||||
} |
||||
block_ptr += row_inc; |
||||
} |
||||
ADVANCE_BLOCK(); |
||||
} |
||||
break; |
||||
|
||||
/* Fill block with 16 colors */ |
||||
case 0x00: |
||||
block_ptr = row_ptr + pixel_ptr; |
||||
for (pixel_y = 0; pixel_y < 4; pixel_y++) { |
||||
for (pixel_x = 0; pixel_x < 4; pixel_x++){ |
||||
/* We already have color of upper left pixel */ |
||||
if ((pixel_y != 0) || (pixel_x !=0)) { |
||||
colorA = BE_16 (&s->buf[stream_ptr]); |
||||
stream_ptr += 2; |
||||
} |
||||
pixels[block_ptr] = colorA; |
||||
block_ptr++; |
||||
} |
||||
block_ptr += row_inc; |
||||
} |
||||
ADVANCE_BLOCK(); |
||||
break; |
||||
|
||||
/* Unknown opcode */ |
||||
default: |
||||
printf("Unknown opcode %d in rpza chunk." |
||||
" Skip remaining %d bytes of chunk data.\n", opcode, |
||||
chunk_size - stream_ptr); |
||||
return; |
||||
} /* Opcode switch */ |
||||
} |
||||
} |
||||
|
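/* Illustrative sketch (editor's addition, not part of this commit): the
 * 4-color case in rpza_decode_stream() above derives the two middle colors
 * per 5-bit component with an (11*a + 21*b) >> 5 weighting, i.e. roughly
 * 1/3 of one endpoint plus 2/3 of the other. The helper below restates that
 * computation for a single component; the name is hypothetical. */
static inline unsigned short rpza_blend_component_example(unsigned short ta,
                                                          unsigned short tb)
{
    /* ta, tb are 5-bit components (0..31); the result also stays in 0..31 */
    return (11 * ta + 21 * tb) >> 5;
}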
||||
static int rpza_decode_init(AVCodecContext *avctx) |
||||
{ |
||||
RpzaContext *s = (RpzaContext *)avctx->priv_data; |
||||
|
||||
s->avctx = avctx; |
||||
avctx->pix_fmt = PIX_FMT_RGB555; |
||||
avctx->has_b_frames = 0; |
||||
dsputil_init(&s->dsp, avctx); |
||||
|
||||
s->frame.data[0] = s->prev_frame.data[0] = NULL; |
||||
|
||||
return 0; |
||||
} |
||||
|
||||
static int rpza_decode_frame(AVCodecContext *avctx, |
||||
void *data, int *data_size, |
||||
uint8_t *buf, int buf_size) |
||||
{ |
||||
RpzaContext *s = (RpzaContext *)avctx->priv_data; |
||||
|
||||
s->buf = buf; |
||||
s->size = buf_size; |
||||
|
||||
if (avctx->get_buffer(avctx, &s->frame)) { |
||||
printf (" RPZA Video: get_buffer() failed\n"); |
||||
return -1; |
||||
} |
||||
|
||||
rpza_decode_stream(s); |
||||
|
||||
if (s->prev_frame.data[0]) |
||||
avctx->release_buffer(avctx, &s->prev_frame); |
||||
|
||||
/* shuffle frames */ |
||||
s->prev_frame = s->frame; |
||||
|
||||
*data_size = sizeof(AVFrame); |
||||
*(AVFrame*)data = s->frame; |
||||
|
||||
/* always report that the buffer was completely consumed */ |
||||
return buf_size; |
||||
} |
||||
|
||||
static int rpza_decode_end(AVCodecContext *avctx) |
||||
{ |
||||
RpzaContext *s = (RpzaContext *)avctx->priv_data; |
||||
|
||||
if (s->prev_frame.data[0]) |
||||
avctx->release_buffer(avctx, &s->prev_frame); |
||||
|
||||
return 0; |
||||
} |
||||
|
||||
AVCodec rpza_decoder = { |
||||
"rpza", |
||||
CODEC_TYPE_VIDEO, |
||||
CODEC_ID_RPZA, |
||||
sizeof(RpzaContext), |
||||
rpza_decode_init, |
||||
NULL, |
||||
rpza_decode_end, |
||||
rpza_decode_frame, |
||||
CODEC_CAP_DR1, |
||||
}; |
@@ -0,0 +1,323 @@
/*
 * Sega FILM Format (CPK) Demuxer
 * Copyright (c) 2003 The ffmpeg Project
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/**
 * @file segafilm.c
 * Sega FILM (.cpk) file demuxer
 * by Mike Melanson (melanson@pcisys.net)
 * For more information regarding the Sega FILM file format, visit:
 *   http://www.pcisys.net/~melanson/codecs/
 */

#include "avformat.h"
||||
|
||||
#define BE_16(x)  ((((uint8_t*)(x))[0] << 8) | ((uint8_t*)(x))[1])
#define BE_32(x)  ((((uint8_t*)(x))[0] << 24) | \
                   (((uint8_t*)(x))[1] << 16) | \
                   (((uint8_t*)(x))[2] << 8) | \
                    ((uint8_t*)(x))[3])

#define FOURCC_TAG( ch0, ch1, ch2, ch3 ) \
        ( (long)(unsigned char)(ch3) | \
        ( (long)(unsigned char)(ch2) << 8 ) | \
        ( (long)(unsigned char)(ch1) << 16 ) | \
        ( (long)(unsigned char)(ch0) << 24 ) )

#define FILM_TAG FOURCC_TAG('F', 'I', 'L', 'M')
#define FDSC_TAG FOURCC_TAG('F', 'D', 'S', 'C')
#define STAB_TAG FOURCC_TAG('S', 'T', 'A', 'B')
#define CVID_TAG FOURCC_TAG('c', 'v', 'i', 'd')
||||
|
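/* Illustrative note (editor's addition, not part of this commit): FOURCC_TAG
 * places ch0 in the most significant byte, so FILM_TAG evaluates to
 * 0x46494C4D ('F','I','L','M') -- the same value BE_32() produces when it
 * reads those four bytes from the start of the file in film_probe(). */
enum { FILM_TAG_EXPECTED = 0x46494C4D };   /* hypothetical name, for reference */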
||||
typedef struct { |
||||
int stream; |
||||
off_t sample_offset; |
||||
unsigned int sample_size; |
||||
int64_t pts; |
||||
int keyframe; |
||||
} film_sample_t; |
||||
|
||||
typedef struct FilmDemuxContext { |
||||
int video_stream_index; |
||||
int audio_stream_index; |
||||
|
||||
unsigned int audio_type; |
||||
unsigned int audio_samplerate; |
||||
unsigned int audio_bits; |
||||
unsigned int audio_channels; |
||||
|
||||
unsigned int video_type; |
||||
unsigned int sample_count; |
||||
film_sample_t *sample_table; |
||||
unsigned int current_sample; |
||||
|
||||
unsigned int base_clock; |
||||
unsigned int version; |
||||
    int cvid_extra_bytes;  /* the number of bytes thrown into the Cinepak
                            * chunk header to throw off decoders */
||||
|
||||
/* buffer used for interleaving stereo PCM data */ |
||||
unsigned char *stereo_buffer; |
||||
int stereo_buffer_size; |
||||
} FilmDemuxContext; |
||||
|
||||
static int film_probe(AVProbeData *p) |
||||
{ |
||||
if (p->buf_size < 4) |
||||
return 0; |
||||
|
||||
if (BE_32(&p->buf[0]) != FILM_TAG) |
||||
return 0; |
||||
|
||||
return AVPROBE_SCORE_MAX; |
||||
} |
||||
|
||||
static int film_read_header(AVFormatContext *s, |
||||
AVFormatParameters *ap) |
||||
{ |
||||
FilmDemuxContext *film = (FilmDemuxContext *)s->priv_data; |
||||
ByteIOContext *pb = &s->pb; |
||||
AVStream *st; |
||||
unsigned char scratch[256]; |
||||
int i; |
||||
unsigned int data_offset; |
||||
unsigned int audio_frame_counter; |
||||
|
||||
film->sample_table = NULL; |
||||
film->stereo_buffer = NULL; |
||||
film->stereo_buffer_size = 0; |
||||
|
||||
/* load the main FILM header */ |
||||
if (get_buffer(pb, scratch, 16) != 16) |
||||
return -EIO; |
||||
data_offset = BE_32(&scratch[4]); |
||||
film->version = BE_32(&scratch[8]); |
||||
|
||||
/* load the FDSC chunk */ |
||||
if (film->version == 0) { |
||||
/* special case for Lemmings .film files; 20-byte header */ |
||||
if (get_buffer(pb, scratch, 20) != 20) |
||||
return -EIO; |
||||
/* make some assumptions about the audio parameters */ |
||||
film->audio_type = CODEC_ID_PCM_S8; |
||||
film->audio_samplerate = 22050; |
||||
film->audio_channels = 1; |
||||
film->audio_bits = 8; |
||||
} else { |
||||
/* normal Saturn .cpk files; 32-byte header */ |
||||
if (get_buffer(pb, scratch, 32) != 32) |
||||
return -EIO; |
||||
        film->audio_samplerate = BE_16(&scratch[24]);
||||
film->audio_channels = scratch[21]; |
||||
film->audio_bits = scratch[22]; |
||||
if (film->audio_bits == 8) |
||||
film->audio_type = CODEC_ID_PCM_S8; |
||||
else if (film->audio_bits == 16) |
||||
film->audio_type = CODEC_ID_PCM_S16BE; |
||||
else |
||||
film->audio_type = 0; |
||||
} |
||||
|
||||
if (BE_32(&scratch[0]) != FDSC_TAG) |
||||
return AVERROR_INVALIDDATA; |
||||
|
||||
film->cvid_extra_bytes = 0; |
||||
if (BE_32(&scratch[8]) == CVID_TAG) { |
||||
film->video_type = CODEC_ID_CINEPAK; |
||||
if (film->version) |
||||
film->cvid_extra_bytes = 2; |
||||
else |
||||
film->cvid_extra_bytes = 6; /* Lemmings 3DO case */ |
||||
} else |
||||
film->video_type = 0; |
||||
|
||||
/* initialize the decoder streams */ |
||||
if (film->video_type) { |
||||
st = av_new_stream(s, 0); |
||||
if (!st) |
||||
return AVERROR_NOMEM; |
||||
film->video_stream_index = st->index; |
||||
st->codec.codec_type = CODEC_TYPE_VIDEO; |
||||
st->codec.codec_id = film->video_type; |
||||
st->codec.codec_tag = 0; /* no fourcc */ |
||||
st->codec.width = BE_32(&scratch[16]); |
||||
st->codec.height = BE_32(&scratch[12]); |
||||
} |
||||
|
||||
if (film->audio_type) { |
||||
st = av_new_stream(s, 0); |
||||
if (!st) |
||||
return AVERROR_NOMEM; |
||||
film->audio_stream_index = st->index; |
||||
st->codec.codec_type = CODEC_TYPE_AUDIO; |
||||
st->codec.codec_id = film->audio_type; |
||||
st->codec.codec_tag = 1; |
||||
st->codec.channels = film->audio_channels; |
||||
st->codec.bits_per_sample = film->audio_bits; |
||||
st->codec.sample_rate = film->audio_samplerate; |
||||
st->codec.bit_rate = st->codec.channels * st->codec.sample_rate * |
||||
st->codec.bits_per_sample; |
||||
st->codec.block_align = st->codec.channels *
|
||||
st->codec.bits_per_sample / 8; |
||||
} |
||||
|
||||
/* load the sample table */ |
||||
if (get_buffer(pb, scratch, 16) != 16) |
||||
return -EIO; |
||||
if (BE_32(&scratch[0]) != STAB_TAG) |
||||
return AVERROR_INVALIDDATA; |
||||
film->base_clock = BE_32(&scratch[8]); |
||||
film->sample_count = BE_32(&scratch[12]); |
||||
film->sample_table = av_malloc(film->sample_count * sizeof(film_sample_t)); |
||||
|
||||
audio_frame_counter = 0; |
||||
for (i = 0; i < film->sample_count; i++) { |
||||
/* load the next sample record and transfer it to an internal struct */ |
||||
if (get_buffer(pb, scratch, 16) != 16) { |
||||
av_free(film->sample_table); |
||||
return -EIO; |
||||
} |
||||
film->sample_table[i].sample_offset =
|
||||
data_offset + BE_32(&scratch[0]); |
||||
film->sample_table[i].sample_size = BE_32(&scratch[4]); |
||||
if (BE_32(&scratch[8]) == 0xFFFFFFFF) { |
||||
film->sample_table[i].stream = film->audio_stream_index; |
||||
film->sample_table[i].pts = audio_frame_counter; |
||||
film->sample_table[i].pts *= film->base_clock; |
||||
film->sample_table[i].pts /= film->audio_samplerate; |
||||
|
||||
audio_frame_counter += (film->sample_table[i].sample_size / |
||||
(film->audio_channels * film->audio_bits / 8)); |
||||
} else { |
||||
film->sample_table[i].stream = film->video_stream_index; |
||||
film->sample_table[i].pts = BE_32(&scratch[8]) & 0x7FFFFFFF; |
||||
film->sample_table[i].keyframe = (scratch[8] & 0x80) ? 0 : 1; |
||||
} |
||||
} |
||||
|
||||
film->current_sample = 0; |
||||
|
||||
/* set the pts reference to match the tick rate of the file */ |
||||
s->pts_num = 1; |
||||
s->pts_den = film->base_clock; |
||||
|
||||
return 0; |
||||
} |
||||
|
||||
static int film_read_packet(AVFormatContext *s, |
||||
AVPacket *pkt) |
||||
{ |
||||
FilmDemuxContext *film = (FilmDemuxContext *)s->priv_data; |
||||
ByteIOContext *pb = &s->pb; |
||||
film_sample_t *sample; |
||||
int ret = 0; |
||||
int i; |
||||
int left, right; |
||||
|
||||
if (film->current_sample >= film->sample_count) |
||||
return -EIO; |
||||
|
||||
sample = &film->sample_table[film->current_sample]; |
||||
|
||||
/* position the stream (will probably be there anyway) */ |
||||
url_fseek(pb, sample->sample_offset, SEEK_SET); |
||||
|
||||
/* do a special song and dance when loading FILM Cinepak chunks */ |
||||
if ((sample->stream == film->video_stream_index) &&
|
||||
(film->video_type == CODEC_ID_CINEPAK)) { |
||||
if (av_new_packet(pkt, sample->sample_size - film->cvid_extra_bytes)) |
||||
return AVERROR_NOMEM; |
||||
ret = get_buffer(pb, pkt->data, 10); |
||||
/* skip the non-spec CVID bytes */ |
||||
url_fseek(pb, film->cvid_extra_bytes, SEEK_CUR); |
||||
ret += get_buffer(pb, pkt->data + 10,
|
||||
sample->sample_size - 10 - film->cvid_extra_bytes); |
||||
if (ret != sample->sample_size - film->cvid_extra_bytes) |
||||
ret = -EIO; |
||||
} else if ((sample->stream == film->audio_stream_index) && |
||||
(film->audio_channels == 2)) { |
||||
/* stereo PCM needs to be interleaved */ |
||||
|
||||
if (av_new_packet(pkt, sample->sample_size)) |
||||
return AVERROR_NOMEM; |
||||
|
||||
/* make sure the interleave buffer is large enough */ |
||||
if (sample->sample_size > film->stereo_buffer_size) { |
||||
av_free(film->stereo_buffer); |
||||
film->stereo_buffer_size = sample->sample_size; |
||||
film->stereo_buffer = av_malloc(film->stereo_buffer_size); |
||||
} |
||||
|
||||
ret = get_buffer(pb, film->stereo_buffer, sample->sample_size); |
||||
if (ret != sample->sample_size) |
||||
ret = -EIO; |
||||
|
||||
left = 0; |
||||
right = sample->sample_size / 2; |
||||
for (i = 0; i < sample->sample_size; ) { |
||||
if (film->audio_bits == 8) { |
||||
pkt->data[i++] = film->stereo_buffer[left++]; |
||||
pkt->data[i++] = film->stereo_buffer[right++]; |
||||
} else { |
||||
pkt->data[i++] = film->stereo_buffer[left++]; |
||||
pkt->data[i++] = film->stereo_buffer[left++]; |
||||
pkt->data[i++] = film->stereo_buffer[right++]; |
||||
pkt->data[i++] = film->stereo_buffer[right++]; |
||||
} |
||||
} |
||||
} else { |
||||
if (av_new_packet(pkt, sample->sample_size)) |
||||
return AVERROR_NOMEM; |
||||
ret = get_buffer(pb, pkt->data, sample->sample_size); |
||||
if (ret != sample->sample_size) |
||||
ret = -EIO; |
||||
} |
||||
|
||||
pkt->stream_index = sample->stream; |
||||
pkt->pts = sample->pts; |
||||
|
||||
film->current_sample++; |
||||
|
||||
return ret; |
||||
} |
||||
|
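/* Illustrative example (editor's addition, not part of this commit) of the
 * stereo interleave performed in film_read_packet() above: FILM stores a
 * stereo PCM chunk as all left samples followed by all right samples, and
 * the demuxer re-emits them as interleaved L/R pairs. The bytes below are
 * made up; for 8-bit audio one byte equals one sample. */
static const unsigned char film_planar_example[4]      = { 'L', 'l', 'R', 'r' };
static const unsigned char film_interleaved_example[4] = { 'L', 'R', 'l', 'r' };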
||||
static int film_read_close(AVFormatContext *s) |
||||
{ |
||||
FilmDemuxContext *film = (FilmDemuxContext *)s->priv_data; |
||||
|
||||
av_free(film->sample_table); |
||||
av_free(film->stereo_buffer); |
||||
|
||||
return 0; |
||||
} |
||||
|
||||
static AVInputFormat film_iformat = { |
||||
"film_cpk", |
||||
"Sega FILM/CPK format", |
||||
sizeof(FilmDemuxContext), |
||||
film_probe, |
||||
film_read_header, |
||||
film_read_packet, |
||||
film_read_close, |
||||
}; |
||||
|
||||
int film_init(void) |
||||
{ |
||||
av_register_input_format(&film_iformat); |
||||
return 0; |
||||
} |
@@ -0,0 +1,375 @@
/*
 * Westwood Studios Multimedia Formats Demuxer (VQA, AUD)
 * Copyright (c) 2003 The ffmpeg Project
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/**
 * @file westwood.c
 * Westwood Studios VQA & AUD file demuxers
 * by Mike Melanson (melanson@pcisys.net)
 * For more information on the Westwood file formats, visit:
 *   http://www.pcisys.net/~melanson/codecs/
 *   http://www.geocities.com/SiliconValley/8682/aud3.txt
 *
 * Implementation note: There is no definite file signature for AUD files.
 * The demuxer uses a probabilistic strategy for content detection. This
 * entails performing sanity checks on certain header values in order to
 * qualify a file. Refer to wsaud_probe() for the precise parameters.
 */

#include "avformat.h"
||||
|
||||
#define LE_16(x)  ((((uint8_t*)(x))[1] << 8) | ((uint8_t*)(x))[0])
#define LE_32(x)  ((((uint8_t*)(x))[3] << 24) | \
                   (((uint8_t*)(x))[2] << 16) | \
                   (((uint8_t*)(x))[1] << 8) | \
                    ((uint8_t*)(x))[0])
#define BE_32(x)  ((((uint8_t*)(x))[0] << 24) | \
                   (((uint8_t*)(x))[1] << 16) | \
                   (((uint8_t*)(x))[2] << 8) | \
                    ((uint8_t*)(x))[3])

#define AUD_HEADER_SIZE 12
#define AUD_CHUNK_PREAMBLE_SIZE 8
#define AUD_CHUNK_SIGNATURE 0x0000DEAF

#define FOURCC_TAG( ch0, ch1, ch2, ch3 ) \
        ( (long)(unsigned char)(ch3) | \
        ( (long)(unsigned char)(ch2) << 8 ) | \
        ( (long)(unsigned char)(ch1) << 16 ) | \
        ( (long)(unsigned char)(ch0) << 24 ) )
||||
|
||||
#define FORM_TAG FOURCC_TAG('F', 'O', 'R', 'M') |
||||
#define WVQA_TAG FOURCC_TAG('W', 'V', 'Q', 'A') |
||||
#define VQHD_TAG FOURCC_TAG('V', 'Q', 'H', 'D') |
||||
#define FINF_TAG FOURCC_TAG('F', 'I', 'N', 'F') |
||||
#define SND0_TAG FOURCC_TAG('S', 'N', 'D', '0') |
||||
#define SND2_TAG FOURCC_TAG('S', 'N', 'D', '2') |
||||
#define VQFR_TAG FOURCC_TAG('V', 'Q', 'F', 'R') |
||||
|
||||
#define VQA_HEADER_SIZE 0x2A |
||||
#define VQA_FRAMERATE 15 |
||||
#define VQA_VIDEO_PTS_INC (90000 / VQA_FRAMERATE) |
||||
#define VQA_PREAMBLE_SIZE 8 |
||||
|
||||
typedef struct WsAudDemuxContext { |
||||
int audio_samplerate; |
||||
int audio_channels; |
||||
int audio_bits; |
||||
int audio_type; |
||||
int audio_stream_index; |
||||
int64_t audio_frame_counter; |
||||
} WsAudDemuxContext; |
||||
|
||||
typedef struct WsVqaDemuxContext { |
||||
int audio_samplerate; |
||||
int audio_channels; |
||||
int audio_bits; |
||||
|
||||
int audio_stream_index; |
||||
int video_stream_index; |
||||
|
||||
int64_t audio_frame_counter; |
||||
int64_t video_pts; |
||||
} WsVqaDemuxContext; |
||||
|
||||
static int wsaud_probe(AVProbeData *p) |
||||
{ |
||||
int field; |
||||
|
||||
    /* Probabilistic content detection strategy: There is no file signature
     * so perform sanity checks on various header parameters:
     *   8000 <= sample rate (16 bits) <= 48000  ==> 40001 acceptable numbers
     *   compression type (8 bits) = 1 or 99     ==> 2 acceptable numbers
     * There is a total of 24 bits. The number space contains 2^24 =
     * 16777216 numbers. There are 40001 * 2 = 80002 acceptable combinations
     * of numbers. There is a 80002/16777216 = 0.48% chance of a false
     * positive.
     */
||||
|
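    /* note: since only compression type 99 is accepted by the check below,
     * the probe as implemented admits roughly 40001/16777216 ~= 0.24% of
     * random 24-bit inputs, somewhat tighter than the 0.48% figure above */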
    if (p->buf_size < AUD_HEADER_SIZE)
        return 0;

    /* check sample rate */
    field = LE_16(&p->buf[0]);
    if ((field < 8000) || (field > 48000))
        return 0;

    /* note: only check for WS IMA (type 99) right now since there is no
     * support for type 1 */
    if (p->buf[11] != 99)
        return 0;

    /* return 1/2 certainty since this file check is a little sketchy */
    return AVPROBE_SCORE_MAX / 2;
}

static int wsaud_read_header(AVFormatContext *s,
                             AVFormatParameters *ap)
{
    WsAudDemuxContext *wsaud = (WsAudDemuxContext *)s->priv_data;
    ByteIOContext *pb = &s->pb;
    AVStream *st;
    unsigned char header[AUD_HEADER_SIZE];

    if (get_buffer(pb, header, AUD_HEADER_SIZE) != AUD_HEADER_SIZE)
        return -EIO;
    wsaud->audio_samplerate = LE_16(&header[0]);
    if (header[11] == 99)
        wsaud->audio_type = CODEC_ID_ADPCM_IMA_WS;
    else
        return AVERROR_INVALIDDATA;

    /* flag 0 indicates stereo */
    wsaud->audio_channels = (header[10] & 0x1) + 1;
    /* flag 1 indicates 16 bit audio */
    wsaud->audio_bits = (((header[10] & 0x2) >> 1) + 1) * 8;

    /* set the pts reference the same as the sample rate */
    s->pts_num = 1;
    s->pts_den = wsaud->audio_samplerate;

    /* initialize the audio decoder stream */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR_NOMEM;
    st->codec.codec_type = CODEC_TYPE_AUDIO;
    st->codec.codec_id = wsaud->audio_type;
    st->codec.codec_tag = 0; /* no tag */
    st->codec.channels = wsaud->audio_channels;
    st->codec.sample_rate = wsaud->audio_samplerate;
    st->codec.bits_per_sample = wsaud->audio_bits;
    st->codec.bit_rate = st->codec.channels * st->codec.sample_rate *
        st->codec.bits_per_sample / 4;
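    /* the division by 4 above reflects the 4:1 compression of Westwood IMA
     * ADPCM (4 bits per coded sample, i.e. 2 samples per byte, consistent
     * with the frame counting in wsaud_read_packet()) */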
    st->codec.block_align = st->codec.channels * st->codec.bits_per_sample;

    wsaud->audio_stream_index = st->index;
    wsaud->audio_frame_counter = 0;

    return 0;
}

static int wsaud_read_packet(AVFormatContext *s,
                             AVPacket *pkt)
{
    WsAudDemuxContext *wsaud = (WsAudDemuxContext *)s->priv_data;
    ByteIOContext *pb = &s->pb;
    unsigned char preamble[AUD_CHUNK_PREAMBLE_SIZE];
    unsigned int chunk_size;
    int ret = 0;

    if (get_buffer(pb, preamble, AUD_CHUNK_PREAMBLE_SIZE) !=
        AUD_CHUNK_PREAMBLE_SIZE)
        return -EIO;

    /* validate the chunk */
    if (LE_32(&preamble[4]) != AUD_CHUNK_SIGNATURE)
        return AVERROR_INVALIDDATA;

    chunk_size = LE_16(&preamble[0]);
    if (av_new_packet(pkt, chunk_size))
        return -EIO;
    pkt->stream_index = wsaud->audio_stream_index;
    pkt->pts = wsaud->audio_frame_counter;
    pkt->pts /= wsaud->audio_samplerate;
    if ((ret = get_buffer(pb, pkt->data, chunk_size)) != chunk_size) {
        av_free_packet(pkt);
        ret = -EIO;
    }

    /* 2 samples/byte, 1 or 2 samples per frame depending on stereo */
    wsaud->audio_frame_counter += (chunk_size * 2) / wsaud->audio_channels;

    return ret;
}

static int wsaud_read_close(AVFormatContext *s)
{
//    WsAudDemuxContext *wsaud = (WsAudDemuxContext *)s->priv_data;

    return 0;
}

static int wsvqa_probe(AVProbeData *p)
{
    /* need 12 bytes to qualify */
    if (p->buf_size < 12)
        return 0;

    /* check for the VQA signatures */
    if ((BE_32(&p->buf[0]) != FORM_TAG) ||
        (BE_32(&p->buf[8]) != WVQA_TAG))
        return 0;

    return AVPROBE_SCORE_MAX;
}

static int wsvqa_read_header(AVFormatContext *s,
                             AVFormatParameters *ap)
{
    WsVqaDemuxContext *wsvqa = (WsVqaDemuxContext *)s->priv_data;
    ByteIOContext *pb = &s->pb;
    AVStream *st;
    unsigned char *header;
    unsigned char scratch[VQA_PREAMBLE_SIZE];

    /* set the pts reference (1 pts = 1/90000) */
    s->pts_num = 1;
    s->pts_den = 90000;

    /* initialize the video decoder stream */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR_NOMEM;
    wsvqa->video_stream_index = st->index;
    st->codec.codec_type = CODEC_TYPE_VIDEO;
    st->codec.codec_id = CODEC_ID_WS_VQA;
    st->codec.codec_tag = 0; /* no fourcc */

    /* skip to the start of the VQA header */
    url_fseek(pb, 20, SEEK_SET);
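    /* (the 20 bytes skipped presumably cover the FORM tag, the FORM size,
     * the WVQA signature checked in wsvqa_probe(), and the 8-byte VQHD
     * chunk preamble that precedes the header payload) */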

    /* the VQA header needs to go to the decoder */
    st->codec.extradata_size = VQA_HEADER_SIZE;
    st->codec.extradata = av_malloc(VQA_HEADER_SIZE);
    header = (unsigned char *)st->codec.extradata;
    if (get_buffer(pb, st->codec.extradata, VQA_HEADER_SIZE) !=
        VQA_HEADER_SIZE) {
        av_free(st->codec.extradata);
        return -EIO;
    }
    st->codec.width = LE_16(&header[6]);
    st->codec.height = LE_16(&header[8]);

    /* initialize the audio decoder stream */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR_NOMEM;
    st->codec.codec_type = CODEC_TYPE_AUDIO;
    st->codec.codec_id = CODEC_ID_ADPCM_IMA_WS;
    st->codec.codec_tag = 0; /* no tag */
    st->codec.sample_rate = LE_16(&header[24]);
    st->codec.channels = header[26];
    st->codec.bits_per_sample = 16;
    st->codec.bit_rate = st->codec.channels * st->codec.sample_rate *
        st->codec.bits_per_sample / 4;
    st->codec.block_align = st->codec.channels * st->codec.bits_per_sample;

    wsvqa->audio_stream_index = st->index;
    wsvqa->audio_samplerate = st->codec.sample_rate;
    wsvqa->audio_channels = st->codec.channels;
    wsvqa->audio_frame_counter = 0;

    /* skip the useless FINF chunk index */
    if (get_buffer(pb, scratch, VQA_PREAMBLE_SIZE) != VQA_PREAMBLE_SIZE) {
        av_free(st->codec.extradata);
        return -EIO;
    }
    url_fseek(pb, BE_32(&scratch[4]), SEEK_CUR);
    wsvqa->video_pts = wsvqa->audio_frame_counter = 0;

    return 0;
}

static int wsvqa_read_packet(AVFormatContext *s,
                             AVPacket *pkt)
{
    WsVqaDemuxContext *wsvqa = (WsVqaDemuxContext *)s->priv_data;
    ByteIOContext *pb = &s->pb;
    int ret = 0;
    unsigned char preamble[VQA_PREAMBLE_SIZE];
    unsigned int chunk_type;
    unsigned int chunk_size;
    int skip_byte;

    if (get_buffer(pb, preamble, VQA_PREAMBLE_SIZE) != VQA_PREAMBLE_SIZE)
        return -EIO;

    chunk_type = BE_32(&preamble[0]);
    chunk_size = BE_32(&preamble[4]);
    skip_byte = chunk_size & 0x01;

    if ((chunk_type == SND2_TAG) || (chunk_type == VQFR_TAG)) {

        if (av_new_packet(pkt, chunk_size))
            return -EIO;
        ret = get_buffer(pb, pkt->data, chunk_size);
        if (ret != chunk_size) {
            av_free_packet(pkt);
            ret = -EIO;
        }

        if (chunk_type == SND2_TAG) {
            pkt->stream_index = wsvqa->audio_stream_index;

            pkt->pts = 90000;
            pkt->pts *= wsvqa->audio_frame_counter;
            pkt->pts /= wsvqa->audio_samplerate;

            /* 2 samples/byte, 1 or 2 samples per frame depending on stereo */
            wsvqa->audio_frame_counter += (chunk_size * 2) /
                wsvqa->audio_channels;
        } else {
            pkt->stream_index = wsvqa->video_stream_index;
            pkt->pts = wsvqa->video_pts;
            wsvqa->video_pts += VQA_VIDEO_PTS_INC;
        }

    } else
        return AVERROR_INVALIDDATA;

    /* stay on 16-bit alignment */
    if (skip_byte)
        url_fseek(pb, 1, SEEK_CUR);

    return ret;
}

static int wsvqa_read_close(AVFormatContext *s)
{
//    WsVqaDemuxContext *wsvqa = (WsVqaDemuxContext *)s->priv_data;

    return 0;
}

static AVInputFormat wsaud_iformat = {
    "wsaud",
    "Westwood Studios audio format",
    sizeof(WsAudDemuxContext),
    wsaud_probe,
    wsaud_read_header,
    wsaud_read_packet,
    wsaud_read_close,
};

static AVInputFormat wsvqa_iformat = {
    "wsvqa",
    "Westwood Studios VQA format",
    sizeof(WsVqaDemuxContext),
    wsvqa_probe,
    wsvqa_read_header,
    wsvqa_read_packet,
    wsvqa_read_close,
};

int westwood_init(void)
{
    av_register_input_format(&wsaud_iformat);
    av_register_input_format(&wsvqa_iformat);
    return 0;
}
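
/* note: westwood_init() is presumably invoked once during the library's
 * global format registration (alongside the other *_init() calls) so that
 * both demuxers take part in input format probing */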