Use AV_xx throughout libavcodec

Originally committed as revision 9169 to svn://svn.ffmpeg.org/ffmpeg/trunk
Author: Ramiro Polla · 18 years ago
commit 2c124cb65c · parent 29b29011e5
28 changed files:

 1. libavcodec/ac3enc.c (6 changed lines)
 2. libavcodec/adpcm.c (15 changed lines)
 3. libavcodec/dsicinav.c (8 changed lines)
 4. libavcodec/gif.c (10 changed lines)
 5. libavcodec/gifdec.c (3 changed lines)
 6. libavcodec/interplayvideo.c (27 changed lines)
 7. libavcodec/kmvc.c (6 changed lines)
 8. libavcodec/lcl.c (9 changed lines)
 9. libavcodec/libtheoraenc.c (4 changed lines)
10. libavcodec/mjpegenc.c (9 changed lines)
11. libavcodec/mp3_header_decompress_bsf.c (5 changed lines)
12. libavcodec/mp3lameaudio.c (3 changed lines)
13. libavcodec/mpegaudio_parser.c (14 changed lines)
14. libavcodec/mpegaudiodec.c (6 changed lines)
15. libavcodec/oggvorbis.c (4 changed lines)
16. libavcodec/png.c (10 changed lines)
17. libavcodec/pnmenc.c (5 changed lines)
18. libavcodec/rangecoder.c (4 changed lines)
19. libavcodec/smacker.c (14 changed lines)
20. libavcodec/sp5xdec.c (6 changed lines)
21. libavcodec/tiertexseqv.c (2 changed lines)
22. libavcodec/ulti.c (24 changed lines)
23. libavcodec/vp56.h (4 changed lines)
24. libavcodec/wmadec.c (9 changed lines)
25. libavcodec/wmaenc.c (14 changed lines)
26. libavcodec/xan.c (3 changed lines)
27. libavcodec/zmbv.c (4 changed lines)
28. libavcodec/zmbvenc.c (8 changed lines)
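Every hunk below follows the same pattern: open-coded shift-and-mask byte packing is replaced with the shared AV_xx read/write macros (AV_RB16, AV_RL16, AV_WB16, AV_WL16 and their 24/32-bit variants; in current trees they are defined in libavutil/intreadwrite.h) or with the bytestream.h helpers. As a rough sketch of the semantics these macros provide (illustrative stand-ins only, not FFmpeg's actual implementation, which adds unaligned and native-endian fast paths):

    #include <stdint.h>

    /* Illustrative stand-ins for the AV_xx macros: fixed-endian loads and
     * stores on a byte pointer.  R = read, W = write, B = big endian,
     * L = little endian, 16/24/32 = width in bits. */
    static inline unsigned rb16(const uint8_t *p) { return (p[0] << 8) | p[1]; }
    static inline unsigned rl16(const uint8_t *p) { return p[0] | (p[1] << 8); }
    static inline unsigned rb24(const uint8_t *p) { return (p[0] << 16) | (p[1] << 8) | p[2]; }
    static inline unsigned rl24(const uint8_t *p) { return p[0] | (p[1] << 8) | (p[2] << 16); }
    static inline uint32_t rb32(const uint8_t *p) { return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]; }
    static inline uint32_t rl32(const uint8_t *p) { return p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t)p[3] << 24); }

    static inline void wb16(uint8_t *p, unsigned v) { p[0] = v >> 8;  p[1] = v; }
    static inline void wl16(uint8_t *p, unsigned v) { p[0] = v;       p[1] = v >> 8; }
    static inline void wb24(uint8_t *p, unsigned v) { p[0] = v >> 16; p[1] = v >> 8;  p[2] = v; }
    static inline void wb32(uint8_t *p, uint32_t v) { p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8;  p[3] = v; }
    static inline void wl32(uint8_t *p, uint32_t v) { p[0] = v;       p[1] = v >> 8;  p[2] = v >> 16; p[3] = v >> 24; }

So, for example, the ac3enc.c hunk writes the 16-bit CRC with a single AV_WB16(frame+2, crc1) instead of two separate byte stores.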

libavcodec/ac3enc.c
@@ -1136,12 +1136,10 @@ static int output_frame_end(AC3EncodeContext *s)
     /* XXX: could precompute crc_inv */
     crc_inv = pow_poly((CRC16_POLY >> 1), (16 * frame_size_58) - 16, CRC16_POLY);
     crc1 = mul_poly(crc_inv, crc1, CRC16_POLY);
-    frame[2] = crc1 >> 8;
-    frame[3] = crc1;
+    AV_WB16(frame+2,crc1);
     crc2 = bswap_16(av_crc(av_crc8005, 0, frame + 2 * frame_size_58, (frame_size - frame_size_58) * 2 - 2));
-    frame[2*frame_size - 2] = crc2 >> 8;
-    frame[2*frame_size - 1] = crc2;
+    AV_WB16(frame+2*frame_size-2,crc2);
     //    printf("n=%d frame_size=%d\n", n, frame_size);
     return frame_size * 2;

libavcodec/adpcm.c
@@ -451,16 +451,14 @@ static int adpcm_encode_frame(AVCodecContext *avctx,
         n = avctx->frame_size / 8;
         c->status[0].prev_sample = (signed short)samples[0]; /* XXX */
         /* c->status[0].step_index = 0; *//* XXX: not sure how to init the state machine */
-        *dst++ = (c->status[0].prev_sample) & 0xFF; /* little endian */
-        *dst++ = (c->status[0].prev_sample >> 8) & 0xFF;
+        bytestream_put_le16(&dst, c->status[0].prev_sample);
         *dst++ = (unsigned char)c->status[0].step_index;
         *dst++ = 0; /* unknown */
         samples++;
         if (avctx->channels == 2) {
             c->status[1].prev_sample = (signed short)samples[1];
             /* c->status[1].step_index = 0; */
-            *dst++ = (c->status[1].prev_sample) & 0xFF;
-            *dst++ = (c->status[1].prev_sample >> 8) & 0xFF;
+            bytestream_put_le16(&dst, c->status[1].prev_sample);
             *dst++ = (unsigned char)c->status[1].step_index;
             *dst++ = 0;
             samples++;
@@ -553,20 +551,17 @@ static int adpcm_encode_frame(AVCodecContext *avctx,
             if (c->status[i].idelta < 16)
                 c->status[i].idelta = 16;
-            *dst++ = c->status[i].idelta & 0xFF;
-            *dst++ = c->status[i].idelta >> 8;
+            bytestream_put_le16(&dst, c->status[i].idelta);
         }
         for(i=0; i<avctx->channels; i++){
             c->status[i].sample1= *samples++;
-            *dst++ = c->status[i].sample1 & 0xFF;
-            *dst++ = c->status[i].sample1 >> 8;
+            bytestream_put_le16(&dst, c->status[i].sample1);
         }
         for(i=0; i<avctx->channels; i++){
             c->status[i].sample2= *samples++;
-            *dst++ = c->status[i].sample2 & 0xFF;
-            *dst++ = c->status[i].sample2 >> 8;
+            bytestream_put_le16(&dst, c->status[i].sample2);
         }
         if(avctx->trellis > 0) {
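Several of the hunks (adpcm.c above, and dsicinav.c, gif.c, interplayvideo.c, kmvc.c, oggvorbis.c, rangecoder.c, ulti.c, vp56.h below) use the bytestream.h helpers instead. These perform the same fixed-endian load/store but also advance the caller's cursor, which is why they take the address of the pointer. A minimal sketch of the assumed semantics (hypothetical helper names, not FFmpeg's implementation):

    #include <stdint.h>

    /* Sketch of bytestream_put_le16()/bytestream_get_le16() semantics:
     * store or load a little-endian 16-bit value and advance the cursor. */
    static inline void put_le16(uint8_t **dst, unsigned v)
    {
        (*dst)[0] = v;
        (*dst)[1] = v >> 8;
        *dst += 2;
    }

    static inline unsigned get_le16(const uint8_t **src)
    {
        unsigned v = (*src)[0] | ((*src)[1] << 8);
        *src += 2;
        return v;
    }

That is why a call such as bytestream_put_le16(&dst, c->status[0].prev_sample) replaces both *dst++ stores in one line, and why the separate s->stream_ptr += 2 / buf += 3 adjustments disappear wherever a get helper is used.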

libavcodec/dsicinav.c
@@ -25,6 +25,7 @@
  */
 #include "avcodec.h"
+#include "bytestream.h"
 typedef enum CinVideoBitmapIndex {
@@ -206,7 +207,7 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
     }
     palette_type = buf[0];
-    palette_colors_count = buf[1] | (buf[2] << 8);
+    palette_colors_count = AV_RL16(buf+1);
     bitmap_frame_type = buf[3];
     buf += 4;
@@ -215,13 +216,12 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
     /* handle palette */
     if (palette_type == 0) {
         for (i = 0; i < palette_colors_count; ++i) {
-            cin->palette[i] = (buf[2] << 16) | (buf[1] << 8) | buf[0];
-            buf += 3;
+            cin->palette[i] = bytestream_get_le24(&buf);
             bitmap_frame_size -= 3;
         }
     } else {
         for (i = 0; i < palette_colors_count; ++i) {
-            cin->palette[buf[0]] = (buf[3] << 16) | (buf[2] << 8) | buf[1];
+            cin->palette[buf[0]] = AV_RL24(buf+1);
             buf += 4;
             bitmap_frame_size -= 4;
         }

libavcodec/gif.c
@@ -132,13 +132,9 @@ static void gif_put_bits_rev(PutBitContext *s, int n, unsigned int value)
     } else {
         bit_buf |= value << (bit_cnt);
-        *s->buf_ptr = bit_buf & 0xff;
-        s->buf_ptr[1] = (bit_buf >> 8) & 0xff;
-        s->buf_ptr[2] = (bit_buf >> 16) & 0xff;
-        s->buf_ptr[3] = (bit_buf >> 24) & 0xff;
+        bytestream_put_le32(&s->buf_ptr, bit_buf);
         //printf("bitbuf = %08x\n", bit_buf);
-        s->buf_ptr+=4;
         if (s->buf_ptr >= s->buf_end)
             puts("bit buffer overflow !!"); // should never happen ! who got rid of the callback ???
 //            flush_buffer_rev(s);
@@ -195,9 +191,7 @@ static int gif_image_write_header(uint8_t **bytestream,
     } else {
         for(i=0;i<256;i++) {
             v = palette[i];
-            bytestream_put_byte(bytestream, (v >> 16) & 0xff);
-            bytestream_put_byte(bytestream, (v >> 8) & 0xff);
-            bytestream_put_byte(bytestream, (v) & 0xff);
+            bytestream_put_be24(bytestream, v);
         }
     }

libavcodec/gifdec.c
@@ -96,8 +96,7 @@ static int gif_read_image(GifState *s)
         n = (1 << bits_per_pixel);
         spal = palette;
         for(i = 0; i < n; i++) {
-            s->image_palette[i] = (0xff << 24) |
-                (spal[0] << 16) | (spal[1] << 8) | (spal[2]);
+            s->image_palette[i] = (0xff << 24) | AV_RB24(spal);
             spal += 3;
         }
         for(; i < 256; i++)

libavcodec/interplayvideo.c
@@ -41,6 +41,7 @@
 #include <unistd.h>
 #include "avcodec.h"
+#include "bytestream.h"
 #include "dsputil.h"
 #define PALETTE_COUNT 256
@@ -297,10 +298,8 @@ static int ipvideo_decode_block_opcode_0x7(IpvideoContext *s)
         /* need 2 more bytes from the stream */
         CHECK_STREAM_PTR(2);
-        B[0] = *s->stream_ptr++;
-        B[1] = *s->stream_ptr++;
-        flags = (B[1] << 8) | B[0];
+        flags = bytestream_get_le16(&s->stream_ptr);
         bitmask = 0x0001;
         for (y = 0; y < 8; y += 2) {
             for (x = 0; x < 8; x += 2, bitmask <<= 1) {
@@ -478,7 +477,6 @@ static int ipvideo_decode_block_opcode_0x9(IpvideoContext *s)
 {
     int x, y;
     unsigned char P[4];
-    unsigned char B[4];
     unsigned int flags = 0;
     int shifter = 0;
     unsigned char pix;
@@ -496,8 +494,7 @@ static int ipvideo_decode_block_opcode_0x9(IpvideoContext *s)
         for (y = 0; y < 8; y++) {
             /* get the next set of 8 2-bit flags */
-            flags = (s->stream_ptr[1] << 8) | s->stream_ptr[0];
-            s->stream_ptr += 2;
+            flags = bytestream_get_le16(&s->stream_ptr);
             for (x = 0, shifter = 0; x < 8; x++, shifter += 2) {
                 *s->pixel_ptr++ = P[(flags >> shifter) & 0x03];
             }
@@ -509,11 +506,7 @@ static int ipvideo_decode_block_opcode_0x9(IpvideoContext *s)
         /* 1 of 4 colors for each 2x2 block, need 4 more bytes */
         CHECK_STREAM_PTR(4);
-        B[0] = *s->stream_ptr++;
-        B[1] = *s->stream_ptr++;
-        B[2] = *s->stream_ptr++;
-        B[3] = *s->stream_ptr++;
-        flags = (B[3] << 24) | (B[2] << 16) | (B[1] << 8) | B[0];
+        flags = bytestream_get_le32(&s->stream_ptr);
         shifter = 0;
         for (y = 0; y < 8; y += 2) {
@@ -535,11 +528,7 @@ static int ipvideo_decode_block_opcode_0x9(IpvideoContext *s)
         for (y = 0; y < 8; y++) {
             /* time to reload flags? */
             if ((y == 0) || (y == 4)) {
-                B[0] = *s->stream_ptr++;
-                B[1] = *s->stream_ptr++;
-                B[2] = *s->stream_ptr++;
-                B[3] = *s->stream_ptr++;
-                flags = (B[3] << 24) | (B[2] << 16) | (B[1] << 8) | B[0];
+                flags = bytestream_get_le32(&s->stream_ptr);
                 shifter = 0;
             }
             for (x = 0; x < 8; x += 2, shifter += 2) {
@@ -558,11 +547,7 @@ static int ipvideo_decode_block_opcode_0x9(IpvideoContext *s)
         for (y = 0; y < 8; y += 2) {
             /* time to reload flags? */
             if ((y == 0) || (y == 4)) {
-                B[0] = *s->stream_ptr++;
-                B[1] = *s->stream_ptr++;
-                B[2] = *s->stream_ptr++;
-                B[3] = *s->stream_ptr++;
-                flags = (B[3] << 24) | (B[2] << 16) | (B[1] << 8) | B[0];
+                flags = bytestream_get_le32(&s->stream_ptr);
                 shifter = 0;
             }
             for (x = 0; x < 8; x++, shifter += 2) {

libavcodec/kmvc.c
@@ -29,6 +29,7 @@
 #include <stdlib.h>
 #include "avcodec.h"
+#include "bytestream.h"
 #define KMVC_KEYFRAME 0x80
 #define KMVC_PALETTE 0x40
@@ -249,7 +250,7 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, uint
         if (buf[0] == 127) {
             buf += 3;
             for (i = 0; i < 127; i++) {
-                ctx->pal[i + (header & 0x81)] = (buf[0] << 16) | (buf[1] << 8) | buf[2];
+                ctx->pal[i + (header & 0x81)] = AV_RB24(buf);
                 buf += 4;
             }
             buf -= 127 * 4 + 3;
@@ -274,8 +275,7 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, uint
         ctx->pic.palette_has_changed = 1;
         // palette starts from index 1 and has 127 entries
         for (i = 1; i <= ctx->palsize; i++) {
-            ctx->pal[i] = (buf[0] << 16) | (buf[1] << 8) | buf[2];
-            buf += 3;
+            ctx->pal[i] = bytestream_get_be24(&buf);
         }
     }

libavcodec/lcl.c
@@ -358,13 +358,12 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8
         for (row = 0; row < height; row++) {
             pixel_ptr = row * width * 3;
             yq = encoded[pixel_ptr++];
-            uqvq = encoded[pixel_ptr++];
-            uqvq+=(encoded[pixel_ptr++] << 8);
+            uqvq = AV_RL16(encoded+pixel_ptr);
+            pixel_ptr += 2;
             for (col = 1; col < width; col++) {
                 encoded[pixel_ptr] = yq -= encoded[pixel_ptr];
-                uqvq -= (encoded[pixel_ptr+1] | (encoded[pixel_ptr+2]<<8));
-                encoded[pixel_ptr+1] = (uqvq) & 0xff;
-                encoded[pixel_ptr+2] = ((uqvq)>>8) & 0xff;
+                uqvq -= AV_RL16(encoded+pixel_ptr+1);
+                AV_WL16(encoded+pixel_ptr+1, uqvq);
                 pixel_ptr += 3;
             }
         }

libavcodec/libtheoraenc.c
@@ -69,8 +69,8 @@ static int concatenate_packet(unsigned int* offset, AVCodecContext* avc_context,
     avc_context->extradata = newdata;
     avc_context->extradata_size = newsize;
-    avc_context->extradata[ (*offset)++ ] = packet->bytes >> 8;
-    avc_context->extradata[ (*offset)++ ] = packet->bytes & 0xff;
+    AV_WB16(avc_context->extradata + (*offset), packet->bytes);
+    *offset += 2;
     memcpy( avc_context->extradata + (*offset), packet->packet, packet->bytes );
     (*offset) += packet->bytes;
     return 0;

libavcodec/mjpegenc.c
@@ -147,8 +147,7 @@ static void jpeg_table_header(MpegEncContext *s)
                               ff_mjpeg_val_ac_luminance);
     size += put_huffman_table(s, 1, 1, ff_mjpeg_bits_ac_chrominance,
                               ff_mjpeg_val_ac_chrominance);
-    ptr[0] = size >> 8;
-    ptr[1] = size;
+    AV_WB16(ptr, size);
 }
 static void jpeg_put_comments(MpegEncContext *s)
@@ -179,8 +178,7 @@ static void jpeg_put_comments(MpegEncContext *s)
         put_bits(p, 16, 0); /* patched later */
         ff_put_string(p, LIBAVCODEC_IDENT, 1);
         size = strlen(LIBAVCODEC_IDENT)+3;
-        ptr[0] = size >> 8;
-        ptr[1] = size;
+        AV_WB16(ptr, size);
     }
     if( s->avctx->pix_fmt == PIX_FMT_YUV420P
@@ -192,8 +190,7 @@ static void jpeg_put_comments(MpegEncContext *s)
         put_bits(p, 16, 0); /* patched later */
         ff_put_string(p, "CS=ITU601", 1);
         size = strlen("CS=ITU601")+3;
-        ptr[0] = size >> 8;
-        ptr[1] = size;
+        AV_WB16(ptr, size);
     }
 }

libavcodec/mp3_header_decompress_bsf.c
@@ -84,10 +84,7 @@ static int mp3_header_decompress(AVBitStreamFilterContext *bsfc, AVCodecContext
         }
     }
-    (*poutbuf)[0]= header>>24;
-    (*poutbuf)[1]= header>>16;
-    (*poutbuf)[2]= header>> 8;
-    (*poutbuf)[3]= header ;
+    AV_WB32(*poutbuf, header);
     return 1;
 }

libavcodec/mp3lameaudio.c
@@ -106,8 +106,7 @@ static const int sBitsPerSlot[3] = {
 static int mp3len(void *data, int *samplesPerFrame, int *sampleRate)
 {
-    uint8_t *dataTmp = (uint8_t *)data;
-    uint32_t header = ( (uint32_t)dataTmp[0] << 24 ) | ( (uint32_t)dataTmp[1] << 16 ) | ( (uint32_t)dataTmp[2] << 8 ) | (uint32_t)dataTmp[3];
+    uint32_t header = AV_RB32(data);
     int layerID = 3 - ((header >> 17) & 0x03);
     int bitRateID = ((header >> 12) & 0x0f);
     int sampleRateID = ((header >> 10) & 0x03);

libavcodec/mpegaudio_parser.c
@@ -105,10 +105,7 @@ static int mpegaudio_parse(AVCodecParserContext *s1,
             /* special case for next header for first frame in free
                format case (XXX: find a simpler method) */
             if (s->free_format_next_header != 0) {
-                s->inbuf[0] = s->free_format_next_header >> 24;
-                s->inbuf[1] = s->free_format_next_header >> 16;
-                s->inbuf[2] = s->free_format_next_header >> 8;
-                s->inbuf[3] = s->free_format_next_header;
+                AV_WB32(s->inbuf, s->free_format_next_header);
                 s->inbuf_ptr = s->inbuf + 4;
                 s->free_format_next_header = 0;
                 goto got_header;
@@ -124,8 +121,7 @@ static int mpegaudio_parse(AVCodecParserContext *s1,
             }
             if ((s->inbuf_ptr - s->inbuf) >= MPA_HEADER_SIZE) {
             got_header:
-                header = (s->inbuf[0] << 24) | (s->inbuf[1] << 16) |
-                    (s->inbuf[2] << 8) | s->inbuf[3];
+                header = AV_RB32(s->inbuf);
                 ret = ff_mpa_decode_header(avctx, header, &sr);
                 if (ret < 0) {
@@ -176,10 +172,8 @@ static int mpegaudio_parse(AVCodecParserContext *s1,
                     p = s->inbuf_ptr - 3;
                     pend = s->inbuf_ptr + len - 4;
                     while (p <= pend) {
-                        header = (p[0] << 24) | (p[1] << 16) |
-                            (p[2] << 8) | p[3];
-                        header1 = (s->inbuf[0] << 24) | (s->inbuf[1] << 16) |
-                            (s->inbuf[2] << 8) | s->inbuf[3];
+                        header = AV_RB32(p);
+                        header1 = AV_RB32(s->inbuf);
                         /* check with high probability that we have a
                            valid header */
                         if ((header & SAME_HEADER_MASK) ==

libavcodec/mpegaudiodec.c
@@ -2380,7 +2380,7 @@ retry:
     if(buf_size < HEADER_SIZE)
         return -1;
-    header = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
+    header = AV_RB32(buf);
     if(ff_mpa_check_header(header) < 0){
         buf++;
 //        buf_size--;
@@ -2459,7 +2459,7 @@ static int decode_frame_adu(AVCodecContext * avctx,
         len = MPA_MAX_CODED_FRAME_SIZE;
     // Get header and restore sync word
-    header = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3] | 0xffe00000;
+    header = AV_RB32(buf) | 0xffe00000;
     if (ff_mpa_check_header(header) < 0) { // Bad header, discard frame
         *data_size = 0;
@@ -2604,7 +2604,7 @@ static int decode_frame_mp3on4(AVCodecContext * avctx,
         assert (m != NULL);
         // Get header
-        header = (start[0] << 24) | (start[1] << 16) | (start[2] << 8) | start[3] | 0xfff00000;
+        header = AV_RB32(start) | 0xfff00000;
         if (ff_mpa_check_header(header) < 0) { // Bad header, discard block
             *data_size = 0;

libavcodec/oggvorbis.c
@@ -27,6 +27,7 @@
 #include <vorbis/vorbisenc.h>
 #include "avcodec.h"
+#include "bytestream.h"
 #undef NDEBUG
 #include <assert.h>
@@ -234,8 +235,7 @@ static int oggvorbis_decode_init(AVCodecContext *avccontext) {
     if(p[0] == 0 && p[1] == 30) {
         for(i = 0; i < 3; i++){
-            hsizes[i] = *p++ << 8;
-            hsizes[i] += *p++;
+            hsizes[i] = bytestream_get_be16(&p);
             headers[i] = p;
             p += hsizes[i];
         }

libavcodec/png.c
@@ -693,10 +693,7 @@ static void png_write_chunk(uint8_t **f, uint32_t tag,
     bytestream_put_be32(f, length);
     crc = crc32(0, Z_NULL, 0);
-    tagbuf[0] = tag;
-    tagbuf[1] = tag >> 8;
-    tagbuf[2] = tag >> 16;
-    tagbuf[3] = tag >> 24;
+    AV_WL32(tagbuf, tag);
     crc = crc32(crc, tagbuf, 4);
     bytestream_put_be32(f, bswap_32(tag));
     if (length > 0) {
@@ -833,10 +830,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
             if (alpha && alpha != 0xff)
                 has_alpha = 1;
             *alpha_ptr++ = alpha;
-            ptr[0] = v >> 16;
-            ptr[1] = v >> 8;
-            ptr[2] = v;
-            ptr += 3;
+            bytestream_put_be24(&ptr, v);
         }
         png_write_chunk(&s->bytestream, MKTAG('P', 'L', 'T', 'E'), s->buf, 256 * 3);
         if (has_alpha) {

libavcodec/pnmenc.c
@@ -19,6 +19,7 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 #include "avcodec.h"
+#include "bytestream.h"
 #include "pnm.h"
@@ -303,9 +304,7 @@ static int pam_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int bu
     for(i=0;i<h;i++) {
         for(j=0;j<w;j++) {
             v = ((uint32_t *)ptr)[j];
-            *s->bytestream++ = v >> 16;
-            *s->bytestream++ = v >> 8;
-            *s->bytestream++ = v;
+            bytestream_put_be24(&s->bytestream, v);
             *s->bytestream++ = v >> 24;
         }
         ptr += linesize;

libavcodec/rangecoder.c
@@ -36,6 +36,7 @@
 #include "avcodec.h"
 #include "rangecoder.h"
+#include "bytestream.h"
 void ff_init_range_encoder(RangeCoder *c, uint8_t *buf, int buf_size){
@@ -53,8 +54,7 @@ void ff_init_range_decoder(RangeCoder *c, const uint8_t *buf, int buf_size){
     /* cast to avoid compiler warning */
    ff_init_range_encoder(c, (uint8_t *) buf, buf_size);
-    c->low =(*c->bytestream++)<<8;
-    c->low+= *c->bytestream++;
+    c->low = bytestream_get_be16(&c->bytestream);
 }
 void ff_build_rac_states(RangeCoder *c, int factor, int max_p){

libavcodec/smacker.c
@@ -436,11 +436,9 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8
             case 0:
                 for(i = 0; i < 4; i++) {
                     pix = smk_get_code(&gb, smk->full_tbl, smk->full_last);
-                    out[2] = pix & 0xFF;
-                    out[3] = pix >> 8;
+                    AV_WL16(out+2,pix);
                     pix = smk_get_code(&gb, smk->full_tbl, smk->full_last);
-                    out[0] = pix & 0xFF;
-                    out[1] = pix >> 8;
+                    AV_WL16(out,pix);
                     out += stride;
                 }
                 break;
@@ -465,11 +463,11 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8
                     uint16_t pix1, pix2;
                     pix1 = smk_get_code(&gb, smk->full_tbl, smk->full_last);
                     pix2 = smk_get_code(&gb, smk->full_tbl, smk->full_last);
-                    out[0] = pix1 & 0xFF; out[1] = pix1 >> 8;
-                    out[2] = pix2 & 0xFF; out[3] = pix2 >> 8;
+                    AV_WL16(out,pix1);
+                    AV_WL16(out+2,pix2);
                     out += stride;
-                    out[0] = pix1 & 0xFF; out[1] = pix1 >> 8;
-                    out[2] = pix2 & 0xFF; out[3] = pix2 >> 8;
+                    AV_WL16(out,pix1);
+                    AV_WL16(out+2,pix2);
                     out += stride;
                 }
                 break;

libavcodec/sp5xdec.c
@@ -65,10 +65,8 @@ static int sp5x_decode_frame(AVCodecContext *avctx,
     j += sizeof(sp5x_data_dht);
     memcpy(recoded+j, &sp5x_data_sof[0], sizeof(sp5x_data_sof));
-    recoded[j+5] = (avctx->coded_height >> 8) & 0xFF;
-    recoded[j+6] = avctx->coded_height & 0xFF;
-    recoded[j+7] = (avctx->coded_width >> 8) & 0xFF;
-    recoded[j+8] = avctx->coded_width & 0xFF;
+    AV_WB16(recoded+j+5, avctx->coded_height);
+    AV_WB16(recoded+j+7, avctx->coded_width);
     j += sizeof(sp5x_data_sof);
     memcpy(recoded+j, &sp5x_data_sos[0], sizeof(sp5x_data_sos));

libavcodec/tiertexseqv.c
@@ -146,7 +146,7 @@ static void seqvideo_decode(SeqVideoContext *seq, unsigned char *data, int data_
         for (i = 0; i < 256; i++) {
             for (j = 0; j < 3; j++, data++)
                 c[j] = (*data << 2) | (*data >> 4);
-            seq->palette[i] = (c[0] << 16) | (c[1] << 8) | c[2];
+            seq->palette[i] = AV_RB24(c);
         }
         memcpy(seq->frame.data[1], seq->palette, sizeof(seq->palette));
         seq->frame.palette_has_changed = 1;

libavcodec/ulti.c
@@ -31,6 +31,7 @@
 #include <unistd.h>
 #include "avcodec.h"
+#include "bytestream.h"
 #include "ulti_cb.h"
@@ -305,9 +306,7 @@ static int ulti_decode_frame(AVCodecContext *avctx,
                 case 2:
                     if (modifier) { // unpack four luma samples
-                        tmp = (*buf++) << 16;
-                        tmp += (*buf++) << 8;
-                        tmp += *buf++;
+                        tmp = bytestream_get_be24(&buf);
                         Y[0] = (tmp >> 18) & 0x3F;
                         Y[1] = (tmp >> 12) & 0x3F;
@@ -315,8 +314,7 @@ static int ulti_decode_frame(AVCodecContext *avctx,
                         Y[3] = tmp & 0x3F;
                         angle = 16;
                     } else { // retrieve luma samples from codebook
-                        tmp = (*buf++) << 8;
-                        tmp += (*buf++);
+                        tmp = bytestream_get_be16(&buf);
                         angle = (tmp >> 12) & 0xF;
                         tmp &= 0xFFF;
@@ -332,33 +330,25 @@ static int ulti_decode_frame(AVCodecContext *avctx,
                     if (modifier) { // all 16 luma samples
                         uint8_t Luma[16];
-                        tmp = (*buf++) << 16;
-                        tmp += (*buf++) << 8;
-                        tmp += *buf++;
+                        tmp = bytestream_get_be24(&buf);
                         Luma[0] = (tmp >> 18) & 0x3F;
                         Luma[1] = (tmp >> 12) & 0x3F;
                         Luma[2] = (tmp >> 6) & 0x3F;
                         Luma[3] = tmp & 0x3F;
-                        tmp = (*buf++) << 16;
-                        tmp += (*buf++) << 8;
-                        tmp += *buf++;
+                        tmp = bytestream_get_be24(&buf);
                         Luma[4] = (tmp >> 18) & 0x3F;
                         Luma[5] = (tmp >> 12) & 0x3F;
                         Luma[6] = (tmp >> 6) & 0x3F;
                         Luma[7] = tmp & 0x3F;
-                        tmp = (*buf++) << 16;
-                        tmp += (*buf++) << 8;
-                        tmp += *buf++;
+                        tmp = bytestream_get_be24(&buf);
                         Luma[8] = (tmp >> 18) & 0x3F;
                         Luma[9] = (tmp >> 12) & 0x3F;
                         Luma[10] = (tmp >> 6) & 0x3F;
                         Luma[11] = tmp & 0x3F;
-                        tmp = (*buf++) << 16;
-                        tmp += (*buf++) << 8;
-                        tmp += *buf++;
+                        tmp = bytestream_get_be24(&buf);
                         Luma[12] = (tmp >> 18) & 0x3F;
                         Luma[13] = (tmp >> 12) & 0x3F;
                         Luma[14] = (tmp >> 6) & 0x3F;

libavcodec/vp56.h
@@ -27,6 +27,7 @@
 #include "vp56data.h"
 #include "dsputil.h"
 #include "mpegvideo.h"
+#include "bytestream.h"
 typedef struct vp56_context vp56_context_t;
@@ -169,8 +170,7 @@ static inline void vp56_init_range_decoder(vp56_range_coder_t *c,
     c->high = 255;
     c->bits = 8;
     c->buffer = buf;
-    c->code_word = *c->buffer++ << 8;
-    c->code_word |= *c->buffer++;
+    c->code_word = bytestream_get_be16(&c->buffer);
 }
 static inline int vp56_rac_get_prob(vp56_range_coder_t *c, uint8_t prob)

libavcodec/wmadec.c
@@ -92,12 +92,11 @@ static int wma_decode_init(AVCodecContext * avctx)
     flags2 = 0;
     extradata = avctx->extradata;
     if (avctx->codec->id == CODEC_ID_WMAV1 && avctx->extradata_size >= 4) {
-        flags1 = extradata[0] | (extradata[1] << 8);
-        flags2 = extradata[2] | (extradata[3] << 8);
+        flags1 = AV_RL16(extradata);
+        flags2 = AV_RL16(extradata+2);
     } else if (avctx->codec->id == CODEC_ID_WMAV2 && avctx->extradata_size >= 6) {
-        flags1 = extradata[0] | (extradata[1] << 8) |
-            (extradata[2] << 16) | (extradata[3] << 24);
-        flags2 = extradata[4] | (extradata[5] << 8);
+        flags1 = AV_RL32(extradata);
+        flags2 = AV_RL16(extradata+4);
     }
 //    for(i=0; i<avctx->extradata_size; i++)
 //        av_log(NULL, AV_LOG_ERROR, "%02X ", extradata[i]);

libavcodec/wmaenc.c
@@ -45,19 +45,13 @@ static int encode_init(AVCodecContext * avctx){
     if (avctx->codec->id == CODEC_ID_WMAV1) {
         extradata= av_malloc(4);
         avctx->extradata_size= 4;
-        extradata[0] = flags1;
-        extradata[1] = flags1>>8;
-        extradata[2] = flags2;
-        extradata[3] = flags2>>8;
+        AV_WL16(extradata, flags1);
+        AV_WL16(extradata+2, flags2);
     } else if (avctx->codec->id == CODEC_ID_WMAV2) {
         extradata= av_mallocz(10);
         avctx->extradata_size= 10;
-        extradata[0] = flags1;
-        extradata[1] = flags1>>8;
-        extradata[2] = flags1>>16;
-        extradata[3] = flags1>>24;
-        extradata[4] = flags2;
-        extradata[5] = flags2>>8;
+        AV_WL32(extradata, flags1);
+        AV_WL16(extradata+4, flags2);
     }else
         assert(0);
     avctx->extradata= extradata;
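The wmaenc.c hunk writes the same little-endian flag fields that the wmadec.c hunk above reads back with AV_RL16/AV_RL32, so encoder and decoder stay byte-compatible. A small, self-contained round-trip check of that property under the sketched semantics (standalone example with local stand-in helpers, not FFmpeg code):

    #include <assert.h>
    #include <stdint.h>

    /* Local stand-ins with AV_WL16/AV_RL16 semantics, for this check only. */
    static void     wl16(uint8_t *p, unsigned v) { p[0] = v; p[1] = v >> 8; }
    static unsigned rl16(const uint8_t *p)       { return p[0] | (p[1] << 8); }

    int main(void)
    {
        uint8_t extradata[4];
        unsigned flags1 = 0x1234, flags2 = 0x00c2;

        wl16(extradata,     flags1);   /* what the WMAV1 encoder stores */
        wl16(extradata + 2, flags2);

        assert(rl16(extradata)     == flags1);   /* what the decoder reads back */
        assert(rl16(extradata + 2) == flags2);
        return 0;
    }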

libavcodec/xan.c
@@ -354,8 +354,7 @@ static void xan_wc3_decode_frame(XanContext *s) {
         case 11:
         case 21:
-            size = (size_segment[0] << 16) | (size_segment[1] << 8) |
-                size_segment[2];
+            size = AV_RB24(size_segment);
             size_segment += 3;
             break;
         }

libavcodec/zmbv.c
@@ -579,9 +579,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8
             for(i = 0; i < c->width; i++) {
                 uint32_t tmp = AV_RL32(src);
                 src += 4;
-                out[i * 3 + 0] = tmp >> 16;
-                out[i * 3 + 1] = tmp >> 8;
-                out[i * 3 + 2] = tmp >> 0;
+                AV_WB24(out+(i*3), tmp);
             }
             out += c->pic.linesize[0];
         }

libavcodec/zmbvenc.c
@@ -144,9 +144,7 @@ static int encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size, void
     if(chpal){
         uint8_t tpal[3];
         for(i = 0; i < 256; i++){
-            tpal[0] = palptr[i] >> 16;
-            tpal[1] = palptr[i] >> 8;
-            tpal[2] = palptr[i];
+            AV_WB24(tpal, palptr[i]);
             c->work_buf[work_size++] = tpal[0] ^ c->pal[i * 3 + 0];
             c->work_buf[work_size++] = tpal[1] ^ c->pal[i * 3 + 1];
             c->work_buf[work_size++] = tpal[2] ^ c->pal[i * 3 + 2];
@@ -158,9 +156,7 @@ static int encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size, void
     }
     if(keyframe){
         for(i = 0; i < 256; i++){
-            c->pal[i*3 + 0] = palptr[i] >> 16;
-            c->pal[i*3 + 1] = palptr[i] >> 8;
-            c->pal[i*3 + 2] = palptr[i];
+            AV_WB24(c->pal+(i*3), palptr[i]);
         }
         memcpy(c->work_buf, c->pal, 768);
         memcpy(c->pal2, p->data[1], 1024);
