Remove unused variables

Branch: pull/2/head
Author: Måns Rullgård (14 years ago)
parent 808d8ff6bb
commit e65ab9d94f

41 changed files (lines changed per file):

  1. ffmpeg.c (8)
  2. libavcodec/a64multienc.c (2)
  3. libavcodec/aaccoder.c (9)
  4. libavcodec/celp_filters.c (3)
  5. libavcodec/dca.c (6)
  6. libavcodec/dirac.c (4)
  7. libavcodec/ituh263dec.c (6)
  8. libavcodec/mpeg12.c (3)
  9. libavcodec/mpeg12enc.c (3)
  10. libavcodec/mpeg4videodec.c (19)
  11. libavcodec/mpegvideo_parser.c (3)
  12. libavcodec/msmpeg4.c (4)
  13. libavcodec/pngdec.c (13)
  14. libavcodec/rv10.c (4)
  15. libavcodec/s302m.c (3)
  16. libavcodec/shorten.c (3)
  17. libavcodec/sp5xdec.c (3)
  18. libavcodec/svq1dec.c (3)
  19. libavcodec/targa.c (6)
  20. libavcodec/truemotion2.c (14)
  21. libavcodec/tscc.c (3)
  22. libavcodec/vmdav.c (2)
  23. libavcodec/zmbvenc.c (4)
  24. libavfilter/vf_fieldorder.c (3)
  25. libavformat/4xm.c (4)
  26. libavformat/apetag.c (4)
  27. libavformat/asfdec.c (19)
  28. libavformat/avidec.c (4)
  29. libavformat/gif.c (4)
  30. libavformat/mmf.c (6)
  31. libavformat/mov.c (6)
  32. libavformat/oggdec.c (5)
  33. libavformat/oggparseogm.c (4)
  34. libavformat/rl2.c (6)
  35. libavformat/rmdec.c (4)
  36. libavformat/rpl.c (4)
  37. libavformat/rtpdec_latm.c (5)
  38. libavformat/sauce.c (4)
  39. libavformat/smacker.c (2)
  40. libavformat/sol.c (3)
  41. libavformat/yuv4mpeg.c (4)

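Most hunks below make one of two mechanical changes: a variable that was only ever assigned is dropped from its declaration, or a read whose result was never used (get_bits(), avio_rl32() and similar) is replaced by the matching skip call so the bitstream or I/O position still advances by the same amount. The standalone sketch below is illustrative only; it is not part of the commit and uses a hypothetical toy bit reader rather than FFmpeg's GetBitContext, but it shows the before/after shape of the change.

    /* Illustration only: toy bit reader, not FFmpeg code. */
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        const uint8_t *buf;   /* input buffer */
        size_t bitpos;        /* current read position, in bits */
    } BitReader;

    /* Read n bits MSB-first and return them. */
    static unsigned get_bits(BitReader *r, int n)
    {
        unsigned v = 0;
        while (n--) {
            v = (v << 1) | ((r->buf[r->bitpos >> 3] >> (7 - (r->bitpos & 7))) & 1);
            r->bitpos++;
        }
        return v;
    }

    /* Advance the read position without decoding a value. */
    static void skip_bits(BitReader *r, int n)
    {
        r->bitpos += n;
    }

    int main(void)
    {
        static const uint8_t hdr[2] = { 0xA5, 0x3C };
        BitReader r = { hdr, 0 };

        /* before: unsigned version_minor = get_bits(&r, 4);  (value never used) */
        skip_bits(&r, 4);                    /* after: just skip the field */
        unsigned level = get_bits(&r, 4);    /* this value is actually used */

        printf("level = %u\n", level);
        return 0;
    }
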
@@ -1117,7 +1117,7 @@ static void do_video_out(AVFormatContext *s,
int *frame_size)
{
int nb_frames, i, ret, resample_changed;
AVFrame *final_picture, *formatted_picture, *resampling_dst;
AVFrame *final_picture, *formatted_picture;
AVCodecContext *enc, *dec;
double sync_ipts;
@@ -1162,7 +1162,6 @@ static void do_video_out(AVFormatContext *s,
formatted_picture = in_picture;
final_picture = formatted_picture;
resampling_dst = &ost->pict_tmp;
resample_changed = ost->resample_width != dec->width ||
ost->resample_height != dec->height ||
@@ -1198,7 +1197,7 @@ static void do_video_out(AVFormatContext *s,
}
}
sws_scale(ost->img_resample_ctx, formatted_picture->data, formatted_picture->linesize,
0, ost->resample_height, resampling_dst->data, resampling_dst->linesize);
0, ost->resample_height, final_picture->data, final_picture->linesize);
}
#endif
@@ -3617,7 +3616,6 @@ static void new_audio_stream(AVFormatContext *oc, int file_idx)
static void new_data_stream(AVFormatContext *oc, int file_idx)
{
AVStream *st;
AVOutputStream *ost;
AVCodec *codec=NULL;
AVCodecContext *data_enc;
@@ -3626,7 +3624,7 @@ static void new_data_stream(AVFormatContext *oc, int file_idx)
fprintf(stderr, "Could not alloc stream\n");
ffmpeg_exit(1);
}
ost = new_output_stream(oc, file_idx);
new_output_stream(oc, file_idx);
data_enc = st->codec;
output_codecs = grow_array(output_codecs, sizeof(*output_codecs), &nb_output_codecs, nb_output_codecs + 1);
if (!data_stream_copy) {

@@ -252,7 +252,6 @@ static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf,
int b_width;
int req_size;
int num_frames = c->mc_lifetime;
int *charmap = c->mc_charmap;
uint8_t *colram = c->mc_colram;
@@ -280,7 +279,6 @@ static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf,
if (!c->mc_lifetime) return 0;
/* no more frames in queue, prepare to flush remaining frames */
if (!c->mc_frame_counter) {
num_frames = c->mc_lifetime;
c->mc_lifetime = 0;
}
/* still frames in queue so limit lifetime to remaining frames */

@@ -311,7 +311,7 @@ static void encode_window_bands_info(AACEncContext *s, SingleChannelElement *sce
int win, int group_len, const float lambda)
{
BandCodingPath path[120][12];
int w, swb, cb, start, start2, size;
int w, swb, cb, start, size;
int i, j;
const int max_sfb = sce->ics.max_sfb;
const int run_bits = sce->ics.num_windows == 1 ? 5 : 3;
@@ -329,7 +329,6 @@ static void encode_window_bands_info(AACEncContext *s, SingleChannelElement *sce
path[0][cb].run = 0;
}
for (swb = 0; swb < max_sfb; swb++) {
start2 = start;
size = sce->ics.swb_sizes[swb];
if (sce->zeroes[win*16 + swb]) {
for (cb = 0; cb < 12; cb++) {
@@ -413,7 +412,7 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
int win, int group_len, const float lambda)
{
BandCodingPath path[120][12];
int w, swb, cb, start, start2, size;
int w, swb, cb, start, size;
int i, j;
const int max_sfb = sce->ics.max_sfb;
const int run_bits = sce->ics.num_windows == 1 ? 5 : 3;
@@ -431,7 +430,6 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
path[0][cb].run = 0;
}
for (swb = 0; swb < max_sfb; swb++) {
start2 = start;
size = sce->ics.swb_sizes[swb];
if (sce->zeroes[win*16 + swb]) {
for (cb = 0; cb < 12; cb++) {
@@ -1006,12 +1004,11 @@ static void search_for_quantizers_fast(AVCodecContext *avctx, AACEncContext *s,
SingleChannelElement *sce,
const float lambda)
{
int start = 0, i, w, w2, g;
int i, w, w2, g;
int minq = 255;
memset(sce->sf_idx, 0, sizeof(sce->sf_idx));
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
start = w*128;
for (g = 0; g < sce->ics.num_swb; g++) {
for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
FFPsyBand *band = &s->psy.psy_bands[s->cur_channel*PSY_MAX_BANDS+(w+w2)*16+g];

@@ -109,7 +109,7 @@ void ff_celp_lp_synthesis_filterf(float *out, const float *filter_coeffs,
old_out2 = out[-2];
old_out3 = out[-1];
for (n = 0; n <= buffer_length - 4; n+=4) {
float tmp0,tmp1,tmp2,tmp3;
float tmp0,tmp1,tmp2;
float val;
out0 = in[0];
@@ -160,7 +160,6 @@ void ff_celp_lp_synthesis_filterf(float *out, const float *filter_coeffs,
tmp0 = out0;
tmp1 = out1;
tmp2 = out2;
tmp3 = out3;
out3 -= a * tmp2;
out2 -= a * tmp1;

@@ -1535,8 +1535,6 @@ static void dca_exss_parse_header(DCAContext *s)
{
int ss_index;
int blownup;
int header_size;
int hd_size;
int num_audiop = 1;
int num_assets = 1;
int active_ss_mask[8];
@@ -1549,8 +1547,8 @@ static void dca_exss_parse_header(DCAContext *s)
ss_index = get_bits(&s->gb, 2);
blownup = get_bits1(&s->gb);
header_size = get_bits(&s->gb, 8 + 4 * blownup) + 1;
hd_size = get_bits_long(&s->gb, 16 + 4 * blownup) + 1;
skip_bits(&s->gb, 8 + 4 * blownup); // header_size
skip_bits(&s->gb, 16 + 4 * blownup); // hd_size
s->static_fields = get_bits1(&s->gb);
if (s->static_fields) {

@@ -245,11 +245,11 @@ static int parse_source_parameters(AVCodecContext *avctx, GetBitContext *gb,
int ff_dirac_parse_sequence_header(AVCodecContext *avctx, GetBitContext *gb,
dirac_source_params *source)
{
unsigned version_major, version_minor;
unsigned version_major;
unsigned video_format, picture_coding_mode;
version_major = svq3_get_ue_golomb(gb);
version_minor = svq3_get_ue_golomb(gb);
svq3_get_ue_golomb(gb); /* version_minor */
avctx->profile = svq3_get_ue_golomb(gb);
avctx->level = svq3_get_ue_golomb(gb);
video_format = svq3_get_ue_golomb(gb);

@@ -152,7 +152,7 @@ int ff_h263_decode_mba(MpegEncContext *s)
*/
static int h263_decode_gob_header(MpegEncContext *s)
{
unsigned int val, gfid, gob_number;
unsigned int val, gob_number;
int left;
/* Check for GOB Start Code */
@@ -183,12 +183,12 @@ static int h263_decode_gob_header(MpegEncContext *s)
s->qscale = get_bits(&s->gb, 5); /* SQUANT */
if(get_bits1(&s->gb)==0)
return -1;
gfid = get_bits(&s->gb, 2); /* GFID */
skip_bits(&s->gb, 2); /* GFID */
}else{
gob_number = get_bits(&s->gb, 5); /* GN */
s->mb_x= 0;
s->mb_y= s->gob_index* gob_number;
gfid = get_bits(&s->gb, 2); /* GFID */
skip_bits(&s->gb, 2); /* GFID */
s->qscale = get_bits(&s->gb, 5); /* GQUANT */
}

@@ -2128,14 +2128,13 @@ static void mpeg_decode_gop(AVCodecContext *avctx,
Mpeg1Context *s1 = avctx->priv_data;
MpegEncContext *s = &s1->mpeg_enc_ctx;
int drop_frame_flag;
int time_code_hours, time_code_minutes;
int time_code_seconds, time_code_pictures;
int broken_link;
init_get_bits(&s->gb, buf, buf_size*8);
drop_frame_flag = get_bits1(&s->gb);
skip_bits1(&s->gb); /* drop_frame_flag */
time_code_hours=get_bits(&s->gb,5);
time_code_minutes = get_bits(&s->gb,6);

@@ -761,10 +761,9 @@ void ff_mpeg1_encode_init(MpegEncContext *s)
if(mv==0) len= ff_mpeg12_mbMotionVectorTable[0][1];
else{
int val, bit_size, range, code;
int val, bit_size, code;
bit_size = f_code - 1;
range = 1 << bit_size;
val=mv;
if (val < 0)

@@ -397,14 +397,13 @@ int mpeg4_decode_video_packet_header(MpegEncContext *s)
header_extension= get_bits1(&s->gb);
}
if(header_extension){
int time_increment;
int time_incr=0;
while (get_bits1(&s->gb) != 0)
time_incr++;
check_marker(&s->gb, "before time_increment in video packed header");
time_increment= get_bits(&s->gb, s->time_increment_bits);
skip_bits(&s->gb, s->time_increment_bits); /* time_increment */
check_marker(&s->gb, "before vop_coding_type in video packed header");
skip_bits(&s->gb, 2); /* vop coding type */
@@ -1801,16 +1800,14 @@ no_cplx_est:
if (s->scalability) {
GetBitContext bak= *gb;
int ref_layer_id;
int ref_layer_sampling_dir;
int h_sampling_factor_n;
int h_sampling_factor_m;
int v_sampling_factor_n;
int v_sampling_factor_m;
s->hierachy_type= get_bits1(gb);
ref_layer_id= get_bits(gb, 4);
ref_layer_sampling_dir= get_bits1(gb);
skip_bits(gb, 4); /* ref_layer_id */
skip_bits1(gb); /* ref_layer_sampling_dir */
h_sampling_factor_n= get_bits(gb, 5);
h_sampling_factor_m= get_bits(gb, 5);
v_sampling_factor_n= get_bits(gb, 5);
@@ -1989,15 +1986,13 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
if (s->shape != RECT_SHAPE) {
if (s->vol_sprite_usage != 1 || s->pict_type != AV_PICTURE_TYPE_I) {
int width, height, hor_spat_ref, ver_spat_ref;
width = get_bits(gb, 13);
skip_bits(gb, 13); /* width */
skip_bits1(gb); /* marker */
height = get_bits(gb, 13);
skip_bits(gb, 13); /* height */
skip_bits1(gb); /* marker */
hor_spat_ref = get_bits(gb, 13); /* hor_spat_ref */
skip_bits(gb, 13); /* hor_spat_ref */
skip_bits1(gb); /* marker */
ver_spat_ref = get_bits(gb, 13); /* ver_spat_ref */
skip_bits(gb, 13); /* ver_spat_ref */
}
skip_bits1(gb); /* change_CR_disable */

@@ -32,7 +32,7 @@ static void mpegvideo_extract_headers(AVCodecParserContext *s,
uint32_t start_code;
int frame_rate_index, ext_type, bytes_left;
int frame_rate_ext_n, frame_rate_ext_d;
int picture_structure, top_field_first, repeat_first_field, progressive_frame;
int top_field_first, repeat_first_field, progressive_frame;
int horiz_size_ext, vert_size_ext, bit_rate_ext;
int did_set_size=0;
//FIXME replace the crap with get_bits()
@@ -91,7 +91,6 @@ static void mpegvideo_extract_headers(AVCodecParserContext *s,
break;
case 0x8: /* picture coding extension */
if (bytes_left >= 5) {
picture_structure = buf[2]&3;
top_field_first = buf[3] & (1 << 7);
repeat_first_field = buf[3] & (1 << 1);
progressive_frame = buf[4] & (1 << 7);

@@ -1528,9 +1528,7 @@ int msmpeg4_decode_ext_header(MpegEncContext * s, int buf_size)
/* the alt_bitstream reader could read over the end so we need to check it */
if(left>=length && left<length+8)
{
int fps;
fps= get_bits(&s->gb, 5);
skip_bits(&s->gb, 5); /* fps */
s->bit_rate= get_bits(&s->gb, 11)*1024;
if(s->msmpeg4_version>=3)
s->flipflop_rounding= get_bits1(&s->gb);

@@ -397,7 +397,7 @@ static int decode_frame(AVCodecContext *avctx,
AVFrame *p;
uint8_t *crow_buf_base = NULL;
uint32_t tag, length;
int ret, crc;
int ret;
FFSWAP(AVFrame *, s->current_picture, s->last_picture);
avctx->coded_frame= s->current_picture;
@@ -451,7 +451,7 @@ static int decode_frame(AVCodecContext *avctx,
s->compression_type = *s->bytestream++;
s->filter_type = *s->bytestream++;
s->interlace_type = *s->bytestream++;
crc = bytestream_get_be32(&s->bytestream);
s->bytestream += 4; /* crc */
s->state |= PNG_IHDR;
av_dlog(avctx, "width=%d height=%d depth=%d color_type=%d compression_type=%d filter_type=%d interlace_type=%d\n",
s->width, s->height, s->bit_depth, s->color_type,
@@ -547,8 +547,7 @@ static int decode_frame(AVCodecContext *avctx,
s->state |= PNG_IDAT;
if (png_decode_idat(s, length) < 0)
goto fail;
/* skip crc */
crc = bytestream_get_be32(&s->bytestream);
s->bytestream += 4; /* crc */
break;
case MKTAG('P', 'L', 'T', 'E'):
{
@@ -568,7 +567,7 @@ static int decode_frame(AVCodecContext *avctx,
s->palette[i] = (0xff << 24);
}
s->state |= PNG_PLTE;
crc = bytestream_get_be32(&s->bytestream);
s->bytestream += 4; /* crc */
}
break;
case MKTAG('t', 'R', 'N', 'S'):
@@ -584,13 +583,13 @@ static int decode_frame(AVCodecContext *avctx,
v = *s->bytestream++;
s->palette[i] = (s->palette[i] & 0x00ffffff) | (v << 24);
}
crc = bytestream_get_be32(&s->bytestream);
s->bytestream += 4; /* crc */
}
break;
case MKTAG('I', 'E', 'N', 'D'):
if (!(s->state & PNG_ALLIMAGE))
goto fail;
crc = bytestream_get_be32(&s->bytestream);
s->bytestream += 4; /* crc */
goto exit_loop;
default:
/* skip tag */

@@ -235,7 +235,7 @@ int rv_decode_dc(MpegEncContext *s, int n)
/* read RV 1.0 compatible frame header */
static int rv10_decode_picture_header(MpegEncContext *s)
{
int mb_count, pb_frame, marker, unk, mb_xy;
int mb_count, pb_frame, marker, mb_xy;
marker = get_bits1(&s->gb);
@@ -282,7 +282,7 @@ static int rv10_decode_picture_header(MpegEncContext *s)
s->mb_y = 0;
mb_count = s->mb_width * s->mb_height;
}
unk= get_bits(&s->gb, 3); /* ignored */
skip_bits(&s->gb, 3); /* ignored */
s->f_code = 1;
s->unrestricted_mv = 1;

@@ -29,7 +29,7 @@ static int s302m_parse_frame_header(AVCodecContext *avctx, const uint8_t *buf,
int buf_size)
{
uint32_t h;
int frame_size, channels, id, bits;
int frame_size, channels, bits;
if (buf_size <= AES3_HEADER_LEN) {
av_log(avctx, AV_LOG_ERROR, "frame is too short\n");
@@ -48,7 +48,6 @@ static int s302m_parse_frame_header(AVCodecContext *avctx, const uint8_t *buf,
h = AV_RB32(buf);
frame_size = (h >> 16) & 0xffff;
channels = ((h >> 14) & 0x0003) * 2 + 2;
id = (h >> 6) & 0x00ff;
bits = ((h >> 4) & 0x0003) * 4 + 16;
if (AES3_HEADER_LEN + frame_size != buf_size || bits > 24) {

@@ -196,7 +196,6 @@ static int decode_wave_header(AVCodecContext *avctx, uint8_t *header, int header
{
GetBitContext hb;
int len;
int chunk_size;
short wave_format;
init_get_bits(&hb, header, header_size*8);
@@ -205,7 +204,7 @@ static int decode_wave_header(AVCodecContext *avctx, uint8_t *header, int header
return -1;
}
chunk_size = get_le32(&hb);
skip_bits_long(&hb, 32); /* chunk_size */
if (get_le32(&hb) != MKTAG('W','A','V','E')) {
av_log(avctx, AV_LOG_ERROR, "missing WAVE tag\n");

@@ -38,15 +38,12 @@ static int sp5x_decode_frame(AVCodecContext *avctx,
int buf_size = avpkt->size;
AVPacket avpkt_recoded;
const int qscale = 5;
const uint8_t *buf_ptr;
uint8_t *recoded;
int i = 0, j = 0;
if (!avctx->width || !avctx->height)
return -1;
buf_ptr = buf;
recoded = av_mallocz(buf_size + 1024);
if (!recoded)
return -1;

@@ -554,9 +554,8 @@ static void svq1_parse_string (GetBitContext *bitbuf, uint8_t *out) {
static int svq1_decode_frame_header (GetBitContext *bitbuf,MpegEncContext *s) {
int frame_size_code;
int temporal_reference;
temporal_reference = get_bits (bitbuf, 8);
skip_bits(bitbuf, 8); /* temporal_reference */
/* frame type */
s->pict_type= get_bits (bitbuf, 2)+1;

@@ -108,18 +108,18 @@ static int decode_frame(AVCodecContext *avctx,
AVFrame * const p= (AVFrame*)&s->picture;
uint8_t *dst;
int stride;
int idlen, pal, compr, x, y, w, h, bpp, flags;
int idlen, compr, y, w, h, bpp, flags;
int first_clr, colors, csize;
/* parse image header */
CHECK_BUFFER_SIZE(buf, buf_end, 18, "header");
idlen = *buf++;
pal = *buf++;
buf++; /* pal */
compr = *buf++;
first_clr = AV_RL16(buf); buf += 2;
colors = AV_RL16(buf); buf += 2;
csize = *buf++;
x = AV_RL16(buf); buf += 2;
buf += 2; /* x */
y = AV_RL16(buf); buf += 2;
w = AV_RL16(buf); buf += 2;
h = AV_RL16(buf); buf += 2;

@@ -201,7 +201,6 @@ static inline int tm2_read_header(TM2Context *ctx, const uint8_t *buf)
{
uint32_t magic;
const uint8_t *obuf;
int length;
obuf = buf;
@@ -212,19 +211,6 @@ static inline int tm2_read_header(TM2Context *ctx, const uint8_t *buf)
/* av_log (ctx->avctx, AV_LOG_ERROR, "TM2 old header: not implemented (yet)\n"); */
return 40;
} else if(magic == 0x00000101) { /* new header */
int w, h, size, flags, xr, yr;
length = AV_RL32(buf);
buf += 4;
init_get_bits(&ctx->gb, buf, 32 * 8);
size = get_bits_long(&ctx->gb, 31);
h = get_bits(&ctx->gb, 15);
w = get_bits(&ctx->gb, 15);
flags = get_bits_long(&ctx->gb, 31);
yr = get_bits(&ctx->gb, 9);
xr = get_bits(&ctx->gb, 9);
return 40;
} else {
av_log (ctx->avctx, AV_LOG_ERROR, "Not a TM2 header: 0x%08X\n", magic);

@@ -75,7 +75,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
int buf_size = avpkt->size;
CamtasiaContext * const c = avctx->priv_data;
const unsigned char *encoded = buf;
unsigned char *outptr;
int zret; // Zlib return code
int len = buf_size;
@@ -89,8 +88,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
return -1;
}
outptr = c->pic.data[0]; // Output image pointer
zret = inflateReset(&(c->zstream));
if (zret != Z_OK) {
av_log(avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", zret);

@@ -199,7 +199,6 @@ static void vmd_decode(VmdVideoContext *s)
int frame_x, frame_y;
int frame_width, frame_height;
int dp_size;
frame_x = AV_RL16(&s->buf[6]);
frame_y = AV_RL16(&s->buf[8]);
@@ -247,7 +246,6 @@ static void vmd_decode(VmdVideoContext *s)
}
dp = &s->frame.data[0][frame_y * s->frame.linesize[0] + frame_x];
dp_size = s->frame.linesize[0] * s->avctx->height;
pp = &s->prev_frame.data[0][frame_y * s->prev_frame.linesize[0] + frame_x];
switch (meth) {
case 1:

@@ -181,7 +181,7 @@ static int encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size, void
int x, y, bh2, bw2, xored;
uint8_t *tsrc, *tprev;
uint8_t *mv;
int mx, my, bv;
int mx, my;
bw = (avctx->width + ZMBV_BLOCK - 1) / ZMBV_BLOCK;
bh = (avctx->height + ZMBV_BLOCK - 1) / ZMBV_BLOCK;
@@ -197,7 +197,7 @@ static int encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size, void
tsrc = src + x;
tprev = prev + x;
bv = zmbv_me(c, tsrc, p->linesize[0], tprev, c->pstride, x, y, &mx, &my, &xored);
zmbv_me(c, tsrc, p->linesize[0], tprev, c->pstride, x, y, &mx, &my, &xored);
mv[0] = (mx << 1) | !!xored;
mv[1] = my << 1;
tprev += mx + my * c->pstride;

@@ -153,7 +153,7 @@ static void end_frame(AVFilterLink *inlink)
AVFilterBufferRef *inpicref = inlink->cur_buf;
AVFilterBufferRef *outpicref = outlink->out_buf;
int h, w, plane, line_step, line_size, line;
int h, plane, line_step, line_size, line;
uint8_t *cpy_src, *cpy_dst;
if ( inpicref->video->interlaced
@@ -162,7 +162,6 @@ static void end_frame(AVFilterLink *inlink)
"picture will move %s one line\n",
fieldorder->dst_tff ? "up" : "down");
h = inpicref->video->h;
w = inpicref->video->w;
for (plane = 0; plane < 4 && inpicref->data[plane]; plane++) {
line_step = inpicref->linesize[plane];
line_size = fieldorder->line_size[plane];

@@ -246,7 +246,7 @@ static int fourxm_read_packet(AVFormatContext *s,
FourxmDemuxContext *fourxm = s->priv_data;
AVIOContext *pb = s->pb;
unsigned int fourcc_tag;
unsigned int size, out_size;
unsigned int size;
int ret = 0;
unsigned int track_number;
int packet_read = 0;
@@ -295,7 +295,7 @@ static int fourxm_read_packet(AVFormatContext *s,
case snd__TAG:
track_number = avio_rl32(pb);
out_size= avio_rl32(pb);
avio_skip(pb, 4);
size-=8;
if (track_number < fourxm->track_count && fourxm->tracks[track_number].channels>0) {

@@ -35,11 +35,11 @@ static int ape_tag_read_field(AVFormatContext *s)
{
AVIOContext *pb = s->pb;
uint8_t key[1024], *value;
uint32_t size, flags;
uint32_t size;
int i, c;
size = avio_rl32(pb); /* field size */
flags = avio_rl32(pb); /* field flags */
avio_skip(pb, 4); /* field flags */
for (i = 0; i < sizeof(key) - 1; i++) {
c = avio_r8(pb);
if (c < 0x20 || c > 0x7E)

@@ -216,7 +216,6 @@ static int asf_read_stream_properties(AVFormatContext *s, int64_t size)
ff_asf_guid g;
enum AVMediaType type;
int type_specific_size, sizeX;
uint64_t total_size;
unsigned int tag1;
int64_t pos1, pos2, start_time;
int test_for_ext_stream_audio, is_dvr_ms_audio=0;
@@ -264,7 +263,7 @@ static int asf_read_stream_properties(AVFormatContext *s, int64_t size)
return -1;
}
ff_get_guid(pb, &g);
total_size = avio_rl64(pb);
avio_skip(pb, 8); /* total_size */
type_specific_size = avio_rl32(pb);
avio_rl32(pb);
st->id = avio_rl16(pb) & 0x7f; /* stream id */
@@ -401,7 +400,7 @@ static int asf_read_ext_stream_properties(AVFormatContext *s, int64_t size)
AVIOContext *pb = s->pb;
ff_asf_guid g;
int ext_len, payload_ext_ct, stream_ct, i;
uint32_t ext_d, leak_rate, stream_num;
uint32_t leak_rate, stream_num;
unsigned int stream_languageid_index;
avio_rl64(pb); // starttime
@@ -435,7 +434,7 @@ static int asf_read_ext_stream_properties(AVFormatContext *s, int64_t size)
for (i=0; i<payload_ext_ct; i++){
ff_get_guid(pb, &g);
ext_d=avio_rl16(pb);
avio_skip(pb, 2);
ext_len=avio_rl32(pb);
avio_skip(pb, ext_len);
}
@@ -519,7 +518,7 @@ static int asf_read_metadata(AVFormatContext *s, int64_t size)
{
AVIOContext *pb = s->pb;
ASFContext *asf = s->priv_data;
int n, stream_num, name_len, value_len, value_type, value_num;
int n, stream_num, name_len, value_len, value_num;
int ret, i;
n = avio_rl16(pb);
@@ -529,7 +528,7 @@ static int asf_read_metadata(AVFormatContext *s, int64_t size)
avio_rl16(pb); //lang_list_index
stream_num= avio_rl16(pb);
name_len= avio_rl16(pb);
value_type= avio_rl16(pb);
avio_skip(pb, 2); /* value_type */
value_len= avio_rl32(pb);
if ((ret = avio_get_str16le(pb, name_len, name, sizeof(name))) < name_len)
@@ -634,10 +633,8 @@ static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
// if so the next iteration will pick it up
continue;
} else if (!ff_guidcmp(&g, &ff_asf_head1_guid)) {
int v1, v2;
ff_get_guid(pb, &g);
v1 = avio_rl32(pb);
v2 = avio_rl16(pb);
avio_skip(pb, 6);
continue;
} else if (!ff_guidcmp(&g, &ff_asf_marker_header)) {
asf_read_marker(s, gsize);
@@ -807,7 +804,7 @@ static int asf_read_frame_header(AVFormatContext *s, AVIOContext *pb){
ASFContext *asf = s->priv_data;
int rsize = 1;
int num = avio_r8(pb);
int64_t ts0, ts1;
int64_t ts0;
asf->packet_segments--;
asf->packet_key_frame = num >> 7;
@@ -830,7 +827,7 @@ static int asf_read_frame_header(AVFormatContext *s, AVIOContext *pb){
// av_log(s, AV_LOG_DEBUG, "\n");
avio_skip(pb, 10);
ts0= avio_rl64(pb);
ts1= avio_rl64(pb);
avio_skip(pb, 8);;
avio_skip(pb, 12);
avio_rl32(pb);
avio_skip(pb, asf->packet_replic_size - 8 - 38 - 4);

@@ -337,7 +337,7 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
AVIContext *avi = s->priv_data;
AVIOContext *pb = s->pb;
unsigned int tag, tag1, handler;
int codec_type, stream_index, frame_period, bit_rate;
int codec_type, stream_index, frame_period;
unsigned int size;
int i;
AVStream *st;
@@ -407,7 +407,7 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
/* AVI header */
/* using frame_period is bad idea */
frame_period = avio_rl32(pb);
bit_rate = avio_rl32(pb) * 8;
avio_skip(pb, 4);
avio_rl32(pb);
avi->non_interleaved |= avio_rl32(pb) & AVIF_MUSTUSEINDEX;

@@ -295,9 +295,7 @@ static int gif_write_video(AVFormatContext *s,
AVCodecContext *enc, const uint8_t *buf, int size)
{
AVIOContext *pb = s->pb;
GIFContext *gif = s->priv_data;
int jiffies;
int64_t delay;
/* graphic control extension block */
avio_w8(pb, 0x21);
@@ -307,8 +305,6 @@ static int gif_write_video(AVFormatContext *s,
/* 1 jiffy is 1/70 s */
/* the delay_time field indicates the number of jiffies - 1 */
delay = gif->file_time - gif->time;
/* XXX: should use delay, in order to be more accurate */
/* instead of using the same rounded value each time */
/* XXX: don't even remember if I really use it for now */

@@ -186,13 +186,13 @@ static int mmf_read_header(AVFormatContext *s,
unsigned int tag;
AVIOContext *pb = s->pb;
AVStream *st;
int64_t file_size, size;
int64_t size;
int rate, params;
tag = avio_rl32(pb);
if (tag != MKTAG('M', 'M', 'M', 'D'))
return -1;
file_size = avio_rb32(pb);
avio_skip(pb, 4); /* file_size */
/* Skip some unused chunks that may or may not be present */
for(;; avio_skip(pb, size)) {
@@ -263,12 +263,10 @@ static int mmf_read_packet(AVFormatContext *s,
AVPacket *pkt)
{
MMFContext *mmf = s->priv_data;
AVStream *st;
int ret, size;
if (s->pb->eof_reached)
return AVERROR(EIO);
st = s->streams[0];
size = MAX_SIZE;
if(size > mmf->data_size)

@@ -464,21 +464,21 @@ static int mov_read_hdlr(MOVContext *c, AVIOContext *pb, MOVAtom atom)
int ff_mov_read_esds(AVFormatContext *fc, AVIOContext *pb, MOVAtom atom)
{
AVStream *st;
int tag, len;
int tag;
if (fc->nb_streams < 1)
return 0;
st = fc->streams[fc->nb_streams-1];
avio_rb32(pb); /* version + flags */
len = ff_mp4_read_descr(fc, pb, &tag);
ff_mp4_read_descr(fc, pb, &tag);
if (tag == MP4ESDescrTag) {
avio_rb16(pb); /* ID */
avio_r8(pb); /* priority */
} else
avio_rb16(pb); /* ID */
len = ff_mp4_read_descr(fc, pb, &tag);
ff_mp4_read_descr(fc, pb, &tag);
if (tag == MP4DecConfigDescrTag)
ff_mp4_read_dec_config_descr(fc, st, pb);
return 0;

@@ -196,8 +196,6 @@ static int ogg_read_page(AVFormatContext *s, int *str)
int flags, nsegs;
uint64_t gp;
uint32_t serial;
uint32_t seq;
uint32_t crc;
int size, idx;
uint8_t sync[4];
int sp = 0;
@@ -230,8 +228,7 @@ static int ogg_read_page(AVFormatContext *s, int *str)
flags = avio_r8(bc);
gp = avio_rl64 (bc);
serial = avio_rl32 (bc);
seq = avio_rl32 (bc);
crc = avio_rl32 (bc);
avio_skip(bc, 8); /* seq, crc */
nsegs = avio_r8(bc);
idx = ogg_find_stream (ogg, serial);

@@ -39,7 +39,6 @@ ogm_header(AVFormatContext *s, int idx)
const uint8_t *p = os->buf + os->pstart;
uint64_t time_unit;
uint64_t spu;
uint32_t default_len;
if(!(*p & 1))
return 0;
@@ -74,8 +73,7 @@ ogm_header(AVFormatContext *s, int idx)
time_unit = bytestream_get_le64(&p);
spu = bytestream_get_le64(&p);
default_len = bytestream_get_le32(&p);
p += 4; /* default_len */
p += 8; /* buffersize + bits_per_sample */
if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){

@@ -80,8 +80,6 @@ static av_cold int rl2_read_header(AVFormatContext *s,
unsigned int audio_frame_counter = 0;
unsigned int video_frame_counter = 0;
unsigned int back_size;
int data_size;
unsigned short encoding_method;
unsigned short sound_rate;
unsigned short rate;
unsigned short channels;
@@ -98,14 +96,14 @@ static av_cold int rl2_read_header(AVFormatContext *s,
avio_skip(pb,4); /* skip FORM tag */
back_size = avio_rl32(pb); /**< get size of the background frame */
signature = avio_rb32(pb);
data_size = avio_rb32(pb);
avio_skip(pb, 4); /* data size */
frame_count = avio_rl32(pb);
/* disallow back_sizes and frame_counts that may lead to overflows later */
if(back_size > INT_MAX/2 || frame_count > INT_MAX / sizeof(uint32_t))
return AVERROR_INVALIDDATA;
encoding_method = avio_rl16(pb);
avio_skip(pb, 2); /* encoding mentod */
sound_rate = avio_rl16(pb);
rate = avio_rl16(pb);
channels = avio_rl16(pb);

@@ -280,7 +280,7 @@ ff_rm_read_mdpr_codecdata (AVFormatContext *s, AVIOContext *pb,
if (rm_read_audio_stream_info(s, pb, st, rst, 0))
return -1;
} else {
int fps, fps2;
int fps;
if (avio_rl32(pb) != MKTAG('V', 'I', 'D', 'O')) {
fail1:
av_log(st->codec, AV_LOG_ERROR, "Unsupported video codec\n");
@@ -298,7 +298,7 @@ ff_rm_read_mdpr_codecdata (AVFormatContext *s, AVIOContext *pb,
fps= avio_rb16(pb);
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
avio_rb32(pb);
fps2= avio_rb16(pb);
avio_skip(pb, 2);
avio_rb16(pb);
if ((ret = rm_read_extradata(pb, st->codec, codec_data_size - (avio_tell(pb) - codec_pos))) < 0)

@@ -299,9 +299,9 @@ static int rpl_read_packet(AVFormatContext *s, AVPacket *pkt)
stream->codec->codec_tag == 124) {
// We have to split Escape 124 frames because there are
// multiple frames per chunk in Escape 124 samples.
uint32_t frame_size, frame_flags;
uint32_t frame_size;
frame_flags = avio_rl32(pb);
avio_skip(pb, 4); /* flags */
frame_size = avio_rl32(pb);
if (avio_seek(pb, -8, SEEK_CUR) < 0)
return AVERROR(EIO);

@@ -108,8 +108,7 @@ static int parse_fmtp_config(AVStream *st, char *value)
int len = ff_hex_to_data(NULL, value), i, ret = 0;
GetBitContext gb;
uint8_t *config;
int audio_mux_version, same_time_framing, num_sub_frames,
num_programs, num_layers;
int audio_mux_version, same_time_framing, num_programs, num_layers;
/* Pad this buffer, too, to avoid out of bounds reads with get_bits below */
config = av_mallocz(len + FF_INPUT_BUFFER_PADDING_SIZE);
@@ -119,7 +118,7 @@ static int parse_fmtp_config(AVStream *st, char *value)
init_get_bits(&gb, config, len*8);
audio_mux_version = get_bits(&gb, 1);
same_time_framing = get_bits(&gb, 1);
num_sub_frames = get_bits(&gb, 6);
skip_bits(&gb, 6); /* num_sub_frames */
num_programs = get_bits(&gb, 4);
num_layers = get_bits(&gb, 3);
if (audio_mux_version != 0 || same_time_framing != 1 || num_programs != 0 ||

@@ -32,7 +32,7 @@ int ff_sauce_read(AVFormatContext *avctx, uint64_t *fsize, int *got_width, int g
{
AVIOContext *pb = avctx->pb;
char buf[36];
int datatype, filetype, t1, t2, nb_comments, flags;
int datatype, filetype, t1, t2, nb_comments;
uint64_t start_pos = avio_size(pb) - 128;
avio_seek(pb, start_pos, SEEK_SET);
@@ -57,7 +57,7 @@ int ff_sauce_read(AVFormatContext *avctx, uint64_t *fsize, int *got_width, int g
t1 = avio_rl16(pb);
t2 = avio_rl16(pb);
nb_comments = avio_r8(pb);
flags = avio_r8(pb);
avio_skip(pb, 1); /* flags */
avio_skip(pb, 4);
GET_SAUCE_META("encoder", 22);

@@ -233,7 +233,6 @@ static int smacker_read_packet(AVFormatContext *s, AVPacket *pkt)
int i;
int frame_size = 0;
int palchange = 0;
int pos;
if (s->pb->eof_reached || smk->cur_frame >= smk->frames)
return AVERROR_EOF;
@@ -244,7 +243,6 @@ static int smacker_read_packet(AVFormatContext *s, AVPacket *pkt)
frame_size = smk->frm_size[smk->cur_frame] & (~3);
flags = smk->frm_flags[smk->cur_frame];
/* handle palette change event */
pos = avio_tell(s->pb);
if(flags & SMACKER_PAL){
int size, sz, t, off, j, pos;
uint8_t *pal = smk->pal;

@@ -85,7 +85,6 @@ static int sol_channels(int magic, int type)
static int sol_read_header(AVFormatContext *s,
AVFormatParameters *ap)
{
int size;
unsigned int magic,tag;
AVIOContext *pb = s->pb;
unsigned int id, channels, rate, type;
@@ -99,7 +98,7 @@ static int sol_read_header(AVFormatContext *s,
return -1;
rate = avio_rl16(pb);
type = avio_r8(pb);
size = avio_rl32(pb);
avio_skip(pb, 4); /* size */
if (magic != 0x0B8D)
avio_r8(pb); /* newer SOLs contain padding byte */

@@ -94,7 +94,7 @@ static int yuv4_write_packet(AVFormatContext *s, AVPacket *pkt)
AVPicture *picture;
int* first_pkt = s->priv_data;
int width, height, h_chroma_shift, v_chroma_shift;
int i, m;
int i;
char buf2[Y4M_LINE_MAX+1];
char buf1[20];
uint8_t *ptr, *ptr1, *ptr2;
@@ -114,7 +114,7 @@ static int yuv4_write_packet(AVFormatContext *s, AVPacket *pkt)
/* construct frame header */
m = snprintf(buf1, sizeof(buf1), "%s\n", Y4M_FRAME_MAGIC);
snprintf(buf1, sizeof(buf1), "%s\n", Y4M_FRAME_MAGIC);
avio_write(pb, buf1, strlen(buf1));
width = st->codec->width;
