fix some signedness warnings

Originally committed as revision 6355 to svn://svn.ffmpeg.org/ffmpeg/trunk
Author: Måns Rullgård (19 years ago)
parent d80f243ae9
commit 191e8ca752
Changed files (lines changed in each):
  1. libavcodec/4xm.c (4)
  2. libavcodec/cyuv.c (6)
  3. libavcodec/error_resilience.c (6)
  4. libavcodec/h263.c (5)
  5. libavcodec/h264.c (6)
  6. libavcodec/indeo3.c (2)
  7. libavcodec/mjpeg.c (2)
  8. libavcodec/mpeg12.c (8)
  9. libavcodec/mpeg12data.h (4)
  10. libavcodec/mpegaudiodec.c (2)
  11. libavcodec/mpegvideo.c (2)
  12. libavcodec/mpegvideo.h (10)
  13. libavcodec/msmpeg4.c (2)
  14. libavcodec/parser.c (2)
  15. libavcodec/svq1.c (4)
  16. libavcodec/vc1.c (14)
  17. libavcodec/vp3.c (2)
  18. libavcodec/wmadec.c (2)
  19. libavformat/amr.c (4)
  20. libavformat/asf.c (2)
  21. libavformat/avformat.h (2)
  22. libavformat/avienc.c (9)
  23. libavformat/matroska.c (2)
  24. libavformat/mpeg.c (4)
  25. libavformat/ogg2.c (2)
  26. libavformat/ogg2.h (4)
  27. libavformat/oggparsevorbis.c (2)
  28. libavformat/rtp.c (2)
  29. libavformat/rtpproto.c (3)
  30. libavformat/udp.c (3)
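
Most of the hunks below are variations on two warning patterns: a pointer whose signedness does not match the prototype it is passed to, and a variable that is compared against a negative sentinel after being made unsigned. The following sketch is illustrative only, not code from the tree, and the names in it are invented; it shows both patterns and the kind of change, a cast or a different declared type, that keeps warnings like gcc's -Wpointer-sign and -Wsign-compare quiet (see, for example, the cyuv.c, mjpeg.c and mpeg12.c hunks below).

    /* illustrative sketch, not FFmpeg code */
    #include <stdint.h>
    #include <stdio.h>

    static void print_text(const char *text)   /* expects plain char */
    {
        printf("%s\n", text);
    }

    int main(void)
    {
        uint8_t raw[] = "hi";                  /* unsigned buffer, as decoders use */

        /* pattern 1: pointer signedness mismatch; an explicit cast (or a
         * matching declaration) keeps -Wpointer-sign quiet */
        print_text((const char *)raw);

        /* pattern 2: once start_code is unsigned, "start_code < 0" can never
         * be true, so the "not found" test has to change, e.g. to > 0x1ff
         * as in the mpeg12.c hunk below */
        uint32_t start_code = 0xffffffffu;     /* value left by a failed search */
        if (start_code > 0x1ff)
            printf("no start code found\n");
        return 0;
    }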

libavcodec/4xm.c
@@ -104,8 +104,8 @@ static VLC block_type_vlc[4];
 typedef struct CFrameBuffer{
-int allocated_size;
-int size;
+unsigned int allocated_size;
+unsigned int size;
 int id;
 uint8_t *data;
 }CFrameBuffer;

libavcodec/cyuv.c
@@ -75,9 +75,9 @@ static int cyuv_decode_frame(AVCodecContext *avctx,
 int v_ptr;
 /* prediction error tables (make it clear that they are signed values) */
-signed char *y_table = buf + 0;
-signed char *u_table = buf + 16;
-signed char *v_table = buf + 32;
+signed char *y_table = (signed char*)buf + 0;
+signed char *u_table = (signed char*)buf + 16;
+signed char *v_table = (signed char*)buf + 32;
 unsigned char y_pred, u_pred, v_pred;
 int stream_ptr;

libavcodec/error_resilience.c
@@ -70,7 +70,7 @@ static void put_dc(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t
 }
 }
-static void filter181(int16_t *data, int width, int height, int stride){
+static void filter181(uint16_t *data, int width, int height, int stride){
 int x,y;
 /* horizontal filter */
@@ -111,7 +111,7 @@ static void filter181(int16_t *data, int width, int height, int stride){
 * @param w width in 8 pixel blocks
 * @param h height in 8 pixel blocks
 */
-static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, int is_luma){
+static void guess_dc(MpegEncContext *s, uint16_t *dc, int w, int h, int stride, int is_luma){
 int b_x, b_y;
 for(b_y=0; b_y<h; b_y++){
@@ -931,7 +931,7 @@ void ff_er_frame_end(MpegEncContext *s){
 for(mb_y=0; mb_y<s->mb_height; mb_y++){
 for(mb_x=0; mb_x<s->mb_width; mb_x++){
 int dc, dcu, dcv, y, n;
-int16_t *dc_ptr;
+uint16_t *dc_ptr;
 uint8_t *dest_y, *dest_cb, *dest_cr;
 const int mb_xy= mb_x + mb_y * s->mb_stride;
 const int mb_type= s->current_picture.mb_type[mb_xy];

libavcodec/h263.c
@@ -1518,7 +1518,7 @@ void ff_h263_loop_filter(MpegEncContext * s){
 static int h263_pred_dc(MpegEncContext * s, int n, uint16_t **dc_val_ptr)
 {
 int x, y, wrap, a, c, pred_dc, scale;
-int16_t *dc_val;
+uint16_t *dc_val;
 /* find prediction */
 if (n < 4) {
@@ -1563,7 +1563,8 @@ static int h263_pred_dc(MpegEncContext * s, int n, uint16_t **dc_val_ptr)
 static void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
 {
 int x, y, wrap, a, c, pred_dc, scale, i;
-int16_t *dc_val, *ac_val, *ac_val1;
+uint16_t *dc_val;
+int16_t *ac_val, *ac_val1;
 /* find prediction */
 if (n < 4) {
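
The dc_val changes in this file, and the matching ones in msmpeg4.c, vc1.c, error_resilience.c and mpegvideo.h further down, come from one mismatch: the prediction helpers take (or now take) a uint16_t **dc_val_ptr out-parameter while the locals and the arrays behind them were still int16_t. A minimal sketch of that mismatch, with invented names and values, assuming nothing beyond what the hunks show:

    /* illustrative sketch with invented names, not FFmpeg code: storing an
     * int16_t pointer through a uint16_t ** out-parameter is what warned
     * here; declaring both sides uint16_t resolves it */
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t dc_storage[3] = { 1024, 1024, 1024 };

    static int pred_dc(uint16_t **dc_val_ptr)
    {
        uint16_t *dc_val = dc_storage;   /* was effectively int16_t * before */
        *dc_val_ptr = dc_val;            /* the assignment that used to warn */
        return dc_val[0];
    }

    int main(void)
    {
        uint16_t *dc;
        printf("%d\n", pred_dc(&dc));
        return 0;
    }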

libavcodec/h264.c
@@ -619,7 +619,7 @@ static void fill_caches(H264Context *h, int mb_type, int for_deblock){
 if(USES_LIST(mb_type,list)){
 uint32_t *src = (uint32_t*)s->current_picture.motion_val[list][h->mb2b_xy[mb_xy]];
 uint32_t *dst = (uint32_t*)h->mv_cache[list][scan8[0]];
-uint8_t *ref = &s->current_picture.ref_index[list][h->mb2b8_xy[mb_xy]];
+int8_t *ref = &s->current_picture.ref_index[list][h->mb2b8_xy[mb_xy]];
 for(i=0; i<4; i++, dst+=8, src+=h->b_stride){
 dst[0] = src[0];
 dst[1] = src[1];
@@ -1133,7 +1133,7 @@ static inline int fetch_diagonal_mv(H264Context *h, const int16_t **C, int i, in
 * make mbaff happy, so we can't move all this logic to fill_caches */
 if(FRAME_MBAFF){
 MpegEncContext *s = &h->s;
-const int *mb_types = s->current_picture_ptr->mb_type;
+const uint32_t *mb_types = s->current_picture_ptr->mb_type;
 const int16_t *mv;
 *(uint32_t*)h->mv_cache[list][scan8[0]-2] = 0;
 *C = h->mv_cache[list][scan8[0]-2];
@@ -1721,7 +1721,7 @@ static inline void write_back_motion(H264Context *h, int mb_type){
 }
 {
-uint8_t *ref_index = &s->current_picture.ref_index[list][b8_xy];
+int8_t *ref_index = &s->current_picture.ref_index[list][b8_xy];
 ref_index[0+0*h->b8_stride]= h->ref_cache[list][scan8[0]];
 ref_index[1+0*h->b8_stride]= h->ref_cache[list][scan8[4]];
 ref_index[0+1*h->b8_stride]= h->ref_cache[list][scan8[8]];
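
The ref_index change above is one place where the sign is semantically meaningful rather than just a warning: reference indices may be negative (for example -1 for blocks without a reference), and stored in a uint8_t that value turns into 255 as soon as it is promoted in a comparison. A tiny standalone sketch with hypothetical values:

    /* hypothetical values, illustrating why a signed 8-bit type matters here */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int8_t  ref_signed   = -1;   /* "no reference" stays negative */
        uint8_t ref_unsigned = -1;   /* wraps to 255 */
        printf("%d %d\n", ref_signed >= 0, ref_unsigned >= 0);   /* prints: 0 1 */
        return 0;
    }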

libavcodec/indeo3.c
@@ -381,7 +381,7 @@ static void iv_Decode_Chunk(Indeo3DecodeContext *s,
 } else if(cmd == 3) {
 if(strip->usl7 == 0) {
 strip->usl7 = 1;
-ref_vectors = buf2 + (*buf1 * 2);
+ref_vectors = (signed char*)buf2 + (*buf1 * 2);
 buf1++;
 continue;
 }

libavcodec/mjpeg.c
@@ -1801,7 +1801,7 @@ static int mjpeg_decode_com(MJpegDecodeContext *s)
 {
 int len = get_bits(&s->gb, 16);
 if (len >= 2 && 8*len - 16 + get_bits_count(&s->gb) <= s->gb.size_in_bits) {
-uint8_t *cbuf = av_malloc(len - 1);
+char *cbuf = av_malloc(len - 1);
 if (cbuf) {
 int i;
 for (i = 0; i < len - 2; i++)

libavcodec/mpeg12.c
@@ -2695,7 +2695,8 @@ static int slice_decode_thread(AVCodecContext *c, void *arg){
 s->error_count= 3*(s->end_mb_y - s->start_mb_y)*s->mb_width;
 for(;;){
-int start_code, ret;
+uint32_t start_code;
+int ret;
 ret= mpeg_decode_slice((Mpeg1Context*)s, mb_y, &buf, s->gb.buffer_end - buf);
 emms_c();
@@ -3033,7 +3034,8 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
 Mpeg1Context *s = avctx->priv_data;
 const uint8_t *buf_end;
 const uint8_t *buf_ptr;
-int ret, start_code, input_size;
+uint32_t start_code;
+int ret, input_size;
 AVFrame *picture = data;
 MpegEncContext *s2 = &s->mpeg_enc_ctx;
 dprintf("fill_buffer\n");
@@ -3080,7 +3082,7 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
 /* find start next code */
 start_code = -1;
 buf_ptr = ff_find_start_code(buf_ptr,buf_end, &start_code);
-if (start_code < 0){
+if (start_code > 0x1ff){
 if(s2->pict_type != B_TYPE || avctx->skip_frame <= AVDISCARD_DEFAULT){
 if(avctx->thread_count > 1){
 int i;

libavcodec/mpeg12data.h
@@ -23,7 +23,7 @@
 * MPEG1/2 tables.
 */
-const int16_t ff_mpeg1_default_intra_matrix[64] = {
+const uint16_t ff_mpeg1_default_intra_matrix[64] = {
 8, 16, 19, 22, 26, 27, 29, 34,
 16, 16, 22, 24, 27, 29, 34, 37,
 19, 22, 26, 27, 29, 34, 34, 38,
@@ -34,7 +34,7 @@ const int16_t ff_mpeg1_default_intra_matrix[64] = {
 27, 29, 35, 38, 46, 56, 69, 83
 };
-const int16_t ff_mpeg1_default_non_intra_matrix[64] = {
+const uint16_t ff_mpeg1_default_non_intra_matrix[64] = {
 16, 16, 16, 16, 16, 16, 16, 16,
 16, 16, 16, 16, 16, 16, 16, 16,
 16, 16, 16, 16, 16, 16, 16, 16,

libavcodec/mpegaudiodec.c
@@ -86,7 +86,7 @@ typedef struct MPADecodeContext {
 #endif
 void (*compute_antialias)(struct MPADecodeContext *s, struct GranuleDef *g);
 int adu_mode; ///< 0 for standard mp3, 1 for adu formatted mp3
-unsigned int dither_state;
+int dither_state;
 } MPADecodeContext;
 /**

libavcodec/mpegvideo.c
@@ -217,7 +217,7 @@ void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_s
 }
 #ifdef CONFIG_ENCODERS
-void ff_write_quant_matrix(PutBitContext *pb, int16_t *matrix){
+void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix){
 int i;
 if(matrix){

libavcodec/mpegvideo.h
@@ -344,8 +344,8 @@ typedef struct MpegEncContext {
 Picture *current_picture_ptr; ///< pointer to the current picture
 uint8_t *visualization_buffer[3]; //< temporary buffer vor MV visualization
 int last_dc[3]; ///< last DC values for MPEG1
-int16_t *dc_val_base;
-int16_t *dc_val[3]; ///< used for mpeg4 DC prediction, all 3 arrays must be continuous
+uint16_t *dc_val_base;
+uint16_t *dc_val[3]; ///< used for mpeg4 DC prediction, all 3 arrays must be continuous
 int16_t dc_cache[4*5];
 int y_dc_scale, c_dc_scale;
 const uint8_t *y_dc_scale_table; ///< qscale -> y_dc_scale table
@@ -772,7 +772,7 @@ int ff_combine_frame(ParseContext *pc, int next, uint8_t **buf, int *buf_size);
 void ff_parse_close(AVCodecParserContext *s);
 void ff_mpeg_flush(AVCodecContext *avctx);
 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict);
-void ff_write_quant_matrix(PutBitContext *pb, int16_t *matrix);
+void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix);
 int ff_find_unused_picture(MpegEncContext *s, int shared);
 void ff_denoise_dct(MpegEncContext *s, DCTELEM *block);
 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src);
@@ -828,8 +828,8 @@ int inline ff_get_mb_score(MpegEncContext * s, int mx, int my, int src_index,
 int ref_index, int size, int h, int add_rate);
 /* mpeg12.c */
-extern const int16_t ff_mpeg1_default_intra_matrix[64];
-extern const int16_t ff_mpeg1_default_non_intra_matrix[64];
+extern const uint16_t ff_mpeg1_default_intra_matrix[64];
+extern const uint16_t ff_mpeg1_default_non_intra_matrix[64];
 extern const uint8_t ff_mpeg1_dc_scale_table[128];
 void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number);

libavcodec/msmpeg4.c
@@ -630,7 +630,7 @@ static inline int msmpeg4_pred_dc(MpegEncContext * s, int n,
 uint16_t **dc_val_ptr, int *dir_ptr)
 {
 int a, b, c, wrap, pred, scale;
-int16_t *dc_val;
+uint16_t *dc_val;
 /* find prediction */
 if (n < 4) {

libavcodec/parser.c
@@ -306,7 +306,7 @@ static void mpegvideo_extract_headers(AVCodecParserContext *s,
 {
 ParseContext1 *pc = s->priv_data;
 const uint8_t *buf_end;
-int32_t start_code;
+uint32_t start_code;
 int frame_rate_index, ext_type, bytes_left;
 int frame_rate_ext_n, frame_rate_ext_d;
 int picture_structure, top_field_first, repeat_first_field, progressive_frame;

libavcodec/svq1.c
@@ -655,9 +655,9 @@ static int svq1_decode_frame_header (GetBitContext *bitbuf,MpegEncContext *s) {
 }
 if ((s->f_code ^ 0x10) >= 0x50) {
-char msg[256];
-svq1_parse_string (bitbuf, (char *) msg);
+uint8_t msg[256];
+svq1_parse_string (bitbuf, msg);
 av_log(s->avctx, AV_LOG_INFO, "embedded message: \"%s\"\n", (char *) msg);
 }

libavcodec/vc1.c
@@ -2341,10 +2341,10 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int
 * @param dir_ptr Prediction direction for use in AC prediction
 */
 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
-int16_t **dc_val_ptr, int *dir_ptr)
+uint16_t **dc_val_ptr, int *dir_ptr)
 {
 int a, b, c, wrap, pred, scale;
-int16_t *dc_val;
+uint16_t *dc_val;
 static const uint16_t dcpred[32] = {
 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
 114, 102, 93, 85, 79, 73, 68, 64,
@@ -2402,10 +2402,10 @@ static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
 */
 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
 int a_avail, int c_avail,
-int16_t **dc_val_ptr, int *dir_ptr)
+uint16_t **dc_val_ptr, int *dir_ptr)
 {
 int a, b, c, wrap, pred, scale;
-int16_t *dc_val;
+uint16_t *dc_val;
 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
 int q1, q2 = 0;
@@ -2578,7 +2578,7 @@ static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n, int coded
 MpegEncContext *s = &v->s;
 int dc_pred_dir = 0; /* Direction of the DC prediction used */
 int run_diff, i;
-int16_t *dc_val;
+uint16_t *dc_val;
 int16_t *ac_val, *ac_val2;
 int dcdiff;
@@ -2743,7 +2743,7 @@ static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int c
 MpegEncContext *s = &v->s;
 int dc_pred_dir = 0; /* Direction of the DC prediction used */
 int run_diff, i;
-int16_t *dc_val;
+uint16_t *dc_val;
 int16_t *ac_val, *ac_val2;
 int dcdiff;
 int a_avail = v->a_avail, c_avail = v->c_avail;
@@ -2940,7 +2940,7 @@ static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int c
 MpegEncContext *s = &v->s;
 int dc_pred_dir = 0; /* Direction of the DC prediction used */
 int run_diff, i;
-int16_t *dc_val;
+uint16_t *dc_val;
 int16_t *ac_val, *ac_val2;
 int dcdiff;
 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;

libavcodec/vp3.c
@@ -315,7 +315,7 @@ typedef struct Vp3DecodeContext {
 int last_coded_c_fragment;
 uint8_t edge_emu_buffer[9*2048]; //FIXME dynamic alloc
-uint8_t qscale_table[2048]; //FIXME dynamic alloc (width+15)/16
+int8_t qscale_table[2048]; //FIXME dynamic alloc (width+15)/16
 /* Huffman decode */
 int hti;

libavcodec/wmadec.c
@@ -871,7 +871,7 @@ static int wma_decode_block(WMADecodeContext *s)
 VLC *coef_vlc;
 int level, run, sign, tindex;
 int16_t *ptr, *eptr;
-const int16_t *level_table, *run_table;
+const uint16_t *level_table, *run_table;
 /* special VLC tables are used for ms stereo because
 there is potentially less energy there */

libavformat/amr.c
@@ -25,8 +25,8 @@ Only mono files are supported.
 */
 #include "avformat.h"
-static const unsigned char AMR_header [] = "#!AMR\n";
-static const unsigned char AMRWB_header [] = "#!AMR-WB\n";
+static const char AMR_header [] = "#!AMR\n";
+static const char AMRWB_header [] = "#!AMR-WB\n";
 #ifdef CONFIG_MUXERS
 static int amr_write_header(AVFormatContext *s)

libavformat/asf.c
@@ -705,7 +705,7 @@ static int asf_read_packet(AVFormatContext *s, AVPacket *pkt)
 /* return packet */
 if (asf_st->ds_span > 1) {
 /* packet descrambling */
-char* newdata = av_malloc(asf_st->pkt.size);
+uint8_t *newdata = av_malloc(asf_st->pkt.size);
 if (newdata) {
 int offset = 0;
 while (offset < asf_st->pkt.size) {

libavformat/avformat.h
@@ -281,7 +281,7 @@ typedef struct AVStream {
 AVIndexEntry *index_entries; /* only used if the format does not
 support seeking natively */
 int nb_index_entries;
-int index_entries_allocated_size;
+unsigned int index_entries_allocated_size;
 int64_t nb_frames; ///< number of frames in this stream if known or 0

libavformat/avienc.c
@@ -73,8 +73,7 @@ static offset_t avi_start_new_riff(AVIContext *avi, ByteIOContext *pb,
 return loff;
 }
-static unsigned char* avi_stream2fourcc(unsigned char* tag, int index,
-enum CodecType type)
+static char* avi_stream2fourcc(char* tag, int index, enum CodecType type)
 {
 tag[0] = '0';
 tag[1] = '0' + index;
@@ -338,8 +337,8 @@ static int avi_write_ix(AVFormatContext *s)
 {
 ByteIOContext *pb = &s->pb;
 AVIContext *avi = s->priv_data;
-unsigned char tag[5];
-unsigned char ix_tag[] = "ix00";
+char tag[5];
+char ix_tag[] = "ix00";
 int i, j;
 assert(!url_is_streamed(pb));
@@ -397,7 +396,7 @@ static int avi_write_idx1(AVFormatContext *s)
 AVIContext *avi = s->priv_data;
 offset_t idx_chunk;
 int i;
-unsigned char tag[5];
+char tag[5];
 if (!url_is_streamed(pb)) {
 AVIIentry* ie = 0, *tie;

libavformat/matroska.c
@@ -1667,7 +1667,7 @@ matroska_parse_index (MatroskaDemuxContext *matroska)
 switch (id) {
 /* one single index entry ('point') */
 case MATROSKA_ID_CUETIME: {
-int64_t time;
+uint64_t time;
 if ((res = ebml_read_uint(matroska, &id,
 &time)) < 0)
 break;

libavformat/mpeg.c
@@ -1308,7 +1308,7 @@ static int mpegps_probe(AVProbeData *p)
 typedef struct MpegDemuxContext {
-int header_state;
+int32_t header_state;
 unsigned char psm_es_type[256];
 } MpegDemuxContext;
@@ -1339,7 +1339,7 @@ static int64_t get_pts(ByteIOContext *pb, int c)
 }
 static int find_next_start_code(ByteIOContext *pb, int *size_ptr,
-uint32_t *header_state)
+int32_t *header_state)
 {
 unsigned int state, v;
 int val, n;

libavformat/ogg2.c
@@ -235,7 +235,7 @@ ogg_read_page (AVFormatContext * s, int *str)
 uint32_t seq;
 uint32_t crc;
 int size, idx;
-char sync[4];
+uint8_t sync[4];
 int sp = 0;
 if (get_buffer (bc, sync, 4) < 4)

libavformat/ogg2.h
@@ -28,7 +28,7 @@
 #include "avformat.h"
 typedef struct ogg_codec {
-uint8_t *magic;
+int8_t *magic;
 uint8_t magicsize;
 int8_t *name;
 int (*header)(AVFormatContext *, int);
@@ -80,6 +80,6 @@ extern ogg_codec_t ogm_video_codec;
 extern ogg_codec_t ogm_audio_codec;
 extern ogg_codec_t ogm_old_codec;
-extern int vorbis_comment(AVFormatContext *ms, char *buf, int size);
+extern int vorbis_comment(AVFormatContext *ms, uint8_t *buf, int size);
 #endif

libavformat/oggparsevorbis.c
@@ -29,7 +29,7 @@
 #include "ogg2.h"
 extern int
-vorbis_comment (AVFormatContext * as, char *buf, int size)
+vorbis_comment (AVFormatContext * as, uint8_t *buf, int size)
 {
 char *p = buf;
 int s, n, j;

libavformat/rtp.c
@@ -495,7 +495,7 @@ int rtp_parse_packet(RTPDemuxContext *s, AVPacket *pkt,
 len -= infos->au_headers[0].size;
 }
 s->read_buf_size = len;
-s->buf_ptr = (char *)buf;
+s->buf_ptr = buf;
 pkt->stream_index = s->st->index;
 return 0;
 default:

libavformat/rtpproto.c
@@ -175,7 +175,8 @@ static int rtp_read(URLContext *h, uint8_t *buf, int size)
 {
 RTPContext *s = h->priv_data;
 struct sockaddr_in from;
-int from_len, len, fd_max, n;
+socklen_t from_len;
+int len, fd_max, n;
 fd_set rfds;
 #if 0
 for(;;) {

libavformat/udp.c
@@ -428,7 +428,8 @@ static int udp_read(URLContext *h, uint8_t *buf, int size)
 #else
 struct sockaddr_storage from;
 #endif
-int from_len, len;
+socklen_t from_len;
+int len;
 for(;;) {
 from_len = sizeof(from);
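
The last two hunks, rtpproto.c and udp.c, fix the same pattern: POSIX declares the address-length argument of recvfrom() as socklen_t *, so passing the address of a plain int draws a pointer signedness/type warning, and both files switch the local to socklen_t. A self-contained sketch of the corrected pattern (illustrative, not the FFmpeg code; error handling omitted and MSG_DONTWAIT assumed available):

    /* illustrative sketch, not FFmpeg code */
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <stdio.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in from;
        socklen_t from_len = sizeof(from);   /* was: int from_len; */
        char buf[1500];

        /* MSG_DONTWAIT makes the call return immediately; the point here is
         * only that &from_len now matches the socklen_t * parameter */
        ssize_t len = recvfrom(fd, buf, sizeof(buf), MSG_DONTWAIT,
                               (struct sockaddr *)&from, &from_len);
        printf("recvfrom returned %zd\n", len);
        return 0;
    }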
