replace deprecated FF_*_TYPE symbols with AV_PICTURE_TYPE_*

pull/2/head
Stefano Sabatini 14 years ago
parent 301183d9be
commit ce5e49b0c2
  1. 2
      ffmpeg.c
  2. 8
      libavcodec/4xm.c
  3. 4
      libavcodec/a64multienc.c
  4. 4
      libavcodec/ansi.c
  5. 4
      libavcodec/asv1.c
  6. 2
      libavcodec/avcodec.h
  7. 4
      libavcodec/avs.c
  8. 4
      libavcodec/bfi.c
  9. 2
      libavcodec/bmp.c
  10. 2
      libavcodec/bmpenc.c
  11. 4
      libavcodec/c93.c
  12. 2
      libavcodec/cavs.h
  13. 30
      libavcodec/cavsdec.c
  14. 4
      libavcodec/cljr.c
  15. 4
      libavcodec/cscd.c
  16. 2
      libavcodec/dirac_parser.c
  17. 2
      libavcodec/dnxhddec.c
  18. 2
      libavcodec/dnxhdenc.c
  19. 2
      libavcodec/dpxenc.c
  20. 4
      libavcodec/dv.c
  21. 8
      libavcodec/dxa.c
  22. 4
      libavcodec/dxva2_h264.c
  23. 8
      libavcodec/dxva2_mpeg2.c
  24. 12
      libavcodec/dxva2_vc1.c
  25. 4
      libavcodec/eacmv.c
  26. 2
      libavcodec/eatgq.c
  27. 4
      libavcodec/eatgv.c
  28. 8
      libavcodec/error_resilience.c
  29. 4
      libavcodec/ffv1.c
  30. 4
      libavcodec/flashsvenc.c
  31. 6
      libavcodec/flvdec.c
  32. 2
      libavcodec/flvenc.c
  33. 20
      libavcodec/fraps.c
  34. 2
      libavcodec/frwu.c
  35. 2
      libavcodec/gif.c
  36. 10
      libavcodec/h261dec.c
  37. 2
      libavcodec/h263.c
  38. 22
      libavcodec/h263dec.c
  39. 60
      libavcodec/h264.c
  40. 6
      libavcodec/h264.h
  41. 20
      libavcodec/h264_cabac.c
  42. 14
      libavcodec/h264_cavlc.c
  43. 2
      libavcodec/h264_direct.c
  44. 2
      libavcodec/h264_parser.c
  45. 4
      libavcodec/h264_refs.c
  46. 2
      libavcodec/h264data.h
  47. 2
      libavcodec/huffyuv.c
  48. 2
      libavcodec/intelh263dec.c
  49. 24
      libavcodec/ituh263dec.c
  50. 10
      libavcodec/ituh263enc.c
  51. 2
      libavcodec/jpeglsenc.c
  52. 2
      libavcodec/jvdec.c
  53. 4
      libavcodec/kmvc.c
  54. 2
      libavcodec/lclenc.c
  55. 4
      libavcodec/libvpxenc.c
  56. 12
      libavcodec/libx264.c
  57. 6
      libavcodec/libxavs.c
  58. 2
      libavcodec/libxvid_rc.c
  59. 14
      libavcodec/libxvidff.c
  60. 2
      libavcodec/ljpegenc.c
  61. 2
      libavcodec/mdec.c
  62. 2
      libavcodec/mimic.c
  63. 2
      libavcodec/mjpegdec.c
  64. 4
      libavcodec/motion_est.c
  65. 2
      libavcodec/motion_est_template.c
  66. 48
      libavcodec/mpeg12.c
  67. 18
      libavcodec/mpeg12enc.c
  68. 8
      libavcodec/mpeg4video.c
  69. 76
      libavcodec/mpeg4videodec.c
  70. 30
      libavcodec/mpeg4videoenc.c
  71. 64
      libavcodec/mpegvideo.c
  72. 2
      libavcodec/mpegvideo.h
  73. 6
      libavcodec/mpegvideo_common.h
  74. 74
      libavcodec/mpegvideo_enc.c
  75. 6
      libavcodec/mpegvideo_xvmc.c
  76. 28
      libavcodec/msmpeg4.c
  77. 4
      libavcodec/mxpegdec.c
  78. 2
      libavcodec/nuv.c
  79. 2
      libavcodec/pamenc.c
  80. 4
      libavcodec/parser.c
  81. 2
      libavcodec/pcx.c
  82. 2
      libavcodec/pcxenc.c
  83. 2
      libavcodec/pictordec.c
  84. 2
      libavcodec/pngdec.c
  85. 2
      libavcodec/pngenc.c
  86. 2
      libavcodec/pnmdec.c
  87. 2
      libavcodec/pnmenc.c
  88. 2
      libavcodec/ptx.c
  89. 2
      libavcodec/qdrw.c
  90. 4
      libavcodec/qtrleenc.c
  91. 2
      libavcodec/r210dec.c
  92. 66
      libavcodec/ratecontrol.c
  93. 2
      libavcodec/rawdec.c
  94. 2
      libavcodec/rawenc.c
  95. 26
      libavcodec/rv10.c
  96. 4
      libavcodec/rv10enc.c
  97. 2
      libavcodec/rv20enc.c
  98. 2
      libavcodec/rv30.c
  99. 20
      libavcodec/rv34.c
  100. 2
      libavcodec/rv40.c
  101. Some files were not shown because too many files have changed in this diff Show More

@@ -1270,7 +1270,7 @@ static void do_video_out(AVFormatContext *s,
//av_log(NULL, AV_LOG_DEBUG, "%"PRId64" -> encoder\n", ost->sync_opts); //av_log(NULL, AV_LOG_DEBUG, "%"PRId64" -> encoder\n", ost->sync_opts);
if (ost->forced_kf_index < ost->forced_kf_count && if (ost->forced_kf_index < ost->forced_kf_count &&
big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) { big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
big_picture.pict_type = FF_I_TYPE; big_picture.pict_type = AV_PICTURE_TYPE_I;
ost->forced_kf_index++; ost->forced_kf_index++;
} }
ret = avcodec_encode_video(enc, ret = avcodec_encode_video(enc,

@@ -780,11 +780,11 @@ static int decode_frame(AVCodecContext *avctx,
} }
if(frame_4cc == AV_RL32("ifr2")){ if(frame_4cc == AV_RL32("ifr2")){
p->pict_type= FF_I_TYPE; p->pict_type= AV_PICTURE_TYPE_I;
if(decode_i2_frame(f, buf-4, frame_size) < 0) if(decode_i2_frame(f, buf-4, frame_size) < 0)
return -1; return -1;
}else if(frame_4cc == AV_RL32("ifrm")){ }else if(frame_4cc == AV_RL32("ifrm")){
p->pict_type= FF_I_TYPE; p->pict_type= AV_PICTURE_TYPE_I;
if(decode_i_frame(f, buf, frame_size) < 0) if(decode_i_frame(f, buf, frame_size) < 0)
return -1; return -1;
}else if(frame_4cc == AV_RL32("pfrm") || frame_4cc == AV_RL32("pfr2")){ }else if(frame_4cc == AV_RL32("pfrm") || frame_4cc == AV_RL32("pfr2")){
@@ -796,7 +796,7 @@ static int decode_frame(AVCodecContext *avctx,
} }
} }
p->pict_type= FF_P_TYPE; p->pict_type= AV_PICTURE_TYPE_P;
if(decode_p_frame(f, buf, frame_size) < 0) if(decode_p_frame(f, buf, frame_size) < 0)
return -1; return -1;
}else if(frame_4cc == AV_RL32("snd_")){ }else if(frame_4cc == AV_RL32("snd_")){
@@ -805,7 +805,7 @@ static int decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "ignoring unknown chunk length:%d\n", buf_size); av_log(avctx, AV_LOG_ERROR, "ignoring unknown chunk length:%d\n", buf_size);
} }
p->key_frame= p->pict_type == FF_I_TYPE; p->key_frame= p->pict_type == AV_PICTURE_TYPE_I;
*picture= *p; *picture= *p;
*data_size = sizeof(AVPicture); *data_size = sizeof(AVPicture);

@@ -216,7 +216,7 @@ static av_cold int a64multi_init_encoder(AVCodecContext *avctx)
avcodec_get_frame_defaults(&c->picture); avcodec_get_frame_defaults(&c->picture);
avctx->coded_frame = &c->picture; avctx->coded_frame = &c->picture;
avctx->coded_frame->pict_type = FF_I_TYPE; avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1; avctx->coded_frame->key_frame = 1;
if (!avctx->codec_tag) if (!avctx->codec_tag)
avctx->codec_tag = AV_RL32("a64m"); avctx->codec_tag = AV_RL32("a64m");
@@ -290,7 +290,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf,
/* fill up mc_meta_charset with data until lifetime exceeds */ /* fill up mc_meta_charset with data until lifetime exceeds */
if (c->mc_frame_counter < c->mc_lifetime) { if (c->mc_frame_counter < c->mc_lifetime) {
*p = *pict; *p = *pict;
p->pict_type = FF_I_TYPE; p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1; p->key_frame = 1;
to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter); to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter);
c->mc_frame_counter++; c->mc_frame_counter++;

@@ -226,7 +226,7 @@ static int execute_code(AVCodecContext * avctx, int c)
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret; return ret;
} }
s->frame.pict_type = FF_I_TYPE; s->frame.pict_type = AV_PICTURE_TYPE_I;
s->frame.palette_has_changed = 1; s->frame.palette_has_changed = 1;
memcpy(s->frame.data[1], ff_cga_palette, 16 * 4); memcpy(s->frame.data[1], ff_cga_palette, 16 * 4);
erase_screen(avctx); erase_screen(avctx);
@@ -323,7 +323,7 @@ static int decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret; return ret;
} }
s->frame.pict_type = FF_I_TYPE; s->frame.pict_type = AV_PICTURE_TYPE_I;
s->frame.palette_has_changed = 1; s->frame.palette_has_changed = 1;
memcpy(s->frame.data[1], ff_cga_palette, 16 * 4); memcpy(s->frame.data[1], ff_cga_palette, 16 * 4);

@@ -405,7 +405,7 @@ static int decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1; return -1;
} }
p->pict_type= FF_I_TYPE; p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1; p->key_frame= 1;
av_fast_malloc(&a->bitstream_buffer, &a->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE); av_fast_malloc(&a->bitstream_buffer, &a->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
@@ -470,7 +470,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
init_put_bits(&a->pb, buf, buf_size); init_put_bits(&a->pb, buf, buf_size);
*p = *pict; *p = *pict;
p->pict_type= FF_I_TYPE; p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1; p->key_frame= 1;
for(mb_y=0; mb_y<a->mb_height2; mb_y++){ for(mb_y=0; mb_y<a->mb_height2; mb_y++){

@@ -3920,7 +3920,7 @@ typedef struct AVCodecParserContext {
/*! /*!
* Set by parser to 1 for key frames and 0 for non-key frames. * Set by parser to 1 for key frames and 0 for non-key frames.
* It is initialized to -1, so if the parser doesn't set this flag, * It is initialized to -1, so if the parser doesn't set this flag,
* old-style fallback using FF_I_TYPE picture type as key frames * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames
* will be used. * will be used.
*/ */
int key_frame; int key_frame;

@@ -63,7 +63,7 @@ avs_decode_frame(AVCodecContext * avctx,
return -1; return -1;
} }
p->reference = 1; p->reference = 1;
p->pict_type = FF_P_TYPE; p->pict_type = AV_PICTURE_TYPE_P;
p->key_frame = 0; p->key_frame = 0;
out = avs->picture.data[0]; out = avs->picture.data[0];
@@ -93,7 +93,7 @@ avs_decode_frame(AVCodecContext * avctx,
switch (sub_type) { switch (sub_type) {
case AVS_I_FRAME: case AVS_I_FRAME:
p->pict_type = FF_I_TYPE; p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1; p->key_frame = 1;
case AVS_P_FRAME_3X3: case AVS_P_FRAME_3X3:
vect_w = 3; vect_w = 3;

@@ -69,7 +69,7 @@ static int bfi_decode_frame(AVCodecContext * avctx, void *data,
/* Set frame parameters and palette, if necessary */ /* Set frame parameters and palette, if necessary */
if (!avctx->frame_number) { if (!avctx->frame_number) {
bfi->frame.pict_type = FF_I_TYPE; bfi->frame.pict_type = AV_PICTURE_TYPE_I;
bfi->frame.key_frame = 1; bfi->frame.key_frame = 1;
/* Setting the palette */ /* Setting the palette */
if(avctx->extradata_size>768) { if(avctx->extradata_size>768) {
@@ -89,7 +89,7 @@ static int bfi_decode_frame(AVCodecContext * avctx, void *data,
memcpy(bfi->pal, bfi->frame.data[1], sizeof(bfi->pal)); memcpy(bfi->pal, bfi->frame.data[1], sizeof(bfi->pal));
bfi->frame.palette_has_changed = 1; bfi->frame.palette_has_changed = 1;
} else { } else {
bfi->frame.pict_type = FF_P_TYPE; bfi->frame.pict_type = AV_PICTURE_TYPE_P;
bfi->frame.key_frame = 0; bfi->frame.key_frame = 0;
bfi->frame.palette_has_changed = 0; bfi->frame.palette_has_changed = 0;
memcpy(bfi->frame.data[1], bfi->pal, sizeof(bfi->pal)); memcpy(bfi->frame.data[1], bfi->pal, sizeof(bfi->pal));

@@ -200,7 +200,7 @@ static int bmp_decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1; return -1;
} }
p->pict_type = FF_I_TYPE; p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1; p->key_frame = 1;
buf = buf0 + hsize; buf = buf0 + hsize;

@@ -74,7 +74,7 @@ static int bmp_encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_s
uint8_t *ptr; uint8_t *ptr;
unsigned char* buf0 = buf; unsigned char* buf0 = buf;
*p = *pict; *p = *pict;
p->pict_type= FF_I_TYPE; p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1; p->key_frame= 1;
switch (avctx->pix_fmt) { switch (avctx->pix_fmt) {
case PIX_FMT_RGB565: case PIX_FMT_RGB565:

@@ -137,10 +137,10 @@ static int decode_frame(AVCodecContext *avctx, void *data,
stride = newpic->linesize[0]; stride = newpic->linesize[0];
if (buf[0] & C93_FIRST_FRAME) { if (buf[0] & C93_FIRST_FRAME) {
newpic->pict_type = FF_I_TYPE; newpic->pict_type = AV_PICTURE_TYPE_I;
newpic->key_frame = 1; newpic->key_frame = 1;
} else { } else {
newpic->pict_type = FF_P_TYPE; newpic->pict_type = AV_PICTURE_TYPE_P;
newpic->key_frame = 0; newpic->key_frame = 0;
} }

@@ -278,7 +278,7 @@ static inline void set_mv_intra(AVSContext *h) {
set_mvs(&h->mv[MV_FWD_X0], BLK_16X16); set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
h->mv[MV_BWD_X0] = ff_cavs_intra_mv; h->mv[MV_BWD_X0] = ff_cavs_intra_mv;
set_mvs(&h->mv[MV_BWD_X0], BLK_16X16); set_mvs(&h->mv[MV_BWD_X0], BLK_16X16);
if(h->pic_type != FF_B_TYPE) if(h->pic_type != AV_PICTURE_TYPE_B)
h->col_type_base[h->mbidx] = I_8X8; h->col_type_base[h->mbidx] = I_8X8;
} }

@@ -220,7 +220,7 @@ static int decode_mb_i(AVSContext *h, int cbp_code) {
ff_cavs_modify_mb_i(h, &pred_mode_uv); ff_cavs_modify_mb_i(h, &pred_mode_uv);
/* get coded block pattern */ /* get coded block pattern */
if(h->pic_type == FF_I_TYPE) if(h->pic_type == AV_PICTURE_TYPE_I)
cbp_code = get_ue_golomb(gb); cbp_code = get_ue_golomb(gb);
if(cbp_code > 63){ if(cbp_code > 63){
av_log(h->s.avctx, AV_LOG_ERROR, "illegal intra cbp\n"); av_log(h->s.avctx, AV_LOG_ERROR, "illegal intra cbp\n");
@@ -424,7 +424,7 @@ static inline int decode_slice_header(AVSContext *h, GetBitContext *gb) {
h->qp = get_bits(gb,6); h->qp = get_bits(gb,6);
} }
/* inter frame or second slice can have weighting params */ /* inter frame or second slice can have weighting params */
if((h->pic_type != FF_I_TYPE) || (!h->pic_structure && h->mby >= h->mb_width/2)) if((h->pic_type != AV_PICTURE_TYPE_I) || (!h->pic_structure && h->mby >= h->mb_width/2))
if(get_bits1(gb)) { //slice_weighting_flag if(get_bits1(gb)) { //slice_weighting_flag
av_log(h->s.avctx, AV_LOG_ERROR, av_log(h->s.avctx, AV_LOG_ERROR,
"weighted prediction not yet supported\n"); "weighted prediction not yet supported\n");
@@ -470,17 +470,17 @@ static int decode_pic(AVSContext *h) {
} }
skip_bits(&s->gb,16);//bbv_dwlay skip_bits(&s->gb,16);//bbv_dwlay
if(h->stc == PIC_PB_START_CODE) { if(h->stc == PIC_PB_START_CODE) {
h->pic_type = get_bits(&s->gb,2) + FF_I_TYPE; h->pic_type = get_bits(&s->gb,2) + AV_PICTURE_TYPE_I;
if(h->pic_type > FF_B_TYPE) { if(h->pic_type > AV_PICTURE_TYPE_B) {
av_log(s->avctx, AV_LOG_ERROR, "illegal picture type\n"); av_log(s->avctx, AV_LOG_ERROR, "illegal picture type\n");
return -1; return -1;
} }
/* make sure we have the reference frames we need */ /* make sure we have the reference frames we need */
if(!h->DPB[0].data[0] || if(!h->DPB[0].data[0] ||
(!h->DPB[1].data[0] && h->pic_type == FF_B_TYPE)) (!h->DPB[1].data[0] && h->pic_type == AV_PICTURE_TYPE_B))
return -1; return -1;
} else { } else {
h->pic_type = FF_I_TYPE; h->pic_type = AV_PICTURE_TYPE_I;
if(get_bits1(&s->gb)) if(get_bits1(&s->gb))
skip_bits(&s->gb,24);//time_code skip_bits(&s->gb,24);//time_code
/* old sample clips were all progressive and no low_delay, /* old sample clips were all progressive and no low_delay,
@@ -502,7 +502,7 @@ static int decode_pic(AVSContext *h) {
h->picture.poc = get_bits(&s->gb,8)*2; h->picture.poc = get_bits(&s->gb,8)*2;
/* get temporal distances and MV scaling factors */ /* get temporal distances and MV scaling factors */
if(h->pic_type != FF_B_TYPE) { if(h->pic_type != AV_PICTURE_TYPE_B) {
h->dist[0] = (h->picture.poc - h->DPB[0].poc + 512) % 512; h->dist[0] = (h->picture.poc - h->DPB[0].poc + 512) % 512;
} else { } else {
h->dist[0] = (h->DPB[0].poc - h->picture.poc + 512) % 512; h->dist[0] = (h->DPB[0].poc - h->picture.poc + 512) % 512;
@@ -510,7 +510,7 @@ static int decode_pic(AVSContext *h) {
h->dist[1] = (h->picture.poc - h->DPB[1].poc + 512) % 512; h->dist[1] = (h->picture.poc - h->DPB[1].poc + 512) % 512;
h->scale_den[0] = h->dist[0] ? 512/h->dist[0] : 0; h->scale_den[0] = h->dist[0] ? 512/h->dist[0] : 0;
h->scale_den[1] = h->dist[1] ? 512/h->dist[1] : 0; h->scale_den[1] = h->dist[1] ? 512/h->dist[1] : 0;
if(h->pic_type == FF_B_TYPE) { if(h->pic_type == AV_PICTURE_TYPE_B) {
h->sym_factor = h->dist[0]*h->scale_den[1]; h->sym_factor = h->dist[0]*h->scale_den[1];
} else { } else {
h->direct_den[0] = h->dist[0] ? 16384/h->dist[0] : 0; h->direct_den[0] = h->dist[0] ? 16384/h->dist[0] : 0;
@@ -529,12 +529,12 @@ static int decode_pic(AVSContext *h) {
skip_bits1(&s->gb); //repeat_first_field skip_bits1(&s->gb); //repeat_first_field
h->qp_fixed = get_bits1(&s->gb); h->qp_fixed = get_bits1(&s->gb);
h->qp = get_bits(&s->gb,6); h->qp = get_bits(&s->gb,6);
if(h->pic_type == FF_I_TYPE) { if(h->pic_type == AV_PICTURE_TYPE_I) {
if(!h->progressive && !h->pic_structure) if(!h->progressive && !h->pic_structure)
skip_bits1(&s->gb);//what is this? skip_bits1(&s->gb);//what is this?
skip_bits(&s->gb,4); //reserved bits skip_bits(&s->gb,4); //reserved bits
} else { } else {
if(!(h->pic_type == FF_B_TYPE && h->pic_structure == 1)) if(!(h->pic_type == AV_PICTURE_TYPE_B && h->pic_structure == 1))
h->ref_flag = get_bits1(&s->gb); h->ref_flag = get_bits1(&s->gb);
skip_bits(&s->gb,4); //reserved bits skip_bits(&s->gb,4); //reserved bits
h->skip_mode_flag = get_bits1(&s->gb); h->skip_mode_flag = get_bits1(&s->gb);
@@ -546,12 +546,12 @@ static int decode_pic(AVSContext *h) {
} else { } else {
h->alpha_offset = h->beta_offset = 0; h->alpha_offset = h->beta_offset = 0;
} }
if(h->pic_type == FF_I_TYPE) { if(h->pic_type == AV_PICTURE_TYPE_I) {
do { do {
check_for_slice(h); check_for_slice(h);
decode_mb_i(h, 0); decode_mb_i(h, 0);
} while(ff_cavs_next_mb(h)); } while(ff_cavs_next_mb(h));
} else if(h->pic_type == FF_P_TYPE) { } else if(h->pic_type == AV_PICTURE_TYPE_P) {
do { do {
if(check_for_slice(h)) if(check_for_slice(h))
skip_count = -1; skip_count = -1;
@@ -567,7 +567,7 @@ static int decode_pic(AVSContext *h) {
decode_mb_p(h,mb_type); decode_mb_p(h,mb_type);
} }
} while(ff_cavs_next_mb(h)); } while(ff_cavs_next_mb(h));
} else { /* FF_B_TYPE */ } else { /* AV_PICTURE_TYPE_B */
do { do {
if(check_for_slice(h)) if(check_for_slice(h))
skip_count = -1; skip_count = -1;
@@ -584,7 +584,7 @@ static int decode_pic(AVSContext *h) {
} }
} while(ff_cavs_next_mb(h)); } while(ff_cavs_next_mb(h));
} }
if(h->pic_type != FF_B_TYPE) { if(h->pic_type != AV_PICTURE_TYPE_B) {
if(h->DPB[1].data[0]) if(h->DPB[1].data[0])
s->avctx->release_buffer(s->avctx, (AVFrame *)&h->DPB[1]); s->avctx->release_buffer(s->avctx, (AVFrame *)&h->DPB[1]);
h->DPB[1] = h->DPB[0]; h->DPB[1] = h->DPB[0];
@@ -684,7 +684,7 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size,
if(decode_pic(h)) if(decode_pic(h))
break; break;
*data_size = sizeof(AVPicture); *data_size = sizeof(AVPicture);
if(h->pic_type != FF_B_TYPE) { if(h->pic_type != AV_PICTURE_TYPE_B) {
if(h->DPB[1].data[0]) { if(h->DPB[1].data[0]) {
*picture = *(AVFrame *) &h->DPB[1]; *picture = *(AVFrame *) &h->DPB[1];
} else { } else {

@@ -64,7 +64,7 @@ static int decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1; return -1;
} }
p->pict_type= FF_I_TYPE; p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1; p->key_frame= 1;
init_get_bits(&a->gb, buf, buf_size); init_get_bits(&a->gb, buf, buf_size);
@@ -100,7 +100,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
int size; int size;
*p = *pict; *p = *pict;
p->pict_type= FF_I_TYPE; p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1; p->key_frame= 1;
emms_c(); emms_c();

@@ -183,7 +183,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
// flip upside down, add difference frame // flip upside down, add difference frame
if (buf[0] & 1) { // keyframe if (buf[0] & 1) { // keyframe
c->pic.pict_type = FF_I_TYPE; c->pic.pict_type = AV_PICTURE_TYPE_I;
c->pic.key_frame = 1; c->pic.key_frame = 1;
switch (c->bpp) { switch (c->bpp) {
case 16: case 16:
@@ -197,7 +197,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
c->linelen, c->height); c->linelen, c->height);
} }
} else { } else {
c->pic.pict_type = FF_P_TYPE; c->pic.pict_type = AV_PICTURE_TYPE_P;
c->pic.key_frame = 0; c->pic.key_frame = 0;
switch (c->bpp) { switch (c->bpp) {
case 16: case 16:

@@ -194,7 +194,7 @@ static int dirac_combine_frame(AVCodecParserContext *s, AVCodecContext *avctx,
avctx->has_b_frames = 1; avctx->has_b_frames = 1;
} }
if (avctx->has_b_frames && s->pts == s->dts) if (avctx->has_b_frames && s->pts == s->dts)
s->pict_type = FF_B_TYPE; s->pict_type = AV_PICTURE_TYPE_B;
/* Finally have a complete Dirac data unit */ /* Finally have a complete Dirac data unit */
*buf = pc->dirac_unit; *buf = pc->dirac_unit;

@@ -55,7 +55,7 @@ static av_cold int dnxhd_decode_init(AVCodecContext *avctx)
ctx->avctx = avctx; ctx->avctx = avctx;
dsputil_init(&ctx->dsp, avctx); dsputil_init(&ctx->dsp, avctx);
avctx->coded_frame = &ctx->picture; avctx->coded_frame = &ctx->picture;
ctx->picture.type = FF_I_TYPE; ctx->picture.type = AV_PICTURE_TYPE_I;
ctx->picture.key_frame = 1; ctx->picture.key_frame = 1;
return 0; return 0;
} }

@@ -222,7 +222,7 @@ static int dnxhd_encode_init(AVCodecContext *avctx)
FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_qscale, ctx->m.mb_num *sizeof(uint8_t) , fail); FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_qscale, ctx->m.mb_num *sizeof(uint8_t) , fail);
ctx->frame.key_frame = 1; ctx->frame.key_frame = 1;
ctx->frame.pict_type = FF_I_TYPE; ctx->frame.pict_type = AV_PICTURE_TYPE_I;
ctx->m.avctx->coded_frame = &ctx->frame; ctx->m.avctx->coded_frame = &ctx->frame;
if (avctx->thread_count > MAX_THREADS) { if (avctx->thread_count > MAX_THREADS) {

@@ -35,7 +35,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
DPXContext *s = avctx->priv_data; DPXContext *s = avctx->priv_data;
avctx->coded_frame = &s->picture; avctx->coded_frame = &s->picture;
avctx->coded_frame->pict_type = FF_I_TYPE; avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1; avctx->coded_frame->key_frame = 1;
s->big_endian = 1; s->big_endian = 1;

@@ -1095,7 +1095,7 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
s->picture.reference = 0; s->picture.reference = 0;
s->picture.key_frame = 1; s->picture.key_frame = 1;
s->picture.pict_type = FF_I_TYPE; s->picture.pict_type = AV_PICTURE_TYPE_I;
avctx->pix_fmt = s->sys->pix_fmt; avctx->pix_fmt = s->sys->pix_fmt;
avctx->time_base = s->sys->time_base; avctx->time_base = s->sys->time_base;
avcodec_set_dimensions(avctx, s->sys->width, s->sys->height); avcodec_set_dimensions(avctx, s->sys->width, s->sys->height);
@@ -1264,7 +1264,7 @@ static int dvvideo_encode_frame(AVCodecContext *c, uint8_t *buf, int buf_size,
c->pix_fmt = s->sys->pix_fmt; c->pix_fmt = s->sys->pix_fmt;
s->picture = *((AVFrame *)data); s->picture = *((AVFrame *)data);
s->picture.key_frame = 1; s->picture.key_frame = 1;
s->picture.pict_type = FF_I_TYPE; s->picture.pict_type = AV_PICTURE_TYPE_I;
s->buf = buf; s->buf = buf;
c->execute(c, dv_encode_video_segment, s->sys->work_chunks, NULL, c->execute(c, dv_encode_video_segment, s->sys->work_chunks, NULL,

@@ -240,13 +240,13 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
switch(compr){ switch(compr){
case -1: case -1:
c->pic.key_frame = 0; c->pic.key_frame = 0;
c->pic.pict_type = FF_P_TYPE; c->pic.pict_type = AV_PICTURE_TYPE_P;
if(c->prev.data[0]) if(c->prev.data[0])
memcpy(c->pic.data[0], c->prev.data[0], c->pic.linesize[0] * avctx->height); memcpy(c->pic.data[0], c->prev.data[0], c->pic.linesize[0] * avctx->height);
else{ // Should happen only when first frame is 'NULL' else{ // Should happen only when first frame is 'NULL'
memset(c->pic.data[0], 0, c->pic.linesize[0] * avctx->height); memset(c->pic.data[0], 0, c->pic.linesize[0] * avctx->height);
c->pic.key_frame = 1; c->pic.key_frame = 1;
c->pic.pict_type = FF_I_TYPE; c->pic.pict_type = AV_PICTURE_TYPE_I;
} }
break; break;
case 2: case 2:
@@ -254,7 +254,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
case 4: case 4:
case 5: case 5:
c->pic.key_frame = !(compr & 1); c->pic.key_frame = !(compr & 1);
c->pic.pict_type = (compr & 1) ? FF_P_TYPE : FF_I_TYPE; c->pic.pict_type = (compr & 1) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
for(j = 0; j < avctx->height; j++){ for(j = 0; j < avctx->height; j++){
if(compr & 1){ if(compr & 1){
for(i = 0; i < avctx->width; i++) for(i = 0; i < avctx->width; i++)
@@ -269,7 +269,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
case 12: // ScummVM coding case 12: // ScummVM coding
case 13: case 13:
c->pic.key_frame = 0; c->pic.key_frame = 0;
c->pic.pict_type = FF_P_TYPE; c->pic.pict_type = AV_PICTURE_TYPE_P;
decode_13(avctx, c, c->pic.data[0], srcptr, c->prev.data[0]); decode_13(avctx, c, c->pic.data[0], srcptr, c->prev.data[0]);
break; break;
default: default:

@@ -246,7 +246,7 @@ static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice,
slice->slice_qs_delta = 0; /* XXX not implemented by FFmpeg */ slice->slice_qs_delta = 0; /* XXX not implemented by FFmpeg */
slice->slice_qp_delta = s->qscale - h->pps.init_qp; slice->slice_qp_delta = s->qscale - h->pps.init_qp;
slice->redundant_pic_cnt = h->redundant_pic_count; slice->redundant_pic_cnt = h->redundant_pic_count;
if (h->slice_type == FF_B_TYPE) if (h->slice_type == AV_PICTURE_TYPE_B)
slice->direct_spatial_mv_pred_flag = h->direct_spatial_mv_pred; slice->direct_spatial_mv_pred_flag = h->direct_spatial_mv_pred;
slice->cabac_init_idc = h->pps.cabac ? h->cabac_init_idc : 0; slice->cabac_init_idc = h->pps.cabac ? h->cabac_init_idc : 0;
if (h->deblocking_filter < 2) if (h->deblocking_filter < 2)
@@ -403,7 +403,7 @@ static int decode_slice(AVCodecContext *avctx,
position, size); position, size);
ctx_pic->slice_count++; ctx_pic->slice_count++;
if (h->slice_type != FF_I_TYPE && h->slice_type != FF_SI_TYPE) if (h->slice_type != AV_PICTURE_TYPE_I && h->slice_type != AV_PICTURE_TYPE_SI)
ctx_pic->pp.wBitFields &= ~(1 << 15); /* Set IntraPicFlag to 0 */ ctx_pic->pp.wBitFields &= ~(1 << 15); /* Set IntraPicFlag to 0 */
return 0; return 0;
} }

@@ -44,11 +44,11 @@ static void fill_picture_parameters(AVCodecContext *avctx,
memset(pp, 0, sizeof(*pp)); memset(pp, 0, sizeof(*pp));
pp->wDecodedPictureIndex = ff_dxva2_get_surface_index(ctx, current_picture); pp->wDecodedPictureIndex = ff_dxva2_get_surface_index(ctx, current_picture);
pp->wDeblockedPictureIndex = 0; pp->wDeblockedPictureIndex = 0;
if (s->pict_type != FF_I_TYPE) if (s->pict_type != AV_PICTURE_TYPE_I)
pp->wForwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, &s->last_picture); pp->wForwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, &s->last_picture);
else else
pp->wForwardRefPictureIndex = 0xffff; pp->wForwardRefPictureIndex = 0xffff;
if (s->pict_type == FF_B_TYPE) if (s->pict_type == AV_PICTURE_TYPE_B)
pp->wBackwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, &s->next_picture); pp->wBackwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, &s->next_picture);
else else
pp->wBackwardRefPictureIndex = 0xffff; pp->wBackwardRefPictureIndex = 0xffff;
@@ -61,8 +61,8 @@ static void fill_picture_parameters(AVCodecContext *avctx,
pp->bBPPminus1 = 7; pp->bBPPminus1 = 7;
pp->bPicStructure = s->picture_structure; pp->bPicStructure = s->picture_structure;
pp->bSecondField = is_field && !s->first_field; pp->bSecondField = is_field && !s->first_field;
pp->bPicIntra = s->pict_type == FF_I_TYPE; pp->bPicIntra = s->pict_type == AV_PICTURE_TYPE_I;
pp->bPicBackwardPrediction = s->pict_type == FF_B_TYPE; pp->bPicBackwardPrediction = s->pict_type == AV_PICTURE_TYPE_B;
pp->bBidirectionalAveragingMode = 0; pp->bBidirectionalAveragingMode = 0;
pp->bMVprecisionAndChromaRelation= 0; /* FIXME */ pp->bMVprecisionAndChromaRelation= 0; /* FIXME */
pp->bChromaFormat = s->chroma_format; pp->bChromaFormat = s->chroma_format;

@@ -42,11 +42,11 @@ static void fill_picture_parameters(AVCodecContext *avctx,
memset(pp, 0, sizeof(*pp)); memset(pp, 0, sizeof(*pp));
pp->wDecodedPictureIndex = pp->wDecodedPictureIndex =
pp->wDeblockedPictureIndex = ff_dxva2_get_surface_index(ctx, current_picture); pp->wDeblockedPictureIndex = ff_dxva2_get_surface_index(ctx, current_picture);
if (s->pict_type != FF_I_TYPE) if (s->pict_type != AV_PICTURE_TYPE_I)
pp->wForwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, &s->last_picture); pp->wForwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, &s->last_picture);
else else
pp->wForwardRefPictureIndex = 0xffff; pp->wForwardRefPictureIndex = 0xffff;
if (s->pict_type == FF_B_TYPE) if (s->pict_type == AV_PICTURE_TYPE_B)
pp->wBackwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, &s->next_picture); pp->wBackwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, &s->next_picture);
else else
pp->wBackwardRefPictureIndex = 0xffff; pp->wBackwardRefPictureIndex = 0xffff;
@@ -69,8 +69,8 @@ static void fill_picture_parameters(AVCodecContext *avctx,
if (s->picture_structure & PICT_BOTTOM_FIELD) if (s->picture_structure & PICT_BOTTOM_FIELD)
pp->bPicStructure |= 0x02; pp->bPicStructure |= 0x02;
pp->bSecondField = v->interlace && v->fcm != 0x03 && !s->first_field; pp->bSecondField = v->interlace && v->fcm != 0x03 && !s->first_field;
pp->bPicIntra = s->pict_type == FF_I_TYPE; pp->bPicIntra = s->pict_type == AV_PICTURE_TYPE_I;
pp->bPicBackwardPrediction = s->pict_type == FF_B_TYPE; pp->bPicBackwardPrediction = s->pict_type == AV_PICTURE_TYPE_B;
pp->bBidirectionalAveragingMode = (1 << 7) | pp->bBidirectionalAveragingMode = (1 << 7) |
((ctx->cfg->ConfigIntraResidUnsigned != 0) << 6) | ((ctx->cfg->ConfigIntraResidUnsigned != 0) << 6) |
((ctx->cfg->ConfigResidDiffAccelerator != 0) << 5) | ((ctx->cfg->ConfigResidDiffAccelerator != 0) << 5) |
@@ -108,10 +108,10 @@ static void fill_picture_parameters(AVCodecContext *avctx,
(v->interlace << 5) | (v->interlace << 5) |
(v->tfcntrflag << 4) | (v->tfcntrflag << 4) |
(v->finterpflag << 3) | (v->finterpflag << 3) |
((s->pict_type != FF_B_TYPE) << 2) | ((s->pict_type != AV_PICTURE_TYPE_B) << 2) |
(v->psf << 1) | (v->psf << 1) |
(v->extended_dmv ); (v->extended_dmv );
if (s->pict_type != FF_I_TYPE) if (s->pict_type != AV_PICTURE_TYPE_I)
pp->bPic4MVallowed = v->mv_mode == MV_PMODE_MIXED_MV || pp->bPic4MVallowed = v->mv_mode == MV_PMODE_MIXED_MV ||
(v->mv_mode == MV_PMODE_INTENSITY_COMP && (v->mv_mode == MV_PMODE_INTENSITY_COMP &&
v->mv_mode2 == MV_PMODE_MIXED_MV); v->mv_mode2 == MV_PMODE_MIXED_MV);

@@ -180,10 +180,10 @@ static int cmv_decode_frame(AVCodecContext *avctx,
if ((buf[0]&1)) { // subtype if ((buf[0]&1)) { // subtype
cmv_decode_inter(s, buf+2, buf_end); cmv_decode_inter(s, buf+2, buf_end);
s->frame.key_frame = 0; s->frame.key_frame = 0;
s->frame.pict_type = FF_P_TYPE; s->frame.pict_type = AV_PICTURE_TYPE_P;
}else{ }else{
s->frame.key_frame = 1; s->frame.key_frame = 1;
s->frame.pict_type = FF_I_TYPE; s->frame.pict_type = AV_PICTURE_TYPE_I;
cmv_decode_intra(s, buf+2, buf_end); cmv_decode_intra(s, buf+2, buf_end);
} }

@@ -218,7 +218,7 @@ static int tgq_decode_frame(AVCodecContext *avctx,
if (!s->frame.data[0]) { if (!s->frame.data[0]) {
s->frame.key_frame = 1; s->frame.key_frame = 1;
s->frame.pict_type = FF_I_TYPE; s->frame.pict_type = AV_PICTURE_TYPE_I;
s->frame.buffer_hints = FF_BUFFER_HINTS_VALID; s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
if (avctx->get_buffer(avctx, &s->frame)) { if (avctx->get_buffer(avctx, &s->frame)) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");

@@ -300,7 +300,7 @@ static int tgv_decode_frame(AVCodecContext *avctx,
if(chunk_type==kVGT_TAG) { if(chunk_type==kVGT_TAG) {
s->frame.key_frame = 1; s->frame.key_frame = 1;
s->frame.pict_type = FF_I_TYPE; s->frame.pict_type = AV_PICTURE_TYPE_I;
if (unpack(buf, buf_end, s->frame.data[0], s->avctx->width, s->avctx->height)<0) { if (unpack(buf, buf_end, s->frame.data[0], s->avctx->width, s->avctx->height)<0) {
av_log(avctx, AV_LOG_WARNING, "truncated intra frame\n"); av_log(avctx, AV_LOG_WARNING, "truncated intra frame\n");
return -1; return -1;
@@ -311,7 +311,7 @@ static int tgv_decode_frame(AVCodecContext *avctx,
return buf_size; return buf_size;
} }
s->frame.key_frame = 0; s->frame.key_frame = 0;
s->frame.pict_type = FF_P_TYPE; s->frame.pict_type = AV_PICTURE_TYPE_P;
if (tgv_decode_inter(s, buf, buf_end)<0) { if (tgv_decode_inter(s, buf, buf_end)<0) {
av_log(avctx, AV_LOG_WARNING, "truncated inter frame\n"); av_log(avctx, AV_LOG_WARNING, "truncated inter frame\n");
return -1; return -1;

@ -639,7 +639,7 @@ static int is_intra_more_likely(MpegEncContext *s){
if(undamaged_count < 5) return 0; //almost all MBs damaged -> use temporal prediction if(undamaged_count < 5) return 0; //almost all MBs damaged -> use temporal prediction
//prevent dsp.sad() check, that requires access to the image //prevent dsp.sad() check, that requires access to the image
if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration && s->pict_type == FF_I_TYPE) if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration && s->pict_type == AV_PICTURE_TYPE_I)
return 1; return 1;
skip_amount= FFMAX(undamaged_count/50, 1); //check only upto 50 MBs skip_amount= FFMAX(undamaged_count/50, 1); //check only upto 50 MBs
@ -658,7 +658,7 @@ static int is_intra_more_likely(MpegEncContext *s){
j++; j++;
if((j%skip_amount) != 0) continue; //skip a few to speed things up if((j%skip_amount) != 0) continue; //skip a few to speed things up
if(s->pict_type==FF_I_TYPE){ if(s->pict_type==AV_PICTURE_TYPE_I){
uint8_t *mb_ptr = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize; uint8_t *mb_ptr = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
uint8_t *last_mb_ptr= s->last_picture.data [0] + mb_x*16 + mb_y*16*s->linesize; uint8_t *last_mb_ptr= s->last_picture.data [0] + mb_x*16 + mb_y*16*s->linesize;
@ -977,7 +977,7 @@ void ff_er_frame_end(MpegEncContext *s){
} }
/* guess MVs */ /* guess MVs */
if(s->pict_type==FF_B_TYPE){ if(s->pict_type==AV_PICTURE_TYPE_B){
for(mb_y=0; mb_y<s->mb_height; mb_y++){ for(mb_y=0; mb_y<s->mb_height; mb_y++){
for(mb_x=0; mb_x<s->mb_width; mb_x++){ for(mb_x=0; mb_x<s->mb_width; mb_x++){
int xy= mb_x*2 + mb_y*2*s->b8_stride; int xy= mb_x*2 + mb_y*2*s->b8_stride;
@ -1114,7 +1114,7 @@ ec_clean:
const int mb_xy= s->mb_index2xy[i]; const int mb_xy= s->mb_index2xy[i];
int error= s->error_status_table[mb_xy]; int error= s->error_status_table[mb_xy];
if(s->pict_type!=FF_B_TYPE && (error&(DC_ERROR|MV_ERROR|AC_ERROR))){ if(s->pict_type!=AV_PICTURE_TYPE_B && (error&(DC_ERROR|MV_ERROR|AC_ERROR))){
s->mbskip_table[mb_xy]=0; s->mbskip_table[mb_xy]=0;
} }
s->mbintra_table[mb_xy]=1; s->mbintra_table[mb_xy]=1;

@ -1160,7 +1160,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
ff_build_rac_states(c, 0.05*(1LL<<32), 256-8); ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
*p = *pict; *p = *pict;
p->pict_type= FF_I_TYPE; p->pict_type= AV_PICTURE_TYPE_I;
if(avctx->gop_size==0 || f->picture_number % avctx->gop_size == 0){ if(avctx->gop_size==0 || f->picture_number % avctx->gop_size == 0){
put_rac(c, &keystate, 1); put_rac(c, &keystate, 1);
@ -1723,7 +1723,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
ff_build_rac_states(c, 0.05*(1LL<<32), 256-8); ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
p->pict_type= FF_I_TYPE; //FIXME I vs. P p->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
if(get_rac(c, &keystate)){ if(get_rac(c, &keystate)){
p->key_frame= 1; p->key_frame= 1;
if(read_header(f) < 0) if(read_header(f) < 0)

@ -260,12 +260,12 @@ static int flashsv_encode_frame(AVCodecContext *avctx, uint8_t *buf,
//mark the frame type so the muxer can mux it correctly //mark the frame type so the muxer can mux it correctly
if (I_frame) { if (I_frame) {
p->pict_type = FF_I_TYPE; p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1; p->key_frame = 1;
s->last_key_frame = avctx->frame_number; s->last_key_frame = avctx->frame_number;
av_log(avctx, AV_LOG_DEBUG, "Inserting key frame at frame %d\n", avctx->frame_number); av_log(avctx, AV_LOG_DEBUG, "Inserting key frame at frame %d\n", avctx->frame_number);
} else { } else {
p->pict_type = FF_P_TYPE; p->pict_type = AV_PICTURE_TYPE_P;
p->key_frame = 0; p->key_frame = 0;
} }

@ -88,10 +88,10 @@ int ff_flv_decode_picture_header(MpegEncContext *s)
s->width = width; s->width = width;
s->height = height; s->height = height;
s->pict_type = FF_I_TYPE + get_bits(&s->gb, 2); s->pict_type = AV_PICTURE_TYPE_I + get_bits(&s->gb, 2);
s->dropable= s->pict_type > FF_P_TYPE; s->dropable= s->pict_type > AV_PICTURE_TYPE_P;
if (s->dropable) if (s->dropable)
s->pict_type = FF_P_TYPE; s->pict_type = AV_PICTURE_TYPE_P;
skip_bits1(&s->gb); /* deblocking flag */ skip_bits1(&s->gb); /* deblocking flag */
s->chroma_qscale= s->qscale = get_bits(&s->gb, 5); s->chroma_qscale= s->qscale = get_bits(&s->gb, 5);

@ -53,7 +53,7 @@ void ff_flv_encode_picture_header(MpegEncContext * s, int picture_number)
put_bits(&s->pb, 16, s->width); put_bits(&s->pb, 16, s->width);
put_bits(&s->pb, 16, s->height); put_bits(&s->pb, 16, s->height);
} }
put_bits(&s->pb, 2, s->pict_type == FF_P_TYPE); /* PictureType */ put_bits(&s->pb, 2, s->pict_type == AV_PICTURE_TYPE_P); /* PictureType */
put_bits(&s->pb, 1, 1); /* DeblockingFlag: on */ put_bits(&s->pb, 1, 1); /* DeblockingFlag: on */
put_bits(&s->pb, 5, s->qscale); /* Quantizer */ put_bits(&s->pb, 5, s->qscale); /* Quantizer */
put_bits(&s->pb, 1, 0); /* ExtraInformation */ put_bits(&s->pb, 1, 0); /* ExtraInformation */

@ -180,10 +180,10 @@ static int decode_frame(AVCodecContext *avctx,
return -1; return -1;
} }
/* bit 31 means same as previous pic */ /* bit 31 means same as previous pic */
f->pict_type = (header & (1U<<31))? FF_P_TYPE : FF_I_TYPE; f->pict_type = (header & (1U<<31))? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
f->key_frame = f->pict_type == FF_I_TYPE; f->key_frame = f->pict_type == AV_PICTURE_TYPE_I;
if (f->pict_type == FF_I_TYPE) { if (f->pict_type == AV_PICTURE_TYPE_I) {
buf32=(const uint32_t*)buf; buf32=(const uint32_t*)buf;
for(y=0; y<avctx->height/2; y++){ for(y=0; y<avctx->height/2; y++){
luma1=(uint32_t*)&f->data[0][ y*2*f->linesize[0] ]; luma1=(uint32_t*)&f->data[0][ y*2*f->linesize[0] ];
@ -223,10 +223,10 @@ static int decode_frame(AVCodecContext *avctx,
return -1; return -1;
} }
/* bit 31 means same as previous pic */ /* bit 31 means same as previous pic */
f->pict_type = (header & (1U<<31))? FF_P_TYPE : FF_I_TYPE; f->pict_type = (header & (1U<<31))? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
f->key_frame = f->pict_type == FF_I_TYPE; f->key_frame = f->pict_type == AV_PICTURE_TYPE_I;
if (f->pict_type == FF_I_TYPE) { if (f->pict_type == AV_PICTURE_TYPE_I) {
for(y=0; y<avctx->height; y++) for(y=0; y<avctx->height; y++)
memcpy(&f->data[0][ (avctx->height-y)*f->linesize[0] ], memcpy(&f->data[0][ (avctx->height-y)*f->linesize[0] ],
&buf[y*avctx->width*3], &buf[y*avctx->width*3],
@ -252,11 +252,11 @@ static int decode_frame(AVCodecContext *avctx,
} }
/* skip frame */ /* skip frame */
if(buf_size == 8) { if(buf_size == 8) {
f->pict_type = FF_P_TYPE; f->pict_type = AV_PICTURE_TYPE_P;
f->key_frame = 0; f->key_frame = 0;
break; break;
} }
f->pict_type = FF_I_TYPE; f->pict_type = AV_PICTURE_TYPE_I;
f->key_frame = 1; f->key_frame = 1;
if ((AV_RL32(buf) != FPS_TAG)||(buf_size < (planes*1024 + 24))) { if ((AV_RL32(buf) != FPS_TAG)||(buf_size < (planes*1024 + 24))) {
av_log(avctx, AV_LOG_ERROR, "Fraps: error in data stream\n"); av_log(avctx, AV_LOG_ERROR, "Fraps: error in data stream\n");
@ -297,11 +297,11 @@ static int decode_frame(AVCodecContext *avctx,
} }
/* skip frame */ /* skip frame */
if(buf_size == 8) { if(buf_size == 8) {
f->pict_type = FF_P_TYPE; f->pict_type = AV_PICTURE_TYPE_P;
f->key_frame = 0; f->key_frame = 0;
break; break;
} }
f->pict_type = FF_I_TYPE; f->pict_type = AV_PICTURE_TYPE_I;
f->key_frame = 1; f->key_frame = 1;
if ((AV_RL32(buf) != FPS_TAG)||(buf_size < (planes*1024 + 24))) { if ((AV_RL32(buf) != FPS_TAG)||(buf_size < (planes*1024 + 24))) {
av_log(avctx, AV_LOG_ERROR, "Fraps: error in data stream\n"); av_log(avctx, AV_LOG_ERROR, "Fraps: error in data stream\n");

@ -61,7 +61,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
if (avctx->get_buffer(avctx, pic) < 0) if (avctx->get_buffer(avctx, pic) < 0)
return -1; return -1;
pic->pict_type = FF_I_TYPE; pic->pict_type = AV_PICTURE_TYPE_I;
pic->key_frame = 1; pic->key_frame = 1;
pic->interlaced_frame = 1; pic->interlaced_frame = 1;
pic->top_field_first = 1; pic->top_field_first = 1;

@ -150,7 +150,7 @@ static int gif_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int bu
uint8_t *end = outbuf + buf_size; uint8_t *end = outbuf + buf_size;
*p = *pict; *p = *pict;
p->pict_type = FF_I_TYPE; p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1; p->key_frame = 1;
gif_image_write_header(avctx, &outbuf_ptr, (uint32_t *)pict->data[1]); gif_image_write_header(avctx, &outbuf_ptr, (uint32_t *)pict->data[1]);
gif_image_write_image(avctx, &outbuf_ptr, end, pict->data[0], pict->linesize[0]); gif_image_write_image(avctx, &outbuf_ptr, end, pict->data[0], pict->linesize[0]);

@ -497,9 +497,9 @@ static int h261_decode_picture_header(H261Context *h){
skip_bits(&s->gb, 8); skip_bits(&s->gb, 8);
} }
// h261 has no I-FRAMES, but if we pass FF_I_TYPE for the first frame, the codec crashes if it does // h261 has no I-FRAMES, but if we pass AV_PICTURE_TYPE_I for the first frame, the codec crashes if it does
// not contain all I-blocks (e.g. when a packet is lost) // not contain all I-blocks (e.g. when a packet is lost)
s->pict_type = FF_P_TYPE; s->pict_type = AV_PICTURE_TYPE_P;
h->gob_number = 0; h->gob_number = 0;
return 0; return 0;
@ -597,10 +597,10 @@ retry:
// for skipping the frame // for skipping the frame
s->current_picture.pict_type= s->pict_type; s->current_picture.pict_type= s->pict_type;
s->current_picture.key_frame= s->pict_type == FF_I_TYPE; s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==FF_B_TYPE) if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==AV_PICTURE_TYPE_B)
||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=FF_I_TYPE) ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=AV_PICTURE_TYPE_I)
|| avctx->skip_frame >= AVDISCARD_ALL) || avctx->skip_frame >= AVDISCARD_ALL)
return get_consumed_bytes(s, buf_size); return get_consumed_bytes(s, buf_size);

@ -148,7 +148,7 @@ void ff_h263_loop_filter(MpegEncContext * s){
uint8_t *dest_cb= s->dest[1]; uint8_t *dest_cb= s->dest[1];
uint8_t *dest_cr= s->dest[2]; uint8_t *dest_cr= s->dest[2];
// if(s->pict_type==FF_B_TYPE && !s->readable) return; // if(s->pict_type==AV_PICTURE_TYPE_B && !s->readable) return;
/* /*
Diag Top Diag Top

@ -218,7 +218,7 @@ static int decode_slice(MpegEncContext *s){
//printf("%d %d %06X\n", ret, get_bits_count(&s->gb), show_bits(&s->gb, 24)); //printf("%d %d %06X\n", ret, get_bits_count(&s->gb), show_bits(&s->gb, 24));
ret= s->decode_mb(s, s->block); ret= s->decode_mb(s, s->block);
if (s->pict_type!=FF_B_TYPE) if (s->pict_type!=AV_PICTURE_TYPE_B)
ff_h263_update_motion_val(s); ff_h263_update_motion_val(s);
if(ret<0){ if(ret<0){
@ -310,7 +310,7 @@ static int decode_slice(MpegEncContext *s){
int max_extra=7; int max_extra=7;
/* no markers in M$ crap */ /* no markers in M$ crap */
if(s->msmpeg4_version && s->pict_type==FF_I_TYPE) if(s->msmpeg4_version && s->pict_type==AV_PICTURE_TYPE_I)
max_extra+= 17; max_extra+= 17;
/* buggy padding but the frame should still end approximately at the bitstream end */ /* buggy padding but the frame should still end approximately at the bitstream end */
@ -600,26 +600,26 @@ retry:
// for skipping the frame // for skipping the frame
s->current_picture.pict_type= s->pict_type; s->current_picture.pict_type= s->pict_type;
s->current_picture.key_frame= s->pict_type == FF_I_TYPE; s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
/* skip B-frames if we don't have reference frames */ /* skip B-frames if we don't have reference frames */
if(s->last_picture_ptr==NULL && (s->pict_type==FF_B_TYPE || s->dropable)) return get_consumed_bytes(s, buf_size); if(s->last_picture_ptr==NULL && (s->pict_type==AV_PICTURE_TYPE_B || s->dropable)) return get_consumed_bytes(s, buf_size);
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==FF_B_TYPE) if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==AV_PICTURE_TYPE_B)
|| (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=FF_I_TYPE) || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=AV_PICTURE_TYPE_I)
|| avctx->skip_frame >= AVDISCARD_ALL) || avctx->skip_frame >= AVDISCARD_ALL)
return get_consumed_bytes(s, buf_size); return get_consumed_bytes(s, buf_size);
if(s->next_p_frame_damaged){ if(s->next_p_frame_damaged){
if(s->pict_type==FF_B_TYPE) if(s->pict_type==AV_PICTURE_TYPE_B)
return get_consumed_bytes(s, buf_size); return get_consumed_bytes(s, buf_size);
else else
s->next_p_frame_damaged=0; s->next_p_frame_damaged=0;
} }
if((s->avctx->flags2 & CODEC_FLAG2_FAST) && s->pict_type==FF_B_TYPE){ if((s->avctx->flags2 & CODEC_FLAG2_FAST) && s->pict_type==AV_PICTURE_TYPE_B){
s->me.qpel_put= s->dsp.put_2tap_qpel_pixels_tab; s->me.qpel_put= s->dsp.put_2tap_qpel_pixels_tab;
s->me.qpel_avg= s->dsp.avg_2tap_qpel_pixels_tab; s->me.qpel_avg= s->dsp.avg_2tap_qpel_pixels_tab;
}else if((!s->no_rounding) || s->pict_type==FF_B_TYPE){ }else if((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
s->me.qpel_put= s->dsp.put_qpel_pixels_tab; s->me.qpel_put= s->dsp.put_qpel_pixels_tab;
s->me.qpel_avg= s->dsp.avg_qpel_pixels_tab; s->me.qpel_avg= s->dsp.avg_qpel_pixels_tab;
}else{ }else{
@ -672,7 +672,7 @@ retry:
decode_slice(s); decode_slice(s);
} }
if (s->h263_msmpeg4 && s->msmpeg4_version<4 && s->pict_type==FF_I_TYPE) if (s->h263_msmpeg4 && s->msmpeg4_version<4 && s->pict_type==AV_PICTURE_TYPE_I)
if(!CONFIG_MSMPEG4_DECODER || msmpeg4_decode_ext_header(s, buf_size) < 0){ if(!CONFIG_MSMPEG4_DECODER || msmpeg4_decode_ext_header(s, buf_size) < 0){
s->error_status_table[s->mb_num-1]= AC_ERROR|DC_ERROR|MV_ERROR; s->error_status_table[s->mb_num-1]= AC_ERROR|DC_ERROR|MV_ERROR;
} }
@ -722,7 +722,7 @@ intrax8_decoded:
assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type); assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
assert(s->current_picture.pict_type == s->pict_type); assert(s->current_picture.pict_type == s->pict_type);
if (s->pict_type == FF_B_TYPE || s->low_delay) { if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
*pict= *(AVFrame*)s->current_picture_ptr; *pict= *(AVFrame*)s->current_picture_ptr;
} else if (s->last_picture_ptr != NULL) { } else if (s->last_picture_ptr != NULL) {
*pict= *(AVFrame*)s->last_picture_ptr; *pict= *(AVFrame*)s->last_picture_ptr;

@ -918,7 +918,7 @@ static void decode_postinit(H264Context *h){
else if((out_of_order && pics-1 == s->avctx->has_b_frames && s->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT) else if((out_of_order && pics-1 == s->avctx->has_b_frames && s->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT)
|| (s->low_delay && || (s->low_delay &&
((h->next_outputed_poc != INT_MIN && out->poc > h->next_outputed_poc + 2) ((h->next_outputed_poc != INT_MIN && out->poc > h->next_outputed_poc + 2)
|| cur->pict_type == FF_B_TYPE))) || cur->pict_type == AV_PICTURE_TYPE_B)))
{ {
s->low_delay = 0; s->low_delay = 0;
s->avctx->has_b_frames++; s->avctx->has_b_frames++;
@ -1449,7 +1449,7 @@ static int pred_weight_table(H264Context *h){
} }
} }
} }
if(h->slice_type_nos != FF_B_TYPE) break; if(h->slice_type_nos != AV_PICTURE_TYPE_B) break;
} }
h->use_weight= h->use_weight || h->use_weight_chroma; h->use_weight= h->use_weight || h->use_weight_chroma;
return 0; return 0;
@ -1817,7 +1817,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
h->slice_type_fixed=0; h->slice_type_fixed=0;
slice_type= golomb_to_pict_type[ slice_type ]; slice_type= golomb_to_pict_type[ slice_type ];
if (slice_type == FF_I_TYPE if (slice_type == AV_PICTURE_TYPE_I
|| (h0->current_slice != 0 && slice_type == h0->last_slice_type) ) { || (h0->current_slice != 0 && slice_type == h0->last_slice_type) ) {
default_ref_list_done = 1; default_ref_list_done = 1;
} }
@ -2110,15 +2110,15 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
h->ref_count[0]= h->pps.ref_count[0]; h->ref_count[0]= h->pps.ref_count[0];
h->ref_count[1]= h->pps.ref_count[1]; h->ref_count[1]= h->pps.ref_count[1];
if(h->slice_type_nos != FF_I_TYPE){ if(h->slice_type_nos != AV_PICTURE_TYPE_I){
if(h->slice_type_nos == FF_B_TYPE){ if(h->slice_type_nos == AV_PICTURE_TYPE_B){
h->direct_spatial_mv_pred= get_bits1(&s->gb); h->direct_spatial_mv_pred= get_bits1(&s->gb);
} }
num_ref_idx_active_override_flag= get_bits1(&s->gb); num_ref_idx_active_override_flag= get_bits1(&s->gb);
if(num_ref_idx_active_override_flag){ if(num_ref_idx_active_override_flag){
h->ref_count[0]= get_ue_golomb(&s->gb) + 1; h->ref_count[0]= get_ue_golomb(&s->gb) + 1;
if(h->slice_type_nos==FF_B_TYPE) if(h->slice_type_nos==AV_PICTURE_TYPE_B)
h->ref_count[1]= get_ue_golomb(&s->gb) + 1; h->ref_count[1]= get_ue_golomb(&s->gb) + 1;
if(h->ref_count[0]-1 > 32-1 || h->ref_count[1]-1 > 32-1){ if(h->ref_count[0]-1 > 32-1 || h->ref_count[1]-1 > 32-1){
@ -2127,7 +2127,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
return -1; return -1;
} }
} }
if(h->slice_type_nos == FF_B_TYPE) if(h->slice_type_nos == AV_PICTURE_TYPE_B)
h->list_count= 2; h->list_count= 2;
else else
h->list_count= 1; h->list_count= 1;
@ -2138,22 +2138,22 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
ff_h264_fill_default_ref_list(h); ff_h264_fill_default_ref_list(h);
} }
if(h->slice_type_nos!=FF_I_TYPE && ff_h264_decode_ref_pic_list_reordering(h) < 0) if(h->slice_type_nos!=AV_PICTURE_TYPE_I && ff_h264_decode_ref_pic_list_reordering(h) < 0)
return -1; return -1;
if(h->slice_type_nos!=FF_I_TYPE){ if(h->slice_type_nos!=AV_PICTURE_TYPE_I){
s->last_picture_ptr= &h->ref_list[0][0]; s->last_picture_ptr= &h->ref_list[0][0];
ff_copy_picture(&s->last_picture, s->last_picture_ptr); ff_copy_picture(&s->last_picture, s->last_picture_ptr);
} }
if(h->slice_type_nos==FF_B_TYPE){ if(h->slice_type_nos==AV_PICTURE_TYPE_B){
s->next_picture_ptr= &h->ref_list[1][0]; s->next_picture_ptr= &h->ref_list[1][0];
ff_copy_picture(&s->next_picture, s->next_picture_ptr); ff_copy_picture(&s->next_picture, s->next_picture_ptr);
} }
if( (h->pps.weighted_pred && h->slice_type_nos == FF_P_TYPE ) if( (h->pps.weighted_pred && h->slice_type_nos == AV_PICTURE_TYPE_P )
|| (h->pps.weighted_bipred_idc==1 && h->slice_type_nos== FF_B_TYPE ) ) || (h->pps.weighted_bipred_idc==1 && h->slice_type_nos== AV_PICTURE_TYPE_B ) )
pred_weight_table(h); pred_weight_table(h);
else if(h->pps.weighted_bipred_idc==2 && h->slice_type_nos== FF_B_TYPE){ else if(h->pps.weighted_bipred_idc==2 && h->slice_type_nos== AV_PICTURE_TYPE_B){
implicit_weight_table(h, -1); implicit_weight_table(h, -1);
}else { }else {
h->use_weight = 0; h->use_weight = 0;
@ -2169,17 +2169,17 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
if(FRAME_MBAFF){ if(FRAME_MBAFF){
ff_h264_fill_mbaff_ref_list(h); ff_h264_fill_mbaff_ref_list(h);
if(h->pps.weighted_bipred_idc==2 && h->slice_type_nos== FF_B_TYPE){ if(h->pps.weighted_bipred_idc==2 && h->slice_type_nos== AV_PICTURE_TYPE_B){
implicit_weight_table(h, 0); implicit_weight_table(h, 0);
implicit_weight_table(h, 1); implicit_weight_table(h, 1);
} }
} }
if(h->slice_type_nos==FF_B_TYPE && !h->direct_spatial_mv_pred) if(h->slice_type_nos==AV_PICTURE_TYPE_B && !h->direct_spatial_mv_pred)
ff_h264_direct_dist_scale_factor(h); ff_h264_direct_dist_scale_factor(h);
ff_h264_direct_ref_list_init(h); ff_h264_direct_ref_list_init(h);
if( h->slice_type_nos != FF_I_TYPE && h->pps.cabac ){ if( h->slice_type_nos != AV_PICTURE_TYPE_I && h->pps.cabac ){
tmp = get_ue_golomb_31(&s->gb); tmp = get_ue_golomb_31(&s->gb);
if(tmp > 2){ if(tmp > 2){
av_log(s->avctx, AV_LOG_ERROR, "cabac_init_idc overflow\n"); av_log(s->avctx, AV_LOG_ERROR, "cabac_init_idc overflow\n");
@ -2198,10 +2198,10 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
h->chroma_qp[0] = get_chroma_qp(h, 0, s->qscale); h->chroma_qp[0] = get_chroma_qp(h, 0, s->qscale);
h->chroma_qp[1] = get_chroma_qp(h, 1, s->qscale); h->chroma_qp[1] = get_chroma_qp(h, 1, s->qscale);
//FIXME qscale / qp ... stuff //FIXME qscale / qp ... stuff
if(h->slice_type == FF_SP_TYPE){ if(h->slice_type == AV_PICTURE_TYPE_SP){
get_bits1(&s->gb); /* sp_for_switch_flag */ get_bits1(&s->gb); /* sp_for_switch_flag */
} }
if(h->slice_type==FF_SP_TYPE || h->slice_type == FF_SI_TYPE){ if(h->slice_type==AV_PICTURE_TYPE_SP || h->slice_type == AV_PICTURE_TYPE_SI){
get_se_golomb(&s->gb); /* slice_qs_delta */ get_se_golomb(&s->gb); /* slice_qs_delta */
} }
@ -2230,8 +2230,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
} }
if( s->avctx->skip_loop_filter >= AVDISCARD_ALL if( s->avctx->skip_loop_filter >= AVDISCARD_ALL
||(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY && h->slice_type_nos != FF_I_TYPE) ||(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY && h->slice_type_nos != AV_PICTURE_TYPE_I)
||(s->avctx->skip_loop_filter >= AVDISCARD_BIDIR && h->slice_type_nos == FF_B_TYPE) ||(s->avctx->skip_loop_filter >= AVDISCARD_BIDIR && h->slice_type_nos == AV_PICTURE_TYPE_B)
||(s->avctx->skip_loop_filter >= AVDISCARD_NONREF && h->nal_ref_idc == 0)) ||(s->avctx->skip_loop_filter >= AVDISCARD_NONREF && h->nal_ref_idc == 0))
h->deblocking_filter= 0; h->deblocking_filter= 0;
@ -2315,7 +2315,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
h->deblocking_filter, h->slice_alpha_c0_offset/2-26, h->slice_beta_offset/2-26, h->deblocking_filter, h->slice_alpha_c0_offset/2-26, h->slice_beta_offset/2-26,
h->use_weight, h->use_weight,
h->use_weight==1 && h->use_weight_chroma ? "c" : "", h->use_weight==1 && h->use_weight_chroma ? "c" : "",
h->slice_type == FF_B_TYPE ? (h->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "" h->slice_type == AV_PICTURE_TYPE_B ? (h->direct_spatial_mv_pred ? "SPAT" : "TEMP") : ""
); );
} }
@ -2325,11 +2325,11 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
int ff_h264_get_slice_type(const H264Context *h) int ff_h264_get_slice_type(const H264Context *h)
{ {
switch (h->slice_type) { switch (h->slice_type) {
case FF_P_TYPE: return 0; case AV_PICTURE_TYPE_P: return 0;
case FF_B_TYPE: return 1; case AV_PICTURE_TYPE_B: return 1;
case FF_I_TYPE: return 2; case AV_PICTURE_TYPE_I: return 2;
case FF_SP_TYPE: return 3; case AV_PICTURE_TYPE_SP: return 3;
case FF_SI_TYPE: return 4; case AV_PICTURE_TYPE_SI: return 4;
default: return -1; default: return -1;
} }
} }
@ -3000,8 +3000,8 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
if(hx->redundant_pic_count==0 if(hx->redundant_pic_count==0
&& (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc) && (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc)
&& (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type_nos!=FF_B_TYPE) && (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type_nos!=AV_PICTURE_TYPE_B)
&& (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type_nos==FF_I_TYPE) && (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type_nos==AV_PICTURE_TYPE_I)
&& avctx->skip_frame < AVDISCARD_ALL){ && avctx->skip_frame < AVDISCARD_ALL){
if(avctx->hwaccel) { if(avctx->hwaccel) {
if (avctx->hwaccel->decode_slice(avctx, &buf[buf_index - consumed], consumed) < 0) if (avctx->hwaccel->decode_slice(avctx, &buf[buf_index - consumed], consumed) < 0)
@ -3037,8 +3037,8 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
if(hx->redundant_pic_count==0 && hx->intra_gb_ptr && hx->s.data_partitioning if(hx->redundant_pic_count==0 && hx->intra_gb_ptr && hx->s.data_partitioning
&& s->context_initialized && s->context_initialized
&& (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc) && (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc)
&& (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type_nos!=FF_B_TYPE) && (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type_nos!=AV_PICTURE_TYPE_B)
&& (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type_nos==FF_I_TYPE) && (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type_nos==AV_PICTURE_TYPE_I)
&& avctx->skip_frame < AVDISCARD_ALL) && avctx->skip_frame < AVDISCARD_ALL)
context_count++; context_count++;
break; break;

@ -1118,7 +1118,7 @@ static void fill_decode_caches(H264Context *h, int mb_type){
} }
AV_ZERO16(h->mvd_cache [list][scan8[4 ]]); AV_ZERO16(h->mvd_cache [list][scan8[4 ]]);
AV_ZERO16(h->mvd_cache [list][scan8[12]]); AV_ZERO16(h->mvd_cache [list][scan8[12]]);
if(h->slice_type_nos == FF_B_TYPE){ if(h->slice_type_nos == AV_PICTURE_TYPE_B){
fill_rectangle(&h->direct_cache[scan8[0]], 4, 4, 8, MB_TYPE_16x16>>1, 1); fill_rectangle(&h->direct_cache[scan8[0]], 4, 4, 8, MB_TYPE_16x16>>1, 1);
if(IS_DIRECT(top_type)){ if(IS_DIRECT(top_type)){
@ -1255,7 +1255,7 @@ static inline void write_back_motion(H264Context *h, int mb_type){
} }
} }
if(h->slice_type_nos == FF_B_TYPE && CABAC){ if(h->slice_type_nos == AV_PICTURE_TYPE_B && CABAC){
if(IS_8X8(mb_type)){ if(IS_8X8(mb_type)){
uint8_t *direct_table = &h->direct_table[4*h->mb_xy]; uint8_t *direct_table = &h->direct_table[4*h->mb_xy];
direct_table[1] = h->sub_mb_type[1]>>1; direct_table[1] = h->sub_mb_type[1]>>1;
@ -1286,7 +1286,7 @@ static void av_unused decode_mb_skip(H264Context *h){
if(MB_FIELD) if(MB_FIELD)
mb_type|= MB_TYPE_INTERLACED; mb_type|= MB_TYPE_INTERLACED;
if( h->slice_type_nos == FF_B_TYPE ) if( h->slice_type_nos == AV_PICTURE_TYPE_B )
{ {
// just for fill_caches. pred_direct_motion will set the real mb_type // just for fill_caches. pred_direct_motion will set the real mb_type
mb_type|= MB_TYPE_L0L1|MB_TYPE_DIRECT2|MB_TYPE_SKIP; mb_type|= MB_TYPE_L0L1|MB_TYPE_DIRECT2|MB_TYPE_SKIP;

@ -691,7 +691,7 @@ void ff_h264_init_cabac_states(H264Context *h) {
const int8_t (*tab)[2]; const int8_t (*tab)[2];
const int slice_qp = av_clip(s->qscale - 6*(h->sps.bit_depth_luma-8), 0, 51); const int slice_qp = av_clip(s->qscale - 6*(h->sps.bit_depth_luma-8), 0, 51);
if( h->slice_type_nos == FF_I_TYPE ) tab = cabac_context_init_I; if( h->slice_type_nos == AV_PICTURE_TYPE_I ) tab = cabac_context_init_I;
else tab = cabac_context_init_PB[h->cabac_init_idc]; else tab = cabac_context_init_PB[h->cabac_init_idc];
/* calculate pre-state */ /* calculate pre-state */
@ -779,7 +779,7 @@ static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) {
if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mbb_xy] )) if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mbb_xy] ))
ctx++; ctx++;
if( h->slice_type_nos == FF_B_TYPE ) if( h->slice_type_nos == AV_PICTURE_TYPE_B )
ctx += 13; ctx += 13;
return get_cabac_noinline( &h->cabac, &h->cabac_state[11+ctx] ); return get_cabac_noinline( &h->cabac, &h->cabac_state[11+ctx] );
} }
@ -888,7 +888,7 @@ static int decode_cabac_mb_ref( H264Context *h, int list, int n ) {
int ref = 0; int ref = 0;
int ctx = 0; int ctx = 0;
if( h->slice_type_nos == FF_B_TYPE) { if( h->slice_type_nos == AV_PICTURE_TYPE_B) {
if( refa > 0 && !(h->direct_cache[scan8[n] - 1]&(MB_TYPE_DIRECT2>>1)) ) if( refa > 0 && !(h->direct_cache[scan8[n] - 1]&(MB_TYPE_DIRECT2>>1)) )
ctx++; ctx++;
if( refb > 0 && !(h->direct_cache[scan8[n] - 8]&(MB_TYPE_DIRECT2>>1)) ) if( refb > 0 && !(h->direct_cache[scan8[n] - 8]&(MB_TYPE_DIRECT2>>1)) )
@ -1208,7 +1208,7 @@ int ff_h264_decode_mb_cabac(H264Context *h) {
mb_xy = h->mb_xy = s->mb_x + s->mb_y*s->mb_stride; mb_xy = h->mb_xy = s->mb_x + s->mb_y*s->mb_stride;
tprintf(s->avctx, "pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y); tprintf(s->avctx, "pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y);
if( h->slice_type_nos != FF_I_TYPE ) { if( h->slice_type_nos != AV_PICTURE_TYPE_I ) {
int skip; int skip;
/* a skipped mb needs the aff flag from the following mb */ /* a skipped mb needs the aff flag from the following mb */
if( FRAME_MBAFF && (s->mb_y&1)==1 && h->prev_mb_skipped ) if( FRAME_MBAFF && (s->mb_y&1)==1 && h->prev_mb_skipped )
@ -1244,9 +1244,9 @@ int ff_h264_decode_mb_cabac(H264Context *h) {
fill_decode_neighbors(h, -(MB_FIELD)); fill_decode_neighbors(h, -(MB_FIELD));
if( h->slice_type_nos == FF_B_TYPE ) { if( h->slice_type_nos == AV_PICTURE_TYPE_B ) {
int ctx = 0; int ctx = 0;
assert(h->slice_type_nos == FF_B_TYPE); assert(h->slice_type_nos == AV_PICTURE_TYPE_B);
if( !IS_DIRECT( h->left_type[0]-1 ) ) if( !IS_DIRECT( h->left_type[0]-1 ) )
ctx++; ctx++;
@ -1279,7 +1279,7 @@ int ff_h264_decode_mb_cabac(H264Context *h) {
} }
partition_count= b_mb_type_info[mb_type].partition_count; partition_count= b_mb_type_info[mb_type].partition_count;
mb_type= b_mb_type_info[mb_type].type; mb_type= b_mb_type_info[mb_type].type;
} else if( h->slice_type_nos == FF_P_TYPE ) { } else if( h->slice_type_nos == AV_PICTURE_TYPE_P ) {
if( get_cabac_noinline( &h->cabac, &h->cabac_state[14] ) == 0 ) { if( get_cabac_noinline( &h->cabac, &h->cabac_state[14] ) == 0 ) {
/* P-type */ /* P-type */
if( get_cabac_noinline( &h->cabac, &h->cabac_state[15] ) == 0 ) { if( get_cabac_noinline( &h->cabac, &h->cabac_state[15] ) == 0 ) {
@ -1297,9 +1297,9 @@ int ff_h264_decode_mb_cabac(H264Context *h) {
} }
} else { } else {
mb_type= decode_cabac_intra_mb_type(h, 3, 1); mb_type= decode_cabac_intra_mb_type(h, 3, 1);
if(h->slice_type == FF_SI_TYPE && mb_type) if(h->slice_type == AV_PICTURE_TYPE_SI && mb_type)
mb_type--; mb_type--;
assert(h->slice_type_nos == FF_I_TYPE); assert(h->slice_type_nos == AV_PICTURE_TYPE_I);
decode_intra_mb: decode_intra_mb:
partition_count = 0; partition_count = 0;
cbp= i_mb_type_info[mb_type].cbp; cbp= i_mb_type_info[mb_type].cbp;
@ -1388,7 +1388,7 @@ decode_intra_mb:
} else if( partition_count == 4 ) { } else if( partition_count == 4 ) {
int i, j, sub_partition_count[4], list, ref[2][4]; int i, j, sub_partition_count[4], list, ref[2][4];
if( h->slice_type_nos == FF_B_TYPE ) { if( h->slice_type_nos == AV_PICTURE_TYPE_B ) {
for( i = 0; i < 4; i++ ) { for( i = 0; i < 4; i++ ) {
h->sub_mb_type[i] = decode_cabac_b_mb_sub_type( h ); h->sub_mb_type[i] = decode_cabac_b_mb_sub_type( h );
sub_partition_count[i]= b_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count; sub_partition_count[i]= b_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count;

@ -548,7 +548,7 @@ int ff_h264_decode_mb_cavlc(H264Context *h){
tprintf(s->avctx, "pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y); tprintf(s->avctx, "pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y);
cbp = 0; /* avoid warning. FIXME: find a solution without slowing cbp = 0; /* avoid warning. FIXME: find a solution without slowing
down the code */ down the code */
if(h->slice_type_nos != FF_I_TYPE){ if(h->slice_type_nos != AV_PICTURE_TYPE_I){
if(s->mb_skip_run==-1) if(s->mb_skip_run==-1)
s->mb_skip_run= get_ue_golomb(&s->gb); s->mb_skip_run= get_ue_golomb(&s->gb);
@ -569,7 +569,7 @@ int ff_h264_decode_mb_cavlc(H264Context *h){
h->prev_mb_skipped= 0; h->prev_mb_skipped= 0;
mb_type= get_ue_golomb(&s->gb); mb_type= get_ue_golomb(&s->gb);
if(h->slice_type_nos == FF_B_TYPE){ if(h->slice_type_nos == AV_PICTURE_TYPE_B){
if(mb_type < 23){ if(mb_type < 23){
partition_count= b_mb_type_info[mb_type].partition_count; partition_count= b_mb_type_info[mb_type].partition_count;
mb_type= b_mb_type_info[mb_type].type; mb_type= b_mb_type_info[mb_type].type;
@ -577,7 +577,7 @@ int ff_h264_decode_mb_cavlc(H264Context *h){
mb_type -= 23; mb_type -= 23;
goto decode_intra_mb; goto decode_intra_mb;
} }
}else if(h->slice_type_nos == FF_P_TYPE){ }else if(h->slice_type_nos == AV_PICTURE_TYPE_P){
if(mb_type < 5){ if(mb_type < 5){
partition_count= p_mb_type_info[mb_type].partition_count; partition_count= p_mb_type_info[mb_type].partition_count;
mb_type= p_mb_type_info[mb_type].type; mb_type= p_mb_type_info[mb_type].type;
@ -586,8 +586,8 @@ int ff_h264_decode_mb_cavlc(H264Context *h){
goto decode_intra_mb; goto decode_intra_mb;
} }
}else{ }else{
assert(h->slice_type_nos == FF_I_TYPE); assert(h->slice_type_nos == AV_PICTURE_TYPE_I);
if(h->slice_type == FF_SI_TYPE && mb_type) if(h->slice_type == AV_PICTURE_TYPE_SI && mb_type)
mb_type--; mb_type--;
decode_intra_mb: decode_intra_mb:
if(mb_type > 25){ if(mb_type > 25){
@ -678,7 +678,7 @@ decode_intra_mb:
}else if(partition_count==4){ }else if(partition_count==4){
int i, j, sub_partition_count[4], list, ref[2][4]; int i, j, sub_partition_count[4], list, ref[2][4];
if(h->slice_type_nos == FF_B_TYPE){ if(h->slice_type_nos == AV_PICTURE_TYPE_B){
for(i=0; i<4; i++){ for(i=0; i<4; i++){
h->sub_mb_type[i]= get_ue_golomb_31(&s->gb); h->sub_mb_type[i]= get_ue_golomb_31(&s->gb);
if(h->sub_mb_type[i] >=13){ if(h->sub_mb_type[i] >=13){
@ -696,7 +696,7 @@ decode_intra_mb:
h->ref_cache[1][scan8[12]] = PART_NOT_AVAILABLE; h->ref_cache[1][scan8[12]] = PART_NOT_AVAILABLE;
} }
}else{ }else{
assert(h->slice_type_nos == FF_P_TYPE); //FIXME SP correct ? assert(h->slice_type_nos == AV_PICTURE_TYPE_P); //FIXME SP correct ?
for(i=0; i<4; i++){ for(i=0; i<4; i++){
h->sub_mb_type[i]= get_ue_golomb_31(&s->gb); h->sub_mb_type[i]= get_ue_golomb_31(&s->gb);
if(h->sub_mb_type[i] >=4){ if(h->sub_mb_type[i] >=4){

@ -130,7 +130,7 @@ void ff_h264_direct_ref_list_init(H264Context * const h){
h->col_fieldoff= 2*(h->ref_list[1][0].reference) - 3; h->col_fieldoff= 2*(h->ref_list[1][0].reference) - 3;
} }
if(cur->pict_type != FF_B_TYPE || h->direct_spatial_mv_pred) if(cur->pict_type != AV_PICTURE_TYPE_B || h->direct_spatial_mv_pred)
return; return;
for(list=0; list<2; list++){ for(list=0; list<2; list++){

@ -117,7 +117,7 @@ static inline int parse_nal_units(AVCodecParserContext *s,
const uint8_t *ptr; const uint8_t *ptr;
/* set some sane default values */ /* set some sane default values */
s->pict_type = FF_I_TYPE; s->pict_type = AV_PICTURE_TYPE_I;
s->key_frame = 0; s->key_frame = 0;
h->s.avctx= avctx; h->s.avctx= avctx;

@ -110,7 +110,7 @@ int ff_h264_fill_default_ref_list(H264Context *h){
MpegEncContext * const s = &h->s; MpegEncContext * const s = &h->s;
int i, len; int i, len;
if(h->slice_type_nos==FF_B_TYPE){ if(h->slice_type_nos==AV_PICTURE_TYPE_B){
Picture *sorted[32]; Picture *sorted[32];
int cur_poc, list; int cur_poc, list;
int lens[2]; int lens[2];
@ -149,7 +149,7 @@ int ff_h264_fill_default_ref_list(H264Context *h){
for (i=0; i<h->ref_count[0]; i++) { for (i=0; i<h->ref_count[0]; i++) {
tprintf(h->s.avctx, "List0: %s fn:%d 0x%p\n", (h->default_ref_list[0][i].long_ref ? "LT" : "ST"), h->default_ref_list[0][i].pic_id, h->default_ref_list[0][i].data[0]); tprintf(h->s.avctx, "List0: %s fn:%d 0x%p\n", (h->default_ref_list[0][i].long_ref ? "LT" : "ST"), h->default_ref_list[0][i].pic_id, h->default_ref_list[0][i].data[0]);
} }
if(h->slice_type_nos==FF_B_TYPE){ if(h->slice_type_nos==AV_PICTURE_TYPE_B){
for (i=0; i<h->ref_count[1]; i++) { for (i=0; i<h->ref_count[1]; i++) {
tprintf(h->s.avctx, "List1: %s fn:%d 0x%p\n", (h->default_ref_list[1][i].long_ref ? "LT" : "ST"), h->default_ref_list[1][i].pic_id, h->default_ref_list[1][i].data[0]); tprintf(h->s.avctx, "List1: %s fn:%d 0x%p\n", (h->default_ref_list[1][i].long_ref ? "LT" : "ST"), h->default_ref_list[1][i].pic_id, h->default_ref_list[1][i].data[0]);
} }

@ -36,7 +36,7 @@
static const uint8_t golomb_to_pict_type[5]= static const uint8_t golomb_to_pict_type[5]=
{FF_P_TYPE, FF_B_TYPE, FF_I_TYPE, FF_SP_TYPE, FF_SI_TYPE}; {AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_SP, AV_PICTURE_TYPE_SI};
static const uint8_t golomb_to_intra4x4_cbp[48]={ static const uint8_t golomb_to_intra4x4_cbp[48]={
47, 31, 15, 0, 23, 27, 29, 30, 7, 11, 13, 14, 39, 43, 45, 46, 47, 31, 15, 0, 23, 27, 29, 30, 7, 11, 13, 14, 39, 43, 45, 46,

@ -1238,7 +1238,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
int i, j, size=0; int i, j, size=0;
*p = *pict; *p = *pict;
p->pict_type= FF_I_TYPE; p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1; p->key_frame= 1;
if(s->context){ if(s->context){

@ -52,7 +52,7 @@ int ff_intel_h263_decode_picture_header(MpegEncContext *s)
} }
s->h263_plus = 0; s->h263_plus = 0;
s->pict_type = FF_I_TYPE + get_bits1(&s->gb); s->pict_type = AV_PICTURE_TYPE_I + get_bits1(&s->gb);
s->unrestricted_mv = get_bits1(&s->gb); s->unrestricted_mv = get_bits1(&s->gb);
s->h263_long_vectors = s->unrestricted_mv; s->h263_long_vectors = s->unrestricted_mv;

@ -347,7 +347,7 @@ static void preview_obmc(MpegEncContext *s){
s->block_index[i]+= 1; s->block_index[i]+= 1;
s->mb_x++; s->mb_x++;
assert(s->pict_type == FF_P_TYPE); assert(s->pict_type == AV_PICTURE_TYPE_P);
do{ do{
if (get_bits1(&s->gb)) { if (get_bits1(&s->gb)) {
@ -460,7 +460,7 @@ static int h263_decode_block(MpegEncContext * s, DCTELEM * block,
/* DC coef */ /* DC coef */
if(s->codec_id == CODEC_ID_RV10){ if(s->codec_id == CODEC_ID_RV10){
#if CONFIG_RV10_DECODER #if CONFIG_RV10_DECODER
if (s->rv10_version == 3 && s->pict_type == FF_I_TYPE) { if (s->rv10_version == 3 && s->pict_type == AV_PICTURE_TYPE_I) {
int component, diff; int component, diff;
component = (n <= 3 ? 0 : n - 4 + 1); component = (n <= 3 ? 0 : n - 4 + 1);
level = s->last_dc[component]; level = s->last_dc[component];
@ -608,7 +608,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
assert(!s->h263_pred); assert(!s->h263_pred);
if (s->pict_type == FF_P_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_P) {
do{ do{
if (get_bits1(&s->gb)) { if (get_bits1(&s->gb)) {
/* skip mb */ /* skip mb */
@ -700,7 +700,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
mot_val[1] = my; mot_val[1] = my;
} }
} }
} else if(s->pict_type==FF_B_TYPE) { } else if(s->pict_type==AV_PICTURE_TYPE_B) {
int mb_type; int mb_type;
const int stride= s->b8_stride; const int stride= s->b8_stride;
int16_t *mot_val0 = s->current_picture.motion_val[0][ 2*(s->mb_x + s->mb_y*stride) ]; int16_t *mot_val0 = s->current_picture.motion_val[0][ 2*(s->mb_x + s->mb_y*stride) ];
@ -843,7 +843,7 @@ intra:
if(s->pb_frame && h263_skip_b_part(s, cbpb) < 0) if(s->pb_frame && h263_skip_b_part(s, cbpb) < 0)
return -1; return -1;
if(s->obmc && !s->mb_intra){ if(s->obmc && !s->mb_intra){
if(s->pict_type == FF_P_TYPE && s->mb_x+1<s->mb_width && s->mb_num_left != 1) if(s->pict_type == AV_PICTURE_TYPE_P && s->mb_x+1<s->mb_width && s->mb_num_left != 1)
preview_obmc(s); preview_obmc(s);
} }
end: end:
@ -921,7 +921,7 @@ int h263_decode_picture_header(MpegEncContext *s)
if (!width) if (!width)
return -1; return -1;
s->pict_type = FF_I_TYPE + get_bits1(&s->gb); s->pict_type = AV_PICTURE_TYPE_I + get_bits1(&s->gb);
s->h263_long_vectors = get_bits1(&s->gb); s->h263_long_vectors = get_bits1(&s->gb);
@ -985,11 +985,11 @@ int h263_decode_picture_header(MpegEncContext *s)
/* MPPTYPE */ /* MPPTYPE */
s->pict_type = get_bits(&s->gb, 3); s->pict_type = get_bits(&s->gb, 3);
switch(s->pict_type){ switch(s->pict_type){
case 0: s->pict_type= FF_I_TYPE;break; case 0: s->pict_type= AV_PICTURE_TYPE_I;break;
case 1: s->pict_type= FF_P_TYPE;break; case 1: s->pict_type= AV_PICTURE_TYPE_P;break;
case 2: s->pict_type= FF_P_TYPE;s->pb_frame = 3;break; case 2: s->pict_type= AV_PICTURE_TYPE_P;s->pb_frame = 3;break;
case 3: s->pict_type= FF_B_TYPE;break; case 3: s->pict_type= AV_PICTURE_TYPE_B;break;
case 7: s->pict_type= FF_I_TYPE;break; //ZYGO case 7: s->pict_type= AV_PICTURE_TYPE_I;break; //ZYGO
default: default:
return -1; return -1;
} }
@ -1112,7 +1112,7 @@ int h263_decode_picture_header(MpegEncContext *s)
} }
ff_h263_show_pict_info(s); ff_h263_show_pict_info(s);
if (s->pict_type == FF_I_TYPE && s->codec_tag == AV_RL32("ZYGO")){ if (s->pict_type == AV_PICTURE_TYPE_I && s->codec_tag == AV_RL32("ZYGO")){
int i,j; int i,j;
for(i=0; i<85; i++) av_log(s->avctx, AV_LOG_DEBUG, "%d", get_bits1(&s->gb)); for(i=0; i<85; i++) av_log(s->avctx, AV_LOG_DEBUG, "%d", get_bits1(&s->gb));
av_log(s->avctx, AV_LOG_DEBUG, "\n"); av_log(s->avctx, AV_LOG_DEBUG, "\n");

@ -145,7 +145,7 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number)
if (!s->h263_plus) { if (!s->h263_plus) {
/* H.263v1 */ /* H.263v1 */
put_bits(&s->pb, 3, format); put_bits(&s->pb, 3, format);
put_bits(&s->pb, 1, (s->pict_type == FF_P_TYPE)); put_bits(&s->pb, 1, (s->pict_type == AV_PICTURE_TYPE_P));
/* By now UMV IS DISABLED ON H.263v1, since the restrictions /* By now UMV IS DISABLED ON H.263v1, since the restrictions
of H.263v1 UMV implies to check the predicted MV after of H.263v1 UMV implies to check the predicted MV after
calculation of the current MB to see if we're on the limits */ calculation of the current MB to see if we're on the limits */
@ -181,7 +181,7 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number)
put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */ put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */
put_bits(&s->pb,3,0); /* Reserved */ put_bits(&s->pb,3,0); /* Reserved */
put_bits(&s->pb, 3, s->pict_type == FF_P_TYPE); put_bits(&s->pb, 3, s->pict_type == AV_PICTURE_TYPE_P);
put_bits(&s->pb,1,0); /* Reference Picture Resampling: off */ put_bits(&s->pb,1,0); /* Reference Picture Resampling: off */
put_bits(&s->pb,1,0); /* Reduced-Resolution Update: off */ put_bits(&s->pb,1,0); /* Reduced-Resolution Update: off */
@ -260,12 +260,12 @@ void h263_encode_gob_header(MpegEncContext * s, int mb_line)
put_bits(&s->pb, 1, 1); put_bits(&s->pb, 1, 1);
put_bits(&s->pb, 5, s->qscale); /* GQUANT */ put_bits(&s->pb, 5, s->qscale); /* GQUANT */
put_bits(&s->pb, 1, 1); put_bits(&s->pb, 1, 1);
put_bits(&s->pb, 2, s->pict_type == FF_I_TYPE); /* GFID */ put_bits(&s->pb, 2, s->pict_type == AV_PICTURE_TYPE_I); /* GFID */
}else{ }else{
int gob_number= mb_line / s->gob_index; int gob_number= mb_line / s->gob_index;
put_bits(&s->pb, 5, gob_number); /* GN */ put_bits(&s->pb, 5, gob_number); /* GN */
put_bits(&s->pb, 2, s->pict_type == FF_I_TYPE); /* GFID */ put_bits(&s->pb, 2, s->pict_type == AV_PICTURE_TYPE_I); /* GFID */
put_bits(&s->pb, 5, s->qscale); /* GQUANT */ put_bits(&s->pb, 5, s->qscale); /* GQUANT */
} }
} }
@ -607,7 +607,7 @@ void h263_encode_mb(MpegEncContext * s,
} }
cbpc = cbp & 3; cbpc = cbp & 3;
if (s->pict_type == FF_I_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_I) {
if(s->dquant) cbpc+=4; if(s->dquant) cbpc+=4;
put_bits(&s->pb, put_bits(&s->pb,
ff_h263_intra_MCBPC_bits[cbpc], ff_h263_intra_MCBPC_bits[cbpc],

@ -245,7 +245,7 @@ static int encode_picture_ls(AVCodecContext *avctx, unsigned char *buf, int buf_
init_put_bits(&pb2, buf2, buf_size); init_put_bits(&pb2, buf2, buf_size);
*p = *pict; *p = *pict;
p->pict_type= FF_I_TYPE; p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1; p->key_frame= 1;
if(avctx->pix_fmt == PIX_FMT_GRAY8 || avctx->pix_fmt == PIX_FMT_GRAY16) if(avctx->pix_fmt == PIX_FMT_GRAY8 || avctx->pix_fmt == PIX_FMT_GRAY16)

@ -180,7 +180,7 @@ static int decode_frame(AVCodecContext *avctx,
if (video_size) { if (video_size) {
s->frame.key_frame = 1; s->frame.key_frame = 1;
s->frame.pict_type = FF_I_TYPE; s->frame.pict_type = AV_PICTURE_TYPE_I;
s->frame.palette_has_changed = s->palette_has_changed; s->frame.palette_has_changed = s->palette_has_changed;
s->palette_has_changed = 0; s->palette_has_changed = 0;
memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE); memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);

@ -259,10 +259,10 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPa
if (header & KMVC_KEYFRAME) { if (header & KMVC_KEYFRAME) {
ctx->pic.key_frame = 1; ctx->pic.key_frame = 1;
ctx->pic.pict_type = FF_I_TYPE; ctx->pic.pict_type = AV_PICTURE_TYPE_I;
} else { } else {
ctx->pic.key_frame = 0; ctx->pic.key_frame = 0;
ctx->pic.pict_type = FF_P_TYPE; ctx->pic.pict_type = AV_PICTURE_TYPE_P;
} }
if (header & KMVC_PALETTE) { if (header & KMVC_PALETTE) {

@ -76,7 +76,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
int zret; // Zlib return code int zret; // Zlib return code
*p = *pict; *p = *pict;
p->pict_type= FF_I_TYPE; p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1; p->key_frame= 1;
if(avctx->pix_fmt != PIX_FMT_BGR24){ if(avctx->pix_fmt != PIX_FMT_BGR24){

@ -358,9 +358,9 @@ static int storeframe(AVCodecContext *avctx, struct FrameListData *cx_frame,
coded_frame->key_frame = !!(cx_frame->flags & VPX_FRAME_IS_KEY); coded_frame->key_frame = !!(cx_frame->flags & VPX_FRAME_IS_KEY);
if (coded_frame->key_frame) if (coded_frame->key_frame)
coded_frame->pict_type = FF_I_TYPE; coded_frame->pict_type = AV_PICTURE_TYPE_I;
else else
coded_frame->pict_type = FF_P_TYPE; coded_frame->pict_type = AV_PICTURE_TYPE_P;
} else { } else {
av_log(avctx, AV_LOG_ERROR, av_log(avctx, AV_LOG_ERROR,
"Compressed frame larger than storage provided! (%zu/%d)\n", "Compressed frame larger than storage provided! (%zu/%d)\n",

@ -110,9 +110,9 @@ static int X264_frame(AVCodecContext *ctx, uint8_t *buf,
x4->pic.i_pts = frame->pts; x4->pic.i_pts = frame->pts;
x4->pic.i_type = x4->pic.i_type =
frame->pict_type == FF_I_TYPE ? X264_TYPE_KEYFRAME : frame->pict_type == AV_PICTURE_TYPE_I ? X264_TYPE_KEYFRAME :
frame->pict_type == FF_P_TYPE ? X264_TYPE_P : frame->pict_type == AV_PICTURE_TYPE_P ? X264_TYPE_P :
frame->pict_type == FF_B_TYPE ? X264_TYPE_B : frame->pict_type == AV_PICTURE_TYPE_B ? X264_TYPE_B :
X264_TYPE_AUTO; X264_TYPE_AUTO;
if (x4->params.b_tff != frame->top_field_first) { if (x4->params.b_tff != frame->top_field_first) {
x4->params.b_tff = frame->top_field_first; x4->params.b_tff = frame->top_field_first;
@ -135,14 +135,14 @@ static int X264_frame(AVCodecContext *ctx, uint8_t *buf,
switch (pic_out.i_type) { switch (pic_out.i_type) {
case X264_TYPE_IDR: case X264_TYPE_IDR:
case X264_TYPE_I: case X264_TYPE_I:
x4->out_pic.pict_type = FF_I_TYPE; x4->out_pic.pict_type = AV_PICTURE_TYPE_I;
break; break;
case X264_TYPE_P: case X264_TYPE_P:
x4->out_pic.pict_type = FF_P_TYPE; x4->out_pic.pict_type = AV_PICTURE_TYPE_P;
break; break;
case X264_TYPE_B: case X264_TYPE_B:
case X264_TYPE_BREF: case X264_TYPE_BREF:
x4->out_pic.pict_type = FF_B_TYPE; x4->out_pic.pict_type = AV_PICTURE_TYPE_B;
break; break;
} }

@ -138,14 +138,14 @@ static int XAVS_frame(AVCodecContext *ctx, uint8_t *buf,
switch (pic_out.i_type) { switch (pic_out.i_type) {
case XAVS_TYPE_IDR: case XAVS_TYPE_IDR:
case XAVS_TYPE_I: case XAVS_TYPE_I:
x4->out_pic.pict_type = FF_I_TYPE; x4->out_pic.pict_type = AV_PICTURE_TYPE_I;
break; break;
case XAVS_TYPE_P: case XAVS_TYPE_P:
x4->out_pic.pict_type = FF_P_TYPE; x4->out_pic.pict_type = AV_PICTURE_TYPE_P;
break; break;
case XAVS_TYPE_B: case XAVS_TYPE_B:
case XAVS_TYPE_BREF: case XAVS_TYPE_BREF:
x4->out_pic.pict_type = FF_B_TYPE; x4->out_pic.pict_type = AV_PICTURE_TYPE_B;
break; break;
} }

@ -134,7 +134,7 @@ float ff_xvid_rate_estimate_qscale(MpegEncContext *s, int dry_run){
if(!dry_run) if(!dry_run)
s->rc_context.dry_run_qscale= 0; s->rc_context.dry_run_qscale= 0;
if(s->pict_type == FF_B_TYPE) //FIXME this is not exactly identical to xvid if(s->pict_type == AV_PICTURE_TYPE_B) //FIXME this is not exactly identical to xvid
return xvid_plg_data.quant * FF_QP2LAMBDA * s->avctx->b_quant_factor + s->avctx->b_quant_offset; return xvid_plg_data.quant * FF_QP2LAMBDA * s->avctx->b_quant_factor + s->avctx->b_quant_offset;
else else
return xvid_plg_data.quant * FF_QP2LAMBDA; return xvid_plg_data.quant * FF_QP2LAMBDA;

@ -450,9 +450,9 @@ static int xvid_encode_frame(AVCodecContext *avctx,
xvid_enc_frame.vol_flags = x->vol_flags; xvid_enc_frame.vol_flags = x->vol_flags;
xvid_enc_frame.motion = x->me_flags; xvid_enc_frame.motion = x->me_flags;
xvid_enc_frame.type = xvid_enc_frame.type =
picture->pict_type == FF_I_TYPE ? XVID_TYPE_IVOP : picture->pict_type == AV_PICTURE_TYPE_I ? XVID_TYPE_IVOP :
picture->pict_type == FF_P_TYPE ? XVID_TYPE_PVOP : picture->pict_type == AV_PICTURE_TYPE_P ? XVID_TYPE_PVOP :
picture->pict_type == FF_B_TYPE ? XVID_TYPE_BVOP : picture->pict_type == AV_PICTURE_TYPE_B ? XVID_TYPE_BVOP :
XVID_TYPE_AUTO; XVID_TYPE_AUTO;
/* Pixel aspect ratio setting */ /* Pixel aspect ratio setting */
@ -493,13 +493,13 @@ static int xvid_encode_frame(AVCodecContext *avctx,
if( 0 <= xerr ) { if( 0 <= xerr ) {
p->quality = xvid_enc_stats.quant * FF_QP2LAMBDA; p->quality = xvid_enc_stats.quant * FF_QP2LAMBDA;
if( xvid_enc_stats.type == XVID_TYPE_PVOP ) if( xvid_enc_stats.type == XVID_TYPE_PVOP )
p->pict_type = FF_P_TYPE; p->pict_type = AV_PICTURE_TYPE_P;
else if( xvid_enc_stats.type == XVID_TYPE_BVOP ) else if( xvid_enc_stats.type == XVID_TYPE_BVOP )
p->pict_type = FF_B_TYPE; p->pict_type = AV_PICTURE_TYPE_B;
else if( xvid_enc_stats.type == XVID_TYPE_SVOP ) else if( xvid_enc_stats.type == XVID_TYPE_SVOP )
p->pict_type = FF_S_TYPE; p->pict_type = AV_PICTURE_TYPE_S;
else else
p->pict_type = FF_I_TYPE; p->pict_type = AV_PICTURE_TYPE_I;
if( xvid_enc_frame.out_flags & XVID_KEYFRAME ) { if( xvid_enc_frame.out_flags & XVID_KEYFRAME ) {
p->key_frame = 1; p->key_frame = 1;
if( x->quicktime_format ) if( x->quicktime_format )

@ -49,7 +49,7 @@ static int encode_picture_lossless(AVCodecContext *avctx, unsigned char *buf, in
init_put_bits(&s->pb, buf, buf_size); init_put_bits(&s->pb, buf, buf_size);
*p = *pict; *p = *pict;
p->pict_type= FF_I_TYPE; p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1; p->key_frame= 1;
ff_mjpeg_encode_picture_header(s); ff_mjpeg_encode_picture_header(s);

@ -170,7 +170,7 @@ static int decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1; return -1;
} }
p->pict_type= FF_I_TYPE; p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1; p->key_frame= 1;
av_fast_malloc(&a->bitstream_buffer, &a->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE); av_fast_malloc(&a->bitstream_buffer, &a->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);

@ -352,7 +352,7 @@ static int mimic_decode_frame(AVCodecContext *avctx, void *data,
} }
ctx->buf_ptrs[ctx->cur_index].reference = 1; ctx->buf_ptrs[ctx->cur_index].reference = 1;
ctx->buf_ptrs[ctx->cur_index].pict_type = is_pframe ? FF_P_TYPE:FF_I_TYPE; ctx->buf_ptrs[ctx->cur_index].pict_type = is_pframe ? AV_PICTURE_TYPE_P:AV_PICTURE_TYPE_I;
if(ff_thread_get_buffer(avctx, &ctx->buf_ptrs[ctx->cur_index])) { if(ff_thread_get_buffer(avctx, &ctx->buf_ptrs[ctx->cur_index])) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1; return -1;

@ -353,7 +353,7 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1; return -1;
} }
s->picture_ptr->pict_type= FF_I_TYPE; s->picture_ptr->pict_type= AV_PICTURE_TYPE_I;
s->picture_ptr->key_frame= 1; s->picture_ptr->key_frame= 1;
s->got_picture = 1; s->got_picture = 1;

@ -1893,7 +1893,7 @@ int ff_get_best_fcode(MpegEncContext * s, int16_t (*mv_table)[2], int type)
continue; continue;
for(j=0; j<fcode && j<8; j++){ for(j=0; j<fcode && j<8; j++){
if(s->pict_type==FF_B_TYPE || s->current_picture.mc_mb_var[xy] < s->current_picture.mb_var[xy]) if(s->pict_type==AV_PICTURE_TYPE_B || s->current_picture.mc_mb_var[xy] < s->current_picture.mb_var[xy])
score[j]-= 170; score[j]-= 170;
} }
} }
@ -1925,7 +1925,7 @@ void ff_fix_long_p_mvs(MpegEncContext * s)
MotionEstContext * const c= &s->me; MotionEstContext * const c= &s->me;
const int f_code= s->f_code; const int f_code= s->f_code;
int y, range; int y, range;
assert(s->pict_type==FF_P_TYPE); assert(s->pict_type==AV_PICTURE_TYPE_P);
range = (((s->out_format == FMT_MPEG1 || s->msmpeg4_version) ? 8 : 16) << f_code); range = (((s->out_format == FMT_MPEG1 || s->msmpeg4_version) ? 8 : 16) << f_code);

@ -1037,7 +1037,7 @@ static av_always_inline int epzs_motion_search_internal(MpegEncContext * s, int
score_map[0]= dmin; score_map[0]= dmin;
//FIXME precalc first term below? //FIXME precalc first term below?
if((s->pict_type == FF_B_TYPE && !(c->flags & FLAG_DIRECT)) || s->flags&CODEC_FLAG_MV0) if((s->pict_type == AV_PICTURE_TYPE_B && !(c->flags & FLAG_DIRECT)) || s->flags&CODEC_FLAG_MV0)
dmin += (mv_penalty[pred_x] + mv_penalty[pred_y])*penalty_factor; dmin += (mv_penalty[pred_x] + mv_penalty[pred_y])*penalty_factor;
/* first line */ /* first line */

@ -214,7 +214,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
assert(s->mb_skipped==0); assert(s->mb_skipped==0);
if (s->mb_skip_run-- != 0) { if (s->mb_skip_run-- != 0) {
if (s->pict_type == FF_P_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_P) {
s->mb_skipped = 1; s->mb_skipped = 1;
s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16; s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
} else { } else {
@ -240,7 +240,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
switch(s->pict_type) { switch(s->pict_type) {
default: default:
case FF_I_TYPE: case AV_PICTURE_TYPE_I:
if (get_bits1(&s->gb) == 0) { if (get_bits1(&s->gb) == 0) {
if (get_bits1(&s->gb) == 0){ if (get_bits1(&s->gb) == 0){
av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in I Frame at %d %d\n", s->mb_x, s->mb_y); av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in I Frame at %d %d\n", s->mb_x, s->mb_y);
@ -251,7 +251,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
mb_type = MB_TYPE_INTRA; mb_type = MB_TYPE_INTRA;
} }
break; break;
case FF_P_TYPE: case AV_PICTURE_TYPE_P:
mb_type = get_vlc2(&s->gb, mb_ptype_vlc.table, MB_PTYPE_VLC_BITS, 1); mb_type = get_vlc2(&s->gb, mb_ptype_vlc.table, MB_PTYPE_VLC_BITS, 1);
if (mb_type < 0){ if (mb_type < 0){
av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in P Frame at %d %d\n", s->mb_x, s->mb_y); av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in P Frame at %d %d\n", s->mb_x, s->mb_y);
@ -259,7 +259,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
} }
mb_type = ptype2mb_type[ mb_type ]; mb_type = ptype2mb_type[ mb_type ];
break; break;
case FF_B_TYPE: case AV_PICTURE_TYPE_B:
mb_type = get_vlc2(&s->gb, mb_btype_vlc.table, MB_BTYPE_VLC_BITS, 1); mb_type = get_vlc2(&s->gb, mb_btype_vlc.table, MB_BTYPE_VLC_BITS, 1);
if (mb_type < 0){ if (mb_type < 0){
av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in B Frame at %d %d\n", s->mb_x, s->mb_y); av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in B Frame at %d %d\n", s->mb_x, s->mb_y);
@ -1198,7 +1198,7 @@ static int mpeg_decode_update_thread_context(AVCodecContext *avctx, const AVCode
if(!ctx->mpeg_enc_ctx_allocated) if(!ctx->mpeg_enc_ctx_allocated)
memcpy(s + 1, s1 + 1, sizeof(Mpeg1Context) - sizeof(MpegEncContext)); memcpy(s + 1, s1 + 1, sizeof(Mpeg1Context) - sizeof(MpegEncContext));
if(!(s->pict_type == FF_B_TYPE || s->low_delay)) if(!(s->pict_type == AV_PICTURE_TYPE_B || s->low_delay))
s->picture_number++; s->picture_number++;
return 0; return 0;
@ -1408,7 +1408,7 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
return -1; return -1;
vbv_delay= get_bits(&s->gb, 16); vbv_delay= get_bits(&s->gb, 16);
if (s->pict_type == FF_P_TYPE || s->pict_type == FF_B_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type == AV_PICTURE_TYPE_B) {
s->full_pel[0] = get_bits1(&s->gb); s->full_pel[0] = get_bits1(&s->gb);
f_code = get_bits(&s->gb, 3); f_code = get_bits(&s->gb, 3);
if (f_code == 0 && avctx->error_recognition >= FF_ER_COMPLIANT) if (f_code == 0 && avctx->error_recognition >= FF_ER_COMPLIANT)
@ -1416,7 +1416,7 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
s->mpeg_f_code[0][0] = f_code; s->mpeg_f_code[0][0] = f_code;
s->mpeg_f_code[0][1] = f_code; s->mpeg_f_code[0][1] = f_code;
} }
if (s->pict_type == FF_B_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_B) {
s->full_pel[1] = get_bits1(&s->gb); s->full_pel[1] = get_bits1(&s->gb);
f_code = get_bits(&s->gb, 3); f_code = get_bits(&s->gb, 3);
if (f_code == 0 && avctx->error_recognition >= FF_ER_COMPLIANT) if (f_code == 0 && avctx->error_recognition >= FF_ER_COMPLIANT)
@ -1425,7 +1425,7 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
s->mpeg_f_code[1][1] = f_code; s->mpeg_f_code[1][1] = f_code;
} }
s->current_picture.pict_type= s->pict_type; s->current_picture.pict_type= s->pict_type;
s->current_picture.key_frame= s->pict_type == FF_I_TYPE; s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
if(avctx->debug & FF_DEBUG_PICT_INFO) if(avctx->debug & FF_DEBUG_PICT_INFO)
av_log(avctx, AV_LOG_DEBUG, "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type); av_log(avctx, AV_LOG_DEBUG, "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
@ -1573,13 +1573,13 @@ static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
av_log(s->avctx, AV_LOG_ERROR, "Missing picture start code, guessing missing values\n"); av_log(s->avctx, AV_LOG_ERROR, "Missing picture start code, guessing missing values\n");
if(s->mpeg_f_code[1][0] == 15 && s->mpeg_f_code[1][1]==15){ if(s->mpeg_f_code[1][0] == 15 && s->mpeg_f_code[1][1]==15){
if(s->mpeg_f_code[0][0] == 15 && s->mpeg_f_code[0][1] == 15) if(s->mpeg_f_code[0][0] == 15 && s->mpeg_f_code[0][1] == 15)
s->pict_type= FF_I_TYPE; s->pict_type= AV_PICTURE_TYPE_I;
else else
s->pict_type= FF_P_TYPE; s->pict_type= AV_PICTURE_TYPE_P;
}else }else
s->pict_type= FF_B_TYPE; s->pict_type= AV_PICTURE_TYPE_B;
s->current_picture.pict_type= s->pict_type; s->current_picture.pict_type= s->pict_type;
s->current_picture.key_frame= s->pict_type == FF_I_TYPE; s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
} }
s->intra_dc_precision = get_bits(&s->gb, 2); s->intra_dc_precision = get_bits(&s->gb, 2);
s->picture_structure = get_bits(&s->gb, 2); s->picture_structure = get_bits(&s->gb, 2);
@ -1790,7 +1790,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
if(s->avctx->debug&FF_DEBUG_PICT_INFO){ if(s->avctx->debug&FF_DEBUG_PICT_INFO){
av_log(s->avctx, AV_LOG_DEBUG, "qp:%d fc:%2d%2d%2d%2d %s %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n", av_log(s->avctx, AV_LOG_DEBUG, "qp:%d fc:%2d%2d%2d%2d %s %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
s->qscale, s->mpeg_f_code[0][0],s->mpeg_f_code[0][1],s->mpeg_f_code[1][0],s->mpeg_f_code[1][1], s->qscale, s->mpeg_f_code[0][0],s->mpeg_f_code[0][1],s->mpeg_f_code[1][0],s->mpeg_f_code[1][1],
s->pict_type == FF_I_TYPE ? "I" : (s->pict_type == FF_P_TYPE ? "P" : (s->pict_type == FF_B_TYPE ? "B" : "S")), s->pict_type == AV_PICTURE_TYPE_I ? "I" : (s->pict_type == AV_PICTURE_TYPE_P ? "P" : (s->pict_type == AV_PICTURE_TYPE_B ? "B" : "S")),
s->progressive_sequence ? "ps" :"", s->progressive_frame ? "pf" : "", s->alternate_scan ? "alt" :"", s->top_field_first ? "top" :"", s->progressive_sequence ? "ps" :"", s->progressive_frame ? "pf" : "", s->alternate_scan ? "alt" :"", s->top_field_first ? "top" :"",
s->intra_dc_precision, s->picture_structure, s->frame_pred_frame_dct, s->concealment_motion_vectors, s->intra_dc_precision, s->picture_structure, s->frame_pred_frame_dct, s->concealment_motion_vectors,
s->q_scale_type, s->intra_vlc_format, s->repeat_first_field, s->chroma_420_type ? "420" :""); s->q_scale_type, s->intra_vlc_format, s->repeat_first_field, s->chroma_420_type ? "420" :"");
@ -1813,7 +1813,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
for(i=0; i<2; i++){ for(i=0; i<2; i++){
for(dir=0; dir<2; dir++){ for(dir=0; dir<2; dir++){
if (s->mb_intra || (dir==1 && s->pict_type != FF_B_TYPE)) { if (s->mb_intra || (dir==1 && s->pict_type != AV_PICTURE_TYPE_B)) {
motion_x = motion_y = 0; motion_x = motion_y = 0;
}else if (s->mv_type == MV_TYPE_16X16 || (s->mv_type == MV_TYPE_FIELD && field_pic)){ }else if (s->mv_type == MV_TYPE_16X16 || (s->mv_type == MV_TYPE_FIELD && field_pic)){
motion_x = s->mv[dir][0][0]; motion_x = s->mv[dir][0][0];
@ -1853,7 +1853,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
if(s->mb_y >= s->mb_height){ if(s->mb_y >= s->mb_height){
int left= get_bits_left(&s->gb); int left= get_bits_left(&s->gb);
int is_d10= s->chroma_format==2 && s->pict_type==FF_I_TYPE && avctx->profile==0 && avctx->level==5 int is_d10= s->chroma_format==2 && s->pict_type==AV_PICTURE_TYPE_I && avctx->profile==0 && avctx->level==5
&& s->intra_dc_precision == 2 && s->q_scale_type == 1 && s->alternate_scan == 0 && s->intra_dc_precision == 2 && s->q_scale_type == 1 && s->alternate_scan == 0
&& s->progressive_frame == 0 /* vbv_delay == 0xBBB || 0xE10*/; && s->progressive_frame == 0 /* vbv_delay == 0xBBB || 0xE10*/;
@ -1896,7 +1896,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
} }
if(s->mb_skip_run){ if(s->mb_skip_run){
int i; int i;
if(s->pict_type == FF_I_TYPE){ if(s->pict_type == AV_PICTURE_TYPE_I){
av_log(s->avctx, AV_LOG_ERROR, "skipped MB in I frame at %d %d\n", s->mb_x, s->mb_y); av_log(s->avctx, AV_LOG_ERROR, "skipped MB in I frame at %d %d\n", s->mb_x, s->mb_y);
return -1; return -1;
} }
@ -1909,7 +1909,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
s->mv_type = MV_TYPE_16X16; s->mv_type = MV_TYPE_16X16;
else else
s->mv_type = MV_TYPE_FIELD; s->mv_type = MV_TYPE_FIELD;
if (s->pict_type == FF_P_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_P) {
/* if P type, zero motion vector is implied */ /* if P type, zero motion vector is implied */
s->mv_dir = MV_DIR_FORWARD; s->mv_dir = MV_DIR_FORWARD;
s->mv[0][0][0] = s->mv[0][0][1] = 0; s->mv[0][0][0] = s->mv[0][0][1] = 0;
@ -2000,7 +2000,7 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict)
MPV_frame_end(s); MPV_frame_end(s);
if (s->pict_type == FF_B_TYPE || s->low_delay) { if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
*pict= *(AVFrame*)s->current_picture_ptr; *pict= *(AVFrame*)s->current_picture_ptr;
ff_print_debug_info(s, pict); ff_print_debug_info(s, pict);
} else { } else {
@ -2339,7 +2339,7 @@ static int decode_chunks(AVCodecContext *avctx,
uint32_t start_code = -1; uint32_t start_code = -1;
buf_ptr = ff_find_start_code(buf_ptr,buf_end, &start_code); buf_ptr = ff_find_start_code(buf_ptr,buf_end, &start_code);
if (start_code > 0x1ff){ if (start_code > 0x1ff){
if(s2->pict_type != FF_B_TYPE || avctx->skip_frame <= AVDISCARD_DEFAULT){ if(s2->pict_type != AV_PICTURE_TYPE_B || avctx->skip_frame <= AVDISCARD_DEFAULT){
if(HAVE_THREADS && avctx->active_thread_type&FF_THREAD_SLICE){ if(HAVE_THREADS && avctx->active_thread_type&FF_THREAD_SLICE){
int i; int i;
@ -2465,19 +2465,19 @@ static int decode_chunks(AVCodecContext *avctx,
if(s2->last_picture_ptr==NULL){ if(s2->last_picture_ptr==NULL){
/* Skip B-frames if we do not have reference frames and gop is not closed */ /* Skip B-frames if we do not have reference frames and gop is not closed */
if(s2->pict_type==FF_B_TYPE){ if(s2->pict_type==AV_PICTURE_TYPE_B){
if(!s2->closed_gop) if(!s2->closed_gop)
break; break;
} }
} }
if(s2->pict_type==FF_I_TYPE) if(s2->pict_type==AV_PICTURE_TYPE_I)
s->sync=1; s->sync=1;
if(s2->next_picture_ptr==NULL){ if(s2->next_picture_ptr==NULL){
/* Skip P-frames if we do not have a reference frame or we have an invalid header. */ /* Skip P-frames if we do not have a reference frame or we have an invalid header. */
if(s2->pict_type==FF_P_TYPE && !s->sync) break; if(s2->pict_type==AV_PICTURE_TYPE_P && !s->sync) break;
} }
if( (avctx->skip_frame >= AVDISCARD_NONREF && s2->pict_type==FF_B_TYPE) if( (avctx->skip_frame >= AVDISCARD_NONREF && s2->pict_type==AV_PICTURE_TYPE_B)
||(avctx->skip_frame >= AVDISCARD_NONKEY && s2->pict_type!=FF_I_TYPE) ||(avctx->skip_frame >= AVDISCARD_NONKEY && s2->pict_type!=AV_PICTURE_TYPE_I)
|| avctx->skip_frame >= AVDISCARD_ALL) || avctx->skip_frame >= AVDISCARD_ALL)
break; break;

@ -354,7 +354,7 @@ void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
put_bits(&s->pb, 16, 0xFFFF); /* vbv_delay */ put_bits(&s->pb, 16, 0xFFFF); /* vbv_delay */
// RAL: Forward f_code also needed for B frames // RAL: Forward f_code also needed for B frames
if (s->pict_type == FF_P_TYPE || s->pict_type == FF_B_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type == AV_PICTURE_TYPE_B) {
put_bits(&s->pb, 1, 0); /* half pel coordinates */ put_bits(&s->pb, 1, 0); /* half pel coordinates */
if(s->codec_id == CODEC_ID_MPEG1VIDEO) if(s->codec_id == CODEC_ID_MPEG1VIDEO)
put_bits(&s->pb, 3, s->f_code); /* forward_f_code */ put_bits(&s->pb, 3, s->f_code); /* forward_f_code */
@ -363,7 +363,7 @@ void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
} }
// RAL: Backward f_code necessary for B frames // RAL: Backward f_code necessary for B frames
if (s->pict_type == FF_B_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_B) {
put_bits(&s->pb, 1, 0); /* half pel coordinates */ put_bits(&s->pb, 1, 0); /* half pel coordinates */
if(s->codec_id == CODEC_ID_MPEG1VIDEO) if(s->codec_id == CODEC_ID_MPEG1VIDEO)
put_bits(&s->pb, 3, s->b_code); /* backward_f_code */ put_bits(&s->pb, 3, s->b_code); /* backward_f_code */
@ -377,13 +377,13 @@ void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
if(s->codec_id == CODEC_ID_MPEG2VIDEO){ if(s->codec_id == CODEC_ID_MPEG2VIDEO){
put_header(s, EXT_START_CODE); put_header(s, EXT_START_CODE);
put_bits(&s->pb, 4, 8); //pic ext put_bits(&s->pb, 4, 8); //pic ext
if (s->pict_type == FF_P_TYPE || s->pict_type == FF_B_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type == AV_PICTURE_TYPE_B) {
put_bits(&s->pb, 4, s->f_code); put_bits(&s->pb, 4, s->f_code);
put_bits(&s->pb, 4, s->f_code); put_bits(&s->pb, 4, s->f_code);
}else{ }else{
put_bits(&s->pb, 8, 255); put_bits(&s->pb, 8, 255);
} }
if (s->pict_type == FF_B_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_B) {
put_bits(&s->pb, 4, s->b_code); put_bits(&s->pb, 4, s->b_code);
put_bits(&s->pb, 4, s->b_code); put_bits(&s->pb, 4, s->b_code);
}else{ }else{
@ -456,15 +456,15 @@ static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s,
if (cbp == 0 && !first_mb && s->mv_type == MV_TYPE_16X16 && if (cbp == 0 && !first_mb && s->mv_type == MV_TYPE_16X16 &&
(mb_x != s->mb_width - 1 || (mb_y != s->mb_height - 1 && s->codec_id == CODEC_ID_MPEG1VIDEO)) && (mb_x != s->mb_width - 1 || (mb_y != s->mb_height - 1 && s->codec_id == CODEC_ID_MPEG1VIDEO)) &&
((s->pict_type == FF_P_TYPE && (motion_x | motion_y) == 0) || ((s->pict_type == AV_PICTURE_TYPE_P && (motion_x | motion_y) == 0) ||
(s->pict_type == FF_B_TYPE && s->mv_dir == s->last_mv_dir && (((s->mv_dir & MV_DIR_FORWARD) ? ((s->mv[0][0][0] - s->last_mv[0][0][0])|(s->mv[0][0][1] - s->last_mv[0][0][1])) : 0) | (s->pict_type == AV_PICTURE_TYPE_B && s->mv_dir == s->last_mv_dir && (((s->mv_dir & MV_DIR_FORWARD) ? ((s->mv[0][0][0] - s->last_mv[0][0][0])|(s->mv[0][0][1] - s->last_mv[0][0][1])) : 0) |
((s->mv_dir & MV_DIR_BACKWARD) ? ((s->mv[1][0][0] - s->last_mv[1][0][0])|(s->mv[1][0][1] - s->last_mv[1][0][1])) : 0)) == 0))) { ((s->mv_dir & MV_DIR_BACKWARD) ? ((s->mv[1][0][0] - s->last_mv[1][0][0])|(s->mv[1][0][1] - s->last_mv[1][0][1])) : 0)) == 0))) {
s->mb_skip_run++; s->mb_skip_run++;
s->qscale -= s->dquant; s->qscale -= s->dquant;
s->skip_count++; s->skip_count++;
s->misc_bits++; s->misc_bits++;
s->last_bits++; s->last_bits++;
if(s->pict_type == FF_P_TYPE){ if(s->pict_type == AV_PICTURE_TYPE_P){
s->last_mv[0][1][0]= s->last_mv[0][0][0]= s->last_mv[0][1][0]= s->last_mv[0][0][0]=
s->last_mv[0][1][1]= s->last_mv[0][0][1]= 0; s->last_mv[0][1][1]= s->last_mv[0][0][1]= 0;
} }
@ -476,7 +476,7 @@ static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s,
encode_mb_skip_run(s, s->mb_skip_run); encode_mb_skip_run(s, s->mb_skip_run);
} }
if (s->pict_type == FF_I_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_I) {
if(s->dquant && cbp){ if(s->dquant && cbp){
put_mb_modes(s, 2, 1, 0, 0); /* macroblock_type : macroblock_quant = 1 */ put_mb_modes(s, 2, 1, 0, 0); /* macroblock_type : macroblock_quant = 1 */
put_qscale(s); put_qscale(s);
@ -497,7 +497,7 @@ static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s,
s->misc_bits+= get_bits_diff(s); s->misc_bits+= get_bits_diff(s);
s->i_count++; s->i_count++;
memset(s->last_mv, 0, sizeof(s->last_mv)); memset(s->last_mv, 0, sizeof(s->last_mv));
} else if (s->pict_type == FF_P_TYPE) { } else if (s->pict_type == AV_PICTURE_TYPE_P) {
if(s->mv_type == MV_TYPE_16X16){ if(s->mv_type == MV_TYPE_16X16){
if (cbp != 0) { if (cbp != 0) {
if ((motion_x|motion_y) == 0) { if ((motion_x|motion_y) == 0) {

@ -28,12 +28,12 @@ uint8_t ff_mpeg4_static_rl_table_store[3][2][2*MAX_RUN + MAX_LEVEL + 3];
int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s){ int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s){
switch(s->pict_type){ switch(s->pict_type){
case FF_I_TYPE: case AV_PICTURE_TYPE_I:
return 16; return 16;
case FF_P_TYPE: case AV_PICTURE_TYPE_P:
case FF_S_TYPE: case AV_PICTURE_TYPE_S:
return s->f_code+15; return s->f_code+15;
case FF_B_TYPE: case AV_PICTURE_TYPE_B:
return FFMAX3(s->f_code, s->b_code, 2) + 15; return FFMAX3(s->f_code, s->b_code, 2) + 15;
default: default:
return -1; return -1;

@ -118,7 +118,7 @@ static inline int mpeg4_is_resync(MpegEncContext *s){
} }
while(v<=0xFF){ while(v<=0xFF){
if(s->pict_type==FF_B_TYPE || (v>>(8-s->pict_type)!=1) || s->partitioned_frame) if(s->pict_type==AV_PICTURE_TYPE_B || (v>>(8-s->pict_type)!=1) || s->partitioned_frame)
break; break;
skip_bits(&s->gb, 8+s->pict_type); skip_bits(&s->gb, 8+s->pict_type);
bits_count+= 8+s->pict_type; bits_count+= 8+s->pict_type;
@ -373,7 +373,7 @@ int mpeg4_decode_video_packet_header(MpegEncContext *s)
av_log(s->avctx, AV_LOG_ERROR, "illegal mb_num in video packet (%d %d) \n", mb_num, s->mb_num); av_log(s->avctx, AV_LOG_ERROR, "illegal mb_num in video packet (%d %d) \n", mb_num, s->mb_num);
return -1; return -1;
} }
if(s->pict_type == FF_B_TYPE){ if(s->pict_type == AV_PICTURE_TYPE_B){
int mb_x = 0, mb_y = 0; int mb_x = 0, mb_y = 0;
while(s->next_picture.mbskip_table[ s->mb_index2xy[ mb_num ] ]) { while(s->next_picture.mbskip_table[ s->mb_index2xy[ mb_num ] ]) {
@ -413,20 +413,20 @@ int mpeg4_decode_video_packet_header(MpegEncContext *s)
if(s->shape != BIN_ONLY_SHAPE){ if(s->shape != BIN_ONLY_SHAPE){
skip_bits(&s->gb, 3); /* intra dc vlc threshold */ skip_bits(&s->gb, 3); /* intra dc vlc threshold */
//FIXME don't just ignore everything //FIXME don't just ignore everything
if(s->pict_type == FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE){ if(s->pict_type == AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE){
mpeg4_decode_sprite_trajectory(s, &s->gb); mpeg4_decode_sprite_trajectory(s, &s->gb);
av_log(s->avctx, AV_LOG_ERROR, "untested\n"); av_log(s->avctx, AV_LOG_ERROR, "untested\n");
} }
//FIXME reduced res stuff here //FIXME reduced res stuff here
if (s->pict_type != FF_I_TYPE) { if (s->pict_type != AV_PICTURE_TYPE_I) {
int f_code = get_bits(&s->gb, 3); /* fcode_for */ int f_code = get_bits(&s->gb, 3); /* fcode_for */
if(f_code==0){ if(f_code==0){
av_log(s->avctx, AV_LOG_ERROR, "Error, video packet header damaged (f_code=0)\n"); av_log(s->avctx, AV_LOG_ERROR, "Error, video packet header damaged (f_code=0)\n");
} }
} }
if (s->pict_type == FF_B_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_B) {
int b_code = get_bits(&s->gb, 3); int b_code = get_bits(&s->gb, 3);
if(b_code==0){ if(b_code==0){
av_log(s->avctx, AV_LOG_ERROR, "Error, video packet header damaged (b_code=0)\n"); av_log(s->avctx, AV_LOG_ERROR, "Error, video packet header damaged (b_code=0)\n");
@ -555,7 +555,7 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){
if(s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y+1) if(s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y+1)
s->first_slice_line=0; s->first_slice_line=0;
if(s->pict_type==FF_I_TYPE){ if(s->pict_type==AV_PICTURE_TYPE_I){
int i; int i;
do{ do{
@ -604,7 +604,7 @@ try_again:
skip_bits1(&s->gb); skip_bits1(&s->gb);
if(bits&0x10000){ if(bits&0x10000){
/* skip mb */ /* skip mb */
if(s->pict_type==FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE){ if(s->pict_type==AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE){
s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0; s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0;
mx= get_amv(s, 0); mx= get_amv(s, 0);
my= get_amv(s, 1); my= get_amv(s, 1);
@ -645,7 +645,7 @@ try_again:
if(s->mbintra_table[xy]) if(s->mbintra_table[xy])
ff_clean_intra_table_entries(s); ff_clean_intra_table_entries(s);
if(s->pict_type==FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE && (cbpc & 16) == 0) if(s->pict_type==AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE && (cbpc & 16) == 0)
s->mcsel= get_bits1(&s->gb); s->mcsel= get_bits1(&s->gb);
else s->mcsel= 0; else s->mcsel= 0;
@ -717,7 +717,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){
if(s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y+1) if(s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y+1)
s->first_slice_line=0; s->first_slice_line=0;
if(s->pict_type==FF_I_TYPE){ if(s->pict_type==AV_PICTURE_TYPE_I){
int ac_pred= get_bits1(&s->gb); int ac_pred= get_bits1(&s->gb);
int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1); int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
if(cbpy<0){ if(cbpy<0){
@ -791,8 +791,8 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){
int ff_mpeg4_decode_partitions(MpegEncContext *s) int ff_mpeg4_decode_partitions(MpegEncContext *s)
{ {
int mb_num; int mb_num;
const int part_a_error= s->pict_type==FF_I_TYPE ? (DC_ERROR|MV_ERROR) : MV_ERROR; const int part_a_error= s->pict_type==AV_PICTURE_TYPE_I ? (DC_ERROR|MV_ERROR) : MV_ERROR;
const int part_a_end = s->pict_type==FF_I_TYPE ? (DC_END |MV_END) : MV_END; const int part_a_end = s->pict_type==AV_PICTURE_TYPE_I ? (DC_END |MV_END) : MV_END;
mb_num= mpeg4_decode_partition_a(s); mb_num= mpeg4_decode_partition_a(s);
if(mb_num<0){ if(mb_num<0){
@ -808,7 +808,7 @@ int ff_mpeg4_decode_partitions(MpegEncContext *s)
s->mb_num_left= mb_num; s->mb_num_left= mb_num;
if(s->pict_type==FF_I_TYPE){ if(s->pict_type==AV_PICTURE_TYPE_I){
while(show_bits(&s->gb, 9) == 1) while(show_bits(&s->gb, 9) == 1)
skip_bits(&s->gb, 9); skip_bits(&s->gb, 9);
if(get_bits_long(&s->gb, 19)!=DC_MARKER){ if(get_bits_long(&s->gb, 19)!=DC_MARKER){
@ -826,11 +826,11 @@ int ff_mpeg4_decode_partitions(MpegEncContext *s)
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, part_a_end); ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, part_a_end);
if( mpeg4_decode_partition_b(s, mb_num) < 0){ if( mpeg4_decode_partition_b(s, mb_num) < 0){
if(s->pict_type==FF_P_TYPE) if(s->pict_type==AV_PICTURE_TYPE_P)
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, DC_ERROR); ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, DC_ERROR);
return -1; return -1;
}else{ }else{
if(s->pict_type==FF_P_TYPE) if(s->pict_type==AV_PICTURE_TYPE_P)
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, DC_END); ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, DC_END);
} }
@ -1101,7 +1101,7 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64])
ff_set_qscale(s, s->current_picture.qscale_table[xy] ); ff_set_qscale(s, s->current_picture.qscale_table[xy] );
} }
if (s->pict_type == FF_P_TYPE || s->pict_type==FF_S_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
int i; int i;
for(i=0; i<4; i++){ for(i=0; i<4; i++){
s->mv[0][i][0] = s->current_picture.motion_val[0][ s->block_index[i] ][0]; s->mv[0][i][0] = s->current_picture.motion_val[0][ s->block_index[i] ][0];
@ -1115,7 +1115,7 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64])
s->block_last_index[i] = -1; s->block_last_index[i] = -1;
s->mv_dir = MV_DIR_FORWARD; s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16; s->mv_type = MV_TYPE_16X16;
if(s->pict_type==FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE){ if(s->pict_type==AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE){
s->mcsel=1; s->mcsel=1;
s->mb_skipped = 0; s->mb_skipped = 0;
}else{ }else{
@ -1179,7 +1179,7 @@ static int mpeg4_decode_mb(MpegEncContext *s,
assert(s->h263_pred); assert(s->h263_pred);
if (s->pict_type == FF_P_TYPE || s->pict_type==FF_S_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
do{ do{
if (get_bits1(&s->gb)) { if (get_bits1(&s->gb)) {
/* skip mb */ /* skip mb */
@ -1188,7 +1188,7 @@ static int mpeg4_decode_mb(MpegEncContext *s,
s->block_last_index[i] = -1; s->block_last_index[i] = -1;
s->mv_dir = MV_DIR_FORWARD; s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16; s->mv_type = MV_TYPE_16X16;
if(s->pict_type==FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE){ if(s->pict_type==AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE){
s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0; s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0;
s->mcsel=1; s->mcsel=1;
s->mv[0][0][0]= get_amv(s, 0); s->mv[0][0][0]= get_amv(s, 0);
@ -1216,7 +1216,7 @@ static int mpeg4_decode_mb(MpegEncContext *s,
s->mb_intra = ((cbpc & 4) != 0); s->mb_intra = ((cbpc & 4) != 0);
if (s->mb_intra) goto intra; if (s->mb_intra) goto intra;
if(s->pict_type==FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE && (cbpc & 16) == 0) if(s->pict_type==AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE && (cbpc & 16) == 0)
s->mcsel= get_bits1(&s->gb); s->mcsel= get_bits1(&s->gb);
else s->mcsel= 0; else s->mcsel= 0;
cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1) ^ 0x0F; cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1) ^ 0x0F;
@ -1295,7 +1295,7 @@ static int mpeg4_decode_mb(MpegEncContext *s,
mot_val[1] = my; mot_val[1] = my;
} }
} }
} else if(s->pict_type==FF_B_TYPE) { } else if(s->pict_type==AV_PICTURE_TYPE_B) {
int modb1; // first bit of modb int modb1; // first bit of modb
int modb2; // second bit of modb int modb2; // second bit of modb
int mb_type; int mb_type;
@ -1492,12 +1492,12 @@ end:
if(mpeg4_is_resync(s)){ if(mpeg4_is_resync(s)){
const int delta= s->mb_x + 1 == s->mb_width ? 2 : 1; const int delta= s->mb_x + 1 == s->mb_width ? 2 : 1;
if(s->pict_type==FF_B_TYPE){ if(s->pict_type==AV_PICTURE_TYPE_B){
ff_thread_await_progress((AVFrame*)s->next_picture_ptr, ff_thread_await_progress((AVFrame*)s->next_picture_ptr,
(s->mb_x + delta >= s->mb_width) ? FFMIN(s->mb_y+1, s->mb_height-1) : s->mb_y, 0); (s->mb_x + delta >= s->mb_width) ? FFMIN(s->mb_y+1, s->mb_height-1) : s->mb_y, 0);
} }
if(s->pict_type==FF_B_TYPE && s->next_picture.mbskip_table[xy + delta]) if(s->pict_type==AV_PICTURE_TYPE_B && s->next_picture.mbskip_table[xy + delta])
return SLICE_OK; return SLICE_OK;
return SLICE_END; return SLICE_END;
} }
@ -1897,13 +1897,13 @@ static int decode_user_data(MpegEncContext *s, GetBitContext *gb){
static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
int time_incr, time_increment; int time_incr, time_increment;
s->pict_type = get_bits(gb, 2) + FF_I_TYPE; /* pict type: I = 0 , P = 1 */ s->pict_type = get_bits(gb, 2) + AV_PICTURE_TYPE_I; /* pict type: I = 0 , P = 1 */
if(s->pict_type==FF_B_TYPE && s->low_delay && s->vol_control_parameters==0 && !(s->flags & CODEC_FLAG_LOW_DELAY)){ if(s->pict_type==AV_PICTURE_TYPE_B && s->low_delay && s->vol_control_parameters==0 && !(s->flags & CODEC_FLAG_LOW_DELAY)){
av_log(s->avctx, AV_LOG_ERROR, "low_delay flag incorrectly, clearing it\n"); av_log(s->avctx, AV_LOG_ERROR, "low_delay flag incorrectly, clearing it\n");
s->low_delay=0; s->low_delay=0;
} }
s->partitioned_frame= s->data_partitioning && s->pict_type!=FF_B_TYPE; s->partitioned_frame= s->data_partitioning && s->pict_type!=AV_PICTURE_TYPE_B;
if(s->partitioned_frame) if(s->partitioned_frame)
s->decode_mb= mpeg4_decode_partitioned_mb; s->decode_mb= mpeg4_decode_partitioned_mb;
else else
@ -1919,8 +1919,8 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
av_log(s->avctx, AV_LOG_ERROR, "hmm, seems the headers are not complete, trying to guess time_increment_bits\n"); av_log(s->avctx, AV_LOG_ERROR, "hmm, seems the headers are not complete, trying to guess time_increment_bits\n");
for(s->time_increment_bits=1 ;s->time_increment_bits<16; s->time_increment_bits++){ for(s->time_increment_bits=1 ;s->time_increment_bits<16; s->time_increment_bits++){
if ( s->pict_type == FF_P_TYPE if ( s->pict_type == AV_PICTURE_TYPE_P
|| (s->pict_type == FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE)) { || (s->pict_type == AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE)) {
if((show_bits(gb, s->time_increment_bits+6)&0x37) == 0x30) break; if((show_bits(gb, s->time_increment_bits+6)&0x37) == 0x30) break;
}else }else
if((show_bits(gb, s->time_increment_bits+5)&0x1F) == 0x18) break; if((show_bits(gb, s->time_increment_bits+5)&0x1F) == 0x18) break;
@ -1932,7 +1932,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
if(IS_3IV1) time_increment= get_bits1(gb); //FIXME investigate further if(IS_3IV1) time_increment= get_bits1(gb); //FIXME investigate further
else time_increment= get_bits(gb, s->time_increment_bits); else time_increment= get_bits(gb, s->time_increment_bits);
if(s->pict_type!=FF_B_TYPE){ if(s->pict_type!=AV_PICTURE_TYPE_B){
s->last_time_base= s->time_base; s->last_time_base= s->time_base;
s->time_base+= time_incr; s->time_base+= time_incr;
s->time= s->time_base*s->avctx->time_base.den + time_increment; s->time= s->time_base*s->avctx->time_base.den + time_increment;
@ -1982,8 +1982,8 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
av_log(s->avctx, AV_LOG_ERROR, "vop not coded\n"); av_log(s->avctx, AV_LOG_ERROR, "vop not coded\n");
return FRAME_SKIPPED; return FRAME_SKIPPED;
} }
if (s->shape != BIN_ONLY_SHAPE && ( s->pict_type == FF_P_TYPE if (s->shape != BIN_ONLY_SHAPE && ( s->pict_type == AV_PICTURE_TYPE_P
|| (s->pict_type == FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE))) { || (s->pict_type == AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE))) {
/* rounding type for motion estimation */ /* rounding type for motion estimation */
s->no_rounding = get_bits1(gb); s->no_rounding = get_bits1(gb);
} else { } else {
@ -1992,7 +1992,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
//FIXME reduced res stuff //FIXME reduced res stuff
if (s->shape != RECT_SHAPE) { if (s->shape != RECT_SHAPE) {
if (s->vol_sprite_usage != 1 || s->pict_type != FF_I_TYPE) { if (s->vol_sprite_usage != 1 || s->pict_type != AV_PICTURE_TYPE_I) {
int width, height, hor_spat_ref, ver_spat_ref; int width, height, hor_spat_ref, ver_spat_ref;
width = get_bits(gb, 13); width = get_bits(gb, 13);
@ -2013,9 +2013,9 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
if (s->shape != BIN_ONLY_SHAPE) { if (s->shape != BIN_ONLY_SHAPE) {
skip_bits_long(gb, s->cplx_estimation_trash_i); skip_bits_long(gb, s->cplx_estimation_trash_i);
if(s->pict_type != FF_I_TYPE) if(s->pict_type != AV_PICTURE_TYPE_I)
skip_bits_long(gb, s->cplx_estimation_trash_p); skip_bits_long(gb, s->cplx_estimation_trash_p);
if(s->pict_type == FF_B_TYPE) if(s->pict_type == AV_PICTURE_TYPE_B)
skip_bits_long(gb, s->cplx_estimation_trash_b); skip_bits_long(gb, s->cplx_estimation_trash_b);
s->intra_dc_threshold= mpeg4_dc_threshold[ get_bits(gb, 3) ]; s->intra_dc_threshold= mpeg4_dc_threshold[ get_bits(gb, 3) ];
@ -2038,7 +2038,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan); ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
} }
if(s->pict_type == FF_S_TYPE && (s->vol_sprite_usage==STATIC_SPRITE || s->vol_sprite_usage==GMC_SPRITE)){ if(s->pict_type == AV_PICTURE_TYPE_S && (s->vol_sprite_usage==STATIC_SPRITE || s->vol_sprite_usage==GMC_SPRITE)){
mpeg4_decode_sprite_trajectory(s, gb); mpeg4_decode_sprite_trajectory(s, gb);
if(s->sprite_brightness_change) av_log(s->avctx, AV_LOG_ERROR, "sprite_brightness_change not supported\n"); if(s->sprite_brightness_change) av_log(s->avctx, AV_LOG_ERROR, "sprite_brightness_change not supported\n");
if(s->vol_sprite_usage==STATIC_SPRITE) av_log(s->avctx, AV_LOG_ERROR, "static sprite not supported\n"); if(s->vol_sprite_usage==STATIC_SPRITE) av_log(s->avctx, AV_LOG_ERROR, "static sprite not supported\n");
@ -2051,7 +2051,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
return -1; // makes no sense to continue, as there is nothing left from the image then return -1; // makes no sense to continue, as there is nothing left from the image then
} }
if (s->pict_type != FF_I_TYPE) { if (s->pict_type != AV_PICTURE_TYPE_I) {
s->f_code = get_bits(gb, 3); /* fcode_for */ s->f_code = get_bits(gb, 3); /* fcode_for */
if(s->f_code==0){ if(s->f_code==0){
av_log(s->avctx, AV_LOG_ERROR, "Error, header damaged or not MPEG4 header (f_code=0)\n"); av_log(s->avctx, AV_LOG_ERROR, "Error, header damaged or not MPEG4 header (f_code=0)\n");
@ -2060,7 +2060,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
}else }else
s->f_code=1; s->f_code=1;
if (s->pict_type == FF_B_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_B) {
s->b_code = get_bits(gb, 3); s->b_code = get_bits(gb, 3);
}else }else
s->b_code=1; s->b_code=1;
@ -2068,14 +2068,14 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
if(s->avctx->debug&FF_DEBUG_PICT_INFO){ if(s->avctx->debug&FF_DEBUG_PICT_INFO){
av_log(s->avctx, AV_LOG_DEBUG, "qp:%d fc:%d,%d %s size:%d pro:%d alt:%d top:%d %spel part:%d resync:%d w:%d a:%d rnd:%d vot:%d%s dc:%d ce:%d/%d/%d\n", av_log(s->avctx, AV_LOG_DEBUG, "qp:%d fc:%d,%d %s size:%d pro:%d alt:%d top:%d %spel part:%d resync:%d w:%d a:%d rnd:%d vot:%d%s dc:%d ce:%d/%d/%d\n",
s->qscale, s->f_code, s->b_code, s->qscale, s->f_code, s->b_code,
s->pict_type == FF_I_TYPE ? "I" : (s->pict_type == FF_P_TYPE ? "P" : (s->pict_type == FF_B_TYPE ? "B" : "S")), s->pict_type == AV_PICTURE_TYPE_I ? "I" : (s->pict_type == AV_PICTURE_TYPE_P ? "P" : (s->pict_type == AV_PICTURE_TYPE_B ? "B" : "S")),
gb->size_in_bits,s->progressive_sequence, s->alternate_scan, s->top_field_first, gb->size_in_bits,s->progressive_sequence, s->alternate_scan, s->top_field_first,
s->quarter_sample ? "q" : "h", s->data_partitioning, s->resync_marker, s->num_sprite_warping_points, s->quarter_sample ? "q" : "h", s->data_partitioning, s->resync_marker, s->num_sprite_warping_points,
s->sprite_warping_accuracy, 1-s->no_rounding, s->vo_type, s->vol_control_parameters ? " VOLC" : " ", s->intra_dc_threshold, s->cplx_estimation_trash_i, s->cplx_estimation_trash_p, s->cplx_estimation_trash_b); s->sprite_warping_accuracy, 1-s->no_rounding, s->vo_type, s->vol_control_parameters ? " VOLC" : " ", s->intra_dc_threshold, s->cplx_estimation_trash_i, s->cplx_estimation_trash_p, s->cplx_estimation_trash_b);
} }
if(!s->scalability){ if(!s->scalability){
if (s->shape!=RECT_SHAPE && s->pict_type!=FF_I_TYPE) { if (s->shape!=RECT_SHAPE && s->pict_type!=AV_PICTURE_TYPE_I) {
skip_bits1(gb); // vop shape coding type skip_bits1(gb); // vop shape coding type
} }
}else{ }else{

@ -205,7 +205,7 @@ void ff_clean_mpeg4_qscales(MpegEncContext *s){
ff_clean_h263_qscales(s); ff_clean_h263_qscales(s);
if(s->pict_type== FF_B_TYPE){ if(s->pict_type== AV_PICTURE_TYPE_B){
int odd=0; int odd=0;
/* ok, come on, this isn't funny anymore, there's more code for handling this mpeg4 mess than for the actual adaptive quantization */ /* ok, come on, this isn't funny anymore, there's more code for handling this mpeg4 mess than for the actual adaptive quantization */
@ -497,14 +497,14 @@ void mpeg4_encode_mb(MpegEncContext * s,
{ {
int cbpc, cbpy, pred_x, pred_y; int cbpc, cbpy, pred_x, pred_y;
PutBitContext * const pb2 = s->data_partitioning ? &s->pb2 : &s->pb; PutBitContext * const pb2 = s->data_partitioning ? &s->pb2 : &s->pb;
PutBitContext * const tex_pb = s->data_partitioning && s->pict_type!=FF_B_TYPE ? &s->tex_pb : &s->pb; PutBitContext * const tex_pb = s->data_partitioning && s->pict_type!=AV_PICTURE_TYPE_B ? &s->tex_pb : &s->pb;
PutBitContext * const dc_pb = s->data_partitioning && s->pict_type!=FF_I_TYPE ? &s->pb2 : &s->pb; PutBitContext * const dc_pb = s->data_partitioning && s->pict_type!=AV_PICTURE_TYPE_I ? &s->pb2 : &s->pb;
const int interleaved_stats= (s->flags&CODEC_FLAG_PASS1) && !s->data_partitioning ? 1 : 0; const int interleaved_stats= (s->flags&CODEC_FLAG_PASS1) && !s->data_partitioning ? 1 : 0;
if (!s->mb_intra) { if (!s->mb_intra) {
int i, cbp; int i, cbp;
if(s->pict_type==FF_B_TYPE){ if(s->pict_type==AV_PICTURE_TYPE_B){
static const int mb_type_table[8]= {-1, 3, 2, 1,-1,-1,-1, 0}; /* convert from mv_dir to type */ static const int mb_type_table[8]= {-1, 3, 2, 1,-1,-1,-1, 0}; /* convert from mv_dir to type */
int mb_type= mb_type_table[s->mv_dir]; int mb_type= mb_type_table[s->mv_dir];
@ -637,7 +637,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
s->p_tex_bits+= get_bits_diff(s); s->p_tex_bits+= get_bits_diff(s);
} }
}else{ /* s->pict_type==FF_B_TYPE */ }else{ /* s->pict_type==AV_PICTURE_TYPE_B */
cbp= get_p_cbp(s, block, motion_x, motion_y); cbp= get_p_cbp(s, block, motion_x, motion_y);
if ((cbp | motion_x | motion_y | s->dquant) == 0 && s->mv_type==MV_TYPE_16X16) { if ((cbp | motion_x | motion_y | s->dquant) == 0 && s->mv_type==MV_TYPE_16X16) {
@ -660,7 +660,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
int diff; int diff;
Picture *pic= s->reordered_input_picture[i+1]; Picture *pic= s->reordered_input_picture[i+1];
if(pic==NULL || pic->pict_type!=FF_B_TYPE) break; if(pic==NULL || pic->pict_type!=AV_PICTURE_TYPE_B) break;
b_pic= pic->data[0] + offset; b_pic= pic->data[0] + offset;
if(pic->type != FF_BUFFER_TYPE_SHARED) if(pic->type != FF_BUFFER_TYPE_SHARED)
@ -824,7 +824,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
} }
cbpc = cbp & 3; cbpc = cbp & 3;
if (s->pict_type == FF_I_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_I) {
if(s->dquant) cbpc+=4; if(s->dquant) cbpc+=4;
put_bits(&s->pb, put_bits(&s->pb,
ff_h263_intra_MCBPC_bits[cbpc], ff_h263_intra_MCBPC_bits[cbpc],
@ -876,7 +876,7 @@ void ff_mpeg4_stuffing(PutBitContext * pbc)
/* must be called before writing the header */ /* must be called before writing the header */
void ff_set_mpeg4_time(MpegEncContext * s){ void ff_set_mpeg4_time(MpegEncContext * s){
if(s->pict_type==FF_B_TYPE){ if(s->pict_type==AV_PICTURE_TYPE_B){
ff_mpeg4_init_direct_mv(s); ff_mpeg4_init_direct_mv(s);
}else{ }else{
s->last_time_base= s->time_base; s->last_time_base= s->time_base;
@ -1066,7 +1066,7 @@ void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
int time_incr; int time_incr;
int time_div, time_mod; int time_div, time_mod;
if(s->pict_type==FF_I_TYPE){ if(s->pict_type==AV_PICTURE_TYPE_I){
if(!(s->flags&CODEC_FLAG_GLOBAL_HEADER)){ if(!(s->flags&CODEC_FLAG_GLOBAL_HEADER)){
if(s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT) //HACK, the reference sw is buggy if(s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT) //HACK, the reference sw is buggy
mpeg4_encode_visual_object_header(s); mpeg4_encode_visual_object_header(s);
@ -1077,7 +1077,7 @@ void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
mpeg4_encode_gop_header(s); mpeg4_encode_gop_header(s);
} }
s->partitioned_frame= s->data_partitioning && s->pict_type!=FF_B_TYPE; s->partitioned_frame= s->data_partitioning && s->pict_type!=AV_PICTURE_TYPE_B;
put_bits(&s->pb, 16, 0); /* vop header */ put_bits(&s->pb, 16, 0); /* vop header */
put_bits(&s->pb, 16, VOP_STARTCODE); /* vop header */ put_bits(&s->pb, 16, VOP_STARTCODE); /* vop header */
@ -1097,8 +1097,8 @@ void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
put_bits(&s->pb, s->time_increment_bits, time_mod); /* time increment */ put_bits(&s->pb, s->time_increment_bits, time_mod); /* time increment */
put_bits(&s->pb, 1, 1); /* marker */ put_bits(&s->pb, 1, 1); /* marker */
put_bits(&s->pb, 1, 1); /* vop coded */ put_bits(&s->pb, 1, 1); /* vop coded */
if ( s->pict_type == FF_P_TYPE if ( s->pict_type == AV_PICTURE_TYPE_P
|| (s->pict_type == FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE)) { || (s->pict_type == AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE)) {
put_bits(&s->pb, 1, s->no_rounding); /* rounding type */ put_bits(&s->pb, 1, s->no_rounding); /* rounding type */
} }
put_bits(&s->pb, 3, 0); /* intra dc VLC threshold */ put_bits(&s->pb, 3, 0); /* intra dc VLC threshold */
@ -1110,9 +1110,9 @@ void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
put_bits(&s->pb, 5, s->qscale); put_bits(&s->pb, 5, s->qscale);
if (s->pict_type != FF_I_TYPE) if (s->pict_type != AV_PICTURE_TYPE_I)
put_bits(&s->pb, 3, s->f_code); /* fcode_for */ put_bits(&s->pb, 3, s->f_code); /* fcode_for */
if (s->pict_type == FF_B_TYPE) if (s->pict_type == AV_PICTURE_TYPE_B)
put_bits(&s->pb, 3, s->b_code); /* fcode_back */ put_bits(&s->pb, 3, s->b_code); /* fcode_back */
} }
@ -1317,7 +1317,7 @@ void ff_mpeg4_merge_partitions(MpegEncContext *s)
const int tex_pb_len= put_bits_count(&s->tex_pb); const int tex_pb_len= put_bits_count(&s->tex_pb);
const int bits= put_bits_count(&s->pb); const int bits= put_bits_count(&s->pb);
if(s->pict_type==FF_I_TYPE){ if(s->pict_type==AV_PICTURE_TYPE_I){
put_bits(&s->pb, 19, DC_MARKER); put_bits(&s->pb, 19, DC_MARKER);
s->misc_bits+=19 + pb2_len + bits - s->last_bits; s->misc_bits+=19 + pb2_len + bits - s->last_bits;
s->i_tex_bits+= tex_pb_len; s->i_tex_bits+= tex_pb_len;

@ -313,8 +313,8 @@ int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
/* It might be nicer if the application would keep track of these /* It might be nicer if the application would keep track of these
* but it would require an API change. */ * but it would require an API change. */
memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1); memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
s->prev_pict_types[0]= s->dropable ? FF_B_TYPE : s->pict_type; s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == FF_B_TYPE) if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == AV_PICTURE_TYPE_B)
pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway. pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
pic->owner2 = s; pic->owner2 = s;
@ -527,7 +527,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src
s->last_pict_type= s1->pict_type; s->last_pict_type= s1->pict_type;
if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->quality; if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->quality;
if(s1->pict_type!=FF_B_TYPE){ if(s1->pict_type!=AV_PICTURE_TYPE_B){
s->last_non_b_pict_type= s1->pict_type; s->last_non_b_pict_type= s1->pict_type;
} }
} }
@ -1023,7 +1023,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3); assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
/* mark&release old frames */ /* mark&release old frames */
if (s->pict_type != FF_B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) { if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){ if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
free_frame_buffer(s, s->last_picture_ptr); free_frame_buffer(s, s->last_picture_ptr);
@ -1054,7 +1054,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
if (!s->dropable){ if (!s->dropable){
if (s->codec_id == CODEC_ID_H264) if (s->codec_id == CODEC_ID_H264)
pic->reference = s->picture_structure; pic->reference = s->picture_structure;
else if (s->pict_type != FF_B_TYPE) else if (s->pict_type != AV_PICTURE_TYPE_B)
pic->reference = 3; pic->reference = 3;
} }
@ -1077,11 +1077,11 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
s->current_picture_ptr->pict_type= s->pict_type; s->current_picture_ptr->pict_type= s->pict_type;
// if(s->flags && CODEC_FLAG_QSCALE) // if(s->flags && CODEC_FLAG_QSCALE)
// s->current_picture_ptr->quality= s->new_picture_ptr->quality; // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
s->current_picture_ptr->key_frame= s->pict_type == FF_I_TYPE; s->current_picture_ptr->key_frame= s->pict_type == AV_PICTURE_TYPE_I;
ff_copy_picture(&s->current_picture, s->current_picture_ptr); ff_copy_picture(&s->current_picture, s->current_picture_ptr);
if (s->pict_type != FF_B_TYPE) { if (s->pict_type != AV_PICTURE_TYPE_B) {
s->last_picture_ptr= s->next_picture_ptr; s->last_picture_ptr= s->next_picture_ptr;
if(!s->dropable) if(!s->dropable)
s->next_picture_ptr= s->current_picture_ptr; s->next_picture_ptr= s->current_picture_ptr;
@ -1093,7 +1093,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
s->pict_type, s->dropable);*/ s->pict_type, s->dropable);*/
if(s->codec_id != CODEC_ID_H264){ if(s->codec_id != CODEC_ID_H264){
if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && s->pict_type!=FF_I_TYPE){ if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && s->pict_type!=AV_PICTURE_TYPE_I){
av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n"); av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
/* Allocate a dummy frame */ /* Allocate a dummy frame */
i= ff_find_unused_picture(s, 0); i= ff_find_unused_picture(s, 0);
@ -1103,7 +1103,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0); ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1); ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
} }
if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==FF_B_TYPE){ if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==AV_PICTURE_TYPE_B){
/* Allocate a dummy frame */ /* Allocate a dummy frame */
i= ff_find_unused_picture(s, 0); i= ff_find_unused_picture(s, 0);
s->next_picture_ptr= &s->picture[i]; s->next_picture_ptr= &s->picture[i];
@ -1117,7 +1117,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr); if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr); if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
assert(s->pict_type == FF_I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0])); assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){ if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
int i; int i;
@ -1185,7 +1185,7 @@ void MPV_frame_end(MpegEncContext *s)
s->last_pict_type = s->pict_type; s->last_pict_type = s->pict_type;
s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality; s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
if(s->pict_type!=FF_B_TYPE){ if(s->pict_type!=AV_PICTURE_TYPE_B){
s->last_non_b_pict_type= s->pict_type; s->last_non_b_pict_type= s->pict_type;
} }
#if 0 #if 0
@ -1314,12 +1314,12 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: "); av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
switch (pict->pict_type) { switch (pict->pict_type) {
case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break; case AV_PICTURE_TYPE_I: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break; case AV_PICTURE_TYPE_P: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break; case AV_PICTURE_TYPE_B: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break; case AV_PICTURE_TYPE_S: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break; case AV_PICTURE_TYPE_SI: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break; case AV_PICTURE_TYPE_SP: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
} }
for(y=0; y<s->mb_height; y++){ for(y=0; y<s->mb_height; y++){
for(x=0; x<s->mb_width; x++){ for(x=0; x<s->mb_width; x++){
@ -1415,15 +1415,15 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
for(type=0; type<3; type++){ for(type=0; type<3; type++){
int direction = 0; int direction = 0;
switch (type) { switch (type) {
case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE)) case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
continue; continue;
direction = 0; direction = 0;
break; break;
case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE)) case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
continue; continue;
direction = 0; direction = 0;
break; break;
case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE)) case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
continue; continue;
direction = 1; direction = 1;
break; break;
@ -1819,7 +1819,7 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
ref_picture, pix_op, ref_picture, pix_op,
s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y); s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
} else { } else {
if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){ if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
ref_picture= s->current_picture_ptr->data; ref_picture= s->current_picture_ptr->data;
} }
@ -1833,7 +1833,7 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
for(i=0; i<2; i++){ for(i=0; i<2; i++){
uint8_t ** ref2picture; uint8_t ** ref2picture;
if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == FF_B_TYPE || s->first_field){ if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
ref2picture= ref_picture; ref2picture= ref_picture;
}else{ }else{
ref2picture= s->current_picture_ptr->data; ref2picture= s->current_picture_ptr->data;
@ -2029,14 +2029,14 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
else if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
s->mbintra_table[mb_xy]=1; s->mbintra_table[mb_xy]=1;
if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==FF_B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
uint8_t *dest_y, *dest_cb, *dest_cr; uint8_t *dest_y, *dest_cb, *dest_cr;
int dct_linesize, dct_offset; int dct_linesize, dct_offset;
op_pixels_func (*op_pix)[4]; op_pixels_func (*op_pix)[4];
qpel_mc_func (*op_qpix)[16]; qpel_mc_func (*op_qpix)[16];
const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
const int uvlinesize= s->current_picture.linesize[1]; const int uvlinesize= s->current_picture.linesize[1];
const int readable= s->pict_type != FF_B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag; const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8; const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
/* avoid copy if macroblock skipped in last frame too */ /* avoid copy if macroblock skipped in last frame too */
@ -2049,7 +2049,7 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
if (s->mb_skipped) { if (s->mb_skipped) {
s->mb_skipped= 0; s->mb_skipped= 0;
assert(s->pict_type!=FF_I_TYPE); assert(s->pict_type!=AV_PICTURE_TYPE_I);
(*mbskip_ptr) ++; /* indicate that this time we skipped it */ (*mbskip_ptr) ++; /* indicate that this time we skipped it */
if(*mbskip_ptr >99) *mbskip_ptr= 99; if(*mbskip_ptr >99) *mbskip_ptr= 99;
@ -2105,7 +2105,7 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
} }
}else{ }else{
op_qpix= s->me.qpel_put; op_qpix= s->me.qpel_put;
if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){ if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
op_pix = s->dsp.put_pixels_tab; op_pix = s->dsp.put_pixels_tab;
}else{ }else{
op_pix = s->dsp.put_no_rnd_pixels_tab; op_pix = s->dsp.put_no_rnd_pixels_tab;
@ -2123,8 +2123,8 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
/* skip dequant / idct if we are really late ;) */ /* skip dequant / idct if we are really late ;) */
if(s->avctx->skip_idct){ if(s->avctx->skip_idct){
if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE) if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE) ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
|| s->avctx->skip_idct >= AVDISCARD_ALL) || s->avctx->skip_idct >= AVDISCARD_ALL)
goto skip_idct; goto skip_idct;
} }
@ -2287,14 +2287,14 @@ void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
AVFrame *src; AVFrame *src;
int offset[4]; int offset[4];
if(s->pict_type==FF_B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER)) if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
src= (AVFrame*)s->current_picture_ptr; src= (AVFrame*)s->current_picture_ptr;
else if(s->last_picture_ptr) else if(s->last_picture_ptr)
src= (AVFrame*)s->last_picture_ptr; src= (AVFrame*)s->last_picture_ptr;
else else
return; return;
if(s->pict_type==FF_B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){ if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
offset[0]= offset[0]=
offset[1]= offset[1]=
offset[2]= offset[2]=
@ -2330,7 +2330,7 @@ void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift)); s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift)); s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
if(!(s->pict_type==FF_B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME)) if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
{ {
if(s->picture_structure==PICT_FRAME){ if(s->picture_structure==PICT_FRAME){
s->dest[0] += s->mb_y * linesize << mb_size; s->dest[0] += s->mb_y * linesize << mb_size;
@ -2609,6 +2609,6 @@ void ff_set_qscale(MpegEncContext * s, int qscale)
void MPV_report_decode_progress(MpegEncContext *s) void MPV_report_decode_progress(MpegEncContext *s)
{ {
if (s->pict_type != FF_B_TYPE && !s->partitioned_frame) if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame)
ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0); ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);
} }

@ -330,7 +330,7 @@ typedef struct MpegEncContext {
int adaptive_quant; ///< use adaptive quantization int adaptive_quant; ///< use adaptive quantization
int dquant; ///< qscale difference to prev qscale int dquant; ///< qscale difference to prev qscale
int closed_gop; ///< MPEG1/2 GOP is closed int closed_gop; ///< MPEG1/2 GOP is closed
int pict_type; ///< FF_I_TYPE, FF_P_TYPE, FF_B_TYPE, ... int pict_type; ///< AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
int last_pict_type; //FIXME removes int last_pict_type; //FIXME removes
int last_non_b_pict_type; ///< used for mpeg4 gmc b-frames & ratecontrol int last_non_b_pict_type; ///< used for mpeg4 gmc b-frames & ratecontrol
int dropable; int dropable;

@ -649,7 +649,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
prefetch_motion(s, ref_picture, dir); prefetch_motion(s, ref_picture, dir);
if(!is_mpeg12 && s->obmc && s->pict_type != FF_B_TYPE){ if(!is_mpeg12 && s->obmc && s->pict_type != AV_PICTURE_TYPE_B){
int16_t mv_cache[4][4][2]; int16_t mv_cache[4][4][2];
const int xy= s->mb_x + s->mb_y*s->mb_stride; const int xy= s->mb_x + s->mb_y*s->mb_stride;
const int mot_stride= s->b8_stride; const int mot_stride= s->b8_stride;
@ -816,7 +816,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y); s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
} }
} else { } else {
if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){ if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
ref_picture= s->current_picture_ptr->data; ref_picture= s->current_picture_ptr->data;
} }
@ -831,7 +831,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
uint8_t ** ref2picture; uint8_t ** ref2picture;
if(s->picture_structure == s->field_select[dir][i] + 1 if(s->picture_structure == s->field_select[dir][i] + 1
|| s->pict_type == FF_B_TYPE || s->first_field){ || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
ref2picture= ref_picture; ref2picture= ref_picture;
}else{ }else{
ref2picture= s->current_picture_ptr->data; ref2picture= s->current_picture_ptr->data;

@ -968,8 +968,8 @@ static int estimate_best_b_count(MpegEncContext *s){
assert(scale>=0 && scale <=3); assert(scale>=0 && scale <=3);
// emms_c(); // emms_c();
p_lambda= s->last_lambda_for[FF_P_TYPE]; //s->next_picture_ptr->quality; p_lambda= s->last_lambda_for[AV_PICTURE_TYPE_P]; //s->next_picture_ptr->quality;
b_lambda= s->last_lambda_for[FF_B_TYPE]; //p_lambda *FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset; b_lambda= s->last_lambda_for[AV_PICTURE_TYPE_B]; //p_lambda *FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
if(!b_lambda) b_lambda= p_lambda; //FIXME we should do this somewhere else if(!b_lambda) b_lambda= p_lambda; //FIXME we should do this somewhere else
lambda2= (b_lambda*b_lambda + (1<<FF_LAMBDA_SHIFT)/2 ) >> FF_LAMBDA_SHIFT; lambda2= (b_lambda*b_lambda + (1<<FF_LAMBDA_SHIFT)/2 ) >> FF_LAMBDA_SHIFT;
@ -1024,7 +1024,7 @@ static int estimate_best_b_count(MpegEncContext *s){
c->error[0]= c->error[1]= c->error[2]= 0; c->error[0]= c->error[1]= c->error[2]= 0;
input[0].pict_type= FF_I_TYPE; input[0].pict_type= AV_PICTURE_TYPE_I;
input[0].quality= 1 * FF_QP2LAMBDA; input[0].quality= 1 * FF_QP2LAMBDA;
out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[0]); out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[0]);
// rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT; // rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
@ -1032,7 +1032,7 @@ static int estimate_best_b_count(MpegEncContext *s){
for(i=0; i<s->max_b_frames+1; i++){ for(i=0; i<s->max_b_frames+1; i++){
int is_p= i % (j+1) == j || i==s->max_b_frames; int is_p= i % (j+1) == j || i==s->max_b_frames;
input[i+1].pict_type= is_p ? FF_P_TYPE : FF_B_TYPE; input[i+1].pict_type= is_p ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
input[i+1].quality= is_p ? p_lambda : b_lambda; input[i+1].quality= is_p ? p_lambda : b_lambda;
out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[i+1]); out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[i+1]);
rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3); rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
@ -1074,7 +1074,7 @@ static int select_input_picture(MpegEncContext *s){
if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){ if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){ if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
s->reordered_input_picture[0]= s->input_picture[0]; s->reordered_input_picture[0]= s->input_picture[0];
s->reordered_input_picture[0]->pict_type= FF_I_TYPE; s->reordered_input_picture[0]->pict_type= AV_PICTURE_TYPE_I;
s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++; s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
}else{ }else{
int b_frames; int b_frames;
@ -1109,7 +1109,7 @@ static int select_input_picture(MpegEncContext *s){
if(pict_num >= s->rc_context.num_entries) if(pict_num >= s->rc_context.num_entries)
break; break;
if(!s->input_picture[i]){ if(!s->input_picture[i]){
s->rc_context.entry[pict_num-1].new_pict_type = FF_P_TYPE; s->rc_context.entry[pict_num-1].new_pict_type = AV_PICTURE_TYPE_P;
break; break;
} }
@ -1153,10 +1153,10 @@ static int select_input_picture(MpegEncContext *s){
for(i= b_frames - 1; i>=0; i--){ for(i= b_frames - 1; i>=0; i--){
int type= s->input_picture[i]->pict_type; int type= s->input_picture[i]->pict_type;
if(type && type != FF_B_TYPE) if(type && type != AV_PICTURE_TYPE_B)
b_frames= i; b_frames= i;
} }
if(s->input_picture[b_frames]->pict_type == FF_B_TYPE && b_frames == s->max_b_frames){ if(s->input_picture[b_frames]->pict_type == AV_PICTURE_TYPE_B && b_frames == s->max_b_frames){
av_log(s->avctx, AV_LOG_ERROR, "warning, too many b frames in a row\n"); av_log(s->avctx, AV_LOG_ERROR, "warning, too many b frames in a row\n");
} }
@ -1166,29 +1166,29 @@ static int select_input_picture(MpegEncContext *s){
}else{ }else{
if(s->flags & CODEC_FLAG_CLOSED_GOP) if(s->flags & CODEC_FLAG_CLOSED_GOP)
b_frames=0; b_frames=0;
s->input_picture[b_frames]->pict_type= FF_I_TYPE; s->input_picture[b_frames]->pict_type= AV_PICTURE_TYPE_I;
} }
} }
if( (s->flags & CODEC_FLAG_CLOSED_GOP) if( (s->flags & CODEC_FLAG_CLOSED_GOP)
&& b_frames && b_frames
&& s->input_picture[b_frames]->pict_type== FF_I_TYPE) && s->input_picture[b_frames]->pict_type== AV_PICTURE_TYPE_I)
b_frames--; b_frames--;
s->reordered_input_picture[0]= s->input_picture[b_frames]; s->reordered_input_picture[0]= s->input_picture[b_frames];
if(s->reordered_input_picture[0]->pict_type != FF_I_TYPE) if(s->reordered_input_picture[0]->pict_type != AV_PICTURE_TYPE_I)
s->reordered_input_picture[0]->pict_type= FF_P_TYPE; s->reordered_input_picture[0]->pict_type= AV_PICTURE_TYPE_P;
s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++; s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
for(i=0; i<b_frames; i++){ for(i=0; i<b_frames; i++){
s->reordered_input_picture[i+1]= s->input_picture[i]; s->reordered_input_picture[i+1]= s->input_picture[i];
s->reordered_input_picture[i+1]->pict_type= FF_B_TYPE; s->reordered_input_picture[i+1]->pict_type= AV_PICTURE_TYPE_B;
s->reordered_input_picture[i+1]->coded_picture_number= s->coded_picture_number++; s->reordered_input_picture[i+1]->coded_picture_number= s->coded_picture_number++;
} }
} }
} }
no_output_pic: no_output_pic:
if(s->reordered_input_picture[0]){ if(s->reordered_input_picture[0]){
s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=FF_B_TYPE ? 3 : 0; s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=AV_PICTURE_TYPE_B ? 3 : 0;
ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]); ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);
@ -1296,11 +1296,11 @@ vbv_retry:
s->lambda_table[i]= FFMAX(s->lambda_table[i]+1, s->lambda_table[i]*(s->qscale+1) / s->qscale); s->lambda_table[i]= FFMAX(s->lambda_table[i]+1, s->lambda_table[i]*(s->qscale+1) / s->qscale);
} }
s->mb_skipped = 0; //done in MPV_frame_start() s->mb_skipped = 0; //done in MPV_frame_start()
if(s->pict_type==FF_P_TYPE){ //done in encode_picture() so we must undo it if(s->pict_type==AV_PICTURE_TYPE_P){ //done in encode_picture() so we must undo it
if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4) if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
s->no_rounding ^= 1; s->no_rounding ^= 1;
} }
if(s->pict_type!=FF_B_TYPE){ if(s->pict_type!=AV_PICTURE_TYPE_B){
s->time_base= s->last_time_base; s->time_base= s->last_time_base;
s->last_non_b_time= s->time - s->pp_time; s->last_non_b_time= s->time - s->pp_time;
} }
@ -1528,7 +1528,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x,
if(s->codec_id==CODEC_ID_MPEG4){ if(s->codec_id==CODEC_ID_MPEG4){
if(!s->mb_intra){ if(!s->mb_intra){
if(s->pict_type == FF_B_TYPE){ if(s->pict_type == AV_PICTURE_TYPE_B){
if(s->dquant&1 || s->mv_dir&MV_DIRECT) if(s->dquant&1 || s->mv_dir&MV_DIRECT)
s->dquant= 0; s->dquant= 0;
} }
@ -1605,7 +1605,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x,
dest_cb = s->dest[1]; dest_cb = s->dest[1];
dest_cr = s->dest[2]; dest_cr = s->dest[2];
if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){ if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
op_pix = s->dsp.put_pixels_tab; op_pix = s->dsp.put_pixels_tab;
op_qpix= s->dsp.put_qpel_pixels_tab; op_qpix= s->dsp.put_qpel_pixels_tab;
}else{ }else{
@ -1995,7 +1995,7 @@ static int estimate_motion_thread(AVCodecContext *c, void *arg){
s->block_index[3]+=2; s->block_index[3]+=2;
/* compute motion vector & mb_type and store in context */ /* compute motion vector & mb_type and store in context */
if(s->pict_type==FF_B_TYPE) if(s->pict_type==AV_PICTURE_TYPE_B)
ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y); ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
else else
ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y); ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
@ -2393,7 +2393,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
s->mv[1][0][0] = best_s.mv[1][0][0]; s->mv[1][0][0] = best_s.mv[1][0][0];
s->mv[1][0][1] = best_s.mv[1][0][1]; s->mv[1][0][1] = best_s.mv[1][0][1];
qpi = s->pict_type == FF_B_TYPE ? 2 : 0; qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
for(; qpi<4; qpi++){ for(; qpi<4; qpi++){
int dquant= dquant_tab[qpi]; int dquant= dquant_tab[qpi];
qp= last_qp + dquant; qp= last_qp + dquant;
@ -2495,7 +2495,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
s->last_bits= put_bits_count(&s->pb); s->last_bits= put_bits_count(&s->pb);
if (CONFIG_H263_ENCODER && if (CONFIG_H263_ENCODER &&
s->out_format == FMT_H263 && s->pict_type!=FF_B_TYPE) s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
ff_h263_update_motion_val(s); ff_h263_update_motion_val(s);
if(next_block==0){ //FIXME 16 vs linesize16 if(next_block==0){ //FIXME 16 vs linesize16
@ -2622,7 +2622,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
s->last_mv_dir = s->mv_dir; s->last_mv_dir = s->mv_dir;
if (CONFIG_H263_ENCODER && if (CONFIG_H263_ENCODER &&
s->out_format == FMT_H263 && s->pict_type!=FF_B_TYPE) s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
ff_h263_update_motion_val(s); ff_h263_update_motion_val(s);
MPV_decode_mb(s, s->block); MPV_decode_mb(s, s->block);
@ -2660,7 +2660,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
} }
//not beautiful here but we must write it before flushing so it has to be here //not beautiful here but we must write it before flushing so it has to be here
if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == FF_I_TYPE) if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
msmpeg4_encode_ext_header(s); msmpeg4_encode_ext_header(s);
write_slice_end(s); write_slice_end(s);
@ -2758,7 +2758,7 @@ static void set_frame_distances(MpegEncContext * s){
assert(s->current_picture_ptr->pts != AV_NOPTS_VALUE); assert(s->current_picture_ptr->pts != AV_NOPTS_VALUE);
s->time= s->current_picture_ptr->pts*s->avctx->time_base.num; s->time= s->current_picture_ptr->pts*s->avctx->time_base.num;
if(s->pict_type==FF_B_TYPE){ if(s->pict_type==AV_PICTURE_TYPE_B){
s->pb_time= s->pp_time - (s->last_non_b_time - s->time); s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
assert(s->pb_time > 0 && s->pb_time < s->pp_time); assert(s->pb_time > 0 && s->pb_time < s->pp_time);
}else{ }else{
@ -2791,10 +2791,10 @@ static int encode_picture(MpegEncContext *s, int picture_number)
// s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
if(s->pict_type==FF_I_TYPE){ if(s->pict_type==AV_PICTURE_TYPE_I){
if(s->msmpeg4_version >= 3) s->no_rounding=1; if(s->msmpeg4_version >= 3) s->no_rounding=1;
else s->no_rounding=0; else s->no_rounding=0;
}else if(s->pict_type!=FF_B_TYPE){ }else if(s->pict_type!=AV_PICTURE_TYPE_B){
if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4) if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
s->no_rounding ^= 1; s->no_rounding ^= 1;
} }
@ -2804,7 +2804,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
return -1; return -1;
ff_get_2pass_fcode(s); ff_get_2pass_fcode(s);
}else if(!(s->flags & CODEC_FLAG_QSCALE)){ }else if(!(s->flags & CODEC_FLAG_QSCALE)){
if(s->pict_type==FF_B_TYPE) if(s->pict_type==AV_PICTURE_TYPE_B)
s->lambda= s->last_lambda_for[s->pict_type]; s->lambda= s->last_lambda_for[s->pict_type];
else else
s->lambda= s->last_lambda_for[s->last_non_b_pict_type]; s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
@ -2820,17 +2820,17 @@ static int encode_picture(MpegEncContext *s, int picture_number)
return -1; return -1;
/* Estimate motion for every MB */ /* Estimate motion for every MB */
if(s->pict_type != FF_I_TYPE){ if(s->pict_type != AV_PICTURE_TYPE_I){
s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8; s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8; s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
if(s->pict_type != FF_B_TYPE && s->avctx->me_threshold==0){ if(s->pict_type != AV_PICTURE_TYPE_B && s->avctx->me_threshold==0){
if((s->avctx->pre_me && s->last_non_b_pict_type==FF_I_TYPE) || s->avctx->pre_me==2){ if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*)); s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
} }
} }
s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*)); s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
}else /* if(s->pict_type == FF_I_TYPE) */{ }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
/* I-Frame */ /* I-Frame */
for(i=0; i<s->mb_stride*s->mb_height; i++) for(i=0; i<s->mb_stride*s->mb_height; i++)
s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA; s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
@ -2847,15 +2847,15 @@ static int encode_picture(MpegEncContext *s, int picture_number)
s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp; s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
emms_c(); emms_c();
if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == FF_P_TYPE){ if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
s->pict_type= FF_I_TYPE; s->pict_type= AV_PICTURE_TYPE_I;
for(i=0; i<s->mb_stride*s->mb_height; i++) for(i=0; i<s->mb_stride*s->mb_height; i++)
s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA; s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
//printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum); //printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
} }
if(!s->umvplus){ if(!s->umvplus){
if(s->pict_type==FF_P_TYPE || s->pict_type==FF_S_TYPE) { if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER); s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
if(s->flags & CODEC_FLAG_INTERLACED_ME){ if(s->flags & CODEC_FLAG_INTERLACED_ME){
@ -2877,7 +2877,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
} }
} }
if(s->pict_type==FF_B_TYPE){ if(s->pict_type==AV_PICTURE_TYPE_B){
int a, b; int a, b;
a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD); a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
@ -2911,7 +2911,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
if (estimate_qp(s, 0) < 0) if (estimate_qp(s, 0) < 0)
return -1; return -1;
if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==FF_I_TYPE && !(s->flags & CODEC_FLAG_QSCALE)) if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
s->qscale= 3; //reduce clipping problems s->qscale= 3; //reduce clipping problems
if (s->out_format == FMT_MJPEG) { if (s->out_format == FMT_MJPEG) {
@ -2931,7 +2931,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
//FIXME var duplication //FIXME var duplication
s->current_picture_ptr->key_frame= s->current_picture_ptr->key_frame=
s->current_picture.key_frame= s->pict_type == FF_I_TYPE; //FIXME pic_ptr s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
s->current_picture_ptr->pict_type= s->current_picture_ptr->pict_type=
s->current_picture.pict_type= s->pict_type; s->current_picture.pict_type= s->pict_type;

@ -110,9 +110,9 @@ int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx)
render->p_past_surface = NULL; render->p_past_surface = NULL;
switch(s->pict_type) { switch(s->pict_type) {
case FF_I_TYPE: case AV_PICTURE_TYPE_I:
return 0; // no prediction from other frames return 0; // no prediction from other frames
case FF_B_TYPE: case AV_PICTURE_TYPE_B:
next = (struct xvmc_pix_fmt*)s->next_picture.data[2]; next = (struct xvmc_pix_fmt*)s->next_picture.data[2];
if (!next) if (!next)
return -1; return -1;
@ -120,7 +120,7 @@ int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx)
return -1; return -1;
render->p_future_surface = next->p_surface; render->p_future_surface = next->p_surface;
// no return here, going to set forward prediction // no return here, going to set forward prediction
case FF_P_TYPE: case AV_PICTURE_TYPE_P:
last = (struct xvmc_pix_fmt*)s->last_picture.data[2]; last = (struct xvmc_pix_fmt*)s->last_picture.data[2];
if (!last) if (!last)
last = render; // predict second field from the first last = render; // predict second field from the first

@ -308,7 +308,7 @@ static void find_best_tables(MpegEncContext * s)
int intra_luma_count = s->ac_stats[1][0][level][run][last]; int intra_luma_count = s->ac_stats[1][0][level][run][last];
int intra_chroma_count= s->ac_stats[1][1][level][run][last]; int intra_chroma_count= s->ac_stats[1][1][level][run][last];
if(s->pict_type==FF_I_TYPE){ if(s->pict_type==AV_PICTURE_TYPE_I){
size += intra_luma_count *rl_length[i ][level][run][last]; size += intra_luma_count *rl_length[i ][level][run][last];
chroma_size+= intra_chroma_count*rl_length[i+3][level][run][last]; chroma_size+= intra_chroma_count*rl_length[i+3][level][run][last];
}else{ }else{
@ -333,7 +333,7 @@ static void find_best_tables(MpegEncContext * s)
// printf("type:%d, best:%d, qp:%d, var:%d, mcvar:%d, size:%d //\n", // printf("type:%d, best:%d, qp:%d, var:%d, mcvar:%d, size:%d //\n",
// s->pict_type, best, s->qscale, s->mb_var_sum, s->mc_mb_var_sum, best_size); // s->pict_type, best, s->qscale, s->mb_var_sum, s->mc_mb_var_sum, best_size);
if(s->pict_type==FF_P_TYPE) chroma_best= best; if(s->pict_type==AV_PICTURE_TYPE_P) chroma_best= best;
memset(s->ac_stats, 0, sizeof(int)*(MAX_LEVEL+1)*(MAX_RUN+1)*2*2*2); memset(s->ac_stats, 0, sizeof(int)*(MAX_LEVEL+1)*(MAX_RUN+1)*2*2*2);
@ -342,7 +342,7 @@ static void find_best_tables(MpegEncContext * s)
if(s->pict_type != s->last_non_b_pict_type){ if(s->pict_type != s->last_non_b_pict_type){
s->rl_table_index= 2; s->rl_table_index= 2;
if(s->pict_type==FF_I_TYPE) if(s->pict_type==AV_PICTURE_TYPE_I)
s->rl_chroma_table_index= 1; s->rl_chroma_table_index= 1;
else else
s->rl_chroma_table_index= 2; s->rl_chroma_table_index= 2;
@ -369,10 +369,10 @@ void msmpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
s->use_skip_mb_code = 1; /* only if P frame */ s->use_skip_mb_code = 1; /* only if P frame */
s->per_mb_rl_table = 0; s->per_mb_rl_table = 0;
if(s->msmpeg4_version==4) if(s->msmpeg4_version==4)
s->inter_intra_pred= (s->width*s->height < 320*240 && s->bit_rate<=II_BITRATE && s->pict_type==FF_P_TYPE); s->inter_intra_pred= (s->width*s->height < 320*240 && s->bit_rate<=II_BITRATE && s->pict_type==AV_PICTURE_TYPE_P);
//printf("%d %d %d %d %d\n", s->pict_type, s->bit_rate, s->inter_intra_pred, s->width, s->height); //printf("%d %d %d %d %d\n", s->pict_type, s->bit_rate, s->inter_intra_pred, s->width, s->height);
if (s->pict_type == FF_I_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_I) {
s->slice_height= s->mb_height/1; s->slice_height= s->mb_height/1;
put_bits(&s->pb, 5, 0x16 + s->mb_height/s->slice_height); put_bits(&s->pb, 5, 0x16 + s->mb_height/s->slice_height);
@ -619,7 +619,7 @@ void msmpeg4_encode_mb(MpegEncContext * s,
} }
if(s->msmpeg4_version<=2){ if(s->msmpeg4_version<=2){
if (s->pict_type == FF_I_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_I) {
put_bits(&s->pb, put_bits(&s->pb,
v2_intra_cbpc[cbp&3][1], v2_intra_cbpc[cbp&3][0]); v2_intra_cbpc[cbp&3][1], v2_intra_cbpc[cbp&3][0]);
} else { } else {
@ -634,7 +634,7 @@ void msmpeg4_encode_mb(MpegEncContext * s,
ff_h263_cbpy_tab[cbp>>2][1], ff_h263_cbpy_tab[cbp>>2][1],
ff_h263_cbpy_tab[cbp>>2][0]); ff_h263_cbpy_tab[cbp>>2][0]);
}else{ }else{
if (s->pict_type == FF_I_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_I) {
put_bits(&s->pb, put_bits(&s->pb,
ff_msmp4_mb_i_table[coded_cbp][1], ff_msmp4_mb_i_table[coded_cbp][0]); ff_msmp4_mb_i_table[coded_cbp][1], ff_msmp4_mb_i_table[coded_cbp][0]);
} else { } else {
@ -1094,7 +1094,7 @@ static int msmpeg4v12_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
{ {
int cbp, code, i; int cbp, code, i;
if (s->pict_type == FF_P_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_P) {
if (s->use_skip_mb_code) { if (s->use_skip_mb_code) {
if (get_bits1(&s->gb)) { if (get_bits1(&s->gb)) {
/* skip mb */ /* skip mb */
@ -1161,7 +1161,7 @@ static int msmpeg4v12_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
} else{ } else{
s->ac_pred = 0; s->ac_pred = 0;
cbp|= get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1)<<2; //FIXME check errors cbp|= get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1)<<2; //FIXME check errors
if(s->pict_type==FF_P_TYPE) cbp^=0x3C; if(s->pict_type==AV_PICTURE_TYPE_P) cbp^=0x3C;
} }
} }
@ -1182,7 +1182,7 @@ static int msmpeg4v34_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
uint8_t *coded_val; uint8_t *coded_val;
uint32_t * const mb_type_ptr= &s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]; uint32_t * const mb_type_ptr= &s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ];
if (s->pict_type == FF_P_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_P) {
if (s->use_skip_mb_code) { if (s->use_skip_mb_code) {
if (get_bits1(&s->gb)) { if (get_bits1(&s->gb)) {
/* skip mb */ /* skip mb */
@ -1390,15 +1390,15 @@ int msmpeg4_decode_picture_header(MpegEncContext * s)
} }
s->pict_type = get_bits(&s->gb, 2) + 1; s->pict_type = get_bits(&s->gb, 2) + 1;
if (s->pict_type != FF_I_TYPE && if (s->pict_type != AV_PICTURE_TYPE_I &&
s->pict_type != FF_P_TYPE){ s->pict_type != AV_PICTURE_TYPE_P){
av_log(s->avctx, AV_LOG_ERROR, "invalid picture type\n"); av_log(s->avctx, AV_LOG_ERROR, "invalid picture type\n");
return -1; return -1;
} }
#if 0 #if 0
{ {
static int had_i=0; static int had_i=0;
if(s->pict_type == FF_I_TYPE) had_i=1; if(s->pict_type == AV_PICTURE_TYPE_I) had_i=1;
if(!had_i) return -1; if(!had_i) return -1;
} }
#endif #endif
@ -1408,7 +1408,7 @@ int msmpeg4_decode_picture_header(MpegEncContext * s)
return -1; return -1;
} }
if (s->pict_type == FF_I_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_I) {
code = get_bits(&s->gb, 5); code = get_bits(&s->gb, 5);
if(s->msmpeg4_version==1){ if(s->msmpeg4_version==1){
if(code==0 || code>s->mb_height){ if(code==0 || code>s->mb_height){

@ -256,11 +256,11 @@ static int mxpeg_decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
} }
jpg->picture_ptr->pict_type = FF_P_TYPE; jpg->picture_ptr->pict_type = AV_PICTURE_TYPE_P;
jpg->picture_ptr->key_frame = 0; jpg->picture_ptr->key_frame = 0;
jpg->got_picture = 1; jpg->got_picture = 1;
} else { } else {
jpg->picture_ptr->pict_type = FF_I_TYPE; jpg->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
jpg->picture_ptr->key_frame = 1; jpg->picture_ptr->key_frame = 1;
} }

@ -208,7 +208,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
return -1; return -1;
} }
c->pic.pict_type = keyframe ? FF_I_TYPE : FF_P_TYPE; c->pic.pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
c->pic.key_frame = keyframe; c->pic.key_frame = keyframe;
// decompress/copy/whatever data // decompress/copy/whatever data
switch (comptype) { switch (comptype) {

@ -40,7 +40,7 @@ static int pam_encode_frame(AVCodecContext *avctx, unsigned char *outbuf,
} }
*p = *pict; *p = *pict;
p->pict_type = FF_I_TYPE; p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1; p->key_frame = 1;
s->bytestream_start = s->bytestream_start =

@ -72,7 +72,7 @@ AVCodecParserContext *av_parser_init(int codec_id)
} }
} }
s->fetch_timestamp=1; s->fetch_timestamp=1;
s->pict_type = FF_I_TYPE; s->pict_type = AV_PICTURE_TYPE_I;
s->key_frame = -1; s->key_frame = -1;
s->convergence_duration = 0; s->convergence_duration = 0;
s->dts_sync_point = INT_MIN; s->dts_sync_point = INT_MIN;
@ -186,7 +186,7 @@ int av_parser_change(AVCodecParserContext *s,
*poutbuf_size= buf_size; *poutbuf_size= buf_size;
if(avctx->extradata){ if(avctx->extradata){
if( (keyframe && (avctx->flags2 & CODEC_FLAG2_LOCAL_HEADER)) if( (keyframe && (avctx->flags2 & CODEC_FLAG2_LOCAL_HEADER))
/*||(s->pict_type != FF_I_TYPE && (s->flags & PARSER_FLAG_DUMP_EXTRADATA_AT_NOKEY))*/ /*||(s->pict_type != AV_PICTURE_TYPE_I && (s->flags & PARSER_FLAG_DUMP_EXTRADATA_AT_NOKEY))*/
/*||(? && (s->flags & PARSER_FLAG_DUMP_EXTRADATA_AT_BEGIN)*/){ /*||(? && (s->flags & PARSER_FLAG_DUMP_EXTRADATA_AT_BEGIN)*/){
int size= buf_size + avctx->extradata_size; int size= buf_size + avctx->extradata_size;
*poutbuf_size= size; *poutbuf_size= size;

@ -152,7 +152,7 @@ static int pcx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
return -1; return -1;
} }
p->pict_type = FF_I_TYPE; p->pict_type = AV_PICTURE_TYPE_I;
ptr = p->data[0]; ptr = p->data[0];
stride = p->linesize[0]; stride = p->linesize[0];

@ -108,7 +108,7 @@ static int pcx_encode_frame(AVCodecContext *avctx,
const uint8_t *src; const uint8_t *src;
*pict = *(AVFrame *)data; *pict = *(AVFrame *)data;
pict->pict_type = FF_I_TYPE; pict->pict_type = AV_PICTURE_TYPE_I;
pict->key_frame = 1; pict->key_frame = 1;
if (avctx->width > 65535 || avctx->height > 65535) { if (avctx->width > 65535 || avctx->height > 65535) {

@ -148,7 +148,7 @@ static int decode_frame(AVCodecContext *avctx,
return -1; return -1;
} }
memset(s->frame.data[0], 0, s->height * s->frame.linesize[0]); memset(s->frame.data[0], 0, s->height * s->frame.linesize[0]);
s->frame.pict_type = FF_I_TYPE; s->frame.pict_type = AV_PICTURE_TYPE_I;
s->frame.palette_has_changed = 1; s->frame.palette_has_changed = 1;
palette = (uint32_t*)s->frame.data[1]; palette = (uint32_t*)s->frame.data[1];

@ -485,7 +485,7 @@ static int decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
goto fail; goto fail;
} }
p->pict_type= FF_I_TYPE; p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1; p->key_frame= 1;
p->interlaced_frame = !!s->interlace_type; p->interlaced_frame = !!s->interlace_type;

@ -243,7 +243,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
uint8_t *top_buf = NULL; uint8_t *top_buf = NULL;
*p = *pict; *p = *pict;
p->pict_type= FF_I_TYPE; p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1; p->key_frame= 1;
s->bytestream_start= s->bytestream_start=

@ -52,7 +52,7 @@ static int pnm_decode_frame(AVCodecContext *avctx, void *data,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1; return -1;
} }
p->pict_type = FF_I_TYPE; p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1; p->key_frame = 1;
switch (avctx->pix_fmt) { switch (avctx->pix_fmt) {

@ -39,7 +39,7 @@ static int pnm_encode_frame(AVCodecContext *avctx, unsigned char *outbuf,
} }
*p = *pict; *p = *pict;
p->pict_type = FF_I_TYPE; p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1; p->key_frame = 1;
s->bytestream_start = s->bytestream_start =

@ -74,7 +74,7 @@ static int ptx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
return -1; return -1;
} }
p->pict_type = FF_I_TYPE; p->pict_type = AV_PICTURE_TYPE_I;
ptr = p->data[0]; ptr = p->data[0];
stride = p->linesize[0]; stride = p->linesize[0];

@ -54,7 +54,7 @@ static int decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1; return -1;
} }
p->pict_type= FF_I_TYPE; p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1; p->key_frame= 1;
outdata = a->pic.data[0]; outdata = a->pic.data[0];

@ -295,11 +295,11 @@ static int qtrle_encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size,
if (avctx->gop_size == 0 || (s->avctx->frame_number % avctx->gop_size) == 0) { if (avctx->gop_size == 0 || (s->avctx->frame_number % avctx->gop_size) == 0) {
/* I-Frame */ /* I-Frame */
p->pict_type = FF_I_TYPE; p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1; p->key_frame = 1;
} else { } else {
/* P-Frame */ /* P-Frame */
p->pict_type = FF_P_TYPE; p->pict_type = AV_PICTURE_TYPE_P;
p->key_frame = 0; p->key_frame = 0;
} }

@ -54,7 +54,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
if (avctx->get_buffer(avctx, pic) < 0) if (avctx->get_buffer(avctx, pic) < 0)
return -1; return -1;
pic->pict_type = FF_I_TYPE; pic->pict_type = AV_PICTURE_TYPE_I;
pic->key_frame = 1; pic->key_frame = 1;
dst_line = pic->data[0]; dst_line = pic->data[0];

@ -144,7 +144,7 @@ int ff_rate_control_init(MpegEncContext *s)
/* init all to skipped p frames (with b frames we might have a not encoded frame at the end FIXME) */ /* init all to skipped p frames (with b frames we might have a not encoded frame at the end FIXME) */
for(i=0; i<rcc->num_entries; i++){ for(i=0; i<rcc->num_entries; i++){
RateControlEntry *rce= &rcc->entry[i]; RateControlEntry *rce= &rcc->entry[i];
rce->pict_type= rce->new_pict_type=FF_P_TYPE; rce->pict_type= rce->new_pict_type=AV_PICTURE_TYPE_P;
rce->qscale= rce->new_qscale=FF_QP2LAMBDA * 2; rce->qscale= rce->new_qscale=FF_QP2LAMBDA * 2;
rce->misc_bits= s->mb_num + 10; rce->misc_bits= s->mb_num + 10;
rce->mb_var_sum= s->mb_num*100; rce->mb_var_sum= s->mb_num*100;
@ -211,9 +211,9 @@ int ff_rate_control_init(MpegEncContext *s)
double bits= s->avctx->rc_initial_cplx * (i/10000.0 + 1.0)*s->mb_num; double bits= s->avctx->rc_initial_cplx * (i/10000.0 + 1.0)*s->mb_num;
RateControlEntry rce; RateControlEntry rce;
if (i%((s->gop_size+3)/4)==0) rce.pict_type= FF_I_TYPE; if (i%((s->gop_size+3)/4)==0) rce.pict_type= AV_PICTURE_TYPE_I;
else if(i%(s->max_b_frames+1)) rce.pict_type= FF_B_TYPE; else if(i%(s->max_b_frames+1)) rce.pict_type= AV_PICTURE_TYPE_B;
else rce.pict_type= FF_P_TYPE; else rce.pict_type= AV_PICTURE_TYPE_P;
rce.new_pict_type= rce.pict_type; rce.new_pict_type= rce.pict_type;
rce.mc_mb_var_sum= bits*s->mb_num/100000; rce.mc_mb_var_sum= bits*s->mb_num/100000;
@ -223,7 +223,7 @@ int ff_rate_control_init(MpegEncContext *s)
rce.b_code = 1; rce.b_code = 1;
rce.misc_bits= 1; rce.misc_bits= 1;
if(s->pict_type== FF_I_TYPE){ if(s->pict_type== AV_PICTURE_TYPE_I){
rce.i_count = s->mb_num; rce.i_count = s->mb_num;
rce.i_tex_bits= bits; rce.i_tex_bits= bits;
rce.p_tex_bits= 0; rce.p_tex_bits= 0;
@ -317,23 +317,23 @@ static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_f
rce->p_tex_bits*rce->qscale, rce->p_tex_bits*rce->qscale,
(rce->i_tex_bits + rce->p_tex_bits)*(double)rce->qscale, (rce->i_tex_bits + rce->p_tex_bits)*(double)rce->qscale,
rce->mv_bits/mb_num, rce->mv_bits/mb_num,
rce->pict_type == FF_B_TYPE ? (rce->f_code + rce->b_code)*0.5 : rce->f_code, rce->pict_type == AV_PICTURE_TYPE_B ? (rce->f_code + rce->b_code)*0.5 : rce->f_code,
rce->i_count/mb_num, rce->i_count/mb_num,
rce->mc_mb_var_sum/mb_num, rce->mc_mb_var_sum/mb_num,
rce->mb_var_sum/mb_num, rce->mb_var_sum/mb_num,
rce->pict_type == FF_I_TYPE, rce->pict_type == AV_PICTURE_TYPE_I,
rce->pict_type == FF_P_TYPE, rce->pict_type == AV_PICTURE_TYPE_P,
rce->pict_type == FF_B_TYPE, rce->pict_type == AV_PICTURE_TYPE_B,
rcc->qscale_sum[pict_type] / (double)rcc->frame_count[pict_type], rcc->qscale_sum[pict_type] / (double)rcc->frame_count[pict_type],
a->qcompress, a->qcompress,
/* rcc->last_qscale_for[FF_I_TYPE], /* rcc->last_qscale_for[AV_PICTURE_TYPE_I],
rcc->last_qscale_for[FF_P_TYPE], rcc->last_qscale_for[AV_PICTURE_TYPE_P],
rcc->last_qscale_for[FF_B_TYPE], rcc->last_qscale_for[AV_PICTURE_TYPE_B],
rcc->next_non_b_qscale,*/ rcc->next_non_b_qscale,*/
rcc->i_cplx_sum[FF_I_TYPE] / (double)rcc->frame_count[FF_I_TYPE], rcc->i_cplx_sum[AV_PICTURE_TYPE_I] / (double)rcc->frame_count[AV_PICTURE_TYPE_I],
rcc->i_cplx_sum[FF_P_TYPE] / (double)rcc->frame_count[FF_P_TYPE], rcc->i_cplx_sum[AV_PICTURE_TYPE_P] / (double)rcc->frame_count[AV_PICTURE_TYPE_P],
rcc->p_cplx_sum[FF_P_TYPE] / (double)rcc->frame_count[FF_P_TYPE], rcc->p_cplx_sum[AV_PICTURE_TYPE_P] / (double)rcc->frame_count[AV_PICTURE_TYPE_P],
rcc->p_cplx_sum[FF_B_TYPE] / (double)rcc->frame_count[FF_B_TYPE], rcc->p_cplx_sum[AV_PICTURE_TYPE_B] / (double)rcc->frame_count[AV_PICTURE_TYPE_B],
(rcc->i_cplx_sum[pict_type] + rcc->p_cplx_sum[pict_type]) / (double)rcc->frame_count[pict_type], (rcc->i_cplx_sum[pict_type] + rcc->p_cplx_sum[pict_type]) / (double)rcc->frame_count[pict_type],
0 0
}; };
@ -364,9 +364,9 @@ static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_f
q= bits2qp(rce, bits); q= bits2qp(rce, bits);
/* I/B difference */ /* I/B difference */
if (pict_type==FF_I_TYPE && s->avctx->i_quant_factor<0.0) if (pict_type==AV_PICTURE_TYPE_I && s->avctx->i_quant_factor<0.0)
q= -q*s->avctx->i_quant_factor + s->avctx->i_quant_offset; q= -q*s->avctx->i_quant_factor + s->avctx->i_quant_offset;
else if(pict_type==FF_B_TYPE && s->avctx->b_quant_factor<0.0) else if(pict_type==AV_PICTURE_TYPE_B && s->avctx->b_quant_factor<0.0)
q= -q*s->avctx->b_quant_factor + s->avctx->b_quant_offset; q= -q*s->avctx->b_quant_factor + s->avctx->b_quant_offset;
if(q<1) q=1; if(q<1) q=1;
@ -377,17 +377,17 @@ static double get_diff_limited_q(MpegEncContext *s, RateControlEntry *rce, doubl
RateControlContext *rcc= &s->rc_context; RateControlContext *rcc= &s->rc_context;
AVCodecContext *a= s->avctx; AVCodecContext *a= s->avctx;
const int pict_type= rce->new_pict_type; const int pict_type= rce->new_pict_type;
const double last_p_q = rcc->last_qscale_for[FF_P_TYPE]; const double last_p_q = rcc->last_qscale_for[AV_PICTURE_TYPE_P];
const double last_non_b_q= rcc->last_qscale_for[rcc->last_non_b_pict_type]; const double last_non_b_q= rcc->last_qscale_for[rcc->last_non_b_pict_type];
if (pict_type==FF_I_TYPE && (a->i_quant_factor>0.0 || rcc->last_non_b_pict_type==FF_P_TYPE)) if (pict_type==AV_PICTURE_TYPE_I && (a->i_quant_factor>0.0 || rcc->last_non_b_pict_type==AV_PICTURE_TYPE_P))
q= last_p_q *FFABS(a->i_quant_factor) + a->i_quant_offset; q= last_p_q *FFABS(a->i_quant_factor) + a->i_quant_offset;
else if(pict_type==FF_B_TYPE && a->b_quant_factor>0.0) else if(pict_type==AV_PICTURE_TYPE_B && a->b_quant_factor>0.0)
q= last_non_b_q* a->b_quant_factor + a->b_quant_offset; q= last_non_b_q* a->b_quant_factor + a->b_quant_offset;
if(q<1) q=1; if(q<1) q=1;
/* last qscale / qdiff stuff */ /* last qscale / qdiff stuff */
if(rcc->last_non_b_pict_type==pict_type || pict_type!=FF_I_TYPE){ if(rcc->last_non_b_pict_type==pict_type || pict_type!=AV_PICTURE_TYPE_I){
double last_q= rcc->last_qscale_for[pict_type]; double last_q= rcc->last_qscale_for[pict_type];
const int maxdiff= FF_QP2LAMBDA * a->max_qdiff; const int maxdiff= FF_QP2LAMBDA * a->max_qdiff;
@ -397,7 +397,7 @@ static double get_diff_limited_q(MpegEncContext *s, RateControlEntry *rce, doubl
rcc->last_qscale_for[pict_type]= q; //Note we cannot do that after blurring rcc->last_qscale_for[pict_type]= q; //Note we cannot do that after blurring
if(pict_type!=FF_B_TYPE) if(pict_type!=AV_PICTURE_TYPE_B)
rcc->last_non_b_pict_type= pict_type; rcc->last_non_b_pict_type= pict_type;
return q; return q;
@ -412,10 +412,10 @@ static void get_qminmax(int *qmin_ret, int *qmax_ret, MpegEncContext *s, int pic
assert(qmin <= qmax); assert(qmin <= qmax);
if(pict_type==FF_B_TYPE){ if(pict_type==AV_PICTURE_TYPE_B){
qmin= (int)(qmin*FFABS(s->avctx->b_quant_factor)+s->avctx->b_quant_offset + 0.5); qmin= (int)(qmin*FFABS(s->avctx->b_quant_factor)+s->avctx->b_quant_offset + 0.5);
qmax= (int)(qmax*FFABS(s->avctx->b_quant_factor)+s->avctx->b_quant_offset + 0.5); qmax= (int)(qmax*FFABS(s->avctx->b_quant_factor)+s->avctx->b_quant_offset + 0.5);
}else if(pict_type==FF_I_TYPE){ }else if(pict_type==AV_PICTURE_TYPE_I){
qmin= (int)(qmin*FFABS(s->avctx->i_quant_factor)+s->avctx->i_quant_offset + 0.5); qmin= (int)(qmin*FFABS(s->avctx->i_quant_factor)+s->avctx->i_quant_offset + 0.5);
qmax= (int)(qmax*FFABS(s->avctx->i_quant_factor)+s->avctx->i_quant_offset + 0.5); qmax= (int)(qmax*FFABS(s->avctx->i_quant_factor)+s->avctx->i_quant_offset + 0.5);
} }
@ -441,7 +441,7 @@ static double modify_qscale(MpegEncContext *s, RateControlEntry *rce, double q,
get_qminmax(&qmin, &qmax, s, pict_type); get_qminmax(&qmin, &qmax, s, pict_type);
/* modulation */ /* modulation */
if(s->avctx->rc_qmod_freq && frame_num%s->avctx->rc_qmod_freq==0 && pict_type==FF_P_TYPE) if(s->avctx->rc_qmod_freq && frame_num%s->avctx->rc_qmod_freq==0 && pict_type==AV_PICTURE_TYPE_P)
q*= s->avctx->rc_qmod_amp; q*= s->avctx->rc_qmod_amp;
//printf("q:%f\n", q); //printf("q:%f\n", q);
@ -684,7 +684,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
//printf("input_pic_num:%d pic_num:%d frame_rate:%d\n", s->input_picture_number, s->picture_number, s->frame_rate); //printf("input_pic_num:%d pic_num:%d frame_rate:%d\n", s->input_picture_number, s->picture_number, s->frame_rate);
/* update predictors */ /* update predictors */
if(picture_number>2 && !dry_run){ if(picture_number>2 && !dry_run){
const int last_var= s->last_pict_type == FF_I_TYPE ? rcc->last_mb_var_sum : rcc->last_mc_mb_var_sum; const int last_var= s->last_pict_type == AV_PICTURE_TYPE_I ? rcc->last_mb_var_sum : rcc->last_mc_mb_var_sum;
update_predictor(&rcc->pred[s->last_pict_type], rcc->last_qscale, sqrt(last_var), s->frame_bits); update_predictor(&rcc->pred[s->last_pict_type], rcc->last_qscale, sqrt(last_var), s->frame_bits);
} }
@ -699,7 +699,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
//FIXME add a dts field to AVFrame and ensure its set and use it here instead of reordering //FIXME add a dts field to AVFrame and ensure its set and use it here instead of reordering
//but the reordering is simpler for now until h.264 b pyramid must be handeld //but the reordering is simpler for now until h.264 b pyramid must be handeld
if(s->pict_type == FF_B_TYPE || s->low_delay) if(s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)
dts_pic= s->current_picture_ptr; dts_pic= s->current_picture_ptr;
else else
dts_pic= s->last_picture_ptr; dts_pic= s->last_picture_ptr;
@ -717,11 +717,11 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
br_compensation= (a->bit_rate_tolerance - diff)/a->bit_rate_tolerance; br_compensation= (a->bit_rate_tolerance - diff)/a->bit_rate_tolerance;
if(br_compensation<=0.0) br_compensation=0.001; if(br_compensation<=0.0) br_compensation=0.001;
var= pict_type == FF_I_TYPE ? pic->mb_var_sum : pic->mc_mb_var_sum; var= pict_type == AV_PICTURE_TYPE_I ? pic->mb_var_sum : pic->mc_mb_var_sum;
short_term_q = 0; /* avoid warning */ short_term_q = 0; /* avoid warning */
if(s->flags&CODEC_FLAG_PASS2){ if(s->flags&CODEC_FLAG_PASS2){
if(pict_type!=FF_I_TYPE) if(pict_type!=AV_PICTURE_TYPE_I)
assert(pict_type == rce->new_pict_type); assert(pict_type == rce->new_pict_type);
q= rce->new_qscale / br_compensation; q= rce->new_qscale / br_compensation;
@ -737,7 +737,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
rce->misc_bits= 1; rce->misc_bits= 1;
bits= predict_size(&rcc->pred[pict_type], rce->qscale, sqrt(var)); bits= predict_size(&rcc->pred[pict_type], rce->qscale, sqrt(var));
if(pict_type== FF_I_TYPE){ if(pict_type== AV_PICTURE_TYPE_I){
rce->i_count = s->mb_num; rce->i_count = s->mb_num;
rce->i_tex_bits= bits; rce->i_tex_bits= bits;
rce->p_tex_bits= 0; rce->p_tex_bits= 0;
@ -767,7 +767,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
//printf("%f ", q); //printf("%f ", q);
assert(q>0.0); assert(q>0.0);
if(pict_type==FF_P_TYPE || s->intra_only){ //FIXME type dependent blur like in 2-pass if(pict_type==AV_PICTURE_TYPE_P || s->intra_only){ //FIXME type dependent blur like in 2-pass
rcc->short_term_qsum*=a->qblur; rcc->short_term_qsum*=a->qblur;
rcc->short_term_qcount*=a->qblur; rcc->short_term_qcount*=a->qblur;
@ -842,7 +842,7 @@ static int init_pass2(MpegEncContext *s)
complexity[rce->new_pict_type]+= (rce->i_tex_bits+ rce->p_tex_bits)*(double)rce->qscale; complexity[rce->new_pict_type]+= (rce->i_tex_bits+ rce->p_tex_bits)*(double)rce->qscale;
const_bits[rce->new_pict_type]+= rce->mv_bits + rce->misc_bits; const_bits[rce->new_pict_type]+= rce->mv_bits + rce->misc_bits;
} }
all_const_bits= const_bits[FF_I_TYPE] + const_bits[FF_P_TYPE] + const_bits[FF_B_TYPE]; all_const_bits= const_bits[AV_PICTURE_TYPE_I] + const_bits[AV_PICTURE_TYPE_P] + const_bits[AV_PICTURE_TYPE_B];
if(all_available_bits < all_const_bits){ if(all_available_bits < all_const_bits){
av_log(s->avctx, AV_LOG_ERROR, "requested bitrate is too low\n"); av_log(s->avctx, AV_LOG_ERROR, "requested bitrate is too low\n");

@ -92,7 +92,7 @@ static av_cold int raw_init_decoder(AVCodecContext *avctx)
if (!context->buffer) if (!context->buffer)
return -1; return -1;
} }
context->pic.pict_type = FF_I_TYPE; context->pic.pict_type = AV_PICTURE_TYPE_I;
context->pic.key_frame = 1; context->pic.key_frame = 1;
avctx->coded_frame= &context->pic; avctx->coded_frame= &context->pic;

@ -32,7 +32,7 @@
static av_cold int raw_init_encoder(AVCodecContext *avctx) static av_cold int raw_init_encoder(AVCodecContext *avctx)
{ {
avctx->coded_frame = (AVFrame *)avctx->priv_data; avctx->coded_frame = (AVFrame *)avctx->priv_data;
avctx->coded_frame->pict_type = FF_I_TYPE; avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1; avctx->coded_frame->key_frame = 1;
avctx->bits_per_coded_sample = av_get_bits_per_pixel(&av_pix_fmt_descriptors[avctx->pix_fmt]); avctx->bits_per_coded_sample = av_get_bits_per_pixel(&av_pix_fmt_descriptors[avctx->pix_fmt]);
if(!avctx->codec_tag) if(!avctx->codec_tag)

@ -240,9 +240,9 @@ static int rv10_decode_picture_header(MpegEncContext *s)
marker = get_bits1(&s->gb); marker = get_bits1(&s->gb);
if (get_bits1(&s->gb)) if (get_bits1(&s->gb))
s->pict_type = FF_P_TYPE; s->pict_type = AV_PICTURE_TYPE_P;
else else
s->pict_type = FF_I_TYPE; s->pict_type = AV_PICTURE_TYPE_I;
if(!marker) av_log(s->avctx, AV_LOG_ERROR, "marker missing\n"); if(!marker) av_log(s->avctx, AV_LOG_ERROR, "marker missing\n");
pb_frame = get_bits1(&s->gb); pb_frame = get_bits1(&s->gb);
@ -259,7 +259,7 @@ static int rv10_decode_picture_header(MpegEncContext *s)
return -1; return -1;
} }
if (s->pict_type == FF_I_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_I) {
if (s->rv10_version == 3) { if (s->rv10_version == 3) {
/* specific MPEG like DC coding not used */ /* specific MPEG like DC coding not used */
s->last_dc[0] = get_bits(&s->gb, 8); s->last_dc[0] = get_bits(&s->gb, 8);
@ -319,16 +319,16 @@ static int rv20_decode_picture_header(MpegEncContext *s)
i= get_bits(&s->gb, 2); i= get_bits(&s->gb, 2);
switch(i){ switch(i){
case 0: s->pict_type= FF_I_TYPE; break; case 0: s->pict_type= AV_PICTURE_TYPE_I; break;
case 1: s->pict_type= FF_I_TYPE; break; //hmm ... case 1: s->pict_type= AV_PICTURE_TYPE_I; break; //hmm ...
case 2: s->pict_type= FF_P_TYPE; break; case 2: s->pict_type= AV_PICTURE_TYPE_P; break;
case 3: s->pict_type= FF_B_TYPE; break; case 3: s->pict_type= AV_PICTURE_TYPE_B; break;
default: default:
av_log(s->avctx, AV_LOG_ERROR, "unknown frame type\n"); av_log(s->avctx, AV_LOG_ERROR, "unknown frame type\n");
return -1; return -1;
} }
if(s->last_picture_ptr==NULL && s->pict_type==FF_B_TYPE){ if(s->last_picture_ptr==NULL && s->pict_type==AV_PICTURE_TYPE_B){
av_log(s->avctx, AV_LOG_ERROR, "early B pix\n"); av_log(s->avctx, AV_LOG_ERROR, "early B pix\n");
return -1; return -1;
} }
@ -399,7 +399,7 @@ static int rv20_decode_picture_header(MpegEncContext *s)
if(seq - s->time > 0x4000) seq -= 0x8000; if(seq - s->time > 0x4000) seq -= 0x8000;
if(seq - s->time < -0x4000) seq += 0x8000; if(seq - s->time < -0x4000) seq += 0x8000;
if(seq != s->time){ if(seq != s->time){
if(s->pict_type!=FF_B_TYPE){ if(s->pict_type!=AV_PICTURE_TYPE_B){
s->time= seq; s->time= seq;
s->pp_time= s->time - s->last_non_b_time; s->pp_time= s->time - s->last_non_b_time;
s->last_non_b_time= s->time; s->last_non_b_time= s->time;
@ -422,7 +422,7 @@ av_log(s->avctx, AV_LOG_DEBUG, "\n");*/
s->f_code = 1; s->f_code = 1;
s->unrestricted_mv = 1; s->unrestricted_mv = 1;
s->h263_aic= s->pict_type == FF_I_TYPE; s->h263_aic= s->pict_type == AV_PICTURE_TYPE_I;
// s->alt_inter_vlc=1; // s->alt_inter_vlc=1;
// s->obmc=1; // s->obmc=1;
// s->umvplus=1; // s->umvplus=1;
@ -435,7 +435,7 @@ av_log(s->avctx, AV_LOG_DEBUG, "\n");*/
seq, s->mb_x, s->mb_y, s->pict_type, s->qscale, s->no_rounding); seq, s->mb_x, s->mb_y, s->pict_type, s->qscale, s->no_rounding);
} }
assert(s->pict_type != FF_B_TYPE || !s->low_delay); assert(s->pict_type != AV_PICTURE_TYPE_B || !s->low_delay);
return s->mb_width*s->mb_height - mb_pos; return s->mb_width*s->mb_height - mb_pos;
} }
@ -616,7 +616,7 @@ static int rv10_decode_packet(AVCodecContext *avctx,
av_log(s->avctx, AV_LOG_ERROR, "ERROR at MB %d %d\n", s->mb_x, s->mb_y); av_log(s->avctx, AV_LOG_ERROR, "ERROR at MB %d %d\n", s->mb_x, s->mb_y);
return -1; return -1;
} }
if(s->pict_type != FF_B_TYPE) if(s->pict_type != AV_PICTURE_TYPE_B)
ff_h263_update_motion_val(s); ff_h263_update_motion_val(s);
MPV_decode_mb(s, s->block); MPV_decode_mb(s, s->block);
if(s->loop_filter) if(s->loop_filter)
@ -693,7 +693,7 @@ static int rv10_decode_frame(AVCodecContext *avctx,
ff_er_frame_end(s); ff_er_frame_end(s);
MPV_frame_end(s); MPV_frame_end(s);
if (s->pict_type == FF_B_TYPE || s->low_delay) { if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
*pict= *(AVFrame*)s->current_picture_ptr; *pict= *(AVFrame*)s->current_picture_ptr;
} else if (s->last_picture_ptr != NULL) { } else if (s->last_picture_ptr != NULL) {
*pict= *(AVFrame*)s->last_picture_ptr; *pict= *(AVFrame*)s->last_picture_ptr;

@ -36,13 +36,13 @@ void rv10_encode_picture_header(MpegEncContext *s, int picture_number)
put_bits(&s->pb, 1, 1); /* marker */ put_bits(&s->pb, 1, 1); /* marker */
put_bits(&s->pb, 1, (s->pict_type == FF_P_TYPE)); put_bits(&s->pb, 1, (s->pict_type == AV_PICTURE_TYPE_P));
put_bits(&s->pb, 1, 0); /* not PB frame */ put_bits(&s->pb, 1, 0); /* not PB frame */
put_bits(&s->pb, 5, s->qscale); put_bits(&s->pb, 5, s->qscale);
if (s->pict_type == FF_I_TYPE) { if (s->pict_type == AV_PICTURE_TYPE_I) {
/* specific MPEG like DC coding not used */ /* specific MPEG like DC coding not used */
} }
/* if multiple packets per frame are sent, the position at which /* if multiple packets per frame are sent, the position at which

@ -47,7 +47,7 @@ void rv20_encode_picture_header(MpegEncContext *s, int picture_number){
assert(s->modified_quant==1); assert(s->modified_quant==1);
assert(s->loop_filter==1); assert(s->loop_filter==1);
s->h263_aic= s->pict_type == FF_I_TYPE; s->h263_aic= s->pict_type == AV_PICTURE_TYPE_I;
if(s->h263_aic){ if(s->h263_aic){
s->y_dc_scale_table= s->y_dc_scale_table=
s->c_dc_scale_table= ff_aic_dc_scale_table; s->c_dc_scale_table= ff_aic_dc_scale_table;

@ -111,7 +111,7 @@ static int rv30_decode_mb_info(RV34DecContext *r)
av_log(s->avctx, AV_LOG_ERROR, "dquant needed\n"); av_log(s->avctx, AV_LOG_ERROR, "dquant needed\n");
code -= 6; code -= 6;
} }
if(s->pict_type != FF_B_TYPE) if(s->pict_type != AV_PICTURE_TYPE_B)
return rv30_p_types[code]; return rv30_p_types[code];
else else
return rv30_b_types[code]; return rv30_b_types[code];

@ -816,7 +816,7 @@ static int rv34_decode_mv(RV34DecContext *r, int block_type)
ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
return 0; return 0;
case RV34_MB_SKIP: case RV34_MB_SKIP:
if(s->pict_type == FF_P_TYPE){ if(s->pict_type == AV_PICTURE_TYPE_P){
ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0); rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
break; break;
@ -1055,9 +1055,9 @@ static int rv34_decode_mb_header(RV34DecContext *r, int8_t *intra_types)
s->current_picture_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type]; s->current_picture_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
r->mb_type[mb_pos] = r->block_type; r->mb_type[mb_pos] = r->block_type;
if(r->block_type == RV34_MB_SKIP){ if(r->block_type == RV34_MB_SKIP){
if(s->pict_type == FF_P_TYPE) if(s->pict_type == AV_PICTURE_TYPE_P)
r->mb_type[mb_pos] = RV34_MB_P_16x16; r->mb_type[mb_pos] = RV34_MB_P_16x16;
if(s->pict_type == FF_B_TYPE) if(s->pict_type == AV_PICTURE_TYPE_B)
r->mb_type[mb_pos] = RV34_MB_B_DIRECT; r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
} }
r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->mb_type[mb_pos]); r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->mb_type[mb_pos]);
@ -1197,7 +1197,7 @@ static int rv34_decode_macroblock(RV34DecContext *r, int8_t *intra_types)
cbp = cbp2 = rv34_decode_mb_header(r, intra_types); cbp = cbp2 = rv34_decode_mb_header(r, intra_types);
r->cbp_luma [mb_pos] = cbp; r->cbp_luma [mb_pos] = cbp;
r->cbp_chroma[mb_pos] = cbp >> 16; r->cbp_chroma[mb_pos] = cbp >> 16;
if(s->pict_type == FF_I_TYPE) if(s->pict_type == AV_PICTURE_TYPE_I)
r->deblock_coefs[mb_pos] = 0xFFFF; r->deblock_coefs[mb_pos] = 0xFFFF;
else else
r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos]; r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos];
@ -1298,12 +1298,12 @@ static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int
r->cbp_chroma = av_realloc(r->cbp_chroma, r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_chroma)); r->cbp_chroma = av_realloc(r->cbp_chroma, r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_chroma));
r->deblock_coefs = av_realloc(r->deblock_coefs, r->s.mb_stride * r->s.mb_height * sizeof(*r->deblock_coefs)); r->deblock_coefs = av_realloc(r->deblock_coefs, r->s.mb_stride * r->s.mb_height * sizeof(*r->deblock_coefs));
} }
s->pict_type = r->si.type ? r->si.type : FF_I_TYPE; s->pict_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I;
if(MPV_frame_start(s, s->avctx) < 0) if(MPV_frame_start(s, s->avctx) < 0)
return -1; return -1;
ff_er_frame_start(s); ff_er_frame_start(s);
r->cur_pts = r->si.pts; r->cur_pts = r->si.pts;
if(s->pict_type != FF_B_TYPE){ if(s->pict_type != AV_PICTURE_TYPE_B){
r->last_pts = r->next_pts; r->last_pts = r->next_pts;
r->next_pts = r->cur_pts; r->next_pts = r->cur_pts;
} }
@ -1452,10 +1452,10 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n"); av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
return -1; return -1;
} }
if((!s->last_picture_ptr || !s->last_picture_ptr->data[0]) && si.type == FF_B_TYPE) if((!s->last_picture_ptr || !s->last_picture_ptr->data[0]) && si.type == AV_PICTURE_TYPE_B)
return -1; return -1;
if( (avctx->skip_frame >= AVDISCARD_NONREF && si.type==FF_B_TYPE) if( (avctx->skip_frame >= AVDISCARD_NONREF && si.type==AV_PICTURE_TYPE_B)
|| (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=FF_I_TYPE) || (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=AV_PICTURE_TYPE_I)
|| avctx->skip_frame >= AVDISCARD_ALL) || avctx->skip_frame >= AVDISCARD_ALL)
return buf_size; return buf_size;
@ -1494,7 +1494,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
r->loop_filter(r, s->mb_height - 1); r->loop_filter(r, s->mb_height - 1);
ff_er_frame_end(s); ff_er_frame_end(s);
MPV_frame_end(s); MPV_frame_end(s);
if (s->pict_type == FF_B_TYPE || s->low_delay) { if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
*pict= *(AVFrame*)s->current_picture_ptr; *pict= *(AVFrame*)s->current_picture_ptr;
} else if (s->last_picture_ptr != NULL) { } else if (s->last_picture_ptr != NULL) {
*pict= *(AVFrame*)s->last_picture_ptr; *pict= *(AVFrame*)s->last_picture_ptr;

@ -253,7 +253,7 @@ static int rv40_decode_mb_info(RV34DecContext *r)
prev_type = i; prev_type = i;
} }
} }
if(s->pict_type == FF_P_TYPE){ if(s->pict_type == AV_PICTURE_TYPE_P){
prev_type = block_num_to_ptype_vlc_num[prev_type]; prev_type = block_num_to_ptype_vlc_num[prev_type];
q = get_vlc2(gb, ptype_vlc[prev_type].table, PTYPE_VLC_BITS, 1); q = get_vlc2(gb, ptype_vlc[prev_type].table, PTYPE_VLC_BITS, 1);
if(q < PBTYPE_ESCAPE) if(q < PBTYPE_ESCAPE)

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save