Replace AVFrame pointer casts by proper struct member accesses.

pull/3/merge
Diego Biurrun 13 years ago
parent 562b6c744a
commit 47c0ac96aa
  1. 10
      libavcodec/cavsdec.c
  2. 6
      libavcodec/error_resilience.c
  3. 35
      libavcodec/h264.c
  4. 4
      libavcodec/h264_direct.c
  5. 2
      libavcodec/ljpegenc.c
  6. 7
      libavcodec/mpeg4videodec.c
  7. 42
      libavcodec/mpegvideo.c
  8. 12
      libavcodec/mpegvideo_enc.c

@@ -500,9 +500,9 @@ static int decode_pic(AVSContext *h) {
} }
/* release last B frame */ /* release last B frame */
if(h->picture.f.data[0]) if(h->picture.f.data[0])
s->avctx->release_buffer(s->avctx, (AVFrame *)&h->picture); s->avctx->release_buffer(s->avctx, &h->picture.f);
s->avctx->get_buffer(s->avctx, (AVFrame *)&h->picture); s->avctx->get_buffer(s->avctx, &h->picture.f);
ff_cavs_init_pic(h); ff_cavs_init_pic(h);
h->picture.poc = get_bits(&s->gb,8)*2; h->picture.poc = get_bits(&s->gb,8)*2;
@@ -591,7 +591,7 @@ static int decode_pic(AVSContext *h) {
} }
if(h->pic_type != AV_PICTURE_TYPE_B) { if(h->pic_type != AV_PICTURE_TYPE_B) {
if(h->DPB[1].f.data[0]) if(h->DPB[1].f.data[0])
s->avctx->release_buffer(s->avctx, (AVFrame *)&h->DPB[1]); s->avctx->release_buffer(s->avctx, &h->DPB[1].f);
h->DPB[1] = h->DPB[0]; h->DPB[1] = h->DPB[0];
h->DPB[0] = h->picture; h->DPB[0] = h->picture;
memset(&h->picture,0,sizeof(Picture)); memset(&h->picture,0,sizeof(Picture));
@@ -675,9 +675,9 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size,
case PIC_I_START_CODE: case PIC_I_START_CODE:
if(!h->got_keyframe) { if(!h->got_keyframe) {
if(h->DPB[0].f.data[0]) if(h->DPB[0].f.data[0])
avctx->release_buffer(avctx, (AVFrame *)&h->DPB[0]); avctx->release_buffer(avctx, &h->DPB[0].f);
if(h->DPB[1].f.data[0]) if(h->DPB[1].f.data[0])
avctx->release_buffer(avctx, (AVFrame *)&h->DPB[1]); avctx->release_buffer(avctx, &h->DPB[1].f);
h->got_keyframe = 1; h->got_keyframe = 1;
} }
case PIC_PB_START_CODE: case PIC_PB_START_CODE:

@@ -592,7 +592,7 @@ skip_mean_and_median:
if (s->avctx->codec_id == CODEC_ID_H264) { if (s->avctx->codec_id == CODEC_ID_H264) {
// FIXME // FIXME
} else { } else {
ff_thread_await_progress((AVFrame *) s->last_picture_ptr, ff_thread_await_progress(&s->last_picture_ptr->f,
mb_y, 0); mb_y, 0);
} }
if (!s->last_picture.f.motion_val[0] || if (!s->last_picture.f.motion_val[0] ||
@@ -763,7 +763,7 @@ static int is_intra_more_likely(MpegEncContext *s)
if (s->avctx->codec_id == CODEC_ID_H264) { if (s->avctx->codec_id == CODEC_ID_H264) {
// FIXME // FIXME
} else { } else {
ff_thread_await_progress((AVFrame *) s->last_picture_ptr, ff_thread_await_progress(&s->last_picture_ptr->f,
mb_y, 0); mb_y, 0);
} }
is_intra_likely += s->dsp.sad[0](NULL, last_mb_ptr, mb_ptr, is_intra_likely += s->dsp.sad[0](NULL, last_mb_ptr, mb_ptr,
@@ -1144,7 +1144,7 @@ void ff_er_frame_end(MpegEncContext *s)
if (s->avctx->codec_id == CODEC_ID_H264) { if (s->avctx->codec_id == CODEC_ID_H264) {
// FIXME // FIXME
} else { } else {
ff_thread_await_progress((AVFrame *) s->next_picture_ptr, mb_y, 0); ff_thread_await_progress(&s->next_picture_ptr->f, mb_y, 0);
} }
s->mv[0][0][0] = s->next_picture.f.motion_val[0][xy][0] * time_pb / time_pp; s->mv[0][0][0] = s->next_picture.f.motion_val[0][xy][0] * time_pb / time_pp;
s->mv[0][0][1] = s->next_picture.f.motion_val[0][xy][1] * time_pb / time_pp; s->mv[0][0][1] = s->next_picture.f.motion_val[0][xy][1] * time_pb / time_pp;

@@ -361,14 +361,26 @@ static void await_references(H264Context *h){
nrefs[list]--; nrefs[list]--;
if(!FIELD_PICTURE && ref_field_picture){ // frame referencing two fields if(!FIELD_PICTURE && ref_field_picture){ // frame referencing two fields
ff_thread_await_progress((AVFrame*)ref_pic, FFMIN((row >> 1) - !(row&1), pic_height-1), 1); ff_thread_await_progress(&ref_pic->f,
ff_thread_await_progress((AVFrame*)ref_pic, FFMIN((row >> 1) , pic_height-1), 0); FFMIN((row >> 1) - !(row & 1),
pic_height - 1),
1);
ff_thread_await_progress(&ref_pic->f,
FFMIN((row >> 1), pic_height - 1),
0);
}else if(FIELD_PICTURE && !ref_field_picture){ // field referencing one field of a frame }else if(FIELD_PICTURE && !ref_field_picture){ // field referencing one field of a frame
ff_thread_await_progress((AVFrame*)ref_pic, FFMIN(row*2 + ref_field , pic_height-1), 0); ff_thread_await_progress(&ref_pic->f,
FFMIN(row * 2 + ref_field,
pic_height - 1),
0);
}else if(FIELD_PICTURE){ }else if(FIELD_PICTURE){
ff_thread_await_progress((AVFrame*)ref_pic, FFMIN(row, pic_height-1), ref_field); ff_thread_await_progress(&ref_pic->f,
FFMIN(row, pic_height - 1),
ref_field);
}else{ }else{
ff_thread_await_progress((AVFrame*)ref_pic, FFMIN(row, pic_height-1), 0); ff_thread_await_progress(&ref_pic->f,
FFMIN(row, pic_height - 1),
0);
} }
} }
} }
@@ -2522,8 +2534,9 @@ static int field_end(H264Context *h, int in_setup){
s->mb_y= 0; s->mb_y= 0;
if (!in_setup && !s->dropable) if (!in_setup && !s->dropable)
ff_thread_report_progress((AVFrame*)s->current_picture_ptr, (16*s->mb_height >> FIELD_PICTURE) - 1, ff_thread_report_progress(&s->current_picture_ptr->f,
s->picture_structure==PICT_BOTTOM_FIELD); (16 * s->mb_height >> FIELD_PICTURE) - 1,
s->picture_structure == PICT_BOTTOM_FIELD);
if (CONFIG_H264_VDPAU_DECODER && s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) if (CONFIG_H264_VDPAU_DECODER && s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
ff_vdpau_h264_set_reference_frames(s); ff_vdpau_h264_set_reference_frames(s);
@@ -2893,8 +2906,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
h->prev_frame_num++; h->prev_frame_num++;
h->prev_frame_num %= 1<<h->sps.log2_max_frame_num; h->prev_frame_num %= 1<<h->sps.log2_max_frame_num;
s->current_picture_ptr->frame_num= h->prev_frame_num; s->current_picture_ptr->frame_num= h->prev_frame_num;
ff_thread_report_progress((AVFrame*)s->current_picture_ptr, INT_MAX, 0); ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
ff_thread_report_progress((AVFrame*)s->current_picture_ptr, INT_MAX, 1); ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 1);
ff_generate_sliding_window_mmcos(h); ff_generate_sliding_window_mmcos(h);
if (ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index) < 0 && if (ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index) < 0 &&
(s->avctx->err_recognition & AV_EF_EXPLODE)) (s->avctx->err_recognition & AV_EF_EXPLODE))
@@ -3557,8 +3570,8 @@ static void decode_finish_row(H264Context *h){
if (s->dropable) return; if (s->dropable) return;
ff_thread_report_progress((AVFrame*)s->current_picture_ptr, top + height - 1, ff_thread_report_progress(&s->current_picture_ptr->f, top + height - 1,
s->picture_structure==PICT_BOTTOM_FIELD); s->picture_structure == PICT_BOTTOM_FIELD);
} }
static int decode_slice(struct AVCodecContext *avctx, void *arg){ static int decode_slice(struct AVCodecContext *avctx, void *arg){

@@ -153,7 +153,9 @@ static void await_reference_mb_row(H264Context * const h, Picture *ref, int mb_y
//FIXME it can be safe to access mb stuff //FIXME it can be safe to access mb stuff
//even if pixels aren't deblocked yet //even if pixels aren't deblocked yet
ff_thread_await_progress((AVFrame*)ref, FFMIN(16*mb_y >> ref_field_picture, ref_height-1), ff_thread_await_progress(&ref->f,
FFMIN(16 * mb_y >> ref_field_picture,
ref_height - 1),
ref_field_picture && ref_field); ref_field_picture && ref_field);
} }

@@ -45,7 +45,7 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
MJpegContext * const m = s->mjpeg_ctx; MJpegContext * const m = s->mjpeg_ctx;
const int width= s->width; const int width= s->width;
const int height= s->height; const int height= s->height;
AVFrame * const p= (AVFrame*)&s->current_picture; AVFrame * const p = &s->current_picture.f;
const int predictor= avctx->prediction_method+1; const int predictor= avctx->prediction_method+1;
const int mb_width = (width + s->mjpeg_hsample[0] - 1) / s->mjpeg_hsample[0]; const int mb_width = (width + s->mjpeg_hsample[0] - 1) / s->mjpeg_hsample[0];
const int mb_height = (height + s->mjpeg_vsample[0] - 1) / s->mjpeg_vsample[0]; const int mb_height = (height + s->mjpeg_vsample[0] - 1) / s->mjpeg_vsample[0];

@@ -377,7 +377,8 @@ int ff_mpeg4_decode_video_packet_header(MpegEncContext *s)
int mb_x = 0, mb_y = 0; int mb_x = 0, mb_y = 0;
while (s->next_picture.f.mbskip_table[s->mb_index2xy[mb_num]]) { while (s->next_picture.f.mbskip_table[s->mb_index2xy[mb_num]]) {
if (!mb_x) ff_thread_await_progress((AVFrame*)s->next_picture_ptr, mb_y++, 0); if (!mb_x)
ff_thread_await_progress(&s->next_picture_ptr->f, mb_y++, 0);
mb_num++; mb_num++;
if (++mb_x == s->mb_width) mb_x = 0; if (++mb_x == s->mb_width) mb_x = 0;
} }
@@ -1288,7 +1289,7 @@ static int mpeg4_decode_mb(MpegEncContext *s,
s->last_mv[i][1][1]= 0; s->last_mv[i][1][1]= 0;
} }
ff_thread_await_progress((AVFrame*)s->next_picture_ptr, s->mb_y, 0); ff_thread_await_progress(&s->next_picture_ptr->f, s->mb_y, 0);
} }
/* if we skipped it in the future P Frame than skip it now too */ /* if we skipped it in the future P Frame than skip it now too */
@@ -1470,7 +1471,7 @@ end:
const int delta= s->mb_x + 1 == s->mb_width ? 2 : 1; const int delta= s->mb_x + 1 == s->mb_width ? 2 : 1;
if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture.f.mbskip_table[xy + delta]) { if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture.f.mbskip_table[xy + delta]) {
ff_thread_await_progress((AVFrame*)s->next_picture_ptr, ff_thread_await_progress(&s->next_picture_ptr->f,
(s->mb_x + delta >= s->mb_width) ? FFMIN(s->mb_y+1, s->mb_height-1) : s->mb_y, 0); (s->mb_x + delta >= s->mb_width) ? FFMIN(s->mb_y+1, s->mb_height-1) : s->mb_y, 0);
} }

@@ -232,9 +232,9 @@ static void free_frame_buffer(MpegEncContext *s, Picture *pic)
* dimensions; ignore user defined callbacks for these * dimensions; ignore user defined callbacks for these
*/ */
if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE) if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
ff_thread_release_buffer(s->avctx, (AVFrame *) pic); ff_thread_release_buffer(s->avctx, &pic->f);
else else
avcodec_default_release_buffer(s->avctx, (AVFrame *) pic); avcodec_default_release_buffer(s->avctx, &pic->f);
av_freep(&pic->f.hwaccel_picture_private); av_freep(&pic->f.hwaccel_picture_private);
} }
@@ -257,9 +257,9 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
} }
if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE) if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
r = ff_thread_get_buffer(s->avctx, (AVFrame *) pic); r = ff_thread_get_buffer(s->avctx, &pic->f);
else else
r = avcodec_default_get_buffer(s->avctx, (AVFrame *) pic); r = avcodec_default_get_buffer(s->avctx, &pic->f);
if (r < 0 || !pic->f.type || !pic->f.data[0]) { if (r < 0 || !pic->f.type || !pic->f.data[0]) {
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n", av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
@@ -729,7 +729,7 @@ av_cold int ff_MPV_common_init(MpegEncContext *s)
s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag); s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
s->avctx->coded_frame = (AVFrame *)&s->current_picture; s->avctx->coded_frame = &s->current_picture.f;
FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
fail); // error ressilience code looks cleaner with this fail); // error ressilience code looks cleaner with this
@@ -805,7 +805,7 @@ av_cold int ff_MPV_common_init(MpegEncContext *s)
FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
s->picture_count * sizeof(Picture), fail); s->picture_count * sizeof(Picture), fail);
for (i = 0; i < s->picture_count; i++) { for (i = 0; i < s->picture_count; i++) {
avcodec_get_frame_defaults((AVFrame *) &s->picture[i]); avcodec_get_frame_defaults(&s->picture[i].f);
} }
if (s->width && s->height) { if (s->width && s->height) {
@@ -1269,10 +1269,8 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
s->last_picture_ptr = &s->picture[i]; s->last_picture_ptr = &s->picture[i];
if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
return -1; return -1;
ff_thread_report_progress((AVFrame *) s->last_picture_ptr, ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
INT_MAX, 0); ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
INT_MAX, 1);
} }
if ((s->next_picture_ptr == NULL || if ((s->next_picture_ptr == NULL ||
s->next_picture_ptr->f.data[0] == NULL) && s->next_picture_ptr->f.data[0] == NULL) &&
@@ -1282,10 +1280,8 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
s->next_picture_ptr = &s->picture[i]; s->next_picture_ptr = &s->picture[i];
if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
return -1; return -1;
ff_thread_report_progress((AVFrame *) s->next_picture_ptr, ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
INT_MAX, 0); ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
INT_MAX, 1);
} }
} }
@@ -1410,10 +1406,10 @@ void ff_MPV_frame_end(MpegEncContext *s)
memset(&s->next_picture, 0, sizeof(Picture)); memset(&s->next_picture, 0, sizeof(Picture));
memset(&s->current_picture, 0, sizeof(Picture)); memset(&s->current_picture, 0, sizeof(Picture));
#endif #endif
s->avctx->coded_frame = (AVFrame *) s->current_picture_ptr; s->avctx->coded_frame = &s->current_picture_ptr->f;
if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) { if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
ff_thread_report_progress((AVFrame *) s->current_picture_ptr, ff_thread_report_progress(&s->current_picture_ptr->f,
s->mb_height - 1, 0); s->mb_height - 1, 0);
} }
} }
@@ -2365,10 +2361,14 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) { if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
if (s->mv_dir & MV_DIR_FORWARD) { if (s->mv_dir & MV_DIR_FORWARD) {
ff_thread_await_progress((AVFrame*)s->last_picture_ptr, ff_MPV_lowest_referenced_row(s, 0), 0); ff_thread_await_progress(&s->last_picture_ptr->f,
ff_MPV_lowest_referenced_row(s, 0),
0);
} }
if (s->mv_dir & MV_DIR_BACKWARD) { if (s->mv_dir & MV_DIR_BACKWARD) {
ff_thread_await_progress((AVFrame*)s->next_picture_ptr, ff_MPV_lowest_referenced_row(s, 1), 0); ff_thread_await_progress(&s->next_picture_ptr->f,
ff_MPV_lowest_referenced_row(s, 1),
0);
} }
} }
@@ -2575,9 +2575,9 @@ void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
int i; int i;
if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER)) if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
src= (AVFrame*)s->current_picture_ptr; src = &s->current_picture_ptr->f;
else if(s->last_picture_ptr) else if(s->last_picture_ptr)
src= (AVFrame*)s->last_picture_ptr; src = &s->last_picture_ptr->f;
else else
return; return;
@@ -2896,5 +2896,5 @@ void ff_set_qscale(MpegEncContext * s, int qscale)
void ff_MPV_report_decode_progress(MpegEncContext *s) void ff_MPV_report_decode_progress(MpegEncContext *s)
{ {
if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred) if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0); ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);
} }

@@ -974,7 +974,7 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg)
if (i < 0) if (i < 0)
return i; return i;
pic = (AVFrame *) &s->picture[i]; pic = &s->picture[i].f;
pic->reference = 3; pic->reference = 3;
for (i = 0; i < 4; i++) { for (i = 0; i < 4; i++) {
@@ -989,7 +989,7 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg)
if (i < 0) if (i < 0)
return i; return i;
pic = (AVFrame *) &s->picture[i]; pic = &s->picture[i].f;
pic->reference = 3; pic->reference = 3;
if (ff_alloc_picture(s, (Picture *) pic, 0) < 0) { if (ff_alloc_picture(s, (Picture *) pic, 0) < 0) {
@@ -1241,7 +1241,7 @@ static int select_input_picture(MpegEncContext *s)
s->input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL); s->input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL);
s->avctx->release_buffer(s->avctx, s->avctx->release_buffer(s->avctx,
(AVFrame *) s->input_picture[0]); &s->input_picture[0]->f);
} }
emms_c(); emms_c();
@@ -1374,13 +1374,13 @@ no_output_pic:
/* mark us unused / free shared pic */ /* mark us unused / free shared pic */
if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL) if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL)
s->avctx->release_buffer(s->avctx, s->avctx->release_buffer(s->avctx,
(AVFrame *) s->reordered_input_picture[0]); &s->reordered_input_picture[0]->f);
for (i = 0; i < 4; i++) for (i = 0; i < 4; i++)
s->reordered_input_picture[0]->f.data[i] = NULL; s->reordered_input_picture[0]->f.data[i] = NULL;
s->reordered_input_picture[0]->f.type = 0; s->reordered_input_picture[0]->f.type = 0;
copy_picture_attributes(s, (AVFrame *) pic, copy_picture_attributes(s, &pic->f,
(AVFrame *) s->reordered_input_picture[0]); &s->reordered_input_picture[0]->f);
s->current_picture_ptr = pic; s->current_picture_ptr = pic;
} else { } else {

Loading…
Cancel
Save