ffmpeg: cosmetics to reduce difference to qatar by about 90 lines

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
pull/30/merge
Michael Niedermayer 13 years ago
parent c53326afad
commit 3ba90d9e97
1 changed file: ffmpeg.c (161 lines changed)

@@ -1959,6 +1959,7 @@ static void do_video_out(AVFormatContext *s, OutputStream *ost,
                          AVFrame *in_picture, float quality)
 {
     int nb_frames, i, ret, format_video_sync;
+    AVPacket pkt;
     AVCodecContext *enc;
     double sync_ipts, delta;
     double duration = 0;
@@ -2015,95 +2016,97 @@ static void do_video_out(AVFormatContext *s, OutputStream *ost,
         av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
     }

-    /* duplicates frame if needed */
-    for (i = 0; i < nb_frames; i++) {
-        AVPacket pkt;
-        av_init_packet(&pkt);
-        pkt.data = NULL;
-        pkt.size = 0;
+duplicate_frame:
+    av_init_packet(&pkt);
+    pkt.data = NULL;
+    pkt.size = 0;

-        if (s->oformat->flags & AVFMT_RAWPICTURE &&
-            enc->codec->id == CODEC_ID_RAWVIDEO) {
-            /* raw pictures are written as AVPicture structure to
-               avoid any copies. We support temporarily the older
-               method. */
-            enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
-            enc->coded_frame->top_field_first  = in_picture->top_field_first;
-            pkt.data   = (uint8_t *)in_picture;
-            pkt.size   = sizeof(AVPicture);
-            pkt.pts    = av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
-            pkt.flags |= AV_PKT_FLAG_KEY;
+    if (s->oformat->flags & AVFMT_RAWPICTURE &&
+        enc->codec->id == CODEC_ID_RAWVIDEO) {
+        /* raw pictures are written as AVPicture structure to
+           avoid any copies. We support temporarily the older
+           method. */
+        enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
+        enc->coded_frame->top_field_first  = in_picture->top_field_first;
+        pkt.data   = (uint8_t *)in_picture;
+        pkt.size   = sizeof(AVPicture);
+        pkt.pts    = av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
+        pkt.flags |= AV_PKT_FLAG_KEY;

-            write_frame(s, &pkt, ost);
-        } else {
-            int got_packet;
-            AVFrame big_picture;
+        write_frame(s, &pkt, ost);
+    } else {
+        int got_packet;
+        AVFrame big_picture;

-            big_picture = *in_picture;
-            /* better than nothing: use input picture interlaced
-               settings */
-            big_picture.interlaced_frame = in_picture->interlaced_frame;
-            if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
-                if (ost->top_field_first == -1)
-                    big_picture.top_field_first = in_picture->top_field_first;
-                else
-                    big_picture.top_field_first = !!ost->top_field_first;
-            }
+        big_picture = *in_picture;
+        /* better than nothing: use input picture interlaced
+           settings */
+        big_picture.interlaced_frame = in_picture->interlaced_frame;
+        if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
+            if (ost->top_field_first == -1)
+                big_picture.top_field_first = in_picture->top_field_first;
+            else
+                big_picture.top_field_first = !!ost->top_field_first;
+        }

-            /* handles same_quant here. This is not correct because it may
-               not be a global option */
-            big_picture.quality = quality;
-            if (!enc->me_threshold)
-                big_picture.pict_type = 0;
-            big_picture.pts = ost->sync_opts;
-            if (ost->forced_kf_index < ost->forced_kf_count &&
-                big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
-                big_picture.pict_type = AV_PICTURE_TYPE_I;
-                ost->forced_kf_index++;
-            }
-            update_benchmark(NULL);
-            ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
-            update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
-            if (ret < 0) {
-                av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
-                exit_program(1);
-            }
+        /* handles same_quant here. This is not correct because it may
+           not be a global option */
+        big_picture.quality = quality;
+        if (!enc->me_threshold)
+            big_picture.pict_type = 0;
+        big_picture.pts = ost->sync_opts;
+        if (ost->forced_kf_index < ost->forced_kf_count &&
+            big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
+            big_picture.pict_type = AV_PICTURE_TYPE_I;
+            ost->forced_kf_index++;
+        }
+        update_benchmark(NULL);
+        ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
+        update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
+        if (ret < 0) {
+            av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
+            exit_program(1);
+        }

-            if (got_packet) {
-                if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
-                    pkt.pts = ost->sync_opts;
+        if (got_packet) {
+            if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
+                pkt.pts = ost->sync_opts;

-                if (pkt.pts != AV_NOPTS_VALUE)
-                    pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
-                if (pkt.dts != AV_NOPTS_VALUE)
-                    pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
+            if (pkt.pts != AV_NOPTS_VALUE)
+                pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
+            if (pkt.dts != AV_NOPTS_VALUE)
+                pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);

-                if (debug_ts) {
-                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
-                           "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
-                           av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
-                           av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
-                }
+            if (debug_ts) {
+                av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
+                       "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
+                       av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
+                       av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
+            }

-                write_frame(s, &pkt, ost);
-                frame_size = pkt.size;
-                video_size += pkt.size;
-                av_free_packet(&pkt);
+            write_frame(s, &pkt, ost);
+            frame_size = pkt.size;
+            video_size += pkt.size;
+            av_free_packet(&pkt);

-                /* if two pass, output log */
-                if (ost->logfile && enc->stats_out) {
-                    fprintf(ost->logfile, "%s", enc->stats_out);
-                }
-            }
-        }
-        ost->sync_opts++;
-        /*
-         * For video, number of frames in == number of packets out.
-         * But there may be reordering, so we can't throw away frames on encoder
-         * flush, we need to limit them here, before they go into encoder.
-         */
-        ost->frame_number++;
-    }
+            /* if two pass, output log */
+            if (ost->logfile && enc->stats_out) {
+                fprintf(ost->logfile, "%s", enc->stats_out);
+            }
+        }
+    }
+    ost->sync_opts++;
+    /*
+     * For video, number of frames in == number of packets out.
+     * But there may be reordering, so we can't throw away frames on encoder
+     * flush, we need to limit them here, before they go into encoder.
+     */
+    ost->frame_number++;
+
+    if(--nb_frames)
+        goto duplicate_frame;

     if (vstats_filename && frame_size)
         do_video_stats(output_files[ost->file_index]->ctx, ost, frame_size);
 }
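The second hunk is a pure control-flow/indentation change: the per-frame duplication loop for (i = 0; i < nb_frames; i++) { ... }, with its loop-local AVPacket, becomes a duplicate_frame: label with if(--nb_frames) goto duplicate_frame; at the bottom and a function-scope pkt, so the encode-and-write body drops one indentation level and lines up with the corresponding avconv.c code (the "qatar" tree the commit message refers to). A minimal standalone sketch of that equivalence, in plain C rather than FFmpeg code; emit_frame(), dups_with_loop() and dups_with_goto() are hypothetical names standing in for the raw-picture/encoder body of do_video_out():

/*
 * Sketch only, not FFmpeg code: emit_frame() stands in for the
 * encode-and-write work done once per duplicated frame.
 */
#include <stdio.h>

static void emit_frame(int i)
{
    printf("frame copy %d\n", i);
}

/* old shape: counted loop, body indented one level deeper */
static void dups_with_loop(int nb_frames)
{
    int i;
    for (i = 0; i < nb_frames; i++)
        emit_frame(i);
}

/* new shape: label + backward goto; behaves like do/while, so it runs the
 * body at least once and assumes nb_frames >= 1 on entry */
static void dups_with_goto(int nb_frames)
{
    int i = 0;
duplicate_frame:
    emit_frame(i++);
    if (--nb_frames)
        goto duplicate_frame;
}

int main(void)
{
    dups_with_loop(3);
    dups_with_goto(3);
    return 0;
}

For nb_frames >= 1 the two shapes call emit_frame() the same number of times; the goto form differs only when nb_frames is 0, a case the earlier frame-drop handling in do_video_out is presumably expected to have filtered out before this point.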
