|
|
|
@@ -1959,6 +1959,7 @@ static void do_video_out(AVFormatContext *s, OutputStream *ost,
                         AVFrame *in_picture, float quality)
{
    int nb_frames, i, ret, format_video_sync;
    AVPacket pkt;
    AVCodecContext *enc;
    double sync_ipts, delta;
    double duration = 0;
@@ -2015,95 +2016,97 @@ static void do_video_out(AVFormatContext *s, OutputStream *ost,
av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
}
/* duplicates frame if needed */
for (i = 0; i < nb_frames; i++) {
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
if (s->oformat->flags & AVFMT_RAWPICTURE &&
enc->codec->id == CODEC_ID_RAWVIDEO) {
/* raw pictures are written as AVPicture structure to
   avoid any copies. We support temporarily the older
   method. */
enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
enc->coded_frame->top_field_first = in_picture->top_field_first;
pkt.data = (uint8_t *)in_picture;
pkt.size = sizeof(AVPicture);
pkt.pts = av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
pkt.flags |= AV_PKT_FLAG_KEY;
write_frame(s, &pkt, ost);
} else {
int got_packet;
AVFrame big_picture;
big_picture = *in_picture;
/* better than nothing: use input picture interlaced
   settings */
big_picture.interlaced_frame = in_picture->interlaced_frame;
if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
if (ost->top_field_first == -1)
big_picture.top_field_first = in_picture->top_field_first;
else
big_picture.top_field_first = !!ost->top_field_first;
}
duplicate_frame:
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
/* handles same_quant here. This is not correct because it may
   not be a global option */
big_picture.quality = quality;
if (!enc->me_threshold)
big_picture.pict_type = 0;
big_picture.pts = ost->sync_opts;
if (ost->forced_kf_index < ost->forced_kf_count &&
big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
big_picture.pict_type = AV_PICTURE_TYPE_I;
ost->forced_kf_index++;
}
update_benchmark(NULL);
ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
exit_program(1);
}
if (s->oformat->flags & AVFMT_RAWPICTURE &&
enc->codec->id == CODEC_ID_RAWVIDEO) {
/* raw pictures are written as AVPicture structure to
   avoid any copies. We support temporarily the older
   method. */
enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
enc->coded_frame->top_field_first = in_picture->top_field_first;
pkt.data = (uint8_t *)in_picture;
pkt.size = sizeof(AVPicture);
pkt.pts = av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
pkt.flags |= AV_PKT_FLAG_KEY;
if (got_packet) {
if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
pkt.pts = ost->sync_opts;
write_frame(s, &pkt, ost);
} else {
int got_packet;
AVFrame big_picture;
big_picture = *in_picture;
/* better than nothing: use input picture interlaced
   settings */
big_picture.interlaced_frame = in_picture->interlaced_frame;
if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
if (ost->top_field_first == -1)
big_picture.top_field_first = in_picture->top_field_first;
else
big_picture.top_field_first = !!ost->top_field_first;
}
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
if (pkt.dts != AV_NOPTS_VALUE)
pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
/* handles same_quant here. This is not correct because it may
   not be a global option */
big_picture.quality = quality;
if (!enc->me_threshold)
big_picture.pict_type = 0;
big_picture.pts = ost->sync_opts;
if (ost->forced_kf_index < ost->forced_kf_count &&
big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
big_picture.pict_type = AV_PICTURE_TYPE_I;
ost->forced_kf_index++;
}
update_benchmark(NULL);
ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
exit_program(1);
}
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
"pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
}
if (got_packet) {
if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
pkt.pts = ost->sync_opts;
write_frame(s, &pkt, ost);
frame_size = pkt.size;
video_size += pkt.size;
av_free_packet(&pkt);
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
if (pkt.dts != AV_NOPTS_VALUE)
pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
/* if two pass, output log */
if (ost->logfile && enc->stats_out) {
fprintf(ost->logfile, "%s", enc->stats_out);
}
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
"pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
}
write_frame(s, &pkt, ost);
frame_size = pkt.size;
video_size += pkt.size;
av_free_packet(&pkt);
/* if two pass, output log */
if (ost->logfile && enc->stats_out) {
fprintf(ost->logfile, "%s", enc->stats_out);
}
}
ost->sync_opts++;
/*
 * For video, number of frames in == number of packets out.
 * But there may be reordering, so we can't throw away frames on encoder
 * flush, we need to limit them here, before they go into encoder.
 */
ost->frame_number++;
}
ost->sync_opts++;
/*
 * For video, number of frames in == number of packets out.
 * But there may be reordering, so we can't throw away frames on encoder
 * flush, we need to limit them here, before they go into encoder.
 */
ost->frame_number++;
if(--nb_frames)
goto duplicate_frame;
if (vstats_filename && frame_size)
do_video_stats(output_files[ost->file_index]->ctx, ost, frame_size);
}