/*
 * Copyright (c) 2000-2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * multimedia converter based on the FFmpeg libraries
 */

#include "config.h"
#include <ctype.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>
#include <errno.h>
#include <limits.h>
#include <stdatomic.h>
#include <stdint.h>

#if HAVE_IO_H
#include <io.h>
#endif
#if HAVE_UNISTD_H
#include <unistd.h>
#endif

#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswresample/swresample.h"
#include "libavutil/opt.h"
#include "libavutil/channel_layout.h"
#include "libavutil/parseutils.h"
#include "libavutil/samplefmt.h"
#include "libavutil/fifo.h"
#include "libavutil/hwcontext.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/dict.h"
#include "libavutil/display.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"
#include "libavutil/avstring.h"
#include "libavutil/libm.h"
#include "libavutil/imgutils.h"
#include "libavutil/timestamp.h"
#include "libavutil/bprint.h"
#include "libavutil/time.h"
#include "libavutil/thread.h"
#include "libavutil/threadmessage.h"
#include "libavcodec/mathops.h"
#include "libavcodec/version.h"
#include "libavformat/os_support.h"

# include "libavfilter/avfilter.h"
# include "libavfilter/buffersrc.h"
# include "libavfilter/buffersink.h"

#if HAVE_SYS_RESOURCE_H
#include <sys/time.h>
#include <sys/types.h>
#include <sys/resource.h>
#elif HAVE_GETPROCESSTIMES
#include <windows.h>
#endif
#if HAVE_GETPROCESSMEMORYINFO
#include <windows.h>
#include <psapi.h>
#endif
#if HAVE_SETCONSOLECTRLHANDLER
#include <windows.h>
#endif


#if HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif

#if HAVE_TERMIOS_H
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <termios.h>
#elif HAVE_KBHIT
#include <conio.h>
#endif

#include <time.h>

#include "ffmpeg.h"
#include "cmdutils.h"
#include "sync_queue.h"

#include "libavutil/avassert.h"
|
|
const char program_name[] = "ffmpeg"; |
|
const int program_birth_year = 2000; |
|
|
|
static FILE *vstats_file; |
|
|
|
const char *const forced_keyframes_const_names[] = { |
|
"n", |
|
"n_forced", |
|
"prev_forced_n", |
|
"prev_forced_t", |
|
"t", |
|
NULL |
|
}; |
|
|
|
typedef struct BenchmarkTimeStamps { |
|
int64_t real_usec; |
|
int64_t user_usec; |
|
int64_t sys_usec; |
|
} BenchmarkTimeStamps; |
|
|
|
static BenchmarkTimeStamps get_benchmark_time_stamps(void); |
|
static int64_t getmaxrss(void); |
|
static int ifilter_has_all_input_formats(FilterGraph *fg); |
|
|
|
static int64_t nb_frames_dup = 0; |
|
static uint64_t dup_warning = 1000; |
|
static int64_t nb_frames_drop = 0; |
|
static int64_t decode_error_stat[2]; |
|
unsigned nb_output_dumped = 0; |
|
|
|
static BenchmarkTimeStamps current_time; |
|
AVIOContext *progress_avio = NULL; |
|
|
|
InputStream **input_streams = NULL; |
|
int nb_input_streams = 0; |
|
InputFile **input_files = NULL; |
|
int nb_input_files = 0; |
|
|
|
OutputStream **output_streams = NULL; |
|
int nb_output_streams = 0; |
|
OutputFile **output_files = NULL; |
|
int nb_output_files = 0; |
|
|
|
FilterGraph **filtergraphs; |
|
int nb_filtergraphs; |
|
|
|
#if HAVE_TERMIOS_H |
|
|
|
/* init terminal so that we can grab keys */ |
|
static struct termios oldtty; |
|
static int restore_tty; |
|
#endif |
|
|
|
/* sub2video hack: |
|
Convert subtitles to video with alpha to insert them in filter graphs. |
|
This is a temporary solution until libavfilter gets real subtitles support. |
|
*/ |
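/* A rough sketch of the mechanism implemented by the helpers below (not an
   API contract): a blank RGB32 canvas the size of the video is allocated per
   subtitle stream, each AVSubtitleRect is blitted onto it, and the resulting
   frame is pushed into the buffersrc of every filtergraph consuming the
   stream, roughly:

       sub2video_get_blank_frame(ist);                   // allocate/clear the canvas
       sub2video_copy_rect(dst, linesize, w, h, rect);   // blit one rectangle
       sub2video_push_ref(ist, pts);                     // feed the filtergraphs
*/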
|
|
|
static int sub2video_get_blank_frame(InputStream *ist) |
|
{ |
|
int ret; |
|
AVFrame *frame = ist->sub2video.frame; |
|
|
|
av_frame_unref(frame); |
|
ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w; |
|
ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h; |
|
ist->sub2video.frame->format = AV_PIX_FMT_RGB32; |
|
if ((ret = av_frame_get_buffer(frame, 0)) < 0) |
|
return ret; |
|
memset(frame->data[0], 0, frame->height * frame->linesize[0]); |
|
return 0; |
|
} |
|
|
|
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, |
|
AVSubtitleRect *r) |
|
{ |
|
uint32_t *pal, *dst2; |
|
uint8_t *src, *src2; |
|
int x, y; |
|
|
|
if (r->type != SUBTITLE_BITMAP) { |
|
av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n"); |
|
return; |
|
} |
|
if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) { |
|
av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n", |
|
r->x, r->y, r->w, r->h, w, h |
|
); |
|
return; |
|
} |
|
|
|
dst += r->y * dst_linesize + r->x * 4; |
|
src = r->data[0]; |
|
pal = (uint32_t *)r->data[1]; |
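/* The bitmap rectangles are assumed to be paletted (PAL8-style): data[0]
   holds 8-bit palette indices and data[1] a 32-bit RGBA palette, so the loop
   below expands every index through pal[] into one RGB32 pixel. */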
|
for (y = 0; y < r->h; y++) { |
|
dst2 = (uint32_t *)dst; |
|
src2 = src; |
|
for (x = 0; x < r->w; x++) |
|
*(dst2++) = pal[*(src2++)]; |
|
dst += dst_linesize; |
|
src += r->linesize[0]; |
|
} |
|
} |
|
|
|
static void sub2video_push_ref(InputStream *ist, int64_t pts) |
|
{ |
|
AVFrame *frame = ist->sub2video.frame; |
|
int i; |
|
int ret; |
|
|
|
av_assert1(frame->data[0]); |
|
ist->sub2video.last_pts = frame->pts = pts; |
|
for (i = 0; i < ist->nb_filters; i++) { |
|
ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame, |
|
AV_BUFFERSRC_FLAG_KEEP_REF | |
|
AV_BUFFERSRC_FLAG_PUSH); |
|
if (ret != AVERROR_EOF && ret < 0) |
|
av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n", |
|
av_err2str(ret)); |
|
} |
|
} |
|
|
|
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub) |
|
{ |
|
AVFrame *frame = ist->sub2video.frame; |
|
int8_t *dst; |
|
int dst_linesize; |
|
int num_rects, i; |
|
int64_t pts, end_pts; |
|
|
|
if (!frame) |
|
return; |
|
if (sub) { |
|
pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL, |
|
AV_TIME_BASE_Q, ist->st->time_base); |
|
end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL, |
|
AV_TIME_BASE_Q, ist->st->time_base); |
|
num_rects = sub->num_rects; |
|
} else { |
|
/* If we are initializing the system, utilize current heartbeat |
|
PTS as the start time, and show until the following subpicture |
|
is received. Otherwise, utilize the previous subpicture's end time |
|
as the fall-back value. */ |
|
pts = ist->sub2video.initialize ? |
|
heartbeat_pts : ist->sub2video.end_pts; |
|
end_pts = INT64_MAX; |
|
num_rects = 0; |
|
} |
|
if (sub2video_get_blank_frame(ist) < 0) { |
|
av_log(NULL, AV_LOG_ERROR, |
|
"Impossible to get a blank canvas.\n"); |
|
return; |
|
} |
|
dst = frame->data [0]; |
|
dst_linesize = frame->linesize[0]; |
|
for (i = 0; i < num_rects; i++) |
|
sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]); |
|
sub2video_push_ref(ist, pts); |
|
ist->sub2video.end_pts = end_pts; |
|
ist->sub2video.initialize = 0; |
|
} |
|
|
|
static void sub2video_heartbeat(InputStream *ist, int64_t pts) |
|
{ |
|
InputFile *infile = input_files[ist->file_index]; |
|
int i, j, nb_reqs; |
|
int64_t pts2; |
|
|
|
/* When a frame is read from a file, examine all sub2video streams in |
|
the same file and send the sub2video frame again. Otherwise, decoded |
|
video frames could be accumulating in the filter graph while a filter |
|
(possibly overlay) is desperately waiting for a subtitle frame. */ |
|
for (i = 0; i < infile->nb_streams; i++) { |
|
InputStream *ist2 = input_streams[infile->ist_index + i]; |
|
if (!ist2->sub2video.frame) |
|
continue; |
|
/* subtitles seem to be usually muxed ahead of other streams; |
|
if not, subtracting a larger time here is necessary */ |
|
pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1; |
|
/* do not send the heartbeat frame if the subtitle is already ahead */ |
|
if (pts2 <= ist2->sub2video.last_pts) |
|
continue; |
|
if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize) |
|
/* if we have hit the end of the current displayed subpicture, |
|
or if we need to initialize the system, update the |
|
overlayed subpicture and its start/end times */ |
|
sub2video_update(ist2, pts2 + 1, NULL); |
|
for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++) |
|
nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter); |
|
if (nb_reqs) |
|
sub2video_push_ref(ist2, pts2); |
|
} |
|
} |
|
|
|
static void sub2video_flush(InputStream *ist) |
|
{ |
|
int i; |
|
int ret; |
|
|
|
if (ist->sub2video.end_pts < INT64_MAX) |
|
sub2video_update(ist, INT64_MAX, NULL); |
|
for (i = 0; i < ist->nb_filters; i++) { |
|
ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL); |
|
if (ret != AVERROR_EOF && ret < 0) |
|
av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n"); |
|
} |
|
} |
|
|
|
/* end of sub2video hack */ |
|
|
|
static void term_exit_sigsafe(void) |
|
{ |
|
#if HAVE_TERMIOS_H |
|
if(restore_tty) |
|
tcsetattr (0, TCSANOW, &oldtty); |
|
#endif |
|
} |
|
|
|
void term_exit(void) |
|
{ |
|
av_log(NULL, AV_LOG_QUIET, "%s", ""); |
|
term_exit_sigsafe(); |
|
} |
|
|
|
static volatile int received_sigterm = 0; |
|
static volatile int received_nb_signals = 0; |
|
static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0); |
|
static volatile int ffmpeg_exited = 0; |
|
int main_return_code = 0; |
|
static int64_t copy_ts_first_pts = AV_NOPTS_VALUE; |
|
|
|
static void |
|
sigterm_handler(int sig) |
|
{ |
|
int ret; |
|
received_sigterm = sig; |
|
received_nb_signals++; |
|
term_exit_sigsafe(); |
|
if(received_nb_signals > 3) { |
|
ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n", |
|
strlen("Received > 3 system signals, hard exiting\n")); |
|
if (ret < 0) { /* Do nothing */ }; |
|
exit(123); |
|
} |
|
} |
|
|
|
#if HAVE_SETCONSOLECTRLHANDLER |
|
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType) |
|
{ |
|
av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType); |
|
|
|
switch (fdwCtrlType) |
|
{ |
|
case CTRL_C_EVENT: |
|
case CTRL_BREAK_EVENT: |
|
sigterm_handler(SIGINT); |
|
return TRUE; |
|
|
|
case CTRL_CLOSE_EVENT: |
|
case CTRL_LOGOFF_EVENT: |
|
case CTRL_SHUTDOWN_EVENT: |
|
sigterm_handler(SIGTERM); |
|
/* With these 3 events, the process is hard terminated as soon as we
return from this handler, so stall for as long as needed to let the
main thread(s) clean up and terminate gracefully (we have at most
5 seconds, but should be done far before that). */
|
while (!ffmpeg_exited) { |
|
Sleep(0); |
|
} |
|
return TRUE; |
|
|
|
default: |
|
av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType); |
|
return FALSE; |
|
} |
|
} |
|
#endif |
|
|
|
#ifdef __linux__ |
|
#define SIGNAL(sig, func) \ |
|
do { \ |
|
action.sa_handler = func; \ |
|
sigaction(sig, &action, NULL); \ |
|
} while (0) |
|
#else |
|
#define SIGNAL(sig, func) \ |
|
signal(sig, func) |
|
#endif |
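/* On Linux the handler is installed via sigaction() (using the struct action
 * set up in term_init() below), so flags such as SA_RESTART take effect;
 * elsewhere the macro falls back to plain signal(). Usage is identical either
 * way, e.g.:
 *
 *     SIGNAL(SIGINT, sigterm_handler);
 */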
|
|
|
void term_init(void) |
|
{ |
|
#if defined __linux__ |
|
struct sigaction action = {0}; |
|
action.sa_handler = sigterm_handler; |
|
|
|
/* block other interrupts while processing this one */ |
|
sigfillset(&action.sa_mask); |
|
|
|
/* restart interruptible functions (i.e. don't fail with EINTR) */ |
|
action.sa_flags = SA_RESTART; |
|
#endif |
|
|
|
#if HAVE_TERMIOS_H |
|
if (stdin_interaction) { |
|
struct termios tty; |
|
if (tcgetattr (0, &tty) == 0) { |
|
oldtty = tty; |
|
restore_tty = 1; |
|
|
|
tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP |
|
|INLCR|IGNCR|ICRNL|IXON); |
|
tty.c_oflag |= OPOST; |
|
tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN); |
|
tty.c_cflag &= ~(CSIZE|PARENB); |
|
tty.c_cflag |= CS8; |
|
tty.c_cc[VMIN] = 1; |
|
tty.c_cc[VTIME] = 0; |
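/* The settings above put the terminal into a raw-ish mode: no echo, no
canonical line buffering, 8-bit characters, and read() returning as soon as
a single byte is available (VMIN = 1, VTIME = 0), which is what read_key()
below relies on. */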
|
|
|
tcsetattr (0, TCSANOW, &tty); |
|
} |
|
SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */ |
|
} |
|
#endif |
|
|
|
SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */ |
|
SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */ |
|
#ifdef SIGXCPU |
|
SIGNAL(SIGXCPU, sigterm_handler); |
|
#endif |
|
#ifdef SIGPIPE |
|
signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */ |
|
#endif |
|
#if HAVE_SETCONSOLECTRLHANDLER |
|
SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE); |
|
#endif |
|
} |
|
|
|
/* read a key without blocking */ |
|
static int read_key(void) |
|
{ |
|
unsigned char ch; |
|
#if HAVE_TERMIOS_H |
|
int n = 1; |
|
struct timeval tv; |
|
fd_set rfds; |
|
|
|
FD_ZERO(&rfds); |
|
FD_SET(0, &rfds); |
|
tv.tv_sec = 0; |
|
tv.tv_usec = 0; |
|
n = select(1, &rfds, NULL, NULL, &tv); |
|
if (n > 0) { |
|
n = read(0, &ch, 1); |
|
if (n == 1) |
|
return ch; |
|
|
|
return n; |
|
} |
|
#elif HAVE_KBHIT |
|
# if HAVE_PEEKNAMEDPIPE |
|
static int is_pipe; |
|
static HANDLE input_handle; |
|
DWORD dw, nchars; |
|
if(!input_handle){ |
|
input_handle = GetStdHandle(STD_INPUT_HANDLE); |
|
is_pipe = !GetConsoleMode(input_handle, &dw); |
|
} |
|
|
|
if (is_pipe) { |
|
/* When running under a GUI, you will end up here. */
|
if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) { |
|
// input pipe may have been closed by the program that ran ffmpeg |
|
return -1; |
|
} |
|
//Read it |
|
if(nchars != 0) { |
|
read(0, &ch, 1); |
|
return ch; |
|
}else{ |
|
return -1; |
|
} |
|
} |
|
# endif |
|
if(kbhit()) |
|
return(getch()); |
|
#endif |
|
return -1; |
|
} |
|
|
|
static int decode_interrupt_cb(void *ctx) |
|
{ |
|
return received_nb_signals > atomic_load(&transcode_init_done); |
|
} |
|
|
|
const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL }; |
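/* A minimal sketch of how this callback is hooked up (it is assigned to the
 * AVFormatContext of the inputs and outputs elsewhere in ffmpeg): returning
 * non-zero makes blocking libavformat I/O abort early, which is how a signal
 * interrupts a stalled network read.
 *
 *     fmt_ctx->interrupt_callback = int_cb;   // checked periodically during I/O
 */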
|
|
|
static void ffmpeg_cleanup(int ret) |
|
{ |
|
int i, j; |
|
|
|
if (do_benchmark) { |
|
int maxrss = getmaxrss() / 1024; |
|
av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss); |
|
} |
|
|
|
for (i = 0; i < nb_filtergraphs; i++) { |
|
FilterGraph *fg = filtergraphs[i]; |
|
avfilter_graph_free(&fg->graph); |
|
for (j = 0; j < fg->nb_inputs; j++) { |
|
InputFilter *ifilter = fg->inputs[j]; |
|
struct InputStream *ist = ifilter->ist; |
|
|
|
if (ifilter->frame_queue) { |
|
AVFrame *frame; |
|
while (av_fifo_read(ifilter->frame_queue, &frame, 1) >= 0) |
|
av_frame_free(&frame); |
|
av_fifo_freep2(&ifilter->frame_queue); |
|
} |
|
av_freep(&ifilter->displaymatrix); |
|
if (ist->sub2video.sub_queue) { |
|
AVSubtitle sub; |
|
while (av_fifo_read(ist->sub2video.sub_queue, &sub, 1) >= 0) |
|
avsubtitle_free(&sub); |
|
av_fifo_freep2(&ist->sub2video.sub_queue); |
|
} |
|
av_buffer_unref(&ifilter->hw_frames_ctx); |
|
av_freep(&ifilter->name); |
|
av_freep(&fg->inputs[j]); |
|
} |
|
av_freep(&fg->inputs); |
|
for (j = 0; j < fg->nb_outputs; j++) { |
|
OutputFilter *ofilter = fg->outputs[j]; |
|
|
|
avfilter_inout_free(&ofilter->out_tmp); |
|
av_freep(&ofilter->name); |
|
av_channel_layout_uninit(&ofilter->ch_layout); |
|
av_freep(&fg->outputs[j]); |
|
} |
|
av_freep(&fg->outputs); |
|
av_freep(&fg->graph_desc); |
|
|
|
av_freep(&filtergraphs[i]); |
|
} |
|
av_freep(&filtergraphs); |
|
|
|
/* close files */ |
|
for (i = 0; i < nb_output_files; i++) |
|
of_close(&output_files[i]); |
|
|
|
for (i = 0; i < nb_output_streams; i++) { |
|
OutputStream *ost = output_streams[i]; |
|
|
|
if (!ost) |
|
continue; |
|
|
|
av_bsf_free(&ost->bsf_ctx); |
|
|
|
av_frame_free(&ost->filtered_frame); |
|
av_frame_free(&ost->sq_frame); |
|
av_frame_free(&ost->last_frame); |
|
av_packet_free(&ost->pkt); |
|
av_dict_free(&ost->encoder_opts); |
|
|
|
av_freep(&ost->forced_keyframes); |
|
av_expr_free(ost->forced_keyframes_pexpr); |
|
av_freep(&ost->avfilter); |
|
av_freep(&ost->logfile_prefix); |
|
|
|
#if FFMPEG_OPT_MAP_CHANNEL |
|
av_freep(&ost->audio_channels_map); |
|
ost->audio_channels_mapped = 0; |
|
#endif |
|
|
|
av_dict_free(&ost->sws_dict); |
|
av_dict_free(&ost->swr_opts); |
|
|
|
if (ost->enc_ctx) |
|
av_freep(&ost->enc_ctx->stats_in); |
|
avcodec_free_context(&ost->enc_ctx); |
|
|
|
av_freep(&output_streams[i]); |
|
} |
|
free_input_threads(); |
|
for (i = 0; i < nb_input_files; i++) { |
|
avformat_close_input(&input_files[i]->ctx); |
|
av_freep(&input_files[i]); |
|
} |
|
for (i = 0; i < nb_input_streams; i++) { |
|
InputStream *ist = input_streams[i]; |
|
|
|
av_frame_free(&ist->decoded_frame); |
|
av_packet_free(&ist->pkt); |
|
av_dict_free(&ist->decoder_opts); |
|
avsubtitle_free(&ist->prev_sub.subtitle); |
|
av_frame_free(&ist->sub2video.frame); |
|
av_freep(&ist->filters); |
|
av_freep(&ist->hwaccel_device); |
|
av_freep(&ist->dts_buffer); |
|
|
|
avcodec_free_context(&ist->dec_ctx); |
|
|
|
av_freep(&input_streams[i]); |
|
} |
|
|
|
if (vstats_file) { |
|
if (fclose(vstats_file)) |
|
av_log(NULL, AV_LOG_ERROR, |
|
"Error closing vstats file, loss of information possible: %s\n", |
|
av_err2str(AVERROR(errno))); |
|
} |
|
av_freep(&vstats_filename); |
|
av_freep(&filter_nbthreads); |
|
|
|
av_freep(&input_streams); |
|
av_freep(&input_files); |
|
av_freep(&output_streams); |
|
av_freep(&output_files); |
|
|
|
uninit_opts(); |
|
|
|
avformat_network_deinit(); |
|
|
|
if (received_sigterm) { |
|
av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n", |
|
(int) received_sigterm); |
|
} else if (ret && atomic_load(&transcode_init_done)) { |
|
av_log(NULL, AV_LOG_INFO, "Conversion failed!\n"); |
|
} |
|
term_exit(); |
|
ffmpeg_exited = 1; |
|
} |
|
|
|
void remove_avoptions(AVDictionary **a, AVDictionary *b) |
|
{ |
|
const AVDictionaryEntry *t = NULL; |
|
|
|
while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) { |
|
av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE); |
|
} |
|
} |
|
|
|
void assert_avoptions(AVDictionary *m) |
|
{ |
|
const AVDictionaryEntry *t; |
|
if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) { |
|
av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key); |
|
exit_program(1); |
|
} |
|
} |
|
|
|
static void abort_codec_experimental(const AVCodec *c, int encoder) |
|
{ |
|
exit_program(1); |
|
} |
|
|
|
static void update_benchmark(const char *fmt, ...) |
|
{ |
|
if (do_benchmark_all) { |
|
BenchmarkTimeStamps t = get_benchmark_time_stamps(); |
|
va_list va; |
|
char buf[1024]; |
|
|
|
if (fmt) { |
|
va_start(va, fmt); |
|
vsnprintf(buf, sizeof(buf), fmt, va); |
|
va_end(va); |
|
av_log(NULL, AV_LOG_INFO, |
|
"bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n", |
|
t.user_usec - current_time.user_usec, |
|
t.sys_usec - current_time.sys_usec, |
|
t.real_usec - current_time.real_usec, buf); |
|
} |
|
current_time = t; |
|
} |
|
} |
|
|
|
static void close_output_stream(OutputStream *ost) |
|
{ |
|
OutputFile *of = output_files[ost->file_index]; |
|
ost->finished |= ENCODER_FINISHED; |
|
|
|
if (ost->sq_idx_encode >= 0) |
|
sq_send(of->sq_encode, ost->sq_idx_encode, SQFRAME(NULL)); |
|
} |
|
|
|
/* |
|
* Send a single packet to the output, applying any bitstream filters |
|
* associated with the output stream. This may result in any number |
|
* of packets actually being written, depending on what bitstream |
|
* filters are applied. The supplied packet is consumed and will be |
|
* blank (as if newly-allocated) when this function returns. |
|
* |
|
* If eof is set, instead indicate EOF to all bitstream filters and |
|
* therefore flush any delayed packets to the output. A blank packet |
|
* must be supplied in this case. |
|
*/ |
|
static void output_packet(OutputFile *of, AVPacket *pkt, |
|
OutputStream *ost, int eof) |
|
{ |
|
const char *err_msg; |
|
int ret = 0; |
|
|
|
if (!eof && pkt->dts != AV_NOPTS_VALUE) |
|
ost->last_mux_dts = av_rescale_q(pkt->dts, ost->mux_timebase, AV_TIME_BASE_Q); |
|
|
|
/* apply the output bitstream filters */ |
|
if (ost->bsf_ctx) { |
|
int bsf_eof = 0; |
|
|
|
ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt); |
|
if (ret < 0) { |
|
err_msg = "submitting a packet for bitstream filtering"; |
|
goto fail; |
|
} |
|
|
|
while (!bsf_eof) { |
|
ret = av_bsf_receive_packet(ost->bsf_ctx, pkt); |
|
if (ret == AVERROR(EAGAIN)) |
|
return; |
|
else if (ret == AVERROR_EOF) |
|
bsf_eof = 1; |
|
else if (ret < 0) { |
|
err_msg = "applying bitstream filters to a packet"; |
|
goto fail; |
|
} |
|
|
|
ret = of_submit_packet(of, bsf_eof ? NULL : pkt, ost); |
|
if (ret < 0) |
|
goto mux_fail; |
|
} |
|
} else { |
|
ret = of_submit_packet(of, eof ? NULL : pkt, ost); |
|
if (ret < 0) |
|
goto mux_fail; |
|
} |
|
|
|
return; |
|
|
|
mux_fail: |
|
err_msg = "submitting a packet to the muxer"; |
|
|
|
fail: |
|
av_log(NULL, AV_LOG_ERROR, "Error %s for output stream #%d:%d.\n", |
|
err_msg, ost->file_index, ost->index); |
|
if (exit_on_error) |
|
exit_program(1); |
|
|
|
} |
|
|
|
static int check_recording_time(OutputStream *ost) |
|
{ |
|
OutputFile *of = output_files[ost->file_index]; |
|
|
|
if (of->recording_time != INT64_MAX && |
|
av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time, |
|
AV_TIME_BASE_Q) >= 0) { |
|
close_output_stream(ost); |
|
return 0; |
|
} |
|
return 1; |
|
} |
|
|
|
static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost, |
|
AVFrame *frame) |
|
{ |
|
double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision |
|
int64_t orig_pts = AV_NOPTS_VALUE; |
|
AVCodecContext *enc = ost->enc_ctx; |
|
AVRational filter_tb = (AVRational){ -1, -1 }; |
|
if (!frame || frame->pts == AV_NOPTS_VALUE || |
|
!enc || !ost->filter || !ost->filter->graph->graph) |
|
goto early_exit; |
|
|
|
{ |
|
AVFilterContext *filter = ost->filter->filter; |
|
|
|
int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time; |
|
AVRational tb = enc->time_base; |
|
int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16); |
|
filter_tb = av_buffersink_get_time_base(filter); |
|
orig_pts = frame->pts; |
|
|
|
tb.den <<= extra_bits; |
|
float_pts = |
|
av_rescale_q(frame->pts, filter_tb, tb) - |
|
av_rescale_q(start_time, AV_TIME_BASE_Q, tb); |
|
float_pts /= 1 << extra_bits; |
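/* The rescale above is done with the encoder timebase denominator temporarily
   widened by extra_bits (then divided back out in floating point); this keeps
   fractional precision that a direct av_rescale_q() into enc->time_base would
   round away. */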
|
// avoid exact midpoints to reduce the chance of rounding differences; this can be removed once the fps code is changed to work with integers
|
float_pts += FFSIGN(float_pts) * 1.0 / (1<<17); |
|
|
|
frame->pts = |
|
av_rescale_q(frame->pts, filter_tb, enc->time_base) - |
|
av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base); |
|
} |
|
|
|
early_exit: |
|
|
|
if (debug_ts) { |
|
av_log(NULL, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n", |
|
frame ? av_ts2str(orig_pts) : "NULL", |
|
frame ? av_ts2timestr(orig_pts, &filter_tb) : "NULL", |
|
filter_tb.num, filter_tb.den); |
|
|
|
av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n", |
|
frame ? av_ts2str(frame->pts) : "NULL", |
|
(enc && frame) ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL", |
|
float_pts, |
|
enc ? enc->time_base.num : -1, |
|
enc ? enc->time_base.den : -1); |
|
} |
|
|
|
return float_pts; |
|
} |
|
|
|
static int init_output_stream(OutputStream *ost, AVFrame *frame, |
|
char *error, int error_len); |
|
|
|
static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame, |
|
unsigned int fatal) |
|
{ |
|
int ret = AVERROR_BUG; |
|
char error[1024] = {0}; |
|
|
|
if (ost->initialized) |
|
return 0; |
|
|
|
ret = init_output_stream(ost, frame, error, sizeof(error)); |
|
if (ret < 0) { |
|
av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n", |
|
ost->file_index, ost->index, error); |
|
|
|
if (fatal) |
|
exit_program(1); |
|
} |
|
|
|
return ret; |
|
} |
|
|
|
static double psnr(double d) |
|
{ |
|
return -10.0 * log10(d); |
|
} |
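/* d is expected to be the squared error already normalized by the per-sample
 * maximum, i.e. SSE / (w * h * 255^2), so -10*log10(d) is the usual PSNR in
 * dB: PSNR = 10*log10(MAX^2 / MSE). The callers below pass, for example,
 * ost->error[0] / (enc->width * enc->height * 255.0 * 255.0). */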
|
|
|
static void update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_vstats) |
|
{ |
|
const uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS, |
|
NULL); |
|
AVCodecContext *enc = ost->enc_ctx; |
|
int64_t frame_number; |
|
double ti1, bitrate, avg_bitrate; |
|
|
|
ost->quality = sd ? AV_RL32(sd) : -1; |
|
ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE; |
|
|
|
for (int i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) { |
|
if (sd && i < sd[5]) |
|
ost->error[i] = AV_RL64(sd + 8 + 8*i); |
|
else |
|
ost->error[i] = -1; |
|
} |
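/* The offsets used above follow the documented AV_PKT_DATA_QUALITY_STATS
   layout: bytes 0-3 hold the little-endian quality value, byte 4 the picture
   type, byte 5 the number of per-plane error values, and the 64-bit error
   values themselves start at byte 8. */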
|
|
|
if (!write_vstats) |
|
return; |
|
|
|
/* this is executed just the first time update_video_stats is called */ |
|
if (!vstats_file) { |
|
vstats_file = fopen(vstats_filename, "w"); |
|
if (!vstats_file) { |
|
perror("fopen"); |
|
exit_program(1); |
|
} |
|
} |
|
|
|
frame_number = ost->packets_encoded; |
|
if (vstats_version <= 1) { |
|
fprintf(vstats_file, "frame= %5"PRId64" q= %2.1f ", frame_number, |
|
ost->quality / (float)FF_QP2LAMBDA); |
|
} else { |
|
fprintf(vstats_file, "out= %2d st= %2d frame= %5"PRId64" q= %2.1f ", ost->file_index, ost->index, frame_number, |
|
ost->quality / (float)FF_QP2LAMBDA); |
|
} |
|
|
|
if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR)) |
|
fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0))); |
|
|
|
fprintf(vstats_file,"f_size= %6d ", pkt->size); |
|
/* compute pts value */ |
|
ti1 = pkt->dts * av_q2d(ost->mux_timebase); |
|
if (ti1 < 0.01) |
|
ti1 = 0.01; |
|
|
|
bitrate = (pkt->size * 8) / av_q2d(enc->time_base) / 1000.0; |
|
avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0; |
|
fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ", |
|
(double)ost->data_size / 1024, ti1, bitrate, avg_bitrate); |
|
fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type)); |
|
} |
|
|
|
static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame) |
|
{ |
|
AVCodecContext *enc = ost->enc_ctx; |
|
AVPacket *pkt = ost->pkt; |
|
const char *type_desc = av_get_media_type_string(enc->codec_type); |
|
const char *action = frame ? "encode" : "flush"; |
|
int ret; |
|
|
|
if (frame) { |
|
ost->frames_encoded++; |
|
ost->samples_encoded += frame->nb_samples; |
|
|
|
if (debug_ts) { |
|
av_log(NULL, AV_LOG_INFO, "encoder <- type:%s " |
|
"frame_pts:%s frame_pts_time:%s time_base:%d/%d\n", |
|
type_desc, |
|
av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base), |
|
enc->time_base.num, enc->time_base.den); |
|
} |
|
} |
|
|
|
update_benchmark(NULL); |
|
|
|
ret = avcodec_send_frame(enc, frame); |
|
if (ret < 0 && !(ret == AVERROR_EOF && !frame)) { |
|
av_log(NULL, AV_LOG_ERROR, "Error submitting %s frame to the encoder\n", |
|
type_desc); |
|
return ret; |
|
} |
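/* Standard libavcodec send/receive pattern: one avcodec_send_frame() call
   (or a NULL frame to start flushing) is drained with avcodec_receive_packet()
   in the loop below until it returns EAGAIN (more input needed) or EOF
   (flush complete). */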
|
|
|
while (1) { |
|
ret = avcodec_receive_packet(enc, pkt); |
|
update_benchmark("%s_%s %d.%d", action, type_desc, |
|
ost->file_index, ost->index); |
|
|
|
/* if two pass, output log on success and EOF */ |
|
if ((ret >= 0 || ret == AVERROR_EOF) && ost->logfile && enc->stats_out) |
|
fprintf(ost->logfile, "%s", enc->stats_out); |
|
|
|
if (ret == AVERROR(EAGAIN)) { |
|
av_assert0(frame); // should never happen during flushing |
|
return 0; |
|
} else if (ret == AVERROR_EOF) { |
|
output_packet(of, pkt, ost, 1); |
|
return ret; |
|
} else if (ret < 0) { |
|
av_log(NULL, AV_LOG_ERROR, "%s encoding failed\n", type_desc); |
|
return ret; |
|
} |
|
|
|
if (debug_ts) { |
|
av_log(NULL, AV_LOG_INFO, "encoder -> type:%s " |
|
"pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s " |
|
"duration:%s duration_time:%s\n", |
|
type_desc, |
|
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base), |
|
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base), |
|
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &enc->time_base)); |
|
} |
|
|
|
av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase); |
|
|
|
if (debug_ts) { |
|
av_log(NULL, AV_LOG_INFO, "encoder -> type:%s " |
|
"pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s " |
|
"duration:%s duration_time:%s\n", |
|
type_desc, |
|
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base), |
|
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base), |
|
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &enc->time_base)); |
|
} |
|
|
|
if (enc->codec_type == AVMEDIA_TYPE_VIDEO) |
|
update_video_stats(ost, pkt, !!vstats_filename); |
|
|
|
ost->packets_encoded++; |
|
|
|
output_packet(of, pkt, ost, 0); |
|
} |
|
|
|
av_assert0(0); |
|
} |
|
|
|
static int submit_encode_frame(OutputFile *of, OutputStream *ost, |
|
AVFrame *frame) |
|
{ |
|
int ret; |
|
|
|
if (ost->sq_idx_encode < 0) |
|
return encode_frame(of, ost, frame); |
|
|
|
if (frame) { |
|
ret = av_frame_ref(ost->sq_frame, frame); |
|
if (ret < 0) |
|
return ret; |
|
frame = ost->sq_frame; |
|
} |
|
|
|
ret = sq_send(of->sq_encode, ost->sq_idx_encode, |
|
SQFRAME(frame)); |
|
if (ret < 0) { |
|
if (frame) |
|
av_frame_unref(frame); |
|
if (ret != AVERROR_EOF) |
|
return ret; |
|
} |
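/* Drain whatever the sync queue is ready to hand back. sq_receive() is
   expected to return EAGAIN when it simply needs more input (not an error
   here) and EOF once every stream in the queue has finished; both cases are
   handled in the loop below. */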
|
|
|
while (1) { |
|
AVFrame *enc_frame = ost->sq_frame; |
|
|
|
ret = sq_receive(of->sq_encode, ost->sq_idx_encode, |
|
SQFRAME(enc_frame)); |
|
if (ret == AVERROR_EOF) { |
|
enc_frame = NULL; |
|
} else if (ret < 0) { |
|
return (ret == AVERROR(EAGAIN)) ? 0 : ret; |
|
} |
|
|
|
ret = encode_frame(of, ost, enc_frame); |
|
if (enc_frame) |
|
av_frame_unref(enc_frame); |
|
if (ret < 0) { |
|
if (ret == AVERROR_EOF) |
|
close_output_stream(ost); |
|
return ret; |
|
} |
|
} |
|
} |
|
|
|
static void do_audio_out(OutputFile *of, OutputStream *ost, |
|
AVFrame *frame) |
|
{ |
|
int ret; |
|
|
|
adjust_frame_pts_to_encoder_tb(of, ost, frame); |
|
|
|
if (!check_recording_time(ost)) |
|
return; |
|
|
|
if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0) |
|
frame->pts = ost->sync_opts; |
|
ost->sync_opts = frame->pts + frame->nb_samples; |
|
|
|
ret = submit_encode_frame(of, ost, frame); |
|
if (ret < 0 && ret != AVERROR_EOF) |
|
exit_program(1); |
|
} |
|
|
|
static void do_subtitle_out(OutputFile *of, |
|
OutputStream *ost, |
|
AVSubtitle *sub) |
|
{ |
|
int subtitle_out_max_size = 1024 * 1024; |
|
int subtitle_out_size, nb, i, ret; |
|
AVCodecContext *enc; |
|
AVPacket *pkt = ost->pkt; |
|
int64_t pts; |
|
|
|
if (sub->pts == AV_NOPTS_VALUE) { |
|
av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n"); |
|
if (exit_on_error) |
|
exit_program(1); |
|
return; |
|
} |
|
|
|
enc = ost->enc_ctx; |
|
|
|
/* Note: DVB subtitles need one packet to draw them and another
   packet to clear them */
|
/* XXX: signal it in the codec context ? */ |
|
if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) |
|
nb = 2; |
|
else |
|
nb = 1; |
|
|
|
/* shift timestamp to honor -ss and make check_recording_time() work with -t */ |
|
pts = sub->pts; |
|
if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE) |
|
pts -= output_files[ost->file_index]->start_time; |
|
for (i = 0; i < nb; i++) { |
|
unsigned save_num_rects = sub->num_rects; |
|
|
|
ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base); |
|
if (!check_recording_time(ost)) |
|
return; |
|
|
|
ret = av_new_packet(pkt, subtitle_out_max_size); |
|
if (ret < 0) { |
|
av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle encode buffer\n"); |
|
exit_program(1); |
|
} |
|
|
|
sub->pts = pts; |
|
// start_display_time is required to be 0 |
|
sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q); |
|
sub->end_display_time -= sub->start_display_time; |
|
sub->start_display_time = 0; |
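/* start_display_time is folded into pts above (rescaled from milliseconds to
   AV_TIME_BASE units) and end_display_time is reduced by the same amount, so
   the displayed duration is preserved while start_display_time itself can be
   zeroed as encoders expect. */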
|
if (i == 1) |
|
sub->num_rects = 0; |
|
|
|
ost->frames_encoded++; |
|
|
|
subtitle_out_size = avcodec_encode_subtitle(enc, pkt->data, pkt->size, sub); |
|
if (i == 1) |
|
sub->num_rects = save_num_rects; |
|
if (subtitle_out_size < 0) { |
|
av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n"); |
|
exit_program(1); |
|
} |
|
|
|
av_shrink_packet(pkt, subtitle_out_size); |
|
pkt->pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase); |
|
pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase); |
|
if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) { |
|
/* XXX: the pts correction is handled here. Maybe handling |
|
it in the codec would be better */ |
|
if (i == 0) |
|
pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase); |
|
else |
|
pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase); |
|
} |
|
pkt->dts = pkt->pts; |
|
output_packet(of, pkt, ost, 0); |
|
} |
|
} |
|
|
|
/* May modify/reset next_picture */ |
|
static void do_video_out(OutputFile *of, |
|
OutputStream *ost, |
|
AVFrame *next_picture) |
|
{ |
|
int ret; |
|
AVCodecContext *enc = ost->enc_ctx; |
|
AVRational frame_rate; |
|
int64_t nb_frames, nb0_frames, i; |
|
double delta, delta0; |
|
double duration = 0; |
|
double sync_ipts = AV_NOPTS_VALUE; |
|
InputStream *ist = NULL; |
|
AVFilterContext *filter = ost->filter->filter; |
|
|
|
init_output_stream_wrapper(ost, next_picture, 1); |
|
sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture); |
|
|
|
if (ost->source_index >= 0) |
|
ist = input_streams[ost->source_index]; |
|
|
|
frame_rate = av_buffersink_get_frame_rate(filter); |
|
if (frame_rate.num > 0 && frame_rate.den > 0) |
|
duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base)); |
|
|
|
if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num) |
|
duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base))); |
|
|
|
if (!ost->filters_script && |
|
!ost->filters && |
|
(nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) && |
|
next_picture && |
|
ist && |
|
lrintf(next_picture->duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) { |
|
duration = lrintf(next_picture->duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)); |
|
} |
|
|
|
if (!next_picture) { |
|
//end, flushing |
|
nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0], |
|
ost->last_nb0_frames[1], |
|
ost->last_nb0_frames[2]); |
|
} else { |
|
delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output. |
|
delta = delta0 + duration; |
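/* delta is the same drift measured at the end of the frame: under CFR-style
   sync, a strongly negative delta marks a frame to drop, while a delta well
   above one frame marks a gap to be filled by duplicating frames (see the
   switch below). */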
|
|
|
/* by default, we output a single frame */ |
|
nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR) |
|
nb_frames = 1; |
|
|
|
if (delta0 < 0 && |
|
delta > 0 && |
|
ost->vsync_method != VSYNC_PASSTHROUGH && |
|
ost->vsync_method != VSYNC_DROP) { |
|
if (delta0 < -0.6) { |
|
av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0); |
|
} else |
|
av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0); |
|
sync_ipts = ost->sync_opts; |
|
duration += delta0; |
|
delta0 = 0; |
|
} |
|
|
|
switch (ost->vsync_method) { |
|
case VSYNC_VSCFR: |
|
if (ost->vsync_frame_number == 0 && delta0 >= 0.5) { |
|
av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0)); |
|
delta = duration; |
|
delta0 = 0; |
|
ost->sync_opts = llrint(sync_ipts); |
|
} |
|
case VSYNC_CFR: |
|
// FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c |
|
if (frame_drop_threshold && delta < frame_drop_threshold && ost->vsync_frame_number) { |
|
nb_frames = 0; |
|
} else if (delta < -1.1) |
|
nb_frames = 0; |
|
else if (delta > 1.1) { |
|
nb_frames = llrintf(delta); |
|
if (delta0 > 1.1) |
|
nb0_frames = llrintf(delta0 - 0.6); |
|
} |
|
next_picture->duration = 1; |
|
break; |
|
case VSYNC_VFR: |
|
if (delta <= -0.6) |
|
nb_frames = 0; |
|
else if (delta > 0.6) |
|
ost->sync_opts = llrint(sync_ipts); |
|
next_picture->duration = duration; |
|
break; |
|
case VSYNC_DROP: |
|
case VSYNC_PASSTHROUGH: |
|
next_picture->duration = duration; |
|
ost->sync_opts = llrint(sync_ipts); |
|
break; |
|
default: |
|
av_assert0(0); |
|
} |
|
} |
|
|
|
/* |
|
* For video, number of frames in == number of packets out. |
|
* But there may be reordering, so we can't throw away frames on encoder |
|
* flush, we need to limit them here, before they go into encoder. |
|
*/ |
|
nb_frames = FFMIN(nb_frames, ost->max_frames - ost->vsync_frame_number); |
|
nb0_frames = FFMIN(nb0_frames, nb_frames); |
|
|
|
memmove(ost->last_nb0_frames + 1, |
|
ost->last_nb0_frames, |
|
sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1)); |
|
ost->last_nb0_frames[0] = nb0_frames; |
|
|
|
if (nb0_frames == 0 && ost->last_dropped) { |
|
nb_frames_drop++; |
|
av_log(NULL, AV_LOG_VERBOSE, |
|
"*** dropping frame %"PRId64" from stream %d at ts %"PRId64"\n", |
|
ost->vsync_frame_number, ost->st->index, ost->last_frame->pts); |
|
} |
|
if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) { |
|
if (nb_frames > dts_error_threshold * 30) { |
|
av_log(NULL, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", nb_frames - 1); |
|
nb_frames_drop++; |
|
return; |
|
} |
|
nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames); |
|
av_log(NULL, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", nb_frames - 1); |
|
if (nb_frames_dup > dup_warning) { |
|
av_log(NULL, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", dup_warning); |
|
dup_warning *= 10; |
|
} |
|
} |
|
ost->last_dropped = nb_frames == nb0_frames && next_picture; |
|
ost->dropped_keyframe = ost->last_dropped && next_picture && next_picture->key_frame; |
|
|
|
/* duplicates frame if needed */ |
|
for (i = 0; i < nb_frames; i++) { |
|
AVFrame *in_picture; |
|
int forced_keyframe = 0; |
|
double pts_time; |
|
|
|
if (i < nb0_frames && ost->last_frame->buf[0]) { |
|
in_picture = ost->last_frame; |
|
} else |
|
in_picture = next_picture; |
|
|
|
if (!in_picture) |
|
return; |
|
|
|
in_picture->pts = ost->sync_opts; |
|
|
|
if (!check_recording_time(ost)) |
|
return; |
|
|
|
in_picture->quality = enc->global_quality; |
|
in_picture->pict_type = 0; |
|
|
|
if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE && |
|
in_picture->pts != AV_NOPTS_VALUE) |
|
ost->forced_kf_ref_pts = in_picture->pts; |
|
|
|
pts_time = in_picture->pts != AV_NOPTS_VALUE ? |
|
(in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN; |
|
if (ost->forced_kf_index < ost->forced_kf_count && |
|
in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) { |
|
ost->forced_kf_index++; |
|
forced_keyframe = 1; |
|
} else if (ost->forced_keyframes_pexpr) { |
|
double res; |
|
ost->forced_keyframes_expr_const_values[FKF_T] = pts_time; |
|
res = av_expr_eval(ost->forced_keyframes_pexpr, |
|
ost->forced_keyframes_expr_const_values, NULL); |
|
ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n", |
|
ost->forced_keyframes_expr_const_values[FKF_N], |
|
ost->forced_keyframes_expr_const_values[FKF_N_FORCED], |
|
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N], |
|
ost->forced_keyframes_expr_const_values[FKF_T], |
|
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T], |
|
res); |
|
if (res) { |
|
forced_keyframe = 1; |
|
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = |
|
ost->forced_keyframes_expr_const_values[FKF_N]; |
|
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = |
|
ost->forced_keyframes_expr_const_values[FKF_T]; |
|
ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1; |
|
} |
|
|
|
ost->forced_keyframes_expr_const_values[FKF_N] += 1; |
|
} else if ( ost->forced_keyframes |
|
&& !strncmp(ost->forced_keyframes, "source", 6) |
|
&& in_picture->key_frame==1 |
|
&& !i) { |
|
forced_keyframe = 1; |
|
} else if ( ost->forced_keyframes |
|
&& !strncmp(ost->forced_keyframes, "source_no_drop", 14) |
|
&& !i) { |
|
forced_keyframe = (in_picture->key_frame == 1) || ost->dropped_keyframe; |
|
ost->dropped_keyframe = 0; |
|
} |
|
|
|
if (forced_keyframe) { |
|
in_picture->pict_type = AV_PICTURE_TYPE_I; |
|
av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time); |
|
} |
|
|
|
ret = submit_encode_frame(of, ost, in_picture); |
|
if (ret < 0 && ret != AVERROR_EOF) |
|
exit_program(1); |
|
|
|
ost->sync_opts++; |
|
ost->vsync_frame_number++; |
|
} |
|
|
|
av_frame_unref(ost->last_frame); |
|
if (next_picture) |
|
av_frame_move_ref(ost->last_frame, next_picture); |
|
} |
|
|
|
/** |
|
* Get and encode new output from any of the filtergraphs, without causing |
|
* activity. |
|
* |
|
* @return 0 for success, <0 for severe errors |
|
*/ |
|
static int reap_filters(int flush) |
|
{ |
|
AVFrame *filtered_frame = NULL; |
|
int i; |
|
|
|
/* Reap all buffers present in the buffer sinks */ |
|
for (i = 0; i < nb_output_streams; i++) { |
|
OutputStream *ost = output_streams[i]; |
|
OutputFile *of = output_files[ost->file_index]; |
|
AVFilterContext *filter; |
|
AVCodecContext *enc = ost->enc_ctx; |
|
int ret = 0; |
|
|
|
if (!ost->filter || !ost->filter->graph->graph) |
|
continue; |
|
filter = ost->filter->filter; |
|
|
|
/*
 * Unlike video, for audio the frame size matters.
 * Currently we are fully reliant on the lavfi filter chain to
 * do the buffering for us, and thus the frame size parameter
 * needs to be set accordingly. Where does one get the required
 * frame size? From the initialized AVCodecContext of an audio
 * encoder. Thus, if we have reached an audio stream, initialize
 * the encoder before the first AVFrame is received.
 */
|
if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO) |
|
init_output_stream_wrapper(ost, NULL, 1); |
|
|
|
filtered_frame = ost->filtered_frame; |
|
|
|
while (1) { |
|
ret = av_buffersink_get_frame_flags(filter, filtered_frame, |
|
AV_BUFFERSINK_FLAG_NO_REQUEST); |
|
if (ret < 0) { |
|
if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) { |
|
av_log(NULL, AV_LOG_WARNING, |
|
"Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret)); |
|
} else if (flush && ret == AVERROR_EOF) { |
|
if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO) |
|
do_video_out(of, ost, NULL); |
|
} |
|
break; |
|
} |
|
if (ost->finished) { |
|
av_frame_unref(filtered_frame); |
|
continue; |
|
} |
|
|
|
if (filtered_frame->pts != AV_NOPTS_VALUE) { |
|
AVRational tb = av_buffersink_get_time_base(filter); |
|
ost->last_filter_pts = av_rescale_q(filtered_frame->pts, tb, |
|
AV_TIME_BASE_Q); |
|
} |
|
|
|
switch (av_buffersink_get_type(filter)) { |
|
case AVMEDIA_TYPE_VIDEO: |
|
if (!ost->frame_aspect_ratio.num) |
|
enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio; |
|
|
|
do_video_out(of, ost, filtered_frame); |
|
break; |
|
case AVMEDIA_TYPE_AUDIO: |
|
if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) && |
|
enc->ch_layout.nb_channels != filtered_frame->ch_layout.nb_channels) { |
|
av_log(NULL, AV_LOG_ERROR, |
|
"Audio filter graph output is not normalized and encoder does not support parameter changes\n"); |
|
break; |
|
} |
|
do_audio_out(of, ost, filtered_frame); |
|
break; |
|
default: |
|
// TODO support subtitle filters |
|
av_assert0(0); |
|
} |
|
|
|
av_frame_unref(filtered_frame); |
|
} |
|
} |
|
|
|
return 0; |
|
} |
|
|
|
static void print_final_stats(int64_t total_size) |
|
{ |
|
uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0; |
|
uint64_t subtitle_size = 0; |
|
uint64_t data_size = 0; |
|
float percent = -1.0; |
|
int i, j; |
|
int pass1_used = 1; |
|
|
|
for (i = 0; i < nb_output_streams; i++) { |
|
OutputStream *ost = output_streams[i]; |
|
AVCodecParameters *par = ost->st->codecpar; |
|
switch (par->codec_type) { |
|
case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break; |
|
case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break; |
|
case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break; |
|
default: other_size += ost->data_size; break; |
|
} |
|
extra_size += par->extradata_size; |
|
data_size += ost->data_size; |
|
if (ost->enc_ctx && |
|
(ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) |
|
!= AV_CODEC_FLAG_PASS1) |
|
pass1_used = 0; |
|
} |
|
|
|
if (data_size && total_size>0 && total_size >= data_size) |
|
percent = 100.0 * (total_size - data_size) / data_size; |
|
|
|
av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ", |
|
video_size / 1024.0, |
|
audio_size / 1024.0, |
|
subtitle_size / 1024.0, |
|
other_size / 1024.0, |
|
extra_size / 1024.0); |
|
if (percent >= 0.0) |
|
av_log(NULL, AV_LOG_INFO, "%f%%", percent); |
|
else |
|
av_log(NULL, AV_LOG_INFO, "unknown"); |
|
av_log(NULL, AV_LOG_INFO, "\n"); |
|
|
|
/* print verbose per-stream stats */ |
|
for (i = 0; i < nb_input_files; i++) { |
|
InputFile *f = input_files[i]; |
|
uint64_t total_packets = 0, total_size = 0; |
|
|
|
av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n", |
|
i, f->ctx->url); |
|
|
|
for (j = 0; j < f->nb_streams; j++) { |
|
InputStream *ist = input_streams[f->ist_index + j]; |
|
enum AVMediaType type = ist->st->codecpar->codec_type; |
|
|
|
total_size += ist->data_size; |
|
total_packets += ist->nb_packets; |
|
|
|
av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ", |
|
i, j, av_get_media_type_string(type)); |
|
av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ", |
|
ist->nb_packets, ist->data_size); |
|
|
|
if (ist->decoding_needed) { |
|
av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded", |
|
ist->frames_decoded); |
|
if (type == AVMEDIA_TYPE_AUDIO) |
|
av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded); |
|
av_log(NULL, AV_LOG_VERBOSE, "; "); |
|
} |
|
|
|
av_log(NULL, AV_LOG_VERBOSE, "\n"); |
|
} |
|
|
|
av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n", |
|
total_packets, total_size); |
|
} |
|
|
|
for (i = 0; i < nb_output_files; i++) { |
|
OutputFile *of = output_files[i]; |
|
uint64_t total_packets = 0, total_size = 0; |
|
|
|
av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n", |
|
i, of->url); |
|
|
|
for (j = 0; j < of->nb_streams; j++) { |
|
OutputStream *ost = output_streams[of->ost_index + j]; |
|
enum AVMediaType type = ost->st->codecpar->codec_type; |
|
|
|
total_size += ost->data_size; |
|
total_packets += atomic_load(&ost->packets_written); |
|
|
|
av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ", |
|
i, j, av_get_media_type_string(type)); |
|
if (ost->enc_ctx) { |
|
av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded", |
|
ost->frames_encoded); |
|
if (type == AVMEDIA_TYPE_AUDIO) |
|
av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded); |
|
av_log(NULL, AV_LOG_VERBOSE, "; "); |
|
} |
|
|
|
av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ", |
|
atomic_load(&ost->packets_written), ost->data_size); |
|
|
|
av_log(NULL, AV_LOG_VERBOSE, "\n"); |
|
} |
|
|
|
av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n", |
|
total_packets, total_size); |
|
} |
|
if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){ |
|
av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded "); |
|
if (pass1_used) { |
|
av_log(NULL, AV_LOG_WARNING, "\n"); |
|
} else { |
|
av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n"); |
|
} |
|
} |
|
} |
|
|
|
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time) |
|
{ |
|
AVBPrint buf, buf_script; |
|
int64_t total_size = of_filesize(output_files[0]); |
|
int vid, i; |
|
double bitrate; |
|
double speed; |
|
int64_t pts = INT64_MIN + 1; |
|
static int64_t last_time = -1; |
|
static int first_report = 1; |
|
static int qp_histogram[52]; |
|
int hours, mins, secs, us; |
|
const char *hours_sign; |
|
int ret; |
|
float t; |
|
|
|
if (!print_stats && !is_last_report && !progress_avio) |
|
return; |
|
|
|
if (!is_last_report) { |
|
if (last_time == -1) { |
|
last_time = cur_time; |
|
} |
|
if (((cur_time - last_time) < stats_period && !first_report) || |
|
(first_report && nb_output_dumped < nb_output_files)) |
|
return; |
|
last_time = cur_time; |
|
} |
|
|
|
t = (cur_time-timer_start) / 1000000.0; |
|
|
|
vid = 0; |
|
av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC); |
|
av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC); |
|
for (i = 0; i < nb_output_streams; i++) { |
|
OutputStream * const ost = output_streams[i]; |
|
const AVCodecContext * const enc = ost->enc_ctx; |
|
const float q = enc ? ost->quality / (float) FF_QP2LAMBDA : -1; |
|
|
|
if (vid && ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { |
|
av_bprintf(&buf, "q=%2.1f ", q); |
|
av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n", |
|
ost->file_index, ost->index, q); |
|
} |
|
if (!vid && ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { |
|
float fps; |
|
uint64_t frame_number = atomic_load(&ost->packets_written); |
|
|
|
fps = t > 1 ? frame_number / t : 0; |
|
av_bprintf(&buf, "frame=%5"PRId64" fps=%3.*f q=%3.1f ", |
|
frame_number, fps < 9.95, fps, q); |
|
av_bprintf(&buf_script, "frame=%"PRId64"\n", frame_number); |
|
av_bprintf(&buf_script, "fps=%.2f\n", fps); |
|
av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n", |
|
ost->file_index, ost->index, q); |
|
if (is_last_report) |
|
av_bprintf(&buf, "L"); |
|
if (qp_hist) { |
|
int j; |
|
int qp = lrintf(q); |
|
if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram)) |
|
qp_histogram[qp]++; |
|
for (j = 0; j < 32; j++) |
|
av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1)); |
|
} |
|
|
|
if (enc && (enc->flags & AV_CODEC_FLAG_PSNR) && |
|
(ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) { |
|
int j; |
|
double error, error_sum = 0; |
|
double scale, scale_sum = 0; |
|
double p; |
|
char type[3] = { 'Y','U','V' }; |
|
av_bprintf(&buf, "PSNR="); |
|
for (j = 0; j < 3; j++) { |
|
if (is_last_report) { |
|
error = enc->error[j]; |
|
scale = enc->width * enc->height * 255.0 * 255.0 * frame_number; |
|
} else { |
|
error = ost->error[j]; |
|
scale = enc->width * enc->height * 255.0 * 255.0; |
|
} |
|
if (j) |
|
scale /= 4; |
|
error_sum += error; |
|
scale_sum += scale; |
|
p = psnr(error / scale); |
|
av_bprintf(&buf, "%c:%2.2f ", type[j], p); |
|
av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n", |
|
ost->file_index, ost->index, type[j] | 32, p); |
|
} |
|
p = psnr(error_sum / scale_sum); |
|
av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum)); |
|
av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n", |
|
ost->file_index, ost->index, p); |
|
} |
|
vid = 1; |
|
} |
|
/* compute min output value */ |
|
if (ost->last_mux_dts != AV_NOPTS_VALUE) { |
|
pts = FFMAX(pts, ost->last_mux_dts); |
|
if (copy_ts) { |
|
if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1) |
|
copy_ts_first_pts = pts; |
|
if (copy_ts_first_pts != AV_NOPTS_VALUE) |
|
pts -= copy_ts_first_pts; |
|
} |
|
} |
|
|
|
if (is_last_report) |
|
nb_frames_drop += ost->last_dropped; |
|
} |
|
|
|
secs = FFABS(pts) / AV_TIME_BASE; |
|
us = FFABS(pts) % AV_TIME_BASE; |
|
mins = secs / 60; |
|
secs %= 60; |
|
hours = mins / 60; |
|
mins %= 60; |
|
hours_sign = (pts < 0) ? "-" : ""; |
|
|
|
bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1; |
|
speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1; |
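/* pts is in AV_TIME_BASE (microsecond) units here, so total_size * 8 divided
   by pts/1000.0 yields kbit/s, and pts / AV_TIME_BASE / t is the processing
   speed as a multiple of realtime. */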
|
|
|
if (total_size < 0) av_bprintf(&buf, "size=N/A time="); |
|
else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0); |
|
if (pts == AV_NOPTS_VALUE) { |
|
av_bprintf(&buf, "N/A "); |
|
} else { |
|
av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ", |
|
hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE); |
|
} |
|
|
|
if (bitrate < 0) { |
|
av_bprintf(&buf, "bitrate=N/A"); |
|
av_bprintf(&buf_script, "bitrate=N/A\n"); |
|
}else{ |
|
av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate); |
|
av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate); |
|
} |
|
|
|
if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n"); |
|
else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size); |
|
if (pts == AV_NOPTS_VALUE) { |
|
av_bprintf(&buf_script, "out_time_us=N/A\n"); |
|
av_bprintf(&buf_script, "out_time_ms=N/A\n"); |
|
av_bprintf(&buf_script, "out_time=N/A\n"); |
|
} else { |
|
av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts); |
|
av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts); |
|
av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n", |
|
hours_sign, hours, mins, secs, us); |
|
} |
|
|
|
if (nb_frames_dup || nb_frames_drop) |
|
av_bprintf(&buf, " dup=%"PRId64" drop=%"PRId64, nb_frames_dup, nb_frames_drop); |
|
av_bprintf(&buf_script, "dup_frames=%"PRId64"\n", nb_frames_dup); |
|
av_bprintf(&buf_script, "drop_frames=%"PRId64"\n", nb_frames_drop); |
|
|
|
if (speed < 0) { |
|
av_bprintf(&buf, " speed=N/A"); |
|
av_bprintf(&buf_script, "speed=N/A\n"); |
|
} else { |
|
av_bprintf(&buf, " speed=%4.3gx", speed); |
|
av_bprintf(&buf_script, "speed=%4.3gx\n", speed); |
|
} |
|
|
|
if (print_stats || is_last_report) { |
|
const char end = is_last_report ? '\n' : '\r'; |
|
if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) { |
|
fprintf(stderr, "%s %c", buf.str, end); |
|
} else |
|
av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end); |
|
|
|
fflush(stderr); |
|
} |
|
av_bprint_finalize(&buf, NULL); |
|
|
|
if (progress_avio) { |
|
av_bprintf(&buf_script, "progress=%s\n", |
|
is_last_report ? "end" : "continue"); |
|
avio_write(progress_avio, buf_script.str, |
|
FFMIN(buf_script.len, buf_script.size - 1)); |
|
avio_flush(progress_avio); |
|
av_bprint_finalize(&buf_script, NULL); |
|
if (is_last_report) { |
|
if ((ret = avio_closep(&progress_avio)) < 0) |
|
av_log(NULL, AV_LOG_ERROR, |
|
"Error closing progress log, loss of information possible: %s\n", av_err2str(ret)); |
|
} |
|
} |
|
|
|
first_report = 0; |
|
|
|
if (is_last_report) |
|
print_final_stats(total_size); |
|
} |
|
|
|
static int ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par) |
|
{ |
|
int ret; |
|
|
|
// We never got any input. Set a fake format, which will |
|
// come from libavformat. |
|
ifilter->format = par->format; |
|
ifilter->sample_rate = par->sample_rate; |
|
ifilter->width = par->width; |
|
ifilter->height = par->height; |
|
ifilter->sample_aspect_ratio = par->sample_aspect_ratio; |
|
ret = av_channel_layout_copy(&ifilter->ch_layout, &par->ch_layout); |
|
if (ret < 0) |
|
return ret; |
|
|
|
return 0; |
|
} |
|
|
|
static void flush_encoders(void) |
|
{ |
|
int i, ret; |
|
|
|
for (i = 0; i < nb_output_streams; i++) { |
|
OutputStream *ost = output_streams[i]; |
|
OutputFile *of = output_files[ost->file_index]; |
|
if (ost->sq_idx_encode >= 0) |
|
sq_send(of->sq_encode, ost->sq_idx_encode, SQFRAME(NULL)); |
|
} |
|
|
|
for (i = 0; i < nb_output_streams; i++) { |
|
OutputStream *ost = output_streams[i]; |
|
AVCodecContext *enc = ost->enc_ctx; |
|
OutputFile *of = output_files[ost->file_index]; |
|
|
|
if (!enc) |
|
continue; |
|
|
|
// Try to enable encoding with no input frames. |
|
// Maybe we should just let encoding fail instead. |
|
if (!ost->initialized) { |
|
FilterGraph *fg = ost->filter->graph; |
|
|
|
av_log(NULL, AV_LOG_WARNING, |
|
"Finishing stream %d:%d without any data written to it.\n", |
|
ost->file_index, ost->st->index); |
|
|
|
if (ost->filter && !fg->graph) { |
|
int x; |
|
for (x = 0; x < fg->nb_inputs; x++) { |
|
InputFilter *ifilter = fg->inputs[x]; |
|
if (ifilter->format < 0 && |
|
ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar) < 0) { |
|
av_log(NULL, AV_LOG_ERROR, "Error copying paramerets from input stream\n"); |
|
exit_program(1); |
|
} |
|
} |
|
|
|
if (!ifilter_has_all_input_formats(fg)) |
|
continue; |
|
|
|
ret = configure_filtergraph(fg); |
|
if (ret < 0) { |
|
av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n"); |
|
exit_program(1); |
|
} |
|
|
|
output_packet(of, ost->pkt, ost, 1); |
|
} |
|
|
|
init_output_stream_wrapper(ost, NULL, 1); |
|
} |
|
|
|
if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO) |
|
continue; |
|
|
|
ret = submit_encode_frame(of, ost, NULL); |
|
if (ret != AVERROR_EOF) |
|
exit_program(1); |
|
} |
|
} |
|
|
|
/* |
|
* Check whether a packet from ist should be written into ost at this time |
|
*/ |
|
static int check_output_constraints(InputStream *ist, OutputStream *ost) |
|
{ |
|
OutputFile *of = output_files[ost->file_index]; |
|
int ist_index = input_files[ist->file_index]->ist_index + ist->st->index; |
|
|
|
if (ost->source_index != ist_index) |
|
return 0; |
|
|
|
if (ost->finished & MUXER_FINISHED) |
|
return 0; |
|
|
|
if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time) |
|
return 0; |
|
|
|
return 1; |
|
} |
|
|
|
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt) |
|
{ |
|
OutputFile *of = output_files[ost->file_index]; |
|
InputFile *f = input_files [ist->file_index]; |
|
int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time; |
|
int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase); |
|
AVPacket *opkt = ost->pkt; |
|
|
|
av_packet_unref(opkt); |
|
// EOF: flush output bitstream filters. |
|
if (!pkt) { |
|
output_packet(of, opkt, ost, 1); |
|
return; |
|
} |
|
|
|
if (!ost->streamcopy_started && !(pkt->flags & AV_PKT_FLAG_KEY) && |
|
!ost->copy_initial_nonkeyframes) |
|
return; |
|
|
|
if (!ost->streamcopy_started && !ost->copy_prior_start) { |
|
if (pkt->pts == AV_NOPTS_VALUE ? |
|
ist->pts < ost->ts_copy_start : |
|
pkt->pts < av_rescale_q(ost->ts_copy_start, AV_TIME_BASE_Q, ist->st->time_base)) |
|
return; |
|
} |
|
|
|
if (of->recording_time != INT64_MAX && |
|
ist->pts >= of->recording_time + start_time) { |
|
close_output_stream(ost); |
|
return; |
|
} |
|
|
|
if (f->recording_time != INT64_MAX) { |
|
start_time = 0; |
|
if (copy_ts) { |
|
start_time += f->start_time != AV_NOPTS_VALUE ? f->start_time : 0; |
|
start_time += start_at_zero ? 0 : f->ctx->start_time; |
|
} |
|
if (ist->pts >= f->recording_time + start_time) { |
|
close_output_stream(ost); |
|
return; |
|
} |
|
} |
|
|
|
if (av_packet_ref(opkt, pkt) < 0) |
|
exit_program(1); |
|
|
|
if (pkt->pts != AV_NOPTS_VALUE) |
|
opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time; |
|
|
|
if (pkt->dts == AV_NOPTS_VALUE) { |
|
opkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase); |
|
} else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { |
|
int duration = av_get_audio_frame_duration2(ist->st->codecpar, pkt->size); |
|
if(!duration) |
|
duration = ist->st->codecpar->frame_size; |
|
opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts, |
|
(AVRational){1, ist->st->codecpar->sample_rate}, duration, |
|
&ist->filter_in_rescale_delta_last, ost->mux_timebase); |
|
/* dts will be set immediately afterwards to what pts is now */ |
|
opkt->pts = opkt->dts - ost_tb_start_time; |
|
} else |
|
opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase); |
|
opkt->dts -= ost_tb_start_time; |
|
|
|
opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase); |
|
|
|
ost->sync_opts += opkt->duration; |
|
|
|
output_packet(of, opkt, ost, 0); |
|
|
|
ost->streamcopy_started = 1; |
|
} |
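
/* Worked example for the timestamp rescaling above, with illustrative |

 * (assumed, not mandated) time bases: input time_base 1/90000 (MPEG-TS), |

 * mux_timebase 1/1000, output start_time of 2 seconds. |

 * |

 *     ost_tb_start_time = av_rescale_q(2 * AV_TIME_BASE, AV_TIME_BASE_Q, (AVRational){1, 1000}) = 2000 |

 *     pkt->pts = 270000  =>  opkt->pts = av_rescale_q(270000, (AVRational){1, 90000}, (AVRational){1, 1000}) - 2000 = 1000 |

 * |

 * i.e. a packet 3 seconds into the input lands 1 second into the output. |

 */ |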
|
|
|
static void check_decode_result(InputStream *ist, int *got_output, int ret) |
|
{ |
|
if (*got_output || ret<0) |
|
decode_error_stat[ret<0] ++; |
|
|
|
if (ret < 0 && exit_on_error) |
|
exit_program(1); |
|
|
|
if (*got_output && ist) { |
|
if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) { |
|
av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING, |
|
"%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index); |
|
if (exit_on_error) |
|
exit_program(1); |
|
} |
|
} |
|
} |
|
|
|
// Filters can be configured only if the formats of all inputs are known. |
|
static int ifilter_has_all_input_formats(FilterGraph *fg) |
|
{ |
|
int i; |
|
for (i = 0; i < fg->nb_inputs; i++) { |
|
if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO || |
|
fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO)) |
|
return 0; |
|
} |
|
return 1; |
|
} |
|
|
|
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference) |
|
{ |
|
FilterGraph *fg = ifilter->graph; |
|
AVFrameSideData *sd; |
|
int need_reinit, ret; |
|
int buffersrc_flags = AV_BUFFERSRC_FLAG_PUSH; |
|
|
|
if (keep_reference) |
|
buffersrc_flags |= AV_BUFFERSRC_FLAG_KEEP_REF; |
|
|
|
/* determine if the parameters for this input changed */ |
|
need_reinit = ifilter->format != frame->format; |
|
|
|
switch (ifilter->ist->st->codecpar->codec_type) { |
|
case AVMEDIA_TYPE_AUDIO: |
|
need_reinit |= ifilter->sample_rate != frame->sample_rate || |
|
av_channel_layout_compare(&ifilter->ch_layout, &frame->ch_layout); |
|
break; |
|
case AVMEDIA_TYPE_VIDEO: |
|
need_reinit |= ifilter->width != frame->width || |
|
ifilter->height != frame->height; |
|
break; |
|
} |
|
|
|
if (!ifilter->ist->reinit_filters && fg->graph) |
|
need_reinit = 0; |
|
|
|
if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx || |
|
(ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data)) |
|
need_reinit = 1; |
|
|
|
if (sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX)) { |
|
if (!ifilter->displaymatrix || memcmp(sd->data, ifilter->displaymatrix, sizeof(int32_t) * 9)) |
|
need_reinit = 1; |
|
} else if (ifilter->displaymatrix) |
|
need_reinit = 1; |
|
|
|
if (need_reinit) { |
|
ret = ifilter_parameters_from_frame(ifilter, frame); |
|
if (ret < 0) |
|
return ret; |
|
} |
|
|
|
/* (re)init the graph if possible, otherwise buffer the frame and return */ |
|
if (need_reinit || !fg->graph) { |
|
if (!ifilter_has_all_input_formats(fg)) { |
|
AVFrame *tmp = av_frame_clone(frame); |
|
if (!tmp) |
|
return AVERROR(ENOMEM); |
|
|
|
ret = av_fifo_write(ifilter->frame_queue, &tmp, 1); |
|
if (ret < 0) |
|
av_frame_free(&tmp); |
|
|
|
return ret; |
|
} |
|
|
|
ret = reap_filters(1); |
|
if (ret < 0 && ret != AVERROR_EOF) { |
|
av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret)); |
|
return ret; |
|
} |
|
|
|
ret = configure_filtergraph(fg); |
|
if (ret < 0) { |
|
av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n"); |
|
return ret; |
|
} |
|
} |
|
|
|
ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, buffersrc_flags); |
|
if (ret < 0) { |
|
if (ret != AVERROR_EOF) |
|
av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret)); |
|
return ret; |
|
} |
|
|
|
return 0; |
|
} |
|
|
|
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts) |
|
{ |
|
int ret; |
|
|
|
ifilter->eof = 1; |
|
|
|
if (ifilter->filter) { |
|
ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH); |
|
if (ret < 0) |
|
return ret; |
|
} else { |
|
// the filtergraph was never configured |
|
if (ifilter->format < 0) { |
|
ret = ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar); |
|
if (ret < 0) |
|
return ret; |
|
} |
|
if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) { |
|
av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index); |
|
return AVERROR_INVALIDDATA; |
|
} |
|
} |
|
|
|
return 0; |
|
} |
|
|
|
// This does not quite work like avcodec_decode_audio4/avcodec_decode_video2. |
|
// There is the following difference: if you got a frame, you must call |
|
// it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0 |
|
// (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet) |
|
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt) |
|
{ |
|
int ret; |
|
|
|
*got_frame = 0; |
|
|
|
if (pkt) { |
|
ret = avcodec_send_packet(avctx, pkt); |
|
// In particular, we don't expect AVERROR(EAGAIN), because we read all |
|
// decoded frames with avcodec_receive_frame() until done. |
|
if (ret < 0 && ret != AVERROR_EOF) |
|
return ret; |
|
} |
|
|
|
ret = avcodec_receive_frame(avctx, frame); |
|
if (ret < 0 && ret != AVERROR(EAGAIN)) |
|
return ret; |
|
if (ret >= 0) |
|
*got_frame = 1; |
|
|
|
return 0; |
|
} |
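
/* A minimal sketch of how this wrapper is meant to be driven (this is what |

 * process_input_packet() effectively does through decode_audio()/decode_video()): |

 * |

 *     int got_frame; |

 *     int ret = decode(avctx, frame, &got_frame, pkt);       // pass the packet once |

 *     while (ret >= 0 && got_frame) { |

 *         // ... use frame, then av_frame_unref(frame) ... |

 *         ret = decode(avctx, frame, &got_frame, NULL);      // then only ask for more output |

 *     } |

 */ |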
|
|
|
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame) |
|
{ |
|
int i, ret; |
|
|
|
av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */ |
|
for (i = 0; i < ist->nb_filters; i++) { |
|
ret = ifilter_send_frame(ist->filters[i], decoded_frame, i < ist->nb_filters - 1); |
|
if (ret == AVERROR_EOF) |
|
ret = 0; /* ignore */ |
|
if (ret < 0) { |
|
av_log(NULL, AV_LOG_ERROR, |
|
"Failed to inject frame into filter network: %s\n", av_err2str(ret)); |
|
break; |
|
} |
|
} |
|
return ret; |
|
} |
|
|
|
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, |
|
int *decode_failed) |
|
{ |
|
AVFrame *decoded_frame = ist->decoded_frame; |
|
AVCodecContext *avctx = ist->dec_ctx; |
|
int ret, err = 0; |
|
AVRational decoded_frame_tb; |
|
|
|
update_benchmark(NULL); |
|
ret = decode(avctx, decoded_frame, got_output, pkt); |
|
update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index); |
|
if (ret < 0) |
|
*decode_failed = 1; |
|
|
|
if (ret >= 0 && avctx->sample_rate <= 0) { |
|
av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate); |
|
ret = AVERROR_INVALIDDATA; |
|
} |
|
|
|
if (ret != AVERROR_EOF) |
|
check_decode_result(ist, got_output, ret); |
|
|
|
if (!*got_output || ret < 0) |
|
return ret; |
|
|
|
ist->samples_decoded += decoded_frame->nb_samples; |
|
ist->frames_decoded++; |
|
|
|
/* increment next_dts to use for the case where the input stream does not |
|
have timestamps or there are multiple frames in the packet */ |
|
ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) / |
|
avctx->sample_rate; |
|
ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) / |
|
avctx->sample_rate; |
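
/* For example (illustrative numbers): with AV_TIME_BASE = 1000000, a |

 * 1024-sample frame at 44100 Hz advances next_pts/next_dts by |

 * 1000000 * 1024 / 44100 = 23219 us, i.e. about 23.2 ms. */ |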
|
|
|
if (decoded_frame->pts != AV_NOPTS_VALUE) { |
|
decoded_frame_tb = ist->st->time_base; |
|
} else if (pkt && pkt->pts != AV_NOPTS_VALUE) { |
|
decoded_frame->pts = pkt->pts; |
|
decoded_frame_tb = ist->st->time_base; |
|
} else { |
|
decoded_frame->pts = ist->dts; |
|
decoded_frame_tb = AV_TIME_BASE_Q; |
|
} |
|
if (pkt && pkt->duration && ist->prev_pkt_pts != AV_NOPTS_VALUE && |
|
pkt->pts != AV_NOPTS_VALUE && pkt->pts - ist->prev_pkt_pts > pkt->duration) |
|
ist->filter_in_rescale_delta_last = AV_NOPTS_VALUE; |
|
if (pkt) |
|
ist->prev_pkt_pts = pkt->pts; |
|
if (decoded_frame->pts != AV_NOPTS_VALUE) |
|
decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts, |
|
(AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last, |
|
(AVRational){1, avctx->sample_rate}); |
|
ist->nb_samples = decoded_frame->nb_samples; |
|
err = send_frame_to_filters(ist, decoded_frame); |
|
|
|
av_frame_unref(decoded_frame); |
|
return err < 0 ? err : ret; |
|
} |
|
|
|
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof, |
|
int *decode_failed) |
|
{ |
|
AVFrame *decoded_frame = ist->decoded_frame; |
|
int i, ret = 0, err = 0; |
|
int64_t best_effort_timestamp; |
|
int64_t dts = AV_NOPTS_VALUE; |
|
|
|
// With fate-indeo3-2, we're getting 0-sized packets before EOF for some |
|
// reason. This seems like a semi-critical bug. Don't trigger EOF, and |
|
// skip the packet. |
|
if (!eof && pkt && pkt->size == 0) |
|
return 0; |
|
|
|
if (ist->dts != AV_NOPTS_VALUE) |
|
dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base); |
|
if (pkt) { |
|
pkt->dts = dts; // ffmpeg.c probably shouldn't do this |
|
} |
|
|
|
// The old code used to set dts on the drain packet, which does not work |
|
// with the new API anymore. |
|
if (eof) { |
|
void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0])); |
|
if (!new) |
|
return AVERROR(ENOMEM); |
|
ist->dts_buffer = new; |
|
ist->dts_buffer[ist->nb_dts_buffer++] = dts; |
|
} |
|
|
|
update_benchmark(NULL); |
|
ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt); |
|
update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index); |
|
if (ret < 0) |
|
*decode_failed = 1; |
|
|
|
// The following line may be required in some cases where there is no parser |
|
// or the parser does not set has_b_frames correctly |
|
if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) { |
|
if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) { |
|
ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames; |
|
} else |
|
av_log(ist->dec_ctx, AV_LOG_WARNING, |
|
"video_delay is larger in decoder than demuxer %d > %d.\n" |
|
"If you want to help, upload a sample " |
|
"of this file to https://streams.videolan.org/upload/ " |
|
"and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n", |
|
ist->dec_ctx->has_b_frames, |
|
ist->st->codecpar->video_delay); |
|
} |
|
|
|
if (ret != AVERROR_EOF) |
|
check_decode_result(ist, got_output, ret); |
|
|
|
if (*got_output && ret >= 0) { |
|
if (ist->dec_ctx->width != decoded_frame->width || |
|
ist->dec_ctx->height != decoded_frame->height || |
|
ist->dec_ctx->pix_fmt != decoded_frame->format) { |
|
av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n", |
|
decoded_frame->width, |
|
decoded_frame->height, |
|
decoded_frame->format, |
|
ist->dec_ctx->width, |
|
ist->dec_ctx->height, |
|
ist->dec_ctx->pix_fmt); |
|
} |
|
} |
|
|
|
if (!*got_output || ret < 0) |
|
return ret; |
|
|
|
if(ist->top_field_first>=0) |
|
decoded_frame->top_field_first = ist->top_field_first; |
|
|
|
ist->frames_decoded++; |
|
|
|
if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) { |
|
err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame); |
|
if (err < 0) |
|
goto fail; |
|
} |
|
|
|
best_effort_timestamp= decoded_frame->best_effort_timestamp; |
|
*duration_pts = decoded_frame->duration; |
|
|
|
if (ist->framerate.num) |
|
best_effort_timestamp = ist->cfr_next_pts++; |
|
|
|
if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) { |
|
best_effort_timestamp = ist->dts_buffer[0]; |
|
|
|
for (i = 0; i < ist->nb_dts_buffer - 1; i++) |
|
ist->dts_buffer[i] = ist->dts_buffer[i + 1]; |
|
ist->nb_dts_buffer--; |
|
} |
|
|
|
if(best_effort_timestamp != AV_NOPTS_VALUE) { |
|
int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q); |
|
|
|
if (ts != AV_NOPTS_VALUE) |
|
ist->next_pts = ist->pts = ts; |
|
} |
|
|
|
if (debug_ts) { |
|
av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video " |
|
"frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n", |
|
ist->st->index, av_ts2str(decoded_frame->pts), |
|
av_ts2timestr(decoded_frame->pts, &ist->st->time_base), |
|
best_effort_timestamp, |
|
av_ts2timestr(best_effort_timestamp, &ist->st->time_base), |
|
decoded_frame->key_frame, decoded_frame->pict_type, |
|
ist->st->time_base.num, ist->st->time_base.den); |
|
} |
|
|
|
if (ist->st->sample_aspect_ratio.num) |
|
decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio; |
|
|
|
err = send_frame_to_filters(ist, decoded_frame); |
|
|
|
fail: |
|
av_frame_unref(decoded_frame); |
|
return err < 0 ? err : ret; |
|
} |
|
|
|
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output, |
|
int *decode_failed) |
|
{ |
|
AVSubtitle subtitle; |
|
int free_sub = 1; |
|
int i, ret = avcodec_decode_subtitle2(ist->dec_ctx, |
|
&subtitle, got_output, pkt); |
|
|
|
check_decode_result(NULL, got_output, ret); |
|
|
|
if (ret < 0 || !*got_output) { |
|
*decode_failed = 1; |
|
if (!pkt->size) |
|
sub2video_flush(ist); |
|
return ret; |
|
} |
|
|
|
if (ist->fix_sub_duration) { |
|
int end = 1; |
|
if (ist->prev_sub.got_output) { |
|
end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts, |
|
1000, AV_TIME_BASE); |
|
if (end < ist->prev_sub.subtitle.end_display_time) { |
|
av_log(NULL, AV_LOG_DEBUG, |
|
"Subtitle duration reduced from %"PRId32" to %d%s\n", |
|
ist->prev_sub.subtitle.end_display_time, end, |
|
end <= 0 ? ", dropping it" : ""); |
|
ist->prev_sub.subtitle.end_display_time = end; |
|
} |
|
} |
|
FFSWAP(int, *got_output, ist->prev_sub.got_output); |
|
FFSWAP(int, ret, ist->prev_sub.ret); |
|
FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle); |
|
if (end <= 0) |
|
goto out; |
|
} |
|
|
|
if (!*got_output) |
|
return ret; |
|
|
|
if (ist->sub2video.frame) { |
|
sub2video_update(ist, INT64_MIN, &subtitle); |
|
} else if (ist->nb_filters) { |
|
if (!ist->sub2video.sub_queue) |
|
ist->sub2video.sub_queue = av_fifo_alloc2(8, sizeof(AVSubtitle), AV_FIFO_FLAG_AUTO_GROW); |
|
if (!ist->sub2video.sub_queue) |
|
exit_program(1); |
|
|
|
ret = av_fifo_write(ist->sub2video.sub_queue, &subtitle, 1); |
|
if (ret < 0) |
|
exit_program(1); |
|
free_sub = 0; |
|
} |
|
|
|
if (!subtitle.num_rects) |
|
goto out; |
|
|
|
ist->frames_decoded++; |
|
|
|
for (i = 0; i < nb_output_streams; i++) { |
|
OutputStream *ost = output_streams[i]; |
|
|
|
if (!check_output_constraints(ist, ost) || !ost->enc_ctx |
|
|| ost->enc->type != AVMEDIA_TYPE_SUBTITLE) |
|
continue; |
|
|
|
do_subtitle_out(output_files[ost->file_index], ost, &subtitle); |
|
} |
|
|
|
out: |
|
if (free_sub) |
|
avsubtitle_free(&subtitle); |
|
return ret; |
|
} |
|
|
|
static int send_filter_eof(InputStream *ist) |
|
{ |
|
int i, ret; |
|
/* TODO keep pts also in stream time base to avoid converting back */ |
|
int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base, |
|
AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX); |
|
|
|
for (i = 0; i < ist->nb_filters; i++) { |
|
ret = ifilter_send_eof(ist->filters[i], pts); |
|
if (ret < 0) |
|
return ret; |
|
} |
|
return 0; |
|
} |
|
|
|
/* pkt = NULL means EOF (needed to flush decoder buffers) */ |
|
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof) |
|
{ |
|
const AVCodecParameters *par = ist->st->codecpar; |
|
int ret = 0, i; |
|
int repeating = 0; |
|
int eof_reached = 0; |
|
|
|
AVPacket *avpkt = ist->pkt; |
|
|
|
if (!ist->saw_first_ts) { |
|
ist->first_dts = |
|
ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0; |
|
ist->pts = 0; |
|
if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) { |
|
ist->first_dts = |
|
ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q); |
|
ist->pts = ist->dts; // unused, but better to set it to a value that's not totally wrong |
|
} |
|
ist->saw_first_ts = 1; |
|
} |
|
|
|
if (ist->next_dts == AV_NOPTS_VALUE) |
|
ist->next_dts = ist->dts; |
|
if (ist->next_pts == AV_NOPTS_VALUE) |
|
ist->next_pts = ist->pts; |
|
|
|
if (pkt) { |
|
av_packet_unref(avpkt); |
|
ret = av_packet_ref(avpkt, pkt); |
|
if (ret < 0) |
|
return ret; |
|
} |
|
|
|
if (pkt && pkt->dts != AV_NOPTS_VALUE) { |
|
ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q); |
|
if (par->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed) |
|
ist->next_pts = ist->pts = ist->dts; |
|
} |
|
|
|
// while we have more to decode or while the decoder did output something on EOF |
|
while (ist->decoding_needed) { |
|
int64_t duration_dts = 0; |
|
int64_t duration_pts = 0; |
|
int got_output = 0; |
|
int decode_failed = 0; |
|
|
|
ist->pts = ist->next_pts; |
|
ist->dts = ist->next_dts; |
|
|
|
switch (par->codec_type) { |
|
case AVMEDIA_TYPE_AUDIO: |
|
ret = decode_audio (ist, repeating ? NULL : avpkt, &got_output, |
|
&decode_failed); |
|
av_packet_unref(avpkt); |
|
break; |
|
case AVMEDIA_TYPE_VIDEO: |
|
ret = decode_video (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt, |
|
&decode_failed); |
|
if (!repeating || !pkt || got_output) { |
|
if (pkt && pkt->duration) { |
|
duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q); |
|
} else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) { |
|
int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame; |
|
duration_dts = ((int64_t)AV_TIME_BASE * |
|
ist->dec_ctx->framerate.den * ticks) / |
|
ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame; |
|
} |
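
/* Illustrative numbers for the fallback above (assumed values): with a |

 * decoder framerate of 25/1, ticks_per_frame 1 and ticks 1 this gives |

 * 1000000 * 1 * 1 / 25 / 1 = 40000 us, i.e. 40 ms per frame; a field-based |

 * codec with ticks_per_frame 2 and repeat_pict 1 (ticks 2) yields the same. */ |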
|
|
|
if(ist->dts != AV_NOPTS_VALUE && duration_dts) { |
|
ist->next_dts += duration_dts; |
|
}else |
|
ist->next_dts = AV_NOPTS_VALUE; |
|
} |
|
|
|
if (got_output) { |
|
if (duration_pts > 0) { |
|
ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q); |
|
} else { |
|
ist->next_pts += duration_dts; |
|
} |
|
} |
|
av_packet_unref(avpkt); |
|
break; |
|
case AVMEDIA_TYPE_SUBTITLE: |
|
if (repeating) |
|
break; |
|
ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed); |
|
if (!pkt && ret >= 0) |
|
ret = AVERROR_EOF; |
|
av_packet_unref(avpkt); |
|
break; |
|
default: |
|
return -1; |
|
} |
|
|
|
if (ret == AVERROR_EOF) { |
|
eof_reached = 1; |
|
break; |
|
} |
|
|
|
if (ret < 0) { |
|
if (decode_failed) { |
|
av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n", |
|
ist->file_index, ist->st->index, av_err2str(ret)); |
|
} else { |
|
av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded " |
|
"data for stream #%d:%d\n", ist->file_index, ist->st->index); |
|
} |
|
if (!decode_failed || exit_on_error) |
|
exit_program(1); |
|
break; |
|
} |
|
|
|
if (got_output) |
|
ist->got_output = 1; |
|
|
|
if (!got_output) |
|
break; |
|
|
|
// During draining, we might get multiple output frames in this loop. |
|
// ffmpeg.c does not drain the filter chain on configuration changes, |
|
// which means if we send multiple frames at once to the filters, and |
|
// one of those frames changes configuration, the buffered frames will |
|
// be lost. This can upset certain FATE tests. |
|
// Decode only 1 frame per call on EOF to appease these FATE tests. |
|
// The ideal solution would be to rewrite decoding to use the new |
|
// decoding API in a better way. |
|
if (!pkt) |
|
break; |
|
|
|
repeating = 1; |
|
} |
|
|
|
/* after flushing, send an EOF on all the filter inputs attached to the stream */ |
|
/* except when looping, where we need to flush but not send an EOF */ |
|
if (!pkt && ist->decoding_needed && eof_reached && !no_eof) { |
|
int ret = send_filter_eof(ist); |
|
if (ret < 0) { |
|
av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n"); |
|
exit_program(1); |
|
} |
|
} |
|
|
|
/* handle stream copy */ |
|
if (!ist->decoding_needed && pkt) { |
|
ist->dts = ist->next_dts; |
|
switch (par->codec_type) { |
|
case AVMEDIA_TYPE_AUDIO: |
|
av_assert1(pkt->duration >= 0); |
|
if (par->sample_rate) { |
|
ist->next_dts += ((int64_t)AV_TIME_BASE * par->frame_size) / |
|
par->sample_rate; |
|
} else { |
|
ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q); |
|
} |
|
break; |
|
case AVMEDIA_TYPE_VIDEO: |
|
if (ist->framerate.num) { |
|
// TODO: Remove work-around for c99-to-c89 issue 7 |
|
AVRational time_base_q = AV_TIME_BASE_Q; |
|
int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate)); |
|
ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q); |
|
} else if (pkt->duration) { |
|
ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q); |
|
} else if(ist->dec_ctx->framerate.num != 0) { |
|
int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame; |
|
ist->next_dts += ((int64_t)AV_TIME_BASE * |
|
ist->dec_ctx->framerate.den * ticks) / |
|
ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame; |
|
} |
|
break; |
|
} |
|
ist->pts = ist->dts; |
|
ist->next_pts = ist->next_dts; |
|
} else if (!ist->decoding_needed) |
|
eof_reached = 1; |
|
|
|
for (i = 0; i < nb_output_streams; i++) { |
|
OutputStream *ost = output_streams[i]; |
|
|
|
if (!check_output_constraints(ist, ost) || ost->enc_ctx || |
|
(!pkt && no_eof)) |
|
continue; |
|
|
|
do_streamcopy(ist, ost, pkt); |
|
} |
|
|
|
return !eof_reached; |
|
} |
|
|
|
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts) |
|
{ |
|
InputStream *ist = s->opaque; |
|
const enum AVPixelFormat *p; |
|
int ret; |
|
|
|
for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) { |
|
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p); |
|
const AVCodecHWConfig *config = NULL; |
|
int i; |
|
|
|
if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) |
|
break; |
|
|
|
if (ist->hwaccel_id == HWACCEL_GENERIC || |
|
ist->hwaccel_id == HWACCEL_AUTO) { |
|
for (i = 0;; i++) { |
|
config = avcodec_get_hw_config(s->codec, i); |
|
if (!config) |
|
break; |
|
if (!(config->methods & |
|
AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX)) |
|
continue; |
|
if (config->pix_fmt == *p) |
|
break; |
|
} |
|
} |
|
if (config && config->device_type == ist->hwaccel_device_type) { |
|
ret = hwaccel_decode_init(s); |
|
if (ret < 0) { |
|
if (ist->hwaccel_id == HWACCEL_GENERIC) { |
|
av_log(NULL, AV_LOG_FATAL, |
|
"%s hwaccel requested for input stream #%d:%d, " |
|
"but cannot be initialized.\n", |
|
av_hwdevice_get_type_name(config->device_type), |
|
ist->file_index, ist->st->index); |
|
return AV_PIX_FMT_NONE; |
|
} |
|
continue; |
|
} |
|
|
|
ist->hwaccel_pix_fmt = *p; |
|
break; |
|
} |
|
} |
|
|
|
return *p; |
|
} |
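
/* A minimal illustration of the negotiation contract assumed above: the |

 * decoder passes get_format() a list terminated by AV_PIX_FMT_NONE, hardware |

 * formats typically first, e.g. |

 * |

 *     enum AVPixelFormat fmts[] = { AV_PIX_FMT_VAAPI, AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }; |

 *     // conceptually: chosen = avctx->get_format(avctx, fmts); |

 * |

 * The loop above returns the first hwaccel format for which a device can be |

 * initialized; if none works it falls back to the first software format in |

 * the list (the break on a non-AV_PIX_FMT_FLAG_HWACCEL entry). */ |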
|
|
|
static int init_input_stream(int ist_index, char *error, int error_len) |
|
{ |
|
int ret; |
|
InputStream *ist = input_streams[ist_index]; |
|
|
|
if (ist->decoding_needed) { |
|
const AVCodec *codec = ist->dec; |
|
if (!codec) { |
|
snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d", |
|
avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index); |
|
return AVERROR(EINVAL); |
|
} |
|
|
|
ist->dec_ctx->opaque = ist; |
|
ist->dec_ctx->get_format = get_format; |
|
#if LIBAVCODEC_VERSION_MAJOR < 60 |
|
AV_NOWARN_DEPRECATED({ |
|
ist->dec_ctx->thread_safe_callbacks = 1; |
|
}) |
|
#endif |
|
|
|
if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE && |
|
(ist->decoding_needed & DECODING_FOR_OST)) { |
|
av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE); |
|
if (ist->decoding_needed & DECODING_FOR_FILTER) |
|
av_log(NULL, AV_LOG_WARNING, "Warning: using DVB subtitles for filtering and output at the same time is not fully supported; also see -compute_edt [0|1]\n"); |
|
} |
|
|
|
/* Useful for subtitles retiming by lavf (FIXME), skipping samples in |
|
* audio, and video decoders such as cuvid or mediacodec */ |
|
ist->dec_ctx->pkt_timebase = ist->st->time_base; |
|
|
|
if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0)) |
|
av_dict_set(&ist->decoder_opts, "threads", "auto", 0); |
|
/* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */ |
|
if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC) |
|
av_dict_set(&ist->decoder_opts, "threads", "1", 0); |
|
|
|
ret = hw_device_setup_for_decode(ist); |
|
if (ret < 0) { |
|
snprintf(error, error_len, "Device setup failed for " |
|
"decoder on input stream #%d:%d : %s", |
|
ist->file_index, ist->st->index, av_err2str(ret)); |
|
return ret; |
|
} |
|
|
|
if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) { |
|
if (ret == AVERROR_EXPERIMENTAL) |
|
abort_codec_experimental(codec, 0); |
|
|
|
snprintf(error, error_len, |
|
"Error while opening decoder for input stream " |
|
"#%d:%d : %s", |
|
ist->file_index, ist->st->index, av_err2str(ret)); |
|
return ret; |
|
} |
|
assert_avoptions(ist->decoder_opts); |
|
} |
|
|
|
ist->next_pts = AV_NOPTS_VALUE; |
|
ist->next_dts = AV_NOPTS_VALUE; |
|
|
|
return 0; |
|
} |
|
|
|
static InputStream *get_input_stream(OutputStream *ost) |
|
{ |
|
if (ost->source_index >= 0) |
|
return input_streams[ost->source_index]; |
|
return NULL; |
|
} |
|
|
|
static int compare_int64(const void *a, const void *b) |
|
{ |
|
return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b); |
|
} |
|
|
|
static int init_output_bsfs(OutputStream *ost) |
|
{ |
|
AVBSFContext *ctx = ost->bsf_ctx; |
|
int ret; |
|
|
|
if (!ctx) |
|
return 0; |
|
|
|
ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar); |
|
if (ret < 0) |
|
return ret; |
|
|
|
ctx->time_base_in = ost->st->time_base; |
|
|
|
ret = av_bsf_init(ctx); |
|
if (ret < 0) { |
|
av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n", |
|
ctx->filter->name); |
|
return ret; |
|
} |
|
|
|
ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out); |
|
if (ret < 0) |
|
return ret; |
|
ost->st->time_base = ctx->time_base_out; |
|
|
|
return 0; |
|
} |
|
|
|
static int init_output_stream_streamcopy(OutputStream *ost) |
|
{ |
|
OutputFile *of = output_files[ost->file_index]; |
|
InputStream *ist = get_input_stream(ost); |
|
InputFile *ifile = input_files[ist->file_index]; |
|
AVCodecParameters *par = ost->st->codecpar; |
|
AVCodecContext *codec_ctx; |
|
AVRational sar; |
|
int i, ret; |
|
uint32_t codec_tag = par->codec_tag; |
|
|
|
av_assert0(ist && !ost->filter); |
|
|
|
codec_ctx = avcodec_alloc_context3(NULL); |
|
if (!codec_ctx) |
|
return AVERROR(ENOMEM); |
|
|
|
ret = avcodec_parameters_to_context(codec_ctx, ist->st->codecpar); |
|
if (ret >= 0) |
|
ret = av_opt_set_dict(codec_ctx, &ost->encoder_opts); |
|
if (ret < 0) { |
|
av_log(NULL, AV_LOG_FATAL, |
|
"Error setting up codec context options.\n"); |
|
avcodec_free_context(&codec_ctx); |
|
return ret; |
|
} |
|
|
|
ret = avcodec_parameters_from_context(par, codec_ctx); |
|
avcodec_free_context(&codec_ctx); |
|
if (ret < 0) { |
|
av_log(NULL, AV_LOG_FATAL, |
|
"Error getting reference codec parameters.\n"); |
|
return ret; |
|
} |
|
|
|
if (!codec_tag) { |
|
unsigned int codec_tag_tmp; |
|
if (!of->format->codec_tag || |
|
av_codec_get_id (of->format->codec_tag, par->codec_tag) == par->codec_id || |
|
!av_codec_get_tag2(of->format->codec_tag, par->codec_id, &codec_tag_tmp)) |
|
codec_tag = par->codec_tag; |
|
} |
|
|
|
par->codec_tag = codec_tag; |
|
|
|
if (!ost->frame_rate.num) |
|
ost->frame_rate = ist->framerate; |
|
|
|
if (ost->frame_rate.num) |
|
ost->st->avg_frame_rate = ost->frame_rate; |
|
else |
|
ost->st->avg_frame_rate = ist->st->avg_frame_rate; |
|
|
|
ret = avformat_transfer_internal_stream_timing_info(of->format, ost->st, ist->st, copy_tb); |
|
if (ret < 0) |
|
return ret; |
|
|
|
// copy timebase while removing common factors |
|
if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) { |
|
if (ost->frame_rate.num) |
|
ost->st->time_base = av_inv_q(ost->frame_rate); |
|
else |
|
ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1}); |
|
} |
|
|
|
// copy estimated duration as a hint to the muxer |
|
if (ost->st->duration <= 0 && ist->st->duration > 0) |
|
ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base); |
|
|
|
if (!ost->copy_prior_start) { |
|
ost->ts_copy_start = (of->start_time == AV_NOPTS_VALUE) ? |
|
0 : of->start_time; |
|
if (copy_ts && ifile->start_time != AV_NOPTS_VALUE) { |
|
ost->ts_copy_start = FFMAX(ost->ts_copy_start, |
|
ifile->start_time + ifile->ts_offset); |
|
} |
|
} |
|
|
|
if (ist->st->nb_side_data) { |
|
for (i = 0; i < ist->st->nb_side_data; i++) { |
|
const AVPacketSideData *sd_src = &ist->st->side_data[i]; |
|
uint8_t *dst_data; |
|
|
|
dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size); |
|
if (!dst_data) |
|
return AVERROR(ENOMEM); |
|
memcpy(dst_data, sd_src->data, sd_src->size); |
|
} |
|
} |
|
|
|
if (ost->rotate_overridden) { |
|
uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX, |
|
sizeof(int32_t) * 9); |
|
if (sd) |
|
av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value); |
|
} |
|
|
|
switch (par->codec_type) { |
|
case AVMEDIA_TYPE_AUDIO: |
|
if ((par->block_align == 1 || par->block_align == 1152 || par->block_align == 576) && |
|
par->codec_id == AV_CODEC_ID_MP3) |
|
par->block_align = 0; |
|
if (par->codec_id == AV_CODEC_ID_AC3) |
|
par->block_align = 0; |
|
break; |
|
case AVMEDIA_TYPE_VIDEO: |
|
if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option |
|
sar = |
|
av_mul_q(ost->frame_aspect_ratio, |
|
(AVRational){ par->height, par->width }); |
|
av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio " |
|
"with stream copy may produce invalid files\n"); |
|
} |
|
else if (ist->st->sample_aspect_ratio.num) |
|
sar = ist->st->sample_aspect_ratio; |
|
else |
|
sar = par->sample_aspect_ratio; |
|
ost->st->sample_aspect_ratio = par->sample_aspect_ratio = sar; |
|
ost->st->avg_frame_rate = ist->st->avg_frame_rate; |
|
ost->st->r_frame_rate = ist->st->r_frame_rate; |
|
break; |
|
} |
|
|
|
ost->mux_timebase = ist->st->time_base; |
|
|
|
return 0; |
|
} |
|
|
|
static void set_encoder_id(OutputFile *of, OutputStream *ost) |
|
{ |
|
uint8_t *encoder_string; |
|
int encoder_string_len; |
|
|
|
if (av_dict_get(ost->st->metadata, "encoder", NULL, 0)) |
|
return; |
|
|
|
encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2; |
|
encoder_string = av_mallocz(encoder_string_len); |
|
if (!encoder_string) |
|
exit_program(1); |
|
|
|
if (!of->bitexact && !ost->bitexact) |
|
av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len); |
|
else |
|
av_strlcpy(encoder_string, "Lavc ", encoder_string_len); |
|
av_strlcat(encoder_string, ost->enc->name, encoder_string_len); |
|
av_dict_set(&ost->st->metadata, "encoder", encoder_string, |
|
AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE); |
|
} |
|
|
|
static void parse_forced_key_frames(char *kf, OutputStream *ost, |
|
AVCodecContext *avctx) |
|
{ |
|
char *p; |
|
int n = 1, i, size, index = 0; |
|
int64_t t, *pts; |
|
|
|
for (p = kf; *p; p++) |
|
if (*p == ',') |
|
n++; |
|
size = n; |
|
pts = av_malloc_array(size, sizeof(*pts)); |
|
if (!pts) { |
|
av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n"); |
|
exit_program(1); |
|
} |
|
|
|
p = kf; |
|
for (i = 0; i < n; i++) { |
|
char *next = strchr(p, ','); |
|
|
|
if (next) |
|
*next++ = 0; |
|
|
|
if (!memcmp(p, "chapters", 8)) { |
|
OutputFile *of = output_files[ost->file_index]; |
|
AVChapter * const *ch; |
|
unsigned int nb_ch; |
|
int j; |
|
|
|
ch = of_get_chapters(of, &nb_ch); |
|
|
|
if (nb_ch > INT_MAX - size || |
|
!(pts = av_realloc_f(pts, size += nb_ch - 1, |
|
sizeof(*pts)))) { |
|
av_log(NULL, AV_LOG_FATAL, |
|
"Could not allocate forced key frames array.\n"); |
|
exit_program(1); |
|
} |
|
t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0; |
|
t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base); |
|
|
|
for (j = 0; j < nb_ch; j++) { |
|
const AVChapter *c = ch[j]; |
|
av_assert1(index < size); |
|
pts[index++] = av_rescale_q(c->start, c->time_base, |
|
avctx->time_base) + t; |
|
} |
|
|
|
} else { |
|
|
|
t = parse_time_or_die("force_key_frames", p, 1); |
|
av_assert1(index < size); |
|
pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base); |
|
|
|
} |
|
|
|
p = next; |
|
} |
|
|
|
av_assert0(index == size); |
|
qsort(pts, size, sizeof(*pts), compare_int64); |
|
ost->forced_kf_count = size; |
|
ost->forced_kf_pts = pts; |
|
} |
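
/* Examples of -force_key_frames values handled by the parser above (the |

 * exact option syntax is described in the ffmpeg documentation): |

 * |

 *     -force_key_frames 0:00:05,0:00:10,0:00:15   keyframes at 5, 10 and 15 seconds |

 *     -force_key_frames chapters-0.1              keyframes 0.1 s before each chapter start |

 * |

 * Times are parsed with parse_time_or_die(), rescaled into the encoder time |

 * base and sorted, so the video encoding path can walk the list in order. */ |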
|
|
|
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base) |
|
{ |
|
InputStream *ist = get_input_stream(ost); |
|
AVCodecContext *enc_ctx = ost->enc_ctx; |
|
|
|
if (ost->enc_timebase.num > 0) { |
|
enc_ctx->time_base = ost->enc_timebase; |
|
return; |
|
} |
|
|
|
if (ost->enc_timebase.num < 0) { |
|
if (ist) { |
|
enc_ctx->time_base = ist->st->time_base; |
|
return; |
|
} |
|
|
|
av_log(NULL, AV_LOG_WARNING, |
|
"Input stream data for output stream #%d:%d not available, " |
|
"using default time base\n", ost->file_index, ost->index); |
|
} |
|
|
|
enc_ctx->time_base = default_time_base; |
|
} |
|
|
|
static int init_output_stream_encode(OutputStream *ost, AVFrame *frame) |
|
{ |
|
InputStream *ist = get_input_stream(ost); |
|
AVCodecContext *enc_ctx = ost->enc_ctx; |
|
AVCodecContext *dec_ctx = NULL; |
|
OutputFile *of = output_files[ost->file_index]; |
|
int ret; |
|
|
|
set_encoder_id(output_files[ost->file_index], ost); |
|
|
|
if (ist) { |
|
dec_ctx = ist->dec_ctx; |
|
} |
|
|
|
if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) { |
|
if (!ost->frame_rate.num) |
|
ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter); |
|
if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) { |
|
ost->frame_rate = (AVRational){25, 1}; |
|
av_log(NULL, AV_LOG_WARNING, |
|
"No information " |
|
"about the input framerate is available. Falling " |
|
"back to a default value of 25fps for output stream #%d:%d. Use the -r option " |
|
"if you want a different framerate.\n", |
|
ost->file_index, ost->index); |
|
} |
|
|
|
if (ost->max_frame_rate.num && |
|
(av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) || |
|
!ost->frame_rate.den)) |
|
ost->frame_rate = ost->max_frame_rate; |
|
|
|
if (ost->enc->supported_framerates && !ost->force_fps) { |
|
int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates); |
|
ost->frame_rate = ost->enc->supported_framerates[idx]; |
|
} |
|
// reduce frame rate for mpeg4 to be within the spec limits |
|
if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) { |
|
av_reduce(&ost->frame_rate.num, &ost->frame_rate.den, |
|
ost->frame_rate.num, ost->frame_rate.den, 65535); |
|
} |
|
} |
|
|
|
switch (enc_ctx->codec_type) { |
|
case AVMEDIA_TYPE_AUDIO: |
|
enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter); |
|
enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter); |
|
ret = av_buffersink_get_ch_layout(ost->filter->filter, &enc_ctx->ch_layout); |
|
if (ret < 0) |
|
return ret; |
|
|
|
if (ost->bits_per_raw_sample) |
|
enc_ctx->bits_per_raw_sample = ost->bits_per_raw_sample; |
|
else if (dec_ctx && ost->filter->graph->is_meta) |
|
enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample, |
|
av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3); |
|
|
|
init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate)); |
|
break; |
|
|
|
case AVMEDIA_TYPE_VIDEO: |
|
init_encoder_time_base(ost, av_inv_q(ost->frame_rate)); |
|
|
|
if (!(enc_ctx->time_base.num && enc_ctx->time_base.den)) |
|
enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter); |
|
if ( av_q2d(enc_ctx->time_base) < 0.001 && ost->vsync_method != VSYNC_PASSTHROUGH |
|
&& (ost->vsync_method == VSYNC_CFR || ost->vsync_method == VSYNC_VSCFR || |
|
(ost->vsync_method == VSYNC_AUTO && !(of->format->flags & AVFMT_VARIABLE_FPS)))){ |
|
av_log(NULL, AV_LOG_WARNING, "Frame rate is very high for a muxer that does not handle it efficiently.\n" |

"Please consider specifying a lower framerate, a different muxer, or " |

"setting vsync/fps_mode to vfr\n"); |
|
} |
|
|
|
enc_ctx->width = av_buffersink_get_w(ost->filter->filter); |
|
enc_ctx->height = av_buffersink_get_h(ost->filter->filter); |
|
enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio = |
|
ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option |
|
av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) : |
|
av_buffersink_get_sample_aspect_ratio(ost->filter->filter); |
|
|
|
enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter); |
|
|
|
if (ost->bits_per_raw_sample) |
|
enc_ctx->bits_per_raw_sample = ost->bits_per_raw_sample; |
|
else if (dec_ctx && ost->filter->graph->is_meta) |
|
enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample, |
|
av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth); |
|
|
|
if (frame) { |
|
enc_ctx->color_range = frame->color_range; |
|
enc_ctx->color_primaries = frame->color_primaries; |
|
enc_ctx->color_trc = frame->color_trc; |
|
enc_ctx->colorspace = frame->colorspace; |
|
enc_ctx->chroma_sample_location = frame->chroma_location; |
|
} |
|
|
|
enc_ctx->framerate = ost->frame_rate; |
|
|
|
ost->st->avg_frame_rate = ost->frame_rate; |
|
|
|
// Field order: autodetection |
|
if (frame) { |
|
if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) && |
|
ost->top_field_first >= 0) |
|
frame->top_field_first = !!ost->top_field_first; |
|
|
|
if (frame->interlaced_frame) { |
|
if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG) |
|
enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB; |
|
else |
|
enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT; |
|
} else |
|
enc_ctx->field_order = AV_FIELD_PROGRESSIVE; |
|
} |
|
|
|
// Field order: override |
|
if (ost->top_field_first == 0) { |
|
enc_ctx->field_order = AV_FIELD_BB; |
|
} else if (ost->top_field_first == 1) { |
|
enc_ctx->field_order = AV_FIELD_TT; |
|
} |
|
|
|
if (ost->forced_keyframes) { |
|
if (!strncmp(ost->forced_keyframes, "expr:", 5)) { |
|
ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5, |
|
forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL); |
|
if (ret < 0) { |
|
av_log(NULL, AV_LOG_ERROR, |
|
"Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5); |
|
return ret; |
|
} |
|
ost->forced_keyframes_expr_const_values[FKF_N] = 0; |
|
ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0; |
|
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN; |
|
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN; |
|
|
|
// Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes', |
|
// parse it only for static kf timings |
|
} else if(strncmp(ost->forced_keyframes, "source", 6)) { |
|
parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx); |
|
} |
|
} |
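
/* Example for the expr: form handled above, using the documented constants |

 * (n, n_forced, prev_forced_n, prev_forced_t, t): |

 * |

 *     -force_key_frames expr:gte(t,n_forced*5) |

 * |

 * forces a keyframe every 5 seconds: the expression is evaluated per frame |

 * and a keyframe is requested whenever it evaluates to non-zero. */ |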
|
break; |
|
case AVMEDIA_TYPE_SUBTITLE: |
|
enc_ctx->time_base = AV_TIME_BASE_Q; |
|
if (!enc_ctx->width) { |
|
enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width; |
|
enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height; |
|
} |
|
break; |
|
case AVMEDIA_TYPE_DATA: |
|
break; |
|
default: |
|
abort(); |
|
break; |
|
} |
|
|
|
if (ost->bitexact) |
|
enc_ctx->flags |= AV_CODEC_FLAG_BITEXACT; |
|
|
|
if (ost->sq_idx_encode >= 0) |
|
sq_set_tb(of->sq_encode, ost->sq_idx_encode, enc_ctx->time_base); |
|
|
|
ost->mux_timebase = enc_ctx->time_base; |
|
|
|
return 0; |
|
} |
|
|
|
static int init_output_stream(OutputStream *ost, AVFrame *frame, |
|
char *error, int error_len) |
|
{ |
|
OutputFile *of = output_files[ost->file_index]; |
|
int ret = 0; |
|
|
|
if (ost->enc_ctx) { |
|
const AVCodec *codec = ost->enc; |
|
AVCodecContext *dec = NULL; |
|
InputStream *ist; |
|
|
|
ret = init_output_stream_encode(ost, frame); |
|
if (ret < 0) |
|
return ret; |
|
|
|
if ((ist = get_input_stream(ost))) |
|
dec = ist->dec_ctx; |
|
if (dec && dec->subtitle_header) { |
|
/* The ASS code assumes this buffer is null-terminated, so add an extra byte. */ |
|
ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1); |
|
if (!ost->enc_ctx->subtitle_header) |
|
return AVERROR(ENOMEM); |
|
memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size); |
|
ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size; |
|
} |
|
if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0)) |
|
av_dict_set(&ost->encoder_opts, "threads", "auto", 0); |
|
|
|
ret = hw_device_setup_for_encode(ost); |
|
if (ret < 0) { |
|
snprintf(error, error_len, "Device setup failed for " |
|
"encoder on output stream #%d:%d : %s", |
|
ost->file_index, ost->index, av_err2str(ret)); |
|
return ret; |
|
} |
|
|
|
if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) { |
|
int input_props = 0, output_props = 0; |
|
AVCodecDescriptor const *input_descriptor = |
|
avcodec_descriptor_get(dec->codec_id); |
|
AVCodecDescriptor const *output_descriptor = |
|
avcodec_descriptor_get(ost->enc_ctx->codec_id); |
|
if (input_descriptor) |
|
input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB); |
|
if (output_descriptor) |
|
output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB); |
|
if (input_props && output_props && input_props != output_props) { |
|
snprintf(error, error_len, |
|
"Subtitle encoding currently only possible from text to text " |
|
"or bitmap to bitmap"); |
|
return AVERROR_INVALIDDATA; |
|
} |
|
} |
|
|
|
if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) { |
|
if (ret == AVERROR_EXPERIMENTAL) |
|
abort_codec_experimental(codec, 1); |
|
snprintf(error, error_len, |
|
"Error while opening encoder for output stream #%d:%d - " |
|
"maybe incorrect parameters such as bit_rate, rate, width or height", |
|
ost->file_index, ost->index); |
|
return ret; |
|
} |
|
if (ost->enc->type == AVMEDIA_TYPE_AUDIO && |
|
!(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) |
|
av_buffersink_set_frame_size(ost->filter->filter, |
|
ost->enc_ctx->frame_size); |
|
assert_avoptions(ost->encoder_opts); |
|
if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 && |
|
ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */) |
|
av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low." |
|
" It takes bits/s as argument, not kbits/s\n"); |
|
|
|
ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx); |
|
if (ret < 0) { |
|
av_log(NULL, AV_LOG_FATAL, |
|
"Error initializing the output stream codec context.\n"); |
|
exit_program(1); |
|
} |
|
|
|
if (ost->enc_ctx->nb_coded_side_data) { |
|
int i; |
|
|
|
for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) { |
|
const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i]; |
|
uint8_t *dst_data; |
|
|
|
dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size); |
|
if (!dst_data) |
|
return AVERROR(ENOMEM); |
|
memcpy(dst_data, sd_src->data, sd_src->size); |
|
} |
|
} |
|
|
|
/* |
|
* Add global input side data. For now this is naive, and copies it |
|
* from the input stream's global side data. All side data should |
|
* really be funneled over AVFrame and libavfilter, then added back to |
|
* packet side data, and then potentially using the first packet for |
|
* global side data. |
|
*/ |
|
if (ist) { |
|
int i; |
|
for (i = 0; i < ist->st->nb_side_data; i++) { |
|
AVPacketSideData *sd = &ist->st->side_data[i]; |
|
if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) { |
|
uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size); |
|
if (!dst) |
|
return AVERROR(ENOMEM); |
|
memcpy(dst, sd->data, sd->size); |
|
if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX) |
|
av_display_rotation_set((uint32_t *)dst, 0); |
|
} |
|
} |
|
} |
|
|
|
// copy timebase while removing common factors |
|
if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) |
|
ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1}); |
|
|
|
// copy estimated duration as a hint to the muxer |
|
if (ost->st->duration <= 0 && ist && ist->st->duration > 0) |
|
ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base); |
|
} else if (ost->source_index >= 0) { |
|
ret = init_output_stream_streamcopy(ost); |
|
if (ret < 0) |
|
return ret; |
|
} |
|
|
|
/* initialize bitstream filters for the output stream |
|
* needs to be done here, because the codec id for streamcopy is not |
|
* known until now */ |
|
ret = init_output_bsfs(ost); |
|
if (ret < 0) |
|
return ret; |
|
|
|
if (ost->sq_idx_mux >= 0) |
|
sq_set_tb(of->sq_mux, ost->sq_idx_mux, ost->mux_timebase); |
|
|
|
ost->initialized = 1; |
|
|
|
ret = of_check_init(output_files[ost->file_index]); |
|
if (ret < 0) |
|
return ret; |
|
|
|
return ret; |
|
} |
|
|
|
static int transcode_init(void) |
|
{ |
|
int ret = 0, i, j, k; |
|
OutputStream *ost; |
|
InputStream *ist; |
|
char error[1024] = {0}; |
|
|
|
for (i = 0; i < nb_filtergraphs; i++) { |
|
FilterGraph *fg = filtergraphs[i]; |
|
for (j = 0; j < fg->nb_outputs; j++) { |
|
OutputFilter *ofilter = fg->outputs[j]; |
|
if (!ofilter->ost || ofilter->ost->source_index >= 0) |
|
continue; |
|
if (fg->nb_inputs != 1) |
|
continue; |
|
for (k = nb_input_streams-1; k >= 0 ; k--) |
|
if (fg->inputs[0]->ist == input_streams[k]) |
|
break; |
|
ofilter->ost->source_index = k; |
|
} |
|
} |
|
|
|
/* init framerate emulation */ |
|
for (i = 0; i < nb_input_files; i++) { |
|
InputFile *ifile = input_files[i]; |
|
if (ifile->readrate || ifile->rate_emu) |
|
for (j = 0; j < ifile->nb_streams; j++) |
|
input_streams[j + ifile->ist_index]->start = av_gettime_relative(); |
|
} |
|
|
|
// Correct starttime based on the enabled streams |
|
for (i = 0; i < nb_input_files; i++) { |
|
InputFile *ifile = input_files[i]; |
|
AVFormatContext *is = ifile->ctx; |
|
int64_t new_start_time = INT64_MAX; |
|
|
|
if (is->start_time == AV_NOPTS_VALUE || |
|
!(is->iformat->flags & AVFMT_TS_DISCONT)) |
|
continue; |
|
|
|
for (int j = 0; j < is->nb_streams; j++) { |
|
AVStream *st = is->streams[j]; |
|
if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE) |
|
continue; |
|
new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q)); |
|
} |
|
if (new_start_time > is->start_time) { |
|
av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time); |
|
ifile->ts_offset = -new_start_time; |
|
} |
|
} |
|
|
|
/* init input streams */ |
|
for (i = 0; i < nb_input_streams; i++) |
|
if ((ret = init_input_stream(i, error, sizeof(error))) < 0) |
|
goto dump_format; |
|
|
|
/* |
|
* initialize stream copy and subtitle/data streams. |
|
* Encoded AVFrame based streams will get initialized as follows: |
|
* - when the first AVFrame is received in do_video_out |
|
* - just before the first AVFrame is received in either transcode_step |
|
* or reap_filters due to us requiring the filter chain buffer sink |
|
* to be configured with the correct audio frame size, which is only |
|
* known after the encoder is initialized. |
|
*/ |
|
for (i = 0; i < nb_output_streams; i++) { |
|
if (output_streams[i]->enc_ctx && |
|
(output_streams[i]->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || |
|
output_streams[i]->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)) |
|
continue; |
|
|
|
ret = init_output_stream_wrapper(output_streams[i], NULL, 0); |
|
if (ret < 0) |
|
goto dump_format; |
|
} |
|
|
|
/* discard unused programs */ |
|
for (i = 0; i < nb_input_files; i++) { |
|
InputFile *ifile = input_files[i]; |
|
for (j = 0; j < ifile->ctx->nb_programs; j++) { |
|
AVProgram *p = ifile->ctx->programs[j]; |
|
int discard = AVDISCARD_ALL; |
|
|
|
for (k = 0; k < p->nb_stream_indexes; k++) |
|
if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) { |
|
discard = AVDISCARD_DEFAULT; |
|
break; |
|
} |
|
p->discard = discard; |
|
} |
|
} |
|
|
|
dump_format: |
|
/* dump the stream mapping */ |
|
av_log(NULL, AV_LOG_INFO, "Stream mapping:\n"); |
|
for (i = 0; i < nb_input_streams; i++) { |
|
ist = input_streams[i]; |
|
|
|
for (j = 0; j < ist->nb_filters; j++) { |
|
if (!filtergraph_is_simple(ist->filters[j]->graph)) { |
|
av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s", |
|
ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?", |
|
ist->filters[j]->name); |
|
if (nb_filtergraphs > 1) |
|
av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index); |
|
av_log(NULL, AV_LOG_INFO, "\n"); |
|
} |
|
} |
|
} |
|
|
|
for (i = 0; i < nb_output_streams; i++) { |
|
ost = output_streams[i]; |
|
|
|
if (ost->attachment_filename) { |
|
/* an attached file */ |
|
av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n", |
|
ost->attachment_filename, ost->file_index, ost->index); |
|
continue; |
|
} |
|
|
|
if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) { |
|
/* output from a complex graph */ |
|
av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name); |
|
if (nb_filtergraphs > 1) |
|
av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index); |
|
|
|
av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index, |
|
ost->index, ost->enc ? ost->enc->name : "?"); |
|
continue; |
|
} |
|
|
|
av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d", |
|
input_streams[ost->source_index]->file_index, |
|
input_streams[ost->source_index]->st->index, |
|
ost->file_index, |
|
ost->index); |
|
if (ost->enc_ctx) { |
|
const AVCodec *in_codec = input_streams[ost->source_index]->dec; |
|
const AVCodec *out_codec = ost->enc; |
|
const char *decoder_name = "?"; |
|
const char *in_codec_name = "?"; |
|
const char *encoder_name = "?"; |
|
const char *out_codec_name = "?"; |
|
const AVCodecDescriptor *desc; |
|
|
|
if (in_codec) { |
|
decoder_name = in_codec->name; |
|
desc = avcodec_descriptor_get(in_codec->id); |
|
if (desc) |
|
in_codec_name = desc->name; |
|
if (!strcmp(decoder_name, in_codec_name)) |
|
decoder_name = "native"; |
|
} |
|
|
|
if (out_codec) { |
|
encoder_name = out_codec->name; |
|
desc = avcodec_descriptor_get(out_codec->id); |
|
if (desc) |
|
out_codec_name = desc->name; |
|
if (!strcmp(encoder_name, out_codec_name)) |
|
encoder_name = "native"; |
|
} |
|
|
|
av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))", |
|
in_codec_name, decoder_name, |
|
out_codec_name, encoder_name); |
|
} else |
|
av_log(NULL, AV_LOG_INFO, " (copy)"); |
|
av_log(NULL, AV_LOG_INFO, "\n"); |
|
} |
|
|
|
if (ret) { |
|
av_log(NULL, AV_LOG_ERROR, "%s\n", error); |
|
return ret; |
|
} |
|
|
|
atomic_store(&transcode_init_done, 1); |
|
|
|
return 0; |
|
} |
|
|
|
/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */ |
|
static int need_output(void) |
|
{ |
|
int i; |
|
|
|
for (i = 0; i < nb_output_streams; i++) { |
|
OutputStream *ost = output_streams[i]; |
|
|
|
if (ost->finished) |
|
continue; |
|
|
|
return 1; |
|
} |
|
|
|
return 0; |
|
} |
|
|
|
/** |
|
* Select the output stream to process. |
|
* |
|
* @return selected output stream, or NULL if none available |
|
*/ |
|
static OutputStream *choose_output(void) |
|
{ |
|
int i; |
|
int64_t opts_min = INT64_MAX; |
|
OutputStream *ost_min = NULL; |
|
|
|
for (i = 0; i < nb_output_streams; i++) { |
|
OutputStream *ost = output_streams[i]; |
|
int64_t opts; |
|
|
|
if (ost->filter && ost->last_filter_pts != AV_NOPTS_VALUE) { |
|
opts = ost->last_filter_pts; |
|
} else { |
|
opts = ost->last_mux_dts == AV_NOPTS_VALUE ? |
|
INT64_MIN : ost->last_mux_dts; |
|
if (ost->last_mux_dts == AV_NOPTS_VALUE) |
|
av_log(NULL, AV_LOG_DEBUG, |
|
"cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n", |
|
ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished); |
|
} |
|
|
|
if (!ost->initialized && !ost->inputs_done) |
|
return ost->unavailable ? NULL : ost; |
|
|
|
if (!ost->finished && opts < opts_min) { |
|
opts_min = opts; |
|
ost_min = ost->unavailable ? NULL : ost; |
|
} |
|
} |
|
return ost_min; |
|
} |
|
|
|
static void set_tty_echo(int on) |
|
{ |
|
#if HAVE_TERMIOS_H |
|
struct termios tty; |
|
if (tcgetattr(0, &tty) == 0) { |
|
if (on) tty.c_lflag |= ECHO; |
|
else tty.c_lflag &= ~ECHO; |
|
tcsetattr(0, TCSANOW, &tty); |
|
} |
|
#endif |
|
} |
|
|
|
static int check_keyboard_interaction(int64_t cur_time) |
|
{ |
|
int i, ret, key; |
|
static int64_t last_time; |
|
if (received_nb_signals) |
|
return AVERROR_EXIT; |
|
/* read_key() returns 0 on EOF */ |
|
if (cur_time - last_time >= 100000) { |
|
key = read_key(); |
|
last_time = cur_time; |
|
}else |
|
key = -1; |
|
if (key == 'q') { |
|
av_log(NULL, AV_LOG_INFO, "\n\n[q] command received. Exiting.\n\n"); |
|
return AVERROR_EXIT; |
|
} |
|
if (key == '+') av_log_set_level(av_log_get_level()+10); |
|
if (key == '-') av_log_set_level(av_log_get_level()-10); |
|
if (key == 's') qp_hist ^= 1; |
|
if (key == 'c' || key == 'C'){ |
|
char buf[4096], target[64], command[256], arg[256] = {0}; |
|
double time; |
|
int k, n = 0; |
|
fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n"); |
|
i = 0; |
|
set_tty_echo(1); |
|
while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1) |
|
if (k > 0) |
|
buf[i++] = k; |
|
buf[i] = 0; |
|
set_tty_echo(0); |
|
fprintf(stderr, "\n"); |
|
if (k > 0 && |
|
(n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) { |
|
av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s", |
|
target, time, command, arg); |
|
for (i = 0; i < nb_filtergraphs; i++) { |
|
FilterGraph *fg = filtergraphs[i]; |
|
if (fg->graph) { |
|
if (time < 0) { |
|
ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf), |
|
key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0); |
|
fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf); |
|
} else if (key == 'c') { |
|
fprintf(stderr, "Queuing commands only on filters that support the specific command is not supported\n"); |
|
ret = AVERROR_PATCHWELCOME; |
|
} else { |
|
ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time); |
|
if (ret < 0) |
|
fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret)); |
|
} |
|
} |
|
} |
|
} else { |
|
av_log(NULL, AV_LOG_ERROR, |
|
"Parse error, at least 3 arguments were expected, " |
|
"only %d given in string '%s'\n", n, buf); |
|
} |
|
} |
|
if (key == 'd' || key == 'D'){ |
|
int debug=0; |
|
if(key == 'D') { |
|
debug = input_streams[0]->dec_ctx->debug << 1; |
|
if(!debug) debug = 1; |
|
while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash |
|
debug += debug; |
|
}else{ |
|
char buf[32]; |
|
int k = 0; |
|
i = 0; |
|
set_tty_echo(1); |
|
while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1) |
|
if (k > 0) |
|
buf[i++] = k; |
|
buf[i] = 0; |
|
set_tty_echo(0); |
|
fprintf(stderr, "\n"); |
|
if (k <= 0 || sscanf(buf, "%d", &debug)!=1) |
|
fprintf(stderr,"error parsing debug value\n"); |
|
} |
|
for(i=0;i<nb_input_streams;i++) { |
|
input_streams[i]->dec_ctx->debug = debug; |
|
} |
|
for(i=0;i<nb_output_streams;i++) { |
|
OutputStream *ost = output_streams[i]; |
|
if (ost->enc_ctx) |
|
ost->enc_ctx->debug = debug; |
|
} |
|
if(debug) av_log_set_level(AV_LOG_DEBUG); |
|
fprintf(stderr,"debug=%d\n", debug); |
|
} |
|
if (key == '?'){ |
|
fprintf(stderr, "key function\n" |
|
"? show this help\n" |
|
"+ increase verbosity\n" |
|
"- decrease verbosity\n" |
|
"c Send command to first matching filter supporting it\n" |
|
"C Send/Queue command to all matching filters\n" |
|
"D cycle through available debug modes\n" |
|
"h dump packets/hex press to cycle through the 3 states\n" |
|
"q quit\n" |
|
"s Show QP histogram\n" |
|
); |
|
} |
|
return 0; |
|
} |
|
|
|
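/* Return 1 if any output stream was marked unavailable (EAGAIN) during the
 * last transcoding step, 0 otherwise. */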
static int got_eagain(void) |
|
{ |
|
int i; |
|
for (i = 0; i < nb_output_streams; i++) |
|
if (output_streams[i]->unavailable) |
|
return 1; |
|
return 0; |
|
} |
|
|
|
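/* Clear the EAGAIN state on all input files and the unavailable flag on all
 * output streams before attempting to read more input. */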
static void reset_eagain(void) |
|
{ |
|
int i; |
|
for (i = 0; i < nb_input_files; i++) |
|
input_files[i]->eagain = 0; |
|
for (i = 0; i < nb_output_streams; i++) |
|
output_streams[i]->unavailable = 0; |
|
} |
|
|
|
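/* Flush the decoders of an input file when it loops (-stream_loop): drain
 * each decoder, and for audio streams report the duration of the last
 * decoded frame to the demuxer thread so it can offset the timestamps of
 * the next iteration. */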
static void decode_flush(InputFile *ifile) |
|
{ |
|
for (int i = 0; i < ifile->nb_streams; i++) { |
|
InputStream *ist = input_streams[ifile->ist_index + i]; |
|
int ret; |
|
|
|
if (!ist->processing_needed) |
|
continue; |
|
|
|
do { |
|
ret = process_input_packet(ist, NULL, 1); |
|
} while (ret > 0); |
|
|
|
if (ist->decoding_needed) { |
|
/* report last frame duration to the demuxer thread */ |
|
if (ist->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { |
|
LastFrameDuration dur; |
|
|
|
dur.stream_idx = i; |
|
dur.duration = av_rescale_q(ist->nb_samples, |
|
(AVRational){ 1, ist->dec_ctx->sample_rate}, |
|
ist->st->time_base); |
|
|
|
av_thread_message_queue_send(ifile->audio_duration_queue, &dur, 0); |
|
} |
|
|
|
avcodec_flush_buffers(ist->dec_ctx); |
|
} |
|
} |
|
} |
|
|
|
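/* Compare the packet DTS against the DTS predicted from the previous packet
 * and, for formats flagged AVFMT_TS_DISCONT, fold large jumps into
 * ifile->ts_offset_discont; for other formats, drop timestamps that are
 * further than dts_error_threshold from the expected value. */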
static void ts_discontinuity_detect(InputFile *ifile, InputStream *ist, |
|
AVPacket *pkt) |
|
{ |
|
const int fmt_is_discont = ifile->ctx->iformat->flags & AVFMT_TS_DISCONT; |
|
int disable_discontinuity_correction = copy_ts; |
|
int64_t pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q, |
|
AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX); |
|
|
|
if (copy_ts && ist->next_dts != AV_NOPTS_VALUE && |
|
fmt_is_discont && ist->st->pts_wrap_bits < 60) { |
|
int64_t wrap_dts = av_rescale_q_rnd(pkt->dts + (1LL<<ist->st->pts_wrap_bits), |
|
ist->st->time_base, AV_TIME_BASE_Q, |
|
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX); |
|
if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10) |
|
disable_discontinuity_correction = 0; |
|
} |
|
|
|
if (ist->next_dts != AV_NOPTS_VALUE && !disable_discontinuity_correction) { |
|
int64_t delta = pkt_dts - ist->next_dts; |
|
if (fmt_is_discont) { |
|
if (FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || |
|
pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) { |
|
ifile->ts_offset_discont -= delta; |
|
av_log(NULL, AV_LOG_DEBUG, |
|
"timestamp discontinuity for stream #%d:%d " |
|
"(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n", |
|
ist->file_index, ist->st->index, ist->st->id, |
|
av_get_media_type_string(ist->st->codecpar->codec_type), |
|
delta, ifile->ts_offset_discont); |
|
pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); |
|
if (pkt->pts != AV_NOPTS_VALUE) |
|
pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); |
|
} |
|
} else { |
|
if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) { |
|
av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt->dts, ist->next_dts, pkt->stream_index); |
|
pkt->dts = AV_NOPTS_VALUE; |
|
} |
|
if (pkt->pts != AV_NOPTS_VALUE){ |
|
int64_t pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q); |
|
delta = pkt_pts - ist->next_dts; |
|
if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) { |
|
av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt->pts, ist->next_dts, pkt->stream_index); |
|
pkt->pts = AV_NOPTS_VALUE; |
|
} |
|
} |
|
} |
|
} else if (ist->next_dts == AV_NOPTS_VALUE && !copy_ts && |
|
fmt_is_discont && ifile->last_ts != AV_NOPTS_VALUE) { |
|
int64_t delta = pkt_dts - ifile->last_ts; |
|
if (FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE) { |
|
ifile->ts_offset_discont -= delta; |
|
av_log(NULL, AV_LOG_DEBUG, |
|
"Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n", |
|
delta, ifile->ts_offset_discont); |
|
pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); |
|
if (pkt->pts != AV_NOPTS_VALUE) |
|
pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); |
|
} |
|
} |
|
|
|
ifile->last_ts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q); |
|
} |
|
|
|
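/* Shift a freshly demuxed packet by the discontinuity offset accumulated so
 * far, then run discontinuity detection on audio/video packets that carry a
 * valid DTS. */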
static void ts_discontinuity_process(InputFile *ifile, InputStream *ist, |
|
AVPacket *pkt) |
|
{ |
|
int64_t offset = av_rescale_q(ifile->ts_offset_discont, AV_TIME_BASE_Q, |
|
ist->st->time_base); |
|
|
|
// apply previously-detected timestamp-discontinuity offset |
|
// (to all streams, not just audio/video) |
|
if (pkt->dts != AV_NOPTS_VALUE) |
|
pkt->dts += offset; |
|
if (pkt->pts != AV_NOPTS_VALUE) |
|
pkt->pts += offset; |
|
|
|
// detect timestamp discontinuities for audio/video |
|
if ((ist->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || |
|
ist->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) && |
|
pkt->dts != AV_NOPTS_VALUE) |
|
ts_discontinuity_detect(ifile, ist, pkt); |
|
} |
|
|
|
/* |
|
* Return |
|
* - 0 -- one packet was read and processed |
|
* - AVERROR(EAGAIN) -- no packets were available for selected file, |
|
* this function should be called again |
|
* - AVERROR_EOF -- this function should not be called again |
|
*/ |
|
static int process_input(int file_index) |
|
{ |
|
InputFile *ifile = input_files[file_index]; |
|
AVFormatContext *is; |
|
InputStream *ist; |
|
AVPacket *pkt; |
|
int ret, i, j; |
|
|
|
is = ifile->ctx; |
|
ret = ifile_get_packet(ifile, &pkt); |
|
|
|
if (ret == AVERROR(EAGAIN)) { |
|
ifile->eagain = 1; |
|
return ret; |
|
} |
|
if (ret == 1) { |
|
/* the input file is looped: flush the decoders */ |
|
decode_flush(ifile); |
|
return AVERROR(EAGAIN); |
|
} |
|
if (ret < 0) { |
|
if (ret != AVERROR_EOF) { |
|
print_error(is->url, ret); |
|
if (exit_on_error) |
|
exit_program(1); |
|
} |
|
|
|
for (i = 0; i < ifile->nb_streams; i++) { |
|
ist = input_streams[ifile->ist_index + i]; |
|
if (ist->processing_needed) { |
|
ret = process_input_packet(ist, NULL, 0); |
|
if (ret>0) |
|
return 0; |
|
} |
|
|
|
/* mark all outputs that don't go through lavfi as finished */ |
|
for (j = 0; j < nb_output_streams; j++) { |
|
OutputStream *ost = output_streams[j]; |
|
|
|
if (ost->source_index == ifile->ist_index + i && |
|
(!ost->enc_ctx || ost->enc->type == AVMEDIA_TYPE_SUBTITLE)) { |
|
OutputFile *of = output_files[ost->file_index]; |
|
output_packet(of, ost->pkt, ost, 1); |
|
} |
|
} |
|
} |
|
|
|
ifile->eof_reached = 1; |
|
return AVERROR(EAGAIN); |
|
} |
|
|
|
reset_eagain(); |
|
|
|
ist = input_streams[ifile->ist_index + pkt->stream_index]; |
|
|
|
ist->data_size += pkt->size; |
|
ist->nb_packets++; |
|
|
|
if (ist->discard) |
|
goto discard_packet; |
|
|
|
/* add the stream-global side data to the first packet */ |
|
if (ist->nb_packets == 1) { |
|
for (i = 0; i < ist->st->nb_side_data; i++) { |
|
AVPacketSideData *src_sd = &ist->st->side_data[i]; |
|
uint8_t *dst_data; |
|
|
|
if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX) |
|
continue; |
|
|
|
if (av_packet_get_side_data(pkt, src_sd->type, NULL)) |
|
continue; |
|
|
|
dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size); |
|
if (!dst_data) |
|
exit_program(1); |
|
|
|
memcpy(dst_data, src_sd->data, src_sd->size); |
|
} |
|
} |
|
|
|
// detect and try to correct for timestamp discontinuities |
|
ts_discontinuity_process(ifile, ist, pkt); |
|
|
|
if (debug_ts) { |
|
av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s duration:%s duration_time:%s off:%s off_time:%s\n", |
|
ifile->ist_index + pkt->stream_index, |
|
av_get_media_type_string(ist->st->codecpar->codec_type), |
|
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base), |
|
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base), |
|
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &ist->st->time_base), |
|
av_ts2str(input_files[ist->file_index]->ts_offset), |
|
av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q)); |
|
} |
|
|
|
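/* notify the sub2video streams of this input file that time has advanced,
 * so their filters are fed the current (possibly blank) subtitle rectangle
 * again and video frames do not pile up waiting for a subtitle */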
sub2video_heartbeat(ist, pkt->pts); |
|
|
|
process_input_packet(ist, pkt, 0); |
|
|
|
discard_packet: |
|
av_packet_free(&pkt); |
|
|
|
return 0; |
|
} |
|
|
|
/** |
|
* Perform a step of transcoding for the specified filter graph. |
|
* |
|
* @param[in] graph filter graph to consider |
|
* @param[out] best_ist  input stream from which a frame should be read to allow transcoding to continue; set to NULL if no such stream exists
|
* @return 0 for success, <0 for error |
|
*/ |
|
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist) |
|
{ |
|
int i, ret; |
|
int nb_requests, nb_requests_max = 0; |
|
InputFilter *ifilter; |
|
InputStream *ist; |
|
|
|
*best_ist = NULL; |
|
ret = avfilter_graph_request_oldest(graph->graph); |
|
if (ret >= 0) |
|
return reap_filters(0); |
|
|
|
if (ret == AVERROR_EOF) { |
|
ret = reap_filters(1); |
|
for (i = 0; i < graph->nb_outputs; i++) |
|
close_output_stream(graph->outputs[i]->ost); |
|
return ret; |
|
} |
|
if (ret != AVERROR(EAGAIN)) |
|
return ret; |
|
|
|
for (i = 0; i < graph->nb_inputs; i++) { |
|
ifilter = graph->inputs[i]; |
|
ist = ifilter->ist; |
|
if (input_files[ist->file_index]->eagain || |
|
input_files[ist->file_index]->eof_reached) |
|
continue; |
|
nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter); |
|
if (nb_requests > nb_requests_max) { |
|
nb_requests_max = nb_requests; |
|
*best_ist = ist; |
|
} |
|
} |
|
|
|
if (!*best_ist) |
|
for (i = 0; i < graph->nb_outputs; i++) |
|
graph->outputs[i]->ost->unavailable = 1; |
|
|
|
return 0; |
|
} |
|
|
|
/** |
|
* Run a single step of transcoding. |
|
* |
|
* @return 0 for success, <0 for error |
|
*/ |
|
static int transcode_step(void) |
|
{ |
|
OutputStream *ost; |
|
InputStream *ist = NULL; |
|
int ret; |
|
|
|
ost = choose_output(); |
|
if (!ost) { |
|
if (got_eagain()) { |
|
reset_eagain(); |
|
av_usleep(10000); |
|
return 0; |
|
} |
|
av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n"); |
|
return AVERROR_EOF; |
|
} |
|
|
|
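/* The filtergraph feeding this output may not be configured yet, since
 * filter input parameters are only known once the first frames have been
 * decoded; (re)configure it as soon as all its inputs have known formats. */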
if (ost->filter && !ost->filter->graph->graph) { |
|
if (ifilter_has_all_input_formats(ost->filter->graph)) { |
|
ret = configure_filtergraph(ost->filter->graph); |
|
if (ret < 0) { |
|
av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n"); |
|
return ret; |
|
} |
|
} |
|
} |
|
|
|
if (ost->filter && ost->filter->graph->graph) { |
|
/* |
|
* Similar case to the early audio initialization in reap_filters. |
|
* Audio is special in ffmpeg.c currently as we depend on lavfi's |
|
* audio frame buffering/creation to get the output audio frame size |
|
* in samples correct. The audio frame size for the filter chain is |
|
* configured during the output stream initialization. |
|
* |
|
* Apparently avfilter_graph_request_oldest (called in |
|
* transcode_from_filter just down the line) peeks. Peeking already |
|
* puts one frame "ready to be given out", which means that any |
|
* update in filter buffer sink configuration afterwards will not |
|
* help us. And yes, even if it would be utilized, |
|
* av_buffersink_get_samples is affected, as it internally utilizes |
|
* the same early exit for peeked frames. |
|
* |
|
* In other words, if avfilter_graph_request_oldest would not make |
|
* further filter chain configuration or usage of |
|
* av_buffersink_get_samples useless (by just causing the return |
|
* of the peeked AVFrame as-is), we could get rid of this additional |
|
* early encoder initialization. |
|
*/ |
|
if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO) |
|
init_output_stream_wrapper(ost, NULL, 1); |
|
|
|
if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0) |
|
return ret; |
|
if (!ist) |
|
return 0; |
|
} else if (ost->filter) { |
|
int i; |
|
for (i = 0; i < ost->filter->graph->nb_inputs; i++) { |
|
InputFilter *ifilter = ost->filter->graph->inputs[i]; |
|
if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) { |
|
ist = ifilter->ist; |
|
break; |
|
} |
|
} |
|
if (!ist) { |
|
ost->inputs_done = 1; |
|
return 0; |
|
} |
|
} else { |
|
av_assert0(ost->source_index >= 0); |
|
ist = input_streams[ost->source_index]; |
|
} |
|
|
|
ret = process_input(ist->file_index); |
|
if (ret == AVERROR(EAGAIN)) { |
|
if (input_files[ist->file_index]->eagain) |
|
ost->unavailable = 1; |
|
return 0; |
|
} |
|
|
|
if (ret < 0) |
|
return ret == AVERROR_EOF ? 0 : ret; |
|
|
|
return reap_filters(0); |
|
} |
|
|
|
/* |
|
* The following code is the main loop of the file converter |
|
*/ |
|
static int transcode(void) |
|
{ |
|
int ret, i; |
|
OutputStream *ost; |
|
InputStream *ist; |
|
int64_t timer_start; |
|
int64_t total_packets_written = 0; |
|
|
|
ret = transcode_init(); |
|
if (ret < 0) |
|
goto fail; |
|
|
|
if (stdin_interaction) { |
|
av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n"); |
|
} |
|
|
|
timer_start = av_gettime_relative(); |
|
|
|
if ((ret = init_input_threads()) < 0) |
|
goto fail; |
|
|
|
while (!received_sigterm) { |
|
int64_t cur_time= av_gettime_relative(); |
|
|
|
/* if 'q' was pressed, exit the transcoding loop */
|
if (stdin_interaction) |
|
if (check_keyboard_interaction(cur_time) < 0) |
|
break; |
|
|
|
/* check if there's any stream where output is still needed */ |
|
if (!need_output()) { |
|
av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n"); |
|
break; |
|
} |
|
|
|
ret = transcode_step(); |
|
if (ret < 0 && ret != AVERROR_EOF) { |
|
av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret)); |
|
break; |
|
} |
|
|
|
/* dump report by using the first output video and audio streams */
|
print_report(0, timer_start, cur_time); |
|
} |
|
free_input_threads(); |
|
|
|
/* at the end of stream, we must flush the decoder buffers */ |
|
for (i = 0; i < nb_input_streams; i++) { |
|
ist = input_streams[i]; |
|
if (!input_files[ist->file_index]->eof_reached) { |
|
process_input_packet(ist, NULL, 0); |
|
} |
|
} |
|
flush_encoders(); |
|
|
|
term_exit(); |
|
|
|
/* write the trailer if needed */ |
|
for (i = 0; i < nb_output_files; i++) { |
|
ret = of_write_trailer(output_files[i]); |
|
if (ret < 0 && exit_on_error) |
|
exit_program(1); |
|
} |
|
|
|
/* dump report by using the first video and audio streams */ |
|
print_report(1, timer_start, av_gettime_relative()); |
|
|
|
/* close each encoder */ |
|
for (i = 0; i < nb_output_streams; i++) { |
|
uint64_t packets_written; |
|
ost = output_streams[i]; |
|
packets_written = atomic_load(&ost->packets_written); |
|
total_packets_written += packets_written; |
|
if (!packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) { |
|
av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i); |
|
exit_program(1); |
|
} |
|
} |
|
|
|
if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) { |
|
av_log(NULL, AV_LOG_FATAL, "Empty output\n"); |
|
exit_program(1); |
|
} |
|
|
|
/* close each decoder */ |
|
for (i = 0; i < nb_input_streams; i++) { |
|
ist = input_streams[i]; |
|
if (ist->decoding_needed) { |
|
avcodec_close(ist->dec_ctx); |
|
} |
|
} |
|
|
|
hw_device_free_all(); |
|
|
|
/* finished ! */ |
|
ret = 0; |
|
|
|
fail: |
|
free_input_threads(); |
|
|
|
if (output_streams) { |
|
for (i = 0; i < nb_output_streams; i++) { |
|
ost = output_streams[i]; |
|
if (ost) { |
|
if (ost->logfile) { |
|
if (fclose(ost->logfile)) |
|
av_log(NULL, AV_LOG_ERROR, |
|
"Error closing logfile, loss of information possible: %s\n", |
|
av_err2str(AVERROR(errno))); |
|
ost->logfile = NULL; |
|
} |
|
av_freep(&ost->forced_kf_pts); |
|
av_freep(&ost->apad); |
|
av_freep(&ost->disposition); |
|
av_dict_free(&ost->encoder_opts); |
|
av_dict_free(&ost->sws_dict); |
|
av_dict_free(&ost->swr_opts); |
|
} |
|
} |
|
} |
|
return ret; |
|
} |
|
|
|
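/* Sample wall-clock time together with user and system CPU time for the
 * -benchmark/-benchmark_all options; CPU times are reported as 0 when
 * neither getrusage() nor GetProcessTimes() is available. */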
static BenchmarkTimeStamps get_benchmark_time_stamps(void) |
|
{ |
|
BenchmarkTimeStamps time_stamps = { av_gettime_relative() }; |
|
#if HAVE_GETRUSAGE |
|
struct rusage rusage; |
|
|
|
getrusage(RUSAGE_SELF, &rusage); |
|
time_stamps.user_usec = |
|
(rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec; |
|
time_stamps.sys_usec = |
|
(rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec; |
|
#elif HAVE_GETPROCESSTIMES |
|
HANDLE proc; |
|
FILETIME c, e, k, u; |
|
proc = GetCurrentProcess(); |
|
GetProcessTimes(proc, &c, &e, &k, &u); |
|
time_stamps.user_usec = |
|
((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10; |
|
time_stamps.sys_usec = |
|
((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10; |
|
#else |
|
time_stamps.user_usec = time_stamps.sys_usec = 0; |
|
#endif |
|
return time_stamps; |
|
} |
|
|
|
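/* Return the peak memory usage of the process in bytes (maximum resident set
 * size via getrusage(), or peak pagefile usage on Windows), or 0 if unknown. */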
static int64_t getmaxrss(void) |
|
{ |
|
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS |
|
struct rusage rusage; |
|
getrusage(RUSAGE_SELF, &rusage); |
|
return (int64_t)rusage.ru_maxrss * 1024; |
|
#elif HAVE_GETPROCESSMEMORYINFO |
|
HANDLE proc; |
|
PROCESS_MEMORY_COUNTERS memcounters; |
|
proc = GetCurrentProcess(); |
|
memcounters.cb = sizeof(memcounters); |
|
GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters)); |
|
return memcounters.PeakPagefileUsage; |
|
#else |
|
return 0; |
|
#endif |
|
} |
|
|
|
int main(int argc, char **argv) |
|
{ |
|
int ret; |
|
BenchmarkTimeStamps ti; |
|
|
|
init_dynload(); |
|
|
|
register_exit(ffmpeg_cleanup); |
|
|
|
setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */ |
|
|
|
av_log_set_flags(AV_LOG_SKIP_REPEATED); |
|
parse_loglevel(argc, argv, options); |
|
|
|
#if CONFIG_AVDEVICE |
|
avdevice_register_all(); |
|
#endif |
|
avformat_network_init(); |
|
|
|
show_banner(argc, argv, options); |
|
|
|
/* parse options and open all input/output files */ |
|
ret = ffmpeg_parse_options(argc, argv); |
|
if (ret < 0) |
|
exit_program(1); |
|
|
|
if (nb_output_files <= 0 && nb_input_files == 0) { |
|
show_usage(); |
|
av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name); |
|
exit_program(1); |
|
} |
|
|
|
/* file converter / grab */ |
|
if (nb_output_files <= 0) { |
|
av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n"); |
|
exit_program(1); |
|
} |
|
|
|
current_time = ti = get_benchmark_time_stamps(); |
|
if (transcode() < 0) |
|
exit_program(1); |
|
if (do_benchmark) { |
|
int64_t utime, stime, rtime; |
|
current_time = get_benchmark_time_stamps(); |
|
utime = current_time.user_usec - ti.user_usec; |
|
stime = current_time.sys_usec - ti.sys_usec; |
|
rtime = current_time.real_usec - ti.real_usec; |
|
av_log(NULL, AV_LOG_INFO, |
|
"bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n", |
|
utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0); |
|
} |
|
av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n", |
|
decode_error_stat[0], decode_error_stat[1]); |
|
if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1]) |
|
exit_program(69); |
|
|
|
exit_program(received_nb_signals ? 255 : main_return_code); |
|
return main_return_code; |
|
}