@@ -50,11 +50,9 @@
 #include "libavcodec/avfft.h"
 #include "libswresample/swresample.h"
 
-#if CONFIG_AVFILTER
-# include "libavfilter/avfilter.h"
-# include "libavfilter/buffersink.h"
-# include "libavfilter/buffersrc.h"
-#endif
+#include "libavfilter/avfilter.h"
+#include "libavfilter/buffersink.h"
+#include "libavfilter/buffersrc.h"
 
 #include <SDL.h>
 #include <SDL_thread.h>
@@ -109,8 +107,6 @@ const int program_birth_year = 2003;
 
 #define USE_ONEPASS_SUBTITLE_RENDER 1
 
-static unsigned sws_flags = SWS_BICUBIC;
-
 typedef struct MyAVPacketList {
     AVPacket *pkt;
     int serial;
@@ -250,9 +246,7 @@ typedef struct VideoState {
     int audio_volume;
     int muted;
     struct AudioParams audio_src;
-#if CONFIG_AVFILTER
     struct AudioParams audio_filter_src;
-#endif
     struct AudioParams audio_tgt;
     struct SwrContext *swr_ctx;
     int frame_drops_early;
@@ -284,7 +278,6 @@ typedef struct VideoState {
     AVStream *video_st;
     PacketQueue videoq;
     double max_frame_duration;      // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
-    struct SwsContext *img_convert_ctx;
     struct SwsContext *sub_convert_ctx;
     int eof;
 
@@ -292,14 +285,12 @@ typedef struct VideoState {
     int width, height, xleft, ytop;
     int step;
 
-#if CONFIG_AVFILTER
     int vfilter_idx;
     AVFilterContext *in_video_filter;   // the first filter in the video chain
     AVFilterContext *out_video_filter;  // the last filter in the video chain
     AVFilterContext *in_audio_filter;   // the first filter in the audio chain
     AVFilterContext *out_audio_filter;  // the last filter in the audio chain
     AVFilterGraph *agraph;              // audio filter graph
-#endif
 
     int last_video_stream, last_audio_stream, last_subtitle_stream;
 
@@ -347,11 +338,9 @@ static const char *video_codec_name;
 double rdftspeed = 0.02;
 static int64_t cursor_last_shown;
 static int cursor_hidden = 0;
-#if CONFIG_AVFILTER
 static const char **vfilters_list = NULL;
 static int nb_vfilters = 0;
 static char *afilters = NULL;
-#endif
 static int autorotate = 1;
 static int find_stream_info = 1;
 static int filter_nbthreads = 0;
@@ -393,14 +382,12 @@ static const struct TextureFormatEntry {
     { AV_PIX_FMT_NONE,           SDL_PIXELFORMAT_UNKNOWN },
 };
 
-#if CONFIG_AVFILTER
 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
 {
     GROW_ARRAY(vfilters_list, nb_vfilters);
     vfilters_list[nb_vfilters - 1] = arg;
     return 0;
 }
-#endif
 
 static inline
 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
@@ -891,7 +878,8 @@ static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_B
     }
 }
 
-static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
+static int upload_texture(SDL_Texture **tex, AVFrame *frame)
+{
     int ret = 0;
     Uint32 sdl_pix_fmt;
     SDL_BlendMode sdl_blendmode;
@@ -899,24 +887,6 @@ static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext *
     if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
         return -1;
     switch (sdl_pix_fmt) {
-        case SDL_PIXELFORMAT_UNKNOWN:
-            /* This should only happen if we are not using avfilter... */
-            *img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
-                frame->width, frame->height, frame->format, frame->width, frame->height,
-                AV_PIX_FMT_BGRA, sws_flags, NULL, NULL, NULL);
-            if (*img_convert_ctx != NULL) {
-                uint8_t *pixels[4];
-                int pitch[4];
-                if (!SDL_LockTexture(*tex, NULL, (void **)pixels, pitch)) {
-                    sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
-                              0, frame->height, pixels, pitch);
-                    SDL_UnlockTexture(*tex);
-                }
-            } else {
-                av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
-                ret = -1;
-            }
-            break;
         case SDL_PIXELFORMAT_IYUV:
             if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
                 ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
@@ -1014,7 +984,7 @@ static void video_image_display(VideoState *is)
     set_sdl_yuv_conversion_mode(vp->frame);
 
     if (!vp->uploaded) {
-        if (upload_texture(&is->vid_texture, vp->frame, &is->img_convert_ctx) < 0) {
+        if (upload_texture(&is->vid_texture, vp->frame) < 0) {
             set_sdl_yuv_conversion_mode(NULL);
             return;
         }
@@ -1272,7 +1242,6 @@ static void stream_close(VideoState *is)
     frame_queue_destory(&is->sampq);
     frame_queue_destory(&is->subpq);
     SDL_DestroyCond(is->continue_read_thread);
-    sws_freeContext(is->img_convert_ctx);
     sws_freeContext(is->sub_convert_ctx);
     av_free(is->filename);
     if (is->vis_texture)
@@ -1294,9 +1263,7 @@ static void do_exit(VideoState *is)
     if (window)
         SDL_DestroyWindow(window);
     uninit_opts();
-#if CONFIG_AVFILTER
     av_freep(&vfilters_list);
-#endif
     avformat_network_deinit();
     if (show_status)
         printf("\n");
@@ -1794,7 +1761,6 @@ static int get_video_frame(VideoState *is, AVFrame *frame)
     return got_picture;
 }
 
-#if CONFIG_AVFILTER
 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
                                  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
 {
@@ -2022,17 +1988,14 @@ end:
     return ret;
 }
-#endif /* CONFIG_AVFILTER */
 
 static int audio_thread(void *arg)
 {
     VideoState *is = arg;
     AVFrame *frame = av_frame_alloc();
     Frame *af;
-#if CONFIG_AVFILTER
     int last_serial = -1;
     int reconfigure;
-#endif
     int got_frame = 0;
     AVRational tb;
     int ret = 0;
 
@@ -2047,7 +2010,6 @@ static int audio_thread(void *arg)
         if (got_frame) {
                 tb = (AVRational){1, frame->sample_rate};
 
-#if CONFIG_AVFILTER
                 reconfigure =
                     cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.ch_layout.nb_channels,
                                    frame->format, frame->ch_layout.nb_channels) ||
@@ -2080,7 +2042,6 @@ static int audio_thread(void *arg)
 
             while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
                 tb = av_buffersink_get_time_base(is->out_audio_filter);
-#endif
                 if (!(af = frame_queue_peek_writable(&is->sampq)))
                     goto the_end;
 
@@ -2092,19 +2053,15 @@ static int audio_thread(void *arg)
                 av_frame_move_ref(af->frame, frame);
                 frame_queue_push(&is->sampq);
 
-#if CONFIG_AVFILTER
                 if (is->audioq.serial != is->auddec.pkt_serial)
                     break;
             }
             if (ret == AVERROR_EOF)
                 is->auddec.finished = is->auddec.pkt_serial;
-#endif
         }
     } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
  the_end:
-#if CONFIG_AVFILTER
     avfilter_graph_free(&is->agraph);
-#endif
     av_frame_free(&frame);
     return ret;
 }
@@ -2130,7 +2087,6 @@ static int video_thread(void *arg)
     AVRational tb = is->video_st->time_base;
     AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
 
-#if CONFIG_AVFILTER
     AVFilterGraph *graph = NULL;
     AVFilterContext *filt_out = NULL, *filt_in = NULL;
     int last_w = 0;
@@ -2138,7 +2094,6 @@ static int video_thread(void *arg)
     enum AVPixelFormat last_format = -2;
     int last_serial = -1;
     int last_vfilter_idx = 0;
-#endif
 
     if (!frame)
         return AVERROR(ENOMEM);
@@ -2150,7 +2105,6 @@ static int video_thread(void *arg)
         if (!ret)
             continue;
 
-#if CONFIG_AVFILTER
         if (   last_w != frame->width
             || last_h != frame->height
             || last_format != frame->format
@@ -2205,24 +2159,19 @@ static int video_thread(void *arg)
             if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
                 is->frame_last_filter_delay = 0;
             tb = av_buffersink_get_time_base(filt_out);
-#endif
             duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
             pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
             ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
             av_frame_unref(frame);
-#if CONFIG_AVFILTER
             if (is->videoq.serial != is->viddec.pkt_serial)
                 break;
         }
-#endif
 
         if (ret < 0)
             goto the_end;
     }
  the_end:
-#if CONFIG_AVFILTER
     avfilter_graph_free(&graph);
-#endif
     av_frame_free(&frame);
     return 0;
 }
@@ -2631,7 +2580,6 @@ static int stream_component_open(VideoState *is, int stream_index)
     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
     switch (avctx->codec_type) {
     case AVMEDIA_TYPE_AUDIO:
-#if CONFIG_AVFILTER
         {
             AVFilterContext *sink;
 
@@ -2648,12 +2596,6 @@ static int stream_component_open(VideoState *is, int stream_index)
             if (ret < 0)
                 goto fail;
         }
-#else
-        sample_rate = avctx->sample_rate;
-        ret = av_channel_layout_copy(&ch_layout, &avctx->ch_layout);
-        if (ret < 0)
-            goto fail;
-#endif
 
         /* prepare audio output */
         if ((ret = audio_open(is, &ch_layout, sample_rate, &is->audio_tgt)) < 0)
@@ -3327,7 +3269,6 @@ static void event_loop(VideoState *cur_stream)
                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                 break;
             case SDLK_w:
-#if CONFIG_AVFILTER
                 if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
                     if (++cur_stream->vfilter_idx >= nb_vfilters)
                         cur_stream->vfilter_idx = 0;
@@ -3335,9 +3276,6 @@ static void event_loop(VideoState *cur_stream)
                     cur_stream->vfilter_idx = 0;
                     toggle_audio_display(cur_stream);
                 }
-#else
-                toggle_audio_display(cur_stream);
-#endif
                 break;
             case SDLK_PAGEUP:
                 if (cur_stream->ic->nb_chapters <= 1) {
@@ -3601,10 +3539,8 @@ static const OptionDef options[] = {
     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
     { "left", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
     { "top", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
-#if CONFIG_AVFILTER
     { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
     { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
-#endif
     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
     { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
     { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
@@ -3635,11 +3571,7 @@ void show_help_default(const char *opt, const char *arg)
     printf("\n");
     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
-#if !CONFIG_AVFILTER
-    show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
-#else
     show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
-#endif
     printf("\nWhile playing:\n"
           "q, ESC              quit\n"
          "f                   toggle full screen\n"