avplay: use buffersrc instead of a custom input filter.

We do not allow user filters, so the avtools should not use them either.

This also allows reusing the buffer management code from avconv, thus
reducing duplication.
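The core of the change is the standard buffersrc/buffersink push-pull
pattern. A minimal sketch (illustrative only, not part of the commit:
error handling and the DR1 fast path are elided, filt_in/filt_out are
the graph endpoints now stored in VideoState, and the API names are
those of this libavfilter generation, AVFilterBufferRef-based):

    frame->pts = pts_int;                      /* frame decoded by get_video_frame() */
    av_buffersrc_write_frame(filt_in, frame);  /* push into the "buffer" source */

    while (av_buffersink_read(filt_out, &picref) >= 0) {  /* drain "buffersink" */
        avfilter_copy_buf_props(frame, picref);
        /* assumes the sink and the stream share a time base; the real
         * loop rescales with av_rescale_q() when they differ */
        pts = picref->pts * av_q2d(is->video_st->time_base);
        output_picture2(is, frame, pts, picref->pos);
        avfilter_unref_buffer(picref);         /* simplified; avplay itself keeps
                                                  the ref in frame->opaque */
    }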
pull/59/head
Anton Khirnov 13 years ago
parent d55c2e05b5
commit 67339f6eb4
1 changed file: avplay.c (281 lines changed)

@@ -42,6 +42,7 @@
 # include "libavfilter/avfilter.h"
 # include "libavfilter/avfiltergraph.h"
 # include "libavfilter/buffersink.h"
+# include "libavfilter/buffersrc.h"
 #endif
 
 #include "cmdutils.h"
@@ -212,7 +213,10 @@ typedef struct VideoState {
     PtsCorrectionContext pts_ctx;
 #if CONFIG_AVFILTER
+    AVFilterContext *in_video_filter;   ///< the first filter in the video chain
     AVFilterContext *out_video_filter;  ///< the last filter in the video chain
+    int use_dr1;
+    FrameBuffer *buffer_pool;
 #endif
 
     float skip_frames;
@@ -1517,206 +1521,27 @@ static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
 }
 
 #if CONFIG_AVFILTER
-typedef struct {
-    VideoState *is;
-    AVFrame *frame;
-    int use_dr1;
-} FilterPriv;
-
-static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
-{
-    AVFilterContext *ctx = codec->opaque;
-    AVFilterBufferRef  *ref;
-    int perms = AV_PERM_WRITE;
-    int i, w, h, stride[AV_NUM_DATA_POINTERS];
-    unsigned edge;
-    int pixel_size;
-
-    if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
-        perms |= AV_PERM_NEG_LINESIZES;
-
-    if (pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
-        if (pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
-        if (pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
-        if (pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
-    }
-    if (pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
-
-    w = codec->width;
-    h = codec->height;
-    avcodec_align_dimensions2(codec, &w, &h, stride);
-    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
-    w += edge << 1;
-    h += edge << 1;
-
-    if (!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
-        return -1;
-
-    pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1 + 1;
-    ref->video->w = codec->width;
-    ref->video->h = codec->height;
-
-    for (i = 0; i < 4; i++) {
-        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
-        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
-
-        if (ref->data[i]) {
-            ref->data[i] += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
-        }
-        pic->data[i]     = ref->data[i];
-        pic->linesize[i] = ref->linesize[i];
-    }
-    pic->opaque = ref;
-    pic->type   = FF_BUFFER_TYPE_USER;
-    pic->reordered_opaque = codec->reordered_opaque;
-    pic->width               = codec->width;
-    pic->height              = codec->height;
-    pic->format              = codec->pix_fmt;
-    pic->sample_aspect_ratio = codec->sample_aspect_ratio;
-    if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
-    else            pic->pkt_pts = AV_NOPTS_VALUE;
-    return 0;
-}
-
-static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
-{
-    memset(pic->data, 0, sizeof(pic->data));
-    avfilter_unref_buffer(pic->opaque);
-}
-
-static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
-{
-    AVFilterBufferRef *ref = pic->opaque;
-
-    if (pic->data[0] == NULL) {
-        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
-        return codec->get_buffer(codec, pic);
-    }
-
-    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
-        (codec->pix_fmt != ref->format)) {
-        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
-        return -1;
-    }
-
-    pic->reordered_opaque = codec->reordered_opaque;
-    if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
-    else            pic->pkt_pts = AV_NOPTS_VALUE;
-    return 0;
-}
-
-static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
-{
-    FilterPriv *priv = ctx->priv;
-    AVCodecContext *codec;
-    if (!opaque) return -1;
-
-    priv->is = opaque;
-    codec    = priv->is->video_st->codec;
-    codec->opaque = ctx;
-    if (codec->codec->capabilities & CODEC_CAP_DR1) {
-        priv->use_dr1 = 1;
-        codec->get_buffer     = input_get_buffer;
-        codec->release_buffer = input_release_buffer;
-        codec->reget_buffer   = input_reget_buffer;
-        codec->thread_safe_callbacks = 1;
-    }
-
-    priv->frame = avcodec_alloc_frame();
-
-    return 0;
-}
-
-static void input_uninit(AVFilterContext *ctx)
-{
-    FilterPriv *priv = ctx->priv;
-    av_free(priv->frame);
-}
-
-static int input_request_frame(AVFilterLink *link)
-{
-    FilterPriv *priv = link->src->priv;
-    AVFilterBufferRef *picref;
-    int64_t pts = 0;
-    AVPacket pkt;
-    int ret;
-
-    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
-        av_free_packet(&pkt);
-    if (ret < 0)
-        return -1;
-
-    if (priv->use_dr1) {
-        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
-    } else {
-        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
-        av_image_copy(picref->data, picref->linesize,
-                      priv->frame->data, priv->frame->linesize,
-                      picref->format, link->w, link->h);
-    }
-    av_free_packet(&pkt);
-
-    avfilter_copy_frame_props(picref, priv->frame);
-    picref->pts = pts;
-
-    avfilter_start_frame(link, picref);
-    avfilter_draw_slice(link, 0, link->h, 1);
-    avfilter_end_frame(link);
-
-    return 0;
-}
-
-static int input_query_formats(AVFilterContext *ctx)
-{
-    FilterPriv *priv = ctx->priv;
-    enum PixelFormat pix_fmts[] = {
-        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
-    };
-
-    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
-    return 0;
-}
-
-static int input_config_props(AVFilterLink *link)
-{
-    FilterPriv *priv  = link->src->priv;
-    AVCodecContext *c = priv->is->video_st->codec;
-
-    link->w = c->width;
-    link->h = c->height;
-    link->time_base = priv->is->video_st->time_base;
-
-    return 0;
-}
-
-static AVFilter input_filter =
-{
-    .name      = "avplay_input",
-    .priv_size = sizeof(FilterPriv),
-    .init      = input_init,
-    .uninit    = input_uninit,
-    .query_formats = input_query_formats,
-    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
-    .outputs   = (AVFilterPad[]) {{ .name = "default",
-                                    .type = AVMEDIA_TYPE_VIDEO,
-                                    .request_frame = input_request_frame,
-                                    .config_props  = input_config_props, },
-                                  { .name = NULL }},
-};
-
 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
 {
     char sws_flags_str[128];
+    char buffersrc_args[256];
     int ret;
     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
+    AVCodecContext *codec = is->video_st->codec;
+
     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
     graph->scale_sws_opts = av_strdup(sws_flags_str);
 
-    if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
-                                            NULL, is, graph)) < 0)
+    snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
+             codec->width, codec->height, codec->pix_fmt,
+             is->video_st->time_base.num, is->video_st->time_base.den,
+             codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
+
+    if ((ret = avfilter_graph_create_filter(&filt_src,
+                                            avfilter_get_by_name("buffer"),
+                                            "src", buffersrc_args, NULL,
+                                            graph)) < 0)
         return ret;
     if ((ret = avfilter_graph_create_filter(&filt_out,
                                             avfilter_get_by_name("buffersink"),
@@ -1755,8 +1580,16 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
         return ret;
 
+    is->in_video_filter  = filt_src;
     is->out_video_filter = filt_out;
 
+    if (codec->codec->capabilities & CODEC_CAP_DR1) {
+        is->use_dr1 = 1;
+        codec->get_buffer     = codec_get_buffer;
+        codec->release_buffer = codec_release_buffer;
+        codec->opaque         = &is->buffer_pool;
+    }
+
     return ret;
 }
@@ -1772,25 +1605,34 @@ static int video_thread(void *arg)
 #if CONFIG_AVFILTER
     AVFilterGraph *graph = avfilter_graph_alloc();
-    AVFilterContext *filt_out = NULL;
+    AVFilterContext *filt_out = NULL, *filt_in = NULL;
     int64_t pos;
     int last_w = is->video_st->codec->width;
    int last_h = is->video_st->codec->height;
 
     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
         goto the_end;
+    filt_in  = is->in_video_filter;
     filt_out = is->out_video_filter;
 #endif
 
     for (;;) {
-#if !CONFIG_AVFILTER
         AVPacket pkt;
-#else
+#if CONFIG_AVFILTER
         AVFilterBufferRef *picref;
         AVRational tb;
 #endif
         while (is->paused && !is->videoq.abort_request)
             SDL_Delay(10);
+
+        ret = get_video_frame(is, frame, &pts_int, &pkt);
+        if (ret < 0)
+            goto the_end;
+        av_free_packet(&pkt);
+
+        if (!ret)
+            continue;
+
 #if CONFIG_AVFILTER
         if (   last_w != is->video_st->codec->width
             || last_h != is->video_st->codec->height) {
@@ -1804,8 +1646,33 @@ static int video_thread(void *arg)
             last_w = is->video_st->codec->width;
             last_h = is->video_st->codec->height;
         }
 
+        frame->pts = pts_int;
+        if (is->use_dr1) {
+            FrameBuffer       *buf = frame->opaque;
+            AVFilterBufferRef *fb  = avfilter_get_video_buffer_ref_from_arrays(
+                                         frame->data, frame->linesize,
+                                         AV_PERM_READ | AV_PERM_PRESERVE,
+                                         frame->width, frame->height,
+                                         frame->format);
+
+            avfilter_copy_frame_props(fb, frame);
+            fb->buf->priv = buf;
+            fb->buf->free = filter_release_buffer;
+
+            buf->refcount++;
+            av_buffersrc_buffer(filt_in, fb);
+        } else
+            av_buffersrc_write_frame(filt_in, frame);
+
+        while (ret >= 0) {
             ret = av_buffersink_read(filt_out, &picref);
-        if (picref) {
+            if (ret < 0) {
+                ret = 0;
+                break;
+            }
+
             avfilter_copy_buf_props(frame, picref);
 
             pts_int = picref->pts;
@@ -1813,10 +1680,7 @@ static int video_thread(void *arg)
             pos     = picref->pos;
             frame->opaque = picref;
 
-            ret = 1;
-        }
-
-        if (ret >= 0 && av_cmp_q(tb, is->video_st->time_base)) {
+            if (av_cmp_q(tb, is->video_st->time_base)) {
                 av_unused int64_t pts1 = pts_int;
                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
                 av_dlog(NULL, "video_thread(): "
@@ -1824,24 +1688,14 @@ static int video_thread(void *arg)
                         tb.num, tb.den, pts1,
                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
             }
-#else
-        ret = get_video_frame(is, frame, &pts_int, &pkt);
-#endif
-
-        if (ret < 0)
-            goto the_end;
-
-        if (!ret)
-            continue;
-
             pts = pts_int * av_q2d(is->video_st->time_base);
-
-#if CONFIG_AVFILTER
             ret = output_picture2(is, frame, pts, pos);
+        }
 #else
+        pts = pts_int * av_q2d(is->video_st->time_base);
         ret = output_picture2(is, frame, pts, pkt.pos);
-        av_free_packet(&pkt);
 #endif
 
         if (ret < 0)
             goto the_end;
@@ -2386,6 +2240,7 @@ static void stream_component_close(VideoState *is, int stream_index)
     ic->streams[stream_index]->discard = AVDISCARD_ALL;
     avcodec_close(avctx);
+    free_buffer_pool(&is->buffer_pool);
     switch (avctx->codec_type) {
     case AVMEDIA_TYPE_AUDIO:
         is->audio_st = NULL;
