lavfi: unify asink_buffer and vsink_buffer API

The new API is more generic (no distinction between audio/video for
pulling frames), and avoids code duplication.

A backward-compatibility layer is kept to avoid breaking the tools' ABI
(only for the video binary interface; the audio interface was never used
in the tools).
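
For illustration, a minimal caller-side sketch of the unified API follows (a sketch only: the helper name, the pixel format list and the error handling are illustrative, not part of the commit):

/* Sketch: create a video buffersink with the new AVBufferSinkParams and
 * pull one frame through the unified call. The same pull call also works
 * for the audio sink ("abuffersink"). */
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/vsink_buffer.h"

static int pull_one_frame(AVFilterGraph *graph, AVFilterContext **sink)
{
    enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
    AVBufferSinkParams *params = av_buffersink_params_alloc();
    AVFilterBufferRef *picref = NULL;
    int ret;

    if (!params)
        return AVERROR(ENOMEM);
    params->pixel_fmts = pix_fmts;
    ret = avfilter_graph_create_filter(sink, avfilter_get_by_name("buffersink"),
                                       "out", NULL, params, graph);
    av_freep(&params);
    if (ret < 0)
        return ret;

    /* ... link the rest of the graph and call avfilter_graph_config() ... */

    ret = av_buffersink_get_buffer_ref(*sink, &picref, 0);
    if (ret >= 0 && picref) {
        /* consume picref->data / picref->video here */
        avfilter_unref_buffer(picref);
    }
    return ret;
}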
Stefano Sabatini 13 years ago
parent be7eed72c8
commit c4415f6ec9
12 changed files:

avconv.c                    |  10
doc/APIchanges              |   7
doc/filters.texi            |   8
ffmpeg.c                    |  10
ffplay.c                    |  15
libavdevice/lavfi.c         |  26
libavfilter/Makefile        |   2
libavfilter/asink_abuffer.c |  97
libavfilter/asink_abuffer.h |  47
libavfilter/avfilter.h      |   6
libavfilter/vsink_buffer.c  | 180
libavfilter/vsink_buffer.h  |  57

@@ -407,6 +407,7 @@ static int configure_video_filters(InputStream *ist, OutputStream *ost)
AVCodecContext *codec = ost->st->codec;
AVCodecContext *icodec = ist->st->codec;
enum PixelFormat pix_fmts[] = { codec->pix_fmt, PIX_FMT_NONE };
AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
AVRational sample_aspect_ratio;
char args[255];
int ret;
@@ -426,8 +427,15 @@ static int configure_video_filters(InputStream *ist, OutputStream *ost)
"src", args, NULL, ost->graph);
if (ret < 0)
return ret;
#if FF_API_OLD_VSINK_API
ret = avfilter_graph_create_filter(&ost->output_video_filter, avfilter_get_by_name("buffersink"),
"out", NULL, pix_fmts, ost->graph);
#else
buffersink_params->pixel_fmts = pix_fmts;
ret = avfilter_graph_create_filter(&ost->output_video_filter, avfilter_get_by_name("buffersink"),
"out", NULL, buffersink_params, ost->graph);
#endif
av_freep(&buffersink_params);
if (ret < 0)
return ret;
last_filter = ost->input_video_filter;
@@ -1779,7 +1787,7 @@ static int output_packet(InputStream *ist, int ist_index,
while (frame_available) {
if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ost->output_video_filter) {
AVRational ist_pts_tb = ost->output_video_filter->inputs[0]->time_base;
if (av_vsink_buffer_get_video_buffer_ref(ost->output_video_filter, &ost->picref, 0) < 0)
if (av_buffersink_get_buffer_ref(ost->output_video_filter, &ost->picref, 0) < 0)
goto cont;
if (ost->picref) {
avfilter_fill_frame_from_video_buffer_ref(&picture, ost->picref);

@@ -13,6 +13,13 @@ libavutil: 2011-04-18
API changes, most recent first:
2011-09-06 - xxxxxxx - lavfi 2.38.0
Unify video and audio sink API.
In particular, add av_buffersink_get_buffer_ref(), deprecate
av_vsink_buffer_get_video_buffer_ref() and change the value for the
opaque field passed to the abuffersink init function.
2011-09-xx - xxxxxxx - lavu 51.16.0
Add av_asprintf().
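
A hedged before/after sketch of the rename described in this APIchanges entry (sink_ctx, picref and ret are assumed to exist in the caller; the old call stays available while FF_API_OLD_VSINK_API is set):

/* before this change (now deprecated): */
ret = av_vsink_buffer_get_video_buffer_ref(sink_ctx, &picref, AV_VSINK_BUF_FLAG_PEEK);

/* after this change (one call for video and audio sinks): */
ret = av_buffersink_get_buffer_ref(sink_ctx, &picref, AV_BUFFERSINK_FLAG_PEEK);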

@@ -310,11 +310,11 @@ Below is a description of the currently available audio sinks.
Buffer audio frames, and make them available to the end of filter chain.
This sink is mainly intended for programmatic use, in particular
through the interface defined in @file{libavfilter/asink_abuffer.h}.
through the interface defined in @file{libavfilter/vsink_buffer.h}.
It requires a pointer to a ABufferSinkContext structure, which defines the
incoming buffers' format, to be passed as the opaque parameter to
@code{avfilter_init_filter} for initialization.
It requires a pointer to an AVABufferSinkContext structure, which
defines the incoming buffers' formats, to be passed as the opaque
parameter to @code{avfilter_init_filter} for initialization.
@section anullsink
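
To illustrate the abuffersink description above, here is a sketch of the programmatic setup with the new opaque parameter (the format lists, instance name and helper are illustrative assumptions, not taken from the commit):

/* Sketch: configure abuffersink through AVABufferSinkParams, the
 * replacement for the old ABufferSinkContext opaque. The format lists
 * below are only examples. */
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/vsink_buffer.h"
#include "libavutil/audioconvert.h"
#include "libavutil/samplefmt.h"

static int add_audio_sink(AVFilterGraph *graph, AVFilterContext **asink)
{
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
    static const int64_t channel_layouts[]         = { AV_CH_LAYOUT_STEREO, -1 };
    static const int packing_fmts[]                = { AVFILTER_PACKED, -1 };
    AVABufferSinkParams *params = av_abuffersink_params_alloc();
    int ret;

    if (!params)
        return AVERROR(ENOMEM);
    params->sample_fmts     = sample_fmts;
    params->channel_layouts = channel_layouts;
    params->packing_fmts    = packing_fmts;
    /* the params struct is now what gets passed as the opaque init argument */
    ret = avfilter_graph_create_filter(asink, avfilter_get_by_name("abuffersink"),
                                       "aout", NULL, params, graph);
    av_freep(&params);
    return ret;
}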

@@ -419,6 +419,7 @@ static int configure_video_filters(InputStream *ist, OutputStream *ost)
AVCodecContext *codec = ost->st->codec;
AVCodecContext *icodec = ist->st->codec;
enum PixelFormat pix_fmts[] = { codec->pix_fmt, PIX_FMT_NONE };
AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
AVRational sample_aspect_ratio;
char args[255];
int ret;
@@ -438,8 +439,15 @@ static int configure_video_filters(InputStream *ist, OutputStream *ost)
"src", args, NULL, ost->graph);
if (ret < 0)
return ret;
#if FF_API_OLD_VSINK_API
ret = avfilter_graph_create_filter(&ost->output_video_filter, avfilter_get_by_name("buffersink"),
"out", NULL, pix_fmts, ost->graph);
#else
buffersink_params->pixel_fmts = pix_fmts;
ret = avfilter_graph_create_filter(&ost->output_video_filter, avfilter_get_by_name("buffersink"),
"out", NULL, buffersink_params, ost->graph);
#endif
av_freep(&buffersink_params);
if (ret < 0)
return ret;
last_filter = ost->input_video_filter;
@@ -1814,7 +1822,7 @@ static int output_packet(InputStream *ist, int ist_index,
while (frame_available) {
if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ost->output_video_filter) {
AVRational ist_pts_tb = ost->output_video_filter->inputs[0]->time_base;
if (av_vsink_buffer_get_video_buffer_ref(ost->output_video_filter, &ost->picref, 0) < 0)
if (av_buffersink_get_buffer_ref(ost->output_video_filter, &ost->picref, 0) < 0)
goto cont;
if (ost->picref) {
avfilter_fill_frame_from_video_buffer_ref(&picture, ost->picref);

@@ -1685,6 +1685,7 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
char sws_flags_str[128];
int ret;
enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
AVFilterContext *filt_src = NULL, *filt_out = NULL;
snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
graph->scale_sws_opts = av_strdup(sws_flags_str);
@@ -1692,8 +1693,16 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
NULL, is, graph)) < 0)
return ret;
if ((ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
NULL, pix_fmts, graph)) < 0)
#if FF_API_OLD_VSINK_API
ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
NULL, pix_fmts, graph);
#else
buffersink_params->pixel_fmts = pix_fmts;
ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
NULL, buffersink_params, graph);
#endif
av_freep(&buffersink_params);
if (ret < 0)
return ret;
if(vfilters) {
@@ -1768,7 +1777,7 @@ static int video_thread(void *arg)
last_w = is->video_st->codec->width;
last_h = is->video_st->codec->height;
}
ret = av_vsink_buffer_get_video_buffer_ref(filt_out, &picref, 0);
ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
if (picref) {
avfilter_fill_frame_from_video_buffer_ref(frame, picref);
pts_int = picref->pts;

@@ -173,10 +173,21 @@ av_cold static int lavfi_read_header(AVFormatContext *avctx,
for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
AVFilterContext *sink;
if ((ret = avfilter_graph_create_filter(&sink, buffersink,
inout->name, NULL,
pix_fmts, lavfi->graph)) < 0)
FAIL(ret);
AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
#if FF_API_OLD_VSINK_API
ret = avfilter_graph_create_filter(&sink, buffersink,
inout->name, NULL,
pix_fmts, lavfi->graph);
#else
buffersink_params->pixel_fmts = pix_fmts;
ret = avfilter_graph_create_filter(&sink, buffersink,
inout->name, NULL,
buffersink_params, lavfi->graph);
#endif
av_freep(&buffersink_params);
if (ret < 0)
goto end;
lavfi->sinks[i] = sink;
if ((ret = avfilter_link(inout->filter_ctx, inout->pad_idx, sink, 0)) < 0)
FAIL(ret);
@@ -225,8 +236,8 @@ static int lavfi_read_packet(AVFormatContext *avctx, AVPacket *pkt)
for (i = 0; i < avctx->nb_streams; i++) {
AVRational tb = lavfi->sinks[i]->inputs[0]->time_base;
double d;
int ret = av_vsink_buffer_get_video_buffer_ref(lavfi->sinks[i],
&picref, AV_VSINK_BUF_FLAG_PEEK);
int ret = av_buffersink_get_buffer_ref(lavfi->sinks[i],
&picref, AV_BUFFERSINK_FLAG_PEEK);
if (ret < 0)
return ret;
d = av_rescale_q(picref->pts, tb, AV_TIME_BASE_Q);
@@ -239,8 +250,7 @@ static int lavfi_read_packet(AVFormatContext *avctx, AVPacket *pkt)
}
av_dlog(avctx, "min_pts_sink_idx:%i\n", min_pts_sink_idx);
av_vsink_buffer_get_video_buffer_ref(lavfi->sinks[min_pts_sink_idx],
&picref, 0);
av_buffersink_get_buffer_ref(lavfi->sinks[min_pts_sink_idx], &picref, 0);
size = avpicture_get_size(picref->format, picref->video->w, picref->video->h);
if ((ret = av_new_packet(pkt, size)) < 0)
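
The lavfi device's stream selection above can be read as: peek at the pending buffer of every sink without consuming it, rescale each pts to a common time base, and only then pop from the sink that is furthest behind. A condensed, hedged sketch of that pattern (the function and its parameters are illustrative, not the actual lavfi.c code):

#include <float.h>
#include "libavfilter/vsink_buffer.h"
#include "libavutil/mathematics.h"

/* Return the index of the sink whose pending buffer has the smallest pts.
 * AV_BUFFERSINK_FLAG_PEEK keeps the buffer in the sink FIFO, so the caller
 * can afterwards pop from sinks[best] with flags == 0. */
static int pick_min_pts_sink(AVFilterContext **sinks, int nb_sinks)
{
    int i, best = 0;
    double best_pts = DBL_MAX;

    for (i = 0; i < nb_sinks; i++) {
        AVFilterBufferRef *ref;
        AVRational tb = sinks[i]->inputs[0]->time_base;
        double d;

        if (av_buffersink_get_buffer_ref(sinks[i], &ref, AV_BUFFERSINK_FLAG_PEEK) < 0)
            continue;
        d = av_rescale_q(ref->pts, tb, AV_TIME_BASE_Q);
        if (d < best_pts) {
            best_pts = d;
            best     = i;
        }
    }
    return best;
}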

@@ -29,7 +29,7 @@ OBJS-$(CONFIG_ABUFFER_FILTER) += asrc_abuffer.o
OBJS-$(CONFIG_AMOVIE_FILTER) += src_movie.o
OBJS-$(CONFIG_ANULLSRC_FILTER) += asrc_anullsrc.o
OBJS-$(CONFIG_ABUFFERSINK_FILTER) += asink_abuffer.o
OBJS-$(CONFIG_ABUFFERSINK_FILTER) += vsink_buffer.o
OBJS-$(CONFIG_ANULLSINK_FILTER) += asink_anullsink.o
OBJS-$(CONFIG_BLACKFRAME_FILTER) += vf_blackframe.o

@@ -1,97 +0,0 @@
/*
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2011 Mina Nagy Zaki
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* audio buffer sink
*/
#include "avfilter.h"
#include "asink_abuffer.h"
static void filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
{
}
static int init(AVFilterContext *ctx, const char *args, void *opaque)
{
if (!opaque) {
av_log(ctx, AV_LOG_ERROR, "Opaque field required, please pass"
" an initialized ABufferSinkContext");
return AVERROR(EINVAL);
}
memcpy(ctx->priv, opaque, sizeof(ABufferSinkContext));
return 0;
}
static int query_formats(AVFilterContext *ctx)
{
ABufferSinkContext *abuffersink = ctx->priv;
AVFilterFormats *formats = NULL;
if (!(formats = avfilter_make_format_list(abuffersink->sample_fmts)))
return AVERROR(ENOMEM);
avfilter_set_common_sample_formats(ctx, formats);
if (!(formats = avfilter_make_format64_list(abuffersink->channel_layouts)))
return AVERROR(ENOMEM);
avfilter_set_common_channel_layouts(ctx, formats);
if (!(formats = avfilter_make_format_list(abuffersink->packing_fmts)))
return AVERROR(ENOMEM);
avfilter_set_common_packing_formats(ctx, formats);
return 0;
}
int av_asink_abuffer_get_audio_buffer_ref(AVFilterContext *abuffersink,
AVFilterBufferRef **samplesref,
int av_unused flags)
{
int ret;
AVFilterLink * const inlink = abuffersink->inputs[0];
if ((ret = avfilter_request_frame(inlink)))
return ret;
if (!inlink->cur_buf)
return AVERROR(EINVAL);
*samplesref = inlink->cur_buf;
inlink->cur_buf = NULL;
return 0;
}
AVFilter avfilter_asink_abuffersink = {
.name = "abuffersink",
.description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
.init = init,
.priv_size = sizeof(ABufferSinkContext),
.query_formats = query_formats,
.inputs = (AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_samples = filter_samples,
.min_perms = AV_PERM_READ, },
{ .name = NULL }},
.outputs = (AVFilterPad[]) {{ .name = NULL }},
};

@@ -1,47 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFILTER_ASINK_ABUFFER_H
#define AVFILTER_ASINK_ABUFFER_H
/**
* @file
* audio buffer sink API
*/
#include "avfilter.h"
typedef struct {
const enum AVSampleFormat *sample_fmts; ///< list of allowed sample formats, terminated by -1
const int64_t *channel_layouts; ///< list of allowed channel layouts, terminated by -1
const int *packing_fmts; ///< list of allowed packing formats, terminated by -1
} ABufferSinkContext;
/**
* Get an audio buffer from abuffersink and put it in samplesref.
*
* @param abuffersink pointer to an abuffersink context
* @param flags unused
* @return >= 0 in case of success, a negative AVERROR code in case of failure
*/
int av_asink_abuffer_get_audio_buffer_ref(AVFilterContext *abuffersink,
AVFilterBufferRef **samplesref,
int av_unused flags);
#endif /* AVFILTER_ASINK_ABUFFER_H */

@@ -29,7 +29,7 @@
#include "libavutil/rational.h"
#define LIBAVFILTER_VERSION_MAJOR 2
#define LIBAVFILTER_VERSION_MINOR 37
#define LIBAVFILTER_VERSION_MINOR 38
#define LIBAVFILTER_VERSION_MICRO 0
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
@@ -40,6 +40,10 @@
LIBAVFILTER_VERSION_MICRO)
#define LIBAVFILTER_BUILD LIBAVFILTER_VERSION_INT
#ifndef FF_API_OLD_VSINK_API
#define FF_API_OLD_VSINK_API (LIBAVUTIL_VERSION_MAJOR < 3)
#endif
#include <stddef.h>
/**

@@ -27,33 +27,60 @@
#include "avfilter.h"
#include "vsink_buffer.h"
AVBufferSinkParams *av_buffersink_params_alloc(void)
{
static const int pixel_fmts[] = { -1 };
AVBufferSinkParams *params = av_malloc(sizeof(AVBufferSinkParams));
if (!params)
return NULL;
params->pixel_fmts = pixel_fmts;
return params;
}
AVABufferSinkParams *av_abuffersink_params_alloc(void)
{
static const int sample_fmts[] = { -1 };
static const int packing_fmts[] = { -1 };
static const int64_t channel_layouts[] = { -1 };
AVABufferSinkParams *params = av_malloc(sizeof(AVABufferSinkParams));
if (!params)
return NULL;
params->sample_fmts = sample_fmts;
params->channel_layouts = channel_layouts;
params->packing_fmts = packing_fmts;
return params;
}
typedef struct {
AVFifoBuffer *fifo; ///< FIFO buffer of video frame references
enum PixelFormat *pix_fmts; ///< accepted pixel formats, must be terminated with -1
AVFifoBuffer *fifo; ///< FIFO buffer of video frame references
/* only used for video */
const enum PixelFormat *pixel_fmts; ///< list of accepted pixel formats, must be terminated with -1
/* only used for audio */
const enum AVSampleFormat *sample_fmts; ///< list of accepted sample formats, terminated by AV_SAMPLE_FMT_NONE
const int64_t *channel_layouts; ///< list of accepted channel layouts, terminated by -1
const int *packing_fmts; ///< list of accepted packing formats, terminated by -1
} BufferSinkContext;
#define FIFO_INIT_SIZE 8
static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
static av_cold int common_init(AVFilterContext *ctx)
{
BufferSinkContext *buf = ctx->priv;
if (!opaque) {
av_log(ctx, AV_LOG_ERROR, "No opaque field provided, which is required.\n");
return AVERROR(EINVAL);
}
buf->fifo = av_fifo_alloc(FIFO_INIT_SIZE*sizeof(AVFilterBufferRef *));
if (!buf->fifo) {
av_log(ctx, AV_LOG_ERROR, "Failed to allocate fifo\n");
return AVERROR(ENOMEM);
}
buf->pix_fmts = opaque;
return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
static av_cold void common_uninit(AVFilterContext *ctx)
{
BufferSinkContext *buf = ctx->priv;
AVFilterBufferRef *picref;
@@ -88,21 +115,13 @@ static void end_frame(AVFilterLink *inlink)
&inlink->cur_buf, sizeof(AVFilterBufferRef *), NULL);
}
static int query_formats(AVFilterContext *ctx)
{
BufferSinkContext *buf = ctx->priv;
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(buf->pix_fmts));
return 0;
}
int av_vsink_buffer_get_video_buffer_ref(AVFilterContext *ctx,
AVFilterBufferRef **picref, int flags)
int av_buffersink_get_buffer_ref(AVFilterContext *ctx,
AVFilterBufferRef **bufref, int flags)
{
BufferSinkContext *buf = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
int ret;
*picref = NULL;
*bufref = NULL;
/* no picref available, fetch it from the filterchain */
if (!av_fifo_size(buf->fifo)) {
@@ -113,11 +132,50 @@ int av_vsink_buffer_get_video_buffer_ref(AVFilterContext *ctx,
if (!av_fifo_size(buf->fifo))
return AVERROR(EINVAL);
if (flags & AV_VSINK_BUF_FLAG_PEEK)
*picref = *((AVFilterBufferRef **)av_fifo_peek2(buf->fifo, 0));
if (flags & AV_BUFFERSINK_FLAG_PEEK)
*bufref = *((AVFilterBufferRef **)av_fifo_peek2(buf->fifo, 0));
else
av_fifo_generic_read(buf->fifo, picref, sizeof(*picref), NULL);
av_fifo_generic_read(buf->fifo, bufref, sizeof(*bufref), NULL);
return 0;
}
#if FF_API_OLD_VSINK_API
int av_vsink_buffer_get_video_buffer_ref(AVFilterContext *ctx,
AVFilterBufferRef **picref, int flags)
{
return av_buffersink_get_buffer_ref(ctx, picref, flags);
}
#endif
#if CONFIG_BUFFERSINK_FILTER
static av_cold int vsink_init(AVFilterContext *ctx, const char *args, void *opaque)
{
BufferSinkContext *buf = ctx->priv;
av_unused AVBufferSinkParams *params;
if (!opaque) {
av_log(ctx, AV_LOG_ERROR,
"No opaque field provided\n");
return AVERROR(EINVAL);
} else {
#if FF_API_OLD_VSINK_API
buf->pixel_fmts = (const enum PixelFormat *)opaque;
#else
params = (AVBufferSinkParams *)opaque;
buf->pixel_fmts = params->pixel_fmts;
#endif
}
return common_init(ctx);
}
static int vsink_query_formats(AVFilterContext *ctx)
{
BufferSinkContext *buf = ctx->priv;
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(buf->pixel_fmts));
return 0;
}
@@ -125,10 +183,10 @@ AVFilter avfilter_vsink_buffersink = {
.name = "buffersink",
.description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
.priv_size = sizeof(BufferSinkContext),
.init = init,
.uninit = uninit,
.init = vsink_init,
.uninit = common_uninit,
.query_formats = query_formats,
.query_formats = vsink_query_formats,
.inputs = (AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
@@ -137,3 +195,69 @@ AVFilter avfilter_vsink_buffersink = {
{ .name = NULL }},
.outputs = (AVFilterPad[]) {{ .name = NULL }},
};
#endif /* CONFIG_BUFFERSINK_FILTER */
#if CONFIG_ABUFFERSINK_FILTER
static void filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
{
end_frame(link);
}
static av_cold int asink_init(AVFilterContext *ctx, const char *args, void *opaque)
{
BufferSinkContext *buf = ctx->priv;
AVABufferSinkParams *params;
if (!opaque) {
av_log(ctx, AV_LOG_ERROR,
"No opaque field provided, an AVABufferSinkParams struct is required\n");
return AVERROR(EINVAL);
} else
params = (AVABufferSinkParams *)opaque;
buf->sample_fmts = params->sample_fmts;
buf->channel_layouts = params->channel_layouts;
buf->packing_fmts = params->packing_fmts;
return common_init(ctx);
}
static int asink_query_formats(AVFilterContext *ctx)
{
BufferSinkContext *buf = ctx->priv;
AVFilterFormats *formats = NULL;
if (!(formats = avfilter_make_format_list(buf->sample_fmts)))
return AVERROR(ENOMEM);
avfilter_set_common_sample_formats(ctx, formats);
if (!(formats = avfilter_make_format64_list(buf->channel_layouts)))
return AVERROR(ENOMEM);
avfilter_set_common_channel_layouts(ctx, formats);
if (!(formats = avfilter_make_format_list(buf->packing_fmts)))
return AVERROR(ENOMEM);
avfilter_set_common_packing_formats(ctx, formats);
return 0;
}
AVFilter avfilter_asink_abuffersink = {
.name = "abuffersink",
.description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
.init = asink_init,
.uninit = common_uninit,
.priv_size = sizeof(BufferSinkContext),
.query_formats = asink_query_formats,
.inputs = (AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_samples = filter_samples,
.min_perms = AV_PERM_READ, },
{ .name = NULL }},
.outputs = (AVFilterPad[]) {{ .name = NULL }},
};
#endif /* CONFIG_ABUFFERSINK_FILTER */

@@ -21,27 +21,68 @@
/**
* @file
* memory buffer sink API for video
* memory buffer sink API for audio and video
*/
#include "avfilter.h"
/**
* Tell av_vsink_buffer_get_video_buffer_ref() to read the picref, but not
* remove it from the buffer. This is useful if you need only to read
* the picref, without to fetch it.
* Struct to use for initializing a buffersink context.
*/
#define AV_VSINK_BUF_FLAG_PEEK 1
typedef struct {
const enum PixelFormat *pixel_fmts; ///< list of allowed pixel formats, terminated by PIX_FMT_NONE
} AVBufferSinkParams;
/**
* Get a video buffer data from buffer_sink and put it in picref.
* Create an AVBufferSinkParams structure.
*
* @param buffer_sink pointer to a buffer sink context
* @param flags a combination of AV_VSINK_BUF_FLAG_* flags
* Must be freed with av_free().
*/
AVBufferSinkParams *av_buffersink_params_alloc(void);
/**
* Struct to use for initializing an abuffersink context.
*/
typedef struct {
const enum AVSampleFormat *sample_fmts; ///< list of allowed sample formats, terminated by AV_SAMPLE_FMT_NONE
const int64_t *channel_layouts; ///< list of allowed channel layouts, terminated by -1
const int *packing_fmts; ///< list of allowed packing formats
} AVABufferSinkParams;
/**
* Create an AVABufferSinkParams structure.
*
* Must be freed with av_free().
*/
AVABufferSinkParams *av_abuffersink_params_alloc(void);
/**
* Tell av_buffersink_get_buffer_ref() to read video/samples buffer
* reference, but not remove it from the buffer. This is useful if you
* need only to read a video/samples buffer, without to fetch it.
*/
#define AV_BUFFERSINK_FLAG_PEEK 1
/**
* Get an audio/video buffer data from buffer_sink and put it in bufref.
*
* This function works with both audio and video buffer sinks.
*
* @param buffer_sink pointer to a buffersink or abuffersink context
* @param flags a combination of AV_BUFFERSINK_FLAG_* flags
* @return >= 0 in case of success, a negative AVERROR code in case of
* failure
*/
int av_buffersink_get_buffer_ref(AVFilterContext *buffer_sink,
AVFilterBufferRef **bufref, int flags);
#if FF_API_OLD_VSINK_API
/**
* @deprecated Use av_buffersink_get_buffer_ref() instead.
*/
attribute_deprecated
int av_vsink_buffer_get_video_buffer_ref(AVFilterContext *buffer_sink,
AVFilterBufferRef **picref, int flags);
#endif
#endif /* AVFILTER_VSINK_BUFFER_H */
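
Because the tools never exercised the audio side of the old interface (see the commit message), a hedged sketch of the audio pull path may help; it assumes an already configured and linked abuffersink instance:

/* Sketch: drain an abuffersink through the same unified call used for
 * video. Graph construction and error handling are assumed/abbreviated. */
#include "libavfilter/vsink_buffer.h"

static void drain_audio_sink(AVFilterContext *asink)
{
    AVFilterBufferRef *samplesref;

    /* each returned reference owns one buffer of filtered samples */
    while (av_buffersink_get_buffer_ref(asink, &samplesref, 0) >= 0) {
        /* samplesref->data[] holds the samples in the negotiated format */
        avfilter_unref_buffer(samplesref);
    }
}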
