lavfi: switch to AVFrame.

Deprecate AVFilterBuffer/AVFilterBufferRef and everything related to it
and use AVFrame instead.
Author: Anton Khirnov
Commit: 7e350379f8
Parent: 77b2cd7b41
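
In practice every pad callback now receives and returns reference-counted AVFrames. A minimal pass-through sketch of the new callback contract (the filter itself is hypothetical, not part of this commit):

    #include "libavutil/frame.h"
    #include "avfilter.h"
    #include "internal.h"

    /* The callback owns `frame`: it must either pass it on with
     * ff_filter_frame() or release it with av_frame_free(). Properties that
     * used to hide behind frame->video / frame->audio are plain AVFrame
     * fields now (width, height, nb_samples, sample_rate, ...). */
    static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
    {
        AVFilterLink *outlink = inlink->dst->outputs[0];

        /* example error path: never leak the owned reference */
        if (frame->format != inlink->format) {
            av_frame_free(&frame);
            return AVERROR(EINVAL);
        }

        return ff_filter_frame(outlink, frame); /* ownership moves on */
    }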
59 changed files:

 doc/filters.texi               |  10
 libavfilter/af_amix.c          |  18
 libavfilter/af_ashowinfo.c     |  11
 libavfilter/af_asyncts.c       |  22
 libavfilter/af_channelmap.c    |   4
 libavfilter/af_channelsplit.c  |  10
 libavfilter/af_join.c          | 151
 libavfilter/af_resample.c      |  58
 libavfilter/af_volume.c        |  12
 libavfilter/asink_anullsink.c  |   4
 libavfilter/audio.c            |  68
 libavfilter/audio.h            |  10
 libavfilter/avfilter.c         |  38
 libavfilter/avfilter.h         |  28
 libavfilter/buffersink.c       | 128
 libavfilter/buffersink.h       |  32
 libavfilter/buffersrc.c        | 200
 libavfilter/buffersrc.h        |  29
 libavfilter/fifo.c             | 113
 libavfilter/internal.h         |  29
 libavfilter/split.c            |   6
 libavfilter/version.h          |   3
 libavfilter/vf_aspect.c        |   4
 libavfilter/vf_blackframe.c    |   8
 libavfilter/vf_boxblur.c       |  17
 libavfilter/vf_copy.c          |  20
 libavfilter/vf_crop.c          |   9
 libavfilter/vf_cropdetect.c    |  22
 libavfilter/vf_delogo.c        |  20
 libavfilter/vf_drawbox.c       |   9
 libavfilter/vf_drawtext.c      |  58
 libavfilter/vf_fade.c          |   9
 libavfilter/vf_fieldorder.c    |  19
 libavfilter/vf_fps.c           |  32
 libavfilter/vf_frei0r.c        |  28
 libavfilter/vf_gradfun.c       |  19
 libavfilter/vf_hflip.c         |  15
 libavfilter/vf_hqdn3d.c        |  22
 libavfilter/vf_libopencv.c     |  33
 libavfilter/vf_lut.c           |  18
 libavfilter/vf_overlay.c       |  35
 libavfilter/vf_pad.c           | 154
 libavfilter/vf_pixdesctest.c   |  14
 libavfilter/vf_scale.c         |  25
 libavfilter/vf_select.c        |  45
 libavfilter/vf_setpts.c        |  11
 libavfilter/vf_settb.c         |   2
 libavfilter/vf_showinfo.c      |  21
 libavfilter/vf_transpose.c     |  25
 libavfilter/vf_unsharp.c       |  13
 libavfilter/vf_vflip.c         |  22
 libavfilter/vf_yadif.c         |  65
 libavfilter/video.c            |  89
 libavfilter/video.h            |   9
 libavfilter/vsink_nullsink.c   |   4
 libavfilter/vsrc_color.c       |  13
 libavfilter/vsrc_movie.c       |  51
 libavfilter/vsrc_testsrc.c     |  51
 libavfilter/yadif.h            |   8

@@ -622,9 +622,6 @@ same as @var{out_w} and @var{out_h}
@item n
the number of input frame, starting from 0
@item pos
the position in the file of the input frame, NAN if unknown
@item t
timestamp expressed in seconds, NAN if the input timestamp is unknown
@@ -1760,9 +1757,6 @@ the frame is bottom-field-first
@item key
1 if the filtered frame is a key-frame, 0 otherwise
@item pos
the position in the file of the filtered frame, -1 if the information
is not available (e.g. for synthetic video)
@end table
The default value of the select expression is "1".
@@ -1854,10 +1848,6 @@ the PTS of the first video frame
@item INTERLACED
tell if the current frame is interlaced
@item POS
original position in the file of the frame, or undefined if undefined
for the current frame
@item PREV_INPTS
previous input PTS

@@ -275,18 +275,18 @@ static int output_frame(AVFilterLink *outlink, int nb_samples)
{
AVFilterContext *ctx = outlink->src;
MixContext *s = ctx->priv;
AVFilterBufferRef *out_buf, *in_buf;
AVFrame *out_buf, *in_buf;
int i;
calculate_scales(s, nb_samples);
out_buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
out_buf = ff_get_audio_buffer(outlink, nb_samples);
if (!out_buf)
return AVERROR(ENOMEM);
in_buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
in_buf = ff_get_audio_buffer(outlink, nb_samples);
if (!in_buf) {
avfilter_unref_buffer(out_buf);
av_frame_free(&out_buf);
return AVERROR(ENOMEM);
}
@@ -308,7 +308,7 @@ static int output_frame(AVFilterLink *outlink, int nb_samples)
}
}
}
avfilter_unref_buffer(in_buf);
av_frame_free(&in_buf);
out_buf->pts = s->next_pts;
if (s->next_pts != AV_NOPTS_VALUE)
@@ -455,7 +455,7 @@ static int request_frame(AVFilterLink *outlink)
return output_frame(outlink, available_samples);
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
MixContext *s = ctx->priv;
@@ -474,16 +474,16 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
if (i == 0) {
int64_t pts = av_rescale_q(buf->pts, inlink->time_base,
outlink->time_base);
ret = frame_list_add_frame(s->frame_list, buf->audio->nb_samples, pts);
ret = frame_list_add_frame(s->frame_list, buf->nb_samples, pts);
if (ret < 0)
goto fail;
}
ret = av_audio_fifo_write(s->fifos[i], (void **)buf->extended_data,
buf->audio->nb_samples);
buf->nb_samples);
fail:
avfilter_unref_buffer(buf);
av_frame_free(&buf);
return ret;
}

@@ -65,16 +65,16 @@ static void uninit(AVFilterContext *ctx)
av_freep(&s->plane_checksums);
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
AShowInfoContext *s = ctx->priv;
char chlayout_str[128];
uint32_t checksum = 0;
int channels = av_get_channel_layout_nb_channels(buf->audio->channel_layout);
int channels = av_get_channel_layout_nb_channels(buf->channel_layout);
int planar = av_sample_fmt_is_planar(buf->format);
int block_align = av_get_bytes_per_sample(buf->format) * (planar ? 1 : channels);
int data_size = buf->audio->nb_samples * block_align;
int data_size = buf->nb_samples * block_align;
int planes = planar ? channels : 1;
int i;
@@ -87,7 +87,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
}
av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), -1,
buf->audio->channel_layout);
buf->channel_layout);
av_log(ctx, AV_LOG_INFO,
"n:%"PRIu64" pts:%"PRId64" pts_time:%f "
@@ -95,7 +95,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
"checksum:%08X ",
s->frame, buf->pts, buf->pts * av_q2d(inlink->time_base),
av_get_sample_fmt_name(buf->format), chlayout_str,
buf->audio->sample_rate, buf->audio->nb_samples,
buf->sample_rate, buf->nb_samples,
checksum);
av_log(ctx, AV_LOG_INFO, "plane_checksums: [ ");
@@ -114,7 +114,6 @@ static const AVFilterPad inputs[] = {
.get_audio_buffer = ff_null_get_audio_buffer,
.config_props = config_input,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ,
},
{ NULL },
};

@@ -158,14 +158,13 @@ static int request_frame(AVFilterLink *link)
handle_trimming(ctx);
if (nb_samples = get_delay(s)) {
AVFilterBufferRef *buf = ff_get_audio_buffer(link, AV_PERM_WRITE,
nb_samples);
AVFrame *buf = ff_get_audio_buffer(link, nb_samples);
if (!buf)
return AVERROR(ENOMEM);
ret = avresample_convert(s->avr, buf->extended_data,
buf->linesize[0], nb_samples, NULL, 0, 0);
if (ret <= 0) {
avfilter_unref_bufferp(&buf);
av_frame_free(&buf);
return (ret < 0) ? ret : AVERROR_EOF;
}
@@ -177,20 +176,20 @@ static int request_frame(AVFilterLink *link)
return ret;
}
static int write_to_fifo(ASyncContext *s, AVFilterBufferRef *buf)
static int write_to_fifo(ASyncContext *s, AVFrame *buf)
{
int ret = avresample_convert(s->avr, NULL, 0, 0, buf->extended_data,
buf->linesize[0], buf->audio->nb_samples);
avfilter_unref_buffer(buf);
buf->linesize[0], buf->nb_samples);
av_frame_free(&buf);
return ret;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
ASyncContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
int nb_channels = av_get_channel_layout_nb_channels(buf->audio->channel_layout);
int nb_channels = av_get_channel_layout_nb_channels(buf->channel_layout);
int64_t pts = (buf->pts == AV_NOPTS_VALUE) ? buf->pts :
av_rescale_q(buf->pts, inlink->time_base, outlink->time_base);
int out_size, ret;
@@ -229,8 +228,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
}
if (out_size > 0) {
AVFilterBufferRef *buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE,
out_size);
AVFrame *buf_out = ff_get_audio_buffer(outlink, out_size);
if (!buf_out) {
ret = AVERROR(ENOMEM);
goto fail;
@@ -272,11 +270,11 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
s->pts = pts - avresample_get_delay(s->avr);
ret = avresample_convert(s->avr, NULL, 0, 0, buf->extended_data,
buf->linesize[0], buf->audio->nb_samples);
buf->linesize[0], buf->nb_samples);
s->first_frame = 0;
fail:
avfilter_unref_buffer(buf);
av_frame_free(&buf);
return ret;
}

@@ -313,7 +313,7 @@ static int channelmap_query_formats(AVFilterContext *ctx)
return 0;
}
static int channelmap_filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int channelmap_filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
@@ -331,7 +331,7 @@ static int channelmap_filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
uint8_t **new_extended_data =
av_mallocz(nch_out * sizeof(*buf->extended_data));
if (!new_extended_data) {
avfilter_unref_buffer(buf);
av_frame_free(&buf);
return AVERROR(ENOMEM);
}
if (buf->extended_data == buf->data) {

@@ -111,13 +111,13 @@ static int query_formats(AVFilterContext *ctx)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
int i, ret = 0;
for (i = 0; i < ctx->nb_outputs; i++) {
AVFilterBufferRef *buf_out = avfilter_ref_buffer(buf, ~AV_PERM_WRITE);
AVFrame *buf_out = av_frame_clone(buf);
if (!buf_out) {
ret = AVERROR(ENOMEM);
@@ -125,14 +125,14 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
}
buf_out->data[0] = buf_out->extended_data[0] = buf_out->extended_data[i];
buf_out->audio->channel_layout =
av_channel_layout_extract_channel(buf->audio->channel_layout, i);
buf_out->channel_layout =
av_channel_layout_extract_channel(buf->channel_layout, i);
ret = ff_filter_frame(ctx->outputs[i], buf_out);
if (ret < 0)
break;
}
avfilter_unref_buffer(buf);
av_frame_free(&buf);
return ret;
}

@@ -56,24 +56,14 @@ typedef struct JoinContext {
/**
* Temporary storage for input frames, until we get one on each input.
*/
AVFilterBufferRef **input_frames;
AVFrame **input_frames;
/**
* Temporary storage for data pointers, for assembling the output buffer.
* Temporary storage for buffer references, for assembling the output frame.
*/
uint8_t **data;
AVBufferRef **buffers;
} JoinContext;
/**
* To avoid copying the data from input buffers, this filter creates
* a custom output buffer that stores references to all inputs and
* unrefs them on free.
*/
typedef struct JoinBufferPriv {
AVFilterBufferRef **in_buffers;
int nb_in_buffers;
} JoinBufferPriv;
#define OFFSET(x) offsetof(JoinContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
static const AVOption join_options[] = {
@@ -93,7 +83,7 @@ static const AVClass join_class = {
.version = LIBAVUTIL_VERSION_INT,
};
static int filter_frame(AVFilterLink *link, AVFilterBufferRef *buf)
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
AVFilterContext *ctx = link->dst;
JoinContext *s = ctx->priv;
@@ -104,7 +94,7 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *buf)
break;
av_assert0(i < ctx->nb_inputs);
av_assert0(!s->input_frames[i]);
s->input_frames[i] = buf;
s->input_frames[i] = frame;
return 0;
}
@@ -208,9 +198,9 @@ static int join_init(AVFilterContext *ctx, const char *args)
s->nb_channels = av_get_channel_layout_nb_channels(s->channel_layout);
s->channels = av_mallocz(sizeof(*s->channels) * s->nb_channels);
s->data = av_mallocz(sizeof(*s->data) * s->nb_channels);
s->buffers = av_mallocz(sizeof(*s->buffers) * s->nb_channels);
s->input_frames = av_mallocz(sizeof(*s->input_frames) * s->inputs);
if (!s->channels || !s->data || !s->input_frames) {
if (!s->channels || !s->buffers || !s->input_frames) {
ret = AVERROR(ENOMEM);
goto fail;
}
@@ -249,11 +239,11 @@ static void join_uninit(AVFilterContext *ctx)
for (i = 0; i < ctx->nb_inputs; i++) {
av_freep(&ctx->input_pads[i].name);
avfilter_unref_bufferp(&s->input_frames[i]);
av_frame_free(&s->input_frames[i]);
}
av_freep(&s->channels);
av_freep(&s->data);
av_freep(&s->buffers);
av_freep(&s->input_frames);
}
@@ -395,34 +385,14 @@ fail:
return ret;
}
static void join_free_buffer(AVFilterBuffer *buf)
{
JoinBufferPriv *priv = buf->priv;
if (priv) {
int i;
for (i = 0; i < priv->nb_in_buffers; i++)
avfilter_unref_bufferp(&priv->in_buffers[i]);
av_freep(&priv->in_buffers);
av_freep(&buf->priv);
}
if (buf->extended_data != buf->data)
av_freep(&buf->extended_data);
av_freep(&buf);
}
static int join_request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
JoinContext *s = ctx->priv;
AVFilterBufferRef *buf;
JoinBufferPriv *priv;
AVFrame *frame;
int linesize = INT_MAX;
int perms = ~0;
int nb_samples = 0;
int nb_buffers = 0;
int i, j, ret;
/* get a frame on each input */
@@ -435,54 +405,95 @@ static int join_request_frame(AVFilterLink *outlink)
/* request the same number of samples on all inputs */
if (i == 0) {
nb_samples = s->input_frames[0]->audio->nb_samples;
nb_samples = s->input_frames[0]->nb_samples;
for (j = 1; !i && j < ctx->nb_inputs; j++)
ctx->inputs[j]->request_samples = nb_samples;
}
}
/* setup the output frame */
frame = av_frame_alloc();
if (!frame)
return AVERROR(ENOMEM);
if (s->nb_channels > FF_ARRAY_ELEMS(frame->data)) {
frame->extended_data = av_mallocz(s->nb_channels *
sizeof(*frame->extended_data));
if (!frame->extended_data) {
ret = AVERROR(ENOMEM);
goto fail;
}
}
/* copy the data pointers */
for (i = 0; i < s->nb_channels; i++) {
ChannelMap *ch = &s->channels[i];
AVFilterBufferRef *cur_buf = s->input_frames[ch->input];
s->data[i] = cur_buf->extended_data[ch->in_channel_idx];
linesize = FFMIN(linesize, cur_buf->linesize[0]);
perms &= cur_buf->perms;
}
AVFrame *cur = s->input_frames[ch->input];
AVBufferRef *buf;
av_assert0(nb_samples > 0);
buf = avfilter_get_audio_buffer_ref_from_arrays(s->data, linesize, perms,
nb_samples, outlink->format,
outlink->channel_layout);
if (!buf)
return AVERROR(ENOMEM);
frame->extended_data[i] = cur->extended_data[ch->in_channel_idx];
linesize = FFMIN(linesize, cur->linesize[0]);
buf->buf->free = join_free_buffer;
buf->pts = s->input_frames[0]->pts;
/* add the buffer where this plane is stored to the list if it's
* not already there */
buf = av_frame_get_plane_buffer(cur, ch->in_channel_idx);
if (!buf) {
ret = AVERROR(EINVAL);
goto fail;
}
for (j = 0; j < nb_buffers; j++)
if (s->buffers[j]->buffer == buf->buffer)
break;
if (j == nb_buffers)
s->buffers[nb_buffers++] = buf;
}
if (!(priv = av_mallocz(sizeof(*priv))))
goto fail;
if (!(priv->in_buffers = av_mallocz(sizeof(*priv->in_buffers) * ctx->nb_inputs)))
goto fail;
/* create references to the buffers we copied to output */
if (nb_buffers > FF_ARRAY_ELEMS(frame->buf)) {
frame->nb_extended_buf = nb_buffers - FF_ARRAY_ELEMS(frame->buf);
frame->extended_buf = av_mallocz(sizeof(*frame->extended_buf) *
frame->nb_extended_buf);
if (!frame->extended_buf) {
frame->nb_extended_buf = 0;
ret = AVERROR(ENOMEM);
goto fail;
}
}
for (i = 0; i < FFMIN(FF_ARRAY_ELEMS(frame->buf), nb_buffers); i++) {
frame->buf[i] = av_buffer_ref(s->buffers[i]);
if (!frame->buf[i]) {
ret = AVERROR(ENOMEM);
goto fail;
}
}
for (i = 0; i < frame->nb_extended_buf; i++) {
frame->extended_buf[i] = av_buffer_ref(s->buffers[i +
FF_ARRAY_ELEMS(frame->buf)]);
if (!frame->extended_buf[i]) {
ret = AVERROR(ENOMEM);
goto fail;
}
}
for (i = 0; i < ctx->nb_inputs; i++)
priv->in_buffers[i] = s->input_frames[i];
priv->nb_in_buffers = ctx->nb_inputs;
buf->buf->priv = priv;
frame->nb_samples = nb_samples;
frame->channel_layout = outlink->channel_layout;
frame->sample_rate = outlink->sample_rate;
frame->pts = s->input_frames[0]->pts;
frame->linesize[0] = linesize;
if (frame->data != frame->extended_data) {
memcpy(frame->data, frame->extended_data, sizeof(*frame->data) *
FFMIN(FF_ARRAY_ELEMS(frame->data), s->nb_channels));
}
ret = ff_filter_frame(outlink, buf);
ret = ff_filter_frame(outlink, frame);
memset(s->input_frames, 0, sizeof(*s->input_frames) * ctx->nb_inputs);
return ret;
fail:
avfilter_unref_buffer(buf);
if (priv)
av_freep(&priv->in_buffers);
av_freep(&priv);
return AVERROR(ENOMEM);
av_frame_free(&frame);
return ret;
}
static const AVFilterPad avfilter_af_join_outputs[] = {
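
The JoinBufferPriv free-callback machinery removed above is replaced by plain buffer references: the output frame keeps the borrowed input data alive simply by holding an extra AVBufferRef per plane. The core idea, reduced to one plane (the names cur, out and idx are illustrative):

    AVBufferRef *plane_buf = av_frame_get_plane_buffer(cur, idx);
    if (!plane_buf)
        return AVERROR(EINVAL);
    out->buf[0] = av_buffer_ref(plane_buf); /* one more owner of the data */
    if (!out->buf[0])
        return AVERROR(ENOMEM);
    out->extended_data[0] = cur->extended_data[idx]; /* share, don't copy */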

@@ -174,7 +174,7 @@ static int request_frame(AVFilterLink *outlink)
/* flush the lavr delay buffer */
if (ret == AVERROR_EOF && s->avr) {
AVFilterBufferRef *buf;
AVFrame *frame;
int nb_samples = av_rescale_rnd(avresample_get_delay(s->avr),
outlink->sample_rate,
ctx->inputs[0]->sample_rate,
@@ -183,25 +183,25 @@ static int request_frame(AVFilterLink *outlink)
if (!nb_samples)
return ret;
buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
if (!buf)
frame = ff_get_audio_buffer(outlink, nb_samples);
if (!frame)
return AVERROR(ENOMEM);
ret = avresample_convert(s->avr, buf->extended_data,
buf->linesize[0], nb_samples,
ret = avresample_convert(s->avr, frame->extended_data,
frame->linesize[0], nb_samples,
NULL, 0, 0);
if (ret <= 0) {
avfilter_unref_buffer(buf);
av_frame_free(&frame);
return (ret == 0) ? AVERROR_EOF : ret;
}
buf->pts = s->next_pts;
return ff_filter_frame(outlink, buf);
frame->pts = s->next_pts;
return ff_filter_frame(outlink, frame);
}
return ret;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
ResampleContext *s = ctx->priv;
@@ -209,27 +209,26 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
int ret;
if (s->avr) {
AVFilterBufferRef *buf_out;
AVFrame *out;
int delay, nb_samples;
/* maximum possible samples lavr can output */
delay = avresample_get_delay(s->avr);
nb_samples = av_rescale_rnd(buf->audio->nb_samples + delay,
nb_samples = av_rescale_rnd(in->nb_samples + delay,
outlink->sample_rate, inlink->sample_rate,
AV_ROUND_UP);
buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
if (!buf_out) {
out = ff_get_audio_buffer(outlink, nb_samples);
if (!out) {
ret = AVERROR(ENOMEM);
goto fail;
}
ret = avresample_convert(s->avr, buf_out->extended_data,
buf_out->linesize[0], nb_samples,
buf->extended_data, buf->linesize[0],
buf->audio->nb_samples);
ret = avresample_convert(s->avr, out->extended_data, out->linesize[0],
nb_samples, in->extended_data, in->linesize[0],
in->nb_samples);
if (ret <= 0) {
avfilter_unref_buffer(buf_out);
av_frame_free(&out);
if (ret < 0)
goto fail;
}
@@ -237,36 +236,36 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
av_assert0(!avresample_available(s->avr));
if (s->next_pts == AV_NOPTS_VALUE) {
if (buf->pts == AV_NOPTS_VALUE) {
if (in->pts == AV_NOPTS_VALUE) {
av_log(ctx, AV_LOG_WARNING, "First timestamp is missing, "
"assuming 0.\n");
s->next_pts = 0;
} else
s->next_pts = av_rescale_q(buf->pts, inlink->time_base,
s->next_pts = av_rescale_q(in->pts, inlink->time_base,
outlink->time_base);
}
if (ret > 0) {
buf_out->audio->nb_samples = ret;
if (buf->pts != AV_NOPTS_VALUE) {
buf_out->pts = av_rescale_q(buf->pts, inlink->time_base,
out->nb_samples = ret;
if (in->pts != AV_NOPTS_VALUE) {
out->pts = av_rescale_q(in->pts, inlink->time_base,
outlink->time_base) -
av_rescale(delay, outlink->sample_rate,
inlink->sample_rate);
} else
buf_out->pts = s->next_pts;
out->pts = s->next_pts;
s->next_pts = buf_out->pts + buf_out->audio->nb_samples;
s->next_pts = out->pts + out->nb_samples;
ret = ff_filter_frame(outlink, buf_out);
ret = ff_filter_frame(outlink, out);
s->got_output = 1;
}
fail:
avfilter_unref_buffer(buf);
av_frame_free(&in);
} else {
buf->format = outlink->format;
ret = ff_filter_frame(outlink, buf);
in->format = outlink->format;
ret = ff_filter_frame(outlink, in);
s->got_output = 1;
}
@@ -278,7 +277,6 @@ static const AVFilterPad avfilter_af_resample_inputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ
},
{ NULL }
};

@@ -233,21 +233,21 @@ static int config_output(AVFilterLink *outlink)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
VolumeContext *vol = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
int nb_samples = buf->audio->nb_samples;
AVFilterBufferRef *out_buf;
int nb_samples = buf->nb_samples;
AVFrame *out_buf;
if (vol->volume == 1.0 || vol->volume_i == 256)
return ff_filter_frame(outlink, buf);
/* do volume scaling in-place if input buffer is writable */
if (buf->perms & AV_PERM_WRITE) {
if (av_frame_is_writable(buf)) {
out_buf = buf;
} else {
out_buf = ff_get_audio_buffer(inlink, AV_PERM_WRITE, nb_samples);
out_buf = ff_get_audio_buffer(inlink, nb_samples);
if (!out_buf)
return AVERROR(ENOMEM);
out_buf->pts = buf->pts;
@@ -283,7 +283,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
}
if (buf != out_buf)
avfilter_unref_buffer(buf);
av_frame_free(&buf);
return ff_filter_frame(outlink, out_buf);
}
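
The AV_PERM_WRITE test becomes a reference-count test here: av_frame_is_writable() is true only when the frame holds the sole reference to each of its data buffers. Condensed to its skeleton (names as in the diff above):

    if (av_frame_is_writable(buf)) {
        out_buf = buf;                      /* sole owner: scale in place */
    } else {
        out_buf = ff_get_audio_buffer(inlink, buf->nb_samples);
        if (!out_buf)
            return AVERROR(ENOMEM);
        out_buf->pts = buf->pts;            /* carry the timing over */
    }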

@@ -20,9 +20,9 @@
#include "avfilter.h"
#include "internal.h"
static int null_filter_frame(AVFilterLink *link, AVFilterBufferRef *samplesref)
static int null_filter_frame(AVFilterLink *link, AVFrame *frame)
{
avfilter_unref_bufferp(&samplesref);
av_frame_free(&frame);
return 0;
}

@@ -23,60 +23,65 @@
#include "avfilter.h"
#include "internal.h"
AVFilterBufferRef *ff_null_get_audio_buffer(AVFilterLink *link, int perms,
int nb_samples)
AVFrame *ff_null_get_audio_buffer(AVFilterLink *link, int nb_samples)
{
return ff_get_audio_buffer(link->dst->outputs[0], perms, nb_samples);
return ff_get_audio_buffer(link->dst->outputs[0], nb_samples);
}
AVFilterBufferRef *ff_default_get_audio_buffer(AVFilterLink *link, int perms,
int nb_samples)
AVFrame *ff_default_get_audio_buffer(AVFilterLink *link, int nb_samples)
{
AVFilterBufferRef *samplesref = NULL;
uint8_t **data;
int planar = av_sample_fmt_is_planar(link->format);
int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
int planes = planar ? nb_channels : 1;
int linesize;
if (!(data = av_mallocz(sizeof(*data) * planes)))
AVFrame *frame = av_frame_alloc();
int channels = av_get_channel_layout_nb_channels(link->channel_layout);
int buf_size, ret;
if (!frame)
return NULL;
buf_size = av_samples_get_buffer_size(NULL, channels, nb_samples,
link->format, 0);
if (buf_size < 0)
goto fail;
if (av_samples_alloc(data, &linesize, nb_channels, nb_samples, link->format, 0) < 0)
frame->buf[0] = av_buffer_alloc(buf_size);
if (!frame->buf[0])
goto fail;
samplesref = avfilter_get_audio_buffer_ref_from_arrays(data, linesize, perms,
nb_samples, link->format,
link->channel_layout);
if (!samplesref)
frame->nb_samples = nb_samples;
ret = avcodec_fill_audio_frame(frame, channels, link->format,
frame->buf[0]->data, buf_size, 0);
if (ret < 0)
goto fail;
av_freep(&data);
av_samples_set_silence(frame->extended_data, 0, nb_samples, channels,
link->format);
frame->nb_samples = nb_samples;
frame->format = link->format;
frame->channel_layout = link->channel_layout;
frame->sample_rate = link->sample_rate;
return frame;
fail:
if (data)
av_freep(&data[0]);
av_freep(&data);
return samplesref;
av_buffer_unref(&frame->buf[0]);
av_frame_free(&frame);
return NULL;
}
AVFilterBufferRef *ff_get_audio_buffer(AVFilterLink *link, int perms,
int nb_samples)
AVFrame *ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
{
AVFilterBufferRef *ret = NULL;
AVFrame *ret = NULL;
if (link->dstpad->get_audio_buffer)
ret = link->dstpad->get_audio_buffer(link, perms, nb_samples);
ret = link->dstpad->get_audio_buffer(link, nb_samples);
if (!ret)
ret = ff_default_get_audio_buffer(link, perms, nb_samples);
if (ret)
ret->type = AVMEDIA_TYPE_AUDIO;
ret = ff_default_get_audio_buffer(link, nb_samples);
return ret;
}
#if FF_API_AVFILTERBUFFER
AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,
int linesize,int perms,
int nb_samples,
@@ -146,3 +151,4 @@ fail:
av_freep(&samples);
return NULL;
}
#endif

@@ -22,24 +22,20 @@
#include "avfilter.h"
/** default handler for get_audio_buffer() for audio inputs */
AVFilterBufferRef *ff_default_get_audio_buffer(AVFilterLink *link, int perms,
int nb_samples);
AVFrame *ff_default_get_audio_buffer(AVFilterLink *link, int nb_samples);
/** get_audio_buffer() handler for filters which simply pass audio along */
AVFilterBufferRef *ff_null_get_audio_buffer(AVFilterLink *link, int perms,
int nb_samples);
AVFrame *ff_null_get_audio_buffer(AVFilterLink *link, int nb_samples);
/**
* Request an audio samples buffer with a specific set of permissions.
*
* @param link the output link to the filter from which the buffer will
* be requested
* @param perms the required access permissions
* @param nb_samples the number of samples per channel
* @return A reference to the samples. This must be unreferenced with
* avfilter_unref_buffer when you are finished with it.
*/
AVFilterBufferRef *ff_get_audio_buffer(AVFilterLink *link, int perms,
int nb_samples);
AVFrame *ff_get_audio_buffer(AVFilterLink *link, int nb_samples);
#endif /* AVFILTER_AUDIO_H */
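
A usage sketch for the new allocator signature (the surrounding filter code is hypothetical): the perms argument is gone, and the result is released through the ordinary AVFrame API:

    AVFrame *out = ff_get_audio_buffer(outlink, nb_samples);
    if (!out)
        return AVERROR(ENOMEM);
    out->pts = in->pts;             /* copy whatever properties apply */
    /* ... write samples into out->extended_data[plane] ... */
    return ff_filter_frame(outlink, out); /* or av_frame_free(&out) on error */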

@@ -451,17 +451,16 @@ enum AVMediaType avfilter_pad_get_type(AVFilterPad *pads, int pad_idx)
return pads[pad_idx].type;
}
static int default_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame)
static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
{
return ff_filter_frame(link->dst->outputs[0], frame);
}
int ff_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame)
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
{
int (*filter_frame)(AVFilterLink *, AVFilterBufferRef *);
int (*filter_frame)(AVFilterLink *, AVFrame *);
AVFilterPad *dst = link->dstpad;
AVFilterBufferRef *out;
int perms = frame->perms;
AVFrame *out;
FF_DPRINTF_START(NULL, filter_frame);
ff_dlog_link(NULL, link, 1);
@@ -469,47 +468,40 @@ int ff_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame)
if (!(filter_frame = dst->filter_frame))
filter_frame = default_filter_frame;
if (frame->linesize[0] < 0)
perms |= AV_PERM_NEG_LINESIZES;
/* prepare to copy the frame if the buffer has insufficient permissions */
if ((dst->min_perms & perms) != dst->min_perms ||
dst->rej_perms & perms) {
av_log(link->dst, AV_LOG_DEBUG,
"Copying data in avfilter (have perms %x, need %x, reject %x)\n",
perms, link->dstpad->min_perms, link->dstpad->rej_perms);
/* copy the frame if needed */
if (dst->needs_writable && !av_frame_is_writable(frame)) {
av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");
switch (link->type) {
case AVMEDIA_TYPE_VIDEO:
out = ff_get_video_buffer(link, dst->min_perms,
link->w, link->h);
out = ff_get_video_buffer(link, link->w, link->h);
break;
case AVMEDIA_TYPE_AUDIO:
out = ff_get_audio_buffer(link, dst->min_perms,
frame->audio->nb_samples);
out = ff_get_audio_buffer(link, frame->nb_samples);
break;
default: return AVERROR(EINVAL);
}
if (!out) {
avfilter_unref_buffer(frame);
av_frame_free(&frame);
return AVERROR(ENOMEM);
}
avfilter_copy_buffer_ref_props(out, frame);
av_frame_copy_props(out, frame);
switch (link->type) {
case AVMEDIA_TYPE_VIDEO:
av_image_copy(out->data, out->linesize, frame->data, frame->linesize,
frame->format, frame->video->w, frame->video->h);
frame->format, frame->width, frame->height);
break;
case AVMEDIA_TYPE_AUDIO:
av_samples_copy(out->extended_data, frame->extended_data,
0, 0, frame->audio->nb_samples,
av_get_channel_layout_nb_channels(frame->audio->channel_layout),
0, 0, frame->nb_samples,
av_get_channel_layout_nb_channels(frame->channel_layout),
frame->format);
break;
default: return AVERROR(EINVAL);
}
avfilter_unref_buffer(frame);
av_frame_free(&frame);
} else
out = frame;

@@ -23,6 +23,7 @@
#define AVFILTER_AVFILTER_H
#include "libavutil/avutil.h"
#include "libavutil/frame.h"
#include "libavutil/log.h"
#include "libavutil/samplefmt.h"
#include "libavutil/pixfmt.h"
@@ -54,6 +55,7 @@ typedef struct AVFilterLink AVFilterLink;
typedef struct AVFilterPad AVFilterPad;
typedef struct AVFilterFormats AVFilterFormats;
#if FF_API_AVFILTERBUFFER
/**
* A reference-counted buffer data type used by the filter system. Filters
* should not store pointers to this structure directly, but instead use the
@@ -177,6 +179,7 @@ typedef struct AVFilterBufferRef {
/**
* Copy properties of src to dst, without copying the actual data
*/
attribute_deprecated
void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src);
/**
@@ -188,6 +191,7 @@ void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *s
* @return a new reference to the buffer with the same properties as the
* old, excluding any permissions denied by pmask
*/
attribute_deprecated
AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask);
/**
@@ -199,6 +203,7 @@ AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask);
* @note it is recommended to use avfilter_unref_bufferp() instead of this
* function
*/
attribute_deprecated
void avfilter_unref_buffer(AVFilterBufferRef *ref);
/**
@@ -208,7 +213,9 @@ void avfilter_unref_buffer(AVFilterBufferRef *ref);
*
* @param ref pointer to the buffer reference
*/
attribute_deprecated
void avfilter_unref_bufferp(AVFilterBufferRef **ref);
#endif
#if FF_API_AVFILTERPAD_PUBLIC
/**
@@ -239,7 +246,7 @@ struct AVFilterPad {
*
* Input pads only.
*/
int min_perms;
attribute_deprecated int min_perms;
/**
* Permissions which are not accepted on incoming buffers. Any buffer
@@ -250,7 +257,7 @@ struct AVFilterPad {
*
* Input pads only.
*/
int rej_perms;
attribute_deprecated int rej_perms;
/**
* @deprecated unused
@@ -263,7 +270,7 @@ struct AVFilterPad {
*
* Input video pads only.
*/
AVFilterBufferRef *(*get_video_buffer)(AVFilterLink *link, int perms, int w, int h);
AVFrame *(*get_video_buffer)(AVFilterLink *link, int w, int h);
/**
* Callback function to get an audio buffer. If NULL, the filter system will
@@ -271,8 +278,7 @@ struct AVFilterPad {
*
* Input audio pads only.
*/
AVFilterBufferRef *(*get_audio_buffer)(AVFilterLink *link, int perms,
int nb_samples);
AVFrame *(*get_audio_buffer)(AVFilterLink *link, int nb_samples);
/**
* @deprecated unused
@@ -294,7 +300,7 @@ struct AVFilterPad {
* must ensure that samplesref is properly unreferenced on error if it
* hasn't been passed on to another filter.
*/
int (*filter_frame)(AVFilterLink *link, AVFilterBufferRef *frame);
int (*filter_frame)(AVFilterLink *link, AVFrame *frame);
/**
* Frame poll callback. This returns the number of immediately available
@@ -339,6 +345,8 @@ struct AVFilterPad {
* input pads only.
*/
int needs_fifo;
int needs_writable;
};
#endif
@@ -535,6 +543,7 @@ int avfilter_link(AVFilterContext *src, unsigned srcpad,
*/
int avfilter_config_links(AVFilterContext *filter);
#if FF_API_AVFILTERBUFFER
/**
* Create a buffer reference wrapped around an already allocated image
* buffer.
@@ -546,6 +555,7 @@ int avfilter_config_links(AVFilterContext *filter);
* @param h the height of the image specified by the data and linesize arrays
* @param format the pixel format of the image specified by the data and linesize arrays
*/
attribute_deprecated
AVFilterBufferRef *
avfilter_get_video_buffer_ref_from_arrays(uint8_t *data[4], int linesize[4], int perms,
int w, int h, enum AVPixelFormat format);
@@ -561,12 +571,14 @@ avfilter_get_video_buffer_ref_from_arrays(uint8_t *data[4], int linesize[4], int
* @param sample_fmt the format of each sample in the buffer to allocate
* @param channel_layout the channel layout of the buffer
*/
attribute_deprecated
AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,
int linesize,
int perms,
int nb_samples,
enum AVSampleFormat sample_fmt,
uint64_t channel_layout);
#endif
/** Initialize the filter system. Register all builtin filters. */
void avfilter_register_all(void);
@@ -645,12 +657,14 @@ void avfilter_free(AVFilterContext *filter);
int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
unsigned filt_srcpad_idx, unsigned filt_dstpad_idx);
#if FF_API_AVFILTERBUFFER
/**
* Copy the frame properties of src to dst, without copying the actual
* image data.
*
* @return 0 on success, a negative number on error.
*/
attribute_deprecated
int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src);
/**
@@ -659,6 +673,8 @@ int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src);
*
* @return 0 on success, a negative number on error.
*/
attribute_deprecated
int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src);
#endif
#endif /* AVFILTER_AVFILTER_H */
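
With min_perms/rej_perms deprecated, an input pad that modifies frames in place now just sets the new needs_writable flag and lets ff_filter_frame() make the copy when the incoming frame is shared. A sketch of such a pad declaration (the filter is hypothetical):

    static const AVFilterPad example_inputs[] = {
        {
            .name           = "default",
            .type           = AVMEDIA_TYPE_VIDEO,
            .filter_frame   = filter_frame,
            .needs_writable = 1, /* ff_filter_frame() copies shared frames */
        },
        { NULL }
    };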

@@ -35,7 +35,7 @@
#include "internal.h"
typedef struct {
AVFilterBufferRef *cur_buf; ///< last buffer delivered on the sink
AVFrame *cur_frame; ///< last frame delivered on the sink
AVAudioFifo *audio_fifo; ///< FIFO for audio samples
int64_t next_pts; ///< interpolating audio pts
} BufferSinkContext;
@@ -48,59 +48,57 @@ static av_cold void uninit(AVFilterContext *ctx)
av_audio_fifo_free(sink->audio_fifo);
}
static int filter_frame(AVFilterLink *link, AVFilterBufferRef *buf)
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
BufferSinkContext *s = link->dst->priv;
av_assert0(!s->cur_buf);
s->cur_buf = buf;
av_assert0(!s->cur_frame);
s->cur_frame = frame;
return 0;
}
int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame)
{
BufferSinkContext *s = ctx->priv;
AVFilterLink *link = ctx->inputs[0];
int ret;
if (!buf)
return ff_poll_frame(ctx->inputs[0]);
if ((ret = ff_request_frame(link)) < 0)
return ret;
if (!s->cur_buf)
if (!s->cur_frame)
return AVERROR(EINVAL);
*buf = s->cur_buf;
s->cur_buf = NULL;
av_frame_move_ref(frame, s->cur_frame);
av_frame_free(&s->cur_frame);
return 0;
}
static int read_from_fifo(AVFilterContext *ctx, AVFilterBufferRef **pbuf,
static int read_from_fifo(AVFilterContext *ctx, AVFrame *frame,
int nb_samples)
{
BufferSinkContext *s = ctx->priv;
AVFilterLink *link = ctx->inputs[0];
AVFilterBufferRef *buf;
AVFrame *tmp;
if (!(buf = ff_get_audio_buffer(link, AV_PERM_WRITE, nb_samples)))
if (!(tmp = ff_get_audio_buffer(link, nb_samples)))
return AVERROR(ENOMEM);
av_audio_fifo_read(s->audio_fifo, (void**)buf->extended_data, nb_samples);
av_audio_fifo_read(s->audio_fifo, (void**)tmp->extended_data, nb_samples);
buf->pts = s->next_pts;
tmp->pts = s->next_pts;
s->next_pts += av_rescale_q(nb_samples, (AVRational){1, link->sample_rate},
link->time_base);
*pbuf = buf;
av_frame_move_ref(frame, tmp);
av_frame_free(&tmp);
return 0;
}
int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **pbuf,
int nb_samples)
int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples)
{
BufferSinkContext *s = ctx->priv;
AVFilterLink *link = ctx->inputs[0];
@@ -113,38 +111,107 @@ int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **pbuf,
}
while (ret >= 0) {
AVFilterBufferRef *buf;
if (av_audio_fifo_size(s->audio_fifo) >= nb_samples)
return read_from_fifo(ctx, pbuf, nb_samples);
return read_from_fifo(ctx, frame, nb_samples);
ret = av_buffersink_read(ctx, &buf);
ret = ff_request_frame(link);
if (ret == AVERROR_EOF && av_audio_fifo_size(s->audio_fifo))
return read_from_fifo(ctx, pbuf, av_audio_fifo_size(s->audio_fifo));
return read_from_fifo(ctx, frame, av_audio_fifo_size(s->audio_fifo));
else if (ret < 0)
return ret;
if (buf->pts != AV_NOPTS_VALUE) {
s->next_pts = buf->pts -
if (s->cur_frame->pts != AV_NOPTS_VALUE) {
s->next_pts = s->cur_frame->pts -
av_rescale_q(av_audio_fifo_size(s->audio_fifo),
(AVRational){ 1, link->sample_rate },
link->time_base);
}
ret = av_audio_fifo_write(s->audio_fifo, (void**)buf->extended_data,
buf->audio->nb_samples);
avfilter_unref_buffer(buf);
ret = av_audio_fifo_write(s->audio_fifo, (void**)s->cur_frame->extended_data,
s->cur_frame->nb_samples);
av_frame_free(&s->cur_frame);
}
return ret;
}
#if FF_API_AVFILTERBUFFER
static void compat_free_buffer(AVFilterBuffer *buf)
{
AVFrame *frame = buf->priv;
av_frame_free(&frame);
av_free(buf);
}
static int compat_read(AVFilterContext *ctx, AVFilterBufferRef **pbuf, int nb_samples)
{
AVFilterBufferRef *buf;
AVFrame *frame;
int ret;
if (!pbuf)
return ff_poll_frame(ctx->inputs[0]);
frame = av_frame_alloc();
if (!frame)
return AVERROR(ENOMEM);
if (!nb_samples)
ret = av_buffersink_get_frame(ctx, frame);
else
ret = av_buffersink_get_samples(ctx, frame, nb_samples);
if (ret < 0)
goto fail;
if (ctx->inputs[0]->type == AVMEDIA_TYPE_VIDEO) {
buf = avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize,
AV_PERM_READ,
frame->width, frame->height,
frame->format);
} else {
buf = avfilter_get_audio_buffer_ref_from_arrays(frame->extended_data,
frame->linesize[0], AV_PERM_READ,
frame->nb_samples,
frame->format,
frame->channel_layout);
}
if (!buf) {
ret = AVERROR(ENOMEM);
goto fail;
}
avfilter_copy_frame_props(buf, frame);
buf->buf->priv = frame;
buf->buf->free = compat_free_buffer;
*pbuf = buf;
return 0;
fail:
av_frame_free(&frame);
return ret;
}
int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
{
return compat_read(ctx, buf, 0);
}
int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
int nb_samples)
{
return compat_read(ctx, buf, nb_samples);
}
#endif
static const AVFilterPad avfilter_vsink_buffer_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ,
.needs_fifo = 1
},
{ NULL }
@@ -165,7 +232,6 @@ static const AVFilterPad avfilter_asink_abuffer_inputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ,
.needs_fifo = 1
},
{ NULL }

@@ -26,6 +26,7 @@
#include "avfilter.h"
#if FF_API_AVFILTERBUFFER
/**
* Get a buffer with filtered data from sink and put it in buf.
*
@@ -38,6 +39,7 @@
* @return >= 0 in case of success, a negative AVERROR code in case of
* failure.
*/
attribute_deprecated
int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf);
/**
@@ -56,7 +58,37 @@ int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf);
* @warning do not mix this function with av_buffersink_read(). Use only one or
* the other with a single sink, not both.
*/
attribute_deprecated
int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
int nb_samples);
#endif
/**
* Get a frame with filtered data from sink and put it in frame.
*
* @param ctx pointer to a context of a buffersink or abuffersink AVFilter.
* @param frame pointer to an allocated frame that will be filled with data.
* The data must be freed using av_frame_unref() / av_frame_free()
*
* @return >= 0 in case of success, a negative AVERROR code in case of
* failure.
*/
int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame);
/**
* Same as av_buffersink_get_frame(), but with the ability to specify the number
* of samples read. This function is less efficient than
* av_buffersink_get_frame(), because it copies the data around.
*
* @param ctx pointer to a context of the abuffersink AVFilter.
* @param frame pointer to an allocated frame that will be filled with data.
* The data must be freed using av_frame_unref() / av_frame_free()
* frame will contain exactly nb_samples audio samples, except at
* the end of stream, when it can contain less than nb_samples.
*
* @warning do not mix this function with av_buffersink_get_frame(). Use only one or
* the other with a single sink, not both.
*/
int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples);
#endif /* AVFILTER_BUFFERSINK_H */
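
A typical consumer loop for the new sink API (graph setup omitted; sink_ctx is assumed to be a configured buffersink or abuffersink instance):

    AVFrame *frame = av_frame_alloc();
    int ret;

    if (!frame)
        return AVERROR(ENOMEM);
    while ((ret = av_buffersink_get_frame(sink_ctx, frame)) >= 0) {
        /* ... consume the frame ... */
        av_frame_unref(frame); /* reuse the same AVFrame for the next call */
    }
    av_frame_free(&frame);
    return ret == AVERROR_EOF ? 0 : ret;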

@@ -26,6 +26,7 @@
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/fifo.h"
#include "libavutil/frame.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
@@ -69,95 +70,168 @@ typedef struct {
return AVERROR(EINVAL);\
}
int av_buffersrc_write_frame(AVFilterContext *buffer_filter, const AVFrame *frame)
int av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame)
{
BufferSourceContext *c = buffer_filter->priv;
AVFilterBufferRef *buf;
AVFrame *copy;
int ret = 0;
if (!(copy = av_frame_alloc()))
return AVERROR(ENOMEM);
ret = av_frame_ref(copy, frame);
if (ret >= 0)
ret = av_buffersrc_add_frame(ctx, copy);
av_frame_free(&copy);
return ret;
}
int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
{
BufferSourceContext *s = ctx->priv;
AVFrame *copy;
int ret;
if (!frame) {
c->eof = 1;
s->eof = 1;
return 0;
} else if (c->eof)
} else if (s->eof)
return AVERROR(EINVAL);
if (!av_fifo_space(c->fifo) &&
(ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
sizeof(buf))) < 0)
return ret;
switch (buffer_filter->outputs[0]->type) {
switch (ctx->outputs[0]->type) {
case AVMEDIA_TYPE_VIDEO:
CHECK_VIDEO_PARAM_CHANGE(buffer_filter, c, frame->width, frame->height,
CHECK_VIDEO_PARAM_CHANGE(ctx, s, frame->width, frame->height,
frame->format);
buf = ff_get_video_buffer(buffer_filter->outputs[0], AV_PERM_WRITE,
c->w, c->h);
if (!buf)
return AVERROR(ENOMEM);
av_image_copy(buf->data, buf->linesize, frame->data, frame->linesize,
c->pix_fmt, c->w, c->h);
break;
case AVMEDIA_TYPE_AUDIO:
CHECK_AUDIO_PARAM_CHANGE(buffer_filter, c, frame->sample_rate, frame->channel_layout,
CHECK_AUDIO_PARAM_CHANGE(ctx, s, frame->sample_rate, frame->channel_layout,
frame->format);
buf = ff_get_audio_buffer(buffer_filter->outputs[0], AV_PERM_WRITE,
frame->nb_samples);
if (!buf)
return AVERROR(ENOMEM);
av_samples_copy(buf->extended_data, frame->extended_data,
0, 0, frame->nb_samples,
av_get_channel_layout_nb_channels(frame->channel_layout),
frame->format);
break;
default:
return AVERROR(EINVAL);
}
avfilter_copy_frame_props(buf, frame);
if (!av_fifo_space(s->fifo) &&
(ret = av_fifo_realloc2(s->fifo, av_fifo_size(s->fifo) +
sizeof(copy))) < 0)
return ret;
if (!(copy = av_frame_alloc()))
return AVERROR(ENOMEM);
av_frame_move_ref(copy, frame);
if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) {
avfilter_unref_buffer(buf);
if ((ret = av_fifo_generic_write(s->fifo, &copy, sizeof(copy), NULL)) < 0) {
av_frame_move_ref(frame, copy);
av_frame_free(&copy);
return ret;
}
return 0;
}
int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf)
#if FF_API_AVFILTERBUFFER
static void compat_free_buffer(void *opaque, uint8_t *data)
{
BufferSourceContext *c = s->priv;
int ret;
AVFilterBufferRef *buf = opaque;
avfilter_unref_buffer(buf);
}
static void compat_unref_buffer(void *opaque, uint8_t *data)
{
AVBufferRef *buf = opaque;
av_buffer_unref(&buf);
}
int av_buffersrc_buffer(AVFilterContext *ctx, AVFilterBufferRef *buf)
{
BufferSourceContext *s = ctx->priv;
AVFrame *frame = NULL;
AVBufferRef *dummy_buf = NULL;
int ret = 0, planes, i;
if (!buf) {
c->eof = 1;
s->eof = 1;
return 0;
} else if (c->eof)
} else if (s->eof)
return AVERROR(EINVAL);
if (!av_fifo_space(c->fifo) &&
(ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
sizeof(buf))) < 0)
return ret;
frame = av_frame_alloc();
if (!frame)
return AVERROR(ENOMEM);
switch (s->outputs[0]->type) {
case AVMEDIA_TYPE_VIDEO:
CHECK_VIDEO_PARAM_CHANGE(s, c, buf->video->w, buf->video->h, buf->format);
break;
case AVMEDIA_TYPE_AUDIO:
CHECK_AUDIO_PARAM_CHANGE(s, c, buf->audio->sample_rate, buf->audio->channel_layout,
buf->format);
break;
default:
return AVERROR(EINVAL);
dummy_buf = av_buffer_create(NULL, 0, compat_free_buffer, buf, 0);
if (!dummy_buf) {
ret = AVERROR(ENOMEM);
goto fail;
}
if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0)
return ret;
if ((ret = avfilter_copy_buf_props(frame, buf)) < 0)
goto fail;
return 0;
#define WRAP_PLANE(ref_out, data, data_size) \
do { \
AVBufferRef *dummy_ref = av_buffer_ref(dummy_buf); \
if (!dummy_ref) { \
ret = AVERROR(ENOMEM); \
goto fail; \
} \
ref_out = av_buffer_create(data, data_size, compat_unref_buffer, \
dummy_ref, 0); \
if (!ref_out) { \
av_frame_unref(frame); \
ret = AVERROR(ENOMEM); \
goto fail; \
} \
} while (0)
if (ctx->outputs[0]->type == AVMEDIA_TYPE_VIDEO) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
if (!desc) {
ret = AVERROR(EINVAL);
goto fail;
}
planes = (desc->flags & PIX_FMT_PLANAR) ? desc->nb_components : 1;
for (i = 0; i < planes; i++) {
int h_shift = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
int plane_size = (frame->width >> h_shift) * frame->linesize[i];
WRAP_PLANE(frame->buf[i], frame->data[i], plane_size);
}
} else {
int planar = av_sample_fmt_is_planar(frame->format);
int channels = av_get_channel_layout_nb_channels(frame->channel_layout);
planes = planar ? channels : 1;
if (planes > FF_ARRAY_ELEMS(frame->buf)) {
frame->nb_extended_buf = planes - FF_ARRAY_ELEMS(frame->buf);
frame->extended_buf = av_mallocz(sizeof(*frame->extended_buf) *
frame->nb_extended_buf);
if (!frame->extended_buf) {
ret = AVERROR(ENOMEM);
goto fail;
}
}
for (i = 0; i < FFMIN(planes, FF_ARRAY_ELEMS(frame->buf)); i++)
WRAP_PLANE(frame->buf[i], frame->extended_data[i], frame->linesize[0]);
for (i = 0; i < planes - FF_ARRAY_ELEMS(frame->buf); i++)
WRAP_PLANE(frame->extended_buf[i],
frame->extended_data[i + FF_ARRAY_ELEMS(frame->buf)],
frame->linesize[0]);
}
ret = av_buffersrc_add_frame(ctx, frame);
fail:
av_buffer_unref(&dummy_buf);
av_frame_free(&frame);
return ret;
}
#endif
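
The compatibility path above relies on a general AVBufferRef idiom: av_buffer_create() can wrap memory owned by someone else, with a free callback that releases the real owner once the last reference is gone. In skeleton form (the owner type and its release function are placeholders, not real API):

    static void release_owner(void *opaque, uint8_t *data)
    {
        owner_unref(opaque);               /* hypothetical release call */
    }

    /* A zero-sized "dummy" buffer whose only job is to own `owner`; every
     * wrapped plane then holds a reference to the dummy, so the owner is
     * released exactly once, after the last plane reference dies. */
    AVBufferRef *dummy = av_buffer_create(NULL, 0, release_owner, owner, 0);
    if (!dummy)
        return AVERROR(ENOMEM);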
static av_cold int init_video(AVFilterContext *ctx, const char *args)
{
@@ -181,7 +255,7 @@ static av_cold int init_video(AVFilterContext *ctx, const char *args)
}
}
if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*))))
if (!(c->fifo = av_fifo_alloc(sizeof(AVFrame*))))
return AVERROR(ENOMEM);
av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s\n", c->w, c->h, av_get_pix_fmt_name(c->pix_fmt));
@@ -234,7 +308,7 @@ static av_cold int init_audio(AVFilterContext *ctx, const char *args)
goto fail;
}
if (!(s->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) {
if (!(s->fifo = av_fifo_alloc(sizeof(AVFrame*)))) {
ret = AVERROR(ENOMEM);
goto fail;
}
@@ -255,9 +329,9 @@ static av_cold void uninit(AVFilterContext *ctx)
{
BufferSourceContext *s = ctx->priv;
while (s->fifo && av_fifo_size(s->fifo)) {
AVFilterBufferRef *buf;
av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
avfilter_unref_buffer(buf);
AVFrame *frame;
av_fifo_generic_read(s->fifo, &frame, sizeof(frame), NULL);
av_frame_free(&frame);
}
av_fifo_free(s->fifo);
s->fifo = NULL;
@@ -317,7 +391,7 @@ static int config_props(AVFilterLink *link)
static int request_frame(AVFilterLink *link)
{
BufferSourceContext *c = link->src->priv;
AVFilterBufferRef *buf;
AVFrame *frame;
int ret = 0;
if (!av_fifo_size(c->fifo)) {
@@ -325,9 +399,9 @@ static int request_frame(AVFilterLink *link)
return AVERROR_EOF;
return AVERROR(EAGAIN);
}
av_fifo_generic_read(c->fifo, &buf, sizeof(buf), NULL);
av_fifo_generic_read(c->fifo, &frame, sizeof(frame), NULL);
ff_filter_frame(link, buf);
ff_filter_frame(link, frame);
return ret;
}
@@ -338,7 +412,7 @@ static int poll_frame(AVFilterLink *link)
int size = av_fifo_size(c->fifo);
if (!size && c->eof)
return AVERROR_EOF;
return size/sizeof(AVFilterBufferRef*);
return size/sizeof(AVFrame*);
}
static const AVFilterPad avfilter_vsrc_buffer_outputs[] = {

@@ -27,24 +27,47 @@
#include "avfilter.h"
#if FF_API_AVFILTERBUFFER
/**
* Add a buffer to the filtergraph s.
*
* @param buf buffer containing frame data to be passed down the filtergraph.
* This function will take ownership of buf, the user must not free it.
* A NULL buf signals EOF -- i.e. no more frames will be sent to this filter.
*
* @deprecated use av_buffersrc_write_frame() or av_buffersrc_add_frame()
*/
attribute_deprecated
int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf);
#endif
/**
* Add a frame to the buffer source.
*
* @param s an instance of the buffersrc filter.
* @param frame frame to be added.
* @param frame frame to be added. If the frame is reference counted, this
* function will make a new reference to it. Otherwise the frame data will be
* copied.
*
* @warning frame data will be memcpy()ed, which may be a big performance
* hit. Use av_buffersrc_buffer() to avoid copying the data.
* @return 0 on success, a negative AVERROR on error
*/
int av_buffersrc_write_frame(AVFilterContext *s, const AVFrame *frame);
/**
* Add a frame to the buffer source.
*
* @param s an instance of the buffersrc filter.
* @param frame frame to be added. If the frame is reference counted, this
* function will take ownership of the reference(s) and reset the frame.
* Otherwise the frame data will be copied. If this function returns an error,
* the input frame is not touched.
*
* @return 0 on success, a negative AVERROR on error.
*
* @note the difference between this function and av_buffersrc_write_frame() is
* that av_buffersrc_write_frame() creates a new reference to the input frame,
* while this function takes ownership of the reference passed to it.
*/
int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame);
#endif /* AVFILTER_BUFFERSRC_H */

@@ -35,7 +35,7 @@
#include "video.h"
typedef struct Buf {
AVFilterBufferRef *buf;
AVFrame *frame;
struct Buf *next;
} Buf;
@@ -47,8 +47,8 @@ typedef struct {
* When a specific number of output samples is requested, the partial
* buffer is stored here
*/
AVFilterBufferRef *buf_out;
int allocated_samples; ///< number of samples buf_out was allocated for
AVFrame *out;
int allocated_samples; ///< number of samples out was allocated for
} FifoContext;
static av_cold int init(AVFilterContext *ctx, const char *args)
@@ -66,25 +66,25 @@ static av_cold void uninit(AVFilterContext *ctx)
for (buf = fifo->root.next; buf; buf = tmp) {
tmp = buf->next;
avfilter_unref_bufferp(&buf->buf);
av_frame_free(&buf->frame);
av_free(buf);
}
avfilter_unref_bufferp(&fifo->buf_out);
av_frame_free(&fifo->out);
}
static int add_to_queue(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int add_to_queue(AVFilterLink *inlink, AVFrame *frame)
{
FifoContext *fifo = inlink->dst->priv;
fifo->last->next = av_mallocz(sizeof(Buf));
if (!fifo->last->next) {
avfilter_unref_buffer(buf);
av_frame_free(&frame);
return AVERROR(ENOMEM);
}
fifo->last = fifo->last->next;
fifo->last->buf = buf;
fifo->last->frame = frame;
return 0;
}
@@ -101,7 +101,7 @@ static void queue_pop(FifoContext *s)
/**
* Move data pointers and pts offset samples forward.
*/
static void buffer_offset(AVFilterLink *link, AVFilterBufferRef *buf,
static void buffer_offset(AVFilterLink *link, AVFrame *frame,
int offset)
{
int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
@@ -110,32 +110,32 @@ static void buffer_offset(AVFilterLink *link, AVFilterBufferRef *buf,
int block_align = av_get_bytes_per_sample(link->format) * (planar ? 1 : nb_channels);
int i;
av_assert0(buf->audio->nb_samples > offset);
av_assert0(frame->nb_samples > offset);
for (i = 0; i < planes; i++)
buf->extended_data[i] += block_align*offset;
if (buf->data != buf->extended_data)
memcpy(buf->data, buf->extended_data,
FFMIN(planes, FF_ARRAY_ELEMS(buf->data)) * sizeof(*buf->data));
buf->linesize[0] -= block_align*offset;
buf->audio->nb_samples -= offset;
if (buf->pts != AV_NOPTS_VALUE) {
buf->pts += av_rescale_q(offset, (AVRational){1, link->sample_rate},
link->time_base);
frame->extended_data[i] += block_align * offset;
if (frame->data != frame->extended_data)
memcpy(frame->data, frame->extended_data,
FFMIN(planes, FF_ARRAY_ELEMS(frame->data)) * sizeof(*frame->data));
frame->linesize[0] -= block_align*offset;
frame->nb_samples -= offset;
if (frame->pts != AV_NOPTS_VALUE) {
frame->pts += av_rescale_q(offset, (AVRational){1, link->sample_rate},
link->time_base);
}
}
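
The pts adjustment above is a pure unit conversion: offset samples last offset / sample_rate seconds, which av_rescale_q() re-expresses in the link's time base. With illustrative numbers, sample_rate = 48000, time_base = 1/90000 and offset = 1024 give an advance of 1024 * 90000 / 48000 = 1920 ticks.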
static int calc_ptr_alignment(AVFilterBufferRef *buf)
static int calc_ptr_alignment(AVFrame *frame)
{
int planes = av_sample_fmt_is_planar(buf->format) ?
av_get_channel_layout_nb_channels(buf->audio->channel_layout) : 1;
int planes = av_sample_fmt_is_planar(frame->format) ?
av_get_channel_layout_nb_channels(frame->channel_layout) : 1;
int min_align = 128;
int p;
for (p = 0; p < planes; p++) {
int cur_align = 128;
while ((intptr_t)buf->extended_data[p] % cur_align)
while ((intptr_t)frame->extended_data[p] % cur_align)
cur_align >>= 1;
if (cur_align < min_align)
min_align = cur_align;
@@ -147,35 +147,34 @@ static int return_audio_frame(AVFilterContext *ctx)
{
AVFilterLink *link = ctx->outputs[0];
FifoContext *s = ctx->priv;
AVFilterBufferRef *head = s->root.next->buf;
AVFilterBufferRef *buf_out;
AVFrame *head = s->root.next->frame;
AVFrame *out;
int ret;
if (!s->buf_out &&
head->audio->nb_samples >= link->request_samples &&
if (!s->out &&
head->nb_samples >= link->request_samples &&
calc_ptr_alignment(head) >= 32) {
if (head->audio->nb_samples == link->request_samples) {
buf_out = head;
if (head->nb_samples == link->request_samples) {
out = head;
queue_pop(s);
} else {
buf_out = avfilter_ref_buffer(head, AV_PERM_READ);
if (!buf_out)
out = av_frame_clone(head);
if (!out)
return AVERROR(ENOMEM);
buf_out->audio->nb_samples = link->request_samples;
out->nb_samples = link->request_samples;
buffer_offset(link, head, link->request_samples);
}
} else {
int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
if (!s->buf_out) {
s->buf_out = ff_get_audio_buffer(link, AV_PERM_WRITE,
link->request_samples);
if (!s->buf_out)
if (!s->out) {
s->out = ff_get_audio_buffer(link, link->request_samples);
if (!s->out)
return AVERROR(ENOMEM);
s->buf_out->audio->nb_samples = 0;
s->buf_out->pts = head->pts;
s->out->nb_samples = 0;
s->out->pts = head->pts;
s->allocated_samples = link->request_samples;
} else if (link->request_samples != s->allocated_samples) {
av_log(ctx, AV_LOG_ERROR, "request_samples changed before the "
@@ -183,41 +182,41 @@ static int return_audio_frame(AVFilterContext *ctx)
return AVERROR(EINVAL);
}
while (s->buf_out->audio->nb_samples < s->allocated_samples) {
int len = FFMIN(s->allocated_samples - s->buf_out->audio->nb_samples,
head->audio->nb_samples);
while (s->out->nb_samples < s->allocated_samples) {
int len = FFMIN(s->allocated_samples - s->out->nb_samples,
head->nb_samples);
av_samples_copy(s->buf_out->extended_data, head->extended_data,
s->buf_out->audio->nb_samples, 0, len, nb_channels,
av_samples_copy(s->out->extended_data, head->extended_data,
s->out->nb_samples, 0, len, nb_channels,
link->format);
s->buf_out->audio->nb_samples += len;
s->out->nb_samples += len;
if (len == head->audio->nb_samples) {
avfilter_unref_buffer(head);
if (len == head->nb_samples) {
av_frame_free(&head);
queue_pop(s);
if (!s->root.next &&
(ret = ff_request_frame(ctx->inputs[0])) < 0) {
if (ret == AVERROR_EOF) {
av_samples_set_silence(s->buf_out->extended_data,
s->buf_out->audio->nb_samples,
av_samples_set_silence(s->out->extended_data,
s->out->nb_samples,
s->allocated_samples -
s->buf_out->audio->nb_samples,
s->out->nb_samples,
nb_channels, link->format);
s->buf_out->audio->nb_samples = s->allocated_samples;
s->out->nb_samples = s->allocated_samples;
break;
}
return ret;
}
head = s->root.next->buf;
head = s->root.next->frame;
} else {
buffer_offset(link, head, len);
}
}
buf_out = s->buf_out;
s->buf_out = NULL;
out = s->out;
s->out = NULL;
}
return ff_filter_frame(link, buf_out);
return ff_filter_frame(link, out);
}
static int request_frame(AVFilterLink *outlink)
@@ -233,7 +232,7 @@ static int request_frame(AVFilterLink *outlink)
if (outlink->request_samples) {
return return_audio_frame(outlink->src);
} else {
ret = ff_filter_frame(outlink, fifo->root.next->buf);
ret = ff_filter_frame(outlink, fifo->root.next->frame);
queue_pop(fifo);
}
@@ -246,7 +245,6 @@ static const AVFilterPad avfilter_vf_fifo_inputs[] = {
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer = ff_null_get_video_buffer,
.filter_frame = add_to_queue,
.rej_perms = AV_PERM_REUSE2,
},
{ NULL }
};
@@ -279,7 +277,6 @@ static const AVFilterPad avfilter_af_afifo_inputs[] = {
.type = AVMEDIA_TYPE_AUDIO,
.get_audio_buffer = ff_null_get_audio_buffer,
.filter_frame = add_to_queue,
.rej_perms = AV_PERM_REUSE2,
},
{ NULL }
};

@@ -43,33 +43,13 @@ struct AVFilterPad {
*/
enum AVMediaType type;
/**
* Minimum required permissions on incoming buffers. Any buffer with
* insufficient permissions will be automatically copied by the filter
* system to a new buffer which provides the needed access permissions.
*
* Input pads only.
*/
int min_perms;
/**
* Permissions which are not accepted on incoming buffers. Any buffer
* which has any of these permissions set will be automatically copied
* by the filter system to a new buffer which does not have those
* permissions. This can be used to easily disallow buffers with
* AV_PERM_REUSE.
*
* Input pads only.
*/
int rej_perms;
/**
* Callback function to get a video buffer. If NULL, the filter system will
* use avfilter_default_get_video_buffer().
*
* Input video pads only.
*/
AVFilterBufferRef *(*get_video_buffer)(AVFilterLink *link, int perms, int w, int h);
AVFrame *(*get_video_buffer)(AVFilterLink *link, int w, int h);
/**
* Callback function to get an audio buffer. If NULL, the filter system will
@ -77,8 +57,7 @@ struct AVFilterPad {
*
* Input audio pads only.
*/
AVFilterBufferRef *(*get_audio_buffer)(AVFilterLink *link, int perms,
int nb_samples);
AVFrame *(*get_audio_buffer)(AVFilterLink *link, int nb_samples);
/**
* Filtering callback. This is where a filter receives a frame with
@ -90,7 +69,7 @@ struct AVFilterPad {
* must ensure that frame is properly unreferenced on error if it
* hasn't been passed on to another filter.
*/
int (*filter_frame)(AVFilterLink *link, AVFilterBufferRef *frame);
int (*filter_frame)(AVFilterLink *link, AVFrame *frame);
/**
* Frame poll callback. This returns the number of immediately available
@ -215,6 +194,6 @@ int ff_request_frame(AVFilterLink *link);
* @return >= 0 on success, a negative AVERROR on error. The receiving filter
* is responsible for unreferencing frame in case of error.
*/
int ff_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame);
int ff_filter_frame(AVFilterLink *link, AVFrame *frame);
#endif /* AVFILTER_INTERNAL_H */
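To make the slimmed-down pad callbacks concrete, here is a minimal sketch of an input pad under the new API (a hypothetical pass-through filter, using only the lavfi helpers shown in this commit): the callback receives a plain AVFrame, owns it, and must either hand it downstream or free it.

/* Minimal pass-through filter_frame under the new AVFrame API.
 * No permission flags are involved any more: the callback owns the
 * frame until it is passed on or freed. */
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];

    /* ... read (or, with .needs_writable set, modify) frame here ... */

    return ff_filter_frame(outlink, frame);
}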

@ -67,13 +67,13 @@ static void split_uninit(AVFilterContext *ctx)
av_freep(&ctx->output_pads[i].name);
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
int i, ret = 0;
for (i = 0; i < ctx->nb_outputs; i++) {
AVFilterBufferRef *buf_out = avfilter_ref_buffer(frame, ~AV_PERM_WRITE);
AVFrame *buf_out = av_frame_clone(frame);
if (!buf_out) {
ret = AVERROR(ENOMEM);
break;
@ -83,7 +83,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
if (ret < 0)
break;
}
avfilter_unref_bufferp(&frame);
av_frame_free(&frame);
return ret;
}
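Note the ownership model split relies on: av_frame_clone() only creates a new reference to the same underlying buffers, so the clones handed to each output share pixel data. A downstream filter that wants to modify such a frame is expected to go through av_frame_is_writable()/av_frame_make_writable(); a small illustration, not part of this commit:

/* Making a possibly-shared frame safe to modify.
 * av_frame_make_writable() copies the data only when the buffers
 * are referenced more than once (copy-on-write). */
if (av_frame_make_writable(frame) < 0) {
    av_frame_free(&frame);
    return AVERROR(ENOMEM);
}
frame->data[0][0] = 0; /* now guaranteed not to touch other references */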

@ -52,5 +52,8 @@
#ifndef FF_API_FOO_COUNT
#define FF_API_FOO_COUNT (LIBAVFILTER_VERSION_MAJOR < 4)
#endif
#ifndef FF_API_AVFILTERBUFFER
#define FF_API_AVFILTERBUFFER (LIBAVFILTER_VERSION_MAJOR < 4)
#endif
#endif /* AVFILTER_VERSION_H */
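FF_API_AVFILTERBUFFER follows the usual FF_API_* pattern: code that still touches the deprecated AVFilterBuffer API is wrapped in this guard so it disappears automatically at the next major bump. A schematic example, not taken from this commit:

#if FF_API_AVFILTERBUFFER
    /* legacy path, compiled only while LIBAVFILTER_VERSION_MAJOR < 4 */
    avfilter_unref_buffer(ref);
#else
    av_frame_free(&frame);
#endif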

@ -65,11 +65,11 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
return 0;
}
static int filter_frame(AVFilterLink *link, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
AspectContext *aspect = link->dst->priv;
frame->video->pixel_aspect = aspect->aspect;
frame->sample_aspect_ratio = aspect->aspect;
return ff_filter_frame(link->dst->outputs[0], frame);
}

@ -78,7 +78,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
BlackFrameContext *blackframe = ctx->priv;
@ -86,7 +86,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
int pblack = 0;
uint8_t *p = frame->data[0];
for (i = 0; i < frame->video->h; i++) {
for (i = 0; i < frame->height; i++) {
for (x = 0; x < inlink->w; x++)
blackframe->nblack += p[x] < blackframe->bthresh;
p += frame->linesize[0];
@ -94,8 +94,8 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
pblack = blackframe->nblack * 100 / (inlink->w * inlink->h);
if (pblack >= blackframe->bamount)
av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pos:%"PRId64" pts:%"PRId64" t:%f\n",
blackframe->frame, pblack, frame->pos, frame->pts,
av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pts:%"PRId64" t:%f\n",
blackframe->frame, pblack, frame->pts,
frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base));
blackframe->frame++;

@ -307,23 +307,23 @@ static void vblur(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_li
h, radius, power, temp);
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
BoxBlurContext *boxblur = ctx->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFilterBufferRef *out;
AVFrame *out;
int plane;
int cw = inlink->w >> boxblur->hsub, ch = in->video->h >> boxblur->vsub;
int cw = inlink->w >> boxblur->hsub, ch = in->height >> boxblur->vsub;
int w[4] = { inlink->w, cw, cw, inlink->w };
int h[4] = { in->video->h, ch, ch, in->video->h };
int h[4] = { in->height, ch, ch, in->height };
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return AVERROR(ENOMEM);
}
avfilter_copy_buffer_ref_props(out, in);
av_frame_copy_props(out, in);
for (plane = 0; plane < 4 && in->data[plane]; plane++)
hblur(out->data[plane], out->linesize[plane],
@ -337,7 +337,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
w[plane], h[plane], boxblur->radius[plane], boxblur->power[plane],
boxblur->temp);
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
@ -348,7 +348,6 @@ static const AVFilterPad avfilter_vf_boxblur_inputs[] = {
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ
},
{ NULL }
};

@ -21,17 +21,35 @@
* copy video filter
*/
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out = ff_get_video_buffer(outlink, in->width, in->height);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
av_image_copy(out->data, out->linesize, in->data, in->linesize,
in->format, in->width, in->height);
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
static const AVFilterPad avfilter_vf_copy_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer = ff_null_get_video_buffer,
.rej_perms = ~0
.filter_frame = filter_frame,
},
{ NULL }
};

@ -65,7 +65,6 @@ enum var_name {
VAR_X,
VAR_Y,
VAR_N,
VAR_POS,
VAR_T,
VAR_VARS_NB
};
@ -174,7 +173,6 @@ static int config_input(AVFilterLink *link)
crop->var_values[VAR_OUT_H] = crop->var_values[VAR_OH] = NAN;
crop->var_values[VAR_N] = 0;
crop->var_values[VAR_T] = NAN;
crop->var_values[VAR_POS] = NAN;
av_image_fill_max_pixsteps(crop->max_step, NULL, pix_desc);
crop->hsub = pix_desc->log2_chroma_w;
@ -243,19 +241,18 @@ static int config_output(AVFilterLink *link)
return 0;
}
static int filter_frame(AVFilterLink *link, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
AVFilterContext *ctx = link->dst;
CropContext *crop = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
int i;
frame->video->w = crop->w;
frame->video->h = crop->h;
frame->width = crop->w;
frame->height = crop->h;
crop->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
NAN : frame->pts * av_q2d(link->time_base);
crop->var_values[VAR_POS] = frame->pos == -1 ? NAN : frame->pos;
crop->var_values[VAR_X] = av_expr_eval(crop->x_pexpr, crop->var_values, NULL);
crop->var_values[VAR_Y] = av_expr_eval(crop->y_pexpr, crop->var_values, NULL);
crop->var_values[VAR_X] = av_expr_eval(crop->x_pexpr, crop->var_values, NULL);

@ -117,7 +117,7 @@ static int config_input(AVFilterLink *inlink)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
CropDetectContext *cd = ctx->priv;
@ -128,36 +128,36 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
if (++cd->frame_nb > 0) {
// Reset the crop area every reset_count frames, if reset_count is > 0
if (cd->reset_count > 0 && cd->frame_nb > cd->reset_count) {
cd->x1 = frame->video->w-1;
cd->y1 = frame->video->h-1;
cd->x1 = frame->width - 1;
cd->y1 = frame->height - 1;
cd->x2 = 0;
cd->y2 = 0;
cd->frame_nb = 1;
}
for (y = 0; y < cd->y1; y++) {
if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->video->w, bpp) > cd->limit) {
if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->width, bpp) > cd->limit) {
cd->y1 = y;
break;
}
}
for (y = frame->video->h-1; y > cd->y2; y--) {
if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->video->w, bpp) > cd->limit) {
for (y = frame->height - 1; y > cd->y2; y--) {
if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->width, bpp) > cd->limit) {
cd->y2 = y;
break;
}
}
for (y = 0; y < cd->x1; y++) {
if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->video->h, bpp) > cd->limit) {
if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->height, bpp) > cd->limit) {
cd->x1 = y;
break;
}
}
for (y = frame->video->w-1; y > cd->x2; y--) {
if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->video->h, bpp) > cd->limit) {
for (y = frame->width - 1; y > cd->x2; y--) {
if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->height, bpp) > cd->limit) {
cd->x2 = y;
break;
}
@ -187,8 +187,8 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
y += (shrink_by/2 + 1) & ~1;
av_log(ctx, AV_LOG_INFO,
"x1:%d x2:%d y1:%d y2:%d w:%d h:%d x:%d y:%d pos:%"PRId64" pts:%"PRId64" t:%f crop=%d:%d:%d:%d\n",
cd->x1, cd->x2, cd->y1, cd->y2, w, h, x, y, frame->pos, frame->pts,
"x1:%d x2:%d y1:%d y2:%d w:%d h:%d x:%d y:%d pts:%"PRId64" t:%f crop=%d:%d:%d:%d\n",
cd->x1, cd->x2, cd->y1, cd->y2, w, h, x, y, frame->pts,
frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base),
w, h, x, y);
}

@ -215,30 +215,30 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
DelogoContext *delogo = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
AVFilterBufferRef *out;
AVFrame *out;
int hsub0 = desc->log2_chroma_w;
int vsub0 = desc->log2_chroma_h;
int direct = 0;
int plane;
if ((in->perms & AV_PERM_WRITE) && !(in->perms & AV_PERM_PRESERVE)) {
if (av_frame_is_writable(in)) {
direct = 1;
out = in;
} else {
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return AVERROR(ENOMEM);
}
avfilter_copy_buffer_ref_props(out, in);
out->video->w = outlink->w;
out->video->h = outlink->h;
av_frame_copy_props(out, in);
out->width = outlink->w;
out->height = outlink->h;
}
for (plane = 0; plane < 4 && in->data[plane]; plane++) {
@ -255,7 +255,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
}
if (!direct)
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
@ -266,8 +266,6 @@ static const AVFilterPad avfilter_vf_delogo_inputs[] = {
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer = ff_null_get_video_buffer,
.filter_frame = filter_frame,
.min_perms = AV_PERM_WRITE | AV_PERM_READ,
.rej_perms = AV_PERM_PRESERVE
},
{ NULL }
};
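delogo above (and gradfun and hqdn3d below) now all share the same in-place idiom, which replaces the old AV_PERM_WRITE/AV_PERM_PRESERVE negotiation. Condensed into a sketch:

AVFrame *out;
int direct = av_frame_is_writable(in);

if (direct) {
    out = in; /* sole reference: safe to filter in place */
} else {
    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in); /* pts, SAR, interlacing flags, ... */
}

/* ... filter in -> out ... */

if (!direct)
    av_frame_free(&in);
return ff_filter_frame(outlink, out);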

@ -96,20 +96,20 @@ static int config_input(AVFilterLink *inlink)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
DrawBoxContext *drawbox = inlink->dst->priv;
int plane, x, y, xb = drawbox->x, yb = drawbox->y;
unsigned char *row[4];
for (y = FFMAX(yb, 0); y < frame->video->h && y < (yb + drawbox->h); y++) {
for (y = FFMAX(yb, 0); y < frame->height && y < (yb + drawbox->h); y++) {
row[0] = frame->data[0] + y * frame->linesize[0];
for (plane = 1; plane < 3; plane++)
row[plane] = frame->data[plane] +
frame->linesize[plane] * (y >> drawbox->vsub);
for (x = FFMAX(xb, 0); x < (xb + drawbox->w) && x < frame->video->w; x++) {
for (x = FFMAX(xb, 0); x < (xb + drawbox->w) && x < frame->width; x++) {
double alpha = (double)drawbox->yuv_color[A] / 255;
if ((y - yb < 3) || (yb + drawbox->h - y < 4) ||
@ -131,8 +131,7 @@ static const AVFilterPad avfilter_vf_drawbox_inputs[] = {
.config_props = config_input,
.get_video_buffer = ff_null_get_video_buffer,
.filter_frame = filter_frame,
.min_perms = AV_PERM_WRITE | AV_PERM_READ,
.rej_perms = AV_PERM_PRESERVE
.needs_writable = 1,
},
{ NULL }
};

@ -627,19 +627,19 @@ static int config_input(AVFilterLink *inlink)
(bitmap->buffer[(r) * bitmap->pitch + ((c)>>3)] & (0x80 >> ((c)&7))) * 255 : \
bitmap->buffer[(r) * bitmap->pitch + (c)]
#define SET_PIXEL_YUV(picref, yuva_color, val, x, y, hsub, vsub) { \
luma_pos = ((x) ) + ((y) ) * picref->linesize[0]; \
#define SET_PIXEL_YUV(frame, yuva_color, val, x, y, hsub, vsub) { \
luma_pos = ((x) ) + ((y) ) * frame->linesize[0]; \
alpha = yuva_color[3] * (val) * 129; \
picref->data[0][luma_pos] = (alpha * yuva_color[0] + (255*255*129 - alpha) * picref->data[0][luma_pos] ) >> 23; \
frame->data[0][luma_pos] = (alpha * yuva_color[0] + (255*255*129 - alpha) * frame->data[0][luma_pos] ) >> 23; \
if (((x) & ((1<<(hsub)) - 1)) == 0 && ((y) & ((1<<(vsub)) - 1)) == 0) {\
chroma_pos1 = ((x) >> (hsub)) + ((y) >> (vsub)) * picref->linesize[1]; \
chroma_pos2 = ((x) >> (hsub)) + ((y) >> (vsub)) * picref->linesize[2]; \
picref->data[1][chroma_pos1] = (alpha * yuva_color[1] + (255*255*129 - alpha) * picref->data[1][chroma_pos1]) >> 23; \
picref->data[2][chroma_pos2] = (alpha * yuva_color[2] + (255*255*129 - alpha) * picref->data[2][chroma_pos2]) >> 23; \
chroma_pos1 = ((x) >> (hsub)) + ((y) >> (vsub)) * frame->linesize[1]; \
chroma_pos2 = ((x) >> (hsub)) + ((y) >> (vsub)) * frame->linesize[2]; \
frame->data[1][chroma_pos1] = (alpha * yuva_color[1] + (255*255*129 - alpha) * frame->data[1][chroma_pos1]) >> 23; \
frame->data[2][chroma_pos2] = (alpha * yuva_color[2] + (255*255*129 - alpha) * frame->data[2][chroma_pos2]) >> 23; \
}\
}
static inline int draw_glyph_yuv(AVFilterBufferRef *picref, FT_Bitmap *bitmap, unsigned int x,
static inline int draw_glyph_yuv(AVFrame *frame, FT_Bitmap *bitmap, unsigned int x,
unsigned int y, unsigned int width, unsigned int height,
const uint8_t yuva_color[4], int hsub, int vsub)
{
@ -654,22 +654,22 @@ static inline int draw_glyph_yuv(AVFilterBufferRef *picref, FT_Bitmap *bitmap, u
if (!src_val)
continue;
SET_PIXEL_YUV(picref, yuva_color, src_val, c+x, y+r, hsub, vsub);
SET_PIXEL_YUV(frame, yuva_color, src_val, c+x, y+r, hsub, vsub);
}
}
return 0;
}
#define SET_PIXEL_RGB(picref, rgba_color, val, x, y, pixel_step, r_off, g_off, b_off, a_off) { \
p = picref->data[0] + (x) * pixel_step + ((y) * picref->linesize[0]); \
#define SET_PIXEL_RGB(frame, rgba_color, val, x, y, pixel_step, r_off, g_off, b_off, a_off) { \
p = frame->data[0] + (x) * pixel_step + ((y) * frame->linesize[0]); \
alpha = rgba_color[3] * (val) * 129; \
*(p+r_off) = (alpha * rgba_color[0] + (255*255*129 - alpha) * *(p+r_off)) >> 23; \
*(p+g_off) = (alpha * rgba_color[1] + (255*255*129 - alpha) * *(p+g_off)) >> 23; \
*(p+b_off) = (alpha * rgba_color[2] + (255*255*129 - alpha) * *(p+b_off)) >> 23; \
}
static inline int draw_glyph_rgb(AVFilterBufferRef *picref, FT_Bitmap *bitmap,
static inline int draw_glyph_rgb(AVFrame *frame, FT_Bitmap *bitmap,
unsigned int x, unsigned int y,
unsigned int width, unsigned int height, int pixel_step,
const uint8_t rgba_color[4], const uint8_t rgba_map[4])
@ -685,7 +685,7 @@ static inline int draw_glyph_rgb(AVFilterBufferRef *picref, FT_Bitmap *bitmap,
if (!src_val)
continue;
SET_PIXEL_RGB(picref, rgba_color, src_val, c+x, y+r, pixel_step,
SET_PIXEL_RGB(frame, rgba_color, src_val, c+x, y+r, pixel_step,
rgba_map[0], rgba_map[1], rgba_map[2], rgba_map[3]);
}
}
@ -693,7 +693,7 @@ static inline int draw_glyph_rgb(AVFilterBufferRef *picref, FT_Bitmap *bitmap,
return 0;
}
static inline void drawbox(AVFilterBufferRef *picref, unsigned int x, unsigned int y,
static inline void drawbox(AVFrame *frame, unsigned int x, unsigned int y,
unsigned int width, unsigned int height,
uint8_t *line[4], int pixel_step[4], uint8_t color[4],
int hsub, int vsub, int is_rgba_packed, uint8_t rgba_map[4])
@ -705,22 +705,22 @@ static inline void drawbox(AVFilterBufferRef *picref, unsigned int x, unsigned i
uint8_t *p;
for (j = 0; j < height; j++)
for (i = 0; i < width; i++)
SET_PIXEL_RGB(picref, color, 255, i+x, y+j, pixel_step[0],
SET_PIXEL_RGB(frame, color, 255, i+x, y+j, pixel_step[0],
rgba_map[0], rgba_map[1], rgba_map[2], rgba_map[3]);
} else {
unsigned int luma_pos, chroma_pos1, chroma_pos2;
for (j = 0; j < height; j++)
for (i = 0; i < width; i++)
SET_PIXEL_YUV(picref, color, 255, i+x, y+j, hsub, vsub);
SET_PIXEL_YUV(frame, color, 255, i+x, y+j, hsub, vsub);
}
} else {
ff_draw_rectangle(picref->data, picref->linesize,
ff_draw_rectangle(frame->data, frame->linesize,
line, pixel_step, hsub, vsub,
x, y, width, height);
}
}
static int draw_glyphs(DrawTextContext *dtext, AVFilterBufferRef *picref,
static int draw_glyphs(DrawTextContext *dtext, AVFrame *frame,
int width, int height, const uint8_t rgbcolor[4], const uint8_t yuvcolor[4], int x, int y)
{
char *text = HAVE_LOCALTIME_R ? dtext->expanded_text : dtext->text;
@ -745,11 +745,11 @@ static int draw_glyphs(DrawTextContext *dtext, AVFilterBufferRef *picref,
return AVERROR(EINVAL);
if (dtext->is_packed_rgb) {
draw_glyph_rgb(picref, &glyph->bitmap,
draw_glyph_rgb(frame, &glyph->bitmap,
dtext->positions[i].x+x, dtext->positions[i].y+y, width, height,
dtext->pixel_step[0], rgbcolor, dtext->rgba_map);
} else {
draw_glyph_yuv(picref, &glyph->bitmap,
draw_glyph_yuv(frame, &glyph->bitmap,
dtext->positions[i].x+x, dtext->positions[i].y+y, width, height,
yuvcolor, dtext->hsub, dtext->vsub);
}
@ -758,7 +758,7 @@ static int draw_glyphs(DrawTextContext *dtext, AVFilterBufferRef *picref,
return 0;
}
static int draw_text(AVFilterContext *ctx, AVFilterBufferRef *picref,
static int draw_text(AVFilterContext *ctx, AVFrame *frame,
int width, int height)
{
DrawTextContext *dtext = ctx->priv;
@ -766,13 +766,13 @@ static int draw_text(AVFilterContext *ctx, AVFilterBufferRef *picref,
/* draw box */
if (dtext->draw_box)
drawbox(picref, dtext->x, dtext->y, dtext->w, dtext->h,
drawbox(frame, dtext->x, dtext->y, dtext->w, dtext->h,
dtext->box_line, dtext->pixel_step, dtext->boxcolor,
dtext->hsub, dtext->vsub, dtext->is_packed_rgb,
dtext->rgba_map);
if (dtext->shadowx || dtext->shadowy) {
if ((ret = draw_glyphs(dtext, picref, width, height,
if ((ret = draw_glyphs(dtext, frame, width, height,
dtext->shadowcolor_rgba,
dtext->shadowcolor,
dtext->x + dtext->shadowx,
@ -780,7 +780,7 @@ static int draw_text(AVFilterContext *ctx, AVFilterBufferRef *picref,
return ret;
}
if ((ret = draw_glyphs(dtext, picref, width, height,
if ((ret = draw_glyphs(dtext, frame, width, height,
dtext->fontcolor_rgba,
dtext->fontcolor,
dtext->x,
@ -805,7 +805,7 @@ static inline int normalize_double(int *n, double d)
return ret;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
DrawTextContext *dtext = ctx->priv;
@ -813,7 +813,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
if ((ret = dtext_prepare_text(ctx)) < 0) {
av_log(ctx, AV_LOG_ERROR, "Can't draw text\n");
avfilter_unref_bufferp(&frame);
av_frame_free(&frame);
return ret;
}
@ -848,7 +848,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
dtext->x, dtext->y, dtext->x+dtext->w, dtext->y+dtext->h);
if (dtext->draw)
draw_text(inlink->dst, frame, frame->video->w, frame->video->h);
draw_text(inlink->dst, frame, frame->width, frame->height);
dtext->var_values[VAR_N] += 1.0;
@ -862,9 +862,7 @@ static const AVFilterPad avfilter_vf_drawtext_inputs[] = {
.get_video_buffer = ff_null_get_video_buffer,
.filter_frame = filter_frame,
.config_props = config_input,
.min_perms = AV_PERM_WRITE |
AV_PERM_READ,
.rej_perms = AV_PERM_PRESERVE
.needs_writable = 1,
},
{ NULL }
};
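The SET_PIXEL_* macros above blend in fixed point: with alpha = yuva_color[3] * val * 129, the maximum weight 255*255*129 is just below 1<<23, so the final >> 23 stands in for dividing by the total weight. A stand-alone, hypothetical version of the same arithmetic:

/* dst' ~ t*color + (1-t)*dst with t = (a*val)/(255*255),
 * computed without a division; a and val are 0..255. */
static uint8_t blend_pixel(uint8_t dst, uint8_t color, int a, int val)
{
    unsigned alpha = a * val * 129; /* <= 255*255*129, just under 1<<23 */
    return (alpha * color + (255*255*129 - alpha) * dst) >> 23;
}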

@ -98,7 +98,7 @@ static int config_props(AVFilterLink *inlink)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
FadeContext *fade = inlink->dst->priv;
uint8_t *p;
@ -106,7 +106,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
if (fade->factor < UINT16_MAX) {
/* luma or rgb plane */
for (i = 0; i < frame->video->h; i++) {
for (i = 0; i < frame->height; i++) {
p = frame->data[0] + i * frame->linesize[0];
for (j = 0; j < inlink->w * fade->bpp; j++) {
/* fade->factor is using 16 lower-order bits for decimal
@ -120,7 +120,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
if (frame->data[1] && frame->data[2]) {
/* chroma planes */
for (plane = 1; plane < 3; plane++) {
for (i = 0; i < frame->video->h; i++) {
for (i = 0; i < frame->height; i++) {
p = frame->data[plane] + (i >> fade->vsub) * frame->linesize[plane];
for (j = 0; j < inlink->w >> fade->hsub; j++) {
/* 8421367 = ((128 << 1) + 1) << 15. It is an integer
@ -150,8 +150,7 @@ static const AVFilterPad avfilter_vf_fade_inputs[] = {
.config_props = config_props,
.get_video_buffer = ff_null_get_video_buffer,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ | AV_PERM_WRITE,
.rej_perms = AV_PERM_PRESERVE,
.needs_writable = 1,
},
{ NULL }
};

@ -113,15 +113,15 @@ static int config_input(AVFilterLink *inlink)
return 0;
}
static AVFilterBufferRef *get_video_buffer(AVFilterLink *inlink, int perms, int w, int h)
static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
return ff_get_video_buffer(outlink, perms, w, h);
return ff_get_video_buffer(outlink, w, h);
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
FieldOrderContext *s = ctx->priv;
@ -129,14 +129,14 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
int h, plane, line_step, line_size, line;
uint8_t *data;
if (!frame->video->interlaced ||
frame->video->top_field_first == s->dst_tff)
if (!frame->interlaced_frame ||
frame->top_field_first == s->dst_tff)
return ff_filter_frame(outlink, frame);
av_dlog(ctx,
"picture will move %s one line\n",
s->dst_tff ? "up" : "down");
h = frame->video->h;
h = frame->height;
for (plane = 0; plane < 4 && frame->data[plane]; plane++) {
line_step = frame->linesize[plane];
line_size = s->line_size[plane];
@ -148,7 +148,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
* The new last line is created as a copy of the
* penultimate line from that field. */
for (line = 0; line < h; line++) {
if (1 + line < frame->video->h) {
if (1 + line < frame->height) {
memcpy(data, data + line_step, line_size);
} else {
memcpy(data, data - line_step - line_step, line_size);
@ -172,7 +172,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
}
}
}
frame->video->top_field_first = s->dst_tff;
frame->top_field_first = s->dst_tff;
return ff_filter_frame(outlink, frame);
}
@ -184,8 +184,7 @@ static const AVFilterPad avfilter_vf_fieldorder_inputs[] = {
.config_props = config_input,
.get_video_buffer = get_video_buffer,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ | AV_PERM_WRITE,
.rej_perms = AV_PERM_REUSE2 | AV_PERM_PRESERVE,
.needs_writable = 1,
},
{ NULL }
};
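drawbox, fade and fieldorder above all illustrate the replacement for per-pad permission masks: an in-place filter now just sets needs_writable on its input pad, and the framework copies shared frames before filter_frame runs. Schematically:

static const AVFilterPad inputs[] = {
    {
        .name           = "default",
        .type           = AVMEDIA_TYPE_VIDEO,
        .filter_frame   = filter_frame,
        .needs_writable = 1, /* framework guarantees a private frame */
    },
    { NULL }
};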

@ -84,7 +84,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
}
av_opt_free(s);
if (!(s->fifo = av_fifo_alloc(2*sizeof(AVFilterBufferRef*))))
if (!(s->fifo = av_fifo_alloc(2*sizeof(AVFrame*))))
return AVERROR(ENOMEM);
av_log(ctx, AV_LOG_VERBOSE, "fps=%d/%d\n", s->framerate.num, s->framerate.den);
@ -94,9 +94,9 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
static void flush_fifo(AVFifoBuffer *fifo)
{
while (av_fifo_size(fifo)) {
AVFilterBufferRef *tmp;
AVFrame *tmp;
av_fifo_generic_read(fifo, &tmp, sizeof(tmp), NULL);
avfilter_unref_buffer(tmp);
av_frame_free(&tmp);
}
}
@ -139,7 +139,7 @@ static int request_frame(AVFilterLink *outlink)
if (ret == AVERROR_EOF && av_fifo_size(s->fifo)) {
int i;
for (i = 0; av_fifo_size(s->fifo); i++) {
AVFilterBufferRef *buf;
AVFrame *buf;
av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
buf->pts = av_rescale_q(s->first_pts, ctx->inputs[0]->time_base,
@ -156,13 +156,13 @@ static int request_frame(AVFilterLink *outlink)
return ret;
}
static int write_to_fifo(AVFifoBuffer *fifo, AVFilterBufferRef *buf)
static int write_to_fifo(AVFifoBuffer *fifo, AVFrame *buf)
{
int ret;
if (!av_fifo_space(fifo) &&
(ret = av_fifo_realloc2(fifo, 2*av_fifo_size(fifo)))) {
avfilter_unref_bufferp(&buf);
av_frame_free(&buf);
return ret;
}
@ -170,7 +170,7 @@ static int write_to_fifo(AVFifoBuffer *fifo, AVFilterBufferRef *buf)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
FPSContext *s = ctx->priv;
@ -190,7 +190,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
} else {
av_log(ctx, AV_LOG_WARNING, "Discarding initial frame(s) with no "
"timestamp.\n");
avfilter_unref_buffer(buf);
av_frame_free(&buf);
s->drop++;
}
return 0;
@ -207,8 +207,8 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
if (delta < 1) {
/* drop the frame and everything buffered except the first */
AVFilterBufferRef *tmp;
int drop = av_fifo_size(s->fifo)/sizeof(AVFilterBufferRef*);
AVFrame *tmp;
int drop = av_fifo_size(s->fifo)/sizeof(AVFrame*);
av_log(ctx, AV_LOG_DEBUG, "Dropping %d frame(s).\n", drop);
s->drop += drop;
@ -217,18 +217,18 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
flush_fifo(s->fifo);
ret = write_to_fifo(s->fifo, tmp);
avfilter_unref_buffer(buf);
av_frame_free(&buf);
return ret;
}
/* can output >= 1 frames */
for (i = 0; i < delta; i++) {
AVFilterBufferRef *buf_out;
AVFrame *buf_out;
av_fifo_generic_read(s->fifo, &buf_out, sizeof(buf_out), NULL);
/* duplicate the frame if needed */
if (!av_fifo_size(s->fifo) && i < delta - 1) {
AVFilterBufferRef *dup = avfilter_ref_buffer(buf_out, AV_PERM_READ);
AVFrame *dup = av_frame_clone(buf_out);
av_log(ctx, AV_LOG_DEBUG, "Duplicating frame.\n");
if (dup)
@ -237,8 +237,8 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
ret = AVERROR(ENOMEM);
if (ret < 0) {
avfilter_unref_bufferp(&buf_out);
avfilter_unref_bufferp(&buf);
av_frame_free(&buf_out);
av_frame_free(&buf);
return ret;
}
@ -249,7 +249,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
outlink->time_base) + s->frames_out;
if ((ret = ff_filter_frame(outlink, buf_out)) < 0) {
avfilter_unref_bufferp(&buf);
av_frame_free(&buf);
return ret;
}
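The fps filter queues AVFrame pointers, not frame structs, in its AVFifoBuffer; ownership travels with the pointer. The pattern, reduced to its essentials as a sketch under that assumption:

AVFifoBuffer *fifo = av_fifo_alloc(2 * sizeof(AVFrame*));
if (!fifo)
    return AVERROR(ENOMEM);

av_fifo_generic_write(fifo, &frame, sizeof(frame), NULL); /* push: fifo owns frame */

AVFrame *head;
av_fifo_generic_read(fifo, &head, sizeof(head), NULL);    /* pop: caller owns head */
av_frame_free(&head);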

@ -346,24 +346,24 @@ static int query_formats(AVFilterContext *ctx)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
Frei0rContext *frei0r = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFilterBufferRef *out;
AVFrame *out;
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return AVERROR(ENOMEM);
}
avfilter_copy_buffer_ref_props(out, in);
av_frame_copy_props(out, in);
frei0r->update(frei0r->instance, in->pts * av_q2d(inlink->time_base) * 1000,
(const uint32_t *)in->data[0],
(uint32_t *)out->data[0]);
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
@ -374,7 +374,6 @@ static const AVFilterPad avfilter_vf_frei0r_inputs[] = {
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input_props,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ
},
{ NULL }
};
@ -454,19 +453,18 @@ static int source_config_props(AVFilterLink *outlink)
static int source_request_frame(AVFilterLink *outlink)
{
Frei0rContext *frei0r = outlink->src->priv;
AVFilterBufferRef *picref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
AVFrame *frame = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!picref)
if (!frame)
return AVERROR(ENOMEM);
picref->video->pixel_aspect = (AVRational) {1, 1};
picref->pts = frei0r->pts++;
picref->pos = -1;
frame->sample_aspect_ratio = (AVRational) {1, 1};
frame->pts = frei0r->pts++;
frei0r->update(frei0r->instance, av_rescale_q(picref->pts, frei0r->time_base, (AVRational){1,1000}),
NULL, (uint32_t *)picref->data[0]);
frei0r->update(frei0r->instance, av_rescale_q(frame->pts, frei0r->time_base, (AVRational){1,1000}),
NULL, (uint32_t *)frame->data[0]);
return ff_filter_frame(outlink, picref);
return ff_filter_frame(outlink, frame);
}
static const AVFilterPad avfilter_vsrc_frei0r_src_outputs[] = {

@ -182,26 +182,26 @@ static int config_input(AVFilterLink *inlink)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
GradFunContext *gf = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFilterBufferRef *out;
AVFrame *out;
int p, direct;
if ((in->perms & AV_PERM_WRITE) && !(in->perms & AV_PERM_PRESERVE)) {
if (av_frame_is_writable(in)) {
direct = 1;
out = in;
} else {
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return AVERROR(ENOMEM);
}
avfilter_copy_buffer_ref_props(out, in);
out->video->w = outlink->w;
out->video->h = outlink->h;
av_frame_copy_props(out, in);
out->width = outlink->w;
out->height = outlink->h;
}
for (p = 0; p < 4 && in->data[p]; p++) {
@ -221,7 +221,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
}
if (!direct)
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
@ -232,7 +232,6 @@ static const AVFilterPad avfilter_vf_gradfun_inputs[] = {
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ,
},
{ NULL }
};

@ -84,21 +84,21 @@ static int config_props(AVFilterLink *inlink)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
FlipContext *flip = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
AVFilterBufferRef *out;
AVFrame *out;
uint8_t *inrow, *outrow;
int i, j, plane, step, hsub, vsub;
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return AVERROR(ENOMEM);
}
avfilter_copy_buffer_ref_props(out, in);
av_frame_copy_props(out, in);
for (plane = 0; plane < 4 && in->data[plane]; plane++) {
step = flip->max_step[plane];
@ -107,7 +107,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
outrow = out->data[plane];
inrow = in ->data[plane] + ((inlink->w >> hsub) - 1) * step;
for (i = 0; i < in->video->h >> vsub; i++) {
for (i = 0; i < in->height >> vsub; i++) {
switch (step) {
case 1:
for (j = 0; j < (inlink->w >> hsub); j++)
@ -153,7 +153,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
}
}
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
@ -163,7 +163,6 @@ static const AVFilterPad avfilter_vf_hflip_inputs[] = {
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_props,
.min_perms = AV_PERM_READ,
},
{ NULL }
};

@ -305,39 +305,39 @@ static int config_input(AVFilterLink *inlink)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
HQDN3DContext *hqdn3d = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFilterBufferRef *out;
AVFrame *out;
int direct, c;
if ((in->perms & AV_PERM_WRITE) && !(in->perms & AV_PERM_PRESERVE)) {
if (av_frame_is_writable(in)) {
direct = 1;
out = in;
} else {
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return AVERROR(ENOMEM);
}
avfilter_copy_buffer_ref_props(out, in);
out->video->w = outlink->w;
out->video->h = outlink->h;
av_frame_copy_props(out, in);
out->width = outlink->w;
out->height = outlink->h;
}
for (c = 0; c < 3; c++) {
denoise(hqdn3d, in->data[c], out->data[c],
hqdn3d->line, &hqdn3d->frame_prev[c],
in->video->w >> (!!c * hqdn3d->hsub),
in->video->h >> (!!c * hqdn3d->vsub),
in->width >> (!!c * hqdn3d->hsub),
in->height >> (!!c * hqdn3d->vsub),
in->linesize[c], out->linesize[c],
hqdn3d->coefs[c?2:0], hqdn3d->coefs[c?3:1]);
}
if (!direct)
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}

@ -35,7 +35,7 @@
#include "internal.h"
#include "video.h"
static void fill_iplimage_from_picref(IplImage *img, const AVFilterBufferRef *picref, enum AVPixelFormat pixfmt)
static void fill_iplimage_from_frame(IplImage *img, const AVFrame *frame, enum AVPixelFormat pixfmt)
{
IplImage *tmpimg;
int depth, channels_nb;
@ -45,18 +45,18 @@ static void fill_iplimage_from_picref(IplImage *img, const AVFilterBufferRef *pi
else if (pixfmt == AV_PIX_FMT_BGR24) { depth = IPL_DEPTH_8U; channels_nb = 3; }
else return;
tmpimg = cvCreateImageHeader((CvSize){picref->video->w, picref->video->h}, depth, channels_nb);
tmpimg = cvCreateImageHeader((CvSize){frame->width, frame->height}, depth, channels_nb);
*img = *tmpimg;
img->imageData = img->imageDataOrigin = picref->data[0];
img->imageData = img->imageDataOrigin = frame->data[0];
img->dataOrder = IPL_DATA_ORDER_PIXEL;
img->origin = IPL_ORIGIN_TL;
img->widthStep = picref->linesize[0];
img->widthStep = frame->linesize[0];
}
static void fill_picref_from_iplimage(AVFilterBufferRef *picref, const IplImage *img, enum AVPixelFormat pixfmt)
static void fill_frame_from_iplimage(AVFrame *frame, const IplImage *img, enum AVPixelFormat pixfmt)
{
picref->linesize[0] = img->widthStep;
picref->data[0] = img->imageData;
frame->linesize[0] = img->widthStep;
frame->data[0] = img->imageData;
}
static int query_formats(AVFilterContext *ctx)
@ -351,27 +351,27 @@ static av_cold void uninit(AVFilterContext *ctx)
memset(ocv, 0, sizeof(*ocv));
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
OCVContext *ocv = ctx->priv;
AVFilterLink *outlink= inlink->dst->outputs[0];
AVFilterBufferRef *out;
AVFrame *out;
IplImage inimg, outimg;
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return AVERROR(ENOMEM);
}
avfilter_copy_buffer_ref_props(out, in);
av_frame_copy_props(out, in);
fill_iplimage_from_picref(&inimg , in , inlink->format);
fill_iplimage_from_picref(&outimg, out, inlink->format);
fill_iplimage_from_frame(&inimg , in , inlink->format);
fill_iplimage_from_frame(&outimg, out, inlink->format);
ocv->end_frame_filter(ctx, &inimg, &outimg);
fill_picref_from_iplimage(out, &outimg, inlink->format);
fill_frame_from_iplimage(out, &outimg, inlink->format);
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
@ -381,7 +381,6 @@ static const AVFilterPad avfilter_vf_ocv_inputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ
},
{ NULL }
};
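fill_iplimage_from_frame() only builds an IplImage header around frame->data[0]; no pixels are copied, so the image stays valid exactly as long as the frame does. A hedged usage sketch (cvSmooth chosen arbitrarily):

IplImage img;
fill_iplimage_from_frame(&img, frame, AV_PIX_FMT_BGR24);
/* img borrows frame->data[0]; do not free the frame while using it */
cvSmooth(&img, &img, CV_GAUSSIAN, 5, 5, 0, 0);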

@ -295,28 +295,28 @@ static int config_props(AVFilterLink *inlink)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
LutContext *lut = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
AVFilterBufferRef *out;
AVFrame *out;
uint8_t *inrow, *outrow, *inrow0, *outrow0;
int i, j, k, plane;
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return AVERROR(ENOMEM);
}
avfilter_copy_buffer_ref_props(out, in);
av_frame_copy_props(out, in);
if (lut->is_rgb) {
/* packed */
inrow0 = in ->data[0];
outrow0 = out->data[0];
for (i = 0; i < in->video->h; i ++) {
for (i = 0; i < in->height; i ++) {
inrow = inrow0;
outrow = outrow0;
for (j = 0; j < inlink->w; j++) {
@ -337,7 +337,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
inrow = in ->data[plane];
outrow = out->data[plane];
for (i = 0; i < in->video->h >> vsub; i ++) {
for (i = 0; i < in->height >> vsub; i ++) {
for (j = 0; j < inlink->w>>hsub; j++)
outrow[j] = lut->lut[plane][inrow[j]];
inrow += in ->linesize[plane];
@ -346,7 +346,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
}
}
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
@ -355,7 +355,7 @@ static const AVFilterPad inputs[] = {
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_props,
.min_perms = AV_PERM_READ, },
},
{ .name = NULL}
};
static const AVFilterPad outputs[] = {

@ -70,8 +70,8 @@ typedef struct {
char x_expr[256], y_expr[256];
AVFilterBufferRef *main;
AVFilterBufferRef *over_prev, *over_next;
AVFrame *main;
AVFrame *over_prev, *over_next;
} OverlayContext;
static av_cold int init(AVFilterContext *ctx, const char *args)
@ -91,9 +91,9 @@ static av_cold void uninit(AVFilterContext *ctx)
{
OverlayContext *s = ctx->priv;
avfilter_unref_bufferp(&s->main);
avfilter_unref_bufferp(&s->over_prev);
avfilter_unref_bufferp(&s->over_next);
av_frame_free(&s->main);
av_frame_free(&s->over_prev);
av_frame_free(&s->over_next);
}
static int query_formats(AVFilterContext *ctx)
@ -194,17 +194,17 @@ static int config_output(AVFilterLink *outlink)
}
static void blend_frame(AVFilterContext *ctx,
AVFilterBufferRef *dst, AVFilterBufferRef *src,
AVFrame *dst, AVFrame *src,
int x, int y)
{
OverlayContext *over = ctx->priv;
int i, j, k;
int width, height;
int overlay_end_y = y + src->video->h;
int overlay_end_y = y + src->height;
int end_y, start_y;
width = FFMIN(dst->video->w - x, src->video->w);
end_y = FFMIN(dst->video->h, overlay_end_y);
width = FFMIN(dst->width - x, src->width);
end_y = FFMIN(dst->height, overlay_end_y);
start_y = FFMAX(y, 0);
height = end_y - start_y;
@ -269,7 +269,7 @@ static void blend_frame(AVFilterContext *ctx,
}
}
static int filter_frame_main(AVFilterLink *inlink, AVFilterBufferRef *frame)
static int filter_frame_main(AVFilterLink *inlink, AVFrame *frame)
{
OverlayContext *s = inlink->dst->priv;
@ -279,7 +279,7 @@ static int filter_frame_main(AVFilterLink *inlink, AVFilterBufferRef *frame)
return 0;
}
static int filter_frame_overlay(AVFilterLink *inlink, AVFilterBufferRef *frame)
static int filter_frame_overlay(AVFilterLink *inlink, AVFrame *frame)
{
OverlayContext *s = inlink->dst->priv;
@ -335,8 +335,8 @@ static int request_frame(AVFilterLink *outlink)
while (s->main->pts != AV_NOPTS_VALUE &&
s->over_next->pts != AV_NOPTS_VALUE &&
av_compare_ts(s->over_next->pts, tb_over, s->main->pts, tb_main) < 0) {
avfilter_unref_bufferp(&s->over_prev);
FFSWAP(AVFilterBufferRef*, s->over_prev, s->over_next);
av_frame_free(&s->over_prev);
FFSWAP(AVFrame*, s->over_prev, s->over_next);
ret = ff_request_frame(ctx->inputs[OVERLAY]);
if (ret == AVERROR_EOF)
@ -349,8 +349,8 @@ static int request_frame(AVFilterLink *outlink)
s->over_next->pts == AV_NOPTS_VALUE ||
!av_compare_ts(s->over_next->pts, tb_over, s->main->pts, tb_main)) {
blend_frame(ctx, s->main, s->over_next, s->x, s->y);
avfilter_unref_bufferp(&s->over_prev);
FFSWAP(AVFilterBufferRef*, s->over_prev, s->over_next);
av_frame_free(&s->over_prev);
FFSWAP(AVFrame*, s->over_prev, s->over_next);
} else if (s->over_prev) {
blend_frame(ctx, s->main, s->over_prev, s->x, s->y);
}
@ -364,8 +364,7 @@ static const AVFilterPad avfilter_vf_overlay_inputs[] = {
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input_main,
.filter_frame = filter_frame_main,
.min_perms = AV_PERM_READ,
.rej_perms = AV_PERM_REUSE2 | AV_PERM_PRESERVE,
.needs_writable = 1,
.needs_fifo = 1,
},
{
@ -373,8 +372,6 @@ static const AVFilterPad avfilter_vf_overlay_inputs[] = {
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input_overlay,
.filter_frame = filter_frame_overlay,
.min_perms = AV_PERM_READ,
.rej_perms = AV_PERM_REUSE2,
.needs_fifo = 1,
},
{ NULL }

@ -253,101 +253,127 @@ static int config_output(AVFilterLink *outlink)
return 0;
}
static AVFilterBufferRef *get_video_buffer(AVFilterLink *inlink, int perms, int w, int h)
static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
{
PadContext *pad = inlink->dst->priv;
AVFilterBufferRef *picref = ff_get_video_buffer(inlink->dst->outputs[0], perms,
w + (pad->w - pad->in_w),
h + (pad->h - pad->in_h));
AVFrame *frame = ff_get_video_buffer(inlink->dst->outputs[0],
w + (pad->w - pad->in_w),
h + (pad->h - pad->in_h));
int plane;
if (!picref)
if (!frame)
return NULL;
picref->video->w = w;
picref->video->h = h;
frame->width = w;
frame->height = h;
for (plane = 0; plane < 4 && picref->data[plane]; plane++) {
for (plane = 0; plane < 4 && frame->data[plane]; plane++) {
int hsub = (plane == 1 || plane == 2) ? pad->hsub : 0;
int vsub = (plane == 1 || plane == 2) ? pad->vsub : 0;
picref->data[plane] += (pad->x >> hsub) * pad->line_step[plane] +
(pad->y >> vsub) * picref->linesize[plane];
frame->data[plane] += (pad->x >> hsub) * pad->line_step[plane] +
(pad->y >> vsub) * frame->linesize[plane];
}
return picref;
return frame;
}
static int does_clip(PadContext *pad, AVFilterBufferRef *outpicref, int plane, int hsub, int vsub, int x, int y)
/* check whether each plane in this buffer can be padded without copying */
static int buffer_needs_copy(PadContext *s, AVFrame *frame, AVBufferRef *buf)
{
int64_t x_in_buf, y_in_buf;
int planes[4] = { -1, -1, -1, -1}, *p = planes;
int i, j;
x_in_buf = outpicref->data[plane] - outpicref->buf->data[plane]
+ (x >> hsub) * pad->line_step[plane]
+ (y >> vsub) * outpicref->linesize[plane];
/* get all planes in this buffer */
for (i = 0; i < FF_ARRAY_ELEMS(planes) && frame->data[i]; i++) {
if (av_frame_get_plane_buffer(frame, i) == buf)
*p++ = i;
}
if(x_in_buf < 0 || x_in_buf % pad->line_step[plane])
return 1;
x_in_buf /= pad->line_step[plane];
/* for each plane in this buffer, check that it can be padded without
* going over buffer bounds or other planes */
for (i = 0; i < FF_ARRAY_ELEMS(planes) && planes[i] >= 0; i++) {
int hsub = (planes[i] == 1 || planes[i] == 2) ? s->hsub : 0;
int vsub = (planes[i] == 1 || planes[i] == 2) ? s->vsub : 0;
uint8_t *start = frame->data[planes[i]];
uint8_t *end = start + (frame->height >> vsub) *
frame->linesize[planes[i]];
/* amount of free space needed before the start and after the end
* of the plane */
ptrdiff_t req_start = (s->x >> hsub) * s->line_step[planes[i]] +
(s->y >> vsub) * frame->linesize[planes[i]];
ptrdiff_t req_end = ((s->w - s->x - frame->width) >> hsub) *
s->line_step[planes[i]] +
((s->h - s->y - frame->height) >> vsub) * frame->linesize[planes[i]];
if (frame->linesize[planes[i]] < (s->w >> hsub) * s->line_step[planes[i]])
return 1;
if (start - buf->data < req_start ||
(buf->data + buf->size) - end < req_end)
return 1;
#define SIGN(x) ((x) > 0 ? 1 : -1)
for (j = 0; j < FF_ARRAY_ELEMS(planes) && planes[j] >= 0; j++) {
int vsub1 = (planes[j] == 1 || planes[j] == 2) ? s->vsub : 0;
uint8_t *start1 = frame->data[planes[j]];
uint8_t *end1 = start1 + (frame->height >> vsub1) *
frame->linesize[planes[j]];
if (i == j)
continue;
if (SIGN(start - end1) != SIGN(start - end1 - req_start) ||
SIGN(end - start1) != SIGN(end - start1 + req_end))
return 1;
}
}
av_assert0(outpicref->buf->linesize[plane] > 0); // while a reference can use a negative linesize, the main buffer should not
return 0;
}
y_in_buf = x_in_buf / outpicref->buf->linesize[plane];
x_in_buf %= outpicref->buf->linesize[plane];
static int frame_needs_copy(PadContext *s, AVFrame *frame)
{
int i;
if( y_in_buf<<vsub >= outpicref->buf->h
|| x_in_buf<<hsub >= outpicref->buf->w)
if (!av_frame_is_writable(frame))
return 1;
for (i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++)
if (buffer_needs_copy(s, frame, frame->buf[i]))
return 1;
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
PadContext *pad = inlink->dst->priv;
AVFilterBufferRef *out = avfilter_ref_buffer(in, ~0);
int plane, needs_copy;
if (!out) {
avfilter_unref_bufferp(&in);
return AVERROR(ENOMEM);
}
AVFrame *out;
int needs_copy = frame_needs_copy(pad, in);
for (plane = 0; plane < 4 && out->data[plane]; plane++) {
int hsub = (plane == 1 || plane == 2) ? pad->hsub : 0;
int vsub = (plane == 1 || plane == 2) ? pad->vsub : 0;
av_assert0(out->buf->w > 0 && out->buf->h > 0);
if (out->format != out->buf->format) //unsupported currently
break;
out->data[plane] -= (pad->x >> hsub) * pad->line_step[plane] +
(pad->y >> vsub) * out->linesize [plane];
if (does_clip(pad, out, plane, hsub, vsub, 0, 0) ||
does_clip(pad, out, plane, hsub, vsub, 0, pad->h - 1) ||
does_clip(pad, out, plane, hsub, vsub, pad->w - 1, 0) ||
does_clip(pad, out, plane, hsub, vsub, pad->w - 1, pad->h - 1))
break;
}
needs_copy = plane < 4 && out->data[plane];
if (needs_copy) {
av_log(inlink->dst, AV_LOG_DEBUG, "Direct padding impossible, allocating new frame\n");
avfilter_unref_buffer(out);
out = ff_get_video_buffer(inlink->dst->outputs[0], AV_PERM_WRITE | AV_PERM_NEG_LINESIZES,
out = ff_get_video_buffer(inlink->dst->outputs[0],
FFMAX(inlink->w, pad->w),
FFMAX(inlink->h, pad->h));
if (!out) {
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return AVERROR(ENOMEM);
}
avfilter_copy_buffer_ref_props(out, in);
}
av_frame_copy_props(out, in);
} else {
int i;
out->video->w = pad->w;
out->video->h = pad->h;
out = in;
for (i = 0; i < FF_ARRAY_ELEMS(out->data) && out->data[i]; i++) {
int hsub = (i == 1 || i == 2) ? pad->hsub : 0;
int vsub = (i == 1 || i == 2) ? pad->vsub : 0;
out->data[i] -= (pad->x >> hsub) * pad->line_step[i] +
(pad->y >> vsub) * out->linesize[i];
}
}
/* top bar */
if (pad->y) {
@ -365,21 +391,25 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
/* left border */
ff_draw_rectangle(out->data, out->linesize, pad->line, pad->line_step,
pad->hsub, pad->vsub, 0, pad->y, pad->x, in->video->h);
pad->hsub, pad->vsub, 0, pad->y, pad->x, in->height);
if (needs_copy) {
ff_copy_rectangle(out->data, out->linesize, in->data, in->linesize,
pad->line_step, pad->hsub, pad->vsub,
pad->x, pad->y, 0, in->video->w, in->video->h);
pad->x, pad->y, 0, in->width, in->height);
}
/* right border */
ff_draw_rectangle(out->data, out->linesize,
pad->line, pad->line_step, pad->hsub, pad->vsub,
pad->x + pad->in_w, pad->y, pad->w - pad->x - pad->in_w,
in->video->h);
in->height);
out->width = pad->w;
out->height = pad->h;
avfilter_unref_bufferp(&in);
if (in != out)
av_frame_free(&in);
return ff_filter_frame(inlink->dst->outputs[0], out);
}
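The new needs-copy logic above boils down to one question per plane: does the plane's AVBufferRef leave enough slack before data[] for the top/left margin and after the last line for the bottom/right margin, without running into another plane sharing the buffer? Reduced to a single-plane sketch, with names as in the code above:

AVBufferRef *buf = av_frame_get_plane_buffer(frame, i);
uint8_t *start = frame->data[i];
uint8_t *end   = start + (frame->height >> vsub) * frame->linesize[i];

/* req_start/req_end: bytes of slack needed before and after the plane */
if (start - buf->data < req_start ||
    (buf->data + buf->size) - end < req_end)
    return 1; /* cannot pad in place, fall back to a copy */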

@ -52,21 +52,20 @@ static int config_props(AVFilterLink *inlink)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
PixdescTestContext *priv = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFilterBufferRef *out;
AVFrame *out;
int i, c, w = inlink->w, h = inlink->h;
out = ff_get_video_buffer(outlink, AV_PERM_WRITE,
outlink->w, outlink->h);
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return AVERROR(ENOMEM);
}
avfilter_copy_buffer_ref_props(out, in);
av_frame_copy_props(out, in);
for (i = 0; i < 4; i++) {
int h = outlink->h;
@ -102,7 +101,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
}
}
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
@ -112,7 +111,6 @@ static const AVFilterPad avfilter_vf_pixdesctest_inputs[] = {
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_props,
.min_perms = AV_PERM_READ,
},
{ NULL }
};

@ -257,11 +257,11 @@ fail:
return ret;
}
static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in)
static int filter_frame(AVFilterLink *link, AVFrame *in)
{
ScaleContext *scale = link->dst->priv;
AVFilterLink *outlink = link->dst->outputs[0];
AVFilterBufferRef *out;
AVFrame *out;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
if (!scale->sws)
@ -270,25 +270,25 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in)
scale->hsub = desc->log2_chroma_w;
scale->vsub = desc->log2_chroma_h;
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return AVERROR(ENOMEM);
}
avfilter_copy_buffer_ref_props(out, in);
out->video->w = outlink->w;
out->video->h = outlink->h;
av_frame_copy_props(out, in);
out->width = outlink->w;
out->height = outlink->h;
av_reduce(&out->video->pixel_aspect.num, &out->video->pixel_aspect.den,
(int64_t)in->video->pixel_aspect.num * outlink->h * link->w,
(int64_t)in->video->pixel_aspect.den * outlink->w * link->h,
av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
(int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
(int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
INT_MAX);
sws_scale(scale->sws, in->data, in->linesize, 0, in->video->h,
sws_scale(scale->sws, in->data, in->linesize, 0, in->height,
out->data, out->linesize);
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
@ -297,7 +297,6 @@ static const AVFilterPad avfilter_vf_scale_inputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ,
},
{ NULL }
};

@ -108,7 +108,6 @@ enum var_name {
VAR_PREV_SELECTED_N,
VAR_KEY,
VAR_POS,
VAR_VARS_NB
};
@ -134,7 +133,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
return ret;
}
select->pending_frames = av_fifo_alloc(FIFO_SIZE*sizeof(AVFilterBufferRef*));
select->pending_frames = av_fifo_alloc(FIFO_SIZE*sizeof(AVFrame*));
if (!select->pending_frames) {
av_log(ctx, AV_LOG_ERROR, "Failed to allocate pending frames buffer.\n");
return AVERROR(ENOMEM);
@ -181,35 +180,33 @@ static int config_input(AVFilterLink *inlink)
#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
static int select_frame(AVFilterContext *ctx, AVFilterBufferRef *picref)
static int select_frame(AVFilterContext *ctx, AVFrame *frame)
{
SelectContext *select = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
double res;
if (isnan(select->var_values[VAR_START_PTS]))
select->var_values[VAR_START_PTS] = TS2D(picref->pts);
select->var_values[VAR_START_PTS] = TS2D(frame->pts);
if (isnan(select->var_values[VAR_START_T]))
select->var_values[VAR_START_T] = TS2D(picref->pts) * av_q2d(inlink->time_base);
select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);
select->var_values[VAR_PTS] = TS2D(picref->pts);
select->var_values[VAR_T ] = TS2D(picref->pts) * av_q2d(inlink->time_base);
select->var_values[VAR_POS] = picref->pos == -1 ? NAN : picref->pos;
select->var_values[VAR_PREV_PTS] = TS2D(picref ->pts);
select->var_values[VAR_PTS] = TS2D(frame->pts);
select->var_values[VAR_T ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
select->var_values[VAR_PREV_PTS] = TS2D(frame->pts);
select->var_values[VAR_INTERLACE_TYPE] =
!picref->video->interlaced ? INTERLACE_TYPE_P :
picref->video->top_field_first ? INTERLACE_TYPE_T : INTERLACE_TYPE_B;
select->var_values[VAR_PICT_TYPE] = picref->video->pict_type;
!frame->interlaced_frame ? INTERLACE_TYPE_P :
frame->top_field_first ? INTERLACE_TYPE_T : INTERLACE_TYPE_B;
select->var_values[VAR_PICT_TYPE] = frame->pict_type;
res = av_expr_eval(select->expr, select->var_values, NULL);
av_log(inlink->dst, AV_LOG_DEBUG,
"n:%d pts:%d t:%f pos:%d interlace_type:%c key:%d pict_type:%c "
"n:%d pts:%d t:%f interlace_type:%c key:%d pict_type:%c "
"-> select:%f\n",
(int)select->var_values[VAR_N],
(int)select->var_values[VAR_PTS],
select->var_values[VAR_T],
(int)select->var_values[VAR_POS],
select->var_values[VAR_INTERLACE_TYPE] == INTERLACE_TYPE_P ? 'P' :
select->var_values[VAR_INTERLACE_TYPE] == INTERLACE_TYPE_T ? 'T' :
select->var_values[VAR_INTERLACE_TYPE] == INTERLACE_TYPE_B ? 'B' : '?',
@ -228,7 +225,7 @@ static int select_frame(AVFilterContext *ctx, AVFilterBufferRef *picref)
return res;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
SelectContext *select = inlink->dst->priv;
@ -239,7 +236,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
if (!av_fifo_space(select->pending_frames)) {
av_log(inlink->dst, AV_LOG_ERROR,
"Buffering limit reached, cannot cache more frames\n");
avfilter_unref_bufferp(&frame);
av_frame_free(&frame);
} else
av_fifo_generic_write(select->pending_frames, &frame,
sizeof(frame), NULL);
@ -248,7 +245,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
return ff_filter_frame(inlink->dst->outputs[0], frame);
}
avfilter_unref_bufferp(&frame);
av_frame_free(&frame);
return 0;
}
@ -260,10 +257,10 @@ static int request_frame(AVFilterLink *outlink)
select->select = 0;
if (av_fifo_size(select->pending_frames)) {
AVFilterBufferRef *picref;
AVFrame *frame;
av_fifo_generic_read(select->pending_frames, &picref, sizeof(picref), NULL);
return ff_filter_frame(outlink, picref);
av_fifo_generic_read(select->pending_frames, &frame, sizeof(frame), NULL);
return ff_filter_frame(outlink, frame);
}
while (!select->select) {
@ -294,20 +291,20 @@ static int poll_frame(AVFilterLink *outlink)
select->cache_frames = 0;
}
return av_fifo_size(select->pending_frames)/sizeof(AVFilterBufferRef *);
return av_fifo_size(select->pending_frames)/sizeof(AVFrame*);
}
static av_cold void uninit(AVFilterContext *ctx)
{
SelectContext *select = ctx->priv;
AVFilterBufferRef *picref;
AVFrame *frame;
av_expr_free(select->expr);
select->expr = NULL;
while (select->pending_frames &&
av_fifo_generic_read(select->pending_frames, &picref, sizeof(picref), NULL) == sizeof(picref))
avfilter_unref_buffer(picref);
av_fifo_generic_read(select->pending_frames, &frame, sizeof(frame), NULL) == sizeof(frame))
av_frame_free(&frame);
av_fifo_free(select->pending_frames);
select->pending_frames = NULL;
}

@ -40,7 +40,6 @@ static const char *const var_names[] = {
"N", ///< frame number (starting at zero)
"PHI", ///< golden ratio
"PI", ///< greek pi
"POS", ///< original position in the file of the frame
"PREV_INPTS", ///< previous input PTS
"PREV_OUTPTS", ///< previous output PTS
"PTS", ///< original pts in the file of the frame
@ -57,7 +56,6 @@ enum var_name {
VAR_N,
VAR_PHI,
VAR_PI,
VAR_POS,
VAR_PREV_INPTS,
VAR_PREV_OUTPTS,
VAR_PTS,
@ -108,7 +106,7 @@ static int config_input(AVFilterLink *inlink)
#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
SetPTSContext *setpts = inlink->dst->priv;
int64_t in_pts = frame->pts;
@ -117,9 +115,8 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
if (isnan(setpts->var_values[VAR_STARTPTS]))
setpts->var_values[VAR_STARTPTS] = TS2D(frame->pts);
setpts->var_values[VAR_INTERLACED] = frame->video->interlaced;
setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame;
setpts->var_values[VAR_PTS ] = TS2D(frame->pts);
setpts->var_values[VAR_POS ] = frame->pos == -1 ? NAN : frame->pos;
setpts->var_values[VAR_RTCTIME ] = av_gettime();
d = av_expr_eval(setpts->expr, setpts->var_values, NULL);
@ -127,10 +124,10 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
#ifdef DEBUG
av_log(inlink->dst, AV_LOG_DEBUG,
"n:%"PRId64" interlaced:%d pos:%"PRId64" pts:%"PRId64" t:%f -> pts:%"PRId64" t:%f\n",
"n:%"PRId64" interlaced:%d pts:%"PRId64" t:%f -> pts:%"PRId64" t:%f\n",
(int64_t)setpts->var_values[VAR_N],
(int)setpts->var_values[VAR_INTERLACED],
frame->pos, in_pts, in_pts * av_q2d(inlink->time_base),
in_pts, in_pts * av_q2d(inlink->time_base),
frame->pts, frame->pts * av_q2d(inlink->time_base));
#endif
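The D2TS/TS2D macros used here (and in vf_select above) map the AV_NOPTS_VALUE sentinel to NaN and back, so the expression evaluator can work in plain doubles and missing timestamps simply propagate. For illustration:

int64_t pts  = AV_NOPTS_VALUE;
double  t    = TS2D(pts);       /* NAN */
int64_t back = D2TS(t * 2.0);   /* NAN propagates -> AV_NOPTS_VALUE */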

@ -108,7 +108,7 @@ static int config_output_props(AVFilterLink *outlink)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];

@ -41,7 +41,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
ShowInfoContext *showinfo = ctx->priv;
@ -50,7 +50,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
int i, plane, vsub = desc->log2_chroma_h;
for (plane = 0; frame->data[plane] && plane < 4; plane++) {
size_t linesize = av_image_get_linesize(frame->format, frame->video->w, plane);
size_t linesize = av_image_get_linesize(frame->format, frame->width, plane);
uint8_t *data = frame->data[plane];
int h = plane == 1 || plane == 2 ? inlink->h >> vsub : inlink->h;
@ -62,18 +62,18 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
}
av_log(ctx, AV_LOG_INFO,
"n:%d pts:%"PRId64" pts_time:%f pos:%"PRId64" "
"n:%d pts:%"PRId64" pts_time:%f "
"fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
"checksum:%u plane_checksum:[%u %u %u %u]\n",
showinfo->frame,
frame->pts, frame->pts * av_q2d(inlink->time_base), frame->pos,
frame->pts, frame->pts * av_q2d(inlink->time_base),
desc->name,
frame->video->pixel_aspect.num, frame->video->pixel_aspect.den,
frame->video->w, frame->video->h,
!frame->video->interlaced ? 'P' : /* Progressive */
frame->video->top_field_first ? 'T' : 'B', /* Top / Bottom */
frame->video->key_frame,
av_get_picture_type_char(frame->video->pict_type),
frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den,
frame->width, frame->height,
!frame->interlaced_frame ? 'P' : /* Progressive */
frame->top_field_first ? 'T' : 'B', /* Top / Bottom */
frame->key_frame,
av_get_picture_type_char(frame->pict_type),
checksum, plane_checksum[0], plane_checksum[1], plane_checksum[2], plane_checksum[3]);
showinfo->frame++;
@ -86,7 +86,6 @@ static const AVFilterPad avfilter_vf_showinfo_inputs[] = {
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer = ff_null_get_video_buffer,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ,
},
{ NULL }
};
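showinfo touches only the meaningful bytes of each plane: av_image_get_linesize() gives the used width, and chroma planes 1 and 2 are shortened by the format's log2_chroma_h. A sketch of that traversal (it sums bytes instead of computing the real filter's checksums):

#include "libavutil/frame.h"
#include "libavutil/imgutils.h"
#include "libavutil/pixdesc.h"

static unsigned sum_plane_bytes(const AVFrame *frame)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
    unsigned sum = 0;
    int plane;

    if (!desc)
        return 0;

    for (plane = 0; plane < 4 && frame->data[plane]; plane++) {
        int linesize = av_image_get_linesize(frame->format, frame->width, plane);
        int h = (plane == 1 || plane == 2) ? frame->height >> desc->log2_chroma_h
                                           : frame->height;
        const uint8_t *row = frame->data[plane];
        int x, y;

        for (y = 0; y < h; y++) {
            for (x = 0; x < linesize; x++)
                sum += row[x];
            row += frame->linesize[plane];  /* may be negative, e.g. after vflip */
        }
    }
    return sum;
}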

@ -121,35 +121,35 @@ static int config_props_output(AVFilterLink *outlink)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterLink *outlink = inlink->dst->outputs[0];
TransContext *trans = inlink->dst->priv;
AVFilterBufferRef *out;
AVFrame *out;
int plane;
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return AVERROR(ENOMEM);
}
out->pts = in->pts;
if (in->video->pixel_aspect.num == 0) {
out->video->pixel_aspect = in->video->pixel_aspect;
if (in->sample_aspect_ratio.num == 0) {
out->sample_aspect_ratio = in->sample_aspect_ratio;
} else {
out->video->pixel_aspect.num = in->video->pixel_aspect.den;
out->video->pixel_aspect.den = in->video->pixel_aspect.num;
out->sample_aspect_ratio.num = in->sample_aspect_ratio.den;
out->sample_aspect_ratio.den = in->sample_aspect_ratio.num;
}
for (plane = 0; out->data[plane]; plane++) {
int hsub = plane == 1 || plane == 2 ? trans->hsub : 0;
int vsub = plane == 1 || plane == 2 ? trans->vsub : 0;
int pixstep = trans->pixsteps[plane];
int inh = in->video->h>>vsub;
int outw = out->video->w>>hsub;
int outh = out->video->h>>vsub;
int inh = in->height >> vsub;
int outw = out->width >> hsub;
int outh = out->height >> vsub;
uint8_t *dst, *src;
int dstlinesize, srclinesize;
int x, y;
@ -194,7 +194,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
}
}
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
@ -203,7 +203,6 @@ static const AVFilterPad avfilter_vf_transpose_inputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ,
},
{ NULL }
};
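transpose shows the filter_frame() shape most filters converge on in this commit: request an output frame, carry metadata across with av_frame_copy_props(), process, drop the input reference, forward the result. A minimal skeleton with the pixel work stubbed out:

#include "avfilter.h"
#include "internal.h"
#include "video.h"

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);

    if (!out) {
        av_frame_free(&in);        /* even on failure the input must be released */
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);  /* pts, SAR, interlacing, key_frame, ... */

    /* per-plane pixel work goes here */

    av_frame_free(&in);            /* the input reference is consumed */
    return ff_filter_frame(outlink, out);
}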

@ -214,26 +214,26 @@ static av_cold void uninit(AVFilterContext *ctx)
free_filter_param(&unsharp->chroma);
}
static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in)
static int filter_frame(AVFilterLink *link, AVFrame *in)
{
UnsharpContext *unsharp = link->dst->priv;
AVFilterLink *outlink = link->dst->outputs[0];
AVFilterBufferRef *out;
AVFrame *out;
int cw = SHIFTUP(link->w, unsharp->hsub);
int ch = SHIFTUP(link->h, unsharp->vsub);
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return AVERROR(ENOMEM);
}
avfilter_copy_buffer_ref_props(out, in);
av_frame_copy_props(out, in);
apply_unsharp(out->data[0], out->linesize[0], in->data[0], in->linesize[0], link->w, link->h, &unsharp->luma);
apply_unsharp(out->data[1], out->linesize[1], in->data[1], in->linesize[1], cw, ch, &unsharp->chroma);
apply_unsharp(out->data[2], out->linesize[2], in->data[2], in->linesize[2], cw, ch, &unsharp->chroma);
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
@ -243,7 +243,6 @@ static const AVFilterPad avfilter_vf_unsharp_inputs[] = {
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_props,
.min_perms = AV_PERM_READ,
},
{ NULL }
};

@ -43,33 +43,29 @@ static int config_input(AVFilterLink *link)
return 0;
}
static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms,
int w, int h)
static AVFrame *get_video_buffer(AVFilterLink *link, int w, int h)
{
FlipContext *flip = link->dst->priv;
AVFilterBufferRef *picref;
AVFrame *frame;
int i;
if (!(perms & AV_PERM_NEG_LINESIZES))
return ff_default_get_video_buffer(link, perms, w, h);
picref = ff_get_video_buffer(link->dst->outputs[0], perms, w, h);
if (!picref)
frame = ff_get_video_buffer(link->dst->outputs[0], w, h);
if (!frame)
return NULL;
for (i = 0; i < 4; i ++) {
int vsub = i == 1 || i == 2 ? flip->vsub : 0;
if (picref->data[i]) {
picref->data[i] += ((h >> vsub)-1) * picref->linesize[i];
picref->linesize[i] = -picref->linesize[i];
if (frame->data[i]) {
frame->data[i] += ((h >> vsub) - 1) * frame->linesize[i];
frame->linesize[i] = -frame->linesize[i];
}
}
return picref;
return frame;
}
static int filter_frame(AVFilterLink *link, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
FlipContext *flip = link->dst->priv;
int i;
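vflip's get_video_buffer() flips frames without copying: each plane is pointed at its last line and the stride negated, so downstream writers fill the picture bottom-up. The same arithmetic as a standalone helper (a sketch, assuming a freshly allocated frame):

#include "libavutil/frame.h"

/* chroma_vsub is the format's log2 vertical chroma subsampling */
static void flip_in_place(AVFrame *frame, int chroma_vsub)
{
    int i;

    for (i = 0; i < 4; i++) {
        int vsub = (i == 1 || i == 2) ? chroma_vsub : 0;
        if (frame->data[i]) {
            frame->data[i]    += ((frame->height >> vsub) - 1) * frame->linesize[i];
            frame->linesize[i] = -frame->linesize[i];
        }
    }
}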

@ -31,8 +31,6 @@
#undef NDEBUG
#include <assert.h>
#define PERM_RWP AV_PERM_WRITE | AV_PERM_PRESERVE | AV_PERM_REUSE
#define CHECK(j)\
{ int score = FFABS(cur[mrefs + off_left + (j)] - cur[prefs + off_left - (j)])\
+ FFABS(cur[mrefs +(j)] - cur[prefs -(j)])\
@ -167,15 +165,15 @@ static void filter_edges_16bit(void *dst1, void *prev1, void *cur1, void *next1,
FILTER(w - 3, w)
}
static void filter(AVFilterContext *ctx, AVFilterBufferRef *dstpic,
static void filter(AVFilterContext *ctx, AVFrame *dstpic,
int parity, int tff)
{
YADIFContext *yadif = ctx->priv;
int y, i;
for (i = 0; i < yadif->csp->nb_components; i++) {
int w = dstpic->video->w;
int h = dstpic->video->h;
int w = dstpic->width;
int h = dstpic->height;
int refs = yadif->cur->linesize[i];
int df = (yadif->csp->comp[i].depth_minus1 + 8) / 8;
int l_edge, l_edge_pix;
@ -225,23 +223,22 @@ static void filter(AVFilterContext *ctx, AVFilterBufferRef *dstpic,
emms_c();
}
static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms,
int w, int h)
static AVFrame *get_video_buffer(AVFilterLink *link, int w, int h)
{
AVFilterBufferRef *picref;
AVFrame *frame;
int width = FFALIGN(w, 32);
int height = FFALIGN(h + 2, 32);
int i;
picref = ff_default_get_video_buffer(link, perms, width, height);
frame = ff_default_get_video_buffer(link, width, height);
if (!frame)
return NULL;
picref->video->w = w;
picref->video->h = h;
frame->width = w;
frame->height = h;
for (i = 0; i < 3; i++)
picref->data[i] += picref->linesize[i];
frame->data[i] += frame->linesize[i];
return picref;
return frame;
}
static int return_frame(AVFilterContext *ctx, int is_second)
@ -251,19 +248,19 @@ static int return_frame(AVFilterContext *ctx, int is_second)
int tff, ret;
if (yadif->parity == -1) {
tff = yadif->cur->video->interlaced ?
yadif->cur->video->top_field_first : 1;
tff = yadif->cur->interlaced_frame ?
yadif->cur->top_field_first : 1;
} else {
tff = yadif->parity ^ 1;
}
if (is_second) {
yadif->out = ff_get_video_buffer(link, PERM_RWP, link->w, link->h);
yadif->out = ff_get_video_buffer(link, link->w, link->h);
if (!yadif->out)
return AVERROR(ENOMEM);
avfilter_copy_buffer_ref_props(yadif->out, yadif->cur);
yadif->out->video->interlaced = 0;
av_frame_copy_props(yadif->out, yadif->cur);
yadif->out->interlaced_frame = 0;
}
filter(ctx, yadif->out, tff ^ !is_second, tff);
@ -284,7 +281,7 @@ static int return_frame(AVFilterContext *ctx, int is_second)
return ret;
}
static int filter_frame(AVFilterLink *link, AVFilterBufferRef *picref)
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
AVFilterContext *ctx = link->dst;
YADIFContext *yadif = ctx->priv;
@ -293,36 +290,35 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *picref)
return_frame(ctx, 1);
if (yadif->prev)
avfilter_unref_buffer(yadif->prev);
av_frame_free(&yadif->prev);
yadif->prev = yadif->cur;
yadif->cur = yadif->next;
yadif->next = picref;
yadif->next = frame;
if (!yadif->cur)
return 0;
if (yadif->auto_enable && !yadif->cur->video->interlaced) {
yadif->out = avfilter_ref_buffer(yadif->cur, AV_PERM_READ);
if (yadif->auto_enable && !yadif->cur->interlaced_frame) {
yadif->out = av_frame_clone(yadif->cur);
if (!yadif->out)
return AVERROR(ENOMEM);
avfilter_unref_bufferp(&yadif->prev);
av_frame_free(&yadif->prev);
if (yadif->out->pts != AV_NOPTS_VALUE)
yadif->out->pts *= 2;
return ff_filter_frame(ctx->outputs[0], yadif->out);
}
if (!yadif->prev &&
!(yadif->prev = avfilter_ref_buffer(yadif->cur, AV_PERM_READ)))
!(yadif->prev = av_frame_clone(yadif->cur)))
return AVERROR(ENOMEM);
yadif->out = ff_get_video_buffer(ctx->outputs[0], PERM_RWP,
link->w, link->h);
yadif->out = ff_get_video_buffer(ctx->outputs[0], link->w, link->h);
if (!yadif->out)
return AVERROR(ENOMEM);
avfilter_copy_buffer_ref_props(yadif->out, yadif->cur);
yadif->out->video->interlaced = 0;
av_frame_copy_props(yadif->out, yadif->cur);
yadif->out->interlaced_frame = 0;
if (yadif->out->pts != AV_NOPTS_VALUE)
yadif->out->pts *= 2;
@ -349,8 +345,7 @@ static int request_frame(AVFilterLink *link)
ret = ff_request_frame(link->src->inputs[0]);
if (ret == AVERROR_EOF && yadif->next) {
AVFilterBufferRef *next =
avfilter_ref_buffer(yadif->next, AV_PERM_READ);
AVFrame *next = av_frame_clone(yadif->next);
if (!next)
return AVERROR(ENOMEM);
@ -389,7 +384,7 @@ static int poll_frame(AVFilterLink *link)
}
assert(yadif->next || !val);
if (yadif->auto_enable && yadif->next && !yadif->next->video->interlaced)
if (yadif->auto_enable && yadif->next && !yadif->next->interlaced_frame)
return val;
return val * ((yadif->mode&1)+1);
@ -399,9 +394,9 @@ static av_cold void uninit(AVFilterContext *ctx)
{
YADIFContext *yadif = ctx->priv;
if (yadif->prev) avfilter_unref_bufferp(&yadif->prev);
if (yadif->cur ) avfilter_unref_bufferp(&yadif->cur );
if (yadif->next) avfilter_unref_bufferp(&yadif->next);
if (yadif->prev) av_frame_free(&yadif->prev);
if (yadif->cur ) av_frame_free(&yadif->cur );
if (yadif->next) av_frame_free(&yadif->next);
}
static int query_formats(AVFilterContext *ctx)
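yadif keeps a prev/cur/next window; with AVFrames the rotation is a pointer shuffle plus one av_frame_free(), and av_frame_clone() seeds the history cheaply because it only adds buffer references. A sketch of that rotation around a hypothetical history struct:

#include "libavutil/frame.h"

typedef struct FrameHistory {
    AVFrame *prev, *cur, *next;
} FrameHistory;

static int push_frame(FrameHistory *h, AVFrame *in)
{
    av_frame_free(&h->prev);  /* drop the oldest reference */
    h->prev = h->cur;
    h->cur  = h->next;
    h->next = in;             /* takes ownership of the input */

    /* bootstrap: let the first frame double as its own predecessor;
     * the clone shares data buffers, so this is O(1) */
    if (h->cur && !h->prev && !(h->prev = av_frame_clone(h->cur)))
        return AVERROR(ENOMEM);
    return 0;
}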

@ -19,6 +19,7 @@
#include <string.h>
#include <stdio.h>
#include "libavutil/buffer.h"
#include "libavutil/imgutils.h"
#include "libavutil/mem.h"
@ -26,77 +27,34 @@
#include "internal.h"
#include "video.h"
#ifdef DEBUG
static char *ff_get_ref_perms_string(char *buf, size_t buf_size, int perms)
AVFrame *ff_null_get_video_buffer(AVFilterLink *link, int w, int h)
{
snprintf(buf, buf_size, "%s%s%s%s%s%s",
perms & AV_PERM_READ ? "r" : "",
perms & AV_PERM_WRITE ? "w" : "",
perms & AV_PERM_PRESERVE ? "p" : "",
perms & AV_PERM_REUSE ? "u" : "",
perms & AV_PERM_REUSE2 ? "U" : "",
perms & AV_PERM_NEG_LINESIZES ? "n" : "");
return buf;
}
#endif
static void ff_dlog_ref(void *ctx, AVFilterBufferRef *ref, int end)
{
av_unused char buf[16];
av_dlog(ctx,
"ref[%p buf:%p refcount:%d perms:%s data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
ref, ref->buf, ref->buf->refcount, ff_get_ref_perms_string(buf, sizeof(buf), ref->perms), ref->data[0],
ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
ref->pts, ref->pos);
if (ref->video) {
av_dlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
ref->video->pixel_aspect.num, ref->video->pixel_aspect.den,
ref->video->w, ref->video->h,
!ref->video->interlaced ? 'P' : /* Progressive */
ref->video->top_field_first ? 'T' : 'B', /* Top / Bottom */
ref->video->key_frame,
av_get_picture_type_char(ref->video->pict_type));
}
if (ref->audio) {
av_dlog(ctx, " cl:%"PRId64"d n:%d r:%d p:%d",
ref->audio->channel_layout,
ref->audio->nb_samples,
ref->audio->sample_rate,
ref->audio->planar);
}
av_dlog(ctx, "]%s", end ? "\n" : "");
}
AVFilterBufferRef *ff_null_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
{
return ff_get_video_buffer(link->dst->outputs[0], perms, w, h);
return ff_get_video_buffer(link->dst->outputs[0], w, h);
}
/* TODO: set the buffer's priv member to a context structure for the whole
* filter chain. This will allow for a buffer pool instead of the constant
* alloc & free cycle currently implemented. */
AVFilterBufferRef *ff_default_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
AVFrame *ff_default_get_video_buffer(AVFilterLink *link, int w, int h)
{
int linesize[4];
uint8_t *data[4];
AVFilterBufferRef *picref = NULL;
AVFrame *frame = av_frame_alloc();
int ret;
// +2 is needed for swscaler, +16 to be SIMD-friendly
if (av_image_alloc(data, linesize, w, h, link->format, 16) < 0)
if (!frame)
return NULL;
picref = avfilter_get_video_buffer_ref_from_arrays(data, linesize,
perms, w, h, link->format);
if (!picref) {
av_free(data[0]);
return NULL;
}
frame->width = w;
frame->height = h;
frame->format = link->format;
return picref;
ret = av_frame_get_buffer(frame, 32);
if (ret < 0)
av_frame_free(&frame);
return frame;
}
#if FF_API_AVFILTERBUFFER
AVFilterBufferRef *
avfilter_get_video_buffer_ref_from_arrays(uint8_t *data[4], int linesize[4], int perms,
int w, int h, enum AVPixelFormat format)
@ -141,25 +99,20 @@ fail:
av_free(pic);
return NULL;
}
#endif
AVFilterBufferRef *ff_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
AVFrame *ff_get_video_buffer(AVFilterLink *link, int w, int h)
{
AVFilterBufferRef *ret = NULL;
AVFrame *ret = NULL;
av_unused char buf[16];
FF_DPRINTF_START(NULL, get_video_buffer); ff_dlog_link(NULL, link, 0);
av_dlog(NULL, " perms:%s w:%d h:%d\n", ff_get_ref_perms_string(buf, sizeof(buf), perms), w, h);
if (link->dstpad->get_video_buffer)
ret = link->dstpad->get_video_buffer(link, perms, w, h);
ret = link->dstpad->get_video_buffer(link, w, h);
if (!ret)
ret = ff_default_get_video_buffer(link, perms, w, h);
if (ret)
ret->type = AVMEDIA_TYPE_VIDEO;
FF_DPRINTF_START(NULL, get_video_buffer); ff_dlog_link(NULL, link, 0); av_dlog(NULL, " returning "); ff_dlog_ref(NULL, ret, 1);
ret = ff_default_get_video_buffer(link, w, h);
return ret;
}
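ff_default_get_video_buffer() now reduces to the stock AVFrame recipe: allocate the shell, describe the image, and let av_frame_get_buffer() create the refcounted planes. The same recipe as a self-contained helper:

#include "libavutil/frame.h"

static AVFrame *alloc_video_frame(int w, int h, enum AVPixelFormat fmt)
{
    AVFrame *frame = av_frame_alloc();

    if (!frame)
        return NULL;

    frame->width  = w;
    frame->height = h;
    frame->format = fmt;

    if (av_frame_get_buffer(frame, 32) < 0)  /* 32-byte SIMD alignment */
        av_frame_free(&frame);               /* leaves frame == NULL */

    return frame;
}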

@ -21,22 +21,19 @@
#include "avfilter.h"
AVFilterBufferRef *ff_default_get_video_buffer(AVFilterLink *link,
int perms, int w, int h);
AVFilterBufferRef *ff_null_get_video_buffer(AVFilterLink *link, int perms, int w, int h);
AVFrame *ff_default_get_video_buffer(AVFilterLink *link, int w, int h);
AVFrame *ff_null_get_video_buffer(AVFilterLink *link, int w, int h);
/**
* Request a picture buffer with a specific set of permissions.
*
* @param link the output link to the filter from which the buffer will
* be requested
* @param perms the required access permissions
* @param w the minimum width of the buffer to allocate
* @param h the minimum height of the buffer to allocate
* @return A reference to the buffer. This must be freed with
* av_frame_free() when you are finished with it.
*/
AVFilterBufferRef *ff_get_video_buffer(AVFilterLink *link, int perms,
int w, int h);
AVFrame *ff_get_video_buffer(AVFilterLink *link, int w, int h);
#endif /* AVFILTER_VIDEO_H */

@ -20,9 +20,9 @@
#include "internal.h"
#include "libavutil/internal.h"
static int filter_frame(AVFilterLink *link, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
avfilter_unref_bufferp(&frame);
av_frame_free(&frame);
return 0;
}

@ -146,19 +146,18 @@ static int color_config_props(AVFilterLink *inlink)
static int color_request_frame(AVFilterLink *link)
{
ColorContext *color = link->src->priv;
AVFilterBufferRef *picref = ff_get_video_buffer(link, AV_PERM_WRITE, color->w, color->h);
AVFrame *frame = ff_get_video_buffer(link, color->w, color->h);
if (!picref)
if (!frame)
return AVERROR(ENOMEM);
picref->video->pixel_aspect = (AVRational) {1, 1};
picref->pts = color->pts++;
picref->pos = -1;
frame->sample_aspect_ratio = (AVRational) {1, 1};
frame->pts = color->pts++;
ff_draw_rectangle(picref->data, picref->linesize,
ff_draw_rectangle(frame->data, frame->linesize,
color->line, color->line_step, color->hsub, color->vsub,
0, 0, color->w, color->h);
return ff_filter_frame(link, picref);
return ff_filter_frame(link, frame);
}
static const AVFilterPad avfilter_vsrc_color_outputs[] = {

@ -54,7 +54,6 @@ typedef struct {
AVFrame *frame; ///< video frame to store the decoded images in
int w, h;
AVFilterBufferRef *picref;
} MovieContext;
#define OFFSET(x) offsetof(MovieContext, x)
@ -142,16 +141,13 @@ static int movie_init(AVFilterContext *ctx)
return AVERROR(EINVAL);
}
movie->codec_ctx->refcounted_frames = 1;
if ((ret = avcodec_open2(movie->codec_ctx, codec, NULL)) < 0) {
av_log(ctx, AV_LOG_ERROR, "Failed to open codec\n");
return ret;
}
if (!(movie->frame = avcodec_alloc_frame()) ) {
av_log(ctx, AV_LOG_ERROR, "Failed to alloc frame\n");
return AVERROR(ENOMEM);
}
movie->w = movie->codec_ctx->width;
movie->h = movie->codec_ctx->height;
@ -196,8 +192,7 @@ static av_cold void uninit(AVFilterContext *ctx)
avcodec_close(movie->codec_ctx);
if (movie->format_ctx)
avformat_close_input(&movie->format_ctx);
avfilter_unref_buffer(movie->picref);
avcodec_free_frame(&movie->frame);
av_frame_free(&movie->frame);
}
static int query_formats(AVFilterContext *ctx)
@ -225,41 +220,29 @@ static int movie_get_frame(AVFilterLink *outlink)
MovieContext *movie = outlink->src->priv;
AVPacket pkt;
int ret, frame_decoded;
AVStream *st = movie->format_ctx->streams[movie->stream_index];
AVStream av_unused *st = movie->format_ctx->streams[movie->stream_index];
if (movie->is_done == 1)
return 0;
movie->frame = av_frame_alloc();
if (!movie->frame)
return AVERROR(ENOMEM);
while ((ret = av_read_frame(movie->format_ctx, &pkt)) >= 0) {
// Is this a packet from the video stream?
if (pkt.stream_index == movie->stream_index) {
movie->codec_ctx->reordered_opaque = pkt.pos;
avcodec_decode_video2(movie->codec_ctx, movie->frame, &frame_decoded, &pkt);
if (frame_decoded) {
/* FIXME: avoid the memcpy */
movie->picref = ff_get_video_buffer(outlink, AV_PERM_WRITE | AV_PERM_PRESERVE |
AV_PERM_REUSE2, outlink->w, outlink->h);
av_image_copy(movie->picref->data, movie->picref->linesize,
movie->frame->data, movie->frame->linesize,
movie->picref->format, outlink->w, outlink->h);
avfilter_copy_frame_props(movie->picref, movie->frame);
/* FIXME: use a PTS correction mechanism as that in
* ffplay.c when some API will be available for that */
/* use pkt_dts if pkt_pts is not available */
movie->picref->pts = movie->frame->pkt_pts == AV_NOPTS_VALUE ?
movie->frame->pkt_dts : movie->frame->pkt_pts;
movie->picref->pos = movie->frame->reordered_opaque;
if (!movie->frame->sample_aspect_ratio.num)
movie->picref->video->pixel_aspect = st->sample_aspect_ratio;
if (movie->frame->pkt_pts != AV_NOPTS_VALUE)
movie->frame->pts = movie->frame->pkt_pts;
av_dlog(outlink->src,
"movie_get_frame(): file:'%s' pts:%"PRId64" time:%f pos:%"PRId64" aspect:%d/%d\n",
movie->file_name, movie->picref->pts,
(double)movie->picref->pts * av_q2d(st->time_base),
movie->picref->pos,
movie->picref->video->pixel_aspect.num, movie->picref->video->pixel_aspect.den);
"movie_get_frame(): file:'%s' pts:%"PRId64" time:%f aspect:%d/%d\n",
movie->file_name, movie->frame->pts,
(double)movie->frame->pts * av_q2d(st->time_base),
movie->frame->sample_aspect_ratio.num,
movie->frame->sample_aspect_ratio.den);
// We got it. Free the packet since we are returning
av_free_packet(&pkt);
@ -287,8 +270,8 @@ static int request_frame(AVFilterLink *outlink)
if ((ret = movie_get_frame(outlink)) < 0)
return ret;
ret = ff_filter_frame(outlink, movie->picref);
movie->picref = NULL;
ret = ff_filter_frame(outlink, movie->frame);
movie->frame = NULL;
return ret;
}
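Because the movie source now sets refcounted_frames on its decoder, frames from avcodec_decode_video2() can be handed straight to ff_filter_frame(), which is what removes the old ff_get_video_buffer() + av_image_copy() detour. A condensed sketch of that decode step (error handling trimmed, helper name hypothetical):

#include "libavcodec/avcodec.h"

#include "avfilter.h"
#include "internal.h"

/* assumes codec_ctx->refcounted_frames was set to 1 before avcodec_open2() */
static int decode_and_send(AVFilterLink *outlink, AVCodecContext *codec_ctx,
                           AVPacket *pkt)
{
    int got_frame = 0;
    AVFrame *frame = av_frame_alloc();

    if (!frame)
        return AVERROR(ENOMEM);

    avcodec_decode_video2(codec_ctx, frame, &got_frame, pkt);
    if (!got_frame) {
        av_frame_free(&frame);
        return 0;
    }

    if (frame->pkt_pts != AV_NOPTS_VALUE)   /* prefer the reordered pts */
        frame->pts = frame->pkt_pts;

    return ff_filter_frame(outlink, frame); /* ownership moves downstream */
}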

@ -53,7 +53,7 @@ typedef struct {
char *duration; ///< total duration of the generated video
AVRational sar; ///< sample aspect ratio
void (* fill_picture_fn)(AVFilterContext *ctx, AVFilterBufferRef *picref);
void (* fill_picture_fn)(AVFilterContext *ctx, AVFrame *frame);
/* only used by rgbtest */
int rgba_map[4];
@ -130,24 +130,23 @@ static int config_props(AVFilterLink *outlink)
static int request_frame(AVFilterLink *outlink)
{
TestSourceContext *test = outlink->src->priv;
AVFilterBufferRef *picref;
AVFrame *frame;
if (test->max_pts >= 0 && test->pts > test->max_pts)
return AVERROR_EOF;
picref = ff_get_video_buffer(outlink, AV_PERM_WRITE, test->w, test->h);
if (!picref)
frame = ff_get_video_buffer(outlink, test->w, test->h);
if (!frame)
return AVERROR(ENOMEM);
picref->pts = test->pts++;
picref->pos = -1;
picref->video->key_frame = 1;
picref->video->interlaced = 0;
picref->video->pict_type = AV_PICTURE_TYPE_I;
picref->video->pixel_aspect = test->sar;
frame->pts = test->pts++;
frame->key_frame = 1;
frame->interlaced_frame = 0;
frame->pict_type = AV_PICTURE_TYPE_I;
frame->sample_aspect_ratio = test->sar;
test->nb_frame++;
test->fill_picture_fn(outlink->src, picref);
test->fill_picture_fn(outlink->src, frame);
return ff_filter_frame(outlink, picref);
return ff_filter_frame(outlink, frame);
}
#if CONFIG_TESTSRC_FILTER
@ -235,7 +234,7 @@ static void draw_digit(int digit, uint8_t *dst, unsigned dst_linesize,
#define GRADIENT_SIZE (6 * 256)
static void test_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref)
static void test_fill_picture(AVFilterContext *ctx, AVFrame *frame)
{
TestSourceContext *test = ctx->priv;
uint8_t *p, *p0;
@ -249,9 +248,9 @@ static void test_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref)
int seg_size;
int second;
int i;
uint8_t *data = picref->data[0];
int width = picref->video->w;
int height = picref->video->h;
uint8_t *data = frame->data[0];
int width = frame->width;
int height = frame->height;
/* draw colored bars and circle */
radius = (width + height) / 4;
@ -281,11 +280,11 @@ static void test_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref)
}
quad0 += dquad_y;
dquad_y += 2;
p0 += picref->linesize[0];
p0 += frame->linesize[0];
}
/* draw sliding color line */
p = data + picref->linesize[0] * height * 3/4;
p = data + frame->linesize[0] * height * 3/4;
grad = (256 * test->nb_frame * test->time_base.num / test->time_base.den) %
GRADIENT_SIZE;
rgrad = 0;
@ -314,8 +313,8 @@ static void test_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref)
grad -= GRADIENT_SIZE;
}
for (y = height / 8; y > 0; y--) {
memcpy(p, p - picref->linesize[0], 3 * width);
p += picref->linesize[0];
memcpy(p, p - frame->linesize[0], 3 * width);
p += frame->linesize[0];
}
/* draw digits */
@ -324,10 +323,10 @@ static void test_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref)
second = test->nb_frame * test->time_base.num / test->time_base.den;
x = width - (width - seg_size * 64) / 2;
y = (height - seg_size * 13) / 2;
p = data + (x*3 + y * picref->linesize[0]);
p = data + (x*3 + y * frame->linesize[0]);
for (i = 0; i < 8; i++) {
p -= 3 * 8 * seg_size;
draw_digit(second % 10, p, picref->linesize[0], seg_size);
draw_digit(second % 10, p, frame->linesize[0], seg_size);
second /= 10;
if (second == 0)
break;
@ -427,13 +426,13 @@ static void rgbtest_put_pixel(uint8_t *dst, int dst_linesize,
}
}
static void rgbtest_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref)
static void rgbtest_fill_picture(AVFilterContext *ctx, AVFrame *frame)
{
TestSourceContext *test = ctx->priv;
int x, y, w = picref->video->w, h = picref->video->h;
int x, y, w = frame->width, h = frame->height;
for (y = 0; y < h; y++) {
for (x = 0; x < picref->video->w; x++) {
for (x = 0; x < w; x++) {
int c = 256*x/w;
int r = 0, g = 0, b = 0;
@ -441,7 +440,7 @@ static void rgbtest_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref
else if (3*y < 2*h) g = c;
else b = c;
rgbtest_put_pixel(picref->data[0], picref->linesize[0], x, y, r, g, b,
rgbtest_put_pixel(frame->data[0], frame->linesize[0], x, y, r, g, b,
ctx->outputs[0]->format, test->rgba_map);
}
}
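The test source shows the producer side: the metadata that used to sit in AVFilterBufferRef's video substruct (key_frame, interlacing, picture type, SAR) is now stamped directly on the AVFrame before it is sent. A sketch of that request_frame() pattern (fill_picture() and next_pts are hypothetical stand-ins for per-filter state):

#include "avfilter.h"
#include "internal.h"
#include "video.h"

static int64_t next_pts;  /* stands in for a per-filter pts counter */

static void fill_picture(AVFilterContext *ctx, AVFrame *frame)
{
    /* pixel generation elided */
}

static int request_frame(AVFilterLink *outlink)
{
    AVFrame *frame = ff_get_video_buffer(outlink, outlink->w, outlink->h);

    if (!frame)
        return AVERROR(ENOMEM);

    frame->pts                 = next_pts++;
    frame->key_frame           = 1;
    frame->interlaced_frame    = 0;
    frame->pict_type           = AV_PICTURE_TYPE_I;
    frame->sample_aspect_ratio = (AVRational){ 1, 1 };

    fill_picture(outlink->src, frame);
    return ff_filter_frame(outlink, frame);
}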

@ -46,10 +46,10 @@ typedef struct YADIFContext {
*/
int auto_enable;
AVFilterBufferRef *cur;
AVFilterBufferRef *next;
AVFilterBufferRef *prev;
AVFilterBufferRef *out;
AVFrame *cur;
AVFrame *next;
AVFrame *prev;
AVFrame *out;
/**
* Required alignment for filter_line
