avfilter: add showfreqs filter

pull/146/head
Paul B Mahol 10 years ago
parent e6b8797b82
commit 2fa019958b
  1. Changelog (1 line changed)
  2. configure (2 lines changed)
  3. doc/filters.texi (115 lines changed)
  4. libavfilter/Makefile (1 line changed)
  5. libavfilter/allfilters.c (1 line changed)
  6. libavfilter/avf_showfreqs.c (558 lines changed)
  7. libavfilter/version.h (2 lines changed)
  8. libavutil/audio_fifo.c (19 lines changed)
  9. libavutil/audio_fifo.h (16 lines changed)
  10. libavutil/fifo.c (26 lines changed)
  11. libavutil/fifo.h (10 lines changed)

@@ -31,6 +31,7 @@ version <next>:
- atadenoise video filter
- OS X VideoToolbox support
- aphasemeter filter
- showfreqs filter
version 2.7:

configure

@@ -2785,6 +2785,8 @@ select_filter_select="pixelutils"
smartblur_filter_deps="gpl swscale"
showcqt_filter_deps="avcodec"
showcqt_filter_select="fft"
showfreqs_filter_deps="avcodec"
showfreqs_filter_select="fft"
showspectrum_filter_deps="avcodec"
showspectrum_filter_select="rdft"
spp_filter_deps="gpl avcodec"

@@ -12704,6 +12704,121 @@ gamma=2:gamma2=2
@end itemize
@section showfreqs
Convert input audio to a video output representing the audio power spectrum.
Audio amplitude is on the Y-axis while frequency is on the X-axis.
The filter accepts the following options:
@table @option
@item size, s
Specify size of video. For the syntax of this option, check the
@ref{video size syntax,,"Video size" section in the ffmpeg-utils manual,ffmpeg-utils}.
Default is @code{1024x512}.
@item mode
Set display mode.
This sets how each frequency bin will be represented.
It accepts the following values:
@table @samp
@item line
@item bar
@item dot
@end table
Default is @code{bar}.
@item ascale
Set amplitude scale.
It accepts the following values:
@table @samp
@item lin
Linear scale.
@item sqrt
Square root scale.
@item cbrt
Cubic root scale.
@item log
Logarithmic scale.
@end table
Default is @code{log}.
@item fscale
Set frequency scale.
It accepts the following values:
@table @samp
@item lin
Linear scale.
@item log
Logarithmic scale.
@item rlog
Reverse logarithmic scale.
@end table
Default is @code{lin}.
@item win_size
Set window size.
It accepts the following values:
@table @samp
@item w16
@item w32
@item w64
@item w128
@item w256
@item w512
@item w1024
@item w2048
@item w4096
@item w8192
@item w16384
@item w32768
@item w65536
@end table
Default is @code{w2048}.
@item win_func
Set windowing function.
It accepts the following values:
@table @samp
@item rect
@item bartlett
@item hanning
@item hamming
@item blackman
@item welch
@item flattop
@item bharris
@item bnuttall
@item bhann
@item sine
@item nuttall
@end table
Default is @code{hanning}.
@item overlap
Set window overlap. In range @code{[0, 1]}. Default is @code{1},
which means the optimal overlap for the selected window function will be picked.
@item averaging
Set time averaging. Setting this to 0 will display current maximal peaks.
Default is @code{1}, which means time averaging is disabled.
@item colors
Specify a list of colors, separated by spaces or '|', which will be used to
draw the channel frequencies. Unrecognized or missing colors will be replaced
by white.
@end table
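For instance, a usage sketch in the style of the other audio visualization
examples in this manual (the input name @file{input.mp3} is only a placeholder):
@example
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
                 [a] showfreqs=mode=line:fscale=log [out0]'
@end example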
@section showspectrum
Convert input audio to a video output, representing the audio frequency

@@ -258,6 +258,7 @@ OBJS-$(CONFIG_APHASEMETER_FILTER) += avf_aphasemeter.o
OBJS-$(CONFIG_AVECTORSCOPE_FILTER) += avf_avectorscope.o
OBJS-$(CONFIG_CONCAT_FILTER) += avf_concat.o
OBJS-$(CONFIG_SHOWCQT_FILTER) += avf_showcqt.o
OBJS-$(CONFIG_SHOWFREQS_FILTER) += avf_showfreqs.o
OBJS-$(CONFIG_SHOWSPECTRUM_FILTER) += avf_showspectrum.o
OBJS-$(CONFIG_SHOWVOLUME_FILTER) += avf_showvolume.o
OBJS-$(CONFIG_SHOWWAVES_FILTER) += avf_showwaves.o

@@ -273,6 +273,7 @@ void avfilter_register_all(void)
REGISTER_FILTER(AVECTORSCOPE, avectorscope, avf);
REGISTER_FILTER(CONCAT, concat, avf);
REGISTER_FILTER(SHOWCQT, showcqt, avf);
REGISTER_FILTER(SHOWFREQS, showfreqs, avf);
REGISTER_FILTER(SHOWSPECTRUM, showspectrum, avf);
REGISTER_FILTER(SHOWVOLUME, showvolume, avf);
REGISTER_FILTER(SHOWWAVES, showwaves, avf);

@@ -0,0 +1,558 @@
/*
* Copyright (c) 2015 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <math.h>
#include "libavcodec/avfft.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "audio.h"
#include "video.h"
#include "avfilter.h"
#include "internal.h"
enum DisplayMode { LINE, BAR, DOT, NB_MODES };
enum FrequencyScale { FS_LINEAR, FS_LOG, FS_RLOG, NB_FSCALES };
enum AmplitudeScale { AS_LINEAR, AS_SQRT, AS_CBRT, AS_LOG, NB_ASCALES };
enum WindowFunc { WFUNC_RECT, WFUNC_HANNING, WFUNC_HAMMING, WFUNC_BLACKMAN,
WFUNC_BARTLETT, WFUNC_WELCH, WFUNC_FLATTOP,
WFUNC_BHARRIS, WFUNC_BNUTTALL, WFUNC_SINE, WFUNC_NUTTALL,
WFUNC_BHANN, NB_WFUNC };
typedef struct ShowFreqsContext {
const AVClass *class;
int w, h;
int mode;
int fft_bits;
int ascale, fscale;
int avg;
int win_func;
FFTContext *fft;
FFTComplex **fft_data;
float **avg_data;
float *window_func_lut;
float overlap;
int skip_samples;
int nb_channels;
int nb_freq;
int win_size;
float scale;
char *colors;
AVAudioFifo *fifo;
int64_t pts;
} ShowFreqsContext;
#define OFFSET(x) offsetof(ShowFreqsContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption showfreqs_options[] = {
{ "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "1024x512"}, 0, 0, FLAGS },
{ "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "1024x512"}, 0, 0, FLAGS },
{ "mode", "set display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=BAR}, 0, NB_MODES-1, FLAGS, "mode" },
{ "line", "show lines", 0, AV_OPT_TYPE_CONST, {.i64=LINE}, 0, 0, FLAGS, "mode" },
{ "bar", "show bars", 0, AV_OPT_TYPE_CONST, {.i64=BAR}, 0, 0, FLAGS, "mode" },
{ "dot", "show dots", 0, AV_OPT_TYPE_CONST, {.i64=DOT}, 0, 0, FLAGS, "mode" },
{ "ascale", "set amplitude scale", OFFSET(ascale), AV_OPT_TYPE_INT, {.i64=AS_LOG}, 0, NB_ASCALES-1, FLAGS, "ascale" },
{ "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=AS_LINEAR}, 0, 0, FLAGS, "ascale" },
{ "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=AS_SQRT}, 0, 0, FLAGS, "ascale" },
{ "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=AS_CBRT}, 0, 0, FLAGS, "ascale" },
{ "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=AS_LOG}, 0, 0, FLAGS, "ascale" },
{ "fscale", "set frequency scale", OFFSET(fscale), AV_OPT_TYPE_INT, {.i64=FS_LINEAR}, 0, NB_FSCALES-1, FLAGS, "fscale" },
{ "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=FS_LINEAR}, 0, 0, FLAGS, "fscale" },
{ "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=FS_LOG}, 0, 0, FLAGS, "fscale" },
{ "rlog", "reverse logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=FS_RLOG}, 0, 0, FLAGS, "fscale" },
{ "win_size", "set window size", OFFSET(fft_bits), AV_OPT_TYPE_INT, {.i64=11}, 4, 16, FLAGS, "fft" },
{ "w16", 0, 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, FLAGS, "fft" },
{ "w32", 0, 0, AV_OPT_TYPE_CONST, {.i64=5}, 0, 0, FLAGS, "fft" },
{ "w64", 0, 0, AV_OPT_TYPE_CONST, {.i64=6}, 0, 0, FLAGS, "fft" },
{ "w128", 0, 0, AV_OPT_TYPE_CONST, {.i64=7}, 0, 0, FLAGS, "fft" },
{ "w256", 0, 0, AV_OPT_TYPE_CONST, {.i64=8}, 0, 0, FLAGS, "fft" },
{ "w512", 0, 0, AV_OPT_TYPE_CONST, {.i64=9}, 0, 0, FLAGS, "fft" },
{ "w1024", 0, 0, AV_OPT_TYPE_CONST, {.i64=10}, 0, 0, FLAGS, "fft" },
{ "w2048", 0, 0, AV_OPT_TYPE_CONST, {.i64=11}, 0, 0, FLAGS, "fft" },
{ "w4096", 0, 0, AV_OPT_TYPE_CONST, {.i64=12}, 0, 0, FLAGS, "fft" },
{ "w8192", 0, 0, AV_OPT_TYPE_CONST, {.i64=13}, 0, 0, FLAGS, "fft" },
{ "w16384", 0, 0, AV_OPT_TYPE_CONST, {.i64=14}, 0, 0, FLAGS, "fft" },
{ "w32768", 0, 0, AV_OPT_TYPE_CONST, {.i64=15}, 0, 0, FLAGS, "fft" },
{ "w65536", 0, 0, AV_OPT_TYPE_CONST, {.i64=16}, 0, 0, FLAGS, "fft" },
{ "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64=WFUNC_HANNING}, 0, NB_WFUNC-1, FLAGS, "win_func" },
{ "rect", "Rectangular", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_RECT}, 0, 0, FLAGS, "win_func" },
{ "bartlett", "Bartlett", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BARTLETT}, 0, 0, FLAGS, "win_func" },
{ "hanning", "Hanning", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
{ "hamming", "Hamming", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HAMMING}, 0, 0, FLAGS, "win_func" },
{ "blackman", "Blackman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BLACKMAN}, 0, 0, FLAGS, "win_func" },
{ "welch", "Welch", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_WELCH}, 0, 0, FLAGS, "win_func" },
{ "flattop", "Flat-top", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_FLATTOP}, 0, 0, FLAGS, "win_func" },
{ "bharris", "Blackman-Harris", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHARRIS}, 0, 0, FLAGS, "win_func" },
{ "bnuttall", "Blackman-Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BNUTTALL}, 0, 0, FLAGS, "win_func" },
{ "bhann", "Bartlett-Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHANN}, 0, 0, FLAGS, "win_func" },
{ "sine", "Sine", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_SINE}, 0, 0, FLAGS, "win_func" },
{ "nuttall", "Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_NUTTALL}, 0, 0, FLAGS, "win_func" },
{ "overlap", "set window overlap", OFFSET(overlap), AV_OPT_TYPE_FLOAT, {.dbl=1.}, 0., 1., FLAGS },
{ "averaging", "set time averaging", OFFSET(avg), AV_OPT_TYPE_INT, {.i64=1}, 0, INT32_MAX, FLAGS },
{ "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
{ NULL }
};
AVFILTER_DEFINE_CLASS(showfreqs);
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *formats = NULL;
AVFilterChannelLayouts *layouts = NULL;
AVFilterLink *inlink = ctx->inputs[0];
AVFilterLink *outlink = ctx->outputs[0];
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
/* set input audio formats */
formats = ff_make_format_list(sample_fmts);
if (!formats)
return AVERROR(ENOMEM);
ff_formats_ref(formats, &inlink->out_formats);
layouts = ff_all_channel_layouts();
if (!layouts)
return AVERROR(ENOMEM);
ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);
formats = ff_all_samplerates();
if (!formats)
return AVERROR(ENOMEM);
ff_formats_ref(formats, &inlink->out_samplerates);
/* set output video format */
formats = ff_make_format_list(pix_fmts);
if (!formats)
return AVERROR(ENOMEM);
ff_formats_ref(formats, &outlink->in_formats);
return 0;
}
static void generate_window_func(float *lut, int N, int win_func, float *overlap)
{
int n;
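/* Besides filling the LUT, each case also reports a recommended overlap
 * fraction through *overlap; config_output() uses that value when the
 * "overlap" option is left at its default of 1. */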
switch (win_func) {
case WFUNC_RECT:
for (n = 0; n < N; n++)
lut[n] = 1.;
*overlap = 0.;
break;
case WFUNC_BARTLETT:
for (n = 0; n < N; n++)
lut[n] = 1.-FFABS((n-(N-1)/2.)/((N-1)/2.));
*overlap = 0.5;
break;
case WFUNC_HANNING:
for (n = 0; n < N; n++)
lut[n] = .5*(1-cos(2*M_PI*n/(N-1)));
*overlap = 0.5;
break;
case WFUNC_HAMMING:
for (n = 0; n < N; n++)
lut[n] = .54-.46*cos(2*M_PI*n/(N-1));
*overlap = 0.5;
break;
case WFUNC_BLACKMAN:
for (n = 0; n < N; n++)
lut[n] = .42659-.49656*cos(2*M_PI*n/(N-1))+.076849*cos(4*M_PI*n/(N-1));
*overlap = 0.661;
break;
case WFUNC_WELCH:
for (n = 0; n < N; n++)
lut[n] = 1.-(n-(N-1)/2.)/((N-1)/2.)*(n-(N-1)/2.)/((N-1)/2.);
*overlap = 0.293;
break;
case WFUNC_FLATTOP:
for (n = 0; n < N; n++)
lut[n] = 1.-1.985844164102*cos( 2*M_PI*n/(N-1))+1.791176438506*cos( 4*M_PI*n/(N-1))-
1.282075284005*cos( 6*M_PI*n/(N-1))+0.667777530266*cos( 8*M_PI*n/(N-1))-
0.240160796576*cos(10*M_PI*n/(N-1))+0.056656381764*cos(12*M_PI*n/(N-1))-
0.008134974479*cos(14*M_PI*n/(N-1))+0.000624544650*cos(16*M_PI*n/(N-1))-
0.000019808998*cos(18*M_PI*n/(N-1))+0.000000132974*cos(20*M_PI*n/(N-1));
*overlap = 0.841;
break;
case WFUNC_BHARRIS:
for (n = 0; n < N; n++)
lut[n] = 0.35875-0.48829*cos(2*M_PI*n/(N-1))+0.14128*cos(4*M_PI*n/(N-1))-0.01168*cos(6*M_PI*n/(N-1));
*overlap = 0.661;
break;
case WFUNC_BNUTTALL:
for (n = 0; n < N; n++)
lut[n] = 0.3635819-0.4891775*cos(2*M_PI*n/(N-1))+0.1365995*cos(4*M_PI*n/(N-1))-0.0106411*cos(6*M_PI*n/(N-1));
*overlap = 0.661;
break;
case WFUNC_BHANN:
for (n = 0; n < N; n++)
lut[n] = 0.62-0.48*FFABS(n/(N-1)-.5)-0.38*cos(2*M_PI*n/(N-1));
*overlap = 0.5;
break;
case WFUNC_SINE:
for (n = 0; n < N; n++)
lut[n] = sin(M_PI*n/(N-1));
*overlap = 0.75;
break;
case WFUNC_NUTTALL:
for (n = 0; n < N; n++)
lut[n] = 0.355768-0.487396*cos(2*M_PI*n/(N-1))+0.144232*cos(4*M_PI*n/(N-1))-0.012604*cos(6*M_PI*n/(N-1));
*overlap = 0.663;
break;
default:
av_assert0(0);
}
}
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AVFilterLink *inlink = ctx->inputs[0];
ShowFreqsContext *s = ctx->priv;
float overlap;
int i;
s->nb_freq = 1 << (s->fft_bits - 1);
s->win_size = s->nb_freq << 1;
av_audio_fifo_free(s->fifo);
av_fft_end(s->fft);
s->fft = av_fft_init(s->fft_bits, 0);
if (!s->fft) {
av_log(ctx, AV_LOG_ERROR, "Unable to create FFT context. "
"The window size might be too high.\n");
return AVERROR(ENOMEM);
}
/* FFT buffers: x2 for each (display) channel buffer.
* Note: we use free and malloc instead of a realloc-like function to
* make sure the buffer is aligned in memory for the FFT functions. */
for (i = 0; i < s->nb_channels; i++) {
av_freep(&s->fft_data[i]);
av_freep(&s->avg_data[i]);
}
av_freep(&s->fft_data);
av_freep(&s->avg_data);
s->nb_channels = inlink->channels;
s->fft_data = av_calloc(s->nb_channels, sizeof(*s->fft_data));
if (!s->fft_data)
return AVERROR(ENOMEM);
s->avg_data = av_calloc(s->nb_channels, sizeof(*s->avg_data));
if (!s->avg_data)
return AVERROR(ENOMEM);
for (i = 0; i < s->nb_channels; i++) {
s->fft_data[i] = av_calloc(s->win_size, sizeof(**s->fft_data));
s->avg_data[i] = av_calloc(s->nb_freq, sizeof(**s->avg_data));
if (!s->fft_data[i] || !s->avg_data[i])
return AVERROR(ENOMEM);
}
/* pre-calc windowing function */
s->window_func_lut = av_realloc_f(s->window_func_lut, s->win_size,
sizeof(*s->window_func_lut));
if (!s->window_func_lut)
return AVERROR(ENOMEM);
generate_window_func(s->window_func_lut, s->win_size, s->win_func, &overlap);
if (s->overlap == 1.)
s->overlap = overlap;
s->skip_samples = (1. - s->overlap) * s->win_size;
if (s->skip_samples < 1) {
av_log(ctx, AV_LOG_ERROR, "overlap %f too big\n", s->overlap);
return AVERROR(EINVAL);
}
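/* accumulate the window energy (sum of squared coefficients); plot_freqs()
 * divides the FFT magnitudes by this value to normalize them */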
for (s->scale = 0, i = 0; i < s->win_size; i++) {
s->scale += s->window_func_lut[i] * s->window_func_lut[i];
}
outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
outlink->frame_rate = av_make_q(inlink->sample_rate, s->win_size * (1.-s->overlap));
outlink->sample_aspect_ratio = (AVRational){1,1};
outlink->w = s->w;
outlink->h = s->h;
s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, s->win_size);
if (!s->fifo)
return AVERROR(ENOMEM);
return 0;
}
static inline void draw_dot(AVFrame *out, int x, int y, uint8_t fg[4])
{
uint32_t color = AV_RL32(out->data[0] + y * out->linesize[0] + x * 4);
if ((color & 0xffffff) != 0)
AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg) | color);
else
AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
}
static int get_sx(ShowFreqsContext *s, int f)
{
switch (s->fscale) {
case FS_LINEAR:
return (s->w/(float)s->nb_freq)*f;
case FS_LOG:
return s->w-pow(s->w, (s->nb_freq-f-1)/(s->nb_freq-1.));
case FS_RLOG:
return pow(s->w, f/(s->nb_freq-1.));
}
return 0;
}
static float get_bsize(ShowFreqsContext *s, int f)
{
switch (s->fscale) {
case FS_LINEAR:
return s->w/(float)s->nb_freq;
case FS_LOG:
return pow(s->w, (s->nb_freq-f-1)/(s->nb_freq-1.))-
pow(s->w, (s->nb_freq-f-2)/(s->nb_freq-1.));
case FS_RLOG:
return pow(s->w, (f+1)/(s->nb_freq-1.))-
pow(s->w, f /(s->nb_freq-1.));
}
return 1.;
}
static inline void plot_freq(ShowFreqsContext *s, int ch,
double a, int f, uint8_t fg[4], int *prev_y,
AVFrame *out, AVFilterLink *outlink)
{
const int w = s->w;
const float avg = s->avg_data[ch][f];
const float bsize = get_bsize(s, f);
const int sx = get_sx(s, f);
int x, y, i;
switch(s->ascale) {
case AS_SQRT:
a = 1.0 - sqrt(a);
break;
case AS_CBRT:
a = 1.0 - cbrt(a);
break;
case AS_LOG:
a = log(av_clipd(a, 1e-6, 1)) / log(1e-6);
break;
case AS_LINEAR:
a = 1.0 - a;
break;
}
y = a * outlink->h - 1;
if (y < 0)
return;
switch (s->avg) {
case 0:
y = s->avg_data[ch][f] = !outlink->frame_count ? y : FFMIN(avg, y);
break;
case 1:
break;
default:
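/* cumulative moving average of y over at most s->avg frames;
 * the y factors in numerator and denominator cancel out */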
s->avg_data[ch][f] = avg + y * (y - avg) / (FFMIN(outlink->frame_count + 1, s->avg) * y);
y = s->avg_data[ch][f];
break;
}
switch(s->mode) {
case LINE:
if (*prev_y == -1) {
*prev_y = y;
}
if (y <= *prev_y) {
for (x = sx + 1; x < sx + bsize && x < w; x++)
draw_dot(out, x, y, fg);
for (i = y; i <= *prev_y; i++)
draw_dot(out, sx, i, fg);
} else {
for (i = *prev_y; i <= y; i++)
draw_dot(out, sx, i, fg);
for (x = sx + 1; x < sx + bsize && x < w; x++)
draw_dot(out, x, i - 1, fg);
}
*prev_y = y;
break;
case BAR:
for (x = sx; x < sx + bsize && x < w; x++)
for (i = y; i < outlink->h; i++)
draw_dot(out, x, i, fg);
break;
case DOT:
for (x = sx; x < sx + bsize && x < w; x++)
draw_dot(out, x, y, fg);
break;
}
}
static int plot_freqs(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
ShowFreqsContext *s = ctx->priv;
const int win_size = s->win_size;
char *colors, *color, *saveptr = NULL;
AVFrame *out;
int ch, n;
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out)
return AVERROR(ENOMEM);
for (n = 0; n < outlink->h; n++)
memset(out->data[0] + out->linesize[0] * n, 0, outlink->w * 4);
/* fill FFT input with the number of samples available */
for (ch = 0; ch < s->nb_channels; ch++) {
const float *p = (float *)in->extended_data[ch];
for (n = 0; n < in->nb_samples; n++) {
s->fft_data[ch][n].re = p[n] * s->window_func_lut[n];
s->fft_data[ch][n].im = 0;
}
for (; n < win_size; n++) {
s->fft_data[ch][n].re = 0;
s->fft_data[ch][n].im = 0;
}
}
/* run FFT on each samples set */
for (ch = 0; ch < s->nb_channels; ch++) {
av_fft_permute(s->fft, s->fft_data[ch]);
av_fft_calc(s->fft, s->fft_data[ch]);
}
#define RE(x, ch) s->fft_data[ch][x].re
#define IM(x, ch) s->fft_data[ch][x].im
#define M(a, b) (sqrt((a) * (a) + (b) * (b)))
colors = av_strdup(s->colors);
if (!colors)
return AVERROR(ENOMEM);
for (ch = 0; ch < s->nb_channels; ch++) {
uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
int prev_y = -1, f;
double a;
color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
if (color)
av_parse_color(fg, color, -1, ctx);
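/* bin 0 is the DC component; its imaginary part is zero for real input,
 * so only the real part is used */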
a = av_clipd(M(RE(0, ch), 0) / s->scale, 0, 1);
plot_freq(s, ch, a, 0, fg, &prev_y, out, outlink);
for (f = 1; f < s->nb_freq; f++) {
a = av_clipd(M(RE(f, ch), IM(f, ch)) / s->scale, 0, 1);
plot_freq(s, ch, a, f, fg, &prev_y, out, outlink);
}
}
av_free(colors);
out->pts = in->pts;
return ff_filter_frame(outlink, out);
}
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
ShowFreqsContext *s = ctx->priv;
AVFrame *fin = NULL;
int ret = 0;
av_audio_fifo_write(s->fifo, (void **)in->extended_data, in->nb_samples);
while (av_audio_fifo_size(s->fifo) >= s->win_size) {
fin = ff_get_audio_buffer(inlink, s->win_size);
if (!fin) {
ret = AVERROR(ENOMEM);
goto fail;
}
fin->pts = s->pts;
s->pts += s->skip_samples;
ret = av_audio_fifo_peek(s->fifo, (void **)fin->extended_data, s->win_size);
if (ret < 0)
goto fail;
ret = plot_freqs(inlink, fin);
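/* drain only skip_samples so consecutive windows overlap as configured */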
av_audio_fifo_drain(s->fifo, s->skip_samples);
if (ret < 0)
goto fail;
}
fail:
av_frame_free(&fin);
av_frame_free(&in);
return ret;
}
static av_cold void uninit(AVFilterContext *ctx)
{
ShowFreqsContext *s = ctx->priv;
int i;
av_fft_end(s->fft);
for (i = 0; i < s->nb_channels; i++) {
av_freep(&s->fft_data[i]);
av_freep(&s->avg_data[i]);
}
av_freep(&s->fft_data);
av_freep(&s->avg_data);
av_freep(&s->window_func_lut);
av_audio_fifo_free(s->fifo);
}
static const AVFilterPad showfreqs_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
static const AVFilterPad showfreqs_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output,
},
{ NULL }
};
AVFilter ff_avf_showfreqs = {
.name = "showfreqs",
.description = NULL_IF_CONFIG_SMALL("Convert input audio to a frequencies video output."),
.uninit = uninit,
.query_formats = query_formats,
.priv_size = sizeof(ShowFreqsContext),
.inputs = showfreqs_inputs,
.outputs = showfreqs_outputs,
.priv_class = &showfreqs_class,
};

@@ -30,7 +30,7 @@
#include "libavutil/version.h"
#define LIBAVFILTER_VERSION_MAJOR 5
#define LIBAVFILTER_VERSION_MINOR 34
#define LIBAVFILTER_VERSION_MINOR 35
#define LIBAVFILTER_VERSION_MICRO 100
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \

@@ -136,6 +136,25 @@ int av_audio_fifo_write(AVAudioFifo *af, void **data, int nb_samples)
return nb_samples;
}
int av_audio_fifo_peek(AVAudioFifo *af, void **data, int nb_samples)
{
int i, ret, size;
if (nb_samples < 0)
return AVERROR(EINVAL);
nb_samples = FFMIN(nb_samples, af->nb_samples);
if (!nb_samples)
return 0;
size = nb_samples * af->sample_size;
for (i = 0; i < af->nb_buffers; i++) {
if ((ret = av_fifo_generic_peek(af->buf[i], data[i], size, NULL)) < 0)
return AVERROR_BUG;
}
return nb_samples;
}
int av_audio_fifo_read(AVAudioFifo *af, void **data, int nb_samples)
{
int i, ret, size;

@@ -93,6 +93,22 @@ int av_audio_fifo_realloc(AVAudioFifo *af, int nb_samples);
*/
int av_audio_fifo_write(AVAudioFifo *af, void **data, int nb_samples);
/**
* Peek data from an AVAudioFifo.
*
* @see enum AVSampleFormat
* The documentation for AVSampleFormat describes the data layout.
*
* @param af AVAudioFifo to read from
* @param data audio data plane pointers
* @param nb_samples number of samples to peek
* @return number of samples actually peeked, or a negative AVERROR code
* on failure. The number of samples actually peeked will not
* be greater than nb_samples, and will only be less than
* nb_samples if av_audio_fifo_size is less than nb_samples.
*/
int av_audio_fifo_peek(AVAudioFifo *af, void **data, int nb_samples);
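As a usage sketch (not part of the patch), this is the peek-then-drain pattern the new call enables for overlapped analysis windows, mirroring what showfreqs does in filter_frame(); process_window() is a placeholder for the per-window analysis:

#include "libavutil/audio_fifo.h"

/* placeholder for the per-window analysis (e.g. the FFT in showfreqs) */
static void process_window(float **planes, int nb_samples);

static int consume_windows(AVAudioFifo *fifo, float **window,
                           int win_size, int hop_size)
{
    int ret = 0;

    while (av_audio_fifo_size(fifo) >= win_size) {
        /* copy out a full window but leave the samples in the FIFO */
        ret = av_audio_fifo_peek(fifo, (void **)window, win_size);
        if (ret < 0)
            return ret;

        process_window(window, win_size);

        /* discard only the hop so the next window overlaps this one
         * by win_size - hop_size samples */
        av_audio_fifo_drain(fifo, hop_size);
    }

    return ret;
}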
/**
* Read data from an AVAudioFifo.
*

@@ -148,6 +148,32 @@ int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size,
return total - size;
}
int av_fifo_generic_peek(AVFifoBuffer *f, void *dest, int buf_size,
void (*func)(void *, void *, int))
{
// Read memory barrier needed for SMP here in theory
uint8_t *rptr = f->rptr;
uint32_t rndx = f->rndx;
do {
int len = FFMIN(f->end - f->rptr, buf_size);
if (func)
func(dest, f->rptr, len);
else {
memcpy(dest, f->rptr, len);
dest = (uint8_t *)dest + len;
}
// memory barrier needed for SMP here in theory
av_fifo_drain(f, len);
buf_size -= len;
} while (buf_size > 0);
f->rptr = rptr;
f->rndx = rndx;
return 0;
}
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size,
void (*func)(void *, void *, int))
{

@@ -83,6 +83,16 @@ int av_fifo_size(const AVFifoBuffer *f);
*/
int av_fifo_space(const AVFifoBuffer *f);
/**
* Feed data from an AVFifoBuffer to a user-supplied callback.
* Similar to av_fifo_generic_read but without discarding data.
* @param f AVFifoBuffer to read from
* @param dest data destination
* @param buf_size number of bytes to read
* @param func generic read function
*/
int av_fifo_generic_peek(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int));
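A small illustrative sketch (not part of the patch) of what the peek semantics guarantee: the peeked bytes remain in the FIFO, so a later read returns the same data.

#include <string.h>
#include "libavutil/fifo.h"

static int peek_then_read(void)
{
    uint8_t in[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
    uint8_t peeked[8], readout[8];
    AVFifoBuffer *f = av_fifo_alloc(sizeof(in));
    int ret;

    if (!f)
        return AVERROR(ENOMEM);

    av_fifo_generic_write(f, in, sizeof(in), NULL);

    /* the data stays in the FIFO after this call ... */
    av_fifo_generic_peek(f, peeked, sizeof(peeked), NULL);

    /* ... so a subsequent read still sees the same bytes */
    av_fifo_generic_read(f, readout, sizeof(readout), NULL);

    ret = memcmp(peeked, readout, sizeof(in)) ? AVERROR_BUG : 0;

    av_fifo_freep(&f);
    return ret;
}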
/**
* Feed data from an AVFifoBuffer to a user-supplied callback.
* @param f AVFifoBuffer to read from
