@@ -55,23 +55,6 @@ typedef struct ADecorrelateContext {
                            AVFrame *in, AVFrame *out);
 } ADecorrelateContext;
 
-static int query_formats(AVFilterContext *ctx)
-{
-    static const enum AVSampleFormat sample_fmts[] = {
-        AV_SAMPLE_FMT_DBLP,
-        AV_SAMPLE_FMT_NONE
-    };
-    int ret = ff_set_common_formats_from_list(ctx, sample_fmts);
-    if (ret < 0)
-        return ret;
-
-    ret = ff_set_common_all_channel_counts(ctx);
-    if (ret < 0)
-        return ret;
-
-    return ff_set_common_all_samplerates(ctx);
-}
-
 static int ap_init(APContext *ap, int fs, double delay)
 {
     const int delay_samples = lrint(round(delay * fs));
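The callback removed above only pins the sample format to AV_SAMPLE_FMT_DBLP and then explicitly allows every channel count and sample rate; the hunk below swaps its registration for the declarative FILTER_SINGLE_SAMPLEFMT() table entry. A minimal sketch of the two forms, assuming a hypothetical filter ff_af_myfilter in a source file that includes libavfilter's internal headers (only the two macros, the ff_set_common_* helpers and AV_SAMPLE_FMT_DBLP are taken from this patch):

/* Sketch only: ff_af_myfilter and its omitted fields are hypothetical. */
const AVFilter ff_af_myfilter = {
    .name = "myfilter",
    /* ... inputs/outputs, init/uninit, priv_size, ... */
#if 0
    FILTER_QUERY_FUNC(query_formats),            /* before: callback form, needs a
                                                  * query_formats() like the one
                                                  * removed above */
#else
    FILTER_SINGLE_SAMPLEFMT(AV_SAMPLE_FMT_DBLP), /* after: declarative form; channel
                                                  * counts and sample rates stay
                                                  * unrestricted, which is what the
                                                  * removed ff_set_common_all_*
                                                  * calls arranged by hand */
#endif
};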
@@ -262,7 +245,7 @@ const AVFilter ff_af_adecorrelate = {
     .uninit          = uninit,
     FILTER_INPUTS(inputs),
     FILTER_OUTPUTS(outputs),
-    FILTER_QUERY_FUNC(query_formats),
+    FILTER_SINGLE_SAMPLEFMT(AV_SAMPLE_FMT_DBLP),
     .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC |
                        AVFILTER_FLAG_SLICE_THREADS,
 };
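As far as these two hunks show, negotiation behaviour is unchanged: the filter still advertises only planar doubles, with channel counts and sample rates left unrestricted; the declarative table entry simply replaces a callback plus three ff_set_common_* calls of per-filter boilerplate.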