* [FFmpeg-devel] [PATCH] avfilter: add Affine Projection adaptive audio filter
@ 2023-11-26 14:41 Paul B Mahol
2023-11-28 12:33 ` Paul B Mahol
0 siblings, 1 reply; 3+ messages in thread
From: Paul B Mahol @ 2023-11-26 14:41 UTC (permalink / raw)
To: FFmpeg development discussions and patches
[-- Attachment #1: Type: text/plain, Size: 10 bytes --]
Attached.
[-- Attachment #2: 0001-avfilter-add-Affine-Projection-adaptive-audio-filter.patch --]
[-- Type: text/x-patch, Size: 17055 bytes --]
From 6c355f79e9c21a11e5e1266da7936a4ac2dc07ac Mon Sep 17 00:00:00 2001
From: Paul B Mahol <onemda@gmail.com>
Date: Sun, 30 Apr 2023 17:06:00 +0200
Subject: [PATCH] avfilter: add Affine Projection adaptive audio filter
Signed-off-by: Paul B Mahol <onemda@gmail.com>
---
doc/filters.texi | 43 ++++
libavfilter/Makefile | 1 +
libavfilter/af_aap.c | 451 +++++++++++++++++++++++++++++++++++++++
libavfilter/allfilters.c | 1 +
4 files changed, 496 insertions(+)
create mode 100644 libavfilter/af_aap.c
diff --git a/doc/filters.texi b/doc/filters.texi
index 5268b2003c..3c9d32aa76 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -418,6 +418,49 @@ build.
Below is a description of the currently available audio filters.
+@section aap
+Apply Affine Projection algorithm to the first audio stream using the second audio stream.
+
+This adaptive filter is used to estimate unknown audio based on multiple input audio samples.
+The affine projection algorithm can trade off computational complexity against convergence speed.
+
+A description of the accepted options follows.
+
+@table @option
+@item order
+Set the filter order.
+
+@item projection
+Set the projection order.
+
+@item mu
+Set the filter mu.
+
+@item delta
+Set the coefficient to initialize internal covariance matrix.
+
+@item out_mode
+Set the filter output samples. It accepts the following values:
+@table @option
+@item i
+Pass the 1st input.
+
+@item d
+Pass the 2nd input.
+
+@item o
+Pass the difference between the desired signal (2nd input) and the error signal estimate.
+
+@item n
+Pass the difference between the input signal (1st input) and the error signal estimate.
+
+@item e
+Pass error signal estimated samples.
+@end table
+
+Default value is @var{o}.
+@end table
+
@section acompressor
A compressor is mainly used to reduce the dynamic range of a signal.
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 5e5068b564..0da62540f8 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -35,6 +35,7 @@ OBJS-$(CONFIG_DNN) += dnn_filter_common.o
include $(SRC_PATH)/libavfilter/dnn/Makefile
# audio filters
+OBJS-$(CONFIG_AAP_FILTER) += af_aap.o
OBJS-$(CONFIG_ABENCH_FILTER) += f_bench.o
OBJS-$(CONFIG_ACOMPRESSOR_FILTER) += af_sidechaincompress.o
OBJS-$(CONFIG_ACONTRAST_FILTER) += af_acontrast.o
diff --git a/libavfilter/af_aap.c b/libavfilter/af_aap.c
new file mode 100644
index 0000000000..978a853137
--- /dev/null
+++ b/libavfilter/af_aap.c
@@ -0,0 +1,451 @@
+/*
+ * Copyright (c) 2023 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/channel_layout.h"
+#include "libavutil/common.h"
+#include "libavutil/float_dsp.h"
+#include "libavutil/opt.h"
+
+#include "audio.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "filters.h"
+#include "internal.h"
+
+/* Selects which signal the filter writes out (the out_mode option). */
+enum OutModes {
+ IN_MODE,
+ DESIRED_MODE,
+ OUT_MODE,
+ NOISE_MODE,
+ ERROR_MODE,
+ NB_OMODES
+};
+
+/* Per-filter state for the affine projection adaptive filter.
+ * All AVFrame members are per-channel planar float work buffers
+ * allocated in config_output(). */
+typedef struct AudioAPContext {
+ const AVClass *class;
+
+ int order; /* FIR filter order (number of taps) */
+ int projection; /* affine projection order */
+ float mu; /* adaptation step size */
+ float delta; /* covariance-matrix regularization */
+ int output_mode; /* one of enum OutModes */
+
+ int kernel_size; /* order aligned up to 16 for the dsp dot product */
+ AVFrame *offset; /* 3 ring-buffer offsets per channel */
+ AVFrame *delay; /* input delay line (stored twice, see fir_sample) */
+ AVFrame *coeffs; /* FIR coefficients (stored twice) */
+ AVFrame *e; /* error history (stored twice) */
+ AVFrame *p; /* LUP permutation vector */
+ AVFrame *x; /* input history (stored twice) */
+ AVFrame *w; /* solution of the projection system */
+ AVFrame *dcoeffs; /* coefficient update direction */
+ AVFrame *tmp; /* scratch for the rotated coefficients */
+ AVFrame *tmpm; /* projection x projection Gram matrix */
+ AVFrame *itmpm; /* its inverse */
+
+ float **tmpmp; /* per-channel row pointers into tmpm */
+ float **itmpmp; /* per-channel row pointers into itmpm */
+
+ AVFrame *frame[2]; /* pending input / desired frames */
+
+ AVFloatDSPContext *fdsp;
+} AudioAPContext;
+
+#define OFFSET(x) offsetof(AudioAPContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+#define AT AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
+
+/* Options; mu, delta and out_mode may be changed at runtime (AT flag). */
+static const AVOption aap_options[] = {
+ { "order", "set the filter order", OFFSET(order), AV_OPT_TYPE_INT, {.i64=16}, 1, INT16_MAX, A },
+ { "projection", "set the filter projection", OFFSET(projection), AV_OPT_TYPE_INT, {.i64=2}, 1, 256, A },
+ { "mu", "set the filter mu", OFFSET(mu), AV_OPT_TYPE_FLOAT, {.dbl=0.0001},0,1, AT },
+ { "delta", "set the filter delta", OFFSET(delta), AV_OPT_TYPE_FLOAT, {.dbl=0.001},0, 1, AT },
+ { "out_mode", "set output mode", OFFSET(output_mode), AV_OPT_TYPE_INT, {.i64=OUT_MODE}, 0, NB_OMODES-1, AT, "mode" },
+ { "i", "input", 0, AV_OPT_TYPE_CONST, {.i64=IN_MODE}, 0, 0, AT, "mode" },
+ { "d", "desired", 0, AV_OPT_TYPE_CONST, {.i64=DESIRED_MODE}, 0, 0, AT, "mode" },
+ { "o", "output", 0, AV_OPT_TYPE_CONST, {.i64=OUT_MODE}, 0, 0, AT, "mode" },
+ { "n", "noise", 0, AV_OPT_TYPE_CONST, {.i64=NOISE_MODE}, 0, 0, AT, "mode" },
+ { "e", "error", 0, AV_OPT_TYPE_CONST, {.i64=ERROR_MODE}, 0, 0, AT, "mode" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(aap);
+
+/* Run the FIR once for one channel: push `sample` into the circular
+ * delay line at *offset, dot it with the coefficients rotated to match
+ * the current ring position, and step the offset backwards (wrapping
+ * at order).  Returns the filtered output sample. */
+static float fir_sample(AudioAPContext *s, float sample, float *delay,
+ float *coeffs, float *tmp, int *offset)
+{
+ const int order = s->order;
+ float output;
+
+ delay[*offset] = sample;
+
+ /* coeffs is stored twice back to back (see process_sample), so one
+ * memcpy yields the rotation aligned with the current offset */
+ memcpy(tmp, coeffs + order - *offset, order * sizeof(float));
+ output = s->fdsp->scalarproduct_float(delay, tmp, s->kernel_size);
+
+ if (--(*offset) < 0)
+ *offset = order - 1;
+
+ return output;
+}
+
+/* In-place LUP decomposition of the NxN matrix MA (array of row
+ * pointers), with partial pivoting.  P[0..N-1] receives the row
+ * permutation and P[N] counts the row swaps.  Returns 0 when the
+ * matrix is numerically singular (no pivot reaches tol), 1 on success. */
+static int lup_decompose(float **MA, const int N, const float tol, int *P)
+{
+ for (int i = 0; i <= N; i++)
+ P[i] = i;
+
+ for (int i = 0; i < N; i++) {
+ float maxA = 0.f;
+ int imax = i;
+
+ /* use fabsf(): the data is float, fabs() would round-trip
+ * through double for no benefit */
+ for (int k = i; k < N; k++) {
+ float absA = fabsf(MA[k][i]);
+ if (absA > maxA) {
+ maxA = absA;
+ imax = k;
+ }
+ }
+
+ if (maxA < tol)
+ return 0;
+
+ if (imax != i) {
+ FFSWAP(int, P[i], P[imax]);
+ FFSWAP(float *, MA[i], MA[imax]);
+ P[N]++;
+ }
+
+ /* eliminate column i below the pivot */
+ for (int j = i + 1; j < N; j++) {
+ MA[j][i] /= MA[i][i];
+
+ for (int k = i + 1; k < N; k++)
+ MA[j][k] -= MA[j][i] * MA[i][k];
+ }
+ }
+
+ return 1;
+}
+
+/* Compute IA = inverse of the matrix whose LUP factors are in MA with
+ * permutation P (as produced by lup_decompose), solving one column of
+ * the identity at a time by forward then backward substitution. */
+static void lup_invert(float *const *MA, const int *P, const int N, float **IA)
+{
+ for (int j = 0; j < N; j++) {
+ /* forward substitution against the unit-diagonal L factor */
+ for (int i = 0; i < N; i++) {
+ IA[i][j] = P[i] == j ? 1.f : 0.f;
+
+ for (int k = 0; k < i; k++)
+ IA[i][j] -= MA[i][k] * IA[k][j];
+ }
+
+ /* backward substitution against U */
+ for (int i = N - 1; i >= 0; i--) {
+ for (int k = i + 1; k < N; k++)
+ IA[i][j] -= MA[i][k] * IA[k][j];
+
+ IA[i][j] /= MA[i][i];
+ }
+ }
+}
+
+/* Process one sample of channel `ch` with the affine projection update:
+ * filter `input`, form the error against `desired`, solve the
+ * projection-order normal equations via LUP inversion, and update the
+ * FIR coefficients.  Returns the sample selected by out_mode. */
+static float process_sample(AudioAPContext *s, float input, float desired, int ch)
+{
+ float *dcoeffs = (float *)s->dcoeffs->extended_data[ch];
+ float *coeffs = (float *)s->coeffs->extended_data[ch];
+ float *delay = (float *)s->delay->extended_data[ch];
+ float **itmpmp = &s->itmpmp[s->projection * ch];
+ float **tmpmp = &s->tmpmp[s->projection * ch];
+ float *tmpm = (float *)s->tmpm->extended_data[ch];
+ float *tmp = (float *)s->tmp->extended_data[ch];
+ float *e = (float *)s->e->extended_data[ch];
+ float *x = (float *)s->x->extended_data[ch];
+ float *w = (float *)s->w->extended_data[ch];
+ int *p = (int *)s->p->extended_data[ch];
+ int *offset = (int *)s->offset->extended_data[ch];
+ const int projection = s->projection;
+ const float delta = s->delta;
+ const int order = s->order;
+ const int length = projection + order;
+ const float mu = s->mu;
+ const float tol = 0.00001f;
+ float output;
+
+ /* x and delay are ring buffers mirrored at +length / +order so the
+ * loops below can read linearly without wrapping */
+ x[offset[2] + length] = x[offset[2]] = input;
+ delay[offset[0] + order] = input;
+
+ output = fir_sample(s, input, delay, coeffs, tmp, offset);
+ e[offset[1]] = e[offset[1] + projection] = desired - output;
+
+ /* build the symmetric Gram matrix of the last `projection` input
+ * vectors, regularized on the diagonal by delta */
+ for (int i = 0; i < projection; i++) {
+ const int iprojection = i * projection;
+
+ for (int j = i; j < projection; j++) {
+ float sum = 0.f;
+ for (int k = 0; k < order; k++)
+ sum += x[offset[2] + i + k] * x[offset[2] + j + k];
+ tmpm[iprojection + j] = sum;
+ if (i != j)
+ tmpm[j * projection + i] = sum;
+ }
+
+ tmpm[iprojection + i] += delta;
+ }
+
+ /* invert the Gram matrix (tmpmp/itmpmp are row views of tmpm/itmpm) */
+ lup_decompose(tmpmp, projection, tol, p);
+ lup_invert(tmpmp, p, projection, itmpmp);
+
+ /* w = Gram^-1 * error vector */
+ for (int i = 0; i < projection; i++) {
+ float sum = 0.f;
+ for (int j = 0; j < projection; j++)
+ sum += itmpmp[i][j] * e[j + offset[1]];
+ w[i] = sum;
+ }
+
+ /* per-tap update direction, projected back through the input history */
+ for (int i = 0; i < order; i++) {
+ float sum = 0.f;
+ for (int j = 0; j < projection; j++)
+ sum += x[offset[2] + i + j] * w[j];
+ dcoeffs[i] = sum;
+ }
+
+ /* scaled coefficient step; keep the mirrored copy in sync */
+ for (int i = 0; i < order; i++)
+ coeffs[i] = coeffs[i + order] = coeffs[i] + mu * dcoeffs[i];
+
+ /* step the error and input ring offsets backwards, wrapping */
+ if (--offset[1] < 0)
+ offset[1] = projection - 1;
+
+ if (--offset[2] < 0)
+ offset[2] = length - 1;
+
+ switch (s->output_mode) {
+ case IN_MODE: output = input; break;
+ case DESIRED_MODE: output = desired; break;
+ case OUT_MODE: output = desired - output; break;
+ case NOISE_MODE: output = input - output; break;
+ case ERROR_MODE: break; /* keep the fir_sample() result as-is */
+ }
+ return output;
+}
+
+/* Slice-threaded worker: each job processes a contiguous range of
+ * channels of the output frame `arg`. */
+static int process_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ AudioAPContext *s = ctx->priv;
+ AVFrame *out = arg;
+ const int start = (out->ch_layout.nb_channels * jobnr) / nb_jobs;
+ const int end = (out->ch_layout.nb_channels * (jobnr+1)) / nb_jobs;
+
+ for (int c = start; c < end; c++) {
+ const float *input = (const float *)s->frame[0]->extended_data[c];
+ const float *desired = (const float *)s->frame[1]->extended_data[c];
+ float *output = (float *)out->extended_data[c];
+
+ for (int n = 0; n < out->nb_samples; n++) {
+ /* always run the adaptation so the filter state keeps
+ * evolving; when disabled, only the output is passed through */
+ output[n] = process_sample(s, input[n], desired[n], c);
+ if (ctx->is_disabled)
+ output[n] = input[n];
+ }
+ }
+
+ return 0;
+}
+
+/* Activate callback: consume the same number of samples from both
+ * inputs in lock-step, filter them into one output frame, and forward
+ * EOF status / frame requests. */
+static int activate(AVFilterContext *ctx)
+{
+ AudioAPContext *s = ctx->priv;
+ int i, ret, status;
+ int nb_samples;
+ int64_t pts;
+
+ FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);
+
+ /* only as many samples as both inputs have queued */
+ nb_samples = FFMIN(ff_inlink_queued_samples(ctx->inputs[0]),
+ ff_inlink_queued_samples(ctx->inputs[1]));
+ for (i = 0; i < ctx->nb_inputs && nb_samples > 0; i++) {
+ if (s->frame[i])
+ continue;
+
+ if (ff_inlink_check_available_samples(ctx->inputs[i], nb_samples) > 0) {
+ ret = ff_inlink_consume_samples(ctx->inputs[i], nb_samples, nb_samples, &s->frame[i]);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* once both halves of a pair are in hand, process them */
+ if (s->frame[0] && s->frame[1]) {
+ AVFrame *out;
+
+ out = ff_get_audio_buffer(ctx->outputs[0], s->frame[0]->nb_samples);
+ if (!out) {
+ av_frame_free(&s->frame[0]);
+ av_frame_free(&s->frame[1]);
+ return AVERROR(ENOMEM);
+ }
+
+ ff_filter_execute(ctx, process_channels, out, NULL,
+ FFMIN(ctx->outputs[0]->ch_layout.nb_channels, ff_filter_get_nb_threads(ctx)));
+
+ out->pts = s->frame[0]->pts;
+
+ av_frame_free(&s->frame[0]);
+ av_frame_free(&s->frame[1]);
+
+ ret = ff_filter_frame(ctx->outputs[0], out);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* nothing consumable: propagate EOF from either input */
+ if (!nb_samples) {
+ for (i = 0; i < 2; i++) {
+ if (ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts)) {
+ ff_outlink_set_status(ctx->outputs[0], status, pts);
+ return 0;
+ }
+ }
+ }
+
+ /* request more data from the first starved input */
+ if (ff_outlink_frame_wanted(ctx->outputs[0])) {
+ for (i = 0; i < 2; i++) {
+ if (ff_inlink_queued_samples(ctx->inputs[i]) > 0)
+ continue;
+ ff_inlink_request_frame(ctx->inputs[i]);
+ return 0;
+ }
+ }
+ return 0;
+}
+
+/* Allocate all per-channel state once the channel count is known.
+ * Buffers holding mirrored ring data are sized twice their logical
+ * length (see process_sample).  Guarded by !s->xxx checks so a
+ * reconfiguration does not leak or reset existing state. */
+static int config_output(AVFilterLink *outlink)
+{
+ const int channels = outlink->ch_layout.nb_channels;
+ AVFilterContext *ctx = outlink->src;
+ AudioAPContext *s = ctx->priv;
+
+ /* pad the dot-product length to a multiple of 16 for scalarproduct_float */
+ s->kernel_size = FFALIGN(s->order, 16);
+
+ if (!s->offset)
+ s->offset = ff_get_audio_buffer(outlink, 3);
+ if (!s->delay)
+ s->delay = ff_get_audio_buffer(outlink, 2 * s->kernel_size);
+ if (!s->dcoeffs)
+ s->dcoeffs = ff_get_audio_buffer(outlink, s->kernel_size);
+ if (!s->coeffs)
+ s->coeffs = ff_get_audio_buffer(outlink, 2 * s->kernel_size);
+ if (!s->e)
+ s->e = ff_get_audio_buffer(outlink, 2 * s->projection);
+ if (!s->p)
+ s->p = ff_get_audio_buffer(outlink, s->projection + 1);
+ if (!s->x)
+ s->x = ff_get_audio_buffer(outlink, 2 * (s->projection + s->order));
+ if (!s->w)
+ s->w = ff_get_audio_buffer(outlink, s->projection);
+ if (!s->tmp)
+ s->tmp = ff_get_audio_buffer(outlink, s->kernel_size);
+ if (!s->tmpm)
+ s->tmpm = ff_get_audio_buffer(outlink, s->projection * s->projection);
+ if (!s->itmpm)
+ s->itmpm = ff_get_audio_buffer(outlink, s->projection * s->projection);
+
+ if (!s->tmpmp)
+ s->tmpmp = av_calloc(s->projection * channels, sizeof(*s->tmpmp));
+ if (!s->itmpmp)
+ s->itmpmp = av_calloc(s->projection * channels, sizeof(*s->itmpmp));
+
+ if (!s->offset || !s->delay || !s->dcoeffs || !s->coeffs || !s->tmpmp || !s->itmpmp ||
+ !s->e || !s->p || !s->x || !s->w || !s->tmp || !s->tmpm || !s->itmpm)
+ return AVERROR(ENOMEM);
+
+ /* set up per-channel row-pointer views into the flat matrices so the
+ * LUP routines can swap rows by pointer */
+ for (int ch = 0; ch < channels; ch++) {
+ float *itmpm = (float *)s->itmpm->extended_data[ch];
+ float *tmpm = (float *)s->tmpm->extended_data[ch];
+ float **itmpmp = &s->itmpmp[s->projection * ch];
+ float **tmpmp = &s->tmpmp[s->projection * ch];
+
+ for (int i = 0; i < s->projection; i++) {
+ itmpmp[i] = &itmpm[i * s->projection];
+ tmpmp[i] = &tmpm[i * s->projection];
+ }
+ }
+
+ return 0;
+}
+
+/* Allocate the float DSP context; everything else waits for
+ * config_output() where the channel layout is known. */
+static av_cold int init(AVFilterContext *ctx)
+{
+ AudioAPContext *s = ctx->priv;
+
+ s->fdsp = avpriv_float_dsp_alloc(0);
+ if (!s->fdsp)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+/* Free all state allocated in init()/config_output(), including any
+ * input frame still held pending its pair (e.g. when one input reached
+ * EOF first) — otherwise that frame would leak. */
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AudioAPContext *s = ctx->priv;
+
+ av_freep(&s->fdsp);
+
+ av_frame_free(&s->frame[0]);
+ av_frame_free(&s->frame[1]);
+
+ av_frame_free(&s->offset);
+ av_frame_free(&s->delay);
+ av_frame_free(&s->dcoeffs);
+ av_frame_free(&s->coeffs);
+ av_frame_free(&s->e);
+ av_frame_free(&s->p);
+ av_frame_free(&s->w);
+ av_frame_free(&s->x);
+ av_frame_free(&s->tmp);
+ av_frame_free(&s->tmpm);
+ av_frame_free(&s->itmpm);
+
+ av_freep(&s->tmpmp);
+ av_freep(&s->itmpmp);
+}
+
+/* Two inputs: the signal to adapt on and the desired (reference) signal. */
+static const AVFilterPad inputs[] = {
+ {
+ .name = "input",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ {
+ .name = "desired",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+};
+
+/* Single output; state allocation happens once its props are configured. */
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_output,
+ },
+};
+
+/* Filter definition: planar float only, slice-threaded, and timeline
+ * support handled internally so adaptation continues while disabled. */
+const AVFilter ff_af_aap = {
+ .name = "aap",
+ .description = NULL_IF_CONFIG_SMALL("Apply Affine Projection algorithm to first audio stream."),
+ .priv_size = sizeof(AudioAPContext),
+ .priv_class = &aap_class,
+ .init = init,
+ .uninit = uninit,
+ .activate = activate,
+ FILTER_INPUTS(inputs),
+ FILTER_OUTPUTS(outputs),
+ FILTER_SINGLE_SAMPLEFMT(AV_SAMPLE_FMT_FLTP),
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL |
+ AVFILTER_FLAG_SLICE_THREADS,
+ .process_command = ff_filter_process_command,
+};
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index 5fbfe9d906..9d66213a62 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -21,6 +21,7 @@
#include "avfilter.h"
+extern const AVFilter ff_af_aap;
extern const AVFilter ff_af_abench;
extern const AVFilter ff_af_acompressor;
extern const AVFilter ff_af_acontrast;
--
2.42.1
[-- Attachment #3: Type: text/plain, Size: 251 bytes --]
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [FFmpeg-devel] [PATCH] avfilter: add Affine Projection adaptive audio filter
2023-11-26 14:41 [FFmpeg-devel] [PATCH] avfilter: add Affine Projection adaptive audio filter Paul B Mahol
@ 2023-11-28 12:33 ` Paul B Mahol
0 siblings, 0 replies; 3+ messages in thread
From: Paul B Mahol @ 2023-11-28 12:33 UTC (permalink / raw)
To: FFmpeg development discussions and patches
Gonna apply soon.
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 3+ messages in thread
* [FFmpeg-devel] [PATCH] avfilter: add Affine Projection adaptive audio filter
@ 2023-05-01 12:30 Paul B Mahol
0 siblings, 0 replies; 3+ messages in thread
From: Paul B Mahol @ 2023-05-01 12:30 UTC (permalink / raw)
To: FFmpeg development discussions and patches
[-- Attachment #1: Type: text/plain, Size: 16 bytes --]
Attached patch.
[-- Attachment #2: 0001-avfilter-add-Affine-Projection-adaptive-audio-filter.patch --]
[-- Type: text/x-patch, Size: 15539 bytes --]
From 391216f55c8e226974fb2e8a0e725b254811c2b7 Mon Sep 17 00:00:00 2001
From: Paul B Mahol <onemda@gmail.com>
Date: Sun, 30 Apr 2023 17:06:00 +0200
Subject: [PATCH] avfilter: add Affine Projection adaptive audio filter
Signed-off-by: Paul B Mahol <onemda@gmail.com>
---
libavfilter/Makefile | 1 +
libavfilter/af_aap.c | 452 +++++++++++++++++++++++++++++++++++++++
libavfilter/allfilters.c | 1 +
3 files changed, 454 insertions(+)
create mode 100644 libavfilter/af_aap.c
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 0eee5fccbe..d7c79fabd6 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -34,6 +34,7 @@ OBJS-$(CONFIG_DNN) += dnn_filter_common.o
include $(SRC_PATH)/libavfilter/dnn/Makefile
# audio filters
+OBJS-$(CONFIG_AAP_FILTER) += af_aap.o
OBJS-$(CONFIG_ABENCH_FILTER) += f_bench.o
OBJS-$(CONFIG_ACOMPRESSOR_FILTER) += af_sidechaincompress.o
OBJS-$(CONFIG_ACONTRAST_FILTER) += af_acontrast.o
diff --git a/libavfilter/af_aap.c b/libavfilter/af_aap.c
new file mode 100644
index 0000000000..2ee2b61558
--- /dev/null
+++ b/libavfilter/af_aap.c
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2023 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/channel_layout.h"
+#include "libavutil/common.h"
+#include "libavutil/float_dsp.h"
+#include "libavutil/opt.h"
+
+#include "audio.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "filters.h"
+#include "internal.h"
+
+enum OutModes {
+ IN_MODE,
+ DESIRED_MODE,
+ OUT_MODE,
+ NOISE_MODE,
+ NB_OMODES
+};
+
+typedef struct AudioAPContext {
+ const AVClass *class;
+
+ int order;
+ int projection;
+ float mu;
+ float delta;
+ int output_mode;
+
+ int kernel_size;
+ AVFrame *offset;
+ AVFrame *delay;
+ AVFrame *coeffs;
+ AVFrame *e;
+ AVFrame *p;
+ AVFrame *x;
+ AVFrame *w;
+ AVFrame *dcoeffs;
+ AVFrame *tmp;
+ AVFrame *tmpm;
+ AVFrame *itmpm;
+
+ float **tmpmp;
+ float **itmpmp;
+
+ AVFrame *frame[2];
+
+ AVFloatDSPContext *fdsp;
+} AudioAPContext;
+
+#define OFFSET(x) offsetof(AudioAPContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+#define AT AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
+
+static const AVOption aap_options[] = {
+ { "order", "set the filter order", OFFSET(order), AV_OPT_TYPE_INT, {.i64=16}, 1, INT16_MAX, A },
+ { "projection", "set the filter projection", OFFSET(projection), AV_OPT_TYPE_INT, {.i64=2}, 1, 256, A },
+ { "mu", "set the filter mu", OFFSET(mu), AV_OPT_TYPE_FLOAT, {.dbl=0.0001},0,1, AT },
+ { "delta", "set the filter delta", OFFSET(delta), AV_OPT_TYPE_FLOAT, {.dbl=0.001},0, 1, AT },
+ { "out_mode", "set output mode", OFFSET(output_mode), AV_OPT_TYPE_INT, {.i64=OUT_MODE}, 0, NB_OMODES-1, AT, "mode" },
+ { "i", "input", 0, AV_OPT_TYPE_CONST, {.i64=IN_MODE}, 0, 0, AT, "mode" },
+ { "d", "desired", 0, AV_OPT_TYPE_CONST, {.i64=DESIRED_MODE}, 0, 0, AT, "mode" },
+ { "o", "output", 0, AV_OPT_TYPE_CONST, {.i64=OUT_MODE}, 0, 0, AT, "mode" },
+ { "n", "noise", 0, AV_OPT_TYPE_CONST, {.i64=NOISE_MODE}, 0, 0, AT, "mode" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(aap);
+
+static float fir_sample(AudioAPContext *s, float sample, float *delay,
+ float *coeffs, float *tmp, int *offset)
+{
+ const int order = s->order;
+ float output;
+
+ delay[*offset] = sample;
+
+ memcpy(tmp, coeffs + order - *offset, order * sizeof(float));
+ output = s->fdsp->scalarproduct_float(delay, tmp, s->kernel_size);
+
+ if (--(*offset) < 0)
+ *offset = order - 1;
+
+ return output;
+}
+
+/* In-place LUP decomposition with partial pivoting; P[N] counts row
+ * swaps.  Returns 0 when no pivot reaches tol (singular), 1 on success. */
+static int lup_decompose(float **MA, int N, float tol, int *P)
+{
+ float maxA, *ptr, absA;
+ int i, j, k, imax;
+
+ for (i = 0; i <= N; i++)
+ P[i] = i;
+
+ for (i = 0; i < N; i++) {
+ maxA = 0.f;
+ imax = i;
+
+ /* fabsf(): operands are float, avoid the double round-trip */
+ for (k = i; k < N; k++)
+ if ((absA = fabsf(MA[k][i])) > maxA) {
+ maxA = absA;
+ imax = k;
+ }
+
+ if (maxA < tol)
+ return 0;
+
+ if (imax != i) {
+ j = P[i];
+ P[i] = P[imax];
+ P[imax] = j;
+
+ ptr = MA[i];
+ MA[i] = MA[imax];
+ MA[imax] = ptr;
+
+ P[N]++;
+ }
+
+ for (j = i + 1; j < N; j++) {
+ MA[j][i] /= MA[i][i];
+
+ for (k = i + 1; k < N; k++)
+ MA[j][k] -= MA[j][i] * MA[i][k];
+ }
+ }
+
+ return 1;
+}
+
+static void lup_invert(float **MA, int *P, int N, float **IA)
+{
+ for (int j = 0; j < N; j++) {
+ for (int i = 0; i < N; i++) {
+ IA[i][j] = P[i] == j ? 1.f : 0.f;
+
+ for (int k = 0; k < i; k++)
+ IA[i][j] -= MA[i][k] * IA[k][j];
+ }
+
+ for (int i = N - 1; i >= 0; i--) {
+ for (int k = i + 1; k < N; k++)
+ IA[i][j] -= MA[i][k] * IA[k][j];
+
+ IA[i][j] /= MA[i][i];
+ }
+ }
+}
+
+static float process_sample(AudioAPContext *s, float input, float desired, int ch)
+{
+ float *dcoeffs = (float *)s->dcoeffs->extended_data[ch];
+ float *coeffs = (float *)s->coeffs->extended_data[ch];
+ float *delay = (float *)s->delay->extended_data[ch];
+ float **itmpmp = &s->itmpmp[s->projection * ch];
+ float **tmpmp = &s->tmpmp[s->projection * ch];
+ float *tmpm = (float *)s->tmpm->extended_data[ch];
+ float *tmp = (float *)s->tmp->extended_data[ch];
+ float *e = (float *)s->e->extended_data[ch];
+ float *x = (float *)s->x->extended_data[ch];
+ float *w = (float *)s->w->extended_data[ch];
+ int *p = (int *)s->p->extended_data[ch];
+ int *offset = (int *)s->offset->extended_data[ch];
+ const int projection = s->projection;
+ const float delta = s->delta;
+ const int order = s->order;
+ const int length = projection + order;
+ const float mu = s->mu;
+ const float tol = 0.00001f;
+ float output;
+
+ x[offset[2] + length] = x[offset[2]] = input;
+ delay[offset[0] + order] = input;
+
+ output = fir_sample(s, input, delay, coeffs, tmp, offset);
+ e[offset[1]] = e[offset[1] + projection] = desired - output;
+
+ for (int i = 0; i < projection; i++) {
+ const int iprojection = i * projection;
+
+ for (int j = i; j < projection; j++) {
+ tmpm[iprojection + j] = 0.f;
+ for (int k = 0; k < order; k++)
+ tmpm[iprojection + j] += x[offset[2] + i + k] * x[offset[2] + j + k];
+ if (i != j)
+ tmpm[j * projection + i] = tmpm[iprojection + j];
+ }
+
+ tmpm[iprojection + i] += delta;
+ }
+
+ lup_decompose(tmpmp, projection, tol, p);
+ lup_invert(tmpmp, p, projection, itmpmp);
+
+ for (int i = 0; i < projection; i++) {
+ w[i] = 0.f;
+ for (int j = 0; j < projection; j++)
+ w[i] += itmpmp[i][j] * e[j + offset[1]];
+ }
+
+ for (int i = 0; i < order; i++) {
+ dcoeffs[i] = 0.f;
+ for (int j = 0; j < projection; j++)
+ dcoeffs[i] += x[offset[2] + i + j] * w[j];
+ }
+
+ for (int i = 0; i < order; i++)
+ coeffs[i] = coeffs[i + order] = coeffs[i] + mu * dcoeffs[i];
+
+ if (--offset[1] < 0)
+ offset[1] = projection - 1;
+
+ if (--offset[2] < 0)
+ offset[2] = length - 1;
+
+ switch (s->output_mode) {
+ case IN_MODE: output = input; break;
+ case DESIRED_MODE: output = desired; break;
+ case OUT_MODE: output = desired - output; break;
+ case NOISE_MODE: output = input - output; break;
+ }
+ return output;
+}
+
+static int process_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ AudioAPContext *s = ctx->priv;
+ AVFrame *out = arg;
+ const int start = (out->ch_layout.nb_channels * jobnr) / nb_jobs;
+ const int end = (out->ch_layout.nb_channels * (jobnr+1)) / nb_jobs;
+
+ for (int c = start; c < end; c++) {
+ const float *input = (const float *)s->frame[0]->extended_data[c];
+ const float *desired = (const float *)s->frame[1]->extended_data[c];
+ float *output = (float *)out->extended_data[c];
+
+ for (int n = 0; n < out->nb_samples; n++) {
+ output[n] = process_sample(s, input[n], desired[n], c);
+ if (ctx->is_disabled)
+ output[n] = input[n];
+ }
+ }
+
+ return 0;
+}
+
+static int activate(AVFilterContext *ctx)
+{
+ AudioAPContext *s = ctx->priv;
+ int i, ret, status;
+ int nb_samples;
+ int64_t pts;
+
+ FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);
+
+ nb_samples = FFMIN(ff_inlink_queued_samples(ctx->inputs[0]),
+ ff_inlink_queued_samples(ctx->inputs[1]));
+ for (i = 0; i < ctx->nb_inputs && nb_samples > 0; i++) {
+ if (s->frame[i])
+ continue;
+
+ if (ff_inlink_check_available_samples(ctx->inputs[i], nb_samples) > 0) {
+ ret = ff_inlink_consume_samples(ctx->inputs[i], nb_samples, nb_samples, &s->frame[i]);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ if (s->frame[0] && s->frame[1]) {
+ AVFrame *out;
+
+ out = ff_get_audio_buffer(ctx->outputs[0], s->frame[0]->nb_samples);
+ if (!out) {
+ av_frame_free(&s->frame[0]);
+ av_frame_free(&s->frame[1]);
+ return AVERROR(ENOMEM);
+ }
+
+ ff_filter_execute(ctx, process_channels, out, NULL,
+ FFMIN(ctx->outputs[0]->ch_layout.nb_channels, ff_filter_get_nb_threads(ctx)));
+
+ out->pts = s->frame[0]->pts;
+
+ av_frame_free(&s->frame[0]);
+ av_frame_free(&s->frame[1]);
+
+ ret = ff_filter_frame(ctx->outputs[0], out);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!nb_samples) {
+ for (i = 0; i < 2; i++) {
+ if (ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts)) {
+ ff_outlink_set_status(ctx->outputs[0], status, pts);
+ return 0;
+ }
+ }
+ }
+
+ if (ff_outlink_frame_wanted(ctx->outputs[0])) {
+ for (i = 0; i < 2; i++) {
+ if (ff_inlink_queued_samples(ctx->inputs[i]) > 0)
+ continue;
+ ff_inlink_request_frame(ctx->inputs[i]);
+ return 0;
+ }
+ }
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ const int channels = outlink->ch_layout.nb_channels;
+ AVFilterContext *ctx = outlink->src;
+ AudioAPContext *s = ctx->priv;
+
+ s->kernel_size = FFALIGN(s->order, 16);
+
+ if (!s->offset)
+ s->offset = ff_get_audio_buffer(outlink, 3);
+ if (!s->delay)
+ s->delay = ff_get_audio_buffer(outlink, 2 * s->kernel_size);
+ if (!s->dcoeffs)
+ s->dcoeffs = ff_get_audio_buffer(outlink, s->kernel_size);
+ if (!s->coeffs)
+ s->coeffs = ff_get_audio_buffer(outlink, 2 * s->kernel_size);
+ if (!s->e)
+ s->e = ff_get_audio_buffer(outlink, 2 * s->projection);
+ if (!s->p)
+ s->p = ff_get_audio_buffer(outlink, s->projection + 1);
+ if (!s->x)
+ s->x = ff_get_audio_buffer(outlink, 2 * (s->projection + s->order));
+ if (!s->w)
+ s->w = ff_get_audio_buffer(outlink, s->projection);
+ if (!s->tmp)
+ s->tmp = ff_get_audio_buffer(outlink, s->kernel_size);
+ if (!s->tmpm)
+ s->tmpm = ff_get_audio_buffer(outlink, s->projection * s->projection);
+ if (!s->itmpm)
+ s->itmpm = ff_get_audio_buffer(outlink, s->projection * s->projection);
+
+ if (!s->tmpmp)
+ s->tmpmp = av_calloc(s->projection * channels, sizeof(*s->tmpmp));
+ if (!s->itmpmp)
+ s->itmpmp = av_calloc(s->projection * channels, sizeof(*s->itmpmp));
+
+ if (!s->offset || !s->delay || !s->dcoeffs || !s->coeffs || !s->tmpmp || !s->itmpmp ||
+ !s->e || !s->p || !s->x || !s->w || !s->tmp || !s->tmpm || !s->itmpm)
+ return AVERROR(ENOMEM);
+
+ for (int ch = 0; ch < channels; ch++) {
+ float *itmpm = (float *)s->itmpm->extended_data[ch];
+ float *tmpm = (float *)s->tmpm->extended_data[ch];
+ float **itmpmp = &s->itmpmp[s->projection * ch];
+ float **tmpmp = &s->tmpmp[s->projection * ch];
+
+ for (int i = 0; i < s->projection; i++) {
+ itmpmp[i] = &itmpm[i * s->projection];
+ tmpmp[i] = &tmpm[i * s->projection];
+ }
+ }
+
+ return 0;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ AudioAPContext *s = ctx->priv;
+
+ s->fdsp = avpriv_float_dsp_alloc(0);
+ if (!s->fdsp)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AudioAPContext *s = ctx->priv;
+
+ av_freep(&s->fdsp);
+
+ av_frame_free(&s->offset);
+ av_frame_free(&s->delay);
+ av_frame_free(&s->dcoeffs);
+ av_frame_free(&s->coeffs);
+ av_frame_free(&s->e);
+ av_frame_free(&s->p);
+ av_frame_free(&s->w);
+ av_frame_free(&s->x);
+ av_frame_free(&s->tmp);
+ av_frame_free(&s->tmpm);
+ av_frame_free(&s->itmpm);
+
+ av_freep(&s->tmpmp);
+ av_freep(&s->itmpmp);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "input",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ {
+ .name = "desired",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_output,
+ },
+};
+
+const AVFilter ff_af_aap = {
+ .name = "aap",
+ .description = NULL_IF_CONFIG_SMALL("Apply Affine Projection algorithm to first audio stream."),
+ .priv_size = sizeof(AudioAPContext),
+ .priv_class = &aap_class,
+ .init = init,
+ .uninit = uninit,
+ .activate = activate,
+ FILTER_INPUTS(inputs),
+ FILTER_OUTPUTS(outputs),
+ FILTER_SINGLE_SAMPLEFMT(AV_SAMPLE_FMT_FLTP),
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL |
+ AVFILTER_FLAG_SLICE_THREADS,
+ .process_command = ff_filter_process_command,
+};
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index 5e38d5a7da..c6c482755b 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -21,6 +21,7 @@
#include "avfilter.h"
+extern const AVFilter ff_af_aap;
extern const AVFilter ff_af_abench;
extern const AVFilter ff_af_acompressor;
extern const AVFilter ff_af_acontrast;
--
2.39.1
[-- Attachment #3: Type: text/plain, Size: 251 bytes --]
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2023-11-28 12:25 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-11-26 14:41 [FFmpeg-devel] [PATCH] avfilter: add Affine Projection adaptive audio filter Paul B Mahol
2023-11-28 12:33 ` Paul B Mahol
-- strict thread matches above, loose matches on Subject: below --
2023-05-01 12:30 Paul B Mahol
Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
This inbox may be cloned and mirrored by anyone:
git clone --mirror https://master.gitmailbox.com/ffmpegdev/0 ffmpegdev/git/0.git
# If you have public-inbox 1.1+ installed, you may
# initialize and index your mirror using the following commands:
public-inbox-init -V2 ffmpegdev ffmpegdev/ https://master.gitmailbox.com/ffmpegdev \
ffmpegdev@gitmailbox.com
public-inbox-index ffmpegdev
Example config snippet for mirrors.
AGPL code for this site: git clone https://public-inbox.org/public-inbox.git