libavfilter/af_compand.c
 /*
  * Copyright (c) 1999 Chris Bagwell
  * Copyright (c) 1999 Nick Bailey
  * Copyright (c) 2007 Rob Sykes <robs@users.sourceforge.net>
  * Copyright (c) 2013 Paul B Mahol
  * Copyright (c) 2014 Andrew Kelley
  *
  * This file is part of FFmpeg.
  *
  * FFmpeg is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
  * version 2.1 of the License, or (at your option) any later version.
  *
  * FFmpeg is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
  * License along with FFmpeg; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
 /**
  * @file
  * audio compand filter
  */
 
 #include "libavutil/avassert.h"
 #include "libavutil/avstring.h"
 #include "libavutil/ffmath.h"
 #include "libavutil/opt.h"
 #include "libavutil/samplefmt.h"
 #include "audio.h"
 #include "avfilter.h"
 #include "internal.h"
 
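 /* Per-channel dynamics state: attack/decay smoothing coefficients
  * (derived from the option values in config_output()) and the running
  * volume estimate that follows the input envelope. */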
 typedef struct ChanParam {
     double attack;
     double decay;
     double volume;
 } ChanParam;
 
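 /* One piece of the piecewise transfer function, kept in the natural-log
  * domain; get_volume() evaluates y + t * (a * t + b) with t taken
  * relative to the segment start x. */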
 typedef struct CompandSegment {
     double x, y;
     double a, b;
 } CompandSegment;
 
 typedef struct CompandContext {
     const AVClass *class;
     int nb_segments;
     char *attacks, *decays, *points;
     CompandSegment *segments;
     ChanParam *channels;
     double in_min_lin;
     double out_min_lin;
     double curve_dB;
     double gain_dB;
     double initial_volume;
     double delay;
     AVFrame *delay_frame;
     int delay_samples;
     int delay_count;
     int delay_index;
     int64_t pts;
 
     int (*compand)(AVFilterContext *ctx, AVFrame *frame);
 } CompandContext;
 
 #define OFFSET(x) offsetof(CompandContext, x)
 #define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
 static const AVOption compand_options[] = {
     { "attacks", "set time over which increase of volume is determined", OFFSET(attacks), AV_OPT_TYPE_STRING, { .str = "0" }, 0, 0, A },
     { "decays", "set time over which decrease of volume is determined", OFFSET(decays), AV_OPT_TYPE_STRING, { .str = "0.8" }, 0, 0, A },
     { "points", "set points of transfer function", OFFSET(points), AV_OPT_TYPE_STRING, { .str = "-70/-70|-60/-20|1/0" }, 0, 0, A },
     { "soft-knee", "set soft-knee", OFFSET(curve_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0.01 }, 0.01, 900, A },
     { "gain", "set output gain", OFFSET(gain_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 900, A },
     { "volume", "set initial volume", OFFSET(initial_volume), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 0, A },
     { "delay", "set delay for samples before sending them to volume adjuster", OFFSET(delay), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, 20, A },
     { NULL }
 };
 
 AVFILTER_DEFINE_CLASS(compand);
 
 static av_cold int init(AVFilterContext *ctx)
 {
     CompandContext *s = ctx->priv;
     s->pts            = AV_NOPTS_VALUE;
     return 0;
 }
 
 static av_cold void uninit(AVFilterContext *ctx)
 {
     CompandContext *s = ctx->priv;
 
     av_freep(&s->channels);
     av_freep(&s->segments);
     av_frame_free(&s->delay_frame);
 }
 
 static int query_formats(AVFilterContext *ctx)
 {
     AVFilterChannelLayouts *layouts;
     AVFilterFormats *formats;
     static const enum AVSampleFormat sample_fmts[] = {
         AV_SAMPLE_FMT_DBLP,
         AV_SAMPLE_FMT_NONE
     };
     int ret;

     layouts = ff_all_channel_counts();
     if (!layouts)
         return AVERROR(ENOMEM);
     ret = ff_set_common_channel_layouts(ctx, layouts);
     if (ret < 0)
         return ret;
 
     formats = ff_make_format_list(sample_fmts);
     if (!formats)
         return AVERROR(ENOMEM);
     ret = ff_set_common_formats(ctx, formats);
     if (ret < 0)
         return ret;
 
     formats = ff_all_samplerates();
     if (!formats)
         return AVERROR(ENOMEM);
     return ff_set_common_samplerates(ctx, formats);
 }
 
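 /* Count the '|'- or space-separated items in an option string; an empty
  * string still counts as one item. */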
 static void count_items(char *item_str, int *nb_items)
 {
     char *p;
 
     *nb_items = 1;
     for (p = item_str; *p; p++) {
         if (*p == ' ' || *p == '|')
             (*nb_items)++;
     }
 }
 
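 /* One-pole envelope follower: the volume estimate rises towards the
  * input magnitude at the attack rate and falls at the decay rate. */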
 static void update_volume(ChanParam *cp, double in)
 {
     double delta = in - cp->volume;
 
     if (delta > 0.0)
         cp->volume += delta * cp->attack;
     else
         cp->volume += delta * cp->decay;
 }
 
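 /* Look up the gain for a linear input level: below the lowest knee the
  * minimum gain is returned, otherwise the matching segment of the
  * transfer function is evaluated in the log domain. */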
 static double get_volume(CompandContext *s, double in_lin)
 {
     CompandSegment *cs;
     double in_log, out_log;
     int i;
 
     if (in_lin < s->in_min_lin)
         return s->out_min_lin;
 
     in_log = log(in_lin);
 
     for (i = 1; i < s->nb_segments; i++)
         if (in_log <= s->segments[i].x)
             break;
     cs = &s->segments[i - 1];
     in_log -= cs->x;
     out_log = cs->y + in_log * (cs->a * in_log + cs->b);
 
     return exp(out_log);
 }
 
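 /* Zero-delay variant: update the envelope and apply the resulting gain
  * to each sample in place (or into a freshly allocated frame when the
  * input is not writable). */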
 static int compand_nodelay(AVFilterContext *ctx, AVFrame *frame)
 {
     CompandContext *s    = ctx->priv;
     AVFilterLink *inlink = ctx->inputs[0];
     const int channels   = inlink->channels;
     const int nb_samples = frame->nb_samples;
     AVFrame *out_frame;
     int chan, i;
     int err;
 
     if (av_frame_is_writable(frame)) {
         out_frame = frame;
     } else {
         out_frame = ff_get_audio_buffer(inlink, nb_samples);
         if (!out_frame) {
             av_frame_free(&frame);
             return AVERROR(ENOMEM);
         }
         err = av_frame_copy_props(out_frame, frame);
         if (err < 0) {
             av_frame_free(&out_frame);
             av_frame_free(&frame);
             return err;
         }
     }
 
     for (chan = 0; chan < channels; chan++) {
         const double *src = (double *)frame->extended_data[chan];
         double *dst = (double *)out_frame->extended_data[chan];
         ChanParam *cp = &s->channels[chan];
 
         for (i = 0; i < nb_samples; i++) {
             update_volume(cp, fabs(src[i]));
 
             dst[i] = src[i] * get_volume(s, cp->volume);
         }
     }
 
     if (frame != out_frame)
         av_frame_free(&frame);
 
     return ff_filter_frame(ctx->outputs[0], out_frame);
 }
 
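 /* Wrap an index that is at most one buffer length past the end. */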
 #define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))
 
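 /* Delayed variant: input samples are parked in a circular per-channel
  * delay line, so the gain derived from the current envelope is applied
  * to audio that is delay_samples older; this gives the attack time a
  * head start on transients. */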
 static int compand_delay(AVFilterContext *ctx, AVFrame *frame)
 {
     CompandContext *s    = ctx->priv;
     AVFilterLink *inlink = ctx->inputs[0];
     const int channels = inlink->channels;
     const int nb_samples = frame->nb_samples;
     int chan, i, av_uninit(dindex), oindex, av_uninit(count);
     AVFrame *out_frame   = NULL;
     int err;
 
     if (s->pts == AV_NOPTS_VALUE) {
         s->pts = (frame->pts == AV_NOPTS_VALUE) ? 0 : frame->pts;
     }
 
     av_assert1(channels > 0); /* would corrupt delay_count and delay_index */

     for (chan = 0; chan < channels; chan++) {
         AVFrame *delay_frame = s->delay_frame;
         const double *src    = (double *)frame->extended_data[chan];
         double *dbuf         = (double *)delay_frame->extended_data[chan];
         ChanParam *cp        = &s->channels[chan];
         double *dst;
 
         count  = s->delay_count;
         dindex = s->delay_index;
         for (i = 0, oindex = 0; i < nb_samples; i++) {
             const double in = src[i];
             update_volume(cp, fabs(in));
 
             if (count >= s->delay_samples) {
                 if (!out_frame) {
                     out_frame = ff_get_audio_buffer(inlink, nb_samples - i);
                     if (!out_frame) {
                         av_frame_free(&frame);
                         return AVERROR(ENOMEM);
                     }
                     err = av_frame_copy_props(out_frame, frame);
                     if (err < 0) {
                         av_frame_free(&out_frame);
                         av_frame_free(&frame);
                         return err;
                     }
                     out_frame->pts = s->pts;
                     s->pts += av_rescale_q(nb_samples - i,
                         (AVRational){ 1, inlink->sample_rate },
                         inlink->time_base);
                 }
 
                 dst = (double *)out_frame->extended_data[chan];
                 dst[oindex++] = dbuf[dindex] * get_volume(s, cp->volume);
             } else {
                 count++;
             }
 
             dbuf[dindex] = in;
             dindex = MOD(dindex + 1, s->delay_samples);
         }
     }
 
     s->delay_count = count;
     s->delay_index = dindex;
 
     av_frame_free(&frame);
 
     if (out_frame) {
         err = ff_filter_frame(ctx->outputs[0], out_frame);
         return err;
     }
 
     return 0;
 }
 
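 /* After the input hits EOF, flush whatever is still stored in the delay
  * line, at most 2048 samples per output frame. */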
 static int compand_drain(AVFilterLink *outlink)
 {
     AVFilterContext *ctx = outlink->src;
     CompandContext *s    = ctx->priv;
     const int channels   = outlink->channels;
     AVFrame *frame       = NULL;
     int chan, i, dindex;
 
     /* 2048 is to limit output frame size during drain */
     frame = ff_get_audio_buffer(outlink, FFMIN(2048, s->delay_count));
     if (!frame)
         return AVERROR(ENOMEM);
     frame->pts = s->pts;
     s->pts += av_rescale_q(frame->nb_samples,
             (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
 
     av_assert0(channels > 0);
     for (chan = 0; chan < channels; chan++) {
         AVFrame *delay_frame = s->delay_frame;
         double *dbuf = (double *)delay_frame->extended_data[chan];
         double *dst = (double *)frame->extended_data[chan];
         ChanParam *cp = &s->channels[chan];
 
         dindex = s->delay_index;
         for (i = 0; i < frame->nb_samples; i++) {
             dst[i] = dbuf[dindex] * get_volume(s, cp->volume);
             dindex = MOD(dindex + 1, s->delay_samples);
         }
     }
     s->delay_count -= frame->nb_samples;
     s->delay_index = dindex;
 
     return ff_filter_frame(outlink, frame);
 }
 
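 /* Parse the attack/decay/points options, build the piecewise transfer
  * function (rounding its corners with the soft-knee radius) and choose
  * between the delayed and the zero-delay processing paths. */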
 static int config_output(AVFilterLink *outlink)
 {
     AVFilterContext *ctx  = outlink->src;
     CompandContext *s     = ctx->priv;
     const int sample_rate = outlink->sample_rate;
     double radius         = s->curve_dB * M_LN10 / 20.0;
     char *p, *saveptr     = NULL;
     const int channels    = outlink->channels;
     int nb_attacks, nb_decays, nb_points;
     int new_nb_items, num;
     int i;
     int err;

     count_items(s->attacks, &nb_attacks);
     count_items(s->decays, &nb_decays);
     count_items(s->points, &nb_points);
 
     if (channels <= 0) {
         av_log(ctx, AV_LOG_ERROR, "Invalid number of channels: %d\n", channels);
         return AVERROR(EINVAL);
     }
 
     if (nb_attacks > channels || nb_decays > channels) {
         av_log(ctx, AV_LOG_ERROR,
                 "Number of attacks/decays bigger than number of channels.\n");
         return AVERROR(EINVAL);
     }
 
     uninit(ctx);
 
     s->channels = av_mallocz_array(channels, sizeof(*s->channels));
     s->nb_segments = (nb_points + 4) * 2;
     s->segments = av_mallocz_array(s->nb_segments, sizeof(*s->segments));
 
     if (!s->channels || !s->segments) {
         uninit(ctx);
         return AVERROR(ENOMEM);
     }
 
     p = s->attacks;
     for (i = 0, new_nb_items = 0; i < nb_attacks; i++) {
         char *tstr = av_strtok(p, " |", &saveptr);
         if (!tstr) {
             uninit(ctx);
             return AVERROR(EINVAL);
         }
         p = NULL;
         new_nb_items += sscanf(tstr, "%lf", &s->channels[i].attack) == 1;
         if (s->channels[i].attack < 0) {
             uninit(ctx);
             return AVERROR(EINVAL);
         }
     }
     nb_attacks = new_nb_items;
 
     p = s->decays;
     for (i = 0, new_nb_items = 0; i < nb_decays; i++) {
         char *tstr = av_strtok(p, " |", &saveptr);
         if (!tstr) {
             uninit(ctx);
             return AVERROR(EINVAL);
         }
         p = NULL;
         new_nb_items += sscanf(tstr, "%lf", &s->channels[i].decay) == 1;
         if (s->channels[i].decay < 0) {
             uninit(ctx);
             return AVERROR(EINVAL);
         }
     }
     nb_decays = new_nb_items;
 
     if (nb_attacks != nb_decays) {
         av_log(ctx, AV_LOG_ERROR,
                 "Number of attacks %d differs from number of decays %d.\n",
                 nb_attacks, nb_decays);
         uninit(ctx);
         return AVERROR(EINVAL);
     }
 
     for (i = nb_decays; i < channels; i++) {
         s->channels[i].attack = s->channels[nb_decays - 1].attack;
         s->channels[i].decay = s->channels[nb_decays - 1].decay;
     }
 
 #define S(x) s->segments[2 * ((x) + 1)]
     p = s->points;
     for (i = 0, new_nb_items = 0; i < nb_points; i++) {
         char *tstr = av_strtok(p, " |", &saveptr);
         p = NULL;
         if (!tstr || sscanf(tstr, "%lf/%lf", &S(i).x, &S(i).y) != 2) {
             av_log(ctx, AV_LOG_ERROR,
                     "Invalid and/or missing input/output value.\n");
             uninit(ctx);
             return AVERROR(EINVAL);
         }
         if (i && S(i - 1).x > S(i).x) {
             av_log(ctx, AV_LOG_ERROR,
                     "Transfer function input values must be increasing.\n");
             uninit(ctx);
             return AVERROR(EINVAL);
         }
         S(i).y -= S(i).x;
         av_log(ctx, AV_LOG_DEBUG, "%d: x=%f y=%f\n", i, S(i).x, S(i).y);
         new_nb_items++;
     }
     num = new_nb_items;
 
     /* Add 0,0 if necessary */
     if (num == 0 || S(num - 1).x)
         num++;
 
 #undef S
 #define S(x) s->segments[2 * (x)]
     /* Add a tail off segment at the start */
     S(0).x = S(1).x - 2 * s->curve_dB;
     S(0).y = S(1).y;
     num++;
 
     /* Join adjacent colinear segments */
     for (i = 2; i < num; i++) {
         double g1 = (S(i - 1).y - S(i - 2).y) * (S(i - 0).x - S(i - 1).x);
         double g2 = (S(i - 0).y - S(i - 1).y) * (S(i - 1).x - S(i - 2).x);
         int j;
 
         if (fabs(g1 - g2))
             continue;
         num--;
         for (j = --i; j < num; j++)
             S(j) = S(j + 1);
     }
 
     for (i = 0; i < s->nb_segments; i += 2) {
         s->segments[i].y += s->gain_dB;
         s->segments[i].x *= M_LN10 / 20;
         s->segments[i].y *= M_LN10 / 20;
     }
 
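 /* Round each corner of the transfer function with a short quadratic
  * segment; its size is capped by the soft-knee radius computed above. */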
 #define L(x) s->segments[i - (x)]
     for (i = 4; i < s->nb_segments; i += 2) {
         double x, y, cx, cy, in1, in2, out1, out2, theta, len, r;
 
         L(4).a = 0;
         L(4).b = (L(2).y - L(4).y) / (L(2).x - L(4).x);
 
         L(2).a = 0;
         L(2).b = (L(0).y - L(2).y) / (L(0).x - L(2).x);
 
         theta = atan2(L(2).y - L(4).y, L(2).x - L(4).x);
         len = hypot(L(2).x - L(4).x, L(2).y - L(4).y);
         r = FFMIN(radius, len);
         L(3).x = L(2).x - r * cos(theta);
         L(3).y = L(2).y - r * sin(theta);
 
         theta = atan2(L(0).y - L(2).y, L(0).x - L(2).x);
         len = hypot(L(0).x - L(2).x, L(0).y - L(2).y);
         r = FFMIN(radius, len / 2);
         x = L(2).x + r * cos(theta);
         y = L(2).y + r * sin(theta);
 
         cx = (L(3).x + L(2).x + x) / 3;
         cy = (L(3).y + L(2).y + y) / 3;
 
         L(2).x = x;
         L(2).y = y;
 
         in1  = cx - L(3).x;
         out1 = cy - L(3).y;
         in2  = L(2).x - L(3).x;
         out2 = L(2).y - L(3).y;
         L(3).a = (out2 / in2 - out1 / in1) / (in2 - in1);
         L(3).b = out1 / in1 - L(3).a * in1;
     }
     L(3).x = 0;
     L(3).y = L(2).y;
 
     s->in_min_lin  = exp(s->segments[1].x);
     s->out_min_lin = exp(s->segments[1].y);
 
     for (i = 0; i < channels; i++) {
         ChanParam *cp = &s->channels[i];
 
         if (cp->attack > 1.0 / sample_rate)
             cp->attack = 1.0 - exp(-1.0 / (sample_rate * cp->attack));
         else
             cp->attack = 1.0;
         if (cp->decay > 1.0 / sample_rate)
             cp->decay = 1.0 - exp(-1.0 / (sample_rate * cp->decay));
         else
             cp->decay = 1.0;
         cp->volume = ff_exp10(s->initial_volume / 20);
     }
 
     s->delay_samples = s->delay * sample_rate;
     if (s->delay_samples <= 0) {
         s->compand = compand_nodelay;
         return 0;
     }
 
     s->delay_frame = av_frame_alloc();
     if (!s->delay_frame) {
         uninit(ctx);
         return AVERROR(ENOMEM);
     }
 
     s->delay_frame->format         = outlink->format;
     s->delay_frame->nb_samples     = s->delay_samples;
     s->delay_frame->channel_layout = outlink->channel_layout;
 
     err = av_frame_get_buffer(s->delay_frame, 32);
     if (err)
         return err;
 
     s->compand = compand_delay;
     return 0;
 }
 
 static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
 {
     AVFilterContext *ctx = inlink->dst;
     CompandContext *s    = ctx->priv;
 
     return s->compand(ctx, frame);
 }
 
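 /* Pull audio from upstream; once the input is exhausted, drain the
  * samples still held in the delay line. */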
 static int request_frame(AVFilterLink *outlink)
 {
     AVFilterContext *ctx = outlink->src;
     CompandContext *s    = ctx->priv;
     int ret = 0;
 
     ret = ff_request_frame(ctx->inputs[0]);
 
     if (ret == AVERROR_EOF && !ctx->is_disabled && s->delay_count)
         ret = compand_drain(outlink);
 
     return ret;
 }
 
 static const AVFilterPad compand_inputs[] = {
     {
         .name         = "default",
         .type         = AVMEDIA_TYPE_AUDIO,
         .filter_frame = filter_frame,
     },
     { NULL }
 };
 
 static const AVFilterPad compand_outputs[] = {
     {
         .name          = "default",
         .request_frame = request_frame,
         .config_props  = config_output,
         .type          = AVMEDIA_TYPE_AUDIO,
     },
     { NULL }
 };
 
 AVFilter ff_af_compand = {
     .name           = "compand",
     .description    = NULL_IF_CONFIG_SMALL(
             "Compress or expand audio dynamic range."),
     .query_formats  = query_formats,
     .priv_size      = sizeof(CompandContext),
     .priv_class     = &compand_class,
     .init           = init,
     .uninit         = uninit,
     .inputs         = compand_inputs,
     .outputs        = compand_outputs,
 };