libavfilter/af_compand.c (libav.git)
/*
 * Copyright (c) 1999 Chris Bagwell
 * Copyright (c) 1999 Nick Bailey
 * Copyright (c) 2007 Rob Sykes <robs@users.sourceforge.net>
 * Copyright (c) 2013 Paul B Mahol
 * Copyright (c) 2014 Andrew Kelley
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * audio compand filter
 */

#include <string.h>

#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/mathematics.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"

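/* Per-channel state: attack/decay smoothing coefficients (converted from
 * seconds to per-sample factors in config_output()) and the running volume
 * estimate maintained by update_volume(). */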
typedef struct ChanParam {
    float attack;
    float decay;
    float volume;
} ChanParam;

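/* One piece of the transfer function. (x, y) is the segment start point and
 * a/b are the quadratic and linear coefficients evaluated by get_volume();
 * config_output() converts all of them to the natural-log domain. */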
typedef struct CompandSegment {
    float x, y;
    float a, b;
} CompandSegment;

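/* Filter state: parsed option strings, the compiled transfer-function
 * segments, per-channel envelope parameters and, when a delay is requested,
 * a ring buffer of pending samples (delay_frame/delay_index/delay_count). */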
typedef struct CompandContext {
    const AVClass *class;
    int nb_channels;
    int nb_segments;
    char *attacks, *decays, *points;
    CompandSegment *segments;
    ChanParam *channels;
    float in_min_lin;
    float out_min_lin;
    double curve_dB;
    double gain_dB;
    double initial_volume;
    double delay;
    AVFrame *delay_frame;
    int delay_samples;
    int delay_count;
    int delay_index;
    int64_t pts;

    int (*compand)(AVFilterContext *ctx, AVFrame *frame);
    /* set by filter_frame() to signal an output frame to request_frame() */
    int got_output;
} CompandContext;

#define OFFSET(x) offsetof(CompandContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM

static const AVOption compand_options[] = {
    { "attacks", "set time over which increase of volume is determined", OFFSET(attacks), AV_OPT_TYPE_STRING, { .str = "0.3" }, 0, 0, A },
    { "decays", "set time over which decrease of volume is determined", OFFSET(decays), AV_OPT_TYPE_STRING, { .str = "0.8" }, 0, 0, A },
    { "points", "set points of transfer function", OFFSET(points), AV_OPT_TYPE_STRING, { .str = "-70/-70|-60/-20" }, 0, 0, A },
    { "soft-knee", "set soft-knee", OFFSET(curve_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0.01 }, 0.01, 900, A },
    { "gain", "set output gain", OFFSET(gain_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 900, A },
    { "volume", "set initial volume", OFFSET(initial_volume), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 0, A },
    { "delay", "set delay for samples before sending them to volume adjuster", OFFSET(delay), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, 20, A },
    { NULL }
};

static const AVClass compand_class = {
    .class_name = "compand filter",
    .item_name  = av_default_item_name,
    .option     = compand_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static av_cold int init(AVFilterContext *ctx)
{
    CompandContext *s = ctx->priv;
    s->pts = AV_NOPTS_VALUE;
    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    CompandContext *s = ctx->priv;

    av_freep(&s->channels);
    av_freep(&s->segments);
    av_frame_free(&s->delay_frame);
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *formats;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_NONE
    };

    layouts = ff_all_channel_layouts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ff_set_common_channel_layouts(ctx, layouts);

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ff_set_common_formats(ctx, formats);

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    ff_set_common_samplerates(ctx, formats);

    return 0;
}

static void count_items(char *item_str, int *nb_items)
{
    char *p;

    *nb_items = 1;
    for (p = item_str; *p; p++) {
        if (*p == '|')
            (*nb_items)++;
    }
}

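/* One-pole envelope follower: move the volume estimate towards the rectified
 * input, using the attack coefficient when rising and the decay coefficient
 * when falling. */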
static void update_volume(ChanParam *cp, float in)
{
    float delta = in - cp->volume;

    if (delta > 0.0)
        cp->volume += delta * cp->attack;
    else
        cp->volume += delta * cp->decay;
}

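/* Look up the gain factor for a given linear volume estimate by evaluating
 * the piecewise transfer function (stored as output minus input level) in
 * the natural-log domain. */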
static float get_volume(CompandContext *s, float in_lin)
{
    CompandSegment *cs;
    float in_log, out_log;
    int i;

    if (in_lin < s->in_min_lin)
        return s->out_min_lin;

    in_log = logf(in_lin);

    for (i = 1; i < s->nb_segments; i++)
        if (in_log <= s->segments[i].x)
            break;
    cs = &s->segments[i - 1];
    in_log -= cs->x;
    out_log = cs->y + in_log * (cs->a * in_log + cs->b);

    return expf(out_log);
}

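/* Zero-delay path: compute the gain from the running volume estimate and
 * apply it sample by sample, in place when the input frame is writable. */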
static int compand_nodelay(AVFilterContext *ctx, AVFrame *frame)
{
    CompandContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const int channels = s->nb_channels;
    const int nb_samples = frame->nb_samples;
    AVFrame *out_frame;
    int chan, i;
    int err;

    if (av_frame_is_writable(frame)) {
        out_frame = frame;
    } else {
        out_frame = ff_get_audio_buffer(inlink, nb_samples);
        if (!out_frame) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        err = av_frame_copy_props(out_frame, frame);
        if (err < 0) {
            av_frame_free(&out_frame);
            av_frame_free(&frame);
            return err;
        }
    }

    for (chan = 0; chan < channels; chan++) {
        const float *src = (float *)frame->extended_data[chan];
        float *dst = (float *)out_frame->extended_data[chan];
        ChanParam *cp = &s->channels[chan];

        for (i = 0; i < nb_samples; i++) {
            update_volume(cp, fabs(src[i]));

            dst[i] = av_clipf(src[i] * get_volume(s, cp->volume), -1.0f, 1.0f);
        }
    }

    if (frame != out_frame)
        av_frame_free(&frame);

    return ff_filter_frame(ctx->outputs[0], out_frame);
}

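/* Cheap ring-buffer index wrap-around; sufficient because the index is only
 * ever incremented by one past the buffer end. */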
#define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))

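/* Delayed path: input samples are parked in a per-channel ring buffer and the
 * gain computed from the current input is applied to samples delay_samples
 * old, so the envelope effectively looks ahead of the audio being output.
 * Nothing is emitted until the ring buffer has filled once. */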
static int compand_delay(AVFilterContext *ctx, AVFrame *frame)
{
    CompandContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const int channels = s->nb_channels;
    const int nb_samples = frame->nb_samples;
    int chan, i, dindex = 0, oindex, count = 0;
    AVFrame *out_frame = NULL;
    int err;

    if (s->pts == AV_NOPTS_VALUE) {
        s->pts = (frame->pts == AV_NOPTS_VALUE) ? 0 : frame->pts;
    }

    for (chan = 0; chan < channels; chan++) {
        AVFrame *delay_frame = s->delay_frame;
        const float *src = (float *)frame->extended_data[chan];
        float *dbuf = (float *)delay_frame->extended_data[chan];
        ChanParam *cp = &s->channels[chan];
        float *dst;

        count  = s->delay_count;
        dindex = s->delay_index;
        for (i = 0, oindex = 0; i < nb_samples; i++) {
            const float in = src[i];
            update_volume(cp, fabs(in));

            if (count >= s->delay_samples) {
                if (!out_frame) {
                    out_frame = ff_get_audio_buffer(inlink, nb_samples - i);
                    if (!out_frame) {
                        av_frame_free(&frame);
                        return AVERROR(ENOMEM);
                    }
                    err = av_frame_copy_props(out_frame, frame);
                    if (err < 0) {
                        av_frame_free(&out_frame);
                        av_frame_free(&frame);
                        return err;
                    }
                    out_frame->pts = s->pts;
                    s->pts += av_rescale_q(nb_samples - i,
                                           (AVRational){ 1, inlink->sample_rate },
                                           inlink->time_base);
                }

                dst = (float *)out_frame->extended_data[chan];
                dst[oindex++] = av_clipf(dbuf[dindex] *
                                         get_volume(s, cp->volume), -1.0f, 1.0f);
            } else {
                count++;
            }

            dbuf[dindex] = in;
            dindex = MOD(dindex + 1, s->delay_samples);
        }
    }

    s->delay_count = count;
    s->delay_index = dindex;

    av_frame_free(&frame);

    if (out_frame) {
        err = ff_filter_frame(ctx->outputs[0], out_frame);
        if (err >= 0)
            s->got_output = 1;
        return err;
    }

    return 0;
}

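/* Called at EOF to emit the samples still held in the delay ring buffer. */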
static int compand_drain(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    CompandContext *s = ctx->priv;
    const int channels = s->nb_channels;
    AVFrame *frame = NULL;
    int chan, i, dindex;

    /* 2048 is to limit output frame size during drain */
    frame = ff_get_audio_buffer(outlink, FFMIN(2048, s->delay_count));
    if (!frame)
        return AVERROR(ENOMEM);
    frame->pts = s->pts;
    s->pts += av_rescale_q(frame->nb_samples,
                           (AVRational){ 1, outlink->sample_rate }, outlink->time_base);

    for (chan = 0; chan < channels; chan++) {
        AVFrame *delay_frame = s->delay_frame;
        float *dbuf = (float *)delay_frame->extended_data[chan];
        float *dst = (float *)frame->extended_data[chan];
        ChanParam *cp = &s->channels[chan];

        dindex = s->delay_index;
        for (i = 0; i < frame->nb_samples; i++) {
            dst[i] = av_clipf(dbuf[dindex] * get_volume(s, cp->volume),
                              -1.0f, 1.0f);
            dindex = MOD(dindex + 1, s->delay_samples);
        }
    }
    s->delay_count -= frame->nb_samples;
    s->delay_index = dindex;

    return ff_filter_frame(outlink, frame);
}

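/* Parse the attacks/decays/points strings, build the transfer function
 * (inserting the end segments, joining colinear pieces and rounding each
 * knee with the soft-knee radius), convert it to the natural-log domain,
 * derive the per-channel attack/decay coefficients and select the delayed
 * or zero-delay processing path. */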
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    CompandContext *s = ctx->priv;
    const int sample_rate = outlink->sample_rate;
    double radius = s->curve_dB * M_LN10 / 20.0;
    const char *p;
    const int channels =
        av_get_channel_layout_nb_channels(outlink->channel_layout);
    int nb_attacks, nb_decays, nb_points;
    int new_nb_items, num;
    int i;
    int err;

    count_items(s->attacks, &nb_attacks);
    count_items(s->decays, &nb_decays);
    count_items(s->points, &nb_points);

    if (channels <= 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid number of channels: %d\n", channels);
        return AVERROR(EINVAL);
    }

    if (nb_attacks > channels || nb_decays > channels) {
        av_log(ctx, AV_LOG_ERROR,
               "Number of attacks/decays bigger than number of channels.\n");
        return AVERROR(EINVAL);
    }

    uninit(ctx);

    s->nb_channels = channels;
    s->channels    = av_mallocz_array(channels, sizeof(*s->channels));
    s->nb_segments = (nb_points + 4) * 2;
    s->segments    = av_mallocz_array(s->nb_segments, sizeof(*s->segments));

    if (!s->channels || !s->segments) {
        uninit(ctx);
        return AVERROR(ENOMEM);
    }

    p = s->attacks;
    for (i = 0, new_nb_items = 0; i < nb_attacks; i++) {
        char *tstr = av_get_token(&p, "|");
        if (!tstr)
            return AVERROR(ENOMEM);

        new_nb_items += sscanf(tstr, "%f", &s->channels[i].attack) == 1;
        av_freep(&tstr);
        if (s->channels[i].attack < 0) {
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        if (*p)
            p++;
    }
    nb_attacks = new_nb_items;

    p = s->decays;
    for (i = 0, new_nb_items = 0; i < nb_decays; i++) {
        char *tstr = av_get_token(&p, "|");
        if (!tstr)
            return AVERROR(ENOMEM);
        new_nb_items += sscanf(tstr, "%f", &s->channels[i].decay) == 1;
        av_freep(&tstr);
        if (s->channels[i].decay < 0) {
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        if (*p)
            p++;
    }
    nb_decays = new_nb_items;

    if (nb_attacks != nb_decays) {
        av_log(ctx, AV_LOG_ERROR,
               "Number of attacks %d differs from number of decays %d.\n",
               nb_attacks, nb_decays);
        uninit(ctx);
        return AVERROR(EINVAL);
    }

#define S(x) s->segments[2 * ((x) + 1)]
    p = s->points;
    for (i = 0, new_nb_items = 0; i < nb_points; i++) {
        char *tstr = av_get_token(&p, "|");
        if (!tstr)
            return AVERROR(ENOMEM);

        err = sscanf(tstr, "%f/%f", &S(i).x, &S(i).y);
        av_freep(&tstr);
        if (err != 2) {
            av_log(ctx, AV_LOG_ERROR,
                   "Invalid and/or missing input/output value.\n");
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        if (i && S(i - 1).x > S(i).x) {
            av_log(ctx, AV_LOG_ERROR,
                   "Transfer function input values must be increasing.\n");
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        S(i).y -= S(i).x;
        av_log(ctx, AV_LOG_DEBUG, "%d: x=%f y=%f\n", i, S(i).x, S(i).y);
        new_nb_items++;
        if (*p)
            p++;
    }
    num = new_nb_items;

    /* Add 0,0 if necessary */
    if (num == 0 || S(num - 1).x)
        num++;

#undef S
#define S(x) s->segments[2 * (x)]
    /* Add a tail off segment at the start */
    S(0).x = S(1).x - 2 * s->curve_dB;
    S(0).y = S(1).y;
    num++;

    /* Join adjacent colinear segments */
    for (i = 2; i < num; i++) {
        double g1 = (S(i - 1).y - S(i - 2).y) * (S(i - 0).x - S(i - 1).x);
        double g2 = (S(i - 0).y - S(i - 1).y) * (S(i - 1).x - S(i - 2).x);
        int j;

        /* here we purposefully lose precision so that we can compare floats */
        if (fabs(g1 - g2))
            continue;
        num--;
        for (j = --i; j < num; j++)
            S(j) = S(j + 1);
    }

    for (i = 0; !i || s->segments[i - 2].x; i += 2) {
        s->segments[i].y += s->gain_dB;
        s->segments[i].x *= M_LN10 / 20;
        s->segments[i].y *= M_LN10 / 20;
    }

#define L(x) s->segments[i - (x)]
    for (i = 4; s->segments[i - 2].x; i += 2) {
        double x, y, cx, cy, in1, in2, out1, out2, theta, len, r;

        L(4).a = 0;
        L(4).b = (L(2).y - L(4).y) / (L(2).x - L(4).x);

        L(2).a = 0;
        L(2).b = (L(0).y - L(2).y) / (L(0).x - L(2).x);

        theta = atan2(L(2).y - L(4).y, L(2).x - L(4).x);
        len = sqrt(pow(L(2).x - L(4).x, 2.) + pow(L(2).y - L(4).y, 2.));
        r = FFMIN(radius, len);
        L(3).x = L(2).x - r * cos(theta);
        L(3).y = L(2).y - r * sin(theta);

        theta = atan2(L(0).y - L(2).y, L(0).x - L(2).x);
        len = sqrt(pow(L(0).x - L(2).x, 2.) + pow(L(0).y - L(2).y, 2.));
        r = FFMIN(radius, len / 2);
        x = L(2).x + r * cos(theta);
        y = L(2).y + r * sin(theta);

        cx = (L(3).x + L(2).x + x) / 3;
        cy = (L(3).y + L(2).y + y) / 3;

        L(2).x = x;
        L(2).y = y;

        in1  = cx - L(3).x;
        out1 = cy - L(3).y;
        in2  = L(2).x - L(3).x;
        out2 = L(2).y - L(3).y;
        L(3).a = (out2 / in2 - out1 / in1) / (in2 - in1);
        L(3).b = out1 / in1 - L(3).a * in1;
    }
    L(3).x = 0;
    L(3).y = L(2).y;

    s->in_min_lin  = exp(s->segments[1].x);
    s->out_min_lin = exp(s->segments[1].y);

    for (i = 0; i < channels; i++) {
        ChanParam *cp = &s->channels[i];

        if (cp->attack > 1.0 / sample_rate)
            cp->attack = 1.0 - exp(-1.0 / (sample_rate * cp->attack));
        else
            cp->attack = 1.0;
        if (cp->decay > 1.0 / sample_rate)
            cp->decay = 1.0 - exp(-1.0 / (sample_rate * cp->decay));
        else
            cp->decay = 1.0;
        cp->volume = pow(10.0, s->initial_volume / 20);
    }

    s->delay_samples = s->delay * sample_rate;
    if (s->delay_samples <= 0) {
        s->compand = compand_nodelay;
        return 0;
    }

    s->delay_frame = av_frame_alloc();
    if (!s->delay_frame) {
        uninit(ctx);
        return AVERROR(ENOMEM);
    }

    s->delay_frame->format         = outlink->format;
    s->delay_frame->nb_samples     = s->delay_samples;
    s->delay_frame->channel_layout = outlink->channel_layout;

    err = av_frame_get_buffer(s->delay_frame, 32);
    if (err)
        return err;

    s->compand = compand_delay;
    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    CompandContext *s = ctx->priv;

    return s->compand(ctx, frame);
}

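/* Keep requesting input until a frame has been emitted downstream; once the
 * input reaches EOF, drain what is left in the delay buffer. */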
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    CompandContext *s = ctx->priv;
    int ret = 0;

    s->got_output = 0;
    while (ret >= 0 && !s->got_output)
        ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && s->delay_count)
        ret = compand_drain(outlink);

    return ret;
}

static const AVFilterPad compand_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad compand_outputs[] = {
    {
        .name          = "default",
        .request_frame = request_frame,
        .config_props  = config_output,
        .type          = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_compand = {
    .name           = "compand",
    .description    = NULL_IF_CONFIG_SMALL(
            "Compress or expand audio dynamic range."),
    .query_formats  = query_formats,
    .priv_size      = sizeof(CompandContext),
    .priv_class     = &compand_class,
    .init           = init,
    .uninit         = uninit,
    .inputs         = compand_inputs,
    .outputs        = compand_outputs,
};