libavfilter/af_amix.c
/*
 * Audio Mix Filter
 * Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Audio Mix Filter
 *
 * Mixes audio from multiple sources into a single output. The channel layout,
 * sample rate, and sample format will be the same for all inputs and the
 * output.
 */

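/*
 * Example use (a sketch, not part of this file): with the avconv/ffmpeg CLI
 * the filter is typically instantiated along the lines of
 *     -filter_complex "amix=inputs=3:duration=first:dropout_transition=3"
 * The option names correspond to the AVOption table defined below.
 */
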
#include "libavutil/attributes.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/float_dsp.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"

#define INPUT_OFF      0    /**< input has reached EOF */
#define INPUT_ON       1    /**< input is active */
#define INPUT_INACTIVE 2    /**< input is on, but is currently inactive */

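/* Values for the "duration" option below: keep mixing until the longest
 * input ends, stop as soon as the shortest input ends, or stop when the
 * first input ends. */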
#define DURATION_LONGEST  0
#define DURATION_SHORTEST 1
#define DURATION_FIRST    2


typedef struct FrameInfo {
    int nb_samples;
    int64_t pts;
    struct FrameInfo *next;
} FrameInfo;

/**
 * Linked list used to store timestamps and frame sizes of all frames in the
 * FIFO for the first input.
 *
 * This is needed to keep timestamps synchronized for the case where multiple
 * input frames are pushed to the filter for processing before a frame is
 * requested by the output link.
 */
typedef struct FrameList {
    int nb_frames;
    int nb_samples;
    FrameInfo *list;
    FrameInfo *end;
} FrameList;

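/*
 * Rough usage sketch of the FrameList helpers (illustrative only; the real
 * call sites are filter_frame() and request_frame() below):
 *
 *     frame_list_add_frame(list, buf->nb_samples, pts); // frame arrives on input 0
 *     n = frame_list_next_frame_size(list);             // size of the next output frame
 *     ...mix n samples...
 *     frame_list_remove_samples(list, n);               // drop what was consumed
 */
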
static void frame_list_clear(FrameList *frame_list)
{
    if (frame_list) {
        while (frame_list->list) {
            FrameInfo *info = frame_list->list;
            frame_list->list = info->next;
            av_free(info);
        }
        frame_list->nb_frames  = 0;
        frame_list->nb_samples = 0;
        frame_list->end        = NULL;
    }
}

static int frame_list_next_frame_size(FrameList *frame_list)
{
    if (!frame_list->list)
        return 0;
    return frame_list->list->nb_samples;
}

static int64_t frame_list_next_pts(FrameList *frame_list)
{
    if (!frame_list->list)
        return AV_NOPTS_VALUE;
    return frame_list->list->pts;
}

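/* Note: frame_list_remove_samples() below advances a partially consumed
 * frame's pts by the number of samples removed. This works because the
 * output time base is set to 1/sample_rate in config_output(), so one
 * sample corresponds to exactly one pts tick. */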
static void frame_list_remove_samples(FrameList *frame_list, int nb_samples)
{
    if (nb_samples >= frame_list->nb_samples) {
        frame_list_clear(frame_list);
    } else {
        int samples = nb_samples;
        while (samples > 0) {
            FrameInfo *info = frame_list->list;
            av_assert0(info != NULL);
            if (info->nb_samples <= samples) {
                samples -= info->nb_samples;
                frame_list->list = info->next;
                if (!frame_list->list)
                    frame_list->end = NULL;
                frame_list->nb_frames--;
                frame_list->nb_samples -= info->nb_samples;
                av_free(info);
            } else {
                info->nb_samples       -= samples;
                info->pts              += samples;
                frame_list->nb_samples -= samples;
                samples = 0;
            }
        }
    }
}

static int frame_list_add_frame(FrameList *frame_list, int nb_samples, int64_t pts)
{
    FrameInfo *info = av_malloc(sizeof(*info));
    if (!info)
        return AVERROR(ENOMEM);
    info->nb_samples = nb_samples;
    info->pts        = pts;
    info->next       = NULL;

    if (!frame_list->list) {
        frame_list->list = info;
        frame_list->end  = info;
    } else {
        av_assert0(frame_list->end != NULL);
        frame_list->end->next = info;
        frame_list->end       = info;
    }
    frame_list->nb_frames++;
    frame_list->nb_samples += nb_samples;

    return 0;
}


typedef struct MixContext {
    const AVClass *class;       /**< class for AVOptions */
    AVFloatDSPContext fdsp;

    int nb_inputs;              /**< number of inputs */
    int active_inputs;          /**< number of inputs currently active */
    int duration_mode;          /**< mode for determining duration */
    float dropout_transition;   /**< transition time when an input drops out */

    int nb_channels;            /**< number of channels */
    int sample_rate;            /**< sample rate */
    int planar;
    AVAudioFifo **fifos;        /**< audio fifo for each input */
    uint8_t *input_state;       /**< current state of each input */
    float *input_scale;         /**< mixing scale factor for each input */
    float scale_norm;           /**< normalization factor for all inputs */
    int64_t next_pts;           /**< calculated pts for next output frame */
    FrameList *frame_list;      /**< list of frame info for the first input */
} MixContext;

#define OFFSET(x) offsetof(MixContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
static const AVOption options[] = {
    { "inputs", "Number of inputs.",
            OFFSET(nb_inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, 32, A },
    { "duration", "How to determine the end-of-stream.",
            OFFSET(duration_mode), AV_OPT_TYPE_INT, { .i64 = DURATION_LONGEST }, 0, 2, A, "duration" },
        { "longest",  "Duration of longest input.",  0, AV_OPT_TYPE_CONST, { .i64 = DURATION_LONGEST  }, INT_MIN, INT_MAX, A, "duration" },
        { "shortest", "Duration of shortest input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_SHORTEST }, INT_MIN, INT_MAX, A, "duration" },
        { "first",    "Duration of first input.",    0, AV_OPT_TYPE_CONST, { .i64 = DURATION_FIRST    }, INT_MIN, INT_MAX, A, "duration" },
    { "dropout_transition", "Transition time, in seconds, for volume "
                            "renormalization when an input stream ends.",
            OFFSET(dropout_transition), AV_OPT_TYPE_FLOAT, { .dbl = 2.0 }, 0, INT_MAX, A },
    { NULL },
};

static const AVClass amix_class = {
    .class_name = "amix filter",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};


/**
 * Update the scaling factors to apply to each input during mixing.
 *
 * This balances the full volume range between active inputs and handles
 * volume transitions when EOF is encountered on an input but mixing continues
 * with the remaining inputs.
 */
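/*
 * Worked example: with 3 active inputs each input is scaled by 1/3. When one
 * input hits EOF, scale_norm decays from 3 towards 2 by
 * nb_samples / (dropout_transition * sample_rate) per output frame, so the
 * remaining inputs fade up smoothly to 1/2 instead of jumping.
 */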
static void calculate_scales(MixContext *s, int nb_samples)
{
    int i;

    if (s->scale_norm > s->active_inputs) {
        s->scale_norm -= nb_samples / (s->dropout_transition * s->sample_rate);
        s->scale_norm = FFMAX(s->scale_norm, s->active_inputs);
    }

    for (i = 0; i < s->nb_inputs; i++) {
        if (s->input_state[i] == INPUT_ON)
            s->input_scale[i] = 1.0f / s->scale_norm;
        else
            s->input_scale[i] = 0.0f;
    }
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MixContext *s = ctx->priv;
    int i;
    char buf[64];

    s->planar          = av_sample_fmt_is_planar(outlink->format);
    s->sample_rate     = outlink->sample_rate;
    outlink->time_base = (AVRational){ 1, outlink->sample_rate };
    s->next_pts        = AV_NOPTS_VALUE;

    s->frame_list = av_mallocz(sizeof(*s->frame_list));
    if (!s->frame_list)
        return AVERROR(ENOMEM);

    s->fifos = av_mallocz(s->nb_inputs * sizeof(*s->fifos));
    if (!s->fifos)
        return AVERROR(ENOMEM);

    s->nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout);
    for (i = 0; i < s->nb_inputs; i++) {
        s->fifos[i] = av_audio_fifo_alloc(outlink->format, s->nb_channels, 1024);
        if (!s->fifos[i])
            return AVERROR(ENOMEM);
    }

    s->input_state = av_malloc(s->nb_inputs);
    if (!s->input_state)
        return AVERROR(ENOMEM);
    memset(s->input_state, INPUT_ON, s->nb_inputs);
    s->active_inputs = s->nb_inputs;

    s->input_scale = av_mallocz(s->nb_inputs * sizeof(*s->input_scale));
    if (!s->input_scale)
        return AVERROR(ENOMEM);
    s->scale_norm = s->active_inputs;
    calculate_scales(s, 0);

    av_get_channel_layout_string(buf, sizeof(buf), -1, outlink->channel_layout);

    av_log(ctx, AV_LOG_VERBOSE,
           "inputs:%d fmt:%s srate:%d cl:%s\n", s->nb_inputs,
           av_get_sample_fmt_name(outlink->format), outlink->sample_rate, buf);

    return 0;
}

/**
 * Read samples from the input FIFOs, mix, and write to the output link.
 */
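/*
 * For planar float (FLTP) each channel is mixed as its own plane; for packed
 * float (FLT) all channels are interleaved in a single plane. The per-plane
 * length is aligned up to a multiple of 16 samples because
 * vector_fmac_scalar() requires that; the padding added by
 * ff_get_audio_buffer() is assumed to make the resulting over-read/write safe.
 */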
static int output_frame(AVFilterLink *outlink, int nb_samples)
{
    AVFilterContext *ctx = outlink->src;
    MixContext *s = ctx->priv;
    AVFrame *out_buf, *in_buf;
    int i;

    calculate_scales(s, nb_samples);

    out_buf = ff_get_audio_buffer(outlink, nb_samples);
    if (!out_buf)
        return AVERROR(ENOMEM);

    in_buf = ff_get_audio_buffer(outlink, nb_samples);
    if (!in_buf) {
        av_frame_free(&out_buf);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < s->nb_inputs; i++) {
        if (s->input_state[i] == INPUT_ON) {
            int planes, plane_size, p;

            av_audio_fifo_read(s->fifos[i], (void **)in_buf->extended_data,
                               nb_samples);

            planes     = s->planar ? s->nb_channels : 1;
            plane_size = nb_samples * (s->planar ? 1 : s->nb_channels);
            plane_size = FFALIGN(plane_size, 16);

            for (p = 0; p < planes; p++) {
                s->fdsp.vector_fmac_scalar((float *)out_buf->extended_data[p],
                                           (float *) in_buf->extended_data[p],
                                           s->input_scale[i], plane_size);
            }
        }
    }
    av_frame_free(&in_buf);

    out_buf->pts = s->next_pts;
    if (s->next_pts != AV_NOPTS_VALUE)
        s->next_pts += nb_samples;

    return ff_filter_frame(outlink, out_buf);
}

/**
 * Returns the smallest number of samples available in the input FIFOs other
 * than that of the first input.
 */
static int get_available_samples(MixContext *s)
{
    int i;
    int available_samples = INT_MAX;

    av_assert0(s->nb_inputs > 1);

    for (i = 1; i < s->nb_inputs; i++) {
        int nb_samples;
        if (s->input_state[i] == INPUT_OFF)
            continue;
        nb_samples = av_audio_fifo_size(s->fifos[i]);
        available_samples = FFMIN(available_samples, nb_samples);
    }
    if (available_samples == INT_MAX)
        return 0;
    return available_samples;
}

/**
 * Requests a frame, if needed, from each input link other than the first.
 */
static int request_samples(AVFilterContext *ctx, int min_samples)
{
    MixContext *s = ctx->priv;
    int i, ret;

    av_assert0(s->nb_inputs > 1);

    for (i = 1; i < s->nb_inputs; i++) {
        ret = 0;
        if (s->input_state[i] == INPUT_OFF)
            continue;
        while (!ret && av_audio_fifo_size(s->fifos[i]) < min_samples)
            ret = ff_request_frame(ctx->inputs[i]);
        if (ret == AVERROR_EOF) {
            if (av_audio_fifo_size(s->fifos[i]) == 0) {
                s->input_state[i] = INPUT_OFF;
                continue;
            }
        } else if (ret < 0)
            return ret;
    }
    return 0;
}

/**
 * Calculates the number of active inputs and determines EOF based on the
 * duration option.
 *
 * @return 0 if mixing should continue, or AVERROR_EOF if mixing should stop.
 */
static int calc_active_inputs(MixContext *s)
{
    int i;
    int active_inputs = 0;
    for (i = 0; i < s->nb_inputs; i++)
        active_inputs += !!(s->input_state[i] != INPUT_OFF);
    s->active_inputs = active_inputs;

    if (!active_inputs ||
        (s->duration_mode == DURATION_FIRST && s->input_state[0] == INPUT_OFF) ||
        (s->duration_mode == DURATION_SHORTEST && active_inputs != s->nb_inputs))
        return AVERROR_EOF;
    return 0;
}

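/*
 * Output is driven by the first input: each output frame normally mirrors the
 * size and pts of the next frame queued for input 0 (tracked in frame_list).
 * Once input 0 reaches EOF and the duration mode still allows mixing,
 * request_frame() falls back to emitting whatever the remaining inputs have
 * buffered.
 */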
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MixContext *s = ctx->priv;
    int ret;
    int wanted_samples, available_samples;

    ret = calc_active_inputs(s);
    if (ret < 0)
        return ret;

    if (s->input_state[0] == INPUT_OFF) {
        ret = request_samples(ctx, 1);
        if (ret < 0)
            return ret;

        ret = calc_active_inputs(s);
        if (ret < 0)
            return ret;

        available_samples = get_available_samples(s);
        if (!available_samples)
            return AVERROR(EAGAIN);

        return output_frame(outlink, available_samples);
    }

    if (s->frame_list->nb_frames == 0) {
        ret = ff_request_frame(ctx->inputs[0]);
        if (ret == AVERROR_EOF) {
            s->input_state[0] = INPUT_OFF;
            if (s->nb_inputs == 1)
                return AVERROR_EOF;
            else
                return AVERROR(EAGAIN);
        } else if (ret < 0)
            return ret;
    }
    av_assert0(s->frame_list->nb_frames > 0);

    wanted_samples = frame_list_next_frame_size(s->frame_list);

    if (s->active_inputs > 1) {
        ret = request_samples(ctx, wanted_samples);
        if (ret < 0)
            return ret;

        ret = calc_active_inputs(s);
        if (ret < 0)
            return ret;
    }

    if (s->active_inputs > 1) {
        available_samples = get_available_samples(s);
        if (!available_samples)
            return AVERROR(EAGAIN);
        available_samples = FFMIN(available_samples, wanted_samples);
    } else {
        available_samples = wanted_samples;
    }

    s->next_pts = frame_list_next_pts(s->frame_list);
    frame_list_remove_samples(s->frame_list, available_samples);

    return output_frame(outlink, available_samples);
}

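/*
 * Per-input frame callback: samples are buffered in the input's FIFO. For the
 * first input the frame size and rescaled pts are also recorded in frame_list
 * so that request_frame() can reproduce its framing on the output.
 */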
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    MixContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int i, ret = 0;

    for (i = 0; i < ctx->nb_inputs; i++)
        if (ctx->inputs[i] == inlink)
            break;
    if (i >= ctx->nb_inputs) {
        av_log(ctx, AV_LOG_ERROR, "unknown input link\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if (i == 0) {
        int64_t pts = av_rescale_q(buf->pts, inlink->time_base,
                                   outlink->time_base);
        ret = frame_list_add_frame(s->frame_list, buf->nb_samples, pts);
        if (ret < 0)
            goto fail;
    }

    ret = av_audio_fifo_write(s->fifos[i], (void **)buf->extended_data,
                              buf->nb_samples);

fail:
    av_frame_free(&buf);

    return ret;
}

static av_cold int init(AVFilterContext *ctx)
{
    MixContext *s = ctx->priv;
    int i;

    for (i = 0; i < s->nb_inputs; i++) {
        char name[32];
        AVFilterPad pad = { 0 };

        snprintf(name, sizeof(name), "input%d", i);
        pad.type         = AVMEDIA_TYPE_AUDIO;
        pad.name         = av_strdup(name);
        pad.filter_frame = filter_frame;

        ff_insert_inpad(ctx, i, &pad);
    }

    avpriv_float_dsp_init(&s->fdsp, 0);

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    int i;
    MixContext *s = ctx->priv;

    if (s->fifos) {
        for (i = 0; i < s->nb_inputs; i++)
            av_audio_fifo_free(s->fifos[i]);
        av_freep(&s->fifos);
    }
    frame_list_clear(s->frame_list);
    av_freep(&s->frame_list);
    av_freep(&s->input_state);
    av_freep(&s->input_scale);

    for (i = 0; i < ctx->nb_inputs; i++)
        av_freep(&ctx->input_pads[i].name);
}

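/*
 * The mixer operates on float samples only (packed FLT or planar FLTP);
 * format negotiation below restricts the common sample formats accordingly,
 * while any channel layout and sample rate is accepted as long as it is the
 * same on all links, as noted in the file header.
 */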
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    ff_add_format(&formats, AV_SAMPLE_FMT_FLT);
    ff_add_format(&formats, AV_SAMPLE_FMT_FLTP);
    ff_set_common_formats(ctx, formats);
    ff_set_common_channel_layouts(ctx, ff_all_channel_layouts());
    ff_set_common_samplerates(ctx, ff_all_samplerates());
    return 0;
}

static const AVFilterPad avfilter_af_amix_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame
    },
    { NULL }
};

AVFilter ff_af_amix = {
    .name          = "amix",
    .description   = NULL_IF_CONFIG_SMALL("Audio mixing."),
    .priv_size     = sizeof(MixContext),
    .priv_class    = &amix_class,

    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,

    .inputs        = NULL,
    .outputs       = avfilter_af_amix_outputs,

    .flags         = AVFILTER_FLAG_DYNAMIC_INPUTS,
};