aac: Handle HE-AACv2 when sniffing a channel order.
[libav.git] / libavcodec / aacdec.c
1 /*
2 * AAC decoder
3 * Copyright (c) 2005-2006 Oded Shimon ( ods15 ods15 dyndns org )
4 * Copyright (c) 2006-2007 Maxim Gavrilov ( maxim.gavrilov gmail com )
5 *
6 * AAC LATM decoder
7 * Copyright (c) 2008-2010 Paul Kendall <paul@kcbbs.gen.nz>
8 * Copyright (c) 2010 Janne Grunau <janne-libav@jannau.net>
9 *
10 * This file is part of Libav.
11 *
12 * Libav is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU Lesser General Public
14 * License as published by the Free Software Foundation; either
15 * version 2.1 of the License, or (at your option) any later version.
16 *
17 * Libav is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * Lesser General Public License for more details.
21 *
22 * You should have received a copy of the GNU Lesser General Public
23 * License along with Libav; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 */
26
27 /**
28 * @file
29 * AAC decoder
30 * @author Oded Shimon ( ods15 ods15 dyndns org )
31 * @author Maxim Gavrilov ( maxim.gavrilov gmail com )
32 */
33
34 /*
35 * supported tools
36 *
37 * Support? Name
38 * N (code in SoC repo) gain control
39 * Y block switching
40 * Y window shapes - standard
41 * N window shapes - Low Delay
42 * Y filterbank - standard
43 * N (code in SoC repo) filterbank - Scalable Sample Rate
44 * Y Temporal Noise Shaping
45 * Y Long Term Prediction
46 * Y intensity stereo
47 * Y channel coupling
48 * Y frequency domain prediction
49 * Y Perceptual Noise Substitution
50 * Y Mid/Side stereo
51 * N Scalable Inverse AAC Quantization
52 * N Frequency Selective Switch
53 * N upsampling filter
54 * Y quantization & coding - AAC
55 * N quantization & coding - TwinVQ
56 * N quantization & coding - BSAC
57 * N AAC Error Resilience tools
58 * N Error Resilience payload syntax
59 * N Error Protection tool
60 * N CELP
61 * N Silence Compression
62 * N HVXC
63 * N HVXC 4kbits/s VR
64 * N Structured Audio tools
65 * N Structured Audio Sample Bank Format
66 * N MIDI
67 * N Harmonic and Individual Lines plus Noise
68 * N Text-To-Speech Interface
69 * Y Spectral Band Replication
70 * Y (not in this code) Layer-1
71 * Y (not in this code) Layer-2
72 * Y (not in this code) Layer-3
73 * N SinuSoidal Coding (Transient, Sinusoid, Noise)
74 * Y Parametric Stereo
75 * N Direct Stream Transfer
76 *
77 * Note: - HE AAC v1 comprises LC AAC with Spectral Band Replication.
78 * - HE AAC v2 comprises LC AAC with Spectral Band Replication and
79 * Parametric Stereo.
80 */
81
82
83 #include "avcodec.h"
84 #include "internal.h"
85 #include "get_bits.h"
86 #include "dsputil.h"
87 #include "fft.h"
88 #include "fmtconvert.h"
89 #include "lpc.h"
90 #include "kbdwin.h"
91 #include "sinewin.h"
92
93 #include "aac.h"
94 #include "aactab.h"
95 #include "aacdectab.h"
96 #include "cbrt_tablegen.h"
97 #include "sbr.h"
98 #include "aacsbr.h"
99 #include "mpeg4audio.h"
100 #include "aacadtsdec.h"
101 #include "libavutil/intfloat.h"
102
103 #include <assert.h>
104 #include <errno.h>
105 #include <math.h>
106 #include <string.h>
107
108 #if ARCH_ARM
109 # include "arm/aac.h"
110 #endif
111
112 static VLC vlc_scalefactors;
113 static VLC vlc_spectral[11];
114
115 static const char overread_err[] = "Input buffer exhausted before END element found\n";
116
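/* Each CPE contributes two output channels and every other element one;
 * elements whose position is AAC_CHANNEL_OFF or AAC_CHANNEL_CC produce no
 * output and are not counted. */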
117 static int count_channels(uint8_t (*layout)[3], int tags)
118 {
119 int i, sum = 0;
120 for (i = 0; i < tags; i++) {
121 int syn_ele = layout[i][0];
122 int pos = layout[i][2];
123 sum += (1 + (syn_ele == TYPE_CPE)) *
124 (pos != AAC_CHANNEL_OFF && pos != AAC_CHANNEL_CC);
125 }
126 return sum;
127 }
128
129 /**
130 * Check for the channel element in the current channel position configuration.
131 * If it exists, make sure the appropriate element is allocated and map the
132 * channel order to match the internal Libav channel layout.
133 *
134 * @param che_pos current channel position configuration
135 * @param type channel element type
136 * @param id channel element id
137 * @param channels count of the number of channels in the configuration
138 *
139 * @return Returns error status. 0 - OK, !0 - error
140 */
141 static av_cold int che_configure(AACContext *ac,
142 enum ChannelPosition che_pos,
143 int type, int id, int *channels)
144 {
145 if (che_pos) {
146 if (!ac->che[type][id]) {
147 if (!(ac->che[type][id] = av_mallocz(sizeof(ChannelElement))))
148 return AVERROR(ENOMEM);
149 ff_aac_sbr_ctx_init(ac, &ac->che[type][id]->sbr);
150 }
151 if (type != TYPE_CCE) {
152 ac->output_data[(*channels)++] = ac->che[type][id]->ch[0].ret;
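/* A CPE always carries a second output channel; so does an SCE when
 * Parametric Stereo is active (HE-AACv2), because PS reconstructs a
 * stereo pair from the single coded channel. */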
153 if (type == TYPE_CPE ||
154 (type == TYPE_SCE && ac->oc[1].m4ac.ps == 1)) {
155 ac->output_data[(*channels)++] = ac->che[type][id]->ch[1].ret;
156 }
157 }
158 } else {
159 if (ac->che[type][id])
160 ff_aac_sbr_ctx_close(&ac->che[type][id]->sbr);
161 av_freep(&ac->che[type][id]);
162 }
163 return 0;
164 }
165
166 struct elem_to_channel {
167 uint64_t av_position;
168 uint8_t syn_ele;
169 uint8_t elem_id;
170 uint8_t aac_position;
171 };
172
173 static int assign_pair(struct elem_to_channel e2c_vec[MAX_ELEM_ID],
174 uint8_t (*layout_map)[3], int offset, int tags, uint64_t left,
175 uint64_t right, int pos)
176 {
177 if (layout_map[offset][0] == TYPE_CPE) {
178 e2c_vec[offset] = (struct elem_to_channel) {
179 .av_position = left | right, .syn_ele = TYPE_CPE,
180 .elem_id = layout_map[offset ][1], .aac_position = pos };
181 return 1;
182 } else {
183 e2c_vec[offset] = (struct elem_to_channel) {
184 .av_position = left, .syn_ele = TYPE_SCE,
185 .elem_id = layout_map[offset ][1], .aac_position = pos };
186 e2c_vec[offset + 1] = (struct elem_to_channel) {
187 .av_position = right, .syn_ele = TYPE_SCE,
188 .elem_id = layout_map[offset + 1][1], .aac_position = pos };
189 return 2;
190 }
191 }
192
193 static int count_paired_channels(uint8_t (*layout_map)[3], int tags, int pos, int *current) {
194 int num_pos_channels = 0;
195 int first_cpe = 0;
196 int sce_parity = 0;
197 int i;
198 for (i = *current; i < tags; i++) {
199 if (layout_map[i][2] != pos)
200 break;
201 if (layout_map[i][0] == TYPE_CPE) {
202 if (sce_parity) {
203 if (pos == AAC_CHANNEL_FRONT && !first_cpe) {
204 sce_parity = 0;
205 } else {
206 return -1;
207 }
208 }
209 num_pos_channels += 2;
210 first_cpe = 1;
211 } else {
212 num_pos_channels++;
213 sce_parity ^= 1;
214 }
215 }
216 if (sce_parity &&
217 ((pos == AAC_CHANNEL_FRONT && first_cpe) || pos == AAC_CHANNEL_SIDE))
218 return -1;
219 *current = i;
220 return num_pos_channels;
221 }
222
223 static uint64_t sniff_channel_order(uint8_t (*layout_map)[3], int tags)
224 {
225 int i, n, total_non_cc_elements;
226 struct elem_to_channel e2c_vec[4*MAX_ELEM_ID] = {{ 0 }};
227 int num_front_channels, num_side_channels, num_back_channels;
228 uint64_t layout;
229
230 if (FF_ARRAY_ELEMS(e2c_vec) < tags)
231 return 0;
232
233 i = 0;
234 num_front_channels =
235 count_paired_channels(layout_map, tags, AAC_CHANNEL_FRONT, &i);
236 if (num_front_channels < 0)
237 return 0;
238 num_side_channels =
239 count_paired_channels(layout_map, tags, AAC_CHANNEL_SIDE, &i);
240 if (num_side_channels < 0)
241 return 0;
242 num_back_channels =
243 count_paired_channels(layout_map, tags, AAC_CHANNEL_BACK, &i);
244 if (num_back_channels < 0)
245 return 0;
246
247 i = 0;
248 if (num_front_channels & 1) {
249 e2c_vec[i] = (struct elem_to_channel) {
250 .av_position = AV_CH_FRONT_CENTER, .syn_ele = TYPE_SCE,
251 .elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_FRONT };
252 i++;
253 num_front_channels--;
254 }
255 if (num_front_channels >= 4) {
256 i += assign_pair(e2c_vec, layout_map, i, tags,
257 AV_CH_FRONT_LEFT_OF_CENTER,
258 AV_CH_FRONT_RIGHT_OF_CENTER,
259 AAC_CHANNEL_FRONT);
260 num_front_channels -= 2;
261 }
262 if (num_front_channels >= 2) {
263 i += assign_pair(e2c_vec, layout_map, i, tags,
264 AV_CH_FRONT_LEFT,
265 AV_CH_FRONT_RIGHT,
266 AAC_CHANNEL_FRONT);
267 num_front_channels -= 2;
268 }
269 while (num_front_channels >= 2) {
270 i += assign_pair(e2c_vec, layout_map, i, tags,
271 UINT64_MAX,
272 UINT64_MAX,
273 AAC_CHANNEL_FRONT);
274 num_front_channels -= 2;
275 }
276
277 if (num_side_channels >= 2) {
278 i += assign_pair(e2c_vec, layout_map, i, tags,
279 AV_CH_SIDE_LEFT,
280 AV_CH_SIDE_RIGHT,
281 AAC_CHANNEL_SIDE);
282 num_side_channels -= 2;
283 }
284 while (num_side_channels >= 2) {
285 i += assign_pair(e2c_vec, layout_map, i, tags,
286 UINT64_MAX,
287 UINT64_MAX,
288 AAC_CHANNEL_SIDE);
289 num_side_channels -= 2;
290 }
291
292 while (num_back_channels >= 4) {
293 i += assign_pair(e2c_vec, layout_map, i, tags,
294 UINT64_MAX,
295 UINT64_MAX,
296 AAC_CHANNEL_BACK);
297 num_back_channels -= 2;
298 }
299 if (num_back_channels >= 2) {
300 i += assign_pair(e2c_vec, layout_map, i, tags,
301 AV_CH_BACK_LEFT,
302 AV_CH_BACK_RIGHT,
303 AAC_CHANNEL_BACK);
304 num_back_channels -= 2;
305 }
306 if (num_back_channels) {
307 e2c_vec[i] = (struct elem_to_channel) {
308 .av_position = AV_CH_BACK_CENTER, .syn_ele = TYPE_SCE,
309 .elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_BACK };
310 i++;
311 num_back_channels--;
312 }
313
314 if (i < tags && layout_map[i][2] == AAC_CHANNEL_LFE) {
315 e2c_vec[i] = (struct elem_to_channel) {
316 .av_position = AV_CH_LOW_FREQUENCY, .syn_ele = TYPE_LFE,
317 .elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_LFE };
318 i++;
319 }
320 while (i < tags && layout_map[i][2] == AAC_CHANNEL_LFE) {
321 e2c_vec[i] = (struct elem_to_channel) {
322 .av_position = UINT64_MAX, .syn_ele = TYPE_LFE,
323 .elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_LFE };
324 i++;
325 }
326
327 // Must choose a stable sort
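/* The bubble sort below is stable, so elements with equal av_position
 * (e.g. the UINT64_MAX placeholders) keep their relative order, and the
 * arrays are small enough that its cost does not matter. */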
328 total_non_cc_elements = n = i;
329 do {
330 int next_n = 0;
331 for (i = 1; i < n; i++) {
332 if (e2c_vec[i-1].av_position > e2c_vec[i].av_position) {
333 FFSWAP(struct elem_to_channel, e2c_vec[i-1], e2c_vec[i]);
334 next_n = i;
335 }
336 }
337 n = next_n;
338 } while (n > 0);
339
340 layout = 0;
341 for (i = 0; i < total_non_cc_elements; i++) {
342 layout_map[i][0] = e2c_vec[i].syn_ele;
343 layout_map[i][1] = e2c_vec[i].elem_id;
344 layout_map[i][2] = e2c_vec[i].aac_position;
345 if (e2c_vec[i].av_position != UINT64_MAX) {
346 layout |= e2c_vec[i].av_position;
347 }
348 }
349
350 return layout;
351 }
352
353 /**
354 * Save current output configuration if and only if it has been locked.
355 */
356 static void push_output_configuration(AACContext *ac) {
357 if (ac->oc[1].status == OC_LOCKED) {
358 ac->oc[0] = ac->oc[1];
359 }
360 ac->oc[1].status = OC_NONE;
361 }
362
363 /**
364 * Restore the previous output configuration if and only if the current
365 * configuration is unlocked.
366 */
367 static void pop_output_configuration(AACContext *ac) {
368 if (ac->oc[1].status != OC_LOCKED) {
369 ac->oc[1] = ac->oc[0];
370 ac->avctx->channels = ac->oc[1].channels;
371 ac->avctx->channel_layout = ac->oc[1].channel_layout;
372 }
373 }
374
375 /**
376 * Configure output channel order based on the current program configuration element.
377 *
378 * @return Returns error status. 0 - OK, !0 - error
379 */
380 static int output_configure(AACContext *ac,
381 uint8_t layout_map[MAX_ELEM_ID*4][3], int tags,
382 int channel_config, enum OCStatus oc_type)
383 {
384 AVCodecContext *avctx = ac->avctx;
385 int i, channels = 0, ret;
386 uint64_t layout = 0;
387
388 if (ac->oc[1].layout_map != layout_map) {
389 memcpy(ac->oc[1].layout_map, layout_map, tags * sizeof(layout_map[0]));
390 ac->oc[1].layout_map_tags = tags;
391 }
392
393 // Try to sniff a reasonable channel order, otherwise output the
394 // channels in the order the PCE declared them.
395 if (avctx->request_channel_layout != AV_CH_LAYOUT_NATIVE)
396 layout = sniff_channel_order(layout_map, tags);
397 for (i = 0; i < tags; i++) {
398 int type = layout_map[i][0];
399 int id = layout_map[i][1];
400 int position = layout_map[i][2];
401 // Allocate or free elements depending on if they are in the
402 // current program configuration.
403 ret = che_configure(ac, position, type, id, &channels);
404 if (ret < 0)
405 return ret;
406 }
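/* HE-AACv2: a Parametric Stereo stream is coded as a single SCE but
 * decodes to two channels, so promote the sniffed mono layout to stereo;
 * any other two-channel PS combination is left as an unspecified layout. */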
407 if (ac->oc[1].m4ac.ps == 1 && channels == 2) {
408 if (layout == AV_CH_FRONT_CENTER) {
409 layout = AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT;
410 } else {
411 layout = 0;
412 }
413 }
414
415 memcpy(ac->tag_che_map, ac->che, 4 * MAX_ELEM_ID * sizeof(ac->che[0][0]));
416 avctx->channel_layout = ac->oc[1].channel_layout = layout;
417 avctx->channels = ac->oc[1].channels = channels;
418 ac->oc[1].status = oc_type;
419
420 return 0;
421 }
422
423 /**
424 * Set up channel positions based on a default channel configuration
425 * as specified in table 1.17.
426 *
427 * @return Returns error status. 0 - OK, !0 - error
428 */
429 static int set_default_channel_config(AVCodecContext *avctx,
430 uint8_t (*layout_map)[3],
431 int *tags,
432 int channel_config)
433 {
434 if (channel_config < 1 || channel_config > 7) {
435 av_log(avctx, AV_LOG_ERROR, "invalid default channel configuration (%d)\n",
436 channel_config);
437 return -1;
438 }
439 *tags = tags_per_config[channel_config];
440 memcpy(layout_map, aac_channel_layout_map[channel_config-1], *tags * sizeof(*layout_map));
441 return 0;
442 }
443
444 static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
445 {
446 // For PCE based channel configurations map the channels solely based on tags.
447 if (!ac->oc[1].m4ac.chan_config) {
448 return ac->tag_che_map[type][elem_id];
449 }
450 // Allow single CPE stereo files to be signalled with mono configuration.
451 if (!ac->tags_mapped && type == TYPE_CPE && ac->oc[1].m4ac.chan_config == 1) {
452 uint8_t layout_map[MAX_ELEM_ID*4][3];
453 int layout_map_tags;
454 push_output_configuration(ac);
455
456 if (set_default_channel_config(ac->avctx, layout_map, &layout_map_tags,
457 2) < 0)
458 return NULL;
459 if (output_configure(ac, layout_map, layout_map_tags,
460 2, OC_TRIAL_FRAME) < 0)
461 return NULL;
462
463 ac->oc[1].m4ac.chan_config = 2;
464 }
465 // And vice-versa
466 if (!ac->tags_mapped && type == TYPE_SCE && ac->oc[1].m4ac.chan_config == 2) {
467 uint8_t layout_map[MAX_ELEM_ID*4][3];
468 int layout_map_tags;
469 push_output_configuration(ac);
470
471 if (set_default_channel_config(ac->avctx, layout_map, &layout_map_tags,
472 1) < 0)
473 return NULL;
474 if (output_configure(ac, layout_map, layout_map_tags,
475 1, OC_TRIAL_FRAME) < 0)
476 return NULL;
477
478 ac->oc[1].m4ac.chan_config = 1;
479 }
480 // For indexed channel configurations map the channels solely based on position.
481 switch (ac->oc[1].m4ac.chan_config) {
482 case 7:
483 if (ac->tags_mapped == 3 && type == TYPE_CPE) {
484 ac->tags_mapped++;
485 return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][2];
486 }
487 case 6:
488 /* Some streams incorrectly code 5.1 audio as SCE[0] CPE[0] CPE[1] SCE[1]
489 instead of SCE[0] CPE[0] CPE[1] LFE[0]. If we seem to have
490 encountered such a stream, transfer the LFE[0] element to the SCE[1]'s mapping */
491 if (ac->tags_mapped == tags_per_config[ac->oc[1].m4ac.chan_config] - 1 && (type == TYPE_LFE || type == TYPE_SCE)) {
492 ac->tags_mapped++;
493 return ac->tag_che_map[type][elem_id] = ac->che[TYPE_LFE][0];
494 }
495 case 5:
496 if (ac->tags_mapped == 2 && type == TYPE_CPE) {
497 ac->tags_mapped++;
498 return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][1];
499 }
500 case 4:
501 if (ac->tags_mapped == 2 && ac->oc[1].m4ac.chan_config == 4 && type == TYPE_SCE) {
502 ac->tags_mapped++;
503 return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][1];
504 }
505 case 3:
506 case 2:
507 if (ac->tags_mapped == (ac->oc[1].m4ac.chan_config != 2) && type == TYPE_CPE) {
508 ac->tags_mapped++;
509 return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][0];
510 } else if (ac->oc[1].m4ac.chan_config == 2) {
511 return NULL;
512 }
513 case 1:
514 if (!ac->tags_mapped && type == TYPE_SCE) {
515 ac->tags_mapped++;
516 return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][0];
517 }
518 default:
519 return NULL;
520 }
521 }
522
523 /**
524 * Decode an array of 4 bit element IDs, optionally interleaved with a stereo/mono switching bit.
525 *
526 * @param type speaker type/position for these channels
527 */
528 static void decode_channel_map(uint8_t layout_map[][3],
529 enum ChannelPosition type,
530 GetBitContext *gb, int n)
531 {
532 while (n--) {
533 enum RawDataBlockType syn_ele;
534 switch (type) {
535 case AAC_CHANNEL_FRONT:
536 case AAC_CHANNEL_BACK:
537 case AAC_CHANNEL_SIDE:
538 syn_ele = get_bits1(gb);
539 break;
540 case AAC_CHANNEL_CC:
541 skip_bits1(gb);
542 syn_ele = TYPE_CCE;
543 break;
544 case AAC_CHANNEL_LFE:
545 syn_ele = TYPE_LFE;
546 break;
547 }
548 layout_map[0][0] = syn_ele;
549 layout_map[0][1] = get_bits(gb, 4);
550 layout_map[0][2] = type;
551 layout_map++;
552 }
553 }
554
555 /**
556 * Decode program configuration element; reference: table 4.2.
557 *
558 * @return Returns error status. 0 - OK, !0 - error
559 */
560 static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac,
561 uint8_t (*layout_map)[3],
562 GetBitContext *gb)
563 {
564 int num_front, num_side, num_back, num_lfe, num_assoc_data, num_cc, sampling_index;
565 int comment_len;
566 int tags;
567
568 skip_bits(gb, 2); // object_type
569
570 sampling_index = get_bits(gb, 4);
571 if (m4ac->sampling_index != sampling_index)
572 av_log(avctx, AV_LOG_WARNING, "Sample rate index in program config element does not match the sample rate index configured by the container.\n");
573
574 num_front = get_bits(gb, 4);
575 num_side = get_bits(gb, 4);
576 num_back = get_bits(gb, 4);
577 num_lfe = get_bits(gb, 2);
578 num_assoc_data = get_bits(gb, 3);
579 num_cc = get_bits(gb, 4);
580
581 if (get_bits1(gb))
582 skip_bits(gb, 4); // mono_mixdown_tag
583 if (get_bits1(gb))
584 skip_bits(gb, 4); // stereo_mixdown_tag
585
586 if (get_bits1(gb))
587 skip_bits(gb, 3); // mixdown_coeff_index and pseudo_surround
588
589 decode_channel_map(layout_map , AAC_CHANNEL_FRONT, gb, num_front);
590 tags = num_front;
591 decode_channel_map(layout_map + tags, AAC_CHANNEL_SIDE, gb, num_side);
592 tags += num_side;
593 decode_channel_map(layout_map + tags, AAC_CHANNEL_BACK, gb, num_back);
594 tags += num_back;
595 decode_channel_map(layout_map + tags, AAC_CHANNEL_LFE, gb, num_lfe);
596 tags += num_lfe;
597
598 skip_bits_long(gb, 4 * num_assoc_data);
599
600 decode_channel_map(layout_map + tags, AAC_CHANNEL_CC, gb, num_cc);
601 tags += num_cc;
602
603 align_get_bits(gb);
604
605 /* comment field, first byte is length */
606 comment_len = get_bits(gb, 8) * 8;
607 if (get_bits_left(gb) < comment_len) {
608 av_log(avctx, AV_LOG_ERROR, overread_err);
609 return -1;
610 }
611 skip_bits_long(gb, comment_len);
612 return tags;
613 }
614
615 /**
616 * Decode GA "General Audio" specific configuration; reference: table 4.1.
617 *
618 * @param ac pointer to AACContext, may be null
619 * @param avctx pointer to AVCodecContext, used for logging
620 *
621 * @return Returns error status. 0 - OK, !0 - error
622 */
623 static int decode_ga_specific_config(AACContext *ac, AVCodecContext *avctx,
624 GetBitContext *gb,
625 MPEG4AudioConfig *m4ac,
626 int channel_config)
627 {
628 int extension_flag, ret;
629 uint8_t layout_map[MAX_ELEM_ID*4][3];
630 int tags = 0;
631
632 if (get_bits1(gb)) { // frameLengthFlag
633 av_log_missing_feature(avctx, "960/120 MDCT window is", 1);
634 return -1;
635 }
636
637 if (get_bits1(gb)) // dependsOnCoreCoder
638 skip_bits(gb, 14); // coreCoderDelay
639 extension_flag = get_bits1(gb);
640
641 if (m4ac->object_type == AOT_AAC_SCALABLE ||
642 m4ac->object_type == AOT_ER_AAC_SCALABLE)
643 skip_bits(gb, 3); // layerNr
644
645 if (channel_config == 0) {
646 skip_bits(gb, 4); // element_instance_tag
647 tags = decode_pce(avctx, m4ac, layout_map, gb);
648 if (tags < 0)
649 return tags;
650 } else {
651 if ((ret = set_default_channel_config(avctx, layout_map, &tags, channel_config)))
652 return ret;
653 }
654
655 if (count_channels(layout_map, tags) > 1) {
656 m4ac->ps = 0;
657 } else if (m4ac->sbr == 1 && m4ac->ps == -1)
658 m4ac->ps = 1;
659
660 if (ac && (ret = output_configure(ac, layout_map, tags,
661 channel_config, OC_GLOBAL_HDR)))
662 return ret;
663
664 if (extension_flag) {
665 switch (m4ac->object_type) {
666 case AOT_ER_BSAC:
667 skip_bits(gb, 5); // numOfSubFrame
668 skip_bits(gb, 11); // layer_length
669 break;
670 case AOT_ER_AAC_LC:
671 case AOT_ER_AAC_LTP:
672 case AOT_ER_AAC_SCALABLE:
673 case AOT_ER_AAC_LD:
674 skip_bits(gb, 3); /* aacSectionDataResilienceFlag
675 * aacScalefactorDataResilienceFlag
676 * aacSpectralDataResilienceFlag
677 */
678 break;
679 }
680 skip_bits1(gb); // extensionFlag3 (TBD in version 3)
681 }
682 return 0;
683 }
684
685 /**
686 * Decode audio specific configuration; reference: table 1.13.
687 *
688 * @param ac pointer to AACContext, may be null
689 * @param avctx pointer to AVCodecContext, used for logging
690 * @param m4ac pointer to MPEG4AudioConfig, used for parsing
691 * @param data pointer to buffer holding an audio specific config
692 * @param bit_size size of audio specific config or data in bits
693 * @param sync_extension look for an appended sync extension
694 *
695 * @return Returns error status or number of consumed bits. <0 - error
696 */
697 static int decode_audio_specific_config(AACContext *ac,
698 AVCodecContext *avctx,
699 MPEG4AudioConfig *m4ac,
700 const uint8_t *data, int bit_size,
701 int sync_extension)
702 {
703 GetBitContext gb;
704 int i;
705
706 av_dlog(avctx, "extradata size %d\n", avctx->extradata_size);
707 for (i = 0; i < avctx->extradata_size; i++)
708 av_dlog(avctx, "%02x ", avctx->extradata[i]);
709 av_dlog(avctx, "\n");
710
711 init_get_bits(&gb, data, bit_size);
712
713 if ((i = avpriv_mpeg4audio_get_config(m4ac, data, bit_size, sync_extension)) < 0)
714 return -1;
715 if (m4ac->sampling_index > 12) {
716 av_log(avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", m4ac->sampling_index);
717 return -1;
718 }
719
720 skip_bits_long(&gb, i);
721
722 switch (m4ac->object_type) {
723 case AOT_AAC_MAIN:
724 case AOT_AAC_LC:
725 case AOT_AAC_LTP:
726 if (decode_ga_specific_config(ac, avctx, &gb, m4ac, m4ac->chan_config))
727 return -1;
728 break;
729 default:
730 av_log(avctx, AV_LOG_ERROR, "Audio object type %s%d is not supported.\n",
731 m4ac->sbr == 1? "SBR+" : "", m4ac->object_type);
732 return -1;
733 }
734
735 av_dlog(avctx, "AOT %d chan config %d sampling index %d (%d) SBR %d PS %d\n",
736 m4ac->object_type, m4ac->chan_config, m4ac->sampling_index,
737 m4ac->sample_rate, m4ac->sbr, m4ac->ps);
738
739 return get_bits_count(&gb);
740 }
741
742 /**
743 * linear congruential pseudorandom number generator
744 *
745 * @param previous_val pointer to the current state of the generator
746 *
747 * @return Returns a 32-bit pseudorandom integer
748 */
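/* The multiplier/increment pair (1664525, 1013904223) is the well-known
 * "Numerical Recipes" LCG; the 32-bit state is simply left to wrap around. */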
749 static av_always_inline int lcg_random(int previous_val)
750 {
751 return previous_val * 1664525 + 1013904223;
752 }
753
754 static av_always_inline void reset_predict_state(PredictorState *ps)
755 {
756 ps->r0 = 0.0f;
757 ps->r1 = 0.0f;
758 ps->cor0 = 0.0f;
759 ps->cor1 = 0.0f;
760 ps->var0 = 1.0f;
761 ps->var1 = 1.0f;
762 }
763
764 static void reset_all_predictors(PredictorState *ps)
765 {
766 int i;
767 for (i = 0; i < MAX_PREDICTORS; i++)
768 reset_predict_state(&ps[i]);
769 }
770
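/* Map an arbitrary sample rate to the nearest standard sampling frequency
 * index; the thresholds follow the sampling frequency mapping given in the
 * MPEG-4 Audio specification. */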
771 static int sample_rate_idx (int rate)
772 {
773 if (92017 <= rate) return 0;
774 else if (75132 <= rate) return 1;
775 else if (55426 <= rate) return 2;
776 else if (46009 <= rate) return 3;
777 else if (37566 <= rate) return 4;
778 else if (27713 <= rate) return 5;
779 else if (23004 <= rate) return 6;
780 else if (18783 <= rate) return 7;
781 else if (13856 <= rate) return 8;
782 else if (11502 <= rate) return 9;
783 else if (9391 <= rate) return 10;
784 else return 11;
785 }
786
787 static void reset_predictor_group(PredictorState *ps, int group_num)
788 {
789 int i;
790 for (i = group_num - 1; i < MAX_PREDICTORS; i += 30)
791 reset_predict_state(&ps[i]);
792 }
793
794 #define AAC_INIT_VLC_STATIC(num, size) \
795 INIT_VLC_STATIC(&vlc_spectral[num], 8, ff_aac_spectral_sizes[num], \
796 ff_aac_spectral_bits[num], sizeof( ff_aac_spectral_bits[num][0]), sizeof( ff_aac_spectral_bits[num][0]), \
797 ff_aac_spectral_codes[num], sizeof(ff_aac_spectral_codes[num][0]), sizeof(ff_aac_spectral_codes[num][0]), \
798 size);
799
800 static av_cold int aac_decode_init(AVCodecContext *avctx)
801 {
802 AACContext *ac = avctx->priv_data;
803 float output_scale_factor;
804
805 ac->avctx = avctx;
806 ac->oc[1].m4ac.sample_rate = avctx->sample_rate;
807
808 if (avctx->extradata_size > 0) {
809 if (decode_audio_specific_config(ac, ac->avctx, &ac->oc[1].m4ac,
810 avctx->extradata,
811 avctx->extradata_size*8, 1) < 0)
812 return -1;
813 } else {
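/* No extradata: guess a configuration from the sample rate and channel
 * count reported by the container and leave SBR/PS undecided until the
 * bitstream itself reveals them. */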
814 int sr, i;
815 uint8_t layout_map[MAX_ELEM_ID*4][3];
816 int layout_map_tags;
817
818 sr = sample_rate_idx(avctx->sample_rate);
819 ac->oc[1].m4ac.sampling_index = sr;
820 ac->oc[1].m4ac.channels = avctx->channels;
821 ac->oc[1].m4ac.sbr = -1;
822 ac->oc[1].m4ac.ps = -1;
823
824 for (i = 0; i < FF_ARRAY_ELEMS(ff_mpeg4audio_channels); i++)
825 if (ff_mpeg4audio_channels[i] == avctx->channels)
826 break;
827 if (i == FF_ARRAY_ELEMS(ff_mpeg4audio_channels)) {
828 i = 0;
829 }
830 ac->oc[1].m4ac.chan_config = i;
831
832 if (ac->oc[1].m4ac.chan_config) {
833 int ret = set_default_channel_config(avctx, layout_map,
834 &layout_map_tags, ac->oc[1].m4ac.chan_config);
835 if (!ret)
836 output_configure(ac, layout_map, layout_map_tags,
837 ac->oc[1].m4ac.chan_config, OC_GLOBAL_HDR);
838 else if (avctx->err_recognition & AV_EF_EXPLODE)
839 return AVERROR_INVALIDDATA;
840 }
841 }
842
843 if (avctx->request_sample_fmt == AV_SAMPLE_FMT_FLT) {
844 avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
845 output_scale_factor = 1.0 / 32768.0;
846 } else {
847 avctx->sample_fmt = AV_SAMPLE_FMT_S16;
848 output_scale_factor = 1.0;
849 }
850
851 AAC_INIT_VLC_STATIC( 0, 304);
852 AAC_INIT_VLC_STATIC( 1, 270);
853 AAC_INIT_VLC_STATIC( 2, 550);
854 AAC_INIT_VLC_STATIC( 3, 300);
855 AAC_INIT_VLC_STATIC( 4, 328);
856 AAC_INIT_VLC_STATIC( 5, 294);
857 AAC_INIT_VLC_STATIC( 6, 306);
858 AAC_INIT_VLC_STATIC( 7, 268);
859 AAC_INIT_VLC_STATIC( 8, 510);
860 AAC_INIT_VLC_STATIC( 9, 366);
861 AAC_INIT_VLC_STATIC(10, 462);
862
863 ff_aac_sbr_init();
864
865 ff_dsputil_init(&ac->dsp, avctx);
866 ff_fmt_convert_init(&ac->fmt_conv, avctx);
867
868 ac->random_state = 0x1f2e3d4c;
869
870 ff_aac_tableinit();
871
872 INIT_VLC_STATIC(&vlc_scalefactors,7,FF_ARRAY_ELEMS(ff_aac_scalefactor_code),
873 ff_aac_scalefactor_bits, sizeof(ff_aac_scalefactor_bits[0]), sizeof(ff_aac_scalefactor_bits[0]),
874 ff_aac_scalefactor_code, sizeof(ff_aac_scalefactor_code[0]), sizeof(ff_aac_scalefactor_code[0]),
875 352);
876
877 ff_mdct_init(&ac->mdct, 11, 1, output_scale_factor/1024.0);
878 ff_mdct_init(&ac->mdct_small, 8, 1, output_scale_factor/128.0);
879 ff_mdct_init(&ac->mdct_ltp, 11, 0, -2.0/output_scale_factor);
880 // window initialization
881 ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
882 ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
883 ff_init_ff_sine_windows(10);
884 ff_init_ff_sine_windows( 7);
885
886 cbrt_tableinit();
887
888 avcodec_get_frame_defaults(&ac->frame);
889 avctx->coded_frame = &ac->frame;
890
891 return 0;
892 }
893
894 /**
895 * Skip data_stream_element; reference: table 4.10.
896 */
897 static int skip_data_stream_element(AACContext *ac, GetBitContext *gb)
898 {
899 int byte_align = get_bits1(gb);
900 int count = get_bits(gb, 8);
901 if (count == 255)
902 count += get_bits(gb, 8);
903 if (byte_align)
904 align_get_bits(gb);
905
906 if (get_bits_left(gb) < 8 * count) {
907 av_log(ac->avctx, AV_LOG_ERROR, overread_err);
908 return -1;
909 }
910 skip_bits_long(gb, 8 * count);
911 return 0;
912 }
913
914 static int decode_prediction(AACContext *ac, IndividualChannelStream *ics,
915 GetBitContext *gb)
916 {
917 int sfb;
918 if (get_bits1(gb)) {
919 ics->predictor_reset_group = get_bits(gb, 5);
920 if (ics->predictor_reset_group == 0 || ics->predictor_reset_group > 30) {
921 av_log(ac->avctx, AV_LOG_ERROR, "Invalid Predictor Reset Group.\n");
922 return -1;
923 }
924 }
925 for (sfb = 0; sfb < FFMIN(ics->max_sfb, ff_aac_pred_sfb_max[ac->oc[1].m4ac.sampling_index]); sfb++) {
926 ics->prediction_used[sfb] = get_bits1(gb);
927 }
928 return 0;
929 }
930
931 /**
932 * Decode Long Term Prediction data; reference: table 4.xx.
933 */
934 static void decode_ltp(AACContext *ac, LongTermPrediction *ltp,
935 GetBitContext *gb, uint8_t max_sfb)
936 {
937 int sfb;
938
939 ltp->lag = get_bits(gb, 11);
940 ltp->coef = ltp_coef[get_bits(gb, 3)];
941 for (sfb = 0; sfb < FFMIN(max_sfb, MAX_LTP_LONG_SFB); sfb++)
942 ltp->used[sfb] = get_bits1(gb);
943 }
944
945 /**
946 * Decode Individual Channel Stream info; reference: table 4.6.
947 */
948 static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
949 GetBitContext *gb)
950 {
951 if (get_bits1(gb)) {
952 av_log(ac->avctx, AV_LOG_ERROR, "Reserved bit set.\n");
953 return AVERROR_INVALIDDATA;
954 }
955 ics->window_sequence[1] = ics->window_sequence[0];
956 ics->window_sequence[0] = get_bits(gb, 2);
957 ics->use_kb_window[1] = ics->use_kb_window[0];
958 ics->use_kb_window[0] = get_bits1(gb);
959 ics->num_window_groups = 1;
960 ics->group_len[0] = 1;
961 if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
962 int i;
963 ics->max_sfb = get_bits(gb, 4);
964 for (i = 0; i < 7; i++) {
965 if (get_bits1(gb)) {
966 ics->group_len[ics->num_window_groups - 1]++;
967 } else {
968 ics->num_window_groups++;
969 ics->group_len[ics->num_window_groups - 1] = 1;
970 }
971 }
972 ics->num_windows = 8;
973 ics->swb_offset = ff_swb_offset_128[ac->oc[1].m4ac.sampling_index];
974 ics->num_swb = ff_aac_num_swb_128[ac->oc[1].m4ac.sampling_index];
975 ics->tns_max_bands = ff_tns_max_bands_128[ac->oc[1].m4ac.sampling_index];
976 ics->predictor_present = 0;
977 } else {
978 ics->max_sfb = get_bits(gb, 6);
979 ics->num_windows = 1;
980 ics->swb_offset = ff_swb_offset_1024[ac->oc[1].m4ac.sampling_index];
981 ics->num_swb = ff_aac_num_swb_1024[ac->oc[1].m4ac.sampling_index];
982 ics->tns_max_bands = ff_tns_max_bands_1024[ac->oc[1].m4ac.sampling_index];
983 ics->predictor_present = get_bits1(gb);
984 ics->predictor_reset_group = 0;
985 if (ics->predictor_present) {
986 if (ac->oc[1].m4ac.object_type == AOT_AAC_MAIN) {
987 if (decode_prediction(ac, ics, gb)) {
988 return AVERROR_INVALIDDATA;
989 }
990 } else if (ac->oc[1].m4ac.object_type == AOT_AAC_LC) {
991 av_log(ac->avctx, AV_LOG_ERROR, "Prediction is not allowed in AAC-LC.\n");
992 return AVERROR_INVALIDDATA;
993 } else {
994 if ((ics->ltp.present = get_bits(gb, 1)))
995 decode_ltp(ac, &ics->ltp, gb, ics->max_sfb);
996 }
997 }
998 }
999
1000 if (ics->max_sfb > ics->num_swb) {
1001 av_log(ac->avctx, AV_LOG_ERROR,
1002 "Number of scalefactor bands in group (%d) exceeds limit (%d).\n",
1003 ics->max_sfb, ics->num_swb);
1004 return AVERROR_INVALIDDATA;
1005 }
1006
1007 return 0;
1008 }
1009
1010 /**
1011 * Decode band types (section_data payload); reference: table 4.46.
1012 *
1013 * @param band_type array of the used band type
1014 * @param band_type_run_end array of the last scalefactor band of a band type run
1015 *
1016 * @return Returns error status. 0 - OK, !0 - error
1017 */
1018 static int decode_band_types(AACContext *ac, enum BandType band_type[120],
1019 int band_type_run_end[120], GetBitContext *gb,
1020 IndividualChannelStream *ics)
1021 {
1022 int g, idx = 0;
1023 const int bits = (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) ? 3 : 5;
1024 for (g = 0; g < ics->num_window_groups; g++) {
1025 int k = 0;
1026 while (k < ics->max_sfb) {
1027 uint8_t sect_end = k;
1028 int sect_len_incr;
1029 int sect_band_type = get_bits(gb, 4);
1030 if (sect_band_type == 12) {
1031 av_log(ac->avctx, AV_LOG_ERROR, "invalid band type\n");
1032 return -1;
1033 }
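/* Section lengths are escape coded: a field of all ones ((1 << bits) - 1)
 * means another length field follows. */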
1034 do {
1035 sect_len_incr = get_bits(gb, bits);
1036 sect_end += sect_len_incr;
1037 if (get_bits_left(gb) < 0) {
1038 av_log(ac->avctx, AV_LOG_ERROR, overread_err);
1039 return -1;
1040 }
1041 if (sect_end > ics->max_sfb) {
1042 av_log(ac->avctx, AV_LOG_ERROR,
1043 "Number of bands (%d) exceeds limit (%d).\n",
1044 sect_end, ics->max_sfb);
1045 return -1;
1046 }
1047 } while (sect_len_incr == (1 << bits) - 1);
1048 for (; k < sect_end; k++) {
1049 band_type [idx] = sect_band_type;
1050 band_type_run_end[idx++] = sect_end;
1051 }
1052 }
1053 }
1054 return 0;
1055 }
1056
1057 /**
1058 * Decode scalefactors; reference: table 4.47.
1059 *
1060 * @param global_gain first scalefactor value as scalefactors are differentially coded
1061 * @param band_type array of the used band type
1062 * @param band_type_run_end array of the last scalefactor band of a band type run
1063 * @param sf array of scalefactors or intensity stereo positions
1064 *
1065 * @return Returns error status. 0 - OK, !0 - error
1066 */
1067 static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb,
1068 unsigned int global_gain,
1069 IndividualChannelStream *ics,
1070 enum BandType band_type[120],
1071 int band_type_run_end[120])
1072 {
1073 int g, i, idx = 0;
1074 int offset[3] = { global_gain, global_gain - 90, 0 };
1075 int clipped_offset;
1076 int noise_flag = 1;
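/* offset[0..2] hold the running DPCM state for ordinary scalefactors,
 * noise energies (PNS) and intensity stereo positions respectively; only
 * the first noise energy uses a raw 9-bit delta instead of the VLC. */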
1077 for (g = 0; g < ics->num_window_groups; g++) {
1078 for (i = 0; i < ics->max_sfb;) {
1079 int run_end = band_type_run_end[idx];
1080 if (band_type[idx] == ZERO_BT) {
1081 for (; i < run_end; i++, idx++)
1082 sf[idx] = 0.;
1083 } else if ((band_type[idx] == INTENSITY_BT) || (band_type[idx] == INTENSITY_BT2)) {
1084 for (; i < run_end; i++, idx++) {
1085 offset[2] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
1086 clipped_offset = av_clip(offset[2], -155, 100);
1087 if (offset[2] != clipped_offset) {
1088 av_log_ask_for_sample(ac->avctx, "Intensity stereo "
1089 "position clipped (%d -> %d).\nIf you heard an "
1090 "audible artifact, there may be a bug in the "
1091 "decoder. ", offset[2], clipped_offset);
1092 }
1093 sf[idx] = ff_aac_pow2sf_tab[-clipped_offset + POW_SF2_ZERO];
1094 }
1095 } else if (band_type[idx] == NOISE_BT) {
1096 for (; i < run_end; i++, idx++) {
1097 if (noise_flag-- > 0)
1098 offset[1] += get_bits(gb, 9) - 256;
1099 else
1100 offset[1] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
1101 clipped_offset = av_clip(offset[1], -100, 155);
1102 if (offset[1] != clipped_offset) {
1103 av_log_ask_for_sample(ac->avctx, "Noise gain clipped "
1104 "(%d -> %d).\nIf you heard an audible "
1105 "artifact, there may be a bug in the decoder. ",
1106 offset[1], clipped_offset);
1107 }
1108 sf[idx] = -ff_aac_pow2sf_tab[clipped_offset + POW_SF2_ZERO];
1109 }
1110 } else {
1111 for (; i < run_end; i++, idx++) {
1112 offset[0] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
1113 if (offset[0] > 255U) {
1114 av_log(ac->avctx, AV_LOG_ERROR,
1115 "Scalefactor (%d) out of range.\n", offset[0]);
1116 return -1;
1117 }
1118 sf[idx] = -ff_aac_pow2sf_tab[offset[0] - 100 + POW_SF2_ZERO];
1119 }
1120 }
1121 }
1122 }
1123 return 0;
1124 }
1125
1126 /**
1127 * Decode pulse data; reference: table 4.7.
1128 */
1129 static int decode_pulses(Pulse *pulse, GetBitContext *gb,
1130 const uint16_t *swb_offset, int num_swb)
1131 {
1132 int i, pulse_swb;
1133 pulse->num_pulse = get_bits(gb, 2) + 1;
1134 pulse_swb = get_bits(gb, 6);
1135 if (pulse_swb >= num_swb)
1136 return -1;
1137 pulse->pos[0] = swb_offset[pulse_swb];
1138 pulse->pos[0] += get_bits(gb, 5);
1139 if (pulse->pos[0] > 1023)
1140 return -1;
1141 pulse->amp[0] = get_bits(gb, 4);
1142 for (i = 1; i < pulse->num_pulse; i++) {
1143 pulse->pos[i] = get_bits(gb, 5) + pulse->pos[i - 1];
1144 if (pulse->pos[i] > 1023)
1145 return -1;
1146 pulse->amp[i] = get_bits(gb, 4);
1147 }
1148 return 0;
1149 }
1150
1151 /**
1152 * Decode Temporal Noise Shaping data; reference: table 4.48.
1153 *
1154 * @return Returns error status. 0 - OK, !0 - error
1155 */
1156 static int decode_tns(AACContext *ac, TemporalNoiseShaping *tns,
1157 GetBitContext *gb, const IndividualChannelStream *ics)
1158 {
1159 int w, filt, i, coef_len, coef_res, coef_compress;
1160 const int is8 = ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE;
1161 const int tns_max_order = is8 ? 7 : ac->oc[1].m4ac.object_type == AOT_AAC_MAIN ? 20 : 12;
1162 for (w = 0; w < ics->num_windows; w++) {
1163 if ((tns->n_filt[w] = get_bits(gb, 2 - is8))) {
1164 coef_res = get_bits1(gb);
1165
1166 for (filt = 0; filt < tns->n_filt[w]; filt++) {
1167 int tmp2_idx;
1168 tns->length[w][filt] = get_bits(gb, 6 - 2 * is8);
1169
1170 if ((tns->order[w][filt] = get_bits(gb, 5 - 2 * is8)) > tns_max_order) {
1171 av_log(ac->avctx, AV_LOG_ERROR, "TNS filter order %d is greater than maximum %d.\n",
1172 tns->order[w][filt], tns_max_order);
1173 tns->order[w][filt] = 0;
1174 return -1;
1175 }
1176 if (tns->order[w][filt]) {
1177 tns->direction[w][filt] = get_bits1(gb);
1178 coef_compress = get_bits1(gb);
1179 coef_len = coef_res + 3 - coef_compress;
1180 tmp2_idx = 2 * coef_compress + coef_res;
1181
1182 for (i = 0; i < tns->order[w][filt]; i++)
1183 tns->coef[w][filt][i] = tns_tmp2_map[tmp2_idx][get_bits(gb, coef_len)];
1184 }
1185 }
1186 }
1187 }
1188 return 0;
1189 }
1190
1191 /**
1192 * Decode Mid/Side data; reference: table 4.54.
1193 *
1194 * @param ms_present Indicates mid/side stereo presence. [0] mask is all 0s;
1195 * [1] mask is decoded from bitstream; [2] mask is all 1s;
1196 * [3] reserved for scalable AAC
1197 */
1198 static void decode_mid_side_stereo(ChannelElement *cpe, GetBitContext *gb,
1199 int ms_present)
1200 {
1201 int idx;
1202 if (ms_present == 1) {
1203 for (idx = 0; idx < cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb; idx++)
1204 cpe->ms_mask[idx] = get_bits1(gb);
1205 } else if (ms_present == 2) {
1206 memset(cpe->ms_mask, 1, cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb * sizeof(cpe->ms_mask[0]));
1207 }
1208 }
1209
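/* The VMUL* helpers expand 2- or 4-tuple codebook entries and apply the
 * scalefactor (and, for the *S variants, separately coded sign bits);
 * platform code such as arm/aac.h may supply optimized replacements,
 * hence the #ifndef guards. */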
1210 #ifndef VMUL2
1211 static inline float *VMUL2(float *dst, const float *v, unsigned idx,
1212 const float *scale)
1213 {
1214 float s = *scale;
1215 *dst++ = v[idx & 15] * s;
1216 *dst++ = v[idx>>4 & 15] * s;
1217 return dst;
1218 }
1219 #endif
1220
1221 #ifndef VMUL4
1222 static inline float *VMUL4(float *dst, const float *v, unsigned idx,
1223 const float *scale)
1224 {
1225 float s = *scale;
1226 *dst++ = v[idx & 3] * s;
1227 *dst++ = v[idx>>2 & 3] * s;
1228 *dst++ = v[idx>>4 & 3] * s;
1229 *dst++ = v[idx>>6 & 3] * s;
1230 return dst;
1231 }
1232 #endif
1233
1234 #ifndef VMUL2S
1235 static inline float *VMUL2S(float *dst, const float *v, unsigned idx,
1236 unsigned sign, const float *scale)
1237 {
1238 union av_intfloat32 s0, s1;
1239
1240 s0.f = s1.f = *scale;
1241 s0.i ^= sign >> 1 << 31;
1242 s1.i ^= sign << 31;
1243
1244 *dst++ = v[idx & 15] * s0.f;
1245 *dst++ = v[idx>>4 & 15] * s1.f;
1246
1247 return dst;
1248 }
1249 #endif
1250
1251 #ifndef VMUL4S
1252 static inline float *VMUL4S(float *dst, const float *v, unsigned idx,
1253 unsigned sign, const float *scale)
1254 {
1255 unsigned nz = idx >> 12;
1256 union av_intfloat32 s = { .f = *scale };
1257 union av_intfloat32 t;
1258
1259 t.i = s.i ^ (sign & 1U<<31);
1260 *dst++ = v[idx & 3] * t.f;
1261
1262 sign <<= nz & 1; nz >>= 1;
1263 t.i = s.i ^ (sign & 1U<<31);
1264 *dst++ = v[idx>>2 & 3] * t.f;
1265
1266 sign <<= nz & 1; nz >>= 1;
1267 t.i = s.i ^ (sign & 1U<<31);
1268 *dst++ = v[idx>>4 & 3] * t.f;
1269
1270 sign <<= nz & 1; nz >>= 1;
1271 t.i = s.i ^ (sign & 1U<<31);
1272 *dst++ = v[idx>>6 & 3] * t.f;
1273
1274 return dst;
1275 }
1276 #endif
1277
1278 /**
1279 * Decode spectral data; reference: table 4.50.
1280 * Dequantize and scale spectral data; reference: 4.6.3.3.
1281 *
1282 * @param coef array of dequantized, scaled spectral data
1283 * @param sf array of scalefactors or intensity stereo positions
1284 * @param pulse_present set if pulses are present
1285 * @param pulse pointer to pulse data struct
1286 * @param band_type array of the used band type
1287 *
1288 * @return Returns error status. 0 - OK, !0 - error
1289 */
1290 static int decode_spectrum_and_dequant(AACContext *ac, float coef[1024],
1291 GetBitContext *gb, const float sf[120],
1292 int pulse_present, const Pulse *pulse,
1293 const IndividualChannelStream *ics,
1294 enum BandType band_type[120])
1295 {
1296 int i, k, g, idx = 0;
1297 const int c = 1024 / ics->num_windows;
1298 const uint16_t *offsets = ics->swb_offset;
1299 float *coef_base = coef;
1300
1301 for (g = 0; g < ics->num_windows; g++)
1302 memset(coef + g * 128 + offsets[ics->max_sfb], 0, sizeof(float) * (c - offsets[ics->max_sfb]));
1303
1304 for (g = 0; g < ics->num_window_groups; g++) {
1305 unsigned g_len = ics->group_len[g];
1306
1307 for (i = 0; i < ics->max_sfb; i++, idx++) {
1308 const unsigned cbt_m1 = band_type[idx] - 1;
1309 float *cfo = coef + offsets[i];
1310 int off_len = offsets[i + 1] - offsets[i];
1311 int group;
1312
1313 if (cbt_m1 >= INTENSITY_BT2 - 1) {
1314 for (group = 0; group < g_len; group++, cfo+=128) {
1315 memset(cfo, 0, off_len * sizeof(float));
1316 }
1317 } else if (cbt_m1 == NOISE_BT - 1) {
1318 for (group = 0; group < g_len; group++, cfo+=128) {
1319 float scale;
1320 float band_energy;
1321
1322 for (k = 0; k < off_len; k++) {
1323 ac->random_state = lcg_random(ac->random_state);
1324 cfo[k] = ac->random_state;
1325 }
1326
1327 band_energy = ac->dsp.scalarproduct_float(cfo, cfo, off_len);
1328 scale = sf[idx] / sqrtf(band_energy);
1329 ac->dsp.vector_fmul_scalar(cfo, cfo, scale, off_len);
1330 }
1331 } else {
1332 const float *vq = ff_aac_codebook_vector_vals[cbt_m1];
1333 const uint16_t *cb_vector_idx = ff_aac_codebook_vector_idx[cbt_m1];
1334 VLC_TYPE (*vlc_tab)[2] = vlc_spectral[cbt_m1].table;
1335 OPEN_READER(re, gb);
1336
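/* cbt_m1 >> 1 selects the decoding strategy by codebook class: codebooks
 * 1-2 are 4-tuples with signs inside the codebook, 3-4 are 4-tuples with
 * separate sign bits, 5-6 are pairs with signs inside the codebook, 7-10
 * are pairs with separate sign bits, and codebook 11 (default case) uses
 * escape sequences. */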
1337 switch (cbt_m1 >> 1) {
1338 case 0:
1339 for (group = 0; group < g_len; group++, cfo+=128) {
1340 float *cf = cfo;
1341 int len = off_len;
1342
1343 do {
1344 int code;
1345 unsigned cb_idx;
1346
1347 UPDATE_CACHE(re, gb);
1348 GET_VLC(code, re, gb, vlc_tab, 8, 2);
1349 cb_idx = cb_vector_idx[code];
1350 cf = VMUL4(cf, vq, cb_idx, sf + idx);
1351 } while (len -= 4);
1352 }
1353 break;
1354
1355 case 1:
1356 for (group = 0; group < g_len; group++, cfo+=128) {
1357 float *cf = cfo;
1358 int len = off_len;
1359
1360 do {
1361 int code;
1362 unsigned nnz;
1363 unsigned cb_idx;
1364 uint32_t bits;
1365
1366 UPDATE_CACHE(re, gb);
1367 GET_VLC(code, re, gb, vlc_tab, 8, 2);
1368 cb_idx = cb_vector_idx[code];
1369 nnz = cb_idx >> 8 & 15;
1370 bits = nnz ? GET_CACHE(re, gb) : 0;
1371 LAST_SKIP_BITS(re, gb, nnz);
1372 cf = VMUL4S(cf, vq, cb_idx, bits, sf + idx);
1373 } while (len -= 4);
1374 }
1375 break;
1376
1377 case 2:
1378 for (group = 0; group < g_len; group++, cfo+=128) {
1379 float *cf = cfo;
1380 int len = off_len;
1381
1382 do {
1383 int code;
1384 unsigned cb_idx;
1385
1386 UPDATE_CACHE(re, gb);
1387 GET_VLC(code, re, gb, vlc_tab, 8, 2);
1388 cb_idx = cb_vector_idx[code];
1389 cf = VMUL2(cf, vq, cb_idx, sf + idx);
1390 } while (len -= 2);
1391 }
1392 break;
1393
1394 case 3:
1395 case 4:
1396 for (group = 0; group < g_len; group++, cfo+=128) {
1397 float *cf = cfo;
1398 int len = off_len;
1399
1400 do {
1401 int code;
1402 unsigned nnz;
1403 unsigned cb_idx;
1404 unsigned sign;
1405
1406 UPDATE_CACHE(re, gb);
1407 GET_VLC(code, re, gb, vlc_tab, 8, 2);
1408 cb_idx = cb_vector_idx[code];
1409 nnz = cb_idx >> 8 & 15;
1410 sign = nnz ? SHOW_UBITS(re, gb, nnz) << (cb_idx >> 12) : 0;
1411 LAST_SKIP_BITS(re, gb, nnz);
1412 cf = VMUL2S(cf, vq, cb_idx, sign, sf + idx);
1413 } while (len -= 2);
1414 }
1415 break;
1416
1417 default:
1418 for (group = 0; group < g_len; group++, cfo+=128) {
1419 float *cf = cfo;
1420 uint32_t *icf = (uint32_t *) cf;
1421 int len = off_len;
1422
1423 do {
1424 int code;
1425 unsigned nzt, nnz;
1426 unsigned cb_idx;
1427 uint32_t bits;
1428 int j;
1429
1430 UPDATE_CACHE(re, gb);
1431 GET_VLC(code, re, gb, vlc_tab, 8, 2);
1432
1433 if (!code) {
1434 *icf++ = 0;
1435 *icf++ = 0;
1436 continue;
1437 }
1438
1439 cb_idx = cb_vector_idx[code];
1440 nnz = cb_idx >> 12;
1441 nzt = cb_idx >> 8;
1442 bits = SHOW_UBITS(re, gb, nnz) << (32-nnz);
1443 LAST_SKIP_BITS(re, gb, nnz);
1444
1445 for (j = 0; j < 2; j++) {
1446 if (nzt & 1<<j) {
1447 uint32_t b;
1448 int n;
1449 /* The total length of escape_sequence must be < 22 bits according
1450 to the specification (i.e. max is 111111110xxxxxxxxxxxx). */
1451 UPDATE_CACHE(re, gb);
1452 b = GET_CACHE(re, gb);
1453 b = 31 - av_log2(~b);
1454
1455 if (b > 8) {
1456 av_log(ac->avctx, AV_LOG_ERROR, "error in spectral data, ESC overflow\n");
1457 return -1;
1458 }
1459
1460 SKIP_BITS(re, gb, b + 1);
1461 b += 4;
1462 n = (1 << b) + SHOW_UBITS(re, gb, b);
1463 LAST_SKIP_BITS(re, gb, b);
1464 *icf++ = cbrt_tab[n] | (bits & 1U<<31);
1465 bits <<= 1;
1466 } else {
1467 unsigned v = ((const uint32_t*)vq)[cb_idx & 15];
1468 *icf++ = (bits & 1U<<31) | v;
1469 bits <<= !!v;
1470 }
1471 cb_idx >>= 4;
1472 }
1473 } while (len -= 2);
1474
1475 ac->dsp.vector_fmul_scalar(cfo, cfo, sf[idx], off_len);
1476 }
1477 }
1478
1479 CLOSE_READER(re, gb);
1480 }
1481 }
1482 coef += g_len << 7;
1483 }
1484
1485 if (pulse_present) {
1486 idx = 0;
1487 for (i = 0; i < pulse->num_pulse; i++) {
1488 float co = coef_base[ pulse->pos[i] ];
1489 while (offsets[idx + 1] <= pulse->pos[i])
1490 idx++;
1491 if (band_type[idx] != NOISE_BT && sf[idx]) {
1492 float ico = -pulse->amp[i];
1493 if (co) {
1494 co /= sf[idx];
1495 ico = co / sqrtf(sqrtf(fabsf(co))) + (co > 0 ? -ico : ico);
1496 }
1497 coef_base[ pulse->pos[i] ] = cbrtf(fabsf(ico)) * ico * sf[idx];
1498 }
1499 }
1500 }
1501 return 0;
1502 }
1503
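/* The flt16_* helpers emulate the reduced-precision arithmetic the AAC Main
 * backward predictor is specified with, by rounding or truncating away the
 * low 16 bits of an IEEE-754 single-precision value. */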
1504 static av_always_inline float flt16_round(float pf)
1505 {
1506 union av_intfloat32 tmp;
1507 tmp.f = pf;
1508 tmp.i = (tmp.i + 0x00008000U) & 0xFFFF0000U;
1509 return tmp.f;
1510 }
1511
1512 static av_always_inline float flt16_even(float pf)
1513 {
1514 union av_intfloat32 tmp;
1515 tmp.f = pf;
1516 tmp.i = (tmp.i + 0x00007FFFU + (tmp.i & 0x00010000U >> 16)) & 0xFFFF0000U;
1517 return tmp.f;
1518 }
1519
1520 static av_always_inline float flt16_trunc(float pf)
1521 {
1522 union av_intfloat32 pun;
1523 pun.f = pf;
1524 pun.i &= 0xFFFF0000U;
1525 return pun.f;
1526 }
1527
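/* One step of the backward-adaptive second-order lattice predictor used by
 * AAC Main: k1/k2 are reflection coefficients estimated from the running
 * correlations and variances, pv is the predicted value, and all state is
 * kept in the reduced flt16 precision. */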
1528 static av_always_inline void predict(PredictorState *ps, float *coef,
1529 int output_enable)
1530 {
1531 const float a = 0.953125; // 61.0 / 64
1532 const float alpha = 0.90625; // 29.0 / 32
1533 float e0, e1;
1534 float pv;
1535 float k1, k2;
1536 float r0 = ps->r0, r1 = ps->r1;
1537 float cor0 = ps->cor0, cor1 = ps->cor1;
1538 float var0 = ps->var0, var1 = ps->var1;
1539
1540 k1 = var0 > 1 ? cor0 * flt16_even(a / var0) : 0;
1541 k2 = var1 > 1 ? cor1 * flt16_even(a / var1) : 0;
1542
1543 pv = flt16_round(k1 * r0 + k2 * r1);
1544 if (output_enable)
1545 *coef += pv;
1546
1547 e0 = *coef;
1548 e1 = e0 - k1 * r0;
1549
1550 ps->cor1 = flt16_trunc(alpha * cor1 + r1 * e1);
1551 ps->var1 = flt16_trunc(alpha * var1 + 0.5f * (r1 * r1 + e1 * e1));
1552 ps->cor0 = flt16_trunc(alpha * cor0 + r0 * e0);
1553 ps->var0 = flt16_trunc(alpha * var0 + 0.5f * (r0 * r0 + e0 * e0));
1554
1555 ps->r1 = flt16_trunc(a * (r0 - k1 * e0));
1556 ps->r0 = flt16_trunc(a * e0);
1557 }
1558
1559 /**
1560 * Apply AAC-Main style frequency domain prediction.
1561 */
1562 static void apply_prediction(AACContext *ac, SingleChannelElement *sce)
1563 {
1564 int sfb, k;
1565
1566 if (!sce->ics.predictor_initialized) {
1567 reset_all_predictors(sce->predictor_state);
1568 sce->ics.predictor_initialized = 1;
1569 }
1570
1571 if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
1572 for (sfb = 0; sfb < ff_aac_pred_sfb_max[ac->oc[1].m4ac.sampling_index]; sfb++) {
1573 for (k = sce->ics.swb_offset[sfb]; k < sce->ics.swb_offset[sfb + 1]; k++) {
1574 predict(&sce->predictor_state[k], &sce->coeffs[k],
1575 sce->ics.predictor_present && sce->ics.prediction_used[sfb]);
1576 }
1577 }
1578 if (sce->ics.predictor_reset_group)
1579 reset_predictor_group(sce->predictor_state, sce->ics.predictor_reset_group);
1580 } else
1581 reset_all_predictors(sce->predictor_state);
1582 }
1583
1584 /**
1585 * Decode an individual_channel_stream payload; reference: table 4.44.
1586 *
1587 * @param common_window Channels have independent [0], or shared [1], Individual Channel Stream information.
1588 * @param scale_flag scalable [1] or non-scalable [0] AAC (Unused until scalable AAC is implemented.)
1589 *
1590 * @return Returns error status. 0 - OK, !0 - error
1591 */
1592 static int decode_ics(AACContext *ac, SingleChannelElement *sce,
1593 GetBitContext *gb, int common_window, int scale_flag)
1594 {
1595 Pulse pulse;
1596 TemporalNoiseShaping *tns = &sce->tns;
1597 IndividualChannelStream *ics = &sce->ics;
1598 float *out = sce->coeffs;
1599 int global_gain, pulse_present = 0;
1600
1601 /* This assignment is to silence a GCC warning about the variable being used
1602 * uninitialized when in fact it always is.
1603 */
1604 pulse.num_pulse = 0;
1605
1606 global_gain = get_bits(gb, 8);
1607
1608 if (!common_window && !scale_flag) {
1609 if (decode_ics_info(ac, ics, gb) < 0)
1610 return AVERROR_INVALIDDATA;
1611 }
1612
1613 if (decode_band_types(ac, sce->band_type, sce->band_type_run_end, gb, ics) < 0)
1614 return -1;
1615 if (decode_scalefactors(ac, sce->sf, gb, global_gain, ics, sce->band_type, sce->band_type_run_end) < 0)
1616 return -1;
1617
1618 pulse_present = 0;
1619 if (!scale_flag) {
1620 if ((pulse_present = get_bits1(gb))) {
1621 if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
1622 av_log(ac->avctx, AV_LOG_ERROR, "Pulse tool not allowed in eight short sequence.\n");
1623 return -1;
1624 }
1625 if (decode_pulses(&pulse, gb, ics->swb_offset, ics->num_swb)) {
1626 av_log(ac->avctx, AV_LOG_ERROR, "Pulse data corrupt or invalid.\n");
1627 return -1;
1628 }
1629 }
1630 if ((tns->present = get_bits1(gb)) && decode_tns(ac, tns, gb, ics))
1631 return -1;
1632 if (get_bits1(gb)) {
1633 av_log_missing_feature(ac->avctx, "SSR", 1);
1634 return -1;
1635 }
1636 }
1637
1638 if (decode_spectrum_and_dequant(ac, out, gb, sce->sf, pulse_present, &pulse, ics, sce->band_type) < 0)
1639 return -1;
1640
1641 if (ac->oc[1].m4ac.object_type == AOT_AAC_MAIN && !common_window)
1642 apply_prediction(ac, sce);
1643
1644 return 0;
1645 }
1646
1647 /**
1648 * Mid/Side stereo decoding; reference: 4.6.8.1.3.
1649 */
1650 static void apply_mid_side_stereo(AACContext *ac, ChannelElement *cpe)
1651 {
1652 const IndividualChannelStream *ics = &cpe->ch[0].ics;
1653 float *ch0 = cpe->ch[0].coeffs;
1654 float *ch1 = cpe->ch[1].coeffs;
1655 int g, i, group, idx = 0;
1656 const uint16_t *offsets = ics->swb_offset;
1657 for (g = 0; g < ics->num_window_groups; g++) {
1658 for (i = 0; i < ics->max_sfb; i++, idx++) {
1659 if (cpe->ms_mask[idx] &&
1660 cpe->ch[0].band_type[idx] < NOISE_BT && cpe->ch[1].band_type[idx] < NOISE_BT) {
1661 for (group = 0; group < ics->group_len[g]; group++) {
1662 ac->dsp.butterflies_float(ch0 + group * 128 + offsets[i],
1663 ch1 + group * 128 + offsets[i],
1664 offsets[i+1] - offsets[i]);
1665 }
1666 }
1667 }
1668 ch0 += ics->group_len[g] * 128;
1669 ch1 += ics->group_len[g] * 128;
1670 }
1671 }
1672
1673 /**
1674 * intensity stereo decoding; reference: 4.6.8.2.3
1675 *
1676 * @param ms_present Indicates mid/side stereo presence. [0] mask is all 0s;
1677 * [1] mask is decoded from bitstream; [2] mask is all 1s;
1678 * [3] reserved for scalable AAC
1679 */
1680 static void apply_intensity_stereo(AACContext *ac, ChannelElement *cpe, int ms_present)
1681 {
1682 const IndividualChannelStream *ics = &cpe->ch[1].ics;
1683 SingleChannelElement *sce1 = &cpe->ch[1];
1684 float *coef0 = cpe->ch[0].coeffs, *coef1 = cpe->ch[1].coeffs;
1685 const uint16_t *offsets = ics->swb_offset;
1686 int g, group, i, idx = 0;
1687 int c;
1688 float scale;
1689 for (g = 0; g < ics->num_window_groups; g++) {
1690 for (i = 0; i < ics->max_sfb;) {
1691 if (sce1->band_type[idx] == INTENSITY_BT || sce1->band_type[idx] == INTENSITY_BT2) {
1692 const int bt_run_end = sce1->band_type_run_end[idx];
1693 for (; i < bt_run_end; i++, idx++) {
1694 c = -1 + 2 * (sce1->band_type[idx] - 14);
1695 if (ms_present)
1696 c *= 1 - 2 * cpe->ms_mask[idx];
1697 scale = c * sce1->sf[idx];
1698 for (group = 0; group < ics->group_len[g]; group++)
1699 ac->dsp.vector_fmul_scalar(coef1 + group * 128 + offsets[i],
1700 coef0 + group * 128 + offsets[i],
1701 scale,
1702 offsets[i + 1] - offsets[i]);
1703 }
1704 } else {
1705 int bt_run_end = sce1->band_type_run_end[idx];
1706 idx += bt_run_end - i;
1707 i = bt_run_end;
1708 }
1709 }
1710 coef0 += ics->group_len[g] * 128;
1711 coef1 += ics->group_len[g] * 128;
1712 }
1713 }
1714
1715 /**
1716 * Decode a channel_pair_element; reference: table 4.4.
1717 *
1718 * @return Returns error status. 0 - OK, !0 - error
1719 */
1720 static int decode_cpe(AACContext *ac, GetBitContext *gb, ChannelElement *cpe)
1721 {
1722 int i, ret, common_window, ms_present = 0;
1723
1724 common_window = get_bits1(gb);
1725 if (common_window) {
1726 if (decode_ics_info(ac, &cpe->ch[0].ics, gb))
1727 return AVERROR_INVALIDDATA;
1728 i = cpe->ch[1].ics.use_kb_window[0];
1729 cpe->ch[1].ics = cpe->ch[0].ics;
1730 cpe->ch[1].ics.use_kb_window[1] = i;
1731 if (cpe->ch[1].ics.predictor_present && (ac->oc[1].m4ac.object_type != AOT_AAC_MAIN))
1732 if ((cpe->ch[1].ics.ltp.present = get_bits(gb, 1)))
1733 decode_ltp(ac, &cpe->ch[1].ics.ltp, gb, cpe->ch[1].ics.max_sfb);
1734 ms_present = get_bits(gb, 2);
1735 if (ms_present == 3) {
1736 av_log(ac->avctx, AV_LOG_ERROR, "ms_present = 3 is reserved.\n");
1737 return -1;
1738 } else if (ms_present)
1739 decode_mid_side_stereo(cpe, gb, ms_present);
1740 }
1741 if ((ret = decode_ics(ac, &cpe->ch[0], gb, common_window, 0)))
1742 return ret;
1743 if ((ret = decode_ics(ac, &cpe->ch[1], gb, common_window, 0)))
1744 return ret;
1745
1746 if (common_window) {
1747 if (ms_present)
1748 apply_mid_side_stereo(ac, cpe);
1749 if (ac->oc[1].m4ac.object_type == AOT_AAC_MAIN) {
1750 apply_prediction(ac, &cpe->ch[0]);
1751 apply_prediction(ac, &cpe->ch[1]);
1752 }
1753 }
1754
1755 apply_intensity_stereo(ac, cpe, ms_present);
1756 return 0;
1757 }
1758
1759 static const float cce_scale[] = {
1760 1.09050773266525765921, //2^(1/8)
1761 1.18920711500272106672, //2^(1/4)
1762 M_SQRT2,
1763 2,
1764 };
1765
1766 /**
1767 * Decode coupling_channel_element; reference: table 4.8.
1768 *
1769 * @return Returns error status. 0 - OK, !0 - error
1770 */
1771 static int decode_cce(AACContext *ac, GetBitContext *gb, ChannelElement *che)
1772 {
1773 int num_gain = 0;
1774 int c, g, sfb, ret;
1775 int sign;
1776 float scale;
1777 SingleChannelElement *sce = &che->ch[0];
1778 ChannelCoupling *coup = &che->coup;
1779
1780 coup->coupling_point = 2 * get_bits1(gb);
1781 coup->num_coupled = get_bits(gb, 3);
1782 for (c = 0; c <= coup->num_coupled; c++) {
1783 num_gain++;
1784 coup->type[c] = get_bits1(gb) ? TYPE_CPE : TYPE_SCE;
1785 coup->id_select[c] = get_bits(gb, 4);
1786 if (coup->type[c] == TYPE_CPE) {
1787 coup->ch_select[c] = get_bits(gb, 2);
1788 if (coup->ch_select[c] == 3)
1789 num_gain++;
1790 } else
1791 coup->ch_select[c] = 2;
1792 }
1793 coup->coupling_point += get_bits1(gb) || (coup->coupling_point >> 1);
1794
1795 sign = get_bits(gb, 1);
1796 scale = cce_scale[get_bits(gb, 2)];
1797
1798 if ((ret = decode_ics(ac, sce, gb, 0, 0)))
1799 return ret;
1800
1801 for (c = 0; c < num_gain; c++) {
1802 int idx = 0;
1803 int cge = 1;
1804 int gain = 0;
1805 float gain_cache = 1.;
1806 if (c) {
1807 cge = coup->coupling_point == AFTER_IMDCT ? 1 : get_bits1(gb);
1808 gain = cge ? get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60: 0;
1809 gain_cache = powf(scale, -gain);
1810 }
1811 if (coup->coupling_point == AFTER_IMDCT) {
1812 coup->gain[c][0] = gain_cache;
1813 } else {
1814 for (g = 0; g < sce->ics.num_window_groups; g++) {
1815 for (sfb = 0; sfb < sce->ics.max_sfb; sfb++, idx++) {
1816 if (sce->band_type[idx] != ZERO_BT) {
1817 if (!cge) {
1818 int t = get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
1819 if (t) {
1820 int s = 1;
1821 t = gain += t;
1822 if (sign) {
1823 s -= 2 * (t & 0x1);
1824 t >>= 1;
1825 }
1826 gain_cache = powf(scale, -t) * s;
1827 }
1828 }
1829 coup->gain[c][idx] = gain_cache;
1830 }
1831 }
1832 }
1833 }
1834 }
1835 return 0;
1836 }
1837
1838 /**
1839 * Parse whether channels are to be excluded from Dynamic Range Compression; reference: table 4.53.
1840 *
1841 * @return Returns number of bytes consumed.
1842 */
1843 static int decode_drc_channel_exclusions(DynamicRangeControl *che_drc,
1844 GetBitContext *gb)
1845 {
1846 int i;
1847 int num_excl_chan = 0;
1848
1849 do {
1850 for (i = 0; i < 7; i++)
1851 che_drc->exclude_mask[num_excl_chan++] = get_bits1(gb);
1852 } while (num_excl_chan < MAX_CHANNELS - 7 && get_bits1(gb));
1853
1854 return num_excl_chan / 7;
1855 }
1856
1857 /**
1858 * Decode dynamic range information; reference: table 4.52.
1859 *
1860 * @param cnt length of TYPE_FIL syntactic element in bytes
1861 *
1862 * @return Returns number of bytes consumed.
1863 */
1864 static int decode_dynamic_range(DynamicRangeControl *che_drc,
1865 GetBitContext *gb, int cnt)
1866 {
1867 int n = 1;
1868 int drc_num_bands = 1;
1869 int i;
1870
1871 /* pce_tag_present? */
1872 if (get_bits1(gb)) {
1873 che_drc->pce_instance_tag = get_bits(gb, 4);
1874 skip_bits(gb, 4); // tag_reserved_bits
1875 n++;
1876 }
1877
1878 /* excluded_chns_present? */
1879 if (get_bits1(gb)) {
1880 n += decode_drc_channel_exclusions(che_drc, gb);
1881 }
1882
1883 /* drc_bands_present? */
1884 if (get_bits1(gb)) {
1885 che_drc->band_incr = get_bits(gb, 4);
1886 che_drc->interpolation_scheme = get_bits(gb, 4);
1887 n++;
1888 drc_num_bands += che_drc->band_incr;
1889 for (i = 0; i < drc_num_bands; i++) {
1890 che_drc->band_top[i] = get_bits(gb, 8);
1891 n++;
1892 }
1893 }
1894
1895 /* prog_ref_level_present? */
1896 if (get_bits1(gb)) {
1897 che_drc->prog_ref_level = get_bits(gb, 7);
1898 skip_bits1(gb); // prog_ref_level_reserved_bits
1899 n++;
1900 }
1901
1902 for (i = 0; i < drc_num_bands; i++) {
1903 che_drc->dyn_rng_sgn[i] = get_bits1(gb);
1904 che_drc->dyn_rng_ctl[i] = get_bits(gb, 7);
1905 n++;
1906 }
1907
1908 return n;
1909 }
1910
1911 /**
1912 * Decode extension data (incomplete); reference: table 4.51.
1913 *
1914 * @param cnt length of TYPE_FIL syntactic element in bytes
1915 *
1916 * @return Returns number of bytes consumed
1917 */
1918 static int decode_extension_payload(AACContext *ac, GetBitContext *gb, int cnt,
1919 ChannelElement *che, enum RawDataBlockType elem_type)
1920 {
1921 int crc_flag = 0;
1922 int res = cnt;
1923 switch (get_bits(gb, 4)) { // extension type
1924 case EXT_SBR_DATA_CRC:
1925 crc_flag++;
1926 case EXT_SBR_DATA:
1927 if (!che) {
1928 av_log(ac->avctx, AV_LOG_ERROR, "SBR was found before the first channel element.\n");
1929 return res;
1930 } else if (!ac->oc[1].m4ac.sbr) {
1931 av_log(ac->avctx, AV_LOG_ERROR, "SBR was signaled as not present but was found in the bitstream.\n");
1932 skip_bits_long(gb, 8 * cnt - 4);
1933 return res;
1934 } else if (ac->oc[1].m4ac.sbr == -1 && ac->oc[1].status == OC_LOCKED) {
1935 av_log(ac->avctx, AV_LOG_ERROR, "Implicit SBR was first found after the first frame.\n");
1936 skip_bits_long(gb, 8 * cnt - 4);
1937 return res;
1938 } else if (ac->oc[1].m4ac.ps == -1 && ac->oc[1].status < OC_LOCKED && ac->avctx->channels == 1) {
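// SBR found in a mono stream whose PS signalling is implicit: enable SBR and PS (HE-AAC v2) and reconfigure the output layout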
1939 ac->oc[1].m4ac.sbr = 1;
1940 ac->oc[1].m4ac.ps = 1;
1941 output_configure(ac, ac->oc[1].layout_map, ac->oc[1].layout_map_tags,
1942 ac->oc[1].m4ac.chan_config, ac->oc[1].status);
1943 } else {
1944 ac->oc[1].m4ac.sbr = 1;
1945 }
1946 res = ff_decode_sbr_extension(ac, &che->sbr, gb, crc_flag, cnt, elem_type);
1947 break;
1948 case EXT_DYNAMIC_RANGE:
1949 res = decode_dynamic_range(&ac->che_drc, gb, cnt);
1950 break;
1951 case EXT_FILL:
1952 case EXT_FILL_DATA:
1953 case EXT_DATA_ELEMENT:
1954 default:
1955 skip_bits_long(gb, 8 * cnt - 4);
1956 break;
1957 }
1958 return res;
1959 }
1960
1961 /**
1962 * Decode Temporal Noise Shaping filter coefficients and apply all-pole filters; reference: 4.6.9.3.
1963 *
1964 * @param decode 1 if tool is used normally, 0 if tool is used in LTP.
1965 * @param coef spectral coefficients
1966 */
1967 static void apply_tns(float coef[1024], TemporalNoiseShaping *tns,
1968 IndividualChannelStream *ics, int decode)
1969 {
1970 const int mmm = FFMIN(ics->tns_max_bands, ics->max_sfb);
1971 int w, filt, m, i;
1972 int bottom, top, order, start, end, size, inc;
1973 float lpc[TNS_MAX_ORDER];
1974 float tmp[TNS_MAX_ORDER];
1975
1976 for (w = 0; w < ics->num_windows; w++) {
1977 bottom = ics->num_swb;
1978 for (filt = 0; filt < tns->n_filt[w]; filt++) {
1979 top = bottom;
1980 bottom = FFMAX(0, top - tns->length[w][filt]);
1981 order = tns->order[w][filt];
1982 if (order == 0)
1983 continue;
1984
1985 // tns_decode_coef: convert the transmitted TNS coefficients to LPC filter coefficients
1986 compute_lpc_coefs(tns->coef[w][filt], order, lpc, 0, 0, 0);
1987
1988 start = ics->swb_offset[FFMIN(bottom, mmm)];
1989 end = ics->swb_offset[FFMIN( top, mmm)];
1990 if ((size = end - start) <= 0)
1991 continue;
1992 if (tns->direction[w][filt]) {
1993 inc = -1;
1994 start = end - 1;
1995 } else {
1996 inc = 1;
1997 }
1998 start += w * 128;
1999
2000 if (decode) {
2001 // ar filter
2002 for (m = 0; m < size; m++, start += inc)
2003 for (i = 1; i <= FFMIN(m, order); i++)
2004 coef[start] -= coef[start - i * inc] * lpc[i - 1];
2005 } else {
2006 // ma filter
2007 for (m = 0; m < size; m++, start += inc) {
2008 tmp[0] = coef[start];
2009 for (i = 1; i <= FFMIN(m, order); i++)
2010 coef[start] += tmp[i] * lpc[i - 1];
2011 for (i = order; i > 0; i--)
2012 tmp[i] = tmp[i - 1];
2013 }
2014 }
2015 }
2016 }
2017 }
2018
2019 /**
2020 * Apply windowing and MDCT to obtain the spectral
2021 * coefficients from the time-domain samples predicted by LTP.
2022 */
2023 static void windowing_and_mdct_ltp(AACContext *ac, float *out,
2024 float *in, IndividualChannelStream *ics)
2025 {
2026 const float *lwindow = ics->use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
2027 const float *swindow = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
2028 const float *lwindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024;
2029 const float *swindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
2030
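// window the 2048-sample prediction like a long transform block (the first half with the previous window shape, the second half with the current one) before running the LTP MDCT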
2031 if (ics->window_sequence[0] != LONG_STOP_SEQUENCE) {
2032 ac->dsp.vector_fmul(in, in, lwindow_prev, 1024);
2033 } else {
2034 memset(in, 0, 448 * sizeof(float));
2035 ac->dsp.vector_fmul(in + 448, in + 448, swindow_prev, 128);
2036 }
2037 if (ics->window_sequence[0] != LONG_START_SEQUENCE) {
2038 ac->dsp.vector_fmul_reverse(in + 1024, in + 1024, lwindow, 1024);
2039 } else {
2040 ac->dsp.vector_fmul_reverse(in + 1024 + 448, in + 1024 + 448, swindow, 128);
2041 memset(in + 1024 + 576, 0, 448 * sizeof(float));
2042 }
2043 ac->mdct_ltp.mdct_calc(&ac->mdct_ltp, out, in);
2044 }
2045
2046 /**
2047 * Apply the long term prediction
2048 */
2049 static void apply_ltp(AACContext *ac, SingleChannelElement *sce)
2050 {
2051 const LongTermPrediction *ltp = &sce->ics.ltp;
2052 const uint16_t *offsets = sce->ics.swb_offset;
2053 int i, sfb;
2054
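// build the predicted time signal from the ltp_state history delayed by ltp->lag and scaled by ltp->coef, transform it, and add it to the spectrum only in the SFBs flagged in ltp->used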
2055 if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
2056 float *predTime = sce->ret;
2057 float *predFreq = ac->buf_mdct;
2058 int16_t num_samples = 2048;
2059
2060 if (ltp->lag < 1024)
2061 num_samples = ltp->lag + 1024;
2062 for (i = 0; i < num_samples; i++)
2063 predTime[i] = sce->ltp_state[i + 2048 - ltp->lag] * ltp->coef;
2064 memset(&predTime[i], 0, (2048 - i) * sizeof(float));
2065
2066 windowing_and_mdct_ltp(ac, predFreq, predTime, &sce->ics);
2067
2068 if (sce->tns.present)
2069 apply_tns(predFreq, &sce->tns, &sce->ics, 0);
2070
2071 for (sfb = 0; sfb < FFMIN(sce->ics.max_sfb, MAX_LTP_LONG_SFB); sfb++)
2072 if (ltp->used[sfb])
2073 for (i = offsets[sfb]; i < offsets[sfb + 1]; i++)
2074 sce->coeffs[i] += predFreq[i];
2075 }
2076 }
2077
2078 /**
2079 * Update the LTP buffer for the next frame.
2080 */
2081 static void update_ltp(AACContext *ac, SingleChannelElement *sce)
2082 {
2083 IndividualChannelStream *ics = &sce->ics;
2084 float *saved = sce->saved;
2085 float *saved_ltp = sce->coeffs;
2086 const float *lwindow = ics->use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
2087 const float *swindow = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
2088 int i;
2089
2090 if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
2091 memcpy(saved_ltp, saved, 512 * sizeof(float));
2092 memset(saved_ltp + 576, 0, 448 * sizeof(float));
2093 ac->dsp.vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
2094 for (i = 0; i < 64; i++)
2095 saved_ltp[i + 512] = ac->buf_mdct[1023 - i] * swindow[63 - i];
2096 } else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
2097 memcpy(saved_ltp, ac->buf_mdct + 512, 448 * sizeof(float));
2098 memset(saved_ltp + 576, 0, 448 * sizeof(float));
2099 ac->dsp.vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
2100 for (i = 0; i < 64; i++)
2101 saved_ltp[i + 512] = ac->buf_mdct[1023 - i] * swindow[63 - i];
2102 } else { // LONG_STOP or ONLY_LONG
2103 ac->dsp.vector_fmul_reverse(saved_ltp, ac->buf_mdct + 512, &lwindow[512], 512);
2104 for (i = 0; i < 512; i++)
2105 saved_ltp[i + 512] = ac->buf_mdct[1023 - i] * lwindow[511 - i];
2106 }
2107
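// ltp_state keeps 3 * 1024 samples of history: drop the oldest block, then append this frame's output and the windowed time-domain estimate of the overlap that completes in the next frame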
2108 memcpy(sce->ltp_state, sce->ltp_state+1024, 1024 * sizeof(*sce->ltp_state));
2109 memcpy(sce->ltp_state+1024, sce->ret, 1024 * sizeof(*sce->ltp_state));
2110 memcpy(sce->ltp_state+2048, saved_ltp, 1024 * sizeof(*sce->ltp_state));
2111 }
2112
2113 /**
2114 * Conduct IMDCT and windowing.
2115 */
2116 static void imdct_and_windowing(AACContext *ac, SingleChannelElement *sce)
2117 {
2118 IndividualChannelStream *ics = &sce->ics;
2119 float *in = sce->coeffs;
2120 float *out = sce->ret;
2121 float *saved = sce->saved;
2122 const float *swindow = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
2123 const float *lwindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024;
2124 const float *swindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
2125 float *buf = ac->buf_mdct;
2126 float *temp = ac->temp;
2127 int i;
2128
2129 // imdct
2130 if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
2131 for (i = 0; i < 1024; i += 128)
2132 ac->mdct_small.imdct_half(&ac->mdct_small, buf + i, in + i);
2133 } else
2134 ac->mdct.imdct_half(&ac->mdct, buf, in);
2135
2136 /* window overlapping
2137 * NOTE: To simplify the overlapping code, all 'meaningless' short to long
2138 * and long to short transitions are considered to be short to short
2139 * transitions. This leaves just two cases (long to long and short to short)
2140 * with a little special sauce for EIGHT_SHORT_SEQUENCE.
2141 */
2142 if ((ics->window_sequence[1] == ONLY_LONG_SEQUENCE || ics->window_sequence[1] == LONG_STOP_SEQUENCE) &&
2143 (ics->window_sequence[0] == ONLY_LONG_SEQUENCE || ics->window_sequence[0] == LONG_START_SEQUENCE)) {
2144 ac->dsp.vector_fmul_window( out, saved, buf, lwindow_prev, 512);
2145 } else {
2146 memcpy( out, saved, 448 * sizeof(float));
2147
2148 if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
2149 ac->dsp.vector_fmul_window(out + 448 + 0*128, saved + 448, buf + 0*128, swindow_prev, 64);
2150 ac->dsp.vector_fmul_window(out + 448 + 1*128, buf + 0*128 + 64, buf + 1*128, swindow, 64);
2151 ac->dsp.vector_fmul_window(out + 448 + 2*128, buf + 1*128 + 64, buf + 2*128, swindow, 64);
2152 ac->dsp.vector_fmul_window(out + 448 + 3*128, buf + 2*128 + 64, buf + 3*128, swindow, 64);
2153 ac->dsp.vector_fmul_window(temp, buf + 3*128 + 64, buf + 4*128, swindow, 64);
2154 memcpy( out + 448 + 4*128, temp, 64 * sizeof(float));
2155 } else {
2156 ac->dsp.vector_fmul_window(out + 448, saved + 448, buf, swindow_prev, 64);
2157 memcpy( out + 576, buf + 64, 448 * sizeof(float));
2158 }
2159 }
2160
2161 // buffer update
2162 if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
2163 memcpy( saved, temp + 64, 64 * sizeof(float));
2164 ac->dsp.vector_fmul_window(saved + 64, buf + 4*128 + 64, buf + 5*128, swindow, 64);
2165 ac->dsp.vector_fmul_window(saved + 192, buf + 5*128 + 64, buf + 6*128, swindow, 64);
2166 ac->dsp.vector_fmul_window(saved + 320, buf + 6*128 + 64, buf + 7*128, swindow, 64);
2167 memcpy( saved + 448, buf + 7*128 + 64, 64 * sizeof(float));
2168 } else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
2169 memcpy( saved, buf + 512, 448 * sizeof(float));
2170 memcpy( saved + 448, buf + 7*128 + 64, 64 * sizeof(float));
2171 } else { // LONG_STOP or ONLY_LONG
2172 memcpy( saved, buf + 512, 512 * sizeof(float));
2173 }
2174 }
2175
2176 /**
2177 * Apply dependent channel coupling (applied before IMDCT).
2178 *
2179 * @param index index into coupling gain array
2180 */
2181 static void apply_dependent_coupling(AACContext *ac,
2182 SingleChannelElement *target,
2183 ChannelElement *cce, int index)
2184 {
2185 IndividualChannelStream *ics = &cce->ch[0].ics;
2186 const uint16_t *offsets = ics->swb_offset;
2187 float *dest = target->coeffs;
2188 const float *src = cce->ch[0].coeffs;
2189 int g, i, group, k, idx = 0;
2190 if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP) {
2191 av_log(ac->avctx, AV_LOG_ERROR,
2192 "Dependent coupling is not supported together with LTP\n");
2193 return;
2194 }
2195 for (g = 0; g < ics->num_window_groups; g++) {
2196 for (i = 0; i < ics->max_sfb; i++, idx++) {
2197 if (cce->ch[0].band_type[idx] != ZERO_BT) {
2198 const float gain = cce->coup.gain[index][idx];
2199 for (group = 0; group < ics->group_len[g]; group++) {
2200 for (k = offsets[i]; k < offsets[i + 1]; k++) {
2201 // XXX dsputil-ize
2202 dest[group * 128 + k] += gain * src[group * 128 + k];
2203 }
2204 }
2205 }
2206 }
2207 dest += ics->group_len[g] * 128;
2208 src += ics->group_len[g] * 128;
2209 }
2210 }
2211
2212 /**
2213 * Apply independent channel coupling (applied after IMDCT).
2214 *
2215 * @param index index into coupling gain array
2216 */
2217 static void apply_independent_coupling(AACContext *ac,
2218 SingleChannelElement *target,
2219 ChannelElement *cce, int index)
2220 {
2221 int i;
2222 const float gain = cce->coup.gain[index][0];
2223 const float *src = cce->ch[0].ret;
2224 float *dest = target->ret;
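// with SBR active each frame carries 2048 output samples, so the gain is applied over the upsampled length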
2225 const int len = 1024 << (ac->oc[1].m4ac.sbr == 1);
2226
2227 for (i = 0; i < len; i++)
2228 dest[i] += gain * src[i];
2229 }
2230
2231 /**
2232 * channel coupling transformation interface
2233 *
2234 * @param apply_coupling_method pointer to (in)dependent coupling function
2235 */
2236 static void apply_channel_coupling(AACContext *ac, ChannelElement *cc,
2237 enum RawDataBlockType type, int elem_id,
2238 enum CouplingPoint coupling_point,
2239 void (*apply_coupling_method)(AACContext *ac, SingleChannelElement *target, ChannelElement *cce, int index))
2240 {
2241 int i, c;
2242
2243 for (i = 0; i < MAX_ELEM_ID; i++) {
2244 ChannelElement *cce = ac->che[TYPE_CCE][i];
2245 int index = 0;
2246
2247 if (cce && cce->coup.coupling_point == coupling_point) {
2248 ChannelCoupling *coup = &cce->coup;
2249
2250 for (c = 0; c <= coup->num_coupled; c++) {
2251 if (coup->type[c] == type && coup->id_select[c] == elem_id) {
2252 if (coup->ch_select[c] != 1) {
2253 apply_coupling_method(ac, &cc->ch[0], cce, index);
2254 if (coup->ch_select[c] != 0)
2255 index++;
2256 }
2257 if (coup->ch_select[c] != 2)
2258 apply_coupling_method(ac, &cc->ch[1], cce, index++);
2259 } else
2260 index += 1 + (coup->ch_select[c] == 3);
2261 }
2262 }
2263 }
2264 }
2265
2266 /**
2267 * Convert spectral data to float samples, applying all supported tools as appropriate.
2268 */
2269 static void spectral_to_sample(AACContext *ac)
2270 {
2271 int i, type;
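// walk the element types from highest to lowest so CCEs are transformed before the SCE/CPE targets that may couple to them after the IMDCT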
2272 for (type = 3; type >= 0; type--) {
2273 for (i = 0; i < MAX_ELEM_ID; i++) {
2274 ChannelElement *che = ac->che[type][i];
2275 if (che) {
2276 if (type <= TYPE_CPE)
2277 apply_channel_coupling(ac, che, type, i, BEFORE_TNS, apply_dependent_coupling);
2278 if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP) {
2279 if (che->ch[0].ics.predictor_present) {
2280 if (che->ch[0].ics.ltp.present)
2281 apply_ltp(ac, &che->ch[0]);
2282 if (che->ch[1].ics.ltp.present && type == TYPE_CPE)
2283 apply_ltp(ac, &che->ch[1]);
2284 }
2285 }
2286 if (che->ch[0].tns.present)
2287 apply_tns(che->ch[0].coeffs, &che->ch[0].tns, &che->ch[0].ics, 1);
2288 if (che->ch[1].tns.present)
2289 apply_tns(che->ch[1].coeffs, &che->ch[1].tns, &che->ch[1].ics, 1);
2290 if (type <= TYPE_CPE)
2291 apply_channel_coupling(ac, che, type, i, BETWEEN_TNS_AND_IMDCT, apply_dependent_coupling);
2292 if (type != TYPE_CCE || che->coup.coupling_point == AFTER_IMDCT) {
2293 imdct_and_windowing(ac, &che->ch[0]);
2294 if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP)
2295 update_ltp(ac, &che->ch[0]);
2296 if (type == TYPE_CPE) {
2297 imdct_and_windowing(ac, &che->ch[1]);
2298 if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP)
2299 update_ltp(ac, &che->ch[1]);
2300 }
2301 if (ac->oc[1].m4ac.sbr > 0) {
2302 ff_sbr_apply(ac, &che->sbr, type, che->ch[0].ret, che->ch[1].ret);
2303 }
2304 }
2305 if (type <= TYPE_CCE)
2306 apply_channel_coupling(ac, che, type, i, AFTER_IMDCT, apply_independent_coupling);
2307 }
2308 }
2309 }
2310 }
2311
2312 static int parse_adts_frame_header(AACContext *ac, GetBitContext *gb)
2313 {
2314 int size;
2315 AACADTSHeaderInfo hdr_info;
2316 uint8_t layout_map[MAX_ELEM_ID*4][3];
2317 int layout_map_tags;
2318
2319 size = avpriv_aac_parse_header(gb, &hdr_info);
2320 if (size > 0) {
2321 if (hdr_info.num_aac_frames != 1) {
2322 av_log_missing_feature(ac->avctx, "More than one AAC RDB per ADTS frame is", 0);
2323 return -1;
2324 }
2325 push_output_configuration(ac);
2326 if (hdr_info.chan_config) {
2327 ac->oc[1].m4ac.chan_config = hdr_info.chan_config;
2328 if (set_default_channel_config(ac->avctx, layout_map,
2329 &layout_map_tags, hdr_info.chan_config))
2330 return -7;
2331 if (output_configure(ac, layout_map, layout_map_tags,
2332 hdr_info.chan_config,
2333 FFMAX(ac->oc[1].status, OC_TRIAL_FRAME)))
2334 return -7;
2335 } else {
2336 ac->oc[1].m4ac.chan_config = 0;
2337 }
2338 ac->oc[1].m4ac.sample_rate = hdr_info.sample_rate;
2339 ac->oc[1].m4ac.sampling_index = hdr_info.sampling_index;
2340 ac->oc[1].m4ac.object_type = hdr_info.object_type;
2341 if (ac->oc[0].status != OC_LOCKED ||
2342 ac->oc[0].m4ac.chan_config != hdr_info.chan_config ||
2343 ac->oc[0].m4ac.sample_rate != hdr_info.sample_rate) {
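// configuration changed or not yet locked: mark SBR and PS as implicitly signaled until the bitstream shows whether they are present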
2344 ac->oc[1].m4ac.sbr = -1;
2345 ac->oc[1].m4ac.ps = -1;
2346 }
2347 if (!hdr_info.crc_absent)
2348 skip_bits(gb, 16);
2349 }
2350 return size;
2351 }
2352
2353 static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
2354 int *got_frame_ptr, GetBitContext *gb)
2355 {
2356 AACContext *ac = avctx->priv_data;
2357 ChannelElement *che = NULL, *che_prev = NULL;
2358 enum RawDataBlockType elem_type, elem_type_prev = TYPE_END;
2359 int err, elem_id;
2360 int samples = 0, multiplier, audio_found = 0, pce_found = 0;
2361
2362 if (show_bits(gb, 12) == 0xfff) {
2363 if (parse_adts_frame_header(ac, gb) < 0) {
2364 av_log(avctx, AV_LOG_ERROR, "Error decoding AAC frame header.\n");
2365 err = -1;
2366 goto fail;
2367 }
2368 if (ac->oc[1].m4ac.sampling_index > 12) {
2369 av_log(ac->avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", ac->oc[1].m4ac.sampling_index);
2370 err = -1;
2371 goto fail;
2372 }
2373 }
2374
2375 ac->tags_mapped = 0;
2376 // parse
2377 while ((elem_type = get_bits(gb, 3)) != TYPE_END) {
2378 elem_id = get_bits(gb, 4);
2379
2380 if (elem_type < TYPE_DSE) {
2381 if (!(che=get_che(ac, elem_type, elem_id))) {
2382 av_log(ac->avctx, AV_LOG_ERROR, "channel element %d.%d is not allocated\n",
2383 elem_type, elem_id);
2384 err = -1;
2385 goto fail;
2386 }
2387 samples = 1024;
2388 }
2389
2390 switch (elem_type) {
2391
2392 case TYPE_SCE:
2393 err = decode_ics(ac, &che->ch[0], gb, 0, 0);
2394 audio_found = 1;
2395 break;
2396
2397 case TYPE_CPE:
2398 err = decode_cpe(ac, gb, che);
2399 audio_found = 1;
2400 break;
2401
2402 case TYPE_CCE:
2403 err = decode_cce(ac, gb, che);
2404 break;
2405
2406 case TYPE_LFE:
2407 err = decode_ics(ac, &che->ch[0], gb, 0, 0);
2408 audio_found = 1;
2409 break;
2410
2411 case TYPE_DSE:
2412 err = skip_data_stream_element(ac, gb);
2413 break;
2414
2415 case TYPE_PCE: {
2416 uint8_t layout_map[MAX_ELEM_ID*4][3];
2417 int tags;
2418 push_output_configuration(ac);
2419 tags = decode_pce(avctx, &ac->oc[1].m4ac, layout_map, gb);
2420 if (tags < 0) {
2421 err = tags;
2422 break;
2423 }
2424 if (pce_found) {
2425 av_log(avctx, AV_LOG_ERROR,
2426 "Not evaluating a further program_config_element as this construct is dubious at best.\n");
2427 pop_output_configuration(ac);
2428 } else {
2429 err = output_configure(ac, layout_map, tags, 0, OC_TRIAL_PCE);
2430 pce_found = 1;
2431 }
2432 break;
2433 }
2434
2435 case TYPE_FIL:
2436 if (elem_id == 15)
2437 elem_id += get_bits(gb, 8) - 1;
2438 if (get_bits_left(gb) < 8 * elem_id) {
2439 av_log(avctx, AV_LOG_ERROR, overread_err);
2440 err = -1;
2441 goto fail;
2442 }
2443 while (elem_id > 0)
2444 elem_id -= decode_extension_payload(ac, gb, elem_id, che_prev, elem_type_prev);
2445 err = 0; /* FIXME */
2446 break;
2447
2448 default:
2449 err = -1; /* should not happen, but keeps compiler happy */
2450 break;
2451 }
2452
2453 che_prev = che;
2454 elem_type_prev = elem_type;
2455
2456 if (err)
2457 goto fail;
2458
2459 if (get_bits_left(gb) < 3) {
2460 av_log(avctx, AV_LOG_ERROR, overread_err);
2461 err = -1;
2462 goto fail;
2463 }
2464 }
2465
2466 spectral_to_sample(ac);
2467
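// SBR doubles the output length to 2048 samples per frame when it actually upsamples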
2468 multiplier = (ac->oc[1].m4ac.sbr == 1) ? ac->oc[1].m4ac.ext_sample_rate > ac->oc[1].m4ac.sample_rate : 0;
2469 samples <<= multiplier;
2470
2471 if (samples) {
2472 /* get output buffer */
2473 ac->frame.nb_samples = samples;
2474 if ((err = avctx->get_buffer(avctx, &ac->frame)) < 0) {
2475 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
2476 err = -1;
2477 goto fail;
2478 }
2479
2480 if (avctx->sample_fmt == AV_SAMPLE_FMT_FLT)
2481 ac->fmt_conv.float_interleave((float *)ac->frame.data[0],
2482 (const float **)ac->output_data,
2483 samples, avctx->channels);
2484 else
2485 ac->fmt_conv.float_to_int16_interleave((int16_t *)ac->frame.data[0],
2486 (const float **)ac->output_data,
2487 samples, avctx->channels);
2488
2489 *(AVFrame *)data = ac->frame;
2490 }
2491 *got_frame_ptr = !!samples;
2492
2493 if (ac->oc[1].status && audio_found) {
2494 avctx->sample_rate = ac->oc[1].m4ac.sample_rate << multiplier;
2495 avctx->frame_size = samples;
2496 ac->oc[1].status = OC_LOCKED;
2497 }
2498
2499 return 0;
2500 fail:
2501 pop_output_configuration(ac);
2502 return err;
2503 }
2504
2505 static int aac_decode_frame(AVCodecContext *avctx, void *data,
2506 int *got_frame_ptr, AVPacket *avpkt)
2507 {
2508 AACContext *ac = avctx->priv_data;
2509 const uint8_t *buf = avpkt->data;
2510 int buf_size = avpkt->size;
2511 GetBitContext gb;
2512 int buf_consumed;
2513 int buf_offset;
2514 int err;
2515 int new_extradata_size;
2516 const uint8_t *new_extradata = av_packet_get_side_data(avpkt,
2517 AV_PKT_DATA_NEW_EXTRADATA,
2518 &new_extradata_size);
2519
2520 if (new_extradata) {
2521 av_free(avctx->extradata);
2522 avctx->extradata = av_mallocz(new_extradata_size +
2523 FF_INPUT_BUFFER_PADDING_SIZE);
2524 if (!avctx->extradata)
2525 return AVERROR(ENOMEM);
2526 avctx->extradata_size = new_extradata_size;
2527 memcpy(avctx->extradata, new_extradata, new_extradata_size);
2528 push_output_configuration(ac);
2529 if (decode_audio_specific_config(ac, ac->avctx, &ac->oc[1].m4ac,
2530 avctx->extradata,
2531 avctx->extradata_size*8, 1) < 0) {
2532 pop_output_configuration(ac);
2533 return AVERROR_INVALIDDATA;
2534 }
2535 }
2536
2537 init_get_bits(&gb, buf, buf_size * 8);
2538
2539 if ((err = aac_decode_frame_int(avctx, data, got_frame_ptr, &gb)) < 0)
2540 return err;
2541
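// return only the bytes that were parsed, unless everything that remains is zero padding, in which case consume the whole packet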
2542 buf_consumed = (get_bits_count(&gb) + 7) >> 3;
2543 for (buf_offset = buf_consumed; buf_offset < buf_size; buf_offset++)
2544 if (buf[buf_offset])
2545 break;
2546
2547 return buf_size > buf_offset ? buf_consumed : buf_size;
2548 }
2549
2550 static av_cold int aac_decode_close(AVCodecContext *avctx)
2551 {
2552 AACContext *ac = avctx->priv_data;
2553 int i, type;
2554
2555 for (i = 0; i < MAX_ELEM_ID; i++) {
2556 for (type = 0; type < 4; type++) {
2557 if (ac->che[type][i])
2558 ff_aac_sbr_ctx_close(&ac->che[type][i]->sbr);
2559 av_freep(&ac->che[type][i]);
2560 }
2561 }
2562
2563 ff_mdct_end(&ac->mdct);
2564 ff_mdct_end(&ac->mdct_small);
2565 ff_mdct_end(&ac->mdct_ltp);
2566 return 0;
2567 }
2568
2569
2570 #define LOAS_SYNC_WORD 0x2b7 ///< 11 bits LOAS sync word
2571
2572 struct LATMContext {
2573 AACContext aac_ctx; ///< containing AACContext
2574 int initialized; ///< initialized after valid extradata was seen
2575
2576 // parser data
2577 int audio_mux_version_A; ///< LATM syntax version
2578 int frame_length_type; ///< 0/1 variable/fixed frame length
2579 int frame_length; ///< frame length for fixed frame length
2580 };
2581
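/* LatmGetValue(): a 2-bit length field followed by (length + 1) bytes of value. */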
2582 static inline uint32_t latm_get_value(GetBitContext *b)
2583 {
2584 int length = get_bits(b, 2);
2585
2586 return get_bits_long(b, (length+1)*8);
2587 }
2588
2589 static int latm_decode_audio_specific_config(struct LATMContext *latmctx,
2590 GetBitContext *gb, int asclen)
2591 {
2592 AACContext *ac = &latmctx->aac_ctx;
2593 AVCodecContext *avctx = ac->avctx;
2594 MPEG4AudioConfig m4ac = { 0 };
2595 int config_start_bit = get_bits_count(gb);
2596 int sync_extension = 0;
2597 int bits_consumed, esize;
2598
2599 if (asclen) {
2600 sync_extension = 1;
2601 asclen = FFMIN(asclen, get_bits_left(gb));
2602 } else
2603 asclen = get_bits_left(gb);
2604
2605 if (config_start_bit % 8) {
2606 av_log_missing_feature(latmctx->aac_ctx.avctx, "audio specific "
2607 "config not byte aligned.\n", 1);
2608 return AVERROR_INVALIDDATA;
2609 }
2610 if (asclen <= 0)
2611 return AVERROR_INVALIDDATA;
2612 bits_consumed = decode_audio_specific_config(NULL, avctx, &m4ac,
2613 gb->buffer + (config_start_bit / 8),
2614 asclen, sync_extension);
2615
2616 if (bits_consumed < 0)
2617 return AVERROR_INVALIDDATA;
2618
2619 if (ac->oc[1].m4ac.sample_rate != m4ac.sample_rate ||
2620 ac->oc[1].m4ac.chan_config != m4ac.chan_config) {
2621
2622 av_log(avctx, AV_LOG_INFO, "audio config changed\n");
2623 latmctx->initialized = 0;
2624
2625 esize = (bits_consumed+7) / 8;
2626
2627 if (avctx->extradata_size < esize) {
2628 av_free(avctx->extradata);
2629 avctx->extradata = av_malloc(esize + FF_INPUT_BUFFER_PADDING_SIZE);
2630 if (!avctx->extradata)
2631 return AVERROR(ENOMEM);
2632 }
2633
2634 avctx->extradata_size = esize;
2635 memcpy(avctx->extradata, gb->buffer + (config_start_bit/8), esize);
2636 memset(avctx->extradata+esize, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2637 }
2638 skip_bits_long(gb, bits_consumed);
2639
2640 return bits_consumed;
2641 }
2642
2643 static int read_stream_mux_config(struct LATMContext *latmctx,
2644 GetBitContext *gb)
2645 {
2646 int ret, audio_mux_version = get_bits(gb, 1);
2647
2648 latmctx->audio_mux_version_A = 0;
2649 if (audio_mux_version)
2650 latmctx->audio_mux_version_A = get_bits(gb, 1);
2651
2652 if (!latmctx->audio_mux_version_A) {
2653
2654 if (audio_mux_version)
2655 latm_get_value(gb); // taraFullness
2656
2657 skip_bits(gb, 1); // allStreamSameTimeFraming
2658 skip_bits(gb, 6); // numSubFrames
2659 // numPrograms
2660 if (get_bits(gb, 4)) { // numPrograms
2661 av_log_missing_feature(latmctx->aac_ctx.avctx,
2662 "multiple programs are not supported\n", 1);
2663 return AVERROR_PATCHWELCOME;
2664 }
2665
2666 // for each program (which there is only one in DVB)
2667
2668 // for each layer (which there is only one in DVB)
2669 if (get_bits(gb, 3)) { // numLayer
2670 av_log_missing_feature(latmctx->aac_ctx.avctx,
2671 "multiple layers are not supported\n", 1);
2672 return AVERROR_PATCHWELCOME;
2673 }
2674
2675 // for all but first stream: use_same_config = get_bits(gb, 1);
2676 if (!audio_mux_version) {
2677 if ((ret = latm_decode_audio_specific_config(latmctx, gb, 0)) < 0)
2678 return ret;
2679 } else {
2680 int ascLen = latm_get_value(gb);
2681 if ((ret = latm_decode_audio_specific_config(latmctx, gb, ascLen)) < 0)
2682 return ret;
2683 ascLen -= ret;
2684 skip_bits_long(gb, ascLen);
2685 }
2686
2687 latmctx->frame_length_type = get_bits(gb, 3);
2688 switch (latmctx->frame_length_type) {
2689 case 0:
2690 skip_bits(gb, 8); // latmBufferFullness
2691 break;
2692 case 1:
2693 latmctx->frame_length = get_bits(gb, 9);
2694 break;
2695 case 3:
2696 case 4:
2697 case 5:
2698 skip_bits(gb, 6); // CELP frame length table index
2699 break;
2700 case 6:
2701 case 7:
2702 skip_bits(gb, 1); // HVXC frame length table index
2703 break;
2704 }
2705
2706 if (get_bits(gb, 1)) { // other data
2707 if (audio_mux_version) {
2708 latm_get_value(gb); // other_data_bits
2709 } else {
2710 int esc;
2711 do {
2712 esc = get_bits(gb, 1);
2713 skip_bits(gb, 8);
2714 } while (esc);
2715 }
2716 }
2717
2718 if (get_bits(gb, 1)) // crc present
2719 skip_bits(gb, 8); // config_crc
2720 }
2721
2722 return 0;
2723 }
2724
2725 static int read_payload_length_info(struct LATMContext *ctx, GetBitContext *gb)
2726 {
2727 uint8_t tmp;
2728
2729 if (ctx->frame_length_type == 0) {
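// MuxSlotLengthBytes is sent as a run of bytes; a value of 255 means another byte follows and is added to the total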
2730 int mux_slot_length = 0;
2731 do {
2732 tmp = get_bits(gb, 8);
2733 mux_slot_length += tmp;
2734 } while (tmp == 255);
2735 return mux_slot_length;
2736 } else if (ctx->frame_length_type == 1) {
2737 return ctx->frame_length;
2738 } else if (ctx->frame_length_type == 3 ||
2739 ctx->frame_length_type == 5 ||
2740 ctx->frame_length_type == 7) {
2741 skip_bits(gb, 2); // mux_slot_length_coded
2742 }
2743 return 0;
2744 }
2745
2746 static int read_audio_mux_element(struct LATMContext *latmctx,
2747 GetBitContext *gb)
2748 {
2749 int err;
2750 uint8_t use_same_mux = get_bits(gb, 1);
2751 if (!use_same_mux) {
2752 if ((err = read_stream_mux_config(latmctx, gb)) < 0)
2753 return err;
2754 } else if (!latmctx->aac_ctx.avctx->extradata) {
2755 av_log(latmctx->aac_ctx.avctx, AV_LOG_DEBUG,
2756 "no decoder config found\n");
2757 return AVERROR(EAGAIN);
2758 }
2759 if (latmctx->audio_mux_version_A == 0) {
2760 int mux_slot_length_bytes = read_payload_length_info(latmctx, gb);
2761 if (mux_slot_length_bytes * 8 > get_bits_left(gb)) {
2762 av_log(latmctx->aac_ctx.avctx, AV_LOG_ERROR, "incomplete frame\n");
2763 return AVERROR_INVALIDDATA;
2764 } else if (mux_slot_length_bytes * 8 + 256 < get_bits_left(gb)) {
2765 av_log(latmctx->aac_ctx.avctx, AV_LOG_ERROR,
2766 "frame length mismatch %d << %d\n",
2767 mux_slot_length_bytes * 8, get_bits_left(gb));
2768 return AVERROR_INVALIDDATA;
2769 }
2770 }
2771 return 0;
2772 }
2773
2774
2775 static int latm_decode_frame(AVCodecContext *avctx, void *out,
2776 int *got_frame_ptr, AVPacket *avpkt)
2777 {
2778 struct LATMContext *latmctx = avctx->priv_data;
2779 int muxlength, err;
2780 GetBitContext gb;
2781
2782 init_get_bits(&gb, avpkt->data, avpkt->size * 8);
2783
2784 // check for LOAS sync word
2785 if (get_bits(&gb, 11) != LOAS_SYNC_WORD)
2786 return AVERROR_INVALIDDATA;
2787
2788 muxlength = get_bits(&gb, 13) + 3;
2789 // not enough data, the parser should have sorted this
2790 if (muxlength > avpkt->size)
2791 return AVERROR_INVALIDDATA;
2792
2793 if ((err = read_audio_mux_element(latmctx, &gb)) < 0)
2794 return err;
2795
2796 if (!latmctx->initialized) {
2797 if (!avctx->extradata) {
2798 *got_frame_ptr = 0;
2799 return avpkt->size;
2800 } else {
2801 push_output_configuration(&latmctx->aac_ctx);
2802 if ((err = decode_audio_specific_config(
2803 &latmctx->aac_ctx, avctx, &latmctx->aac_ctx.oc[1].m4ac,
2804 avctx->extradata, avctx->extradata_size*8, 1)) < 0) {
2805 pop_output_configuration(&latmctx->aac_ctx);
2806 return err;
2807 }
2808 latmctx->initialized = 1;
2809 }
2810 }
2811
2812 if (show_bits(&gb, 12) == 0xfff) {
2813 av_log(latmctx->aac_ctx.avctx, AV_LOG_ERROR,
2814 "ADTS header detected, probably as result of configuration "
2815 "misparsing\n");
2816 return AVERROR_INVALIDDATA;
2817 }
2818
2819 if ((err = aac_decode_frame_int(avctx, out, got_frame_ptr, &gb)) < 0)
2820 return err;
2821
2822 return muxlength;
2823 }
2824
2825 static av_cold int latm_decode_init(AVCodecContext *avctx)
2826 {
2827 struct LATMContext *latmctx = avctx->priv_data;
2828 int ret = aac_decode_init(avctx);
2829
2830 if (avctx->extradata_size > 0)
2831 latmctx->initialized = !ret;
2832
2833 return ret;
2834 }
2835
2836
2837 AVCodec ff_aac_decoder = {
2838 .name = "aac",
2839 .type = AVMEDIA_TYPE_AUDIO,
2840 .id = CODEC_ID_AAC,
2841 .priv_data_size = sizeof(AACContext),
2842 .init = aac_decode_init,
2843 .close = aac_decode_close,
2844 .decode = aac_decode_frame,
2845 .long_name = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"),
2846 .sample_fmts = (const enum AVSampleFormat[]) {
2847 AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
2848 },
2849 .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
2850 .channel_layouts = aac_channel_layout,
2851 };
2852
2853 /*
2854 Note: This decoder is intended to decode LATM streams transferred
2855 in MPEG transport streams that contain only one program.
2856 For more complex LATM demuxing, a separate LATM demuxer should be used.
2857 */
2858 AVCodec ff_aac_latm_decoder = {
2859 .name = "aac_latm",
2860 .type = AVMEDIA_TYPE_AUDIO,
2861 .id = CODEC_ID_AAC_LATM,
2862 .priv_data_size = sizeof(struct LATMContext),
2863 .init = latm_decode_init,
2864 .close = aac_decode_close,
2865 .decode = latm_decode_frame,
2866 .long_name = NULL_IF_CONFIG_SMALL("AAC LATM (Advanced Audio Coding LATM syntax)"),
2867 .sample_fmts = (const enum AVSampleFormat[]) {
2868 AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
2869 },
2870 .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
2871 .channel_layouts = aac_channel_layout,
2872 };