1 /*
2 * AAC decoder
3 * Copyright (c) 2005-2006 Oded Shimon ( ods15 ods15 dyndns org )
4 * Copyright (c) 2006-2007 Maxim Gavrilov ( maxim.gavrilov gmail com )
5 *
6 * AAC LATM decoder
7 * Copyright (c) 2008-2010 Paul Kendall <paul@kcbbs.gen.nz>
8 * Copyright (c) 2010 Janne Grunau <janne-libav@jannau.net>
9 *
10 * This file is part of Libav.
11 *
12 * Libav is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU Lesser General Public
14 * License as published by the Free Software Foundation; either
15 * version 2.1 of the License, or (at your option) any later version.
16 *
17 * Libav is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * Lesser General Public License for more details.
21 *
22 * You should have received a copy of the GNU Lesser General Public
23 * License along with Libav; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 */
26
27 /**
28 * @file
29 * AAC decoder
30 * @author Oded Shimon ( ods15 ods15 dyndns org )
31 * @author Maxim Gavrilov ( maxim.gavrilov gmail com )
32 */
33
34 /*
35 * supported tools
36 *
37 * Support?             Name
38 * N (code in SoC repo) gain control
39 * Y                    block switching
40 * Y                    window shapes - standard
41 * N                    window shapes - Low Delay
42 * Y                    filterbank - standard
43 * N (code in SoC repo) filterbank - Scalable Sample Rate
44 * Y                    Temporal Noise Shaping
45 * Y                    Long Term Prediction
46 * Y                    intensity stereo
47 * Y                    channel coupling
48 * Y                    frequency domain prediction
49 * Y                    Perceptual Noise Substitution
50 * Y                    Mid/Side stereo
51 * N                    Scalable Inverse AAC Quantization
52 * N                    Frequency Selective Switch
53 * N                    upsampling filter
54 * Y                    quantization & coding - AAC
55 * N                    quantization & coding - TwinVQ
56 * N                    quantization & coding - BSAC
57 * N                    AAC Error Resilience tools
58 * N                    Error Resilience payload syntax
59 * N                    Error Protection tool
60 * N                    CELP
61 * N                    Silence Compression
62 * N                    HVXC
63 * N                    HVXC 4kbits/s VR
64 * N                    Structured Audio tools
65 * N                    Structured Audio Sample Bank Format
66 * N                    MIDI
67 * N                    Harmonic and Individual Lines plus Noise
68 * N                    Text-To-Speech Interface
69 * Y                    Spectral Band Replication
70 * Y (not in this code) Layer-1
71 * Y (not in this code) Layer-2
72 * Y (not in this code) Layer-3
73 * N                    SinuSoidal Coding (Transient, Sinusoid, Noise)
74 * Y                    Parametric Stereo
75 * N                    Direct Stream Transfer
76 *
77 * Note: - HE AAC v1 comprises LC AAC with Spectral Band Replication.
78 *       - HE AAC v2 comprises LC AAC with Spectral Band Replication and
79 *         Parametric Stereo.
80 */
81
82 #include "libavutil/float_dsp.h"
83 #include "avcodec.h"
84 #include "internal.h"
85 #include "get_bits.h"
86 #include "dsputil.h"
87 #include "fft.h"
88 #include "fmtconvert.h"
89 #include "lpc.h"
90 #include "kbdwin.h"
91 #include "sinewin.h"
92
93 #include "aac.h"
94 #include "aactab.h"
95 #include "aacdectab.h"
96 #include "cbrt_tablegen.h"
97 #include "sbr.h"
98 #include "aacsbr.h"
99 #include "mpeg4audio.h"
100 #include "aacadtsdec.h"
101 #include "libavutil/intfloat.h"
102
103 #include <assert.h>
104 #include <errno.h>
105 #include <math.h>
106 #include <string.h>
107
108 #if ARCH_ARM
109 # include "arm/aac.h"
110 #endif
111
112 static VLC vlc_scalefactors;
113 static VLC vlc_spectral[11];
114
115 static const char overread_err[] = "Input buffer exhausted before END element found\n";
116
117 static int count_channels(uint8_t (*layout)[3], int tags)
118 {
119 int i, sum = 0;
120 for (i = 0; i < tags; i++) {
121 int syn_ele = layout[i][0];
122 int pos = layout[i][2];
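/* A CPE carries two coded channels, every other element one; elements whose
 * position is switched off or that are coupling channels (CC) produce no output. */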
123 sum += (1 + (syn_ele == TYPE_CPE)) *
124 (pos != AAC_CHANNEL_OFF && pos != AAC_CHANNEL_CC);
125 }
126 return sum;
127 }
128
129 /**
130 * Check for the channel element in the current channel position configuration.
131 * If it exists, make sure the appropriate element is allocated and map the
132 * channel order to match the internal Libav channel layout.
133 *
134 * @param che_pos current channel position configuration
135 * @param type channel element type
136 * @param id channel element id
137 * @param channels count of the number of channels in the configuration
138 *
139 * @return Returns error status. 0 - OK, !0 - error
140 */
141 static av_cold int che_configure(AACContext *ac,
142 enum ChannelPosition che_pos,
143 int type, int id, int *channels)
144 {
145 if (che_pos) {
146 if (!ac->che[type][id]) {
147 if (!(ac->che[type][id] = av_mallocz(sizeof(ChannelElement))))
148 return AVERROR(ENOMEM);
149 ff_aac_sbr_ctx_init(ac, &ac->che[type][id]->sbr);
150 }
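/* CCEs are only used for channel coupling and produce no direct output;
 * an SCE decoded with Parametric Stereo yields two output channels. */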
151 if (type != TYPE_CCE) {
152 ac->output_data[(*channels)++] = ac->che[type][id]->ch[0].ret;
153 if (type == TYPE_CPE ||
154 (type == TYPE_SCE && ac->oc[1].m4ac.ps == 1)) {
155 ac->output_data[(*channels)++] = ac->che[type][id]->ch[1].ret;
156 }
157 }
158 } else {
159 if (ac->che[type][id])
160 ff_aac_sbr_ctx_close(&ac->che[type][id]->sbr);
161 av_freep(&ac->che[type][id]);
162 }
163 return 0;
164 }
165
166 struct elem_to_channel {
167 uint64_t av_position;
168 uint8_t syn_ele;
169 uint8_t elem_id;
170 uint8_t aac_position;
171 };
172
173 static int assign_pair(struct elem_to_channel e2c_vec[MAX_ELEM_ID],
174 uint8_t (*layout_map)[3], int offset, uint64_t left,
175 uint64_t right, int pos)
176 {
177 if (layout_map[offset][0] == TYPE_CPE) {
178 e2c_vec[offset] = (struct elem_to_channel) {
179 .av_position = left | right, .syn_ele = TYPE_CPE,
180 .elem_id = layout_map[offset ][1], .aac_position = pos };
181 return 1;
182 } else {
183 e2c_vec[offset] = (struct elem_to_channel) {
184 .av_position = left, .syn_ele = TYPE_SCE,
185 .elem_id = layout_map[offset ][1], .aac_position = pos };
186 e2c_vec[offset + 1] = (struct elem_to_channel) {
187 .av_position = right, .syn_ele = TYPE_SCE,
188 .elem_id = layout_map[offset + 1][1], .aac_position = pos };
189 return 2;
190 }
191 }
192
193 static int count_paired_channels(uint8_t (*layout_map)[3], int tags, int pos, int *current) {
194 int num_pos_channels = 0;
195 int first_cpe = 0;
196 int sce_parity = 0;
197 int i;
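/* sce_parity tracks whether an odd number of SCEs has been seen at this
 * position: a lone SCE is only acceptable where it can map to a centre
 * speaker, any other unpaired SCE makes the layout unsniffable. */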
198 for (i = *current; i < tags; i++) {
199 if (layout_map[i][2] != pos)
200 break;
201 if (layout_map[i][0] == TYPE_CPE) {
202 if (sce_parity) {
203 if (pos == AAC_CHANNEL_FRONT && !first_cpe) {
204 sce_parity = 0;
205 } else {
206 return -1;
207 }
208 }
209 num_pos_channels += 2;
210 first_cpe = 1;
211 } else {
212 num_pos_channels++;
213 sce_parity ^= 1;
214 }
215 }
216 if (sce_parity &&
217 ((pos == AAC_CHANNEL_FRONT && first_cpe) || pos == AAC_CHANNEL_SIDE))
218 return -1;
219 *current = i;
220 return num_pos_channels;
221 }
222
223 static uint64_t sniff_channel_order(uint8_t (*layout_map)[3], int tags)
224 {
225 int i, n, total_non_cc_elements;
226 struct elem_to_channel e2c_vec[4*MAX_ELEM_ID] = {{ 0 }};
227 int num_front_channels, num_side_channels, num_back_channels;
228 uint64_t layout;
229
230 if (FF_ARRAY_ELEMS(e2c_vec) < tags)
231 return 0;
232
233 i = 0;
234 num_front_channels =
235 count_paired_channels(layout_map, tags, AAC_CHANNEL_FRONT, &i);
236 if (num_front_channels < 0)
237 return 0;
238 num_side_channels =
239 count_paired_channels(layout_map, tags, AAC_CHANNEL_SIDE, &i);
240 if (num_side_channels < 0)
241 return 0;
242 num_back_channels =
243 count_paired_channels(layout_map, tags, AAC_CHANNEL_BACK, &i);
244 if (num_back_channels < 0)
245 return 0;
246
247 i = 0;
248 if (num_front_channels & 1) {
249 e2c_vec[i] = (struct elem_to_channel) {
250 .av_position = AV_CH_FRONT_CENTER, .syn_ele = TYPE_SCE,
251 .elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_FRONT };
252 i++;
253 num_front_channels--;
254 }
255 if (num_front_channels >= 4) {
256 i += assign_pair(e2c_vec, layout_map, i,
257 AV_CH_FRONT_LEFT_OF_CENTER,
258 AV_CH_FRONT_RIGHT_OF_CENTER,
259 AAC_CHANNEL_FRONT);
260 num_front_channels -= 2;
261 }
262 if (num_front_channels >= 2) {
263 i += assign_pair(e2c_vec, layout_map, i,
264 AV_CH_FRONT_LEFT,
265 AV_CH_FRONT_RIGHT,
266 AAC_CHANNEL_FRONT);
267 num_front_channels -= 2;
268 }
269 while (num_front_channels >= 2) {
270 i += assign_pair(e2c_vec, layout_map, i,
271 UINT64_MAX,
272 UINT64_MAX,
273 AAC_CHANNEL_FRONT);
274 num_front_channels -= 2;
275 }
276
277 if (num_side_channels >= 2) {
278 i += assign_pair(e2c_vec, layout_map, i,
279 AV_CH_SIDE_LEFT,
280 AV_CH_SIDE_RIGHT,
281 AAC_CHANNEL_SIDE);
282 num_side_channels -= 2;
283 }
284 while (num_side_channels >= 2) {
285 i += assign_pair(e2c_vec, layout_map, i,
286 UINT64_MAX,
287 UINT64_MAX,
288 AAC_CHANNEL_SIDE);
289 num_side_channels -= 2;
290 }
291
292 while (num_back_channels >= 4) {
293 i += assign_pair(e2c_vec, layout_map, i,
294 UINT64_MAX,
295 UINT64_MAX,
296 AAC_CHANNEL_BACK);
297 num_back_channels -= 2;
298 }
299 if (num_back_channels >= 2) {
300 i += assign_pair(e2c_vec, layout_map, i,
301 AV_CH_BACK_LEFT,
302 AV_CH_BACK_RIGHT,
303 AAC_CHANNEL_BACK);
304 num_back_channels -= 2;
305 }
306 if (num_back_channels) {
307 e2c_vec[i] = (struct elem_to_channel) {
308 .av_position = AV_CH_BACK_CENTER, .syn_ele = TYPE_SCE,
309 .elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_BACK };
310 i++;
311 num_back_channels--;
312 }
313
314 if (i < tags && layout_map[i][2] == AAC_CHANNEL_LFE) {
315 e2c_vec[i] = (struct elem_to_channel) {
316 .av_position = AV_CH_LOW_FREQUENCY, .syn_ele = TYPE_LFE,
317 .elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_LFE };
318 i++;
319 }
320 while (i < tags && layout_map[i][2] == AAC_CHANNEL_LFE) {
321 e2c_vec[i] = (struct elem_to_channel) {
322 .av_position = UINT64_MAX, .syn_ele = TYPE_LFE,
323 .elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_LFE };
324 i++;
325 }
326
327 // Must choose a stable sort
328 total_non_cc_elements = n = i;
329 do {
330 int next_n = 0;
331 for (i = 1; i < n; i++) {
332 if (e2c_vec[i-1].av_position > e2c_vec[i].av_position) {
333 FFSWAP(struct elem_to_channel, e2c_vec[i-1], e2c_vec[i]);
334 next_n = i;
335 }
336 }
337 n = next_n;
338 } while (n > 0);
339
340 layout = 0;
341 for (i = 0; i < total_non_cc_elements; i++) {
342 layout_map[i][0] = e2c_vec[i].syn_ele;
343 layout_map[i][1] = e2c_vec[i].elem_id;
344 layout_map[i][2] = e2c_vec[i].aac_position;
345 if (e2c_vec[i].av_position != UINT64_MAX) {
346 layout |= e2c_vec[i].av_position;
347 }
348 }
349
350 return layout;
351 }
352
353 /**
354 * Save current output configuration if and only if it has been locked.
355 */
356 static void push_output_configuration(AACContext *ac) {
357 if (ac->oc[1].status == OC_LOCKED) {
358 ac->oc[0] = ac->oc[1];
359 }
360 ac->oc[1].status = OC_NONE;
361 }
362
363 /**
364 * Restore the previous output configuration if and only if the current
365 * configuration is unlocked.
366 */
367 static void pop_output_configuration(AACContext *ac) {
368 if (ac->oc[1].status != OC_LOCKED && ac->oc[0].status != OC_NONE) {
369 ac->oc[1] = ac->oc[0];
370 ac->avctx->channels = ac->oc[1].channels;
371 ac->avctx->channel_layout = ac->oc[1].channel_layout;
372 }
373 }
374
375 /**
376 * Configure output channel order based on the current program configuration element.
377 *
378 * @return Returns error status. 0 - OK, !0 - error
379 */
380 static int output_configure(AACContext *ac,
381 uint8_t layout_map[MAX_ELEM_ID*4][3], int tags,
382 enum OCStatus oc_type)
383 {
384 AVCodecContext *avctx = ac->avctx;
385 int i, channels = 0, ret;
386 uint64_t layout = 0;
387
388 if (ac->oc[1].layout_map != layout_map) {
389 memcpy(ac->oc[1].layout_map, layout_map, tags * sizeof(layout_map[0]));
390 ac->oc[1].layout_map_tags = tags;
391 }
392
393 // Try to sniff a reasonable channel order, otherwise output the
394 // channels in the order the PCE declared them.
395 if (avctx->request_channel_layout != AV_CH_LAYOUT_NATIVE)
396 layout = sniff_channel_order(layout_map, tags);
397 for (i = 0; i < tags; i++) {
398 int type = layout_map[i][0];
399 int id = layout_map[i][1];
400 int position = layout_map[i][2];
401 // Allocate or free elements depending on if they are in the
402 // current program configuration.
403 ret = che_configure(ac, position, type, id, &channels);
404 if (ret < 0)
405 return ret;
406 }
407 if (ac->oc[1].m4ac.ps == 1 && channels == 2) {
408 if (layout == AV_CH_FRONT_CENTER) {
409 layout = AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT;
410 } else {
411 layout = 0;
412 }
413 }
414
415 memcpy(ac->tag_che_map, ac->che, 4 * MAX_ELEM_ID * sizeof(ac->che[0][0]));
416 avctx->channel_layout = ac->oc[1].channel_layout = layout;
417 avctx->channels = ac->oc[1].channels = channels;
418 ac->oc[1].status = oc_type;
419
420 return 0;
421 }
422
423 /**
424 * Set up channel positions based on a default channel configuration
425 * as specified in table 1.17.
426 *
427 * @return Returns error status. 0 - OK, !0 - error
428 */
429 static int set_default_channel_config(AVCodecContext *avctx,
430 uint8_t (*layout_map)[3],
431 int *tags,
432 int channel_config)
433 {
434 if (channel_config < 1 || channel_config > 7) {
435 av_log(avctx, AV_LOG_ERROR, "invalid default channel configuration (%d)\n",
436 channel_config);
437 return -1;
438 }
439 *tags = tags_per_config[channel_config];
440 memcpy(layout_map, aac_channel_layout_map[channel_config-1], *tags * sizeof(*layout_map));
441 return 0;
442 }
443
444 static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
445 {
446 // For PCE based channel configurations map the channels solely based on tags.
447 if (!ac->oc[1].m4ac.chan_config) {
448 return ac->tag_che_map[type][elem_id];
449 }
450 // Allow single CPE stereo files to be signalled with mono configuration.
451 if (!ac->tags_mapped && type == TYPE_CPE && ac->oc[1].m4ac.chan_config == 1) {
452 uint8_t layout_map[MAX_ELEM_ID*4][3];
453 int layout_map_tags;
454 push_output_configuration(ac);
455
456 if (set_default_channel_config(ac->avctx, layout_map, &layout_map_tags,
457 2) < 0)
458 return NULL;
459 if (output_configure(ac, layout_map, layout_map_tags,
460 OC_TRIAL_FRAME) < 0)
461 return NULL;
462
463 ac->oc[1].m4ac.chan_config = 2;
464 ac->oc[1].m4ac.ps = 0;
465 }
466 // And vice-versa
467 if (!ac->tags_mapped && type == TYPE_SCE && ac->oc[1].m4ac.chan_config == 2) {
468 uint8_t layout_map[MAX_ELEM_ID*4][3];
469 int layout_map_tags;
470 push_output_configuration(ac);
471
472 if (set_default_channel_config(ac->avctx, layout_map, &layout_map_tags,
473 1) < 0)
474 return NULL;
475 if (output_configure(ac, layout_map, layout_map_tags,
476 OC_TRIAL_FRAME) < 0)
477 return NULL;
478
479 ac->oc[1].m4ac.chan_config = 1;
480 if (ac->oc[1].m4ac.sbr)
481 ac->oc[1].m4ac.ps = -1;
482 }
483 // For indexed channel configurations map the channels solely based on position.
484 switch (ac->oc[1].m4ac.chan_config) {
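/* The cases below intentionally fall through: each default channel
 * configuration is a superset of the smaller ones, so the common tag
 * mappings are shared. */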
485 case 7:
486 if (ac->tags_mapped == 3 && type == TYPE_CPE) {
487 ac->tags_mapped++;
488 return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][2];
489 }
490 case 6:
491 /* Some streams incorrectly code 5.1 audio as SCE[0] CPE[0] CPE[1] SCE[1]
492 instead of SCE[0] CPE[0] CPE[1] LFE[0]. If we seem to have
493 encountered such a stream, transfer the LFE[0] element to the SCE[1]'s mapping */
494 if (ac->tags_mapped == tags_per_config[ac->oc[1].m4ac.chan_config] - 1 && (type == TYPE_LFE || type == TYPE_SCE)) {
495 ac->tags_mapped++;
496 return ac->tag_che_map[type][elem_id] = ac->che[TYPE_LFE][0];
497 }
498 case 5:
499 if (ac->tags_mapped == 2 && type == TYPE_CPE) {
500 ac->tags_mapped++;
501 return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][1];
502 }
503 case 4:
504 if (ac->tags_mapped == 2 && ac->oc[1].m4ac.chan_config == 4 && type == TYPE_SCE) {
505 ac->tags_mapped++;
506 return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][1];
507 }
508 case 3:
509 case 2:
510 if (ac->tags_mapped == (ac->oc[1].m4ac.chan_config != 2) && type == TYPE_CPE) {
511 ac->tags_mapped++;
512 return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][0];
513 } else if (ac->oc[1].m4ac.chan_config == 2) {
514 return NULL;
515 }
516 case 1:
517 if (!ac->tags_mapped && type == TYPE_SCE) {
518 ac->tags_mapped++;
519 return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][0];
520 }
521 default:
522 return NULL;
523 }
524 }
525
526 /**
527 * Decode an array of 4 bit element IDs, optionally interleaved with a stereo/mono switching bit.
528 *
529 * @param type speaker type/position for these channels
530 */
531 static void decode_channel_map(uint8_t layout_map[][3],
532 enum ChannelPosition type,
533 GetBitContext *gb, int n)
534 {
535 while (n--) {
536 enum RawDataBlockType syn_ele;
537 switch (type) {
538 case AAC_CHANNEL_FRONT:
539 case AAC_CHANNEL_BACK:
540 case AAC_CHANNEL_SIDE:
541 syn_ele = get_bits1(gb);
542 break;
543 case AAC_CHANNEL_CC:
544 skip_bits1(gb);
545 syn_ele = TYPE_CCE;
546 break;
547 case AAC_CHANNEL_LFE:
548 syn_ele = TYPE_LFE;
549 break;
550 }
551 layout_map[0][0] = syn_ele;
552 layout_map[0][1] = get_bits(gb, 4);
553 layout_map[0][2] = type;
554 layout_map++;
555 }
556 }
557
558 /**
559 * Decode program configuration element; reference: table 4.2.
560 *
561 * @return Returns error status. 0 - OK, !0 - error
562 */
563 static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac,
564 uint8_t (*layout_map)[3],
565 GetBitContext *gb)
566 {
567 int num_front, num_side, num_back, num_lfe, num_assoc_data, num_cc, sampling_index;
568 int comment_len;
569 int tags;
570
571 skip_bits(gb, 2); // object_type
572
573 sampling_index = get_bits(gb, 4);
574 if (m4ac->sampling_index != sampling_index)
575 av_log(avctx, AV_LOG_WARNING, "Sample rate index in program config element does not match the sample rate index configured by the container.\n");
576
577 num_front = get_bits(gb, 4);
578 num_side = get_bits(gb, 4);
579 num_back = get_bits(gb, 4);
580 num_lfe = get_bits(gb, 2);
581 num_assoc_data = get_bits(gb, 3);
582 num_cc = get_bits(gb, 4);
583
584 if (get_bits1(gb))
585 skip_bits(gb, 4); // mono_mixdown_tag
586 if (get_bits1(gb))
587 skip_bits(gb, 4); // stereo_mixdown_tag
588
589 if (get_bits1(gb))
590 skip_bits(gb, 3); // mixdown_coeff_index and pseudo_surround
591
592 decode_channel_map(layout_map , AAC_CHANNEL_FRONT, gb, num_front);
593 tags = num_front;
594 decode_channel_map(layout_map + tags, AAC_CHANNEL_SIDE, gb, num_side);
595 tags += num_side;
596 decode_channel_map(layout_map + tags, AAC_CHANNEL_BACK, gb, num_back);
597 tags += num_back;
598 decode_channel_map(layout_map + tags, AAC_CHANNEL_LFE, gb, num_lfe);
599 tags += num_lfe;
600
601 skip_bits_long(gb, 4 * num_assoc_data);
602
603 decode_channel_map(layout_map + tags, AAC_CHANNEL_CC, gb, num_cc);
604 tags += num_cc;
605
606 align_get_bits(gb);
607
608 /* comment field, first byte is length */
609 comment_len = get_bits(gb, 8) * 8;
610 if (get_bits_left(gb) < comment_len) {
611 av_log(avctx, AV_LOG_ERROR, overread_err);
612 return -1;
613 }
614 skip_bits_long(gb, comment_len);
615 return tags;
616 }
617
618 /**
619 * Decode GA "General Audio" specific configuration; reference: table 4.1.
620 *
621 * @param ac pointer to AACContext, may be null
622 * @param avctx pointer to AVCodecContext, used for logging
623 *
624 * @return Returns error status. 0 - OK, !0 - error
625 */
626 static int decode_ga_specific_config(AACContext *ac, AVCodecContext *avctx,
627 GetBitContext *gb,
628 MPEG4AudioConfig *m4ac,
629 int channel_config)
630 {
631 int extension_flag, ret;
632 uint8_t layout_map[MAX_ELEM_ID*4][3];
633 int tags = 0;
634
635 if (get_bits1(gb)) { // frameLengthFlag
636 av_log_missing_feature(avctx, "960/120 MDCT window", 1);
637 return AVERROR_PATCHWELCOME;
638 }
639
640 if (get_bits1(gb)) // dependsOnCoreCoder
641 skip_bits(gb, 14); // coreCoderDelay
642 extension_flag = get_bits1(gb);
643
644 if (m4ac->object_type == AOT_AAC_SCALABLE ||
645 m4ac->object_type == AOT_ER_AAC_SCALABLE)
646 skip_bits(gb, 3); // layerNr
647
648 if (channel_config == 0) {
649 skip_bits(gb, 4); // element_instance_tag
650 tags = decode_pce(avctx, m4ac, layout_map, gb);
651 if (tags < 0)
652 return tags;
653 } else {
654 if ((ret = set_default_channel_config(avctx, layout_map, &tags, channel_config)))
655 return ret;
656 }
657
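/* Parametric Stereo only applies to a single coded channel: disable it for
 * multichannel layouts, otherwise assume implicit PS when SBR is present
 * and PS has not been explicitly ruled out. */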
658 if (count_channels(layout_map, tags) > 1) {
659 m4ac->ps = 0;
660 } else if (m4ac->sbr == 1 && m4ac->ps == -1)
661 m4ac->ps = 1;
662
663 if (ac && (ret = output_configure(ac, layout_map, tags, OC_GLOBAL_HDR)))
664 return ret;
665
666 if (extension_flag) {
667 switch (m4ac->object_type) {
668 case AOT_ER_BSAC:
669 skip_bits(gb, 5); // numOfSubFrame
670 skip_bits(gb, 11); // layer_length
671 break;
672 case AOT_ER_AAC_LC:
673 case AOT_ER_AAC_LTP:
674 case AOT_ER_AAC_SCALABLE:
675 case AOT_ER_AAC_LD:
676 skip_bits(gb, 3); /* aacSectionDataResilienceFlag
677 * aacScalefactorDataResilienceFlag
678 * aacSpectralDataResilienceFlag
679 */
680 break;
681 }
682 skip_bits1(gb); // extensionFlag3 (TBD in version 3)
683 }
684 return 0;
685 }
686
687 /**
688 * Decode audio specific configuration; reference: table 1.13.
689 *
690 * @param ac pointer to AACContext, may be null
691 * @param avctx pointer to AVCodecContext, used for logging
692 * @param m4ac pointer to MPEG4AudioConfig, used for parsing
693 * @param data pointer to buffer holding an audio specific config
694 * @param bit_size size of audio specific config or data in bits
695 * @param sync_extension look for an appended sync extension
696 *
697 * @return Returns error status or number of consumed bits. <0 - error
698 */
699 static int decode_audio_specific_config(AACContext *ac,
700 AVCodecContext *avctx,
701 MPEG4AudioConfig *m4ac,
702 const uint8_t *data, int bit_size,
703 int sync_extension)
704 {
705 GetBitContext gb;
706 int i;
707
708 av_dlog(avctx, "extradata size %d\n", avctx->extradata_size);
709 for (i = 0; i < avctx->extradata_size; i++)
710 av_dlog(avctx, "%02x ", avctx->extradata[i]);
711 av_dlog(avctx, "\n");
712
713 init_get_bits(&gb, data, bit_size);
714
715 if ((i = avpriv_mpeg4audio_get_config(m4ac, data, bit_size, sync_extension)) < 0)
716 return -1;
717 if (m4ac->sampling_index > 12) {
718 av_log(avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", m4ac->sampling_index);
719 return -1;
720 }
721
722 skip_bits_long(&gb, i);
723
724 switch (m4ac->object_type) {
725 case AOT_AAC_MAIN:
726 case AOT_AAC_LC:
727 case AOT_AAC_LTP:
728 if (decode_ga_specific_config(ac, avctx, &gb, m4ac, m4ac->chan_config))
729 return -1;
730 break;
731 default:
732 av_log(avctx, AV_LOG_ERROR, "Audio object type %s%d is not supported.\n",
733 m4ac->sbr == 1? "SBR+" : "", m4ac->object_type);
734 return -1;
735 }
736
737 av_dlog(avctx, "AOT %d chan config %d sampling index %d (%d) SBR %d PS %d\n",
738 m4ac->object_type, m4ac->chan_config, m4ac->sampling_index,
739 m4ac->sample_rate, m4ac->sbr, m4ac->ps);
740
741 return get_bits_count(&gb);
742 }
743
744 /**
745 * linear congruential pseudorandom number generator
746 *
747 * @param previous_val pointer to the current state of the generator
748 *
749 * @return Returns a 32-bit pseudorandom integer
750 */
751 static av_always_inline int lcg_random(int previous_val)
752 {
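/* 32-bit LCG; the multiplier/increment pair is the well-known
 * Numerical Recipes generator. */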
753 return previous_val * 1664525 + 1013904223;
754 }
755
756 static av_always_inline void reset_predict_state(PredictorState *ps)
757 {
758 ps->r0 = 0.0f;
759 ps->r1 = 0.0f;
760 ps->cor0 = 0.0f;
761 ps->cor1 = 0.0f;
762 ps->var0 = 1.0f;
763 ps->var1 = 1.0f;
764 }
765
766 static void reset_all_predictors(PredictorState *ps)
767 {
768 int i;
769 for (i = 0; i < MAX_PREDICTORS; i++)
770 reset_predict_state(&ps[i]);
771 }
772
773 static int sample_rate_idx (int rate)
774 {
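/* Map an arbitrary rate to the closest standard MPEG-4 sampling frequency
 * index (96000 Hz ... 8000 Hz); each threshold lies between two adjacent
 * standard rates. */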
775 if (92017 <= rate) return 0;
776 else if (75132 <= rate) return 1;
777 else if (55426 <= rate) return 2;
778 else if (46009 <= rate) return 3;
779 else if (37566 <= rate) return 4;
780 else if (27713 <= rate) return 5;
781 else if (23004 <= rate) return 6;
782 else if (18783 <= rate) return 7;
783 else if (13856 <= rate) return 8;
784 else if (11502 <= rate) return 9;
785 else if (9391 <= rate) return 10;
786 else return 11;
787 }
788
789 static void reset_predictor_group(PredictorState *ps, int group_num)
790 {
791 int i;
792 for (i = group_num - 1; i < MAX_PREDICTORS; i += 30)
793 reset_predict_state(&ps[i]);
794 }
795
796 #define AAC_INIT_VLC_STATIC(num, size) \
797 INIT_VLC_STATIC(&vlc_spectral[num], 8, ff_aac_spectral_sizes[num], \
798 ff_aac_spectral_bits[num], sizeof( ff_aac_spectral_bits[num][0]), sizeof( ff_aac_spectral_bits[num][0]), \
799 ff_aac_spectral_codes[num], sizeof(ff_aac_spectral_codes[num][0]), sizeof(ff_aac_spectral_codes[num][0]), \
800 size);
801
802 static av_cold int aac_decode_init(AVCodecContext *avctx)
803 {
804 AACContext *ac = avctx->priv_data;
805 float output_scale_factor;
806
807 ac->avctx = avctx;
808 ac->oc[1].m4ac.sample_rate = avctx->sample_rate;
809
810 if (avctx->extradata_size > 0) {
811 if (decode_audio_specific_config(ac, ac->avctx, &ac->oc[1].m4ac,
812 avctx->extradata,
813 avctx->extradata_size*8, 1) < 0)
814 return -1;
815 } else {
816 int sr, i;
817 uint8_t layout_map[MAX_ELEM_ID*4][3];
818 int layout_map_tags;
819
820 sr = sample_rate_idx(avctx->sample_rate);
821 ac->oc[1].m4ac.sampling_index = sr;
822 ac->oc[1].m4ac.channels = avctx->channels;
823 ac->oc[1].m4ac.sbr = -1;
824 ac->oc[1].m4ac.ps = -1;
825
826 for (i = 0; i < FF_ARRAY_ELEMS(ff_mpeg4audio_channels); i++)
827 if (ff_mpeg4audio_channels[i] == avctx->channels)
828 break;
829 if (i == FF_ARRAY_ELEMS(ff_mpeg4audio_channels)) {
830 i = 0;
831 }
832 ac->oc[1].m4ac.chan_config = i;
833
834 if (ac->oc[1].m4ac.chan_config) {
835 int ret = set_default_channel_config(avctx, layout_map,
836 &layout_map_tags, ac->oc[1].m4ac.chan_config);
837 if (!ret)
838 output_configure(ac, layout_map, layout_map_tags,
839 OC_GLOBAL_HDR);
840 else if (avctx->err_recognition & AV_EF_EXPLODE)
841 return AVERROR_INVALIDDATA;
842 }
843 }
844
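/* The float output path scales the MDCTs so samples land in the nominal
 * [-1.0, 1.0] range; the S16 path keeps the +/-32768 scale and converts
 * to 16-bit integers when the frame is output. */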
845 if (avctx->request_sample_fmt == AV_SAMPLE_FMT_FLT) {
846 avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
847 output_scale_factor = 1.0 / 32768.0;
848 } else {
849 avctx->sample_fmt = AV_SAMPLE_FMT_S16;
850 output_scale_factor = 1.0;
851 }
852
853 AAC_INIT_VLC_STATIC( 0, 304);
854 AAC_INIT_VLC_STATIC( 1, 270);
855 AAC_INIT_VLC_STATIC( 2, 550);
856 AAC_INIT_VLC_STATIC( 3, 300);
857 AAC_INIT_VLC_STATIC( 4, 328);
858 AAC_INIT_VLC_STATIC( 5, 294);
859 AAC_INIT_VLC_STATIC( 6, 306);
860 AAC_INIT_VLC_STATIC( 7, 268);
861 AAC_INIT_VLC_STATIC( 8, 510);
862 AAC_INIT_VLC_STATIC( 9, 366);
863 AAC_INIT_VLC_STATIC(10, 462);
864
865 ff_aac_sbr_init();
866
867 ff_dsputil_init(&ac->dsp, avctx);
868 ff_fmt_convert_init(&ac->fmt_conv, avctx);
869 avpriv_float_dsp_init(&ac->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
870
871 ac->random_state = 0x1f2e3d4c;
872
873 ff_aac_tableinit();
874
875 INIT_VLC_STATIC(&vlc_scalefactors,7,FF_ARRAY_ELEMS(ff_aac_scalefactor_code),
876 ff_aac_scalefactor_bits, sizeof(ff_aac_scalefactor_bits[0]), sizeof(ff_aac_scalefactor_bits[0]),
877 ff_aac_scalefactor_code, sizeof(ff_aac_scalefactor_code[0]), sizeof(ff_aac_scalefactor_code[0]),
878 352);
879
880 ff_mdct_init(&ac->mdct, 11, 1, output_scale_factor/1024.0);
881 ff_mdct_init(&ac->mdct_small, 8, 1, output_scale_factor/128.0);
882 ff_mdct_init(&ac->mdct_ltp, 11, 0, -2.0/output_scale_factor);
883 // window initialization
884 ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
885 ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
886 ff_init_ff_sine_windows(10);
887 ff_init_ff_sine_windows( 7);
888
889 cbrt_tableinit();
890
891 avcodec_get_frame_defaults(&ac->frame);
892 avctx->coded_frame = &ac->frame;
893
894 return 0;
895 }
896
897 /**
898 * Skip data_stream_element; reference: table 4.10.
899 */
900 static int skip_data_stream_element(AACContext *ac, GetBitContext *gb)
901 {
902 int byte_align = get_bits1(gb);
903 int count = get_bits(gb, 8);
904 if (count == 255)
905 count += get_bits(gb, 8);
906 if (byte_align)
907 align_get_bits(gb);
908
909 if (get_bits_left(gb) < 8 * count) {
910 av_log(ac->avctx, AV_LOG_ERROR, overread_err);
911 return -1;
912 }
913 skip_bits_long(gb, 8 * count);
914 return 0;
915 }
916
917 static int decode_prediction(AACContext *ac, IndividualChannelStream *ics,
918 GetBitContext *gb)
919 {
920 int sfb;
921 if (get_bits1(gb)) {
922 ics->predictor_reset_group = get_bits(gb, 5);
923 if (ics->predictor_reset_group == 0 || ics->predictor_reset_group > 30) {
924 av_log(ac->avctx, AV_LOG_ERROR, "Invalid Predictor Reset Group.\n");
925 return -1;
926 }
927 }
928 for (sfb = 0; sfb < FFMIN(ics->max_sfb, ff_aac_pred_sfb_max[ac->oc[1].m4ac.sampling_index]); sfb++) {
929 ics->prediction_used[sfb] = get_bits1(gb);
930 }
931 return 0;
932 }
933
934 /**
935 * Decode Long Term Prediction data; reference: table 4.xx.
936 */
937 static void decode_ltp(LongTermPrediction *ltp,
938 GetBitContext *gb, uint8_t max_sfb)
939 {
940 int sfb;
941
942 ltp->lag = get_bits(gb, 11);
943 ltp->coef = ltp_coef[get_bits(gb, 3)];
944 for (sfb = 0; sfb < FFMIN(max_sfb, MAX_LTP_LONG_SFB); sfb++)
945 ltp->used[sfb] = get_bits1(gb);
946 }
947
948 /**
949 * Decode Individual Channel Stream info; reference: table 4.6.
950 */
951 static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
952 GetBitContext *gb)
953 {
954 if (get_bits1(gb)) {
955 av_log(ac->avctx, AV_LOG_ERROR, "Reserved bit set.\n");
956 return AVERROR_INVALIDDATA;
957 }
958 ics->window_sequence[1] = ics->window_sequence[0];
959 ics->window_sequence[0] = get_bits(gb, 2);
960 ics->use_kb_window[1] = ics->use_kb_window[0];
961 ics->use_kb_window[0] = get_bits1(gb);
962 ics->num_window_groups = 1;
963 ics->group_len[0] = 1;
964 if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
965 int i;
966 ics->max_sfb = get_bits(gb, 4);
967 for (i = 0; i < 7; i++) {
968 if (get_bits1(gb)) {
969 ics->group_len[ics->num_window_groups - 1]++;
970 } else {
971 ics->num_window_groups++;
972 ics->group_len[ics->num_window_groups - 1] = 1;
973 }
974 }
975 ics->num_windows = 8;
976 ics->swb_offset = ff_swb_offset_128[ac->oc[1].m4ac.sampling_index];
977 ics->num_swb = ff_aac_num_swb_128[ac->oc[1].m4ac.sampling_index];
978 ics->tns_max_bands = ff_tns_max_bands_128[ac->oc[1].m4ac.sampling_index];
979 ics->predictor_present = 0;
980 } else {
981 ics->max_sfb = get_bits(gb, 6);
982 ics->num_windows = 1;
983 ics->swb_offset = ff_swb_offset_1024[ac->oc[1].m4ac.sampling_index];
984 ics->num_swb = ff_aac_num_swb_1024[ac->oc[1].m4ac.sampling_index];
985 ics->tns_max_bands = ff_tns_max_bands_1024[ac->oc[1].m4ac.sampling_index];
986 ics->predictor_present = get_bits1(gb);
987 ics->predictor_reset_group = 0;
988 if (ics->predictor_present) {
989 if (ac->oc[1].m4ac.object_type == AOT_AAC_MAIN) {
990 if (decode_prediction(ac, ics, gb)) {
991 return AVERROR_INVALIDDATA;
992 }
993 } else if (ac->oc[1].m4ac.object_type == AOT_AAC_LC) {
994 av_log(ac->avctx, AV_LOG_ERROR, "Prediction is not allowed in AAC-LC.\n");
995 return AVERROR_INVALIDDATA;
996 } else {
997 if ((ics->ltp.present = get_bits(gb, 1)))
998 decode_ltp(&ics->ltp, gb, ics->max_sfb);
999 }
1000 }
1001 }
1002
1003 if (ics->max_sfb > ics->num_swb) {
1004 av_log(ac->avctx, AV_LOG_ERROR,
1005 "Number of scalefactor bands in group (%d) exceeds limit (%d).\n",
1006 ics->max_sfb, ics->num_swb);
1007 return AVERROR_INVALIDDATA;
1008 }
1009
1010 return 0;
1011 }
1012
1013 /**
1014 * Decode band types (section_data payload); reference: table 4.46.
1015 *
1016 * @param band_type array of the used band type
1017 * @param band_type_run_end array of the last scalefactor band of a band type run
1018 *
1019 * @return Returns error status. 0 - OK, !0 - error
1020 */
1021 static int decode_band_types(AACContext *ac, enum BandType band_type[120],
1022 int band_type_run_end[120], GetBitContext *gb,
1023 IndividualChannelStream *ics)
1024 {
1025 int g, idx = 0;
1026 const int bits = (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) ? 3 : 5;
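/* Section lengths use 3 bits for short windows and 5 bits for long windows;
 * the all-ones value is an escape meaning the section continues. */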
1027 for (g = 0; g < ics->num_window_groups; g++) {
1028 int k = 0;
1029 while (k < ics->max_sfb) {
1030 uint8_t sect_end = k;
1031 int sect_len_incr;
1032 int sect_band_type = get_bits(gb, 4);
1033 if (sect_band_type == 12) {
1034 av_log(ac->avctx, AV_LOG_ERROR, "invalid band type\n");
1035 return -1;
1036 }
1037 do {
1038 sect_len_incr = get_bits(gb, bits);
1039 sect_end += sect_len_incr;
1040 if (get_bits_left(gb) < 0) {
1041 av_log(ac->avctx, AV_LOG_ERROR, overread_err);
1042 return -1;
1043 }
1044 if (sect_end > ics->max_sfb) {
1045 av_log(ac->avctx, AV_LOG_ERROR,
1046 "Number of bands (%d) exceeds limit (%d).\n",
1047 sect_end, ics->max_sfb);
1048 return -1;
1049 }
1050 } while (sect_len_incr == (1 << bits) - 1);
1051 for (; k < sect_end; k++) {
1052 band_type [idx] = sect_band_type;
1053 band_type_run_end[idx++] = sect_end;
1054 }
1055 }
1056 }
1057 return 0;
1058 }
1059
1060 /**
1061 * Decode scalefactors; reference: table 4.47.
1062 *
1063 * @param global_gain first scalefactor value as scalefactors are differentially coded
1064 * @param band_type array of the used band type
1065 * @param band_type_run_end array of the last scalefactor band of a band type run
1066 * @param sf array of scalefactors or intensity stereo positions
1067 *
1068 * @return Returns error status. 0 - OK, !0 - error
1069 */
1070 static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb,
1071 unsigned int global_gain,
1072 IndividualChannelStream *ics,
1073 enum BandType band_type[120],
1074 int band_type_run_end[120])
1075 {
1076 int g, i, idx = 0;
1077 int offset[3] = { global_gain, global_gain - 90, 0 };
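/* offset[0] accumulates the ordinary scalefactors (seeded with global_gain),
 * offset[1] the PNS noise energies and offset[2] the intensity stereo
 * positions; all three are differentially coded. */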
1078 int clipped_offset;
1079 int noise_flag = 1;
1080 for (g = 0; g < ics->num_window_groups; g++) {
1081 for (i = 0; i < ics->max_sfb;) {
1082 int run_end = band_type_run_end[idx];
1083 if (band_type[idx] == ZERO_BT) {
1084 for (; i < run_end; i++, idx++)
1085 sf[idx] = 0.;
1086 } else if ((band_type[idx] == INTENSITY_BT) || (band_type[idx] == INTENSITY_BT2)) {
1087 for (; i < run_end; i++, idx++) {
1088 offset[2] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
1089 clipped_offset = av_clip(offset[2], -155, 100);
1090 if (offset[2] != clipped_offset) {
1091 av_log_ask_for_sample(ac->avctx, "Intensity stereo "
1092 "position clipped (%d -> %d).\nIf you heard an "
1093 "audible artifact, there may be a bug in the "
1094 "decoder. ", offset[2], clipped_offset);
1095 }
1096 sf[idx] = ff_aac_pow2sf_tab[-clipped_offset + POW_SF2_ZERO];
1097 }
1098 } else if (band_type[idx] == NOISE_BT) {
1099 for (; i < run_end; i++, idx++) {
1100 if (noise_flag-- > 0)
1101 offset[1] += get_bits(gb, 9) - 256;
1102 else
1103 offset[1] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
1104 clipped_offset = av_clip(offset[1], -100, 155);
1105 if (offset[1] != clipped_offset) {
1106 av_log_ask_for_sample(ac->avctx, "Noise gain clipped "
1107 "(%d -> %d).\nIf you heard an audible "
1108 "artifact, there may be a bug in the decoder. ",
1109 offset[1], clipped_offset);
1110 }
1111 sf[idx] = -ff_aac_pow2sf_tab[clipped_offset + POW_SF2_ZERO];
1112 }
1113 } else {
1114 for (; i < run_end; i++, idx++) {
1115 offset[0] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
1116 if (offset[0] > 255U) {
1117 av_log(ac->avctx, AV_LOG_ERROR,
1118 "Scalefactor (%d) out of range.\n", offset[0]);
1119 return -1;
1120 }
1121 sf[idx] = -ff_aac_pow2sf_tab[offset[0] - 100 + POW_SF2_ZERO];
1122 }
1123 }
1124 }
1125 }
1126 return 0;
1127 }
1128
1129 /**
1130 * Decode pulse data; reference: table 4.7.
1131 */
1132 static int decode_pulses(Pulse *pulse, GetBitContext *gb,
1133 const uint16_t *swb_offset, int num_swb)
1134 {
1135 int i, pulse_swb;
1136 pulse->num_pulse = get_bits(gb, 2) + 1;
1137 pulse_swb = get_bits(gb, 6);
1138 if (pulse_swb >= num_swb)
1139 return -1;
1140 pulse->pos[0] = swb_offset[pulse_swb];
1141 pulse->pos[0] += get_bits(gb, 5);
1142 if (pulse->pos[0] > 1023)
1143 return -1;
1144 pulse->amp[0] = get_bits(gb, 4);
1145 for (i = 1; i < pulse->num_pulse; i++) {
1146 pulse->pos[i] = get_bits(gb, 5) + pulse->pos[i - 1];
1147 if (pulse->pos[i] > 1023)
1148 return -1;
1149 pulse->amp[i] = get_bits(gb, 4);
1150 }
1151 return 0;
1152 }
1153
1154 /**
1155 * Decode Temporal Noise Shaping data; reference: table 4.48.
1156 *
1157 * @return Returns error status. 0 - OK, !0 - error
1158 */
1159 static int decode_tns(AACContext *ac, TemporalNoiseShaping *tns,
1160 GetBitContext *gb, const IndividualChannelStream *ics)
1161 {
1162 int w, filt, i, coef_len, coef_res, coef_compress;
1163 const int is8 = ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE;
1164 const int tns_max_order = is8 ? 7 : ac->oc[1].m4ac.object_type == AOT_AAC_MAIN ? 20 : 12;
1165 for (w = 0; w < ics->num_windows; w++) {
1166 if ((tns->n_filt[w] = get_bits(gb, 2 - is8))) {
1167 coef_res = get_bits1(gb);
1168
1169 for (filt = 0; filt < tns->n_filt[w]; filt++) {
1170 int tmp2_idx;
1171 tns->length[w][filt] = get_bits(gb, 6 - 2 * is8);
1172
1173 if ((tns->order[w][filt] = get_bits(gb, 5 - 2 * is8)) > tns_max_order) {
1174 av_log(ac->avctx, AV_LOG_ERROR, "TNS filter order %d is greater than maximum %d.\n",
1175 tns->order[w][filt], tns_max_order);
1176 tns->order[w][filt] = 0;
1177 return -1;
1178 }
1179 if (tns->order[w][filt]) {
1180 tns->direction[w][filt] = get_bits1(gb);
1181 coef_compress = get_bits1(gb);
1182 coef_len = coef_res + 3 - coef_compress;
1183 tmp2_idx = 2 * coef_compress + coef_res;
1184
1185 for (i = 0; i < tns->order[w][filt]; i++)
1186 tns->coef[w][filt][i] = tns_tmp2_map[tmp2_idx][get_bits(gb, coef_len)];
1187 }
1188 }
1189 }
1190 }
1191 return 0;
1192 }
1193
1194 /**
1195 * Decode Mid/Side data; reference: table 4.54.
1196 *
1197 * @param ms_present Indicates mid/side stereo presence. [0] mask is all 0s;
1198 * [1] mask is decoded from bitstream; [2] mask is all 1s;
1199 * [3] reserved for scalable AAC
1200 */
1201 static void decode_mid_side_stereo(ChannelElement *cpe, GetBitContext *gb,
1202 int ms_present)
1203 {
1204 int idx;
1205 if (ms_present == 1) {
1206 for (idx = 0; idx < cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb; idx++)
1207 cpe->ms_mask[idx] = get_bits1(gb);
1208 } else if (ms_present == 2) {
1209 memset(cpe->ms_mask, 1, cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb * sizeof(cpe->ms_mask[0]));
1210 }
1211 }
1212
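/* The VMUL* helpers dequantize vector-quantized spectral values: VMUL2/VMUL4
 * scale two or four codebook entries selected by idx, and the *S variants
 * additionally apply sign bits read from the bitstream. They may be overridden
 * by architecture-specific versions (see arm/aac.h). */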
1213 #ifndef VMUL2
1214 static inline float *VMUL2(float *dst, const float *v, unsigned idx,
1215 const float *scale)
1216 {
1217 float s = *scale;
1218 *dst++ = v[idx & 15] * s;
1219 *dst++ = v[idx>>4 & 15] * s;
1220 return dst;
1221 }
1222 #endif
1223
1224 #ifndef VMUL4
1225 static inline float *VMUL4(float *dst, const float *v, unsigned idx,
1226 const float *scale)
1227 {
1228 float s = *scale;
1229 *dst++ = v[idx & 3] * s;
1230 *dst++ = v[idx>>2 & 3] * s;
1231 *dst++ = v[idx>>4 & 3] * s;
1232 *dst++ = v[idx>>6 & 3] * s;
1233 return dst;
1234 }
1235 #endif
1236
1237 #ifndef VMUL2S
1238 static inline float *VMUL2S(float *dst, const float *v, unsigned idx,
1239 unsigned sign, const float *scale)
1240 {
1241 union av_intfloat32 s0, s1;
1242
1243 s0.f = s1.f = *scale;
1244 s0.i ^= sign >> 1 << 31;
1245 s1.i ^= sign << 31;
1246
1247 *dst++ = v[idx & 15] * s0.f;
1248 *dst++ = v[idx>>4 & 15] * s1.f;
1249
1250 return dst;
1251 }
1252 #endif
1253
1254 #ifndef VMUL4S
1255 static inline float *VMUL4S(float *dst, const float *v, unsigned idx,
1256 unsigned sign, const float *scale)
1257 {
1258 unsigned nz = idx >> 12;
1259 union av_intfloat32 s = { .f = *scale };
1260 union av_intfloat32 t;
1261
1262 t.i = s.i ^ (sign & 1U<<31);
1263 *dst++ = v[idx & 3] * t.f;
1264
1265 sign <<= nz & 1; nz >>= 1;
1266 t.i = s.i ^ (sign & 1U<<31);
1267 *dst++ = v[idx>>2 & 3] * t.f;
1268
1269 sign <<= nz & 1; nz >>= 1;
1270 t.i = s.i ^ (sign & 1U<<31);
1271 *dst++ = v[idx>>4 & 3] * t.f;
1272
1273 sign <<= nz & 1;
1274 t.i = s.i ^ (sign & 1U<<31);
1275 *dst++ = v[idx>>6 & 3] * t.f;
1276
1277 return dst;
1278 }
1279 #endif
1280
1281 /**
1282 * Decode spectral data; reference: table 4.50.
1283 * Dequantize and scale spectral data; reference: 4.6.3.3.
1284 *
1285 * @param coef array of dequantized, scaled spectral data
1286 * @param sf array of scalefactors or intensity stereo positions
1287 * @param pulse_present set if pulses are present
1288 * @param pulse pointer to pulse data struct
1289 * @param band_type array of the used band type
1290 *
1291 * @return Returns error status. 0 - OK, !0 - error
1292 */
1293 static int decode_spectrum_and_dequant(AACContext *ac, float coef[1024],
1294 GetBitContext *gb, const float sf[120],
1295 int pulse_present, const Pulse *pulse,
1296 const IndividualChannelStream *ics,
1297 enum BandType band_type[120])
1298 {
1299 int i, k, g, idx = 0;
1300 const int c = 1024 / ics->num_windows;
1301 const uint16_t *offsets = ics->swb_offset;
1302 float *coef_base = coef;
1303
1304 for (g = 0; g < ics->num_windows; g++)
1305 memset(coef + g * 128 + offsets[ics->max_sfb], 0, sizeof(float) * (c - offsets[ics->max_sfb]));
1306
1307 for (g = 0; g < ics->num_window_groups; g++) {
1308 unsigned g_len = ics->group_len[g];
1309
1310 for (i = 0; i < ics->max_sfb; i++, idx++) {
1311 const unsigned cbt_m1 = band_type[idx] - 1;
1312 float *cfo = coef + offsets[i];
1313 int off_len = offsets[i + 1] - offsets[i];
1314 int group;
1315
1316 if (cbt_m1 >= INTENSITY_BT2 - 1) {
1317 for (group = 0; group < g_len; group++, cfo+=128) {
1318 memset(cfo, 0, off_len * sizeof(float));
1319 }
1320 } else if (cbt_m1 == NOISE_BT - 1) {
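/* Perceptual Noise Substitution: fill the band with pseudorandom values and
 * rescale them to the energy given by the decoded noise scalefactor (sf[idx]). */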
1321 for (group = 0; group < g_len; group++, cfo+=128) {
1322 float scale;
1323 float band_energy;
1324
1325 for (k = 0; k < off_len; k++) {
1326 ac->random_state = lcg_random(ac->random_state);
1327 cfo[k] = ac->random_state;
1328 }
1329
1330 band_energy = ac->dsp.scalarproduct_float(cfo, cfo, off_len);
1331 scale = sf[idx] / sqrtf(band_energy);
1332 ac->dsp.vector_fmul_scalar(cfo, cfo, scale, off_len);
1333 }
1334 } else {
1335 const float *vq = ff_aac_codebook_vector_vals[cbt_m1];
1336 const uint16_t *cb_vector_idx = ff_aac_codebook_vector_idx[cbt_m1];
1337 VLC_TYPE (*vlc_tab)[2] = vlc_spectral[cbt_m1].table;
1338 OPEN_READER(re, gb);
1339
1340 switch (cbt_m1 >> 1) {
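/* Codebooks 1-2 hold signed 4-tuples, 3-4 unsigned 4-tuples with explicit
 * sign bits, 5-6 signed pairs, 7-10 unsigned pairs with sign bits, and 11
 * (the default case) pairs with sign bits plus escape sequences for
 * magnitudes of 16 and above. */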
1341 case 0:
1342 for (group = 0; group < g_len; group++, cfo+=128) {
1343 float *cf = cfo;
1344 int len = off_len;
1345
1346 do {
1347 int code;
1348 unsigned cb_idx;
1349
1350 UPDATE_CACHE(re, gb);
1351 GET_VLC(code, re, gb, vlc_tab, 8, 2);
1352 cb_idx = cb_vector_idx[code];
1353 cf = VMUL4(cf, vq, cb_idx, sf + idx);
1354 } while (len -= 4);
1355 }
1356 break;
1357
1358 case 1:
1359 for (group = 0; group < g_len; group++, cfo+=128) {
1360 float *cf = cfo;
1361 int len = off_len;
1362
1363 do {
1364 int code;
1365 unsigned nnz;
1366 unsigned cb_idx;
1367 uint32_t bits;
1368
1369 UPDATE_CACHE(re, gb);
1370 GET_VLC(code, re, gb, vlc_tab, 8, 2);
1371 cb_idx = cb_vector_idx[code];
1372 nnz = cb_idx >> 8 & 15;
1373 bits = nnz ? GET_CACHE(re, gb) : 0;
1374 LAST_SKIP_BITS(re, gb, nnz);
1375 cf = VMUL4S(cf, vq, cb_idx, bits, sf + idx);
1376 } while (len -= 4);
1377 }
1378 break;
1379
1380 case 2:
1381 for (group = 0; group < g_len; group++, cfo+=128) {
1382 float *cf = cfo;
1383 int len = off_len;
1384
1385 do {
1386 int code;
1387 unsigned cb_idx;
1388
1389 UPDATE_CACHE(re, gb);
1390 GET_VLC(code, re, gb, vlc_tab, 8, 2);
1391 cb_idx = cb_vector_idx[code];
1392 cf = VMUL2(cf, vq, cb_idx, sf + idx);
1393 } while (len -= 2);
1394 }
1395 break;
1396
1397 case 3:
1398 case 4:
1399 for (group = 0; group < g_len; group++, cfo+=128) {
1400 float *cf = cfo;
1401 int len = off_len;
1402
1403 do {
1404 int code;
1405 unsigned nnz;
1406 unsigned cb_idx;
1407 unsigned sign;
1408
1409 UPDATE_CACHE(re, gb);
1410 GET_VLC(code, re, gb, vlc_tab, 8, 2);
1411 cb_idx = cb_vector_idx[code];
1412 nnz = cb_idx >> 8 & 15;
1413 sign = nnz ? SHOW_UBITS(re, gb, nnz) << (cb_idx >> 12) : 0;
1414 LAST_SKIP_BITS(re, gb, nnz);
1415 cf = VMUL2S(cf, vq, cb_idx, sign, sf + idx);
1416 } while (len -= 2);
1417 }
1418 break;
1419
1420 default:
1421 for (group = 0; group < g_len; group++, cfo+=128) {
1422 float *cf = cfo;
1423 uint32_t *icf = (uint32_t *) cf;
1424 int len = off_len;
1425
1426 do {
1427 int code;
1428 unsigned nzt, nnz;
1429 unsigned cb_idx;
1430 uint32_t bits;
1431 int j;
1432
1433 UPDATE_CACHE(re, gb);
1434 GET_VLC(code, re, gb, vlc_tab, 8, 2);
1435
1436 if (!code) {
1437 *icf++ = 0;
1438 *icf++ = 0;
1439 continue;
1440 }
1441
1442 cb_idx = cb_vector_idx[code];
1443 nnz = cb_idx >> 12;
1444 nzt = cb_idx >> 8;
1445 bits = SHOW_UBITS(re, gb, nnz) << (32-nnz);
1446 LAST_SKIP_BITS(re, gb, nnz);
1447
1448 for (j = 0; j < 2; j++) {
1449 if (nzt & 1<<j) {
1450 uint32_t b;
1451 int n;
1452 /* The total length of escape_sequence must be < 22 bits according
1453 to the specification (i.e. max is 111111110xxxxxxxxxxxx). */
1454 UPDATE_CACHE(re, gb);
1455 b = GET_CACHE(re, gb);
1456 b = 31 - av_log2(~b);
1457
1458 if (b > 8) {
1459 av_log(ac->avctx, AV_LOG_ERROR, "error in spectral data, ESC overflow\n");
1460 return -1;
1461 }
1462
1463 SKIP_BITS(re, gb, b + 1);
1464 b += 4;
1465 n = (1 << b) + SHOW_UBITS(re, gb, b);
1466 LAST_SKIP_BITS(re, gb, b);
1467 *icf++ = cbrt_tab[n] | (bits & 1U<<31);
1468 bits <<= 1;
1469 } else {
1470 unsigned v = ((const uint32_t*)vq)[cb_idx & 15];
1471 *icf++ = (bits & 1U<<31) | v;
1472 bits <<= !!v;
1473 }
1474 cb_idx >>= 4;
1475 }
1476 } while (len -= 2);
1477
1478 ac->dsp.vector_fmul_scalar(cfo, cfo, sf[idx], off_len);
1479 }
1480 }
1481
1482 CLOSE_READER(re, gb);
1483 }
1484 }
1485 coef += g_len << 7;
1486 }
1487
1488 if (pulse_present) {
1489 idx = 0;
1490 for (i = 0; i < pulse->num_pulse; i++) {
1491 float co = coef_base[ pulse->pos[i] ];
1492 while (offsets[idx + 1] <= pulse->pos[i])
1493 idx++;
1494 if (band_type[idx] != NOISE_BT && sf[idx]) {
1495 float ico = -pulse->amp[i];
1496 if (co) {
1497 co /= sf[idx];
1498 ico = co / sqrtf(sqrtf(fabsf(co))) + (co > 0 ? -ico : ico);
1499 }
1500 coef_base[ pulse->pos[i] ] = cbrtf(fabsf(ico)) * ico * sf[idx];
1501 }
1502 }
1503 }
1504 return 0;
1505 }
1506
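/* The flt16_* helpers emulate the reduced-precision (16-bit mantissa) float
 * arithmetic used by the MAIN-profile predictor by rounding or truncating
 * the low half of the IEEE-754 bit pattern. */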
1507 static av_always_inline float flt16_round(float pf)
1508 {
1509 union av_intfloat32 tmp;
1510 tmp.f = pf;
1511 tmp.i = (tmp.i + 0x00008000U) & 0xFFFF0000U;
1512 return tmp.f;
1513 }
1514
1515 static av_always_inline float flt16_even(float pf)
1516 {
1517 union av_intfloat32 tmp;
1518 tmp.f = pf;
1519 tmp.i = (tmp.i + 0x00007FFFU + (tmp.i & 0x00010000U >> 16)) & 0xFFFF0000U;
1520 return tmp.f;
1521 }
1522
1523 static av_always_inline float flt16_trunc(float pf)
1524 {
1525 union av_intfloat32 pun;
1526 pun.f = pf;
1527 pun.i &= 0xFFFF0000U;
1528 return pun.f;
1529 }
1530
1531 static av_always_inline void predict(PredictorState *ps, float *coef,
1532 int output_enable)
1533 {
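/* Second-order backward-adaptive lattice predictor: k1/k2 are estimated from
 * the running correlation/variance state, pv is the predicted value, and all
 * intermediate results pass through the flt16_* helpers to mirror the
 * reduced-precision arithmetic the predictor is specified with. */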
1534 const float a = 0.953125; // 61.0 / 64
1535 const float alpha = 0.90625; // 29.0 / 32
1536 float e0, e1;
1537 float pv;
1538 float k1, k2;
1539 float r0 = ps->r0, r1 = ps->r1;
1540 float cor0 = ps->cor0, cor1 = ps->cor1;
1541 float var0 = ps->var0, var1 = ps->var1;
1542
1543 k1 = var0 > 1 ? cor0 * flt16_even(a / var0) : 0;
1544 k2 = var1 > 1 ? cor1 * flt16_even(a / var1) : 0;
1545
1546 pv = flt16_round(k1 * r0 + k2 * r1);
1547 if (output_enable)
1548 *coef += pv;
1549
1550 e0 = *coef;
1551 e1 = e0 - k1 * r0;
1552
1553 ps->cor1 = flt16_trunc(alpha * cor1 + r1 * e1);
1554 ps->var1 = flt16_trunc(alpha * var1 + 0.5f * (r1 * r1 + e1 * e1));
1555 ps->cor0 = flt16_trunc(alpha * cor0 + r0 * e0);
1556 ps->var0 = flt16_trunc(alpha * var0 + 0.5f * (r0 * r0 + e0 * e0));
1557
1558 ps->r1 = flt16_trunc(a * (r0 - k1 * e0));
1559 ps->r0 = flt16_trunc(a * e0);
1560 }
1561
1562 /**
1563 * Apply AAC-Main style frequency domain prediction.
1564 */
1565 static void apply_prediction(AACContext *ac, SingleChannelElement *sce)
1566 {
1567 int sfb, k;
1568
1569 if (!sce->ics.predictor_initialized) {
1570 reset_all_predictors(sce->predictor_state);
1571 sce->ics.predictor_initialized = 1;
1572 }
1573
1574 if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
1575 for (sfb = 0; sfb < ff_aac_pred_sfb_max[ac->oc[1].m4ac.sampling_index]; sfb++) {
1576 for (k = sce->ics.swb_offset[sfb]; k < sce->ics.swb_offset[sfb + 1]; k++) {
1577 predict(&sce->predictor_state[k], &sce->coeffs[k],
1578 sce->ics.predictor_present && sce->ics.prediction_used[sfb]);
1579 }
1580 }
1581 if (sce->ics.predictor_reset_group)
1582 reset_predictor_group(sce->predictor_state, sce->ics.predictor_reset_group);
1583 } else
1584 reset_all_predictors(sce->predictor_state);
1585 }
1586
1587 /**
1588 * Decode an individual_channel_stream payload; reference: table 4.44.
1589 *
1590 * @param common_window Channels have independent [0], or shared [1], Individual Channel Stream information.
1591 * @param scale_flag scalable [1] or non-scalable [0] AAC (Unused until scalable AAC is implemented.)
1592 *
1593 * @return Returns error status. 0 - OK, !0 - error
1594 */
1595 static int decode_ics(AACContext *ac, SingleChannelElement *sce,
1596 GetBitContext *gb, int common_window, int scale_flag)
1597 {
1598 Pulse pulse;
1599 TemporalNoiseShaping *tns = &sce->tns;
1600 IndividualChannelStream *ics = &sce->ics;
1601 float *out = sce->coeffs;
1602 int global_gain, pulse_present = 0;
1603
1604 /* This assignment is to silence a GCC warning about the variable being used
1605 * uninitialized when in fact it is always initialized before use.
1606 */
1607 pulse.num_pulse = 0;
1608
1609 global_gain = get_bits(gb, 8);
1610
1611 if (!common_window && !scale_flag) {
1612 if (decode_ics_info(ac, ics, gb) < 0)
1613 return AVERROR_INVALIDDATA;
1614 }
1615
1616 if (decode_band_types(ac, sce->band_type, sce->band_type_run_end, gb, ics) < 0)
1617 return -1;
1618 if (decode_scalefactors(ac, sce->sf, gb, global_gain, ics, sce->band_type, sce->band_type_run_end) < 0)
1619 return -1;
1620
1621 pulse_present = 0;
1622 if (!scale_flag) {
1623 if ((pulse_present = get_bits1(gb))) {
1624 if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
1625 av_log(ac->avctx, AV_LOG_ERROR, "Pulse tool not allowed in eight short sequence.\n");
1626 return -1;
1627 }
1628 if (decode_pulses(&pulse, gb, ics->swb_offset, ics->num_swb)) {
1629 av_log(ac->avctx, AV_LOG_ERROR, "Pulse data corrupt or invalid.\n");
1630 return -1;
1631 }
1632 }
1633 if ((tns->present = get_bits1(gb)) && decode_tns(ac, tns, gb, ics))
1634 return -1;
1635 if (get_bits1(gb)) {
1636 av_log_missing_feature(ac->avctx, "SSR", 1);
1637 return AVERROR_PATCHWELCOME;
1638 }
1639 }
1640
1641 if (decode_spectrum_and_dequant(ac, out, gb, sce->sf, pulse_present, &pulse, ics, sce->band_type) < 0)
1642 return -1;
1643
1644 if (ac->oc[1].m4ac.object_type == AOT_AAC_MAIN && !common_window)
1645 apply_prediction(ac, sce);
1646
1647 return 0;
1648 }
1649
1650 /**
1651 * Mid/Side stereo decoding; reference: 4.6.8.1.3.
1652 */
1653 static void apply_mid_side_stereo(AACContext *ac, ChannelElement *cpe)
1654 {
1655 const IndividualChannelStream *ics = &cpe->ch[0].ics;
1656 float *ch0 = cpe->ch[0].coeffs;
1657 float *ch1 = cpe->ch[1].coeffs;
1658 int g, i, group, idx = 0;
1659 const uint16_t *offsets = ics->swb_offset;
1660 for (g = 0; g < ics->num_window_groups; g++) {
1661 for (i = 0; i < ics->max_sfb; i++, idx++) {
1662 if (cpe->ms_mask[idx] &&
1663 cpe->ch[0].band_type[idx] < NOISE_BT && cpe->ch[1].band_type[idx] < NOISE_BT) {
1664 for (group = 0; group < ics->group_len[g]; group++) {
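/* butterflies_float() converts mid/side to left/right in place:
 * ch0 becomes mid + side, ch1 becomes mid - side. */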
1665 ac->dsp.butterflies_float(ch0 + group * 128 + offsets[i],
1666 ch1 + group * 128 + offsets[i],
1667 offsets[i+1] - offsets[i]);
1668 }
1669 }
1670 }
1671 ch0 += ics->group_len[g] * 128;
1672 ch1 += ics->group_len[g] * 128;
1673 }
1674 }
1675
1676 /**
1677 * intensity stereo decoding; reference: 4.6.8.2.3
1678 *
1679 * @param ms_present Indicates mid/side stereo presence. [0] mask is all 0s;
1680 * [1] mask is decoded from bitstream; [2] mask is all 1s;
1681 * [3] reserved for scalable AAC
1682 */
1683 static void apply_intensity_stereo(AACContext *ac, ChannelElement *cpe, int ms_present)
1684 {
1685 const IndividualChannelStream *ics = &cpe->ch[1].ics;
1686 SingleChannelElement *sce1 = &cpe->ch[1];
1687 float *coef0 = cpe->ch[0].coeffs, *coef1 = cpe->ch[1].coeffs;
1688 const uint16_t *offsets = ics->swb_offset;
1689 int g, group, i, idx = 0;
1690 int c;
1691 float scale;
1692 for (g = 0; g < ics->num_window_groups; g++) {
1693 for (i = 0; i < ics->max_sfb;) {
1694 if (sce1->band_type[idx] == INTENSITY_BT || sce1->band_type[idx] == INTENSITY_BT2) {
1695 const int bt_run_end = sce1->band_type_run_end[idx];
1696 for (; i < bt_run_end; i++, idx++) {
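/* INTENSITY_BT (15) is in phase, INTENSITY_BT2 (14) out of phase, giving
 * c = +/-1; an M/S mask bit flips the direction. */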
1697 c = -1 + 2 * (sce1->band_type[idx] - 14);
1698 if (ms_present)
1699 c *= 1 - 2 * cpe->ms_mask[idx];
1700 scale = c * sce1->sf[idx];
1701 for (group = 0; group < ics->group_len[g]; group++)
1702 ac->dsp.vector_fmul_scalar(coef1 + group * 128 + offsets[i],
1703 coef0 + group * 128 + offsets[i],
1704 scale,
1705 offsets[i + 1] - offsets[i]);
1706 }
1707 } else {
1708 int bt_run_end = sce1->band_type_run_end[idx];
1709 idx += bt_run_end - i;
1710 i = bt_run_end;
1711 }
1712 }
1713 coef0 += ics->group_len[g] * 128;
1714 coef1 += ics->group_len[g] * 128;
1715 }
1716 }
1717
1718 /**
1719 * Decode a channel_pair_element; reference: table 4.4.
1720 *
1721 * @return Returns error status. 0 - OK, !0 - error
1722 */
1723 static int decode_cpe(AACContext *ac, GetBitContext *gb, ChannelElement *cpe)
1724 {
1725 int i, ret, common_window, ms_present = 0;
1726
1727 common_window = get_bits1(gb);
1728 if (common_window) {
1729 if (decode_ics_info(ac, &cpe->ch[0].ics, gb))
1730 return AVERROR_INVALIDDATA;
1731 i = cpe->ch[1].ics.use_kb_window[0];
1732 cpe->ch[1].ics = cpe->ch[0].ics;
1733 cpe->ch[1].ics.use_kb_window[1] = i;
1734 if (cpe->ch[1].ics.predictor_present && (ac->oc[1].m4ac.object_type != AOT_AAC_MAIN))
1735 if ((cpe->ch[1].ics.ltp.present = get_bits(gb, 1)))
1736 decode_ltp(&cpe->ch[1].ics.ltp, gb, cpe->ch[1].ics.max_sfb);
1737 ms_present = get_bits(gb, 2);
1738 if (ms_present == 3) {
1739 av_log(ac->avctx, AV_LOG_ERROR, "ms_present = 3 is reserved.\n");
1740 return -1;
1741 } else if (ms_present)
1742 decode_mid_side_stereo(cpe, gb, ms_present);
1743 }
1744 if ((ret = decode_ics(ac, &cpe->ch[0], gb, common_window, 0)))
1745 return ret;
1746 if ((ret = decode_ics(ac, &cpe->ch[1], gb, common_window, 0)))
1747 return ret;
1748
1749 if (common_window) {
1750 if (ms_present)
1751 apply_mid_side_stereo(ac, cpe);
1752 if (ac->oc[1].m4ac.object_type == AOT_AAC_MAIN) {
1753 apply_prediction(ac, &cpe->ch[0]);
1754 apply_prediction(ac, &cpe->ch[1]);
1755 }
1756 }
1757
1758 apply_intensity_stereo(ac, cpe, ms_present);
1759 return 0;
1760 }
1761
1762 static const float cce_scale[] = {
1763 1.09050773266525765921, //2^(1/8)
1764 1.18920711500272106672, //2^(1/4)
1765 M_SQRT2,
1766 2,
1767 };
1768
1769 /**
1770 * Decode coupling_channel_element; reference: table 4.8.
1771 *
1772 * @return Returns error status. 0 - OK, !0 - error
1773 */
1774 static int decode_cce(AACContext *ac, GetBitContext *gb, ChannelElement *che)
1775 {
1776 int num_gain = 0;
1777 int c, g, sfb, ret;
1778 int sign;
1779 float scale;
1780 SingleChannelElement *sce = &che->ch[0];
1781 ChannelCoupling *coup = &che->coup;
1782
1783 coup->coupling_point = 2 * get_bits1(gb);
1784 coup->num_coupled = get_bits(gb, 3);
1785 for (c = 0; c <= coup->num_coupled; c++) {
1786 num_gain++;
1787 coup->type[c] = get_bits1(gb) ? TYPE_CPE : TYPE_SCE;
1788 coup->id_select[c] = get_bits(gb, 4);
1789 if (coup->type[c] == TYPE_CPE) {
1790 coup->ch_select[c] = get_bits(gb, 2);
1791 if (coup->ch_select[c] == 3)
1792 num_gain++;
1793 } else
1794 coup->ch_select[c] = 2;
1795 }
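/* Independently switched CCEs are always applied after the IMDCT; otherwise
 * this bit selects coupling before TNS or between TNS and the IMDCT. */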
1796 coup->coupling_point += get_bits1(gb) || (coup->coupling_point >> 1);
1797
1798 sign = get_bits(gb, 1);
1799 scale = cce_scale[get_bits(gb, 2)];
1800
1801 if ((ret = decode_ics(ac, sce, gb, 0, 0)))
1802 return ret;
1803
1804 for (c = 0; c < num_gain; c++) {
1805 int idx = 0;
1806 int cge = 1;
1807 int gain = 0;
1808 float gain_cache = 1.;
1809 if (c) {
1810 cge = coup->coupling_point == AFTER_IMDCT ? 1 : get_bits1(gb);
1811 gain = cge ? get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60: 0;
1812 gain_cache = powf(scale, -gain);
1813 }
1814 if (coup->coupling_point == AFTER_IMDCT) {
1815 coup->gain[c][0] = gain_cache;
1816 } else {
1817 for (g = 0; g < sce->ics.num_window_groups; g++) {
1818 for (sfb = 0; sfb < sce->ics.max_sfb; sfb++, idx++) {
1819 if (sce->band_type[idx] != ZERO_BT) {
1820 if (!cge) {
1821 int t = get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
1822 if (t) {
1823 int s = 1;
1824 t = gain += t;
1825 if (sign) {
1826 s -= 2 * (t & 0x1);
1827 t >>= 1;
1828 }
1829 gain_cache = powf(scale, -t) * s;
1830 }
1831 }
1832 coup->gain[c][idx] = gain_cache;
1833 }
1834 }
1835 }
1836 }
1837 }
1838 return 0;
1839 }
1840
1841 /**
1842 * Parse whether channels are to be excluded from Dynamic Range Compression; reference: table 4.53.
1843 *
1844 * @return Returns number of bytes consumed.
1845 */
1846 static int decode_drc_channel_exclusions(DynamicRangeControl *che_drc,
1847 GetBitContext *gb)
1848 {
1849 int i;
1850 int num_excl_chan = 0;
1851
1852 do {
1853 for (i = 0; i < 7; i++)
1854 che_drc->exclude_mask[num_excl_chan++] = get_bits1(gb);
1855 } while (num_excl_chan < MAX_CHANNELS - 7 && get_bits1(gb));
1856
1857 return num_excl_chan / 7;
1858 }
1859
1860 /**
1861 * Decode dynamic range information; reference: table 4.52.
1862 *
1863 * @return Returns number of bytes consumed.
1864 */
1865 static int decode_dynamic_range(DynamicRangeControl *che_drc,
1866 GetBitContext *gb)
1867 {
1868 int n = 1;
1869 int drc_num_bands = 1;
1870 int i;
1871
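// n counts bytes consumed; every optional block below is byte-aligned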
1872 /* pce_tag_present? */
1873 if (get_bits1(gb)) {
1874 che_drc->pce_instance_tag = get_bits(gb, 4);
1875 skip_bits(gb, 4); // tag_reserved_bits
1876 n++;
1877 }
1878
1879 /* excluded_chns_present? */
1880 if (get_bits1(gb)) {
1881 n += decode_drc_channel_exclusions(che_drc, gb);
1882 }
1883
1884 /* drc_bands_present? */
1885 if (get_bits1(gb)) {
1886 che_drc->band_incr = get_bits(gb, 4);
1887 che_drc->interpolation_scheme = get_bits(gb, 4);
1888 n++;
1889 drc_num_bands += che_drc->band_incr;
1890 for (i = 0; i < drc_num_bands; i++) {
1891 che_drc->band_top[i] = get_bits(gb, 8);
1892 n++;
1893 }
1894 }
1895
1896 /* prog_ref_level_present? */
1897 if (get_bits1(gb)) {
1898 che_drc->prog_ref_level = get_bits(gb, 7);
1899 skip_bits1(gb); // prog_ref_level_reserved_bits
1900 n++;
1901 }
1902
1903 for (i = 0; i < drc_num_bands; i++) {
1904 che_drc->dyn_rng_sgn[i] = get_bits1(gb);
1905 che_drc->dyn_rng_ctl[i] = get_bits(gb, 7);
1906 n++;
1907 }
1908
1909 return n;
1910 }
1911
1912 /**
1913 * Decode extension data (incomplete); reference: table 4.51.
1914 *
1915 * @param cnt length of TYPE_FIL syntactic element in bytes
1916 *
1917 * @return Returns number of bytes consumed
1918 */
1919 static int decode_extension_payload(AACContext *ac, GetBitContext *gb, int cnt,
1920 ChannelElement *che, enum RawDataBlockType elem_type)
1921 {
1922 int crc_flag = 0;
1923 int res = cnt;
1924 switch (get_bits(gb, 4)) { // extension type
1925 case EXT_SBR_DATA_CRC:
1926 crc_flag++;
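// fall through: the CRC variant shares the SBR payload handling below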
1927 case EXT_SBR_DATA:
1928 if (!che) {
1929 av_log(ac->avctx, AV_LOG_ERROR, "SBR was found before the first channel element.\n");
1930 return res;
1931 } else if (!ac->oc[1].m4ac.sbr) {
1932 av_log(ac->avctx, AV_LOG_ERROR, "SBR signaled as not present, but was found in the bitstream.\n");
1933 skip_bits_long(gb, 8 * cnt - 4);
1934 return res;
1935 } else if (ac->oc[1].m4ac.sbr == -1 && ac->oc[1].status == OC_LOCKED) {
1936 av_log(ac->avctx, AV_LOG_ERROR, "Implicit SBR was found with a first occurrence after the first frame.\n");
1937 skip_bits_long(gb, 8 * cnt - 4);
1938 return res;
1939 } else if (ac->oc[1].m4ac.ps == -1 && ac->oc[1].status < OC_LOCKED && ac->avctx->channels == 1) {
1940 ac->oc[1].m4ac.sbr = 1;
1941 ac->oc[1].m4ac.ps = 1;
1942 output_configure(ac, ac->oc[1].layout_map, ac->oc[1].layout_map_tags,
1943 ac->oc[1].status);
1944 } else {
1945 ac->oc[1].m4ac.sbr = 1;
1946 }
1947 res = ff_decode_sbr_extension(ac, &che->sbr, gb, crc_flag, cnt, elem_type);
1948 break;
1949 case EXT_DYNAMIC_RANGE:
1950 res = decode_dynamic_range(&ac->che_drc, gb);
1951 break;
1952 case EXT_FILL:
1953 case EXT_FILL_DATA:
1954 case EXT_DATA_ELEMENT:
1955 default:
1956 skip_bits_long(gb, 8 * cnt - 4);
1957 break;
1958 }
1959 return res;
1960 }
1961
1962 /**
1963 * Decode Temporal Noise Shaping filter coefficients and apply all-pole filters; reference: 4.6.9.3.
1964 *
1965 * @param decode 1 if tool is used normally, 0 if tool is used in LTP.
1966 * @param coef spectral coefficients
1967 */
1968 static void apply_tns(float coef[1024], TemporalNoiseShaping *tns,
1969 IndividualChannelStream *ics, int decode)
1970 {
1971 const int mmm = FFMIN(ics->tns_max_bands, ics->max_sfb);
1972 int w, filt, m, i;
1973 int bottom, top, order, start, end, size, inc;
1974 float lpc[TNS_MAX_ORDER];
1975 float tmp[TNS_MAX_ORDER];
1976
1977 for (w = 0; w < ics->num_windows; w++) {
1978 bottom = ics->num_swb;
1979 for (filt = 0; filt < tns->n_filt[w]; filt++) {
1980 top = bottom;
1981 bottom = FFMAX(0, top - tns->length[w][filt]);
1982 order = tns->order[w][filt];
1983 if (order == 0)
1984 continue;
1985
1986 // tns_decode_coef
1987 compute_lpc_coefs(tns->coef[w][filt], order, lpc, 0, 0, 0);
1988
1989 start = ics->swb_offset[FFMIN(bottom, mmm)];
1990 end = ics->swb_offset[FFMIN( top, mmm)];
1991 if ((size = end - start) <= 0)
1992 continue;
1993 if (tns->direction[w][filt]) {
1994 inc = -1;
1995 start = end - 1;
1996 } else {
1997 inc = 1;
1998 }
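// short windows are stored 128 coefficients apart in coef[]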
1999 start += w * 128;
2000
2001 if (decode) {
2002 // ar filter
2003 for (m = 0; m < size; m++, start += inc)
2004 for (i = 1; i <= FFMIN(m, order); i++)
2005 coef[start] -= coef[start - i * inc] * lpc[i - 1];
2006 } else {
2007 // ma filter
2008 for (m = 0; m < size; m++, start += inc) {
2009 tmp[0] = coef[start];
2010 for (i = 1; i <= FFMIN(m, order); i++)
2011 coef[start] += tmp[i] * lpc[i - 1];
2012 for (i = order; i > 0; i--)
2013 tmp[i] = tmp[i - 1];
2014 }
2015 }
2016 }
2017 }
2018 }
2019
2020 /**
2021 * Apply windowing and MDCT to obtain the spectral
2022 * coefficients from the time-domain samples predicted by LTP.
2023 */
2024 static void windowing_and_mdct_ltp(AACContext *ac, float *out,
2025 float *in, IndividualChannelStream *ics)
2026 {
2027 const float *lwindow = ics->use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
2028 const float *swindow = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
2029 const float *lwindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024;
2030 const float *swindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
2031
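// Window the 2048-sample prediction: the first half uses the previous frame's window
// shape, the second half the current one, then take the MDCT of the result.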
2032 if (ics->window_sequence[0] != LONG_STOP_SEQUENCE) {
2033 ac->fdsp.vector_fmul(in, in, lwindow_prev, 1024);
2034 } else {
2035 memset(in, 0, 448 * sizeof(float));
2036 ac->fdsp.vector_fmul(in + 448, in + 448, swindow_prev, 128);
2037 }
2038 if (ics->window_sequence[0] != LONG_START_SEQUENCE) {
2039 ac->dsp.vector_fmul_reverse(in + 1024, in + 1024, lwindow, 1024);
2040 } else {
2041 ac->dsp.vector_fmul_reverse(in + 1024 + 448, in + 1024 + 448, swindow, 128);
2042 memset(in + 1024 + 576, 0, 448 * sizeof(float));
2043 }
2044 ac->mdct_ltp.mdct_calc(&ac->mdct_ltp, out, in);
2045 }
2046
2047 /**
2048 * Apply the long term prediction
2049 */
2050 static void apply_ltp(AACContext *ac, SingleChannelElement *sce)
2051 {
2052 const LongTermPrediction *ltp = &sce->ics.ltp;
2053 const uint16_t *offsets = sce->ics.swb_offset;
2054 int i, sfb;
2055
2056 if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
2057 float *predTime = sce->ret;
2058 float *predFreq = ac->buf_mdct;
2059 int16_t num_samples = 2048;
2060
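// Build the predicted time signal from the LTP history, scaled by the transmitted
// coefficient; anything beyond lag + 1024 samples is zeroed before the MDCT.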
2061 if (ltp->lag < 1024)
2062 num_samples = ltp->lag + 1024;
2063 for (i = 0; i < num_samples; i++)
2064 predTime[i] = sce->ltp_state[i + 2048 - ltp->lag] * ltp->coef;
2065 memset(&predTime[i], 0, (2048 - i) * sizeof(float));
2066
2067 windowing_and_mdct_ltp(ac, predFreq, predTime, &sce->ics);
2068
2069 if (sce->tns.present)
2070 apply_tns(predFreq, &sce->tns, &sce->ics, 0);
2071
2072 for (sfb = 0; sfb < FFMIN(sce->ics.max_sfb, MAX_LTP_LONG_SFB); sfb++)
2073 if (ltp->used[sfb])
2074 for (i = offsets[sfb]; i < offsets[sfb + 1]; i++)
2075 sce->coeffs[i] += predFreq[i];
2076 }
2077 }
2078
2079 /**
2080 * Update the LTP buffer for next frame
2081 */
2082 static void update_ltp(AACContext *ac, SingleChannelElement *sce)
2083 {
2084 IndividualChannelStream *ics = &sce->ics;
2085 float *saved = sce->saved;
2086 float *saved_ltp = sce->coeffs;
2087 const float *lwindow = ics->use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
2088 const float *swindow = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
2089 int i;
2090
2091 if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
2092 memcpy(saved_ltp, saved, 512 * sizeof(float));
2093 memset(saved_ltp + 576, 0, 448 * sizeof(float));
2094 ac->dsp.vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
2095 for (i = 0; i < 64; i++)
2096 saved_ltp[i + 512] = ac->buf_mdct[1023 - i] * swindow[63 - i];
2097 } else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
2098 memcpy(saved_ltp, ac->buf_mdct + 512, 448 * sizeof(float));
2099 memset(saved_ltp + 576, 0, 448 * sizeof(float));
2100 ac->dsp.vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
2101 for (i = 0; i < 64; i++)
2102 saved_ltp[i + 512] = ac->buf_mdct[1023 - i] * swindow[63 - i];
2103 } else { // LONG_STOP or ONLY_LONG
2104 ac->dsp.vector_fmul_reverse(saved_ltp, ac->buf_mdct + 512, &lwindow[512], 512);
2105 for (i = 0; i < 512; i++)
2106 saved_ltp[i + 512] = ac->buf_mdct[1023 - i] * lwindow[511 - i];
2107 }
2108
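// ltp_state holds three 1024-sample blocks: the previous frame's output, the current
// frame's output, and the windowed time-domain estimate for the next frame.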
2109 memcpy(sce->ltp_state, sce->ltp_state+1024, 1024 * sizeof(*sce->ltp_state));
2110 memcpy(sce->ltp_state+1024, sce->ret, 1024 * sizeof(*sce->ltp_state));
2111 memcpy(sce->ltp_state+2048, saved_ltp, 1024 * sizeof(*sce->ltp_state));
2112 }
2113
2114 /**
2115 * Conduct IMDCT and windowing.
2116 */
2117 static void imdct_and_windowing(AACContext *ac, SingleChannelElement *sce)
2118 {
2119 IndividualChannelStream *ics = &sce->ics;
2120 float *in = sce->coeffs;
2121 float *out = sce->ret;
2122 float *saved = sce->saved;
2123 const float *swindow = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
2124 const float *lwindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024;
2125 const float *swindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
2126 float *buf = ac->buf_mdct;
2127 float *temp = ac->temp;
2128 int i;
2129
2130 // imdct
2131 if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
2132 for (i = 0; i < 1024; i += 128)
2133 ac->mdct_small.imdct_half(&ac->mdct_small, buf + i, in + i);
2134 } else
2135 ac->mdct.imdct_half(&ac->mdct, buf, in);
2136
2137 /* window overlapping
2138 * NOTE: To simplify the overlapping code, all 'meaningless' short to long
2139 * and long to short transitions are considered to be short to short
2140 * transitions. This leaves just two cases (long to long and short to short)
2141 * with a little special sauce for EIGHT_SHORT_SEQUENCE.
2142 */
2143 if ((ics->window_sequence[1] == ONLY_LONG_SEQUENCE || ics->window_sequence[1] == LONG_STOP_SEQUENCE) &&
2144 (ics->window_sequence[0] == ONLY_LONG_SEQUENCE || ics->window_sequence[0] == LONG_START_SEQUENCE)) {
2145 ac->dsp.vector_fmul_window( out, saved, buf, lwindow_prev, 512);
2146 } else {
2147 memcpy( out, saved, 448 * sizeof(float));
2148
2149 if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
2150 ac->dsp.vector_fmul_window(out + 448 + 0*128, saved + 448, buf + 0*128, swindow_prev, 64);
2151 ac->dsp.vector_fmul_window(out + 448 + 1*128, buf + 0*128 + 64, buf + 1*128, swindow, 64);
2152 ac->dsp.vector_fmul_window(out + 448 + 2*128, buf + 1*128 + 64, buf + 2*128, swindow, 64);
2153 ac->dsp.vector_fmul_window(out + 448 + 3*128, buf + 2*128 + 64, buf + 3*128, swindow, 64);
2154 ac->dsp.vector_fmul_window(temp, buf + 3*128 + 64, buf + 4*128, swindow, 64);
2155 memcpy( out + 448 + 4*128, temp, 64 * sizeof(float));
2156 } else {
2157 ac->dsp.vector_fmul_window(out + 448, saved + 448, buf, swindow_prev, 64);
2158 memcpy( out + 576, buf + 64, 448 * sizeof(float));
2159 }
2160 }
2161
2162 // buffer update
2163 if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
2164 memcpy( saved, temp + 64, 64 * sizeof(float));
2165 ac->dsp.vector_fmul_window(saved + 64, buf + 4*128 + 64, buf + 5*128, swindow, 64);
2166 ac->dsp.vector_fmul_window(saved + 192, buf + 5*128 + 64, buf + 6*128, swindow, 64);
2167 ac->dsp.vector_fmul_window(saved + 320, buf + 6*128 + 64, buf + 7*128, swindow, 64);
2168 memcpy( saved + 448, buf + 7*128 + 64, 64 * sizeof(float));
2169 } else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
2170 memcpy( saved, buf + 512, 448 * sizeof(float));
2171 memcpy( saved + 448, buf + 7*128 + 64, 64 * sizeof(float));
2172 } else { // LONG_STOP or ONLY_LONG
2173 memcpy( saved, buf + 512, 512 * sizeof(float));
2174 }
2175 }
2176
2177 /**
2178 * Apply dependent channel coupling (applied before IMDCT).
2179 *
2180 * @param index index into coupling gain array
2181 */
2182 static void apply_dependent_coupling(AACContext *ac,
2183 SingleChannelElement *target,
2184 ChannelElement *cce, int index)
2185 {
2186 IndividualChannelStream *ics = &cce->ch[0].ics;
2187 const uint16_t *offsets = ics->swb_offset;
2188 float *dest = target->coeffs;
2189 const float *src = cce->ch[0].coeffs;
2190 int g, i, group, k, idx = 0;
2191 if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP) {
2192 av_log(ac->avctx, AV_LOG_ERROR,
2193 "Dependent coupling is not supported together with LTP\n");
2194 return;
2195 }
2196 for (g = 0; g < ics->num_window_groups; g++) {
2197 for (i = 0; i < ics->max_sfb; i++, idx++) {
2198 if (cce->ch[0].band_type[idx] != ZERO_BT) {
2199 const float gain = cce->coup.gain[index][idx];
2200 for (group = 0; group < ics->group_len[g]; group++) {
2201 for (k = offsets[i]; k < offsets[i + 1]; k++) {
2202 // XXX dsputil-ize
2203 dest[group * 128 + k] += gain * src[group * 128 + k];
2204 }
2205 }
2206 }
2207 }
2208 dest += ics->group_len[g] * 128;
2209 src += ics->group_len[g] * 128;
2210 }
2211 }
2212
2213 /**
2214 * Apply independent channel coupling (applied after IMDCT).
2215 *
2216 * @param index index into coupling gain array
2217 */
2218 static void apply_independent_coupling(AACContext *ac,
2219 SingleChannelElement *target,
2220 ChannelElement *cce, int index)
2221 {
2222 int i;
2223 const float gain = cce->coup.gain[index][0];
2224 const float *src = cce->ch[0].ret;
2225 float *dest = target->ret;
2226 const int len = 1024 << (ac->oc[1].m4ac.sbr == 1);
2227
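// with SBR the output is upsampled by a factor of two, so couple over 2048 samples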
2228 for (i = 0; i < len; i++)
2229 dest[i] += gain * src[i];
2230 }
2231
2232 /**
2233 * channel coupling transformation interface
2234 *
2235 * @param apply_coupling_method pointer to (in)dependent coupling function
2236 */
2237 static void apply_channel_coupling(AACContext *ac, ChannelElement *cc,
2238 enum RawDataBlockType type, int elem_id,
2239 enum CouplingPoint coupling_point,
2240 void (*apply_coupling_method)(AACContext *ac, SingleChannelElement *target, ChannelElement *cce, int index))
2241 {
2242 int i, c;
2243
2244 for (i = 0; i < MAX_ELEM_ID; i++) {
2245 ChannelElement *cce = ac->che[TYPE_CCE][i];
2246 int index = 0;
2247
2248 if (cce && cce->coup.coupling_point == coupling_point) {
2249 ChannelCoupling *coup = &cce->coup;
2250
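// ch_select: 0 = both channels share one gain, 1 = second channel only,
// 2 = first channel only (also used for SCE targets), 3 = both channels with separate gains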
2251 for (c = 0; c <= coup->num_coupled; c++) {
2252 if (coup->type[c] == type && coup->id_select[c] == elem_id) {
2253 if (coup->ch_select[c] != 1) {
2254 apply_coupling_method(ac, &cc->ch[0], cce, index);
2255 if (coup->ch_select[c] != 0)
2256 index++;
2257 }
2258 if (coup->ch_select[c] != 2)
2259 apply_coupling_method(ac, &cc->ch[1], cce, index++);
2260 } else
2261 index += 1 + (coup->ch_select[c] == 3);
2262 }
2263 }
2264 }
2265 }
2266
2267 /**
2268 * Convert spectral data to float samples, applying all supported tools as appropriate.
2269 */
2270 static void spectral_to_sample(AACContext *ac)
2271 {
2272 int i, type;
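// iterate element types in descending order so coupling channel elements (TYPE_CCE)
// are processed before the SCE/CPE elements that apply independent coupling from them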
2273 for (type = 3; type >= 0; type--) {
2274 for (i = 0; i < MAX_ELEM_ID; i++) {
2275 ChannelElement *che = ac->che[type][i];
2276 if (che) {
2277 if (type <= TYPE_CPE)
2278 apply_channel_coupling(ac, che, type, i, BEFORE_TNS, apply_dependent_coupling);
2279 if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP) {
2280 if (che->ch[0].ics.predictor_present) {
2281 if (che->ch[0].ics.ltp.present)
2282 apply_ltp(ac, &che->ch[0]);
2283 if (che->ch[1].ics.ltp.present && type == TYPE_CPE)
2284 apply_ltp(ac, &che->ch[1]);
2285 }
2286 }
2287 if (che->ch[0].tns.present)
2288 apply_tns(che->ch[0].coeffs, &che->ch[0].tns, &che->ch[0].ics, 1);
2289 if (che->ch[1].tns.present)
2290 apply_tns(che->ch[1].coeffs, &che->ch[1].tns, &che->ch[1].ics, 1);
2291 if (type <= TYPE_CPE)
2292 apply_channel_coupling(ac, che, type, i, BETWEEN_TNS_AND_IMDCT, apply_dependent_coupling);
2293 if (type != TYPE_CCE || che->coup.coupling_point == AFTER_IMDCT) {
2294 imdct_and_windowing(ac, &che->ch[0]);
2295 if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP)
2296 update_ltp(ac, &che->ch[0]);
2297 if (type == TYPE_CPE) {
2298 imdct_and_windowing(ac, &che->ch[1]);
2299 if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP)
2300 update_ltp(ac, &che->ch[1]);
2301 }
2302 if (ac->oc[1].m4ac.sbr > 0) {
2303 ff_sbr_apply(ac, &che->sbr, type, che->ch[0].ret, che->ch[1].ret);
2304 }
2305 }
2306 if (type <= TYPE_CCE)
2307 apply_channel_coupling(ac, che, type, i, AFTER_IMDCT, apply_independent_coupling);
2308 }
2309 }
2310 }
2311 }
2312
2313 static int parse_adts_frame_header(AACContext *ac, GetBitContext *gb)
2314 {
2315 int size;
2316 AACADTSHeaderInfo hdr_info;
2317 uint8_t layout_map[MAX_ELEM_ID*4][3];
2318 int layout_map_tags;
2319
2320 size = avpriv_aac_parse_header(gb, &hdr_info);
2321 if (size > 0) {
2322 if (hdr_info.num_aac_frames != 1) {
2323 av_log_missing_feature(ac->avctx, "More than one AAC RDB per ADTS frame", 0);
2324 return AVERROR_PATCHWELCOME;
2325 }
2326 push_output_configuration(ac);
2327 if (hdr_info.chan_config) {
2328 ac->oc[1].m4ac.chan_config = hdr_info.chan_config;
2329 if (set_default_channel_config(ac->avctx, layout_map,
2330 &layout_map_tags, hdr_info.chan_config))
2331 return -7;
2332 if (output_configure(ac, layout_map, layout_map_tags,
2333 FFMAX(ac->oc[1].status, OC_TRIAL_FRAME)))
2334 return -7;
2335 } else {
2336 ac->oc[1].m4ac.chan_config = 0;
2337 }
2338 ac->oc[1].m4ac.sample_rate = hdr_info.sample_rate;
2339 ac->oc[1].m4ac.sampling_index = hdr_info.sampling_index;
2340 ac->oc[1].m4ac.object_type = hdr_info.object_type;
2341 if (ac->oc[0].status != OC_LOCKED ||
2342 ac->oc[0].m4ac.chan_config != hdr_info.chan_config ||
2343 ac->oc[0].m4ac.sample_rate != hdr_info.sample_rate) {
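// SBR/PS presence is unknown for a new configuration until the extension payload is seen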
2344 ac->oc[1].m4ac.sbr = -1;
2345 ac->oc[1].m4ac.ps = -1;
2346 }
2347 if (!hdr_info.crc_absent)
2348 skip_bits(gb, 16);
2349 }
2350 return size;
2351 }
2352
2353 static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
2354 int *got_frame_ptr, GetBitContext *gb)
2355 {
2356 AACContext *ac = avctx->priv_data;
2357 ChannelElement *che = NULL, *che_prev = NULL;
2358 enum RawDataBlockType elem_type, elem_type_prev = TYPE_END;
2359 int err, elem_id;
2360 int samples = 0, multiplier, audio_found = 0, pce_found = 0;
2361
2362 if (show_bits(gb, 12) == 0xfff) {
2363 if (parse_adts_frame_header(ac, gb) < 0) {
2364 av_log(avctx, AV_LOG_ERROR, "Error decoding AAC frame header.\n");
2365 err = -1;
2366 goto fail;
2367 }
2368 if (ac->oc[1].m4ac.sampling_index > 12) {
2369 av_log(ac->avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", ac->oc[1].m4ac.sampling_index);
2370 err = -1;
2371 goto fail;
2372 }
2373 }
2374
2375 ac->tags_mapped = 0;
2376 // parse
2377 while ((elem_type = get_bits(gb, 3)) != TYPE_END) {
2378 elem_id = get_bits(gb, 4);
2379
2380 if (elem_type < TYPE_DSE) {
2381 if (!(che=get_che(ac, elem_type, elem_id))) {
2382 av_log(ac->avctx, AV_LOG_ERROR, "channel element %d.%d is not allocated\n",
2383 elem_type, elem_id);
2384 err = -1;
2385 goto fail;
2386 }
2387 samples = 1024;
2388 }
2389
2390 switch (elem_type) {
2391
2392 case TYPE_SCE:
2393 err = decode_ics(ac, &che->ch[0], gb, 0, 0);
2394 audio_found = 1;
2395 break;
2396
2397 case TYPE_CPE:
2398 err = decode_cpe(ac, gb, che);
2399 audio_found = 1;
2400 break;
2401
2402 case TYPE_CCE:
2403 err = decode_cce(ac, gb, che);
2404 break;
2405
2406 case TYPE_LFE:
2407 err = decode_ics(ac, &che->ch[0], gb, 0, 0);
2408 audio_found = 1;
2409 break;
2410
2411 case TYPE_DSE:
2412 err = skip_data_stream_element(ac, gb);
2413 break;
2414
2415 case TYPE_PCE: {
2416 uint8_t layout_map[MAX_ELEM_ID*4][3];
2417 int tags;
2418 push_output_configuration(ac);
2419 tags = decode_pce(avctx, &ac->oc[1].m4ac, layout_map, gb);
2420 if (tags < 0) {
2421 err = tags;
2422 break;
2423 }
2424 if (pce_found) {
2425 av_log(avctx, AV_LOG_ERROR,
2426 "Not evaluating a further program_config_element as this construct is dubious at best.\n");
2427 pop_output_configuration(ac);
2428 } else {
2429 err = output_configure(ac, layout_map, tags, OC_TRIAL_PCE);
2430 pce_found = 1;
2431 }
2432 break;
2433 }
2434
2435 case TYPE_FIL:
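// a count of 15 is an escape: an extra 8-bit field extends the length (total 15 + esc - 1)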
2436 if (elem_id == 15)
2437 elem_id += get_bits(gb, 8) - 1;
2438 if (get_bits_left(gb) < 8 * elem_id) {
2439 av_log(avctx, AV_LOG_ERROR, overread_err);
2440 err = -1;
2441 goto fail;
2442 }
2443 while (elem_id > 0)
2444 elem_id -= decode_extension_payload(ac, gb, elem_id, che_prev, elem_type_prev);
2445 err = 0; /* FIXME */
2446 break;
2447
2448 default:
2449 err = -1; /* should not happen, but keeps compiler happy */
2450 break;
2451 }
2452
2453 che_prev = che;
2454 elem_type_prev = elem_type;
2455
2456 if (err)
2457 goto fail;
2458
2459 if (get_bits_left(gb) < 3) {
2460 av_log(avctx, AV_LOG_ERROR, overread_err);
2461 err = -1;
2462 goto fail;
2463 }
2464 }
2465
2466 spectral_to_sample(ac);
2467
2468 multiplier = (ac->oc[1].m4ac.sbr == 1) ? ac->oc[1].m4ac.ext_sample_rate > ac->oc[1].m4ac.sample_rate : 0;
2469 samples <<= multiplier;
2470
2471 if (samples) {
2472 /* get output buffer */
2473 ac->frame.nb_samples = samples;
2474 if ((err = avctx->get_buffer(avctx, &ac->frame)) < 0) {
2475 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
2476 err = -1;
2477 goto fail;
2478 }
2479
2480 if (avctx->sample_fmt == AV_SAMPLE_FMT_FLT)
2481 ac->fmt_conv.float_interleave((float *)ac->frame.data[0],
2482 (const float **)ac->output_data,
2483 samples, avctx->channels);
2484 else
2485 ac->fmt_conv.float_to_int16_interleave((int16_t *)ac->frame.data[0],
2486 (const float **)ac->output_data,
2487 samples, avctx->channels);
2488
2489 *(AVFrame *)data = ac->frame;
2490 }
2491 *got_frame_ptr = !!samples;
2492
2493 if (ac->oc[1].status && audio_found) {
2494 avctx->sample_rate = ac->oc[1].m4ac.sample_rate << multiplier;
2495 avctx->frame_size = samples;
2496 ac->oc[1].status = OC_LOCKED;
2497 }
2498
2499 return 0;
2500 fail:
2501 pop_output_configuration(ac);
2502 return err;
2503 }
2504
2505 static int aac_decode_frame(AVCodecContext *avctx, void *data,
2506 int *got_frame_ptr, AVPacket *avpkt)
2507 {
2508 AACContext *ac = avctx->priv_data;
2509 const uint8_t *buf = avpkt->data;
2510 int buf_size = avpkt->size;
2511 GetBitContext gb;
2512 int buf_consumed;
2513 int buf_offset;
2514 int err;
2515 int new_extradata_size;
2516 const uint8_t *new_extradata = av_packet_get_side_data(avpkt,
2517 AV_PKT_DATA_NEW_EXTRADATA,
2518 &new_extradata_size);
2519
2520 if (new_extradata) {
2521 av_free(avctx->extradata);
2522 avctx->extradata = av_mallocz(new_extradata_size +
2523 FF_INPUT_BUFFER_PADDING_SIZE);
2524 if (!avctx->extradata)
2525 return AVERROR(ENOMEM);
2526 avctx->extradata_size = new_extradata_size;
2527 memcpy(avctx->extradata, new_extradata, new_extradata_size);
2528 push_output_configuration(ac);
2529 if (decode_audio_specific_config(ac, ac->avctx, &ac->oc[1].m4ac,
2530 avctx->extradata,
2531 avctx->extradata_size*8, 1) < 0) {
2532 pop_output_configuration(ac);
2533 return AVERROR_INVALIDDATA;
2534 }
2535 }
2536
2537 init_get_bits(&gb, buf, buf_size * 8);
2538
2539 if ((err = aac_decode_frame_int(avctx, data, got_frame_ptr, &gb)) < 0)
2540 return err;
2541
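// report the whole packet as consumed when only zero padding follows the parsed data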
2542 buf_consumed = (get_bits_count(&gb) + 7) >> 3;
2543 for (buf_offset = buf_consumed; buf_offset < buf_size; buf_offset++)
2544 if (buf[buf_offset])
2545 break;
2546
2547 return buf_size > buf_offset ? buf_consumed : buf_size;
2548 }
2549
2550 static av_cold int aac_decode_close(AVCodecContext *avctx)
2551 {
2552 AACContext *ac = avctx->priv_data;
2553 int i, type;
2554
2555 for (i = 0; i < MAX_ELEM_ID; i++) {
2556 for (type = 0; type < 4; type++) {
2557 if (ac->che[type][i])
2558 ff_aac_sbr_ctx_close(&ac->che[type][i]->sbr);
2559 av_freep(&ac->che[type][i]);
2560 }
2561 }
2562
2563 ff_mdct_end(&ac->mdct);
2564 ff_mdct_end(&ac->mdct_small);
2565 ff_mdct_end(&ac->mdct_ltp);
2566 return 0;
2567 }
2568
2569
2570 #define LOAS_SYNC_WORD 0x2b7 ///< 11 bits LOAS sync word
2571
2572 struct LATMContext {
2573 AACContext aac_ctx; ///< containing AACContext
2574 int initialized; ///< initialized after a valid extradata was seen
2575
2576 // parser data
2577 int audio_mux_version_A; ///< LATM syntax version
2578 int frame_length_type; ///< 0/1 variable/fixed frame length
2579 int frame_length; ///< frame length for fixed frame length
2580 };
2581
2582 static inline uint32_t latm_get_value(GetBitContext *b)
2583 {
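// a 2-bit length field followed by length + 1 value bytes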
2584 int length = get_bits(b, 2);
2585
2586 return get_bits_long(b, (length+1)*8);
2587 }
2588
2589 static int latm_decode_audio_specific_config(struct LATMContext *latmctx,
2590 GetBitContext *gb, int asclen)
2591 {
2592 AACContext *ac = &latmctx->aac_ctx;
2593 AVCodecContext *avctx = ac->avctx;
2594 MPEG4AudioConfig m4ac = { 0 };
2595 int config_start_bit = get_bits_count(gb);
2596 int sync_extension = 0;
2597 int bits_consumed, esize;
2598
2599 if (asclen) {
2600 sync_extension = 1;
2601 asclen = FFMIN(asclen, get_bits_left(gb));
2602 } else
2603 asclen = get_bits_left(gb);
2604
2605 if (config_start_bit % 8) {
2606 av_log_missing_feature(latmctx->aac_ctx.avctx,
2607 "Non-byte-aligned audio-specific config", 1);
2608 return AVERROR_PATCHWELCOME;
2609 }
2610 if (asclen <= 0)
2611 return AVERROR_INVALIDDATA;
2612 bits_consumed = decode_audio_specific_config(NULL, avctx, &m4ac,
2613 gb->buffer + (config_start_bit / 8),
2614 asclen, sync_extension);
2615
2616 if (bits_consumed < 0)
2617 return AVERROR_INVALIDDATA;
2618
2619 if (ac->oc[1].m4ac.sample_rate != m4ac.sample_rate ||
2620 ac->oc[1].m4ac.chan_config != m4ac.chan_config) {
2621
2622 av_log(avctx, AV_LOG_INFO, "audio config changed\n");
2623 latmctx->initialized = 0;
2624
2625 esize = (bits_consumed+7) / 8;
2626
2627 if (avctx->extradata_size < esize) {
2628 av_free(avctx->extradata);
2629 avctx->extradata = av_malloc(esize + FF_INPUT_BUFFER_PADDING_SIZE);
2630 if (!avctx->extradata)
2631 return AVERROR(ENOMEM);
2632 }
2633
2634 avctx->extradata_size = esize;
2635 memcpy(avctx->extradata, gb->buffer + (config_start_bit/8), esize);
2636 memset(avctx->extradata+esize, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2637 }
2638 skip_bits_long(gb, bits_consumed);
2639
2640 return bits_consumed;
2641 }
2642
2643 static int read_stream_mux_config(struct LATMContext *latmctx,
2644 GetBitContext *gb)
2645 {
2646 int ret, audio_mux_version = get_bits(gb, 1);
2647
2648 latmctx->audio_mux_version_A = 0;
2649 if (audio_mux_version)
2650 latmctx->audio_mux_version_A = get_bits(gb, 1);
2651
2652 if (!latmctx->audio_mux_version_A) {
2653
2654 if (audio_mux_version)
2655 latm_get_value(gb); // taraBufferFullness
2656
2657 skip_bits(gb, 1); // allStreamSameTimeFraming
2658 skip_bits(gb, 6); // numSubFrames
2659 // numPrograms
2660 if (get_bits(gb, 4)) { // numPrograms
2661 av_log_missing_feature(latmctx->aac_ctx.avctx,
2662 "Multiple programs", 1);
2663 return AVERROR_PATCHWELCOME;
2664 }
2665
2666 // for each program (of which there is only one in DVB)
2667
2668 // for each layer (of which there is only one in DVB)
2669 if (get_bits(gb, 3)) { // numLayer
2670 av_log_missing_feature(latmctx->aac_ctx.avctx,
2671 "Multiple layers", 1);
2672 return AVERROR_PATCHWELCOME;
2673 }
2674
2675 // for all but first stream: use_same_config = get_bits(gb, 1);
2676 if (!audio_mux_version) {
2677 if ((ret = latm_decode_audio_specific_config(latmctx, gb, 0)) < 0)
2678 return ret;
2679 } else {
2680 int ascLen = latm_get_value(gb);
2681 if ((ret = latm_decode_audio_specific_config(latmctx, gb, ascLen)) < 0)
2682 return ret;
2683 ascLen -= ret;
2684 skip_bits_long(gb, ascLen);
2685 }
2686
2687 latmctx->frame_length_type = get_bits(gb, 3);
2688 switch (latmctx->frame_length_type) {
2689 case 0:
2690 skip_bits(gb, 8); // latmBufferFullness
2691 break;
2692 case 1:
2693 latmctx->frame_length = get_bits(gb, 9);
2694 break;
2695 case 3:
2696 case 4:
2697 case 5:
2698 skip_bits(gb, 6); // CELP frame length table index
2699 break;
2700 case 6:
2701 case 7:
2702 skip_bits(gb, 1); // HVXC frame length table index
2703 break;
2704 }
2705
2706 if (get_bits(gb, 1)) { // other data
2707 if (audio_mux_version) {
2708 latm_get_value(gb); // other_data_bits
2709 } else {
2710 int esc;
2711 do {
2712 esc = get_bits(gb, 1);
2713 skip_bits(gb, 8);
2714 } while (esc);
2715 }
2716 }
2717
2718 if (get_bits(gb, 1)) // crc present
2719 skip_bits(gb, 8); // config_crc
2720 }
2721
2722 return 0;
2723 }
2724
2725 static int read_payload_length_info(struct LATMContext *ctx, GetBitContext *gb)
2726 {
2727 uint8_t tmp;
2728
2729 if (ctx->frame_length_type == 0) {
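// variable frame length: the byte count is coded as a run of 8-bit values, 255 meaning continue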
2730 int mux_slot_length = 0;
2731 do {
2732 tmp = get_bits(gb, 8);
2733 mux_slot_length += tmp;
2734 } while (tmp == 255);
2735 return mux_slot_length;
2736 } else if (ctx->frame_length_type == 1) {
2737 return ctx->frame_length;
2738 } else if (ctx->frame_length_type == 3 ||
2739 ctx->frame_length_type == 5 ||
2740 ctx->frame_length_type == 7) {
2741 skip_bits(gb, 2); // mux_slot_length_coded
2742 }
2743 return 0;
2744 }
2745
2746 static int read_audio_mux_element(struct LATMContext *latmctx,
2747 GetBitContext *gb)
2748 {
2749 int err;
2750 uint8_t use_same_mux = get_bits(gb, 1);
2751 if (!use_same_mux) {
2752 if ((err = read_stream_mux_config(latmctx, gb)) < 0)
2753 return err;
2754 } else if (!latmctx->aac_ctx.avctx->extradata) {
2755 av_log(latmctx->aac_ctx.avctx, AV_LOG_DEBUG,
2756 "no decoder config found\n");
2757 return AVERROR(EAGAIN);
2758 }
2759 if (latmctx->audio_mux_version_A == 0) {
2760 int mux_slot_length_bytes = read_payload_length_info(latmctx, gb);
2761 if (mux_slot_length_bytes * 8 > get_bits_left(gb)) {
2762 av_log(latmctx->aac_ctx.avctx, AV_LOG_ERROR, "incomplete frame\n");
2763 return AVERROR_INVALIDDATA;
2764 } else if (mux_slot_length_bytes * 8 + 256 < get_bits_left(gb)) {
2765 av_log(latmctx->aac_ctx.avctx, AV_LOG_ERROR,
2766 "frame length mismatch %d << %d\n",
2767 mux_slot_length_bytes * 8, get_bits_left(gb));
2768 return AVERROR_INVALIDDATA;
2769 }
2770 }
2771 return 0;
2772 }
2773
2774
2775 static int latm_decode_frame(AVCodecContext *avctx, void *out,
2776 int *got_frame_ptr, AVPacket *avpkt)
2777 {
2778 struct LATMContext *latmctx = avctx->priv_data;
2779 int muxlength, err;
2780 GetBitContext gb;
2781
2782 init_get_bits(&gb, avpkt->data, avpkt->size * 8);
2783
2784 // check for LOAS sync word
2785 if (get_bits(&gb, 11) != LOAS_SYNC_WORD)
2786 return AVERROR_INVALIDDATA;
2787
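// the 13-bit length does not include the 3-byte LOAS header (syncword + length)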
2788 muxlength = get_bits(&gb, 13) + 3;
2789 // not enough data, the parser should have sorted this
2790 if (muxlength > avpkt->size)
2791 return AVERROR_INVALIDDATA;
2792
2793 if ((err = read_audio_mux_element(latmctx, &gb)) < 0)
2794 return err;
2795
2796 if (!latmctx->initialized) {
2797 if (!avctx->extradata) {
2798 *got_frame_ptr = 0;
2799 return avpkt->size;
2800 } else {
2801 push_output_configuration(&latmctx->aac_ctx);
2802 if ((err = decode_audio_specific_config(
2803 &latmctx->aac_ctx, avctx, &latmctx->aac_ctx.oc[1].m4ac,
2804 avctx->extradata, avctx->extradata_size*8, 1)) < 0) {
2805 pop_output_configuration(&latmctx->aac_ctx);
2806 return err;
2807 }
2808 latmctx->initialized = 1;
2809 }
2810 }
2811
2812 if (show_bits(&gb, 12) == 0xfff) {
2813 av_log(latmctx->aac_ctx.avctx, AV_LOG_ERROR,
2814 "ADTS header detected, probably as result of configuration "
2815 "misparsing\n");
2816 return AVERROR_INVALIDDATA;
2817 }
2818
2819 if ((err = aac_decode_frame_int(avctx, out, got_frame_ptr, &gb)) < 0)
2820 return err;
2821
2822 return muxlength;
2823 }
2824
2825 static av_cold int latm_decode_init(AVCodecContext *avctx)
2826 {
2827 struct LATMContext *latmctx = avctx->priv_data;
2828 int ret = aac_decode_init(avctx);
2829
2830 if (avctx->extradata_size > 0)
2831 latmctx->initialized = !ret;
2832
2833 return ret;
2834 }
2835
2836
2837 AVCodec ff_aac_decoder = {
2838 .name = "aac",
2839 .type = AVMEDIA_TYPE_AUDIO,
2840 .id = AV_CODEC_ID_AAC,
2841 .priv_data_size = sizeof(AACContext),
2842 .init = aac_decode_init,
2843 .close = aac_decode_close,
2844 .decode = aac_decode_frame,
2845 .long_name = NULL_IF_CONFIG_SMALL("AAC (Advanced Audio Coding)"),
2846 .sample_fmts = (const enum AVSampleFormat[]) {
2847 AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
2848 },
2849 .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
2850 .channel_layouts = aac_channel_layout,
2851 };
2852
2853 /*
2854 Note: This decoder is intended for LATM streams carried in MPEG
2855 transport streams that contain only one program.
2856 For more complex LATM demuxing, a separate LATM demuxer should be used.
2857 */
2858 AVCodec ff_aac_latm_decoder = {
2859 .name = "aac_latm",
2860 .type = AVMEDIA_TYPE_AUDIO,
2861 .id = AV_CODEC_ID_AAC_LATM,
2862 .priv_data_size = sizeof(struct LATMContext),
2863 .init = latm_decode_init,
2864 .close = aac_decode_close,
2865 .decode = latm_decode_frame,
2866 .long_name = NULL_IF_CONFIG_SMALL("AAC LATM (Advanced Audio Coding LATM syntax)"),
2867 .sample_fmts = (const enum AVSampleFormat[]) {
2868 AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
2869 },
2870 .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
2871 .channel_layouts = aac_channel_layout,
2872 };