lavc: Use get_bitsz where needed
[libav.git] / libavcodec / wmaprodec.c
1 /*
2 * Wmapro compatible decoder
3 * Copyright (c) 2007 Baptiste Coudurier, Benjamin Larsson, Ulion
4 * Copyright (c) 2008 - 2011 Sascha Sommer, Benjamin Larsson
5 *
6 * This file is part of Libav.
7 *
8 * Libav is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * Libav is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with Libav; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 /**
24 * @file
25 * @brief wmapro decoder implementation
26 * Wmapro is an MDCT-based codec comparable to standard WMA or AAC.
27 * The decoding therefore consists of the following steps:
28 * - bitstream decoding
29 * - reconstruction of per-channel data
30 * - rescaling and inverse quantization
31 * - IMDCT
32 * - windowing and overlap-add
33 *
34 * The compressed wmapro bitstream is split into individual packets.
35 * Every such packet contains one or more wma frames.
36 * The compressed frames may have a variable length and frames may
37 * cross packet boundaries.
38 * Common to all wmapro frames is the number of samples that are stored in
39 * a frame.
40 * The number of samples and a few other decode flags are stored
41 * as extradata that has to be passed to the decoder.
42 *
43 * The wmapro frames themselves are again split into a variable number of
44 * subframes. Every subframe contains the data for 2^N time domain samples
45 * where N varies between 7 and 12.
46 *
47 * Example wmapro bitstream (in samples):
48 *
49 * || packet 0 || packet 1 || packet 2 packets
50 * ---------------------------------------------------
51 * || frame 0 || frame 1 || frame 2 || frames
52 * ---------------------------------------------------
53 * || | | || | | | || || subframes of channel 0
54 * ---------------------------------------------------
55 * || | | || | | | || || subframes of channel 1
56 * ---------------------------------------------------
57 *
58 * The frame layouts for the individual channels of a WMA frame do not need
59 * to be the same.
60 *
61 * However, if the offsets and lengths of several subframes of a frame are the
62 * same, the subframes of the channels can be grouped.
63 * Every group may then use special coding techniques like M/S stereo coding
64 * to improve the compression ratio. These channel transformations do not
65 * need to be applied to a whole subframe. Instead, they can also work on
66 * individual scale factor bands (see below).
67 * The coefficients that carry the audio signal in the frequency domain
68 * are transmitted as huffman-coded vectors with 4, 2 and 1 elements.
69 * In addition to that, the encoder can switch to a runlevel coding scheme
70 * by transmitting subframe_length / 128 zero coefficients.
71 *
72 * Before the audio signal can be converted to the time domain, the
73 * coefficients have to be rescaled and inverse quantized.
74 * A subframe is therefore split into several scale factor bands that get
75 * scaled individually.
76 * Scale factors are submitted for every frame but they might be shared
77 * between the subframes of a channel. Scale factors are initially DPCM-coded.
78 * Once scale factors are shared, the differences are transmitted as runlevel
79 * codes.
80 * Every subframe length and offset combination in the frame layout shares a
81 * common quantization factor that can be adjusted for every channel by a
82 * modifier.
83 * After the inverse quantization, the coefficients get processed by an IMDCT.
84 * The resulting values are then windowed with a sine window and the first half
85 * of the values are added to the second half of the output from the previous
86 * subframe in order to reconstruct the output samples.
87 */
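/*
 * Illustrative sketch only (simplified, assumed names; not literal decoder
 * code): for one subframe of length L the time-domain reconstruction
 * described above conceptually amounts to
 *
 *     imdct_half(coeffs, L);                       // spectrum -> L time samples
 *     for (i = 0; i < L; i++)                      // sine window + overlap-add
 *         out[i] = prev_tail[i] * win[L - 1 - i] + coeffs[i] * win[i];
 *
 * where prev_tail[] is the second half of the previous subframe's IMDCT
 * output. The real code keeps that half directly in channel->out and lets
 * fdsp.vector_fmul_window() combine the windowing and the cross-fade in a
 * single pass (see wmapro_window()).
 */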
88
89 #include <inttypes.h>
90
91 #include "libavutil/float_dsp.h"
92 #include "libavutil/intfloat.h"
93 #include "libavutil/intreadwrite.h"
94 #include "avcodec.h"
95 #include "internal.h"
96 #include "get_bits.h"
97 #include "put_bits.h"
98 #include "wmaprodata.h"
99 #include "sinewin.h"
100 #include "wma.h"
101 #include "wma_common.h"
102
103 /** current decoder limitations */
104 #define WMAPRO_MAX_CHANNELS 8 ///< max number of handled channels
105 #define MAX_SUBFRAMES 32 ///< max number of subframes per channel
106 #define MAX_BANDS 29 ///< max number of scale factor bands
107 #define MAX_FRAMESIZE 32768 ///< maximum compressed frame size
108
109 #define WMAPRO_BLOCK_MIN_BITS 6 ///< log2 of min block size
110 #define WMAPRO_BLOCK_MAX_BITS 13 ///< log2 of max block size
111 #define WMAPRO_BLOCK_MIN_SIZE (1 << WMAPRO_BLOCK_MIN_BITS) ///< minimum block size
112 #define WMAPRO_BLOCK_MAX_SIZE (1 << WMAPRO_BLOCK_MAX_BITS) ///< maximum block size
113 #define WMAPRO_BLOCK_SIZES (WMAPRO_BLOCK_MAX_BITS - WMAPRO_BLOCK_MIN_BITS + 1) ///< possible block sizes
114
115
116 #define VLCBITS 9
117 #define SCALEVLCBITS 8
118 #define VEC4MAXDEPTH ((HUFF_VEC4_MAXBITS+VLCBITS-1)/VLCBITS)
119 #define VEC2MAXDEPTH ((HUFF_VEC2_MAXBITS+VLCBITS-1)/VLCBITS)
120 #define VEC1MAXDEPTH ((HUFF_VEC1_MAXBITS+VLCBITS-1)/VLCBITS)
121 #define SCALEMAXDEPTH ((HUFF_SCALE_MAXBITS+SCALEVLCBITS-1)/SCALEVLCBITS)
122 #define SCALERLMAXDEPTH ((HUFF_SCALE_RL_MAXBITS+VLCBITS-1)/VLCBITS)
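/* The *MAXDEPTH constants give the number of VLCBITS-sized lookup stages that
   get_vlc2() may need to resolve the longest code of each table, i.e. the
   integer ceiling ceil(MAXBITS / VLCBITS), written here as
   (MAXBITS + VLCBITS - 1) / VLCBITS. */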
123
124 static VLC sf_vlc; ///< scale factor DPCM vlc
125 static VLC sf_rl_vlc; ///< scale factor run length vlc
126 static VLC vec4_vlc; ///< 4 coefficients per symbol
127 static VLC vec2_vlc; ///< 2 coefficients per symbol
128 static VLC vec1_vlc; ///< 1 coefficient per symbol
129 static VLC coef_vlc[2]; ///< coefficient run length vlc codes
130 static float sin64[33]; ///< sine table for decorrelation
131
132 /**
133 * @brief frame specific decoder context for a single channel
134 */
135 typedef struct WMAProChannelCtx {
136 int16_t prev_block_len; ///< length of the previous block
137 uint8_t transmit_coefs;
138 uint8_t num_subframes;
139 uint16_t subframe_len[MAX_SUBFRAMES]; ///< subframe length in samples
140 uint16_t subframe_offset[MAX_SUBFRAMES]; ///< subframe positions in the current frame
141 uint8_t cur_subframe; ///< current subframe number
142 uint16_t decoded_samples; ///< number of already processed samples
143 uint8_t grouped; ///< channel is part of a group
144 int quant_step; ///< quantization step for the current subframe
145 int8_t reuse_sf; ///< share scale factors between subframes
146 int8_t scale_factor_step; ///< scaling step for the current subframe
147 int max_scale_factor; ///< maximum scale factor for the current subframe
148 int saved_scale_factors[2][MAX_BANDS]; ///< resampled and (previously) transmitted scale factor values
149 int8_t scale_factor_idx; ///< index for the transmitted scale factor values (used for resampling)
150 int* scale_factors; ///< pointer to the scale factor values used for decoding
151 uint8_t table_idx; ///< index in sf_offsets for the scale factor reference block
152 float* coeffs; ///< pointer to the subframe decode buffer
153 uint16_t num_vec_coeffs; ///< number of vector coded coefficients
154 DECLARE_ALIGNED(32, float, out)[WMAPRO_BLOCK_MAX_SIZE + WMAPRO_BLOCK_MAX_SIZE / 2]; ///< output buffer
155 } WMAProChannelCtx;
156
157 /**
158 * @brief channel group for channel transformations
159 */
160 typedef struct WMAProChannelGrp {
161 uint8_t num_channels; ///< number of channels in the group
162 int8_t transform; ///< transform on / off
163 int8_t transform_band[MAX_BANDS]; ///< controls if the transform is enabled for a certain band
164 float decorrelation_matrix[WMAPRO_MAX_CHANNELS*WMAPRO_MAX_CHANNELS];
165 float* channel_data[WMAPRO_MAX_CHANNELS]; ///< transformation coefficients
166 } WMAProChannelGrp;
167
168 /**
169 * @brief main decoder context
170 */
171 typedef struct WMAProDecodeCtx {
172 /* generic decoder variables */
173 AVCodecContext* avctx; ///< codec context for av_log
174 AVFloatDSPContext fdsp;
175 uint8_t frame_data[MAX_FRAMESIZE +
176 AV_INPUT_BUFFER_PADDING_SIZE];///< compressed frame data
177 PutBitContext pb; ///< context for filling the frame_data buffer
178 FFTContext mdct_ctx[WMAPRO_BLOCK_SIZES]; ///< MDCT context per block size
179 DECLARE_ALIGNED(32, float, tmp)[WMAPRO_BLOCK_MAX_SIZE]; ///< IMDCT output buffer
180 float* windows[WMAPRO_BLOCK_SIZES]; ///< windows for the different block sizes
181
182 /* frame size dependent frame information (set during initialization) */
183 uint32_t decode_flags; ///< used compression features
184 uint8_t len_prefix; ///< frame is prefixed with its length
185 uint8_t dynamic_range_compression; ///< frame contains DRC data
186 uint8_t bits_per_sample; ///< integer audio sample size for the unscaled IMDCT output (used to scale to [-1.0, 1.0])
187 uint16_t samples_per_frame; ///< number of samples to output
188 uint16_t log2_frame_size;
189 int8_t lfe_channel; ///< lfe channel index
190 uint8_t max_num_subframes;
191 uint8_t subframe_len_bits; ///< number of bits used for the subframe length
192 uint8_t max_subframe_len_bit; ///< flag indicating that the subframe is of maximum size when the first subframe length bit is 1
193 uint16_t min_samples_per_subframe;
194 int8_t num_sfb[WMAPRO_BLOCK_SIZES]; ///< scale factor bands per block size
195 int16_t sfb_offsets[WMAPRO_BLOCK_SIZES][MAX_BANDS]; ///< scale factor band offsets (multiples of 4)
196 int8_t sf_offsets[WMAPRO_BLOCK_SIZES][WMAPRO_BLOCK_SIZES][MAX_BANDS]; ///< scale factor resample matrix
197 int16_t subwoofer_cutoffs[WMAPRO_BLOCK_SIZES]; ///< subwoofer cutoff values
198
199 /* packet decode state */
200 GetBitContext pgb; ///< bitstream reader context for the packet
201 int next_packet_start; ///< start offset of the next wma packet in the demuxer packet
202 uint8_t packet_offset; ///< frame offset in the packet
203 uint8_t packet_sequence_number; ///< current packet number
204 int num_saved_bits; ///< saved number of bits
205 int frame_offset; ///< frame offset in the bit reservoir
206 int subframe_offset; ///< subframe offset in the bit reservoir
207 uint8_t packet_loss; ///< set in case of bitstream error
208 uint8_t packet_done; ///< set when a packet is fully decoded
209
210 /* frame decode state */
211 uint32_t frame_num; ///< current frame number (not used for decoding)
212 GetBitContext gb; ///< bitstream reader context
213 int buf_bit_size; ///< buffer size in bits
214 uint8_t drc_gain; ///< gain for the DRC tool
215 int8_t skip_frame; ///< skip output step
216 int8_t parsed_all_subframes; ///< all subframes decoded?
217
218 /* subframe/block decode state */
219 int16_t subframe_len; ///< current subframe length
220 int8_t channels_for_cur_subframe; ///< number of channels that contain the subframe
221 int8_t channel_indexes_for_cur_subframe[WMAPRO_MAX_CHANNELS];
222 int8_t num_bands; ///< number of scale factor bands
223 int8_t transmit_num_vec_coeffs; ///< number of vector coded coefficients is part of the bitstream
224 int16_t* cur_sfb_offsets; ///< sfb offsets for the current block
225 uint8_t table_idx; ///< index for the num_sfb, sfb_offsets, sf_offsets and subwoofer_cutoffs tables
226 int8_t esc_len; ///< length of escaped coefficients
227
228 uint8_t num_chgroups; ///< number of channel groups
229 WMAProChannelGrp chgroup[WMAPRO_MAX_CHANNELS]; ///< channel group information
230
231 WMAProChannelCtx channel[WMAPRO_MAX_CHANNELS]; ///< per channel data
232 } WMAProDecodeCtx;
233
234
235 /**
236 *@brief helper function to print the most important members of the context
237 *@param s context
238 */
239 static av_cold void dump_context(WMAProDecodeCtx *s)
240 {
241 #define PRINT(a, b) av_log(s->avctx, AV_LOG_DEBUG, " %s = %d\n", a, b);
242 #define PRINT_HEX(a, b) av_log(s->avctx, AV_LOG_DEBUG, " %s = %"PRIx32"\n", a, b);
243
244 PRINT("ed sample bit depth", s->bits_per_sample);
245 PRINT_HEX("ed decode flags", s->decode_flags);
246 PRINT("samples per frame", s->samples_per_frame);
247 PRINT("log2 frame size", s->log2_frame_size);
248 PRINT("max num subframes", s->max_num_subframes);
249 PRINT("len prefix", s->len_prefix);
250 PRINT("num channels", s->avctx->channels);
251 }
252
253 /**
254 *@brief Uninitialize the decoder and free all resources.
255 *@param avctx codec context
256 *@return 0 on success, < 0 otherwise
257 */
258 static av_cold int decode_end(AVCodecContext *avctx)
259 {
260 WMAProDecodeCtx *s = avctx->priv_data;
261 int i;
262
263 for (i = 0; i < WMAPRO_BLOCK_SIZES; i++)
264 ff_mdct_end(&s->mdct_ctx[i]);
265
266 return 0;
267 }
268
269 /**
270 *@brief Initialize the decoder.
271 *@param avctx codec context
272 *@return 0 on success, -1 otherwise
273 */
274 static av_cold int decode_init(AVCodecContext *avctx)
275 {
276 WMAProDecodeCtx *s = avctx->priv_data;
277 uint8_t *edata_ptr = avctx->extradata;
278 unsigned int channel_mask;
279 int i, bits;
280 int log2_max_num_subframes;
281 int num_possible_block_sizes;
282
283 if (!avctx->block_align) {
284 av_log(avctx, AV_LOG_ERROR, "block_align is not set\n");
285 return AVERROR(EINVAL);
286 }
287
288 s->avctx = avctx;
289 avpriv_float_dsp_init(&s->fdsp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
290
291 init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE);
292
293 avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
294
295 if (avctx->extradata_size >= 18) {
296 s->decode_flags = AV_RL16(edata_ptr+14);
297 channel_mask = AV_RL32(edata_ptr+2);
298 s->bits_per_sample = AV_RL16(edata_ptr);
299 /** dump the extradata */
300 for (i = 0; i < avctx->extradata_size; i++)
301 ff_dlog(avctx, "[%x] ", avctx->extradata[i]);
302 ff_dlog(avctx, "\n");
303
304 } else {
305 avpriv_request_sample(avctx, "Unknown extradata size");
306 return AVERROR_PATCHWELCOME;
307 }
308
309 /** generic init */
310 s->log2_frame_size = av_log2(avctx->block_align) + 4;
311
312 /** frame info */
313 s->skip_frame = 1; /* skip first frame */
314 s->packet_loss = 1;
315 s->len_prefix = (s->decode_flags & 0x40);
316
317 /** get frame len */
318 bits = ff_wma_get_frame_len_bits(avctx->sample_rate, 3, s->decode_flags);
319 if (bits > WMAPRO_BLOCK_MAX_BITS) {
320 avpriv_request_sample(avctx, "14-bit block sizes");
321 return AVERROR_PATCHWELCOME;
322 }
323 s->samples_per_frame = 1 << bits;
324
325 /** subframe info */
326 log2_max_num_subframes = ((s->decode_flags & 0x38) >> 3);
327 s->max_num_subframes = 1 << log2_max_num_subframes;
328 if (s->max_num_subframes == 16 || s->max_num_subframes == 4)
329 s->max_subframe_len_bit = 1;
330 s->subframe_len_bits = av_log2(log2_max_num_subframes) + 1;
331
332 num_possible_block_sizes = log2_max_num_subframes + 1;
333 s->min_samples_per_subframe = s->samples_per_frame / s->max_num_subframes;
334 s->dynamic_range_compression = (s->decode_flags & 0x80);
335
336 if (s->max_num_subframes > MAX_SUBFRAMES) {
337 av_log(avctx, AV_LOG_ERROR, "invalid number of subframes %"PRId8"\n",
338 s->max_num_subframes);
339 return AVERROR_INVALIDDATA;
340 }
341
342 if (s->min_samples_per_subframe < WMAPRO_BLOCK_MIN_SIZE) {
343 av_log(avctx, AV_LOG_ERROR, "Invalid minimum block size %"PRId8"\n",
344 s->max_num_subframes);
345 return AVERROR_INVALIDDATA;
346 }
347
348 if (s->avctx->sample_rate <= 0) {
349 av_log(avctx, AV_LOG_ERROR, "invalid sample rate\n");
350 return AVERROR_INVALIDDATA;
351 }
352
353 if (avctx->channels < 0) {
354 av_log(avctx, AV_LOG_ERROR, "invalid number of channels %d\n",
355 avctx->channels);
356 return AVERROR_INVALIDDATA;
357 } else if (avctx->channels > WMAPRO_MAX_CHANNELS) {
358 avpriv_request_sample(avctx,
359 "More than %d channels", WMAPRO_MAX_CHANNELS);
360 return AVERROR_PATCHWELCOME;
361 }
362
363 /** init previous block len */
364 for (i = 0; i < avctx->channels; i++)
365 s->channel[i].prev_block_len = s->samples_per_frame;
366
367 /** extract lfe channel position */
368 s->lfe_channel = -1;
369
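    /* Bit 3 of the channel mask is the LFE flag; when it is set, the LFE
       index equals the number of channels signalled at or below that bit,
       minus one (channels are laid out in channel-mask bit order). */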
370 if (channel_mask & 8) {
371 unsigned int mask;
372 for (mask = 1; mask < 16; mask <<= 1) {
373 if (channel_mask & mask)
374 ++s->lfe_channel;
375 }
376 }
377
378 INIT_VLC_STATIC(&sf_vlc, SCALEVLCBITS, HUFF_SCALE_SIZE,
379 scale_huffbits, 1, 1,
380 scale_huffcodes, 2, 2, 616);
381
382 INIT_VLC_STATIC(&sf_rl_vlc, VLCBITS, HUFF_SCALE_RL_SIZE,
383 scale_rl_huffbits, 1, 1,
384 scale_rl_huffcodes, 4, 4, 1406);
385
386 INIT_VLC_STATIC(&coef_vlc[0], VLCBITS, HUFF_COEF0_SIZE,
387 coef0_huffbits, 1, 1,
388 coef0_huffcodes, 4, 4, 2108);
389
390 INIT_VLC_STATIC(&coef_vlc[1], VLCBITS, HUFF_COEF1_SIZE,
391 coef1_huffbits, 1, 1,
392 coef1_huffcodes, 4, 4, 3912);
393
394 INIT_VLC_STATIC(&vec4_vlc, VLCBITS, HUFF_VEC4_SIZE,
395 vec4_huffbits, 1, 1,
396 vec4_huffcodes, 2, 2, 604);
397
398 INIT_VLC_STATIC(&vec2_vlc, VLCBITS, HUFF_VEC2_SIZE,
399 vec2_huffbits, 1, 1,
400 vec2_huffcodes, 2, 2, 562);
401
402 INIT_VLC_STATIC(&vec1_vlc, VLCBITS, HUFF_VEC1_SIZE,
403 vec1_huffbits, 1, 1,
404 vec1_huffcodes, 2, 2, 562);
405
406 /** calculate number of scale factor bands and their offsets
407 for every possible block size */
408 for (i = 0; i < num_possible_block_sizes; i++) {
409 int subframe_len = s->samples_per_frame >> i;
410 int x;
411 int band = 1;
412
413 s->sfb_offsets[i][0] = 0;
414
415 for (x = 0; x < MAX_BANDS-1 && s->sfb_offsets[i][band - 1] < subframe_len; x++) {
416 int offset = (subframe_len * 2 * critical_freq[x])
417 / s->avctx->sample_rate + 2;
418 offset &= ~3;
419 if (offset > s->sfb_offsets[i][band - 1])
420 s->sfb_offsets[i][band++] = offset;
421 }
422 s->sfb_offsets[i][band - 1] = subframe_len;
423 s->num_sfb[i] = band - 1;
424 }
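    /* Worked example with illustrative numbers: for subframe_len = 2048,
       sample_rate = 44100 and a (hypothetical) critical frequency of 200 Hz,
       offset = (2048 * 2 * 200) / 44100 + 2 = 20, which already is a multiple
       of 4, so a band boundary is placed at coefficient 20. */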
425
426
427 /** Scale factors can be shared between blocks of different sizes;
428 since every block size has a different scale factor band layout,
429 the matrix sf_offsets is needed to map to the correct scale factor.
430 */
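    /* For every band b of block size i, the loop below takes the centre of
       that band, rescales it to full-frame resolution (the << i combined with
       the >> 1), and stores in sf_offsets[i][x][b] the index of the band of
       block size x that covers this centre. */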
431
432 for (i = 0; i < num_possible_block_sizes; i++) {
433 int b;
434 for (b = 0; b < s->num_sfb[i]; b++) {
435 int x;
436 int offset = ((s->sfb_offsets[i][b]
437 + s->sfb_offsets[i][b + 1] - 1) << i) >> 1;
438 for (x = 0; x < num_possible_block_sizes; x++) {
439 int v = 0;
440 while (s->sfb_offsets[x][v + 1] << x < offset)
441 if (++v >= MAX_BANDS)
442 return AVERROR_INVALIDDATA;
443 s->sf_offsets[i][x][b] = v;
444 }
445 }
446 }
447
448 /** init MDCT, FIXME: only init needed sizes */
449 for (i = 0; i < WMAPRO_BLOCK_SIZES; i++)
450 ff_mdct_init(&s->mdct_ctx[i], WMAPRO_BLOCK_MIN_BITS+1+i, 1,
451 1.0 / (1 << (WMAPRO_BLOCK_MIN_BITS + i - 1))
452 / (1 << (s->bits_per_sample - 1)));
453
454 /** init MDCT windows: simple sine window */
455 for (i = 0; i < WMAPRO_BLOCK_SIZES; i++) {
456 const int win_idx = WMAPRO_BLOCK_MAX_BITS - i;
457 ff_init_ff_sine_windows(win_idx);
458 s->windows[WMAPRO_BLOCK_SIZES - i - 1] = ff_sine_windows[win_idx];
459 }
460
461 /** calculate subwoofer cutoff values */
462 for (i = 0; i < num_possible_block_sizes; i++) {
463 int block_size = s->samples_per_frame >> i;
464 int cutoff = (440*block_size + 3 * (s->avctx->sample_rate >> 1) - 1)
465 / s->avctx->sample_rate;
466 s->subwoofer_cutoffs[i] = av_clip(cutoff, 4, block_size);
467 }
468
469 /** calculate sine values for the decorrelation matrix */
470 for (i = 0; i < 33; i++)
471 sin64[i] = sin(i*M_PI / 64.0);
472
473 if (avctx->debug & FF_DEBUG_BITSTREAM)
474 dump_context(s);
475
476 avctx->channel_layout = channel_mask;
477
478 return 0;
479 }
480
481 /**
482 *@brief Decode the subframe length.
483 *@param s context
484 *@param offset sample offset in the frame
485 *@return decoded subframe length on success, < 0 in case of an error
486 */
487 static int decode_subframe_length(WMAProDecodeCtx *s, int offset)
488 {
489 int frame_len_shift = 0;
490 int subframe_len;
491
492 /** no need to read from the bitstream when only one length is possible */
493 if (offset == s->samples_per_frame - s->min_samples_per_subframe)
494 return s->min_samples_per_subframe;
495
496 /** 1 bit indicates if the subframe is of maximum length */
497 if (s->max_subframe_len_bit) {
498 if (get_bits1(&s->gb))
499 frame_len_shift = 1 + get_bits(&s->gb, s->subframe_len_bits-1);
500 } else
501 frame_len_shift = get_bits(&s->gb, s->subframe_len_bits);
502
503 subframe_len = s->samples_per_frame >> frame_len_shift;
504
505 /** sanity check the length */
506 if (subframe_len < s->min_samples_per_subframe ||
507 subframe_len > s->samples_per_frame) {
508 av_log(s->avctx, AV_LOG_ERROR, "broken frame: subframe_len %i\n",
509 subframe_len);
510 return AVERROR_INVALIDDATA;
511 }
512 return subframe_len;
513 }
514
515 /**
516 *@brief Decode how the data in the frame is split into subframes.
517 * Every WMA frame contains the encoded data for a fixed number of
518 * samples per channel. The data for every channel might be split
519 * into several subframes. This function will reconstruct the list of
520 * subframes for every channel.
521 *
522 * If the subframes are not evenly split, the algorithm estimates the
523 * channels with the lowest number of total samples.
524 * Afterwards, for each of these channels a bit is read from the
525 * bitstream that indicates if the channel contains a subframe with the
526 * next subframe size that is going to be read from the bitstream or not.
527 * If a channel contains such a subframe, the subframe size gets added to
528 * the channel's subframe list.
529 * The algorithm repeats these steps until the frame is properly divided
530 * between the individual channels.
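 *
 * A small illustrative example (made-up layout, not from the spec): with
 * samples_per_frame = 2048, two channels and min_samples_per_subframe = 512,
 * one valid outcome is
 *   channel 0: subframes of 1024 + 512 + 512 samples
 *   channel 1: a single subframe of 2048 samples
 * Both channels add up to samples_per_frame, which terminates the loop,
 * even though their subframe boundaries differ.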
531 *
532 *@param s context
533 *@return 0 on success, < 0 in case of an error
534 */
535 static int decode_tilehdr(WMAProDecodeCtx *s)
536 {
537 uint16_t num_samples[WMAPRO_MAX_CHANNELS] = { 0 };/**< sum of samples for all currently known subframes of a channel */
538 uint8_t contains_subframe[WMAPRO_MAX_CHANNELS]; /**< flag indicating if a channel contains the current subframe */
539 int channels_for_cur_subframe = s->avctx->channels; /**< number of channels that contain the current subframe */
540 int fixed_channel_layout = 0; /**< flag indicating that all channels use the same subframe offsets and sizes */
541 int min_channel_len = 0; /**< smallest sum of samples (channels with this length will be processed first) */
542 int c;
543
544 /* Should never consume more than 3073 bits (256 iterations for the
545 * while loop when the minimum amount of 128 samples is always subtracted
546 * from the missing samples in the 8-channel case).
547 * 1 + BLOCK_MAX_SIZE * MAX_CHANNELS / BLOCK_MIN_SIZE * (MAX_CHANNELS + 4)
548 */
549
550 /** reset tiling information */
551 for (c = 0; c < s->avctx->channels; c++)
552 s->channel[c].num_subframes = 0;
553
554 if (s->max_num_subframes == 1 || get_bits1(&s->gb))
555 fixed_channel_layout = 1;
556
557 /** loop until the frame data is split between the subframes */
558 do {
559 int subframe_len;
560
561 /** check which channels contain the subframe */
562 for (c = 0; c < s->avctx->channels; c++) {
563 if (num_samples[c] == min_channel_len) {
564 if (fixed_channel_layout || channels_for_cur_subframe == 1 ||
565 (min_channel_len == s->samples_per_frame - s->min_samples_per_subframe))
566 contains_subframe[c] = 1;
567 else
568 contains_subframe[c] = get_bits1(&s->gb);
569 } else
570 contains_subframe[c] = 0;
571 }
572
573 /** get subframe length, subframe_len == 0 is not allowed */
574 if ((subframe_len = decode_subframe_length(s, min_channel_len)) <= 0)
575 return AVERROR_INVALIDDATA;
576
577 /** add subframes to the individual channels and find new min_channel_len */
578 min_channel_len += subframe_len;
579 for (c = 0; c < s->avctx->channels; c++) {
580 WMAProChannelCtx* chan = &s->channel[c];
581
582 if (contains_subframe[c]) {
583 if (chan->num_subframes >= MAX_SUBFRAMES) {
584 av_log(s->avctx, AV_LOG_ERROR,
585 "broken frame: num subframes > 31\n");
586 return AVERROR_INVALIDDATA;
587 }
588 chan->subframe_len[chan->num_subframes] = subframe_len;
589 num_samples[c] += subframe_len;
590 ++chan->num_subframes;
591 if (num_samples[c] > s->samples_per_frame) {
592 av_log(s->avctx, AV_LOG_ERROR, "broken frame: "
593 "channel len > samples_per_frame\n");
594 return AVERROR_INVALIDDATA;
595 }
596 } else if (num_samples[c] <= min_channel_len) {
597 if (num_samples[c] < min_channel_len) {
598 channels_for_cur_subframe = 0;
599 min_channel_len = num_samples[c];
600 }
601 ++channels_for_cur_subframe;
602 }
603 }
604 } while (min_channel_len < s->samples_per_frame);
605
606 for (c = 0; c < s->avctx->channels; c++) {
607 int i;
608 int offset = 0;
609 for (i = 0; i < s->channel[c].num_subframes; i++) {
610 ff_dlog(s->avctx, "frame[%i] channel[%i] subframe[%i]"
611 " len %i\n", s->frame_num, c, i,
612 s->channel[c].subframe_len[i]);
613 s->channel[c].subframe_offset[i] = offset;
614 offset += s->channel[c].subframe_len[i];
615 }
616 }
617
618 return 0;
619 }
620
621 /**
622 *@brief Calculate a decorrelation matrix from the bitstream parameters.
623 *@param s codec context
624 *@param chgroup channel group for which the matrix needs to be calculated
625 */
626 static void decode_decorrelation_matrix(WMAProDecodeCtx *s,
627 WMAProChannelGrp *chgroup)
628 {
629 int i;
630 int offset = 0;
631 int8_t rotation_offset[WMAPRO_MAX_CHANNELS * WMAPRO_MAX_CHANNELS];
632 memset(chgroup->decorrelation_matrix, 0, s->avctx->channels *
633 s->avctx->channels * sizeof(*chgroup->decorrelation_matrix));
634
635 for (i = 0; i < chgroup->num_channels * (chgroup->num_channels - 1) >> 1; i++)
636 rotation_offset[i] = get_bits(&s->gb, 6);
637
638 for (i = 0; i < chgroup->num_channels; i++)
639 chgroup->decorrelation_matrix[chgroup->num_channels * i + i] =
640 get_bits1(&s->gb) ? 1.0 : -1.0;
641
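    /* The matrix is built from a diagonal of +/-1 by applying a sequence of
       plane (Givens) rotations; each 6-bit rotation_offset value n encodes an
       angle of n * pi/64, with sin64[] supplying both the sine and the cosine
       of that angle (sin64[32 - n] == cos(n * pi/64) for n < 32). */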
642 for (i = 1; i < chgroup->num_channels; i++) {
643 int x;
644 for (x = 0; x < i; x++) {
645 int y;
646 for (y = 0; y < i + 1; y++) {
647 float v1 = chgroup->decorrelation_matrix[x * chgroup->num_channels + y];
648 float v2 = chgroup->decorrelation_matrix[i * chgroup->num_channels + y];
649 int n = rotation_offset[offset + x];
650 float sinv;
651 float cosv;
652
653 if (n < 32) {
654 sinv = sin64[n];
655 cosv = sin64[32 - n];
656 } else {
657 sinv = sin64[64 - n];
658 cosv = -sin64[n - 32];
659 }
660
661 chgroup->decorrelation_matrix[y + x * chgroup->num_channels] =
662 (v1 * sinv) - (v2 * cosv);
663 chgroup->decorrelation_matrix[y + i * chgroup->num_channels] =
664 (v1 * cosv) + (v2 * sinv);
665 }
666 }
667 offset += i;
668 }
669 }
670
671 /**
672 *@brief Decode channel transformation parameters
673 *@param s codec context
674 *@return 0 in case of success, < 0 in case of bitstream errors
675 */
676 static int decode_channel_transform(WMAProDecodeCtx* s)
677 {
678 int i;
679 /* should never consume more than 1921 bits for the 8 channel case
680 * 1 + MAX_CHANNELS * (MAX_CHANNELS + 2 + 3 * MAX_CHANNELS * MAX_CHANNELS
681 * + MAX_CHANNELS + MAX_BANDS + 1)
682 */
683
684 /** in the one-channel case, channel transforms are pointless */
685 s->num_chgroups = 0;
686 if (s->avctx->channels > 1) {
687 int remaining_channels = s->channels_for_cur_subframe;
688
689 if (get_bits1(&s->gb)) {
690 avpriv_request_sample(s->avctx,
691 "Channel transform bit");
692 return AVERROR_PATCHWELCOME;
693 }
694
695 for (s->num_chgroups = 0; remaining_channels &&
696 s->num_chgroups < s->channels_for_cur_subframe; s->num_chgroups++) {
697 WMAProChannelGrp* chgroup = &s->chgroup[s->num_chgroups];
698 float** channel_data = chgroup->channel_data;
699 chgroup->num_channels = 0;
700 chgroup->transform = 0;
701
702 /** decode channel mask */
703 if (remaining_channels > 2) {
704 for (i = 0; i < s->channels_for_cur_subframe; i++) {
705 int channel_idx = s->channel_indexes_for_cur_subframe[i];
706 if (!s->channel[channel_idx].grouped
707 && get_bits1(&s->gb)) {
708 ++chgroup->num_channels;
709 s->channel[channel_idx].grouped = 1;
710 *channel_data++ = s->channel[channel_idx].coeffs;
711 }
712 }
713 } else {
714 chgroup->num_channels = remaining_channels;
715 for (i = 0; i < s->channels_for_cur_subframe; i++) {
716 int channel_idx = s->channel_indexes_for_cur_subframe[i];
717 if (!s->channel[channel_idx].grouped)
718 *channel_data++ = s->channel[channel_idx].coeffs;
719 s->channel[channel_idx].grouped = 1;
720 }
721 }
722
723 /** decode transform type */
724 if (chgroup->num_channels == 2) {
725 if (get_bits1(&s->gb)) {
726 if (get_bits1(&s->gb)) {
727 avpriv_request_sample(s->avctx,
728 "Unknown channel transform type");
729 return AVERROR_PATCHWELCOME;
730 }
731 } else {
732 chgroup->transform = 1;
733 if (s->avctx->channels == 2) {
734 chgroup->decorrelation_matrix[0] = 1.0;
735 chgroup->decorrelation_matrix[1] = -1.0;
736 chgroup->decorrelation_matrix[2] = 1.0;
737 chgroup->decorrelation_matrix[3] = 1.0;
738 } else {
739 /** cos(pi/4) */
740 chgroup->decorrelation_matrix[0] = 0.70703125;
741 chgroup->decorrelation_matrix[1] = -0.70703125;
742 chgroup->decorrelation_matrix[2] = 0.70703125;
743 chgroup->decorrelation_matrix[3] = 0.70703125;
744 }
745 }
746 } else if (chgroup->num_channels > 2) {
747 if (get_bits1(&s->gb)) {
748 chgroup->transform = 1;
749 if (get_bits1(&s->gb)) {
750 decode_decorrelation_matrix(s, chgroup);
751 } else {
752 /** FIXME: more than 6 coupled channels not supported */
753 if (chgroup->num_channels > 6) {
754 avpriv_request_sample(s->avctx,
755 "Coupled channels > 6");
756 } else {
757 memcpy(chgroup->decorrelation_matrix,
758 default_decorrelation[chgroup->num_channels],
759 chgroup->num_channels * chgroup->num_channels *
760 sizeof(*chgroup->decorrelation_matrix));
761 }
762 }
763 }
764 }
765
766 /** decode transform on / off */
767 if (chgroup->transform) {
768 if (!get_bits1(&s->gb)) {
769 int i;
770 /** transform can be enabled for individual bands */
771 for (i = 0; i < s->num_bands; i++) {
772 chgroup->transform_band[i] = get_bits1(&s->gb);
773 }
774 } else {
775 memset(chgroup->transform_band, 1, s->num_bands);
776 }
777 }
778 remaining_channels -= chgroup->num_channels;
779 }
780 }
781 return 0;
782 }
783
784 /**
785 *@brief Extract the coefficients from the bitstream.
786 *@param s codec context
787 *@param c current channel number
788 *@return 0 on success, < 0 in case of bitstream errors
789 */
790 static int decode_coeffs(WMAProDecodeCtx *s, int c)
791 {
792 /* Integers 0..15 as single-precision floats. The table saves a
793 costly int to float conversion, and storing the values as
794 integers allows fast sign-flipping. */
795 static const uint32_t fval_tab[16] = {
796 0x00000000, 0x3f800000, 0x40000000, 0x40400000,
797 0x40800000, 0x40a00000, 0x40c00000, 0x40e00000,
798 0x41000000, 0x41100000, 0x41200000, 0x41300000,
799 0x41400000, 0x41500000, 0x41600000, 0x41700000,
800 };
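    /* Example: fval_tab[3] is 0x40400000, the IEEE-754 single-precision bit
       pattern of 3.0f; negative values are produced below by XOR-ing the sign
       bit into the pattern, which avoids an int-to-float conversion and makes
       the sign flip a single bit operation. */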
801 int vlctable;
802 VLC* vlc;
803 WMAProChannelCtx* ci = &s->channel[c];
804 int rl_mode = 0;
805 int cur_coeff = 0;
806 int num_zeros = 0;
807 const uint16_t* run;
808 const float* level;
809
810 ff_dlog(s->avctx, "decode coefficients for channel %i\n", c);
811
812 vlctable = get_bits1(&s->gb);
813 vlc = &coef_vlc[vlctable];
814
815 if (vlctable) {
816 run = coef1_run;
817 level = coef1_level;
818 } else {
819 run = coef0_run;
820 level = coef0_level;
821 }
822
823 /** decode vector coefficients (consumes up to 167 bits per iteration for
824 4 vector coded large values) */
825 while ((s->transmit_num_vec_coeffs || !rl_mode) &&
826 (cur_coeff + 3 < ci->num_vec_coeffs)) {
827 uint32_t vals[4];
828 int i;
829 unsigned int idx;
830
831 idx = get_vlc2(&s->gb, vec4_vlc.table, VLCBITS, VEC4MAXDEPTH);
832
833 if (idx == HUFF_VEC4_SIZE - 1) {
834 for (i = 0; i < 4; i += 2) {
835 idx = get_vlc2(&s->gb, vec2_vlc.table, VLCBITS, VEC2MAXDEPTH);
836 if (idx == HUFF_VEC2_SIZE - 1) {
837 uint32_t v0, v1;
838 v0 = get_vlc2(&s->gb, vec1_vlc.table, VLCBITS, VEC1MAXDEPTH);
839 if (v0 == HUFF_VEC1_SIZE - 1)
840 v0 += ff_wma_get_large_val(&s->gb);
841 v1 = get_vlc2(&s->gb, vec1_vlc.table, VLCBITS, VEC1MAXDEPTH);
842 if (v1 == HUFF_VEC1_SIZE - 1)
843 v1 += ff_wma_get_large_val(&s->gb);
844 vals[i ] = av_float2int(v0);
845 vals[i+1] = av_float2int(v1);
846 } else {
847 vals[i] = fval_tab[symbol_to_vec2[idx] >> 4 ];
848 vals[i+1] = fval_tab[symbol_to_vec2[idx] & 0xF];
849 }
850 }
851 } else {
852 vals[0] = fval_tab[ symbol_to_vec4[idx] >> 12 ];
853 vals[1] = fval_tab[(symbol_to_vec4[idx] >> 8) & 0xF];
854 vals[2] = fval_tab[(symbol_to_vec4[idx] >> 4) & 0xF];
855 vals[3] = fval_tab[ symbol_to_vec4[idx] & 0xF];
856 }
857
858 /** decode sign */
859 for (i = 0; i < 4; i++) {
860 if (vals[i]) {
861 uint32_t sign = get_bits1(&s->gb) - 1;
862 AV_WN32A(&ci->coeffs[cur_coeff], vals[i] ^ sign << 31);
863 num_zeros = 0;
864 } else {
865 ci->coeffs[cur_coeff] = 0;
866 /** switch to run level mode when subframe_len / 128 zeros
867 were found in a row */
868 rl_mode |= (++num_zeros > s->subframe_len >> 8);
869 }
870 ++cur_coeff;
871 }
872 }
873
874 /** decode run level coded coefficients */
875 if (cur_coeff < s->subframe_len) {
876 memset(&ci->coeffs[cur_coeff], 0,
877 sizeof(*ci->coeffs) * (s->subframe_len - cur_coeff));
878 if (ff_wma_run_level_decode(s->avctx, &s->gb, vlc,
879 level, run, 1, ci->coeffs,
880 cur_coeff, s->subframe_len,
881 s->subframe_len, s->esc_len, 0))
882 return AVERROR_INVALIDDATA;
883 }
884
885 return 0;
886 }
887
888 /**
889 *@brief Extract scale factors from the bitstream.
890 *@param s codec context
891 *@return 0 on success, < 0 in case of bitstream errors
892 */
893 static int decode_scale_factors(WMAProDecodeCtx* s)
894 {
895 int i;
896
897 /** should never consume more than 5344 bits
898 * MAX_CHANNELS * (1 + MAX_BANDS * 23)
899 */
900
901 for (i = 0; i < s->channels_for_cur_subframe; i++) {
902 int c = s->channel_indexes_for_cur_subframe[i];
903 int* sf;
904 int* sf_end;
905 s->channel[c].scale_factors = s->channel[c].saved_scale_factors[!s->channel[c].scale_factor_idx];
906 sf_end = s->channel[c].scale_factors + s->num_bands;
907
908 /** resample scale factors for the new block size
909 * as the scale factors might need to be resampled several times
910 * before some new values are transmitted, a backup of the last
911 * transmitted scale factors is kept in saved_scale_factors
912 */
913 if (s->channel[c].reuse_sf) {
914 const int8_t* sf_offsets = s->sf_offsets[s->table_idx][s->channel[c].table_idx];
915 int b;
916 for (b = 0; b < s->num_bands; b++)
917 s->channel[c].scale_factors[b] =
918 s->channel[c].saved_scale_factors[s->channel[c].scale_factor_idx][*sf_offsets++];
919 }
920
921 if (!s->channel[c].cur_subframe || get_bits1(&s->gb)) {
922
923 if (!s->channel[c].reuse_sf) {
924 int val;
925 /** decode DPCM coded scale factors */
926 s->channel[c].scale_factor_step = get_bits(&s->gb, 2) + 1;
927 val = 45 / s->channel[c].scale_factor_step;
928 for (sf = s->channel[c].scale_factors; sf < sf_end; sf++) {
929 val += get_vlc2(&s->gb, sf_vlc.table, SCALEVLCBITS, SCALEMAXDEPTH) - 60;
930 *sf = val;
931 }
932 } else {
933 int i;
934 /** run level decode differences to the resampled factors */
935 for (i = 0; i < s->num_bands; i++) {
936 int idx;
937 int skip;
938 int val;
939 int sign;
940
941 idx = get_vlc2(&s->gb, sf_rl_vlc.table, VLCBITS, SCALERLMAXDEPTH);
942
943 if (!idx) {
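                    /* 14-bit escape: bits 13..6 hold the level, bits 5..1 the
                       run length and bit 0 the sign (mapped to 0 or -1). */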
944 uint32_t code = get_bits(&s->gb, 14);
945 val = code >> 6;
946 sign = (code & 1) - 1;
947 skip = (code & 0x3f) >> 1;
948 } else if (idx == 1) {
949 break;
950 } else {
951 skip = scale_rl_run[idx];
952 val = scale_rl_level[idx];
953 sign = get_bits1(&s->gb)-1;
954 }
955
956 i += skip;
957 if (i >= s->num_bands) {
958 av_log(s->avctx, AV_LOG_ERROR,
959 "invalid scale factor coding\n");
960 return AVERROR_INVALIDDATA;
961 }
962 s->channel[c].scale_factors[i] += (val ^ sign) - sign;
963 }
964 }
965 /** swap buffers */
966 s->channel[c].scale_factor_idx = !s->channel[c].scale_factor_idx;
967 s->channel[c].table_idx = s->table_idx;
968 s->channel[c].reuse_sf = 1;
969 }
970
971 /** calculate new scale factor maximum */
972 s->channel[c].max_scale_factor = s->channel[c].scale_factors[0];
973 for (sf = s->channel[c].scale_factors + 1; sf < sf_end; sf++) {
974 s->channel[c].max_scale_factor =
975 FFMAX(s->channel[c].max_scale_factor, *sf);
976 }
977
978 }
979 return 0;
980 }
981
982 /**
983 *@brief Reconstruct the individual channel data.
984 *@param s codec context
985 */
986 static void inverse_channel_transform(WMAProDecodeCtx *s)
987 {
988 int i;
989
990 for (i = 0; i < s->num_chgroups; i++) {
991 if (s->chgroup[i].transform) {
992 float data[WMAPRO_MAX_CHANNELS];
993 const int num_channels = s->chgroup[i].num_channels;
994 float** ch_data = s->chgroup[i].channel_data;
995 float** ch_end = ch_data + num_channels;
996 const int8_t* tb = s->chgroup[i].transform_band;
997 int16_t* sfb;
998
999 /** multichannel decorrelation */
1000 for (sfb = s->cur_sfb_offsets;
1001 sfb < s->cur_sfb_offsets + s->num_bands; sfb++) {
1002 int y;
1003 if (*tb++ == 1) {
1004 /** multiply values with the decorrelation_matrix */
1005 for (y = sfb[0]; y < FFMIN(sfb[1], s->subframe_len); y++) {
1006 const float* mat = s->chgroup[i].decorrelation_matrix;
1007 const float* data_end = data + num_channels;
1008 float* data_ptr = data;
1009 float** ch;
1010
1011 for (ch = ch_data; ch < ch_end; ch++)
1012 *data_ptr++ = (*ch)[y];
1013
1014 for (ch = ch_data; ch < ch_end; ch++) {
1015 float sum = 0;
1016 data_ptr = data;
1017 while (data_ptr < data_end)
1018 sum += *data_ptr++ * *mat++;
1019
1020 (*ch)[y] = sum;
1021 }
1022 }
1023 } else if (s->avctx->channels == 2) {
1024 int len = FFMIN(sfb[1], s->subframe_len) - sfb[0];
1025 s->fdsp.vector_fmul_scalar(ch_data[0] + sfb[0],
1026 ch_data[0] + sfb[0],
1027 181.0 / 128, len);
1028 s->fdsp.vector_fmul_scalar(ch_data[1] + sfb[0],
1029 ch_data[1] + sfb[0],
1030 181.0 / 128, len);
1031 }
1032 }
1033 }
1034 }
1035 }
1036
1037 /**
1038 *@brief Apply sine window and reconstruct the output buffer.
1039 *@param s codec context
1040 */
1041 static void wmapro_window(WMAProDecodeCtx *s)
1042 {
1043 int i;
1044 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1045 int c = s->channel_indexes_for_cur_subframe[i];
1046 float* window;
1047 int winlen = s->channel[c].prev_block_len;
1048 float* start = s->channel[c].coeffs - (winlen >> 1);
1049
1050 if (s->subframe_len < winlen) {
1051 start += (winlen - s->subframe_len) >> 1;
1052 winlen = s->subframe_len;
1053 }
1054
1055 window = s->windows[av_log2(winlen) - WMAPRO_BLOCK_MIN_BITS];
1056
1057 winlen >>= 1;
1058
1059 s->fdsp.vector_fmul_window(start, start, start + winlen,
1060 window, winlen);
1061
1062 s->channel[c].prev_block_len = s->subframe_len;
1063 }
1064 }
1065
1066 /**
1067 *@brief Decode a single subframe (block).
1068 *@param s codec context
1069 *@return 0 on success, < 0 when decoding failed
1070 */
1071 static int decode_subframe(WMAProDecodeCtx *s)
1072 {
1073 int offset = s->samples_per_frame;
1074 int subframe_len = s->samples_per_frame;
1075 int i;
1076 int total_samples = s->samples_per_frame * s->avctx->channels;
1077 int transmit_coeffs = 0;
1078 int cur_subwoofer_cutoff;
1079
1080 s->subframe_offset = get_bits_count(&s->gb);
1081
1082 /** reset channel context and find the next block offset and size
1083 == the next block of the channel with the smallest number of
1084 decoded samples
1085 */
1086 for (i = 0; i < s->avctx->channels; i++) {
1087 s->channel[i].grouped = 0;
1088 if (offset > s->channel[i].decoded_samples) {
1089 offset = s->channel[i].decoded_samples;
1090 subframe_len =
1091 s->channel[i].subframe_len[s->channel[i].cur_subframe];
1092 }
1093 }
1094
1095 ff_dlog(s->avctx,
1096 "processing subframe with offset %i len %i\n", offset, subframe_len);
1097
1098 /** get a list of all channels that contain the estimated block */
1099 s->channels_for_cur_subframe = 0;
1100 for (i = 0; i < s->avctx->channels; i++) {
1101 const int cur_subframe = s->channel[i].cur_subframe;
1102 /** subtract already processed samples */
1103 total_samples -= s->channel[i].decoded_samples;
1104
1105 /** and count if there are multiple subframes that match our profile */
1106 if (offset == s->channel[i].decoded_samples &&
1107 subframe_len == s->channel[i].subframe_len[cur_subframe]) {
1108 total_samples -= s->channel[i].subframe_len[cur_subframe];
1109 s->channel[i].decoded_samples +=
1110 s->channel[i].subframe_len[cur_subframe];
1111 s->channel_indexes_for_cur_subframe[s->channels_for_cur_subframe] = i;
1112 ++s->channels_for_cur_subframe;
1113 }
1114 }
1115
1116 /** check if the frame will be complete after processing the
1117 estimated block */
1118 if (!total_samples)
1119 s->parsed_all_subframes = 1;
1120
1121
1122 ff_dlog(s->avctx, "subframe is part of %i channels\n",
1123 s->channels_for_cur_subframe);
1124
1125 /** calculate number of scale factor bands and their offsets */
1126 s->table_idx = av_log2(s->samples_per_frame/subframe_len);
1127 s->num_bands = s->num_sfb[s->table_idx];
1128 s->cur_sfb_offsets = s->sfb_offsets[s->table_idx];
1129 cur_subwoofer_cutoff = s->subwoofer_cutoffs[s->table_idx];
1130
1131 /** configure the decoder for the current subframe */
1132 offset += s->samples_per_frame >> 1;
1133
1134 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1135 int c = s->channel_indexes_for_cur_subframe[i];
1136
1137 s->channel[c].coeffs = &s->channel[c].out[offset];
1138 }
1139
1140 s->subframe_len = subframe_len;
1141 s->esc_len = av_log2(s->subframe_len - 1) + 1;
1142
1143 /** skip extended header if any */
1144 if (get_bits1(&s->gb)) {
1145 int num_fill_bits;
1146 if (!(num_fill_bits = get_bits(&s->gb, 2))) {
1147 int len = get_bits(&s->gb, 4);
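            /* len may be zero here; get_bitsz() behaves like get_bits() but
               is also defined for a 0-bit read, returning 0 in that case. */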
1148 num_fill_bits = get_bitsz(&s->gb, len) + 1;
1149 }
1150
1151 if (num_fill_bits >= 0) {
1152 if (get_bits_count(&s->gb) + num_fill_bits > s->num_saved_bits) {
1153 av_log(s->avctx, AV_LOG_ERROR, "invalid number of fill bits\n");
1154 return AVERROR_INVALIDDATA;
1155 }
1156
1157 skip_bits_long(&s->gb, num_fill_bits);
1158 }
1159 }
1160
1161 /** no idea what the following bit is used for */
1162 if (get_bits1(&s->gb)) {
1163 avpriv_request_sample(s->avctx, "Reserved bit");
1164 return AVERROR_PATCHWELCOME;
1165 }
1166
1167
1168 if (decode_channel_transform(s) < 0)
1169 return AVERROR_INVALIDDATA;
1170
1171
1172 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1173 int c = s->channel_indexes_for_cur_subframe[i];
1174 if ((s->channel[c].transmit_coefs = get_bits1(&s->gb)))
1175 transmit_coeffs = 1;
1176 }
1177
1178 if (transmit_coeffs) {
1179 int step;
1180 int quant_step = 90 * s->bits_per_sample >> 4;
1181
1182 /** decode number of vector coded coefficients */
1183 if ((s->transmit_num_vec_coeffs = get_bits1(&s->gb))) {
1184 int num_bits = av_log2((s->subframe_len + 3)/4) + 1;
1185 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1186 int c = s->channel_indexes_for_cur_subframe[i];
1187 int num_vec_coeffs = get_bits(&s->gb, num_bits) << 2;
1188 if (num_vec_coeffs + offset > FF_ARRAY_ELEMS(s->channel[c].out)) {
1189 av_log(s->avctx, AV_LOG_ERROR, "num_vec_coeffs %d is too large\n", num_vec_coeffs);
1190 return AVERROR_INVALIDDATA;
1191 }
1192 s->channel[c].num_vec_coeffs = num_vec_coeffs;
1193 }
1194 } else {
1195 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1196 int c = s->channel_indexes_for_cur_subframe[i];
1197 s->channel[c].num_vec_coeffs = s->subframe_len;
1198 }
1199 }
1200 /** decode quantization step */
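            /* The step is a signed 6-bit value; the extremes -32 and 31 act
               as escape codes and are extended by further 5-bit fields: as
               long as a field reads 31 another one follows, and the final
               smaller value terminates the accumulation (sign applied from
               the initial escape). */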
1201 step = get_sbits(&s->gb, 6);
1202 quant_step += step;
1203 if (step == -32 || step == 31) {
1204 const int sign = (step == 31) - 1;
1205 int quant = 0;
1206 while (get_bits_count(&s->gb) + 5 < s->num_saved_bits &&
1207 (step = get_bits(&s->gb, 5)) == 31) {
1208 quant += 31;
1209 }
1210 quant_step += ((quant + step) ^ sign) - sign;
1211 }
1212 if (quant_step < 0) {
1213 av_log(s->avctx, AV_LOG_DEBUG, "negative quant step\n");
1214 }
1215
1216 /** decode quantization step modifiers for every channel */
1217
1218 if (s->channels_for_cur_subframe == 1) {
1219 s->channel[s->channel_indexes_for_cur_subframe[0]].quant_step = quant_step;
1220 } else {
1221 int modifier_len = get_bits(&s->gb, 3);
1222 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1223 int c = s->channel_indexes_for_cur_subframe[i];
1224 s->channel[c].quant_step = quant_step;
1225 if (get_bits1(&s->gb)) {
1226 if (modifier_len) {
1227 s->channel[c].quant_step += get_bits(&s->gb, modifier_len) + 1;
1228 } else
1229 ++s->channel[c].quant_step;
1230 }
1231 }
1232 }
1233
1234 /** decode scale factors */
1235 if (decode_scale_factors(s) < 0)
1236 return AVERROR_INVALIDDATA;
1237 }
1238
1239 ff_dlog(s->avctx, "BITSTREAM: subframe header length was %i\n",
1240 get_bits_count(&s->gb) - s->subframe_offset);
1241
1242 /** parse coefficients */
1243 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1244 int c = s->channel_indexes_for_cur_subframe[i];
1245 if (s->channel[c].transmit_coefs &&
1246 get_bits_count(&s->gb) < s->num_saved_bits) {
1247 decode_coeffs(s, c);
1248 } else
1249 memset(s->channel[c].coeffs, 0,
1250 sizeof(*s->channel[c].coeffs) * subframe_len);
1251 }
1252
1253 ff_dlog(s->avctx, "BITSTREAM: subframe length was %i\n",
1254 get_bits_count(&s->gb) - s->subframe_offset);
1255
1256 if (transmit_coeffs) {
1257 FFTContext *mdct = &s->mdct_ctx[av_log2(subframe_len) - WMAPRO_BLOCK_MIN_BITS];
1258 /** reconstruct the per channel data */
1259 inverse_channel_transform(s);
1260 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1261 int c = s->channel_indexes_for_cur_subframe[i];
1262 const int* sf = s->channel[c].scale_factors;
1263 int b;
1264
1265 if (c == s->lfe_channel)
1266 memset(&s->tmp[cur_subwoofer_cutoff], 0, sizeof(*s->tmp) *
1267 (subframe_len - cur_subwoofer_cutoff));
1268
1269 /** inverse quantization and rescaling */
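                /* Illustrative numbers: for 16-bit content quant_step starts
                   at 90, so a band at the maximum scale factor is scaled by
                   10^(90/20) ~= 31623; every scale factor step below the
                   maximum lowers the exponent by scale_factor_step, i.e. by
                   scale_factor_step dB. */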
1270 for (b = 0; b < s->num_bands; b++) {
1271 const int end = FFMIN(s->cur_sfb_offsets[b+1], s->subframe_len);
1272 const int exp = s->channel[c].quant_step -
1273 (s->channel[c].max_scale_factor - *sf++) *
1274 s->channel[c].scale_factor_step;
1275 const float quant = pow(10.0, exp / 20.0);
1276 int start = s->cur_sfb_offsets[b];
1277 s->fdsp.vector_fmul_scalar(s->tmp + start,
1278 s->channel[c].coeffs + start,
1279 quant, end - start);
1280 }
1281
1282 /** apply imdct (imdct_half == DCT-IV with reverse) */
1283 mdct->imdct_half(mdct, s->channel[c].coeffs, s->tmp);
1284 }
1285 }
1286
1287 /** window and overlap-add */
1288 wmapro_window(s);
1289
1290 /** handled one subframe */
1291 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1292 int c = s->channel_indexes_for_cur_subframe[i];
1293 if (s->channel[c].cur_subframe >= s->channel[c].num_subframes) {
1294 av_log(s->avctx, AV_LOG_ERROR, "broken subframe\n");
1295 return AVERROR_INVALIDDATA;
1296 }
1297 ++s->channel[c].cur_subframe;
1298 }
1299
1300 return 0;
1301 }
1302
1303 /**
1304 *@brief Decode one WMA frame.
1305 *@param s codec context
1306 *@return 0 if the trailer bit indicates that this is the last frame,
1307 * 1 if there are additional frames
1308 */
1309 static int decode_frame(WMAProDecodeCtx *s, AVFrame *frame, int *got_frame_ptr)
1310 {
1311 AVCodecContext *avctx = s->avctx;
1312 GetBitContext* gb = &s->gb;
1313 int more_frames = 0;
1314 int len = 0;
1315 int i, ret;
1316
1317 /** get frame length */
1318 if (s->len_prefix)
1319 len = get_bits(gb, s->log2_frame_size);
1320
1321 ff_dlog(s->avctx, "decoding frame with length %x\n", len);
1322
1323 /** decode tile information */
1324 if (decode_tilehdr(s)) {
1325 s->packet_loss = 1;
1326 return 0;
1327 }
1328
1329 /** read postproc transform */
1330 if (s->avctx->channels > 1 && get_bits1(gb)) {
1331 if (get_bits1(gb)) {
1332 for (i = 0; i < avctx->channels * avctx->channels; i++)
1333 skip_bits(gb, 4);
1334 }
1335 }
1336
1337 /** read drc info */
1338 if (s->dynamic_range_compression) {
1339 s->drc_gain = get_bits(gb, 8);
1340 ff_dlog(s->avctx, "drc_gain %i\n", s->drc_gain);
1341 }
1342
1343 /** no idea what these are for, might be the number of samples
1344 that need to be skipped at the beginning or end of a stream */
1345 if (get_bits1(gb)) {
1346 int av_unused skip;
1347
1348 /** usually true for the first frame */
1349 if (get_bits1(gb)) {
1350 skip = get_bits(gb, av_log2(s->samples_per_frame * 2));
1351 ff_dlog(s->avctx, "start skip: %i\n", skip);
1352 }
1353
1354 /** sometimes true for the last frame */
1355 if (get_bits1(gb)) {
1356 skip = get_bits(gb, av_log2(s->samples_per_frame * 2));
1357 ff_dlog(s->avctx, "end skip: %i\n", skip);
1358 }
1359
1360 }
1361
1362 ff_dlog(s->avctx, "BITSTREAM: frame header length was %i\n",
1363 get_bits_count(gb) - s->frame_offset);
1364
1365 /** reset subframe states */
1366 s->parsed_all_subframes = 0;
1367 for (i = 0; i < avctx->channels; i++) {
1368 s->channel[i].decoded_samples = 0;
1369 s->channel[i].cur_subframe = 0;
1370 s->channel[i].reuse_sf = 0;
1371 }
1372
1373 /** decode all subframes */
1374 while (!s->parsed_all_subframes) {
1375 if (decode_subframe(s) < 0) {
1376 s->packet_loss = 1;
1377 return 0;
1378 }
1379 }
1380
1381 /* get output buffer */
1382 frame->nb_samples = s->samples_per_frame;
1383 if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
1384 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1385 s->packet_loss = 1;
1386 return 0;
1387 }
1388
1389 /** copy samples to the output buffer */
1390 for (i = 0; i < avctx->channels; i++)
1391 memcpy(frame->extended_data[i], s->channel[i].out,
1392 s->samples_per_frame * sizeof(*s->channel[i].out));
1393
1394 for (i = 0; i < avctx->channels; i++) {
1395 /** reuse second half of the IMDCT output for the next frame */
1396 memcpy(&s->channel[i].out[0],
1397 &s->channel[i].out[s->samples_per_frame],
1398 s->samples_per_frame * sizeof(*s->channel[i].out) >> 1);
1399 }
1400
1401 if (s->skip_frame) {
1402 s->skip_frame = 0;
1403 *got_frame_ptr = 0;
1404 av_frame_unref(frame);
1405 } else {
1406 *got_frame_ptr = 1;
1407 }
1408
1409 if (s->len_prefix) {
1410 if (len != (get_bits_count(gb) - s->frame_offset) + 2) {
1411 /** FIXME: not sure if this is always an error */
1412 av_log(s->avctx, AV_LOG_ERROR,
1413 "frame[%"PRIu32"] would have to skip %i bits\n",
1414 s->frame_num,
1415 len - (get_bits_count(gb) - s->frame_offset) - 1);
1416 s->packet_loss = 1;
1417 return 0;
1418 }
1419
1420 /** skip the rest of the frame data */
1421 skip_bits_long(gb, len - (get_bits_count(gb) - s->frame_offset) - 1);
1422 } else {
1423 while (get_bits_count(gb) < s->num_saved_bits && get_bits1(gb) == 0) {
1424 }
1425 }
1426
1427 /** decode trailer bit */
1428 more_frames = get_bits1(gb);
1429
1430 ++s->frame_num;
1431 return more_frames;
1432 }
1433
1434 /**
1435 *@brief Calculate remaining input buffer length.
1436 *@param s codec context
1437 *@param gb bitstream reader context
1438 *@return remaining size in bits
1439 */
1440 static int remaining_bits(WMAProDecodeCtx *s, GetBitContext *gb)
1441 {
1442 return s->buf_bit_size - get_bits_count(gb);
1443 }
1444
1445 /**
1446 *@brief Fill the bit reservoir with a (partial) frame.
1447 *@param s codec context
1448 *@param gb bitstream reader context
1449 *@param len length of the partial frame
1450 *@param append decides whether to reset the buffer or not
1451 */
1452 static void save_bits(WMAProDecodeCtx *s, GetBitContext* gb, int len,
1453 int append)
1454 {
1455 int buflen;
1456
1457 /** when the frame data does not need to be concatenated, the input buffer
1458 is reset and additional bits from the previous frame are copied
1459 and skipped later so that a fast byte copy is possible */
1460
1461 if (!append) {
1462 s->frame_offset = get_bits_count(gb) & 7;
1463 s->num_saved_bits = s->frame_offset;
1464 init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE);
1465 }
1466
1467 buflen = (s->num_saved_bits + len + 8) >> 3;
1468
1469 if (len <= 0 || buflen > MAX_FRAMESIZE) {
1470 avpriv_request_sample(s->avctx, "Too small input buffer");
1471 s->packet_loss = 1;
1472 return;
1473 }
1474
1475 if (len > put_bits_left(&s->pb)) {
1476 av_log(s->avctx, AV_LOG_ERROR,
1477 "Cannot append %d bits, only %d bits available.\n",
1478 len, put_bits_left(&s->pb));
1479 s->packet_loss = 1;
1480 return;
1481 }
1482
1483 s->num_saved_bits += len;
1484 if (!append) {
1485 avpriv_copy_bits(&s->pb, gb->buffer + (get_bits_count(gb) >> 3),
1486 s->num_saved_bits);
1487 } else {
1488 int align = 8 - (get_bits_count(gb) & 7);
1489 align = FFMIN(align, len);
1490 put_bits(&s->pb, align, get_bits(gb, align));
1491 len -= align;
1492 avpriv_copy_bits(&s->pb, gb->buffer + (get_bits_count(gb) >> 3), len);
1493 }
1494 skip_bits_long(gb, len);
1495
1496 {
1497 PutBitContext tmp = s->pb;
1498 flush_put_bits(&tmp);
1499 }
1500
1501 init_get_bits(&s->gb, s->frame_data, s->num_saved_bits);
1502 skip_bits(&s->gb, s->frame_offset);
1503 }
1504
1505 /**
1506 *@brief Decode a single WMA packet.
1507 *@param avctx codec context
1508 *@param data the output buffer
1509 *@param avpkt input packet
1510 *@return number of bytes that were read from the input buffer
1511 */
1512 static int decode_packet(AVCodecContext *avctx, void *data,
1513 int *got_frame_ptr, AVPacket* avpkt)
1514 {
1515 WMAProDecodeCtx *s = avctx->priv_data;
1516 GetBitContext* gb = &s->pgb;
1517 const uint8_t* buf = avpkt->data;
1518 int buf_size = avpkt->size;
1519 int num_bits_prev_frame;
1520 int packet_sequence_number;
1521
1522 *got_frame_ptr = 0;
1523
1524 if (s->packet_done || s->packet_loss) {
1525 s->packet_done = 0;
1526
1527 /** sanity check for the buffer length */
1528 if (buf_size < avctx->block_align) {
1529 av_log(avctx, AV_LOG_ERROR, "Input packet too small (%d < %d)\n",
1530 buf_size, avctx->block_align);
1531 return AVERROR_INVALIDDATA;
1532 }
1533
1534 s->next_packet_start = buf_size - avctx->block_align;
1535 buf_size = avctx->block_align;
1536 s->buf_bit_size = buf_size << 3;
1537
1538 /** parse packet header */
1539 init_get_bits(gb, buf, s->buf_bit_size);
1540 packet_sequence_number = get_bits(gb, 4);
1541 skip_bits(gb, 2);
1542
1543 /** get number of bits that need to be added to the previous frame */
1544 num_bits_prev_frame = get_bits(gb, s->log2_frame_size);
1545 ff_dlog(avctx, "packet[%d]: nbpf %x\n", avctx->frame_number,
1546 num_bits_prev_frame);
1547
1548 /** check for packet loss */
1549 if (!s->packet_loss &&
1550 ((s->packet_sequence_number + 1) & 0xF) != packet_sequence_number) {
1551 s->packet_loss = 1;
1552 av_log(avctx, AV_LOG_ERROR,
1553 "Packet loss detected! seq %"PRIx8" vs %x\n",
1554 s->packet_sequence_number, packet_sequence_number);
1555 }
1556 s->packet_sequence_number = packet_sequence_number;
1557
1558 if (num_bits_prev_frame > 0) {
1559 int remaining_packet_bits = s->buf_bit_size - get_bits_count(gb);
1560 if (num_bits_prev_frame >= remaining_packet_bits) {
1561 num_bits_prev_frame = remaining_packet_bits;
1562 s->packet_done = 1;
1563 }
1564
1565 /** append the previous frame data to the remaining data from the
1566 previous packet to create a full frame */
1567 save_bits(s, gb, num_bits_prev_frame, 1);
1568 ff_dlog(avctx, "accumulated %x bits of frame data\n",
1569 s->num_saved_bits - s->frame_offset);
1570
1571 /** decode the cross packet frame if it is valid */
1572 if (!s->packet_loss)
1573 decode_frame(s, data, got_frame_ptr);
1574 } else if (s->num_saved_bits - s->frame_offset) {
1575 ff_dlog(avctx, "ignoring %x previously saved bits\n",
1576 s->num_saved_bits - s->frame_offset);
1577 }
1578
1579 if (s->packet_loss) {
1580 /** reset number of saved bits so that the decoder
1581 does not start to decode incomplete frames in the
1582 s->len_prefix == 0 case */
1583 s->num_saved_bits = 0;
1584 s->packet_loss = 0;
1585 }
1586
1587 } else {
1588 int frame_size;
1589 s->buf_bit_size = (avpkt->size - s->next_packet_start) << 3;
1590 init_get_bits(gb, avpkt->data, s->buf_bit_size);
1591 skip_bits(gb, s->packet_offset);
1592 if (s->len_prefix && remaining_bits(s, gb) > s->log2_frame_size &&
1593 (frame_size = show_bits(gb, s->log2_frame_size)) &&
1594 frame_size <= remaining_bits(s, gb)) {
1595 save_bits(s, gb, frame_size, 0);
1596 s->packet_done = !decode_frame(s, data, got_frame_ptr);
1597 } else if (!s->len_prefix
1598 && s->num_saved_bits > get_bits_count(&s->gb)) {
1599 /** when the frames do not have a length prefix, we don't know
1600 the compressed length of the individual frames;
1601 however, we know what part of a new packet belongs to the
1602 previous frame.
1603 Therefore we save the incoming packet first, then we append
1604 the "previous frame" data from the next packet so that
1605 we get a buffer that only contains full frames */
1606 s->packet_done = !decode_frame(s, data, got_frame_ptr);
1607 } else
1608 s->packet_done = 1;
1609 }
1610
1611 if (s->packet_done && !s->packet_loss &&
1612 remaining_bits(s, gb) > 0) {
1613 /** save the rest of the data so that it can be decoded
1614 with the next packet */
1615 save_bits(s, gb, remaining_bits(s, gb), 0);
1616 }
1617
1618 s->packet_offset = get_bits_count(gb) & 7;
1619 if (s->packet_loss)
1620 return AVERROR_INVALIDDATA;
1621
1622 return get_bits_count(gb) >> 3;
1623 }
1624
1625 /**
1626 *@brief Clear decoder buffers (for seeking).
1627 *@param avctx codec context
1628 */
1629 static void flush(AVCodecContext *avctx)
1630 {
1631 WMAProDecodeCtx *s = avctx->priv_data;
1632 int i;
1633 /** reset output buffer as a part of it is used during the windowing of a
1634 new frame */
1635 for (i = 0; i < avctx->channels; i++)
1636 memset(s->channel[i].out, 0, s->samples_per_frame *
1637 sizeof(*s->channel[i].out));
1638 s->packet_loss = 1;
1639 }
1640
1641
1642 /**
1643 *@brief wmapro decoder
1644 */
1645 AVCodec ff_wmapro_decoder = {
1646 .name = "wmapro",
1647 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 9 Professional"),
1648 .type = AVMEDIA_TYPE_AUDIO,
1649 .id = AV_CODEC_ID_WMAPRO,
1650 .priv_data_size = sizeof(WMAProDecodeCtx),
1651 .init = decode_init,
1652 .close = decode_end,
1653 .decode = decode_packet,
1654 .capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
1655 .flush = flush,
1656 .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
1657 AV_SAMPLE_FMT_NONE },
1658 };