/*
 * Windows Media Audio Voice decoder.
 * Copyright (c) 2009 Ronald S. Bultje
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * @brief Windows Media Audio Voice compatible decoder
 * @author Ronald S. Bultje <rsbultje@gmail.com>
 */

#include <assert.h>
#include <math.h>

#include "libavutil/channel_layout.h"
#include "libavutil/float_dsp.h"
#include "libavutil/mem.h"

#include "avcodec.h"
#include "bitstream.h"
#include "internal.h"
#include "put_bits.h"
#include "wmavoice_data.h"
#include "celp_filters.h"
#include "acelp_vectors.h"
#include "acelp_filters.h"
#include "lsp.h"
#include "dct.h"
#include "rdft.h"
#include "sinewin.h"
#include "vlc.h"

#define MAX_BLOCKS           8      ///< maximum number of blocks per frame
#define MAX_LSPS             16     ///< maximum filter order
#define MAX_LSPS_ALIGN16     16     ///< same as #MAX_LSPS; needs to be a multiple
                                    ///< of 16 for ASM input buffer alignment
#define MAX_FRAMES           3      ///< maximum number of frames per superframe
#define MAX_FRAMESIZE        160    ///< maximum number of samples per frame
#define MAX_SIGNAL_HISTORY   416    ///< maximum excitation signal history
#define MAX_SFRAMESIZE       (MAX_FRAMESIZE * MAX_FRAMES)
                                    ///< maximum number of samples per superframe
#define SFRAME_CACHE_MAXSIZE 256    ///< maximum cache size for frame data that
                                    ///< was split over two packets
#define VLC_NBITS            6      ///< number of bits to read per VLC iteration

/**
 * Frame type VLC coding.
 */
static VLC frame_type_vlc;

/**
 * Adaptive codebook types.
 */
enum {
    ACB_TYPE_NONE       = 0, ///< no adaptive codebook (only hardcoded fixed)
    ACB_TYPE_ASYMMETRIC = 1, ///< adaptive codebook with per-frame pitch, which
                             ///< we interpolate to get a per-sample pitch.
                             ///< Signal is generated using an asymmetric sinc
                             ///< window function
                             ///< @note see #wmavoice_ipol1_coeffs
    ACB_TYPE_HAMMING    = 2  ///< Per-block pitch with signal generation using
                             ///< a Hamming sinc window function
                             ///< @note see #wmavoice_ipol2_coeffs
};

/**
 * Fixed codebook types.
 */
enum {
    FCB_TYPE_SILENCE    = 0, ///< comfort noise during silence
                             ///< generated from a hardcoded (fixed) codebook
                             ///< with per-frame (low) gain values
    FCB_TYPE_HARDCODED  = 1, ///< hardcoded (fixed) codebook with per-block
                             ///< gain values
    FCB_TYPE_AW_PULSES  = 2, ///< Pitch-adaptive window (AW) pulse signals,
                             ///< used in particular for low-bitrate streams
    FCB_TYPE_EXC_PULSES = 3, ///< Innovation (fixed) codebook pulse sets in
                             ///< combinations of either single pulses or
                             ///< pulse pairs
};

/**
 * Description of frame types.
 */
static const struct frame_type_desc {
    uint8_t n_blocks;     ///< number of blocks per frame (each block
                          ///< contains 160/#n_blocks samples)
    uint8_t log_n_blocks; ///< log2(#n_blocks)
    uint8_t acb_type;     ///< Adaptive codebook type (ACB_TYPE_*)
    uint8_t fcb_type;     ///< Fixed codebook type (FCB_TYPE_*)
    uint8_t dbl_pulses;   ///< how many pulse vectors have pulse pairs
                          ///< (rather than just one single pulse)
                          ///< only if #fcb_type == #FCB_TYPE_EXC_PULSES
    uint16_t frame_size;  ///< the number of bits that make up the block
                          ///< data (per frame)
} frame_descs[17] = {
    { 1, 0, ACB_TYPE_NONE,       FCB_TYPE_SILENCE,    0,   0 },
    { 2, 1, ACB_TYPE_NONE,       FCB_TYPE_HARDCODED,  0,  28 },
    { 2, 1, ACB_TYPE_ASYMMETRIC, FCB_TYPE_AW_PULSES,  0,  46 },
    { 2, 1, ACB_TYPE_ASYMMETRIC, FCB_TYPE_EXC_PULSES, 2,  80 },
    { 2, 1, ACB_TYPE_ASYMMETRIC, FCB_TYPE_EXC_PULSES, 5, 104 },
    { 4, 2, ACB_TYPE_ASYMMETRIC, FCB_TYPE_EXC_PULSES, 0, 108 },
    { 4, 2, ACB_TYPE_ASYMMETRIC, FCB_TYPE_EXC_PULSES, 2, 132 },
    { 4, 2, ACB_TYPE_ASYMMETRIC, FCB_TYPE_EXC_PULSES, 5, 168 },
    { 2, 1, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 0,  64 },
    { 2, 1, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 2,  80 },
    { 2, 1, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 5, 104 },
    { 4, 2, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 0, 108 },
    { 4, 2, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 2, 132 },
    { 4, 2, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 5, 168 },
    { 8, 3, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 0, 176 },
    { 8, 3, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 2, 208 },
    { 8, 3, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 5, 256 }
};
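/* Reading example (added note, not part of the original table documentation):
 * frame_descs[3] describes a frame split into 2 blocks of 80 samples each,
 * using the asymmetric adaptive codebook and an excitation-pulse fixed
 * codebook in which 2 of the pulse vectors carry pulse pairs, occupying
 * 80 bits of block data per frame. */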

/**
 * WMA Voice decoding context.
 */
typedef struct WMAVoiceContext {
    /**
     * @name Global values specified in the stream header / extradata or used all over.
     * @{
     */
    BitstreamContext bc;          ///< packet bitreader. During decoder init,
                                  ///< it contains the extradata from the
                                  ///< demuxer. During decoding, it contains
                                  ///< packet data.
    int8_t vbm_tree[25];          ///< converts VLC codes to frame type

    int spillover_bitsize;        ///< number of bits used to specify
                                  ///< #spillover_nbits in the packet header
                                  ///< = ceil(log2(ctx->block_align << 3))
    int history_nsamples;         ///< number of samples in history for signal
                                  ///< prediction (through ACB)

    /* postfilter specific values */
    int do_apf;                   ///< whether to apply the averaged
                                  ///< projection filter (APF)
    int denoise_strength;         ///< strength of denoising in Wiener filter
                                  ///< [0-11]
    int denoise_tilt_corr;        ///< Whether to apply tilt correction to the
                                  ///< Wiener filter coefficients (postfilter)
    int dc_level;                 ///< Predicted amount of DC noise, based
                                  ///< on which a DC removal filter is used

    int lsps;                     ///< number of LSPs per frame [10 or 16]
    int lsp_q_mode;               ///< defines quantizer defaults [0, 1]
    int lsp_def_mode;             ///< defines different sets of LSP defaults
                                  ///< [0, 1]
    int frame_lsp_bitsize;        ///< size (in bits) of LSPs, when encoded
                                  ///< per-frame (independent coding)
    int sframe_lsp_bitsize;       ///< size (in bits) of LSPs, when encoded
                                  ///< per superframe (residual coding)

    int min_pitch_val;            ///< base value for pitch parsing code
    int max_pitch_val;            ///< max value + 1 for pitch parsing
    int pitch_nbits;              ///< number of bits used to specify the
                                  ///< pitch value in the frame header
    int block_pitch_nbits;        ///< number of bits used to specify the
                                  ///< first block's pitch value
    int block_pitch_range;        ///< range of the block pitch
    int block_delta_pitch_nbits;  ///< number of bits used to specify the
                                  ///< delta pitch between this and the last
                                  ///< block's pitch value, used in all but
                                  ///< first block
    int block_delta_pitch_hrange; ///< 1/2 range of the delta (full range is
                                  ///< from -this to +this-1)
    uint16_t block_conv_table[4]; ///< boundaries for block pitch unit/scale
                                  ///< conversion

    /**
     * @}
     *
     * @name Packet values specified in the packet header or related to a packet.
     *
     * A packet is considered to be a single unit of data provided to this
     * decoder by the demuxer.
     * @{
     */
    int spillover_nbits;          ///< number of bits of the previous packet's
                                  ///< last superframe preceding this
                                  ///< packet's first full superframe (useful
                                  ///< for re-synchronization also)
    int has_residual_lsps;        ///< if set, superframes contain one set of
                                  ///< LSPs that cover all frames, encoded as
                                  ///< independent and residual LSPs; if not
                                  ///< set, each frame contains its own, fully
                                  ///< independent, LSPs
    int skip_bits_next;           ///< number of bits to skip at the next call
                                  ///< to #wmavoice_decode_packet() (since
                                  ///< they're part of the previous superframe)

    uint8_t sframe_cache[SFRAME_CACHE_MAXSIZE + AV_INPUT_BUFFER_PADDING_SIZE];
                                  ///< cache for superframe data split over
                                  ///< multiple packets
    int sframe_cache_size;        ///< set to >0 if we have data from an
                                  ///< (incomplete) superframe from a previous
                                  ///< packet that spilled over in the current
                                  ///< packet; specifies the amount of bits in
                                  ///< #sframe_cache
    PutBitContext pb;             ///< bitstream writer for #sframe_cache

    /**
     * @}
     *
     * @name Frame and superframe values
     * Superframe and frame data - these can change from frame to frame,
     * although some of them do in that case serve as a cache / history for
     * the next frame or superframe.
     * @{
     */
    double prev_lsps[MAX_LSPS];   ///< LSPs of the last frame of the previous
                                  ///< superframe
    int last_pitch_val;           ///< pitch value of the previous frame
    int last_acb_type;            ///< frame type [0-2] of the previous frame
    int pitch_diff_sh16;          ///< ((cur_pitch_val - #last_pitch_val)
                                  ///< << 16) / #MAX_FRAMESIZE
    float silence_gain;           ///< set for use in blocks if #ACB_TYPE_NONE

    int aw_idx_is_ext;            ///< whether the AW index was encoded in
                                  ///< 8 bits (instead of 6)
    int aw_pulse_range;           ///< the range over which #aw_pulse_set1()
                                  ///< can apply the pulse, relative to the
                                  ///< value in aw_first_pulse_off. The exact
                                  ///< position of the first AW-pulse is within
                                  ///< [pulse_off, pulse_off + this], and
                                  ///< depends on bitstream values; [16 or 24]
    int aw_n_pulses[2];           ///< number of AW-pulses in each block; note
                                  ///< that this number can be negative (in
                                  ///< which case it basically means "zero")
    int aw_first_pulse_off[2];    ///< index of first sample to which to
                                  ///< apply AW-pulses, or -0xff if unset
    int aw_next_pulse_off_cache;  ///< the position (relative to start of the
                                  ///< second block) at which pulses should
                                  ///< start to be positioned, serves as a
                                  ///< cache for pitch-adaptive window pulses
                                  ///< between blocks

    int frame_cntr;               ///< current frame index [0 - 0xFFFE]; is
                                  ///< only used for comfort noise in #pRNG()
    float gain_pred_err[6];       ///< cache for gain prediction
    float excitation_history[MAX_SIGNAL_HISTORY];
                                  ///< cache of the signal of previous
                                  ///< superframes, used as a history for
                                  ///< signal generation
    float synth_history[MAX_LSPS]; ///< see #excitation_history
    /**
     * @}
     *
     * @name Postfilter values
     *
     * Variables used for postfilter implementation, mostly history for
     * smoothing and so on, and context variables for FFT/iFFT.
     * @{
     */
    RDFTContext rdft, irdft;      ///< contexts for FFT-calculation in the
                                  ///< postfilter (for denoise filter)
    DCTContext dct, dst;          ///< contexts for phase shift (in Hilbert
                                  ///< transform, part of postfilter)
    float sin[511], cos[511];     ///< 8-bit cosine/sine windows over [-pi,pi]
                                  ///< range
    float postfilter_agc;         ///< gain control memory, used in
                                  ///< #adaptive_gain_control()
    float dcf_mem[2];             ///< DC filter history
    float zero_exc_pf[MAX_SIGNAL_HISTORY + MAX_SFRAMESIZE];
                                  ///< zero filter output (i.e. excitation)
                                  ///< by postfilter
    float denoise_filter_cache[MAX_FRAMESIZE];
    int denoise_filter_cache_size; ///< samples in #denoise_filter_cache
    DECLARE_ALIGNED(32, float, tilted_lpcs_pf)[0x80];
                                  ///< aligned buffer for LPC tilting
    DECLARE_ALIGNED(32, float, denoise_coeffs_pf)[0x80];
                                  ///< aligned buffer for denoise coefficients
    DECLARE_ALIGNED(32, float, synth_filter_out_buf)[0x80 + MAX_LSPS_ALIGN16];
                                  ///< aligned buffer for postfilter speech
                                  ///< synthesis
    /**
     * @}
     */
} WMAVoiceContext;

/**
 * Set up the variable bit mode (VBM) tree from container extradata.
 * @param bc bit I/O context.
 *           The bit context (s->bc) should be loaded with byte 23-46 of the
 *           container extradata (i.e. the ones containing the VBM tree).
 * @param vbm_tree pointer to array to which the decoded VBM tree will be
 *                 written.
 * @return 0 on success, <0 on error.
 */
static av_cold int decode_vbmtree(BitstreamContext *bc, int8_t vbm_tree[25])
{
    int cntr[8] = { 0 }, n, res;

    memset(vbm_tree, 0xff, sizeof(vbm_tree[0]) * 25);
    for (n = 0; n < 17; n++) {
        res = bitstream_read(bc, 3);
        if (cntr[res] > 3) // should be >= 3 + (res == 7)
            return -1;
        vbm_tree[res * 3 + cntr[res]++] = n;
    }
    return 0;
}
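/* Illustration (hypothetical values, not taken from a real stream): if, while
 * reading the 17 3-bit fields above, frame type 5 is the first one assigned to
 * prefix group 1 (res == 1), then vbm_tree[1 * 3 + 0] == 5; a frame header that
 * starts with the VLC code "1100" (symbol 3 of frame_type_vlc, set up below)
 * then selects frame_descs[5]. */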

static av_cold void wmavoice_init_static_data(AVCodec *codec)
{
    static const uint8_t bits[] = {
         2,  2,  2,  4,  4,  4,
         6,  6,  6,  8,  8,  8,
        10, 10, 10, 12, 12, 12,
        14, 14, 14, 14
    };
    static const uint16_t codes[] = {
        0x0000, 0x0001, 0x0002,        //              00/01/10
        0x000c, 0x000d, 0x000e,        //           11+00/01/10
        0x003c, 0x003d, 0x003e,        //         1111+00/01/10
        0x00fc, 0x00fd, 0x00fe,        //       111111+00/01/10
        0x03fc, 0x03fd, 0x03fe,        //     11111111+00/01/10
        0x0ffc, 0x0ffd, 0x0ffe,        //   1111111111+00/01/10
        0x3ffc, 0x3ffd, 0x3ffe, 0x3fff // 111111111111+xx
    };

    INIT_VLC_STATIC(&frame_type_vlc, VLC_NBITS, sizeof(bits),
                    bits, 1, 1, codes, 2, 2, 132);
}

/**
 * Set up decoder with parameters from demuxer (extradata etc.).
 */
static av_cold int wmavoice_decode_init(AVCodecContext *ctx)
{
    int n, flags, pitch_range, lsp16_flag;
    WMAVoiceContext *s = ctx->priv_data;

    /**
     * Extradata layout:
     * - byte  0-18: WMAPro-in-WMAVoice extradata (see wmaprodec.c),
     * - byte 19-22: flags field (annoyingly in LE; see below for known
     *               values),
     * - byte 23-46: variable bitmode tree (really just 17 * 3 bits,
     *               rest is 0).
     */
    if (ctx->extradata_size != 46) {
        av_log(ctx, AV_LOG_ERROR,
               "Invalid extradata size %d (should be 46)\n",
               ctx->extradata_size);
        return AVERROR_INVALIDDATA;
    }
    flags                = AV_RL32(ctx->extradata + 18);
    s->spillover_bitsize = 3 + av_ceil_log2(ctx->block_align);
    s->do_apf            = flags & 0x1;
    if (s->do_apf) {
        ff_rdft_init(&s->rdft,  7, DFT_R2C);
        ff_rdft_init(&s->irdft, 7, IDFT_C2R);
        ff_dct_init(&s->dct, 6, DCT_I);
        ff_dct_init(&s->dst, 6, DST_I);

        ff_sine_window_init(s->cos, 256);
        memcpy(&s->sin[255], s->cos, 256 * sizeof(s->cos[0]));
        for (n = 0; n < 255; n++) {
            s->sin[n]       = -s->sin[510 - n];
            s->cos[510 - n] =  s->cos[n];
        }
    }
    s->denoise_strength = (flags >> 2) & 0xF;
    if (s->denoise_strength >= 12) {
        av_log(ctx, AV_LOG_ERROR,
               "Invalid denoise filter strength %d (max=11)\n",
               s->denoise_strength);
        return AVERROR_INVALIDDATA;
    }
    s->denoise_tilt_corr = !!(flags & 0x40);
    s->dc_level          = (flags >> 7) & 0xF;
    s->lsp_q_mode        = !!(flags & 0x2000);
    s->lsp_def_mode      = !!(flags & 0x4000);
    lsp16_flag           = flags & 0x1000;
    if (lsp16_flag) {
        s->lsps               = 16;
        s->frame_lsp_bitsize  = 34;
        s->sframe_lsp_bitsize = 60;
    } else {
        s->lsps               = 10;
        s->frame_lsp_bitsize  = 24;
        s->sframe_lsp_bitsize = 48;
    }
    for (n = 0; n < s->lsps; n++)
        s->prev_lsps[n] = M_PI * (n + 1.0) / (s->lsps + 1.0);

    bitstream_init8(&s->bc, ctx->extradata + 22, ctx->extradata_size - 22);
    if (decode_vbmtree(&s->bc, s->vbm_tree) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid VBM tree; broken extradata?\n");
        return AVERROR_INVALIDDATA;
    }

    s->min_pitch_val = ((ctx->sample_rate << 8)      /  400 + 50) >> 8;
    s->max_pitch_val = ((ctx->sample_rate << 8) * 37 / 2000 + 50) >> 8;
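    /* Rough interpretation (added note, not in the reference comments):
     * min_pitch_val is approximately sample_rate / 400 (a 2.5 ms pitch
     * period) and max_pitch_val approximately sample_rate * 37 / 2000
     * (an 18.5 ms period); the "+ 50) >> 8" acts as fixed-point rounding. */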
    pitch_range      = s->max_pitch_val - s->min_pitch_val;
    if (pitch_range <= 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid pitch range; broken extradata?\n");
        return AVERROR_INVALIDDATA;
    }
    s->pitch_nbits      = av_ceil_log2(pitch_range);
    s->last_pitch_val   = 40;
    s->last_acb_type    = ACB_TYPE_NONE;
    s->history_nsamples = s->max_pitch_val + 8;

    if (s->min_pitch_val < 1 || s->history_nsamples > MAX_SIGNAL_HISTORY) {
        int min_sr = ((((1 << 8) - 50) * 400) + 0xFF) >> 8,
            max_sr = ((((MAX_SIGNAL_HISTORY - 8) << 8) + 205) * 2000 / 37) >> 8;

        av_log(ctx, AV_LOG_ERROR,
               "Unsupported samplerate %d (min=%d, max=%d)\n",
               ctx->sample_rate, min_sr, max_sr); // 322-22097 Hz

        return AVERROR(ENOSYS);
    }

    s->block_conv_table[0]      = s->min_pitch_val;
    s->block_conv_table[1]      = (pitch_range * 25) >> 6;
    s->block_conv_table[2]      = (pitch_range * 44) >> 6;
    s->block_conv_table[3]      = s->max_pitch_val - 1;
    s->block_delta_pitch_hrange = (pitch_range >> 3) & ~0xF;
    if (s->block_delta_pitch_hrange <= 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid delta pitch hrange; broken extradata?\n");
        return AVERROR_INVALIDDATA;
    }
    s->block_delta_pitch_nbits  = 1 + av_ceil_log2(s->block_delta_pitch_hrange);
    s->block_pitch_range        = s->block_conv_table[2] +
                                  s->block_conv_table[3] + 1 +
                                  2 * (s->block_conv_table[1] - 2 * s->min_pitch_val);
    s->block_pitch_nbits        = av_ceil_log2(s->block_pitch_range);

    ctx->channels       = 1;
    ctx->channel_layout = AV_CH_LAYOUT_MONO;
    ctx->sample_fmt     = AV_SAMPLE_FMT_FLT;

    return 0;
}

/**
 * @name Postfilter functions
 * Postfilter functions (gain control, wiener denoise filter, DC filter,
 * kalman smoothening, plus surrounding code to wrap it)
 * @{
 */
/**
 * Adaptive gain control (as used in postfilter).
 *
 * Identical to #ff_adaptive_gain_control() in acelp_vectors.c, except
 * that the energy here is calculated using sum(abs(...)), whereas the
 * other codecs (e.g. AMR-NB, SIPRO) use sqrt(dotproduct(...)).
 *
 * @param out output buffer for filtered samples
 * @param in input buffer containing the samples as they are after the
 *           postfilter steps so far
 * @param speech_synth input buffer containing speech synth before postfilter
 * @param size input buffer size
 * @param alpha exponential filter factor
 * @param gain_mem pointer to filter memory (single float)
 */
static void adaptive_gain_control(float *out, const float *in,
                                  const float *speech_synth,
                                  int size, float alpha, float *gain_mem)
{
    int i;
    float speech_energy = 0.0, postfilter_energy = 0.0, gain_scale_factor;
    float mem = *gain_mem;

    for (i = 0; i < size; i++) {
        speech_energy     += fabsf(speech_synth[i]);
        postfilter_energy += fabsf(in[i]);
    }
    gain_scale_factor = (1.0 - alpha) * speech_energy / postfilter_energy;

    for (i = 0; i < size; i++) {
        mem    = alpha * mem + gain_scale_factor;
        out[i] = in[i] * mem;
    }

    *gain_mem = mem;
}

/**
 * Kalman smoothing function.
 *
 * This function looks pitch +/- 3 samples back into the history to find
 * the best fitting curve (the one giving the optimal gain of the two
 * signals, i.e. the highest dot product between the two), and then
 * uses that signal history to smoothen the output of the speech synthesis
 * filter.
 *
 * @param s WMA Voice decoding context
 * @param pitch pitch of the speech signal
 * @param in input speech signal
 * @param out output pointer for smoothened signal
 * @param size input/output buffer size
 *
 * @returns -1 if no smoothening took place, e.g. because no optimal
 *          fit could be found, or 0 on success.
 */
static int kalman_smoothen(WMAVoiceContext *s, int pitch,
                           const float *in, float *out, int size)
{
    int n;
    float optimal_gain = 0, dot;
    const float *ptr = &in[-FFMAX(s->min_pitch_val, pitch - 3)],
                *end = &in[-FFMIN(s->max_pitch_val, pitch + 3)],
                *best_hist_ptr;

    /* find best fitting point in history */
    do {
        dot = avpriv_scalarproduct_float_c(in, ptr, size);
        if (dot > optimal_gain) {
            optimal_gain  = dot;
            best_hist_ptr = ptr;
        }
    } while (--ptr >= end);

    if (optimal_gain <= 0)
        return -1;
    dot = avpriv_scalarproduct_float_c(best_hist_ptr, best_hist_ptr, size);
    if (dot <= 0) // would be 1.0
        return -1;

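    /* Note (added for clarity): since optimal_gain <= dot in the branch below,
     * dot / (dot + 0.6 * optimal_gain) is at least 1 / 1.6 = 0.625, which is
     * why the smoothing factor ends up in the 0.625-1.000 range. */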
    if (optimal_gain <= dot) {
        dot = dot / (dot + 0.6 * optimal_gain); // 0.625-1.000
    } else
        dot = 0.625;

    /* actual smoothing */
    for (n = 0; n < size; n++)
        out[n] = best_hist_ptr[n] + dot * (in[n] - best_hist_ptr[n]);

    return 0;
}

/**
 * Get the tilt factor of a formant filter from its transfer function
 * @see #tilt_factor() in amrnbdec.c, which does essentially the same,
 *      but somehow (??) it does a speech synthesis filter in the
 *      middle, which is missing here
 *
 * @param lpcs LPC coefficients
 * @param n_lpcs Size of LPC buffer
 * @returns the tilt factor
 */
static float tilt_factor(const float *lpcs, int n_lpcs)
{
    float rh0, rh1;

    rh0 = 1.0     + avpriv_scalarproduct_float_c(lpcs,  lpcs,    n_lpcs);
    rh1 = lpcs[0] + avpriv_scalarproduct_float_c(lpcs, &lpcs[1], n_lpcs - 1);

    return rh1 / rh0;
}

/**
 * Derive denoise filter coefficients (in real domain) from the LPCs.
 */
static void calc_input_response(WMAVoiceContext *s, float *lpcs,
                                int fcb_type, float *coeffs, int remainder)
{
    float last_coeff, min = 15.0, max = -15.0;
    float irange, angle_mul, gain_mul, range, sq;
    int n, idx;

    /* Create frequency power spectrum of speech input (i.e. RDFT of LPCs) */
    s->rdft.rdft_calc(&s->rdft, lpcs);
#define log_range(var, assign) do { \
        float tmp = log10f(assign);  var = tmp; \
        max       = FFMAX(max, tmp); min = FFMIN(min, tmp); \
    } while (0)
    log_range(last_coeff,  lpcs[1]         * lpcs[1]);
    for (n = 1; n < 64; n++)
        log_range(lpcs[n], lpcs[n * 2]     * lpcs[n * 2] +
                           lpcs[n * 2 + 1] * lpcs[n * 2 + 1]);
    log_range(lpcs[0],     lpcs[0]         * lpcs[0]);
#undef log_range
    range    = max - min;
    lpcs[64] = last_coeff;

    /* Now, use this spectrum to pick out these frequencies with higher
     * (relative) power/energy (which we then take to be "not noise"),
     * and set up a table (still in lpc[]) of (relative) gains per frequency.
     * These frequencies will be maintained, while others ("noise") will be
     * decreased in the filter output. */
    irange    = 64.0 / range; // so irange*(max-value) is in the range [0, 63]
    gain_mul  = range * (fcb_type == FCB_TYPE_HARDCODED ? (5.0 / 13.0) :
                                                          (5.0 / 14.7));
    angle_mul = gain_mul * (8.0 * M_LN10 / M_PI);
    for (n = 0; n <= 64; n++) {
        float pwr;

        idx = FFMAX(0, lrint((max - lpcs[n]) * irange) - 1);
        pwr = wmavoice_denoise_power_table[s->denoise_strength][idx];
        lpcs[n] = angle_mul * pwr;

        /* 70.57 =~ 1/log10(1.0331663) */
        idx = (pwr * gain_mul - 0.0295) * 70.570526123;
        if (idx > 127) { // fall back if index falls outside table range
            coeffs[n] = wmavoice_energy_table[127] *
                        powf(1.0331663, idx - 127);
        } else
            coeffs[n] = wmavoice_energy_table[FFMAX(0, idx)];
    }

    /* calculate the Hilbert transform of the gains, which we do (since this
     * is a sine input) by doing a phase shift (in theory, H(sin())=cos()).
     * Hilbert_Transform(RDFT(x)) = Laplace_Transform(x), which calculates the
     * "moment" of the LPCs in this filter. */
    s->dct.dct_calc(&s->dct, lpcs);
    s->dst.dct_calc(&s->dst, lpcs);

    /* Split out the coefficient indexes into phase/magnitude pairs */
    idx = 255 + av_clip(lpcs[64],                -255, 255);
    coeffs[0]  = coeffs[0]  * s->cos[idx];
    idx = 255 + av_clip(lpcs[64] - 2 * lpcs[63], -255, 255);
    last_coeff = coeffs[64] * s->cos[idx];
    for (n = 63;; n--) {
        idx = 255 + av_clip(-lpcs[64] - 2 * lpcs[n - 1], -255, 255);
        coeffs[n * 2 + 1] = coeffs[n] * s->sin[idx];
        coeffs[n * 2]     = coeffs[n] * s->cos[idx];

        if (!--n) break;

        idx = 255 + av_clip( lpcs[64] - 2 * lpcs[n - 1], -255, 255);
        coeffs[n * 2 + 1] = coeffs[n] * s->sin[idx];
        coeffs[n * 2]     = coeffs[n] * s->cos[idx];
    }
    coeffs[1] = last_coeff;

    /* move into real domain */
    s->irdft.rdft_calc(&s->irdft, coeffs);

    /* tilt correction and normalize scale */
    memset(&coeffs[remainder], 0, sizeof(coeffs[0]) * (128 - remainder));
    if (s->denoise_tilt_corr) {
        float tilt_mem = 0;

        coeffs[remainder - 1] = 0;
        ff_tilt_compensation(&tilt_mem,
                             -1.8 * tilt_factor(coeffs, remainder - 1),
                             coeffs, remainder);
    }
    sq = (1.0 / 64.0) * sqrtf(1 / avpriv_scalarproduct_float_c(coeffs, coeffs,
                                                               remainder));
    for (n = 0; n < remainder; n++)
        coeffs[n] *= sq;
}

/**
 * This function applies a Wiener filter on the (noisy) speech signal as
 * a means to denoise it.
 *
 * - take RDFT of LPCs to get the power spectrum of the noise + speech;
 * - using this power spectrum, calculate (for each frequency) the Wiener
 *   filter gain, which depends on the frequency power and desired level
 *   of noise subtraction (when set too high, this leads to artifacts).
 *   We can do this symmetrically over the X-axis (so 0-4kHz is the inverse
 *   of 4-8kHz);
 * - by doing a phase shift, calculate the Hilbert transform of this array
 *   of per-frequency filter-gains to get the filtering coefficients;
 * - smoothen/normalize/de-tilt these filter coefficients as desired;
 * - take RDFT of noisy sound, apply the coefficients and take its IRDFT
 *   to get the denoised speech signal;
 * - the leftover (i.e. output of the IRDFT on denoised speech data beyond
 *   the frame boundary) is saved and applied to subsequent frames by an
 *   overlap-add method (otherwise you get clicking-artifacts).
 *
 * @param s WMA Voice decoding context
 * @param fcb_type Frame (codebook) type
 * @param synth_pf input: the noisy speech signal, output: denoised speech
 *                 data; should be 16-byte aligned (for ASM purposes)
 * @param size size of the speech data
 * @param lpcs LPCs used to synthesize this frame's speech data
 */
static void wiener_denoise(WMAVoiceContext *s, int fcb_type,
                           float *synth_pf, int size,
                           const float *lpcs)
{
    int remainder, lim, n;

    if (fcb_type != FCB_TYPE_SILENCE) {
        float *tilted_lpcs = s->tilted_lpcs_pf,
              *coeffs = s->denoise_coeffs_pf, tilt_mem = 0;

        tilted_lpcs[0] = 1.0;
        memcpy(&tilted_lpcs[1], lpcs, sizeof(lpcs[0]) * s->lsps);
        memset(&tilted_lpcs[s->lsps + 1], 0,
               sizeof(tilted_lpcs[0]) * (128 - s->lsps - 1));
        ff_tilt_compensation(&tilt_mem, 0.7 * tilt_factor(lpcs, s->lsps),
                             tilted_lpcs, s->lsps + 2);

        /* The IRDFT output (127 samples for 7-bit filter) beyond the frame
         * size is applied to the next frame. All input beyond this is zero,
         * and thus all output beyond this will go towards zero, hence we can
         * limit to min(size-1, 127-size) as a performance consideration. */
        remainder = FFMIN(127 - size, size - 1);
        calc_input_response(s, tilted_lpcs, fcb_type, coeffs, remainder);

        /* apply coefficients (in frequency spectrum domain), i.e. complex
         * number multiplication */
        memset(&synth_pf[size], 0, sizeof(synth_pf[0]) * (128 - size));
        s->rdft.rdft_calc(&s->rdft, synth_pf);
        s->rdft.rdft_calc(&s->rdft, coeffs);
        synth_pf[0] *= coeffs[0];
        synth_pf[1] *= coeffs[1];
        for (n = 1; n < 64; n++) {
            float v1 = synth_pf[n * 2], v2 = synth_pf[n * 2 + 1];
            synth_pf[n * 2]     = v1 * coeffs[n * 2] - v2 * coeffs[n * 2 + 1];
            synth_pf[n * 2 + 1] = v2 * coeffs[n * 2] + v1 * coeffs[n * 2 + 1];
        }
        s->irdft.rdft_calc(&s->irdft, synth_pf);
    }

    /* merge filter output with the history of previous runs */
    if (s->denoise_filter_cache_size) {
        lim = FFMIN(s->denoise_filter_cache_size, size);
        for (n = 0; n < lim; n++)
            synth_pf[n] += s->denoise_filter_cache[n];
        s->denoise_filter_cache_size -= lim;
        memmove(s->denoise_filter_cache, &s->denoise_filter_cache[size],
                sizeof(s->denoise_filter_cache[0]) * s->denoise_filter_cache_size);
    }

    /* move remainder of filter output into a cache for future runs */
    if (fcb_type != FCB_TYPE_SILENCE) {
        lim = FFMIN(remainder, s->denoise_filter_cache_size);
        for (n = 0; n < lim; n++)
            s->denoise_filter_cache[n] += synth_pf[size + n];
        if (lim < remainder) {
            memcpy(&s->denoise_filter_cache[lim], &synth_pf[size + lim],
                   sizeof(s->denoise_filter_cache[0]) * (remainder - lim));
            s->denoise_filter_cache_size = remainder;
        }
    }
}

/**
 * Averaging projection filter, the postfilter used in WMAVoice.
 *
 * This uses the following steps:
 * - A zero-synthesis filter (generate excitation from synth signal)
 * - Kalman smoothing on excitation, based on pitch
 * - Re-synthesized smoothened output
 * - Iterative Wiener denoise filter
 * - Adaptive gain filter
 * - DC filter
 *
 * @param s WMAVoice decoding context
 * @param synth Speech synthesis output (before postfilter)
 * @param samples Output buffer for filtered samples
 * @param size Buffer size of synth & samples
 * @param lpcs Generated LPCs used for speech synthesis
 * @param zero_exc_pf destination for zero synthesis filter (16-byte aligned)
 * @param fcb_type Frame type (silence, hardcoded, AW-pulses or FCB-pulses)
 * @param pitch Pitch of the input signal
 */
static void postfilter(WMAVoiceContext *s, const float *synth,
                       float *samples, int size,
                       const float *lpcs, float *zero_exc_pf,
                       int fcb_type, int pitch)
{
    float synth_filter_in_buf[MAX_FRAMESIZE / 2],
          *synth_pf = &s->synth_filter_out_buf[MAX_LSPS_ALIGN16],
          *synth_filter_in = zero_exc_pf;

    assert(size <= MAX_FRAMESIZE / 2);

    /* generate excitation from input signal */
    ff_celp_lp_zero_synthesis_filterf(zero_exc_pf, lpcs, synth, size, s->lsps);

    if (fcb_type >= FCB_TYPE_AW_PULSES &&
        !kalman_smoothen(s, pitch, zero_exc_pf, synth_filter_in_buf, size))
        synth_filter_in = synth_filter_in_buf;

    /* re-synthesize speech after smoothening, and keep history */
    ff_celp_lp_synthesis_filterf(synth_pf, lpcs,
                                 synth_filter_in, size, s->lsps);
    memcpy(&synth_pf[-s->lsps], &synth_pf[size - s->lsps],
           sizeof(synth_pf[0]) * s->lsps);

    wiener_denoise(s, fcb_type, synth_pf, size, lpcs);

    adaptive_gain_control(samples, synth_pf, synth, size, 0.99,
                          &s->postfilter_agc);

    if (s->dc_level > 8) {
        /* remove ultra-low frequency DC noise / highpass filter;
         * coefficients are identical to those used in SIPR decoding,
         * and very closely resemble those used in AMR-NB decoding. */
        ff_acelp_apply_order_2_transfer_function(samples, samples,
            (const float[2]) { -1.99997,      1.0 },
            (const float[2]) { -1.9330735188, 0.93589198496 },
             0.93980580475, s->dcf_mem, size);
    }
}
/**
 * @}
 */

/**
 * Dequantize LSPs
 * @param lsps output pointer to the array that will hold the LSPs
 * @param num number of LSPs to be dequantized
 * @param values quantized values, contains n_stages values
 * @param sizes range (i.e. max value) of each quantized value
 * @param n_stages number of dequantization runs
 * @param table dequantization table to be used
 * @param mul_q LSF multiplier
 * @param base_q base (lowest) LSF values
 */
static void dequant_lsps(double *lsps, int num,
                         const uint16_t *values,
                         const uint16_t *sizes,
                         int n_stages, const uint8_t *table,
                         const double *mul_q,
                         const double *base_q)
{
    int n, m;

    memset(lsps, 0, num * sizeof(*lsps));
    for (n = 0; n < n_stages; n++) {
        const uint8_t *t_off = &table[values[n] * num];
        double base = base_q[n], mul = mul_q[n];

        for (m = 0; m < num; m++)
            lsps[m] += base + mul * t_off[m];

        table += sizes[n] * num;
    }
}

/**
 * @name LSP dequantization routines
 * LSP dequantization routines, for 10/16LSPs and independent/residual coding.
 * @note we assume enough bits are available, caller should check.
 * lsp10i() consumes 24 bits; lsp10r() consumes an additional 24 bits;
 * lsp16i() consumes 34 bits; lsp16r() consumes an additional 26 bits.
 * @{
 */
/**
 * Parse 10 independently-coded LSPs.
 */
static void dequant_lsp10i(BitstreamContext *bc, double *lsps)
{
    static const uint16_t vec_sizes[4] = { 256, 64, 32, 32 };
    static const double mul_lsf[4] = {
        5.2187144800e-3,    1.4626986422e-3,
        9.6179549166e-4,    1.1325736225e-3
    };
    static const double base_lsf[4] = {
        M_PI * -2.15522e-1, M_PI * -6.1646e-2,
        M_PI * -3.3486e-2,  M_PI * -5.7408e-2
    };
    uint16_t v[4];

    v[0] = bitstream_read(bc, 8);
    v[1] = bitstream_read(bc, 6);
    v[2] = bitstream_read(bc, 5);
    v[3] = bitstream_read(bc, 5);

    dequant_lsps(lsps, 10, v, vec_sizes, 4, wmavoice_dq_lsp10i,
                 mul_lsf, base_lsf);
}

/**
 * Parse 10 independently-coded LSPs, and then derive the tables to
 * generate LSPs for the other frames from them (residual coding).
 */
static void dequant_lsp10r(BitstreamContext *bc,
                           double *i_lsps, const double *old,
                           double *a1, double *a2, int q_mode)
{
    static const uint16_t vec_sizes[3] = { 128, 64, 64 };
    static const double mul_lsf[3] = {
        2.5807601174e-3, 1.2354460219e-3, 1.1763821673e-3
    };
    static const double base_lsf[3] = {
        M_PI * -1.07448e-1, M_PI * -5.2706e-2, M_PI * -5.1634e-2
    };
    const float (*ipol_tab)[2][10] = q_mode ?
        wmavoice_lsp10_intercoeff_b : wmavoice_lsp10_intercoeff_a;
    uint16_t interpol, v[3];
    int n;

    dequant_lsp10i(bc, i_lsps);

    interpol = bitstream_read(bc, 5);
    v[0]     = bitstream_read(bc, 7);
    v[1]     = bitstream_read(bc, 6);
    v[2]     = bitstream_read(bc, 6);

    for (n = 0; n < 10; n++) {
        double delta = old[n] - i_lsps[n];
        a1[n]        = ipol_tab[interpol][0][n] * delta + i_lsps[n];
        a1[10 + n]   = ipol_tab[interpol][1][n] * delta + i_lsps[n];
    }

    dequant_lsps(a2, 20, v, vec_sizes, 3, wmavoice_dq_lsp10r,
                 mul_lsf, base_lsf);
}

/**
 * Parse 16 independently-coded LSPs.
 */
static void dequant_lsp16i(BitstreamContext *bc, double *lsps)
{
    static const uint16_t vec_sizes[5] = { 256, 64, 128, 64, 128 };
    static const double mul_lsf[5] = {
        3.3439586280e-3,    6.9908173703e-4,
        3.3216608306e-3,    1.0334960326e-3,
        3.1899104283e-3
    };
    static const double base_lsf[5] = {
        M_PI * -1.27576e-1, M_PI * -2.4292e-2,
        M_PI * -1.28094e-1, M_PI * -3.2128e-2,
        M_PI * -1.29816e-1
    };
    uint16_t v[5];

    v[0] = bitstream_read(bc, 8);
    v[1] = bitstream_read(bc, 6);
    v[2] = bitstream_read(bc, 7);
    v[3] = bitstream_read(bc, 6);
    v[4] = bitstream_read(bc, 7);

    dequant_lsps( lsps,     5,  v,     vec_sizes,    2,
                 wmavoice_dq_lsp16i1,  mul_lsf,     base_lsf);
    dequant_lsps(&lsps[5],  5, &v[2], &vec_sizes[2], 2,
                 wmavoice_dq_lsp16i2, &mul_lsf[2], &base_lsf[2]);
    dequant_lsps(&lsps[10], 6, &v[4], &vec_sizes[4], 1,
                 wmavoice_dq_lsp16i3, &mul_lsf[4], &base_lsf[4]);
}

/**
 * Parse 16 independently-coded LSPs, and then derive the tables to
 * generate LSPs for the other frames from them (residual coding).
 */
static void dequant_lsp16r(BitstreamContext *bc,
                           double *i_lsps, const double *old,
                           double *a1, double *a2, int q_mode)
{
    static const uint16_t vec_sizes[3] = { 128, 128, 128 };
    static const double mul_lsf[3] = {
        1.2232979501e-3, 1.4062241527e-3, 1.6114744851e-3
    };
    static const double base_lsf[3] = {
        M_PI * -5.5830e-2, M_PI * -5.2908e-2, M_PI * -5.4776e-2
    };
    const float (*ipol_tab)[2][16] = q_mode ?
        wmavoice_lsp16_intercoeff_b : wmavoice_lsp16_intercoeff_a;
    uint16_t interpol, v[3];
    int n;

    dequant_lsp16i(bc, i_lsps);

    interpol = bitstream_read(bc, 5);
    v[0]     = bitstream_read(bc, 7);
    v[1]     = bitstream_read(bc, 7);
    v[2]     = bitstream_read(bc, 7);

    for (n = 0; n < 16; n++) {
        double delta = old[n] - i_lsps[n];
        a1[n]        = ipol_tab[interpol][0][n] * delta + i_lsps[n];
        a1[16 + n]   = ipol_tab[interpol][1][n] * delta + i_lsps[n];
    }

    dequant_lsps( a2,     10,  v,     vec_sizes,    1,
                 wmavoice_dq_lsp16r1,  mul_lsf,     base_lsf);
    dequant_lsps(&a2[10], 10, &v[1], &vec_sizes[1], 1,
                 wmavoice_dq_lsp16r2, &mul_lsf[1], &base_lsf[1]);
    dequant_lsps(&a2[20], 12, &v[2], &vec_sizes[2], 1,
                 wmavoice_dq_lsp16r3, &mul_lsf[2], &base_lsf[2]);
}

/**
 * @}
 * @name Pitch-adaptive window coding functions
 * The next few functions are for pitch-adaptive window coding.
 * @{
 */
/**
 * Parse the offset of the first pitch-adaptive window pulses, and
 * the distribution of pulses between the two blocks in this frame.
 * @param s WMA Voice decoding context private data
 * @param bc bit I/O context
 * @param pitch pitch for each block in this frame
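 *
 * Worked example (illustrative numbers only, not taken from any particular
 * stream): with pitch[0] == pitch[1] == 45 and a decoded start offset of 13,
 * the code below sets aw_pulse_range to 24, aw_n_pulses to {2, 2} and
 * aw_first_pulse_off to {1, 11}.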
 */
static void aw_parse_coords(WMAVoiceContext *s, BitstreamContext *bc,
                            const int *pitch)
{
    static const int16_t start_offset[94] = {
        -11,  -9,  -7,  -5,  -3,  -1,   1,   3,   5,   7,   9,  11,
         13,  15,  18,  17,  19,  20,  21,  22,  23,  24,  25,  26,
         27,  28,  29,  30,  31,  32,  33,  35,  37,  39,  41,  43,
         45,  47,  49,  51,  53,  55,  57,  59,  61,  63,  65,  67,
         69,  71,  73,  75,  77,  79,  81,  83,  85,  87,  89,  91,
         93,  95,  97,  99, 101, 103, 105, 107, 109, 111, 113, 115,
        117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 137, 139,
        141, 143, 145, 147, 149, 151, 153, 155, 157, 159
    };
    int bits, offset;

    /* position of pulse */
    s->aw_idx_is_ext = 0;
    if ((bits = bitstream_read(bc, 6)) >= 54) {
        s->aw_idx_is_ext = 1;
        bits += (bits - 54) * 3 + bitstream_read(bc, 2);
    }

    /* for a repeated pulse at pulse_off with a pitch_lag of pitch[], count
     * the distribution of the pulses in each block contained in this frame. */
    s->aw_pulse_range = FFMIN(pitch[0], pitch[1]) > 32 ? 24 : 16;
    for (offset = start_offset[bits]; offset < 0; offset += pitch[0]) ;
    s->aw_n_pulses[0]        = (pitch[0] - 1 + MAX_FRAMESIZE / 2 - offset) / pitch[0];
    s->aw_first_pulse_off[0] = offset - s->aw_pulse_range / 2;
    offset                  += s->aw_n_pulses[0] * pitch[0];
    s->aw_n_pulses[1]        = (pitch[1] - 1 + MAX_FRAMESIZE - offset) / pitch[1];
    s->aw_first_pulse_off[1] = offset - (MAX_FRAMESIZE + s->aw_pulse_range) / 2;

    /* if continuing from a position before the block, reset position to
     * start of block (when corrected for the range over which it can be
     * spread in aw_pulse_set1()). */
    if (start_offset[bits] < MAX_FRAMESIZE / 2) {
        while (s->aw_first_pulse_off[1] - pitch[1] + s->aw_pulse_range > 0)
            s->aw_first_pulse_off[1] -= pitch[1];
        if (start_offset[bits] < 0)
            while (s->aw_first_pulse_off[0] - pitch[0] + s->aw_pulse_range > 0)
                s->aw_first_pulse_off[0] -= pitch[0];
    }
}

/**
 * Apply second set of pitch-adaptive window pulses.
 * @param s WMA Voice decoding context private data
 * @param bc bit I/O context
 * @param block_idx block index in frame [0, 1]
 * @param fcb structure containing fixed codebook vector info
 * @return -1 on error, 0 otherwise
 */
static int aw_pulse_set2(WMAVoiceContext *s, BitstreamContext *bc,
                         int block_idx, AMRFixed *fcb)
{
    uint16_t use_mask_mem[9]; // only 5 are used, rest is padding
    uint16_t *use_mask = use_mask_mem + 2;
    /* in this function, idx is the index in the 80-bit (+ padding) use_mask
     * bit-array. Since use_mask consists of 16-bit values, the lower 4 bits
     * of idx are the position of the bit within a particular item in the
     * array (0 being the most significant bit, and 15 being the least
     * significant bit), and the remainder (>> 4) is the index in the
     * use_mask[]-array. This is faster and uses less memory than an
     * 80-byte/80-int array. */
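    /* Example of the indexing (added note): idx == 37 selects element
     * use_mask[37 >> 4] == use_mask[2] and, within it, the bit
     * 0x8000 >> (37 & 15) == 0x0400. */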
    int pulse_off = s->aw_first_pulse_off[block_idx],
        pulse_start, n, idx, range, aidx, start_off = 0;

    /* set offset of first pulse to within this block */
    if (s->aw_n_pulses[block_idx] > 0)
        while (pulse_off + s->aw_pulse_range < 1)
            pulse_off += fcb->pitch_lag;

    /* find range per pulse */
    if (s->aw_n_pulses[0] > 0) {
        if (block_idx == 0) {
            range = 32;
        } else /* block_idx = 1 */ {
            range = 8;
            if (s->aw_n_pulses[block_idx] > 0)
                pulse_off = s->aw_next_pulse_off_cache;
        }
    } else
        range = 16;
    pulse_start = s->aw_n_pulses[block_idx] > 0 ? pulse_off - range / 2 : 0;

    /* aw_pulse_set1() already applies pulses around pulse_off (to be exact,
     * in the range of [pulse_off, pulse_off + s->aw_pulse_range]), and thus
     * we exclude that range from being pulsed again in this function. */
    memset(&use_mask[-2], 0, 2 * sizeof(use_mask[0]));
    memset( use_mask,    -1, 5 * sizeof(use_mask[0]));
    memset(&use_mask[5],  0, 2 * sizeof(use_mask[0]));
    if (s->aw_n_pulses[block_idx] > 0)
        for (idx = pulse_off; idx < MAX_FRAMESIZE / 2; idx += fcb->pitch_lag) {
            int excl_range         = s->aw_pulse_range; // always 16 or 24
            uint16_t *use_mask_ptr = &use_mask[idx >> 4];
            int first_sh           = 16 - (idx & 15);
            *use_mask_ptr++       &= 0xFFFFu << first_sh;
            excl_range            -= first_sh;
            if (excl_range >= 16) {
                *use_mask_ptr++    = 0;
                *use_mask_ptr     &= 0xFFFF >> (excl_range - 16);
            } else
                *use_mask_ptr     &= 0xFFFF >> excl_range;
        }

    /* find the 'aidx'th offset that is not excluded */
    aidx = bitstream_read(bc, s->aw_n_pulses[0] > 0 ? 5 - 2 * block_idx : 4);
    for (n = 0; n <= aidx; pulse_start++) {
        for (idx = pulse_start; idx < 0; idx += fcb->pitch_lag) ;
        if (idx >= MAX_FRAMESIZE / 2) { // find from zero
            if (use_mask[0])      idx = 0x0F;
            else if (use_mask[1]) idx = 0x1F;
            else if (use_mask[2]) idx = 0x2F;
            else if (use_mask[3]) idx = 0x3F;
            else if (use_mask[4]) idx = 0x4F;
            else                  return -1;
            idx -= av_log2_16bit(use_mask[idx >> 4]);
        }
        if (use_mask[idx >> 4] & (0x8000 >> (idx & 15))) {
            use_mask[idx >> 4] &= ~(0x8000 >> (idx & 15));
            n++;
            start_off = idx;
        }
    }

    fcb->x[fcb->n] = start_off;
    fcb->y[fcb->n] = bitstream_read_bit(bc) ? -1.0 : 1.0;
    fcb->n++;

    /* set offset for next block, relative to start of that block */
    n = (MAX_FRAMESIZE / 2 - start_off) % fcb->pitch_lag;
    s->aw_next_pulse_off_cache = n ? fcb->pitch_lag - n : 0;
    return 0;
}

/**
 * Apply first set of pitch-adaptive window pulses.
 * @param s WMA Voice decoding context private data
 * @param bc bit I/O context
 * @param block_idx block index in frame [0, 1]
 * @param fcb storage location for fixed codebook pulse info
 */
static void aw_pulse_set1(WMAVoiceContext *s, BitstreamContext *bc,
                          int block_idx, AMRFixed *fcb)
{
    int val = bitstream_read(bc, 12 - 2 * (s->aw_idx_is_ext && !block_idx));
    float v;

    if (s->aw_n_pulses[block_idx] > 0) {
        int n, v_mask, i_mask, sh, n_pulses;

        if (s->aw_pulse_range == 24) { // 3 pulses, 1:sign + 3:index each
            n_pulses = 3;
            v_mask   = 8;
            i_mask   = 7;
            sh       = 4;
        } else { // 4 pulses, 1:sign + 2:index each
            n_pulses = 4;
            v_mask   = 4;
            i_mask   = 3;
            sh       = 3;
        }

        for (n = n_pulses - 1; n >= 0; n--, val >>= sh) {
            fcb->y[fcb->n] = (val & v_mask) ? -1.0 : 1.0;
            fcb->x[fcb->n] = (val & i_mask) * n_pulses + n +
                             s->aw_first_pulse_off[block_idx];
            while (fcb->x[fcb->n] < 0)
                fcb->x[fcb->n] += fcb->pitch_lag;
            if (fcb->x[fcb->n] < MAX_FRAMESIZE / 2)
                fcb->n++;
        }
    } else {
        int num2 = (val & 0x1FF) >> 1, delta, idx;

        if (num2 < 1 * 79)      { delta = 1; idx = num2 + 1; }
        else if (num2 < 2 * 78) { delta = 3; idx = num2 + 1 - 1 * 77; }
        else if (num2 < 3 * 77) { delta = 5; idx = num2 + 1 - 2 * 76; }
        else                    { delta = 7; idx = num2 + 1 - 3 * 75; }
        v = (val & 0x200) ? -1.0 : 1.0;

        fcb->no_repeat_mask |= 3 << fcb->n;
        fcb->x[fcb->n]       = idx - delta;
        fcb->y[fcb->n]       = v;
        fcb->x[fcb->n + 1]   = idx;
        fcb->y[fcb->n + 1]   = (val & 1) ? -v : v;
        fcb->n              += 2;
    }
}

/**
 * @}
 *
 * Generate a random number from frame_cntr and block_idx, which will live
 * in the range [0, 1000 - block_size] (so it can be used as an index in a
 * table of size 1000 of which you want to read block_size entries).
 *
 * @param frame_cntr current frame number
 * @param block_num current block index
 * @param block_size number of entries we want to read from a table
 *                   that has 1000 entries
 * @return a (non-)random number in the [0, 1000 - block_size] range.
 */
static int pRNG(int frame_cntr, int block_num, int block_size)
{
    /* array to simplify the calculation of z:
     * y = (x % 9) * 5 + 6;
     * z = (49995 * x) / y;
     * Since y only has 9 values, we can remove the division by using a
     * LUT and using FASTDIV-style divisions. For each of the 9 values
     * of y, we can rewrite z as:
     * z = x * (49995 / y) + x * ((49995 % y) / y)
     * In this table, each col represents one possible value of y, the
     * first number is 49995 / y, and the second is the FASTDIV variant
     * of 49995 % y / y. */
    static const unsigned int div_tbl[9][2] = {
        { 8332,  3 * 715827883U }, // y =  6
        { 4545,  0 * 390451573U }, // y = 11
        { 3124, 11 * 268435456U }, // y = 16
        { 2380, 15 * 204522253U }, // y = 21
        { 1922, 23 * 165191050U }, // y = 26
        { 1612, 23 * 138547333U }, // y = 31
        { 1388, 27 * 119304648U }, // y = 36
        { 1219, 16 * 104755300U }, // y = 41
        { 1086, 39 *  93368855U }  // y = 46
    };
    unsigned int z, y, x = MUL16(block_num, 1877) + frame_cntr;
    if (x >= 0xFFFF) x -= 0xFFFF;   // max value of x is 8*1877+0xFFFE=0x13AA6,
                                    // so this is effectively a modulo (%)
    y = x - 9 * MULH(477218589, x); // x % 9
    z = (uint16_t) (x * div_tbl[y][0] + UMULH(x, div_tbl[y][1]));
                                    // z = x * 49995 / (y * 5 + 6)
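    /* Quick sanity check with small, hypothetical inputs: pRNG(1, 0, 80) gives
     * x = 1, y index 1 (y = 11), z = 1 * 4545 + 0 = 4545 (= 49995 / 11), and
     * the function returns 4545 % 920 = 865. */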
1237 return z % (1000 - block_size);
1238}
1239
1240/**
1241 * Parse hardcoded signal for a single block.
1242 * @note see #synth_block().
1243 */
f7ec7f54 1244static void synth_block_hardcoded(WMAVoiceContext *s, BitstreamContext *bc,
fa65584f
RB
1245 int block_idx, int size,
1246 const struct frame_type_desc *frame_desc,
1247 float *excitation)
1248{
1249 float gain;
1250 int n, r_idx;
1251
1252 assert(size <= MAX_FRAMESIZE);
1253
1254 /* Set the offset from which we start reading wmavoice_std_codebook */
1255 if (frame_desc->fcb_type == FCB_TYPE_SILENCE) {
1256 r_idx = pRNG(s->frame_cntr, block_idx, size);
1257 gain = s->silence_gain;
1258 } else /* FCB_TYPE_HARDCODED */ {
f7ec7f54
AH
1259 r_idx = bitstream_read(bc, 8);
1260 gain = wmavoice_gain_universal[bitstream_read(bc, 6)];
fa65584f
RB
1261 }
1262
1263 /* Clear gain prediction parameters */
1264 memset(s->gain_pred_err, 0, sizeof(s->gain_pred_err));
1265
1266 /* Apply gain to hardcoded codebook and use that as excitation signal */
1267 for (n = 0; n < size; n++)
1268 excitation[n] = wmavoice_std_codebook[r_idx + n] * gain;
1269}
1270
1271/**
1272 * Parse FCB/ACB signal for a single block.
1273 * @note see #synth_block().
1274 */
f7ec7f54 1275static void synth_block_fcb_acb(WMAVoiceContext *s, BitstreamContext *bc,
fa65584f
RB
1276 int block_idx, int size,
1277 int block_pitch_sh2,
1278 const struct frame_type_desc *frame_desc,
1279 float *excitation)
1280{
1281 static const float gain_coeff[6] = {
1282 0.8169, -0.06545, 0.1726, 0.0185, -0.0359, 0.0458
1283 };
1284 float pulses[MAX_FRAMESIZE / 2], pred_err, acb_gain, fcb_gain;
1285 int n, idx, gain_weight;
1286 AMRFixed fcb;
1287
1288 assert(size <= MAX_FRAMESIZE / 2);
1289 memset(pulses, 0, sizeof(*pulses) * size);
1290
1291 fcb.pitch_lag = block_pitch_sh2 >> 2;
1292 fcb.pitch_fac = 1.0;
1293 fcb.no_repeat_mask = 0;
1294 fcb.n = 0;
1295
1296 /* For the other frame types, this is where we apply the innovation
1297 * (fixed) codebook pulses of the speech signal. */
1298 if (frame_desc->fcb_type == FCB_TYPE_AW_PULSES) {
f7ec7f54
AH
1299 aw_pulse_set1(s, bc, block_idx, &fcb);
1300 if (aw_pulse_set2(s, bc, block_idx, &fcb)) {
d14a26ed
LB
1301 /* Conceal the block with silence and return.
1302 * Skip the correct amount of bits to read the next
1303 * block from the correct offset. */
1304 int r_idx = pRNG(s->frame_cntr, block_idx, size);
1305
1306 for (n = 0; n < size; n++)
1307 excitation[n] =
1308 wmavoice_std_codebook[r_idx + n] * s->silence_gain;
f7ec7f54 1309 bitstream_skip(bc, 7 + 1);
d14a26ed
LB
1310 return;
1311 }
fa65584f
RB
1312 } else /* FCB_TYPE_EXC_PULSES */ {
1313 int offset_nbits = 5 - frame_desc->log_n_blocks;
1314
1315 fcb.no_repeat_mask = -1;
1316 /* similar to ff_decode_10_pulses_35bits(), but with single pulses
1317 * (instead of double) for a subset of pulses */
1318 for (n = 0; n < 5; n++) {
1319 float sign;
1320 int pos1, pos2;
1321
f7ec7f54
AH
1322 sign = bitstream_read_bit(bc) ? 1.0 : -1.0;
1323 pos1 = bitstream_read(bc, offset_nbits);
fa65584f
RB
1324 fcb.x[fcb.n] = n + 5 * pos1;
1325 fcb.y[fcb.n++] = sign;
1326 if (n < frame_desc->dbl_pulses) {
f7ec7f54 1327 pos2 = bitstream_read(bc, offset_nbits);
fa65584f
RB
1328 fcb.x[fcb.n] = n + 5 * pos2;
1329 fcb.y[fcb.n++] = (pos1 < pos2) ? -sign : sign;
1330 }
1331 }
1332 }
1333 ff_set_fixed_vector(pulses, &fcb, 1.0, size);
1334
1335 /* Calculate gain for adaptive & fixed codebook signal.
1336 * see ff_amr_set_fixed_gain(). */
f7ec7f54 1337 idx = bitstream_read(bc, 7);
d56668bd
RB
1338 fcb_gain = expf(avpriv_scalarproduct_float_c(s->gain_pred_err,
1339 gain_coeff, 6) -
fa65584f
RB
1340 5.2409161640 + wmavoice_gain_codebook_fcb[idx]);
1341 acb_gain = wmavoice_gain_codebook_acb[idx];
1342 pred_err = av_clipf(wmavoice_gain_codebook_fcb[idx],
1343 -2.9957322736 /* log(0.05) */,
1344 1.6094379124 /* log(5.0) */);
1345
1346 gain_weight = 8 >> frame_desc->log_n_blocks;
1347 memmove(&s->gain_pred_err[gain_weight], s->gain_pred_err,
1348 sizeof(*s->gain_pred_err) * (6 - gain_weight));
1349 for (n = 0; n < gain_weight; n++)
1350 s->gain_pred_err[n] = pred_err;
1351
1352 /* Calculation of adaptive codebook */
1353 if (frame_desc->acb_type == ACB_TYPE_ASYMMETRIC) {
1354 int len;
1355 for (n = 0; n < size; n += len) {
1356 int next_idx_sh16;
1357 int abs_idx = block_idx * size + n;
1358 int pitch_sh16 = (s->last_pitch_val << 16) +
1359 s->pitch_diff_sh16 * abs_idx;
1360 int pitch = (pitch_sh16 + 0x6FFF) >> 16;
1361 int idx_sh16 = ((pitch << 16) - pitch_sh16) * 8 + 0x58000;
1362 idx = idx_sh16 >> 16;
1363 if (s->pitch_diff_sh16) {
1364 if (s->pitch_diff_sh16 > 0) {
1365 next_idx_sh16 = (idx_sh16) &~ 0xFFFF;
1366 } else
1367 next_idx_sh16 = (idx_sh16 + 0x10000) &~ 0xFFFF;
1368 len = av_clip((idx_sh16 - next_idx_sh16) / s->pitch_diff_sh16 / 8,
1369 1, size - n);
1370 } else
1371 len = size;
1372
1373 ff_acelp_interpolatef(&excitation[n], &excitation[n - pitch],
1374 wmavoice_ipol1_coeffs, 17,
1375 idx, 9, len);
1376 }
1377 } else /* ACB_TYPE_HAMMING */ {
1378 int block_pitch = block_pitch_sh2 >> 2;
1379 idx = block_pitch_sh2 & 3;
1380 if (idx) {
1381 ff_acelp_interpolatef(excitation, &excitation[-block_pitch],
1382 wmavoice_ipol2_coeffs, 4,
1383 idx, 8, size);
1384 } else
25fe8630 1385 av_memcpy_backptr((uint8_t *) excitation, sizeof(float) * block_pitch,
fa65584f
RB
1386 sizeof(float) * size);
1387 }
1388
1389 /* Interpolate ACB/FCB and use as excitation signal */
1390 ff_weighted_vector_sumf(excitation, excitation, pulses,
1391 acb_gain, fcb_gain, size);
1392}
1393
1394/**
1395 * Parse data in a single block.
1396 * @note we assume enough bits are available, caller should check.
1397 *
1398 * @param s WMA Voice decoding context private data
f7ec7f54 1399 * @param bc bit I/O context
fa65584f
RB
1400 * @param block_idx index of the to-be-read block
1401 * @param size amount of samples to be read in this block
1402 * @param block_pitch_sh2 pitch for this block << 2
1403 * @param lsps LSPs for (the end of) this frame
1404 * @param prev_lsps LSPs for the last frame
1405 * @param frame_desc frame type descriptor
1406 * @param excitation target memory for the ACB+FCB interpolated signal
1407 * @param synth target memory for the speech synthesis filter output
1408 * @return 0 on success, <0 on error.
1409 */
f7ec7f54 1410static void synth_block(WMAVoiceContext *s, BitstreamContext *bc,
fa65584f
RB
1411 int block_idx, int size,
1412 int block_pitch_sh2,
1413 const double *lsps, const double *prev_lsps,
1414 const struct frame_type_desc *frame_desc,
1415 float *excitation, float *synth)
1416{
1417 double i_lsps[MAX_LSPS];
1418 float lpcs[MAX_LSPS];
1419 float fac;
1420 int n;
1421
1422 if (frame_desc->acb_type == ACB_TYPE_NONE)
f7ec7f54 1423 synth_block_hardcoded(s, bc, block_idx, size, frame_desc, excitation);
fa65584f 1424 else
f7ec7f54 1425 synth_block_fcb_acb(s, bc, block_idx, size, block_pitch_sh2,
fa65584f
RB
1426 frame_desc, excitation);
1427
1428 /* convert interpolated LSPs to LPCs */
1429 fac = (block_idx + 0.5) / frame_desc->n_blocks;
1430 for (n = 0; n < s->lsps; n++) // LSF -> LSP
1431 i_lsps[n] = cos(prev_lsps[n] + fac * (lsps[n] - prev_lsps[n]));
1432 ff_acelp_lspd2lpc(i_lsps, lpcs, s->lsps >> 1);
1433
1434 /* Speech synthesis */
1435 ff_celp_lp_synthesis_filterf(synth, lpcs, excitation, size, s->lsps);
1436}
1437
1438/**
1439 * Synthesize output samples for a single frame.
1440 * @note we assume enough bits are available, caller should check.
1441 *
1442 * @param ctx WMA Voice decoder context
f7ec7f54 1443 * @param bc bit I/O context (s->bc or one for cross-packet superframes)
9a32573b 1444 * @param frame_idx Frame number within superframe [0-2]
fa65584f
RB
1445 * @param samples pointer to output sample buffer, has space for at least 160
1446 * samples
1447 * @param lsps LSP array
1448 * @param prev_lsps array of previous frame's LSPs
1449 * @param excitation target buffer for excitation signal
1450 * @param synth target buffer for synthesized speech data
1451 * @return 0 on success, <0 on error.
1452 */
f7ec7f54
AH
1453static int synth_frame(AVCodecContext *ctx, BitstreamContext *bc,
1454 int frame_idx, float *samples,
fa65584f
RB
1455 const double *lsps, const double *prev_lsps,
1456 float *excitation, float *synth)
1457{
1458 WMAVoiceContext *s = ctx->priv_data;
1459 int n, n_blocks_x2, log_n_blocks_x2, cur_pitch_val;
1460 int pitch[MAX_BLOCKS], last_block_pitch;
1461
1462 /* Parse frame type ("frame header"), see frame_descs */
f7ec7f54 1463 int bd_idx = s->vbm_tree[bitstream_read_vlc(bc, frame_type_vlc.table, 6, 3)], block_nsamples;
1464
1465 if (bd_idx < 0) {
1466 av_log(ctx, AV_LOG_ERROR,
1467 "Invalid frame type VLC code, skipping\n");
04e9853a 1468 return AVERROR_INVALIDDATA;
1469 }
1470
1471 block_nsamples = MAX_FRAMESIZE / frame_descs[bd_idx].n_blocks;
1472
1473 /* Pitch calculation for ACB_TYPE_ASYMMETRIC ("pitch-per-frame") */
1474 if (frame_descs[bd_idx].acb_type == ACB_TYPE_ASYMMETRIC) {
1475 /* Pitch is provided per frame, which is interpreted as the pitch of
1476 * the last sample of the last block of this frame. We can interpolate
1477 * the pitch of other blocks (and even pitch-per-sample) by gradually
1478 * incrementing/decrementing prev_frame_pitch to cur_pitch_val. */
1479 n_blocks_x2 = frame_descs[bd_idx].n_blocks << 1;
1480 log_n_blocks_x2 = frame_descs[bd_idx].log_n_blocks + 1;
f7ec7f54 1481 cur_pitch_val = s->min_pitch_val + bitstream_read(bc, s->pitch_nbits);
1482 cur_pitch_val = FFMIN(cur_pitch_val, s->max_pitch_val - 1);
1483 if (s->last_acb_type == ACB_TYPE_NONE ||
1484 20 * abs(cur_pitch_val - s->last_pitch_val) >
1485 (cur_pitch_val + s->last_pitch_val))
1486 s->last_pitch_val = cur_pitch_val;
1487
1488 /* pitch per block */
1489 for (n = 0; n < frame_descs[bd_idx].n_blocks; n++) {
1490 int fac = n * 2 + 1;
1491
1492 pitch[n] = (MUL16(fac, cur_pitch_val) +
1493 MUL16((n_blocks_x2 - fac), s->last_pitch_val) +
1494 frame_descs[bd_idx].n_blocks) >> log_n_blocks_x2;
1495 }
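        /* The loop above amounts to a rounded linear interpolation from the
         * previous frame's pitch to this frame's pitch, evaluated at each
         * block's centre; e.g. with 2 blocks per frame:
         *   pitch[0] = (1 * cur_pitch_val + 3 * s->last_pitch_val + 2) >> 2
         *   pitch[1] = (3 * cur_pitch_val + 1 * s->last_pitch_val + 2) >> 2 */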
1496
1497 /* "pitch-diff-per-sample" for calculation of pitch per sample */
1498 s->pitch_diff_sh16 =
1499 ((cur_pitch_val - s->last_pitch_val) << 16) / MAX_FRAMESIZE;
1500 }
1501
1502 /* Global gain (if silence) and pitch-adaptive window coordinates */
1503 switch (frame_descs[bd_idx].fcb_type) {
1504 case FCB_TYPE_SILENCE:
f7ec7f54 1505 s->silence_gain = wmavoice_gain_silence[bitstream_read(bc, 8)];
1506 break;
1507 case FCB_TYPE_AW_PULSES:
f7ec7f54 1508 aw_parse_coords(s, bc, pitch);
1509 break;
1510 }
1511
1512 for (n = 0; n < frame_descs[bd_idx].n_blocks; n++) {
1513 int bl_pitch_sh2;
1514
1515 /* Pitch calculation for ACB_TYPE_HAMMING ("pitch-per-block") */
1516 switch (frame_descs[bd_idx].acb_type) {
1517 case ACB_TYPE_HAMMING: {
1518         /* Pitch is given per block. Per-block pitches are encoded as an
1519          * absolute value for the first block, and then as delta values
1520          * (relative to this value) for all subsequent blocks. The scale of
1521          * this pitch value is semi-logarithmic compared to its use in the
1522          * decoder, so we convert it back to the normal scale here. */
1523 int block_pitch,
1524 t1 = (s->block_conv_table[1] - s->block_conv_table[0]) << 2,
1525 t2 = (s->block_conv_table[2] - s->block_conv_table[1]) << 1,
1526 t3 = s->block_conv_table[3] - s->block_conv_table[2] + 1;
1527
1528 if (n == 0) {
f7ec7f54 1529 block_pitch = bitstream_read(bc, s->block_pitch_nbits);
1530 } else
1531 block_pitch = last_block_pitch - s->block_delta_pitch_hrange +
f7ec7f54 1532 bitstream_read(bc, s->block_delta_pitch_nbits);
1533 /* Convert last_ so that any next delta is within _range */
1534 last_block_pitch = av_clip(block_pitch,
1535 s->block_delta_pitch_hrange,
1536 s->block_pitch_range -
1537 s->block_delta_pitch_hrange);
1538
1539 /* Convert semi-log-style scale back to normal scale */
1540 if (block_pitch < t1) {
1541 bl_pitch_sh2 = (s->block_conv_table[0] << 2) + block_pitch;
1542 } else {
1543 block_pitch -= t1;
1544 if (block_pitch < t2) {
1545 bl_pitch_sh2 =
1546 (s->block_conv_table[1] << 2) + (block_pitch << 1);
1547 } else {
1548 block_pitch -= t2;
1549 if (block_pitch < t3) {
1550 bl_pitch_sh2 =
1551 (s->block_conv_table[2] + block_pitch) << 2;
1552 } else
1553 bl_pitch_sh2 = s->block_conv_table[3] << 2;
1554 }
1555 }
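            /* The three ranges above give quarter-, half- and whole-sample
             * resolution respectively: values below t1 add 1/4 sample per step
             * to block_conv_table[0], the next t2 values add 1/2 sample per
             * step to block_conv_table[1], the next t3 values add a whole
             * sample per step to block_conv_table[2], and anything beyond
             * clamps to block_conv_table[3]. bl_pitch_sh2 is the pitch << 2. */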
1556 pitch[n] = bl_pitch_sh2 >> 2;
1557 break;
1558 }
1559
1560 case ACB_TYPE_ASYMMETRIC: {
1561 bl_pitch_sh2 = pitch[n] << 2;
1562 break;
1563 }
1564
1565 default: // ACB_TYPE_NONE has no pitch
1566 bl_pitch_sh2 = 0;
1567 break;
1568 }
1569
f7ec7f54 1570 synth_block(s, bc, n, block_nsamples, bl_pitch_sh2,
1571 lsps, prev_lsps, &frame_descs[bd_idx],
1572 &excitation[n * block_nsamples],
1573 &synth[n * block_nsamples]);
1574 }
1575
1576 /* Averaging projection filter, if applicable. Else, just copy samples
1577 * from synthesis buffer */
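    /* The postfilter below runs on two 80-sample halves: the first half uses
     * LSPs interpolated halfway between the previous and the current frame,
     * the second half uses the current frame's LSPs directly. */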
1578 if (s->do_apf) {
1579 double i_lsps[MAX_LSPS];
1580 float lpcs[MAX_LSPS];
1581
1582 for (n = 0; n < s->lsps; n++) // LSF -> LSP
1583 i_lsps[n] = cos(0.5 * (prev_lsps[n] + lsps[n]));
1584 ff_acelp_lspd2lpc(i_lsps, lpcs, s->lsps >> 1);
1585 postfilter(s, synth, samples, 80, lpcs,
1586 &s->zero_exc_pf[s->history_nsamples + MAX_FRAMESIZE * frame_idx],
1587 frame_descs[bd_idx].fcb_type, pitch[0]);
1588
1589 for (n = 0; n < s->lsps; n++) // LSF -> LSP
1590 i_lsps[n] = cos(lsps[n]);
1591 ff_acelp_lspd2lpc(i_lsps, lpcs, s->lsps >> 1);
1592 postfilter(s, &synth[80], &samples[80], 80, lpcs,
1593 &s->zero_exc_pf[s->history_nsamples + MAX_FRAMESIZE * frame_idx + 80],
1594 frame_descs[bd_idx].fcb_type, pitch[0]);
1595 } else
b1078e9f 1596 memcpy(samples, synth, 160 * sizeof(synth[0]));
1597
1598 /* Cache values for next frame */
1599 s->frame_cntr++;
1600 if (s->frame_cntr >= 0xFFFF) s->frame_cntr -= 0xFFFF; // i.e. modulo (%)
1601 s->last_acb_type = frame_descs[bd_idx].acb_type;
1602 switch (frame_descs[bd_idx].acb_type) {
1603 case ACB_TYPE_NONE:
1604 s->last_pitch_val = 0;
1605 break;
1606 case ACB_TYPE_ASYMMETRIC:
1607 s->last_pitch_val = cur_pitch_val;
1608 break;
1609 case ACB_TYPE_HAMMING:
1610 s->last_pitch_val = pitch[frame_descs[bd_idx].n_blocks - 1];
1611 break;
1612 }
1613
1614 return 0;
1615}
1616
1617/**
1618 * Ensure a minimum value for the first item, a maximum value for the
1619 * last item, proper spacing between values and proper ordering.
1620 *
1621 * @param lsps array of LSPs
1622 * @param num size of LSP array
1623 *
1624 * @note basically a double version of #ff_acelp_reorder_lsf(), might be
1625 * useful to put in a generic location later on. Parts are also
1626 * present in #ff_set_min_dist_lsf() + #ff_sort_nearly_sorted_floats(),
1627 * which operate on floats.
1628 */
1629static void stabilize_lsps(double *lsps, int num)
1630{
1631 int n, m, l;
1632
1633 /* set minimum value for first, maximum value for last and minimum
1634 * spacing between LSF values.
1635 * Very similar to ff_set_min_dist_lsf(), but in double. */
1636 lsps[0] = FFMAX(lsps[0], 0.0015 * M_PI);
1637 for (n = 1; n < num; n++)
1638 lsps[n] = FFMAX(lsps[n], lsps[n - 1] + 0.0125 * M_PI);
1639 lsps[num - 1] = FFMIN(lsps[num - 1], 0.9985 * M_PI);
1640
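    /* note that the clamp on lsps[num - 1] above can push it below its
     * neighbour, which is why a re-sort may still be needed here */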
1641    /* reorder (insertion sort, run once when an out-of-order pair is found).
1642     * Very similar to ff_sort_nearly_sorted_floats(), but in double. */
1643 for (n = 1; n < num; n++) {
1644 if (lsps[n] < lsps[n - 1]) {
1645 for (m = 1; m < num; m++) {
1646 double tmp = lsps[m];
1647 for (l = m - 1; l >= 0; l--) {
1648 if (lsps[l] <= tmp) break;
1649 lsps[l + 1] = lsps[l];
1650 }
1651 lsps[l + 1] = tmp;
1652 }
1653 break;
1654 }
1655 }
1656}
1657
1658/**
1659 * Test whether there are enough bits to read one superframe.
1660 *
f7ec7f54 1661 * @param orig_bc bit I/O context used for reading. This function
1662 * does not modify the state of the bitreader; it
1663 * only uses it to copy the current stream position
1664 * @param s WMA Voice decoding context private data
04e9853a 1665 * @return < 0 on error, 1 on not enough bits or 0 if OK.
fa65584f 1666 */
f7ec7f54 1667static int check_bits_for_superframe(BitstreamContext *orig_bc,
1668 WMAVoiceContext *s)
1669{
f7ec7f54 1670 BitstreamContext s_bc, *bc = &s_bc;
1671 int n, need_bits, bd_idx;
1672 const struct frame_type_desc *frame_desc;
1673
1674 /* initialize a copy */
f7ec7f54 1675 *bc = *orig_bc;
1676
1677 /* superframe header */
f7ec7f54 1678 if (bitstream_bits_left(bc) < 14)
fa65584f 1679 return 1;
f7ec7f54 1680 if (!bitstream_read_bit(bc))
04e9853a 1681 return AVERROR(ENOSYS); // WMAPro-in-WMAVoice superframe
f7ec7f54 1682 if (bitstream_read_bit(bc)) bitstream_skip(bc, 12); // number of samples in superframe
fa65584f 1683 if (s->has_residual_lsps) { // residual LSPs (for all frames)
f7ec7f54 1684 if (bitstream_bits_left(bc) < s->sframe_lsp_bitsize)
fa65584f 1685 return 1;
f7ec7f54 1686 bitstream_skip(bc, s->sframe_lsp_bitsize);
1687 }
1688
1689 /* frames */
1690 for (n = 0; n < MAX_FRAMES; n++) {
1691 int aw_idx_is_ext = 0;
1692
1693 if (!s->has_residual_lsps) { // independent LSPs (per-frame)
1694 if (bitstream_bits_left(bc) < s->frame_lsp_bitsize)
1695 return 1;
1696 bitstream_skip(bc, s->frame_lsp_bitsize);
fa65584f 1697 }
f7ec7f54 1698 bd_idx = s->vbm_tree[bitstream_read_vlc(bc, frame_type_vlc.table, 6, 3)];
fa65584f 1699 if (bd_idx < 0)
04e9853a 1700 return AVERROR_INVALIDDATA; // invalid frame type VLC code
1701 frame_desc = &frame_descs[bd_idx];
1702 if (frame_desc->acb_type == ACB_TYPE_ASYMMETRIC) {
f7ec7f54 1703 if (bitstream_bits_left(bc) < s->pitch_nbits)
fa65584f 1704 return 1;
f7ec7f54 1705 bitstream_skip(bc, s->pitch_nbits);
1706 }
1707 if (frame_desc->fcb_type == FCB_TYPE_SILENCE) {
f7ec7f54 1708 bitstream_skip(bc, 8);
fa65584f 1709 } else if (frame_desc->fcb_type == FCB_TYPE_AW_PULSES) {
f7ec7f54 1710 int tmp = bitstream_read(bc, 6);
fa65584f 1711 if (tmp >= 0x36) {
f7ec7f54 1712 bitstream_skip(bc, 2);
1713 aw_idx_is_ext = 1;
1714 }
1715 }
1716
1717 /* blocks */
1718 if (frame_desc->acb_type == ACB_TYPE_HAMMING) {
1719 need_bits = s->block_pitch_nbits +
1720 (frame_desc->n_blocks - 1) * s->block_delta_pitch_nbits;
1721 } else if (frame_desc->fcb_type == FCB_TYPE_AW_PULSES) {
1722 need_bits = 2 * !aw_idx_is_ext;
1723 } else
1724 need_bits = 0;
1725 need_bits += frame_desc->frame_size;
f7ec7f54 1726 if (bitstream_bits_left(bc) < need_bits)
fa65584f 1727 return 1;
f7ec7f54 1728 bitstream_skip(bc, need_bits);
1729 }
1730
1731 return 0;
1732}
1733
1734/**
1735 * Synthesize output samples for a single superframe. If we have any data
1736 * cached in s->sframe_cache, that will be used instead of whatever is loaded
f7ec7f54 1737 * in s->bc.
1738 *
1739 * WMA Voice superframes contain 3 frames, each containing 160 audio samples,
1740 * to give a total of 480 samples per superframe. See #synth_frame() for
1741 * frame parsing. In addition to the 3 frames, superframes can also contain
1742 * the LSPs (either specified residually for all frames at once, or
1743 * individually per frame; see the s->has_residual_lsps option), and can
1744 * specify the number of samples encoded in this superframe (if less than
1745 * 480), usually used to prevent blanks at track boundaries.
1746 *
1747 * @param ctx WMA Voice decoder context
1748 * @return 0 on success, <0 on error or 1 if there was not enough data to
1749 * fully parse the superframe
1750 */
1751static int synth_superframe(AVCodecContext *ctx, AVFrame *frame,
1752 int *got_frame_ptr)
1753{
1754 WMAVoiceContext *s = ctx->priv_data;
f7ec7f54 1755 BitstreamContext *bc = &s->bc, s_bc;
0eea2129 1756 int n, res, n_samples = 480;
1757 double lsps[MAX_FRAMES][MAX_LSPS];
1758 const double *mean_lsf = s->lsps == 16 ?
1759 wmavoice_mean_lsf16[s->lsp_def_mode] : wmavoice_mean_lsf10[s->lsp_def_mode];
1760 float excitation[MAX_SIGNAL_HISTORY + MAX_SFRAMESIZE + 12];
1761 float synth[MAX_LSPS + MAX_SFRAMESIZE];
0eea2129 1762 float *samples;
1763
1764 memcpy(synth, s->synth_history,
1765 s->lsps * sizeof(*synth));
1766 memcpy(excitation, s->excitation_history,
1767 s->history_nsamples * sizeof(*excitation));
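    /* the first s->lsps entries of synth[] and the first history_nsamples
     * entries of excitation[] carry over synthesis-filter memory and
     * excitation history from the previous superframe */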
1768
1769 if (s->sframe_cache_size > 0) {
1770 bc = &s_bc;
1771 bitstream_init(bc, s->sframe_cache, s->sframe_cache_size);
1772 s->sframe_cache_size = 0;
1773 }
1774
f7ec7f54 1775 if ((res = check_bits_for_superframe(bc, s)) == 1) {
0eea2129 1776 *got_frame_ptr = 0;
d0640765 1777 return 1;
1778 } else if (res < 0)
1779 return res;
1780
1781 /* First bit is speech/music bit, it differentiates between WMAVoice
1782 * speech samples (the actual codec) and WMAVoice music samples, which
1783 * are really WMAPro-in-WMAVoice-superframes. I've never seen those in
1784 * the wild yet. */
f7ec7f54 1785 if (!bitstream_read_bit(bc)) {
12e25ed2 1786 avpriv_request_sample(ctx, "WMAPro-in-WMAVoice");
717addec 1787 return AVERROR_PATCHWELCOME;
1788 }
1789
1790 /* (optional) nr. of samples in superframe; always <= 480 and >= 0 */
1791 if (bitstream_read_bit(bc)) {
1792 if ((n_samples = bitstream_read(bc, 12)) > 480) {
1793 av_log(ctx, AV_LOG_ERROR,
1794 "Superframe encodes >480 samples (%d), not allowed\n",
1795 n_samples);
04e9853a 1796 return AVERROR_INVALIDDATA;
1797 }
1798 }
1799 /* Parse LSPs, if global for the superframe (can also be per-frame). */
1800 if (s->has_residual_lsps) {
1801 double prev_lsps[MAX_LSPS], a1[MAX_LSPS * 2], a2[MAX_LSPS * 2];
1802
1803 for (n = 0; n < s->lsps; n++)
1804 prev_lsps[n] = s->prev_lsps[n] - mean_lsf[n];
1805
1806 if (s->lsps == 10) {
f7ec7f54 1807 dequant_lsp10r(bc, lsps[2], prev_lsps, a1, a2, s->lsp_q_mode);
fa65584f 1808 } else /* s->lsps == 16 */
f7ec7f54 1809 dequant_lsp16r(bc, lsps[2], prev_lsps, a1, a2, s->lsp_q_mode);
1810
1811 for (n = 0; n < s->lsps; n++) {
1812 lsps[0][n] = mean_lsf[n] + (a1[n] - a2[n * 2]);
1813 lsps[1][n] = mean_lsf[n] + (a1[s->lsps + n] - a2[n * 2 + 1]);
1814 lsps[2][n] += mean_lsf[n];
1815 }
1816 for (n = 0; n < 3; n++)
1817 stabilize_lsps(lsps[n], s->lsps);
1818 }
1819
0eea2129 1820 /* get output buffer */
5a728882 1821 frame->nb_samples = 480;
759001c5 1822 if ((res = ff_get_buffer(ctx, frame, 0)) < 0) {
1823 av_log(ctx, AV_LOG_ERROR, "get_buffer() failed\n");
1824 return res;
813907d4 1825 }
1826 frame->nb_samples = n_samples;
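    /* note that the buffer above was requested for the full 480 samples;
     * nb_samples is then lowered to the number actually coded in this
     * superframe */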
1827 samples = (float *)frame->data[0];
813907d4 1828
da9cea77 1829 /* Parse frames, optionally preceded by per-frame (independent) LSPs. */
1830 for (n = 0; n < 3; n++) {
1831 if (!s->has_residual_lsps) {
1832 int m;
1833
1834 if (s->lsps == 10) {
f7ec7f54 1835 dequant_lsp10i(bc, lsps[n]);
fa65584f 1836 } else /* s->lsps == 16 */
f7ec7f54 1837 dequant_lsp16i(bc, lsps[n]);
1838
1839 for (m = 0; m < s->lsps; m++)
1840 lsps[n][m] += mean_lsf[m];
1841 stabilize_lsps(lsps[n], s->lsps);
1842 }
1843
f7ec7f54 1844 if ((res = synth_frame(ctx, bc, n,
1845 &samples[n * MAX_FRAMESIZE],
1846 lsps[n], n == 0 ? s->prev_lsps : lsps[n - 1],
1847 &excitation[s->history_nsamples + n * MAX_FRAMESIZE],
d0640765 1848 &synth[s->lsps + n * MAX_FRAMESIZE]))) {
0eea2129 1849 *got_frame_ptr = 0;
fa65584f 1850 return res;
d0640765 1851 }
1852 }
1853
1854 /* Statistics? FIXME - we don't check for length, a slight overrun
1855 * will be caught by internal buffer padding, and anything else
1856 * will be skipped, not read. */
1857 if (bitstream_read_bit(bc)) {
1858 res = bitstream_read(bc, 4);
1859 bitstream_skip(bc, 10 * (res + 1));
1860 }
1861
0eea2129 1862 *got_frame_ptr = 1;
1863
1864 /* Update history */
1865 memcpy(s->prev_lsps, lsps[2],
1866 s->lsps * sizeof(*s->prev_lsps));
1867 memcpy(s->synth_history, &synth[MAX_SFRAMESIZE],
1868 s->lsps * sizeof(*synth));
1869 memcpy(s->excitation_history, &excitation[MAX_SFRAMESIZE],
1870 s->history_nsamples * sizeof(*excitation));
1871 if (s->do_apf)
1872 memmove(s->zero_exc_pf, &s->zero_exc_pf[MAX_SFRAMESIZE],
1873 s->history_nsamples * sizeof(*s->zero_exc_pf));
1874
1875 return 0;
1876}
1877
1878/**
1879 * Parse the packet header at the start of each packet (input data to this
1880 * decoder).
1881 *
1882 * @param s WMA Voice decoding context private data
32e543f8 1883 * @return 1 if not enough bits were available, or 0 on success.
1884 */
1885static int parse_packet_header(WMAVoiceContext *s)
1886{
f7ec7f54 1887 BitstreamContext *bc = &s->bc;
1888 unsigned int res;
1889
f7ec7f54 1890 if (bitstream_bits_left(bc) < 11)
fa65584f 1891 return 1;
1892 bitstream_skip(bc, 4); // packet sequence number
1893 s->has_residual_lsps = bitstream_read_bit(bc);
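    /* the superframe count is coded in 6-bit groups; a raw value of 0x3F in a
     * group indicates that another 6-bit group follows (see the loop below) */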
fa65584f 1894 do {
1895 res = bitstream_read(bc, 6); // number of superframes per packet
1896 // (minus first one if there is spillover)
1897 if (bitstream_bits_left(bc) < 6 * (res == 0x3F) + s->spillover_bitsize)
1898 return 1;
1899 } while (res == 0x3F);
f7ec7f54 1900 s->spillover_nbits = bitstream_read(bc, s->spillover_bitsize);
1901
1902 return 0;
1903}
1904
1905/**
f7ec7f54 1906 * Copy (unaligned) bits from bc/data/size to pb.
1907 *
1908 * @param pb target buffer to copy bits into
1909 * @param data source buffer to copy bits from
1910 * @param size size of the source data, in bytes
1911 * @param bc bit I/O context specifying the current position in the source
1912 *           data. This function might use this to align the bit position to
9f51c682 1913 * a whole-byte boundary before calling #avpriv_copy_bits() on aligned
1914 * source data
1915 * @param nbits the amount of bits to copy from source to target
1916 *
1917 * @note after calling this function, the current position in the input bit
1918 * I/O context is undefined.
1919 */
1920static void copy_bits(PutBitContext *pb,
1921 const uint8_t *data, int size,
f7ec7f54 1922 BitstreamContext *bc, int nbits)
1923{
1924 int rmn_bytes, rmn_bits;
1925
f7ec7f54 1926 rmn_bits = rmn_bytes = bitstream_bits_left(bc);
1927 if (rmn_bits < nbits)
1928 return;
1929 if (nbits > pb->size_in_bits - put_bits_count(pb))
1930 return;
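    /* First copy the sub-byte remainder (bits_left & 7 bits) bit-by-bit from
     * bc; the rest of the source is then byte-aligned and corresponds to the
     * last rmn_bytes bytes of data, which avpriv_copy_bits() copies whole. */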
1931 rmn_bits &= 7; rmn_bytes >>= 3;
1932 if ((rmn_bits = FFMIN(rmn_bits, nbits)) > 0)
f7ec7f54 1933 put_bits(pb, rmn_bits, bitstream_read(bc, rmn_bits));
9f51c682 1934 avpriv_copy_bits(pb, data + size - rmn_bytes,
1935 FFMIN(nbits - rmn_bits, rmn_bytes << 3));
1936}
1937
1938/**
1939 * Packet decoding: a packet is anything that the (ASF) demuxer contains,
1940 * and we expect that the demuxer / application provides it to us as such
1941 * (else you'll probably get garbage as output). Every packet has a size of
1942 * ctx->block_align bytes, starts with a packet header (see
1943 * #parse_packet_header()), and then a series of superframes. Superframe
1944 * boundaries may extend beyond a single packet, i.e. a superframe's data
1945 * can be split over multiple (two) packets.
1946 *
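 * Roughly, each packet is laid out as (sizes not to scale; the spillover part
 * is only present if the previous packet's last superframe was split):
 *   [packet header][spillover of previous superframe][superframe(s)...]
 *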
1947 * For more information about frames, see #synth_superframe().
1948 */
1949static int wmavoice_decode_packet(AVCodecContext *ctx, void *data,
0eea2129 1950 int *got_frame_ptr, AVPacket *avpkt)
1951{
1952 WMAVoiceContext *s = ctx->priv_data;
f7ec7f54 1953 BitstreamContext *bc = &s->bc;
1954 int size, res, pos;
1955
fa65584f 1956 /* Packets are sometimes a multiple of ctx->block_align, with a packet
6001dad6 1957 * header at each ctx->block_align bytes. However, Libav's ASF demuxer
1958 * feeds us ASF packets, which may concatenate multiple "codec" packets
1959 * in a single "muxer" packet, so we artificially emulate that by
1960 * capping the packet size at ctx->block_align. */
1961 for (size = avpkt->size; size > ctx->block_align; size -= ctx->block_align);
d0640765 1962 if (!size) {
0eea2129 1963 *got_frame_ptr = 0;
fa65584f 1964 return 0;
d0640765 1965 }
f7ec7f54 1966 bitstream_init8(&s->bc, avpkt->data, size);
1967
1968 /* size == ctx->block_align is used to indicate whether we are dealing with
1969 * a new packet or a packet of which we already read the packet header
1970 * previously. */
1971 if (size == ctx->block_align) { // new packet header
1972 if ((res = parse_packet_header(s)) < 0)
1973 return res;
1974
1975 /* If the packet header specifies a s->spillover_nbits, then we want
1976 * to push out all data of the previous packet (+ spillover) before
1977 * continuing to parse new superframes in the current packet. */
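        /* i.e. append the first spillover_nbits of this packet to the cached
         * tail of the previous packet, try to decode that superframe first
         * and, if that fails, skip those bits and fall through to the normal
         * per-packet parsing below. */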
1978 if (s->spillover_nbits > 0) {
1979 if (s->sframe_cache_size > 0) {
1980 int cnt = bitstream_tell(bc);
1981 copy_bits(&s->pb, avpkt->data, size, bc, s->spillover_nbits);
1982 flush_put_bits(&s->pb);
1983 s->sframe_cache_size += s->spillover_nbits;
5a728882 1984 if ((res = synth_superframe(ctx, data, got_frame_ptr)) == 0 &&
0eea2129 1985 *got_frame_ptr) {
1986 cnt += s->spillover_nbits;
1987 s->skip_bits_next = cnt & 7;
1988 return cnt >> 3;
1989 } else
1990 bitstream_skip (bc, s->spillover_nbits - cnt +
1991 bitstream_tell(bc)); // resync
fa65584f 1992 } else
f7ec7f54 1993 bitstream_skip(bc, s->spillover_nbits); // resync
1994 }
1995 } else if (s->skip_bits_next)
f7ec7f54 1996 bitstream_skip(bc, s->skip_bits_next);
1997
1998 /* Try parsing superframes in current packet */
1999 s->sframe_cache_size = 0;
2000 s->skip_bits_next = 0;
f7ec7f54 2001 pos = bitstream_bits_left(bc);
5a728882 2002 if ((res = synth_superframe(ctx, data, got_frame_ptr)) < 0) {
fa65584f 2003 return res;
0eea2129 2004 } else if (*got_frame_ptr) {
f7ec7f54 2005 int cnt = bitstream_tell(bc);
2006 s->skip_bits_next = cnt & 7;
2007 return cnt >> 3;
2008 } else if ((s->sframe_cache_size = pos) > 0) {
2009 /* rewind bit reader to start of last (incomplete) superframe... */
2010 bitstream_init8(bc, avpkt->data, size);
2011 bitstream_skip(bc, (size << 3) - pos);
2012 assert(bitstream_bits_left(bc) == pos);
2013
2014 /* ...and cache it for spillover in next packet */
2015 init_put_bits(&s->pb, s->sframe_cache, SFRAME_CACHE_MAXSIZE);
f7ec7f54 2016 copy_bits(&s->pb, avpkt->data, size, bc, s->sframe_cache_size);
2017        // FIXME bad - just copy whole bytes instead and use the
2018        // skip_bits_next field
2019 }
2020
2021 return size;
2022}
2023
2024static av_cold int wmavoice_decode_end(AVCodecContext *ctx)
2025{
2026 WMAVoiceContext *s = ctx->priv_data;
2027
2028 if (s->do_apf) {
2029 ff_rdft_end(&s->rdft);
2030 ff_rdft_end(&s->irdft);
2031 ff_dct_end(&s->dct);
2032 ff_dct_end(&s->dst);
2033 }
2034
2035 return 0;
2036}
2037
2038static av_cold void wmavoice_flush(AVCodecContext *ctx)
2039{
2040 WMAVoiceContext *s = ctx->priv_data;
2041 int n;
2042
9a32573b 2043 s->postfilter_agc = 0;
2044 s->sframe_cache_size = 0;
2045 s->skip_bits_next = 0;
2046 for (n = 0; n < s->lsps; n++)
2047 s->prev_lsps[n] = M_PI * (n + 1.0) / (s->lsps + 1.0);
2048 memset(s->excitation_history, 0,
2049 sizeof(*s->excitation_history) * MAX_SIGNAL_HISTORY);
2050 memset(s->synth_history, 0,
2051 sizeof(*s->synth_history) * MAX_LSPS);
2052 memset(s->gain_pred_err, 0,
2053 sizeof(s->gain_pred_err));
2054
2055 if (s->do_apf) {
2056 memset(&s->synth_filter_out_buf[MAX_LSPS_ALIGN16 - s->lsps], 0,
2057 sizeof(*s->synth_filter_out_buf) * s->lsps);
2058 memset(s->dcf_mem, 0,
2059 sizeof(*s->dcf_mem) * 2);
2060 memset(s->zero_exc_pf, 0,
2061 sizeof(*s->zero_exc_pf) * s->history_nsamples);
2062 memset(s->denoise_filter_cache, 0, sizeof(s->denoise_filter_cache));
2063 }
2064}
2065
d36beb3f 2066AVCodec ff_wmavoice_decoder = {
f5c48f5a 2067 .name = "wmavoice",
b2bed932 2068 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio Voice"),
2069 .type = AVMEDIA_TYPE_AUDIO,
2070 .id = AV_CODEC_ID_WMAVOICE,
2071 .priv_data_size = sizeof(WMAVoiceContext),
2072 .init = wmavoice_decode_init,
2073 .init_static_data = wmavoice_init_static_data,
2074 .close = wmavoice_decode_end,
2075 .decode = wmavoice_decode_packet,
def97856 2076 .capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
f5c48f5a 2077 .flush = wmavoice_flush,
fa65584f 2078};