spelling
[libav.git] / libavcodec / apedec.c
CommitLineData
bf4a1f17
KS
1/*
2 * Monkey's Audio lossless audio decoder
3 * Copyright (c) 2007 Benjamin Zores <ben@geexbox.org>
4 * based upon libdemac from Dave Chapman.
5 *
6 * This file is part of FFmpeg.
7 *
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23#define ALT_BITSTREAM_READER_LE
24#include "avcodec.h"
25#include "dsputil.h"
26#include "bitstream.h"
27#include "bytestream.h"
28
29/**
30 * @file apedec.c
31 * Monkey's Audio lossless audio decoder
32 */
33
#define BLOCKS_PER_LOOP 4608    ///< samples (per channel) decoded in one pass of the frame loop
#define MAX_CHANNELS 2
#define MAX_BYTESPERSAMPLE 3

/* frame flag bits stored in the stream (see init_entropy_decoder) */
#define APE_FRAMECODE_MONO_SILENCE 1
#define APE_FRAMECODE_STEREO_SILENCE 3
#define APE_FRAMECODE_PSEUDO_STEREO 4

#define HISTORY_SIZE 512        ///< length of the rolling history kept by predictors and filters
#define PREDICTOR_ORDER 8
/** Total size of all predictor histories */
#define PREDICTOR_SIZE 50

/* offsets of the per-channel delay lines inside APEPredictor.buf */
#define YDELAYA (18 + PREDICTOR_ORDER*4)
#define YDELAYB (18 + PREDICTOR_ORDER*3)
#define XDELAYA (18 + PREDICTOR_ORDER*2)
#define XDELAYB (18 + PREDICTOR_ORDER)

/* offsets of the sign (adaption) values inside APEPredictor.buf */
#define YADAPTCOEFFSA 18
#define XADAPTCOEFFSA 14
#define YADAPTCOEFFSB 10
#define XADAPTCOEFFSB 5
56
/**
 * Possible compression levels
 * @{
 */
enum APECompressionLevel {
    COMPRESSION_LEVEL_FAST       = 1000,
    COMPRESSION_LEVEL_NORMAL     = 2000,
    COMPRESSION_LEVEL_HIGH       = 3000,
    COMPRESSION_LEVEL_EXTRA_HIGH = 4000,
    COMPRESSION_LEVEL_INSANE     = 5000
};
/** @} */
69
#define APE_FILTER_LEVELS 3     ///< max number of cascaded filter stages

/** Filter orders depending on compression level (row = fset, 0 terminates the cascade) */
static const uint16_t ape_filter_orders[5][APE_FILTER_LEVELS] = {
    {  0,   0,    0 },
    { 16,   0,    0 },
    { 64,   0,    0 },
    { 32, 256,    0 },
    { 16, 256, 1280 }
};

/** Filter fraction bits depending on compression level (fixed-point scaling per stage) */
static const uint16_t ape_filter_fracbits[5][APE_FILTER_LEVELS] = {
    {  0,  0,  0 },
    { 11,  0,  0 },
    { 11,  0,  0 },
    { 10, 13,  0 },
    { 11, 13, 15 }
};
89
90
/** Filters applied to the decoded data; all pointers are views into one filterbuf allocation */
typedef struct APEFilter {
    int16_t *coeffs;        ///< actual coefficients used in filtering
    int16_t *adaptcoeffs;   ///< adaptive filter coefficients used for correcting of actual filter coefficients
    int16_t *historybuffer; ///< filter memory
    int16_t *delay;         ///< filtered values

    int avg;                ///< running average of residual magnitude (3.98+ adaption)
} APEFilter;
100
/** Adaptive rice-coding state for one channel */
typedef struct APERice {
    uint32_t k;     ///< current rice parameter (number of direct bits)
    uint32_t ksum;  ///< running magnitude sum used to adapt k
} APERice;
105
/** Range decoder state */
typedef struct APERangecoder {
    uint32_t low;        ///< low end of interval
    uint32_t range;      ///< length of interval
    uint32_t help;       ///< bytes_to_follow resp. intermediate value
    unsigned int buffer; ///< buffer for input/output
} APERangecoder;
112
/** Filter histories */
typedef struct APEPredictor {
    int32_t *buf;           ///< current write position inside historybuffer

    int32_t lastA[2];       ///< last reconstructed value, per channel

    int32_t filterA[2];     ///< first-order smoothing filter state, per channel
    int32_t filterB[2];

    int32_t coeffsA[2][4];  ///< adaption coefficients
    int32_t coeffsB[2][5];  ///< adaption coefficients
    int32_t historybuffer[HISTORY_SIZE + PREDICTOR_SIZE];
} APEPredictor;
126
/** Decoder context */
typedef struct APEContext {
    AVCodecContext *avctx;
    DSPContext dsp;
    int channels;
    int samples;                             ///< samples left to decode in current frame

    int fileversion;                         ///< codec version, very important in decoding process
    int compression_level;                   ///< compression levels
    int fset;                                ///< which filter set to use (calculated from compression level)
    int flags;                               ///< global decoder flags

    uint32_t CRC;                            ///< frame CRC
    int frameflags;                          ///< frame flags
    int currentframeblocks;                  ///< samples (per channel) in current frame
    int blocksdecoded;                       ///< count of decoded samples in current frame
    APEPredictor predictor;                  ///< predictor used for final reconstruction

    int32_t decoded0[BLOCKS_PER_LOOP];       ///< decoded data for the first channel
    int32_t decoded1[BLOCKS_PER_LOOP];       ///< decoded data for the second channel

    int16_t* filterbuf[APE_FILTER_LEVELS];   ///< filter memory

    APERangecoder rc;                        ///< rangecoder used to decode actual values
    APERice riceX;                           ///< rice code parameters for the second channel
    APERice riceY;                           ///< rice code parameters for the first channel
    APEFilter filters[APE_FILTER_LEVELS][2]; ///< filters used for reconstruction

    uint8_t *data;                           ///< current frame data
    uint8_t *data_end;                       ///< frame data end
    uint8_t *ptr;                            ///< current position in frame data
    uint8_t *last_ptr;                       ///< position where last 4608-sample block ended
} APEContext;
160
// TODO: dsputilize
/** In-place element-wise addition: v1[i] += v2[i] for the first 'order' elements. */
static inline void vector_add(int16_t * v1, int16_t * v2, int order)
{
    int i;
    for (i = 0; i < order; i++)
        v1[i] += v2[i];
}
167
// TODO: dsputilize
/** In-place element-wise subtraction: v1[i] -= v2[i] for the first 'order' elements. */
static inline void vector_sub(int16_t * v1, int16_t * v2, int order)
{
    int i;
    for (i = 0; i < order; i++)
        v1[i] -= v2[i];
}
174
// TODO: dsputilize
/** Fixed-point dot product of the first 'order' elements of v1 and v2. */
static inline int32_t scalarproduct(int16_t * v1, int16_t * v2, int order)
{
    int i, sum = 0;

    for (i = 0; i < order; i++)
        sum += v1[i] * v2[i];

    return sum;
}
185
186static int ape_decode_init(AVCodecContext * avctx)
187{
188 APEContext *s = avctx->priv_data;
189 int i;
190
191 if (avctx->extradata_size != 6) {
192 av_log(avctx, AV_LOG_ERROR, "Incorrect extradata\n");
193 return -1;
194 }
195 if (avctx->bits_per_sample != 16) {
196 av_log(avctx, AV_LOG_ERROR, "Only 16-bit samples are supported\n");
197 return -1;
198 }
199 if (avctx->channels > 2) {
200 av_log(avctx, AV_LOG_ERROR, "Only mono and stereo is supported\n");
201 return -1;
202 }
203 s->avctx = avctx;
204 s->channels = avctx->channels;
205 s->fileversion = AV_RL16(avctx->extradata);
206 s->compression_level = AV_RL16(avctx->extradata + 2);
207 s->flags = AV_RL16(avctx->extradata + 4);
208
209 av_log(avctx, AV_LOG_DEBUG, "Compression Level: %d - Flags: %d\n", s->compression_level, s->flags);
210 if (s->compression_level % 1000 || s->compression_level > COMPRESSION_LEVEL_INSANE) {
211 av_log(avctx, AV_LOG_ERROR, "Incorrect compression level %d\n", s->compression_level);
212 return -1;
213 }
214 s->fset = s->compression_level / 1000 - 1;
215 for (i = 0; i < APE_FILTER_LEVELS; i++) {
216 if (!ape_filter_orders[s->fset][i])
217 break;
218 s->filterbuf[i] = av_malloc((ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4);
219 }
220
221 dsputil_init(&s->dsp, avctx);
222 return 0;
223}
224
225static int ape_decode_close(AVCodecContext * avctx)
226{
227 APEContext *s = avctx->priv_data;
228 int i;
229
230 for (i = 0; i < APE_FILTER_LEVELS; i++)
231 av_freep(&s->filterbuf[i]);
232
233 return 0;
234}
235
/**
 * @defgroup rangecoder APE range decoder
 * @{
 */

#define CODE_BITS 32                                  ///< width of the coder state
#define TOP_VALUE ((unsigned int)1 << (CODE_BITS-1))
#define SHIFT_BITS (CODE_BITS - 9)
#define EXTRA_BITS ((CODE_BITS-2) % 8 + 1)            ///< bits consumed from the very first input byte
#define BOTTOM_VALUE (TOP_VALUE >> 8)                 ///< renormalization threshold
246
/** Start the decoder: prime low/range from the first input byte */
static inline void range_start_decoding(APEContext * ctx)
{
    ctx->rc.buffer = bytestream_get_byte(&ctx->ptr);
    ctx->rc.low    = ctx->rc.buffer >> (8 - EXTRA_BITS);
    ctx->rc.range  = (uint32_t) 1 << EXTRA_BITS;
}
254
/** Perform normalization: refill low/range a byte at a time until range is large enough */
static inline void range_dec_normalize(APEContext * ctx)
{
    while (ctx->rc.range <= BOTTOM_VALUE) {
        ctx->rc.buffer = (ctx->rc.buffer << 8) | bytestream_get_byte(&ctx->ptr);
        ctx->rc.low    = (ctx->rc.low << 8)    | ((ctx->rc.buffer >> 1) & 0xFF);
        ctx->rc.range  <<= 8;
    }
}
264
/**
 * Calculate culmulative frequency for next symbol. Does NO update!
 * @param tot_f is the total frequency or (code_value)1<<shift
 *              must be > 0 (callers clamp their pivot to >= 1)
 * @return the culmulative frequency
 */
static inline int range_decode_culfreq(APEContext * ctx, int tot_f)
{
    range_dec_normalize(ctx);
    ctx->rc.help = ctx->rc.range / tot_f;
    return ctx->rc.low / ctx->rc.help;
}
276
/**
 * Decode value with given size in bits. Like range_decode_culfreq()
 * but with a power-of-two total frequency, so the division becomes a shift.
 * @param shift number of bits to decode
 */
static inline int range_decode_culshift(APEContext * ctx, int shift)
{
    range_dec_normalize(ctx);
    ctx->rc.help = ctx->rc.range >> shift;
    return ctx->rc.low / ctx->rc.help;
}
287
288
/**
 * Update decoding state after the symbol has been identified.
 * @param sy_f the interval length (frequency of the symbol)
 * @param lt_f the lower end (frequency sum of < symbols)
 */
static inline void range_decode_update(APEContext * ctx, int sy_f, int lt_f)
{
    ctx->rc.low  -= ctx->rc.help * lt_f;
    ctx->rc.range = ctx->rc.help * sy_f;
}
299
/** Decode n bits (n <= 16) without modelling (uniform distribution) */
static inline int range_decode_bits(APEContext * ctx, int n)
{
    int sym = range_decode_culshift(ctx, n);
    range_decode_update(ctx, 1, sym);
    return sym;
}
307
308
#define MODEL_ELEMENTS 64   ///< symbols in the fixed probability model (last = escape)

/**
 * Fixed probabilities for symbols in Monkey Audio version 3.97
 * (cumulative frequencies out of 65536; counts[sym] .. counts[sym+1] is sym's range)
 */
static const uint32_t counts_3970[65] = {
        0, 14824, 28224, 39348, 47855, 53994, 58171, 60926,
    62682, 63786, 64463, 64878, 65126, 65276, 65365, 65419,
    65450, 65469, 65480, 65487, 65491, 65493, 65494, 65495,
    65496, 65497, 65498, 65499, 65500, 65501, 65502, 65503,
    65504, 65505, 65506, 65507, 65508, 65509, 65510, 65511,
    65512, 65513, 65514, 65515, 65516, 65517, 65518, 65519,
    65520, 65521, 65522, 65523, 65524, 65525, 65526, 65527,
    65528, 65529, 65530, 65531, 65532, 65533, 65534, 65535,
    65536
};

/**
 * Probability ranges for symbols in Monkey Audio version 3.97
 * (first differences of counts_3970)
 */
static const uint16_t counts_diff_3970[64] = {
    14824, 13400, 11124, 8507, 6139, 4177, 2755, 1756,
    1104, 677, 415, 248, 150, 89, 54, 31,
    19, 11, 7, 4, 2, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1
};

/**
 * Fixed probabilities for symbols in Monkey Audio version 3.98
 */
static const uint32_t counts_3980[65] = {
        0, 19578, 36160, 48417, 56323, 60899, 63265, 64435,
    64971, 65232, 65351, 65416, 65447, 65466, 65476, 65482,
    65485, 65488, 65490, 65491, 65492, 65493, 65494, 65495,
    65496, 65497, 65498, 65499, 65500, 65501, 65502, 65503,
    65504, 65505, 65506, 65507, 65508, 65509, 65510, 65511,
    65512, 65513, 65514, 65515, 65516, 65517, 65518, 65519,
    65520, 65521, 65522, 65523, 65524, 65525, 65526, 65527,
    65528, 65529, 65530, 65531, 65532, 65533, 65534, 65535,
    65536
};

/**
 * Probability ranges for symbols in Monkey Audio version 3.98
 * (first differences of counts_3980)
 */
static const uint16_t counts_diff_3980[64] = {
    19578, 16582, 12257, 7906, 4576, 2366, 1170, 536,
    261, 119, 65, 31, 19, 10, 6, 3,
    3, 2, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1
};
368
/**
 * Decode symbol
 * @param counts probability range start position
 * @param counts_diff probability range widths
 * @return decoded symbol index (0 .. MODEL_ELEMENTS-1)
 */
static inline int range_get_symbol(APEContext * ctx,
                                   const uint32_t counts[],
                                   const uint16_t counts_diff[])
{
    int symbol, cf;

    cf = range_decode_culshift(ctx, 16);

    /* figure out the symbol inefficiently; a binary search would be much better */
    for (symbol = 0; counts[symbol + 1] <= cf; symbol++);

    range_decode_update(ctx, counts_diff[symbol], counts[symbol]);

    return symbol;
}
/** @} */ // group rangecoder
390
391static inline void update_rice(APERice *rice, int x)
392{
393 rice->ksum += ((x + 1) / 2) - ((rice->ksum + 16) >> 5);
394
395 if (rice->k == 0)
396 rice->k = 1;
397 else if (rice->ksum < (1 << (rice->k + 4)))
398 rice->k--;
399 else if (rice->ksum >= (1 << (rice->k + 5)))
400 rice->k++;
401}
402
/**
 * Decode one residual value using the range coder plus adaptive rice model.
 * Pre-3.98 files use the 3.97 probability tables with an explicit escape
 * symbol; 3.98+ files use a pivot derived from the running magnitude sum.
 * @return signed residual
 */
static inline int ape_decode_value(APEContext * ctx, APERice *rice)
{
    int x, overflow;

    if (ctx->fileversion < 3980) {
        int tmpk;

        overflow = range_get_symbol(ctx, counts_3970, counts_diff_3970);

        if (overflow == (MODEL_ELEMENTS - 1)) {
            /* escape symbol: the rice parameter is coded explicitly */
            tmpk = range_decode_bits(ctx, 5);
            overflow = 0;
        } else
            tmpk = (rice->k < 1) ? 0 : rice->k - 1;

        if (tmpk <= 16)
            x = range_decode_bits(ctx, tmpk);
        else {
            /* range_decode_bits() handles at most 16 bits, so split */
            x = range_decode_bits(ctx, 16);
            x |= (range_decode_bits(ctx, tmpk - 16) << 16);
        }
        x += overflow << tmpk;
    } else {
        int base, pivot;

        pivot = rice->ksum >> 5;
        if (pivot == 0)
            pivot = 1;  /* avoid division by zero in range_decode_culfreq() */

        overflow = range_get_symbol(ctx, counts_3980, counts_diff_3980);

        if (overflow == (MODEL_ELEMENTS - 1)) {
            /* escape symbol: 32-bit overflow value coded directly */
            overflow  = range_decode_bits(ctx, 16) << 16;
            overflow |= range_decode_bits(ctx, 16);
        }

        base = range_decode_culfreq(ctx, pivot);
        range_decode_update(ctx, 1, base);

        x = base + overflow * pivot;
    }

    update_rice(rice, x);

    /* Convert to signed */
    if (x & 1)
        return (x >> 1) + 1;
    else
        return -(x >> 1);
}
453
/**
 * Entropy-decode blockstodecode residuals per channel into
 * ctx->decoded0 (and ctx->decoded1 when stereo is nonzero).
 */
static void entropy_decode(APEContext * ctx, int blockstodecode, int stereo)
{
    int32_t *decoded0 = ctx->decoded0;
    int32_t *decoded1 = ctx->decoded1;

    ctx->blocksdecoded = blockstodecode;

    if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) {
        /* We are pure silence, just memset the output buffer. */
        memset(decoded0, 0, blockstodecode * sizeof(int32_t));
        memset(decoded1, 0, blockstodecode * sizeof(int32_t));
    } else {
        while (blockstodecode--) {
            *decoded0++ = ape_decode_value(ctx, &ctx->riceY);
            if (stereo)
                *decoded1++ = ape_decode_value(ctx, &ctx->riceX);
        }
    }

    if (ctx->blocksdecoded == ctx->currentframeblocks)
        range_dec_normalize(ctx);   /* normalize to use up all bytes */
}
476
/** Parse the frame header (CRC, optional flags) and reset entropy state. */
static void init_entropy_decoder(APEContext * ctx)
{
    /* Read the CRC */
    ctx->CRC = bytestream_get_be32(&ctx->ptr);

    /* Read the frame flags if they exist */
    ctx->frameflags = 0;
    if ((ctx->fileversion > 3820) && (ctx->CRC & 0x80000000)) {
        /* the top bit of the CRC signals that a flags word follows */
        ctx->CRC &= ~0x80000000;

        ctx->frameflags = bytestream_get_be32(&ctx->ptr);
    }

    /* Keep a count of the blocks decoded in this frame */
    ctx->blocksdecoded = 0;

    /* Initialize the rice structs */
    ctx->riceX.k = 10;
    ctx->riceX.ksum = (1 << ctx->riceX.k) * 16;
    ctx->riceY.k = 10;
    ctx->riceY.ksum = (1 << ctx->riceY.k) * 16;

    /* The first 8 bits of input are ignored. */
    ctx->ptr++;

    range_start_decoding(ctx);
}
504
/** Initial predictor A coefficients (fixed by the APE format) */
static const int32_t initial_coeffs[4] = {
    360, 317, -109, 98
};
508
/** Reset predictor histories and coefficients for a new frame. */
static void init_predictor_decoder(APEContext * ctx)
{
    APEPredictor *p = &ctx->predictor;

    /* Zero the history buffers */
    memset(p->historybuffer, 0, PREDICTOR_SIZE * sizeof(int32_t));
    p->buf = p->historybuffer;

    /* Initialize and zero the co-efficients */
    memcpy(p->coeffsA[0], initial_coeffs, sizeof(initial_coeffs));
    memcpy(p->coeffsA[1], initial_coeffs, sizeof(initial_coeffs));
    memset(p->coeffsB, 0, sizeof(p->coeffsB));

    p->filterA[0] = p->filterA[1] = 0;
    p->filterB[0] = p->filterB[1] = 0;
    p->lastA[0]   = p->lastA[1]   = 0;
}
526
/** Get inverse sign of integer (-1 for positive, 1 for negative and 0 for zero) */
static inline int APESIGN(int32_t x) {
    if (x > 0)
        return -1;
    if (x < 0)
        return 1;
    return 0;
}
531
/**
 * Advance one sample through the cascaded adaptive predictors for one
 * channel of a stereo pair and adapt the coefficients by the residual sign.
 * @param decoded entropy-decoded residual for this sample
 * @param filter  channel index (0 or 1)
 * @param delayA,delayB offsets of the current sample in p->buf
 * @param adaptA,adaptB offsets of the corresponding sign values in p->buf
 * @return reconstructed sample (also stored in p->filterA[filter])
 */
static int predictor_update_filter(APEPredictor *p, const int decoded, const int filter, const int delayA, const int delayB, const int adaptA, const int adaptB)
{
    int32_t predictionA, predictionB;

    p->buf[delayA]     = p->lastA[filter];
    p->buf[adaptA]     = APESIGN(p->buf[delayA]);
    p->buf[delayA - 1] = p->buf[delayA] - p->buf[delayA - 1];  /* first difference */
    p->buf[adaptA - 1] = APESIGN(p->buf[delayA - 1]);

    predictionA = p->buf[delayA    ] * p->coeffsA[filter][0] +
                  p->buf[delayA - 1] * p->coeffsA[filter][1] +
                  p->buf[delayA - 2] * p->coeffsA[filter][2] +
                  p->buf[delayA - 3] * p->coeffsA[filter][3];

    /* Apply a scaled first-order filter compression */
    p->buf[delayB]     = p->filterA[filter ^ 1] - ((p->filterB[filter] * 31) >> 5);
    p->buf[adaptB]     = APESIGN(p->buf[delayB]);
    p->buf[delayB - 1] = p->buf[delayB] - p->buf[delayB - 1];
    p->buf[adaptB - 1] = APESIGN(p->buf[delayB - 1]);
    p->filterB[filter] = p->filterA[filter ^ 1];  /* B-stage feeds off the other channel */

    predictionB = p->buf[delayB    ] * p->coeffsB[filter][0] +
                  p->buf[delayB - 1] * p->coeffsB[filter][1] +
                  p->buf[delayB - 2] * p->coeffsB[filter][2] +
                  p->buf[delayB - 3] * p->coeffsB[filter][3] +
                  p->buf[delayB - 4] * p->coeffsB[filter][4];

    p->lastA[filter] = decoded + ((predictionA + (predictionB >> 1)) >> 10);
    p->filterA[filter] = p->lastA[filter] + ((p->filterA[filter] * 31) >> 5);

    if (!decoded) // no need updating filter coefficients
        return p->filterA[filter];

    /* sign-sign LMS adaption: nudge each coefficient by the stored signs */
    if (decoded > 0) {
        p->coeffsA[filter][0] -= p->buf[adaptA    ];
        p->coeffsA[filter][1] -= p->buf[adaptA - 1];
        p->coeffsA[filter][2] -= p->buf[adaptA - 2];
        p->coeffsA[filter][3] -= p->buf[adaptA - 3];

        p->coeffsB[filter][0] -= p->buf[adaptB    ];
        p->coeffsB[filter][1] -= p->buf[adaptB - 1];
        p->coeffsB[filter][2] -= p->buf[adaptB - 2];
        p->coeffsB[filter][3] -= p->buf[adaptB - 3];
        p->coeffsB[filter][4] -= p->buf[adaptB - 4];
    } else {
        p->coeffsA[filter][0] += p->buf[adaptA    ];
        p->coeffsA[filter][1] += p->buf[adaptA - 1];
        p->coeffsA[filter][2] += p->buf[adaptA - 2];
        p->coeffsA[filter][3] += p->buf[adaptA - 3];

        p->coeffsB[filter][0] += p->buf[adaptB    ];
        p->coeffsB[filter][1] += p->buf[adaptB - 1];
        p->coeffsB[filter][2] += p->buf[adaptB - 2];
        p->coeffsB[filter][3] += p->buf[adaptB - 3];
        p->coeffsB[filter][4] += p->buf[adaptB - 4];
    }
    return p->filterA[filter];
}
590
/** Run the stereo predictors over count residuals, reconstructing in place. */
static void predictor_decode_stereo(APEContext * ctx, int count)
{
    int32_t predictionA, predictionB;
    APEPredictor *p = &ctx->predictor;
    int32_t *decoded0 = ctx->decoded0;
    int32_t *decoded1 = ctx->decoded1;

    while (count--) {
        /* Predictor Y */
        predictionA = predictor_update_filter(p, *decoded0, 0, YDELAYA, YDELAYB, YADAPTCOEFFSA, YADAPTCOEFFSB);
        predictionB = predictor_update_filter(p, *decoded1, 1, XDELAYA, XDELAYB, XADAPTCOEFFSA, XADAPTCOEFFSB);
        *(decoded0++) = predictionA;
        *(decoded1++) = predictionB;

        /* Combined */
        p->buf++;

        /* Have we filled the history buffer? */
        if (p->buf == p->historybuffer + HISTORY_SIZE) {
            /* slide the PREDICTOR_SIZE working window back to the start */
            memmove(p->historybuffer, p->buf, PREDICTOR_SIZE * sizeof(int32_t));
            p->buf = p->historybuffer;
        }
    }
}
615
/** Run the mono (single A-stage) predictor over count residuals in place. */
static void predictor_decode_mono(APEContext * ctx, int count)
{
    APEPredictor *p = &ctx->predictor;
    int32_t *decoded0 = ctx->decoded0;
    int32_t predictionA, currentA, A;

    currentA = p->lastA[0];

    while (count--) {
        A = *decoded0;  /* residual for this sample */

        p->buf[YDELAYA] = currentA;
        p->buf[YDELAYA - 1] = p->buf[YDELAYA] - p->buf[YDELAYA - 1];  /* first difference */

        predictionA = p->buf[YDELAYA    ] * p->coeffsA[0][0] +
                      p->buf[YDELAYA - 1] * p->coeffsA[0][1] +
                      p->buf[YDELAYA - 2] * p->coeffsA[0][2] +
                      p->buf[YDELAYA - 3] * p->coeffsA[0][3];

        currentA = A + (predictionA >> 10);

        p->buf[YADAPTCOEFFSA]     = APESIGN(p->buf[YDELAYA    ]);
        p->buf[YADAPTCOEFFSA - 1] = APESIGN(p->buf[YDELAYA - 1]);

        /* sign-sign LMS adaption of the predictor coefficients */
        if (A > 0) {
            p->coeffsA[0][0] -= p->buf[YADAPTCOEFFSA    ];
            p->coeffsA[0][1] -= p->buf[YADAPTCOEFFSA - 1];
            p->coeffsA[0][2] -= p->buf[YADAPTCOEFFSA - 2];
            p->coeffsA[0][3] -= p->buf[YADAPTCOEFFSA - 3];
        } else if (A < 0) {
            p->coeffsA[0][0] += p->buf[YADAPTCOEFFSA    ];
            p->coeffsA[0][1] += p->buf[YADAPTCOEFFSA - 1];
            p->coeffsA[0][2] += p->buf[YADAPTCOEFFSA - 2];
            p->coeffsA[0][3] += p->buf[YADAPTCOEFFSA - 3];
        }

        p->buf++;

        /* Have we filled the history buffer? */
        if (p->buf == p->historybuffer + HISTORY_SIZE) {
            memmove(p->historybuffer, p->buf, PREDICTOR_SIZE * sizeof(int32_t));
            p->buf = p->historybuffer;
        }

        p->filterA[0] = currentA + ((p->filterA[0] * 31) >> 5);
        *(decoded0++) = p->filterA[0];
    }

    p->lastA[0] = currentA;
}
666
/**
 * Lay out one APEFilter's views inside a shared buffer and clear its state.
 * Buffer layout: [coeffs: order][historybuffer: order*2 + HISTORY_SIZE],
 * with delay starting order*2 into the history and adaptcoeffs order in.
 */
static void do_init_filter(APEFilter *f, int16_t * buf, int order)
{
    f->coeffs = buf;
    f->historybuffer = buf + order;
    f->delay       = f->historybuffer + order * 2;
    f->adaptcoeffs = f->historybuffer + order;

    memset(f->historybuffer, 0, (order * 2) * sizeof(int16_t));
    memset(f->coeffs, 0, order * sizeof(int16_t));
    f->avg = 0;
}
678
/** Initialize the filter pair (one per channel) for one cascade level. */
static void init_filter(APEContext * ctx, APEFilter *f, int16_t * buf, int order)
{
    do_init_filter(&f[0], buf, order);
    /* second filter lives in the second half of the same allocation */
    do_init_filter(&f[1], buf + order * 3 + HISTORY_SIZE, order);
}
684
/**
 * Apply one adaptive FIR filter to count samples in place.
 * @param version  file version; selects the pre/post-3.98 adaption rule
 * @param order    filter length
 * @param fracbits fixed-point fraction bits used for rounding the prediction
 */
static inline void do_apply_filter(int version, APEFilter *f, int32_t *data, int count, int order, int fracbits)
{
    int res;
    int absres;

    while (count--) {
        /* round fixedpoint scalar product */
        res = (scalarproduct(f->delay - order, f->coeffs, order) + (1 << (fracbits - 1))) >> fracbits;

        /* sign-sign LMS: move coefficients toward/away from the adaption signs */
        if (*data < 0)
            vector_add(f->coeffs, f->adaptcoeffs - order, order);
        else if (*data > 0)
            vector_sub(f->coeffs, f->adaptcoeffs - order, order);

        res += *data;

        *data++ = res;

        /* Update the output history */
        *f->delay++ = av_clip_int16(res);

        if (version < 3980) {
            /* Version ??? to < 3.98 files (untested) */
            f->adaptcoeffs[0]  = (res == 0) ? 0 : ((res >> 28) & 8) - 4;
            f->adaptcoeffs[-4] >>= 1;
            f->adaptcoeffs[-8] >>= 1;
        } else {
            /* Version 3.98 and later files */

            /* Update the adaption coefficients */
            absres = (res < 0 ? -res : res);

            /* step size depends on residual magnitude vs. running average */
            if (absres > (f->avg * 3))
                *f->adaptcoeffs = ((res >> 25) & 64) - 32;
            else if (absres > (f->avg * 4) / 3)
                *f->adaptcoeffs = ((res >> 26) & 32) - 16;
            else if (absres > 0)
                *f->adaptcoeffs = ((res >> 27) & 16) - 8;
            else
                *f->adaptcoeffs = 0;

            f->avg += (absres - f->avg) / 16;

            f->adaptcoeffs[-1] >>= 1;
            f->adaptcoeffs[-2] >>= 1;
            f->adaptcoeffs[-8] >>= 1;
        }

        f->adaptcoeffs++;

        /* Have we filled the history buffer? */
        if (f->delay == f->historybuffer + HISTORY_SIZE + (order * 2)) {
            /* slide the last 2*order samples back to the buffer start */
            memmove(f->historybuffer, f->delay - (order * 2),
                    (order * 2) * sizeof(int16_t));
            f->delay = f->historybuffer + order * 2;
            f->adaptcoeffs = f->historybuffer + order;
        }
    }
}
744
/** Apply one filter level to one or both channels (data1 may be NULL for mono). */
static void apply_filter(APEContext * ctx, APEFilter *f,
                         int32_t * data0, int32_t * data1,
                         int count, int order, int fracbits)
{
    do_apply_filter(ctx->fileversion, &f[0], data0, count, order, fracbits);
    if (data1)
        do_apply_filter(ctx->fileversion, &f[1], data1, count, order, fracbits);
}
753
/** Apply the full filter cascade for the current compression level. */
static void ape_apply_filters(APEContext * ctx, int32_t * decoded0,
                              int32_t * decoded1, int count)
{
    int i;

    for (i = 0; i < APE_FILTER_LEVELS; i++) {
        if (!ape_filter_orders[ctx->fset][i])
            break;  /* order 0 terminates the cascade */
        apply_filter(ctx, ctx->filters[i], decoded0, decoded1, count, ape_filter_orders[ctx->fset][i], ape_filter_fracbits[ctx->fset][i]);
    }
}
765
/** Reset entropy, predictor and filter state at the start of a frame. */
static void init_frame_decoder(APEContext * ctx)
{
    int i;
    init_entropy_decoder(ctx);
    init_predictor_decoder(ctx);

    for (i = 0; i < APE_FILTER_LEVELS; i++) {
        if (!ape_filter_orders[ctx->fset][i])
            break;
        init_filter(ctx, ctx->filters[i], ctx->filterbuf[i], ape_filter_orders[ctx->fset][i]);
    }
}
778
/**
 * Decode count samples of a mono (or pseudo-stereo) frame into
 * ctx->decoded0, duplicating into decoded1 when two output channels exist.
 */
static void ape_unpack_mono(APEContext * ctx, int count)
{
    int32_t left;
    int32_t *decoded0 = ctx->decoded0;
    int32_t *decoded1 = ctx->decoded1;

    if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) {
        /* entropy_decode zero-fills the buffers for silent frames */
        entropy_decode(ctx, count, 0);
        /* We are pure silence, so we're done. */
        av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence mono\n");
        return;
    }

    entropy_decode(ctx, count, 0);
    ape_apply_filters(ctx, decoded0, NULL, count);

    /* Now apply the predictor decoding */
    predictor_decode_mono(ctx, count);

    /* Pseudo-stereo - just copy left channel to right channel */
    if (ctx->channels == 2) {
        while (count--) {
            left = *decoded0;
            *(decoded1++) = *(decoded0++) = left;
        }
    }
}
806
/**
 * Decode count samples of a true-stereo frame. The two channels are stored
 * mid/side-style (decoded0 = difference, decoded1 = mid) and decorrelated
 * into left/right at the end.
 */
static void ape_unpack_stereo(APEContext * ctx, int count)
{
    int32_t left, right;
    int32_t *decoded0 = ctx->decoded0;
    int32_t *decoded1 = ctx->decoded1;

    if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) {
        /* We are pure silence, so we're done. */
        av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence stereo\n");
        return;
    }

    entropy_decode(ctx, count, 1);
    ape_apply_filters(ctx, decoded0, decoded1, count);

    /* Now apply the predictor decoding */
    predictor_decode_stereo(ctx, count);

    /* Decorrelate and scale to output depth */
    while (count--) {
        left = *decoded1 - (*decoded0 / 2);
        right = left + *decoded0;

        *(decoded0++) = left;
        *(decoded1++) = right;
    }
}
834
835static int ape_decode_frame(AVCodecContext * avctx,
836 void *data, int *data_size,
837 uint8_t * buf, int buf_size)
838{
839 APEContext *s = avctx->priv_data;
840 int16_t *samples = data;
841 int nblocks;
842 int i, n;
843 int blockstodecode;
844 int bytes_used;
845
846 if (buf_size == 0 && !s->samples) {
847 *data_size = 0;
848 return 0;
849 }
850
851 /* should not happen but who knows */
852 if (BLOCKS_PER_LOOP * 2 * avctx->channels > *data_size) {
853 av_log (avctx, AV_LOG_ERROR, "Packet size is too big to be handled in lavc! (max is %d where you have %d)\n", *data_size, s->samples * 2 * avctx->channels);
854 return -1;
855 }
856
857 if(!s->samples){
858 s->data = av_realloc(s->data, (buf_size + 3) & ~3);
859 s->dsp.bswap_buf(s->data, buf, buf_size >> 2);
860 s->ptr = s->last_ptr = s->data;
861 s->data_end = s->data + buf_size;
862
863 nblocks = s->samples = bytestream_get_be32(&s->ptr);
864 n = bytestream_get_be32(&s->ptr);
865 if(n < 0 || n > 3){
866 av_log(avctx, AV_LOG_ERROR, "Incorrect offset passed\n");
867 s->data = NULL;
868 return -1;
869 }
870 s->ptr += n;
871
872 s->currentframeblocks = nblocks;
873 buf += 4;
874 if (s->samples <= 0) {
875 *data_size = 0;
876 return buf_size;
877 }
878
879 memset(s->decoded0, 0, sizeof(s->decoded0));
880 memset(s->decoded1, 0, sizeof(s->decoded1));
881
882 /* Initialize the frame decoder */
883 init_frame_decoder(s);
884 }
885
886 if (!s->data) {
887 *data_size = 0;
888 return buf_size;
889 }
890
891 nblocks = s->samples;
892 blockstodecode = FFMIN(BLOCKS_PER_LOOP, nblocks);
893
894 if ((s->channels == 1) || (s->frameflags & APE_FRAMECODE_PSEUDO_STEREO))
895 ape_unpack_mono(s, blockstodecode);
896 else
897 ape_unpack_stereo(s, blockstodecode);
898
899 for (i = 0; i < blockstodecode; i++) {
900 *samples++ = s->decoded0[i];
901 if(s->channels == 2)
902 *samples++ = s->decoded1[i];
903 }
904
905 s->samples -= blockstodecode;
906
907 *data_size = blockstodecode * 2 * s->channels;
908 bytes_used = s->samples ? s->ptr - s->last_ptr : buf_size;
909 s->last_ptr = s->ptr;
910 return bytes_used;
911}
912
/** libavcodec registration entry for the Monkey's Audio decoder */
AVCodec ape_decoder = {
    "ape",
    CODEC_TYPE_AUDIO,
    CODEC_ID_APE,
    sizeof(APEContext),
    ape_decode_init,
    NULL,               // no encoder
    ape_decode_close,
    ape_decode_frame,
};