/* libavcodec/armv4l/simple_idct_armv6.S (libav.git) — commit: "fix multichannel decoding" */
1 /*
2 * Simple IDCT
3 *
4 * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
5 * Copyright (c) 2007 Mans Rullgard <mru@inprovide.com>
6 *
7 * This file is part of FFmpeg.
8 *
9 * FFmpeg is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
13 *
14 * FFmpeg is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with FFmpeg; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 */
23
#define W1 22725   /* round(cos(1*M_PI/16) * sqrt(2) * (1 << 14)) */
#define W2 21407   /* round(cos(2*M_PI/16) * sqrt(2) * (1 << 14)) */
#define W3 19266   /* round(cos(3*M_PI/16) * sqrt(2) * (1 << 14)) */
#define W4 16383   /* NOTE(review): formula gives 16384; 16383 presumably matches the C reference — confirm */
#define W5 12873   /* round(cos(5*M_PI/16) * sqrt(2) * (1 << 14)) */
#define W6 8867    /* round(cos(6*M_PI/16) * sqrt(2) * (1 << 14)) */
#define W7 4520    /* round(cos(7*M_PI/16) * sqrt(2) * (1 << 14)) */
#define ROW_SHIFT 11            /* right-shift applied after the row pass */
#define COL_SHIFT 20            /* right-shift applied after the column pass */

/* Coefficient pairs packed as two 16-bit halfwords (low | high << 16)
   for the ARMv6 dual-16-bit multiply instructions. */
#define W13 (W1 | (W3 << 16))
#define W26 (W2 | (W6 << 16))
#define W42 (W4 | (W2 << 16))
#define W42n (-W4&0xffff | (-W2 << 16))  /* '&' and '<<' bind tighter than '|' */
#define W46 (W4 | (W6 << 16))
#define W57 (W5 | (W7 << 16))
40
        .text
        .align
/* Constant pool: packed coefficient pairs, loaded PC-relative by the
   code below ([pc, #(wXX-.-8)]).  w26 appears unused in this file —
   presumably kept for symmetry; confirm before removing. */
w13:    .long W13
w26:    .long W26
w42:    .long W42
w42n:   .long W42n
w46:    .long W46
w57:    .long W57
49
/*
 * Begin a partial row/column IDCT: compute the even-part accumulators
 * A0-A3 from row[0]/row[2] and start the odd part (B0, B3) from
 * row[1]/row[3].
 *
 * shift = final right-shift amount (used only for the rounding bias)
 * In:   a3 = row[2,0] packed halfwords, a4 = row[3,1] packed halfwords
 * Out:  v1 = A0, v2 = A1, v3 = A2, v4 = A3, v5 = B0, fp = B3
 *       ip = W13 and v7 = W57 are left loaded for idct_row/idct_row4
 * Clobbers: a2, lr
 */
.macro idct_row_start shift
        ldr     ip, [pc, #(w42-.-8)]    /* ip = W4 | (W2 << 16) */
        ldr     lr, [pc, #(w46-.-8)]    /* lr = W4 | (W6 << 16) */
        ldr     v7, [pc, #(w57-.-8)]    /* v7 = W5 | (W7 << 16) */
        mov     a2, #(1<<(\shift-1))    /* a2 = rounding bias */
        smlad   v1, a3, ip, a2          /* v1 = A0 = W4*row[0] + W2*row[2] + bias */
        smlsd   v4, a3, ip, a2          /* v4 = A3 = W4*row[0] - W2*row[2] + bias */
        ldr     ip, [pc, #(w13-.-8)]    /* ip = W1 | (W3 << 16) */
        smlad   v2, a3, lr, a2          /* v2 = A1 = W4*row[0] + W6*row[2] + bias */
        smlsd   v3, a3, lr, a2          /* v3 = A2 = W4*row[0] - W6*row[2] + bias */
        smusdx  fp, a4, v7              /* fp = B3 = W7*row[1] - W5*row[3] */
        smuad   v5, a4, ip              /* v5 = B0 = W1*row[1] + W3*row[3] */
.endm
/*
 * Complete the partial IDCT of a single row started by idct_row_start,
 * folding in row[4..7].
 *
 * shift = left-shift amount (unused in the body; kept for symmetry)
 * In:   a1 = source address
 *       a4 = row[3,1] (still live from the caller)
 *       ip = W13, v7 = W57 (left there by idct_row_start —
 *       the old header's "ip = w42" was wrong)
 *       v1-v5, fp = partial accumulators from idct_row_start
 * Out:  v1-v4 = A0-A3, v5 = B0, v6 = -B1, v7 = B2, fp = B3
 * Clobbers: a2, a3, a4, ip, lr
 */
.macro idct_row shift
        ldr     lr, [a1, #12]           /* lr = row[7,5] */
        pkhtb   a3, ip, v7, asr #16     /* a3 = W7 | (W3 << 16)  (comment fixed: dest is a3) */
        pkhbt   a2, ip, v7, lsl #16     /* a2 = W1 | (W5 << 16) */
        smusdx  v6, a3, a4              /* v6 = -B1 = W7*row[3] - W3*row[1] */
        smlad   v5, lr, v7, v5          /* B0 += W5*row[5] + W7*row[7] */
        smusdx  v7, a4, a2              /* v7 = B2 = W5*row[1] - W1*row[3] */

        ldr     a4, [pc, #(w42n-.-8)]   /* a4 = -W4 | (-W2 << 16) */
        smlad   v7, lr, a3, v7          /* B2 += W7*row[5] + W3*row[7] */
        ldr     a3, [a1, #4]            /* a3 = row[6,4] */
        smlsdx  fp, lr, ip, fp          /* B3 += W3*row[5] - W1*row[7] */
        ldr     ip, [pc, #(w46-.-8)]    /* ip = W4 | (W6 << 16) */
        smlad   v6, lr, a2, v6          /* B1 -= W1*row[5] + W5*row[7]  (v6 holds -B1) */

        smlad   v2, a3, a4, v2          /* A1 += -W4*row[4] - W2*row[6] */
        smlsd   v3, a3, a4, v3          /* A2 += -W4*row[4] + W2*row[6] */
        smlad   v1, a3, ip, v1          /* A0 +=  W4*row[4] + W6*row[6] */
        smlsd   v4, a3, ip, v4          /* A3 +=  W4*row[4] - W6*row[6] */
.endm
93
/*
 * Complete the partial IDCT of a half row (row[4..7] known to be zero)
 * started by idct_row_start.
 *
 * shift = left-shift amount (unused in the body; kept for symmetry)
 * In:   a4 = row[3,1]; ip = W13, v7 = W57 (from idct_row_start)
 *       v1-v5, fp = partial accumulators from idct_row_start
 * Out:  v1-v4 = A0-A3, v5 = B0, v6 = -B1, v7 = B2, fp = B3
 * Clobbers: a2, a3
 */
.macro idct_row4 shift
        pkhtb   a3, ip, v7, asr #16     /* a3 = W7 | (W3 << 16)  (comment fixed: dest is a3) */
        pkhbt   a2, ip, v7, lsl #16     /* a2 = W1 | (W5 << 16) */
        smusdx  v6, a3, a4              /* v6 = -B1 = W7*row[3] - W3*row[1] */
        smusdx  v7, a4, a2              /* v7 = B2 = W5*row[1] - W1*row[3] */
.endm
109
/*
 * Final butterfly of one row/column IDCT, no shift.
 * In:   v1-v4 = A0-A3, v5 = B0, v6 = -B1, v7 = B2, fp = B3
 * Out:  ip = A0+B0, lr = A0-B0, v1 = A1+B1, v5 = A1-B1,
 *       v2 = A2+B2, v6 = A2-B2, v3 = A3+B3, v7 = A3-B3
 * (Old per-line comments named the wrong destination registers.)
 */
.macro idct_finish
        add     ip, v1, v5              /* ip = A0 + B0 */
        sub     lr, v1, v5              /* lr = A0 - B0 */
        sub     v1, v2, v6              /* v1 = A1 + B1  (v6 holds -B1) */
        add     v5, v2, v6              /* v5 = A1 - B1 */
        add     v2, v3, v7              /* v2 = A2 + B2 */
        sub     v6, v3, v7              /* v6 = A2 - B2 */
        add     v3, v4, fp              /* v3 = A3 + B3 */
        sub     v7, v4, fp              /* v7 = A3 - B3 */
.endm
125
/*
 * Final butterfly of one row/column IDCT, each result shifted right.
 * shift = right-shift amount
 * In:   v1-v4 = A0-A3, v5 = B0, v6 = -B1, v7 = B2, fp = B3
 * Out:  v1 = A0+B0, v2 = A1+B1, v3 = A2+B2, v4 = A3+B3,
 *       fp = A3-B3, v7 = A2-B2, v6 = A1-B1, v5 = A0-B0  (all >> shift)
 * Clobbers: a3, a4
 */
.macro idct_finish_shift shift
        add     a4, v1, v5              /* a4 = A0 + B0 */
        sub     a3, v1, v5              /* a3 = A0 - B0 */
        mov     v1, a4, asr #\shift
        mov     v5, a3, asr #\shift

        sub     a4, v2, v6              /* a4 = A1 + B1  (v6 holds -B1) */
        add     a3, v2, v6              /* a3 = A1 - B1 */
        mov     v2, a4, asr #\shift
        mov     v6, a3, asr #\shift

        add     a4, v3, v7              /* a4 = A2 + B2 */
        sub     a3, v3, v7              /* a3 = A2 - B2 */
        mov     v3, a4, asr #\shift
        mov     v7, a3, asr #\shift

        add     a4, v4, fp              /* a4 = A3 + B3 */
        sub     a3, v4, fp              /* a3 = A3 - B3 */
        mov     v4, a4, asr #\shift
        mov     fp, a3, asr #\shift
.endm
152
/*
 * Final butterfly of one row/column IDCT, results shifted right and
 * saturated to unsigned 8 bits.
 * shift = right-shift amount
 * In:   v1-v4 = A0-A3, v5 = B0, v6 = -B1, v7 = B2, fp = B3
 * Out:  v1 = A0+B0, v2 = A1+B1, v3 = A2+B2, v4 = A3+B3,
 *       fp = A3-B3, v7 = A2-B2, v6 = A1-B1, v5 = A0-B0
 *       (each >> shift, clamped to [0,255])
 * Clobbers: a4, ip
 */
.macro idct_finish_shift_sat shift
        add     a4, v1, v5              /* a4 = A0 + B0 */
        sub     ip, v1, v5              /* ip = A0 - B0 */
        usat    v1, #8, a4, asr #\shift
        usat    v5, #8, ip, asr #\shift

        sub     a4, v2, v6              /* a4 = A1 + B1  (v6 holds -B1) */
        add     ip, v2, v6              /* ip = A1 - B1 */
        usat    v2, #8, a4, asr #\shift
        usat    v6, #8, ip, asr #\shift

        add     a4, v3, v7              /* a4 = A2 + B2 */
        sub     ip, v3, v7              /* ip = A2 - B2 */
        usat    v3, #8, a4, asr #\shift
        usat    v7, #8, ip, asr #\shift

        add     a4, v4, fp              /* a4 = A3 + B3 */
        sub     ip, v4, fp              /* ip = A3 - B3 */
        usat    v4, #8, a4, asr #\shift
        usat    fp, #8, ip, asr #\shift
.endm
179
/*
 * Compute the IDCT of a single 8-element row, storing the result as a
 * column of the 8x8 temporary buffer.
 *
 * In:   a1 = source row (8 x 16-bit coefficients)
 *       a2 = dest (column base in the temp buffer)
 * Out:  eight 16-bit results at a2 + {0,16,...,112}; even-indexed
 *       outputs go to offsets 0,32,64,96 and odd-indexed to
 *       16,48,80,112 — matching the interleaved row order
 *       0,2,4,6,1,3,5,7 that idct_rows uses for the column pass.
 * Clobbers: a3, a4, ip, v1-v7, fp, lr
 */
        .align
        .func idct_row_armv6
idct_row_armv6:
        ldr     fp, [a1, #12]           /* fp = row[7,5] */
        ldr     v7, [a1, #4]            /* v7 = row[6,4] */
        ldr     a4, [a1, #8]            /* a4 = row[3,1] */
        ldr     a3, [a1]                /* a3 = row[2,0] */
        mov     ip, #(1<<(ROW_SHIFT-1)) /* ip = rounding bias */
        orrs    v5, fp, v7              /* Z set if row[4..7] all zero */
        cmpeq   v5, a4                  /* ... and row[1], row[3] zero */
        cmpeq   v5, a3, lsr #16         /* ... and row[2] zero */
        beq     1f                      /* only row[0] nonzero: DC fast path */
        cmp     v5, #0                  /* Z iff row[4..7] zero; consumed by
                                           'beq 3f' below — nothing in between
                                           writes the flags */
        stmfd   sp!, {a2, lr}
        ldr     v5, [pc, #(w42-.-8)]    /* v5 = W4 | (W2 << 16) */
        ldr     v6, [pc, #(w46-.-8)]    /* v6 = W4 | (W6 << 16) */
        ldr     v7, [pc, #(w57-.-8)]    /* v7 = W5 | (W7 << 16) */

        smlad   v1, a3, v5, ip          /* v1 = A0 = W4*row[0] + W2*row[2] + bias */
        smlsd   v4, a3, v5, ip          /* v4 = A3 = W4*row[0] - W2*row[2] + bias */
        ldr     a2, [pc, #(w13-.-8)]    /* a2 = W1 | (W3 << 16) */
        smlad   v2, a3, v6, ip          /* v2 = A1 = W4*row[0] + W6*row[2] + bias */
        smlsd   v3, a3, v6, ip          /* v3 = A2 = W4*row[0] - W6*row[2] + bias */
        smusdx  lr, a4, v7              /* lr = B3 = W7*row[1] - W5*row[3] */
        smuad   v5, a4, a2              /* v5 = B0 = W1*row[1] + W3*row[3] */

        pkhtb   a3, a2, v7, asr #16     /* a3 = W7 | (W3 << 16) */
        pkhbt   ip, a2, v7, lsl #16     /* ip = W1 | (W5 << 16) */
        smusdx  v6, a3, a4              /* v6 = -B1 = W7*row[3] - W3*row[1] */
        smusdx  a4, a4, ip              /* a4 = B2 = W5*row[1] - W1*row[3]
                                           (comment fixed: dest is a4, not v7) */
        beq     3f                      /* row[4..7] zero: skip their terms */

        smlad   v5, fp, v7, v5          /* B0 += W5*row[5] + W7*row[7] */
        smlad   v7, fp, a3, a4          /* v7 = B2 + W7*row[5] + W3*row[7] */
        ldr     a4, [pc, #(w42n-.-8)]   /* a4 = -W4 | (-W2 << 16) */
        ldr     a3, [a1, #4]            /* a3 = row[6,4] */
        smlsdx  lr, fp, a2, lr          /* B3 += W3*row[5] - W1*row[7] */
        ldr     a2, [pc, #(w46-.-8)]    /* a2 = W4 | (W6 << 16) */
        smlad   v6, fp, ip, v6          /* B1 -= W1*row[5] + W5*row[7]  (v6 = -B1) */

        smlad   v2, a3, a4, v2          /* A1 += -W4*row[4] - W2*row[6] */
        smlsd   v3, a3, a4, v3          /* A2 += -W4*row[4] + W2*row[6] */
        smlad   v1, a3, a2, v1          /* A0 +=  W4*row[4] + W6*row[6] */
        smlsd   v4, a3, a2, v4          /* A3 +=  W4*row[4] - W6*row[6] */

        ldr     a2, [sp], #4            /* a2 = saved dest pointer */
        add     a4, v1, v5              /* a4 = A0 + B0 */
        sub     a3, v1, v5              /* a3 = A0 - B0 */
        mov     v1, a4, asr #ROW_SHIFT  /* v1 = out[0] */
        mov     v5, a3, asr #ROW_SHIFT  /* v5 = out[7] */

        sub     a4, v2, v6              /* a4 = A1 + B1  (v6 holds -B1) */
        add     a3, v2, v6              /* a3 = A1 - B1 */
        mov     v2, a4, asr #ROW_SHIFT  /* v2 = out[1] */
        mov     v6, a3, asr #ROW_SHIFT  /* v6 = out[6] */

        add     a4, v3, v7              /* a4 = A2 + B2 */
        sub     a3, v3, v7              /* a3 = A2 - B2 */
        mov     v3, a4, asr #ROW_SHIFT  /* v3 = out[2] */
        mov     v7, a3, asr #ROW_SHIFT  /* v7 = out[5] */

        add     a4, v4, lr              /* a4 = A3 + B3  (comment fixed: dest is a4) */
        sub     a3, v4, lr              /* a3 = A3 - B3 */
        mov     v4, a4, asr #ROW_SHIFT  /* v4 = out[3] */
        mov     fp, a3, asr #ROW_SHIFT  /* fp = out[4] */

        strh    v1, [a2]                /* store as a column, stride 16 bytes */
        strh    v2, [a2, #(16*2)]
        strh    v3, [a2, #(16*4)]
        strh    v4, [a2, #(16*6)]
        strh    fp, [a2, #(16*1)]
        strh    v7, [a2, #(16*3)]
        strh    v6, [a2, #(16*5)]
        strh    v5, [a2, #(16*7)]

        ldr     pc, [sp], #4            /* return; lr was pushed with a2 */

3:      ldr     a2, [sp], #4            /* half-row path: rows 4-7 were zero */
        add     v7, v1, v5              /* v7 = A0 + B0 */
        sub     a3, v1, v5              /* a3 = A0 - B0 */
        mov     v1, v7, asr #ROW_SHIFT  /* v1 = out[0] */
        mov     v5, a3, asr #ROW_SHIFT  /* v5 = out[7] */

        sub     v7, v2, v6              /* v7 = A1 + B1  (v6 holds -B1) */
        add     a3, v2, v6              /* a3 = A1 - B1 */
        mov     v2, v7, asr #ROW_SHIFT  /* v2 = out[1] */
        mov     v6, a3, asr #ROW_SHIFT  /* v6 = out[6] */

        add     v7, v3, a4              /* v7 = A2 + B2  (a4 still holds B2) */
        sub     a3, v3, a4              /* a3 = A2 - B2 */
        mov     v3, v7, asr #ROW_SHIFT  /* v3 = out[2] */
        mov     v7, a3, asr #ROW_SHIFT  /* v7 = out[5] */

        add     a4, v4, lr              /* a4 = A3 + B3 */
        sub     a3, v4, lr              /* a3 = A3 - B3 */
        mov     v4, a4, asr #ROW_SHIFT  /* v4 = out[3] */
        mov     fp, a3, asr #ROW_SHIFT  /* fp = out[4] */

        strh    v1, [a2]
        strh    v2, [a2, #(16*2)]
        strh    v3, [a2, #(16*4)]
        strh    v4, [a2, #(16*6)]
        strh    fp, [a2, #(16*1)]
        strh    v7, [a2, #(16*3)]
        strh    v6, [a2, #(16*5)]
        strh    v5, [a2, #(16*7)]

        ldr     pc, [sp], #4

1:      mov     a3, a3, lsl #3          /* DC only: out = row[0] << 3; only the
                                           low halfword matters since strh
                                           ignores the high half */
        strh    a3, [a2]
        strh    a3, [a2, #(16*2)]
        strh    a3, [a2, #(16*4)]
        strh    a3, [a2, #(16*6)]
        strh    a3, [a2, #(16*1)]
        strh    a3, [a2, #(16*3)]
        strh    a3, [a2, #(16*5)]
        strh    a3, [a2, #(16*7)]
        mov     pc, lr                  /* nothing was pushed on this path */
.endfunc
305
/*
 * Compute the IDCT of a single column (read as one row of the temp
 * buffer) and store it as a column of 16-bit results.
 * In:   a1 = source, a2 = dest
 * Clobbers: a3, a4, ip, v1-v7, fp, lr
 */
        .align
        .func idct_col_armv6
idct_col_armv6:
        stmfd   sp!, {a2, lr}

        ldr     a3, [a1]                /* a3 = row[2,0] */
        ldr     a4, [a1, #8]            /* a4 = row[3,1] */
        idct_row_start COL_SHIFT
        idct_row COL_SHIFT
        ldr     a2, [sp], #4            /* a2 = saved dest */
        idct_finish_shift COL_SHIFT

        strh    v1, [a2]                /* out[0] */
        strh    v2, [a2, #(16*1)]       /* out[1] */
        strh    v3, [a2, #(16*2)]       /* out[2] */
        strh    v4, [a2, #(16*3)]       /* out[3] */
        strh    fp, [a2, #(16*4)]       /* out[4] */
        strh    v7, [a2, #(16*5)]       /* out[5] */
        strh    v6, [a2, #(16*6)]       /* out[6] */
        strh    v5, [a2, #(16*7)]       /* out[7] */

        ldr     pc, [sp], #4
.endfunc
334
/*
 * Compute the IDCT of a single column (read as one row of the temp
 * buffer), storing the result saturated to unsigned 8 bits.
 * In:   a1 = source, a2 = dest, a3 = line size
 * Out:  a2 restored to its entry value — the caller (idct_rows)
 *       advances it by the column width after each call
 * Clobbers: a4, ip, v1-v7, fp, lr
 */
        .align
        .func idct_col_put_armv6
idct_col_put_armv6:
        stmfd   sp!, {a2, a3, lr}

        ldr     a3, [a1]                /* a3 = row[2,0] */
        ldr     a4, [a1, #8]            /* a4 = row[3,1] */
        idct_row_start COL_SHIFT
        idct_row COL_SHIFT
        ldmfd   sp!, {a2, a3}           /* restore dest and line size */
        idct_finish_shift_sat COL_SHIFT

        strb    v1, [a2], a3            /* out[0] */
        strb    v2, [a2], a3            /* out[1] */
        strb    v3, [a2], a3            /* out[2] */
        strb    v4, [a2], a3            /* out[3] */
        strb    fp, [a2], a3            /* out[4] */
        strb    v7, [a2], a3            /* out[5] */
        strb    v6, [a2], a3            /* out[6] */
        strb    v5, [a2], a3            /* out[7] */

        sub     a2, a2, a3, lsl #3      /* rewind a2 by 8 lines for the caller */

        ldr     pc, [sp], #4
.endfunc
366
/*
 * Compute the IDCT of a single column (read as one row of the temp
 * buffer), add it to the destination pixels, and store saturated to
 * unsigned 8 bits.
 * In:   a1 = source, a2 = dest, a3 = line size
 * Out:  a2 restored to its entry value for the caller (idct_rows)
 * Clobbers: a4, ip, v1-v7, fp, lr
 *
 * After idct_finish the column outputs live in:
 *   ip = out[0], v1 = out[1], v2 = out[2], v3 = out[3],
 *   v7 = out[4], v6 = out[5], v5 = out[6], lr = out[7]
 * The loads below are interleaved with the adds; 'line N' refers to
 * dest + N*line_size.
 */
        .align
        .func idct_col_add_armv6
idct_col_add_armv6:
        stmfd   sp!, {a2, a3, lr}

        ldr     a3, [a1]                /* a3 = row[2,0] */
        ldr     a4, [a1, #8]            /* a4 = row[3,1] */
        idct_row_start COL_SHIFT
        idct_row COL_SHIFT
        ldmfd   sp!, {a2, a3}           /* restore dest and line size */
        idct_finish

        ldrb    a4, [a2]                /* a4 = dest line 0 */
        ldrb    v4, [a2, a3]            /* v4 = dest line 1 */
        ldrb    fp, [a2, a3, lsl #2]    /* NOTE(review): apparently a dead load —
                                           fp is overwritten below (line 5 load)
                                           before any use; confirm */
        add     ip, a4, ip, asr #COL_SHIFT  /* out[0] + line 0 */
        usat    ip, #8, ip
        add     v1, v4, v1, asr #COL_SHIFT  /* out[1] + line 1 */
        strb    ip, [a2], a3            /* store line 0; a2 -> line 1 */
        ldrb    ip, [a2, a3]            /* ip = dest line 2 */
        usat    v1, #8, v1
        ldrb    fp, [a2, a3, lsl #2]    /* fp = dest line 5 */
        add     v2, ip, v2, asr #COL_SHIFT  /* out[2] + line 2 */
        usat    v2, #8, v2
        strb    v1, [a2], a3            /* store line 1; a2 -> line 2 */
        ldrb    a4, [a2, a3]            /* a4 = dest line 3 */
        ldrb    ip, [a2, a3, lsl #2]    /* ip = dest line 6 */
        strb    v2, [a2], a3            /* store line 2; a2 -> line 3 */
        ldrb    v4, [a2, a3]            /* v4 = dest line 4 */
        ldrb    v1, [a2, a3, lsl #2]    /* v1 = dest line 7 */
        add     v3, a4, v3, asr #COL_SHIFT  /* out[3] + line 3 */
        usat    v3, #8, v3
        add     v7, v4, v7, asr #COL_SHIFT  /* out[4] + line 4 */
        usat    v7, #8, v7
        add     v6, fp, v6, asr #COL_SHIFT  /* out[5] + line 5 */
        usat    v6, #8, v6
        add     v5, ip, v5, asr #COL_SHIFT  /* out[6] + line 6 */
        usat    v5, #8, v5
        add     lr, v1, lr, asr #COL_SHIFT  /* out[7] + line 7 */
        usat    lr, #8, lr
        strb    v3, [a2], a3            /* store lines 3..7 */
        strb    v7, [a2], a3
        strb    v6, [a2], a3
        strb    v5, [a2], a3
        strb    lr, [a2], a3

        sub     a2, a2, a3, lsl #3      /* rewind a2 by 8 lines for the caller */

        ldr     pc, [sp], #4
.endfunc
423
/*
 * Run eight one-row transforms over an 8x8 block of 16-bit coefficients.
 * func  = IDCT row->col function (called with a1 = src row, a2 = dest)
 * width = width of dest columns in bytes (a2 advances by this per call)
 *
 * Rows are visited in the interleaved order 0,2,4,6,1,3,5,7: a1 steps
 * by two 16-byte rows between calls, and the mid-sequence 'sub' jumps
 * from row 6 back to row 1.  On exit a1 is restored to the block
 * start; a2 is left advanced by 7*width.
 */
.macro idct_rows func width
        bl      \func
        add     a1, a1, #(16*2)
        add     a2, a2, #\width
        bl      \func
        add     a1, a1, #(16*2)
        add     a2, a2, #\width
        bl      \func
        add     a1, a1, #(16*2)
        add     a2, a2, #\width
        bl      \func
        sub     a1, a1, #(16*5)         /* row 6 -> row 1 */
        add     a2, a2, #\width
        bl      \func
        add     a1, a1, #(16*2)
        add     a2, a2, #\width
        bl      \func
        add     a1, a1, #(16*2)
        add     a2, a2, #\width
        bl      \func
        add     a1, a1, #(16*2)
        add     a2, a2, #\width
        bl      \func

        sub     a1, a1, #(16*7)         /* restore a1 to start of block */
.endm
455
        .align
        .global ff_simple_idct_armv6
        .func ff_simple_idct_armv6
/* void ff_simple_idct_armv6(DCTELEM *data);
 * In-place 8x8 inverse DCT: the row pass writes a transposed copy into
 * a 128-byte stack buffer, the column pass writes back over 'data'. */
ff_simple_idct_armv6:
        stmfd   sp!, {v1, v2, v3, v4, v5, v6, v7, fp, lr}
        sub     sp, sp, #128            /* 64 x 16-bit temp block */

        mov     a2, sp
        idct_rows idct_row_armv6, 2     /* row pass: data -> temp */
        mov     a2, a1                  /* a1 was restored by idct_rows */
        mov     a1, sp
        idct_rows idct_col_armv6, 2     /* column pass: temp -> data */

        add     sp, sp, #128
        ldmfd   sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
.endfunc
473
        .align
        .global ff_simple_idct_add_armv6
        .func ff_simple_idct_add_armv6
/* ff_simple_idct_add_armv6(uint8_t *dest, int line_size, DCTELEM *data);
 * IDCT of 'data', result added to 'dest' with unsigned 8-bit saturation. */
ff_simple_idct_add_armv6:
        stmfd   sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr}
        sub     sp, sp, #128            /* 64 x 16-bit temp block */

        mov     a1, a3                  /* a1 = coefficient block */
        mov     a2, sp
        idct_rows idct_row_armv6, 2     /* row pass: data -> temp */
        mov     a1, sp
        ldr     a2, [sp, #128]          /* a2 = saved dest */
        ldr     a3, [sp, #(128+4)]      /* a3 = saved line_size */
        idct_rows idct_col_add_armv6, 1 /* column pass: temp + dest -> dest */

        add     sp, sp, #(128+8)        /* drop temp + saved dest/line_size */
        ldmfd   sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
.endfunc
493
        .align
        .global ff_simple_idct_put_armv6
        .func ff_simple_idct_put_armv6
/* ff_simple_idct_put_armv6(uint8_t *dest, int line_size, DCTELEM *data);
 * IDCT of 'data', result written to 'dest' with unsigned 8-bit saturation. */
ff_simple_idct_put_armv6:
        stmfd   sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr}
        sub     sp, sp, #128            /* 64 x 16-bit temp block */

        mov     a1, a3                  /* a1 = coefficient block */
        mov     a2, sp
        idct_rows idct_row_armv6, 2     /* row pass: data -> temp */
        mov     a1, sp
        ldr     a2, [sp, #128]          /* a2 = saved dest */
        ldr     a3, [sp, #(128+4)]      /* a3 = saved line_size */
        idct_rows idct_col_put_armv6, 1 /* column pass: temp -> dest */

        add     sp, sp, #(128+8)        /* drop temp + saved dest/line_size */
        ldmfd   sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
.endfunc