/* libavcodec/armv4l/simple_idct_armv6.S */
/*
 * Simple IDCT
 *
 * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2007 Mans Rullgard <mru@inprovide.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define W1  22725   /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W2  21407   /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W3  19266   /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W4  16383   /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W5  12873   /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W6   8867   /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W7   4520   /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define ROW_SHIFT 11
#define COL_SHIFT 20

#define W13 (W1 | (W3 << 16))
#define W26 (W2 | (W6 << 16))
#define W42 (W4 | (W2 << 16))
#define W42n (-W4&0xffff | (-W2 << 16))
#define W46 (W4 | (W6 << 16))
#define W57 (W5 | (W7 << 16))

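/*
  The W constants follow the usual simple_idct scaling
  round(cos(i*M_PI/16) * sqrt(2) * (1 << 14)).  For example, W3 (a sketch
  only, not part of the build):

      int w3 = (int)(cos(3 * M_PI / 16) * sqrt(2) * (1 << 14) + 0.5);  // 19266

  W4 is the one exception: the formula rounds to 16384, but 16383 is used,
  matching the C simple_idct.  The Wxy defines pack two of these 16-bit
  constants into one word so that the ARMv6 dual 16-bit multiply-accumulate
  instructions (SMUAD/SMLAD/SMLSD) can apply a pair of weights per
  instruction.
*/
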
        .text
        .align
w13:    .long W13
w26:    .long W26
w42:    .long W42
w42n:   .long W42n
w46:    .long W46
w57:    .long W57

/*
  Compute partial IDCT of single row.
  shift = left-shift amount
  a1 = source address
  a3 = row[2,0] <= 2 cycles
  a4 = row[3,1]
  ip = w42 <= 2 cycles

  Output in registers v1--v8
*/
        .macro  idct_row shift
        ldr     lr, [pc, #(w46-.-8)]    /* lr = W4 | (W6 << 16) */
        mov     a2, #(1<<(\shift-1))
        smlad   v1, a3, ip, a2          /* v1 = A0 = W4*row[0] + W2*row[2] + rnd */
        smlsd   v4, a3, ip, a2          /* v4 = A3 = W4*row[0] - W2*row[2] + rnd */
        ldr     ip, [pc, #(w13-.-8)]    /* ip = W1 | (W3 << 16) */
        ldr     v7, [pc, #(w57-.-8)]    /* v7 = W5 | (W7 << 16) */
        smlad   v2, a3, lr, a2          /* v2 = A1 = W4*row[0] + W6*row[2] + rnd */
        smlsd   v3, a3, lr, a2          /* v3 = A2 = W4*row[0] - W6*row[2] + rnd */

        smuad   v5, a4, ip              /* v5 = B0 = W1*row[1] + W3*row[3] */
        smusdx  fp, a4, v7              /* fp = B3 = W7*row[1] - W5*row[3] */
        ldr     lr, [a1, #12]           /* lr = row[7,5] */
        pkhtb   a3, ip, v7, asr #16     /* a3 = W7 | (W3 << 16) */
        pkhbt   a2, ip, v7, lsl #16     /* a2 = W1 | (W5 << 16) */
        smusdx  v6, a3, a4              /* v6 = -B1 = W7*row[3] - W3*row[1] */
        smlad   v5, lr, v7, v5          /* B0 += W5*row[5] + W7*row[7] */
        smusdx  v7, a4, a2              /* v7 = B2 = W5*row[1] - W1*row[3] */

        ldr     a4, [pc, #(w42n-.-8)]   /* a4 = -W4 | (-W2 << 16) */
        smlad   v7, lr, a3, v7          /* B2 += W7*row[5] + W3*row[7] */
        ldr     a3, [a1, #4]            /* a3 = row[6,4] */
        smlsdx  fp, lr, ip, fp          /* B3 += W3*row[5] - W1*row[7] */
        ldr     ip, [pc, #(w46-.-8)]    /* ip = W4 | (W6 << 16) */
        smlad   v6, lr, a2, v6          /* B1 -= W1*row[5] + W5*row[7] */

        smlad   v2, a3, a4, v2          /* A1 += -W4*row[4] - W2*row[6] */
        smlsd   v3, a3, a4, v3          /* A2 += -W4*row[4] + W2*row[6] */
        smlad   v1, a3, ip, v1          /* A0 +=  W4*row[4] + W6*row[6] */
        smlsd   v4, a3, ip, v4          /* A3 +=  W4*row[4] - W6*row[6] */
        .endm
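
/*
  For reference, idct_row computes the same even/odd decomposition as the C
  simple_idct row transform (sketch only, not assembled; row[] uses the
  natural coefficient indices from the comments above):

      a0 = W4*row[0] + W2*row[2] + W4*row[4] + W6*row[6] + (1 << (shift-1));
      a1 = W4*row[0] + W6*row[2] - W4*row[4] - W2*row[6] + (1 << (shift-1));
      a2 = W4*row[0] - W6*row[2] - W4*row[4] + W2*row[6] + (1 << (shift-1));
      a3 = W4*row[0] - W2*row[2] + W4*row[4] - W6*row[6] + (1 << (shift-1));
      b0 = W1*row[1] + W3*row[3] + W5*row[5] + W7*row[7];
      b1 = W3*row[1] - W7*row[3] - W1*row[5] - W5*row[7];
      b2 = W5*row[1] - W1*row[3] + W7*row[5] + W3*row[7];
      b3 = W7*row[1] - W5*row[3] + W3*row[5] - W1*row[7];
      out[j]     = (aj + bj) >> shift;   /* j = 0..3 */
      out[7 - j] = (aj - bj) >> shift;

  The pairwise loads (row[2,0], row[3,1], row[6,4], row[7,5]) indicate that
  each input row is expected in memory in the interleaved order
  0,2,4,6,1,3,5,7, which is what lets each dual multiply apply two weights at
  once.
*/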

/*
  Compute partial IDCT of half row.
  shift = left-shift amount
  a3 = row[2,0]
  a4 = row[3,1]

  Used by idct_row_armv6 when coefficients 4--7 of the row are all zero, so
  only the row[2,0] and row[3,1] terms are needed.

  Output in registers v1--v8
*/
        .macro  idct_row4 shift
        ldr     ip, [pc, #(w42-.-8)]    /* ip = W4 | (W2 << 16) */
        ldr     lr, [pc, #(w46-.-8)]    /* lr = W4 | (W6 << 16) */
        ldr     v7, [pc, #(w57-.-8)]    /* v7 = W5 | (W7 << 16) */
        mov     a2, #(1<<(\shift-1))
        smlad   v1, a3, ip, a2          /* v1 = A0 = W4*row[0] + W2*row[2] + rnd */
        smlsd   v4, a3, ip, a2          /* v4 = A3 = W4*row[0] - W2*row[2] + rnd */
        ldr     ip, [pc, #(w13-.-8)]    /* ip = W1 | (W3 << 16) */
        smlad   v2, a3, lr, a2          /* v2 = A1 = W4*row[0] + W6*row[2] + rnd */
        smlsd   v3, a3, lr, a2          /* v3 = A2 = W4*row[0] - W6*row[2] + rnd */
        smusdx  fp, a4, v7              /* fp = B3 = W7*row[1] - W5*row[3] */
        smuad   v5, a4, ip              /* v5 = B0 = W1*row[1] + W3*row[3] */
        pkhtb   a3, ip, v7, asr #16     /* a3 = W7 | (W3 << 16) */
        pkhbt   a2, ip, v7, lsl #16     /* a2 = W1 | (W5 << 16) */
        smusdx  v6, a3, a4              /* v6 = -B1 = W7*row[3] - W3*row[1] */
        smusdx  v7, a4, a2              /* v7 = B2 = W5*row[1] - W1*row[3] */
        .endm
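
/*
  With coefficients 4--7 known to be zero, the reference sums from the full
  row transform reduce to (sketch only):

      a0 = W4*row[0] + W2*row[2] + rnd;    b0 = W1*row[1] + W3*row[3];
      a1 = W4*row[0] + W6*row[2] + rnd;    b1 = W3*row[1] - W7*row[3];
      a2 = W4*row[0] - W6*row[2] + rnd;    b2 = W5*row[1] - W1*row[3];
      a3 = W4*row[0] - W2*row[2] + rnd;    b3 = W7*row[1] - W5*row[3];
*/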

/*
  Compute final part of IDCT single row without shift.
  Input in registers v1--v8
  Output in registers ip, v1--v3, lr, v5--v7

  Note: v6 holds -B1 on entry (see idct_row), so the add/sub roles are
  swapped for the A1/B1 pair.
*/
        .macro  idct_finish
        add     ip, v1, v5              /* ip = A0 + B0 */
        sub     lr, v1, v5              /* lr = A0 - B0 */
        sub     v1, v2, v6              /* v1 = A1 + B1 */
        add     v5, v2, v6              /* v5 = A1 - B1 */
        add     v2, v3, v7              /* v2 = A2 + B2 */
        sub     v6, v3, v7              /* v6 = A2 - B2 */
        add     v3, v4, fp              /* v3 = A3 + B3 */
        sub     v7, v4, fp              /* v7 = A3 - B3 */
        .endm
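
/*
  Within this file, idct_col_add_armv6 is the only user of idct_finish; it
  consumes these values as output elements 0..7 in the order
  ip, v1, v2, v3, v7, v6, v5, lr (see its strb sequence below).
*/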

/*
  Compute final part of IDCT single row.
  shift = right-shift amount
  Input/output in registers v1--v8
*/
        .macro  idct_finish_shift shift
        add     a4, v1, v5              /* a4 = A0 + B0 */
        sub     a3, v1, v5              /* a3 = A0 - B0 */
        mov     v1, a4, asr #\shift
        mov     v5, a3, asr #\shift

        sub     a4, v2, v6              /* a4 = A1 + B1 */
        add     a3, v2, v6              /* a3 = A1 - B1 */
        mov     v2, a4, asr #\shift
        mov     v6, a3, asr #\shift

        add     a4, v3, v7              /* a4 = A2 + B2 */
        sub     a3, v3, v7              /* a3 = A2 - B2 */
        mov     v3, a4, asr #\shift
        mov     v7, a3, asr #\shift

        add     a4, v4, fp              /* a4 = A3 + B3 */
        sub     a3, v4, fp              /* a3 = A3 - B3 */
        mov     v4, a4, asr #\shift
        mov     fp, a3, asr #\shift
        .endm

/*
  Compute final part of IDCT single row, saturating results at 8 bits.
  shift = right-shift amount
  Input/output in registers v1--v8
*/
        .macro  idct_finish_shift_sat shift
        add     a4, v1, v5              /* a4 = A0 + B0 */
        sub     ip, v1, v5              /* ip = A0 - B0 */
        usat    v1, #8, a4, asr #\shift
        usat    v5, #8, ip, asr #\shift

        sub     a4, v2, v6              /* a4 = A1 + B1 */
        add     ip, v2, v6              /* ip = A1 - B1 */
        usat    v2, #8, a4, asr #\shift
        usat    v6, #8, ip, asr #\shift

        add     a4, v3, v7              /* a4 = A2 + B2 */
        sub     ip, v3, v7              /* ip = A2 - B2 */
        usat    v3, #8, a4, asr #\shift
        usat    v7, #8, ip, asr #\shift

        add     a4, v4, fp              /* a4 = A3 + B3 */
        sub     ip, v4, fp              /* ip = A3 - B3 */
        usat    v4, #8, a4, asr #\shift
        usat    fp, #8, ip, asr #\shift
        .endm
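
/*
  USAT #8 clamps the shifted value to the unsigned 8-bit range, so each
  output here is roughly the C expression av_clip_uint8(x >> shift) applied
  to the corresponding A +/- B sum (av_clip_uint8 as in FFmpeg's C code,
  mentioned only for reference).
*/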

/*
  Compute IDCT of single row, storing as column.
  a1 = source
  a2 = dest
*/
        .align
        .func   idct_row_armv6
idct_row_armv6:
        str     lr, [sp, #-4]!

        ldr     lr, [a1, #12]           /* lr = row[7,5] */
        ldr     ip, [a1, #4]            /* ip = row[6,4] */
        ldr     a4, [a1, #8]            /* a4 = row[3,1] */
        ldr     a3, [a1]                /* a3 = row[2,0] */
        orrs    lr, lr, ip              /* coefficients 4--7 all zero? */
        cmpeq   lr, a4                  /* ... and coefficients 1, 3? */
        cmpeq   lr, a3, lsr #16         /* ... and coefficient 2? */
        beq     1f                      /* only the DC term may be non-zero */
        str     a2, [sp, #-4]!
        ldr     ip, [pc, #(w42-.-8)]    /* ip = W4 | (W2 << 16) */
        cmp     lr, #0
        beq     2f                      /* coefficients 4--7 zero: half row */

        idct_row ROW_SHIFT
        b       3f

2:      idct_row4 ROW_SHIFT

3:      ldr     a2, [sp], #4
        idct_finish_shift ROW_SHIFT

        /* store down one column of the temporary block (16-byte stride);
           output elements interleaved in the order 0,2,4,6,1,3,5,7 */
        strh    v1, [a2]
        strh    v2, [a2, #(16*2)]
        strh    v3, [a2, #(16*4)]
        strh    v4, [a2, #(16*6)]
        strh    fp, [a2, #(16*1)]
        strh    v7, [a2, #(16*3)]
        strh    v6, [a2, #(16*5)]
        strh    v5, [a2, #(16*7)]

        ldr     pc, [sp], #4

1:      mov     a3, a3, lsl #3          /* DC only: every output is row[0] << 3 */
        strh    a3, [a2]
        strh    a3, [a2, #(16*2)]
        strh    a3, [a2, #(16*4)]
        strh    a3, [a2, #(16*6)]
        strh    a3, [a2, #(16*1)]
        strh    a3, [a2, #(16*3)]
        strh    a3, [a2, #(16*5)]
        strh    a3, [a2, #(16*7)]
        ldr     pc, [sp], #4
        .endfunc
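
/*
  The DC shortcut above is the usual simple_idct special case: with only
  row[0] non-zero, every output of the row pass is
  (W4*row[0] + (1 << (ROW_SHIFT-1))) >> ROW_SHIFT, and since W4 is close to
  1 << 14 and ROW_SHIFT is 11, that is row[0] << 3 up to rounding, which is
  what the single shift computes.
*/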

/*
  Compute IDCT of single column, read as row.
  a1 = source
  a2 = dest
*/
        .align
        .func   idct_col_armv6
idct_col_armv6:
        stmfd   sp!, {a2, lr}

        ldr     a3, [a1]                /* a3 = row[2,0] */
        ldr     ip, [pc, #(w42-.-8)]    /* ip = W4 | (W2 << 16) */
        ldr     a4, [a1, #8]            /* a4 = row[3,1] */
        idct_row COL_SHIFT
        ldr     a2, [sp], #4
        idct_finish_shift COL_SHIFT

        strh    v1, [a2]
        strh    v2, [a2, #(16*1)]
        strh    v3, [a2, #(16*2)]
        strh    v4, [a2, #(16*3)]
        strh    fp, [a2, #(16*4)]
        strh    v7, [a2, #(16*5)]
        strh    v6, [a2, #(16*6)]
        strh    v5, [a2, #(16*7)]

        ldr     pc, [sp], #4
        .endfunc

/*
  Compute IDCT of single column, read as row, store saturated 8-bit.
  a1 = source
  a2 = dest
  a3 = line size
*/
        .align
        .func   idct_col_put_armv6
idct_col_put_armv6:
        stmfd   sp!, {a2, a3, lr}

        ldr     a3, [a1]                /* a3 = row[2,0] */
        ldr     ip, [pc, #(w42-.-8)]    /* ip = W4 | (W2 << 16) */
        ldr     a4, [a1, #8]            /* a4 = row[3,1] */
        idct_row COL_SHIFT
        ldmfd   sp!, {a2, a3}
        idct_finish_shift_sat COL_SHIFT

        strb    v1, [a2], a3
        strb    v2, [a2], a3
        strb    v3, [a2], a3
        strb    v4, [a2], a3
        strb    fp, [a2], a3
        strb    v7, [a2], a3
        strb    v6, [a2], a3
        strb    v5, [a2], a3

        sub     a2, a2, a3, lsl #3      /* restore a2; the caller advances it */

        ldr     pc, [sp], #4
        .endfunc
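
/*
  Rough C equivalent of the saturate-and-store step above (sketch only;
  col[] stands for the eight finished column values before the shift):

      for (int i = 0; i < 8; i++)
          dest[i * line_size] = av_clip_uint8(col[i] >> COL_SHIFT);
*/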

/*
  Compute IDCT of single column, read as row, add/store saturated 8-bit.
  a1 = source
  a2 = dest
  a3 = line size
*/
        .align
        .func   idct_col_add_armv6
idct_col_add_armv6:
        stmfd   sp!, {a2, a3, lr}

        ldr     a3, [a1]                /* a3 = row[2,0] */
        ldr     ip, [pc, #(w42-.-8)]    /* ip = W4 | (W2 << 16) */
        ldr     a4, [a1, #8]            /* a4 = row[3,1] */
        idct_row COL_SHIFT
        ldmfd   sp!, {a2, a3}
        idct_finish

        /* interleave pixel loads with the add/saturate/store work;
           the loads fetch dest[0 .. 7*line_size] */
        ldrb    a4, [a2]                /* a4 = dest[0] */
        ldrb    v4, [a2, a3]            /* v4 = dest[line_size] */
        ldrb    fp, [a2, a3, lsl #2]    /* dest[4*line_size] (reloaded below) */
        add     ip, a4, ip, asr #COL_SHIFT /* dest[0] + ((A0+B0) >> COL_SHIFT) */
        usat    ip, #8, ip
        add     v1, v4, v1, asr #COL_SHIFT
        strb    ip, [a2], a3            /* store row 0 */
        ldrb    ip, [a2, a3]            /* ip = dest[2*line_size] */
        usat    v1, #8, v1
        ldrb    fp, [a2, a3, lsl #2]    /* fp = dest[5*line_size] */
        add     v2, ip, v2, asr #COL_SHIFT
        usat    v2, #8, v2
        strb    v1, [a2], a3            /* store row 1 */
        ldrb    a4, [a2, a3]            /* a4 = dest[3*line_size] */
        ldrb    ip, [a2, a3, lsl #2]    /* ip = dest[6*line_size] */
        strb    v2, [a2], a3            /* store row 2 */
        ldrb    v4, [a2, a3]            /* v4 = dest[4*line_size] */
        ldrb    v1, [a2, a3, lsl #2]    /* v1 = dest[7*line_size] */
        add     v3, a4, v3, asr #COL_SHIFT
        usat    v3, #8, v3
        add     v7, v4, v7, asr #COL_SHIFT
        usat    v7, #8, v7
        add     v6, fp, v6, asr #COL_SHIFT
        usat    v6, #8, v6
        add     v5, ip, v5, asr #COL_SHIFT
        usat    v5, #8, v5
        add     lr, v1, lr, asr #COL_SHIFT
        usat    lr, #8, lr
        strb    v3, [a2], a3            /* store rows 3..7 */
        strb    v7, [a2], a3
        strb    v6, [a2], a3
        strb    v5, [a2], a3
        strb    lr, [a2], a3

        sub     a2, a2, a3, lsl #3      /* restore a2; the caller advances it */

        ldr     pc, [sp], #4
        .endfunc
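
/*
  Rough C equivalent of the add variant (sketch only; col[] stands for the
  eight unshifted A +/- B values produced by idct_finish):

      for (int i = 0; i < 8; i++)
          dest[i * line_size] =
              av_clip_uint8(dest[i * line_size] + (col[i] >> COL_SHIFT));
*/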

/*
  Compute 8 IDCT row transforms.
  func = IDCT row->col function
  width = width of columns in bytes

  The source pointer advances by two 16-byte rows per call and steps back to
  row 1 after the fourth call, so the rows are visited in the order
  0,2,4,6,1,3,5,7; the final sub returns a1 to its entry value.
*/
        .macro idct_rows func width
        bl      \func
        add     a1, a1, #(16*2)
        add     a2, a2, #\width
        bl      \func
        add     a1, a1, #(16*2)
        add     a2, a2, #\width
        bl      \func
        add     a1, a1, #(16*2)
        add     a2, a2, #\width
        bl      \func
        sub     a1, a1, #(16*5)
        add     a2, a2, #\width
        bl      \func
        add     a1, a1, #(16*2)
        add     a2, a2, #\width
        bl      \func
        add     a1, a1, #(16*2)
        add     a2, a2, #\width
        bl      \func
        add     a1, a1, #(16*2)
        add     a2, a2, #\width
        bl      \func

        sub     a1, a1, #(16*7)
        .endm
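
/*
  Why the interleaved order works: the row pass stores output element j of a
  source row into temp row {0,2,4,6,1,3,5,7}[j], and the column pass walks
  the temp rows in that same interleaved order while reading each row with
  the interleaved row[2,0]/row[3,1] loads.  The permutations cancel, so the
  column pass effectively sees natural-order columns of the row-transformed
  block and writes its results in natural order.
*/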

        .align
        .global ff_simple_idct_armv6
        .func   ff_simple_idct_armv6
/* void ff_simple_idct_armv6(DCTELEM *data); */
ff_simple_idct_armv6:
        stmfd   sp!, {v1, v2, v3, v4, v5, v6, v7, fp, lr}
        sub     sp, sp, #128            /* 8x8 int16 temporary block */

        mov     a2, sp
        idct_rows idct_row_armv6, 2     /* row pass:    data -> temp */
        mov     a2, a1
        mov     a1, sp
        idct_rows idct_col_armv6, 2     /* column pass: temp -> data */

        add     sp, sp, #128
        ldmfd   sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
        .endfunc
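
/*
  Sketch of the equivalent two-pass structure in C, ignoring the interleaved
  visiting order handled by idct_rows (idct_row1d/idct_col1d are stand-ins
  for the helpers above, not real symbols):

      int16_t tmp[8 * 8];                    // the 128-byte stack buffer
      for (int i = 0; i < 8; i++)            // rows of data -> columns of tmp
          idct_row1d(&tmp[i], &data[8 * i]);
      for (int i = 0; i < 8; i++)            // rows of tmp -> columns of data
          idct_col1d(&data[i], &tmp[8 * i]);
*/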

        .align
        .global ff_simple_idct_add_armv6
        .func   ff_simple_idct_add_armv6
/* void ff_simple_idct_add_armv6(uint8_t *dest, int line_size, DCTELEM *data); */
ff_simple_idct_add_armv6:
        stmfd   sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr}
        sub     sp, sp, #128            /* 8x8 int16 temporary block */

        mov     a1, a3
        mov     a2, sp
        idct_rows idct_row_armv6, 2     /* row pass:    data -> temp */
        mov     a1, sp
        ldr     a2, [sp, #128]          /* a2 = dest */
        ldr     a3, [sp, #(128+4)]      /* a3 = line_size */
        idct_rows idct_col_add_armv6, 1 /* column pass: add to dest pixels */

        add     sp, sp, #(128+8)
        ldmfd   sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
        .endfunc

        .align
        .global ff_simple_idct_put_armv6
        .func   ff_simple_idct_put_armv6
/* void ff_simple_idct_put_armv6(uint8_t *dest, int line_size, DCTELEM *data); */
ff_simple_idct_put_armv6:
        stmfd   sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr}
        sub     sp, sp, #128            /* 8x8 int16 temporary block */

        mov     a1, a3
        mov     a2, sp
        idct_rows idct_row_armv6, 2     /* row pass:    data -> temp */
        mov     a1, sp
        ldr     a2, [sp, #128]          /* a2 = dest */
        ldr     a3, [sp, #(128+4)]      /* a3 = line_size */
        idct_rows idct_col_put_armv6, 1 /* column pass: write dest pixels */

        add     sp, sp, #(128+8)
        ldmfd   sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
        .endfunc
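
/*
  The three entry points follow the usual FFmpeg idct / idct_add / idct_put
  prototypes, so they can be hooked into the DSP function pointers the same
  way as the C simple_idct; the registration itself lives outside this file
  (e.g. in the ARM dsputil init code) and is not shown here.
*/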