optimize IDCT of rows with mostly zero coefficients
libav.git: libavcodec/armv4l/simple_idct_armv6.S
/*
 * Simple IDCT
 *
 * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2007 Mans Rullgard <mru@inprovide.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define W1  22725   /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W2  21407   /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W3  19266   /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W4  16383   /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W5  12873   /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W6   8867   /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W7   4520   /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define ROW_SHIFT 11
#define COL_SHIFT 20
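/*
   Note (editorial): each Wi is cos(i*M_PI/16)*sqrt(2) in 2.14 fixed point,
   so the two passes scale the data by roughly 2^29; ROW_SHIFT + COL_SHIFT
   = 31 removes that scaling and leaves the usual 1/4 normalisation of the
   8x8 IDCT.
*/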

#define W13 (W1 | (W3 << 16))
#define W26 (W2 | (W6 << 16))
#define W42 (W4 | (W2 << 16))
#define W42n (-W4&0xffff | (-W2 << 16))
#define W46 (W4 | (W6 << 16))
#define W57 (W5 | (W7 << 16))
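/*
   The constants above pack two 16-bit coefficients into one word so the
   ARMv6 dual 16-bit multiplies can be used, e.g. smuad of row[3,1] with
   W13 computes W1*row[1] + W3*row[3] in a single instruction.
*/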

        .text
        .align
w13:    .long W13
w26:    .long W26
w42:    .long W42
w42n:   .long W42n
w46:    .long W46
w57:    .long W57

/*
   Compute partial IDCT of single row.
   shift = left-shift amount
   a1    = source address

   Output in registers v1--v8
*/
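/*
   Editorial note: the even/odd decomposition computed below matches the C
   simple_idct:

     A0 = W4*row[0] + W2*row[2] + W4*row[4] + W6*row[6]
     A1 = W4*row[0] + W6*row[2] - W4*row[4] - W2*row[6]
     A2 = W4*row[0] - W6*row[2] - W4*row[4] + W2*row[6]
     A3 = W4*row[0] - W2*row[2] + W4*row[4] - W6*row[6]
     B0 = W1*row[1] + W3*row[3] + W5*row[5] + W7*row[7]
     B1 = W3*row[1] - W7*row[3] - W1*row[5] - W5*row[7]
     B2 = W5*row[1] - W1*row[3] + W7*row[5] + W3*row[7]
     B3 = W7*row[1] - W5*row[3] + W3*row[5] - W1*row[7]

   As the load comments indicate, the eight coefficients of a row are
   stored in the interleaved order 0,2,4,6,1,3,5,7 (even columns first),
   which keeps each pair needed by the dual multiplies in one 32-bit word.
*/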
.macro idct_row shift
        ldr    a3, [a1]               /* a3 = row[2,0] */
        ldr    ip, [pc, #(w42-.-8)]   /* ip = W4 | (W2 << 16) */
        ldr    lr, [pc, #(w46-.-8)]   /* lr = W4 | (W6 << 16) */
        ldr    a4, [a1, #8]           /* a4 = row[3,1] */
        mov    a2, #(1<<(\shift-1))
        smlad  v1, a3, ip, a2
        smlsd  v4, a3, ip, a2
        ldr    ip, [pc, #(w13-.-8)]   /* ip = W1 | (W3 << 16) */
        ldr    v7, [pc, #(w57-.-8)]   /* v7 = W5 | (W7 << 16) */
        smlad  v2, a3, lr, a2
        smlsd  v3, a3, lr, a2

        smuad  v5, a4, ip             /* v5 = B0 = W1*row[1] + W3*row[3] */
        smusdx fp, a4, v7             /* fp = B3 = W7*row[1] - W5*row[3] */
        ldr    lr, [a1, #12]          /* lr = row[7,5] */
        pkhtb  a3, ip, v7, asr #16    /* a3 = W7 | (W3 << 16) */
        pkhbt  a2, ip, v7, lsl #16    /* a2 = W1 | (W5 << 16) */
        smusdx v6, a3, a4             /* v6 = -B1 = W7*row[3] - W3*row[1] */
        smlad  v5, lr, v7, v5         /* B0 += W5*row[5] + W7*row[7] */
        smusdx v7, a4, a2             /* v7 = B2 = W5*row[1] - W1*row[3] */

        ldr    a4, [pc, #(w42n-.-8)]  /* a4 = -W4 | (-W2 << 16) */
        smlad  v7, lr, a3, v7         /* B2 += W7*row[5] + W3*row[7] */
        ldr    a3, [a1, #4]           /* a3 = row[6,4] */
        smlsdx fp, lr, ip, fp         /* B3 += W3*row[5] - W1*row[7] */
        ldr    ip, [pc, #(w46-.-8)]   /* ip = W4 | (W6 << 16) */
        smlad  v6, lr, a2, v6         /* B1 -= W1*row[5] + W5*row[7] */

        smlad  v2, a3, a4, v2         /* A1 += -W4*row[4] - W2*row[6] */
        smlsd  v3, a3, a4, v3         /* A2 += -W4*row[4] + W2*row[6] */
        smlad  v1, a3, ip, v1         /* A0 +=  W4*row[4] + W6*row[6] */
        smlsd  v4, a3, ip, v4         /* A3 +=  W4*row[4] - W6*row[6] */
.endm

/*
   Compute partial IDCT of half row.
   shift = left-shift amount
   a3    = row[2,0]
   a4    = row[3,1]

   Output in registers v1--v8
*/
.macro idct_row4 shift
        ldr    ip, [pc, #(w42-.-8)]   /* ip = W4 | (W2 << 16) */
        ldr    lr, [pc, #(w46-.-8)]   /* lr = W4 | (W6 << 16) */
        ldr    v7, [pc, #(w57-.-8)]   /* v7 = W5 | (W7 << 16) */
        mov    a2, #(1<<(\shift-1))
        smlad  v1, a3, ip, a2
        smlsd  v4, a3, ip, a2
        ldr    ip, [pc, #(w13-.-8)]   /* ip = W1 | (W3 << 16) */
        smlad  v2, a3, lr, a2
        smlsd  v3, a3, lr, a2
        smusdx fp, a4, v7             /* fp = B3 = W7*row[1] - W5*row[3] */
        smuad  v5, a4, ip             /* v5 = B0 = W1*row[1] + W3*row[3] */
        pkhtb  a3, ip, v7, asr #16    /* a3 = W7 | (W3 << 16) */
        pkhbt  a2, ip, v7, lsl #16    /* a2 = W1 | (W5 << 16) */
        smusdx v6, a3, a4             /* v6 = -B1 = W7*row[3] - W3*row[1] */
        smusdx v7, a4, a2             /* v7 = B2 = W5*row[1] - W1*row[3] */
.endm

/*
   Compute final part of IDCT single row without shift.
   Input in registers v1--v8
   Output in registers ip, v1--v3, lr, v5--v7
*/
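/*
   Note: v6 holds -B1 on entry (see idct_row), so the A1/B1 pair below uses
   sub for the sum and add for the difference.
*/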
.macro idct_finish
        add    ip, v1, v5             /* ip = A0 + B0 */
        sub    lr, v1, v5             /* lr = A0 - B0 */
        sub    v1, v2, v6             /* v1 = A1 + B1 */
        add    v5, v2, v6             /* v5 = A1 - B1 */
        add    v2, v3, v7             /* v2 = A2 + B2 */
        sub    v6, v3, v7             /* v6 = A2 - B2 */
        add    v3, v4, fp             /* v3 = A3 + B3 */
        sub    v7, v4, fp             /* v7 = A3 - B3 */
.endm

/*
   Compute final part of IDCT single row.
   shift = right-shift amount
   Input/output in registers v1--v8
*/
.macro idct_finish_shift shift
        add    a4, v1, v5             /* a4 = A0 + B0 */
        sub    a3, v1, v5             /* a3 = A0 - B0 */
        mov    v1, a4, asr #\shift
        mov    v5, a3, asr #\shift

        sub    a4, v2, v6             /* a4 = A1 + B1 */
        add    a3, v2, v6             /* a3 = A1 - B1 */
        mov    v2, a4, asr #\shift
        mov    v6, a3, asr #\shift

        add    a4, v3, v7             /* a4 = A2 + B2 */
        sub    a3, v3, v7             /* a3 = A2 - B2 */
        mov    v3, a4, asr #\shift
        mov    v7, a3, asr #\shift

        add    a4, v4, fp             /* a4 = A3 + B3 */
        sub    a3, v4, fp             /* a3 = A3 - B3 */
        mov    v4, a4, asr #\shift
        mov    fp, a3, asr #\shift
.endm

/*
   Compute final part of IDCT single row, saturating results at 8 bits.
   shift = right-shift amount
   Input/output in registers v1--v8
*/
.macro idct_finish_shift_sat shift
        add    a4, v1, v5             /* a4 = A0 + B0 */
        sub    ip, v1, v5             /* ip = A0 - B0 */
        usat   v1, #8, a4, asr #\shift
        usat   v5, #8, ip, asr #\shift

        sub    a4, v2, v6             /* a4 = A1 + B1 */
        add    ip, v2, v6             /* ip = A1 - B1 */
        usat   v2, #8, a4, asr #\shift
        usat   v6, #8, ip, asr #\shift

        add    a4, v3, v7             /* a4 = A2 + B2 */
        sub    ip, v3, v7             /* ip = A2 - B2 */
        usat   v3, #8, a4, asr #\shift
        usat   v7, #8, ip, asr #\shift

        add    a4, v4, fp             /* a4 = A3 + B3 */
        sub    ip, v4, fp             /* ip = A3 - B3 */
        usat   v4, #8, a4, asr #\shift
        usat   fp, #8, ip, asr #\shift
.endm

/*
   Compute IDCT of single row, storing as column.
   a1 = source
   a2 = dest
*/
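/*
   Rows with mostly zero coefficients take shortcuts: if everything except
   row[0] is zero, the result is simply row[0] << 3 replicated to all eight
   outputs (label 1); if only row[0..3] may be non-zero, the cheaper
   half-row transform idct_row4 is used (label 2).
*/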
        .align
        .func idct_row_armv6
idct_row_armv6:
        str    lr, [sp, #-4]!

        ldr    lr, [a1, #12]          /* lr = row[7,5] */
        ldr    ip, [a1, #4]           /* ip = row[6,4] */
        ldr    a4, [a1, #8]           /* a4 = row[3,1] */
        ldr    a3, [a1]               /* a3 = row[2,0] */
        orrs   lr, lr, ip             /* Z set if row[4..7] == 0 */
        cmpeq  lr, a4                 /* ... and row[1], row[3] == 0 */
        cmpeq  lr, a3, lsr #16        /* ... and row[2] == 0 */
        beq    1f                     /* only row[0] non-zero: DC shortcut */
        str    a2, [sp, #-4]!
        cmp    lr, #0
        beq    2f                     /* row[4..7] zero: half-row transform */

        idct_row   ROW_SHIFT
        b      3f

2:      idct_row4  ROW_SHIFT

3:      ldr    a2, [sp], #4
        idct_finish_shift ROW_SHIFT

        strh   v1, [a2]
        strh   v2, [a2, #(16*2)]
        strh   v3, [a2, #(16*4)]
        strh   v4, [a2, #(16*6)]
        strh   fp, [a2, #(16*1)]
        strh   v7, [a2, #(16*3)]
        strh   v6, [a2, #(16*5)]
        strh   v5, [a2, #(16*7)]

        ldr    pc, [sp], #4

1:      mov    a3, a3, lsl #3
        strh   a3, [a2]
        strh   a3, [a2, #(16*2)]
        strh   a3, [a2, #(16*4)]
        strh   a3, [a2, #(16*6)]
        strh   a3, [a2, #(16*1)]
        strh   a3, [a2, #(16*3)]
        strh   a3, [a2, #(16*5)]
        strh   a3, [a2, #(16*7)]
        ldr    pc, [sp], #4
        .endfunc

/*
   Compute IDCT of single column, read as row.
   a1 = source
   a2 = dest
*/
        .align
        .func idct_col_armv6
idct_col_armv6:
        stmfd  sp!, {a2, lr}

        idct_row COL_SHIFT
        ldr    a2, [sp], #4
        idct_finish_shift COL_SHIFT

        strh   v1, [a2]
        strh   v2, [a2, #(16*1)]
        strh   v3, [a2, #(16*2)]
        strh   v4, [a2, #(16*3)]
        strh   fp, [a2, #(16*4)]
        strh   v7, [a2, #(16*5)]
        strh   v6, [a2, #(16*6)]
        strh   v5, [a2, #(16*7)]

        ldr    pc, [sp], #4
        .endfunc

/*
   Compute IDCT of single column, read as row, store saturated 8-bit.
   a1 = source
   a2 = dest
   a3 = line size
*/
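/*
   The eight results are clamped to 0..255 and written down one column of
   the destination with stride a3; a2 is rewound by 8 lines afterwards so
   the caller only needs to step it by one pixel per column.
*/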
        .align
        .func idct_col_put_armv6
idct_col_put_armv6:
        stmfd  sp!, {a2, a3, lr}

        idct_row COL_SHIFT
        ldmfd  sp!, {a2, a3}
        idct_finish_shift_sat COL_SHIFT

        strb   v1, [a2], a3
        strb   v2, [a2], a3
        strb   v3, [a2], a3
        strb   v4, [a2], a3
        strb   fp, [a2], a3
        strb   v7, [a2], a3
        strb   v6, [a2], a3
        strb   v5, [a2], a3

        sub    a2, a2, a3, lsl #3

        ldr    pc, [sp], #4
        .endfunc

/*
   Compute IDCT of single column, read as row, add/store saturated 8-bit.
   a1 = source
   a2 = dest
   a3 = line size
*/
        .align
        .func idct_col_add_armv6
idct_col_add_armv6:
        stmfd  sp!, {a2, a3, lr}

        idct_row COL_SHIFT
        ldmfd  sp!, {a2, a3}
        idct_finish

        ldrb   a4, [a2]               /* a4 = dest[0] */
        ldrb   v4, [a2, a3]           /* v4 = dest[1] */
        ldrb   fp, [a2, a3, lsl #2]
        add    ip, a4, ip, asr #COL_SHIFT
        usat   ip, #8, ip
        add    v1, v4, v1, asr #COL_SHIFT
        strb   ip, [a2], a3           /* store dest[0] */
        ldrb   ip, [a2, a3]           /* ip = dest[2] */
        usat   v1, #8, v1
        ldrb   fp, [a2, a3, lsl #2]   /* fp = dest[5] */
        add    v2, ip, v2, asr #COL_SHIFT
        usat   v2, #8, v2
        strb   v1, [a2], a3           /* store dest[1] */
        ldrb   a4, [a2, a3]           /* a4 = dest[3] */
        ldrb   ip, [a2, a3, lsl #2]   /* ip = dest[6] */
        strb   v2, [a2], a3           /* store dest[2] */
        ldrb   v4, [a2, a3]           /* v4 = dest[4] */
        ldrb   v1, [a2, a3, lsl #2]   /* v1 = dest[7] */
        add    v3, a4, v3, asr #COL_SHIFT
        usat   v3, #8, v3
        add    v7, v4, v7, asr #COL_SHIFT
        usat   v7, #8, v7
        add    v6, fp, v6, asr #COL_SHIFT
        usat   v6, #8, v6
        add    v5, ip, v5, asr #COL_SHIFT
        usat   v5, #8, v5
        add    lr, v1, lr, asr #COL_SHIFT
        usat   lr, #8, lr
        strb   v3, [a2], a3           /* store dest[3] */
        strb   v7, [a2], a3           /* store dest[4] */
        strb   v6, [a2], a3           /* store dest[5] */
        strb   v5, [a2], a3           /* store dest[6] */
        strb   lr, [a2], a3           /* store dest[7] */

        sub    a2, a2, a3, lsl #3

        ldr    pc, [sp], #4
        .endfunc

/*
   Compute 8 IDCT row transforms.
   func  = IDCT row->col function
   width = width of columns in bytes
*/
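/*
   The a1 adjustments below walk the 16-byte source rows in the order
   0, 2, 4, 6, 1, 3, 5, 7 while a2 advances by one column per call, which
   produces (and later consumes) the interleaved intermediate layout
   expected by idct_row.
*/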
.macro idct_rows func width
        bl     \func
        add    a1, a1, #(16*2)
        add    a2, a2, #\width
        bl     \func
        add    a1, a1, #(16*2)
        add    a2, a2, #\width
        bl     \func
        add    a1, a1, #(16*2)
        add    a2, a2, #\width
        bl     \func
        sub    a1, a1, #(16*5)
        add    a2, a2, #\width
        bl     \func
        add    a1, a1, #(16*2)
        add    a2, a2, #\width
        bl     \func
        add    a1, a1, #(16*2)
        add    a2, a2, #\width
        bl     \func
        add    a1, a1, #(16*2)
        add    a2, a2, #\width
        bl     \func

        sub    a1, a1, #(16*7)
.endm

        .align
        .global ff_simple_idct_armv6
        .func ff_simple_idct_armv6
/* void ff_simple_idct_armv6(DCTELEM *data); */
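/*
   Two passes over a 128-byte (8x8 int16) scratch buffer on the stack: the
   row transforms write their results into the buffer transposed, then the
   column transforms read it back and store into the original block.
*/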
ff_simple_idct_armv6:
        stmfd  sp!, {v1, v2, v3, v4, v5, v6, v7, fp, lr}
        sub    sp, sp, #128

        mov    a2, sp
        idct_rows idct_row_armv6, 2
        mov    a2, a1
        mov    a1, sp
        idct_rows idct_col_armv6, 2

        add    sp, sp, #128
        ldmfd  sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
        .endfunc

        .align
        .global ff_simple_idct_add_armv6
        .func ff_simple_idct_add_armv6
/* ff_simple_idct_add_armv6(uint8_t *dest, int line_size, DCTELEM *data); */
ff_simple_idct_add_armv6:
        stmfd  sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr}
        sub    sp, sp, #128

        mov    a1, a3
        mov    a2, sp
        idct_rows idct_row_armv6, 2
        mov    a1, sp
        ldr    a2, [sp, #128]         /* a2 = saved dest */
        ldr    a3, [sp, #(128+4)]     /* a3 = saved line_size */
        idct_rows idct_col_add_armv6, 1

        add    sp, sp, #(128+8)
        ldmfd  sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
        .endfunc

        .align
        .global ff_simple_idct_put_armv6
        .func ff_simple_idct_put_armv6
/* ff_simple_idct_put_armv6(uint8_t *dest, int line_size, DCTELEM *data); */
ff_simple_idct_put_armv6:
        stmfd  sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr}
        sub    sp, sp, #128

        mov    a1, a3
        mov    a2, sp
        idct_rows idct_row_armv6, 2
        mov    a1, sp
        ldr    a2, [sp, #128]         /* a2 = saved dest */
        ldr    a3, [sp, #(128+4)]     /* a3 = saved line_size */
        idct_rows idct_col_put_armv6, 1

        add    sp, sp, #(128+8)
        ldmfd  sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
        .endfunc