/*
 * Copyright (C) 2003 James Klicman <james@klicman.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif
#include "libavutil/common.h"
#include "libavcodec/dsputil.h"
#include "dsputil_ppc.h"
#include "dsputil_altivec.h"

#define vs16(v) ((vector signed short)(v))
#define vs32(v) ((vector signed int)(v))
#define vu8(v)  ((vector unsigned char)(v))
#define vu16(v) ((vector unsigned short)(v))
#define vu32(v) ((vector unsigned int)(v))


#define C1 0.98078525066375732421875000 /* cos(1*PI/16) */
#define C2 0.92387950420379638671875000 /* cos(2*PI/16) */
#define C3 0.83146959543228149414062500 /* cos(3*PI/16) */
#define C4 0.70710676908493041992187500 /* cos(4*PI/16) */
#define C5 0.55557024478912353515625000 /* cos(5*PI/16) */
#define C6 0.38268342614173889160156250 /* cos(6*PI/16) */
#define C7 0.19509032368659973144531250 /* cos(7*PI/16) */
#define SQRT_2 1.41421353816986083984375000 /* sqrt(2) */


#define W0 -(2 * C2)
#define W1  (2 * C6)
#define W2 (SQRT_2 * C6)
#define W3 (SQRT_2 * C3)
#define W4 (SQRT_2 * (-C1 + C3 + C5 - C7))
#define W5 (SQRT_2 * ( C1 + C3 - C5 + C7))
#define W6 (SQRT_2 * ( C1 + C3 + C5 - C7))
#define W7 (SQRT_2 * ( C1 + C3 - C5 - C7))
#define W8 (SQRT_2 * ( C7 - C3))
#define W9 (SQRT_2 * (-C1 - C3))
#define WA (SQRT_2 * (-C3 - C5))
#define WB (SQRT_2 * ( C5 - C3))
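
/* Butterfly multipliers derived from the cosine table above: W0..W2 feed
 * the even half of each 1-D pass (outputs 2 and 6), W3..WB the odd half
 * (outputs 1, 3, 5 and 7). */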


static const vector float fdctconsts[3] = {
    { W0, W1, W2, W3 },
    { W4, W5, W6, W7 },
    { W8, W9, WA, WB }
};

#define LD_W0 vec_splat(cnsts0, 0)
#define LD_W1 vec_splat(cnsts0, 1)
#define LD_W2 vec_splat(cnsts0, 2)
#define LD_W3 vec_splat(cnsts0, 3)
#define LD_W4 vec_splat(cnsts1, 0)
#define LD_W5 vec_splat(cnsts1, 1)
#define LD_W6 vec_splat(cnsts1, 2)
#define LD_W7 vec_splat(cnsts1, 3)
#define LD_W8 vec_splat(cnsts2, 0)
#define LD_W9 vec_splat(cnsts2, 1)
#define LD_WA vec_splat(cnsts2, 2)
#define LD_WB vec_splat(cnsts2, 3)
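
/* Each LD_Wn broadcasts one lane of a constant vector that is already held
 * in a register (cnsts0..cnsts2), so all twelve weights cost only three
 * loads. */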


#define FDCTROW(b0,b1,b2,b3,b4,b5,b6,b7) /* {{{ */ \
    x0 = vec_add(b0, b7);               /* x0 = b0 + b7; */ \
    x7 = vec_sub(b0, b7);               /* x7 = b0 - b7; */ \
    x1 = vec_add(b1, b6);               /* x1 = b1 + b6; */ \
    x6 = vec_sub(b1, b6);               /* x6 = b1 - b6; */ \
    x2 = vec_add(b2, b5);               /* x2 = b2 + b5; */ \
    x5 = vec_sub(b2, b5);               /* x5 = b2 - b5; */ \
    x3 = vec_add(b3, b4);               /* x3 = b3 + b4; */ \
    x4 = vec_sub(b3, b4);               /* x4 = b3 - b4; */ \
    \
    b7 = vec_add(x0, x3);               /* b7 = x0 + x3; */ \
    b1 = vec_add(x1, x2);               /* b1 = x1 + x2; */ \
    b0 = vec_add(b7, b1);               /* b0 = b7 + b1; */ \
    b4 = vec_sub(b7, b1);               /* b4 = b7 - b1; */ \
    \
    b2 = vec_sub(x0, x3);               /* b2 = x0 - x3; */ \
    b6 = vec_sub(x1, x2);               /* b6 = x1 - x2; */ \
    b5 = vec_add(b6, b2);               /* b5 = b6 + b2; */ \
    cnst = LD_W2; \
    b5 = vec_madd(cnst, b5, mzero);     /* b5 = b5 * W2; */ \
    cnst = LD_W1; \
    b2 = vec_madd(cnst, b2, b5);        /* b2 = b5 + b2 * W1; */ \
    cnst = LD_W0; \
    b6 = vec_madd(cnst, b6, b5);        /* b6 = b5 + b6 * W0; */ \
    \
    x0 = vec_add(x4, x7);               /* x0 = x4 + x7; */ \
    x1 = vec_add(x5, x6);               /* x1 = x5 + x6; */ \
    x2 = vec_add(x4, x6);               /* x2 = x4 + x6; */ \
    x3 = vec_add(x5, x7);               /* x3 = x5 + x7; */ \
    x8 = vec_add(x2, x3);               /* x8 = x2 + x3; */ \
    cnst = LD_W3; \
    x8 = vec_madd(cnst, x8, mzero);     /* x8 = x8 * W3; */ \
    \
    cnst = LD_W8; \
    x0 = vec_madd(cnst, x0, mzero);     /* x0 *= W8; */ \
    cnst = LD_W9; \
    x1 = vec_madd(cnst, x1, mzero);     /* x1 *= W9; */ \
    cnst = LD_WA; \
    x2 = vec_madd(cnst, x2, x8);        /* x2 = x2 * WA + x8; */ \
    cnst = LD_WB; \
    x3 = vec_madd(cnst, x3, x8);        /* x3 = x3 * WB + x8; */ \
    \
    cnst = LD_W4; \
    b7 = vec_madd(cnst, x4, x0);        /* b7 = x4 * W4 + x0; */ \
    cnst = LD_W5; \
    b5 = vec_madd(cnst, x5, x1);        /* b5 = x5 * W5 + x1; */ \
    cnst = LD_W6; \
    b3 = vec_madd(cnst, x6, x1);        /* b3 = x6 * W6 + x1; */ \
    cnst = LD_W7; \
    b1 = vec_madd(cnst, x7, x0);        /* b1 = x7 * W7 + x0; */ \
    \
    b7 = vec_add(b7, x2);               /* b7 = b7 + x2; */ \
    b5 = vec_add(b5, x3);               /* b5 = b5 + x3; */ \
    b3 = vec_add(b3, x2);               /* b3 = b3 + x2; */ \
    b1 = vec_add(b1, x3);               /* b1 = b1 + x3; */ \
    /* }}} */
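
/* FDCTROW computes one 1-D 8-point FDCT across eight vectors at a time: the
 * opening adds/subs are the first butterfly stage, b0/b4 fall out of the
 * even half directly, b2/b6 take one multiply pass (W0..W2), and the odd
 * outputs b1/b3/b5/b7 go through the longer W3..WB chain. FDCTCOL below is
 * the identical network for the column pass. */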

#define FDCTCOL(b0,b1,b2,b3,b4,b5,b6,b7) /* {{{ */ \
    x0 = vec_add(b0, b7);               /* x0 = b0 + b7; */ \
    x7 = vec_sub(b0, b7);               /* x7 = b0 - b7; */ \
    x1 = vec_add(b1, b6);               /* x1 = b1 + b6; */ \
    x6 = vec_sub(b1, b6);               /* x6 = b1 - b6; */ \
    x2 = vec_add(b2, b5);               /* x2 = b2 + b5; */ \
    x5 = vec_sub(b2, b5);               /* x5 = b2 - b5; */ \
    x3 = vec_add(b3, b4);               /* x3 = b3 + b4; */ \
    x4 = vec_sub(b3, b4);               /* x4 = b3 - b4; */ \
    \
    b7 = vec_add(x0, x3);               /* b7 = x0 + x3; */ \
    b1 = vec_add(x1, x2);               /* b1 = x1 + x2; */ \
    b0 = vec_add(b7, b1);               /* b0 = b7 + b1; */ \
    b4 = vec_sub(b7, b1);               /* b4 = b7 - b1; */ \
    \
    b2 = vec_sub(x0, x3);               /* b2 = x0 - x3; */ \
    b6 = vec_sub(x1, x2);               /* b6 = x1 - x2; */ \
    b5 = vec_add(b6, b2);               /* b5 = b6 + b2; */ \
    cnst = LD_W2; \
    b5 = vec_madd(cnst, b5, mzero);     /* b5 = b5 * W2; */ \
    cnst = LD_W1; \
    b2 = vec_madd(cnst, b2, b5);        /* b2 = b5 + b2 * W1; */ \
    cnst = LD_W0; \
    b6 = vec_madd(cnst, b6, b5);        /* b6 = b5 + b6 * W0; */ \
    \
    x0 = vec_add(x4, x7);               /* x0 = x4 + x7; */ \
    x1 = vec_add(x5, x6);               /* x1 = x5 + x6; */ \
    x2 = vec_add(x4, x6);               /* x2 = x4 + x6; */ \
    x3 = vec_add(x5, x7);               /* x3 = x5 + x7; */ \
    x8 = vec_add(x2, x3);               /* x8 = x2 + x3; */ \
    cnst = LD_W3; \
    x8 = vec_madd(cnst, x8, mzero);     /* x8 = x8 * W3; */ \
    \
    cnst = LD_W8; \
    x0 = vec_madd(cnst, x0, mzero);     /* x0 *= W8; */ \
    cnst = LD_W9; \
    x1 = vec_madd(cnst, x1, mzero);     /* x1 *= W9; */ \
    cnst = LD_WA; \
    x2 = vec_madd(cnst, x2, x8);        /* x2 = x2 * WA + x8; */ \
    cnst = LD_WB; \
    x3 = vec_madd(cnst, x3, x8);        /* x3 = x3 * WB + x8; */ \
    \
    cnst = LD_W4; \
    b7 = vec_madd(cnst, x4, x0);        /* b7 = x4 * W4 + x0; */ \
    cnst = LD_W5; \
    b5 = vec_madd(cnst, x5, x1);        /* b5 = x5 * W5 + x1; */ \
    cnst = LD_W6; \
    b3 = vec_madd(cnst, x6, x1);        /* b3 = x6 * W6 + x1; */ \
    cnst = LD_W7; \
    b1 = vec_madd(cnst, x7, x0);        /* b1 = x7 * W7 + x0; */ \
    \
    b7 = vec_add(b7, x2);               /* b7 += x2; */ \
    b5 = vec_add(b5, x3);               /* b5 += x3; */ \
    b3 = vec_add(b3, x2);               /* b3 += x2; */ \
    b1 = vec_add(b1, x3);               /* b1 += x3; */ \
    /* }}} */



/* two dimensional discrete cosine transform */

void fdct_altivec(int16_t *block)
{
    POWERPC_PERF_DECLARE(altivec_fdct, 1);
    vector signed short *bp;
    const vector float *cp;
    vector float b00, b10, b20, b30, b40, b50, b60, b70;
    vector float b01, b11, b21, b31, b41, b51, b61, b71;
    vector float mzero, cnst, cnsts0, cnsts1, cnsts2;
    vector float x0, x1, x2, x3, x4, x5, x6, x7, x8;

    POWERPC_PERF_START_COUNT(altivec_fdct, 1);


    /* setup constants {{{ */
    /* mzero = -0.0 */
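    /* vec_splat_u32(-1) sets every bit; shifting each element left by itself
     * (31, modulo the element width) leaves only the sign bit, yielding
     * -0.0f in all four lanes without a load from memory. */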
    mzero = ((vector float)vec_splat_u32(-1));
    mzero = ((vector float)vec_sl(vu32(mzero), vu32(mzero)));
    cp = fdctconsts;
    cnsts0 = vec_ld(0, cp); cp++;
    cnsts1 = vec_ld(0, cp); cp++;
    cnsts2 = vec_ld(0, cp);
    /* }}} */


    /* 8x8 matrix transpose (vector short[8]) {{{ */
#define MERGE_S16(hl,a,b) vec_merge##hl(vs16(a), vs16(b))
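
    /* The vec_ld at byte offset 16*4 pairs row n with row n+4; three rounds
     * of vec_mergeh/vec_mergel then act as a perfect-shuffle network, which
     * transposes the 8x8 block of 16-bit coefficients in log2(8) = 3
     * passes. */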

    bp = (vector signed short*)block;
    b00 = ((vector float)vec_ld(0, bp));
    b40 = ((vector float)vec_ld(16*4, bp));
    b01 = ((vector float)MERGE_S16(h, b00, b40));
    b11 = ((vector float)MERGE_S16(l, b00, b40));
    bp++;
    b10 = ((vector float)vec_ld(0, bp));
    b50 = ((vector float)vec_ld(16*4, bp));
    b21 = ((vector float)MERGE_S16(h, b10, b50));
    b31 = ((vector float)MERGE_S16(l, b10, b50));
    bp++;
    b20 = ((vector float)vec_ld(0, bp));
    b60 = ((vector float)vec_ld(16*4, bp));
    b41 = ((vector float)MERGE_S16(h, b20, b60));
    b51 = ((vector float)MERGE_S16(l, b20, b60));
    bp++;
    b30 = ((vector float)vec_ld(0, bp));
    b70 = ((vector float)vec_ld(16*4, bp));
    b61 = ((vector float)MERGE_S16(h, b30, b70));
    b71 = ((vector float)MERGE_S16(l, b30, b70));

    x0 = ((vector float)MERGE_S16(h, b01, b41));
    x1 = ((vector float)MERGE_S16(l, b01, b41));
    x2 = ((vector float)MERGE_S16(h, b11, b51));
    x3 = ((vector float)MERGE_S16(l, b11, b51));
    x4 = ((vector float)MERGE_S16(h, b21, b61));
    x5 = ((vector float)MERGE_S16(l, b21, b61));
    x6 = ((vector float)MERGE_S16(h, b31, b71));
    x7 = ((vector float)MERGE_S16(l, b31, b71));

    b00 = ((vector float)MERGE_S16(h, x0, x4));
    b10 = ((vector float)MERGE_S16(l, x0, x4));
    b20 = ((vector float)MERGE_S16(h, x1, x5));
    b30 = ((vector float)MERGE_S16(l, x1, x5));
    b40 = ((vector float)MERGE_S16(h, x2, x6));
    b50 = ((vector float)MERGE_S16(l, x2, x6));
    b60 = ((vector float)MERGE_S16(h, x3, x7));
    b70 = ((vector float)MERGE_S16(l, x3, x7));

#undef MERGE_S16
    /* }}} */


/* Some of the initial calculations can be done as vector short before
 * conversion to vector float. The following code section takes advantage
 * of this.
 */
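/* (The first two butterfly stages are pure adds and subtracts, so they can
 * run on the 16-bit integers directly; with input in the usual signed 9-bit
 * range the intermediate sums stay comfortably inside int16_t.) */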
#if 1
    /* fdct rows {{{ */
    x0 = ((vector float)vec_add(vs16(b00), vs16(b70)));
    x7 = ((vector float)vec_sub(vs16(b00), vs16(b70)));
    x1 = ((vector float)vec_add(vs16(b10), vs16(b60)));
    x6 = ((vector float)vec_sub(vs16(b10), vs16(b60)));
    x2 = ((vector float)vec_add(vs16(b20), vs16(b50)));
    x5 = ((vector float)vec_sub(vs16(b20), vs16(b50)));
    x3 = ((vector float)vec_add(vs16(b30), vs16(b40)));
    x4 = ((vector float)vec_sub(vs16(b30), vs16(b40)));

    b70 = ((vector float)vec_add(vs16(x0), vs16(x3)));
    b10 = ((vector float)vec_add(vs16(x1), vs16(x2)));

    b00 = ((vector float)vec_add(vs16(b70), vs16(b10)));
    b40 = ((vector float)vec_sub(vs16(b70), vs16(b10)));

#define CTF0(n) \
    b##n##1 = ((vector float)vec_unpackl(vs16(b##n##0))); \
    b##n##0 = ((vector float)vec_unpackh(vs16(b##n##0))); \
    b##n##1 = vec_ctf(vs32(b##n##1), 0); \
    b##n##0 = vec_ctf(vs32(b##n##0), 0);
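
/* CTF0 widens one row pair: vec_unpackh/vec_unpackl sign-extend the eight
 * shorts into two vectors of four int32s, and vec_ctf(v, 0) converts those
 * to float with no additional scaling. */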

    CTF0(0);
    CTF0(4);

    b20 = ((vector float)vec_sub(vs16(x0), vs16(x3)));
    b60 = ((vector float)vec_sub(vs16(x1), vs16(x2)));

    CTF0(2);
    CTF0(6);

#undef CTF0

    x0 = vec_add(b60, b20);
    x1 = vec_add(b61, b21);

    cnst = LD_W2;
    x0 = vec_madd(cnst, x0, mzero);
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_W1;
    b20 = vec_madd(cnst, b20, x0);
    b21 = vec_madd(cnst, b21, x1);
    cnst = LD_W0;
    b60 = vec_madd(cnst, b60, x0);
    b61 = vec_madd(cnst, b61, x1);

#define CTFX(x,b) \
    b##0 = ((vector float)vec_unpackh(vs16(x))); \
    b##1 = ((vector float)vec_unpackl(vs16(x))); \
    b##0 = vec_ctf(vs32(b##0), 0); \
    b##1 = vec_ctf(vs32(b##1), 0);

    CTFX(x4, b7);
    CTFX(x5, b5);
    CTFX(x6, b3);
    CTFX(x7, b1);

#undef CTFX


    x0 = vec_add(b70, b10);
    x1 = vec_add(b50, b30);
    x2 = vec_add(b70, b30);
    x3 = vec_add(b50, b10);
    x8 = vec_add(x2, x3);
    cnst = LD_W3;
    x8 = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0 = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2 = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3 = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b70 = vec_madd(cnst, b70, x0);
    cnst = LD_W5;
    b50 = vec_madd(cnst, b50, x1);
    cnst = LD_W6;
    b30 = vec_madd(cnst, b30, x1);
    cnst = LD_W7;
    b10 = vec_madd(cnst, b10, x0);

    b70 = vec_add(b70, x2);
    b50 = vec_add(b50, x3);
    b30 = vec_add(b30, x2);
    b10 = vec_add(b10, x3);


    x0 = vec_add(b71, b11);
    x1 = vec_add(b51, b31);
    x2 = vec_add(b71, b31);
    x3 = vec_add(b51, b11);
    x8 = vec_add(x2, x3);
    cnst = LD_W3;
    x8 = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0 = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2 = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3 = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b71 = vec_madd(cnst, b71, x0);
    cnst = LD_W5;
    b51 = vec_madd(cnst, b51, x1);
    cnst = LD_W6;
    b31 = vec_madd(cnst, b31, x1);
    cnst = LD_W7;
    b11 = vec_madd(cnst, b11, x0);

    b71 = vec_add(b71, x2);
    b51 = vec_add(b51, x3);
    b31 = vec_add(b31, x2);
    b11 = vec_add(b11, x3);
    /* }}} */
#else
    /* convert to float {{{ */
#define CTF(n) \
    b##n##1 = ((vector float)vec_unpackl(vs16(b##n##0))); \
    b##n##0 = ((vector float)vec_unpackh(vs16(b##n##0))); \
    b##n##1 = vec_ctf(vs32(b##n##1), 0); \
    b##n##0 = vec_ctf(vs32(b##n##0), 0);

    CTF(0);
    CTF(1);
    CTF(2);
    CTF(3);
    CTF(4);
    CTF(5);
    CTF(6);
    CTF(7);

#undef CTF
    /* }}} */

    FDCTROW(b00, b10, b20, b30, b40, b50, b60, b70);
    FDCTROW(b01, b11, b21, b31, b41, b51, b61, b71);
#endif


    /* 8x8 matrix transpose (vector float[8][2]) {{{ */
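    /* Floats pack only four per register, so each row lives in a (bN0, bN1)
     * pair and the transpose works on four 4x4 blocks: the two diagonal
     * blocks transpose in place, the two off-diagonal blocks also swap
     * places. */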
    x0 = vec_mergel(b00, b20);
    x1 = vec_mergeh(b00, b20);
    x2 = vec_mergel(b10, b30);
    x3 = vec_mergeh(b10, b30);

    b00 = vec_mergeh(x1, x3);
    b10 = vec_mergel(x1, x3);
    b20 = vec_mergeh(x0, x2);
    b30 = vec_mergel(x0, x2);

    x4 = vec_mergel(b41, b61);
    x5 = vec_mergeh(b41, b61);
    x6 = vec_mergel(b51, b71);
    x7 = vec_mergeh(b51, b71);

    b41 = vec_mergeh(x5, x7);
    b51 = vec_mergel(x5, x7);
    b61 = vec_mergeh(x4, x6);
    b71 = vec_mergel(x4, x6);

    x0 = vec_mergel(b01, b21);
    x1 = vec_mergeh(b01, b21);
    x2 = vec_mergel(b11, b31);
    x3 = vec_mergeh(b11, b31);

    x4 = vec_mergel(b40, b60);
    x5 = vec_mergeh(b40, b60);
    x6 = vec_mergel(b50, b70);
    x7 = vec_mergeh(b50, b70);

    b40 = vec_mergeh(x1, x3);
    b50 = vec_mergel(x1, x3);
    b60 = vec_mergeh(x0, x2);
    b70 = vec_mergel(x0, x2);

    b01 = vec_mergeh(x5, x7);
    b11 = vec_mergel(x5, x7);
    b21 = vec_mergeh(x4, x6);
    b31 = vec_mergel(x4, x6);
    /* }}} */


    FDCTCOL(b00, b10, b20, b30, b40, b50, b60, b70);
    FDCTCOL(b01, b11, b21, b31, b41, b51, b61, b71);


    /* round, convert back to short {{{ */
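    /* CTS undoes CTF0: vec_round rounds to nearest, vec_cts converts float
     * back to int32, and vec_pack narrows the two halves to eight shorts
     * before the row is stored. */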
#define CTS(n) \
    b##n##0 = vec_round(b##n##0); \
    b##n##1 = vec_round(b##n##1); \
    b##n##0 = ((vector float)vec_cts(b##n##0, 0)); \
    b##n##1 = ((vector float)vec_cts(b##n##1, 0)); \
    b##n##0 = ((vector float)vec_pack(vs32(b##n##0), vs32(b##n##1))); \
    vec_st(vs16(b##n##0), 0, bp);

    bp = (vector signed short*)block;
    CTS(0); bp++;
    CTS(1); bp++;
    CTS(2); bp++;
    CTS(3); bp++;
    CTS(4); bp++;
    CTS(5); bp++;
    CTS(6); bp++;
    CTS(7);

#undef CTS
    /* }}} */

    POWERPC_PERF_STOP_COUNT(altivec_fdct, 1);
}
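
/* Usage sketch (illustrative, not part of this file): the caller must hand
 * in a 16-byte aligned array of 64 coefficients, since vec_ld/vec_st ignore
 * the low four bits of an address. Something along the lines of:
 *
 *     DECLARE_ALIGNED(16, int16_t, block)[64];
 *     // fill block[] with pixel (difference) values ...
 *     fdct_altivec(block);
 *
 * In practice the function is reached through the dsputil fdct function
 * pointer rather than called directly. */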

/* vim:set foldmethod=marker foldlevel=0: */