/*
 * Copyright (C) 2003 James Klicman <james@klicman.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif
#include "libavutil/common.h"
#include "libavcodec/dsputil.h"
#include "dsputil_altivec.h"

#define vs16(v) ((vector signed short)(v))
#define vs32(v) ((vector signed int)(v))
#define vu8(v)  ((vector unsigned char)(v))
#define vu16(v) ((vector unsigned short)(v))
#define vu32(v) ((vector unsigned int)(v))


#define C1     0.98078525066375732421875000 /* cos(1 * PI / 16) */
#define C2     0.92387950420379638671875000 /* cos(2 * PI / 16) */
#define C3     0.83146959543228149414062500 /* cos(3 * PI / 16) */
#define C4     0.70710676908493041992187500 /* cos(4 * PI / 16) */
#define C5     0.55557024478912353515625000 /* cos(5 * PI / 16) */
#define C6     0.38268342614173889160156250 /* cos(6 * PI / 16) */
#define C7     0.19509032368659973144531250 /* cos(7 * PI / 16) */
#define SQRT_2 1.41421353816986083984375000 /* sqrt(2) */

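/* Butterfly weights for the FDCT, precomputed from the cosine table above.
 * They are grouped in fours so that each group fills exactly one float
 * vector of fdctconsts below and any single weight can be broadcast to all
 * lanes with vec_splat. */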
#define W0 -(2 * C2)
#define W1  (2 * C6)
#define W2 (SQRT_2 * C6)
#define W3 (SQRT_2 * C3)
#define W4 (SQRT_2 * (-C1 + C3 + C5 - C7))
#define W5 (SQRT_2 * ( C1 + C3 - C5 + C7))
#define W6 (SQRT_2 * ( C1 + C3 + C5 - C7))
#define W7 (SQRT_2 * ( C1 + C3 - C5 - C7))
#define W8 (SQRT_2 * ( C7 - C3))
#define W9 (SQRT_2 * (-C1 - C3))
#define WA (SQRT_2 * (-C3 - C5))
#define WB (SQRT_2 * ( C5 - C3))

static const vector float fdctconsts[3] = {
    { W0, W1, W2, W3 },
    { W4, W5, W6, W7 },
    { W8, W9, WA, WB }
};

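/* Broadcast element n of one of the three constant vectors into all four
 * lanes of a register. */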
#define LD_W0 vec_splat(cnsts0, 0)
#define LD_W1 vec_splat(cnsts0, 1)
#define LD_W2 vec_splat(cnsts0, 2)
#define LD_W3 vec_splat(cnsts0, 3)
#define LD_W4 vec_splat(cnsts1, 0)
#define LD_W5 vec_splat(cnsts1, 1)
#define LD_W6 vec_splat(cnsts1, 2)
#define LD_W7 vec_splat(cnsts1, 3)
#define LD_W8 vec_splat(cnsts2, 0)
#define LD_W9 vec_splat(cnsts2, 1)
#define LD_WA vec_splat(cnsts2, 2)
#define LD_WB vec_splat(cnsts2, 3)

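/* One-dimensional 8-point FDCT butterfly applied to eight whole vectors at
 * once; the trailing comment on each line gives the scalar equivalent.
 * Operates on b0..b7 in place, using x0..x8, cnst and mzero (declared by
 * the caller) as temporaries. */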
#define FDCTROW(b0,b1,b2,b3,b4,b5,b6,b7) /* {{{ */          \
    x0 = vec_add(b0, b7);            /* x0 = b0 + b7; */      \
    x7 = vec_sub(b0, b7);            /* x7 = b0 - b7; */      \
    x1 = vec_add(b1, b6);            /* x1 = b1 + b6; */      \
    x6 = vec_sub(b1, b6);            /* x6 = b1 - b6; */      \
    x2 = vec_add(b2, b5);            /* x2 = b2 + b5; */      \
    x5 = vec_sub(b2, b5);            /* x5 = b2 - b5; */      \
    x3 = vec_add(b3, b4);            /* x3 = b3 + b4; */      \
    x4 = vec_sub(b3, b4);            /* x4 = b3 - b4; */      \
                                                              \
    b7 = vec_add(x0, x3);            /* b7 = x0 + x3; */      \
    b1 = vec_add(x1, x2);            /* b1 = x1 + x2; */      \
    b0 = vec_add(b7, b1);            /* b0 = b7 + b1; */      \
    b4 = vec_sub(b7, b1);            /* b4 = b7 - b1; */      \
                                                              \
    b2 = vec_sub(x0, x3);            /* b2 = x0 - x3; */      \
    b6 = vec_sub(x1, x2);            /* b6 = x1 - x2; */      \
    b5 = vec_add(b6, b2);            /* b5 = b6 + b2; */      \
    cnst = LD_W2;                                             \
    b5 = vec_madd(cnst, b5, mzero);  /* b5 = b5 * W2; */      \
    cnst = LD_W1;                                             \
    b2 = vec_madd(cnst, b2, b5);     /* b2 = b5 + b2 * W1; */ \
    cnst = LD_W0;                                             \
    b6 = vec_madd(cnst, b6, b5);     /* b6 = b5 + b6 * W0; */ \
                                                              \
    x0 = vec_add(x4, x7);            /* x0 = x4 + x7; */      \
    x1 = vec_add(x5, x6);            /* x1 = x5 + x6; */      \
    x2 = vec_add(x4, x6);            /* x2 = x4 + x6; */      \
    x3 = vec_add(x5, x7);            /* x3 = x5 + x7; */      \
    x8 = vec_add(x2, x3);            /* x8 = x2 + x3; */      \
    cnst = LD_W3;                                             \
    x8 = vec_madd(cnst, x8, mzero);  /* x8 = x8 * W3; */      \
                                                              \
    cnst = LD_W8;                                             \
    x0 = vec_madd(cnst, x0, mzero);  /* x0 *= W8; */          \
    cnst = LD_W9;                                             \
    x1 = vec_madd(cnst, x1, mzero);  /* x1 *= W9; */          \
    cnst = LD_WA;                                             \
    x2 = vec_madd(cnst, x2, x8);     /* x2 = x2 * WA + x8; */ \
    cnst = LD_WB;                                             \
    x3 = vec_madd(cnst, x3, x8);     /* x3 = x3 * WB + x8; */ \
                                                              \
    cnst = LD_W4;                                             \
    b7 = vec_madd(cnst, x4, x0);     /* b7 = x4 * W4 + x0; */ \
    cnst = LD_W5;                                             \
    b5 = vec_madd(cnst, x5, x1);     /* b5 = x5 * W5 + x1; */ \
    cnst = LD_W6;                                             \
    b3 = vec_madd(cnst, x6, x1);     /* b3 = x6 * W6 + x1; */ \
    cnst = LD_W7;                                             \
    b1 = vec_madd(cnst, x7, x0);     /* b1 = x7 * W7 + x0; */ \
                                                              \
    b7 = vec_add(b7, x2);            /* b7 = b7 + x2; */      \
    b5 = vec_add(b5, x3);            /* b5 = b5 + x3; */      \
    b3 = vec_add(b3, x2);            /* b3 = b3 + x2; */      \
    b1 = vec_add(b1, x3);            /* b1 = b1 + x3; */      \
    /* }}} */

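/* Column pass of the same butterfly; currently identical to FDCTROW line
 * for line. */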
#define FDCTCOL(b0,b1,b2,b3,b4,b5,b6,b7) /* {{{ */          \
    x0 = vec_add(b0, b7);            /* x0 = b0 + b7; */      \
    x7 = vec_sub(b0, b7);            /* x7 = b0 - b7; */      \
    x1 = vec_add(b1, b6);            /* x1 = b1 + b6; */      \
    x6 = vec_sub(b1, b6);            /* x6 = b1 - b6; */      \
    x2 = vec_add(b2, b5);            /* x2 = b2 + b5; */      \
    x5 = vec_sub(b2, b5);            /* x5 = b2 - b5; */      \
    x3 = vec_add(b3, b4);            /* x3 = b3 + b4; */      \
    x4 = vec_sub(b3, b4);            /* x4 = b3 - b4; */      \
                                                              \
    b7 = vec_add(x0, x3);            /* b7 = x0 + x3; */      \
    b1 = vec_add(x1, x2);            /* b1 = x1 + x2; */      \
    b0 = vec_add(b7, b1);            /* b0 = b7 + b1; */      \
    b4 = vec_sub(b7, b1);            /* b4 = b7 - b1; */      \
                                                              \
    b2 = vec_sub(x0, x3);            /* b2 = x0 - x3; */      \
    b6 = vec_sub(x1, x2);            /* b6 = x1 - x2; */      \
    b5 = vec_add(b6, b2);            /* b5 = b6 + b2; */      \
    cnst = LD_W2;                                             \
    b5 = vec_madd(cnst, b5, mzero);  /* b5 = b5 * W2; */      \
    cnst = LD_W1;                                             \
    b2 = vec_madd(cnst, b2, b5);     /* b2 = b5 + b2 * W1; */ \
    cnst = LD_W0;                                             \
    b6 = vec_madd(cnst, b6, b5);     /* b6 = b5 + b6 * W0; */ \
                                                              \
    x0 = vec_add(x4, x7);            /* x0 = x4 + x7; */      \
    x1 = vec_add(x5, x6);            /* x1 = x5 + x6; */      \
    x2 = vec_add(x4, x6);            /* x2 = x4 + x6; */      \
    x3 = vec_add(x5, x7);            /* x3 = x5 + x7; */      \
    x8 = vec_add(x2, x3);            /* x8 = x2 + x3; */      \
    cnst = LD_W3;                                             \
    x8 = vec_madd(cnst, x8, mzero);  /* x8 = x8 * W3; */      \
                                                              \
    cnst = LD_W8;                                             \
    x0 = vec_madd(cnst, x0, mzero);  /* x0 *= W8; */          \
    cnst = LD_W9;                                             \
    x1 = vec_madd(cnst, x1, mzero);  /* x1 *= W9; */          \
    cnst = LD_WA;                                             \
    x2 = vec_madd(cnst, x2, x8);     /* x2 = x2 * WA + x8; */ \
    cnst = LD_WB;                                             \
    x3 = vec_madd(cnst, x3, x8);     /* x3 = x3 * WB + x8; */ \
                                                              \
    cnst = LD_W4;                                             \
    b7 = vec_madd(cnst, x4, x0);     /* b7 = x4 * W4 + x0; */ \
    cnst = LD_W5;                                             \
    b5 = vec_madd(cnst, x5, x1);     /* b5 = x5 * W5 + x1; */ \
    cnst = LD_W6;                                             \
    b3 = vec_madd(cnst, x6, x1);     /* b3 = x6 * W6 + x1; */ \
    cnst = LD_W7;                                             \
    b1 = vec_madd(cnst, x7, x0);     /* b1 = x7 * W7 + x0; */ \
                                                              \
    b7 = vec_add(b7, x2);            /* b7 += x2; */          \
    b5 = vec_add(b5, x3);            /* b5 += x3; */          \
    b3 = vec_add(b3, x2);            /* b3 += x2; */          \
    b1 = vec_add(b1, x3);            /* b1 += x3; */          \
    /* }}} */



/* Two-dimensional (8x8) discrete cosine transform. */

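/* In-place FDCT of a 16-byte-aligned block of 64 int16_t coefficients
 * (vec_ld/vec_st ignore the low four address bits, so a misaligned block
 * would be silently mis-loaded).  Usage sketch, assuming the usual
 * DECLARE_ALIGNED macro from libavutil:
 *
 *     DECLARE_ALIGNED(16, int16_t, block)[64];
 *     ... fill block ...
 *     fdct_altivec(block);
 */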
void fdct_altivec(int16_t *block)
{
    vector signed short *bp;
    const vector float *cp;
    vector float b00, b10, b20, b30, b40, b50, b60, b70;
    vector float b01, b11, b21, b31, b41, b51, b61, b71;
    vector float mzero, cnst, cnsts0, cnsts1, cnsts2;
    vector float x0, x1, x2, x3, x4, x5, x6, x7, x8;

    /* setup constants {{{ */
    /* mzero = -0.0 */
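    /* vec_splat_u32(-1) fills each lane with 0xFFFFFFFF; vec_sl uses only
     * the low five bits of each shift count, so this shifts left by 31 and
     * keeps just the sign bit.  The result is -0.0f in every lane, which
     * makes vec_madd(a, b, mzero) an exact a * b, signed zeros included. */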
    mzero = ((vector float)vec_splat_u32(-1));
    mzero = ((vector float)vec_sl(vu32(mzero), vu32(mzero)));
    cp = fdctconsts;
    cnsts0 = vec_ld(0, cp); cp++;
    cnsts1 = vec_ld(0, cp); cp++;
    cnsts2 = vec_ld(0, cp);
    /* }}} */


    /* 8x8 matrix transpose (vector short[8]) {{{ */
#define MERGE_S16(hl,a,b) vec_merge##hl(vs16(a), vs16(b))

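    /* Classic merge-based transpose: three rounds of vec_mergeh/vec_mergel
     * interleavings move every 16-bit element to its transposed position
     * (log2(8) = 3 rounds for an 8x8 matrix). */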
    bp = (vector signed short*)block;
    b00 = ((vector float)vec_ld(0, bp));
    b40 = ((vector float)vec_ld(16*4, bp));
    b01 = ((vector float)MERGE_S16(h, b00, b40));
    b11 = ((vector float)MERGE_S16(l, b00, b40));
    bp++;
    b10 = ((vector float)vec_ld(0, bp));
    b50 = ((vector float)vec_ld(16*4, bp));
    b21 = ((vector float)MERGE_S16(h, b10, b50));
    b31 = ((vector float)MERGE_S16(l, b10, b50));
    bp++;
    b20 = ((vector float)vec_ld(0, bp));
    b60 = ((vector float)vec_ld(16*4, bp));
    b41 = ((vector float)MERGE_S16(h, b20, b60));
    b51 = ((vector float)MERGE_S16(l, b20, b60));
    bp++;
    b30 = ((vector float)vec_ld(0, bp));
    b70 = ((vector float)vec_ld(16*4, bp));
    b61 = ((vector float)MERGE_S16(h, b30, b70));
    b71 = ((vector float)MERGE_S16(l, b30, b70));

    x0 = ((vector float)MERGE_S16(h, b01, b41));
    x1 = ((vector float)MERGE_S16(l, b01, b41));
    x2 = ((vector float)MERGE_S16(h, b11, b51));
    x3 = ((vector float)MERGE_S16(l, b11, b51));
    x4 = ((vector float)MERGE_S16(h, b21, b61));
    x5 = ((vector float)MERGE_S16(l, b21, b61));
    x6 = ((vector float)MERGE_S16(h, b31, b71));
    x7 = ((vector float)MERGE_S16(l, b31, b71));

    b00 = ((vector float)MERGE_S16(h, x0, x4));
    b10 = ((vector float)MERGE_S16(l, x0, x4));
    b20 = ((vector float)MERGE_S16(h, x1, x5));
    b30 = ((vector float)MERGE_S16(l, x1, x5));
    b40 = ((vector float)MERGE_S16(h, x2, x6));
    b50 = ((vector float)MERGE_S16(l, x2, x6));
    b60 = ((vector float)MERGE_S16(h, x3, x7));
    b70 = ((vector float)MERGE_S16(l, x3, x7));

#undef MERGE_S16
    /* }}} */


    /* Some of the initial stages can be computed as vector short before
     * the conversion to vector float.  The #if 1 branch below exploits
     * this; the #else branch keeps the straightforward all-float version
     * for reference. */
#if 1
    /* fdct rows {{{ */
    x0 = ((vector float)vec_add(vs16(b00), vs16(b70)));
    x7 = ((vector float)vec_sub(vs16(b00), vs16(b70)));
    x1 = ((vector float)vec_add(vs16(b10), vs16(b60)));
    x6 = ((vector float)vec_sub(vs16(b10), vs16(b60)));
    x2 = ((vector float)vec_add(vs16(b20), vs16(b50)));
    x5 = ((vector float)vec_sub(vs16(b20), vs16(b50)));
    x3 = ((vector float)vec_add(vs16(b30), vs16(b40)));
    x4 = ((vector float)vec_sub(vs16(b30), vs16(b40)));

    b70 = ((vector float)vec_add(vs16(x0), vs16(x3)));
    b10 = ((vector float)vec_add(vs16(x1), vs16(x2)));

    b00 = ((vector float)vec_add(vs16(b70), vs16(b10)));
    b40 = ((vector float)vec_sub(vs16(b70), vs16(b10)));

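/* Widen the eight 16-bit coefficients of row pair n into two vectors of
 * 32-bit ints (vec_unpackh/vec_unpackl sign-extend) and convert both
 * halves to float; the scale argument 0 to vec_ctf means no scaling. */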
#define CTF0(n) \
    b##n##1 = ((vector float)vec_unpackl(vs16(b##n##0))); \
    b##n##0 = ((vector float)vec_unpackh(vs16(b##n##0))); \
    b##n##1 = vec_ctf(vs32(b##n##1), 0); \
    b##n##0 = vec_ctf(vs32(b##n##0), 0);

    CTF0(0);
    CTF0(4);

    b20 = ((vector float)vec_sub(vs16(x0), vs16(x3)));
    b60 = ((vector float)vec_sub(vs16(x1), vs16(x2)));

    CTF0(2);
    CTF0(6);

#undef CTF0

    x0 = vec_add(b60, b20);
    x1 = vec_add(b61, b21);

    cnst = LD_W2;
    x0 = vec_madd(cnst, x0, mzero);
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_W1;
    b20 = vec_madd(cnst, b20, x0);
    b21 = vec_madd(cnst, b21, x1);
    cnst = LD_W0;
    b60 = vec_madd(cnst, b60, x0);
    b61 = vec_madd(cnst, b61, x1);

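/* Like CTF0, but widens and converts source vector x into the destination
 * pair b##0/b##1 instead of in place. */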
#define CTFX(x,b) \
    b##0 = ((vector float)vec_unpackh(vs16(x))); \
    b##1 = ((vector float)vec_unpackl(vs16(x))); \
    b##0 = vec_ctf(vs32(b##0), 0); \
    b##1 = vec_ctf(vs32(b##1), 0);

    CTFX(x4, b7);
    CTFX(x5, b5);
    CTFX(x6, b3);
    CTFX(x7, b1);

#undef CTFX


    x0 = vec_add(b70, b10);
    x1 = vec_add(b50, b30);
    x2 = vec_add(b70, b30);
    x3 = vec_add(b50, b10);
    x8 = vec_add(x2, x3);
    cnst = LD_W3;
    x8 = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0 = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2 = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3 = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b70 = vec_madd(cnst, b70, x0);
    cnst = LD_W5;
    b50 = vec_madd(cnst, b50, x1);
    cnst = LD_W6;
    b30 = vec_madd(cnst, b30, x1);
    cnst = LD_W7;
    b10 = vec_madd(cnst, b10, x0);

    b70 = vec_add(b70, x2);
    b50 = vec_add(b50, x3);
    b30 = vec_add(b30, x2);
    b10 = vec_add(b10, x3);


    x0 = vec_add(b71, b11);
    x1 = vec_add(b51, b31);
    x2 = vec_add(b71, b31);
    x3 = vec_add(b51, b11);
    x8 = vec_add(x2, x3);
    cnst = LD_W3;
    x8 = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0 = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2 = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3 = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b71 = vec_madd(cnst, b71, x0);
    cnst = LD_W5;
    b51 = vec_madd(cnst, b51, x1);
    cnst = LD_W6;
    b31 = vec_madd(cnst, b31, x1);
    cnst = LD_W7;
    b11 = vec_madd(cnst, b11, x0);

    b71 = vec_add(b71, x2);
    b51 = vec_add(b51, x3);
    b31 = vec_add(b31, x2);
    b11 = vec_add(b11, x3);
    /* }}} */
#else
    /* convert to float {{{ */
#define CTF(n) \
    b##n##1 = ((vector float)vec_unpackl(vs16(b##n##0))); \
    b##n##0 = ((vector float)vec_unpackh(vs16(b##n##0))); \
    b##n##1 = vec_ctf(vs32(b##n##1), 0); \
    b##n##0 = vec_ctf(vs32(b##n##0), 0);

    CTF(0);
    CTF(1);
    CTF(2);
    CTF(3);
    CTF(4);
    CTF(5);
    CTF(6);
    CTF(7);

#undef CTF
    /* }}} */

    FDCTROW(b00, b10, b20, b30, b40, b50, b60, b70);
    FDCTROW(b01, b11, b21, b31, b41, b51, b61, b71);
#endif


    /* 8x8 matrix transpose (vector float[8][2]) {{{ */
    x0 = vec_mergel(b00, b20);
    x1 = vec_mergeh(b00, b20);
    x2 = vec_mergel(b10, b30);
    x3 = vec_mergeh(b10, b30);

    b00 = vec_mergeh(x1, x3);
    b10 = vec_mergel(x1, x3);
    b20 = vec_mergeh(x0, x2);
    b30 = vec_mergel(x0, x2);

    x4 = vec_mergel(b41, b61);
    x5 = vec_mergeh(b41, b61);
    x6 = vec_mergel(b51, b71);
    x7 = vec_mergeh(b51, b71);

    b41 = vec_mergeh(x5, x7);
    b51 = vec_mergel(x5, x7);
    b61 = vec_mergeh(x4, x6);
    b71 = vec_mergel(x4, x6);

    x0 = vec_mergel(b01, b21);
    x1 = vec_mergeh(b01, b21);
    x2 = vec_mergel(b11, b31);
    x3 = vec_mergeh(b11, b31);

    x4 = vec_mergel(b40, b60);
    x5 = vec_mergeh(b40, b60);
    x6 = vec_mergel(b50, b70);
    x7 = vec_mergeh(b50, b70);

    b40 = vec_mergeh(x1, x3);
    b50 = vec_mergel(x1, x3);
    b60 = vec_mergeh(x0, x2);
    b70 = vec_mergel(x0, x2);

    b01 = vec_mergeh(x5, x7);
    b11 = vec_mergel(x5, x7);
    b21 = vec_mergeh(x4, x6);
    b31 = vec_mergel(x4, x6);
    /* }}} */


    FDCTCOL(b00, b10, b20, b30, b40, b50, b60, b70);
    FDCTCOL(b01, b11, b21, b31, b41, b51, b61, b71);


    /* round, convert back to short {{{ */
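/* vec_round rounds to nearest, vec_cts converts the rounded floats to
 * 32-bit ints, and vec_pack narrows two int vectors back into eight
 * shorts.  vec_pack is a modulo (non-saturating) pack, which is safe here
 * assuming the FDCT output range fits in 16 bits. */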
#define CTS(n) \
    b##n##0 = vec_round(b##n##0); \
    b##n##1 = vec_round(b##n##1); \
    b##n##0 = ((vector float)vec_cts(b##n##0, 0)); \
    b##n##1 = ((vector float)vec_cts(b##n##1, 0)); \
    b##n##0 = ((vector float)vec_pack(vs32(b##n##0), vs32(b##n##1))); \
    vec_st(vs16(b##n##0), 0, bp);

    bp = (vector signed short*)block;
    CTS(0); bp++;
    CTS(1); bp++;
    CTS(2); bp++;
    CTS(3); bp++;
    CTS(4); bp++;
    CTS(5); bp++;
    CTS(6); bp++;
    CTS(7);

#undef CTS
    /* }}} */
}

/* vim:set foldmethod=marker foldlevel=0: */