1 /*
2 * Copyright (c) 2002 Brian Foley
3 * Copyright (c) 2002 Dieter Shirley
4 * Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
5 *
6 * This file is part of FFmpeg.
7 *
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #include "dsputil.h"
24
25 #include "gcc_fixes.h"
26
27 #include "dsputil_altivec.h"
28
29 #ifdef SYS_DARWIN
30 #include <sys/sysctl.h>
31 #elif defined(__AMIGAOS4__)
32 #include <exec/exec.h>
33 #include <interfaces/exec.h>
34 #include <proto/exec.h>
35 #else
36 #include <signal.h>
37 #include <setjmp.h>
38
39 static sigjmp_buf jmpbuf;
40 static volatile sig_atomic_t canjump = 0;
41
42 static void sigill_handler (int sig)
43 {
44 if (!canjump) {
45 signal (sig, SIG_DFL);
46 raise (sig);
47 }
48
49 canjump = 0;
50 siglongjmp (jmpbuf, 1);
51 }
52 #endif /* SYS_DARWIN */
53
54 int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
55 {
56 int i;
57 DECLARE_ALIGNED_16(int, s);
58 const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
59 vector unsigned char *tv;
60 vector unsigned char pix1v, pix2v, pix2iv, avgv, t5;
61 vector unsigned int sad;
62 vector signed int sumdiffs;
63
64 s = 0;
65 sad = (vector unsigned int)vec_splat_u32(0);
66 for(i=0;i<h;i++) {
67 /*
68 Read unaligned pixels into our vectors. The vectors are as follows:
69 pix1v: pix1[0]-pix1[15]
70 pix2v: pix2[0]-pix2[15] pix2iv: pix2[1]-pix2[16]
71 */
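/* Illustrative scalar sketch of what this loop body computes (per row):
 *     for (j = 0; j < 16; j++)
 *         s += abs(pix1[j] - ((pix2[j] + pix2[j + 1] + 1) >> 1));
 * vec_avg() rounds upwards, matching the +1 above. */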
72 tv = (vector unsigned char *) pix1;
73 pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));
74
75 tv = (vector unsigned char *) &pix2[0];
76 pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));
77
78 tv = (vector unsigned char *) &pix2[1];
79 pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));
80
81 /* Calculate the average vector */
82 avgv = vec_avg(pix2v, pix2iv);
83
84 /* Calculate a sum of abs differences vector */
85 t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));
86
87 /* Add each 4 pixel group together and put 4 results into sad */
88 sad = vec_sum4s(t5, sad);
89
90 pix1 += line_size;
91 pix2 += line_size;
92 }
93 /* Sum up the four partial sums, and put the result into s */
94 sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
95 sumdiffs = vec_splat(sumdiffs, 3);
96 vec_ste(sumdiffs, 0, &s);
97
98 return s;
99 }
100
101 int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
102 {
103 int i;
104 DECLARE_ALIGNED_16(int, s);
105 const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
106 vector unsigned char *tv;
107 vector unsigned char pix1v, pix2v, pix3v, avgv, t5;
108 vector unsigned int sad;
109 vector signed int sumdiffs;
110 uint8_t *pix3 = pix2 + line_size;
111
112 s = 0;
113 sad = (vector unsigned int)vec_splat_u32(0);
114
115 /*
116 Due to the fact that pix3 = pix2 + line_size, the pix3 of one
117 iteration becomes pix2 in the next iteration. We can use this
118 fact to avoid a potentially expensive unaligned read each
119 time around the loop.
120 Read unaligned pixels into our vector. The vector is as follows:
121 pix2v: pix2[0]-pix2[15]
122 (unlike the xy2 case, no splitting into shorts is needed here)
123 */
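/* Illustrative scalar sketch for the y2 (vertical half-pel) case:
 *     s += abs(pix1[j] - ((pix2[j] + pix3[j] + 1) >> 1));
 * with pix3 = pix2 + line_size. */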
124 tv = (vector unsigned char *) &pix2[0];
125 pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));
126
127 for(i=0;i<h;i++) {
128 /*
129 Read unaligned pixels into our vectors. The vectors are as follows:
130 pix1v: pix1[0]-pix1[15]
131 pix3v: pix3[0]-pix3[15]
132 */
133 tv = (vector unsigned char *) pix1;
134 pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));
135
136 tv = (vector unsigned char *) &pix3[0];
137 pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));
138
139 /* Calculate the average vector */
140 avgv = vec_avg(pix2v, pix3v);
141
142 /* Calculate a sum of abs differences vector */
143 t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));
144
145 /* Add each 4 pixel group together and put 4 results into sad */
146 sad = vec_sum4s(t5, sad);
147
148 pix1 += line_size;
149 pix2v = pix3v;
150 pix3 += line_size;
151
152 }
153
154 /* Sum up the four partial sums, and put the result into s */
155 sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
156 sumdiffs = vec_splat(sumdiffs, 3);
157 vec_ste(sumdiffs, 0, &s);
158 return s;
159 }
160
161 int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
162 {
163 int i;
164 DECLARE_ALIGNED_16(int, s);
165 uint8_t *pix3 = pix2 + line_size;
166 const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
167 const_vector unsigned short two = (const_vector unsigned short)vec_splat_u16(2);
168 vector unsigned char *tv, avgv, t5;
169 vector unsigned char pix1v, pix2v, pix3v, pix2iv, pix3iv;
170 vector unsigned short pix2lv, pix2hv, pix2ilv, pix2ihv;
171 vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
172 vector unsigned short avghv, avglv;
173 vector unsigned short t1, t2, t3, t4;
174 vector unsigned int sad;
175 vector signed int sumdiffs;
176
177 sad = (vector unsigned int)vec_splat_u32(0);
178
179 s = 0;
180
181 /*
182 Due to the fact that pix3 = pix2 + line_size, the pix3 of one
183 iteration becomes pix2 in the next iteration. We can use this
184 fact to avoid a potentially expensive unaligned read, as well as
185 some splitting and vector additions, each time around the loop.
186 Read unaligned pixels into our vectors. The vectors are as follows:
187 pix2v: pix2[0]-pix2[15] pix2iv: pix2[1]-pix2[16]
188 Split the pixel vectors into shorts
189 */
190 tv = (vector unsigned char *) &pix2[0];
191 pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));
192
193 tv = (vector unsigned char *) &pix2[1];
194 pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));
195
196 pix2hv = (vector unsigned short) vec_mergeh(zero, pix2v);
197 pix2lv = (vector unsigned short) vec_mergel(zero, pix2v);
198 pix2ihv = (vector unsigned short) vec_mergeh(zero, pix2iv);
199 pix2ilv = (vector unsigned short) vec_mergel(zero, pix2iv);
200 t1 = vec_add(pix2hv, pix2ihv);
201 t2 = vec_add(pix2lv, pix2ilv);
202
203 for(i=0;i<h;i++) {
204 /*
205 Read unaligned pixels into our vectors. The vectors are as follows:
206 pix1v: pix1[0]-pix1[15]
207 pix3v: pix3[0]-pix3[15] pix3iv: pix3[1]-pix3[16]
208 */
209 tv = (vector unsigned char *) pix1;
210 pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));
211
212 tv = (vector unsigned char *) &pix3[0];
213 pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));
214
215 tv = (vector unsigned char *) &pix3[1];
216 pix3iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[1]));
217
218 /*
219 Note that AltiVec does have vec_avg, but it works on vector pairs
220 and rounds up. We could do avg(avg(a,b),avg(c,d)), but the rounding
221 would mean that, for example, avg(3,0,0,1) = 2, when it should be 1.
222 Instead, we have to split the pixel vectors into vectors of shorts,
223 and do the averaging by hand.
224 */
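/* Illustrative sketch: the correctly rounded xy2 average computed below is
 *     (pix2[j] + pix2[j+1] + pix3[j] + pix3[j+1] + 2) >> 2
 * while nested vec_avg would give, for the values (3,0,0,1),
 *     avg(avg(3,0), avg(0,1)) = avg(2,1) = 2, instead of (3+0+0+1+2)>>2 = 1. */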
225
226 /* Split the pixel vectors into shorts */
227 pix3hv = (vector unsigned short) vec_mergeh(zero, pix3v);
228 pix3lv = (vector unsigned short) vec_mergel(zero, pix3v);
229 pix3ihv = (vector unsigned short) vec_mergeh(zero, pix3iv);
230 pix3ilv = (vector unsigned short) vec_mergel(zero, pix3iv);
231
232 /* Do the averaging on them */
233 t3 = vec_add(pix3hv, pix3ihv);
234 t4 = vec_add(pix3lv, pix3ilv);
235
236 avghv = vec_sr(vec_add(vec_add(t1, t3), two), two);
237 avglv = vec_sr(vec_add(vec_add(t2, t4), two), two);
238
239 /* Pack the shorts back into a result */
240 avgv = vec_pack(avghv, avglv);
241
242 /* Calculate a sum of abs differences vector */
243 t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));
244
245 /* Add each 4 pixel group together and put 4 results into sad */
246 sad = vec_sum4s(t5, sad);
247
248 pix1 += line_size;
249 pix3 += line_size;
250 /* Transfer the calculated values for pix3 into pix2 */
251 t1 = t3;
252 t2 = t4;
253 }
254 /* Sum up the four partial sums, and put the result into s */
255 sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
256 sumdiffs = vec_splat(sumdiffs, 3);
257 vec_ste(sumdiffs, 0, &s);
258
259 return s;
260 }
261
262 int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
263 {
264 int i;
265 DECLARE_ALIGNED_16(int, s);
266 const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
267 vector unsigned char perm1, perm2, *pix1v, *pix2v;
268 vector unsigned char t1, t2, t3,t4, t5;
269 vector unsigned int sad;
270 vector signed int sumdiffs;
271
272 sad = (vector unsigned int)vec_splat_u32(0);
273
274
275 for(i=0;i<h;i++) {
276 /* Read potentially unaligned pixels into t1 and t2 */
277 perm1 = vec_lvsl(0, pix1);
278 pix1v = (vector unsigned char *) pix1;
279 perm2 = vec_lvsl(0, pix2);
280 pix2v = (vector unsigned char *) pix2;
281 t1 = vec_perm(pix1v[0], pix1v[1], perm1);
282 t2 = vec_perm(pix2v[0], pix2v[1], perm2);
283
284 /* Calculate a sum of abs differences vector */
285 t3 = vec_max(t1, t2);
286 t4 = vec_min(t1, t2);
287 t5 = vec_sub(t3, t4);
288
289 /* Add each 4 pixel group together and put 4 results into sad */
290 sad = vec_sum4s(t5, sad);
291
292 pix1 += line_size;
293 pix2 += line_size;
294 }
295
296 /* Sum up the four partial sums, and put the result into s */
297 sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
298 sumdiffs = vec_splat(sumdiffs, 3);
299 vec_ste(sumdiffs, 0, &s);
300
301 return s;
302 }
303
304 int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
305 {
306 int i;
307 DECLARE_ALIGNED_16(int, s);
308 const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
309 vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
310 vector unsigned char t1, t2, t3,t4, t5;
311 vector unsigned int sad;
312 vector signed int sumdiffs;
313
314 sad = (vector unsigned int)vec_splat_u32(0);
315
316 permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);
317
318 for(i=0;i<h;i++) {
319 /* Read potentially unaligned pixels into t1 and t2
320 Since we're reading 16 pixels, and actually only want 8,
321 mask out the last 8 pixels. The 0s don't change the sum. */
322 perm1 = vec_lvsl(0, pix1);
323 pix1v = (vector unsigned char *) pix1;
324 perm2 = vec_lvsl(0, pix2);
325 pix2v = (vector unsigned char *) pix2;
326 t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
327 t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);
328
329 /* Calculate a sum of abs differences vector */
330 t3 = vec_max(t1, t2);
331 t4 = vec_min(t1, t2);
332 t5 = vec_sub(t3, t4);
333
334 /* Add each 4 pixel group together and put 4 results into sad */
335 sad = vec_sum4s(t5, sad);
336
337 pix1 += line_size;
338 pix2 += line_size;
339 }
340
341 /* Sum up the four partial sums, and put the result into s */
342 sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
343 sumdiffs = vec_splat(sumdiffs, 3);
344 vec_ste(sumdiffs, 0, &s);
345
346 return s;
347 }
348
349 int pix_norm1_altivec(uint8_t *pix, int line_size)
350 {
351 int i;
352 DECLARE_ALIGNED_16(int, s);
353 const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
354 vector unsigned char *tv;
355 vector unsigned char pixv;
356 vector unsigned int sv;
357 vector signed int sum;
358
359 sv = (vector unsigned int)vec_splat_u32(0);
360
361 s = 0;
362 for (i = 0; i < 16; i++) {
363 /* Read in the potentially unaligned pixels */
364 tv = (vector unsigned char *) pix;
365 pixv = vec_perm(tv[0], tv[1], vec_lvsl(0, pix));
366
367 /* Square the values, and add them to our sum */
368 sv = vec_msum(pixv, pixv, sv);
369
370 pix += line_size;
371 }
372 /* Sum up the four partial sums, and put the result into s */
373 sum = vec_sums((vector signed int) sv, (vector signed int) zero);
374 sum = vec_splat(sum, 3);
375 vec_ste(sum, 0, &s);
376
377 return s;
378 }
379
380 /**
381 * Sum of Squared Errors for an 8x8 block.
382 * AltiVec-enhanced.
383 * It's the sad8_altivec code above w/ squaring added.
384 */
385 int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
386 {
387 int i;
388 DECLARE_ALIGNED_16(int, s);
389 const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
390 vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
391 vector unsigned char t1, t2, t3,t4, t5;
392 vector unsigned int sum;
393 vector signed int sumsqr;
394
395 sum = (vector unsigned int)vec_splat_u32(0);
396
397 permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);
398
399
400 for(i=0;i<h;i++) {
401 /* Read potentially unaligned pixels into t1 and t2
402 Since we're reading 16 pixels, and actually only want 8,
403 mask out the last 8 pixels. The 0s don't change the sum. */
404 perm1 = vec_lvsl(0, pix1);
405 pix1v = (vector unsigned char *) pix1;
406 perm2 = vec_lvsl(0, pix2);
407 pix2v = (vector unsigned char *) pix2;
408 t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
409 t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);
410
411 /*
412 Since we want to use unsigned chars, we can take advantage
413 of the fact that abs(a-b)^2 = (a-b)^2.
414 */
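/* Illustrative sketch: per pixel this amounts to
 *     d = abs(pix1[j] - pix2[j]);  sum += d * d;
 * vec_msum(t5, t5, sum) below does the multiplies and the 4-way accumulation
 * into 32-bit partial sums in a single instruction. */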
415
416 /* Calculate abs differences vector */
417 t3 = vec_max(t1, t2);
418 t4 = vec_min(t1, t2);
419 t5 = vec_sub(t3, t4);
420
421 /* Square the values and add them to our sum */
422 sum = vec_msum(t5, t5, sum);
423
424 pix1 += line_size;
425 pix2 += line_size;
426 }
427
428 /* Sum up the four partial sums, and put the result into s */
429 sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
430 sumsqr = vec_splat(sumsqr, 3);
431 vec_ste(sumsqr, 0, &s);
432
433 return s;
434 }
435
436 /**
437 * Sum of Squared Errors for a 16x16 block.
438 * AltiVec-enhanced.
439 * It's the sad16_altivec code above w/ squaring added.
440 */
441 int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
442 {
443 int i;
444 DECLARE_ALIGNED_16(int, s);
445 const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
446 vector unsigned char perm1, perm2, *pix1v, *pix2v;
447 vector unsigned char t1, t2, t3,t4, t5;
448 vector unsigned int sum;
449 vector signed int sumsqr;
450
451 sum = (vector unsigned int)vec_splat_u32(0);
452
453 for(i=0;i<h;i++) {
454 /* Read potentially unaligned pixels into t1 and t2 */
455 perm1 = vec_lvsl(0, pix1);
456 pix1v = (vector unsigned char *) pix1;
457 perm2 = vec_lvsl(0, pix2);
458 pix2v = (vector unsigned char *) pix2;
459 t1 = vec_perm(pix1v[0], pix1v[1], perm1);
460 t2 = vec_perm(pix2v[0], pix2v[1], perm2);
461
462 /*
463 Since we want to use unsigned chars, we can take advantage
464 of the fact that abs(a-b)^2 = (a-b)^2.
465 */
466
467 /* Calculate abs differences vector */
468 t3 = vec_max(t1, t2);
469 t4 = vec_min(t1, t2);
470 t5 = vec_sub(t3, t4);
471
472 /* Square the values and add them to our sum */
473 sum = vec_msum(t5, t5, sum);
474
475 pix1 += line_size;
476 pix2 += line_size;
477 }
478
479 /* Sum up the four partial sums, and put the result into s */
480 sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
481 sumsqr = vec_splat(sumsqr, 3);
482 vec_ste(sumsqr, 0, &s);
483
484 return s;
485 }
486
487 int pix_sum_altivec(uint8_t * pix, int line_size)
488 {
489 const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
490 vector unsigned char perm, *pixv;
491 vector unsigned char t1;
492 vector unsigned int sad;
493 vector signed int sumdiffs;
494
495 int i;
496 DECLARE_ALIGNED_16(int, s);
497
498 sad = (vector unsigned int)vec_splat_u32(0);
499
500 for (i = 0; i < 16; i++) {
501 /* Read the potentially unaligned 16 pixels into t1 */
502 perm = vec_lvsl(0, pix);
503 pixv = (vector unsigned char *) pix;
504 t1 = vec_perm(pixv[0], pixv[1], perm);
505
506 /* Add each 4 pixel group together and put 4 results into sad */
507 sad = vec_sum4s(t1, sad);
508
509 pix += line_size;
510 }
511
512 /* Sum up the four partial sums, and put the result into s */
513 sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
514 sumdiffs = vec_splat(sumdiffs, 3);
515 vec_ste(sumdiffs, 0, &s);
516
517 return s;
518 }
519
520 void get_pixels_altivec(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
521 {
522 int i;
523 vector unsigned char perm, bytes, *pixv;
524 const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
525 vector signed short shorts;
526
527 for(i=0;i<8;i++)
528 {
529 // Read potentially unaligned pixels.
530 // We're reading 16 pixels, and actually only want 8,
531 // but we simply ignore the extras.
532 perm = vec_lvsl(0, pixels);
533 pixv = (vector unsigned char *) pixels;
534 bytes = vec_perm(pixv[0], pixv[1], perm);
535
536 // convert the bytes into shorts
537 shorts = (vector signed short)vec_mergeh(zero, bytes);
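// Note: merging with the zero vector interleaves a 0x00 byte in front of
// each of the first 8 pixels, i.e. it zero-extends them to 16-bit values
// (the cast to signed short is safe since they fit in 8 bits).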
538
539 // save the data to the block, we assume the block is 16-byte aligned
540 vec_st(shorts, i*16, (vector signed short*)block);
541
542 pixels += line_size;
543 }
544 }
545
546 void diff_pixels_altivec(DCTELEM *restrict block, const uint8_t *s1,
547 const uint8_t *s2, int stride)
548 {
549 int i;
550 vector unsigned char perm, bytes, *pixv;
551 const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
552 vector signed short shorts1, shorts2;
553
554 for(i=0;i<4;i++)
555 {
556 // Read potentially unaligned pixels
557 // We're reading 16 pixels, and actually only want 8,
558 // but we simply ignore the extras.
559 perm = vec_lvsl(0, s1);
560 pixv = (vector unsigned char *) s1;
561 bytes = vec_perm(pixv[0], pixv[1], perm);
562
563 // convert the bytes into shorts
564 shorts1 = (vector signed short)vec_mergeh(zero, bytes);
565
566 // Do the same for the second block of pixels
567 perm = vec_lvsl(0, s2);
568 pixv = (vector unsigned char *) s2;
569 bytes = vec_perm(pixv[0], pixv[1], perm);
570
571 // convert the bytes into shorts
572 shorts2 = (vector signed short)vec_mergeh(zero, bytes);
573
574 // Do the subtraction
575 shorts1 = vec_sub(shorts1, shorts2);
576
577 // save the data to the block, we assume the block is 16-byte aligned
578 vec_st(shorts1, 0, (vector signed short*)block);
579
580 s1 += stride;
581 s2 += stride;
582 block += 8;
583
584
585 // The code below is a copy of the code above... This is a manual
586 // unroll.
587
588 // Read potentially unaligned pixels
589 // We're reading 16 pixels, and actually only want 8,
590 // but we simply ignore the extras.
591 perm = vec_lvsl(0, s1);
592 pixv = (vector unsigned char *) s1;
593 bytes = vec_perm(pixv[0], pixv[1], perm);
594
595 // convert the bytes into shorts
596 shorts1 = (vector signed short)vec_mergeh(zero, bytes);
597
598 // Do the same for the second block of pixels
599 perm = vec_lvsl(0, s2);
600 pixv = (vector unsigned char *) s2;
601 bytes = vec_perm(pixv[0], pixv[1], perm);
602
603 // convert the bytes into shorts
604 shorts2 = (vector signed short)vec_mergeh(zero, bytes);
605
606 // Do the subtraction
607 shorts1 = vec_sub(shorts1, shorts2);
608
609 // save the data to the block, we assume the block is 16-byte aligned
610 vec_st(shorts1, 0, (vector signed short*)block);
611
612 s1 += stride;
613 s2 += stride;
614 block += 8;
615 }
616 }
617
618 void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
619 register int i;
620 register vector unsigned char vdst, vsrc;
621
622 /* dst and src are 16-byte aligned (guaranteed) */
623 for(i = 0 ; (i + 15) < w ; i+=16)
624 {
625 vdst = vec_ld(i, (unsigned char*)dst);
626 vsrc = vec_ld(i, (unsigned char*)src);
627 vdst = vec_add(vsrc, vdst);
628 vec_st(vdst, i, (unsigned char*)dst);
629 }
630 /* if w is not a multiple of 16 */
631 for (; (i < w) ; i++)
632 {
633 dst[i] = dst[i] + src[i];
634 }
635 }
636
637 /* next one assumes that ((line_size % 16) == 0) */
638 void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
639 {
640 POWERPC_PERF_DECLARE(altivec_put_pixels16_num, 1);
641 register vector unsigned char pixelsv1, pixelsv2;
642 register vector unsigned char pixelsv1B, pixelsv2B;
643 register vector unsigned char pixelsv1C, pixelsv2C;
644 register vector unsigned char pixelsv1D, pixelsv2D;
645
646 register vector unsigned char perm = vec_lvsl(0, pixels);
647 int i;
648 register int line_size_2 = line_size << 1;
649 register int line_size_3 = line_size + line_size_2;
650 register int line_size_4 = line_size << 2;
651
652 POWERPC_PERF_START_COUNT(altivec_put_pixels16_num, 1);
653 // hand-unrolling the loop by 4 gains about 15%
654 // minimum execution time goes from 74 to 60 cycles
655 // it's faster than -funroll-loops, but using
656 // -funroll-loops w/ this is bad - 74 cycles again.
657 // all this is on a 7450, tuning for the 7450
658 #if 0
659 for(i=0; i<h; i++) {
660 pixelsv1 = vec_ld(0, (unsigned char*)pixels);
661 pixelsv2 = vec_ld(16, (unsigned char*)pixels);
662 vec_st(vec_perm(pixelsv1, pixelsv2, perm),
663 0, (unsigned char*)block);
664 pixels+=line_size;
665 block +=line_size;
666 }
667 #else
668 for(i=0; i<h; i+=4) {
669 pixelsv1 = vec_ld(0, (unsigned char*)pixels);
670 pixelsv2 = vec_ld(15, (unsigned char*)pixels);
671 pixelsv1B = vec_ld(line_size, (unsigned char*)pixels);
672 pixelsv2B = vec_ld(15 + line_size, (unsigned char*)pixels);
673 pixelsv1C = vec_ld(line_size_2, (unsigned char*)pixels);
674 pixelsv2C = vec_ld(15 + line_size_2, (unsigned char*)pixels);
675 pixelsv1D = vec_ld(line_size_3, (unsigned char*)pixels);
676 pixelsv2D = vec_ld(15 + line_size_3, (unsigned char*)pixels);
677 vec_st(vec_perm(pixelsv1, pixelsv2, perm),
678 0, (unsigned char*)block);
679 vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
680 line_size, (unsigned char*)block);
681 vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
682 line_size_2, (unsigned char*)block);
683 vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
684 line_size_3, (unsigned char*)block);
685 pixels+=line_size_4;
686 block +=line_size_4;
687 }
688 #endif
689 POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_num, 1);
690 }
691
692 /* next one assumes that ((line_size % 16) == 0) */
693 #define op_avg(a,b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
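/* Explanatory sketch of the op_avg trick: per byte, (a + b + 1) >> 1 equals
 * (a | b) - ((a ^ b) >> 1); applying the mask 0xFEFEFEFE to the 32-bit xor
 * clears the bits that would otherwise shift across byte boundaries, so four
 * packed bytes are averaged (rounding up) at once with plain 32-bit
 * arithmetic. */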
694 void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
695 {
696 POWERPC_PERF_DECLARE(altivec_avg_pixels16_num, 1);
697 register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
698 register vector unsigned char perm = vec_lvsl(0, pixels);
699 int i;
700
701 POWERPC_PERF_START_COUNT(altivec_avg_pixels16_num, 1);
702
703 for(i=0; i<h; i++) {
704 pixelsv1 = vec_ld(0, (unsigned char*)pixels);
705 pixelsv2 = vec_ld(16, (unsigned char*)pixels);
706 blockv = vec_ld(0, block);
707 pixelsv = vec_perm(pixelsv1, pixelsv2, perm);
708 blockv = vec_avg(blockv,pixelsv);
709 vec_st(blockv, 0, (unsigned char*)block);
710 pixels+=line_size;
711 block +=line_size;
712 }
713
714 POWERPC_PERF_STOP_COUNT(altivec_avg_pixels16_num, 1);
715 }
716
717 /* next one assumes that ((line_size % 8) == 0) */
718 void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
719 {
720 POWERPC_PERF_DECLARE(altivec_avg_pixels8_num, 1);
721 register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
722 int i;
723
724 POWERPC_PERF_START_COUNT(altivec_avg_pixels8_num, 1);
725
726 for (i = 0; i < h; i++) {
727 /*
728 block is 8-byte aligned, so we're either in the
729 left block (16-byte aligned) or in the right block (not)
730 */
731 int rightside = ((unsigned long)block & 0x0000000F);
732
733 blockv = vec_ld(0, block);
734 pixelsv1 = vec_ld(0, (unsigned char*)pixels);
735 pixelsv2 = vec_ld(16, (unsigned char*)pixels);
736 pixelsv = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));
737
738 if (rightside)
739 {
740 pixelsv = vec_perm(blockv, pixelsv, vcprm(0,1,s0,s1));
741 }
742 else
743 {
744 pixelsv = vec_perm(blockv, pixelsv, vcprm(s0,s1,2,3));
745 }
746
747 blockv = vec_avg(blockv, pixelsv);
748
749 vec_st(blockv, 0, block);
750
751 pixels += line_size;
752 block += line_size;
753 }
754
755 POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1);
756 }
757
758 /* next one assumes that ((line_size % 8) == 0) */
759 void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
760 {
761 POWERPC_PERF_DECLARE(altivec_put_pixels8_xy2_num, 1);
762 register int i;
763 register vector unsigned char
764 pixelsv1, pixelsv2,
765 pixelsavg;
766 register vector unsigned char
767 blockv, temp1, temp2;
768 register vector unsigned short
769 pixelssum1, pixelssum2, temp3;
770 register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
771 register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);
772
773 temp1 = vec_ld(0, pixels);
774 temp2 = vec_ld(16, pixels);
775 pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
776 if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F)
777 {
778 pixelsv2 = temp2;
779 }
780 else
781 {
782 pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
783 }
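/* Note on the special case above: when pixels is at offset 15 within its
 * 16-byte block, pixels + 1 is 16-byte aligned and the wanted data is exactly
 * temp2, whereas vec_lvsl(1, pixels) would wrap around to a zero shift and
 * wrongly select temp1. The same pattern recurs in the functions below. */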
784 pixelsv1 = vec_mergeh(vczero, pixelsv1);
785 pixelsv2 = vec_mergeh(vczero, pixelsv2);
786 pixelssum1 = vec_add((vector unsigned short)pixelsv1,
787 (vector unsigned short)pixelsv2);
788 pixelssum1 = vec_add(pixelssum1, vctwo);
789
790 POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1);
791 for (i = 0; i < h ; i++) {
792 int rightside = ((unsigned long)block & 0x0000000F);
793 blockv = vec_ld(0, block);
794
795 temp1 = vec_ld(line_size, pixels);
796 temp2 = vec_ld(line_size + 16, pixels);
797 pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
798 if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
799 {
800 pixelsv2 = temp2;
801 }
802 else
803 {
804 pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
805 }
806
807 pixelsv1 = vec_mergeh(vczero, pixelsv1);
808 pixelsv2 = vec_mergeh(vczero, pixelsv2);
809 pixelssum2 = vec_add((vector unsigned short)pixelsv1,
810 (vector unsigned short)pixelsv2);
811 temp3 = vec_add(pixelssum1, pixelssum2);
812 temp3 = vec_sra(temp3, vctwo);
813 pixelssum1 = vec_add(pixelssum2, vctwo);
814 pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);
815
816 if (rightside)
817 {
818 blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
819 }
820 else
821 {
822 blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
823 }
824
825 vec_st(blockv, 0, block);
826
827 block += line_size;
828 pixels += line_size;
829 }
830
831 POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1);
832 }
833
834 /* next one assumes that ((line_size % 8) == 0) */
835 void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
836 {
837 POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels8_xy2_num, 1);
838 register int i;
839 register vector unsigned char
840 pixelsv1, pixelsv2,
841 pixelsavg;
842 register vector unsigned char
843 blockv, temp1, temp2;
844 register vector unsigned short
845 pixelssum1, pixelssum2, temp3;
846 register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
847 register const_vector unsigned short vcone = (const_vector unsigned short)vec_splat_u16(1);
848 register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);
849
850 temp1 = vec_ld(0, pixels);
851 temp2 = vec_ld(16, pixels);
852 pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
853 if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F)
854 {
855 pixelsv2 = temp2;
856 }
857 else
858 {
859 pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
860 }
861 pixelsv1 = vec_mergeh(vczero, pixelsv1);
862 pixelsv2 = vec_mergeh(vczero, pixelsv2);
863 pixelssum1 = vec_add((vector unsigned short)pixelsv1,
864 (vector unsigned short)pixelsv2);
865 pixelssum1 = vec_add(pixelssum1, vcone);
866
867 POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
868 for (i = 0; i < h ; i++) {
869 int rightside = ((unsigned long)block & 0x0000000F);
870 blockv = vec_ld(0, block);
871
872 temp1 = vec_ld(line_size, pixels);
873 temp2 = vec_ld(line_size + 16, pixels);
874 pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
875 if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
876 {
877 pixelsv2 = temp2;
878 }
879 else
880 {
881 pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
882 }
883
884 pixelsv1 = vec_mergeh(vczero, pixelsv1);
885 pixelsv2 = vec_mergeh(vczero, pixelsv2);
886 pixelssum2 = vec_add((vector unsigned short)pixelsv1,
887 (vector unsigned short)pixelsv2);
888 temp3 = vec_add(pixelssum1, pixelssum2);
889 temp3 = vec_sra(temp3, vctwo);
890 pixelssum1 = vec_add(pixelssum2, vcone);
891 pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);
892
893 if (rightside)
894 {
895 blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
896 }
897 else
898 {
899 blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
900 }
901
902 vec_st(blockv, 0, block);
903
904 block += line_size;
905 pixels += line_size;
906 }
907
908 POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
909 }
910
911 /* next one assumes that ((line_size % 16) == 0) */
912 void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
913 {
914 POWERPC_PERF_DECLARE(altivec_put_pixels16_xy2_num, 1);
915 register int i;
916 register vector unsigned char
917 pixelsv1, pixelsv2, pixelsv3, pixelsv4;
918 register vector unsigned char
919 blockv, temp1, temp2;
920 register vector unsigned short
921 pixelssum1, pixelssum2, temp3,
922 pixelssum3, pixelssum4, temp4;
923 register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
924 register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);
925
926 POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1);
927
928 temp1 = vec_ld(0, pixels);
929 temp2 = vec_ld(16, pixels);
930 pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
931 if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F)
932 {
933 pixelsv2 = temp2;
934 }
935 else
936 {
937 pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
938 }
939 pixelsv3 = vec_mergel(vczero, pixelsv1);
940 pixelsv4 = vec_mergel(vczero, pixelsv2);
941 pixelsv1 = vec_mergeh(vczero, pixelsv1);
942 pixelsv2 = vec_mergeh(vczero, pixelsv2);
943 pixelssum3 = vec_add((vector unsigned short)pixelsv3,
944 (vector unsigned short)pixelsv4);
945 pixelssum3 = vec_add(pixelssum3, vctwo);
946 pixelssum1 = vec_add((vector unsigned short)pixelsv1,
947 (vector unsigned short)pixelsv2);
948 pixelssum1 = vec_add(pixelssum1, vctwo);
949
950 for (i = 0; i < h ; i++) {
951 blockv = vec_ld(0, block);
952
953 temp1 = vec_ld(line_size, pixels);
954 temp2 = vec_ld(line_size + 16, pixels);
955 pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
956 if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
957 {
958 pixelsv2 = temp2;
959 }
960 else
961 {
962 pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
963 }
964
965 pixelsv3 = vec_mergel(vczero, pixelsv1);
966 pixelsv4 = vec_mergel(vczero, pixelsv2);
967 pixelsv1 = vec_mergeh(vczero, pixelsv1);
968 pixelsv2 = vec_mergeh(vczero, pixelsv2);
969
970 pixelssum4 = vec_add((vector unsigned short)pixelsv3,
971 (vector unsigned short)pixelsv4);
972 pixelssum2 = vec_add((vector unsigned short)pixelsv1,
973 (vector unsigned short)pixelsv2);
974 temp4 = vec_add(pixelssum3, pixelssum4);
975 temp4 = vec_sra(temp4, vctwo);
976 temp3 = vec_add(pixelssum1, pixelssum2);
977 temp3 = vec_sra(temp3, vctwo);
978
979 pixelssum3 = vec_add(pixelssum4, vctwo);
980 pixelssum1 = vec_add(pixelssum2, vctwo);
981
982 blockv = vec_packsu(temp3, temp4);
983
984 vec_st(blockv, 0, block);
985
986 block += line_size;
987 pixels += line_size;
988 }
989
990 POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1);
991 }
992
993 /* next one assumes that ((line_size % 16) == 0) */
994 void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
995 {
996 POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels16_xy2_num, 1);
997 register int i;
998 register vector unsigned char
999 pixelsv1, pixelsv2, pixelsv3, pixelsv4;
1000 register vector unsigned char
1001 blockv, temp1, temp2;
1002 register vector unsigned short
1003 pixelssum1, pixelssum2, temp3,
1004 pixelssum3, pixelssum4, temp4;
1005 register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
1006 register const_vector unsigned short vcone = (const_vector unsigned short)vec_splat_u16(1);
1007 register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);
1008
1009 POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
1010
1011 temp1 = vec_ld(0, pixels);
1012 temp2 = vec_ld(16, pixels);
1013 pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
1014 if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F)
1015 {
1016 pixelsv2 = temp2;
1017 }
1018 else
1019 {
1020 pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
1021 }
1022 pixelsv3 = vec_mergel(vczero, pixelsv1);
1023 pixelsv4 = vec_mergel(vczero, pixelsv2);
1024 pixelsv1 = vec_mergeh(vczero, pixelsv1);
1025 pixelsv2 = vec_mergeh(vczero, pixelsv2);
1026 pixelssum3 = vec_add((vector unsigned short)pixelsv3,
1027 (vector unsigned short)pixelsv4);
1028 pixelssum3 = vec_add(pixelssum3, vcone);
1029 pixelssum1 = vec_add((vector unsigned short)pixelsv1,
1030 (vector unsigned short)pixelsv2);
1031 pixelssum1 = vec_add(pixelssum1, vcone);
1032
1033 for (i = 0; i < h ; i++) {
1034 blockv = vec_ld(0, block);
1035
1036 temp1 = vec_ld(line_size, pixels);
1037 temp2 = vec_ld(line_size + 16, pixels);
1038 pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
1039 if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
1040 {
1041 pixelsv2 = temp2;
1042 }
1043 else
1044 {
1045 pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
1046 }
1047
1048 pixelsv3 = vec_mergel(vczero, pixelsv1);
1049 pixelsv4 = vec_mergel(vczero, pixelsv2);
1050 pixelsv1 = vec_mergeh(vczero, pixelsv1);
1051 pixelsv2 = vec_mergeh(vczero, pixelsv2);
1052
1053 pixelssum4 = vec_add((vector unsigned short)pixelsv3,
1054 (vector unsigned short)pixelsv4);
1055 pixelssum2 = vec_add((vector unsigned short)pixelsv1,
1056 (vector unsigned short)pixelsv2);
1057 temp4 = vec_add(pixelssum3, pixelssum4);
1058 temp4 = vec_sra(temp4, vctwo);
1059 temp3 = vec_add(pixelssum1, pixelssum2);
1060 temp3 = vec_sra(temp3, vctwo);
1061
1062 pixelssum3 = vec_add(pixelssum4, vcone);
1063 pixelssum1 = vec_add(pixelssum2, vcone);
1064
1065 blockv = vec_packsu(temp3, temp4);
1066
1067 vec_st(blockv, 0, block);
1068
1069 block += line_size;
1070 pixels += line_size;
1071 }
1072
1073 POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
1074 }
1075
1076 int hadamard8_diff8x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
1077 POWERPC_PERF_DECLARE(altivec_hadamard8_diff8x8_num, 1);
1078 int sum;
1079 register const_vector unsigned char vzero =
1080 (const_vector unsigned char)vec_splat_u8(0);
1081 register vector signed short temp0, temp1, temp2, temp3, temp4,
1082 temp5, temp6, temp7;
1083 POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1);
1084 {
1085 register const_vector signed short vprod1 =(const_vector signed short)
1086 AVV( 1,-1, 1,-1, 1,-1, 1,-1);
1087 register const_vector signed short vprod2 =(const_vector signed short)
1088 AVV( 1, 1,-1,-1, 1, 1,-1,-1);
1089 register const_vector signed short vprod3 =(const_vector signed short)
1090 AVV( 1, 1, 1, 1,-1,-1,-1,-1);
1091 register const_vector unsigned char perm1 = (const_vector unsigned char)
1092 AVV(0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
1093 0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D);
1094 register const_vector unsigned char perm2 = (const_vector unsigned char)
1095 AVV(0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
1096 0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B);
1097 register const_vector unsigned char perm3 = (const_vector unsigned char)
1098 AVV(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
1099 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
1100
1101 #define ONEITERBUTTERFLY(i, res) \
1102 { \
1103 register vector unsigned char src1, src2, srcO; \
1104 register vector unsigned char dst1, dst2, dstO; \
1105 register vector signed short srcV, dstV; \
1106 register vector signed short but0, but1, but2, op1, op2, op3; \
1107 src1 = vec_ld(stride * i, src); \
1108 src2 = vec_ld((stride * i) + 15, src); \
1109 srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
1110 dst1 = vec_ld(stride * i, dst); \
1111 dst2 = vec_ld((stride * i) + 15, dst); \
1112 dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
1113 /* promote the unsigned chars to signed shorts */ \
1114 /* we're in the 8x8 function, so we only care about the first 8 */ \
1115 srcV = \
1116 (vector signed short)vec_mergeh((vector signed char)vzero, \
1117 (vector signed char)srcO); \
1118 dstV = \
1119 (vector signed short)vec_mergeh((vector signed char)vzero, \
1120 (vector signed char)dstO); \
1121 /* subtractions inside the first butterfly */ \
1122 but0 = vec_sub(srcV, dstV); \
1123 op1 = vec_perm(but0, but0, perm1); \
1124 but1 = vec_mladd(but0, vprod1, op1); \
1125 op2 = vec_perm(but1, but1, perm2); \
1126 but2 = vec_mladd(but1, vprod2, op2); \
1127 op3 = vec_perm(but2, but2, perm3); \
1128 res = vec_mladd(but2, vprod3, op3); \
1129 }
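/* Each ONEITERBUTTERFLY above is a horizontal 8-point Walsh-Hadamard
 * transform of one row of src-dst differences: perm1/perm2/perm3 pair up
 * elements at distance 1, 2 and 4, and vec_mladd with the +/-1 vprod vectors
 * yields the sum and the difference of each pair in one step. */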
1130 ONEITERBUTTERFLY(0, temp0);
1131 ONEITERBUTTERFLY(1, temp1);
1132 ONEITERBUTTERFLY(2, temp2);
1133 ONEITERBUTTERFLY(3, temp3);
1134 ONEITERBUTTERFLY(4, temp4);
1135 ONEITERBUTTERFLY(5, temp5);
1136 ONEITERBUTTERFLY(6, temp6);
1137 ONEITERBUTTERFLY(7, temp7);
1138 }
1139 #undef ONEITERBUTTERFLY
1140 {
1141 register vector signed int vsum;
1142 register vector signed short line0 = vec_add(temp0, temp1);
1143 register vector signed short line1 = vec_sub(temp0, temp1);
1144 register vector signed short line2 = vec_add(temp2, temp3);
1145 register vector signed short line3 = vec_sub(temp2, temp3);
1146 register vector signed short line4 = vec_add(temp4, temp5);
1147 register vector signed short line5 = vec_sub(temp4, temp5);
1148 register vector signed short line6 = vec_add(temp6, temp7);
1149 register vector signed short line7 = vec_sub(temp6, temp7);
1150
1151 register vector signed short line0B = vec_add(line0, line2);
1152 register vector signed short line2B = vec_sub(line0, line2);
1153 register vector signed short line1B = vec_add(line1, line3);
1154 register vector signed short line3B = vec_sub(line1, line3);
1155 register vector signed short line4B = vec_add(line4, line6);
1156 register vector signed short line6B = vec_sub(line4, line6);
1157 register vector signed short line5B = vec_add(line5, line7);
1158 register vector signed short line7B = vec_sub(line5, line7);
1159
1160 register vector signed short line0C = vec_add(line0B, line4B);
1161 register vector signed short line4C = vec_sub(line0B, line4B);
1162 register vector signed short line1C = vec_add(line1B, line5B);
1163 register vector signed short line5C = vec_sub(line1B, line5B);
1164 register vector signed short line2C = vec_add(line2B, line6B);
1165 register vector signed short line6C = vec_sub(line2B, line6B);
1166 register vector signed short line3C = vec_add(line3B, line7B);
1167 register vector signed short line7C = vec_sub(line3B, line7B);
1168
1169 vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
1170 vsum = vec_sum4s(vec_abs(line1C), vsum);
1171 vsum = vec_sum4s(vec_abs(line2C), vsum);
1172 vsum = vec_sum4s(vec_abs(line3C), vsum);
1173 vsum = vec_sum4s(vec_abs(line4C), vsum);
1174 vsum = vec_sum4s(vec_abs(line5C), vsum);
1175 vsum = vec_sum4s(vec_abs(line6C), vsum);
1176 vsum = vec_sum4s(vec_abs(line7C), vsum);
1177 vsum = vec_sums(vsum, (vector signed int)vzero);
1178 vsum = vec_splat(vsum, 3);
1179 vec_ste(vsum, 0, &sum);
1180 }
1181 POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff8x8_num, 1);
1182 return sum;
1183 }
1184
1185 /*
1186 16x8 works with 16 elements; it avoids replicating
1187 loads and gives the compiler more room for scheduling.
1188 It's only used from inside hadamard8_diff16_altivec.
1189
1190 Unfortunately, gcc-3.3 seems to be a bit dumb: the
1191 compiled code has a LOT of spill code, as gcc
1192 (unlike xlc) cannot keep everything in registers
1193 by itself. The following code includes hand-made
1194 register allocation. It's not clean, but on
1195 a 7450 the resulting code is much faster (the best case
1196 falls from 700+ cycles to 550).
1197
1198 xlc doesn't add spill code, but it doesn't know how to
1199 schedule for the 7450, and its code isn't much faster than
1200 gcc-3.3's on the 7450 (though it uses 25% fewer instructions...)
1201
1202 On the 970, the hand-made RA is still a win (around 690
1203 vs. around 780), but xlc gets to around 660 on the
1204 regular C code...
1205 */
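/* Note: the REG_v(vN) annotations below presumably expand to GCC explicit
 * register variables (asm("vN")), pinning each temporary to a fixed AltiVec
 * register; this is the hand-made register allocation mentioned above. */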
1206
1207 static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h) {
1208 int sum;
1209 register vector signed short
1210 temp0 REG_v(v0),
1211 temp1 REG_v(v1),
1212 temp2 REG_v(v2),
1213 temp3 REG_v(v3),
1214 temp4 REG_v(v4),
1215 temp5 REG_v(v5),
1216 temp6 REG_v(v6),
1217 temp7 REG_v(v7);
1218 register vector signed short
1219 temp0S REG_v(v8),
1220 temp1S REG_v(v9),
1221 temp2S REG_v(v10),
1222 temp3S REG_v(v11),
1223 temp4S REG_v(v12),
1224 temp5S REG_v(v13),
1225 temp6S REG_v(v14),
1226 temp7S REG_v(v15);
1227 register const_vector unsigned char vzero REG_v(v31)=
1228 (const_vector unsigned char)vec_splat_u8(0);
1229 {
1230 register const_vector signed short vprod1 REG_v(v16)=
1231 (const_vector signed short)AVV( 1,-1, 1,-1, 1,-1, 1,-1);
1232 register const_vector signed short vprod2 REG_v(v17)=
1233 (const_vector signed short)AVV( 1, 1,-1,-1, 1, 1,-1,-1);
1234 register const_vector signed short vprod3 REG_v(v18)=
1235 (const_vector signed short)AVV( 1, 1, 1, 1,-1,-1,-1,-1);
1236 register const_vector unsigned char perm1 REG_v(v19)=
1237 (const_vector unsigned char)
1238 AVV(0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
1239 0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D);
1240 register const_vector unsigned char perm2 REG_v(v20)=
1241 (const_vector unsigned char)
1242 AVV(0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
1243 0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B);
1244 register const_vector unsigned char perm3 REG_v(v21)=
1245 (const_vector unsigned char)
1246 AVV(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
1247 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
1248
1249 #define ONEITERBUTTERFLY(i, res1, res2) \
1250 { \
1251 register vector unsigned char src1 REG_v(v22), \
1252 src2 REG_v(v23), \
1253 dst1 REG_v(v24), \
1254 dst2 REG_v(v25), \
1255 srcO REG_v(v22), \
1256 dstO REG_v(v23); \
1257 \
1258 register vector signed short srcV REG_v(v24), \
1259 dstV REG_v(v25), \
1260 srcW REG_v(v26), \
1261 dstW REG_v(v27), \
1262 but0 REG_v(v28), \
1263 but0S REG_v(v29), \
1264 op1 REG_v(v30), \
1265 but1 REG_v(v22), \
1266 op1S REG_v(v23), \
1267 but1S REG_v(v24), \
1268 op2 REG_v(v25), \
1269 but2 REG_v(v26), \
1270 op2S REG_v(v27), \
1271 but2S REG_v(v28), \
1272 op3 REG_v(v29), \
1273 op3S REG_v(v30); \
1274 \
1275 src1 = vec_ld(stride * i, src); \
1276 src2 = vec_ld((stride * i) + 16, src); \
1277 srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
1278 dst1 = vec_ld(stride * i, dst); \
1279 dst2 = vec_ld((stride * i) + 16, dst); \
1280 dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
1281 /* promote the unsigned chars to signed shorts */ \
1282 srcV = \
1283 (vector signed short)vec_mergeh((vector signed char)vzero, \
1284 (vector signed char)srcO); \
1285 dstV = \
1286 (vector signed short)vec_mergeh((vector signed char)vzero, \
1287 (vector signed char)dstO); \
1288 srcW = \
1289 (vector signed short)vec_mergel((vector signed char)vzero, \
1290 (vector signed char)srcO); \
1291 dstW = \
1292 (vector signed short)vec_mergel((vector signed char)vzero, \
1293 (vector signed char)dstO); \
1294 /* subtractions inside the first butterfly */ \
1295 but0 = vec_sub(srcV, dstV); \
1296 but0S = vec_sub(srcW, dstW); \
1297 op1 = vec_perm(but0, but0, perm1); \
1298 but1 = vec_mladd(but0, vprod1, op1); \
1299 op1S = vec_perm(but0S, but0S, perm1); \
1300 but1S = vec_mladd(but0S, vprod1, op1S); \
1301 op2 = vec_perm(but1, but1, perm2); \
1302 but2 = vec_mladd(but1, vprod2, op2); \
1303 op2S = vec_perm(but1S, but1S, perm2); \
1304 but2S = vec_mladd(but1S, vprod2, op2S); \
1305 op3 = vec_perm(but2, but2, perm3); \
1306 res1 = vec_mladd(but2, vprod3, op3); \
1307 op3S = vec_perm(but2S, but2S, perm3); \
1308 res2 = vec_mladd(but2S, vprod3, op3S); \
1309 }
1310 ONEITERBUTTERFLY(0, temp0, temp0S);
1311 ONEITERBUTTERFLY(1, temp1, temp1S);
1312 ONEITERBUTTERFLY(2, temp2, temp2S);
1313 ONEITERBUTTERFLY(3, temp3, temp3S);
1314 ONEITERBUTTERFLY(4, temp4, temp4S);
1315 ONEITERBUTTERFLY(5, temp5, temp5S);
1316 ONEITERBUTTERFLY(6, temp6, temp6S);
1317 ONEITERBUTTERFLY(7, temp7, temp7S);
1318 }
1319 #undef ONEITERBUTTERFLY
1320 {
1321 register vector signed int vsum;
1322 register vector signed short line0S, line1S, line2S, line3S, line4S,
1323 line5S, line6S, line7S, line0BS,line2BS,
1324 line1BS,line3BS,line4BS,line6BS,line5BS,
1325 line7BS,line0CS,line4CS,line1CS,line5CS,
1326 line2CS,line6CS,line3CS,line7CS;
1327
1328 register vector signed short line0 = vec_add(temp0, temp1);
1329 register vector signed short line1 = vec_sub(temp0, temp1);
1330 register vector signed short line2 = vec_add(temp2, temp3);
1331 register vector signed short line3 = vec_sub(temp2, temp3);
1332 register vector signed short line4 = vec_add(temp4, temp5);
1333 register vector signed short line5 = vec_sub(temp4, temp5);
1334 register vector signed short line6 = vec_add(temp6, temp7);
1335 register vector signed short line7 = vec_sub(temp6, temp7);
1336
1337 register vector signed short line0B = vec_add(line0, line2);
1338 register vector signed short line2B = vec_sub(line0, line2);
1339 register vector signed short line1B = vec_add(line1, line3);
1340 register vector signed short line3B = vec_sub(line1, line3);
1341 register vector signed short line4B = vec_add(line4, line6);
1342 register vector signed short line6B = vec_sub(line4, line6);
1343 register vector signed short line5B = vec_add(line5, line7);
1344 register vector signed short line7B = vec_sub(line5, line7);
1345
1346 register vector signed short line0C = vec_add(line0B, line4B);
1347 register vector signed short line4C = vec_sub(line0B, line4B);
1348 register vector signed short line1C = vec_add(line1B, line5B);
1349 register vector signed short line5C = vec_sub(line1B, line5B);
1350 register vector signed short line2C = vec_add(line2B, line6B);
1351 register vector signed short line6C = vec_sub(line2B, line6B);
1352 register vector signed short line3C = vec_add(line3B, line7B);
1353 register vector signed short line7C = vec_sub(line3B, line7B);
1354
1355 vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
1356 vsum = vec_sum4s(vec_abs(line1C), vsum);
1357 vsum = vec_sum4s(vec_abs(line2C), vsum);
1358 vsum = vec_sum4s(vec_abs(line3C), vsum);
1359 vsum = vec_sum4s(vec_abs(line4C), vsum);
1360 vsum = vec_sum4s(vec_abs(line5C), vsum);
1361 vsum = vec_sum4s(vec_abs(line6C), vsum);
1362 vsum = vec_sum4s(vec_abs(line7C), vsum);
1363
1364 line0S = vec_add(temp0S, temp1S);
1365 line1S = vec_sub(temp0S, temp1S);
1366 line2S = vec_add(temp2S, temp3S);
1367 line3S = vec_sub(temp2S, temp3S);
1368 line4S = vec_add(temp4S, temp5S);
1369 line5S = vec_sub(temp4S, temp5S);
1370 line6S = vec_add(temp6S, temp7S);
1371 line7S = vec_sub(temp6S, temp7S);
1372
1373 line0BS = vec_add(line0S, line2S);
1374 line2BS = vec_sub(line0S, line2S);
1375 line1BS = vec_add(line1S, line3S);
1376 line3BS = vec_sub(line1S, line3S);
1377 line4BS = vec_add(line4S, line6S);
1378 line6BS = vec_sub(line4S, line6S);
1379 line5BS = vec_add(line5S, line7S);
1380 line7BS = vec_sub(line5S, line7S);
1381
1382 line0CS = vec_add(line0BS, line4BS);
1383 line4CS = vec_sub(line0BS, line4BS);
1384 line1CS = vec_add(line1BS, line5BS);
1385 line5CS = vec_sub(line1BS, line5BS);
1386 line2CS = vec_add(line2BS, line6BS);
1387 line6CS = vec_sub(line2BS, line6BS);
1388 line3CS = vec_add(line3BS, line7BS);
1389 line7CS = vec_sub(line3BS, line7BS);
1390
1391 vsum = vec_sum4s(vec_abs(line0CS), vsum);
1392 vsum = vec_sum4s(vec_abs(line1CS), vsum);
1393 vsum = vec_sum4s(vec_abs(line2CS), vsum);
1394 vsum = vec_sum4s(vec_abs(line3CS), vsum);
1395 vsum = vec_sum4s(vec_abs(line4CS), vsum);
1396 vsum = vec_sum4s(vec_abs(line5CS), vsum);
1397 vsum = vec_sum4s(vec_abs(line6CS), vsum);
1398 vsum = vec_sum4s(vec_abs(line7CS), vsum);
1399 vsum = vec_sums(vsum, (vector signed int)vzero);
1400 vsum = vec_splat(vsum, 3);
1401 vec_ste(vsum, 0, &sum);
1402 }
1403 return sum;
1404 }
1405
1406 int hadamard8_diff16_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
1407 POWERPC_PERF_DECLARE(altivec_hadamard8_diff16_num, 1);
1408 int score;
1409 POWERPC_PERF_START_COUNT(altivec_hadamard8_diff16_num, 1);
1410 score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
1411 if (h==16) {
1412 dst += 8*stride;
1413 src += 8*stride;
1414 score += hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
1415 }
1416 POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff16_num, 1);
1417 return score;
1418 }
1419
1420 int has_altivec(void)
1421 {
1422 #ifdef __AMIGAOS4__
1423 ULONG result = 0;
1424 extern struct ExecIFace *IExec;
1425
1426 IExec->GetCPUInfoTags(GCIT_VectorUnit, &result, TAG_DONE);
1427 if (result == VECTORTYPE_ALTIVEC) return 1;
1428 return 0;
1429 #else /* __AMIGAOS4__ */
1430
1431 #ifdef SYS_DARWIN
1432 int sels[2] = {CTL_HW, HW_VECTORUNIT};
1433 int has_vu = 0;
1434 size_t len = sizeof(has_vu);
1435 int err;
1436
1437 err = sysctl(sels, 2, &has_vu, &len, NULL, 0);
1438
1439 if (err == 0) return (has_vu != 0);
1440 #else /* SYS_DARWIN */
1441 /* not Darwin, do it the brute-force way */
1442 /* this is borrowed from the libmpeg2 library */
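/* Sketch of the probe below: install a SIGILL handler, set VRSAVE (SPR 256)
 * so the OS will preserve the vector registers, then execute an AltiVec
 * instruction ("vand v0,v0,v0"). On a CPU without AltiVec the instruction
 * traps, the handler siglongjmps back and the function falls through to
 * "return 0". */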
1443 {
1444 signal (SIGILL, sigill_handler);
1445 if (sigsetjmp (jmpbuf, 1)) {
1446 signal (SIGILL, SIG_DFL);
1447 } else {
1448 canjump = 1;
1449
1450 asm volatile ("mtspr 256, %0\n\t"
1451 "vand %%v0, %%v0, %%v0"
1452 :
1453 : "r" (-1));
1454
1455 signal (SIGILL, SIG_DFL);
1456 return 1;
1457 }
1458 }
1459 #endif /* SYS_DARWIN */
1460 return 0;
1461 #endif /* __AMIGAOS4__ */
1462 }
1463
1464 static void vorbis_inverse_coupling_altivec(float *mag, float *ang,
1465 int blocksize)
1466 {
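/* This is the Vorbis magnitude/angle channel decoupling. Per element, the
 * sign of mag decides whether the angle residue is added to or subtracted
 * from the magnitude: vec_sl(t0, 31) flips the sign bit of ang wherever
 * mag <= 0, and the and/andc masks route the result into the new ang or the
 * new mag without branches. */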
1467 int i;
1468 vector float m, a;
1469 vector bool int t0, t1;
1470 const vector unsigned int v_31 = //XXX
1471 vec_add(vec_add(vec_splat_u32(15),vec_splat_u32(15)),vec_splat_u32(1));
1472 for(i=0; i<blocksize; i+=4) {
1473 m = vec_ld(0, mag+i);
1474 a = vec_ld(0, ang+i);
1475 t0 = vec_cmple(m, (vector float)vec_splat_u32(0));
1476 t1 = vec_cmple(a, (vector float)vec_splat_u32(0));
1477 a = vec_xor(a, (vector float) vec_sl((vector unsigned int)t0, v_31));
1478 t0 = (vector bool int)vec_and(a, t1);
1479 t1 = (vector bool int)vec_andc(a, t1);
1480 a = vec_sub(m, (vector float)t1);
1481 m = vec_add(m, (vector float)t0);
1482 vec_stl(a, 0, ang+i);
1483 vec_stl(m, 0, mag+i);
1484 }
1485 }
1486
1487 /* next one assumes that ((line_size % 8) == 0) */
1488 void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
1489 {
1490 POWERPC_PERF_DECLARE(altivec_avg_pixels8_xy2_num, 1);
1491 register int i;
1492 register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
1493 register vector unsigned char blockv, temp1, temp2, blocktemp;
1494 register vector unsigned short pixelssum1, pixelssum2, temp3;
1495
1496 register const_vector unsigned char vczero = (const_vector unsigned char)
1497 vec_splat_u8(0);
1498 register const_vector unsigned short vctwo = (const_vector unsigned short)
1499 vec_splat_u16(2);
1500
1501 temp1 = vec_ld(0, pixels);
1502 temp2 = vec_ld(16, pixels);
1503 pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
1504 if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
1505 pixelsv2 = temp2;
1506 } else {
1507 pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
1508 }
1509 pixelsv1 = vec_mergeh(vczero, pixelsv1);
1510 pixelsv2 = vec_mergeh(vczero, pixelsv2);
1511 pixelssum1 = vec_add((vector unsigned short)pixelsv1,
1512 (vector unsigned short)pixelsv2);
1513 pixelssum1 = vec_add(pixelssum1, vctwo);
1514
1515 POWERPC_PERF_START_COUNT(altivec_avg_pixels8_xy2_num, 1);
1516 for (i = 0; i < h ; i++) {
1517 int rightside = ((unsigned long)block & 0x0000000F);
1518 blockv = vec_ld(0, block);
1519
1520 temp1 = vec_ld(line_size, pixels);
1521 temp2 = vec_ld(line_size + 16, pixels);
1522 pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
1523 if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
1524 {
1525 pixelsv2 = temp2;
1526 } else {
1527 pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
1528 }
1529
1530 pixelsv1 = vec_mergeh(vczero, pixelsv1);
1531 pixelsv2 = vec_mergeh(vczero, pixelsv2);
1532 pixelssum2 = vec_add((vector unsigned short)pixelsv1,
1533 (vector unsigned short)pixelsv2);
1534 temp3 = vec_add(pixelssum1, pixelssum2);
1535 temp3 = vec_sra(temp3, vctwo);
1536 pixelssum1 = vec_add(pixelssum2, vctwo);
1537 pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);
1538
1539 if (rightside) {
1540 blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
1541 } else {
1542 blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
1543 }
1544
1545 blockv = vec_avg(blocktemp, blockv);
1546 vec_st(blockv, 0, block);
1547
1548 block += line_size;
1549 pixels += line_size;
1550 }
1551
1552 POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_xy2_num, 1);
1553 }
1554
1555 void dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx)
1556 {
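/* pix_abs[0][] holds the 16x16 SAD functions and pix_abs[1][] the 8x8 ones;
 * within each row, index 0 is the full-pel SAD and 1/2/3 are the half-pel
 * x, y and xy variants, as the assignments below reflect. */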
1557 c->pix_abs[0][1] = sad16_x2_altivec;
1558 c->pix_abs[0][2] = sad16_y2_altivec;
1559 c->pix_abs[0][3] = sad16_xy2_altivec;
1560 c->pix_abs[0][0] = sad16_altivec;
1561 c->pix_abs[1][0] = sad8_altivec;
1562 c->sad[0]= sad16_altivec;
1563 c->sad[1]= sad8_altivec;
1564 c->pix_norm1 = pix_norm1_altivec;
1565 c->sse[1]= sse8_altivec;
1566 c->sse[0]= sse16_altivec;
1567 c->pix_sum = pix_sum_altivec;
1568 c->diff_pixels = diff_pixels_altivec;
1569 c->get_pixels = get_pixels_altivec;
1570 c->add_bytes= add_bytes_altivec;
1571 c->put_pixels_tab[0][0] = put_pixels16_altivec;
1572 /* the two functions do the same thing, so use the same code */
1573 c->put_no_rnd_pixels_tab[0][0] = put_pixels16_altivec;
1574 c->avg_pixels_tab[0][0] = avg_pixels16_altivec;
1575 c->avg_pixels_tab[1][0] = avg_pixels8_altivec;
1576 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec;
1577 c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec;
1578 c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
1579 c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec;
1580 c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;
1581
1582 c->hadamard8_diff[0] = hadamard8_diff16_altivec;
1583 c->hadamard8_diff[1] = hadamard8_diff8x8_altivec;
1584 #ifdef CONFIG_VORBIS_DECODER
1585 c->vorbis_inverse_coupling = vorbis_inverse_coupling_altivec;
1586 #endif
1587 }