Remove PPC perf counter support
[libav.git] / libavcodec / ppc / dsputil_altivec.c
/*
 * Copyright (c) 2002 Brian Foley
 * Copyright (c) 2002 Dieter Shirley
 * Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif
#include "libavcodec/dsputil.h"
#include "util_altivec.h"
#include "types_altivec.h"
#include "dsputil_altivec.h"

static int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector unsigned char *tv;
    vector unsigned char pix1v, pix2v, pix2iv, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);
    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix2v: pix2[0]-pix2[15]  pix2iv: pix2[1]-pix2[16] */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix2[0];
        pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

        tv = (vector unsigned char *) &pix2[1];
        pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix2iv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
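
/* For illustration only (not compiled): a scalar sketch of what the vector
   loop above computes. vec_avg() rounds up, i.e. (a + b + 1) >> 1, so each
   reference pixel is the rounded average of two horizontal neighbours. */
#if 0
static int sad16_x2_c_ref(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i, j, s = 0;

    for (i = 0; i < h; i++) {
        for (j = 0; j < 16; j++)
            s += FFABS(pix1[j] - ((pix2[j] + pix2[j + 1] + 1) >> 1));
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}
#endif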

static int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector unsigned char *tv;
    vector unsigned char pix1v, pix2v, pix3v, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;
    uint8_t *pix3 = pix2 + line_size;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);

    /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
       iteration becomes pix2 in the next iteration. We can use this
       fact to avoid a potentially expensive unaligned read each
       time around the loop.
       Read unaligned pixels into our vectors. The vectors are as follows:
       pix2v: pix2[0]-pix2[15] */
    tv = (vector unsigned char *) &pix2[0];
    pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix3v: pix3[0]-pix3[15] */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix3[0];
        pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix3v);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2v = pix3v;
        pix3 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);
    return s;
}

static int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    uint8_t *pix3 = pix2 + line_size;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    const vector unsigned short two = (const vector unsigned short)vec_splat_u16(2);
    vector unsigned char *tv, avgv, t5;
    vector unsigned char pix1v, pix2v, pix3v, pix2iv, pix3iv;
    vector unsigned short pix2lv, pix2hv, pix2ilv, pix2ihv;
    vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
    vector unsigned short avghv, avglv;
    vector unsigned short t1, t2, t3, t4;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    s = 0;

    /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
       iteration becomes pix2 in the next iteration. We can use this
       fact to avoid a potentially expensive unaligned read, as well
       as some splitting and vector addition, each time around the loop.
       Read unaligned pixels into our vectors. The vectors are as follows:
       pix2v: pix2[0]-pix2[15]  pix2iv: pix2[1]-pix2[16]
       Split the pixel vectors into shorts. */
    tv = (vector unsigned char *) &pix2[0];
    pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

    tv = (vector unsigned char *) &pix2[1];
    pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));

    pix2hv  = (vector unsigned short) vec_mergeh(zero, pix2v);
    pix2lv  = (vector unsigned short) vec_mergel(zero, pix2v);
    pix2ihv = (vector unsigned short) vec_mergeh(zero, pix2iv);
    pix2ilv = (vector unsigned short) vec_mergel(zero, pix2iv);
    t1 = vec_add(pix2hv, pix2ihv);
    t2 = vec_add(pix2lv, pix2ilv);

    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix3v: pix3[0]-pix3[15]  pix3iv: pix3[1]-pix3[16] */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix3[0];
        pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));

        tv = (vector unsigned char *) &pix3[1];
        pix3iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[1]));

        /* Note that AltiVec does have vec_avg, but this works on vector pairs
           and rounds up. We could do avg(avg(a,b), avg(c,d)), but the rounding
           would mean that, for example, avg(3,0,0,1) = 2, when it should be 1.
           Instead, we have to split the pixel vectors into vectors of shorts
           and do the averaging by hand. */

        /* Split the pixel vectors into shorts */
        pix3hv  = (vector unsigned short) vec_mergeh(zero, pix3v);
        pix3lv  = (vector unsigned short) vec_mergel(zero, pix3v);
        pix3ihv = (vector unsigned short) vec_mergeh(zero, pix3iv);
        pix3ilv = (vector unsigned short) vec_mergel(zero, pix3iv);

        /* Do the averaging on them */
        t3 = vec_add(pix3hv, pix3ihv);
        t4 = vec_add(pix3lv, pix3ilv);

        avghv = vec_sr(vec_add(vec_add(t1, t3), two), two);
        avglv = vec_sr(vec_add(vec_add(t2, t4), two), two);

        /* Pack the shorts back into a result */
        avgv = vec_pack(avghv, avglv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix3 += line_size;
        /* Transfer the calculated values for pix3 into pix2 */
        t1 = t3;
        t2 = t4;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
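
/* Scalar sketch of the metric above, for illustration (not compiled): a SAD
   against the rounded four-pixel average (a + b + c + d + 2) >> 2, which the
   nested vec_avg() approach cannot reproduce exactly. */
#if 0
static int sad16_xy2_c_ref(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i, j, s = 0;
    uint8_t *pix3 = pix2 + line_size;

    for (i = 0; i < h; i++) {
        for (j = 0; j < 16; j++)
            s += FFABS(pix1[j] - ((pix2[j] + pix2[j + 1] +
                                   pix3[j] + pix3[j + 1] + 2) >> 2));
        pix1 += line_size;
        pix2 += line_size;
        pix3 += line_size;
    }
    return s;
}
#endif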

static int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_perm(pix1v[0], pix1v[1], perm1);
        t2 = vec_perm(pix2v[0], pix2v[1], perm2);

        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    permclear = (vector unsigned char){255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0};

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2.
           Since we're reading 16 pixels, and actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);

        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static int pix_norm1_altivec(uint8_t *pix, int line_size)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char *tv;
    vector unsigned char pixv;
    vector unsigned int sv;
    vector signed int sum;

    sv = (vector unsigned int)vec_splat_u32(0);

    s = 0;
    for (i = 0; i < 16; i++) {
        /* Read in the potentially unaligned pixels */
        tv = (vector unsigned char *) pix;
        pixv = vec_perm(tv[0], tv[1], vec_lvsl(0, pix));

        /* Square the values, and add them to our sum */
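        /* vec_msum() with two unsigned char operands multiplies each byte by
           itself and accumulates every group of four adjacent products into
           one of the four 32-bit lanes of sv, so no intermediate widening to
           shorts is needed here. */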
        sv = vec_msum(pixv, pixv, sv);

        pix += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sum = vec_sums((vector signed int) sv, (vector signed int) zero);
    sum = vec_splat(sum, 3);
    vec_ste(sum, 0, &s);

    return s;
}

/**
 * Sum of Squared Errors for an 8x8 block.
 * AltiVec-enhanced.
 * It's the sad8_altivec code above w/ squaring added.
 */
static int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    permclear = (vector unsigned char){255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0};

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2.
           Since we're reading 16 pixels, and actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);

        /* Since we want to use unsigned chars, we can take advantage
           of the fact that abs(a-b)^2 = (a-b)^2. */

        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}

/**
 * Sum of Squared Errors for a 16x16 block.
 * AltiVec-enhanced.
 * It's the sad16_altivec code above w/ squaring added.
 */
static int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_perm(pix1v[0], pix1v[1], perm1);
        t2 = vec_perm(pix2v[0], pix2v[1], perm2);

        /* Since we want to use unsigned chars, we can take advantage
           of the fact that abs(a-b)^2 = (a-b)^2. */

        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}

static int pix_sum_altivec(uint8_t *pix, int line_size)
{
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm, *pixv;
    vector unsigned char t1;
    vector unsigned int sad;
    vector signed int sumdiffs;

    int i;
    int s;

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < 16; i++) {
        /* Read the potentially unaligned 16 pixels into t1 */
        perm = vec_lvsl(0, pix);
        pixv = (vector unsigned char *) pix;
        t1 = vec_perm(pixv[0], pixv[1], perm);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t1, sad);

        pix += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static void get_pixels_altivec(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector signed short shorts;

    for (i = 0; i < 8; i++) {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, pixels);
        pixv = (vector unsigned char *) pixels;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // Convert the bytes into shorts.
        shorts = (vector signed short)vec_mergeh(zero, bytes);

        // Save the data to the block; we assume the block is 16-byte aligned.
        vec_st(shorts, i * 16, (vector signed short *)block);

        pixels += line_size;
    }
}

static void diff_pixels_altivec(DCTELEM *restrict block, const uint8_t *s1,
                                const uint8_t *s2, int stride)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector signed short shorts1, shorts2;

    for (i = 0; i < 4; i++) {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // Convert the bytes into shorts.
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels.
        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // Convert the bytes into shorts.
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction.
        shorts1 = vec_sub(shorts1, shorts2);

        // Save the data to the block; we assume the block is 16-byte aligned.
        vec_st(shorts1, 0, (vector signed short *)block);

        s1 += stride;
        s2 += stride;
        block += 8;

        // The code below is a copy of the code above; this is a manual unroll.

        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // Convert the bytes into shorts.
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels.
        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // Convert the bytes into shorts.
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction.
        shorts1 = vec_sub(shorts1, shorts2);

        // Save the data to the block; we assume the block is 16-byte aligned.
        vec_st(shorts1, 0, (vector signed short *)block);

        s1 += stride;
        s2 += stride;
        block += 8;
    }
}

static void clear_block_altivec(DCTELEM *block) {
    LOAD_ZERO;
    vec_st(zero_s16v,   0, block);
    vec_st(zero_s16v,  16, block);
    vec_st(zero_s16v,  32, block);
    vec_st(zero_s16v,  48, block);
    vec_st(zero_s16v,  64, block);
    vec_st(zero_s16v,  80, block);
    vec_st(zero_s16v,  96, block);
    vec_st(zero_s16v, 112, block);
}

static void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
    register int i;
    register vector unsigned char vdst, vsrc;

    /* dst and src are 16-byte aligned (guaranteed) */
    for (i = 0; (i + 15) < w; i += 16) {
        vdst = vec_ld(i, (unsigned char *)dst);
        vsrc = vec_ld(i, (unsigned char *)src);
        vdst = vec_add(vsrc, vdst);
        vec_st(vdst, i, (unsigned char *)dst);
    }
    /* If w is not a multiple of 16, the remaining bytes must be added,
       not copied, to match the scalar add_bytes behaviour. */
    for (; (i < w); i++) {
        dst[i] = dst[i] + src[i];
    }
}

/* next one assumes that ((line_size % 16) == 0) */
void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2;
    register vector unsigned char pixelsv1B, pixelsv2B;
    register vector unsigned char pixelsv1C, pixelsv2C;
    register vector unsigned char pixelsv1D, pixelsv2D;

    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;
    register int line_size_2 = line_size << 1;
    register int line_size_3 = line_size + line_size_2;
    register int line_size_4 = line_size << 2;

    // Hand-unrolling the loop by 4 gains about 15%;
    // the minimum execution time goes from 74 to 60 cycles.
    // It's faster than -funroll-loops, but using
    // -funroll-loops w/ this is bad - 74 cycles again.
    // All this is on a 7450, tuning for the 7450.
#if 0
    for (i = 0; i < h; i++) {
        pixelsv1 = vec_ld(0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
               0, block);
        pixels += line_size;
        block  += line_size;
    }
#else
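    /* The second-operand loads below use offset 15 rather than 16: vec_ld()
       rounds its effective address down to a 16-byte boundary, so loading at
       pixels + 15 fetches the block holding the last needed byte without
       ever touching the 16 bytes past it when pixels is already aligned. */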
    for (i = 0; i < h; i += 4) {
        pixelsv1  = vec_ld( 0, pixels);
        pixelsv2  = vec_ld(15, pixels);
        pixelsv1B = vec_ld(line_size, pixels);
        pixelsv2B = vec_ld(15 + line_size, pixels);
        pixelsv1C = vec_ld(line_size_2, pixels);
        pixelsv2C = vec_ld(15 + line_size_2, pixels);
        pixelsv1D = vec_ld(line_size_3, pixels);
        pixelsv2D = vec_ld(15 + line_size_3, pixels);
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
               0, (unsigned char *)block);
        vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
               line_size, (unsigned char *)block);
        vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
               line_size_2, (unsigned char *)block);
        vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
               line_size_3, (unsigned char *)block);
        pixels += line_size_4;
        block  += line_size_4;
    }
#endif
}

/* next one assumes that ((line_size % 16) == 0) */
#define op_avg(a,b)  a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
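/* op_avg is a rounding-up bytewise average done without widening: since
   a + b == (a ^ b) + 2 * (a & b), we get (a + b + 1) >> 1 == (a | b) - ((a ^ b) >> 1).
   The 0xFEFEFEFE mask clears each byte's low bit before the shift so no bit
   leaks into the neighbouring byte. (The macro is unused in this file;
   vec_avg() performs the same rounded average on whole vectors below.) */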
void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;

    for (i = 0; i < h; i++) {
        pixelsv1 = vec_ld( 0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        blockv   = vec_ld(0, block);
        pixelsv  = vec_perm(pixelsv1, pixelsv2, perm);
        blockv   = vec_avg(blockv, pixelsv);
        vec_st(blockv, 0, (unsigned char *)block);
        pixels += line_size;
        block  += line_size;
    }
}

/* next one assumes that ((line_size % 8) == 0) */
static void avg_pixels8_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    int i;

    for (i = 0; i < h; i++) {
        /* block is 8-byte aligned, so we're either in the left half
           of a 16-byte line (16-byte aligned) or in the right half (not) */
        int rightside = ((unsigned long)block & 0x0000000F);

        blockv   = vec_ld(0, block);
        pixelsv1 = vec_ld( 0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        pixelsv  = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));

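        /* vcprm() builds a vec_perm selector from 32-bit word indices: 0-3
           pick words of the first operand, s0-s3 words of the second. So
           (0,1,s0,s1) keeps the left 8 bytes of blockv and drops the new
           pixels into the right half; (s0,s1,2,3) is the mirror case. */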
        if (rightside) {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(0, 1, s0, s1));
        } else {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(s0, s1, 2, 3));
        }

        blockv = vec_avg(blockv, pixelsv);

        vec_st(blockv, 0, block);

        pixels += line_size;
        block  += line_size;
    }
}

/* next one assumes that ((line_size % 8) == 0) */
static void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
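    /* When pixels sits at offset 15 of its 16-byte block, pixels + 1 is
       16-byte aligned and vec_lvsl(1, pixels) would select from the block
       after temp2; in that case temp2 already holds pixels[1..16] verbatim. */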
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block  += line_size;
        pixels += line_size;
    }
}

/* next one assumes that ((line_size % 8) == 0) */
static void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vcone);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block  += line_size;
        pixels += line_size;
    }
}

/* next one assumes that ((line_size % 16) == 0) */
static void put_pixels16_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
                                   pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vctwo);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h; i++) {
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);

        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block  += line_size;
        pixels += line_size;
    }
}

/* next one assumes that ((line_size % 16) == 0) */
static void put_no_rnd_pixels16_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
                                   pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vcone);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h; i++) {
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);

        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vcone);
        pixelssum1 = vec_add(pixelssum2, vcone);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block  += line_size;
        pixels += line_size;
    }
}

static int hadamard8_diff8x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
    int sum;
    register const vector unsigned char vzero =
        (const vector unsigned char)vec_splat_u8(0);
    register vector signed short temp0, temp1, temp2, temp3, temp4,
                                 temp5, temp6, temp7;
    {
        register const vector signed short vprod1 = (const vector signed short)
            { 1,-1, 1,-1, 1,-1, 1,-1 };
        register const vector signed short vprod2 = (const vector signed short)
            { 1, 1,-1,-1, 1, 1,-1,-1 };
        register const vector signed short vprod3 = (const vector signed short)
            { 1, 1, 1, 1,-1,-1,-1,-1 };
        register const vector unsigned char perm1 = (const vector unsigned char)
            {0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
             0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D};
        register const vector unsigned char perm2 = (const vector unsigned char)
            {0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
             0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B};
        register const vector unsigned char perm3 = (const vector unsigned char)
            {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
             0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};

#define ONEITERBUTTERFLY(i, res) \
    { \
        register vector unsigned char src1, src2, srcO; \
        register vector unsigned char dst1, dst2, dstO; \
        register vector signed short srcV, dstV; \
        register vector signed short but0, but1, but2, op1, op2, op3; \
        src1 = vec_ld(stride * i, src); \
        src2 = vec_ld((stride * i) + 15, src); \
        srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
        dst1 = vec_ld(stride * i, dst); \
        dst2 = vec_ld((stride * i) + 15, dst); \
        dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
        /* promote the unsigned chars to signed shorts */ \
        /* we're in the 8x8 function, we only care for the first 8 */ \
        srcV = (vector signed short)vec_mergeh((vector signed char)vzero, \
                                               (vector signed char)srcO); \
        dstV = (vector signed short)vec_mergeh((vector signed char)vzero, \
                                               (vector signed char)dstO); \
        /* subtractions inside the first butterfly */ \
        but0 = vec_sub(srcV, dstV); \
        op1  = vec_perm(but0, but0, perm1); \
        but1 = vec_mladd(but0, vprod1, op1); \
        op2  = vec_perm(but1, but1, perm2); \
        but2 = vec_mladd(but1, vprod2, op2); \
        op3  = vec_perm(but2, but2, perm3); \
        res  = vec_mladd(but2, vprod3, op3); \
    }
        ONEITERBUTTERFLY(0, temp0);
        ONEITERBUTTERFLY(1, temp1);
        ONEITERBUTTERFLY(2, temp2);
        ONEITERBUTTERFLY(3, temp3);
        ONEITERBUTTERFLY(4, temp4);
        ONEITERBUTTERFLY(5, temp5);
        ONEITERBUTTERFLY(6, temp6);
        ONEITERBUTTERFLY(7, temp7);
    }
#undef ONEITERBUTTERFLY
    {
        register vector signed int vsum;
        register vector signed short line0 = vec_add(temp0, temp1);
        register vector signed short line1 = vec_sub(temp0, temp1);
        register vector signed short line2 = vec_add(temp2, temp3);
        register vector signed short line3 = vec_sub(temp2, temp3);
        register vector signed short line4 = vec_add(temp4, temp5);
        register vector signed short line5 = vec_sub(temp4, temp5);
        register vector signed short line6 = vec_add(temp6, temp7);
        register vector signed short line7 = vec_sub(temp6, temp7);

        register vector signed short line0B = vec_add(line0, line2);
        register vector signed short line2B = vec_sub(line0, line2);
        register vector signed short line1B = vec_add(line1, line3);
        register vector signed short line3B = vec_sub(line1, line3);
        register vector signed short line4B = vec_add(line4, line6);
        register vector signed short line6B = vec_sub(line4, line6);
        register vector signed short line5B = vec_add(line5, line7);
        register vector signed short line7B = vec_sub(line5, line7);

        register vector signed short line0C = vec_add(line0B, line4B);
        register vector signed short line4C = vec_sub(line0B, line4B);
        register vector signed short line1C = vec_add(line1B, line5B);
        register vector signed short line5C = vec_sub(line1B, line5B);
        register vector signed short line2C = vec_add(line2B, line6B);
        register vector signed short line6C = vec_sub(line2B, line6B);
        register vector signed short line3C = vec_add(line3B, line7B);
        register vector signed short line7C = vec_sub(line3B, line7B);

        vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
        vsum = vec_sum4s(vec_abs(line1C), vsum);
        vsum = vec_sum4s(vec_abs(line2C), vsum);
        vsum = vec_sum4s(vec_abs(line3C), vsum);
        vsum = vec_sum4s(vec_abs(line4C), vsum);
        vsum = vec_sum4s(vec_abs(line5C), vsum);
        vsum = vec_sum4s(vec_abs(line6C), vsum);
        vsum = vec_sum4s(vec_abs(line7C), vsum);
        vsum = vec_sums(vsum, (vector signed int)vzero);
        vsum = vec_splat(vsum, 3);
        vec_ste(vsum, 0, &sum);
    }
    return sum;
}
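
/* Scalar sketch of the metric above, for illustration (not compiled):
   Hadamard-transform the 8x8 difference block along rows and then columns,
   and sum the absolute transform coefficients. The butterflies below mirror
   the three perm/mladd stages above, so the sums agree. */
#if 0
static int hadamard8_diff8x8_c_ref(uint8_t *dst, uint8_t *src, int stride)
{
    int i, j, k, l, a, b, sum = 0;
    int16_t d[64];

    for (i = 0; i < 8; i++)
        for (j = 0; j < 8; j++)
            d[i * 8 + j] = src[i * stride + j] - dst[i * stride + j];

    for (i = 0; i < 8; i++)                     /* transform each row */
        for (k = 1; k < 8; k <<= 1)
            for (j = 0; j < 8; j += k << 1)
                for (l = j; l < j + k; l++) {
                    a = d[i * 8 + l];
                    b = d[i * 8 + l + k];
                    d[i * 8 + l]     = a + b;
                    d[i * 8 + l + k] = a - b;
                }

    for (j = 0; j < 8; j++)                     /* transform each column */
        for (k = 1; k < 8; k <<= 1)
            for (i = 0; i < 8; i += k << 1)
                for (l = i; l < i + k; l++) {
                    a = d[l * 8 + j];
                    b = d[(l + k) * 8 + j];
                    d[l * 8 + j]       = a + b;
                    d[(l + k) * 8 + j] = a - b;
                }

    for (i = 0; i < 64; i++)
        sum += FFABS(d[i]);
    return sum;
}
#endif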

/*
16x8 works with 16 elements; it allows us to avoid replicating loads, and
gives the compiler more room for scheduling. It's only used from
inside hadamard8_diff16_altivec.

Unfortunately, it seems gcc-3.3 is a bit dumb, and the compiled code has a LOT
of spill code; it seems gcc (unlike xlc) cannot keep everything in registers
by itself. The following code includes hand-made register allocation. It's not
clean, but on a 7450 the resulting code is much faster (the best case falls from
700+ cycles to 550).

xlc doesn't add spill code, but it doesn't know how to schedule for the 7450,
and its code isn't much faster than gcc-3.3's on the 7450 (but uses 25% fewer
instructions...)

On the 970, the hand-made RA is still a win (around 690 vs. around 780), but
xlc goes down to around 660 on the regular C code...
*/

static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h) {
    int sum;
    register vector signed short
        temp0 __asm__ ("v0"),
        temp1 __asm__ ("v1"),
        temp2 __asm__ ("v2"),
        temp3 __asm__ ("v3"),
        temp4 __asm__ ("v4"),
        temp5 __asm__ ("v5"),
        temp6 __asm__ ("v6"),
        temp7 __asm__ ("v7");
    register vector signed short
        temp0S __asm__ ("v8"),
        temp1S __asm__ ("v9"),
        temp2S __asm__ ("v10"),
        temp3S __asm__ ("v11"),
        temp4S __asm__ ("v12"),
        temp5S __asm__ ("v13"),
        temp6S __asm__ ("v14"),
        temp7S __asm__ ("v15");
    register const vector unsigned char vzero __asm__ ("v31") =
        (const vector unsigned char)vec_splat_u8(0);
    {
        register const vector signed short vprod1 __asm__ ("v16") =
            (const vector signed short){ 1,-1, 1,-1, 1,-1, 1,-1 };
        register const vector signed short vprod2 __asm__ ("v17") =
            (const vector signed short){ 1, 1,-1,-1, 1, 1,-1,-1 };
        register const vector signed short vprod3 __asm__ ("v18") =
            (const vector signed short){ 1, 1, 1, 1,-1,-1,-1,-1 };
        register const vector unsigned char perm1 __asm__ ("v19") =
            (const vector unsigned char)
            {0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
             0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D};
        register const vector unsigned char perm2 __asm__ ("v20") =
            (const vector unsigned char)
            {0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
             0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B};
        register const vector unsigned char perm3 __asm__ ("v21") =
            (const vector unsigned char)
            {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
             0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};

#define ONEITERBUTTERFLY(i, res1, res2) \
    { \
        register vector unsigned char src1 __asm__ ("v22"), \
                                      src2 __asm__ ("v23"), \
                                      dst1 __asm__ ("v24"), \
                                      dst2 __asm__ ("v25"), \
                                      srcO __asm__ ("v22"), \
                                      dstO __asm__ ("v23"); \
        \
        register vector signed short srcV  __asm__ ("v24"), \
                                     dstV  __asm__ ("v25"), \
                                     srcW  __asm__ ("v26"), \
                                     dstW  __asm__ ("v27"), \
                                     but0  __asm__ ("v28"), \
                                     but0S __asm__ ("v29"), \
                                     op1   __asm__ ("v30"), \
                                     but1  __asm__ ("v22"), \
                                     op1S  __asm__ ("v23"), \
                                     but1S __asm__ ("v24"), \
                                     op2   __asm__ ("v25"), \
                                     but2  __asm__ ("v26"), \
                                     op2S  __asm__ ("v27"), \
                                     but2S __asm__ ("v28"), \
                                     op3   __asm__ ("v29"), \
                                     op3S  __asm__ ("v30"); \
        \
        src1 = vec_ld(stride * i, src); \
        src2 = vec_ld((stride * i) + 16, src); \
        srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
        dst1 = vec_ld(stride * i, dst); \
        dst2 = vec_ld((stride * i) + 16, dst); \
        dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
        /* promote the unsigned chars to signed shorts */ \
        srcV = (vector signed short)vec_mergeh((vector signed char)vzero, \
                                               (vector signed char)srcO); \
        dstV = (vector signed short)vec_mergeh((vector signed char)vzero, \
                                               (vector signed char)dstO); \
        srcW = (vector signed short)vec_mergel((vector signed char)vzero, \
                                               (vector signed char)srcO); \
        dstW = (vector signed short)vec_mergel((vector signed char)vzero, \
                                               (vector signed char)dstO); \
        /* subtractions inside the first butterfly */ \
        but0  = vec_sub(srcV, dstV); \
        but0S = vec_sub(srcW, dstW); \
        op1   = vec_perm(but0, but0, perm1); \
        but1  = vec_mladd(but0, vprod1, op1); \
        op1S  = vec_perm(but0S, but0S, perm1); \
        but1S = vec_mladd(but0S, vprod1, op1S); \
        op2   = vec_perm(but1, but1, perm2); \
        but2  = vec_mladd(but1, vprod2, op2); \
        op2S  = vec_perm(but1S, but1S, perm2); \
        but2S = vec_mladd(but1S, vprod2, op2S); \
        op3   = vec_perm(but2, but2, perm3); \
        res1  = vec_mladd(but2, vprod3, op3); \
        op3S  = vec_perm(but2S, but2S, perm3); \
        res2  = vec_mladd(but2S, vprod3, op3S); \
    }
        ONEITERBUTTERFLY(0, temp0, temp0S);
        ONEITERBUTTERFLY(1, temp1, temp1S);
        ONEITERBUTTERFLY(2, temp2, temp2S);
        ONEITERBUTTERFLY(3, temp3, temp3S);
        ONEITERBUTTERFLY(4, temp4, temp4S);
        ONEITERBUTTERFLY(5, temp5, temp5S);
        ONEITERBUTTERFLY(6, temp6, temp6S);
        ONEITERBUTTERFLY(7, temp7, temp7S);
    }
#undef ONEITERBUTTERFLY
    {
        register vector signed int vsum;
        register vector signed short line0S, line1S, line2S, line3S, line4S,
                                     line5S, line6S, line7S, line0BS, line2BS,
                                     line1BS, line3BS, line4BS, line6BS, line5BS,
                                     line7BS, line0CS, line4CS, line1CS, line5CS,
                                     line2CS, line6CS, line3CS, line7CS;

        register vector signed short line0 = vec_add(temp0, temp1);
        register vector signed short line1 = vec_sub(temp0, temp1);
        register vector signed short line2 = vec_add(temp2, temp3);
        register vector signed short line3 = vec_sub(temp2, temp3);
        register vector signed short line4 = vec_add(temp4, temp5);
        register vector signed short line5 = vec_sub(temp4, temp5);
        register vector signed short line6 = vec_add(temp6, temp7);
        register vector signed short line7 = vec_sub(temp6, temp7);

        register vector signed short line0B = vec_add(line0, line2);
        register vector signed short line2B = vec_sub(line0, line2);
        register vector signed short line1B = vec_add(line1, line3);
        register vector signed short line3B = vec_sub(line1, line3);
        register vector signed short line4B = vec_add(line4, line6);
        register vector signed short line6B = vec_sub(line4, line6);
        register vector signed short line5B = vec_add(line5, line7);
        register vector signed short line7B = vec_sub(line5, line7);

        register vector signed short line0C = vec_add(line0B, line4B);
        register vector signed short line4C = vec_sub(line0B, line4B);
        register vector signed short line1C = vec_add(line1B, line5B);
        register vector signed short line5C = vec_sub(line1B, line5B);
        register vector signed short line2C = vec_add(line2B, line6B);
        register vector signed short line6C = vec_sub(line2B, line6B);
        register vector signed short line3C = vec_add(line3B, line7B);
        register vector signed short line7C = vec_sub(line3B, line7B);

        vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
        vsum = vec_sum4s(vec_abs(line1C), vsum);
        vsum = vec_sum4s(vec_abs(line2C), vsum);
        vsum = vec_sum4s(vec_abs(line3C), vsum);
        vsum = vec_sum4s(vec_abs(line4C), vsum);
        vsum = vec_sum4s(vec_abs(line5C), vsum);
        vsum = vec_sum4s(vec_abs(line6C), vsum);
        vsum = vec_sum4s(vec_abs(line7C), vsum);

        line0S = vec_add(temp0S, temp1S);
        line1S = vec_sub(temp0S, temp1S);
        line2S = vec_add(temp2S, temp3S);
        line3S = vec_sub(temp2S, temp3S);
        line4S = vec_add(temp4S, temp5S);
        line5S = vec_sub(temp4S, temp5S);
        line6S = vec_add(temp6S, temp7S);
        line7S = vec_sub(temp6S, temp7S);

        line0BS = vec_add(line0S, line2S);
        line2BS = vec_sub(line0S, line2S);
        line1BS = vec_add(line1S, line3S);
        line3BS = vec_sub(line1S, line3S);
        line4BS = vec_add(line4S, line6S);
        line6BS = vec_sub(line4S, line6S);
        line5BS = vec_add(line5S, line7S);
        line7BS = vec_sub(line5S, line7S);

        line0CS = vec_add(line0BS, line4BS);
        line4CS = vec_sub(line0BS, line4BS);
        line1CS = vec_add(line1BS, line5BS);
        line5CS = vec_sub(line1BS, line5BS);
        line2CS = vec_add(line2BS, line6BS);
        line6CS = vec_sub(line2BS, line6BS);
        line3CS = vec_add(line3BS, line7BS);
        line7CS = vec_sub(line3BS, line7BS);

        vsum = vec_sum4s(vec_abs(line0CS), vsum);
        vsum = vec_sum4s(vec_abs(line1CS), vsum);
        vsum = vec_sum4s(vec_abs(line2CS), vsum);
        vsum = vec_sum4s(vec_abs(line3CS), vsum);
        vsum = vec_sum4s(vec_abs(line4CS), vsum);
        vsum = vec_sum4s(vec_abs(line5CS), vsum);
        vsum = vec_sum4s(vec_abs(line6CS), vsum);
        vsum = vec_sum4s(vec_abs(line7CS), vsum);
        vsum = vec_sums(vsum, (vector signed int)vzero);
        vsum = vec_splat(vsum, 3);
        vec_ste(vsum, 0, &sum);
    }
    return sum;
}

static int hadamard8_diff16_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
    int score;
    score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
    if (h == 16) {
        dst   += 8 * stride;
        src   += 8 * stride;
        score += hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
    }
    return score;
}

static void vorbis_inverse_coupling_altivec(float *mag, float *ang,
                                            int blocksize)
{
    int i;
    vector float m, a;
    vector bool int t0, t1;
    /* vec_splat_u32() only accepts a 5-bit signed literal,
       so the constant 31 has to be built as 15 + 15 + 1. */
    const vector unsigned int v_31 =
        vec_add(vec_add(vec_splat_u32(15), vec_splat_u32(15)), vec_splat_u32(1));
    for (i = 0; i < blocksize; i += 4) {
        m = vec_ld(0, mag + i);
        a = vec_ld(0, ang + i);
        t0 = vec_cmple(m, (vector float)vec_splat_u32(0));
        t1 = vec_cmple(a, (vector float)vec_splat_u32(0));
        a = vec_xor(a, (vector float) vec_sl((vector unsigned int)t0, v_31));
        t0 = (vector bool int)vec_and(a, t1);
        t1 = (vector bool int)vec_andc(a, t1);
        a = vec_sub(m, (vector float)t1);
        m = vec_add(m, (vector float)t0);
        vec_stl(a, 0, ang + i);
        vec_stl(m, 0, mag + i);
    }
}
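
/* For reference (not compiled): a scalar formulation of the same coupling,
   following the plain C routine in dsputil.c, which the vector loop above
   reproduces branchlessly with sign-bit tricks. */
#if 0
static void vorbis_inverse_coupling_c_ref(float *mag, float *ang, int blocksize)
{
    int i;
    for (i = 0; i < blocksize; i++) {
        if (mag[i] > 0.0) {
            if (ang[i] > 0.0) {
                ang[i] = mag[i] - ang[i];
            } else {
                float temp = ang[i];
                ang[i]  = mag[i];
                mag[i] += temp;
            }
        } else {
            if (ang[i] > 0.0) {
                ang[i] += mag[i];
            } else {
                float temp = ang[i];
                ang[i]  = mag[i];
                mag[i] -= temp;
            }
        }
    }
}
#endif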

/* next one assumes that ((line_size % 8) == 0) */
static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2, blocktemp;
    register vector unsigned short pixelssum1, pixelssum2, temp3;

    register const vector unsigned char vczero = (const vector unsigned char)
        vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)
        vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        blockv = vec_avg(blocktemp, blockv);
        vec_st(blockv, 0, block);

        block  += line_size;
        pixels += line_size;
    }
}

void dsputil_init_altivec(DSPContext *c, AVCodecContext *avctx)
{
    c->pix_abs[0][1] = sad16_x2_altivec;
    c->pix_abs[0][2] = sad16_y2_altivec;
    c->pix_abs[0][3] = sad16_xy2_altivec;
    c->pix_abs[0][0] = sad16_altivec;
    c->pix_abs[1][0] = sad8_altivec;
    c->sad[0] = sad16_altivec;
    c->sad[1] = sad8_altivec;
    c->pix_norm1 = pix_norm1_altivec;
    c->sse[1] = sse8_altivec;
    c->sse[0] = sse16_altivec;
    c->pix_sum = pix_sum_altivec;
    c->diff_pixels = diff_pixels_altivec;
    c->get_pixels = get_pixels_altivec;
    c->clear_block = clear_block_altivec;
    c->add_bytes = add_bytes_altivec;
    c->put_pixels_tab[0][0] = put_pixels16_altivec;
    /* the two functions do the same thing, so use the same code */
    c->put_no_rnd_pixels_tab[0][0] = put_pixels16_altivec;
    c->avg_pixels_tab[0][0] = avg_pixels16_altivec;
    c->avg_pixels_tab[1][0] = avg_pixels8_altivec;
    c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec;
    c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec;
    c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
    c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec;
    c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;

    c->hadamard8_diff[0] = hadamard8_diff16_altivec;
    c->hadamard8_diff[1] = hadamard8_diff8x8_altivec;
    if (CONFIG_VORBIS_DECODER)
        c->vorbis_inverse_coupling = vorbis_inverse_coupling_altivec;
}