ppc: dsputil: do unaligned block accesses correctly
[libav.git] / libavcodec / ppc / dsputil_altivec.c
/*
 * Copyright (c) 2002 Brian Foley
 * Copyright (c) 2002 Dieter Shirley
 * Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif
#include "libavcodec/dsputil.h"
#include "util_altivec.h"
#include "types_altivec.h"
#include "dsputil_altivec.h"
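
/* How the unaligned accesses in this file work:
 *
 * AltiVec loads (vec_ld / lvx) silently clear the low four bits of the
 * effective address, so loading through an unaligned pointer just fetches
 * the enclosing 16-byte quadword. To read 16 bytes starting at an arbitrary
 * address p, the code below loads the two quadwords the data may straddle
 * and merges them with a control vector from vec_lvsl, roughly:
 *
 *     perm = vec_lvsl(0, p);          // shuffle derived from p's alignment
 *     lo   = vec_ld( 0, p);           // quadword containing p[0]
 *     hi   = vec_ld(15, p);           // quadword containing p[15]
 *     v    = vec_perm(lo, hi, perm);  // bytes p[0] .. p[15]
 *
 * The high half is loaded at offset 15, not 16: the quadword holding p[15]
 * is the last one actually needed, so when p happens to be aligned we never
 * touch the following quadword, which could lie on an unmapped page. The
 * x2/xy2 variants really do need p[16], so they load their high half at
 * offset 16 instead.
 */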

static int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector unsigned char perm1 = vec_lvsl(0, pix2);
    vector unsigned char perm2 = vec_add(perm1, vec_splat_u8(1));
    vector unsigned char pix2l, pix2r;
    vector unsigned char pix1v, pix2v, pix2iv, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);
    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix2v: pix2[0]-pix2[15]      pix2iv: pix2[1]-pix2[16] */
        pix1v  = vec_ld( 0, pix1);
        pix2l  = vec_ld( 0, pix2);
        pix2r  = vec_ld(16, pix2);
        pix2v  = vec_perm(pix2l, pix2r, perm1);
        pix2iv = vec_perm(pix2l, pix2r, perm2);

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix2iv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
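
/* All of the SAD/SSE/sum kernels in this file end with the same horizontal
 * reduction of a vector of four partial sums, sketched here for reference:
 *
 *     sumdiffs = vec_sums(acc, zero);     // saturated sum across lanes,
 *                                         // result lands in element 3
 *     sumdiffs = vec_splat(sumdiffs, 3);  // broadcast element 3 to all lanes
 *     vec_ste(sumdiffs, 0, &s);           // store one element to the int
 */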

static int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector unsigned char perm = vec_lvsl(0, pix2);
    vector unsigned char pix2l, pix2r;
    vector unsigned char pix1v, pix2v, pix3v, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;
    uint8_t *pix3 = pix2 + line_size;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);

    /* Since pix3 = pix2 + line_size, the pix3 of one iteration becomes
       pix2 in the next one. We can use this fact to avoid a potentially
       expensive unaligned read each time around the loop.
       Read unaligned pixels into our vector: pix2v: pix2[0]-pix2[15] */
    pix2l = vec_ld( 0, pix2);
    pix2r = vec_ld(15, pix2);
    pix2v = vec_perm(pix2l, pix2r, perm);

    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix3v: pix3[0]-pix3[15] */
        pix1v = vec_ld(0, pix1);

        pix2l = vec_ld( 0, pix3);
        pix2r = vec_ld(15, pix3);
        pix3v = vec_perm(pix2l, pix2r, perm);

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix3v);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2v = pix3v;
        pix3 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);
    return s;
}

static int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    uint8_t *pix3 = pix2 + line_size;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    const vector unsigned short two = (const vector unsigned short)vec_splat_u16(2);
    vector unsigned char avgv, t5;
    vector unsigned char perm1 = vec_lvsl(0, pix2);
    vector unsigned char perm2 = vec_add(perm1, vec_splat_u8(1));
    vector unsigned char pix2l, pix2r;
    vector unsigned char pix1v, pix2v, pix3v, pix2iv, pix3iv;
    vector unsigned short pix2lv, pix2hv, pix2ilv, pix2ihv;
    vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
    vector unsigned short avghv, avglv;
    vector unsigned short t1, t2, t3, t4;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    s = 0;

    /* Since pix3 = pix2 + line_size, the pix3 of one iteration becomes
       pix2 in the next one. We can use this fact to avoid a potentially
       expensive unaligned read, as well as some splitting and vector
       addition, each time around the loop.
       Read unaligned pixels into our vectors. The vectors are as follows:
       pix2v: pix2[0]-pix2[15]      pix2iv: pix2[1]-pix2[16]
       Then split the pixel vectors into shorts. */
    pix2l  = vec_ld( 0, pix2);
    pix2r  = vec_ld(16, pix2);
    pix2v  = vec_perm(pix2l, pix2r, perm1);
    pix2iv = vec_perm(pix2l, pix2r, perm2);

    pix2hv  = (vector unsigned short) vec_mergeh(zero, pix2v);
    pix2lv  = (vector unsigned short) vec_mergel(zero, pix2v);
    pix2ihv = (vector unsigned short) vec_mergeh(zero, pix2iv);
    pix2ilv = (vector unsigned short) vec_mergel(zero, pix2iv);
    t1 = vec_add(pix2hv, pix2ihv);
    t2 = vec_add(pix2lv, pix2ilv);

    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix3v: pix3[0]-pix3[15]      pix3iv: pix3[1]-pix3[16] */
        pix1v = vec_ld(0, pix1);

        pix2l  = vec_ld( 0, pix3);
        pix2r  = vec_ld(16, pix3);
        pix3v  = vec_perm(pix2l, pix2r, perm1);
        pix3iv = vec_perm(pix2l, pix2r, perm2);

        /* Note that AltiVec does have vec_avg, but it averages element pairs
           and rounds up. We could do avg(avg(a,b), avg(c,d)), but the rounding
           would mean that, for example, avg(3,0,0,1) = 2, when it should be 1.
           Instead, we have to split the pixel vectors into vectors of shorts
           and do the averaging by hand. */

        /* Split the pixel vectors into shorts */
        pix3hv  = (vector unsigned short) vec_mergeh(zero, pix3v);
        pix3lv  = (vector unsigned short) vec_mergel(zero, pix3v);
        pix3ihv = (vector unsigned short) vec_mergeh(zero, pix3iv);
        pix3ilv = (vector unsigned short) vec_mergel(zero, pix3iv);

        /* Do the averaging on them */
        t3 = vec_add(pix3hv, pix3ihv);
        t4 = vec_add(pix3lv, pix3ilv);

        avghv = vec_sr(vec_add(vec_add(t1, t3), two), two);
        avglv = vec_sr(vec_add(vec_add(t2, t4), two), two);

        /* Pack the shorts back into a result */
        avgv = vec_pack(avghv, avglv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix3 += line_size;
        /* Transfer the calculated values for pix3 into pix2 */
        t1 = t3;
        t2 = t4;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm = vec_lvsl(0, pix2);
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        vector unsigned char pix2l = vec_ld( 0, pix2);
        vector unsigned char pix2r = vec_ld(15, pix2);
        t1 = vec_ld(0, pix1);
        t2 = vec_perm(pix2l, pix2r, perm);

        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    const vector unsigned char permclear = (vector unsigned char){255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0};
    vector unsigned char perm1 = vec_lvsl(0, pix1);
    vector unsigned char perm2 = vec_lvsl(0, pix2);
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2.
           Since we're reading 16 pixels but actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        vector unsigned char pix1l = vec_ld( 0, pix1);
        vector unsigned char pix1r = vec_ld(15, pix1);
        vector unsigned char pix2l = vec_ld( 0, pix2);
        vector unsigned char pix2r = vec_ld(15, pix2);
        t1 = vec_and(vec_perm(pix1l, pix1r, perm1), permclear);
        t2 = vec_and(vec_perm(pix2l, pix2r, perm2), permclear);

        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static int pix_norm1_altivec(uint8_t *pix, int line_size)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm = vec_lvsl(0, pix);
    vector unsigned char pixv;
    vector unsigned int sv;
    vector signed int sum;

    sv = (vector unsigned int)vec_splat_u32(0);

    s = 0;
    for (i = 0; i < 16; i++) {
        /* Read in the potentially unaligned pixels */
        vector unsigned char pixl = vec_ld( 0, pix);
        vector unsigned char pixr = vec_ld(15, pix);
        pixv = vec_perm(pixl, pixr, perm);

        /* Square the values, and add them to our sum */
        sv = vec_msum(pixv, pixv, sv);

        pix += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sum = vec_sums((vector signed int) sv, (vector signed int) zero);
    sum = vec_splat(sum, 3);
    vec_ste(sum, 0, &s);

    return s;
}

/**
 * Sum of Squared Errors for an 8x8 block, AltiVec-enhanced.
 * It's the sad8_altivec code above with squaring added.
 */
static int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    const vector unsigned char permclear = (vector unsigned char){255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0};
    vector unsigned char perm1 = vec_lvsl(0, pix1);
    vector unsigned char perm2 = vec_lvsl(0, pix2);
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2.
           Since we're reading 16 pixels but actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        vector unsigned char pix1l = vec_ld( 0, pix1);
        vector unsigned char pix1r = vec_ld(15, pix1);
        vector unsigned char pix2l = vec_ld( 0, pix2);
        vector unsigned char pix2r = vec_ld(15, pix2);
        t1 = vec_and(vec_perm(pix1l, pix1r, perm1), permclear);
        t2 = vec_and(vec_perm(pix2l, pix2r, perm2), permclear);

        /* Since we want to use unsigned chars, we can take advantage
           of the fact that abs(a-b)^2 = (a-b)^2. */

        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}

/**
 * Sum of Squared Errors for a 16x16 block, AltiVec-enhanced.
 * It's the sad16_altivec code above with squaring added.
 */
static int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm = vec_lvsl(0, pix2);
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        vector unsigned char pix2l = vec_ld( 0, pix2);
        vector unsigned char pix2r = vec_ld(15, pix2);
        t1 = vec_ld(0, pix1);
        t2 = vec_perm(pix2l, pix2r, perm);

        /* Since we want to use unsigned chars, we can take advantage
           of the fact that abs(a-b)^2 = (a-b)^2. */

        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}

static int pix_sum_altivec(uint8_t * pix, int line_size)
{
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm = vec_lvsl(0, pix);
    vector unsigned char t1;
    vector unsigned int sad;
    vector signed int sumdiffs;

    int i;
    int s;

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < 16; i++) {
        /* Read the potentially unaligned 16 pixels into t1 */
        vector unsigned char pixl = vec_ld( 0, pix);
        vector unsigned char pixr = vec_ld(15, pix);
        t1 = vec_perm(pixl, pixr, perm);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t1, sad);

        pix += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
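
/* The byte-to-short conversions in the two functions below rely on
 * vec_mergeh(zero, bytes): interleaving a zero byte before each pixel byte
 * yields its zero-extended 16-bit value, which works because AltiVec is
 * big-endian. */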
static void get_pixels_altivec(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;
    vector unsigned char perm = vec_lvsl(0, pixels);
    vector unsigned char bytes;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector signed short shorts;

    for (i = 0; i < 8; i++) {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        vector unsigned char pixl = vec_ld( 0, pixels);
        vector unsigned char pixr = vec_ld(15, pixels);
        bytes = vec_perm(pixl, pixr, perm);

        // Convert the bytes into shorts.
        shorts = (vector signed short)vec_mergeh(zero, bytes);

        // Save the data to the block; we assume the block is 16-byte aligned.
        vec_st(shorts, i*16, (vector signed short*)block);

        pixels += line_size;
    }
}

static void diff_pixels_altivec(DCTELEM *restrict block, const uint8_t *s1,
                                const uint8_t *s2, int stride)
{
    int i;
    vector unsigned char perm1 = vec_lvsl(0, s1);
    vector unsigned char perm2 = vec_lvsl(0, s2);
    vector unsigned char bytes, pixl, pixr;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector signed short shorts1, shorts2;

    for (i = 0; i < 4; i++) {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        pixl = vec_ld( 0, s1);
        pixr = vec_ld(15, s1);
        bytes = vec_perm(pixl, pixr, perm1);

        // Convert the bytes into shorts.
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels.
        pixl = vec_ld( 0, s2);
        pixr = vec_ld(15, s2);
        bytes = vec_perm(pixl, pixr, perm2);

        // Convert the bytes into shorts.
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction.
        shorts1 = vec_sub(shorts1, shorts2);

        // Save the data to the block; we assume the block is 16-byte aligned.
        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;

        // The code below is a copy of the code above... this is a manual unroll.

        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        pixl = vec_ld( 0, s1);
        pixr = vec_ld(15, s1);
        bytes = vec_perm(pixl, pixr, perm1);

        // Convert the bytes into shorts.
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels.
        pixl = vec_ld( 0, s2);
        pixr = vec_ld(15, s2);
        bytes = vec_perm(pixl, pixr, perm2);

        // Convert the bytes into shorts.
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction.
        shorts1 = vec_sub(shorts1, shorts2);

        // Save the data to the block; we assume the block is 16-byte aligned.
        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;
    }
}
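
/* A DCT block is 64 DCTELEMs (int16), i.e. 128 bytes, so eight aligned
 * 16-byte stores clear it; the block pointer is assumed to be 16-byte
 * aligned. */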
static void clear_block_altivec(DCTELEM *block) {
    LOAD_ZERO;
    vec_st(zero_s16v,   0, block);
    vec_st(zero_s16v,  16, block);
    vec_st(zero_s16v,  32, block);
    vec_st(zero_s16v,  48, block);
    vec_st(zero_s16v,  64, block);
    vec_st(zero_s16v,  80, block);
    vec_st(zero_s16v,  96, block);
    vec_st(zero_s16v, 112, block);
}

static void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
    register int i;
    register vector unsigned char vdst, vsrc;

    /* dst and src are 16-byte aligned (guaranteed) */
    for (i = 0; (i + 15) < w; i += 16) {
        vdst = vec_ld(i, (unsigned char*)dst);
        vsrc = vec_ld(i, (unsigned char*)src);
        vdst = vec_add(vsrc, vdst);
        vec_st(vdst, i, (unsigned char*)dst);
    }
    /* if w is not a multiple of 16, add the tail in scalar code */
    for (; (i < w); i++) {
        dst[i] += src[i];
    }
}

/* next one assumes that ((line_size % 16) == 0) */
void ff_put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2;
    register vector unsigned char pixelsv1B, pixelsv2B;
    register vector unsigned char pixelsv1C, pixelsv2C;
    register vector unsigned char pixelsv1D, pixelsv2D;

    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;
    register int line_size_2 = line_size << 1;
    register int line_size_3 = line_size + line_size_2;
    register int line_size_4 = line_size << 2;

    // Hand-unrolling the loop by 4 gains about 15%;
    // minimum execution time goes from 74 to 60 cycles.
    // It's faster than -funroll-loops, but using
    // -funroll-loops with this is bad - 74 cycles again.
    // All this is on a 7450, tuning for the 7450.
    for (i = 0; i < h; i += 4) {
        pixelsv1  = vec_ld( 0, pixels);
        pixelsv2  = vec_ld(15, pixels);
        pixelsv1B = vec_ld(line_size, pixels);
        pixelsv2B = vec_ld(15 + line_size, pixels);
        pixelsv1C = vec_ld(line_size_2, pixels);
        pixelsv2C = vec_ld(15 + line_size_2, pixels);
        pixelsv1D = vec_ld(line_size_3, pixels);
        pixelsv2D = vec_ld(15 + line_size_3, pixels);
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
               0, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
               line_size, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
               line_size_2, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
               line_size_3, (unsigned char*)block);
        pixels += line_size_4;
        block  += line_size_4;
    }
}

/* next one assumes that ((line_size % 16) == 0) */
#define op_avg(a,b)  a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
void ff_avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;

    for (i = 0; i < h; i++) {
        pixelsv1 = vec_ld( 0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        blockv   = vec_ld(0, block);
        pixelsv  = vec_perm(pixelsv1, pixelsv2, perm);
        blockv   = vec_avg(blockv, pixelsv);
        vec_st(blockv, 0, (unsigned char*)block);
        pixels += line_size;
        block  += line_size;
    }
}

/* next one assumes that ((line_size % 8) == 0) */
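/* The 8-pixel functions below update only half of a quadword, so they
 * read-modify-write: load the 16-byte quadword containing the 8-byte
 * destination, merge the new 8 bytes into the correct half with
 * vec_perm/vcprm (block is 8-byte aligned, so it sits in either the left
 * or the right half), and store the whole quadword back. */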
static void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    int i;

    for (i = 0; i < h; i++) {
        /* block is 8-byte aligned, so it sits either in the left half of
           a quadword (16-byte aligned) or in the right half (not) */
        int rightside = ((unsigned long)block & 0x0000000F);

        blockv   = vec_ld(0, block);
        pixelsv1 = vec_ld( 0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        pixelsv  = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));

        if (rightside) {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(0, 1, s0, s1));
        } else {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(s0, s1, 2, 3));
        }

        blockv = vec_avg(blockv, pixelsv);

        vec_st(blockv, 0, block);

        pixels += line_size;
        block  += line_size;
    }
}

/* next one assumes that ((line_size % 8) == 0) */
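/* The xy2 functions average four neighboring pixels,
 * (a + b + c + d + 2) >> 2 (or + 1 instead of + 2 for the no_rnd
 * variants). The horizontal pair sum computed for one line is carried
 * over to the next iteration, with the rounding constant folded in, so
 * each row is widened and summed only once. */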
static void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

/* next one assumes that ((line_size % 8) == 0) */
static void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vcone);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

/* next one assumes that ((line_size % 16) == 0) */
static void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
        pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vctwo);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);

        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

/* next one assumes that ((line_size % 16) == 0) */
static void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
        pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vcone);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);

        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vcone);
        pixelssum1 = vec_add(pixelssum2, vcone);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}
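
/* The hadamard8_diff functions compute a sum of absolute transformed
 * differences (SATD): each ONEITERBUTTERFLY invocation loads one row of
 * src and dst, forms their difference, and runs a horizontal 8-point
 * Hadamard butterfly (three vec_perm/vec_mladd stages with the +/-1
 * patterns in vprod1..3); the add/sub cascade that follows is the
 * vertical transform across rows, and the absolute values of the
 * resulting coefficients are summed. */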
static int hadamard8_diff8x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
    int sum;
    register const vector unsigned char vzero =
        (const vector unsigned char)vec_splat_u8(0);
    register vector signed short temp0, temp1, temp2, temp3, temp4,
                                 temp5, temp6, temp7;
    {
        register const vector signed short vprod1 = (const vector signed short)
            { 1,-1, 1,-1, 1,-1, 1,-1 };
        register const vector signed short vprod2 = (const vector signed short)
            { 1, 1,-1,-1, 1, 1,-1,-1 };
        register const vector signed short vprod3 = (const vector signed short)
            { 1, 1, 1, 1,-1,-1,-1,-1 };
        register const vector unsigned char perm1 = (const vector unsigned char)
            {0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
             0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D};
        register const vector unsigned char perm2 = (const vector unsigned char)
            {0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
             0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B};
        register const vector unsigned char perm3 = (const vector unsigned char)
            {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
             0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};

#define ONEITERBUTTERFLY(i, res) \
    { \
        register vector unsigned char src1, src2, srcO; \
        register vector unsigned char dst1, dst2, dstO; \
        register vector signed short srcV, dstV; \
        register vector signed short but0, but1, but2, op1, op2, op3; \
        src1 = vec_ld(stride * i, src); \
        src2 = vec_ld((stride * i) + 15, src); \
        srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
        dst1 = vec_ld(stride * i, dst); \
        dst2 = vec_ld((stride * i) + 15, dst); \
        dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
        /* promote the unsigned chars to signed shorts */ \
        /* we're in the 8x8 function, we only care for the first 8 */ \
        srcV = (vector signed short)vec_mergeh((vector signed char)vzero, \
                                               (vector signed char)srcO); \
        dstV = (vector signed short)vec_mergeh((vector signed char)vzero, \
                                               (vector signed char)dstO); \
        /* subtractions inside the first butterfly */ \
        but0 = vec_sub(srcV, dstV); \
        op1  = vec_perm(but0, but0, perm1); \
        but1 = vec_mladd(but0, vprod1, op1); \
        op2  = vec_perm(but1, but1, perm2); \
        but2 = vec_mladd(but1, vprod2, op2); \
        op3  = vec_perm(but2, but2, perm3); \
        res  = vec_mladd(but2, vprod3, op3); \
    }
        ONEITERBUTTERFLY(0, temp0);
        ONEITERBUTTERFLY(1, temp1);
        ONEITERBUTTERFLY(2, temp2);
        ONEITERBUTTERFLY(3, temp3);
        ONEITERBUTTERFLY(4, temp4);
        ONEITERBUTTERFLY(5, temp5);
        ONEITERBUTTERFLY(6, temp6);
        ONEITERBUTTERFLY(7, temp7);
    }
#undef ONEITERBUTTERFLY
    {
        register vector signed int vsum;
        register vector signed short line0 = vec_add(temp0, temp1);
        register vector signed short line1 = vec_sub(temp0, temp1);
        register vector signed short line2 = vec_add(temp2, temp3);
        register vector signed short line3 = vec_sub(temp2, temp3);
        register vector signed short line4 = vec_add(temp4, temp5);
        register vector signed short line5 = vec_sub(temp4, temp5);
        register vector signed short line6 = vec_add(temp6, temp7);
        register vector signed short line7 = vec_sub(temp6, temp7);

        register vector signed short line0B = vec_add(line0, line2);
        register vector signed short line2B = vec_sub(line0, line2);
        register vector signed short line1B = vec_add(line1, line3);
        register vector signed short line3B = vec_sub(line1, line3);
        register vector signed short line4B = vec_add(line4, line6);
        register vector signed short line6B = vec_sub(line4, line6);
        register vector signed short line5B = vec_add(line5, line7);
        register vector signed short line7B = vec_sub(line5, line7);

        register vector signed short line0C = vec_add(line0B, line4B);
        register vector signed short line4C = vec_sub(line0B, line4B);
        register vector signed short line1C = vec_add(line1B, line5B);
        register vector signed short line5C = vec_sub(line1B, line5B);
        register vector signed short line2C = vec_add(line2B, line6B);
        register vector signed short line6C = vec_sub(line2B, line6B);
        register vector signed short line3C = vec_add(line3B, line7B);
        register vector signed short line7C = vec_sub(line3B, line7B);

        vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
        vsum = vec_sum4s(vec_abs(line1C), vsum);
        vsum = vec_sum4s(vec_abs(line2C), vsum);
        vsum = vec_sum4s(vec_abs(line3C), vsum);
        vsum = vec_sum4s(vec_abs(line4C), vsum);
        vsum = vec_sum4s(vec_abs(line5C), vsum);
        vsum = vec_sum4s(vec_abs(line6C), vsum);
        vsum = vec_sum4s(vec_abs(line7C), vsum);
        vsum = vec_sums(vsum, (vector signed int)vzero);
        vsum = vec_splat(vsum, 3);
        vec_ste(vsum, 0, &sum);
    }
    return sum;
}

/*
 * 16x8 works with 16 elements; it avoids replicating loads, and gives
 * the compiler more room for scheduling. It is only used from inside
 * hadamard8_diff16_altivec.
 *
 * Unfortunately, gcc-3.3 is a bit dumb, and the compiled code has a LOT
 * of spill code; it seems gcc (unlike xlc) cannot keep everything in
 * registers by itself. The following code therefore includes hand-made
 * register allocation. It is not clean, but on a 7450 the resulting code
 * is much faster (the best case falls from 700+ cycles to 550).
 *
 * xlc doesn't add spill code, but it doesn't know how to schedule for the
 * 7450, and its code isn't much faster than gcc-3.3's on the 7450 (but
 * uses 25% fewer instructions...).
 *
 * On the 970, the hand-made RA is still a win (around 690 vs. around 780),
 * but xlc goes down to around 660 on the regular C code...
 */

static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h) {
    int sum;
    register vector signed short
        temp0 __asm__ ("v0"),
        temp1 __asm__ ("v1"),
        temp2 __asm__ ("v2"),
        temp3 __asm__ ("v3"),
        temp4 __asm__ ("v4"),
        temp5 __asm__ ("v5"),
        temp6 __asm__ ("v6"),
        temp7 __asm__ ("v7");
    register vector signed short
        temp0S __asm__ ("v8"),
        temp1S __asm__ ("v9"),
        temp2S __asm__ ("v10"),
        temp3S __asm__ ("v11"),
        temp4S __asm__ ("v12"),
        temp5S __asm__ ("v13"),
        temp6S __asm__ ("v14"),
        temp7S __asm__ ("v15");
    register const vector unsigned char vzero __asm__ ("v31") =
        (const vector unsigned char)vec_splat_u8(0);
    {
        register const vector signed short vprod1 __asm__ ("v16") =
            (const vector signed short){ 1,-1, 1,-1, 1,-1, 1,-1 };
        register const vector signed short vprod2 __asm__ ("v17") =
            (const vector signed short){ 1, 1,-1,-1, 1, 1,-1,-1 };
        register const vector signed short vprod3 __asm__ ("v18") =
            (const vector signed short){ 1, 1, 1, 1,-1,-1,-1,-1 };
        register const vector unsigned char perm1 __asm__ ("v19") =
            (const vector unsigned char)
            {0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
             0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D};
        register const vector unsigned char perm2 __asm__ ("v20") =
            (const vector unsigned char)
            {0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
             0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B};
        register const vector unsigned char perm3 __asm__ ("v21") =
            (const vector unsigned char)
            {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
             0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};

#define ONEITERBUTTERFLY(i, res1, res2) \
    { \
        register vector unsigned char src1 __asm__ ("v22"), \
                                      src2 __asm__ ("v23"), \
                                      dst1 __asm__ ("v24"), \
                                      dst2 __asm__ ("v25"), \
                                      srcO __asm__ ("v22"), \
                                      dstO __asm__ ("v23"); \
        \
        register vector signed short srcV  __asm__ ("v24"), \
                                     dstV  __asm__ ("v25"), \
                                     srcW  __asm__ ("v26"), \
                                     dstW  __asm__ ("v27"), \
                                     but0  __asm__ ("v28"), \
                                     but0S __asm__ ("v29"), \
                                     op1   __asm__ ("v30"), \
                                     but1  __asm__ ("v22"), \
                                     op1S  __asm__ ("v23"), \
                                     but1S __asm__ ("v24"), \
                                     op2   __asm__ ("v25"), \
                                     but2  __asm__ ("v26"), \
                                     op2S  __asm__ ("v27"), \
                                     but2S __asm__ ("v28"), \
                                     op3   __asm__ ("v29"), \
                                     op3S  __asm__ ("v30"); \
        \
        src1 = vec_ld(stride * i, src); \
        src2 = vec_ld((stride * i) + 16, src); \
        srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
        dst1 = vec_ld(stride * i, dst); \
        dst2 = vec_ld((stride * i) + 16, dst); \
        dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
        /* promote the unsigned chars to signed shorts */ \
        srcV = (vector signed short)vec_mergeh((vector signed char)vzero, \
                                               (vector signed char)srcO); \
        dstV = (vector signed short)vec_mergeh((vector signed char)vzero, \
                                               (vector signed char)dstO); \
        srcW = (vector signed short)vec_mergel((vector signed char)vzero, \
                                               (vector signed char)srcO); \
        dstW = (vector signed short)vec_mergel((vector signed char)vzero, \
                                               (vector signed char)dstO); \
        /* subtractions inside the first butterfly */ \
        but0  = vec_sub(srcV, dstV); \
        but0S = vec_sub(srcW, dstW); \
        op1   = vec_perm(but0, but0, perm1); \
        but1  = vec_mladd(but0, vprod1, op1); \
        op1S  = vec_perm(but0S, but0S, perm1); \
        but1S = vec_mladd(but0S, vprod1, op1S); \
        op2   = vec_perm(but1, but1, perm2); \
        but2  = vec_mladd(but1, vprod2, op2); \
        op2S  = vec_perm(but1S, but1S, perm2); \
        but2S = vec_mladd(but1S, vprod2, op2S); \
        op3   = vec_perm(but2, but2, perm3); \
        res1  = vec_mladd(but2, vprod3, op3); \
        op3S  = vec_perm(but2S, but2S, perm3); \
        res2  = vec_mladd(but2S, vprod3, op3S); \
    }
        ONEITERBUTTERFLY(0, temp0, temp0S);
        ONEITERBUTTERFLY(1, temp1, temp1S);
        ONEITERBUTTERFLY(2, temp2, temp2S);
        ONEITERBUTTERFLY(3, temp3, temp3S);
        ONEITERBUTTERFLY(4, temp4, temp4S);
        ONEITERBUTTERFLY(5, temp5, temp5S);
        ONEITERBUTTERFLY(6, temp6, temp6S);
        ONEITERBUTTERFLY(7, temp7, temp7S);
    }
#undef ONEITERBUTTERFLY
    {
        register vector signed int vsum;
        register vector signed short line0S, line1S, line2S, line3S, line4S,
                                     line5S, line6S, line7S, line0BS, line2BS,
                                     line1BS, line3BS, line4BS, line6BS, line5BS,
                                     line7BS, line0CS, line4CS, line1CS, line5CS,
                                     line2CS, line6CS, line3CS, line7CS;

        register vector signed short line0 = vec_add(temp0, temp1);
        register vector signed short line1 = vec_sub(temp0, temp1);
        register vector signed short line2 = vec_add(temp2, temp3);
        register vector signed short line3 = vec_sub(temp2, temp3);
        register vector signed short line4 = vec_add(temp4, temp5);
        register vector signed short line5 = vec_sub(temp4, temp5);
        register vector signed short line6 = vec_add(temp6, temp7);
        register vector signed short line7 = vec_sub(temp6, temp7);

        register vector signed short line0B = vec_add(line0, line2);
        register vector signed short line2B = vec_sub(line0, line2);
        register vector signed short line1B = vec_add(line1, line3);
        register vector signed short line3B = vec_sub(line1, line3);
        register vector signed short line4B = vec_add(line4, line6);
        register vector signed short line6B = vec_sub(line4, line6);
        register vector signed short line5B = vec_add(line5, line7);
        register vector signed short line7B = vec_sub(line5, line7);

        register vector signed short line0C = vec_add(line0B, line4B);
        register vector signed short line4C = vec_sub(line0B, line4B);
        register vector signed short line1C = vec_add(line1B, line5B);
        register vector signed short line5C = vec_sub(line1B, line5B);
        register vector signed short line2C = vec_add(line2B, line6B);
        register vector signed short line6C = vec_sub(line2B, line6B);
        register vector signed short line3C = vec_add(line3B, line7B);
        register vector signed short line7C = vec_sub(line3B, line7B);

        vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
        vsum = vec_sum4s(vec_abs(line1C), vsum);
        vsum = vec_sum4s(vec_abs(line2C), vsum);
        vsum = vec_sum4s(vec_abs(line3C), vsum);
        vsum = vec_sum4s(vec_abs(line4C), vsum);
        vsum = vec_sum4s(vec_abs(line5C), vsum);
        vsum = vec_sum4s(vec_abs(line6C), vsum);
        vsum = vec_sum4s(vec_abs(line7C), vsum);

        line0S = vec_add(temp0S, temp1S);
        line1S = vec_sub(temp0S, temp1S);
        line2S = vec_add(temp2S, temp3S);
        line3S = vec_sub(temp2S, temp3S);
        line4S = vec_add(temp4S, temp5S);
        line5S = vec_sub(temp4S, temp5S);
        line6S = vec_add(temp6S, temp7S);
        line7S = vec_sub(temp6S, temp7S);

        line0BS = vec_add(line0S, line2S);
        line2BS = vec_sub(line0S, line2S);
        line1BS = vec_add(line1S, line3S);
        line3BS = vec_sub(line1S, line3S);
        line4BS = vec_add(line4S, line6S);
        line6BS = vec_sub(line4S, line6S);
        line5BS = vec_add(line5S, line7S);
        line7BS = vec_sub(line5S, line7S);

        line0CS = vec_add(line0BS, line4BS);
        line4CS = vec_sub(line0BS, line4BS);
        line1CS = vec_add(line1BS, line5BS);
        line5CS = vec_sub(line1BS, line5BS);
        line2CS = vec_add(line2BS, line6BS);
        line6CS = vec_sub(line2BS, line6BS);
        line3CS = vec_add(line3BS, line7BS);
        line7CS = vec_sub(line3BS, line7BS);

        vsum = vec_sum4s(vec_abs(line0CS), vsum);
        vsum = vec_sum4s(vec_abs(line1CS), vsum);
        vsum = vec_sum4s(vec_abs(line2CS), vsum);
        vsum = vec_sum4s(vec_abs(line3CS), vsum);
        vsum = vec_sum4s(vec_abs(line4CS), vsum);
        vsum = vec_sum4s(vec_abs(line5CS), vsum);
        vsum = vec_sum4s(vec_abs(line6CS), vsum);
        vsum = vec_sum4s(vec_abs(line7CS), vsum);
        vsum = vec_sums(vsum, (vector signed int)vzero);
        vsum = vec_splat(vsum, 3);
        vec_ste(vsum, 0, &sum);
    }
    return sum;
}

static int hadamard8_diff16_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
    int score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
    if (h == 16) {
        dst += 8 * stride;
        src += 8 * stride;
        score += hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
    }
    return score;
}
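
/* vec_splat_u32 can only materialize 5-bit signed immediates (-16..15),
 * hence the vec_add chain below to build the shift count 31 (the //XXX).
 * Shifting the m <= 0 compare mask left by 31 leaves just the float sign
 * bit set, so the vec_xor flips ang's sign wherever mag is non-positive. */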
static void vorbis_inverse_coupling_altivec(float *mag, float *ang,
                                            int blocksize)
{
    int i;
    vector float m, a;
    vector bool int t0, t1;
    const vector unsigned int v_31 = //XXX
        vec_add(vec_add(vec_splat_u32(15), vec_splat_u32(15)), vec_splat_u32(1));
    for (i = 0; i < blocksize; i += 4) {
        m = vec_ld(0, mag + i);
        a = vec_ld(0, ang + i);
        t0 = vec_cmple(m, (vector float)vec_splat_u32(0));
        t1 = vec_cmple(a, (vector float)vec_splat_u32(0));
        a = vec_xor(a, (vector float) vec_sl((vector unsigned int)t0, v_31));
        t0 = (vector bool int)vec_and(a, t1);
        t1 = (vector bool int)vec_andc(a, t1);
        a = vec_sub(m, (vector float)t1);
        m = vec_add(m, (vector float)t0);
        vec_stl(a, 0, ang + i);
        vec_stl(m, 0, mag + i);
    }
}

/* next one assumes that ((line_size % 8) == 0) */
static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2, blocktemp;
    register vector unsigned short pixelssum1, pixelssum2, temp3;

    register const vector unsigned char vczero = (const vector unsigned char)
        vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)
        vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        blockv = vec_avg(blocktemp, blockv);
        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

void ff_dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx)
{
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    c->pix_abs[0][1] = sad16_x2_altivec;
    c->pix_abs[0][2] = sad16_y2_altivec;
    c->pix_abs[0][3] = sad16_xy2_altivec;
    c->pix_abs[0][0] = sad16_altivec;
    c->pix_abs[1][0] = sad8_altivec;
    c->sad[0] = sad16_altivec;
    c->sad[1] = sad8_altivec;
    c->pix_norm1 = pix_norm1_altivec;
    c->sse[1] = sse8_altivec;
    c->sse[0] = sse16_altivec;
    c->pix_sum = pix_sum_altivec;
    c->diff_pixels = diff_pixels_altivec;
    c->add_bytes = add_bytes_altivec;
    if (!high_bit_depth) {
        c->get_pixels = get_pixels_altivec;
        c->clear_block = clear_block_altivec;
        c->put_pixels_tab[0][0] = ff_put_pixels16_altivec;
        /* the two functions do the same thing, so use the same code */
        c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_altivec;
        c->avg_pixels_tab[0][0] = ff_avg_pixels16_altivec;
        c->avg_pixels_tab[1][0] = avg_pixels8_altivec;
        c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec;
        c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec;
        c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
        c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec;
        c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;
    }

    c->hadamard8_diff[0] = hadamard8_diff16_altivec;
    c->hadamard8_diff[1] = hadamard8_diff8x8_altivec;
    if (CONFIG_VORBIS_DECODER)
        c->vorbis_inverse_coupling = vorbis_inverse_coupling_altivec;
}