/*
 * AltiVec optimizations (C) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * based on code by Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#define ALTIVEC_TRANSPOSE_8x8_SHORT(src_a,src_b,src_c,src_d,src_e,src_f,src_g,src_h) \
    do {                                                  \
    __typeof__(src_a) tempA1, tempB1, tempC1, tempD1;     \
    __typeof__(src_a) tempE1, tempF1, tempG1, tempH1;     \
    __typeof__(src_a) tempA2, tempB2, tempC2, tempD2;     \
    __typeof__(src_a) tempE2, tempF2, tempG2, tempH2;     \
    tempA1 = vec_mergeh (src_a, src_e);                   \
    tempB1 = vec_mergel (src_a, src_e);                   \
    tempC1 = vec_mergeh (src_b, src_f);                   \
    tempD1 = vec_mergel (src_b, src_f);                   \
    tempE1 = vec_mergeh (src_c, src_g);                   \
    tempF1 = vec_mergel (src_c, src_g);                   \
    tempG1 = vec_mergeh (src_d, src_h);                   \
    tempH1 = vec_mergel (src_d, src_h);                   \
    tempA2 = vec_mergeh (tempA1, tempE1);                 \
    tempB2 = vec_mergel (tempA1, tempE1);                 \
    tempC2 = vec_mergeh (tempB1, tempF1);                 \
    tempD2 = vec_mergel (tempB1, tempF1);                 \
    tempE2 = vec_mergeh (tempC1, tempG1);                 \
    tempF2 = vec_mergel (tempC1, tempG1);                 \
    tempG2 = vec_mergeh (tempD1, tempH1);                 \
    tempH2 = vec_mergel (tempD1, tempH1);                 \
    src_a = vec_mergeh (tempA2, tempE2);                  \
    src_b = vec_mergel (tempA2, tempE2);                  \
    src_c = vec_mergeh (tempB2, tempF2);                  \
    src_d = vec_mergel (tempB2, tempF2);                  \
    src_e = vec_mergeh (tempC2, tempG2);                  \
    src_f = vec_mergel (tempC2, tempG2);                  \
    src_g = vec_mergeh (tempD2, tempH2);                  \
    src_h = vec_mergel (tempD2, tempH2);                  \
    } while (0)
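/* The macro above is a standard merge-network transpose: each of the three
 * rounds of vec_mergeh/vec_mergel doubles the interleaving distance (rows
 * 4 apart, then 2, then 1), so after log2(8) = 3 rounds the eight vectors
 * of eight shorts are fully transposed without any loads or stores.
 * Scalar sketch of what the first round does to rows a and e:
 *     mergeh: a0 e0 a1 e1 a2 e2 a3 e3
 *     mergel: a4 e4 a5 e5 a6 e6 a7 e7
 */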
static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
    this code makes no assumptions about src or stride.
    One could remove the recomputation of the perm
    vector by assuming (stride % 16) == 0, unfortunately
    this is not always true.
    */
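    /* For reference, the unaligned-load idiom the LOAD_LINE macros below
     * rely on (a sketch; the names are illustrative, not part of the code):
     *     vector unsigned char perm = vec_lvsl(0, p);  // realignment permute
     *     vector unsigned char hi = vec_ld(0, p);      // block containing p
     *     vector unsigned char lo = vec_ld(16, p);     // following block
     *     vector unsigned char v  = vec_perm(hi, lo, perm);
     * vec_ld rounds the effective address down to 16 bytes, so the permute
     * vector from vec_lvsl is what actually realigns the data. */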
    const short data_0 = ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
    DECLARE_ALIGNED(16, short, data[8]) =
    {
        data_0,          /* dc offset */
        data_0 * 2 + 1,  /* dc threshold */
        c->QP * 2,
        c->QP * 4
    };
    int numEq;
    uint8_t *src2 = src;
    vector signed short v_dcOffset;
    vector signed short v2QP;
    vector unsigned short v4QP;
    vector unsigned short v_dcThreshold;
    const int properStride = (stride % 16);
    const int srcAlign = ((unsigned long)src2 % 16);
    const int two_vectors = ((srcAlign > 8) || properStride) ? 1 : 0;
    const vector signed int zero = vec_splat_s32(0);
    const vector signed short mask = vec_splat_s16(1);
    vector signed int v_numEq = vec_splat_s32(0);
    vector signed short v_data = vec_ld(0, data);
    vector signed short v_srcAss0, v_srcAss1, v_srcAss2, v_srcAss3,
                        v_srcAss4, v_srcAss5, v_srcAss6, v_srcAss7;
    //FIXME avoid this mess if possible
    register int j0 = 0,
                 j1 = stride,
                 j2 = 2 * stride,
                 j3 = 3 * stride,
                 j4 = 4 * stride,
                 j5 = 5 * stride,
                 j6 = 6 * stride,
                 j7 = 7 * stride;
    vector unsigned char v_srcA0, v_srcA1, v_srcA2, v_srcA3,
                         v_srcA4, v_srcA5, v_srcA6, v_srcA7;
    v_dcOffset = vec_splat(v_data, 0);
    v_dcThreshold = (vector unsigned short)vec_splat(v_data, 1);
    v2QP = vec_splat(v_data, 2);
    v4QP = (vector unsigned short)vec_splat(v_data, 3);

    src2 += stride * 4;
#define LOAD_LINE(i)                                                    \
    {                                                                   \
    vector unsigned char perm##i = vec_lvsl(j##i, src2);                \
    vector unsigned char v_srcA2##i;                                    \
    vector unsigned char v_srcA1##i = vec_ld(j##i, src2);               \
    if (two_vectors)                                                    \
        v_srcA2##i = vec_ld(j##i + 16, src2);                           \
    const vector unsigned char v_srcA##i =                              \
        vec_perm(v_srcA1##i, v_srcA2##i, perm##i);                      \
    v_srcAss##i =                                                       \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)v_srcA##i); }
#define LOAD_LINE_ALIGNED(i)                                            \
    v_srcA##i = vec_ld(j##i, src2);                                     \
    v_srcAss##i =                                                       \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)v_srcA##i)
    /* Special-casing the aligned case is worthwhile, as all calls from
     * the (transposed) horizontal deblocks will be aligned, in addition
     * to the naturally aligned vertical deblocks. */
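    /* vec_ld silently ignores the low four address bits, so the aligned
     * fast path below can drop the vec_lvsl/vec_perm realignment entirely;
     * it is only valid when both the pointer and the stride are multiples
     * of 16. */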
    if (!properStride && !srcAlign) {
        LOAD_LINE_ALIGNED(0);
        LOAD_LINE_ALIGNED(1);
        LOAD_LINE_ALIGNED(2);
        LOAD_LINE_ALIGNED(3);
        LOAD_LINE_ALIGNED(4);
        LOAD_LINE_ALIGNED(5);
        LOAD_LINE_ALIGNED(6);
        LOAD_LINE_ALIGNED(7);
    } else {
        LOAD_LINE(0);
        LOAD_LINE(1);
        LOAD_LINE(2);
        LOAD_LINE(3);
        LOAD_LINE(4);
        LOAD_LINE(5);
        LOAD_LINE(6);
        LOAD_LINE(7);
    }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED
#define ITER(i, j)                                                      \
    const vector signed short v_diff##i =                               \
        vec_sub(v_srcAss##i, v_srcAss##j);                              \
    const vector signed short v_sum##i =                                \
        vec_add(v_diff##i, v_dcOffset);                                 \
    const vector signed short v_comp##i =                               \
        (vector signed short)vec_cmplt((vector unsigned short)v_sum##i, \
                                       v_dcThreshold);                  \
    const vector signed short v_part##i = vec_and(mask, v_comp##i);

    ITER(0, 1)
    ITER(1, 2)
    ITER(2, 3)
    ITER(3, 4)
    ITER(4, 5)
    ITER(5, 6)
    ITER(6, 7)
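    /* Scalar sketch of the flatness test being vectorized here: for each
     * pair of vertically adjacent rows, count pixels with
     *     |src[x] - src[x + stride]| < dcThreshold
     * The (diff + dcOffset) unsigned-compare against dcThreshold realizes
     * the absolute-value test per lane; the 0/1 lanes are summed into
     * numEq below. */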
    v_numEq = vec_sum4s(v_part0, v_numEq);
    v_numEq = vec_sum4s(v_part1, v_numEq);
    v_numEq = vec_sum4s(v_part2, v_numEq);
    v_numEq = vec_sum4s(v_part3, v_numEq);
    v_numEq = vec_sum4s(v_part4, v_numEq);
    v_numEq = vec_sum4s(v_part5, v_numEq);
    v_numEq = vec_sum4s(v_part6, v_numEq);
#undef ITER
    v_numEq = vec_sums(v_numEq, zero);
    v_numEq = vec_splat(v_numEq, 3);
    vec_ste(v_numEq, 0, &numEq);
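    /* vec_sum4s/vec_sums/vec_splat/vec_ste is the usual AltiVec horizontal
     * reduction: partial sums per 32-bit lane, full sum into the last lane,
     * broadcast, then store the single element back to memory. */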
    if (numEq > c->ppMode.flatnessThreshold){
        const vector unsigned char mmoP1 = (const vector unsigned char)
            AVV(0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
                0x00, 0x01, 0x12, 0x13, 0x08, 0x09, 0x1A, 0x1B);
        const vector unsigned char mmoP2 = (const vector unsigned char)
            AVV(0x04, 0x05, 0x16, 0x17, 0x0C, 0x0D, 0x1E, 0x1F,
                0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
        const vector unsigned char mmoP = (const vector unsigned char)
            vec_lvsl(8, (unsigned char*)0);
        vector signed short mmoL1 = vec_perm(v_srcAss0, v_srcAss2, mmoP1);
        vector signed short mmoL2 = vec_perm(v_srcAss4, v_srcAss6, mmoP2);
        vector signed short mmoL = vec_perm(mmoL1, mmoL2, mmoP);
        vector signed short mmoR1 = vec_perm(v_srcAss5, v_srcAss7, mmoP1);
        vector signed short mmoR2 = vec_perm(v_srcAss1, v_srcAss3, mmoP2);
        vector signed short mmoR = vec_perm(mmoR1, mmoR2, mmoP);
        vector signed short mmoDiff = vec_sub(mmoL, mmoR);
        vector unsigned short mmoSum = (vector unsigned short)vec_add(mmoDiff, v2QP);
        if (vec_any_gt(mmoSum, v4QP))
            return 0;
        else
            return 1;
    }
    else return 2;
}
static inline void doVertLowPass_altivec(uint8_t *src, int stride, PPContext *c) {
    /*
    this code makes no assumptions about src or stride.
    One could remove the recomputation of the perm
    vector by assuming (stride % 16) == 0, unfortunately
    this is not always true. Quite a lot of load/stores
    can be removed by assuming proper alignment of
    src & stride :-(
    */
    uint8_t *src2 = src;
    const vector signed int zero = vec_splat_s32(0);
    const int properStride = (stride % 16);
    const int srcAlign = ((unsigned long)src2 % 16);
    DECLARE_ALIGNED(16, short, qp[8]);
    qp[0] = c->QP;
    vector signed short vqp = vec_ld(0, qp);
    vqp = vec_splat(vqp, 0);
    vector signed short vb0, vb1, vb2, vb3, vb4, vb5, vb6, vb7, vb8, vb9;
    vector unsigned char vbA0, vbA1, vbA2, vbA3, vbA4, vbA5, vbA6, vbA7, vbA8, vbA9;
    vector unsigned char vbB0, vbB1, vbB2, vbB3, vbB4, vbB5, vbB6, vbB7, vbB8, vbB9;
    vector unsigned char vbT0, vbT1, vbT2, vbT3, vbT4, vbT5, vbT6, vbT7, vbT8, vbT9;
#define LOAD_LINE(i)                                                    \
    const vector unsigned char perml##i =                               \
        vec_lvsl(i * stride, src2);                                     \
    vbA##i = vec_ld(i * stride, src2);                                  \
    vbB##i = vec_ld(i * stride + 16, src2);                             \
    vbT##i = vec_perm(vbA##i, vbB##i, perml##i);                        \
    vb##i =                                                             \
        (vector signed short)vec_mergeh((vector unsigned char)zero,     \
                                        (vector unsigned char)vbT##i)
#define LOAD_LINE_ALIGNED(i)                                            \
    register int j##i = i * stride;                                     \
    vbT##i = vec_ld(j##i, src2);                                        \
    vb##i =                                                             \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)vbT##i)
    /* Special-casing the aligned case is worthwhile, as all calls from
     * the (transposed) horizontal deblocks will be aligned, in addition
     * to the naturally aligned vertical deblocks. */
    if (!properStride && !srcAlign) {
        LOAD_LINE_ALIGNED(0);
        LOAD_LINE_ALIGNED(1);
        LOAD_LINE_ALIGNED(2);
        LOAD_LINE_ALIGNED(3);
        LOAD_LINE_ALIGNED(4);
        LOAD_LINE_ALIGNED(5);
        LOAD_LINE_ALIGNED(6);
        LOAD_LINE_ALIGNED(7);
        LOAD_LINE_ALIGNED(8);
        LOAD_LINE_ALIGNED(9);
    } else {
        LOAD_LINE(0);
        LOAD_LINE(1);
        LOAD_LINE(2);
        LOAD_LINE(3);
        LOAD_LINE(4);
        LOAD_LINE(5);
        LOAD_LINE(6);
        LOAD_LINE(7);
        LOAD_LINE(8);
        LOAD_LINE(9);
    }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED
    const vector unsigned short v_2 = vec_splat_u16(2);
    const vector unsigned short v_4 = vec_splat_u16(4);
    const vector signed short v_diff01 = vec_sub(vb0, vb1);
    const vector unsigned short v_cmp01 =
        (const vector unsigned short) vec_cmplt(vec_abs(v_diff01), vqp);
    const vector signed short v_first = vec_sel(vb1, vb0, v_cmp01);
    const vector signed short v_diff89 = vec_sub(vb8, vb9);
    const vector unsigned short v_cmp89 =
        (const vector unsigned short) vec_cmplt(vec_abs(v_diff89), vqp);
    const vector signed short v_last = vec_sel(vb8, vb9, v_cmp89);
    const vector signed short temp01 = vec_mladd(v_first, (vector signed short)v_4, vb1);
    const vector signed short temp02 = vec_add(vb2, vb3);
    const vector signed short temp03 = vec_add(temp01, (vector signed short)v_4);
    const vector signed short v_sumsB0 = vec_add(temp02, temp03);
    const vector signed short temp11 = vec_sub(v_sumsB0, v_first);
    const vector signed short v_sumsB1 = vec_add(temp11, vb4);

    const vector signed short temp21 = vec_sub(v_sumsB1, v_first);
    const vector signed short v_sumsB2 = vec_add(temp21, vb5);

    const vector signed short temp31 = vec_sub(v_sumsB2, v_first);
    const vector signed short v_sumsB3 = vec_add(temp31, vb6);

    const vector signed short temp41 = vec_sub(v_sumsB3, v_first);
    const vector signed short v_sumsB4 = vec_add(temp41, vb7);

    const vector signed short temp51 = vec_sub(v_sumsB4, vb1);
    const vector signed short v_sumsB5 = vec_add(temp51, vb8);

    const vector signed short temp61 = vec_sub(v_sumsB5, vb2);
    const vector signed short v_sumsB6 = vec_add(temp61, v_last);

    const vector signed short temp71 = vec_sub(v_sumsB6, vb3);
    const vector signed short v_sumsB7 = vec_add(temp71, v_last);

    const vector signed short temp81 = vec_sub(v_sumsB7, vb4);
    const vector signed short v_sumsB8 = vec_add(temp81, v_last);

    const vector signed short temp91 = vec_sub(v_sumsB8, vb5);
    const vector signed short v_sumsB9 = vec_add(temp91, v_last);
#define COMPUTE_VR(i, j, k)                                             \
    const vector signed short temps1##i =                               \
        vec_add(v_sumsB##i, v_sumsB##k);                                \
    const vector signed short temps2##i =                               \
        vec_mladd(vb##j, (vector signed short)v_2, temps1##i);          \
    const vector signed short vr##j = vec_sra(temps2##i, v_4)

    COMPUTE_VR(0, 1, 2);
    COMPUTE_VR(1, 2, 3);
    COMPUTE_VR(2, 3, 4);
    COMPUTE_VR(3, 4, 5);
    COMPUTE_VR(4, 5, 6);
    COMPUTE_VR(5, 6, 7);
    COMPUTE_VR(6, 7, 8);
    COMPUTE_VR(7, 8, 9);
#undef COMPUTE_VR
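    /* Each output row j is thus (v_sumsB[i] + v_sumsB[k] + 2*vb[j]) >> 4,
     * the vertical low-pass of the C template expressed on whole rows; the
     * running sums built above make each additional row cost one subtract
     * and one add instead of a full multi-row sum. */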
    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                        0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);
#define PACK_AND_STORE(i)                                               \
    const vector unsigned char perms##i =                               \
        vec_lvsr(i * stride, src2);                                     \
    const vector unsigned char vf##i =                                  \
        vec_packsu(vr##i, (vector signed short)zero);                   \
    const vector unsigned char vg##i =                                  \
        vec_perm(vf##i, vbT##i, permHH);                                \
    const vector unsigned char mask##i =                                \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
    const vector unsigned char vg2##i =                                 \
        vec_perm(vg##i, vg##i, perms##i);                               \
    const vector unsigned char svA##i =                                 \
        vec_sel(vbA##i, vg2##i, mask##i);                               \
    const vector unsigned char svB##i =                                 \
        vec_sel(vg2##i, vbB##i, mask##i);                               \
    vec_st(svA##i, i * stride, src2);                                   \
    vec_st(svB##i, i * stride + 16, src2)
#define PACK_AND_STORE_ALIGNED(i)                                       \
    const vector unsigned char vf##i =                                  \
        vec_packsu(vr##i, (vector signed short)zero);                   \
    const vector unsigned char vg##i =                                  \
        vec_perm(vf##i, vbT##i, permHH);                                \
    vec_st(vg##i, i * stride, src2)
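/* PACK_AND_STORE is the classic AltiVec unaligned read-modify-write store:
 * vec_lvsr gives the inverse permute of the load case, the vg2/mask pair
 * rotates the new bytes into position, and vec_sel keeps the surrounding
 * bytes of the two aligned blocks untouched before storing both back. */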
    /* Special-casing the aligned case is worthwhile, as all calls from
     * the (transposed) horizontal deblocks will be aligned, in addition
     * to the naturally aligned vertical deblocks. */
    if (!properStride && !srcAlign) {
        PACK_AND_STORE_ALIGNED(1);
        PACK_AND_STORE_ALIGNED(2);
        PACK_AND_STORE_ALIGNED(3);
        PACK_AND_STORE_ALIGNED(4);
        PACK_AND_STORE_ALIGNED(5);
        PACK_AND_STORE_ALIGNED(6);
        PACK_AND_STORE_ALIGNED(7);
        PACK_AND_STORE_ALIGNED(8);
    } else {
        PACK_AND_STORE(1);
        PACK_AND_STORE(2);
        PACK_AND_STORE(3);
        PACK_AND_STORE(4);
        PACK_AND_STORE(5);
        PACK_AND_STORE(6);
        PACK_AND_STORE(7);
        PACK_AND_STORE(8);
    }
#undef PACK_AND_STORE
#undef PACK_AND_STORE_ALIGNED
}
static inline void doVertDefFilter_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
    this code makes no assumptions about src or stride.
    One could remove the recomputation of the perm
    vector by assuming (stride % 16) == 0, unfortunately
    this is not always true. Quite a lot of load/stores
    can be removed by assuming proper alignment of
    src & stride :-(
    */
    uint8_t *src2 = src;
    const vector signed int zero = vec_splat_s32(0);
    DECLARE_ALIGNED(16, short, qp[8]);
    qp[0] = 8*c->QP;
    vector signed short vqp = vec_ld(0, qp);
    vqp = vec_splat(vqp, 0);
#define LOAD_LINE(i)                                                    \
    const vector unsigned char perm##i =                                \
        vec_lvsl(i * stride, src2);                                     \
    const vector unsigned char vbA##i =                                 \
        vec_ld(i * stride, src2);                                       \
    const vector unsigned char vbB##i =                                 \
        vec_ld(i * stride + 16, src2);                                  \
    const vector unsigned char vbT##i =                                 \
        vec_perm(vbA##i, vbB##i, perm##i);                              \
    const vector signed short vb##i =                                   \
        (vector signed short)vec_mergeh((vector unsigned char)zero,     \
                                        (vector unsigned char)vbT##i)

    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
#undef LOAD_LINE
    const vector signed short v_1 = vec_splat_s16(1);
    const vector signed short v_2 = vec_splat_s16(2);
    const vector signed short v_5 = vec_splat_s16(5);
    const vector signed short v_32 = vec_sl(v_1,
                                            (vector unsigned short)v_5);
    /* middle energy */
    const vector signed short l3minusl6 = vec_sub(vb3, vb6);
    const vector signed short l5minusl4 = vec_sub(vb5, vb4);
    const vector signed short twotimes_l3minusl6 = vec_mladd(v_2, l3minusl6, (vector signed short)zero);
    const vector signed short mE = vec_mladd(v_5, l5minusl4, twotimes_l3minusl6);
    const vector signed short absmE = vec_abs(mE);
    /* left & right energy */
    const vector signed short l1minusl4 = vec_sub(vb1, vb4);
    const vector signed short l3minusl2 = vec_sub(vb3, vb2);
    const vector signed short l5minusl8 = vec_sub(vb5, vb8);
    const vector signed short l7minusl6 = vec_sub(vb7, vb6);
    const vector signed short twotimes_l1minusl4 = vec_mladd(v_2, l1minusl4, (vector signed short)zero);
    const vector signed short twotimes_l5minusl8 = vec_mladd(v_2, l5minusl8, (vector signed short)zero);
    const vector signed short lE = vec_mladd(v_5, l3minusl2, twotimes_l1minusl4);
    const vector signed short rE = vec_mladd(v_5, l7minusl6, twotimes_l5minusl8);
    const vector signed short ddiff = vec_sub(absmE,
                                              vec_min(vec_abs(lE),
                                                      vec_abs(rE)));
    const vector signed short ddiffclamp = vec_max(ddiff, (vector signed short)zero);
    const vector signed short dtimes64 = vec_mladd(v_5, ddiffclamp, v_32);
    const vector signed short d = vec_sra(dtimes64, vec_splat_u16(6));
    const vector signed short minusd = vec_sub((vector signed short)zero, d);
    const vector signed short finald = vec_sel(minusd,
                                               d,
                                               vec_cmpgt(vec_sub((vector signed short)zero, mE),
                                                         (vector signed short)zero));
    const vector signed short qtimes2 = vec_sub(vb4, vb5);
    /* for a shift right to behave like /2, we need to add one
       to all negative integers */
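    /* e.g. -3 >> 1 == -2 (arithmetic shift rounds toward minus infinity),
     * while (-3 + 1) >> 1 == -1 == -3 / 2 in C (truncation toward zero). */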
    const vector signed short rounddown = vec_sel((vector signed short)zero,
                                                  v_1,
                                                  vec_cmplt(qtimes2, (vector signed short)zero));
    const vector signed short q = vec_sra(vec_add(qtimes2, rounddown), vec_splat_u16(1));
    const vector signed short dclamp_P1 = vec_max((vector signed short)zero, finald);
    const vector signed short dclamp_P = vec_min(dclamp_P1, q);
    const vector signed short dclamp_N1 = vec_min((vector signed short)zero, finald);
    const vector signed short dclamp_N = vec_max(dclamp_N1, q);
    const vector signed short dclampedfinal = vec_sel(dclamp_N,
                                                      dclamp_P,
                                                      vec_cmpgt(q, (vector signed short)zero));
    const vector signed short dornotd = vec_sel((vector signed short)zero,
                                                dclampedfinal,
                                                vec_cmplt(absmE, vqp));
    /* add/subtract to l4 and l5 */
    const vector signed short vb4minusd = vec_sub(vb4, dornotd);
    const vector signed short vb5plusd  = vec_add(vb5, dornotd);
    /* finally, stores */
    const vector unsigned char st4 = vec_packsu(vb4minusd, (vector signed short)zero);
    const vector unsigned char st5 = vec_packsu(vb5plusd,  (vector signed short)zero);
    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                        0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);
#define STORE(i)                                                \
    const vector unsigned char perms##i =                       \
        vec_lvsr(i * stride, src2);                             \
    const vector unsigned char vg##i =                          \
        vec_perm(st##i, vbT##i, permHH);                        \
    const vector unsigned char mask##i =                        \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
    const vector unsigned char vg2##i =                         \
        vec_perm(vg##i, vg##i, perms##i);                       \
    const vector unsigned char svA##i =                         \
        vec_sel(vbA##i, vg2##i, mask##i);                       \
    const vector unsigned char svB##i =                         \
        vec_sel(vg2##i, vbB##i, mask##i);                       \
    vec_st(svA##i, i * stride, src2);                           \
    vec_st(svB##i, i * stride + 16, src2)

    STORE(4);
    STORE(5);
#undef STORE
}
static inline void dering_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
    this code makes no assumptions about src or stride.
    One could remove the recomputation of the perm
    vector by assuming (stride % 16) == 0, unfortunately
    this is not always true. Quite a lot of load/stores
    can be removed by assuming proper alignment of
    src & stride :-(
    */
    uint8_t *srcCopy = src;
    DECLARE_ALIGNED(16, uint8_t, dt[16]);
    const vector signed int zero = vec_splat_s32(0);
    vector unsigned char v_dt;
    dt[0] = deringThreshold;
    v_dt = vec_splat(vec_ld(0, dt), 0);
#define LOAD_LINE(i)                                                    \
    const vector unsigned char perm##i =                                \
        vec_lvsl(i * stride, srcCopy);                                  \
    vector unsigned char sA##i = vec_ld(i * stride, srcCopy);           \
    vector unsigned char sB##i = vec_ld(i * stride + 16, srcCopy);      \
    vector unsigned char src##i = vec_perm(sA##i, sB##i, perm##i)

    LOAD_LINE(0);
    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
    LOAD_LINE(9);
#undef LOAD_LINE
    vector unsigned char v_avg;
    const vector unsigned char trunc_perm = (vector unsigned char)
        AVV(0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
            0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18);
    const vector unsigned char trunc_src12 = vec_perm(src1, src2, trunc_perm);
    const vector unsigned char trunc_src34 = vec_perm(src3, src4, trunc_perm);
    const vector unsigned char trunc_src56 = vec_perm(src5, src6, trunc_perm);
    const vector unsigned char trunc_src78 = vec_perm(src7, src8, trunc_perm);
#define EXTRACT(op) do {                                                             \
    const vector unsigned char s##op##_1   = vec_##op(trunc_src12, trunc_src34);     \
    const vector unsigned char s##op##_2   = vec_##op(trunc_src56, trunc_src78);     \
    const vector unsigned char s##op##_6   = vec_##op(s##op##_1, s##op##_2);         \
    const vector unsigned char s##op##_8h  = vec_mergeh(s##op##_6, s##op##_6);       \
    const vector unsigned char s##op##_8l  = vec_mergel(s##op##_6, s##op##_6);       \
    const vector unsigned char s##op##_9   = vec_##op(s##op##_8h, s##op##_8l);       \
    const vector unsigned char s##op##_9h  = vec_mergeh(s##op##_9, s##op##_9);       \
    const vector unsigned char s##op##_9l  = vec_mergel(s##op##_9, s##op##_9);       \
    const vector unsigned char s##op##_10  = vec_##op(s##op##_9h, s##op##_9l);       \
    const vector unsigned char s##op##_10h = vec_mergeh(s##op##_10, s##op##_10);     \
    const vector unsigned char s##op##_10l = vec_mergel(s##op##_10, s##op##_10);     \
    const vector unsigned char s##op##_11  = vec_##op(s##op##_10h, s##op##_10l);     \
    const vector unsigned char s##op##_11h = vec_mergeh(s##op##_11, s##op##_11);     \
    const vector unsigned char s##op##_11l = vec_mergel(s##op##_11, s##op##_11);     \
    v_##op = vec_##op(s##op##_11h, s##op##_11l); } while (0)
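    /* EXTRACT(min)/EXTRACT(max) reduce the 8x8 interior of the block (rows
     * 1-8, columns 1-8, selected by trunc_perm above) to a single splatted
     * min or max byte: pairwise vec_min/vec_max first, then alternating
     * mergeh/mergel halving rounds, a log2-style horizontal reduction. */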
    vector unsigned char v_min;
    vector unsigned char v_max;
    EXTRACT(min);
    EXTRACT(max);
#undef EXTRACT
    if (vec_all_lt(vec_sub(v_max, v_min), v_dt))
        return;
    v_avg = vec_avg(v_min, v_max);
    DECLARE_ALIGNED(16, signed int, S[8]);
    const vector unsigned short mask1 = (vector unsigned short)
        AVV(0x0001, 0x0002, 0x0004, 0x0008,
            0x0010, 0x0020, 0x0040, 0x0080);
    const vector unsigned short mask2 = (vector unsigned short)
        AVV(0x0100, 0x0200, 0x0000, 0x0000,
            0x0000, 0x0000, 0x0000, 0x0000);
    const vector unsigned int vuint32_16 = vec_sl(vec_splat_u32(1), vec_splat_u32(4));
    const vector unsigned int vuint32_1 = vec_splat_u32(1);
#define COMPARE(i)                                                      \
    vector signed int sum##i;                                           \
    do {                                                                \
        const vector unsigned char cmp##i =                             \
            (vector unsigned char)vec_cmpgt(src##i, v_avg);             \
        const vector unsigned short cmpHi##i =                          \
            (vector unsigned short)vec_mergeh(cmp##i, cmp##i);          \
        const vector unsigned short cmpLi##i =                          \
            (vector unsigned short)vec_mergel(cmp##i, cmp##i);          \
        const vector signed short cmpHf##i =                            \
            (vector signed short)vec_and(cmpHi##i, mask1);              \
        const vector signed short cmpLf##i =                            \
            (vector signed short)vec_and(cmpLi##i, mask2);              \
        const vector signed int sump##i = vec_sum4s(cmpHf##i, zero);    \
        const vector signed int sumq##i = vec_sum4s(cmpLf##i, sump##i); \
        sum##i = vec_sums(sumq##i, zero); } while (0)

    COMPARE(0);
    COMPARE(1);
    COMPARE(2);
    COMPARE(3);
    COMPARE(4);
    COMPARE(5);
    COMPARE(6);
    COMPARE(7);
    COMPARE(8);
    COMPARE(9);
#undef COMPARE
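    /* Each invocation leaves, in the last lane of sum##i, a 10-bit mask
     * whose bit x is set when pixel x of row i is brighter than the block
     * average: vec_cmpgt yields 0xFF/0x00 per byte, the mergeh/mergel plus
     * mask1/mask2 turn that into one distinct power-of-two bit per column,
     * and vec_sum4s/vec_sums combine the disjoint bits by addition. */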
    vector signed int sumA2;
    vector signed int sumB2;
    const vector signed int sump02 = vec_mergel(sum0, sum2);
    const vector signed int sump13 = vec_mergel(sum1, sum3);
    const vector signed int sumA = vec_mergel(sump02, sump13);

    const vector signed int sump46 = vec_mergel(sum4, sum6);
    const vector signed int sump57 = vec_mergel(sum5, sum7);
    const vector signed int sumB = vec_mergel(sump46, sump57);

    const vector signed int sump8A = vec_mergel(sum8, zero);
    const vector signed int sump9B = vec_mergel(sum9, zero);
    const vector signed int sumC = vec_mergel(sump8A, sump9B);
    const vector signed int tA = vec_sl(vec_nor(zero, sumA), vuint32_16);
    const vector signed int tB = vec_sl(vec_nor(zero, sumB), vuint32_16);
    const vector signed int tC = vec_sl(vec_nor(zero, sumC), vuint32_16);
    const vector signed int t2A = vec_or(sumA, tA);
    const vector signed int t2B = vec_or(sumB, tB);
    const vector signed int t2C = vec_or(sumC, tC);
    const vector signed int t3A = vec_and(vec_sra(t2A, vuint32_1),
                                          vec_sl(t2A, vuint32_1));
    const vector signed int t3B = vec_and(vec_sra(t2B, vuint32_1),
                                          vec_sl(t2B, vuint32_1));
    const vector signed int t3C = vec_and(vec_sra(t2C, vuint32_1),
                                          vec_sl(t2C, vuint32_1));
    const vector signed int yA = vec_and(t2A, t3A);
    const vector signed int yB = vec_and(t2B, t3B);
    const vector signed int yC = vec_and(t2C, t3C);
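    /* The t/t2/t3 trick: t2 holds the 10-bit row mask in the low halfword
     * and its complement (shifted up by 16) in the high halfword, and
     * y = t2 & (t2 >> 1) & (t2 << 1) keeps only bits whose left and right
     * neighbors agree, i.e. pixels on the same side of the average as both
     * horizontal neighbors; the strangeperm shuffles below then align the
     * rows above and below so sumAp/sumBp mark pixels whose whole 3x3
     * neighborhood lies on one side of the average. */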
    const vector unsigned char strangeperm1 = vec_lvsl(4, (unsigned char*)0);
    const vector unsigned char strangeperm2 = vec_lvsl(8, (unsigned char*)0);
    const vector signed int sumAd4 = vec_perm(yA, yB, strangeperm1);
    const vector signed int sumAd8 = vec_perm(yA, yB, strangeperm2);
    const vector signed int sumBd4 = vec_perm(yB, yC, strangeperm1);
    const vector signed int sumBd8 = vec_perm(yB, yC, strangeperm2);
    const vector signed int sumAp = vec_and(yA,
                                            vec_and(sumAd4, sumAd8));
    const vector signed int sumBp = vec_and(yB,
                                            vec_and(sumBd4, sumBd8));
    sumA2 = vec_or(sumAp,
                   vec_sra(sumAp,
                           vuint32_16));
    sumB2 = vec_or(sumBp,
                   vec_sra(sumBp,
                           vuint32_16));
    vec_st(sumA2, 0, S);
    vec_st(sumB2, 16, S);
    /* I'm not sure the following is actually faster
       than straight, unvectorized C code :-( */
    DECLARE_ALIGNED(16, int, tQP2[4]);
    tQP2[0] = c->QP/2 + 1;
    vector signed int vQP2 = vec_ld(0, tQP2);
    vQP2 = vec_splat(vQP2, 0);
    const vector signed int vsint32_8 = vec_splat_s32(8);
    const vector unsigned int vuint32_4 = vec_splat_u32(4);
    const vector unsigned char permA1 = (vector unsigned char)
        AVV(0x00, 0x01, 0x02, 0x10, 0x11, 0x12, 0x1F, 0x1F,
            0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F);
    const vector unsigned char permA2 = (vector unsigned char)
        AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x10, 0x11,
            0x12, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F);
    const vector unsigned char permA1inc = (vector unsigned char)
        AVV(0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char permA2inc = (vector unsigned char)
        AVV(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01,
            0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char magic = (vector unsigned char)
        AVV(0x01, 0x02, 0x01, 0x02, 0x04, 0x02, 0x01, 0x02,
            0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char extractPerm = (vector unsigned char)
        AVV(0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01,
            0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01);
    const vector unsigned char extractPermInc = (vector unsigned char)
        AVV(0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
            0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01);
    const vector unsigned char identity = vec_lvsl(0,(unsigned char *)0);
    const vector unsigned char tenRight = (vector unsigned char)
        AVV(0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char eightLeft = (vector unsigned char)
        AVV(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08);
#define F_INIT(i)                                               \
    vector unsigned char tenRightM##i = tenRight;               \
    vector unsigned char permA1M##i = permA1;                   \
    vector unsigned char permA2M##i = permA2;                   \
    vector unsigned char extractPermM##i = extractPerm
#define F2(i, j, k, l)                                                  \
    if (S[i] & (1 << (l+1))) {                                          \
        const vector unsigned char a_##j##_A##l =                       \
            vec_perm(src##i, src##j, permA1M##i);                       \
        const vector unsigned char a_##j##_B##l =                       \
            vec_perm(a_##j##_A##l, src##k, permA2M##i);                 \
        const vector signed int a_##j##_sump##l =                       \
            (vector signed int)vec_msum(a_##j##_B##l, magic,            \
                                        (vector unsigned int)zero);     \
        vector signed int F_##j##_##l =                                 \
            vec_sr(vec_sums(a_##j##_sump##l, vsint32_8), vuint32_4);    \
        F_##j##_##l = vec_splat(F_##j##_##l, 3);                        \
        const vector signed int p_##j##_##l =                           \
            (vector signed int)vec_perm(src##j,                         \
                                        (vector unsigned char)zero,     \
                                        extractPermM##i);               \
        const vector signed int sum_##j##_##l  = vec_add(p_##j##_##l, vQP2); \
        const vector signed int diff_##j##_##l = vec_sub(p_##j##_##l, vQP2); \
        vector signed int newpm_##j##_##l;                              \
        if (vec_all_lt(sum_##j##_##l, F_##j##_##l))                     \
            newpm_##j##_##l = sum_##j##_##l;                            \
        else if (vec_all_gt(diff_##j##_##l, F_##j##_##l))               \
            newpm_##j##_##l = diff_##j##_##l;                           \
        else newpm_##j##_##l = F_##j##_##l;                             \
        const vector unsigned char newpm2_##j##_##l =                   \
            vec_splat((vector unsigned char)newpm_##j##_##l, 15);       \
        const vector unsigned char mask##j##l = vec_add(identity,       \
                                                        tenRightM##i);  \
        src##j = vec_perm(src##j, newpm2_##j##_##l, mask##j##l);        \
    }                                                                   \
    permA1M##i = vec_add(permA1M##i, permA1inc);                        \
    permA2M##i = vec_add(permA2M##i, permA2inc);                        \
    tenRightM##i = vec_sro(tenRightM##i, eightLeft);                    \
    extractPermM##i = vec_add(extractPermM##i, extractPermInc)
#define ITER(i, j, k)                                           \
    F_INIT(i);                                                  \
    F2(i, j, k, 0);                                             \
    F2(i, j, k, 1);                                             \
    F2(i, j, k, 2);                                             \
    F2(i, j, k, 3);                                             \
    F2(i, j, k, 4);                                             \
    F2(i, j, k, 5);                                             \
    F2(i, j, k, 6);                                             \
    F2(i, j, k, 7)

    ITER(0, 1, 2);
    ITER(1, 2, 3);
    ITER(2, 3, 4);
    ITER(3, 4, 5);
    ITER(4, 5, 6);
    ITER(5, 6, 7);
    ITER(6, 7, 8);
    ITER(7, 8, 9);
    const vector signed char neg1 = vec_splat_s8(-1);
#define STORE_LINE(i)                                           \
    const vector unsigned char permST##i =                      \
        vec_lvsr(i * stride, srcCopy);                          \
    const vector unsigned char maskST##i =                      \
        vec_perm((vector unsigned char)zero,                    \
                 (vector unsigned char)neg1, permST##i);        \
    src##i = vec_perm(src##i, src##i, permST##i);               \
    sA##i = vec_sel(sA##i, src##i, maskST##i);                  \
    sB##i = vec_sel(src##i, sB##i, maskST##i);                  \
    vec_st(sA##i, i * stride, srcCopy);                         \
    vec_st(sB##i, i * stride + 16, srcCopy)

    STORE_LINE(1);
    STORE_LINE(2);
    STORE_LINE(3);
    STORE_LINE(4);
    STORE_LINE(5);
    STORE_LINE(6);
    STORE_LINE(7);
    STORE_LINE(8);
#undef STORE_LINE
#undef ITER
#undef F2
#undef F_INIT
}
#define doHorizLowPass_altivec(a...) doHorizLowPass_C(a)
#define doHorizDefFilter_altivec(a...) doHorizDefFilter_C(a)
#define do_a_deblock_altivec(a...) do_a_deblock_C(a)
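/* The horizontal filters are mapped straight to their C implementations;
 * the transpose helpers at the end of this file let callers transpose a
 * block and reuse the vertical AltiVec filters instead. */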
static inline void RENAME(tempNoiseReducer)(uint8_t *src, int stride,
                                            uint8_t *tempBlurred, uint32_t *tempBlurredPast, int *maxNoise)
{
    const vector signed int zero = vec_splat_s32(0);
    const vector signed short vsint16_1 = vec_splat_s16(1);
    vector signed int v_dp = zero;
    vector signed int v_sysdp = zero;
    int d, sysd, i;
    tempBlurredPast[127] = maxNoise[0];
    tempBlurredPast[128] = maxNoise[1];
    tempBlurredPast[129] = maxNoise[2];
#define LOAD_LINE(src, i)                                               \
    register int j##src##i = i * stride;                                \
    vector unsigned char perm##src##i = vec_lvsl(j##src##i, src);       \
    const vector unsigned char v_##src##A1##i = vec_ld(j##src##i, src); \
    const vector unsigned char v_##src##A2##i = vec_ld(j##src##i + 16, src); \
    const vector unsigned char v_##src##A##i =                          \
        vec_perm(v_##src##A1##i, v_##src##A2##i, perm##src##i);         \
    vector signed short v_##src##Ass##i =                               \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)v_##src##A##i)

    LOAD_LINE(src, 0);
    LOAD_LINE(src, 1);
    LOAD_LINE(src, 2);
    LOAD_LINE(src, 3);
    LOAD_LINE(src, 4);
    LOAD_LINE(src, 5);
    LOAD_LINE(src, 6);
    LOAD_LINE(src, 7);
    LOAD_LINE(tempBlurred, 0);
    LOAD_LINE(tempBlurred, 1);
    LOAD_LINE(tempBlurred, 2);
    LOAD_LINE(tempBlurred, 3);
    LOAD_LINE(tempBlurred, 4);
    LOAD_LINE(tempBlurred, 5);
    LOAD_LINE(tempBlurred, 6);
    LOAD_LINE(tempBlurred, 7);
#undef LOAD_LINE
#define ACCUMULATE_DIFFS(i)                                     \
    vector signed short v_d##i = vec_sub(v_tempBlurredAss##i,   \
                                         v_srcAss##i);          \
    v_dp = vec_msums(v_d##i, v_d##i, v_dp);                     \
    v_sysdp = vec_msums(v_d##i, vsint16_1, v_sysdp)

    ACCUMULATE_DIFFS(0);
    ACCUMULATE_DIFFS(1);
    ACCUMULATE_DIFFS(2);
    ACCUMULATE_DIFFS(3);
    ACCUMULATE_DIFFS(4);
    ACCUMULATE_DIFFS(5);
    ACCUMULATE_DIFFS(6);
    ACCUMULATE_DIFFS(7);
#undef ACCUMULATE_DIFFS
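    /* vec_msums yields two sums in one pass: v_dp accumulates the squared
     * differences (an SSD) and v_sysdp, multiplying by 1, the plain sum of
     * differences between the current block and its blurred history. */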
    v_dp = vec_sums(v_dp, zero);
    v_sysdp = vec_sums(v_sysdp, zero);

    v_dp = vec_splat(v_dp, 3);
    v_sysdp = vec_splat(v_sysdp, 3);

    vec_ste(v_dp, 0, &d);
    vec_ste(v_sysdp, 0, &sysd);
    i = d;
    d = (4*d
         +(*(tempBlurredPast-256))
         +(*(tempBlurredPast-1))+ (*(tempBlurredPast+1))
         +(*(tempBlurredPast+256))
         +4)>>3;

    *tempBlurredPast = i;
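    /* Decision ladder on the smoothed difference d (as in the C template):
     * d >= maxNoise[2] means a (local) scene change, so the source block is
     * taken as-is; between maxNoise[1] and maxNoise[2] the two blocks are
     * averaged; below that, the history is blended in with 7/8 or 3/4
     * weight depending on maxNoise[0]. */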
    if (d > maxNoise[1]) {
        if (d < maxNoise[2]) {
#define OP(i) v_tempBlurredAss##i = vec_avg(v_tempBlurredAss##i, v_srcAss##i);

            OP(0); OP(1); OP(2); OP(3);
            OP(4); OP(5); OP(6); OP(7);
#undef OP
        } else {
#define OP(i) v_tempBlurredAss##i = v_srcAss##i;

            OP(0); OP(1); OP(2); OP(3);
            OP(4); OP(5); OP(6); OP(7);
#undef OP
        }
    } else {
        if (d < maxNoise[0]) {
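            /* A rounded 7/8-vs-1/8 blend: (7*blurred + src + 4) >> 3; the
             * else branch below uses (3*blurred + src + 2) >> 2, a lighter
             * 3/4-vs-1/4 blend, for noisier blocks. */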
            const vector signed short vsint16_7 = vec_splat_s16(7);
            const vector signed short vsint16_4 = vec_splat_s16(4);
            const vector unsigned short vuint16_3 = vec_splat_u16(3);

#define OP(i)                                                   \
            const vector signed short v_temp##i =               \
                vec_mladd(v_tempBlurredAss##i,                  \
                          vsint16_7, v_srcAss##i);              \
            const vector signed short v_temp2##i =              \
                vec_add(v_temp##i, vsint16_4);                  \
            v_tempBlurredAss##i = vec_sr(v_temp2##i, vuint16_3)

            OP(0); OP(1); OP(2); OP(3);
            OP(4); OP(5); OP(6); OP(7);
#undef OP
        } else {
            const vector signed short vsint16_3 = vec_splat_s16(3);
            const vector signed short vsint16_2 = vec_splat_s16(2);

#define OP(i)                                                   \
            const vector signed short v_temp##i =               \
                vec_mladd(v_tempBlurredAss##i,                  \
                          vsint16_3, v_srcAss##i);              \
            const vector signed short v_temp2##i =              \
                vec_add(v_temp##i, vsint16_2);                  \
            v_tempBlurredAss##i = vec_sr(v_temp2##i,            \
                                         (vector unsigned short)vsint16_2)

            OP(0); OP(1); OP(2); OP(3);
            OP(4); OP(5); OP(6); OP(7);
#undef OP
        }
    }
    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                        0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);
#define PACK_AND_STORE(src, i)                                          \
    const vector unsigned char perms##src##i =                          \
        vec_lvsr(i * stride, src);                                      \
    const vector unsigned char vf##src##i =                             \
        vec_packsu(v_tempBlurredAss##i, (vector signed short)zero);     \
    const vector unsigned char vg##src##i =                             \
        vec_perm(vf##src##i, v_##src##A##i, permHH);                    \
    const vector unsigned char mask##src##i =                           \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##src##i); \
    const vector unsigned char vg2##src##i =                            \
        vec_perm(vg##src##i, vg##src##i, perms##src##i);                \
    const vector unsigned char svA##src##i =                            \
        vec_sel(v_##src##A1##i, vg2##src##i, mask##src##i);             \
    const vector unsigned char svB##src##i =                            \
        vec_sel(vg2##src##i, v_##src##A2##i, mask##src##i);             \
    vec_st(svA##src##i, i * stride, src);                               \
    vec_st(svB##src##i, i * stride + 16, src)
    PACK_AND_STORE(src, 0);
    PACK_AND_STORE(src, 1);
    PACK_AND_STORE(src, 2);
    PACK_AND_STORE(src, 3);
    PACK_AND_STORE(src, 4);
    PACK_AND_STORE(src, 5);
    PACK_AND_STORE(src, 6);
    PACK_AND_STORE(src, 7);
    PACK_AND_STORE(tempBlurred, 0);
    PACK_AND_STORE(tempBlurred, 1);
    PACK_AND_STORE(tempBlurred, 2);
    PACK_AND_STORE(tempBlurred, 3);
    PACK_AND_STORE(tempBlurred, 4);
    PACK_AND_STORE(tempBlurred, 5);
    PACK_AND_STORE(tempBlurred, 6);
    PACK_AND_STORE(tempBlurred, 7);
#undef PACK_AND_STORE
}
static inline void transpose_16x8_char_toPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
    const vector unsigned char zero = vec_splat_u8(0);
#define LOAD_DOUBLE_LINE(i, j)                                          \
    vector unsigned char perm1##i = vec_lvsl(i * stride, src);          \
    vector unsigned char perm2##i = vec_lvsl(j * stride, src);          \
    vector unsigned char srcA##i = vec_ld(i * stride, src);             \
    vector unsigned char srcB##i = vec_ld(i * stride + 16, src);        \
    vector unsigned char srcC##i = vec_ld(j * stride, src);             \
    vector unsigned char srcD##i = vec_ld(j * stride + 16, src);        \
    vector unsigned char src##i = vec_perm(srcA##i, srcB##i, perm1##i); \
    vector unsigned char src##j = vec_perm(srcC##i, srcD##i, perm2##i)
    LOAD_DOUBLE_LINE(0, 1);
    LOAD_DOUBLE_LINE(2, 3);
    LOAD_DOUBLE_LINE(4, 5);
    LOAD_DOUBLE_LINE(6, 7);
#undef LOAD_DOUBLE_LINE
    vector unsigned char tempA = vec_mergeh(src0, zero);
    vector unsigned char tempB = vec_mergel(src0, zero);
    vector unsigned char tempC = vec_mergeh(src1, zero);
    vector unsigned char tempD = vec_mergel(src1, zero);
    vector unsigned char tempE = vec_mergeh(src2, zero);
    vector unsigned char tempF = vec_mergel(src2, zero);
    vector unsigned char tempG = vec_mergeh(src3, zero);
    vector unsigned char tempH = vec_mergel(src3, zero);
    vector unsigned char tempI = vec_mergeh(src4, zero);
    vector unsigned char tempJ = vec_mergel(src4, zero);
    vector unsigned char tempK = vec_mergeh(src5, zero);
    vector unsigned char tempL = vec_mergel(src5, zero);
    vector unsigned char tempM = vec_mergeh(src6, zero);
    vector unsigned char tempN = vec_mergel(src6, zero);
    vector unsigned char tempO = vec_mergeh(src7, zero);
    vector unsigned char tempP = vec_mergel(src7, zero);
    vector unsigned char temp0  = vec_mergeh(tempA, tempI);
    vector unsigned char temp1  = vec_mergel(tempA, tempI);
    vector unsigned char temp2  = vec_mergeh(tempB, tempJ);
    vector unsigned char temp3  = vec_mergel(tempB, tempJ);
    vector unsigned char temp4  = vec_mergeh(tempC, tempK);
    vector unsigned char temp5  = vec_mergel(tempC, tempK);
    vector unsigned char temp6  = vec_mergeh(tempD, tempL);
    vector unsigned char temp7  = vec_mergel(tempD, tempL);
    vector unsigned char temp8  = vec_mergeh(tempE, tempM);
    vector unsigned char temp9  = vec_mergel(tempE, tempM);
    vector unsigned char temp10 = vec_mergeh(tempF, tempN);
    vector unsigned char temp11 = vec_mergel(tempF, tempN);
    vector unsigned char temp12 = vec_mergeh(tempG, tempO);
    vector unsigned char temp13 = vec_mergel(tempG, tempO);
    vector unsigned char temp14 = vec_mergeh(tempH, tempP);
    vector unsigned char temp15 = vec_mergel(tempH, tempP);
    tempA = vec_mergeh(temp0, temp8);
    tempB = vec_mergel(temp0, temp8);
    tempC = vec_mergeh(temp1, temp9);
    tempD = vec_mergel(temp1, temp9);
    tempE = vec_mergeh(temp2, temp10);
    tempF = vec_mergel(temp2, temp10);
    tempG = vec_mergeh(temp3, temp11);
    tempH = vec_mergel(temp3, temp11);
    tempI = vec_mergeh(temp4, temp12);
    tempJ = vec_mergel(temp4, temp12);
    tempK = vec_mergeh(temp5, temp13);
    tempL = vec_mergel(temp5, temp13);
    tempM = vec_mergeh(temp6, temp14);
    tempN = vec_mergel(temp6, temp14);
    tempO = vec_mergeh(temp7, temp15);
    tempP = vec_mergel(temp7, temp15);
    temp0  = vec_mergeh(tempA, tempI);
    temp1  = vec_mergel(tempA, tempI);
    temp2  = vec_mergeh(tempB, tempJ);
    temp3  = vec_mergel(tempB, tempJ);
    temp4  = vec_mergeh(tempC, tempK);
    temp5  = vec_mergel(tempC, tempK);
    temp6  = vec_mergeh(tempD, tempL);
    temp7  = vec_mergel(tempD, tempL);
    temp8  = vec_mergeh(tempE, tempM);
    temp9  = vec_mergel(tempE, tempM);
    temp10 = vec_mergeh(tempF, tempN);
    temp11 = vec_mergel(tempF, tempN);
    temp12 = vec_mergeh(tempG, tempO);
    temp13 = vec_mergel(tempG, tempO);
    temp14 = vec_mergeh(tempH, tempP);
    temp15 = vec_mergel(tempH, tempP);
    vec_st(temp0,    0, dst);
    vec_st(temp1,   16, dst);
    vec_st(temp2,   32, dst);
    vec_st(temp3,   48, dst);
    vec_st(temp4,   64, dst);
    vec_st(temp5,   80, dst);
    vec_st(temp6,   96, dst);
    vec_st(temp7,  112, dst);
    vec_st(temp8,  128, dst);
    vec_st(temp9,  144, dst);
    vec_st(temp10, 160, dst);
    vec_st(temp11, 176, dst);
    vec_st(temp12, 192, dst);
    vec_st(temp13, 208, dst);
    vec_st(temp14, 224, dst);
    vec_st(temp15, 240, dst);
}
static inline void transpose_8x16_char_fromPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
    const vector unsigned char zero = vec_splat_u8(0);
#define LOAD_DOUBLE_LINE(i, j)                                  \
    vector unsigned char src##i = vec_ld(i * 16, src);          \
    vector unsigned char src##j = vec_ld(j * 16, src)
    LOAD_DOUBLE_LINE(0, 1);
    LOAD_DOUBLE_LINE(2, 3);
    LOAD_DOUBLE_LINE(4, 5);
    LOAD_DOUBLE_LINE(6, 7);
    LOAD_DOUBLE_LINE(8, 9);
    LOAD_DOUBLE_LINE(10, 11);
    LOAD_DOUBLE_LINE(12, 13);
    LOAD_DOUBLE_LINE(14, 15);
#undef LOAD_DOUBLE_LINE
    vector unsigned char tempA = vec_mergeh(src0, src8);
    vector unsigned char tempB;
    vector unsigned char tempC = vec_mergeh(src1, src9);
    vector unsigned char tempD;
    vector unsigned char tempE = vec_mergeh(src2, src10);
    vector unsigned char tempG = vec_mergeh(src3, src11);
    vector unsigned char tempI = vec_mergeh(src4, src12);
    vector unsigned char tempJ;
    vector unsigned char tempK = vec_mergeh(src5, src13);
    vector unsigned char tempL;
    vector unsigned char tempM = vec_mergeh(src6, src14);
    vector unsigned char tempO = vec_mergeh(src7, src15);
    vector unsigned char temp0 = vec_mergeh(tempA, tempI);
    vector unsigned char temp1 = vec_mergel(tempA, tempI);
    vector unsigned char temp2;
    vector unsigned char temp3;
    vector unsigned char temp4 = vec_mergeh(tempC, tempK);
    vector unsigned char temp5 = vec_mergel(tempC, tempK);
    vector unsigned char temp6;
    vector unsigned char temp7;
    vector unsigned char temp8 = vec_mergeh(tempE, tempM);
    vector unsigned char temp9 = vec_mergel(tempE, tempM);
    vector unsigned char temp12 = vec_mergeh(tempG, tempO);
    vector unsigned char temp13 = vec_mergel(tempG, tempO);
    tempA = vec_mergeh(temp0, temp8);
    tempB = vec_mergel(temp0, temp8);
    tempC = vec_mergeh(temp1, temp9);
    tempD = vec_mergel(temp1, temp9);
    tempI = vec_mergeh(temp4, temp12);
    tempJ = vec_mergel(temp4, temp12);
    tempK = vec_mergeh(temp5, temp13);
    tempL = vec_mergel(temp5, temp13);
    temp0 = vec_mergeh(tempA, tempI);
    temp1 = vec_mergel(tempA, tempI);
    temp2 = vec_mergeh(tempB, tempJ);
    temp3 = vec_mergel(tempB, tempJ);
    temp4 = vec_mergeh(tempC, tempK);
    temp5 = vec_mergel(tempC, tempK);
    temp6 = vec_mergeh(tempD, tempL);
    temp7 = vec_mergel(tempD, tempL);
    const vector signed char neg1 = vec_splat_s8(-1);
#define STORE_DOUBLE_LINE(i, j)                                         \
    vector unsigned char dstA##i = vec_ld(i * stride, dst);             \
    vector unsigned char dstB##i = vec_ld(i * stride + 16, dst);        \
    vector unsigned char dstA##j = vec_ld(j * stride, dst);             \
    vector unsigned char dstB##j = vec_ld(j * stride + 16, dst);        \
    vector unsigned char align##i = vec_lvsr(i * stride, dst);          \
    vector unsigned char align##j = vec_lvsr(j * stride, dst);          \
    vector unsigned char mask##i = vec_perm(zero, (vector unsigned char)neg1, align##i); \
    vector unsigned char mask##j = vec_perm(zero, (vector unsigned char)neg1, align##j); \
    vector unsigned char dstR##i = vec_perm(temp##i, temp##i, align##i); \
    vector unsigned char dstR##j = vec_perm(temp##j, temp##j, align##j); \
    vector unsigned char dstAF##i = vec_sel(dstA##i, dstR##i, mask##i); \
    vector unsigned char dstBF##i = vec_sel(dstR##i, dstB##i, mask##i); \
    vector unsigned char dstAF##j = vec_sel(dstA##j, dstR##j, mask##j); \
    vector unsigned char dstBF##j = vec_sel(dstR##j, dstB##j, mask##j); \
    vec_st(dstAF##i, i * stride, dst);                                  \
    vec_st(dstBF##i, i * stride + 16, dst);                             \
    vec_st(dstAF##j, j * stride, dst);                                  \
    vec_st(dstBF##j, j * stride + 16, dst)
    STORE_DOUBLE_LINE(0, 1);
    STORE_DOUBLE_LINE(2, 3);
    STORE_DOUBLE_LINE(4, 5);
    STORE_DOUBLE_LINE(6, 7);
#undef STORE_DOUBLE_LINE
}