/*
 * AltiVec optimizations (C) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * based on code by Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#define ALTIVEC_TRANSPOSE_8x8_SHORT(src_a,src_b,src_c,src_d,src_e,src_f,src_g,src_h) \
    do {                                                \
    __typeof__(src_a) tempA1, tempB1, tempC1, tempD1;   \
    __typeof__(src_a) tempE1, tempF1, tempG1, tempH1;   \
    __typeof__(src_a) tempA2, tempB2, tempC2, tempD2;   \
    __typeof__(src_a) tempE2, tempF2, tempG2, tempH2;   \
    tempA1 = vec_mergeh (src_a, src_e);                 \
    tempB1 = vec_mergel (src_a, src_e);                 \
    tempC1 = vec_mergeh (src_b, src_f);                 \
    tempD1 = vec_mergel (src_b, src_f);                 \
    tempE1 = vec_mergeh (src_c, src_g);                 \
    tempF1 = vec_mergel (src_c, src_g);                 \
    tempG1 = vec_mergeh (src_d, src_h);                 \
    tempH1 = vec_mergel (src_d, src_h);                 \
    tempA2 = vec_mergeh (tempA1, tempE1);               \
    tempB2 = vec_mergel (tempA1, tempE1);               \
    tempC2 = vec_mergeh (tempB1, tempF1);               \
    tempD2 = vec_mergel (tempB1, tempF1);               \
    tempE2 = vec_mergeh (tempC1, tempG1);               \
    tempF2 = vec_mergel (tempC1, tempG1);               \
    tempG2 = vec_mergeh (tempD1, tempH1);               \
    tempH2 = vec_mergel (tempD1, tempH1);               \
    src_a = vec_mergeh (tempA2, tempE2);                \
    src_b = vec_mergel (tempA2, tempE2);                \
    src_c = vec_mergeh (tempB2, tempF2);                \
    src_d = vec_mergel (tempB2, tempF2);                \
    src_e = vec_mergeh (tempC2, tempG2);                \
    src_f = vec_mergel (tempC2, tempG2);                \
    src_g = vec_mergeh (tempD2, tempH2);                \
    src_h = vec_mergel (tempD2, tempH2);                \
    } while (0)
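/* Usage sketch (editorial, not from the original source): the macro
 * transposes eight vectors of eight shorts in place,
 *
 *     vector signed short r0, r1, r2, r3, r4, r5, r6, r7;
 *     // load one row of the 8x8 block into each of r0..r7, then:
 *     ALTIVEC_TRANSPOSE_8x8_SHORT(r0, r1, r2, r3, r4, r5, r6, r7);
 *
 * The three rounds of vec_mergeh/vec_mergel form a butterfly network:
 * each round interleaves at twice the distance of the previous one, so
 * after log2(8) = 3 rounds rows and columns are exchanged. */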
static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
    this code makes no assumptions about src or stride.
    One could remove the recomputation of the perm
    vector by assuming (stride % 16) == 0, unfortunately
    this is not always true.
    */
    DECLARE_ALIGNED(16, short, data[8]);
    int numEq;
    uint8_t *src2 = src;
    vector signed short v_dcOffset;
    vector signed short v2QP;
    vector unsigned short v4QP;
    vector unsigned short v_dcThreshold;
    const int properStride = (stride % 16);
    const int srcAlign = ((unsigned long)src2 % 16);
    const int two_vectors = ((srcAlign > 8) || properStride) ? 1 : 0;
    const vector signed int zero = vec_splat_s32(0);
    const vector signed short mask = vec_splat_s16(1);
    vector signed int v_numEq = vec_splat_s32(0);

    data[0] = ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
    data[1] = data[0] * 2 + 1;
    data[2] = c->QP * 2;
    data[3] = c->QP * 4;

    vector signed short v_data = vec_ld(0, data);
    v_dcOffset = vec_splat(v_data, 0);
    v_dcThreshold = (vector unsigned short)vec_splat(v_data, 1);
    v2QP = vec_splat(v_data, 2);
    v4QP = (vector unsigned short)vec_splat(v_data, 3);

    src2 += stride * 4;

    vector signed short v_srcAss0, v_srcAss1, v_srcAss2, v_srcAss3,
                        v_srcAss4, v_srcAss5, v_srcAss6, v_srcAss7;
#define LOAD_LINE(i)                                                    \
    register int j##i = i * stride;                                     \
    vector unsigned char perm##i = vec_lvsl(j##i, src2);                \
    const vector unsigned char v_srcA1##i = vec_ld(j##i, src2);         \
    vector unsigned char v_srcA2##i;                                    \
    if (two_vectors)                                                    \
        v_srcA2##i = vec_ld(j##i + 16, src2);                           \
    const vector unsigned char v_srcA##i =                              \
        vec_perm(v_srcA1##i, v_srcA2##i, perm##i);                      \
    v_srcAss##i =                                                       \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)v_srcA##i)

#define LOAD_LINE_ALIGNED(i)                                            \
    register int j##i = i * stride;                                     \
    const vector unsigned char v_srcA##i = vec_ld(j##i, src2);          \
    v_srcAss##i =                                                       \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)v_srcA##i)
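/* Editorial note: LOAD_LINE above is the standard AltiVec unaligned-load
 * idiom; in isolation (p is a hypothetical byte pointer):
 *
 *     vector unsigned char align = vec_lvsl(0, p);  // shift-left permute
 *     vector unsigned char lo    = vec_ld(0, p);    // aligned load at/below p
 *     vector unsigned char hi    = vec_ld(16, p);   // next aligned quadword
 *     vector unsigned char row   = vec_perm(lo, hi, align);
 *
 * The two_vectors flag additionally lets the macro skip the second load
 * when the row is known not to need it. */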
    /* Special-casing the aligned case is worthwhile, as all calls from
     * the (transposed) horizontal deblocks will be aligned, in addition
     * to the naturally aligned vertical deblocks. */
    if (properStride && srcAlign) {
        LOAD_LINE_ALIGNED(0);
        LOAD_LINE_ALIGNED(1);
        LOAD_LINE_ALIGNED(2);
        LOAD_LINE_ALIGNED(3);
        LOAD_LINE_ALIGNED(4);
        LOAD_LINE_ALIGNED(5);
        LOAD_LINE_ALIGNED(6);
        LOAD_LINE_ALIGNED(7);
    } else {
        LOAD_LINE(0);
        LOAD_LINE(1);
        LOAD_LINE(2);
        LOAD_LINE(3);
        LOAD_LINE(4);
        LOAD_LINE(5);
        LOAD_LINE(6);
        LOAD_LINE(7);
    }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED
#define ITER(i, j)                                                      \
    const vector signed short v_diff##i =                               \
        vec_sub(v_srcAss##i, v_srcAss##j);                              \
    const vector signed short v_sum##i =                                \
        vec_add(v_diff##i, v_dcOffset);                                 \
    const vector signed short v_comp##i =                               \
        (vector signed short)vec_cmplt((vector unsigned short)v_sum##i, \
                                       v_dcThreshold);                  \
    const vector signed short v_part##i = vec_and(mask, v_comp##i);     \
    v_numEq = vec_sum4s(v_part##i, v_numEq);

    ITER(0, 1);
    ITER(1, 2);
    ITER(2, 3);
    ITER(3, 4);
    ITER(4, 5);
    ITER(5, 6);
    ITER(6, 7);
#undef ITER
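/* Editorial sketch of the scalar test ITER vectorizes: with
 * data[0] = dcOffset and data[1] = 2*dcOffset + 1, the biased unsigned
 * compare folds the two-sided range test into a single comparison,
 *
 *     int diff = line_i[x] - line_j[x];
 *     if ((unsigned)(diff + dcOffset) < (unsigned)(2*dcOffset + 1))
 *         numEq++;   // i.e. -dcOffset <= diff <= dcOffset
 */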
    v_numEq = vec_sums(v_numEq, zero);
    v_numEq = vec_splat(v_numEq, 3);
    vec_ste(v_numEq, 0, &numEq);

    if (numEq > c->ppMode.flatnessThreshold){
        const vector unsigned char mmoP1 = (const vector unsigned char)
            AVV(0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
                0x00, 0x01, 0x12, 0x13, 0x08, 0x09, 0x1A, 0x1B);
        const vector unsigned char mmoP2 = (const vector unsigned char)
            AVV(0x04, 0x05, 0x16, 0x17, 0x0C, 0x0D, 0x1E, 0x1F,
                0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
        const vector unsigned char mmoP = (const vector unsigned char)
            vec_lvsl(8, (unsigned char*)0);

        vector signed short mmoL1 = vec_perm(v_srcAss0, v_srcAss2, mmoP1);
        vector signed short mmoL2 = vec_perm(v_srcAss4, v_srcAss6, mmoP2);
        vector signed short mmoL = vec_perm(mmoL1, mmoL2, mmoP);
        vector signed short mmoR1 = vec_perm(v_srcAss5, v_srcAss7, mmoP1);
        vector signed short mmoR2 = vec_perm(v_srcAss1, v_srcAss3, mmoP2);
        vector signed short mmoR = vec_perm(mmoR1, mmoR2, mmoP);
        vector signed short mmoDiff = vec_sub(mmoL, mmoR);
        vector unsigned short mmoSum = (vector unsigned short)vec_add(mmoDiff, v2QP);

        if (vec_any_gt(mmoSum, v4QP))
            return 0;
        else
            return 1;
    }
    else return 2;
}
static inline void doVertLowPass_altivec(uint8_t *src, int stride, PPContext *c) {
    /*
    this code makes no assumptions about src or stride.
    One could remove the recomputation of the perm
    vector by assuming (stride % 16) == 0, unfortunately
    this is not always true. Quite a lot of load/stores
    can be removed by assuming proper alignment of
    src & stride.
    */
    uint8_t *src2 = src;
    const vector signed int zero = vec_splat_s32(0);
    const int properStride = (stride % 16);
    const int srcAlign = ((unsigned long)src2 % 16);
    DECLARE_ALIGNED(16, short, qp[8]);
    qp[0] = c->QP;
    vector signed short vqp = vec_ld(0, qp);
    vqp = vec_splat(vqp, 0);

    src2 += stride * 3;

    vector signed short vb0, vb1, vb2, vb3, vb4, vb5, vb6, vb7, vb8, vb9;
    vector unsigned char vbA0, vbA1, vbA2, vbA3, vbA4, vbA5, vbA6, vbA7, vbA8, vbA9;
    vector unsigned char vbB0, vbB1, vbB2, vbB3, vbB4, vbB5, vbB6, vbB7, vbB8, vbB9;
    vector unsigned char vbT0, vbT1, vbT2, vbT3, vbT4, vbT5, vbT6, vbT7, vbT8, vbT9;
#define LOAD_LINE(i)                                                    \
    const vector unsigned char perml##i =                               \
        vec_lvsl(i * stride, src2);                                     \
    vbA##i = vec_ld(i * stride, src2);                                  \
    vbB##i = vec_ld(i * stride + 16, src2);                             \
    vbT##i = vec_perm(vbA##i, vbB##i, perml##i);                        \
    vb##i =                                                             \
        (vector signed short)vec_mergeh((vector unsigned char)zero,     \
                                        (vector unsigned char)vbT##i)

#define LOAD_LINE_ALIGNED(i)                                            \
    register int j##i = i * stride;                                     \
    vbT##i = vec_ld(j##i, src2);                                        \
    vb##i =                                                             \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)vbT##i)
    /* Special-casing the aligned case is worthwhile, as all calls from
     * the (transposed) horizontal deblocks will be aligned, in addition
     * to the naturally aligned vertical deblocks. */
    if (properStride && srcAlign) {
        LOAD_LINE_ALIGNED(0);
        LOAD_LINE_ALIGNED(1);
        LOAD_LINE_ALIGNED(2);
        LOAD_LINE_ALIGNED(3);
        LOAD_LINE_ALIGNED(4);
        LOAD_LINE_ALIGNED(5);
        LOAD_LINE_ALIGNED(6);
        LOAD_LINE_ALIGNED(7);
        LOAD_LINE_ALIGNED(8);
        LOAD_LINE_ALIGNED(9);
    } else {
        LOAD_LINE(0);
        LOAD_LINE(1);
        LOAD_LINE(2);
        LOAD_LINE(3);
        LOAD_LINE(4);
        LOAD_LINE(5);
        LOAD_LINE(6);
        LOAD_LINE(7);
        LOAD_LINE(8);
        LOAD_LINE(9);
    }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED
    const vector unsigned short v_2 = vec_splat_u16(2);
    const vector unsigned short v_4 = vec_splat_u16(4);

    const vector signed short v_diff01 = vec_sub(vb0, vb1);
    const vector unsigned short v_cmp01 =
        (const vector unsigned short) vec_cmplt(vec_abs(v_diff01), vqp);
    const vector signed short v_first = vec_sel(vb1, vb0, v_cmp01);
    const vector signed short v_diff89 = vec_sub(vb8, vb9);
    const vector unsigned short v_cmp89 =
        (const vector unsigned short) vec_cmplt(vec_abs(v_diff89), vqp);
    const vector signed short v_last = vec_sel(vb8, vb9, v_cmp89);

    const vector signed short temp01 = vec_mladd(v_first, (vector signed short)v_4, vb1);
    const vector signed short temp02 = vec_add(vb2, vb3);
    const vector signed short temp03 = vec_add(temp01, (vector signed short)v_4);
    const vector signed short v_sumsB0 = vec_add(temp02, temp03);

    const vector signed short temp11 = vec_sub(v_sumsB0, v_first);
    const vector signed short v_sumsB1 = vec_add(temp11, vb4);

    const vector signed short temp21 = vec_sub(v_sumsB1, v_first);
    const vector signed short v_sumsB2 = vec_add(temp21, vb5);

    const vector signed short temp31 = vec_sub(v_sumsB2, v_first);
    const vector signed short v_sumsB3 = vec_add(temp31, vb6);

    const vector signed short temp41 = vec_sub(v_sumsB3, v_first);
    const vector signed short v_sumsB4 = vec_add(temp41, vb7);

    const vector signed short temp51 = vec_sub(v_sumsB4, vb1);
    const vector signed short v_sumsB5 = vec_add(temp51, vb8);

    const vector signed short temp61 = vec_sub(v_sumsB5, vb2);
    const vector signed short v_sumsB6 = vec_add(temp61, v_last);

    const vector signed short temp71 = vec_sub(v_sumsB6, vb3);
    const vector signed short v_sumsB7 = vec_add(temp71, v_last);

    const vector signed short temp81 = vec_sub(v_sumsB7, vb4);
    const vector signed short v_sumsB8 = vec_add(temp81, v_last);

    const vector signed short temp91 = vec_sub(v_sumsB8, vb5);
    const vector signed short v_sumsB9 = vec_add(temp91, v_last);
#define COMPUTE_VR(i, j, k)                                             \
    const vector signed short temps1##i =                               \
        vec_add(v_sumsB##i, v_sumsB##k);                                \
    const vector signed short temps2##i =                               \
        vec_mladd(vb##j, (vector signed short)v_2, temps1##i);          \
    const vector signed short vr##j = vec_sra(temps2##i, v_4)

    COMPUTE_VR(0, 1, 2);
    COMPUTE_VR(1, 2, 3);
    COMPUTE_VR(2, 3, 4);
    COMPUTE_VR(3, 4, 5);
    COMPUTE_VR(4, 5, 6);
    COMPUTE_VR(5, 6, 7);
    COMPUTE_VR(6, 7, 8);
    COMPUTE_VR(7, 8, 9);
#undef COMPUTE_VR
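/* Descriptive note: per column, COMPUTE_VR(i, j, k) evaluates
 *
 *     vr_j = (v_sumsB_i + v_sumsB_k + 2*vb_j) >> 4
 *
 * The +4 rounding bias already entered the running sums via temp03 above,
 * so each vr_j is a rounded weighted mean over the line's neighbourhood. */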
    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                        0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

#define PACK_AND_STORE(i)                                       \
    const vector unsigned char perms##i =                       \
        vec_lvsr(i * stride, src2);                             \
    const vector unsigned char vf##i =                          \
        vec_packsu(vr##i, (vector signed short)zero);           \
    const vector unsigned char vg##i =                          \
        vec_perm(vf##i, vbT##i, permHH);                        \
    const vector unsigned char mask##i =                        \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
    const vector unsigned char vg2##i =                         \
        vec_perm(vg##i, vg##i, perms##i);                       \
    const vector unsigned char svA##i =                         \
        vec_sel(vbA##i, vg2##i, mask##i);                       \
    const vector unsigned char svB##i =                         \
        vec_sel(vg2##i, vbB##i, mask##i);                       \
    vec_st(svA##i, i * stride, src2);                           \
    vec_st(svB##i, i * stride + 16, src2)

#define PACK_AND_STORE_ALIGNED(i)                               \
    const vector unsigned char vf##i =                          \
        vec_packsu(vr##i, (vector signed short)zero);           \
    const vector unsigned char vg##i =                          \
        vec_perm(vf##i, vbT##i, permHH);                        \
    vec_st(vg##i, i * stride, src2)

    /* Special-casing the aligned case is worthwhile, as all calls from
     * the (transposed) horizontal deblocks will be aligned, in addition
     * to the naturally aligned vertical deblocks. */
    if (properStride && srcAlign) {
        PACK_AND_STORE_ALIGNED(1);
        PACK_AND_STORE_ALIGNED(2);
        PACK_AND_STORE_ALIGNED(3);
        PACK_AND_STORE_ALIGNED(4);
        PACK_AND_STORE_ALIGNED(5);
        PACK_AND_STORE_ALIGNED(6);
        PACK_AND_STORE_ALIGNED(7);
        PACK_AND_STORE_ALIGNED(8);
    } else {
        PACK_AND_STORE(1);
        PACK_AND_STORE(2);
        PACK_AND_STORE(3);
        PACK_AND_STORE(4);
        PACK_AND_STORE(5);
        PACK_AND_STORE(6);
        PACK_AND_STORE(7);
        PACK_AND_STORE(8);
    }
#undef PACK_AND_STORE
#undef PACK_AND_STORE_ALIGNED
}
static inline void doVertDefFilter_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
    this code makes no assumptions about src or stride.
    One could remove the recomputation of the perm
    vector by assuming (stride % 16) == 0, unfortunately
    this is not always true. Quite a lot of load/stores
    can be removed by assuming proper alignment of
    src & stride.
    */
    uint8_t *src2 = src;
    const vector signed int zero = vec_splat_s32(0);
    DECLARE_ALIGNED(16, short, qp[8]);
    qp[0] = 8*c->QP;
    vector signed short vqp = vec_ld(0, qp);
    vqp = vec_splat(vqp, 0);

#define LOAD_LINE(i)                                                    \
    const vector unsigned char perm##i =                                \
        vec_lvsl(i * stride, src2);                                     \
    const vector unsigned char vbA##i =                                 \
        vec_ld(i * stride, src2);                                       \
    const vector unsigned char vbB##i =                                 \
        vec_ld(i * stride + 16, src2);                                  \
    const vector unsigned char vbT##i =                                 \
        vec_perm(vbA##i, vbB##i, perm##i);                              \
    const vector signed short vb##i =                                   \
        (vector signed short)vec_mergeh((vector unsigned char)zero,     \
                                        (vector unsigned char)vbT##i)

    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
#undef LOAD_LINE
    const vector signed short v_1 = vec_splat_s16(1);
    const vector signed short v_2 = vec_splat_s16(2);
    const vector signed short v_5 = vec_splat_s16(5);
    const vector signed short v_32 = vec_sl(v_1,
                                            (vector unsigned short)v_5);

    const vector signed short l3minusl6 = vec_sub(vb3, vb6);
    const vector signed short l5minusl4 = vec_sub(vb5, vb4);
    const vector signed short twotimes_l3minusl6 = vec_mladd(v_2, l3minusl6, (vector signed short)zero);
    const vector signed short mE = vec_mladd(v_5, l5minusl4, twotimes_l3minusl6);
    const vector signed short absmE = vec_abs(mE);
    /* left & right energy */
    const vector signed short l1minusl4 = vec_sub(vb1, vb4);
    const vector signed short l3minusl2 = vec_sub(vb3, vb2);
    const vector signed short l5minusl8 = vec_sub(vb5, vb8);
    const vector signed short l7minusl6 = vec_sub(vb7, vb6);
    const vector signed short twotimes_l1minusl4 = vec_mladd(v_2, l1minusl4, (vector signed short)zero);
    const vector signed short twotimes_l5minusl8 = vec_mladd(v_2, l5minusl8, (vector signed short)zero);
    const vector signed short lE = vec_mladd(v_5, l3minusl2, twotimes_l1minusl4);
    const vector signed short rE = vec_mladd(v_5, l7minusl6, twotimes_l5minusl8);

    const vector signed short ddiff = vec_sub(absmE,
                                              vec_min(vec_abs(lE),
                                                      vec_abs(rE)));
    const vector signed short ddiffclamp = vec_max(ddiff, (vector signed short)zero);
    const vector signed short dtimes64 = vec_mladd(v_5, ddiffclamp, v_32);
    const vector signed short d = vec_sra(dtimes64, vec_splat_u16(6));
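/* Descriptive note: per column this computes
 *
 *     d = (5 * max(|mE| - min(|lE|, |rE|), 0) + 32) >> 6
 *
 * i.e. the correction strength grows with the middle energy only where
 * it exceeds the smaller of the two side energies. */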
    const vector signed short minusd = vec_sub((vector signed short)zero, d);
    const vector signed short finald = vec_sel(minusd,
                                               d,
                                               vec_cmpgt(vec_sub((vector signed short)zero, mE),
                                                         (vector signed short)zero));

    const vector signed short qtimes2 = vec_sub(vb4, vb5);
    /* for a shift right to behave like /2, we need to add one
       to all negative integers */
    const vector signed short rounddown = vec_sel((vector signed short)zero,
                                                  v_1,
                                                  vec_cmplt(qtimes2, (vector signed short)zero));
    const vector signed short q = vec_sra(vec_add(qtimes2, rounddown), vec_splat_u16(1));
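/* Editorial sketch of the rounding trick above, as scalar C: adding one
 * to negative values makes the arithmetic shift agree with truncating
 * division,
 *
 *     int div2_trunc(int x) { return (x + (x < 0)) >> 1; }
 *     // div2_trunc(-3) == -1 == -3 / 2, div2_trunc(3) == 1 == 3 / 2
 */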
    const vector signed short dclamp_P1 = vec_max((vector signed short)zero, finald);
    const vector signed short dclamp_P = vec_min(dclamp_P1, q);
    const vector signed short dclamp_N1 = vec_min((vector signed short)zero, finald);
    const vector signed short dclamp_N = vec_max(dclamp_N1, q);

    const vector signed short dclampedfinal = vec_sel(dclamp_N,
                                                      dclamp_P,
                                                      vec_cmpgt(q, (vector signed short)zero));
    const vector signed short dornotd = vec_sel((vector signed short)zero,
                                                dclampedfinal,
                                                vec_cmplt(absmE, vqp));
    /* add/subtract to l4 and l5 */
    const vector signed short vb4minusd = vec_sub(vb4, dornotd);
    const vector signed short vb5plusd  = vec_add(vb5, dornotd);
    /* finally, stores */
    const vector unsigned char st4 = vec_packsu(vb4minusd, (vector signed short)zero);
    const vector unsigned char st5 = vec_packsu(vb5plusd,  (vector signed short)zero);

    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                        0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

#define STORE(i)                                                \
    const vector unsigned char perms##i =                       \
        vec_lvsr(i * stride, src2);                             \
    const vector unsigned char vg##i =                          \
        vec_perm(st##i, vbT##i, permHH);                        \
    const vector unsigned char mask##i =                        \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
    const vector unsigned char vg2##i =                         \
        vec_perm(vg##i, vg##i, perms##i);                       \
    const vector unsigned char svA##i =                         \
        vec_sel(vbA##i, vg2##i, mask##i);                       \
    const vector unsigned char svB##i =                         \
        vec_sel(vg2##i, vbB##i, mask##i);                       \
    vec_st(svA##i, i * stride, src2);                           \
    vec_st(svB##i, i * stride + 16, src2)

    STORE(4);
    STORE(5);
#undef STORE
}
static inline void dering_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
    this code makes no assumptions about src or stride.
    One could remove the recomputation of the perm
    vector by assuming (stride % 16) == 0, unfortunately
    this is not always true. Quite a lot of load/stores
    can be removed by assuming proper alignment of
    src & stride.
    */
    uint8_t *srcCopy = src;
    DECLARE_ALIGNED(16, uint8_t, dt[16]);
    const vector signed int zero = vec_splat_s32(0);
    vector unsigned char v_dt;
    dt[0] = deringThreshold;
    v_dt = vec_splat(vec_ld(0, dt), 0);

#define LOAD_LINE(i)                                                    \
    const vector unsigned char perm##i =                                \
        vec_lvsl(i * stride, srcCopy);                                  \
    vector unsigned char sA##i = vec_ld(i * stride, srcCopy);           \
    vector unsigned char sB##i = vec_ld(i * stride + 16, srcCopy);      \
    vector unsigned char src##i = vec_perm(sA##i, sB##i, perm##i)

    LOAD_LINE(0);
    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
    LOAD_LINE(9);
#undef LOAD_LINE
    vector unsigned char v_avg;
    {
    const vector unsigned char trunc_perm = (vector unsigned char)
        AVV(0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
            0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18);
    const vector unsigned char trunc_src12 = vec_perm(src1, src2, trunc_perm);
    const vector unsigned char trunc_src34 = vec_perm(src3, src4, trunc_perm);
    const vector unsigned char trunc_src56 = vec_perm(src5, src6, trunc_perm);
    const vector unsigned char trunc_src78 = vec_perm(src7, src8, trunc_perm);

#define EXTRACT(op) do {                                                \
    const vector unsigned char s##op##_1 = vec_##op(trunc_src12, trunc_src34); \
    const vector unsigned char s##op##_2 = vec_##op(trunc_src56, trunc_src78); \
    const vector unsigned char s##op##_6 = vec_##op(s##op##_1, s##op##_2); \
    const vector unsigned char s##op##_8h = vec_mergeh(s##op##_6, s##op##_6); \
    const vector unsigned char s##op##_8l = vec_mergel(s##op##_6, s##op##_6); \
    const vector unsigned char s##op##_9 = vec_##op(s##op##_8h, s##op##_8l); \
    const vector unsigned char s##op##_9h = vec_mergeh(s##op##_9, s##op##_9); \
    const vector unsigned char s##op##_9l = vec_mergel(s##op##_9, s##op##_9); \
    const vector unsigned char s##op##_10 = vec_##op(s##op##_9h, s##op##_9l); \
    const vector unsigned char s##op##_10h = vec_mergeh(s##op##_10, s##op##_10); \
    const vector unsigned char s##op##_10l = vec_mergel(s##op##_10, s##op##_10); \
    const vector unsigned char s##op##_11 = vec_##op(s##op##_10h, s##op##_10l); \
    const vector unsigned char s##op##_11h = vec_mergeh(s##op##_11, s##op##_11); \
    const vector unsigned char s##op##_11l = vec_mergel(s##op##_11, s##op##_11); \
    v_##op = vec_##op(s##op##_11h, s##op##_11l); } while (0)
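/* Editorial note: EXTRACT(min)/EXTRACT(max) perform a logarithmic
 * reduction: after the initial pairwise vec_min/vec_max over the four
 * truncated rows, each mergeh/mergel + vec_##op step folds one half of
 * the vector onto the other, so four steps leave every byte lane holding
 * the block-wide minimum (resp. maximum). An equivalent scalar sketch:
 *
 *     for (int step = 8; step >= 1; step >>= 1)
 *         for (int x = 0; x < 16; x++)
 *             m[x] = FFMIN(m[x], m[x ^ step]);   // or FFMAX
 */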
    vector unsigned char v_min;
    vector unsigned char v_max;
    EXTRACT(min);
    EXTRACT(max);
#undef EXTRACT

    if (vec_all_lt(vec_sub(v_max, v_min), v_dt))
        return;
    v_avg = vec_avg(v_min, v_max);
    }
    DECLARE_ALIGNED(16, signed int, S[8]);
    {
    const vector unsigned short mask1 = (vector unsigned short)
        AVV(0x0001, 0x0002, 0x0004, 0x0008,
            0x0010, 0x0020, 0x0040, 0x0080);
    const vector unsigned short mask2 = (vector unsigned short)
        AVV(0x0100, 0x0200, 0x0000, 0x0000,
            0x0000, 0x0000, 0x0000, 0x0000);

    const vector unsigned int vuint32_16 = vec_sl(vec_splat_u32(1), vec_splat_u32(4));
    const vector unsigned int vuint32_1 = vec_splat_u32(1);
#define COMPARE(i)                                                      \
    vector signed int sum##i;                                           \
    do {                                                                \
        const vector unsigned char cmp##i =                             \
            (vector unsigned char)vec_cmpgt(src##i, v_avg);             \
        const vector unsigned short cmpHi##i =                          \
            (vector unsigned short)vec_mergeh(cmp##i, cmp##i);          \
        const vector unsigned short cmpLi##i =                          \
            (vector unsigned short)vec_mergel(cmp##i, cmp##i);          \
        const vector signed short cmpHf##i =                            \
            (vector signed short)vec_and(cmpHi##i, mask1);              \
        const vector signed short cmpLf##i =                            \
            (vector signed short)vec_and(cmpLi##i, mask2);              \
        const vector signed int sump##i = vec_sum4s(cmpHf##i, zero);    \
        const vector signed int sumq##i = vec_sum4s(cmpLf##i, sump##i); \
        sum##i = vec_sums(sumq##i, zero);                               \
    } while (0)
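/* Editorial note: COMPARE(i) builds a per-line bitmask: bit x of the last
 * element of sum##i is set iff pixel x of line i is brighter than v_avg.
 * mask1/mask2 give every lane a distinct power-of-two weight, so the
 * horizontal vec_sum4s/vec_sums accumulation effectively ORs the bits
 * together (the weights are disjoint, so the additions cannot carry). */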
    COMPARE(0);
    COMPARE(1);
    COMPARE(2);
    COMPARE(3);
    COMPARE(4);
    COMPARE(5);
    COMPARE(6);
    COMPARE(7);
    COMPARE(8);
    COMPARE(9);
#undef COMPARE

    vector signed int sumA2;
    vector signed int sumB2;
    {
    const vector signed int sump02 = vec_mergel(sum0, sum2);
    const vector signed int sump13 = vec_mergel(sum1, sum3);
    const vector signed int sumA = vec_mergel(sump02, sump13);

    const vector signed int sump46 = vec_mergel(sum4, sum6);
    const vector signed int sump57 = vec_mergel(sum5, sum7);
    const vector signed int sumB = vec_mergel(sump46, sump57);

    const vector signed int sump8A = vec_mergel(sum8, zero);
    const vector signed int sump9B = vec_mergel(sum9, zero);
    const vector signed int sumC = vec_mergel(sump8A, sump9B);

    const vector signed int tA = vec_sl(vec_nor(zero, sumA), vuint32_16);
    const vector signed int tB = vec_sl(vec_nor(zero, sumB), vuint32_16);
    const vector signed int tC = vec_sl(vec_nor(zero, sumC), vuint32_16);
    const vector signed int t2A = vec_or(sumA, tA);
    const vector signed int t2B = vec_or(sumB, tB);
    const vector signed int t2C = vec_or(sumC, tC);
    const vector signed int t3A = vec_and(vec_sra(t2A, vuint32_1),
                                          vec_sl(t2A, vuint32_1));
    const vector signed int t3B = vec_and(vec_sra(t2B, vuint32_1),
                                          vec_sl(t2B, vuint32_1));
    const vector signed int t3C = vec_and(vec_sra(t2C, vuint32_1),
                                          vec_sl(t2C, vuint32_1));
    const vector signed int yA = vec_and(t2A, t3A);
    const vector signed int yB = vec_and(t2B, t3B);
    const vector signed int yC = vec_and(t2C, t3C);

    const vector unsigned char strangeperm1 = vec_lvsl(4, (unsigned char*)0);
    const vector unsigned char strangeperm2 = vec_lvsl(8, (unsigned char*)0);
    const vector signed int sumAd4 = vec_perm(yA, yB, strangeperm1);
    const vector signed int sumAd8 = vec_perm(yA, yB, strangeperm2);
    const vector signed int sumBd4 = vec_perm(yB, yC, strangeperm1);
    const vector signed int sumBd8 = vec_perm(yB, yC, strangeperm2);
    const vector signed int sumAp = vec_and(yA,
                                            vec_and(sumAd4, sumAd8));
    const vector signed int sumBp = vec_and(yB,
                                            vec_and(sumBd4, sumBd8));
    sumA2 = vec_or(sumAp,
                   vec_sl(sumAp,
                          vuint32_16));
    sumB2 = vec_or(sumBp,
                   vec_sl(sumBp,
                          vuint32_16));
    }
    vec_st(sumA2, 0, S);
    vec_st(sumB2, 16, S);
    }
    /* I'm not sure the following is actually faster
       than straight, unvectorized C code :-( */

    DECLARE_ALIGNED(16, int, tQP2[4]);
    tQP2[0] = c->QP/2 + 1;
    vector signed int vQP2 = vec_ld(0, tQP2);
    vQP2 = vec_splat(vQP2, 0);
    const vector signed int vsint32_8 = vec_splat_s32(8);
    const vector unsigned int vuint32_4 = vec_splat_u32(4);

    const vector unsigned char permA1 = (vector unsigned char)
        AVV(0x00, 0x01, 0x02, 0x10, 0x11, 0x12, 0x1F, 0x1F,
            0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F);
    const vector unsigned char permA2 = (vector unsigned char)
        AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x10, 0x11,
            0x12, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F);
    const vector unsigned char permA1inc = (vector unsigned char)
        AVV(0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char permA2inc = (vector unsigned char)
        AVV(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01,
            0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char magic = (vector unsigned char)
        AVV(0x01, 0x02, 0x01, 0x02, 0x04, 0x02, 0x01, 0x02,
            0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char extractPerm = (vector unsigned char)
        AVV(0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01,
            0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01);
    const vector unsigned char extractPermInc = (vector unsigned char)
        AVV(0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
            0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01);
    const vector unsigned char identity = vec_lvsl(0, (unsigned char *)0);
    const vector unsigned char tenRight = (vector unsigned char)
        AVV(0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char eightLeft = (vector unsigned char)
        AVV(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08);
#define F_INIT(i)                                       \
    vector unsigned char tenRightM##i = tenRight;       \
    vector unsigned char permA1M##i = permA1;           \
    vector unsigned char permA2M##i = permA2;           \
    vector unsigned char extractPermM##i = extractPerm
#define F2(i, j, k, l)                                                  \
    if (S[i] & (1 << (l+1))) {                                          \
        const vector unsigned char a_##j##_A##l =                       \
            vec_perm(src##i, src##j, permA1M##i);                       \
        const vector unsigned char a_##j##_B##l =                       \
            vec_perm(a_##j##_A##l, src##k, permA2M##i);                 \
        const vector signed int a_##j##_sump##l =                       \
            (vector signed int)vec_msum(a_##j##_B##l, magic,            \
                                        (vector unsigned int)zero);     \
        vector signed int F_##j##_##l =                                 \
            vec_sr(vec_sums(a_##j##_sump##l, vsint32_8), vuint32_4);    \
        F_##j##_##l = vec_splat(F_##j##_##l, 3);                        \
        const vector signed int p_##j##_##l =                           \
            (vector signed int)vec_perm(src##j,                         \
                                        (vector unsigned char)zero,     \
                                        extractPermM##i);               \
        const vector signed int sum_##j##_##l  = vec_add(p_##j##_##l, vQP2); \
        const vector signed int diff_##j##_##l = vec_sub(p_##j##_##l, vQP2); \
        vector signed int newpm_##j##_##l;                              \
        if (vec_all_lt(sum_##j##_##l, F_##j##_##l))                     \
            newpm_##j##_##l = sum_##j##_##l;                            \
        else if (vec_all_gt(diff_##j##_##l, F_##j##_##l))               \
            newpm_##j##_##l = diff_##j##_##l;                           \
        else newpm_##j##_##l = F_##j##_##l;                             \
        const vector unsigned char newpm2_##j##_##l =                   \
            vec_splat((vector unsigned char)newpm_##j##_##l, 15);       \
        const vector unsigned char mask##j##l = vec_add(identity,       \
                                                        tenRightM##i);  \
        src##j = vec_perm(src##j, newpm2_##j##_##l, mask##j##l);        \
    }                                                                   \
    permA1M##i = vec_add(permA1M##i, permA1inc);                        \
    permA2M##i = vec_add(permA2M##i, permA2inc);                        \
    tenRightM##i = vec_sro(tenRightM##i, eightLeft);                    \
    extractPermM##i = vec_add(extractPermM##i, extractPermInc)
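/* Editorial sketch of what F2 computes for one flagged pixel p[0] (s is
 * the stride; illustrative scalar form): a 3x3 filter with the binomial
 * weights of the "magic" vector, clamped to within QP/2 + 1 of the pixel,
 *
 *     f = (  p[-s-1] + 2*p[-s] + p[-s+1]
 *          + 2*p[-1] + 4*p[0]  + 2*p[+1]
 *          + p[+s-1] + 2*p[+s] + p[+s+1] + 8) >> 4;
 *     out = av_clip(f, p[0] - (QP/2 + 1), p[0] + (QP/2 + 1));
 *
 * The vec_all_lt/vec_all_gt ladder above implements the clamp via
 * sum = p + vQP2 and diff = p - vQP2. */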
#define ITER(i, j, k)                           \
    F_INIT(i);                                  \
    F2(i, j, k, 0);                             \
    F2(i, j, k, 1);                             \
    F2(i, j, k, 2);                             \
    F2(i, j, k, 3);                             \
    F2(i, j, k, 4);                             \
    F2(i, j, k, 5);                             \
    F2(i, j, k, 6);                             \
    F2(i, j, k, 7)

    ITER(0, 1, 2);
    ITER(1, 2, 3);
    ITER(2, 3, 4);
    ITER(3, 4, 5);
    ITER(4, 5, 6);
    ITER(5, 6, 7);
    ITER(6, 7, 8);
    ITER(7, 8, 9);

    const vector signed char neg1 = vec_splat_s8(-1);
#define STORE_LINE(i)                                   \
    const vector unsigned char permST##i =              \
        vec_lvsr(i * stride, srcCopy);                  \
    const vector unsigned char maskST##i =              \
        vec_perm((vector unsigned char)zero,            \
                 (vector unsigned char)neg1, permST##i); \
    src##i = vec_perm(src##i, src##i, permST##i);       \
    sA##i = vec_sel(sA##i, src##i, maskST##i);          \
    sB##i = vec_sel(src##i, sB##i, maskST##i);          \
    vec_st(sA##i, i * stride, srcCopy);                 \
    vec_st(sB##i, i * stride + 16, srcCopy)

    STORE_LINE(1);
    STORE_LINE(2);
    STORE_LINE(3);
    STORE_LINE(4);
    STORE_LINE(5);
    STORE_LINE(6);
    STORE_LINE(7);
    STORE_LINE(8);
#undef STORE_LINE
#undef ITER
#undef F2
#undef F_INIT
}
#define doHorizLowPass_altivec(a...)   doHorizLowPass_C(a)
#define doHorizDefFilter_altivec(a...) doHorizDefFilter_C(a)
#define do_a_deblock_altivec(a...)     do_a_deblock_C(a)
static inline void RENAME(tempNoiseReducer)(uint8_t *src, int stride,
                                            uint8_t *tempBlurred, uint32_t *tempBlurredPast, int *maxNoise)
{
    const vector signed int zero = vec_splat_s32(0);
    const vector signed short vsint16_1 = vec_splat_s16(1);
    vector signed int v_dp = zero;
    vector signed int v_sysdp = zero;
    int d, sysd, i;

    tempBlurredPast[127] = maxNoise[0];
    tempBlurredPast[128] = maxNoise[1];
    tempBlurredPast[129] = maxNoise[2];
#define LOAD_LINE(src, i)                                               \
    register int j##src##i = i * stride;                                \
    vector unsigned char perm##src##i = vec_lvsl(j##src##i, src);       \
    const vector unsigned char v_##src##A1##i = vec_ld(j##src##i, src); \
    const vector unsigned char v_##src##A2##i = vec_ld(j##src##i + 16, src); \
    const vector unsigned char v_##src##A##i =                          \
        vec_perm(v_##src##A1##i, v_##src##A2##i, perm##src##i);         \
    vector signed short v_##src##Ass##i =                               \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)v_##src##A##i)
    LOAD_LINE(src, 0);
    LOAD_LINE(src, 1);
    LOAD_LINE(src, 2);
    LOAD_LINE(src, 3);
    LOAD_LINE(src, 4);
    LOAD_LINE(src, 5);
    LOAD_LINE(src, 6);
    LOAD_LINE(src, 7);

    LOAD_LINE(tempBlurred, 0);
    LOAD_LINE(tempBlurred, 1);
    LOAD_LINE(tempBlurred, 2);
    LOAD_LINE(tempBlurred, 3);
    LOAD_LINE(tempBlurred, 4);
    LOAD_LINE(tempBlurred, 5);
    LOAD_LINE(tempBlurred, 6);
    LOAD_LINE(tempBlurred, 7);
#undef LOAD_LINE
#define ACCUMULATE_DIFFS(i)                                     \
    vector signed short v_d##i = vec_sub(v_tempBlurredAss##i,   \
                                         v_srcAss##i);          \
    v_dp = vec_msums(v_d##i, v_d##i, v_dp);                     \
    v_sysdp = vec_msums(v_d##i, vsint16_1, v_sysdp)

    ACCUMULATE_DIFFS(0);
    ACCUMULATE_DIFFS(1);
    ACCUMULATE_DIFFS(2);
    ACCUMULATE_DIFFS(3);
    ACCUMULATE_DIFFS(4);
    ACCUMULATE_DIFFS(5);
    ACCUMULATE_DIFFS(6);
    ACCUMULATE_DIFFS(7);
#undef ACCUMULATE_DIFFS
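/* Descriptive note: one ACCUMULATE_DIFFS step is, per pixel,
 *
 *     int e = tempBlurred[x] - src[x];
 *     dp    += e * e;   // vec_msums(v_d, v_d, v_dp)
 *     sysdp += e;       // vec_msums(v_d, vsint16_1, v_sysdp)
 *
 * giving the squared error and the signed (systematic) error of the block. */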
    v_dp = vec_sums(v_dp, zero);
    v_sysdp = vec_sums(v_sysdp, zero);

    v_dp = vec_splat(v_dp, 3);
    v_sysdp = vec_splat(v_sysdp, 3);

    vec_ste(v_dp, 0, &d);
    vec_ste(v_sysdp, 0, &sysd);
    i = d;
    d = (4*d
         +(*(tempBlurredPast-256))
         +(*(tempBlurredPast-1)) + (*(tempBlurredPast+1))
         +(*(tempBlurredPast+256))
         +4)>>3;
    *tempBlurredPast = i;

    if (d > maxNoise[1]) {
        if (d < maxNoise[2]) {
#define OP(i) v_tempBlurredAss##i = vec_avg(v_tempBlurredAss##i, v_srcAss##i);

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        } else {
#define OP(i) v_tempBlurredAss##i = v_srcAss##i;

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        }
    } else {
        if (d < maxNoise[0]) {
            const vector signed short vsint16_7 = vec_splat_s16(7);
            const vector signed short vsint16_4 = vec_splat_s16(4);
            const vector unsigned short vuint16_3 = vec_splat_u16(3);

#define OP(i)                                                   \
            const vector signed short v_temp##i =               \
                vec_mladd(v_tempBlurredAss##i,                  \
                          vsint16_7, v_srcAss##i);              \
            const vector signed short v_temp2##i =              \
                vec_add(v_temp##i, vsint16_4);                  \
            v_tempBlurredAss##i = vec_sr(v_temp2##i, vuint16_3)

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        } else {
            const vector signed short vsint16_3 = vec_splat_s16(3);
            const vector signed short vsint16_2 = vec_splat_s16(2);

#define OP(i)                                                   \
            const vector signed short v_temp##i =               \
                vec_mladd(v_tempBlurredAss##i,                  \
                          vsint16_3, v_srcAss##i);              \
            const vector signed short v_temp2##i =              \
                vec_add(v_temp##i, vsint16_2);                  \
            v_tempBlurredAss##i = vec_sr(v_temp2##i, (vector unsigned short)vsint16_2)

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        }
    }
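/* Descriptive note: the two OP variants above are fixed-point IIR blends
 * toward the current frame,
 *
 *     tempBlurred = (7*tempBlurred + src + 4) >> 3;   // weak update
 *     tempBlurred = (3*tempBlurred + src + 2) >> 2;   // stronger update
 *
 * selected by how the measured block noise d compares to maxNoise[]. */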
    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                        0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

#define PACK_AND_STORE(src, i)                                  \
    const vector unsigned char perms##src##i =                  \
        vec_lvsr(i * stride, src);                              \
    const vector unsigned char vf##src##i =                     \
        vec_packsu(v_tempBlurredAss##i, (vector signed short)zero); \
    const vector unsigned char vg##src##i =                     \
        vec_perm(vf##src##i, v_##src##A##i, permHH);            \
    const vector unsigned char mask##src##i =                   \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##src##i); \
    const vector unsigned char vg2##src##i =                    \
        vec_perm(vg##src##i, vg##src##i, perms##src##i);        \
    const vector unsigned char svA##src##i =                    \
        vec_sel(v_##src##A1##i, vg2##src##i, mask##src##i);     \
    const vector unsigned char svB##src##i =                    \
        vec_sel(vg2##src##i, v_##src##A2##i, mask##src##i);     \
    vec_st(svA##src##i, i * stride, src);                       \
    vec_st(svB##src##i, i * stride + 16, src)
    PACK_AND_STORE(src, 0);
    PACK_AND_STORE(src, 1);
    PACK_AND_STORE(src, 2);
    PACK_AND_STORE(src, 3);
    PACK_AND_STORE(src, 4);
    PACK_AND_STORE(src, 5);
    PACK_AND_STORE(src, 6);
    PACK_AND_STORE(src, 7);
    PACK_AND_STORE(tempBlurred, 0);
    PACK_AND_STORE(tempBlurred, 1);
    PACK_AND_STORE(tempBlurred, 2);
    PACK_AND_STORE(tempBlurred, 3);
    PACK_AND_STORE(tempBlurred, 4);
    PACK_AND_STORE(tempBlurred, 5);
    PACK_AND_STORE(tempBlurred, 6);
    PACK_AND_STORE(tempBlurred, 7);
#undef PACK_AND_STORE
}
static inline void transpose_16x8_char_toPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
    const vector unsigned char zero = vec_splat_u8(0);

#define LOAD_DOUBLE_LINE(i, j)                                          \
    vector unsigned char perm1##i = vec_lvsl(i * stride, src);          \
    vector unsigned char perm2##i = vec_lvsl(j * stride, src);          \
    vector unsigned char srcA##i = vec_ld(i * stride, src);             \
    vector unsigned char srcB##i = vec_ld(i * stride + 16, src);        \
    vector unsigned char srcC##i = vec_ld(j * stride, src);             \
    vector unsigned char srcD##i = vec_ld(j * stride + 16, src);        \
    vector unsigned char src##i = vec_perm(srcA##i, srcB##i, perm1##i); \
    vector unsigned char src##j = vec_perm(srcC##i, srcD##i, perm2##i)

    LOAD_DOUBLE_LINE(0, 1);
    LOAD_DOUBLE_LINE(2, 3);
    LOAD_DOUBLE_LINE(4, 5);
    LOAD_DOUBLE_LINE(6, 7);
#undef LOAD_DOUBLE_LINE
    vector unsigned char tempA = vec_mergeh(src0, zero);
    vector unsigned char tempB = vec_mergel(src0, zero);
    vector unsigned char tempC = vec_mergeh(src1, zero);
    vector unsigned char tempD = vec_mergel(src1, zero);
    vector unsigned char tempE = vec_mergeh(src2, zero);
    vector unsigned char tempF = vec_mergel(src2, zero);
    vector unsigned char tempG = vec_mergeh(src3, zero);
    vector unsigned char tempH = vec_mergel(src3, zero);
    vector unsigned char tempI = vec_mergeh(src4, zero);
    vector unsigned char tempJ = vec_mergel(src4, zero);
    vector unsigned char tempK = vec_mergeh(src5, zero);
    vector unsigned char tempL = vec_mergel(src5, zero);
    vector unsigned char tempM = vec_mergeh(src6, zero);
    vector unsigned char tempN = vec_mergel(src6, zero);
    vector unsigned char tempO = vec_mergeh(src7, zero);
    vector unsigned char tempP = vec_mergel(src7, zero);
    vector unsigned char temp0 = vec_mergeh(tempA, tempI);
    vector unsigned char temp1 = vec_mergel(tempA, tempI);
    vector unsigned char temp2 = vec_mergeh(tempB, tempJ);
    vector unsigned char temp3 = vec_mergel(tempB, tempJ);
    vector unsigned char temp4 = vec_mergeh(tempC, tempK);
    vector unsigned char temp5 = vec_mergel(tempC, tempK);
    vector unsigned char temp6 = vec_mergeh(tempD, tempL);
    vector unsigned char temp7 = vec_mergel(tempD, tempL);
    vector unsigned char temp8 = vec_mergeh(tempE, tempM);
    vector unsigned char temp9 = vec_mergel(tempE, tempM);
    vector unsigned char temp10 = vec_mergeh(tempF, tempN);
    vector unsigned char temp11 = vec_mergel(tempF, tempN);
    vector unsigned char temp12 = vec_mergeh(tempG, tempO);
    vector unsigned char temp13 = vec_mergel(tempG, tempO);
    vector unsigned char temp14 = vec_mergeh(tempH, tempP);
    vector unsigned char temp15 = vec_mergel(tempH, tempP);
    tempA = vec_mergeh(temp0, temp8);
    tempB = vec_mergel(temp0, temp8);
    tempC = vec_mergeh(temp1, temp9);
    tempD = vec_mergel(temp1, temp9);
    tempE = vec_mergeh(temp2, temp10);
    tempF = vec_mergel(temp2, temp10);
    tempG = vec_mergeh(temp3, temp11);
    tempH = vec_mergel(temp3, temp11);
    tempI = vec_mergeh(temp4, temp12);
    tempJ = vec_mergel(temp4, temp12);
    tempK = vec_mergeh(temp5, temp13);
    tempL = vec_mergel(temp5, temp13);
    tempM = vec_mergeh(temp6, temp14);
    tempN = vec_mergel(temp6, temp14);
    tempO = vec_mergeh(temp7, temp15);
    tempP = vec_mergel(temp7, temp15);
    temp0 = vec_mergeh(tempA, tempI);
    temp1 = vec_mergel(tempA, tempI);
    temp2 = vec_mergeh(tempB, tempJ);
    temp3 = vec_mergel(tempB, tempJ);
    temp4 = vec_mergeh(tempC, tempK);
    temp5 = vec_mergel(tempC, tempK);
    temp6 = vec_mergeh(tempD, tempL);
    temp7 = vec_mergel(tempD, tempL);
    temp8 = vec_mergeh(tempE, tempM);
    temp9 = vec_mergel(tempE, tempM);
    temp10 = vec_mergeh(tempF, tempN);
    temp11 = vec_mergel(tempF, tempN);
    temp12 = vec_mergeh(tempG, tempO);
    temp13 = vec_mergel(tempG, tempO);
    temp14 = vec_mergeh(tempH, tempP);
    temp15 = vec_mergel(tempH, tempP);
    vec_st(temp0,    0, dst);
    vec_st(temp1,   16, dst);
    vec_st(temp2,   32, dst);
    vec_st(temp3,   48, dst);
    vec_st(temp4,   64, dst);
    vec_st(temp5,   80, dst);
    vec_st(temp6,   96, dst);
    vec_st(temp7,  112, dst);
    vec_st(temp8,  128, dst);
    vec_st(temp9,  144, dst);
    vec_st(temp10, 160, dst);
    vec_st(temp11, 176, dst);
    vec_st(temp12, 192, dst);
    vec_st(temp13, 208, dst);
    vec_st(temp14, 224, dst);
    vec_st(temp15, 240, dst);
}
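/* Editorial note: the transposed block is written densely at dst with a
 * fixed 16-byte pitch (offsets 0, 16, ..., 240), which is what allows the
 * companion transpose_8x16_char_fromPackedAlign_altivec() below to reload
 * it with plain aligned vec_ld. */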
static inline void transpose_8x16_char_fromPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
    const vector unsigned char zero = vec_splat_u8(0);

#define LOAD_DOUBLE_LINE(i, j)                          \
    vector unsigned char src##i = vec_ld(i * 16, src);  \
    vector unsigned char src##j = vec_ld(j * 16, src)

    LOAD_DOUBLE_LINE(0, 1);
    LOAD_DOUBLE_LINE(2, 3);
    LOAD_DOUBLE_LINE(4, 5);
    LOAD_DOUBLE_LINE(6, 7);
    LOAD_DOUBLE_LINE(8, 9);
    LOAD_DOUBLE_LINE(10, 11);
    LOAD_DOUBLE_LINE(12, 13);
    LOAD_DOUBLE_LINE(14, 15);
#undef LOAD_DOUBLE_LINE
    vector unsigned char tempA = vec_mergeh(src0, src8);
    vector unsigned char tempB;
    vector unsigned char tempC = vec_mergeh(src1, src9);
    vector unsigned char tempD;
    vector unsigned char tempE = vec_mergeh(src2, src10);
    vector unsigned char tempG = vec_mergeh(src3, src11);
    vector unsigned char tempI = vec_mergeh(src4, src12);
    vector unsigned char tempJ;
    vector unsigned char tempK = vec_mergeh(src5, src13);
    vector unsigned char tempL;
    vector unsigned char tempM = vec_mergeh(src6, src14);
    vector unsigned char tempO = vec_mergeh(src7, src15);
    vector unsigned char temp0 = vec_mergeh(tempA, tempI);
    vector unsigned char temp1 = vec_mergel(tempA, tempI);
    vector unsigned char temp2;
    vector unsigned char temp3;
    vector unsigned char temp4 = vec_mergeh(tempC, tempK);
    vector unsigned char temp5 = vec_mergel(tempC, tempK);
    vector unsigned char temp6;
    vector unsigned char temp7;
    vector unsigned char temp8 = vec_mergeh(tempE, tempM);
    vector unsigned char temp9 = vec_mergel(tempE, tempM);
    vector unsigned char temp12 = vec_mergeh(tempG, tempO);
    vector unsigned char temp13 = vec_mergel(tempG, tempO);
    tempA = vec_mergeh(temp0, temp8);
    tempB = vec_mergel(temp0, temp8);
    tempC = vec_mergeh(temp1, temp9);
    tempD = vec_mergel(temp1, temp9);
    tempI = vec_mergeh(temp4, temp12);
    tempJ = vec_mergel(temp4, temp12);
    tempK = vec_mergeh(temp5, temp13);
    tempL = vec_mergel(temp5, temp13);
    temp0 = vec_mergeh(tempA, tempI);
    temp1 = vec_mergel(tempA, tempI);
    temp2 = vec_mergeh(tempB, tempJ);
    temp3 = vec_mergel(tempB, tempJ);
    temp4 = vec_mergeh(tempC, tempK);
    temp5 = vec_mergel(tempC, tempK);
    temp6 = vec_mergeh(tempD, tempL);
    temp7 = vec_mergel(tempD, tempL);
    const vector signed char neg1 = vec_splat_s8(-1);
#define STORE_DOUBLE_LINE(i, j)                                         \
    vector unsigned char dstA##i = vec_ld(i * stride, dst);             \
    vector unsigned char dstB##i = vec_ld(i * stride + 16, dst);        \
    vector unsigned char dstA##j = vec_ld(j * stride, dst);             \
    vector unsigned char dstB##j = vec_ld(j * stride + 16, dst);        \
    vector unsigned char align##i = vec_lvsr(i * stride, dst);          \
    vector unsigned char align##j = vec_lvsr(j * stride, dst);          \
    vector unsigned char mask##i = vec_perm(zero, (vector unsigned char)neg1, align##i); \
    vector unsigned char mask##j = vec_perm(zero, (vector unsigned char)neg1, align##j); \
    vector unsigned char dstR##i = vec_perm(temp##i, temp##i, align##i); \
    vector unsigned char dstR##j = vec_perm(temp##j, temp##j, align##j); \
    vector unsigned char dstAF##i = vec_sel(dstA##i, dstR##i, mask##i); \
    vector unsigned char dstBF##i = vec_sel(dstR##i, dstB##i, mask##i); \
    vector unsigned char dstAF##j = vec_sel(dstA##j, dstR##j, mask##j); \
    vector unsigned char dstBF##j = vec_sel(dstR##j, dstB##j, mask##j); \
    vec_st(dstAF##i, i * stride, dst);                                  \
    vec_st(dstBF##i, i * stride + 16, dst);                             \
    vec_st(dstAF##j, j * stride, dst);                                  \
    vec_st(dstBF##j, j * stride + 16, dst)

    STORE_DOUBLE_LINE(0, 1);
    STORE_DOUBLE_LINE(2, 3);
    STORE_DOUBLE_LINE(4, 5);
    STORE_DOUBLE_LINE(6, 7);
#undef STORE_DOUBLE_LINE
}