/* libavcodec/libpostproc/postprocess_altivec_template.c (libav.git) */
/*
    AltiVec optimizations (C) 2004 Romain Dolbeau <romain@dolbeau.org>

    based on code by Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/

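/* Apple's compiler expects AltiVec vector literals in parentheses, while
   GCC elsewhere expects braces; AVV() papers over the difference so the
   constant tables below can be written once. */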
#ifdef CONFIG_DARWIN
#define AVV(x...) (x)
#else
#define AVV(x...) {x}
#endif

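/* Transpose an 8x8 block of 16-bit elements held in eight vector
   registers: three rounds of vec_mergeh/vec_mergel interleave the rows
   at doubling granularity, which after log2(8) = 3 rounds leaves the
   matrix transposed in place. */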
#define ALTIVEC_TRANSPOSE_8x8_SHORT(src_a,src_b,src_c,src_d,src_e,src_f,src_g,src_h) \
    do {                                                    \
        __typeof__(src_a) tempA1, tempB1, tempC1, tempD1;   \
        __typeof__(src_a) tempE1, tempF1, tempG1, tempH1;   \
        __typeof__(src_a) tempA2, tempB2, tempC2, tempD2;   \
        __typeof__(src_a) tempE2, tempF2, tempG2, tempH2;   \
        tempA1 = vec_mergeh(src_a, src_e);                  \
        tempB1 = vec_mergel(src_a, src_e);                  \
        tempC1 = vec_mergeh(src_b, src_f);                  \
        tempD1 = vec_mergel(src_b, src_f);                  \
        tempE1 = vec_mergeh(src_c, src_g);                  \
        tempF1 = vec_mergel(src_c, src_g);                  \
        tempG1 = vec_mergeh(src_d, src_h);                  \
        tempH1 = vec_mergel(src_d, src_h);                  \
        tempA2 = vec_mergeh(tempA1, tempE1);                \
        tempB2 = vec_mergel(tempA1, tempE1);                \
        tempC2 = vec_mergeh(tempB1, tempF1);                \
        tempD2 = vec_mergel(tempB1, tempF1);                \
        tempE2 = vec_mergeh(tempC1, tempG1);                \
        tempF2 = vec_mergel(tempC1, tempG1);                \
        tempG2 = vec_mergeh(tempD1, tempH1);                \
        tempH2 = vec_mergel(tempD1, tempH1);                \
        src_a = vec_mergeh(tempA2, tempE2);                 \
        src_b = vec_mergel(tempA2, tempE2);                 \
        src_c = vec_mergeh(tempB2, tempF2);                 \
        src_d = vec_mergel(tempB2, tempF2);                 \
        src_e = vec_mergeh(tempC2, tempG2);                 \
        src_f = vec_mergel(tempC2, tempG2);                 \
        src_g = vec_mergeh(tempD2, tempH2);                 \
        src_h = vec_mergel(tempD2, tempH2);                 \
    } while (0)

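/* Block flatness classifier for a vertical run of 8 lines: counts how
   many vertically adjacent pixel pairs differ by less than the DC
   threshold, and if the block looks flat, also checks the overall
   gradient (via the mmo* shuffles) against 2*QP. Returns 2 for a
   textured block, otherwise 1 (flat) or 0 (flat but with too large a
   step across the block). */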
static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
      This code makes no assumptions about src or stride.
      One could remove the recomputation of the perm
      vector by assuming (stride % 16) == 0; unfortunately
      this is not always true.
    */
    register int y;
    short __attribute__ ((aligned(16))) data[8];
    int numEq;
    uint8_t *src2 = src;
    vector signed short v_dcOffset;
    vector signed short v2QP;
    vector unsigned short v4QP;
    vector unsigned short v_dcThreshold;
    int two_vectors = ((((unsigned long)src2 % 16) > 8) || (stride % 16)) ? 1 : 0;
    const vector signed int zero = vec_splat_s32(0);
    const vector signed short mask = vec_splat_s16(1);
    vector signed int v_numEq = vec_splat_s32(0);

    data[0] = ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
    data[1] = data[0] * 2 + 1;
    data[2] = c->QP * 2;
    data[3] = c->QP * 4;
    vector signed short v_data = vec_ld(0, data);
    v_dcOffset = vec_splat(v_data, 0);
    v_dcThreshold = (vector unsigned short)vec_splat(v_data, 1);
    v2QP = vec_splat(v_data, 2);
    v4QP = (vector unsigned short)vec_splat(v_data, 3);

    src2 += stride * 4;

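/* AltiVec has no unaligned loads: each line is fetched by loading the
   aligned vector(s) that cover it and shifting the bytes into place with
   a vec_lvsl permute. When the 8 pixels provably fit in a single aligned
   vector (two_vectors == 0), the second load is skipped. */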
#define LOAD_LINE(i)                                              \
    register int j##i = i * stride;                               \
    vector unsigned char perm##i = vec_lvsl(j##i, src2);          \
    const vector unsigned char v_srcA1##i = vec_ld(j##i, src2);   \
    vector unsigned char v_srcA2##i;                              \
    if (two_vectors)                                              \
        v_srcA2##i = vec_ld(j##i + 16, src2);                     \
    const vector unsigned char v_srcA##i =                        \
        vec_perm(v_srcA1##i, v_srcA2##i, perm##i);                \
    vector signed short v_srcAss##i =                             \
        (vector signed short)vec_mergeh((vector signed char)zero, \
                                        (vector signed char)v_srcA##i)

    LOAD_LINE(0);
    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
#undef LOAD_LINE

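/* For each pair of adjacent lines, the biased unsigned compare
   (diff + dcOffset) < dcThreshold effectively tests |diff| against the
   DC threshold in one instruction; the all-ones/all-zeros compare result
   is masked down to 1 per lane and accumulated with vec_sum4s. */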
#define ITER(i, j)                                                       \
    const vector signed short v_diff##i =                                \
        vec_sub(v_srcAss##i, v_srcAss##j);                               \
    const vector signed short v_sum##i =                                 \
        vec_add(v_diff##i, v_dcOffset);                                  \
    const vector signed short v_comp##i =                                \
        (vector signed short)vec_cmplt((vector unsigned short)v_sum##i,  \
                                       v_dcThreshold);                   \
    const vector signed short v_part##i = vec_and(mask, v_comp##i);      \
    v_numEq = vec_sum4s(v_part##i, v_numEq);

    ITER(0, 1);
    ITER(1, 2);
    ITER(2, 3);
    ITER(3, 4);
    ITER(4, 5);
    ITER(5, 6);
    ITER(6, 7);
#undef ITER

    v_numEq = vec_sums(v_numEq, zero);

    v_numEq = vec_splat(v_numEq, 3);
    vec_ste(v_numEq, 0, &numEq);

    if (numEq > c->ppMode.flatnessThreshold)
    {
        const vector unsigned char mmoP1 = (const vector unsigned char)
            AVV(0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
                0x00, 0x01, 0x12, 0x13, 0x08, 0x09, 0x1A, 0x1B);
        const vector unsigned char mmoP2 = (const vector unsigned char)
            AVV(0x04, 0x05, 0x16, 0x17, 0x0C, 0x0D, 0x1E, 0x1F,
                0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
        const vector unsigned char mmoP = (const vector unsigned char)
            vec_lvsl(8, (unsigned char*)0);

        vector signed short mmoL1 = vec_perm(v_srcAss0, v_srcAss2, mmoP1);
        vector signed short mmoL2 = vec_perm(v_srcAss4, v_srcAss6, mmoP2);
        vector signed short mmoL = vec_perm(mmoL1, mmoL2, mmoP);
        vector signed short mmoR1 = vec_perm(v_srcAss5, v_srcAss7, mmoP1);
        vector signed short mmoR2 = vec_perm(v_srcAss1, v_srcAss3, mmoP2);
        vector signed short mmoR = vec_perm(mmoR1, mmoR2, mmoP);
        vector signed short mmoDiff = vec_sub(mmoL, mmoR);
        vector unsigned short mmoSum = (vector unsigned short)vec_add(mmoDiff, v2QP);

        if (vec_any_gt(mmoSum, v4QP))
            return 0;
        else
            return 1;
    }
    else return 2;
}

/* This is the same as vertClassify_altivec, with an added 8x8 transpose
   after the loading and without the stride*4 offset. */
static inline int horizClassify_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
      This code makes no assumptions about src or stride.
      One could remove the recomputation of the perm
      vector by assuming (stride % 16) == 0; unfortunately
      this is not always true.
    */
    register int y;
    short __attribute__ ((aligned(16))) data[8];
    int numEq;
    uint8_t *src2 = src;
    vector signed short v_dcOffset;
    vector signed short v2QP;
    vector unsigned short v4QP;
    vector unsigned short v_dcThreshold;
    int two_vectors = ((((unsigned long)src2 % 16) > 8) || (stride % 16)) ? 1 : 0;
    const vector signed int zero = vec_splat_s32(0);
    const vector signed short mask = vec_splat_s16(1);
    vector signed int v_numEq = vec_splat_s32(0);

    data[0] = ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
    data[1] = data[0] * 2 + 1;
    data[2] = c->QP * 2;
    data[3] = c->QP * 4;
    vector signed short v_data = vec_ld(0, data);
    v_dcOffset = vec_splat(v_data, 0);
    v_dcThreshold = (vector unsigned short)vec_splat(v_data, 1);
    v2QP = vec_splat(v_data, 2);
    v4QP = (vector unsigned short)vec_splat(v_data, 3);

    // src2 += stride * 4;

#define LOAD_LINE(i)                                              \
    register int j##i = i * stride;                               \
    vector unsigned char perm##i = vec_lvsl(j##i, src2);          \
    const vector unsigned char v_srcA1##i = vec_ld(j##i, src2);   \
    vector unsigned char v_srcA2##i;                              \
    if (two_vectors)                                              \
        v_srcA2##i = vec_ld(j##i + 16, src2);                     \
    const vector unsigned char v_srcA##i =                        \
        vec_perm(v_srcA1##i, v_srcA2##i, perm##i);                \
    vector signed short v_srcAss##i =                             \
        (vector signed short)vec_mergeh((vector signed char)zero, \
                                        (vector signed char)v_srcA##i)

    LOAD_LINE(0);
    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
#undef LOAD_LINE

    ALTIVEC_TRANSPOSE_8x8_SHORT(v_srcAss0,
                                v_srcAss1,
                                v_srcAss2,
                                v_srcAss3,
                                v_srcAss4,
                                v_srcAss5,
                                v_srcAss6,
                                v_srcAss7);

#define ITER(i, j)                                                       \
    const vector signed short v_diff##i =                                \
        vec_sub(v_srcAss##i, v_srcAss##j);                               \
    const vector signed short v_sum##i =                                 \
        vec_add(v_diff##i, v_dcOffset);                                  \
    const vector signed short v_comp##i =                                \
        (vector signed short)vec_cmplt((vector unsigned short)v_sum##i,  \
                                       v_dcThreshold);                   \
    const vector signed short v_part##i = vec_and(mask, v_comp##i);      \
    v_numEq = vec_sum4s(v_part##i, v_numEq);

    ITER(0, 1);
    ITER(1, 2);
    ITER(2, 3);
    ITER(3, 4);
    ITER(4, 5);
    ITER(5, 6);
    ITER(6, 7);
#undef ITER

    v_numEq = vec_sums(v_numEq, zero);

    v_numEq = vec_splat(v_numEq, 3);
    vec_ste(v_numEq, 0, &numEq);

    if (numEq > c->ppMode.flatnessThreshold)
    {
        const vector unsigned char mmoP1 = (const vector unsigned char)
            AVV(0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
                0x00, 0x01, 0x12, 0x13, 0x08, 0x09, 0x1A, 0x1B);
        const vector unsigned char mmoP2 = (const vector unsigned char)
            AVV(0x04, 0x05, 0x16, 0x17, 0x0C, 0x0D, 0x1E, 0x1F,
                0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
        const vector unsigned char mmoP = (const vector unsigned char)
            vec_lvsl(8, (unsigned char*)0);

        vector signed short mmoL1 = vec_perm(v_srcAss0, v_srcAss2, mmoP1);
        vector signed short mmoL2 = vec_perm(v_srcAss4, v_srcAss6, mmoP2);
        vector signed short mmoL = vec_perm(mmoL1, mmoL2, mmoP);
        vector signed short mmoR1 = vec_perm(v_srcAss5, v_srcAss7, mmoP1);
        vector signed short mmoR2 = vec_perm(v_srcAss1, v_srcAss3, mmoP2);
        vector signed short mmoR = vec_perm(mmoR1, mmoR2, mmoP);
        vector signed short mmoDiff = vec_sub(mmoL, mmoR);
        vector unsigned short mmoSum = (vector unsigned short)vec_add(mmoDiff, v2QP);

        if (vec_any_gt(mmoSum, v4QP))
            return 0;
        else
            return 1;
    }
    else return 2;
}

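/* Vertical low-pass (deblocking) filter over a 10-line window. A sliding
   box sum (the v_sumsB chain below) is maintained incrementally by
   subtracting the sample leaving the window and adding the one entering
   it; the first and last lines are conditionally replaced depending on
   whether they differ from their neighbour by less than QP. */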
static inline void doVertLowPass_altivec(uint8_t *src, int stride, PPContext *c) {
    /*
      This code makes no assumptions about src or stride.
      One could remove the recomputation of the perm
      vector by assuming (stride % 16) == 0; unfortunately
      this is not always true. Quite a lot of load/stores
      can be removed by assuming proper alignment of
      src & stride :-(
    */
    uint8_t *src2 = src;
    const vector signed int zero = vec_splat_s32(0);
    short __attribute__ ((aligned(16))) qp[8];
    qp[0] = c->QP;
    vector signed short vqp = vec_ld(0, qp);
    vqp = vec_splat(vqp, 0);

#define LOAD_LINE(i)                                                \
    const vector unsigned char perml##i =                           \
        vec_lvsl(i * stride, src2);                                 \
    const vector unsigned char vbA##i =                             \
        vec_ld(i * stride, src2);                                   \
    const vector unsigned char vbB##i =                             \
        vec_ld(i * stride + 16, src2);                              \
    const vector unsigned char vbT##i =                             \
        vec_perm(vbA##i, vbB##i, perml##i);                         \
    const vector signed short vb##i =                               \
        (vector signed short)vec_mergeh((vector unsigned char)zero, \
                                        (vector unsigned char)vbT##i)

    src2 += stride*3;

    LOAD_LINE(0);
    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
    LOAD_LINE(9);
#undef LOAD_LINE

    const vector unsigned short v_1 = vec_splat_u16(1);
    const vector unsigned short v_2 = vec_splat_u16(2);
    const vector unsigned short v_4 = vec_splat_u16(4);

    const vector signed short v_diff01 = vec_sub(vb0, vb1);
    const vector unsigned short v_cmp01 =
        (const vector unsigned short) vec_cmplt(vec_abs(v_diff01), vqp);
    const vector signed short v_first = vec_sel(vb1, vb0, v_cmp01);
    const vector signed short v_diff89 = vec_sub(vb8, vb9);
    const vector unsigned short v_cmp89 =
        (const vector unsigned short) vec_cmplt(vec_abs(v_diff89), vqp);
    const vector signed short v_last = vec_sel(vb8, vb9, v_cmp89);

    const vector signed short temp01 = vec_mladd(v_first, (vector signed short)v_4, vb1);
    const vector signed short temp02 = vec_add(vb2, vb3);
    const vector signed short temp03 = vec_add(temp01, (vector signed short)v_4);
    const vector signed short v_sumsB0 = vec_add(temp02, temp03);

    const vector signed short temp11 = vec_sub(v_sumsB0, v_first);
    const vector signed short v_sumsB1 = vec_add(temp11, vb4);

    const vector signed short temp21 = vec_sub(v_sumsB1, v_first);
    const vector signed short v_sumsB2 = vec_add(temp21, vb5);

    const vector signed short temp31 = vec_sub(v_sumsB2, v_first);
    const vector signed short v_sumsB3 = vec_add(temp31, vb6);

    const vector signed short temp41 = vec_sub(v_sumsB3, v_first);
    const vector signed short v_sumsB4 = vec_add(temp41, vb7);

    const vector signed short temp51 = vec_sub(v_sumsB4, vb1);
    const vector signed short v_sumsB5 = vec_add(temp51, vb8);

    const vector signed short temp61 = vec_sub(v_sumsB5, vb2);
    const vector signed short v_sumsB6 = vec_add(temp61, v_last);

    const vector signed short temp71 = vec_sub(v_sumsB6, vb3);
    const vector signed short v_sumsB7 = vec_add(temp71, v_last);

    const vector signed short temp81 = vec_sub(v_sumsB7, vb4);
    const vector signed short v_sumsB8 = vec_add(temp81, v_last);

    const vector signed short temp91 = vec_sub(v_sumsB8, vb5);
    const vector signed short v_sumsB9 = vec_add(temp91, v_last);

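/* Each output line j is (sumsB[i] + sumsB[k] + 2*vb[j]) >> 4; the +4
   bias folded into each sliding sum makes this a round-to-nearest
   weighted average of total weight 16. */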
#define COMPUTE_VR(i, j, k)                                        \
    const vector signed short temps1##i =                          \
        vec_add(v_sumsB##i, v_sumsB##k);                           \
    const vector signed short temps2##i =                          \
        vec_mladd(vb##j, (vector signed short)v_2, temps1##i);     \
    const vector signed short vr##j = vec_sra(temps2##i, v_4)

    COMPUTE_VR(0, 1, 2);
    COMPUTE_VR(1, 2, 3);
    COMPUTE_VR(2, 3, 4);
    COMPUTE_VR(3, 4, 5);
    COMPUTE_VR(4, 5, 6);
    COMPUTE_VR(5, 6, 7);
    COMPUTE_VR(6, 7, 8);
    COMPUTE_VR(7, 8, 9);

    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                        0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);
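
/* Unaligned store idiom: rotate the packed result into store position
   with vec_lvsr, then use vec_sel with a permuted all-ones mask to merge
   it into the two aligned vectors loaded earlier, preserving the bytes
   outside the 8-pixel line. */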
#define PACK_AND_STORE(i)                                                            \
    const vector unsigned char perms##i =                                            \
        vec_lvsr(i * stride, src2);                                                  \
    const vector unsigned char vf##i =                                               \
        vec_packsu(vr##i, (vector signed short)zero);                                \
    const vector unsigned char vg##i =                                               \
        vec_perm(vf##i, vbT##i, permHH);                                             \
    const vector unsigned char mask##i =                                             \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i);  \
    const vector unsigned char vg2##i =                                              \
        vec_perm(vg##i, vg##i, perms##i);                                            \
    const vector unsigned char svA##i =                                              \
        vec_sel(vbA##i, vg2##i, mask##i);                                            \
    const vector unsigned char svB##i =                                              \
        vec_sel(vg2##i, vbB##i, mask##i);                                            \
    vec_st(svA##i, i * stride, src2);                                                \
    vec_st(svB##i, i * stride + 16, src2)

    PACK_AND_STORE(1);
    PACK_AND_STORE(2);
    PACK_AND_STORE(3);
    PACK_AND_STORE(4);
    PACK_AND_STORE(5);
    PACK_AND_STORE(6);
    PACK_AND_STORE(7);
    PACK_AND_STORE(8);

#undef PACK_AND_STORE
}

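/* Vertical default filter: computes the "middle", "left" and "right"
   energies around the edge between lines 4 and 5, derives a correction d
   from their difference, clamps it to half the edge step, and applies it
   with opposite signs to the two edge lines, but only where the middle
   energy is below 8*QP. */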
static inline void doVertDefFilter_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
      This code makes no assumptions about src or stride.
      One could remove the recomputation of the perm
      vector by assuming (stride % 16) == 0; unfortunately
      this is not always true. Quite a lot of load/stores
      can be removed by assuming proper alignment of
      src & stride :-(
    */
    uint8_t *src2 = src;
    const vector signed int zero = vec_splat_s32(0);
    short __attribute__ ((aligned(16))) qp[8];
    qp[0] = 8*c->QP;
    vector signed short vqp = vec_ld(0, qp);
    vqp = vec_splat(vqp, 0);

#define LOAD_LINE(i)                                                \
    const vector unsigned char perm##i =                            \
        vec_lvsl(i * stride, src2);                                 \
    const vector unsigned char vbA##i =                             \
        vec_ld(i * stride, src2);                                   \
    const vector unsigned char vbB##i =                             \
        vec_ld(i * stride + 16, src2);                              \
    const vector unsigned char vbT##i =                             \
        vec_perm(vbA##i, vbB##i, perm##i);                          \
    const vector signed short vb##i =                               \
        (vector signed short)vec_mergeh((vector unsigned char)zero, \
                                        (vector unsigned char)vbT##i)

    src2 += stride*3;

    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
#undef LOAD_LINE

    const vector signed short v_1 = vec_splat_s16(1);
    const vector signed short v_2 = vec_splat_s16(2);
    const vector signed short v_5 = vec_splat_s16(5);
    const vector signed short v_32 = vec_sl(v_1,
                                            (vector unsigned short)v_5);
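    /* The correction is d = max(0, |mE| - min(|lE|, |rE|)) scaled as
       (5*d + 32) >> 6, with the sign opposite to mE, and later clamped
       so it never overshoots q = (l4 - l5) / 2. */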
    /* middle energy */
    const vector signed short l3minusl6 = vec_sub(vb3, vb6);
    const vector signed short l5minusl4 = vec_sub(vb5, vb4);
    const vector signed short twotimes_l3minusl6 = vec_mladd(v_2, l3minusl6, (vector signed short)zero);
    const vector signed short mE = vec_mladd(v_5, l5minusl4, twotimes_l3minusl6);
    const vector signed short absmE = vec_abs(mE);
    /* left & right energy */
    const vector signed short l1minusl4 = vec_sub(vb1, vb4);
    const vector signed short l3minusl2 = vec_sub(vb3, vb2);
    const vector signed short l5minusl8 = vec_sub(vb5, vb8);
    const vector signed short l7minusl6 = vec_sub(vb7, vb6);
    const vector signed short twotimes_l1minusl4 = vec_mladd(v_2, l1minusl4, (vector signed short)zero);
    const vector signed short twotimes_l5minusl8 = vec_mladd(v_2, l5minusl8, (vector signed short)zero);
    const vector signed short lE = vec_mladd(v_5, l3minusl2, twotimes_l1minusl4);
    const vector signed short rE = vec_mladd(v_5, l7minusl6, twotimes_l5minusl8);
    /* d */
    const vector signed short ddiff = vec_sub(absmE,
                                              vec_min(vec_abs(lE),
                                                      vec_abs(rE)));
    const vector signed short ddiffclamp = vec_max(ddiff, (vector signed short)zero);
    const vector signed short dtimes64 = vec_mladd(v_5, ddiffclamp, v_32);
    const vector signed short d = vec_sra(dtimes64, vec_splat_u16(6));
    const vector signed short minusd = vec_sub((vector signed short)zero, d);
    const vector signed short finald = vec_sel(minusd,
                                               d,
                                               vec_cmpgt(vec_sub((vector signed short)zero, mE),
                                                         (vector signed short)zero));
    /* q */
    const vector signed short qtimes2 = vec_sub(vb4, vb5);
    /* for a shift right to behave like /2, we need to add one
       to all negative integers */
    const vector signed short rounddown = vec_sel((vector signed short)zero,
                                                  v_1,
                                                  vec_cmplt(qtimes2, (vector signed short)zero));
    const vector signed short q = vec_sra(vec_add(qtimes2, rounddown), vec_splat_u16(1));
    /* clamp */
    const vector signed short dclamp_P1 = vec_max((vector signed short)zero, finald);
    const vector signed short dclamp_P = vec_min(dclamp_P1, q);
    const vector signed short dclamp_N1 = vec_min((vector signed short)zero, finald);
    const vector signed short dclamp_N = vec_max(dclamp_N1, q);

    const vector signed short dclampedfinal = vec_sel(dclamp_N,
                                                      dclamp_P,
                                                      vec_cmpgt(q, (vector signed short)zero));
    const vector signed short dornotd = vec_sel((vector signed short)zero,
                                                dclampedfinal,
                                                vec_cmplt(absmE, vqp));
    /* add/subtract to l4 and l5 */
    const vector signed short vb4minusd = vec_sub(vb4, dornotd);
    const vector signed short vb5plusd = vec_add(vb5, dornotd);
    /* finally, stores */
    const vector unsigned char st4 = vec_packsu(vb4minusd, (vector signed short)zero);
    const vector unsigned char st5 = vec_packsu(vb5plusd, (vector signed short)zero);

    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                        0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

#define STORE(i)                                                                     \
    const vector unsigned char perms##i =                                            \
        vec_lvsr(i * stride, src2);                                                  \
    const vector unsigned char vg##i =                                               \
        vec_perm(st##i, vbT##i, permHH);                                             \
    const vector unsigned char mask##i =                                             \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i);  \
    const vector unsigned char vg2##i =                                              \
        vec_perm(vg##i, vg##i, perms##i);                                            \
    const vector unsigned char svA##i =                                              \
        vec_sel(vbA##i, vg2##i, mask##i);                                            \
    const vector unsigned char svB##i =                                              \
        vec_sel(vg2##i, vbB##i, mask##i);                                            \
    vec_st(svA##i, i * stride, src2);                                                \
    vec_st(svB##i, i * stride + 16, src2)

    STORE(4);
    STORE(5);
}

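/* Deringing filter: computes the min and max of the inner 8x8 block and
   bails out early if the contrast is below deringThreshold; otherwise it
   builds per-line bitmasks of the pixels brighter than avg(min, max) and
   smooths (3x3 weighted average) only those pixels whose whole 3x3
   neighbourhood falls on the same side of that average. */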
static inline void dering_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
      This code makes no assumptions about src or stride.
      One could remove the recomputation of the perm
      vector by assuming (stride % 16) == 0; unfortunately
      this is not always true. Quite a lot of load/stores
      can be removed by assuming proper alignment of
      src & stride :-(
    */
    uint8_t *srcCopy = src;
    uint8_t __attribute__((aligned(16))) dt[16];
    const vector unsigned char vuint8_1 = vec_splat_u8(1);
    const vector signed int zero = vec_splat_s32(0);
    vector unsigned char v_dt;
    dt[0] = deringThreshold;
    v_dt = vec_splat(vec_ld(0, dt), 0);

#define LOAD_LINE(i)                                                  \
    const vector unsigned char perm##i =                              \
        vec_lvsl(i * stride, srcCopy);                                \
    vector unsigned char sA##i = vec_ld(i * stride, srcCopy);         \
    vector unsigned char sB##i = vec_ld(i * stride + 16, srcCopy);    \
    vector unsigned char src##i = vec_perm(sA##i, sB##i, perm##i)

    LOAD_LINE(0);
    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
    LOAD_LINE(9);
#undef LOAD_LINE

    vector unsigned char v_avg;
    {
        const vector unsigned char trunc_perm = (vector unsigned char)
            AVV(0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
                0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18);
        const vector unsigned char trunc_src12 = vec_perm(src1, src2, trunc_perm);
        const vector unsigned char trunc_src34 = vec_perm(src3, src4, trunc_perm);
        const vector unsigned char trunc_src56 = vec_perm(src5, src6, trunc_perm);
        const vector unsigned char trunc_src78 = vec_perm(src7, src8, trunc_perm);

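        /* Horizontal min/max reduction: fold the vector onto itself with
           vec_mergeh/vec_mergel and combine with vec_min/vec_max until
           every byte of v_min/v_max holds the block-wide minimum/maximum. */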
#define EXTRACT(op) do {                                                         \
        const vector unsigned char s##op##_1 = vec_##op(trunc_src12, trunc_src34); \
        const vector unsigned char s##op##_2 = vec_##op(trunc_src56, trunc_src78); \
        const vector unsigned char s##op##_6 = vec_##op(s##op##_1, s##op##_2);   \
        const vector unsigned char s##op##_8h = vec_mergeh(s##op##_6, s##op##_6); \
        const vector unsigned char s##op##_8l = vec_mergel(s##op##_6, s##op##_6); \
        const vector unsigned char s##op##_9 = vec_##op(s##op##_8h, s##op##_8l); \
        const vector unsigned char s##op##_9h = vec_mergeh(s##op##_9, s##op##_9); \
        const vector unsigned char s##op##_9l = vec_mergel(s##op##_9, s##op##_9); \
        const vector unsigned char s##op##_10 = vec_##op(s##op##_9h, s##op##_9l); \
        const vector unsigned char s##op##_10h = vec_mergeh(s##op##_10, s##op##_10); \
        const vector unsigned char s##op##_10l = vec_mergel(s##op##_10, s##op##_10); \
        const vector unsigned char s##op##_11 = vec_##op(s##op##_10h, s##op##_10l); \
        const vector unsigned char s##op##_11h = vec_mergeh(s##op##_11, s##op##_11); \
        const vector unsigned char s##op##_11l = vec_mergel(s##op##_11, s##op##_11); \
        v_##op = vec_##op(s##op##_11h, s##op##_11l); } while (0)

        vector unsigned char v_min;
        vector unsigned char v_max;
        EXTRACT(min);
        EXTRACT(max);
#undef EXTRACT

        if (vec_all_lt(vec_sub(v_max, v_min), v_dt))
            return;

        v_avg = vec_avg(v_min, v_max);
    }

    signed int __attribute__((aligned(16))) S[8];
    {
        const vector unsigned short mask1 = (vector unsigned short)
            AVV(0x0001, 0x0002, 0x0004, 0x0008,
                0x0010, 0x0020, 0x0040, 0x0080);
        const vector unsigned short mask2 = (vector unsigned short)
            AVV(0x0100, 0x0200, 0x0000, 0x0000,
                0x0000, 0x0000, 0x0000, 0x0000);

        const vector unsigned int vuint32_16 = vec_sl(vec_splat_u32(1), vec_splat_u32(4));
        const vector unsigned int vuint32_1 = vec_splat_u32(1);

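        /* For each line, build a bitmap (one bit per pixel, selected by
           mask1/mask2) of the pixels brighter than v_avg, and reduce it
           to a scalar per-line value in sum##i via vec_sum4s/vec_sums. */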
#define COMPARE(i)                                                      \
        vector signed int sum##i;                                       \
        do {                                                            \
            const vector unsigned char cmp##i =                         \
                (vector unsigned char)vec_cmpgt(src##i, v_avg);         \
            const vector unsigned short cmpHi##i =                      \
                (vector unsigned short)vec_mergeh(cmp##i, cmp##i);      \
            const vector unsigned short cmpLi##i =                      \
                (vector unsigned short)vec_mergel(cmp##i, cmp##i);      \
            const vector signed short cmpHf##i =                        \
                (vector signed short)vec_and(cmpHi##i, mask1);          \
            const vector signed short cmpLf##i =                        \
                (vector signed short)vec_and(cmpLi##i, mask2);          \
            const vector signed int sump##i = vec_sum4s(cmpHf##i, zero); \
            const vector signed int sumq##i = vec_sum4s(cmpLf##i, sump##i); \
            sum##i = vec_sums(sumq##i, zero); } while (0)

        COMPARE(0);
        COMPARE(1);
        COMPARE(2);
        COMPARE(3);
        COMPARE(4);
        COMPARE(5);
        COMPARE(6);
        COMPARE(7);
        COMPARE(8);
        COMPARE(9);
#undef COMPARE

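        /* Combine each line's "brighter than average" bitmap with its
           complement (shifted up 16 bits), then AND each bit with its
           left and right neighbours (the shift-by-1 pair) and with the
           same bit on the two following lines (the d4/d8 permutes). The
           bits left standing in S[] mark pixels whose 3x3 neighbourhood
           lies entirely on one side of the average. */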
        vector signed int sumA2;
        vector signed int sumB2;
        {
            const vector signed int sump02 = vec_mergel(sum0, sum2);
            const vector signed int sump13 = vec_mergel(sum1, sum3);
            const vector signed int sumA = vec_mergel(sump02, sump13);

            const vector signed int sump46 = vec_mergel(sum4, sum6);
            const vector signed int sump57 = vec_mergel(sum5, sum7);
            const vector signed int sumB = vec_mergel(sump46, sump57);

            const vector signed int sump8A = vec_mergel(sum8, zero);
            const vector signed int sump9B = vec_mergel(sum9, zero);
            const vector signed int sumC = vec_mergel(sump8A, sump9B);

            const vector signed int tA = vec_sl(vec_nor(zero, sumA), vuint32_16);
            const vector signed int tB = vec_sl(vec_nor(zero, sumB), vuint32_16);
            const vector signed int tC = vec_sl(vec_nor(zero, sumC), vuint32_16);
            const vector signed int t2A = vec_or(sumA, tA);
            const vector signed int t2B = vec_or(sumB, tB);
            const vector signed int t2C = vec_or(sumC, tC);
            const vector signed int t3A = vec_and(vec_sra(t2A, vuint32_1),
                                                  vec_sl(t2A, vuint32_1));
            const vector signed int t3B = vec_and(vec_sra(t2B, vuint32_1),
                                                  vec_sl(t2B, vuint32_1));
            const vector signed int t3C = vec_and(vec_sra(t2C, vuint32_1),
                                                  vec_sl(t2C, vuint32_1));
            const vector signed int yA = vec_and(t2A, t3A);
            const vector signed int yB = vec_and(t2B, t3B);
            const vector signed int yC = vec_and(t2C, t3C);

            const vector unsigned char strangeperm1 = vec_lvsl(4, (unsigned char*)0);
            const vector unsigned char strangeperm2 = vec_lvsl(8, (unsigned char*)0);
            const vector signed int sumAd4 = vec_perm(yA, yB, strangeperm1);
            const vector signed int sumAd8 = vec_perm(yA, yB, strangeperm2);
            const vector signed int sumBd4 = vec_perm(yB, yC, strangeperm1);
            const vector signed int sumBd8 = vec_perm(yB, yC, strangeperm2);
            const vector signed int sumAp = vec_and(yA,
                                                    vec_and(sumAd4, sumAd8));
            const vector signed int sumBp = vec_and(yB,
                                                    vec_and(sumBd4, sumBd8));
            sumA2 = vec_or(sumAp,
                           vec_sra(sumAp,
                                   vuint32_16));
            sumB2 = vec_or(sumBp,
                           vec_sra(sumBp,
                                   vuint32_16));
        }
        vec_st(sumA2, 0, S);
        vec_st(sumB2, 16, S);
    }

    /* I'm not sure the following is actually faster
       than straight, unvectorized C code :-( */

    int __attribute__((aligned(16))) tQP2[4];
    tQP2[0] = c->QP/2 + 1;
    vector signed int vQP2 = vec_ld(0, tQP2);
    vQP2 = vec_splat(vQP2, 0);
    const vector unsigned char vuint8_2 = vec_splat_u8(2);
    const vector signed int vsint32_8 = vec_splat_s32(8);
    const vector unsigned int vuint32_4 = vec_splat_u32(4);

    const vector unsigned char permA1 = (vector unsigned char)
        AVV(0x00, 0x01, 0x02, 0x10, 0x11, 0x12, 0x1F, 0x1F,
            0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F);
    const vector unsigned char permA2 = (vector unsigned char)
        AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x10, 0x11,
            0x12, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F);
    const vector unsigned char permA1inc = (vector unsigned char)
        AVV(0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char permA2inc = (vector unsigned char)
        AVV(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01,
            0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char magic = (vector unsigned char)
        AVV(0x01, 0x02, 0x01, 0x02, 0x04, 0x02, 0x01, 0x02,
            0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char extractPerm = (vector unsigned char)
        AVV(0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01,
            0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01);
    const vector unsigned char extractPermInc = (vector unsigned char)
        AVV(0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
            0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01);
    const vector unsigned char identity = vec_lvsl(0, (unsigned char *)0);
    const vector unsigned char tenRight = (vector unsigned char)
        AVV(0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char eightLeft = (vector unsigned char)
        AVV(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08);

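    /* F2 filters one pixel of line j when its bit is set in S[i]: the
       3x3 neighbourhood is gathered through the sliding permA1/permA2
       permutes, averaged with the magic weights (1 2 1 / 2 4 2 / 1 2 1)
       via vec_msum, clamped to within about QP/2 of the original pixel,
       and inserted back into src##j; the permute vectors are then
       advanced one pixel to the right. */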
#define F_INIT(i)                                                   \
    vector unsigned char tenRightM##i = tenRight;                   \
    vector unsigned char permA1M##i = permA1;                       \
    vector unsigned char permA2M##i = permA2;                       \
    vector unsigned char extractPermM##i = extractPerm

#define F2(i, j, k, l)                                                  \
    if (S[i] & (1 << (l+1))) {                                          \
        const vector unsigned char a_##j##_A##l =                       \
            vec_perm(src##i, src##j, permA1M##i);                       \
        const vector unsigned char a_##j##_B##l =                       \
            vec_perm(a_##j##_A##l, src##k, permA2M##i);                 \
        const vector signed int a_##j##_sump##l =                       \
            (vector signed int)vec_msum(a_##j##_B##l, magic,            \
                                        (vector unsigned int)zero);     \
        vector signed int F_##j##_##l =                                 \
            vec_sr(vec_sums(a_##j##_sump##l, vsint32_8), vuint32_4);    \
        F_##j##_##l = vec_splat(F_##j##_##l, 3);                        \
        const vector signed int p_##j##_##l =                           \
            (vector signed int)vec_perm(src##j,                         \
                                        (vector unsigned char)zero,     \
                                        extractPermM##i);               \
        const vector signed int sum_##j##_##l = vec_add(p_##j##_##l, vQP2); \
        const vector signed int diff_##j##_##l = vec_sub(p_##j##_##l, vQP2); \
        vector signed int newpm_##j##_##l;                              \
        if (vec_all_lt(sum_##j##_##l, F_##j##_##l))                     \
            newpm_##j##_##l = sum_##j##_##l;                            \
        else if (vec_all_gt(diff_##j##_##l, F_##j##_##l))               \
            newpm_##j##_##l = diff_##j##_##l;                           \
        else newpm_##j##_##l = F_##j##_##l;                             \
        const vector unsigned char newpm2_##j##_##l =                   \
            vec_splat((vector unsigned char)newpm_##j##_##l, 15);       \
        const vector unsigned char mask##j##l = vec_add(identity,       \
                                                        tenRightM##i);  \
        src##j = vec_perm(src##j, newpm2_##j##_##l, mask##j##l);        \
    }                                                                   \
    permA1M##i = vec_add(permA1M##i, permA1inc);                        \
    permA2M##i = vec_add(permA2M##i, permA2inc);                        \
    tenRightM##i = vec_sro(tenRightM##i, eightLeft);                    \
    extractPermM##i = vec_add(extractPermM##i, extractPermInc)

#define ITER(i, j, k)   \
    F_INIT(i);          \
    F2(i, j, k, 0);     \
    F2(i, j, k, 1);     \
    F2(i, j, k, 2);     \
    F2(i, j, k, 3);     \
    F2(i, j, k, 4);     \
    F2(i, j, k, 5);     \
    F2(i, j, k, 6);     \
    F2(i, j, k, 7)

    ITER(0, 1, 2);
    ITER(1, 2, 3);
    ITER(2, 3, 4);
    ITER(3, 4, 5);
    ITER(4, 5, 6);
    ITER(5, 6, 7);
    ITER(6, 7, 8);
    ITER(7, 8, 9);

    const vector signed char neg1 = vec_splat_s8(-1);

#define STORE_LINE(i)                                       \
    const vector unsigned char permST##i =                  \
        vec_lvsr(i * stride, srcCopy);                      \
    const vector unsigned char maskST##i =                  \
        vec_perm((vector unsigned char)zero,                \
                 (vector unsigned char)neg1, permST##i);    \
    src##i = vec_perm(src##i, src##i, permST##i);           \
    sA##i = vec_sel(sA##i, src##i, maskST##i);              \
    sB##i = vec_sel(src##i, sB##i, maskST##i);              \
    vec_st(sA##i, i * stride, srcCopy);                     \
    vec_st(sB##i, i * stride + 16, srcCopy)

    STORE_LINE(1);
    STORE_LINE(2);
    STORE_LINE(3);
    STORE_LINE(4);
    STORE_LINE(5);
    STORE_LINE(6);
    STORE_LINE(7);
    STORE_LINE(8);

#undef STORE_LINE
#undef ITER
#undef F2
}

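/* No AltiVec versions of the horizontal filters yet: fall back to the
   plain C implementations. */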
#define doHorizLowPass_altivec(a...) doHorizLowPass_C(a)
#define doHorizDefFilter_altivec(a...) doHorizDefFilter_C(a)
#define do_a_deblock_altivec(a...) do_a_deblock_C(a)

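/* Temporal noise reducer: measures the squared difference between the
   current 8x8 block and its blurred history, smooths that noise measure
   through the tempBluredPast neighbourhood, then blends src into
   tempBlured with a strength chosen from the maxNoise thresholds. */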
static inline void RENAME(tempNoiseReducer)(uint8_t *src, int stride,
                                            uint8_t *tempBlured, uint32_t *tempBluredPast, int *maxNoise)
{
    const vector signed int zero = vec_splat_s32(0);
    const vector signed short vsint16_1 = vec_splat_s16(1);
    vector signed int v_dp = zero;
    vector signed int v_sysdp = zero;
    int d, sysd, i;

    tempBluredPast[127] = maxNoise[0];
    tempBluredPast[128] = maxNoise[1];
    tempBluredPast[129] = maxNoise[2];

#define LOAD_LINE(src, i)                                                   \
    register int j##src##i = i * stride;                                    \
    vector unsigned char perm##src##i = vec_lvsl(j##src##i, src);           \
    const vector unsigned char v_##src##A1##i = vec_ld(j##src##i, src);     \
    const vector unsigned char v_##src##A2##i = vec_ld(j##src##i + 16, src); \
    const vector unsigned char v_##src##A##i =                              \
        vec_perm(v_##src##A1##i, v_##src##A2##i, perm##src##i);             \
    vector signed short v_##src##Ass##i =                                   \
        (vector signed short)vec_mergeh((vector signed char)zero,           \
                                        (vector signed char)v_##src##A##i)

    LOAD_LINE(src, 0);
    LOAD_LINE(src, 1);
    LOAD_LINE(src, 2);
    LOAD_LINE(src, 3);
    LOAD_LINE(src, 4);
    LOAD_LINE(src, 5);
    LOAD_LINE(src, 6);
    LOAD_LINE(src, 7);

    LOAD_LINE(tempBlured, 0);
    LOAD_LINE(tempBlured, 1);
    LOAD_LINE(tempBlured, 2);
    LOAD_LINE(tempBlured, 3);
    LOAD_LINE(tempBlured, 4);
    LOAD_LINE(tempBlured, 5);
    LOAD_LINE(tempBlured, 6);
    LOAD_LINE(tempBlured, 7);
#undef LOAD_LINE

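    /* v_dp accumulates the sum of squared differences, v_sysdp the plain
       (signed) sum of differences. */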
#define ACCUMULATE_DIFFS(i)                                     \
    vector signed short v_d##i = vec_sub(v_tempBluredAss##i,    \
                                         v_srcAss##i);          \
    v_dp = vec_msums(v_d##i, v_d##i, v_dp);                     \
    v_sysdp = vec_msums(v_d##i, vsint16_1, v_sysdp)

    ACCUMULATE_DIFFS(0);
    ACCUMULATE_DIFFS(1);
    ACCUMULATE_DIFFS(2);
    ACCUMULATE_DIFFS(3);
    ACCUMULATE_DIFFS(4);
    ACCUMULATE_DIFFS(5);
    ACCUMULATE_DIFFS(6);
    ACCUMULATE_DIFFS(7);
#undef ACCUMULATE_DIFFS

    v_dp = vec_sums(v_dp, zero);
    v_sysdp = vec_sums(v_sysdp, zero);

    v_dp = vec_splat(v_dp, 3);
    v_sysdp = vec_splat(v_sysdp, 3);

    vec_ste(v_dp, 0, &d);
    vec_ste(v_sysdp, 0, &sysd);

    i = d;
    d = (4*d
         +(*(tempBluredPast-256))
         +(*(tempBluredPast-1)) + (*(tempBluredPast+1))
         +(*(tempBluredPast+256))
         +4)>>3;

    *tempBluredPast = i;

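    /* Blend strength by noise level: above maxNoise[2] copy src
       outright, between maxNoise[1] and maxNoise[2] average 50/50, below
       maxNoise[0] keep 7/8 of the history, otherwise keep 3/4. */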
    if (d > maxNoise[1]) {
        if (d < maxNoise[2]) {
#define OP(i) v_tempBluredAss##i = vec_avg(v_tempBluredAss##i, v_srcAss##i);

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        } else {
#define OP(i) v_tempBluredAss##i = v_srcAss##i;

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        }
    } else {
        if (d < maxNoise[0]) {
            const vector signed short vsint16_7 = vec_splat_s16(7);
            const vector signed short vsint16_4 = vec_splat_s16(4);
            const vector unsigned short vuint16_3 = vec_splat_u16(3);

#define OP(i)                                                   \
            const vector signed short v_temp##i =               \
                vec_mladd(v_tempBluredAss##i,                   \
                          vsint16_7, v_srcAss##i);              \
            const vector signed short v_temp2##i =              \
                vec_add(v_temp##i, vsint16_4);                  \
            v_tempBluredAss##i = vec_sr(v_temp2##i, vuint16_3)

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        } else {
            const vector signed short vsint16_3 = vec_splat_s16(3);
            const vector signed short vsint16_2 = vec_splat_s16(2);

#define OP(i)                                                   \
            const vector signed short v_temp##i =               \
                vec_mladd(v_tempBluredAss##i,                   \
                          vsint16_3, v_srcAss##i);              \
            const vector signed short v_temp2##i =              \
                vec_add(v_temp##i, vsint16_2);                  \
            v_tempBluredAss##i = vec_sr(v_temp2##i, (vector unsigned short)vsint16_2)

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        }
    }

    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                        0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

#define PACK_AND_STORE(src, i)                                                              \
    const vector unsigned char perms##src##i =                                              \
        vec_lvsr(i * stride, src);                                                          \
    const vector unsigned char vf##src##i =                                                 \
        vec_packsu(v_tempBluredAss##i, (vector signed short)zero);                          \
    const vector unsigned char vg##src##i =                                                 \
        vec_perm(vf##src##i, v_##src##A##i, permHH);                                        \
    const vector unsigned char mask##src##i =                                               \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##src##i);    \
    const vector unsigned char vg2##src##i =                                                \
        vec_perm(vg##src##i, vg##src##i, perms##src##i);                                    \
    const vector unsigned char svA##src##i =                                                \
        vec_sel(v_##src##A1##i, vg2##src##i, mask##src##i);                                 \
    const vector unsigned char svB##src##i =                                                \
        vec_sel(vg2##src##i, v_##src##A2##i, mask##src##i);                                 \
    vec_st(svA##src##i, i * stride, src);                                                   \
    vec_st(svB##src##i, i * stride + 16, src)

    PACK_AND_STORE(src, 0);
    PACK_AND_STORE(src, 1);
    PACK_AND_STORE(src, 2);
    PACK_AND_STORE(src, 3);
    PACK_AND_STORE(src, 4);
    PACK_AND_STORE(src, 5);
    PACK_AND_STORE(src, 6);
    PACK_AND_STORE(src, 7);
    PACK_AND_STORE(tempBlured, 0);
    PACK_AND_STORE(tempBlured, 1);
    PACK_AND_STORE(tempBlured, 2);
    PACK_AND_STORE(tempBlured, 3);
    PACK_AND_STORE(tempBlured, 4);
    PACK_AND_STORE(tempBlured, 5);
    PACK_AND_STORE(tempBlured, 6);
    PACK_AND_STORE(tempBlured, 7);
#undef PACK_AND_STORE
}