/*
 * AltiVec optimizations (C) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * based on code by Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avutil.h"

#define ALTIVEC_TRANSPOSE_8x8_SHORT(src_a,src_b,src_c,src_d,src_e,src_f,src_g,src_h) \
    do {                                                 \
        __typeof__(src_a) tempA1, tempB1, tempC1, tempD1; \
        __typeof__(src_a) tempE1, tempF1, tempG1, tempH1; \
        __typeof__(src_a) tempA2, tempB2, tempC2, tempD2; \
        __typeof__(src_a) tempE2, tempF2, tempG2, tempH2; \
        tempA1 = vec_mergeh (src_a, src_e);              \
        tempB1 = vec_mergel (src_a, src_e);              \
        tempC1 = vec_mergeh (src_b, src_f);              \
        tempD1 = vec_mergel (src_b, src_f);              \
        tempE1 = vec_mergeh (src_c, src_g);              \
        tempF1 = vec_mergel (src_c, src_g);              \
        tempG1 = vec_mergeh (src_d, src_h);              \
        tempH1 = vec_mergel (src_d, src_h);              \
        tempA2 = vec_mergeh (tempA1, tempE1);            \
        tempB2 = vec_mergel (tempA1, tempE1);            \
        tempC2 = vec_mergeh (tempB1, tempF1);            \
        tempD2 = vec_mergel (tempB1, tempF1);            \
        tempE2 = vec_mergeh (tempC1, tempG1);            \
        tempF2 = vec_mergel (tempC1, tempG1);            \
        tempG2 = vec_mergeh (tempD1, tempH1);            \
        tempH2 = vec_mergel (tempD1, tempH1);            \
        src_a  = vec_mergeh (tempA2, tempE2);            \
        src_b  = vec_mergel (tempA2, tempE2);            \
        src_c  = vec_mergeh (tempB2, tempF2);            \
        src_d  = vec_mergel (tempB2, tempF2);            \
        src_e  = vec_mergeh (tempC2, tempG2);            \
        src_f  = vec_mergel (tempC2, tempG2);            \
        src_g  = vec_mergeh (tempD2, tempH2);            \
        src_h  = vec_mergel (tempD2, tempH2);            \
    } while (0)
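
/* The macro above is the classic merge-based transpose: three rounds of
 * vec_mergeh/vec_mergel (first on rows 4 apart, then on the resulting
 * intermediates twice more) move every 16-bit element of an 8x8 block to
 * its transposed position. For reference, it computes the same thing as
 * this (purely illustrative) scalar loop:
 *
 *     short t[8][8];
 *     for (int i = 0; i < 8; i++)
 *         for (int j = 0; j < 8; j++)
 *             t[j][i] = m[i][j];    // m = rows src_a..src_h
 *
 * but entirely in registers, without any loads or stores. */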


static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
      This code makes no assumptions about src or stride.
      One could avoid recomputing the perm vector by assuming
      (stride % 16) == 0; unfortunately this is not always true.
    */
    short data_0 = ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
    DECLARE_ALIGNED(16, short, data)[8] =
                    {
                        data_0,
                        data_0 * 2 + 1,
                        c->QP * 2,
                        c->QP * 4
                    };
    int numEq;
    uint8_t *src2 = src;
    vector signed short v_dcOffset;
    vector signed short v2QP;
    vector unsigned short v4QP;
    vector unsigned short v_dcThreshold;
    const int properStride = (stride % 16);
    const int srcAlign = ((unsigned long)src2 % 16);
    const int two_vectors = ((srcAlign > 8) || properStride) ? 1 : 0;
    const vector signed int zero = vec_splat_s32(0);
    const vector signed short mask = vec_splat_s16(1);
    vector signed int v_numEq = vec_splat_s32(0);
    vector signed short v_data = vec_ld(0, data);
    vector signed short v_srcAss0, v_srcAss1, v_srcAss2, v_srcAss3,
                        v_srcAss4, v_srcAss5, v_srcAss6, v_srcAss7;
    //FIXME avoid this mess if possible
    register int j0 = 0,
                 j1 = stride,
                 j2 = 2 * stride,
                 j3 = 3 * stride,
                 j4 = 4 * stride,
                 j5 = 5 * stride,
                 j6 = 6 * stride,
                 j7 = 7 * stride;
    vector unsigned char v_srcA0, v_srcA1, v_srcA2, v_srcA3,
                         v_srcA4, v_srcA5, v_srcA6, v_srcA7;

    v_dcOffset = vec_splat(v_data, 0);
    v_dcThreshold = (vector unsigned short)vec_splat(v_data, 1);
    v2QP = vec_splat(v_data, 2);
    v4QP = (vector unsigned short)vec_splat(v_data, 3);

    src2 += stride * 4;

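/* LOAD_LINE uses the standard AltiVec idiom for a possibly unaligned
 * load: vec_lvsl() derives a permute vector from the address, two
 * aligned vec_ld()s fetch the quadwords straddling the wanted bytes, and
 * vec_perm() extracts them. When two_vectors is 0 the source offset is
 * at most 8 and the stride preserves that alignment, so the 8 bytes
 * consumed by the final vec_mergeh() all come from the first quadword;
 * the second load is skipped and the then-uninitialized v_srcA2##i never
 * influences the bytes actually consumed. */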
#define LOAD_LINE(i)                                                    \
    {                                                                   \
    vector unsigned char perm##i = vec_lvsl(j##i, src2);                \
    vector unsigned char v_srcA2##i;                                    \
    vector unsigned char v_srcA1##i = vec_ld(j##i, src2);               \
    if (two_vectors)                                                    \
        v_srcA2##i = vec_ld(j##i + 16, src2);                           \
    v_srcA##i =                                                         \
        vec_perm(v_srcA1##i, v_srcA2##i, perm##i);                      \
    v_srcAss##i =                                                       \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)v_srcA##i); }

#define LOAD_LINE_ALIGNED(i)                                            \
    v_srcA##i = vec_ld(j##i, src2);                                     \
    v_srcAss##i =                                                       \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)v_srcA##i)

    /* Special-casing the aligned case is worthwhile, as all calls from
     * the (transposed) horizontal deblocks will be aligned, in addition
     * to the naturally aligned vertical deblocks. */
    if (properStride && srcAlign) {
        LOAD_LINE_ALIGNED(0);
        LOAD_LINE_ALIGNED(1);
        LOAD_LINE_ALIGNED(2);
        LOAD_LINE_ALIGNED(3);
        LOAD_LINE_ALIGNED(4);
        LOAD_LINE_ALIGNED(5);
        LOAD_LINE_ALIGNED(6);
        LOAD_LINE_ALIGNED(7);
    } else {
        LOAD_LINE(0);
        LOAD_LINE(1);
        LOAD_LINE(2);
        LOAD_LINE(3);
        LOAD_LINE(4);
        LOAD_LINE(5);
        LOAD_LINE(6);
        LOAD_LINE(7);
    }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED

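/* ITER counts "flat" neighbour pairs using the usual unsigned
 * range-check trick: with dcThreshold == 2*dcOffset + 1, the unsigned
 * comparison (diff + dcOffset) < dcThreshold holds exactly when
 * -dcOffset <= diff <= dcOffset, i.e. |diff| <= dcOffset, so no absolute
 * value is needed. Each passing lane contributes a 1 (via `mask`) that
 * is later summed horizontally into numEq. */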
#define ITER(i, j)                                                      \
    const vector signed short v_diff##i =                               \
        vec_sub(v_srcAss##i, v_srcAss##j);                              \
    const vector signed short v_sum##i =                                \
        vec_add(v_diff##i, v_dcOffset);                                 \
    const vector signed short v_comp##i =                               \
        (vector signed short)vec_cmplt((vector unsigned short)v_sum##i, \
                                       v_dcThreshold);                  \
    const vector signed short v_part##i = vec_and(mask, v_comp##i);

    {
        ITER(0, 1)
        ITER(1, 2)
        ITER(2, 3)
        ITER(3, 4)
        ITER(4, 5)
        ITER(5, 6)
        ITER(6, 7)

        v_numEq = vec_sum4s(v_part0, v_numEq);
        v_numEq = vec_sum4s(v_part1, v_numEq);
        v_numEq = vec_sum4s(v_part2, v_numEq);
        v_numEq = vec_sum4s(v_part3, v_numEq);
        v_numEq = vec_sum4s(v_part4, v_numEq);
        v_numEq = vec_sum4s(v_part5, v_numEq);
        v_numEq = vec_sum4s(v_part6, v_numEq);
    }

#undef ITER

    v_numEq = vec_sums(v_numEq, zero);

    v_numEq = vec_splat(v_numEq, 3);
    vec_ste(v_numEq, 0, &numEq);

    if (numEq > c->ppMode.flatnessThreshold){
        const vector unsigned char mmoP1 = (const vector unsigned char)
            {0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
             0x00, 0x01, 0x12, 0x13, 0x08, 0x09, 0x1A, 0x1B};
        const vector unsigned char mmoP2 = (const vector unsigned char)
            {0x04, 0x05, 0x16, 0x17, 0x0C, 0x0D, 0x1E, 0x1F,
             0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f};
        const vector unsigned char mmoP = (const vector unsigned char)
            vec_lvsl(8, (unsigned char*)0);

        vector signed short mmoL1 = vec_perm(v_srcAss0, v_srcAss2, mmoP1);
        vector signed short mmoL2 = vec_perm(v_srcAss4, v_srcAss6, mmoP2);
        vector signed short mmoL = vec_perm(mmoL1, mmoL2, mmoP);
        vector signed short mmoR1 = vec_perm(v_srcAss5, v_srcAss7, mmoP1);
        vector signed short mmoR2 = vec_perm(v_srcAss1, v_srcAss3, mmoP2);
        vector signed short mmoR = vec_perm(mmoR1, mmoR2, mmoP);
        vector signed short mmoDiff = vec_sub(mmoL, mmoR);
        vector unsigned short mmoSum = (vector unsigned short)vec_add(mmoDiff, v2QP);

        if (vec_any_gt(mmoSum, v4QP))
            return 0;
        else
            return 1;
    }
    else return 2;
}

static inline void doVertLowPass_altivec(uint8_t *src, int stride, PPContext *c) {
    /*
      This code makes no assumptions about src or stride.
      One could avoid recomputing the perm vector by assuming
      (stride % 16) == 0; unfortunately this is not always true.
      Quite a lot of loads/stores could also be removed by assuming
      proper alignment of src & stride :-(
    */
    uint8_t *src2 = src;
    const vector signed int zero = vec_splat_s32(0);
    const int properStride = (stride % 16);
    const int srcAlign = ((unsigned long)src2 % 16);
    DECLARE_ALIGNED(16, short, qp)[8] = {c->QP};
    vector signed short vqp = vec_ld(0, qp);
    vector signed short vb0, vb1, vb2, vb3, vb4, vb5, vb6, vb7, vb8, vb9;
    vector unsigned char vbA0, av_uninit(vbA1), av_uninit(vbA2), av_uninit(vbA3), av_uninit(vbA4), av_uninit(vbA5), av_uninit(vbA6), av_uninit(vbA7), av_uninit(vbA8), vbA9;
    vector unsigned char vbB0, av_uninit(vbB1), av_uninit(vbB2), av_uninit(vbB3), av_uninit(vbB4), av_uninit(vbB5), av_uninit(vbB6), av_uninit(vbB7), av_uninit(vbB8), vbB9;
    vector unsigned char vbT0, vbT1, vbT2, vbT3, vbT4, vbT5, vbT6, vbT7, vbT8, vbT9;
    vector unsigned char perml0, perml1, perml2, perml3, perml4,
                         perml5, perml6, perml7, perml8, perml9;
    register int j0 = 0,
                 j1 = stride,
                 j2 = 2 * stride,
                 j3 = 3 * stride,
                 j4 = 4 * stride,
                 j5 = 5 * stride,
                 j6 = 6 * stride,
                 j7 = 7 * stride,
                 j8 = 8 * stride,
                 j9 = 9 * stride;

    vqp = vec_splat(vqp, 0);

    src2 += stride*3;

#define LOAD_LINE(i)                                                    \
    perml##i = vec_lvsl(i * stride, src2);                              \
    vbA##i = vec_ld(i * stride, src2);                                  \
    vbB##i = vec_ld(i * stride + 16, src2);                             \
    vbT##i = vec_perm(vbA##i, vbB##i, perml##i);                        \
    vb##i =                                                             \
        (vector signed short)vec_mergeh((vector unsigned char)zero,     \
                                        (vector unsigned char)vbT##i)

#define LOAD_LINE_ALIGNED(i)                                            \
    vbT##i = vec_ld(j##i, src2);                                        \
    vb##i =                                                             \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)vbT##i)

    /* Special-casing the aligned case is worthwhile, as all calls from
     * the (transposed) horizontal deblocks will be aligned, in addition
     * to the naturally aligned vertical deblocks. */
    if (properStride && srcAlign) {
        LOAD_LINE_ALIGNED(0);
        LOAD_LINE_ALIGNED(1);
        LOAD_LINE_ALIGNED(2);
        LOAD_LINE_ALIGNED(3);
        LOAD_LINE_ALIGNED(4);
        LOAD_LINE_ALIGNED(5);
        LOAD_LINE_ALIGNED(6);
        LOAD_LINE_ALIGNED(7);
        LOAD_LINE_ALIGNED(8);
        LOAD_LINE_ALIGNED(9);
    } else {
        LOAD_LINE(0);
        LOAD_LINE(1);
        LOAD_LINE(2);
        LOAD_LINE(3);
        LOAD_LINE(4);
        LOAD_LINE(5);
        LOAD_LINE(6);
        LOAD_LINE(7);
        LOAD_LINE(8);
        LOAD_LINE(9);
    }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED
    {
    const vector unsigned short v_2 = vec_splat_u16(2);
    const vector unsigned short v_4 = vec_splat_u16(4);

    const vector signed short v_diff01 = vec_sub(vb0, vb1);
    const vector unsigned short v_cmp01 =
        (const vector unsigned short) vec_cmplt(vec_abs(v_diff01), vqp);
    const vector signed short v_first = vec_sel(vb1, vb0, v_cmp01);
    const vector signed short v_diff89 = vec_sub(vb8, vb9);
    const vector unsigned short v_cmp89 =
        (const vector unsigned short) vec_cmplt(vec_abs(v_diff89), vqp);
    const vector signed short v_last = vec_sel(vb8, vb9, v_cmp89);

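    /* v_sumsB0..v_sumsB9 form a sliding window sum over the ten lines,
     * with v_first/v_last standing in for the replicated border samples
     * and a rounding bias of 4 folded in. COMPUTE_VR then yields
     * (sums[j-1] + sums[j+1] + 2*b[j]) >> 4, a low-pass whose weights
     * total 16 -- the vector analogue of what doVertLowPass_C computes. */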
    const vector signed short temp01 = vec_mladd(v_first, (vector signed short)v_4, vb1);
    const vector signed short temp02 = vec_add(vb2, vb3);
    const vector signed short temp03 = vec_add(temp01, (vector signed short)v_4);
    const vector signed short v_sumsB0 = vec_add(temp02, temp03);

    const vector signed short temp11 = vec_sub(v_sumsB0, v_first);
    const vector signed short v_sumsB1 = vec_add(temp11, vb4);

    const vector signed short temp21 = vec_sub(v_sumsB1, v_first);
    const vector signed short v_sumsB2 = vec_add(temp21, vb5);

    const vector signed short temp31 = vec_sub(v_sumsB2, v_first);
    const vector signed short v_sumsB3 = vec_add(temp31, vb6);

    const vector signed short temp41 = vec_sub(v_sumsB3, v_first);
    const vector signed short v_sumsB4 = vec_add(temp41, vb7);

    const vector signed short temp51 = vec_sub(v_sumsB4, vb1);
    const vector signed short v_sumsB5 = vec_add(temp51, vb8);

    const vector signed short temp61 = vec_sub(v_sumsB5, vb2);
    const vector signed short v_sumsB6 = vec_add(temp61, v_last);

    const vector signed short temp71 = vec_sub(v_sumsB6, vb3);
    const vector signed short v_sumsB7 = vec_add(temp71, v_last);

    const vector signed short temp81 = vec_sub(v_sumsB7, vb4);
    const vector signed short v_sumsB8 = vec_add(temp81, v_last);

    const vector signed short temp91 = vec_sub(v_sumsB8, vb5);
    const vector signed short v_sumsB9 = vec_add(temp91, v_last);

#define COMPUTE_VR(i, j, k)                                             \
    const vector signed short temps1##i =                               \
        vec_add(v_sumsB##i, v_sumsB##k);                                \
    const vector signed short temps2##i =                               \
        vec_mladd(vb##j, (vector signed short)v_2, temps1##i);          \
    const vector signed short vr##j = vec_sra(temps2##i, v_4)

    COMPUTE_VR(0, 1, 2);
    COMPUTE_VR(1, 2, 3);
    COMPUTE_VR(2, 3, 4);
    COMPUTE_VR(3, 4, 5);
    COMPUTE_VR(4, 5, 6);
    COMPUTE_VR(5, 6, 7);
    COMPUTE_VR(6, 7, 8);
    COMPUTE_VR(7, 8, 9);

    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                     0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};

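/* PACK_AND_STORE performs a possibly unaligned store as a
 * read-modify-write: vec_lvsr() gives the alignment rotation, the
 * 0x00/0xFF byte mask built from it marks which bytes of the two
 * straddled quadwords belong to the filtered data, and vec_sel() merges
 * the rotated result with the previously loaded vbA##i/vbB##i so the
 * bytes outside the block are written back unchanged. */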
#define PACK_AND_STORE(i)                                               \
{   const vector unsigned char perms##i =                               \
        vec_lvsr(i * stride, src2);                                     \
    const vector unsigned char vf##i =                                  \
        vec_packsu(vr##i, (vector signed short)zero);                   \
    const vector unsigned char vg##i =                                  \
        vec_perm(vf##i, vbT##i, permHH);                                \
    const vector unsigned char mask##i =                                \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
    const vector unsigned char vg2##i =                                 \
        vec_perm(vg##i, vg##i, perms##i);                               \
    const vector unsigned char svA##i =                                 \
        vec_sel(vbA##i, vg2##i, mask##i);                               \
    const vector unsigned char svB##i =                                 \
        vec_sel(vg2##i, vbB##i, mask##i);                               \
    vec_st(svA##i, i * stride, src2);                                   \
    vec_st(svB##i, i * stride + 16, src2);}

#define PACK_AND_STORE_ALIGNED(i)                                      \
{   const vector unsigned char vf##i =                                  \
        vec_packsu(vr##i, (vector signed short)zero);                   \
    const vector unsigned char vg##i =                                  \
        vec_perm(vf##i, vbT##i, permHH);                                \
    vec_st(vg##i, i * stride, src2);}

    /* Special-casing the aligned case is worthwhile, as all calls from
     * the (transposed) horizontal deblocks will be aligned, in addition
     * to the naturally aligned vertical deblocks. */
    if (properStride && srcAlign) {
        PACK_AND_STORE_ALIGNED(1)
        PACK_AND_STORE_ALIGNED(2)
        PACK_AND_STORE_ALIGNED(3)
        PACK_AND_STORE_ALIGNED(4)
        PACK_AND_STORE_ALIGNED(5)
        PACK_AND_STORE_ALIGNED(6)
        PACK_AND_STORE_ALIGNED(7)
        PACK_AND_STORE_ALIGNED(8)
    } else {
        PACK_AND_STORE(1)
        PACK_AND_STORE(2)
        PACK_AND_STORE(3)
        PACK_AND_STORE(4)
        PACK_AND_STORE(5)
        PACK_AND_STORE(6)
        PACK_AND_STORE(7)
        PACK_AND_STORE(8)
    }
#undef PACK_AND_STORE
#undef PACK_AND_STORE_ALIGNED
    }
}


static inline void doVertDefFilter_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
      This code makes no assumptions about src or stride.
      One could avoid recomputing the perm vector by assuming
      (stride % 16) == 0; unfortunately this is not always true.
      Quite a lot of loads/stores could also be removed by assuming
      proper alignment of src & stride :-(
    */
    uint8_t *src2 = src + stride*3;
    const vector signed int zero = vec_splat_s32(0);
    DECLARE_ALIGNED(16, short, qp)[8] = {8*c->QP};
    vector signed short vqp = vec_splat(
        (vector signed short)vec_ld(0, qp), 0);

#define LOAD_LINE(i)                                                    \
    const vector unsigned char perm##i =                                \
        vec_lvsl(i * stride, src2);                                     \
    const vector unsigned char vbA##i =                                 \
        vec_ld(i * stride, src2);                                       \
    const vector unsigned char vbB##i =                                 \
        vec_ld(i * stride + 16, src2);                                  \
    const vector unsigned char vbT##i =                                 \
        vec_perm(vbA##i, vbB##i, perm##i);                              \
    const vector signed short vb##i =                                   \
        (vector signed short)vec_mergeh((vector unsigned char)zero,     \
                                        (vector unsigned char)vbT##i)

    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
#undef LOAD_LINE

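    /* The default filter proper: a "middle energy" mE = 5*(b5-b4) +
     * 2*(b3-b6) across the edge between lines 4 and 5, the analogous
     * left/right energies lE and rE one step away, and a correction
     * d ~ (5*max(|mE| - min(|lE|,|rE|), 0) + 32) >> 6 whose sign follows
     * -mE and whose magnitude is clamped to half the edge step q; d is
     * applied only where |mE| < 8*QP. This mirrors the scalar
     * doVertDefFilter_C. */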
    const vector signed short v_1 = vec_splat_s16(1);
    const vector signed short v_2 = vec_splat_s16(2);
    const vector signed short v_5 = vec_splat_s16(5);
    const vector signed short v_32 = vec_sl(v_1,
                                            (vector unsigned short)v_5);
    /* middle energy */
    const vector signed short l3minusl6 = vec_sub(vb3, vb6);
    const vector signed short l5minusl4 = vec_sub(vb5, vb4);
    const vector signed short twotimes_l3minusl6 = vec_mladd(v_2, l3minusl6, (vector signed short)zero);
    const vector signed short mE = vec_mladd(v_5, l5minusl4, twotimes_l3minusl6);
    const vector signed short absmE = vec_abs(mE);
    /* left & right energy */
    const vector signed short l1minusl4 = vec_sub(vb1, vb4);
    const vector signed short l3minusl2 = vec_sub(vb3, vb2);
    const vector signed short l5minusl8 = vec_sub(vb5, vb8);
    const vector signed short l7minusl6 = vec_sub(vb7, vb6);
    const vector signed short twotimes_l1minusl4 = vec_mladd(v_2, l1minusl4, (vector signed short)zero);
    const vector signed short twotimes_l5minusl8 = vec_mladd(v_2, l5minusl8, (vector signed short)zero);
    const vector signed short lE = vec_mladd(v_5, l3minusl2, twotimes_l1minusl4);
    const vector signed short rE = vec_mladd(v_5, l7minusl6, twotimes_l5minusl8);
    /* d */
    const vector signed short ddiff = vec_sub(absmE,
                                              vec_min(vec_abs(lE),
                                                      vec_abs(rE)));
    const vector signed short ddiffclamp = vec_max(ddiff, (vector signed short)zero);
    const vector signed short dtimes64 = vec_mladd(v_5, ddiffclamp, v_32);
    const vector signed short d = vec_sra(dtimes64, vec_splat_u16(6));
    const vector signed short minusd = vec_sub((vector signed short)zero, d);
    const vector signed short finald = vec_sel(minusd,
                                               d,
                                               vec_cmpgt(vec_sub((vector signed short)zero, mE),
                                                         (vector signed short)zero));
    /* q */
    const vector signed short qtimes2 = vec_sub(vb4, vb5);
    /* For an arithmetic shift right to behave like a division by 2 we
     * must add one to negative values first: e.g. -3 >> 1 == -2, but
     * (-3 + 1) >> 1 == -1 == -3 / 2. */
    const vector signed short rounddown = vec_sel((vector signed short)zero,
                                                  v_1,
                                                  vec_cmplt(qtimes2, (vector signed short)zero));
    const vector signed short q = vec_sra(vec_add(qtimes2, rounddown), vec_splat_u16(1));
    /* clamp */
    const vector signed short dclamp_P1 = vec_max((vector signed short)zero, finald);
    const vector signed short dclamp_P = vec_min(dclamp_P1, q);
    const vector signed short dclamp_N1 = vec_min((vector signed short)zero, finald);
    const vector signed short dclamp_N = vec_max(dclamp_N1, q);

    const vector signed short dclampedfinal = vec_sel(dclamp_N,
                                                      dclamp_P,
                                                      vec_cmpgt(q, (vector signed short)zero));
    const vector signed short dornotd = vec_sel((vector signed short)zero,
                                                dclampedfinal,
                                                vec_cmplt(absmE, vqp));
    /* add/subtract to l4 and l5 */
    const vector signed short vb4minusd = vec_sub(vb4, dornotd);
    const vector signed short vb5plusd  = vec_add(vb5, dornotd);
    /* finally, stores */
    const vector unsigned char st4 = vec_packsu(vb4minusd, (vector signed short)zero);
    const vector unsigned char st5 = vec_packsu(vb5plusd,  (vector signed short)zero);

    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                     0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};

#define STORE(i)                                                        \
{   const vector unsigned char perms##i =                               \
        vec_lvsr(i * stride, src2);                                     \
    const vector unsigned char vg##i =                                  \
        vec_perm(st##i, vbT##i, permHH);                                \
    const vector unsigned char mask##i =                                \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
    const vector unsigned char vg2##i =                                 \
        vec_perm(vg##i, vg##i, perms##i);                               \
    const vector unsigned char svA##i =                                 \
        vec_sel(vbA##i, vg2##i, mask##i);                               \
    const vector unsigned char svB##i =                                 \
        vec_sel(vg2##i, vbB##i, mask##i);                               \
    vec_st(svA##i, i * stride, src2);                                   \
    vec_st(svB##i, i * stride + 16, src2);}

    STORE(4)
    STORE(5)
}

static inline void dering_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
      This code makes no assumptions about src or stride.
      One could avoid recomputing the perm vector by assuming
      (stride % 16) == 0; unfortunately this is not always true.
      Quite a lot of loads/stores could also be removed by assuming
      proper alignment of src & stride :-(
    */
    uint8_t *srcCopy = src;
    DECLARE_ALIGNED(16, uint8_t, dt)[16];
    const vector signed int zero = vec_splat_s32(0);
    vector unsigned char v_dt;
    dt[0] = deringThreshold;
    v_dt = vec_splat(vec_ld(0, dt), 0);

#define LOAD_LINE(i)                                                    \
    const vector unsigned char perm##i =                                \
        vec_lvsl(i * stride, srcCopy);                                  \
    vector unsigned char sA##i = vec_ld(i * stride, srcCopy);           \
    vector unsigned char sB##i = vec_ld(i * stride + 16, srcCopy);      \
    vector unsigned char src##i = vec_perm(sA##i, sB##i, perm##i)

    LOAD_LINE(0);
    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
    LOAD_LINE(9);
#undef LOAD_LINE

    vector unsigned char v_avg;
    {
    const vector unsigned char trunc_perm = (vector unsigned char)
        {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
         0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18};
    const vector unsigned char trunc_src12 = vec_perm(src1, src2, trunc_perm);
    const vector unsigned char trunc_src34 = vec_perm(src3, src4, trunc_perm);
    const vector unsigned char trunc_src56 = vec_perm(src5, src6, trunc_perm);
    const vector unsigned char trunc_src78 = vec_perm(src7, src8, trunc_perm);

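    /* EXTRACT is a horizontal min/max reduction: pairwise vec_min/vec_max
     * over the four line pairs, then alternating mergeh/mergel + op steps
     * repeatedly fold the vector onto itself until every byte lane of
     * v_min/v_max holds the minimum/maximum of the whole block interior. */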
#define EXTRACT(op) do {                                                \
    const vector unsigned char s##op##_1   = vec_##op(trunc_src12, trunc_src34); \
    const vector unsigned char s##op##_2   = vec_##op(trunc_src56, trunc_src78); \
    const vector unsigned char s##op##_6   = vec_##op(s##op##_1, s##op##_2);     \
    const vector unsigned char s##op##_8h  = vec_mergeh(s##op##_6, s##op##_6);   \
    const vector unsigned char s##op##_8l  = vec_mergel(s##op##_6, s##op##_6);   \
    const vector unsigned char s##op##_9   = vec_##op(s##op##_8h, s##op##_8l);   \
    const vector unsigned char s##op##_9h  = vec_mergeh(s##op##_9, s##op##_9);   \
    const vector unsigned char s##op##_9l  = vec_mergel(s##op##_9, s##op##_9);   \
    const vector unsigned char s##op##_10  = vec_##op(s##op##_9h, s##op##_9l);   \
    const vector unsigned char s##op##_10h = vec_mergeh(s##op##_10, s##op##_10); \
    const vector unsigned char s##op##_10l = vec_mergel(s##op##_10, s##op##_10); \
    const vector unsigned char s##op##_11  = vec_##op(s##op##_10h, s##op##_10l); \
    const vector unsigned char s##op##_11h = vec_mergeh(s##op##_11, s##op##_11); \
    const vector unsigned char s##op##_11l = vec_mergel(s##op##_11, s##op##_11); \
    v_##op = vec_##op(s##op##_11h, s##op##_11l); } while (0)

    vector unsigned char v_min;
    vector unsigned char v_max;
    EXTRACT(min);
    EXTRACT(max);
#undef EXTRACT

    if (vec_all_lt(vec_sub(v_max, v_min), v_dt))
        return;

    v_avg = vec_avg(v_min, v_max);
    }

    DECLARE_ALIGNED(16, signed int, S)[8];
    {
    const vector unsigned short mask1 = (vector unsigned short)
        {0x0001, 0x0002, 0x0004, 0x0008,
         0x0010, 0x0020, 0x0040, 0x0080};
    const vector unsigned short mask2 = (vector unsigned short)
        {0x0100, 0x0200, 0x0000, 0x0000,
         0x0000, 0x0000, 0x0000, 0x0000};

    const vector unsigned int vuint32_16 = vec_sl(vec_splat_u32(1), vec_splat_u32(4));
    const vector unsigned int vuint32_1 = vec_splat_u32(1);

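    /* COMPARE builds a per-line bitmask of "pixel > average": the byte
     * comparison is widened to shorts, ANDed with mask1/mask2 so that
     * pixel n contributes bit n (10 pixels, hence the two masks), and the
     * lanes are horizontally summed (vec_sum4s + vec_sums) into a single
     * integer bitmask per line. */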
#define COMPARE(i)                                                      \
    vector signed int sum##i;                                           \
    do {                                                                \
        const vector unsigned char cmp##i =                             \
            (vector unsigned char)vec_cmpgt(src##i, v_avg);             \
        const vector unsigned short cmpHi##i =                          \
            (vector unsigned short)vec_mergeh(cmp##i, cmp##i);          \
        const vector unsigned short cmpLi##i =                          \
            (vector unsigned short)vec_mergel(cmp##i, cmp##i);          \
        const vector signed short cmpHf##i =                            \
            (vector signed short)vec_and(cmpHi##i, mask1);              \
        const vector signed short cmpLf##i =                            \
            (vector signed short)vec_and(cmpLi##i, mask2);              \
        const vector signed int sump##i = vec_sum4s(cmpHf##i, zero);    \
        const vector signed int sumq##i = vec_sum4s(cmpLf##i, sump##i); \
        sum##i = vec_sums(sumq##i, zero); } while (0)

    COMPARE(0);
    COMPARE(1);
    COMPARE(2);
    COMPARE(3);
    COMPARE(4);
    COMPARE(5);
    COMPARE(6);
    COMPARE(7);
    COMPARE(8);
    COMPARE(9);
#undef COMPARE

    vector signed int sumA2;
    vector signed int sumB2;
    {
    const vector signed int sump02 = vec_mergel(sum0, sum2);
    const vector signed int sump13 = vec_mergel(sum1, sum3);
    const vector signed int sumA = vec_mergel(sump02, sump13);

    const vector signed int sump46 = vec_mergel(sum4, sum6);
    const vector signed int sump57 = vec_mergel(sum5, sum7);
    const vector signed int sumB = vec_mergel(sump46, sump57);

    const vector signed int sump8A = vec_mergel(sum8, zero);
    const vector signed int sump9B = vec_mergel(sum9, zero);
    const vector signed int sumC = vec_mergel(sump8A, sump9B);

    const vector signed int tA = vec_sl(vec_nor(zero, sumA), vuint32_16);
    const vector signed int tB = vec_sl(vec_nor(zero, sumB), vuint32_16);
    const vector signed int tC = vec_sl(vec_nor(zero, sumC), vuint32_16);
    const vector signed int t2A = vec_or(sumA, tA);
    const vector signed int t2B = vec_or(sumB, tB);
    const vector signed int t2C = vec_or(sumC, tC);
    const vector signed int t3A = vec_and(vec_sra(t2A, vuint32_1),
                                          vec_sl(t2A, vuint32_1));
    const vector signed int t3B = vec_and(vec_sra(t2B, vuint32_1),
                                          vec_sl(t2B, vuint32_1));
    const vector signed int t3C = vec_and(vec_sra(t2C, vuint32_1),
                                          vec_sl(t2C, vuint32_1));
    const vector signed int yA = vec_and(t2A, t3A);
    const vector signed int yB = vec_and(t2B, t3B);
    const vector signed int yC = vec_and(t2C, t3C);

    const vector unsigned char strangeperm1 = vec_lvsl(4, (unsigned char*)0);
    const vector unsigned char strangeperm2 = vec_lvsl(8, (unsigned char*)0);
    const vector signed int sumAd4 = vec_perm(yA, yB, strangeperm1);
    const vector signed int sumAd8 = vec_perm(yA, yB, strangeperm2);
    const vector signed int sumBd4 = vec_perm(yB, yC, strangeperm1);
    const vector signed int sumBd8 = vec_perm(yB, yC, strangeperm2);
    const vector signed int sumAp = vec_and(yA,
                                            vec_and(sumAd4, sumAd8));
    const vector signed int sumBp = vec_and(yB,
                                            vec_and(sumBd4, sumBd8));
    sumA2 = vec_or(sumAp,
                   vec_sra(sumAp,
                           vuint32_16));
    sumB2 = vec_or(sumBp,
                   vec_sra(sumBp,
                           vuint32_16));
    }
    vec_st(sumA2, 0, S);
    vec_st(sumB2, 16, S);
    }

    /* I'm not sure the following is actually faster
       than straight, unvectorized C code :-( */

    DECLARE_ALIGNED(16, int, tQP2)[4];
    tQP2[0] = c->QP/2 + 1;
    vector signed int vQP2 = vec_ld(0, tQP2);
    vQP2 = vec_splat(vQP2, 0);
    const vector signed int vsint32_8 = vec_splat_s32(8);
    const vector unsigned int vuint32_4 = vec_splat_u32(4);

    const vector unsigned char permA1 = (vector unsigned char)
        {0x00, 0x01, 0x02, 0x10, 0x11, 0x12, 0x1F, 0x1F,
         0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F};
    const vector unsigned char permA2 = (vector unsigned char)
        {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x10, 0x11,
         0x12, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F};
    const vector unsigned char permA1inc = (vector unsigned char)
        {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char permA2inc = (vector unsigned char)
        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01,
         0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char magic = (vector unsigned char)
        {0x01, 0x02, 0x01, 0x02, 0x04, 0x02, 0x01, 0x02,
         0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char extractPerm = (vector unsigned char)
        {0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01,
         0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01};
    const vector unsigned char extractPermInc = (vector unsigned char)
        {0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
         0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01};
    const vector unsigned char identity = vec_lvsl(0, (unsigned char *)0);
    const vector unsigned char tenRight = (vector unsigned char)
        {0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char eightLeft = (vector unsigned char)
        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08};

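/* F2 filters one pixel of line j per invocation (stepping across the
 * columns, guarded by the matching bit of S[i]): vec_msum() with the
 * `magic` weights evaluates the 3x3 kernel
 *
 *     1 2 1
 *     2 4 2
 *     1 2 1
 *
 * over lines i/j/k, (sum + 8) >> 4 gives the filtered value, which is
 * clamped to within QP/2 + 1 of the original pixel before being inserted
 * back into src##j; the four running permute vectors are then stepped to
 * the next column. */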
#define F_INIT(i)                                                       \
    vector unsigned char tenRightM##i = tenRight;                       \
    vector unsigned char permA1M##i = permA1;                           \
    vector unsigned char permA2M##i = permA2;                           \
    vector unsigned char extractPermM##i = extractPerm

#define F2(i, j, k, l)                                                  \
    if (S[i] & (1 << (l+1))) {                                          \
        const vector unsigned char a_##j##_A##l =                       \
            vec_perm(src##i, src##j, permA1M##i);                       \
        const vector unsigned char a_##j##_B##l =                       \
            vec_perm(a_##j##_A##l, src##k, permA2M##i);                 \
        const vector signed int a_##j##_sump##l =                       \
            (vector signed int)vec_msum(a_##j##_B##l, magic,            \
                                        (vector unsigned int)zero);     \
        vector signed int F_##j##_##l =                                 \
            vec_sr(vec_sums(a_##j##_sump##l, vsint32_8), vuint32_4);    \
        F_##j##_##l = vec_splat(F_##j##_##l, 3);                        \
        const vector signed int p_##j##_##l =                           \
            (vector signed int)vec_perm(src##j,                         \
                                        (vector unsigned char)zero,     \
                                        extractPermM##i);               \
        const vector signed int sum_##j##_##l  = vec_add(p_##j##_##l, vQP2); \
        const vector signed int diff_##j##_##l = vec_sub(p_##j##_##l, vQP2); \
        vector signed int newpm_##j##_##l;                              \
        if (vec_all_lt(sum_##j##_##l, F_##j##_##l))                     \
            newpm_##j##_##l = sum_##j##_##l;                            \
        else if (vec_all_gt(diff_##j##_##l, F_##j##_##l))               \
            newpm_##j##_##l = diff_##j##_##l;                           \
        else newpm_##j##_##l = F_##j##_##l;                             \
        const vector unsigned char newpm2_##j##_##l =                   \
            vec_splat((vector unsigned char)newpm_##j##_##l, 15);       \
        const vector unsigned char mask##j##l = vec_add(identity,       \
                                                        tenRightM##i);  \
        src##j = vec_perm(src##j, newpm2_##j##_##l, mask##j##l);        \
    }                                                                   \
    permA1M##i = vec_add(permA1M##i, permA1inc);                        \
    permA2M##i = vec_add(permA2M##i, permA2inc);                        \
    tenRightM##i = vec_sro(tenRightM##i, eightLeft);                    \
    extractPermM##i = vec_add(extractPermM##i, extractPermInc)

#define ITER(i, j, k)                                                   \
    F_INIT(i);                                                          \
    F2(i, j, k, 0);                                                     \
    F2(i, j, k, 1);                                                     \
    F2(i, j, k, 2);                                                     \
    F2(i, j, k, 3);                                                     \
    F2(i, j, k, 4);                                                     \
    F2(i, j, k, 5);                                                     \
    F2(i, j, k, 6);                                                     \
    F2(i, j, k, 7)

    ITER(0, 1, 2);
    ITER(1, 2, 3);
    ITER(2, 3, 4);
    ITER(3, 4, 5);
    ITER(4, 5, 6);
    ITER(5, 6, 7);
    ITER(6, 7, 8);
    ITER(7, 8, 9);

    const vector signed char neg1 = vec_splat_s8(-1);

#define STORE_LINE(i)                                                   \
    const vector unsigned char permST##i =                              \
        vec_lvsr(i * stride, srcCopy);                                  \
    const vector unsigned char maskST##i =                              \
        vec_perm((vector unsigned char)zero,                            \
                 (vector unsigned char)neg1, permST##i);                \
    src##i = vec_perm(src##i, src##i, permST##i);                       \
    sA##i = vec_sel(sA##i, src##i, maskST##i);                          \
    sB##i = vec_sel(src##i, sB##i, maskST##i);                          \
    vec_st(sA##i, i * stride, srcCopy);                                 \
    vec_st(sB##i, i * stride + 16, srcCopy)

    STORE_LINE(1);
    STORE_LINE(2);
    STORE_LINE(3);
    STORE_LINE(4);
    STORE_LINE(5);
    STORE_LINE(6);
    STORE_LINE(7);
    STORE_LINE(8);

#undef STORE_LINE
#undef ITER
#undef F2
}

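/* No AltiVec versions of the horizontal filters (or of do_a_deblock)
 * exist; fall back to the C implementations. */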
#define doHorizLowPass_altivec(a...) doHorizLowPass_C(a)
#define doHorizDefFilter_altivec(a...) doHorizDefFilter_C(a)
#define do_a_deblock_altivec(a...) do_a_deblock_C(a)

static inline void RENAME(tempNoiseReducer)(uint8_t *src, int stride,
                                            uint8_t *tempBlurred, uint32_t *tempBlurredPast, int *maxNoise)
{
    const vector signed int zero = vec_splat_s32(0);
    const vector signed short vsint16_1 = vec_splat_s16(1);
    vector signed int v_dp = zero;
    vector signed int v_sysdp = zero;
    int d, sysd, i;

    tempBlurredPast[127] = maxNoise[0];
    tempBlurredPast[128] = maxNoise[1];
    tempBlurredPast[129] = maxNoise[2];

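    /* The code below computes d, the sum of squared differences between
     * the current 8x8 block and its temporally blurred counterpart (and
     * sysd, the plain signed sum of differences). d is then IIR-smoothed
     * through the tempBlurredPast neighbourhood and compared against the
     * three maxNoise thresholds to choose between copying, averaging, or
     * one of two weighted blends of src into tempBlurred. */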
#define LOAD_LINE(src, i)                                               \
    register int j##src##i = i * stride;                                \
    vector unsigned char perm##src##i = vec_lvsl(j##src##i, src);       \
    const vector unsigned char v_##src##A1##i = vec_ld(j##src##i, src); \
    const vector unsigned char v_##src##A2##i = vec_ld(j##src##i + 16, src); \
    const vector unsigned char v_##src##A##i =                          \
        vec_perm(v_##src##A1##i, v_##src##A2##i, perm##src##i);         \
    vector signed short v_##src##Ass##i =                               \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)v_##src##A##i)

    LOAD_LINE(src, 0);
    LOAD_LINE(src, 1);
    LOAD_LINE(src, 2);
    LOAD_LINE(src, 3);
    LOAD_LINE(src, 4);
    LOAD_LINE(src, 5);
    LOAD_LINE(src, 6);
    LOAD_LINE(src, 7);

    LOAD_LINE(tempBlurred, 0);
    LOAD_LINE(tempBlurred, 1);
    LOAD_LINE(tempBlurred, 2);
    LOAD_LINE(tempBlurred, 3);
    LOAD_LINE(tempBlurred, 4);
    LOAD_LINE(tempBlurred, 5);
    LOAD_LINE(tempBlurred, 6);
    LOAD_LINE(tempBlurred, 7);
#undef LOAD_LINE

#define ACCUMULATE_DIFFS(i)                                             \
    vector signed short v_d##i = vec_sub(v_tempBlurredAss##i,           \
                                         v_srcAss##i);                  \
    v_dp = vec_msums(v_d##i, v_d##i, v_dp);                             \
    v_sysdp = vec_msums(v_d##i, vsint16_1, v_sysdp)

    ACCUMULATE_DIFFS(0);
    ACCUMULATE_DIFFS(1);
    ACCUMULATE_DIFFS(2);
    ACCUMULATE_DIFFS(3);
    ACCUMULATE_DIFFS(4);
    ACCUMULATE_DIFFS(5);
    ACCUMULATE_DIFFS(6);
    ACCUMULATE_DIFFS(7);
#undef ACCUMULATE_DIFFS

    v_dp = vec_sums(v_dp, zero);
    v_sysdp = vec_sums(v_sysdp, zero);

    v_dp = vec_splat(v_dp, 3);
    v_sysdp = vec_splat(v_sysdp, 3);

    vec_ste(v_dp, 0, &d);
    vec_ste(v_sysdp, 0, &sysd);

    i = d;
    d = (4*d
         + (*(tempBlurredPast-256))
         + (*(tempBlurredPast-1)) + (*(tempBlurredPast+1))
         + (*(tempBlurredPast+256))
         + 4) >> 3;

    *tempBlurredPast = i;

    if (d > maxNoise[1]) {
        if (d < maxNoise[2]) {
#define OP(i) v_tempBlurredAss##i = vec_avg(v_tempBlurredAss##i, v_srcAss##i);

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        } else {
#define OP(i) v_tempBlurredAss##i = v_srcAss##i;

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        }
    } else {
        if (d < maxNoise[0]) {
            const vector signed short vsint16_7 = vec_splat_s16(7);
            const vector signed short vsint16_4 = vec_splat_s16(4);
            const vector unsigned short vuint16_3 = vec_splat_u16(3);

#define OP(i)                                                           \
            const vector signed short v_temp##i =                       \
                vec_mladd(v_tempBlurredAss##i,                          \
                          vsint16_7, v_srcAss##i);                      \
            const vector signed short v_temp2##i =                      \
                vec_add(v_temp##i, vsint16_4);                          \
            v_tempBlurredAss##i = vec_sr(v_temp2##i, vuint16_3)

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        } else {
            const vector signed short vsint16_3 = vec_splat_s16(3);
            const vector signed short vsint16_2 = vec_splat_s16(2);

#define OP(i)                                                           \
            const vector signed short v_temp##i =                       \
                vec_mladd(v_tempBlurredAss##i,                          \
                          vsint16_3, v_srcAss##i);                      \
            const vector signed short v_temp2##i =                      \
                vec_add(v_temp##i, vsint16_2);                          \
            v_tempBlurredAss##i = vec_sr(v_temp2##i, (vector unsigned short)vsint16_2)

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        }
    }

    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                     0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};

#define PACK_AND_STORE(src, i)                                          \
    const vector unsigned char perms##src##i =                          \
        vec_lvsr(i * stride, src);                                      \
    const vector unsigned char vf##src##i =                             \
        vec_packsu(v_tempBlurredAss##i, (vector signed short)zero);     \
    const vector unsigned char vg##src##i =                             \
        vec_perm(vf##src##i, v_##src##A##i, permHH);                    \
    const vector unsigned char mask##src##i =                           \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##src##i); \
    const vector unsigned char vg2##src##i =                            \
        vec_perm(vg##src##i, vg##src##i, perms##src##i);                \
    const vector unsigned char svA##src##i =                            \
        vec_sel(v_##src##A1##i, vg2##src##i, mask##src##i);             \
    const vector unsigned char svB##src##i =                            \
        vec_sel(vg2##src##i, v_##src##A2##i, mask##src##i);             \
    vec_st(svA##src##i, i * stride, src);                               \
    vec_st(svB##src##i, i * stride + 16, src)

    PACK_AND_STORE(src, 0);
    PACK_AND_STORE(src, 1);
    PACK_AND_STORE(src, 2);
    PACK_AND_STORE(src, 3);
    PACK_AND_STORE(src, 4);
    PACK_AND_STORE(src, 5);
    PACK_AND_STORE(src, 6);
    PACK_AND_STORE(src, 7);
    PACK_AND_STORE(tempBlurred, 0);
    PACK_AND_STORE(tempBlurred, 1);
    PACK_AND_STORE(tempBlurred, 2);
    PACK_AND_STORE(tempBlurred, 3);
    PACK_AND_STORE(tempBlurred, 4);
    PACK_AND_STORE(tempBlurred, 5);
    PACK_AND_STORE(tempBlurred, 6);
    PACK_AND_STORE(tempBlurred, 7);
#undef PACK_AND_STORE
}

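/* Transpose 8 source lines of 16 pixels into 16 aligned destination
 * vectors, one per source column: after the zero-interleaving merge
 * passes, each output vector holds the 8 bytes of one column followed by
 * 8 zero bytes, ready for the vertical filters to process in place. */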
static inline void transpose_16x8_char_toPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
    const vector unsigned char zero = vec_splat_u8(0);

#define LOAD_DOUBLE_LINE(i, j)                                          \
    vector unsigned char perm1##i = vec_lvsl(i * stride, src);          \
    vector unsigned char perm2##i = vec_lvsl(j * stride, src);          \
    vector unsigned char srcA##i = vec_ld(i * stride, src);             \
    vector unsigned char srcB##i = vec_ld(i * stride + 16, src);        \
    vector unsigned char srcC##i = vec_ld(j * stride, src);             \
    vector unsigned char srcD##i = vec_ld(j * stride + 16, src);        \
    vector unsigned char src##i = vec_perm(srcA##i, srcB##i, perm1##i); \
    vector unsigned char src##j = vec_perm(srcC##i, srcD##i, perm2##i)

    LOAD_DOUBLE_LINE(0, 1);
    LOAD_DOUBLE_LINE(2, 3);
    LOAD_DOUBLE_LINE(4, 5);
    LOAD_DOUBLE_LINE(6, 7);
#undef LOAD_DOUBLE_LINE

    vector unsigned char tempA = vec_mergeh(src0, zero);
    vector unsigned char tempB = vec_mergel(src0, zero);
    vector unsigned char tempC = vec_mergeh(src1, zero);
    vector unsigned char tempD = vec_mergel(src1, zero);
    vector unsigned char tempE = vec_mergeh(src2, zero);
    vector unsigned char tempF = vec_mergel(src2, zero);
    vector unsigned char tempG = vec_mergeh(src3, zero);
    vector unsigned char tempH = vec_mergel(src3, zero);
    vector unsigned char tempI = vec_mergeh(src4, zero);
    vector unsigned char tempJ = vec_mergel(src4, zero);
    vector unsigned char tempK = vec_mergeh(src5, zero);
    vector unsigned char tempL = vec_mergel(src5, zero);
    vector unsigned char tempM = vec_mergeh(src6, zero);
    vector unsigned char tempN = vec_mergel(src6, zero);
    vector unsigned char tempO = vec_mergeh(src7, zero);
    vector unsigned char tempP = vec_mergel(src7, zero);

    vector unsigned char temp0  = vec_mergeh(tempA, tempI);
    vector unsigned char temp1  = vec_mergel(tempA, tempI);
    vector unsigned char temp2  = vec_mergeh(tempB, tempJ);
    vector unsigned char temp3  = vec_mergel(tempB, tempJ);
    vector unsigned char temp4  = vec_mergeh(tempC, tempK);
    vector unsigned char temp5  = vec_mergel(tempC, tempK);
    vector unsigned char temp6  = vec_mergeh(tempD, tempL);
    vector unsigned char temp7  = vec_mergel(tempD, tempL);
    vector unsigned char temp8  = vec_mergeh(tempE, tempM);
    vector unsigned char temp9  = vec_mergel(tempE, tempM);
    vector unsigned char temp10 = vec_mergeh(tempF, tempN);
    vector unsigned char temp11 = vec_mergel(tempF, tempN);
    vector unsigned char temp12 = vec_mergeh(tempG, tempO);
    vector unsigned char temp13 = vec_mergel(tempG, tempO);
    vector unsigned char temp14 = vec_mergeh(tempH, tempP);
    vector unsigned char temp15 = vec_mergel(tempH, tempP);

    tempA = vec_mergeh(temp0, temp8);
    tempB = vec_mergel(temp0, temp8);
    tempC = vec_mergeh(temp1, temp9);
    tempD = vec_mergel(temp1, temp9);
    tempE = vec_mergeh(temp2, temp10);
    tempF = vec_mergel(temp2, temp10);
    tempG = vec_mergeh(temp3, temp11);
    tempH = vec_mergel(temp3, temp11);
    tempI = vec_mergeh(temp4, temp12);
    tempJ = vec_mergel(temp4, temp12);
    tempK = vec_mergeh(temp5, temp13);
    tempL = vec_mergel(temp5, temp13);
    tempM = vec_mergeh(temp6, temp14);
    tempN = vec_mergel(temp6, temp14);
    tempO = vec_mergeh(temp7, temp15);
    tempP = vec_mergel(temp7, temp15);

    temp0  = vec_mergeh(tempA, tempI);
    temp1  = vec_mergel(tempA, tempI);
    temp2  = vec_mergeh(tempB, tempJ);
    temp3  = vec_mergel(tempB, tempJ);
    temp4  = vec_mergeh(tempC, tempK);
    temp5  = vec_mergel(tempC, tempK);
    temp6  = vec_mergeh(tempD, tempL);
    temp7  = vec_mergel(tempD, tempL);
    temp8  = vec_mergeh(tempE, tempM);
    temp9  = vec_mergel(tempE, tempM);
    temp10 = vec_mergeh(tempF, tempN);
    temp11 = vec_mergel(tempF, tempN);
    temp12 = vec_mergeh(tempG, tempO);
    temp13 = vec_mergel(tempG, tempO);
    temp14 = vec_mergeh(tempH, tempP);
    temp15 = vec_mergel(tempH, tempP);

    vec_st(temp0,    0, dst);
    vec_st(temp1,   16, dst);
    vec_st(temp2,   32, dst);
    vec_st(temp3,   48, dst);
    vec_st(temp4,   64, dst);
    vec_st(temp5,   80, dst);
    vec_st(temp6,   96, dst);
    vec_st(temp7,  112, dst);
    vec_st(temp8,  128, dst);
    vec_st(temp9,  144, dst);
    vec_st(temp10, 160, dst);
    vec_st(temp11, 176, dst);
    vec_st(temp12, 192, dst);
    vec_st(temp13, 208, dst);
    vec_st(temp14, 224, dst);
    vec_st(temp15, 240, dst);
}

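/* Inverse of the transpose above: gather the 16 packed vectors (each
 * carrying one column in its first 8 bytes, which is why only the
 * mergeh results are needed below) and scatter them back as 8 lines of
 * 16 pixels at the possibly unaligned, strided dst using the same
 * load/select/store read-modify-write idiom as elsewhere in this file. */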
static inline void transpose_8x16_char_fromPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
    const vector unsigned char zero = vec_splat_u8(0);

#define LOAD_DOUBLE_LINE(i, j)                                          \
    vector unsigned char src##i = vec_ld(i * 16, src);                  \
    vector unsigned char src##j = vec_ld(j * 16, src)

    LOAD_DOUBLE_LINE(0, 1);
    LOAD_DOUBLE_LINE(2, 3);
    LOAD_DOUBLE_LINE(4, 5);
    LOAD_DOUBLE_LINE(6, 7);
    LOAD_DOUBLE_LINE(8, 9);
    LOAD_DOUBLE_LINE(10, 11);
    LOAD_DOUBLE_LINE(12, 13);
    LOAD_DOUBLE_LINE(14, 15);
#undef LOAD_DOUBLE_LINE

    vector unsigned char tempA = vec_mergeh(src0, src8);
    vector unsigned char tempB;
    vector unsigned char tempC = vec_mergeh(src1, src9);
    vector unsigned char tempD;
    vector unsigned char tempE = vec_mergeh(src2, src10);
    vector unsigned char tempG = vec_mergeh(src3, src11);
    vector unsigned char tempI = vec_mergeh(src4, src12);
    vector unsigned char tempJ;
    vector unsigned char tempK = vec_mergeh(src5, src13);
    vector unsigned char tempL;
    vector unsigned char tempM = vec_mergeh(src6, src14);
    vector unsigned char tempO = vec_mergeh(src7, src15);

    vector unsigned char temp0 = vec_mergeh(tempA, tempI);
    vector unsigned char temp1 = vec_mergel(tempA, tempI);
    vector unsigned char temp2;
    vector unsigned char temp3;
    vector unsigned char temp4 = vec_mergeh(tempC, tempK);
    vector unsigned char temp5 = vec_mergel(tempC, tempK);
    vector unsigned char temp6;
    vector unsigned char temp7;
    vector unsigned char temp8 = vec_mergeh(tempE, tempM);
    vector unsigned char temp9 = vec_mergel(tempE, tempM);
    vector unsigned char temp12 = vec_mergeh(tempG, tempO);
    vector unsigned char temp13 = vec_mergel(tempG, tempO);

    tempA = vec_mergeh(temp0, temp8);
    tempB = vec_mergel(temp0, temp8);
    tempC = vec_mergeh(temp1, temp9);
    tempD = vec_mergel(temp1, temp9);
    tempI = vec_mergeh(temp4, temp12);
    tempJ = vec_mergel(temp4, temp12);
    tempK = vec_mergeh(temp5, temp13);
    tempL = vec_mergel(temp5, temp13);

    temp0 = vec_mergeh(tempA, tempI);
    temp1 = vec_mergel(tempA, tempI);
    temp2 = vec_mergeh(tempB, tempJ);
    temp3 = vec_mergel(tempB, tempJ);
    temp4 = vec_mergeh(tempC, tempK);
    temp5 = vec_mergel(tempC, tempK);
    temp6 = vec_mergeh(tempD, tempL);
    temp7 = vec_mergel(tempD, tempL);

    const vector signed char neg1 = vec_splat_s8(-1);
#define STORE_DOUBLE_LINE(i, j)                                         \
    vector unsigned char dstA##i = vec_ld(i * stride, dst);             \
    vector unsigned char dstB##i = vec_ld(i * stride + 16, dst);        \
    vector unsigned char dstA##j = vec_ld(j * stride, dst);             \
    vector unsigned char dstB##j = vec_ld(j * stride + 16, dst);        \
    vector unsigned char align##i = vec_lvsr(i * stride, dst);          \
    vector unsigned char align##j = vec_lvsr(j * stride, dst);          \
    vector unsigned char mask##i = vec_perm(zero, (vector unsigned char)neg1, align##i); \
    vector unsigned char mask##j = vec_perm(zero, (vector unsigned char)neg1, align##j); \
    vector unsigned char dstR##i = vec_perm(temp##i, temp##i, align##i); \
    vector unsigned char dstR##j = vec_perm(temp##j, temp##j, align##j); \
    vector unsigned char dstAF##i = vec_sel(dstA##i, dstR##i, mask##i); \
    vector unsigned char dstBF##i = vec_sel(dstR##i, dstB##i, mask##i); \
    vector unsigned char dstAF##j = vec_sel(dstA##j, dstR##j, mask##j); \
    vector unsigned char dstBF##j = vec_sel(dstR##j, dstB##j, mask##j); \
    vec_st(dstAF##i, i * stride, dst);                                  \
    vec_st(dstBF##i, i * stride + 16, dst);                             \
    vec_st(dstAF##j, j * stride, dst);                                  \
    vec_st(dstBF##j, j * stride + 16, dst)

    STORE_DOUBLE_LINE(0, 1);
    STORE_DOUBLE_LINE(2, 3);
    STORE_DOUBLE_LINE(4, 5);
    STORE_DOUBLE_LINE(6, 7);
}