/*
 * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"
#include "libavcodec/h264data.h"

#include "gcc_fixes.h"

#include "dsputil_ppc.h"
#include "dsputil_altivec.h"
#include "util_altivec.h"
#include "types_altivec.h"

#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)

#define OP_U8_ALTIVEC PUT_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec put_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num altivec_put_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec put_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num altivec_put_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec put_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num altivec_put_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec put_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num altivec_put_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

#define OP_U8_ALTIVEC AVG_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec avg_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num altivec_avg_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec avg_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num altivec_avg_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec avg_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num altivec_avg_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec avg_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num altivec_avg_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

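/* h264_template_altivec.c is included twice: first with OP_U8_ALTIVEC
 * defined as a plain store (PUT_OP_U8_ALTIVEC), then as an average with
 * the destination (AVG_OP_U8_ALTIVEC), so the same template expands into
 * both the put_* and the avg_* chroma/qpel kernels. */
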
#define H264_MC(OPNAME, SIZE, CODETYPE) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}

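/* H264_MC expands into the 16 quarter-pel motion compensation entry points
 * _mcXY_, where X and Y are the horizontal and vertical quarter-pel offsets
 * (0..3). Full- and half-pel positions run a single lowpass filter; the
 * remaining positions average two intermediate planes with the *_l2_*
 * helpers, e.g. _mc10_ averages the source with the horizontal half-pel
 * plane. */
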
/* this code assumes that stride % 16 == 0 */
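/* Bilinear 1/8-pel chroma interpolation. For fractional offsets x,y in
 * 0..7 each output pixel is a weighted sum of its four neighbors A..D:
 *
 *   dst = (A*(8-x)*(8-y) + B*x*(8-y) + C*(8-x)*y + D*x*y + 28) >> 6
 *
 * e.g. x=2, y=3 gives the weights 30, 10, 18, 6 (they always sum to 64).
 * The +28 bias (v28ss below) is the "no rounding" variant; the rounded
 * version would add 32 before the shift. */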
void put_no_rnd_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
    DECLARE_ALIGNED_16(signed int, ABCD[4]) =
                        {((8 - x) * (8 - y)),
                         ((x) * (8 - y)),
                         ((8 - x) * (y)),
                         ((x) * (y))};
    register int i;
    vec_u8_t fperm;
    const vec_s32_t vABCD = vec_ld(0, ABCD);
    const vec_s16_t vA = vec_splat((vec_s16_t)vABCD, 1);
    const vec_s16_t vB = vec_splat((vec_s16_t)vABCD, 3);
    const vec_s16_t vC = vec_splat((vec_s16_t)vABCD, 5);
    const vec_s16_t vD = vec_splat((vec_s16_t)vABCD, 7);
    LOAD_ZERO;
    const vec_s16_t v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
    const vec_u16_t v6us = vec_splat_u16(6);
    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;

    vec_u8_t vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1;
    vec_u8_t vsrc0uc, vsrc1uc;
    vec_s16_t vsrc0ssH, vsrc1ssH;
    vec_u8_t vsrcCuc, vsrc2uc, vsrc3uc;
    vec_s16_t vsrc2ssH, vsrc3ssH, psum;
    vec_u8_t vdst, ppsum, fsum;

    if (((unsigned long)dst) % 16 == 0) {
        fperm = (vec_u8_t){0x10, 0x11, 0x12, 0x13,
                           0x14, 0x15, 0x16, 0x17,
                           0x08, 0x09, 0x0A, 0x0B,
                           0x0C, 0x0D, 0x0E, 0x0F};
    } else {
        fperm = (vec_u8_t){0x00, 0x01, 0x02, 0x03,
                           0x04, 0x05, 0x06, 0x07,
                           0x18, 0x19, 0x1A, 0x1B,
                           0x1C, 0x1D, 0x1E, 0x1F};
    }

    vsrcAuc = vec_ld(0, src);

    if (loadSecond)
        vsrcBuc = vec_ld(16, src);
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);

    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
    if (reallyBadAlign)
        vsrc1uc = vsrcBuc;
    else
        vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);

    vsrc0ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc0uc);
    vsrc1ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc1uc);

    if (!loadSecond) {// -> !reallyBadAlign
        for (i = 0 ; i < h ; i++) {
            vsrcCuc = vec_ld(stride + 0, src);

            vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
            vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);

            vsrc2ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc2uc);
            vsrc3ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc3uc);

            psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
            psum = vec_mladd(vB, vsrc1ssH, psum);
            psum = vec_mladd(vC, vsrc2ssH, psum);
            psum = vec_mladd(vD, vsrc3ssH, psum);
            psum = vec_add(v28ss, psum);
            psum = vec_sra(psum, v6us);

            vdst = vec_ld(0, dst);
            ppsum = (vec_u8_t)vec_packsu(psum, psum);
            fsum = vec_perm(vdst, ppsum, fperm);

            vec_st(fsum, 0, dst);

            vsrc0ssH = vsrc2ssH;
            vsrc1ssH = vsrc3ssH;

            dst += stride;
            src += stride;
        }
    } else {
        vec_u8_t vsrcDuc;
        for (i = 0 ; i < h ; i++) {
            vsrcCuc = vec_ld(stride + 0, src);
            vsrcDuc = vec_ld(stride + 16, src);

            vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
            if (reallyBadAlign)
                vsrc3uc = vsrcDuc;
            else
                vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);

            vsrc2ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc2uc);
            vsrc3ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc3uc);

            psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
            psum = vec_mladd(vB, vsrc1ssH, psum);
            psum = vec_mladd(vC, vsrc2ssH, psum);
            psum = vec_mladd(vD, vsrc3ssH, psum);
            psum = vec_add(v28ss, psum);
            psum = vec_sr(psum, v6us);

            vdst = vec_ld(0, dst);
            ppsum = (vec_u8_t)vec_pack(psum, psum);
            fsum = vec_perm(vdst, ppsum, fperm);

            vec_st(fsum, 0, dst);

            vsrc0ssH = vsrc2ssH;
            vsrc1ssH = vsrc3ssH;

            dst += stride;
            src += stride;
        }
    }
}

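/* put/avg_pixels16_l2: average two sources into dst, 16 pixels per row.
 * AltiVec has no unaligned loads or stores, so every unaligned load
 * fetches the two straddling 16-byte blocks and merges them with a
 * vec_lvsl permute, and the unaligned store is a read-modify-write that
 * permutes the result in between the preserved edge bytes of dst
 * (vec_lvsr) before storing both straddled blocks back. */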
static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
                                            const uint8_t * src2, int dst_stride,
                                            int src_stride1, int h)
{
    int i;
    vec_u8_t a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {

        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(a, b);

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0 , dst);

        dst += dst_stride;
    }
}

static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
                                            const uint8_t * src2, int dst_stride,
                                            int src_stride1, int h)
{
    int i;
    vec_u8_t a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {

        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0 , dst);

        dst += dst_stride;
    }
}

/* Implemented but could be faster
#define put_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) put_pixels16_l2(d,s1,s2,ds,s1s,16,h)
#define avg_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) avg_pixels16_l2(d,s1,s2,ds,s1s,16,h)
 */

H264_MC(put_, 16, altivec)
H264_MC(avg_, 16, altivec)


/****************************************************************************
 * IDCT transform:
 ****************************************************************************/

#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3)          \
    /* 1st stage */                                          \
    vz0 = vec_add(vb0,vb2);  /* temp[0] = Y[0] + Y[2] */     \
    vz1 = vec_sub(vb0,vb2);  /* temp[1] = Y[0] - Y[2] */     \
    vz2 = vec_sra(vb1,vec_splat_u16(1));                     \
    vz2 = vec_sub(vz2,vb3);  /* temp[2] = Y[1].1/2 - Y[3] */ \
    vz3 = vec_sra(vb3,vec_splat_u16(1));                     \
    vz3 = vec_add(vb1,vz3);  /* temp[3] = Y[1] + Y[3].1/2 */ \
    /* 2nd stage: output */                                  \
    va0 = vec_add(vz0,vz3);  /* x[0] = temp[0] + temp[3] */  \
    va1 = vec_add(vz1,vz2);  /* x[1] = temp[1] + temp[2] */  \
    va2 = vec_sub(vz1,vz2);  /* x[2] = temp[1] - temp[2] */  \
    va3 = vec_sub(vz0,vz3)   /* x[3] = temp[0] - temp[3] */
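
/* The 4x4 H.264 inverse transform is separable: VEC_1D_DCT is applied to
 * the rows, the block is transposed with VEC_TRANSPOSE_4, the same 1-D
 * pass runs again, and the result is scaled by >>6, with a +32 rounding
 * bias folded into the DC coefficient before the first pass. */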

#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
    b0 = vec_mergeh( a0, a0 ); \
    b1 = vec_mergeh( a1, a0 ); \
    b2 = vec_mergeh( a2, a0 ); \
    b3 = vec_mergeh( a3, a0 ); \
    a0 = vec_mergeh( b0, b2 ); \
    a1 = vec_mergel( b0, b2 ); \
    a2 = vec_mergeh( b1, b3 ); \
    a3 = vec_mergel( b1, b3 ); \
    b0 = vec_mergeh( a0, a2 ); \
    b1 = vec_mergel( a0, a2 ); \
    b2 = vec_mergeh( a1, a3 ); \
    b3 = vec_mergel( a1, a3 )

#define VEC_LOAD_U8_ADD_S16_STORE_U8(va)              \
    vdst_orig = vec_ld(0, dst);                       \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask);  \
    vdst_ss = (vec_s16_t) vec_mergeh(zero_u8v, vdst); \
    va = vec_add(va, vdst_ss);                        \
    va_u8 = vec_packsu(va, zero_s16v);                \
    va_u32 = vec_splat((vec_u32_t)va_u8, 0);          \
    vec_ste(va_u32, element, (uint32_t*)dst);

static void ff_h264_idct_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    vec_s16_t va0, va1, va2, va3;
    vec_s16_t vz0, vz1, vz2, vz3;
    vec_s16_t vtmp0, vtmp1, vtmp2, vtmp3;
    vec_u8_t va_u8;
    vec_u32_t va_u32;
    vec_s16_t vdst_ss;
    const vec_u16_t v6us = vec_splat_u16(6);
    vec_u8_t vdst, vdst_orig;
    vec_u8_t vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;
    LOAD_ZERO;

    block[0] += 32;  /* add 32 as a DC-level for rounding */

    vtmp0 = vec_ld(0,block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16,block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);

    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = vec_sra(va0,v6us);
    va1 = vec_sra(va1,v6us);
    va2 = vec_sra(va2,v6us);
    va3 = vec_sra(va3,v6us);

    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
}

#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) {\
    /* a0 = SRC(0) + SRC(4); */ \
    vec_s16_t a0v = vec_add(s0, s4); \
    /* a2 = SRC(0) - SRC(4); */ \
    vec_s16_t a2v = vec_sub(s0, s4); \
    /* a4 = (SRC(2)>>1) - SRC(6); */ \
    vec_s16_t a4v = vec_sub(vec_sra(s2, onev), s6); \
    /* a6 = (SRC(6)>>1) + SRC(2); */ \
    vec_s16_t a6v = vec_add(vec_sra(s6, onev), s2); \
    /* b0 = a0 + a6; */ \
    vec_s16_t b0v = vec_add(a0v, a6v); \
    /* b2 = a2 + a4; */ \
    vec_s16_t b2v = vec_add(a2v, a4v); \
    /* b4 = a2 - a4; */ \
    vec_s16_t b4v = vec_sub(a2v, a4v); \
    /* b6 = a0 - a6; */ \
    vec_s16_t b6v = vec_sub(a0v, a6v); \
    /* a1 = SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \
    /* a1 = (SRC(5)-SRC(3)) - (SRC(7) + (SRC(7)>>1)); */ \
    vec_s16_t a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    /* a3 = SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \
    /* a3 = (SRC(7)+SRC(1)) - (SRC(3) + (SRC(3)>>1)); */ \
    vec_s16_t a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) );\
    /* a5 = SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \
    /* a5 = (SRC(7)-SRC(1)) + SRC(5) + (SRC(5)>>1); */ \
    vec_s16_t a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) );\
    /* a7 = SRC(5)+SRC(3) + SRC(1) + (SRC(1)>>1); */ \
    vec_s16_t a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) );\
    /* b1 = (a7>>2) + a1; */ \
    vec_s16_t b1v = vec_add( vec_sra(a7v, twov), a1v); \
    /* b3 = a3 + (a5>>2); */ \
    vec_s16_t b3v = vec_add(a3v, vec_sra(a5v, twov)); \
    /* b5 = (a3>>2) - a5; */ \
    vec_s16_t b5v = vec_sub( vec_sra(a3v, twov), a5v); \
    /* b7 = a7 - (a1>>2); */ \
    vec_s16_t b7v = vec_sub( a7v, vec_sra(a1v, twov)); \
    /* DST(0, b0 + b7); */ \
    d0 = vec_add(b0v, b7v); \
    /* DST(1, b2 + b5); */ \
    d1 = vec_add(b2v, b5v); \
    /* DST(2, b4 + b3); */ \
    d2 = vec_add(b4v, b3v); \
    /* DST(3, b6 + b1); */ \
    d3 = vec_add(b6v, b1v); \
    /* DST(4, b6 - b1); */ \
    d4 = vec_sub(b6v, b1v); \
    /* DST(5, b4 - b3); */ \
    d5 = vec_sub(b4v, b3v); \
    /* DST(6, b2 - b5); */ \
    d6 = vec_sub(b2v, b5v); \
    /* DST(7, b0 - b7); */ \
    d7 = vec_sub(b0v, b7v); \
}

#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
    /* unaligned load */                                       \
    vec_u8_t hv = vec_ld( 0, dest );                           \
    vec_u8_t lv = vec_ld( 7, dest );                           \
    vec_u8_t dstv = vec_perm( hv, lv, (vec_u8_t)perm_ldv );    \
    vec_s16_t idct_sh6 = vec_sra(idctv, sixv);                 \
    vec_u16_t dst16 = (vec_u16_t)vec_mergeh(zero_u8v, dstv);   \
    vec_s16_t idstsum = vec_adds(idct_sh6, (vec_s16_t)dst16);  \
    vec_u8_t idstsum8 = vec_packsu(zero_s16v, idstsum);        \
    vec_u8_t edgehv;                                           \
    /* unaligned store */                                      \
    vec_u8_t bodyv = vec_perm( idstsum8, idstsum8, perm_stv ); \
    vec_u8_t edgelv = vec_perm( sel, zero_u8v, perm_stv );     \
    lv = vec_sel( lv, bodyv, edgelv );                         \
    vec_st( lv, 7, dest );                                     \
    hv = vec_ld( 0, dest );                                    \
    edgehv = vec_perm( zero_u8v, sel, perm_stv );              \
    hv = vec_sel( hv, bodyv, edgehv );                         \
    vec_st( hv, 0, dest );                                     \
}

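/* ALTIVEC_STORE_SUM_CLIP adds one transformed row to 8 pixels of dest
 * with unsigned saturation (vec_packsu provides the 0..255 clip). The
 * write-back is a read-modify-write: sel selects the 8 result bytes while
 * the surrounding bytes of the two straddled 16-byte blocks keep their
 * original contents. */
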
void ff_h264_idct8_add_altivec( uint8_t *dst, DCTELEM *dct, int stride ) {
    vec_s16_t s0, s1, s2, s3, s4, s5, s6, s7;
    vec_s16_t d0, d1, d2, d3, d4, d5, d6, d7;
    vec_s16_t idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;

    vec_u8_t perm_ldv = vec_lvsl(0, dst);
    vec_u8_t perm_stv = vec_lvsr(8, dst);

    const vec_u16_t onev = vec_splat_u16(1);
    const vec_u16_t twov = vec_splat_u16(2);
    const vec_u16_t sixv = vec_splat_u16(6);

    const vec_u8_t sel = (vec_u8_t) {0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1};
    LOAD_ZERO;

    dct[0] += 32; // rounding for the >>6 at the end

    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);

    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
                     d0, d1, d2, d3, d4, d5, d6, d7);

    TRANSPOSE8( d0, d1, d2, d3, d4, d5, d6, d7 );

    IDCT8_1D_ALTIVEC(d0, d1, d2, d3, d4, d5, d6, d7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
}

// TODO: implement this in AltiVec
static void ff_h264_idct8_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride) {
    int i, j;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    int dc = (block[0] + 32) >> 6;
    for( j = 0; j < 8; j++ )
    {
        for( i = 0; i < 8; i++ )
            dst[i] = cm[ dst[i] + dc ];
        dst += stride;
    }
}

static void ff_h264_idct8_add4_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct8_add_altivec   (dst + block_offset[i], block + i*16, stride);
        }
    }
}

#define transpose4x16(r0, r1, r2, r3) {      \
    register vec_u8_t r4;                    \
    register vec_u8_t r5;                    \
    register vec_u8_t r6;                    \
    register vec_u8_t r7;                    \
                                             \
    r4 = vec_mergeh(r0, r2);  /*0, 2 set 0*/ \
    r5 = vec_mergel(r0, r2);  /*0, 2 set 1*/ \
    r6 = vec_mergeh(r1, r3);  /*1, 3 set 0*/ \
    r7 = vec_mergel(r1, r3);  /*1, 3 set 1*/ \
                                             \
    r0 = vec_mergeh(r4, r6);  /*all set 0*/  \
    r1 = vec_mergel(r4, r6);  /*all set 1*/  \
    r2 = vec_mergeh(r5, r7);  /*all set 2*/  \
    r3 = vec_mergel(r5, r7);  /*all set 3*/  \
}

static inline void write16x4(uint8_t *dst, int dst_stride,
                             register vec_u8_t r0, register vec_u8_t r1,
                             register vec_u8_t r2, register vec_u8_t r3) {
    DECLARE_ALIGNED_16(unsigned char, result[64]);
    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride/4;

    vec_st(r0,  0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);
    /* FIXME: there has to be a better way!!!! */
    *dst_int = *src_int;
    *(dst_int +   int_dst_stride) = *(src_int + 1);
    *(dst_int + 2*int_dst_stride) = *(src_int + 2);
    *(dst_int + 3*int_dst_stride) = *(src_int + 3);
    *(dst_int + 4*int_dst_stride) = *(src_int + 4);
    *(dst_int + 5*int_dst_stride) = *(src_int + 5);
    *(dst_int + 6*int_dst_stride) = *(src_int + 6);
    *(dst_int + 7*int_dst_stride) = *(src_int + 7);
    *(dst_int + 8*int_dst_stride) = *(src_int + 8);
    *(dst_int + 9*int_dst_stride) = *(src_int + 9);
    *(dst_int +10*int_dst_stride) = *(src_int + 10);
    *(dst_int +11*int_dst_stride) = *(src_int + 11);
    *(dst_int +12*int_dst_stride) = *(src_int + 12);
    *(dst_int +13*int_dst_stride) = *(src_int + 13);
    *(dst_int +14*int_dst_stride) = *(src_int + 14);
    *(dst_int +15*int_dst_stride) = *(src_int + 15);
}
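
/* write16x4 stores the four transposed vectors to an aligned scratch
 * buffer and scatters them to dst as 32-bit scalar copies: vec_st can
 * only store to 16-byte-aligned addresses, but each of the 16 destination
 * rows is only 4 bytes wide. */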

/** \brief performs a 6x16 transpose of data in src, and stores it to dst
    \todo FIXME: see if we can't spare some vec_lvsl() by factorizing them
    out of unaligned_load() */
#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) {\
    register vec_u8_t r0  = unaligned_load(0,             src);\
    register vec_u8_t r1  = unaligned_load(   src_stride, src);\
    register vec_u8_t r2  = unaligned_load(2* src_stride, src);\
    register vec_u8_t r3  = unaligned_load(3* src_stride, src);\
    register vec_u8_t r4  = unaligned_load(4* src_stride, src);\
    register vec_u8_t r5  = unaligned_load(5* src_stride, src);\
    register vec_u8_t r6  = unaligned_load(6* src_stride, src);\
    register vec_u8_t r7  = unaligned_load(7* src_stride, src);\
    register vec_u8_t r14 = unaligned_load(14*src_stride, src);\
    register vec_u8_t r15 = unaligned_load(15*src_stride, src);\
    \
    r8  = unaligned_load( 8*src_stride, src);\
    r9  = unaligned_load( 9*src_stride, src);\
    r10 = unaligned_load(10*src_stride, src);\
    r11 = unaligned_load(11*src_stride, src);\
    r12 = unaligned_load(12*src_stride, src);\
    r13 = unaligned_load(13*src_stride, src);\
    \
    /*Merge first pairs*/ \
    r0 = vec_mergeh(r0, r8);    /*0, 8*/ \
    r1 = vec_mergeh(r1, r9);    /*1, 9*/ \
    r2 = vec_mergeh(r2, r10);   /*2,10*/ \
    r3 = vec_mergeh(r3, r11);   /*3,11*/ \
    r4 = vec_mergeh(r4, r12);   /*4,12*/ \
    r5 = vec_mergeh(r5, r13);   /*5,13*/ \
    r6 = vec_mergeh(r6, r14);   /*6,14*/ \
    r7 = vec_mergeh(r7, r15);   /*7,15*/ \
    \
    /*Merge second pairs*/ \
    r8  = vec_mergeh(r0, r4);   /*0,4, 8,12 set 0*/ \
    r9  = vec_mergel(r0, r4);   /*0,4, 8,12 set 1*/ \
    r10 = vec_mergeh(r1, r5);   /*1,5, 9,13 set 0*/ \
    r11 = vec_mergel(r1, r5);   /*1,5, 9,13 set 1*/ \
    r12 = vec_mergeh(r2, r6);   /*2,6,10,14 set 0*/ \
    r13 = vec_mergel(r2, r6);   /*2,6,10,14 set 1*/ \
    r14 = vec_mergeh(r3, r7);   /*3,7,11,15 set 0*/ \
    r15 = vec_mergel(r3, r7);   /*3,7,11,15 set 1*/ \
    \
    /*Third merge*/ \
    r0 = vec_mergeh(r8,  r12);  /*0,2,4,6,8,10,12,14 set 0*/ \
    r1 = vec_mergel(r8,  r12);  /*0,2,4,6,8,10,12,14 set 1*/ \
    r2 = vec_mergeh(r9,  r13);  /*0,2,4,6,8,10,12,14 set 2*/ \
    r4 = vec_mergeh(r10, r14);  /*1,3,5,7,9,11,13,15 set 0*/ \
    r5 = vec_mergel(r10, r14);  /*1,3,5,7,9,11,13,15 set 1*/ \
    r6 = vec_mergeh(r11, r15);  /*1,3,5,7,9,11,13,15 set 2*/ \
    /* Don't need to compute 3 and 7*/ \
    \
    /*Final merge*/ \
    r8  = vec_mergeh(r0, r4);   /*all set 0*/ \
    r9  = vec_mergel(r0, r4);   /*all set 1*/ \
    r10 = vec_mergeh(r1, r5);   /*all set 2*/ \
    r11 = vec_mergel(r1, r5);   /*all set 3*/ \
    r12 = vec_mergeh(r2, r6);   /*all set 4*/ \
    r13 = vec_mergel(r2, r6);   /*all set 5*/ \
    /* Don't need to compute 14 and 15*/ \
    \
}

// out: o = |x-y| < a
static inline vec_u8_t diff_lt_altivec ( register vec_u8_t x,
                                         register vec_u8_t y,
                                         register vec_u8_t a) {

    register vec_u8_t diff = vec_subs(x, y);
    register vec_u8_t diffneg = vec_subs(y, x);
    register vec_u8_t o = vec_or(diff, diffneg); /* |x-y| */
    o = (vec_u8_t)vec_cmplt(o, a);
    return o;
}
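
/* With unsigned saturating subtraction vec_subs(x,y) is zero wherever
 * x <= y, so OR-ing the two one-sided differences yields |x-y| without
 * widening to a larger element type. */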

static inline vec_u8_t h264_deblock_mask ( register vec_u8_t p0,
                                           register vec_u8_t p1,
                                           register vec_u8_t q0,
                                           register vec_u8_t q1,
                                           register vec_u8_t alpha,
                                           register vec_u8_t beta) {

    register vec_u8_t mask;
    register vec_u8_t tempmask;

    mask = diff_lt_altivec(p0, q0, alpha);
    tempmask = diff_lt_altivec(p1, p0, beta);
    mask = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask = vec_and(mask, tempmask);

    return mask;
}

// out: newp1 = clip((p2 + ((p0 + q0 + 1) >> 1)) >> 1, p1-tc0, p1+tc0)
static inline vec_u8_t h264_deblock_q1(register vec_u8_t p0,
                                       register vec_u8_t p1,
                                       register vec_u8_t p2,
                                       register vec_u8_t q0,
                                       register vec_u8_t tc0) {

    register vec_u8_t average = vec_avg(p0, q0);
    register vec_u8_t temp;
    register vec_u8_t unclipped;
    register vec_u8_t ones;
    register vec_u8_t max;
    register vec_u8_t min;
    register vec_u8_t newp1;

    temp = vec_xor(average, p2);
    average = vec_avg(average, p2);      /*avg(p2, avg(p0, q0)) */
    ones = vec_splat_u8(1);
    temp = vec_and(temp, ones);          /*(p2^avg(p0, q0)) & 1 */
    unclipped = vec_subs(average, temp); /*(p2+((p0+q0+1)>>1))>>1 */
    max = vec_adds(p1, tc0);
    min = vec_subs(p1, tc0);
    newp1 = vec_max(min, unclipped);
    newp1 = vec_min(max, newp1);
    return newp1;
}
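
/* vec_avg rounds up: avg(a,b) = (a+b+1)>>1, but the formula above needs
 * the floor (a+b)>>1. Since (a+b)>>1 = avg(a,b) - ((a^b)&1), subtracting
 * the masked XOR bit corrects the rounding without 16-bit intermediates. */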

#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) { \
    \
    const vec_u8_t A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4)); \
    \
    register vec_u8_t pq0bit = vec_xor(p0,q0); \
    register vec_u8_t q1minus; \
    register vec_u8_t p0minus; \
    register vec_u8_t stage1; \
    register vec_u8_t stage2; \
    register vec_u8_t vec160; \
    register vec_u8_t delta; \
    register vec_u8_t deltaneg; \
    \
    q1minus = vec_nor(q1, q1);                /* 255 - q1 */ \
    stage1 = vec_avg(p1, q1minus);            /* (p1 - q1 + 256)>>1 */ \
    stage2 = vec_sr(stage1, vec_splat_u8(1)); /* (p1 - q1 + 256)>>2 = 64 + (p1 - q1) >> 2 */ \
    p0minus = vec_nor(p0, p0);                /* 255 - p0 */ \
    stage1 = vec_avg(q0, p0minus);            /* (q0 - p0 + 256)>>1 */ \
    pq0bit = vec_and(pq0bit, vec_splat_u8(1)); \
    stage2 = vec_avg(stage2, pq0bit);         /* 32 + ((q0 - p0)&1 + (p1 - q1) >> 2 + 1) >> 1 */ \
    stage2 = vec_adds(stage2, stage1);        /* 160 + ((p0 - q0) + (p1 - q1) >> 2 + 1) >> 1 */ \
    vec160 = vec_ld(0, &A0v); \
    deltaneg = vec_subs(vec160, stage2);      /* -d */ \
    delta = vec_subs(stage2, vec160);         /* d */ \
    deltaneg = vec_min(tc0masked, deltaneg); \
    delta = vec_min(tc0masked, delta); \
    p0 = vec_subs(p0, deltaneg); \
    q0 = vec_subs(q0, delta); \
    p0 = vec_adds(p0, delta); \
    q0 = vec_adds(q0, deltaneg); \
}
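
/* The whole p0/q0 update runs in 8-bit unsigned saturating arithmetic:
 * the signed delta is carried with a +160 bias (A0v = 10<<4 = 0xA0),
 * split into a positive and a negative part, both clamped to tc0, and
 * then applied with paired vec_adds/vec_subs, which reproduces the
 * signed, clipped update without widening to 16 bits. */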

#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) { \
    DECLARE_ALIGNED_16(unsigned char, temp[16]); \
    register vec_u8_t alphavec; \
    register vec_u8_t betavec; \
    register vec_u8_t mask; \
    register vec_u8_t p1mask; \
    register vec_u8_t q1mask; \
    register vector signed char tc0vec; \
    register vec_u8_t finaltc0; \
    register vec_u8_t tc0masked; \
    register vec_u8_t newp1; \
    register vec_u8_t newq1; \
    \
    temp[0] = alpha; \
    temp[1] = beta; \
    alphavec = vec_ld(0, temp); \
    betavec = vec_splat(alphavec, 0x1); \
    alphavec = vec_splat(alphavec, 0x0); \
    mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); /*if in block */ \
    \
    *((int *)temp) = *((int *)tc0); \
    tc0vec = vec_ld(0, (signed char*)temp); \
    tc0vec = vec_mergeh(tc0vec, tc0vec); \
    tc0vec = vec_mergeh(tc0vec, tc0vec); \
    mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1)));  /* if tc0[i] >= 0 */ \
    finaltc0 = vec_and((vec_u8_t)tc0vec, mask);                 /* tc = tc0 */ \
    \
    p1mask = diff_lt_altivec(p2, p0, betavec); \
    p1mask = vec_and(p1mask, mask);                             /* if ( |p2 - p0| < beta) */ \
    tc0masked = vec_and(p1mask, (vec_u8_t)tc0vec); \
    finaltc0 = vec_sub(finaltc0, p1mask);                       /* tc++ */ \
    newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked); \
    /*end if*/ \
    \
    q1mask = diff_lt_altivec(q2, q0, betavec); \
    q1mask = vec_and(q1mask, mask);                             /* if ( |q2 - q0| < beta ) */ \
    tc0masked = vec_and(q1mask, (vec_u8_t)tc0vec); \
    finaltc0 = vec_sub(finaltc0, q1mask);                       /* tc++ */ \
    newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked); \
    /*end if*/ \
    \
    h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0); \
    p1 = newp1; \
    q1 = newq1; \
}
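
/* The spec's per-pixel conditionals become byte masks here: the four
 * per-segment tc0 values are splatted across their 4-pixel lanes, lanes
 * with tc0 == -1 are masked out, and "tc++" is implemented by subtracting
 * the all-ones (-1) comparison mask from finaltc0. */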

static void h264_v_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
        register vec_u8_t p2 = vec_ld(-3*stride, pix);
        register vec_u8_t p1 = vec_ld(-2*stride, pix);
        register vec_u8_t p0 = vec_ld(-1*stride, pix);
        register vec_u8_t q0 = vec_ld(0, pix);
        register vec_u8_t q1 = vec_ld(stride, pix);
        register vec_u8_t q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0, 0, pix);
        vec_st(q1, stride, pix);
    }
}
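
/* The bitwise AND of the four tc0 values is negative only when all four
 * sign bits are set, so filtering is skipped only when every segment has
 * tc0 == -1; mixed cases fall through and rely on the per-lane masks. */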

static void h264_h_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    register vec_u8_t line0, line1, line2, line3, line4, line5;
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
        return;
    readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}

void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {

    if (has_altivec()) {
        c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
        c->put_no_rnd_h264_chroma_pixels_tab[0] = put_no_rnd_h264_chroma_mc8_altivec;
        c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
/* ff_h264_idct_add_altivec may be re-enabled once AltiVec versions of
   h264_idct_add16, h264_idct_add16intra, h264_idct_add8 are implemented
        c->h264_idct_add = ff_h264_idct_add_altivec;
*/
        c->h264_idct8_add = ff_h264_idct8_add_altivec;
        c->h264_idct8_add4 = ff_h264_idct8_add4_altivec;
        c->h264_v_loop_filter_luma = h264_v_loop_filter_luma_altivec;
        c->h264_h_loop_filter_luma = h264_h_loop_filter_luma_altivec;

#define dspfunc(PFX, IDX, NUM) \
        c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
        c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
        c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
        c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
        c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
        c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
        c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
        c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
        c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
        c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
        c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
        c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
        c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
        c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
        c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
        c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec

        dspfunc(put_h264_qpel, 0, 16);
        dspfunc(avg_h264_qpel, 0, 16);
#undef dspfunc
    }
}