/*
 * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavcodec/dsputil.h"
#include "libavcodec/h264data.h"
#include "libavcodec/h264dsp.h"

#include "dsputil_ppc.h"
#include "dsputil_altivec.h"
#include "util_altivec.h"
#include "types_altivec.h"
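/* h264_template_altivec.c is #included twice below: once with OP_U8_ALTIVEC
 * bound to PUT_OP_U8_ALTIVEC (a plain store) and once with AVG_OP_U8_ALTIVEC
 * (a rounding average against the destination).  Each pass instantiates the
 * put_* or avg_* flavor of the chroma MC and qpel lowpass kernels named by
 * the PREFIX_* macros. */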
#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)
#define OP_U8_ALTIVEC                          PUT_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec         put_h264_chroma_mc8_altivec
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec   put_no_rnd_vc1_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num             altivec_put_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec   put_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num       altivec_put_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec   put_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num       altivec_put_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec  put_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_put_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num
#define OP_U8_ALTIVEC                          AVG_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec         avg_h264_chroma_mc8_altivec
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec   avg_no_rnd_vc1_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num             altivec_avg_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec   avg_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num       altivec_avg_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec   avg_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num       altivec_avg_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec  avg_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_avg_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num
#define H264_MC(OPNAME, SIZE, CODETYPE) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}
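/* The mcXY suffix encodes the quarter-pel phase: X is the horizontal and Y
 * the vertical offset in quarter samples.  mc00 is a plain copy, mc20/mc02
 * are the 6-tap half-pel filters, mc22 filters in both directions, and the
 * remaining quarter-pel positions are formed by averaging two of those
 * intermediate planes through the pixels##_l2 helpers, following the H.264
 * interpolation scheme. */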
static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
                                            const uint8_t * src2, int dst_stride,
                                            int src_stride1, int h)
{
    int i;
    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {

        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(a, b);

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0 , dst);

        dst += dst_stride;
    }
}
static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
                                            const uint8_t * src2, int dst_stride,
                                            int src_stride1, int h)
{
    int i;
    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {

        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0 , dst);

        dst += dst_stride;
    }
}
/* Implemented but could be faster
#define put_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) put_pixels16_l2(d,s1,s2,ds,s1s,16,h)
#define avg_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) avg_pixels16_l2(d,s1,s2,ds,s1s,16,h)
*/
H264_MC(put_, 16, altivec)
H264_MC(avg_, 16, altivec)
/****************************************************************************
 * IDCT transform:
 ****************************************************************************/
#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3)               \
    /* 1st stage */                                               \
    vz0 = vec_add(vb0,vb2);       /* temp[0] = Y[0] + Y[2] */     \
    vz1 = vec_sub(vb0,vb2);       /* temp[1] = Y[0] - Y[2] */     \
    vz2 = vec_sra(vb1,vec_splat_u16(1));                          \
    vz2 = vec_sub(vz2,vb3);       /* temp[2] = Y[1].1/2 - Y[3] */ \
    vz3 = vec_sra(vb3,vec_splat_u16(1));                          \
    vz3 = vec_add(vb1,vz3);       /* temp[3] = Y[1] + Y[3].1/2 */ \
    /* 2nd stage: output */                                       \
    va0 = vec_add(vz0,vz3);       /* x[0] = temp[0] + temp[3] */  \
    va1 = vec_add(vz1,vz2);       /* x[1] = temp[1] + temp[2] */  \
    va2 = vec_sub(vz1,vz2);       /* x[2] = temp[1] - temp[2] */  \
    va3 = vec_sub(vz0,vz3)        /* x[3] = temp[0] - temp[3] */
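/* For reference, the same 4-point transform in scalar C (a sketch of the
 * H.264 4x4 inverse-transform butterfly the comments above describe; it is
 * not compiled here):
 *
 *     z0 = b0 + b2;          z1 = b0 - b2;
 *     z2 = (b1 >> 1) - b3;   z3 = b1 + (b3 >> 1);
 *     a0 = z0 + z3;          a1 = z1 + z2;
 *     a2 = z1 - z2;          a3 = z0 - z3;
 */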
#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
    b0 = vec_mergeh( a0, a0 ); \
    b1 = vec_mergeh( a1, a0 ); \
    b2 = vec_mergeh( a2, a0 ); \
    b3 = vec_mergeh( a3, a0 ); \
    a0 = vec_mergeh( b0, b2 ); \
    a1 = vec_mergel( b0, b2 ); \
    a2 = vec_mergeh( b1, b3 ); \
    a3 = vec_mergel( b1, b3 ); \
    b0 = vec_mergeh( a0, a2 ); \
    b1 = vec_mergel( a0, a2 ); \
    b2 = vec_mergeh( a1, a3 ); \
    b3 = vec_mergel( a1, a3 )
#define VEC_LOAD_U8_ADD_S16_STORE_U8(va)             \
    vdst_orig = vec_ld(0, dst);                      \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask); \
    vdst_ss = (vec_s16) vec_mergeh(zero_u8v, vdst);  \
    va = vec_add(va, vdst_ss);                       \
    va_u8 = vec_packsu(va, zero_s16v);               \
    va_u32 = vec_splat((vec_u32)va_u8, 0);           \
    vec_ste(va_u32, element, (uint32_t*)dst);
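/* Note: vec_ste() stores a single 32-bit element, so each expansion of the
 * macro above touches exactly four pixels of one row.  The packed result is
 * splatted across the whole vector first, so the same four bytes land in
 * memory no matter which word slot the (4-byte aligned) dst address selects,
 * and no read-modify-write of the neighboring bytes is needed. */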
static void ff_h264_idct_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    vec_s16 va0, va1, va2, va3;
    vec_s16 vz0, vz1, vz2, vz3;
    vec_s16 vtmp0, vtmp1, vtmp2, vtmp3;
    vec_u8 va_u8;
    vec_u32 va_u32;
    vec_s16 vdst_ss;
    const vec_u16 v6us = vec_splat_u16(6);
    vec_u8 vdst, vdst_orig;
    vec_u8 vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;
    LOAD_ZERO;

    block[0] += 32; /* add 32 as a DC-level for rounding */

    vtmp0 = vec_ld(0,block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16,block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);

    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = vec_sra(va0,v6us);
    va1 = vec_sra(va1,v6us);
    va2 = vec_sra(va2,v6us);
    va3 = vec_sra(va3,v6us);

    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
}
#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) {\
    /* a0 = SRC(0) + SRC(4); */                                           \
    vec_s16 a0v = vec_add(s0, s4);                                        \
    /* a2 = SRC(0) - SRC(4); */                                           \
    vec_s16 a2v = vec_sub(s0, s4);                                        \
    /* a4 = (SRC(2)>>1) - SRC(6); */                                      \
    vec_s16 a4v = vec_sub(vec_sra(s2, onev), s6);                         \
    /* a6 = (SRC(6)>>1) + SRC(2); */                                      \
    vec_s16 a6v = vec_add(vec_sra(s6, onev), s2);                         \
    /* b0 = a0 + a6; */                                                   \
    vec_s16 b0v = vec_add(a0v, a6v);                                      \
    /* b2 = a2 + a4; */                                                   \
    vec_s16 b2v = vec_add(a2v, a4v);                                      \
    /* b4 = a2 - a4; */                                                   \
    vec_s16 b4v = vec_sub(a2v, a4v);                                      \
    /* b6 = a0 - a6; */                                                   \
    vec_s16 b6v = vec_sub(a0v, a6v);                                      \
    /* a1 = SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */                    \
    /* a1 = (SRC(5)-SRC(3)) - (SRC(7) + (SRC(7)>>1)); */                  \
    vec_s16 a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    /* a3 = SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */                    \
    /* a3 = (SRC(7)+SRC(1)) - (SRC(3) + (SRC(3)>>1)); */                  \
    vec_s16 a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) ); \
    /* a5 = SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */                    \
    /* a5 = (SRC(7)-SRC(1)) + SRC(5) + (SRC(5)>>1); */                    \
    vec_s16 a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) ); \
    /* a7 = SRC(5)+SRC(3) + SRC(1) + (SRC(1)>>1); */                      \
    vec_s16 a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) ); \
    /* b1 = (a7>>2) + a1; */                                              \
    vec_s16 b1v = vec_add( vec_sra(a7v, twov), a1v);                      \
    /* b3 = a3 + (a5>>2); */                                              \
    vec_s16 b3v = vec_add(a3v, vec_sra(a5v, twov));                       \
    /* b5 = (a3>>2) - a5; */                                              \
    vec_s16 b5v = vec_sub( vec_sra(a3v, twov), a5v);                      \
    /* b7 = a7 - (a1>>2); */                                              \
    vec_s16 b7v = vec_sub( a7v, vec_sra(a1v, twov));                      \
    /* DST(0, b0 + b7); */                                                \
    d0 = vec_add(b0v, b7v);                                               \
    /* DST(1, b2 + b5); */                                                \
    d1 = vec_add(b2v, b5v);                                               \
    /* DST(2, b4 + b3); */                                                \
    d2 = vec_add(b4v, b3v);                                               \
    /* DST(3, b6 + b1); */                                                \
    d3 = vec_add(b6v, b1v);                                               \
    /* DST(4, b6 - b1); */                                                \
    d4 = vec_sub(b6v, b1v);                                               \
    /* DST(5, b4 - b3); */                                                \
    d5 = vec_sub(b4v, b3v);                                               \
    /* DST(6, b2 - b5); */                                                \
    d6 = vec_sub(b2v, b5v);                                               \
    /* DST(7, b0 - b7); */                                                \
    d7 = vec_sub(b0v, b7v);                                               \
}
#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
    /* unaligned load */                                       \
    vec_u8 hv = vec_ld( 0, dest );                             \
    vec_u8 lv = vec_ld( 7, dest );                             \
    vec_u8 dstv   = vec_perm( hv, lv, (vec_u8)perm_ldv );      \
    vec_s16 idct_sh6 = vec_sra(idctv, sixv);                   \
    vec_u16 dst16 = (vec_u16)vec_mergeh(zero_u8v, dstv);       \
    vec_s16 idstsum = vec_adds(idct_sh6, (vec_s16)dst16);      \
    vec_u8 idstsum8 = vec_packsu(zero_s16v, idstsum);          \
    vec_u8 edgehv;                                             \
    /* unaligned store */                                      \
    vec_u8 bodyv  = vec_perm( idstsum8, idstsum8, perm_stv );  \
    vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv );       \
    lv    = vec_sel( lv, bodyv, edgelv );                      \
    vec_st( lv, 7, dest );                                     \
    hv    = vec_ld( 0, dest );                                 \
    edgehv = vec_perm( zero_u8v, sel, perm_stv );              \
    hv    = vec_sel( hv, bodyv, edgehv );                      \
    vec_st( hv, 0, dest );                                     \
}
static void ff_h264_idct8_add_altivec( uint8_t *dst, DCTELEM *dct, int stride ) {
    vec_s16 s0, s1, s2, s3, s4, s5, s6, s7;
    vec_s16 d0, d1, d2, d3, d4, d5, d6, d7;
    vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;

    vec_u8 perm_ldv = vec_lvsl(0, dst);
    vec_u8 perm_stv = vec_lvsr(8, dst);

    const vec_u16 onev = vec_splat_u16(1);
    const vec_u16 twov = vec_splat_u16(2);
    const vec_u16 sixv = vec_splat_u16(6);

    const vec_u8 sel = (vec_u8) {0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1};
    LOAD_ZERO;

    dct[0] += 32; // rounding for the >>6 at the end

    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);

    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
                     d0, d1, d2, d3, d4, d5, d6, d7);

    TRANSPOSE8( d0, d1, d2, d3, d4, d5, d6, d7 );

    IDCT8_1D_ALTIVEC(d0, d1, d2, d3, d4, d5, d6, d7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
}
static av_always_inline
void h264_idct_dc_add_internal(uint8_t *dst, DCTELEM *block, int stride, int size)
{
    vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner;
    vec_s16 dc16;
    DECLARE_ALIGNED(16, int, dc);
    LOAD_ZERO;
    int i;

    dc = (block[0] + 32) >> 6;
    dc16 = vec_splat((vec_s16) vec_lde(0, &dc), 1);

    if (size == 4)
        dc16 = vec_sld(dc16, zero_s16v, 8);
    dcplus = vec_packsu(dc16, zero_s16v);
    dcminus = vec_packsu(vec_sub(zero_s16v, dc16), zero_s16v);

    aligner = vec_lvsr(0, dst);
    dcplus = vec_perm(dcplus, dcplus, aligner);
    dcminus = vec_perm(dcminus, dcminus, aligner);

    for (i = 0; i < size; i += 4) {
        v0 = vec_ld(0, dst+0*stride);
        v1 = vec_ld(0, dst+1*stride);
        v2 = vec_ld(0, dst+2*stride);
        v3 = vec_ld(0, dst+3*stride);

        v0 = vec_adds(v0, dcplus);
        v1 = vec_adds(v1, dcplus);
        v2 = vec_adds(v2, dcplus);
        v3 = vec_adds(v3, dcplus);

        v0 = vec_subs(v0, dcminus);
        v1 = vec_subs(v1, dcminus);
        v2 = vec_subs(v2, dcminus);
        v3 = vec_subs(v3, dcminus);

        vec_st(v0, 0, dst+0*stride);
        vec_st(v1, 0, dst+1*stride);
        vec_st(v2, 0, dst+2*stride);
        vec_st(v3, 0, dst+3*stride);

        dst += 4*stride;
    }
}
static void h264_idct_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 4);
}
static void ff_h264_idct8_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 8);
}
static void ff_h264_idct_add16_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}
static void ff_h264_idct_add16intra_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ]) ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        else if(block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
    }
}
static void ff_h264_idct8_add4_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct8_add_altivec   (dst + block_offset[i], block + i*16, stride);
        }
    }
}
static void ff_h264_idct_add8_altivec(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=16; i<16+8; i++){
        if(nnzc[ scan8[i] ])
            ff_h264_idct_add_altivec(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
        else if(block[i*16])
            h264_idct_dc_add_altivec(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
    }
}
#define transpose4x16(r0, r1, r2, r3) {      \
    register vec_u8 r4;                      \
    register vec_u8 r5;                      \
    register vec_u8 r6;                      \
    register vec_u8 r7;                      \
                                             \
    r4 = vec_mergeh(r0, r2);  /*0, 2 set 0*/ \
    r5 = vec_mergel(r0, r2);  /*0, 2 set 1*/ \
    r6 = vec_mergeh(r1, r3);  /*1, 3 set 0*/ \
    r7 = vec_mergel(r1, r3);  /*1, 3 set 1*/ \
                                             \
    r0 = vec_mergeh(r4, r6);  /*all set 0*/  \
    r1 = vec_mergel(r4, r6);  /*all set 1*/  \
    r2 = vec_mergeh(r5, r7);  /*all set 2*/  \
    r3 = vec_mergel(r5, r7);  /*all set 3*/  \
}
static inline void write16x4(uint8_t *dst, int dst_stride,
                             register vec_u8 r0, register vec_u8 r1,
                             register vec_u8 r2, register vec_u8 r3) {
    DECLARE_ALIGNED(16, unsigned char, result)[64];
    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride/4;

    vec_st(r0,  0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);
    /* FIXME: there has to be a better way!!!! */
    *dst_int = *src_int;
    *(dst_int +   int_dst_stride) = *(src_int + 1);
    *(dst_int + 2*int_dst_stride) = *(src_int + 2);
    *(dst_int + 3*int_dst_stride) = *(src_int + 3);
    *(dst_int + 4*int_dst_stride) = *(src_int + 4);
    *(dst_int + 5*int_dst_stride) = *(src_int + 5);
    *(dst_int + 6*int_dst_stride) = *(src_int + 6);
    *(dst_int + 7*int_dst_stride) = *(src_int + 7);
    *(dst_int + 8*int_dst_stride) = *(src_int + 8);
    *(dst_int + 9*int_dst_stride) = *(src_int + 9);
    *(dst_int +10*int_dst_stride) = *(src_int + 10);
    *(dst_int +11*int_dst_stride) = *(src_int + 11);
    *(dst_int +12*int_dst_stride) = *(src_int + 12);
    *(dst_int +13*int_dst_stride) = *(src_int + 13);
    *(dst_int +14*int_dst_stride) = *(src_int + 14);
    *(dst_int +15*int_dst_stride) = *(src_int + 15);
}
/** \brief performs a 6x16 transpose of data in src, and stores it to dst
    \todo FIXME: see if we can't spare some vec_lvsl() by them factorizing
    out of unaligned_load() */
#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) {\
    register vec_u8 r0  = unaligned_load(0,             src);   \
    register vec_u8 r1  = unaligned_load(   src_stride, src);   \
    register vec_u8 r2  = unaligned_load(2* src_stride, src);   \
    register vec_u8 r3  = unaligned_load(3* src_stride, src);   \
    register vec_u8 r4  = unaligned_load(4* src_stride, src);   \
    register vec_u8 r5  = unaligned_load(5* src_stride, src);   \
    register vec_u8 r6  = unaligned_load(6* src_stride, src);   \
    register vec_u8 r7  = unaligned_load(7* src_stride, src);   \
    register vec_u8 r14 = unaligned_load(14*src_stride, src);   \
    register vec_u8 r15 = unaligned_load(15*src_stride, src);   \
                                                                \
    r8  = unaligned_load( 8*src_stride, src);                   \
    r9  = unaligned_load( 9*src_stride, src);                   \
    r10 = unaligned_load(10*src_stride, src);                   \
    r11 = unaligned_load(11*src_stride, src);                   \
    r12 = unaligned_load(12*src_stride, src);                   \
    r13 = unaligned_load(13*src_stride, src);                   \
                                                                \
    /*Merge first pairs*/                                       \
    r0 = vec_mergeh(r0, r8);    /*0, 8*/                        \
    r1 = vec_mergeh(r1, r9);    /*1, 9*/                        \
    r2 = vec_mergeh(r2, r10);   /*2,10*/                        \
    r3 = vec_mergeh(r3, r11);   /*3,11*/                        \
    r4 = vec_mergeh(r4, r12);   /*4,12*/                        \
    r5 = vec_mergeh(r5, r13);   /*5,13*/                        \
    r6 = vec_mergeh(r6, r14);   /*6,14*/                        \
    r7 = vec_mergeh(r7, r15);   /*7,15*/                        \
                                                                \
    /*Merge second pairs*/                                      \
    r8  = vec_mergeh(r0, r4);   /*0,4, 8,12 set 0*/             \
    r9  = vec_mergel(r0, r4);   /*0,4, 8,12 set 1*/             \
    r10 = vec_mergeh(r1, r5);   /*1,5, 9,13 set 0*/             \
    r11 = vec_mergel(r1, r5);   /*1,5, 9,13 set 1*/             \
    r12 = vec_mergeh(r2, r6);   /*2,6,10,14 set 0*/             \
    r13 = vec_mergel(r2, r6);   /*2,6,10,14 set 1*/             \
    r14 = vec_mergeh(r3, r7);   /*3,7,11,15 set 0*/             \
    r15 = vec_mergel(r3, r7);   /*3,7,11,15 set 1*/             \
                                                                \
    /*Third merge*/                                             \
    r0 = vec_mergeh(r8,  r12);  /*0,2,4,6,8,10,12,14 set 0*/    \
    r1 = vec_mergel(r8,  r12);  /*0,2,4,6,8,10,12,14 set 1*/    \
    r2 = vec_mergeh(r9,  r13);  /*0,2,4,6,8,10,12,14 set 2*/    \
    r4 = vec_mergeh(r10, r14);  /*1,3,5,7,9,11,13,15 set 0*/    \
    r5 = vec_mergel(r10, r14);  /*1,3,5,7,9,11,13,15 set 1*/    \
    r6 = vec_mergeh(r11, r15);  /*1,3,5,7,9,11,13,15 set 2*/    \
    /* Don't need to compute 3 and 7*/                          \
                                                                \
    /*Final merge*/                                             \
    r8  = vec_mergeh(r0, r4);   /*all set 0*/                   \
    r9  = vec_mergel(r0, r4);   /*all set 1*/                   \
    r10 = vec_mergeh(r1, r5);   /*all set 2*/                   \
    r11 = vec_mergel(r1, r5);   /*all set 3*/                   \
    r12 = vec_mergeh(r2, r6);   /*all set 4*/                   \
    r13 = vec_mergel(r2, r6);   /*all set 5*/                   \
    /* Don't need to compute 14 and 15*/                        \
}
// out: o = |x-y| < a
static inline vec_u8 diff_lt_altivec ( register vec_u8 x,
                                       register vec_u8 y,
                                       register vec_u8 a) {

    register vec_u8 diff = vec_subs(x, y);
    register vec_u8 diffneg = vec_subs(y, x);
    register vec_u8 o = vec_or(diff, diffneg); /* |x-y| */
    o = (vec_u8)vec_cmplt(o, a);
    return o;
}
static inline vec_u8 h264_deblock_mask ( register vec_u8 p0,
                                         register vec_u8 p1,
                                         register vec_u8 q0,
                                         register vec_u8 q1,
                                         register vec_u8 alpha,
                                         register vec_u8 beta) {

    register vec_u8 mask;
    register vec_u8 tempmask;

    mask = diff_lt_altivec(p0, q0, alpha);
    tempmask = diff_lt_altivec(p1, p0, beta);
    mask = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask = vec_and(mask, tempmask);

    return mask;
}
// out: newp1 = clip((p2 + ((p0 + q0 + 1) >> 1)) >> 1, p1-tc0, p1+tc0)
static inline vec_u8 h264_deblock_q1(register vec_u8 p0,
                                     register vec_u8 p1,
                                     register vec_u8 p2,
                                     register vec_u8 q0,
                                     register vec_u8 tc0) {

    register vec_u8 average = vec_avg(p0, q0);
    register vec_u8 temp;
    register vec_u8 uncliped;
    register vec_u8 ones;
    register vec_u8 max;
    register vec_u8 min;
    register vec_u8 newp1;

    temp = vec_xor(average, p2);
    average = vec_avg(average, p2);     /*avg(p2, avg(p0, q0)) */
    ones = vec_splat_u8(1);
    temp = vec_and(temp, ones);         /*(p2^avg(p0, q0)) & 1 */
    uncliped = vec_subs(average, temp); /*(p2+((p0+q0+1)>>1))>>1 */
    max = vec_adds(p1, tc0);
    min = vec_subs(p1, tc0);
    newp1 = vec_max(min, uncliped);
    newp1 = vec_min(max, newp1);
    return newp1;
}
#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) {                                           \
                                                                                                  \
    const vec_u8 A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4));                                 \
                                                                                                  \
    register vec_u8 pq0bit = vec_xor(p0,q0);                                                      \
    register vec_u8 q1minus;                                                                      \
    register vec_u8 p0minus;                                                                      \
    register vec_u8 stage1;                                                                       \
    register vec_u8 stage2;                                                                       \
    register vec_u8 vec160;                                                                       \
    register vec_u8 delta;                                                                        \
    register vec_u8 deltaneg;                                                                     \
                                                                                                  \
    q1minus = vec_nor(q1, q1);                 /* 255 - q1 */                                     \
    stage1 = vec_avg(p1, q1minus);             /* (p1 - q1 + 256)>>1 */                           \
    stage2 = vec_sr(stage1, vec_splat_u8(1));  /* (p1 - q1 + 256)>>2 = 64 + (p1 - q1) >> 2 */     \
    p0minus = vec_nor(p0, p0);                 /* 255 - p0 */                                     \
    stage1 = vec_avg(q0, p0minus);             /* (q0 - p0 + 256)>>1 */                           \
    pq0bit = vec_and(pq0bit, vec_splat_u8(1));                                                    \
    stage2 = vec_avg(stage2, pq0bit);          /* 32 + ((q0 - p0)&1 + (p1 - q1) >> 2 + 1) >> 1 */ \
    stage2 = vec_adds(stage2, stage1);         /* 160 + ((p0 - q0) + (p1 - q1) >> 2 + 1) >> 1 */  \
    vec160 = vec_ld(0, &A0v);                                                                     \
    deltaneg = vec_subs(vec160, stage2);       /* -d */                                           \
    delta = vec_subs(stage2, vec160);          /* d */                                            \
    deltaneg = vec_min(tc0masked, deltaneg);                                                      \
    delta = vec_min(tc0masked, delta);                                                            \
    p0 = vec_subs(p0, deltaneg);                                                                  \
    q0 = vec_subs(q0, delta);                                                                     \
    p0 = vec_adds(p0, delta);                                                                     \
    q0 = vec_adds(q0, deltaneg);                                                                  \
}
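/* A0v above is 10 << 4 = 160, the bias that keeps the whole delta
 * computation in unsigned bytes: stage2 ends up as 160 + d.  Because
 * vec_subs() saturates at 0, at most one of delta/deltaneg is nonzero,
 * and vec_min() against tc0masked then limits it to the tc0 range,
 * which matches clipping d to [-tc0, tc0] in the scalar filter. */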
#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) {             \
    DECLARE_ALIGNED(16, unsigned char, temp)[16];                                             \
    register vec_u8 alphavec;                                                                 \
    register vec_u8 betavec;                                                                  \
    register vec_u8 mask;                                                                     \
    register vec_u8 p1mask;                                                                   \
    register vec_u8 q1mask;                                                                   \
    register vector signed char tc0vec;                                                       \
    register vec_u8 finaltc0;                                                                 \
    register vec_u8 tc0masked;                                                                \
    register vec_u8 newp1;                                                                    \
    register vec_u8 newq1;                                                                    \
                                                                                              \
    temp[0] = alpha;                                                                          \
    temp[1] = beta;                                                                           \
    alphavec = vec_ld(0, temp);                                                               \
    betavec = vec_splat(alphavec, 0x1);                                                       \
    alphavec = vec_splat(alphavec, 0x0);                                                      \
    mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); /*if in block */             \
                                                                                              \
    *((int *)temp) = *((int *)tc0);                                                           \
    tc0vec = vec_ld(0, (signed char*)temp);                                                   \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                      \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                      \
    mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1)));  /* if tc0[i] >= 0 */          \
    finaltc0 = vec_and((vec_u8)tc0vec, mask);                   /* tc = tc0 */                \
                                                                                              \
    p1mask = diff_lt_altivec(p2, p0, betavec);                                                \
    p1mask = vec_and(p1mask, mask);                             /* if ( |p2 - p0| < beta) */  \
    tc0masked = vec_and(p1mask, (vec_u8)tc0vec);                                              \
    finaltc0 = vec_sub(finaltc0, p1mask);                       /* tc++ */                    \
    newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked);                                       \
    /*end if*/                                                                                \
                                                                                              \
    q1mask = diff_lt_altivec(q2, q0, betavec);                                                \
    q1mask = vec_and(q1mask, mask);                             /* if ( |q2 - q0| < beta ) */ \
    tc0masked = vec_and(q1mask, (vec_u8)tc0vec);                                              \
    finaltc0 = vec_sub(finaltc0, q1mask);                       /* tc++ */                    \
    newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked);                                       \
    /*end if*/                                                                                \
                                                                                              \
    h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0);                                             \
    p1 = newp1;                                                                               \
    q1 = newq1;                                                                               \
}
static void h264_v_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
        register vec_u8 p2 = vec_ld(-3*stride, pix);
        register vec_u8 p1 = vec_ld(-2*stride, pix);
        register vec_u8 p0 = vec_ld(-1*stride, pix);
        register vec_u8 q0 = vec_ld(0, pix);
        register vec_u8 q1 = vec_ld(stride, pix);
        register vec_u8 q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0, 0, pix);
        vec_st(q1, stride, pix);
    }
}
static void h264_h_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    register vec_u8 line0, line1, line2, line3, line4, line5;
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
        return;
    readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}
static av_always_inline
void weight_h264_WxH_altivec(uint8_t *block, int stride, int log2_denom, int weight, int offset, int w, int h)
{
    int y, aligned;
    vec_u8 vblock;
    vec_s16 vtemp, vweight, voffset, v0, v1;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset <<= log2_denom;
    if(log2_denom) offset += 1<<(log2_denom-1);
    temp[0] = log2_denom;
    temp[1] = weight;
    temp[2] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweight = vec_splat(vtemp, 3);
    voffset = vec_splat(vtemp, 5);
    aligned = !((unsigned long)block & 0xf);

    for (y=0; y<h; y++) {
        vblock = vec_ld(0, block);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vblock);
        v1 = (vec_s16)vec_mergel(zero_u8v, vblock);

        if (w == 16 || aligned) {
            v0 = vec_mladd(v0, vweight, zero_s16v);
            v0 = vec_adds(v0, voffset);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !aligned) {
            v1 = vec_mladd(v1, vweight, zero_s16v);
            v1 = vec_adds(v1, voffset);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vblock = vec_packsu(v0, v1);
        vec_st(vblock, 0, block);

        block += stride;
    }
}
static av_always_inline
void biweight_h264_WxH_altivec(uint8_t *dst, uint8_t *src, int stride, int log2_denom,
                               int weightd, int weights, int offset, int w, int h)
{
    int y, dst_aligned, src_aligned;
    vec_u8 vsrc, vdst;
    vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset = ((offset + 1) | 1) << log2_denom;
    temp[0] = log2_denom+1;
    temp[1] = weights;
    temp[2] = weightd;
    temp[3] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweights = vec_splat(vtemp, 3);
    vweightd = vec_splat(vtemp, 5);
    voffset = vec_splat(vtemp, 7);
    dst_aligned = !((unsigned long)dst & 0xf);
    src_aligned = !((unsigned long)src & 0xf);

    for (y=0; y<h; y++) {
        vdst = vec_ld(0, dst);
        vsrc = vec_ld(0, src);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vdst);
        v1 = (vec_s16)vec_mergel(zero_u8v, vdst);
        v2 = (vec_s16)vec_mergeh(zero_u8v, vsrc);
        v3 = (vec_s16)vec_mergel(zero_u8v, vsrc);

        if (w == 8) {
            if (src_aligned)
                v3 = v2;
            else
                v2 = v3;
        }

        if (w == 16 || dst_aligned) {
            v0 = vec_mladd(v0, vweightd, zero_s16v);
            v2 = vec_mladd(v2, vweights, zero_s16v);

            v0 = vec_adds(v0, voffset);
            v0 = vec_adds(v0, v2);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !dst_aligned) {
            v1 = vec_mladd(v1, vweightd, zero_s16v);
            v3 = vec_mladd(v3, vweights, zero_s16v);

            v1 = vec_adds(v1, voffset);
            v1 = vec_adds(v1, v3);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vdst = vec_packsu(v0, v1);
        vec_st(vdst, 0, dst);

        dst += stride;
        src += stride;
    }
}
#define H264_WEIGHT(W,H) \
static void ff_weight_h264_pixels ## W ## x ## H ## _altivec(uint8_t *block, int stride, int log2_denom, int weight, int offset){ \
    weight_h264_WxH_altivec(block, stride, log2_denom, weight, offset, W, H); \
}\
static void ff_biweight_h264_pixels ## W ## x ## H ## _altivec(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
    biweight_h264_WxH_altivec(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \
}

H264_WEIGHT(16,16)
H264_WEIGHT(16, 8)
H264_WEIGHT( 8,16)
H264_WEIGHT( 8, 8)
H264_WEIGHT( 8, 4)
void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {

    if (has_altivec()) {
        c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
        c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
        c->put_no_rnd_vc1_chroma_pixels_tab[0] = put_no_rnd_vc1_chroma_mc8_altivec;
        c->avg_no_rnd_vc1_chroma_pixels_tab[0] = avg_no_rnd_vc1_chroma_mc8_altivec;

#define dspfunc(PFX, IDX, NUM) \
        c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
        c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
        c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
        c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
        c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
        c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
        c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
        c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
        c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
        c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
        c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
        c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
        c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
        c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
        c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
        c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec

        dspfunc(put_h264_qpel, 0, 16);
        dspfunc(avg_h264_qpel, 0, 16);
#undef dspfunc
    }
}
void ff_h264dsp_init_ppc(H264DSPContext *c)
{
    if (has_altivec()) {
        c->h264_idct_add        = ff_h264_idct_add_altivec;
        c->h264_idct_add8       = ff_h264_idct_add8_altivec;
        c->h264_idct_add16      = ff_h264_idct_add16_altivec;
        c->h264_idct_add16intra = ff_h264_idct_add16intra_altivec;
        c->h264_idct_dc_add     = h264_idct_dc_add_altivec;
        c->h264_idct8_dc_add    = ff_h264_idct8_dc_add_altivec;
        c->h264_idct8_add       = ff_h264_idct8_add_altivec;
        c->h264_idct8_add4      = ff_h264_idct8_add4_altivec;
        c->h264_v_loop_filter_luma = h264_v_loop_filter_luma_altivec;
        c->h264_h_loop_filter_luma = h264_h_loop_filter_luma_altivec;

        c->weight_h264_pixels_tab[0]   = ff_weight_h264_pixels16x16_altivec;
        c->weight_h264_pixels_tab[1]   = ff_weight_h264_pixels16x8_altivec;
        c->weight_h264_pixels_tab[2]   = ff_weight_h264_pixels8x16_altivec;
        c->weight_h264_pixels_tab[3]   = ff_weight_h264_pixels8x8_altivec;
        c->weight_h264_pixels_tab[4]   = ff_weight_h264_pixels8x4_altivec;
        c->biweight_h264_pixels_tab[0] = ff_biweight_h264_pixels16x16_altivec;
        c->biweight_h264_pixels_tab[1] = ff_biweight_h264_pixels16x8_altivec;
        c->biweight_h264_pixels_tab[2] = ff_biweight_h264_pixels8x16_altivec;
        c->biweight_h264_pixels_tab[3] = ff_biweight_h264_pixels8x8_altivec;
        c->biweight_h264_pixels_tab[4] = ff_biweight_h264_pixels8x4_altivec;
    }
}