some MMX optimizations for the CAVS decoder
[libav.git] / libavcodec/i386/cavsdsp_mmx.c
/*
 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
 * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de>
 *
 * MMX optimised DSP functions, based on H.264 optimisations by
 * Michael Niedermayer and Loren Merritt
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "../dsputil.h"
#include "common.h"

DECLARE_ALIGNED_8(static const uint64_t, ff_pw_4 ) = 0x0004000400040004ULL;
DECLARE_ALIGNED_8(static const uint64_t, ff_pw_5 ) = 0x0005000500050005ULL;
DECLARE_ALIGNED_8(static const uint64_t, ff_pw_7 ) = 0x0007000700070007ULL;
DECLARE_ALIGNED_8(static const uint64_t, ff_pw_42) = 0x002A002A002A002AULL;
DECLARE_ALIGNED_8(static const uint64_t, ff_pw_64) = 0x0040004000400040ULL;
DECLARE_ALIGNED_8(static const uint64_t, ff_pw_96) = 0x0060006000600060ULL;
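
/* Each ff_pw_* constant replicates one 16-bit value across the four word
 * lanes of a 64-bit MMX operand, so a single paddw/pmullw applies that
 * scalar to four pixels at once.  Per-lane, "paddw ff_pw_4" is simply:
 *
 *     for (i = 0; i < 4; i++)
 *         lane[i] += 4;
 */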

/*****************************************************************************
 *
 * inverse transform
 *
 ****************************************************************************/

#define SUMSUB_BA( a, b ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "psubw "#a", "#b" \n\t"

#define SBUTTERFLY(a,b,t,n)\
    "movq " #a ", " #t "          \n\t" /* abcd */\
    "punpckl" #n " " #b ", " #a " \n\t" /* aebf */\
    "punpckh" #n " " #b ", " #t " \n\t" /* cgdh */

#define TRANSPOSE4(a,b,c,d,t)\
    SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\
    SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\
    SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\
    SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */
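
/* TRANSPOSE4(a, b, c, d, t) transposes a 4x4 block of words held one row
 * per register.  Note the output permutation: the transposed rows end up
 * in a, d, t, c (in that order), which is why the callers below store the
 * registers in a seemingly shuffled sequence. */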

static inline void cavs_idct8_1d(int16_t *block, uint64_t bias)
{
    asm volatile(
        "movq 112(%0), %%mm4   \n\t" /* mm4 = src7 */
        "movq  16(%0), %%mm5   \n\t" /* mm5 = src1 */
        "movq  80(%0), %%mm2   \n\t" /* mm2 = src5 */
        "movq  48(%0), %%mm7   \n\t" /* mm7 = src3 */
        "movq  %%mm4, %%mm0    \n\t"
        "movq  %%mm5, %%mm3    \n\t"
        "movq  %%mm2, %%mm6    \n\t"
        "movq  %%mm7, %%mm1    \n\t"

        "paddw %%mm4, %%mm4    \n\t" /* mm4 = 2*src7 */
        "paddw %%mm3, %%mm3    \n\t" /* mm3 = 2*src1 */
        "paddw %%mm6, %%mm6    \n\t" /* mm6 = 2*src5 */
        "paddw %%mm1, %%mm1    \n\t" /* mm1 = 2*src3 */
        "paddw %%mm4, %%mm0    \n\t" /* mm0 = 3*src7 */
        "paddw %%mm3, %%mm5    \n\t" /* mm5 = 3*src1 */
        "paddw %%mm6, %%mm2    \n\t" /* mm2 = 3*src5 */
        "paddw %%mm1, %%mm7    \n\t" /* mm7 = 3*src3 */
        "psubw %%mm4, %%mm5    \n\t" /* mm5 = 3*src1 - 2*src7 = a0 */
        "paddw %%mm6, %%mm7    \n\t" /* mm7 = 3*src3 + 2*src5 = a1 */
        "psubw %%mm2, %%mm1    \n\t" /* mm1 = 2*src3 - 3*src5 = a2 */
        "paddw %%mm0, %%mm3    \n\t" /* mm3 = 2*src1 + 3*src7 = a3 */

        "movq  %%mm5, %%mm4    \n\t"
        "movq  %%mm7, %%mm6    \n\t"
        "movq  %%mm3, %%mm0    \n\t"
        "movq  %%mm1, %%mm2    \n\t"
        SUMSUB_BA( %%mm7, %%mm5 )    /* mm7 = a0 + a1  mm5 = a0 - a1 */
        "paddw %%mm3, %%mm7    \n\t" /* mm7 = a0 + a1 + a3 */
        "paddw %%mm1, %%mm5    \n\t" /* mm5 = a0 - a1 + a2 */
        "paddw %%mm7, %%mm7    \n\t"
        "paddw %%mm5, %%mm5    \n\t"
        "paddw %%mm6, %%mm7    \n\t" /* mm7 = b4 */
        "paddw %%mm4, %%mm5    \n\t" /* mm5 = b5 */

        SUMSUB_BA( %%mm1, %%mm3 )    /* mm1 = a3 + a2  mm3 = a3 - a2 */
        "psubw %%mm1, %%mm4    \n\t" /* mm4 = a0 - a2 - a3 */
        "movq  %%mm4, %%mm1    \n\t" /* mm1 = a0 - a2 - a3 */
        "psubw %%mm6, %%mm3    \n\t" /* mm3 = a3 - a2 - a1 */
        "paddw %%mm1, %%mm1    \n\t"
        "paddw %%mm3, %%mm3    \n\t"
        "psubw %%mm2, %%mm1    \n\t" /* mm1 = b7 */
        "paddw %%mm0, %%mm3    \n\t" /* mm3 = b6 */

        "movq  32(%0), %%mm2   \n\t" /* mm2 = src2 */
        "movq  96(%0), %%mm6   \n\t" /* mm6 = src6 */
        "movq  %%mm2, %%mm4    \n\t"
        "movq  %%mm6, %%mm0    \n\t"
        "psllw $2, %%mm4       \n\t" /* mm4 = 4*src2 */
        "psllw $2, %%mm6       \n\t" /* mm6 = 4*src6 */
        "paddw %%mm4, %%mm2    \n\t" /* mm2 = 5*src2 */
        "paddw %%mm6, %%mm0    \n\t" /* mm0 = 5*src6 */
        "paddw %%mm2, %%mm2    \n\t"
        "paddw %%mm0, %%mm0    \n\t"
        "psubw %%mm0, %%mm4    \n\t" /* mm4 = 4*src2 - 10*src6 = a7 */
        "paddw %%mm2, %%mm6    \n\t" /* mm6 = 4*src6 + 10*src2 = a6 */

        "movq    (%0), %%mm2   \n\t" /* mm2 = src0 */
        "movq  64(%0), %%mm0   \n\t" /* mm0 = src4 */
        SUMSUB_BA( %%mm0, %%mm2 )    /* mm0 = src0 + src4  mm2 = src0 - src4 */
        "psllw $3, %%mm0       \n\t"
        "psllw $3, %%mm2       \n\t"
        "paddw %1, %%mm0       \n\t" /* add rounding bias */
        "paddw %1, %%mm2       \n\t" /* add rounding bias */

        SUMSUB_BA( %%mm6, %%mm0 )    /* mm6 = a4 + a6  mm0 = a4 - a6 */
        SUMSUB_BA( %%mm4, %%mm2 )    /* mm4 = a5 + a7  mm2 = a5 - a7 */
        SUMSUB_BA( %%mm7, %%mm6 )    /* mm7 = dst0  mm6 = dst7 */
        SUMSUB_BA( %%mm5, %%mm4 )    /* mm5 = dst1  mm4 = dst6 */
        SUMSUB_BA( %%mm3, %%mm2 )    /* mm3 = dst2  mm2 = dst5 */
        SUMSUB_BA( %%mm1, %%mm0 )    /* mm1 = dst3  mm0 = dst4 */
        :: "r"(block), "m"(bias)
    );
}
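
/* For reference, what one call computes per column, in scalar C (this
 * mirrors the plain-C IDCT in cavs.c; srcN is the coefficient in row N
 * of the current 4-column slice):
 *
 *     a0 = 3*src1 - 2*src7;          b4 = 2*(a0 + a1 + a3) + a1;
 *     a1 = 3*src3 + 2*src5;          b5 = 2*(a0 - a1 + a2) + a0;
 *     a2 = 2*src3 - 3*src5;          b6 = 2*(a3 - a2 - a1) + a3;
 *     a3 = 2*src1 + 3*src7;          b7 = 2*(a0 - a2 - a3) - a2;
 *
 *     a4 = 8*(src0 + src4) + bias;   a6 = 10*src2 +  4*src6;
 *     a5 = 8*(src0 - src4) + bias;   a7 =  4*src2 - 10*src6;
 *
 *     dst0 = a4 + a6 + b4;   dst7 = a4 + a6 - b4;
 *     dst1 = a5 + a7 + b5;   dst6 = a5 + a7 - b5;   (and so on for the
 *                                                    remaining pairs)
 *
 * The results stay in mm0..mm7; the caller shifts them right by 3 (row
 * pass) or 7 (column pass). */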

static void cavs_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    int i;
    DECLARE_ALIGNED_8(int16_t, b2[64]);

    for(i=0; i<2; i++){
        DECLARE_ALIGNED_8(uint64_t, tmp);

        cavs_idct8_1d(block+4*i, ff_pw_4);

        asm volatile(
            "psraw $3, %%mm7      \n\t"
            "psraw $3, %%mm6      \n\t"
            "psraw $3, %%mm5      \n\t"
            "psraw $3, %%mm4      \n\t"
            "psraw $3, %%mm3      \n\t"
            "psraw $3, %%mm2      \n\t"
            "psraw $3, %%mm1      \n\t"
            "psraw $3, %%mm0      \n\t"
            "movq  %%mm7, %0      \n\t"
            TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 )
            "movq  %%mm0,  8(%1)  \n\t"
            "movq  %%mm6, 24(%1)  \n\t"
            "movq  %%mm7, 40(%1)  \n\t"
            "movq  %%mm4, 56(%1)  \n\t"
            "movq  %0, %%mm7      \n\t"
            TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 )
            "movq  %%mm7,   (%1)  \n\t"
            "movq  %%mm1, 16(%1)  \n\t"
            "movq  %%mm0, 32(%1)  \n\t"
            "movq  %%mm3, 48(%1)  \n\t"
            : "=m"(tmp)
            : "r"(b2+32*i)
            : "memory"
        );
    }

    for(i=0; i<2; i++){
        cavs_idct8_1d(b2+4*i, ff_pw_64);

        asm volatile(
            "psraw $7, %%mm7      \n\t"
            "psraw $7, %%mm6      \n\t"
            "psraw $7, %%mm5      \n\t"
            "psraw $7, %%mm4      \n\t"
            "psraw $7, %%mm3      \n\t"
            "psraw $7, %%mm2      \n\t"
            "psraw $7, %%mm1      \n\t"
            "psraw $7, %%mm0      \n\t"
            "movq  %%mm7,    (%0) \n\t"
            "movq  %%mm5,  16(%0) \n\t"
            "movq  %%mm3,  32(%0) \n\t"
            "movq  %%mm1,  48(%0) \n\t"
            "movq  %%mm0,  64(%0) \n\t"
            "movq  %%mm2,  80(%0) \n\t"
            "movq  %%mm4,  96(%0) \n\t"
            "movq  %%mm6, 112(%0) \n\t"
            :: "r"(b2+4*i)
            : "memory"
        );
    }

    add_pixels_clamped_mmx(b2, dst, stride);

    /* clear block */
    asm volatile(
        "pxor %%mm7, %%mm7   \n\t"
        "movq %%mm7,    (%0) \n\t"
        "movq %%mm7,   8(%0) \n\t"
        "movq %%mm7,  16(%0) \n\t"
        "movq %%mm7,  24(%0) \n\t"
        "movq %%mm7,  32(%0) \n\t"
        "movq %%mm7,  40(%0) \n\t"
        "movq %%mm7,  48(%0) \n\t"
        "movq %%mm7,  56(%0) \n\t"
        "movq %%mm7,  64(%0) \n\t"
        "movq %%mm7,  72(%0) \n\t"
        "movq %%mm7,  80(%0) \n\t"
        "movq %%mm7,  88(%0) \n\t"
        "movq %%mm7,  96(%0) \n\t"
        "movq %%mm7, 104(%0) \n\t"
        "movq %%mm7, 112(%0) \n\t"
        "movq %%mm7, 120(%0) \n\t"
        :: "r" (block)
    );
}
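
/* The 2-D transform runs as two 1-D passes, each covering four columns at
 * a time: the first pass (bias 4, shift 3) goes down the columns and
 * transposes its result into the temporary buffer b2, so that the second
 * pass (bias 64, shift 7) processes what were originally the rows.  The
 * clamped result is then added to dst and the coefficient block is zeroed
 * for the next call. */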

/*****************************************************************************
 *
 * motion compensation
 *
 ****************************************************************************/

/* vertical filter [-1 -2 96 42 -7  0] */
#define QPEL_CAVSV1(A,B,C,D,E,F,OP) \
        "movd (%0), "#F"            \n\t"\
        "movq "#C", %%mm6           \n\t"\
        "pmullw %5, %%mm6           \n\t"\
        "movq "#D", %%mm7           \n\t"\
        "pmullw %6, %%mm7           \n\t"\
        "psllw $3, "#E"             \n\t"\
        "psubw "#E", %%mm6          \n\t"\
        "psraw $3, "#E"             \n\t"\
        "paddw %%mm7, %%mm6         \n\t"\
        "paddw "#E", %%mm6          \n\t"\
        "paddw "#B", "#B"           \n\t"\
        "pxor %%mm7, %%mm7          \n\t"\
        "add %2, %0                 \n\t"\
        "punpcklbw %%mm7, "#F"      \n\t"\
        "psubw "#B", %%mm6          \n\t"\
        "psraw $1, "#B"             \n\t"\
        "psubw "#A", %%mm6          \n\t"\
        "paddw %4, %%mm6            \n\t"\
        "psraw $7, %%mm6            \n\t"\
        "packuswb %%mm6, %%mm6      \n\t"\
        OP(%%mm6, (%1), A, d)            \
        "add %3, %1                 \n\t"

/* vertical filter [ 0 -1  5  5 -1  0] */
#define QPEL_CAVSV2(A,B,C,D,E,F,OP) \
        "movd (%0), "#F"            \n\t"\
        "movq "#C", %%mm6           \n\t"\
        "paddw "#D", %%mm6          \n\t"\
        "pmullw %5, %%mm6           \n\t"\
        "add %2, %0                 \n\t"\
        "punpcklbw %%mm7, "#F"      \n\t"\
        "psubw "#B", %%mm6          \n\t"\
        "psubw "#E", %%mm6          \n\t"\
        "paddw %4, %%mm6            \n\t"\
        "psraw $3, %%mm6            \n\t"\
        "packuswb %%mm6, %%mm6      \n\t"\
        OP(%%mm6, (%1), A, d)            \
        "add %3, %1                 \n\t"

/* vertical filter [ 0 -7 42 96 -2 -1] */
#define QPEL_CAVSV3(A,B,C,D,E,F,OP) \
        "movd (%0), "#F"            \n\t"\
        "movq "#C", %%mm6           \n\t"\
        "pmullw %6, %%mm6           \n\t"\
        "movq "#D", %%mm7           \n\t"\
        "pmullw %5, %%mm7           \n\t"\
        "psllw $3, "#B"             \n\t"\
        "psubw "#B", %%mm6          \n\t"\
        "psraw $3, "#B"             \n\t"\
        "paddw %%mm7, %%mm6         \n\t"\
        "paddw "#B", %%mm6          \n\t"\
        "paddw "#E", "#E"           \n\t"\
        "pxor %%mm7, %%mm7          \n\t"\
        "add %2, %0                 \n\t"\
        "punpcklbw %%mm7, "#F"      \n\t"\
        "psubw "#E", %%mm6          \n\t"\
        "psraw $1, "#E"             \n\t"\
        "psubw "#F", %%mm6          \n\t"\
        "paddw %4, %%mm6            \n\t"\
        "psraw $7, %%mm6            \n\t"\
        "packuswb %%mm6, %%mm6      \n\t"\
        OP(%%mm6, (%1), A, d)            \
        "add %3, %1                 \n\t"

#define QPEL_CAVSVNUM(VOP,OP,ADD,MUL1,MUL2)\
    int w= 2;\
    src -= 2*srcStride;\
    \
    while(w--){\
        asm volatile(\
            "pxor %%mm7, %%mm7          \n\t"\
            "movd (%0), %%mm0           \n\t"\
            "add %2, %0                 \n\t"\
            "movd (%0), %%mm1           \n\t"\
            "add %2, %0                 \n\t"\
            "movd (%0), %%mm2           \n\t"\
            "add %2, %0                 \n\t"\
            "movd (%0), %%mm3           \n\t"\
            "add %2, %0                 \n\t"\
            "movd (%0), %%mm4           \n\t"\
            "add %2, %0                 \n\t"\
            "punpcklbw %%mm7, %%mm0     \n\t"\
            "punpcklbw %%mm7, %%mm1     \n\t"\
            "punpcklbw %%mm7, %%mm2     \n\t"\
            "punpcklbw %%mm7, %%mm3     \n\t"\
            "punpcklbw %%mm7, %%mm4     \n\t"\
            VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
            VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
            VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
            VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
            VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
            VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
            VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
            VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
            \
            : "+a"(src), "+c"(dst)\
            : "S"((long)srcStride), "D"((long)dstStride), "m"(ADD), "m"(MUL1), "m"(MUL2)\
            : "memory"\
        );\
        if(h==16){\
            asm volatile(\
                VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
                VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
                VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
                VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
                VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
                VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
                VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
                VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
                \
                : "+a"(src), "+c"(dst)\
                : "S"((long)srcStride), "D"((long)dstStride), "m"(ADD), "m"(MUL1), "m"(MUL2)\
                : "memory"\
            );\
        }\
        src += 4-(h+5)*srcStride;\
        dst += 4-h*dstStride;\
    }
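
/* Driver loop for the vertical filters: five source rows are preloaded and
 * unpacked to words, then each VOP invocation loads one further row,
 * applies the six-tap window A..F, and stores one output row, rotating the
 * register roles instead of shuffling data.  Each pass covers a
 * 4-pixel-wide column, hence w == 2 for the 8-wide callers; for h == 16
 * the second asm block simply continues the same register rotation. */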

#define QPEL_CAVS(OPNAME, OP, MMX)\
static void OPNAME ## cavs_qpel8_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=8;\
    asm volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movq %5, %%mm6             \n\t"\
        "1:                         \n\t"\
        "movq    (%0), %%mm0        \n\t"\
        "movq   1(%0), %%mm2        \n\t"\
        "movq %%mm0, %%mm1          \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpckhbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "paddw %%mm2, %%mm0         \n\t"\
        "paddw %%mm3, %%mm1         \n\t"\
        "pmullw %%mm6, %%mm0        \n\t"\
        "pmullw %%mm6, %%mm1        \n\t"\
        "movq  -1(%0), %%mm2        \n\t"\
        "movq   2(%0), %%mm4        \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "movq %%mm4, %%mm5          \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "punpcklbw %%mm7, %%mm4     \n\t"\
        "punpckhbw %%mm7, %%mm5     \n\t"\
        "paddw %%mm4, %%mm2         \n\t"\
        "paddw %%mm3, %%mm5         \n\t"\
        "psubw %%mm2, %%mm0         \n\t"\
        "psubw %%mm5, %%mm1         \n\t"\
        "movq %6, %%mm5             \n\t"\
        "paddw %%mm5, %%mm0         \n\t"\
        "paddw %%mm5, %%mm1         \n\t"\
        "psraw $3, %%mm0            \n\t"\
        "psraw $3, %%mm1            \n\t"\
        "packuswb %%mm1, %%mm0      \n\t"\
        OP(%%mm0, (%1),%%mm5, q)         \
        "add %3, %0                 \n\t"\
        "add %4, %1                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_4)\
        : "memory"\
    );\
}\
\
static inline void OPNAME ## cavs_qpel8or16_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    QPEL_CAVSVNUM(QPEL_CAVSV1,OP,ff_pw_64,ff_pw_96,ff_pw_42)\
}\
\
static inline void OPNAME ## cavs_qpel8or16_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    QPEL_CAVSVNUM(QPEL_CAVSV2,OP,ff_pw_4,ff_pw_5,ff_pw_5)\
}\
\
static inline void OPNAME ## cavs_qpel8or16_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    QPEL_CAVSVNUM(QPEL_CAVSV3,OP,ff_pw_64,ff_pw_96,ff_pw_42)\
}\
\
static void OPNAME ## cavs_qpel8_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
static void OPNAME ## cavs_qpel16_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel8_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
static void OPNAME ## cavs_qpel16_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel8_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
static void OPNAME ## cavs_qpel16_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel16_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}

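/* The horizontal loop in cavs_qpel8_h computes, per pixel,
 *     clip_uint8((5*(src[0] + src[1]) - src[-1] - src[2] + 4) >> 3)
 * handling eight pixels per iteration by keeping the low and high byte
 * halves of the row unpacked in separate word registers. */
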
#define CAVS_MC(OPNAME, SIZE, MMX) \
static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## cavs_qpel ## SIZE ## _h_ ## MMX(dst, src, stride, stride);\
}\
\
static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## cavs_qpel ## SIZE ## _v1_ ## MMX(dst, src, stride, stride);\
}\
\
static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## cavs_qpel ## SIZE ## _v2_ ## MMX(dst, src, stride, stride);\
}\
\
static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## cavs_qpel ## SIZE ## _v3_ ## MMX(dst, src, stride, stride);\
}

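/* The _mcXY suffix encodes the quarter-pel offset, X horizontal and Y
 * vertical: mc20 is the horizontal half-pel position, mc01/mc02/mc03 the
 * vertical quarter-, half- and three-quarter-pel positions.  Only these
 * pure-horizontal and pure-vertical cases are accelerated here; the other
 * table entries keep whatever the generic init code installed. */
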
#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgusb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "      \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgb " #temp ", " #a "          \n\t"\
"mov" #size " " #a ", " #b "      \n\t"

QPEL_CAVS(put_,       PUT_OP, 3dnow)
QPEL_CAVS(avg_, AVG_3DNOW_OP, 3dnow)
QPEL_CAVS(put_,       PUT_OP, mmx2)
QPEL_CAVS(avg_,  AVG_MMX2_OP, mmx2)

CAVS_MC(put_,  8, 3dnow)
CAVS_MC(put_, 16, 3dnow)
CAVS_MC(avg_,  8, 3dnow)
CAVS_MC(avg_, 16, 3dnow)
CAVS_MC(put_,  8, mmx2)
CAVS_MC(put_, 16, mmx2)
CAVS_MC(avg_,  8, mmx2)
CAVS_MC(avg_, 16, mmx2)

void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride);
void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride);
void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride);
void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride);

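/* The pixels_tab index is the quarter-pel position encoded as x + 4*y:
 * entry 0 = (0,0), 2 = (2,0), 4 = (0,1), 8 = (0,2), 12 = (0,3). */
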
void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx) {
#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = ff_ ## PFX ## NUM ## _mc00_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 2] = ff_ ## PFX ## NUM ## _mc20_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 4] = ff_ ## PFX ## NUM ## _mc01_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 8] = ff_ ## PFX ## NUM ## _mc02_mmx2; \
    c->PFX ## _pixels_tab[IDX][12] = ff_ ## PFX ## NUM ## _mc03_mmx2;

    dspfunc(put_cavs_qpel, 0, 16);
    dspfunc(put_cavs_qpel, 1, 8);
    dspfunc(avg_cavs_qpel, 0, 16);
    dspfunc(avg_cavs_qpel, 1, 8);
#undef dspfunc
    c->cavs_idct8_add = cavs_idct8_add_mmx;
}

/* note that the full-pel (mc00) entries reuse the MMX2 functions; only the
   filtered positions have dedicated 3DNow! variants */
void ff_cavsdsp_init_3dnow(DSPContext* c, AVCodecContext *avctx) {
#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = ff_ ## PFX ## NUM ## _mc00_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 2] = ff_ ## PFX ## NUM ## _mc20_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 4] = ff_ ## PFX ## NUM ## _mc01_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 8] = ff_ ## PFX ## NUM ## _mc02_3dnow; \
    c->PFX ## _pixels_tab[IDX][12] = ff_ ## PFX ## NUM ## _mc03_3dnow;

    dspfunc(put_cavs_qpel, 0, 16);
    dspfunc(put_cavs_qpel, 1, 8);
    dspfunc(avg_cavs_qpel, 0, 16);
    dspfunc(avg_cavs_qpel, 1, 8);
#undef dspfunc
    c->cavs_idct8_add = cavs_idct8_add_mmx;
}
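
/* A sketch of how these are meant to be wired up from the generic x86
 * dsputil initialisation (the CPU-flag names are assumptions from this
 * era of the tree; the real call site lives in dsputil_mmx.c):
 *
 *     if (mm_flags & MM_MMXEXT)
 *         ff_cavsdsp_init_mmx2(c, avctx);
 *     else if (mm_flags & MM_3DNOW)
 *         ff_cavsdsp_init_3dnow(c, avctx);
 */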