x86: move some inline asm macros to the only places they are used
libavcodec/x86/cavsdsp_mmx.c
/*
 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
 * Copyright (c) 2006  Stefan Gehrer <stefan.gehrer@gmx.de>
 *
 * MMX-optimized DSP functions, based on H.264 optimizations by
 * Michael Niedermayer and Loren Merritt
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/common.h"
#include "libavutil/cpu.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/cavsdsp.h"
#include "dsputil_mmx.h"

/* in/out: mma = mma + mmb, mmb = mmb - mma (in terms of the input values) */
#define SUMSUB_BA( a, b ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "psubw "#a", "#b" \n\t"
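
/* Scalar sketch of SUMSUB_BA, per 16-bit lane (illustrative comment only):
 * with inputs ta = a and tb = b, the three instructions compute
 *     a = ta + tb;              // paddw b, a
 *     b = 2*tb - (ta + tb)      // paddw b, b ; psubw a, b
 *       = tb - ta;
 */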

/*****************************************************************************
 *
 * inverse transform
 *
 ****************************************************************************/

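/* One-dimensional 8-point AVS inverse transform, computed entirely in the
 * eight MMX registers (16-bit lanes).  A scalar sketch of what the
 * instruction stream below evaluates, reconstructed from its own
 * per-instruction comments:
 *
 *     a0 = 3*src1 - 2*src7;          a1 = 3*src3 + 2*src5;
 *     a2 = 2*src3 - 3*src5;          a3 = 2*src1 + 3*src7;
 *     b4 = 2*(a0 + a1 + a3) + a1;    b5 = 2*(a0 - a1 + a2) + a0;
 *     b6 = 2*(a3 - a2 - a1) + a3;    b7 = 2*(a0 - a2 - a3) - a2;
 *     a4 = 8*(src0 + src4) + bias;   a5 = 8*(src0 - src4) + bias;
 *     a6 = 10*src2 + 4*src6;         a7 = 4*src2 - 10*src6;
 *     dst0/7 = (a4 + a6) +/- b4;     dst1/6 = (a5 + a7) +/- b5;
 *     dst2/5 = (a5 - a7) +/- b6;     dst3/4 = (a4 - a6) +/- b7;
 */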
static inline void cavs_idct8_1d(int16_t *block, uint64_t bias)
{
    __asm__ volatile(
        "movq 112(%0), %%mm4  \n\t" /* mm4 = src7 */
        "movq  16(%0), %%mm5  \n\t" /* mm5 = src1 */
        "movq  80(%0), %%mm2  \n\t" /* mm2 = src5 */
        "movq  48(%0), %%mm7  \n\t" /* mm7 = src3 */
        "movq   %%mm4, %%mm0  \n\t"
        "movq   %%mm5, %%mm3  \n\t"
        "movq   %%mm2, %%mm6  \n\t"
        "movq   %%mm7, %%mm1  \n\t"

        "paddw  %%mm4, %%mm4  \n\t" /* mm4 = 2*src7 */
        "paddw  %%mm3, %%mm3  \n\t" /* mm3 = 2*src1 */
        "paddw  %%mm6, %%mm6  \n\t" /* mm6 = 2*src5 */
        "paddw  %%mm1, %%mm1  \n\t" /* mm1 = 2*src3 */
        "paddw  %%mm4, %%mm0  \n\t" /* mm0 = 3*src7 */
        "paddw  %%mm3, %%mm5  \n\t" /* mm5 = 3*src1 */
        "paddw  %%mm6, %%mm2  \n\t" /* mm2 = 3*src5 */
        "paddw  %%mm1, %%mm7  \n\t" /* mm7 = 3*src3 */
        "psubw  %%mm4, %%mm5  \n\t" /* mm5 = 3*src1 - 2*src7 = a0 */
        "paddw  %%mm6, %%mm7  \n\t" /* mm7 = 3*src3 + 2*src5 = a1 */
        "psubw  %%mm2, %%mm1  \n\t" /* mm1 = 2*src3 - 3*src5 = a2 */
        "paddw  %%mm0, %%mm3  \n\t" /* mm3 = 2*src1 + 3*src7 = a3 */

        "movq   %%mm5, %%mm4  \n\t"
        "movq   %%mm7, %%mm6  \n\t"
        "movq   %%mm3, %%mm0  \n\t"
        "movq   %%mm1, %%mm2  \n\t"
        SUMSUB_BA( %%mm7, %%mm5 )   /* mm7 = a0 + a1  mm5 = a0 - a1 */
        "paddw  %%mm3, %%mm7  \n\t" /* mm7 = a0 + a1 + a3 */
        "paddw  %%mm1, %%mm5  \n\t" /* mm5 = a0 - a1 + a2 */
        "paddw  %%mm7, %%mm7  \n\t"
        "paddw  %%mm5, %%mm5  \n\t"
        "paddw  %%mm6, %%mm7  \n\t" /* mm7 = b4 */
        "paddw  %%mm4, %%mm5  \n\t" /* mm5 = b5 */

        SUMSUB_BA( %%mm1, %%mm3 )   /* mm1 = a3 + a2  mm3 = a3 - a2 */
        "psubw  %%mm1, %%mm4  \n\t" /* mm4 = a0 - a2 - a3 */
        "movq   %%mm4, %%mm1  \n\t" /* mm1 = a0 - a2 - a3 */
        "psubw  %%mm6, %%mm3  \n\t" /* mm3 = a3 - a2 - a1 */
        "paddw  %%mm1, %%mm1  \n\t"
        "paddw  %%mm3, %%mm3  \n\t"
        "psubw  %%mm2, %%mm1  \n\t" /* mm1 = b7 */
        "paddw  %%mm0, %%mm3  \n\t" /* mm3 = b6 */

        "movq  32(%0), %%mm2  \n\t" /* mm2 = src2 */
        "movq  96(%0), %%mm6  \n\t" /* mm6 = src6 */
        "movq   %%mm2, %%mm4  \n\t"
        "movq   %%mm6, %%mm0  \n\t"
        "psllw     $2, %%mm4  \n\t" /* mm4 = 4*src2 */
        "psllw     $2, %%mm6  \n\t" /* mm6 = 4*src6 */
        "paddw  %%mm4, %%mm2  \n\t" /* mm2 = 5*src2 */
        "paddw  %%mm6, %%mm0  \n\t" /* mm0 = 5*src6 */
        "paddw  %%mm2, %%mm2  \n\t"
        "paddw  %%mm0, %%mm0  \n\t"
        "psubw  %%mm0, %%mm4  \n\t" /* mm4 = 4*src2 - 10*src6 = a7 */
        "paddw  %%mm2, %%mm6  \n\t" /* mm6 = 4*src6 + 10*src2 = a6 */

        "movq    (%0), %%mm2  \n\t" /* mm2 = src0 */
        "movq  64(%0), %%mm0  \n\t" /* mm0 = src4 */
        SUMSUB_BA( %%mm0, %%mm2 )   /* mm0 = src0 + src4  mm2 = src0 - src4 */
        "psllw     $3, %%mm0  \n\t"
        "psllw     $3, %%mm2  \n\t"
        "paddw     %1, %%mm0  \n\t" /* add rounding bias */
        "paddw     %1, %%mm2  \n\t" /* add rounding bias */

        SUMSUB_BA( %%mm6, %%mm0 )   /* mm6 = a4 + a6  mm0 = a4 - a6 */
        SUMSUB_BA( %%mm4, %%mm2 )   /* mm4 = a5 + a7  mm2 = a5 - a7 */
        SUMSUB_BA( %%mm7, %%mm6 )   /* mm7 = dst0  mm6 = dst7 */
        SUMSUB_BA( %%mm5, %%mm4 )   /* mm5 = dst1  mm4 = dst6 */
        SUMSUB_BA( %%mm3, %%mm2 )   /* mm3 = dst2  mm2 = dst5 */
        SUMSUB_BA( %%mm1, %%mm0 )   /* mm1 = dst3  mm0 = dst4 */
        :: "r"(block), "m"(bias)
    );
}

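/* Full 2-D inverse transform plus add: two passes of cavs_idct8_1d with a
 * transpose in between.  Pass 1 (rows) rounds with ff_pw_4 and shifts right
 * by 3; pass 2 (columns) rounds with ff_pw_64 and shifts right by 7,
 * matching the bias that cavs_idct8_1d folds into a4/a5 before each pass. */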
static void cavs_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    int i;
    DECLARE_ALIGNED(8, int16_t, b2)[64];

    for(i=0; i<2; i++){
        DECLARE_ALIGNED(8, uint64_t, tmp);

        cavs_idct8_1d(block+4*i, ff_pw_4.a);

        __asm__ volatile(
            "psraw     $3, %%mm7  \n\t"
            "psraw     $3, %%mm6  \n\t"
            "psraw     $3, %%mm5  \n\t"
            "psraw     $3, %%mm4  \n\t"
            "psraw     $3, %%mm3  \n\t"
            "psraw     $3, %%mm2  \n\t"
            "psraw     $3, %%mm1  \n\t"
            "psraw     $3, %%mm0  \n\t"
            "movq   %%mm7,     %0 \n\t"
            TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 )
            "movq   %%mm0,  8(%1) \n\t"
            "movq   %%mm6, 24(%1) \n\t"
            "movq   %%mm7, 40(%1) \n\t"
            "movq   %%mm4, 56(%1) \n\t"
            "movq      %0, %%mm7  \n\t"
            TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 )
            "movq   %%mm7,   (%1) \n\t"
            "movq   %%mm1, 16(%1) \n\t"
            "movq   %%mm0, 32(%1) \n\t"
            "movq   %%mm3, 48(%1) \n\t"
            : "=m"(tmp)
            : "r"(b2+32*i)
            : "memory"
        );
    }

    for(i=0; i<2; i++){
        cavs_idct8_1d(b2+4*i, ff_pw_64.a);

        __asm__ volatile(
            "psraw     $7, %%mm7  \n\t"
            "psraw     $7, %%mm6  \n\t"
            "psraw     $7, %%mm5  \n\t"
            "psraw     $7, %%mm4  \n\t"
            "psraw     $7, %%mm3  \n\t"
            "psraw     $7, %%mm2  \n\t"
            "psraw     $7, %%mm1  \n\t"
            "psraw     $7, %%mm0  \n\t"
            "movq   %%mm7,    (%0) \n\t"
            "movq   %%mm5,  16(%0) \n\t"
            "movq   %%mm3,  32(%0) \n\t"
            "movq   %%mm1,  48(%0) \n\t"
            "movq   %%mm0,  64(%0) \n\t"
            "movq   %%mm2,  80(%0) \n\t"
            "movq   %%mm4,  96(%0) \n\t"
            "movq   %%mm6, 112(%0) \n\t"
            :: "r"(b2+4*i)
            : "memory"
        );
    }

    ff_add_pixels_clamped_mmx(b2, dst, stride);
}

/*****************************************************************************
 *
 * motion compensation
 *
 ****************************************************************************/

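/* Quarter-pel vertical interpolation uses three different 6-tap filters,
 * one per fractional position (QPEL_CAVSV1/2/3 below).  QPEL_CAVSVNUM
 * drives them: it keeps six consecutive source rows unpacked to 16-bit
 * words in mm0-mm5 and rotates the register roles row by row, so each
 * output row costs only one new 4-pixel load. */
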
/* vertical filter [-1 -2 96 42 -7  0] */
#define QPEL_CAVSV1(A,B,C,D,E,F,OP,MUL2) \
    "movd (%0), "#F"            \n\t"\
    "movq "#C", %%mm6           \n\t"\
    "pmullw %5, %%mm6           \n\t"\
    "movq "#D", %%mm7           \n\t"\
    "pmullw "MANGLE(MUL2)", %%mm7\n\t"\
    "psllw $3, "#E"             \n\t"\
    "psubw "#E", %%mm6          \n\t"\
    "psraw $3, "#E"             \n\t"\
    "paddw %%mm7, %%mm6         \n\t"\
    "paddw "#E", %%mm6          \n\t"\
    "paddw "#B", "#B"           \n\t"\
    "pxor %%mm7, %%mm7          \n\t"\
    "add %2, %0                 \n\t"\
    "punpcklbw %%mm7, "#F"      \n\t"\
    "psubw "#B", %%mm6          \n\t"\
    "psraw $1, "#B"             \n\t"\
    "psubw "#A", %%mm6          \n\t"\
    "paddw %4, %%mm6            \n\t"\
    "psraw $7, %%mm6            \n\t"\
    "packuswb %%mm6, %%mm6      \n\t"\
    OP(%%mm6, (%1), A, d)            \
    "add %3, %1                 \n\t"

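/* Note on the constant trick in QPEL_CAVSV1 (mirrored in QPEL_CAVSV3):
 * the -7 tap is built without a multiply as -8*E + E ("psllw $3" then
 * "psubw", with "psraw $3" restoring E before the final "paddw"), and the
 * -2 tap as "paddw B,B" / "psubw B" with "psraw $1" restoring B afterwards;
 * this is exact as long as the shifted intermediates stay within 16 bits,
 * which holds for 8-bit pixels.  Only the 96 and 42 taps use pmullw. */
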
/* vertical filter [ 0 -1  5  5 -1  0] */
#define QPEL_CAVSV2(A,B,C,D,E,F,OP,MUL2) \
    "movd (%0), "#F"            \n\t"\
    "movq "#C", %%mm6           \n\t"\
    "paddw "#D", %%mm6          \n\t"\
    "pmullw %5, %%mm6           \n\t"\
    "add %2, %0                 \n\t"\
    "punpcklbw %%mm7, "#F"      \n\t"\
    "psubw "#B", %%mm6          \n\t"\
    "psubw "#E", %%mm6          \n\t"\
    "paddw %4, %%mm6            \n\t"\
    "psraw $3, %%mm6            \n\t"\
    "packuswb %%mm6, %%mm6      \n\t"\
    OP(%%mm6, (%1), A, d)            \
    "add %3, %1                 \n\t"

/* vertical filter [ 0 -7 42 96 -2 -1] */
#define QPEL_CAVSV3(A,B,C,D,E,F,OP,MUL2) \
    "movd (%0), "#F"            \n\t"\
    "movq "#C", %%mm6           \n\t"\
    "pmullw "MANGLE(MUL2)", %%mm6\n\t"\
    "movq "#D", %%mm7           \n\t"\
    "pmullw %5, %%mm7           \n\t"\
    "psllw $3, "#B"             \n\t"\
    "psubw "#B", %%mm6          \n\t"\
    "psraw $3, "#B"             \n\t"\
    "paddw %%mm7, %%mm6         \n\t"\
    "paddw "#B", %%mm6          \n\t"\
    "paddw "#E", "#E"           \n\t"\
    "pxor %%mm7, %%mm7          \n\t"\
    "add %2, %0                 \n\t"\
    "punpcklbw %%mm7, "#F"      \n\t"\
    "psubw "#E", %%mm6          \n\t"\
    "psraw $1, "#E"             \n\t"\
    "psubw "#F", %%mm6          \n\t"\
    "paddw %4, %%mm6            \n\t"\
    "psraw $7, %%mm6            \n\t"\
    "packuswb %%mm6, %%mm6      \n\t"\
    OP(%%mm6, (%1), A, d)            \
    "add %3, %1                 \n\t"

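/* QPEL_CAVSVNUM emits the whole vertical filter loop: two passes (w = 2)
 * over 4-pixel-wide columns, 8 or 16 rows high, giving an 8-pixel-wide
 * block.  Note that the h==16 continuation is a second __asm__ block that
 * continues with the MMX register contents left by the first one, relying
 * on the compiler not scheduling MMX code in between. */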
#define QPEL_CAVSVNUM(VOP,OP,ADD,MUL1,MUL2)\
    int w= 2;\
    src -= 2*srcStride;\
    \
    while(w--){\
      __asm__ volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movd (%0), %%mm0           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm1           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm2           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm3           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm4           \n\t"\
        "add %2, %0                 \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpcklbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpcklbw %%mm7, %%mm3     \n\t"\
        "punpcklbw %%mm7, %%mm4     \n\t"\
        VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, MUL2)\
        VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, MUL2)\
        VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, MUL2)\
        VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, MUL2)\
        VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP, MUL2)\
        VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP, MUL2)\
        VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, MUL2)\
        VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, MUL2)\
        \
        : "+a"(src), "+c"(dst)\
        : "S"((x86_reg)srcStride), "r"((x86_reg)dstStride), "m"(ADD), "m"(MUL1)\
        : "memory"\
      );\
      if(h==16){\
        __asm__ volatile(\
            VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, MUL2)\
            VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, MUL2)\
            VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP, MUL2)\
            VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP, MUL2)\
            VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, MUL2)\
            VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, MUL2)\
            VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, MUL2)\
            VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, MUL2)\
            \
            : "+a"(src), "+c"(dst)\
            : "S"((x86_reg)srcStride), "r"((x86_reg)dstStride), "m"(ADD), "m"(MUL1)\
            : "memory"\
        );\
      }\
      src += 4-(h+5)*srcStride;\
      dst += 4-h*dstStride;\
    }

/* QPEL_CAVS defines the horizontal 4-tap filter [-1 5 5 -1] (rounded with
 * ff_pw_4, shifted right by 3) and wraps the vertical helpers above for
 * 8- and 16-pixel-wide blocks. */
#define QPEL_CAVS(OPNAME, OP, MMX)\
static void OPNAME ## cavs_qpel8_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=8;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movq %5, %%mm6             \n\t"\
        "1:                         \n\t"\
        "movq    (%0), %%mm0        \n\t"\
        "movq   1(%0), %%mm2        \n\t"\
        "movq %%mm0, %%mm1          \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpckhbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "paddw %%mm2, %%mm0         \n\t"\
        "paddw %%mm3, %%mm1         \n\t"\
        "pmullw %%mm6, %%mm0        \n\t"\
        "pmullw %%mm6, %%mm1        \n\t"\
        "movq  -1(%0), %%mm2        \n\t"\
        "movq   2(%0), %%mm4        \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "movq %%mm4, %%mm5          \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "punpcklbw %%mm7, %%mm4     \n\t"\
        "punpckhbw %%mm7, %%mm5     \n\t"\
        "paddw %%mm4, %%mm2         \n\t"\
        "paddw %%mm3, %%mm5         \n\t"\
        "psubw %%mm2, %%mm0         \n\t"\
        "psubw %%mm5, %%mm1         \n\t"\
        "movq %6, %%mm5             \n\t"\
        "paddw %%mm5, %%mm0         \n\t"\
        "paddw %%mm5, %%mm1         \n\t"\
        "psraw $3, %%mm0            \n\t"\
        "psraw $3, %%mm1            \n\t"\
        "packuswb %%mm1, %%mm0      \n\t"\
        OP(%%mm0, (%1),%%mm5, q)         \
        "add %3, %0                 \n\t"\
        "add %4, %1                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_4)\
        : "memory"\
    );\
}\
\
static inline void OPNAME ## cavs_qpel8or16_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    QPEL_CAVSVNUM(QPEL_CAVSV1,OP,ff_pw_64,ff_pw_96,ff_pw_42)\
}\
\
static inline void OPNAME ## cavs_qpel8or16_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    QPEL_CAVSVNUM(QPEL_CAVSV2,OP,ff_pw_4,ff_pw_5,ff_pw_5)\
}\
\
static inline void OPNAME ## cavs_qpel8or16_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    QPEL_CAVSVNUM(QPEL_CAVSV3,OP,ff_pw_64,ff_pw_96,ff_pw_42)\
}\
\
static void OPNAME ## cavs_qpel8_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst  , src  , dstStride, srcStride,  8);\
}\
static void OPNAME ## cavs_qpel16_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel8_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst  , src  , dstStride, srcStride,  8);\
}\
static void OPNAME ## cavs_qpel16_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel8_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst  , src  , dstStride, srcStride,  8);\
}\
static void OPNAME ## cavs_qpel16_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel16_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\

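/* Both macro families are instantiated near the end of the file: e.g.
 * QPEL_CAVS(put_, PUT_OP, mmx2) expands to put_cavs_qpel8_h_mmx2,
 * put_cavs_qpel16_v1_mmx2 and friends, and CAVS_MC(put_, 8, mmx2) wraps
 * them into the ff_put_cavs_qpel8_mcXY_mmx2 entry points below. */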
#define CAVS_MC(OPNAME, SIZE, MMX) \
static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## cavs_qpel ## SIZE ## _h_ ## MMX(dst, src, stride, stride);\
}\
\
static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## cavs_qpel ## SIZE ## _v1_ ## MMX(dst, src, stride, stride);\
}\
\
static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## cavs_qpel ## SIZE ## _v2_ ## MMX(dst, src, stride, stride);\
}\
\
static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## cavs_qpel ## SIZE ## _v3_ ## MMX(dst, src, stride, stride);\
}\

#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"

#define AVG_3DNOW_OP(a,b,temp, size) \
    "mov" #size " " #b ", " #temp " \n\t"\
    "pavgusb " #temp ", " #a "      \n\t"\
    "mov" #size " " #a ", " #b "    \n\t"

#define AVG_MMX2_OP(a,b,temp, size) \
    "mov" #size " " #b ", " #temp " \n\t"\
    "pavgb " #temp ", " #a "        \n\t"\
    "mov" #size " " #a ", " #b "    \n\t"

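/* Example expansions of the store macros (for illustration):
 *   PUT_OP(%%mm0, (%1), %%mm5, q)       ->  "movq %%mm0, (%1) \n\t"
 *   AVG_MMX2_OP(%%mm0, (%1), %%mm5, q)  ->  load (%1) into %%mm5,
 *                                           pavgb into %%mm0, store back.
 */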
QPEL_CAVS(put_, PUT_OP,       3dnow)
QPEL_CAVS(avg_, AVG_3DNOW_OP, 3dnow)
QPEL_CAVS(put_, PUT_OP,       mmx2)
QPEL_CAVS(avg_, AVG_MMX2_OP,  mmx2)

CAVS_MC(put_,  8, 3dnow)
CAVS_MC(put_, 16, 3dnow)
CAVS_MC(avg_,  8, 3dnow)
CAVS_MC(avg_, 16, 3dnow)
CAVS_MC(put_,  8, mmx2)
CAVS_MC(put_, 16, mmx2)
CAVS_MC(avg_,  8, mmx2)
CAVS_MC(avg_, 16, mmx2)

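/* The [IDX][i] slots follow dsputil's qpel convention, i = x + 4*y for the
 * quarter-pel position (x,y); this file only provides the purely horizontal
 * half-pel case (mc20) and the purely vertical cases (mc01/mc02/mc03). */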
static void ff_cavsdsp_init_mmx2(CAVSDSPContext *c, AVCodecContext *avctx) {
#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = ff_ ## PFX ## NUM ## _mc00_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 2] = ff_ ## PFX ## NUM ## _mc20_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 4] = ff_ ## PFX ## NUM ## _mc01_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 8] = ff_ ## PFX ## NUM ## _mc02_mmx2; \
    c->PFX ## _pixels_tab[IDX][12] = ff_ ## PFX ## NUM ## _mc03_mmx2; \

    dspfunc(put_cavs_qpel, 0, 16);
    dspfunc(put_cavs_qpel, 1,  8);
    dspfunc(avg_cavs_qpel, 0, 16);
    dspfunc(avg_cavs_qpel, 1,  8);
#undef dspfunc
    c->cavs_idct8_add = cavs_idct8_add_mmx;
}

static void ff_cavsdsp_init_3dnow(CAVSDSPContext *c, AVCodecContext *avctx) {
#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = ff_ ## PFX ## NUM ## _mc00_mmx2; /* mc00 is a plain copy, shared with the MMX2 path */ \
    c->PFX ## _pixels_tab[IDX][ 2] = ff_ ## PFX ## NUM ## _mc20_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 4] = ff_ ## PFX ## NUM ## _mc01_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 8] = ff_ ## PFX ## NUM ## _mc02_3dnow; \
    c->PFX ## _pixels_tab[IDX][12] = ff_ ## PFX ## NUM ## _mc03_3dnow; \

    dspfunc(put_cavs_qpel, 0, 16);
    dspfunc(put_cavs_qpel, 1,  8);
    dspfunc(avg_cavs_qpel, 0, 16);
    dspfunc(avg_cavs_qpel, 1,  8);
#undef dspfunc
    c->cavs_idct8_add = cavs_idct8_add_mmx;
}

void ff_cavsdsp_init_mmx(CAVSDSPContext *c, AVCodecContext *avctx)
{
    int mm_flags = av_get_cpu_flags();

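    /* On CPUs that report both flags the 3DNow! init runs second and thus
     * overrides the MMX2 pointers it re-registers (mc00 excepted: both
     * paths install the same MMX2 copy routine). */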
    if (mm_flags & AV_CPU_FLAG_MMX2)  ff_cavsdsp_init_mmx2 (c, avctx);
    if (mm_flags & AV_CPU_FLAG_3DNOW) ff_cavsdsp_init_3dnow(c, avctx);
}