/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/cpu.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264dsp.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "libavcodec/ac3dec.h"
#include "dsputil_mmx.h"
#include "idct_xvid.h"

//#undef NDEBUG
//#include <assert.h>

/* pixel operations */
DECLARE_ALIGNED(8, const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED(8, const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED(16, const uint64_t, ff_pdw_80000000)[2] =
    { 0x8000000080000000ULL, 0x8000000080000000ULL };

DECLARE_ALIGNED(16, const xmm_reg, ff_pw_1) = { 0x0001000100010001ULL, 0x0001000100010001ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_2) = { 0x0002000200020002ULL, 0x0002000200020002ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_3) = { 0x0003000300030003ULL, 0x0003000300030003ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_4) = { 0x0004000400040004ULL, 0x0004000400040004ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_5) = { 0x0005000500050005ULL, 0x0005000500050005ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_8) = { 0x0008000800080008ULL, 0x0008000800080008ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_9) = { 0x0009000900090009ULL, 0x0009000900090009ULL };
DECLARE_ALIGNED(8, const uint64_t, ff_pw_15) = 0x000F000F000F000FULL;
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_16) = { 0x0010001000100010ULL, 0x0010001000100010ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_17) = { 0x0011001100110011ULL, 0x0011001100110011ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_18) = { 0x0012001200120012ULL, 0x0012001200120012ULL };
DECLARE_ALIGNED(8, const uint64_t, ff_pw_20) = 0x0014001400140014ULL;
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_27) = { 0x001B001B001B001BULL, 0x001B001B001B001BULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_28) = { 0x001C001C001C001CULL, 0x001C001C001C001CULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_32) = { 0x0020002000200020ULL, 0x0020002000200020ULL };
DECLARE_ALIGNED(8, const uint64_t, ff_pw_42) = 0x002A002A002A002AULL;
DECLARE_ALIGNED(8, const uint64_t, ff_pw_53) = 0x0035003500350035ULL;
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_63) = { 0x003F003F003F003FULL, 0x003F003F003F003FULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_64) = { 0x0040004000400040ULL, 0x0040004000400040ULL };
DECLARE_ALIGNED(8, const uint64_t, ff_pw_96) = 0x0060006000600060ULL;
DECLARE_ALIGNED(8, const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED(8, const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_512) = { 0x0200020002000200ULL, 0x0200020002000200ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_1019) = { 0x03FB03FB03FB03FBULL, 0x03FB03FB03FB03FBULL };

DECLARE_ALIGNED(16, const xmm_reg, ff_pb_0) = { 0x0000000000000000ULL, 0x0000000000000000ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pb_1) = { 0x0101010101010101ULL, 0x0101010101010101ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pb_3) = { 0x0303030303030303ULL, 0x0303030303030303ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pb_4) = { 0x0404040404040404ULL, 0x0404040404040404ULL };
DECLARE_ALIGNED(8, const uint64_t, ff_pb_7) = 0x0707070707070707ULL;
DECLARE_ALIGNED(8, const uint64_t, ff_pb_1F) = 0x1F1F1F1F1F1F1F1FULL;
DECLARE_ALIGNED(8, const uint64_t, ff_pb_3F) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED(16, const xmm_reg, ff_pb_80) = { 0x8080808080808080ULL, 0x8080808080808080ULL };
DECLARE_ALIGNED(8, const uint64_t, ff_pb_81) = 0x8181818181818181ULL;
DECLARE_ALIGNED(16, const xmm_reg, ff_pb_A1) = { 0xA1A1A1A1A1A1A1A1ULL, 0xA1A1A1A1A1A1A1A1ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pb_F8) = { 0xF8F8F8F8F8F8F8F8ULL, 0xF8F8F8F8F8F8F8F8ULL };
DECLARE_ALIGNED(8, const uint64_t, ff_pb_FC) = 0xFCFCFCFCFCFCFCFCULL;
DECLARE_ALIGNED(16, const xmm_reg, ff_pb_FE) = { 0xFEFEFEFEFEFEFEFEULL, 0xFEFEFEFEFEFEFEFEULL };

DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };

#define JUMPALIGN()     __asm__ volatile (".p2align 3"::)
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%"#regd", %%"#regd ::)

#define MOVQ_BFE(regd) \
    __asm__ volatile ( \
        "pcmpeqd %%"#regd", %%"#regd" \n\t" \
        "paddb %%"#regd", %%"#regd" \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_bone))
#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_wtwo))
#else
// for shared libraries it is better to access constants this way
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm__ volatile ( \
        "pcmpeqd %%"#regd", %%"#regd" \n\t" \
        "psrlw $15, %%"#regd" \n\t" \
        "packuswb %%"#regd", %%"#regd" \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm__ volatile ( \
        "pcmpeqd %%"#regd", %%"#regd" \n\t" \
        "psrlw $15, %%"#regd" \n\t" \
        "psllw $1, %%"#regd" \n\t"::)

#endif
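
/*
 * How the PIC variants synthesize their constants without a memory
 * load: pcmpeqd sets every bit, so each word is 0xFFFF; psrlw $15
 * leaves words of 0x0001, which packuswb narrows to bytes of 0x01 for
 * MOVQ_BONE, while psllw $1 doubles the words to 0x0002 for MOVQ_WTWO.
 */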

// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq "#rega", "#regr" \n\t" \
    "pand "#regb", "#regr" \n\t" \
    "pxor "#rega", "#regb" \n\t" \
    "pand "#regfe", "#regb" \n\t" \
    "psrlq $1, "#regb" \n\t" \
    "paddb "#regb", "#regr" \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq "#rega", "#regr" \n\t" \
    "por "#regb", "#regr" \n\t" \
    "pxor "#rega", "#regb" \n\t" \
    "pand "#regfe", "#regb" \n\t" \
    "psrlq $1, "#regb" \n\t" \
    "psubb "#regb", "#regr" \n\t"

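/*
 * The PAVGB macros above are built on the classic carry-free averaging
 * identities; a scalar sketch for illustration (these helpers are not
 * part of the original code, hence the #if 0):
 *   floor((a + b) / 2) = (a & b) + ((a ^ b) >> 1)
 *   ceil((a + b) / 2)  = (a | b) - ((a ^ b) >> 1)
 * Masking the xor with 0xFE before the shift keeps psrlq from moving
 * bits across byte lanes.
 */
#if 0
static inline uint8_t avg_floor_u8(uint8_t a, uint8_t b)
{
    return (a & b) + (((a ^ b) & 0xFE) >> 1); /* truncates (no rounding) */
}

static inline uint8_t avg_ceil_u8(uint8_t a, uint8_t b)
{
    return (a | b) - (((a ^ b) & 0xFE) >> 1); /* rounds halves up */
}
#endif
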
// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq "#rega", "#regr" \n\t" \
    "movq "#regc", "#regp" \n\t" \
    "pand "#regb", "#regr" \n\t" \
    "pand "#regd", "#regp" \n\t" \
    "pxor "#rega", "#regb" \n\t" \
    "pxor "#regc", "#regd" \n\t" \
    "pand %%mm6, "#regb" \n\t" \
    "pand %%mm6, "#regd" \n\t" \
    "psrlq $1, "#regb" \n\t" \
    "psrlq $1, "#regd" \n\t" \
    "paddb "#regb", "#regr" \n\t" \
    "paddb "#regd", "#regp" \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq "#rega", "#regr" \n\t" \
    "movq "#regc", "#regp" \n\t" \
    "por "#regb", "#regr" \n\t" \
    "por "#regd", "#regp" \n\t" \
    "pxor "#rega", "#regb" \n\t" \
    "pxor "#regc", "#regd" \n\t" \
    "pand %%mm6, "#regb" \n\t" \
    "pand %%mm6, "#regd" \n\t" \
    "psrlq $1, "#regd" \n\t" \
    "psrlq $1, "#regb" \n\t" \
    "psubb "#regb", "#regr" \n\t" \
    "psubb "#regd", "#regp" \n\t"

/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ## _mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)
#define OP_AVG(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ## _mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in the MMX2 instruction set */
#define PAVGB "pavgb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx

/***********************************/
/* standard MMX */

void ff_put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels,
                               int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p   = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile (
        "movq %3, %%mm0 \n\t"
        "movq 8%3, %%mm1 \n\t"
        "movq 16%3, %%mm2 \n\t"
        "movq 24%3, %%mm3 \n\t"
        "movq 32%3, %%mm4 \n\t"
        "movq 40%3, %%mm5 \n\t"
        "movq 48%3, %%mm6 \n\t"
        "movq 56%3, %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3),
           "m"(*p)
        : "memory");
    pix += line_size * 4;
    p   += 32;

    // If this loop were an exact copy of the one above, the compiler
    // would generate very strange code, so the block pointer is passed
    // in a register ("r") instead of as a memory operand.
    __asm__ volatile (
        "movq (%3), %%mm0 \n\t"
        "movq 8(%3), %%mm1 \n\t"
        "movq 16(%3), %%mm2 \n\t"
        "movq 24(%3), %%mm3 \n\t"
        "movq 32(%3), %%mm4 \n\t"
        "movq 40(%3), %%mm5 \n\t"
        "movq 48(%3), %%mm6 \n\t"
        "movq 56(%3), %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3), "r"(p)
        : "memory");
}
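
/*
 * A scalar sketch of what the function above computes (illustration
 * only, hence the #if 0; av_clip_uint8() is libavutil's clamp helper):
 * packuswb performs the same saturation to [0, 255], eight
 * coefficients at a time.
 */
#if 0
static void put_pixels_clamped_c(const DCTELEM *block, uint8_t *pixels,
                                 int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            pixels[j] = av_clip_uint8(block[j]);
        pixels += line_size;
        block  += 8;
    }
}
#endif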

#define put_signed_pixels_clamped_mmx_half(off) \
    "movq "#off"(%2), %%mm1 \n\t" \
    "movq 16 + "#off"(%2), %%mm2 \n\t" \
    "movq 32 + "#off"(%2), %%mm3 \n\t" \
    "movq 48 + "#off"(%2), %%mm4 \n\t" \
    "packsswb 8 + "#off"(%2), %%mm1 \n\t" \
    "packsswb 24 + "#off"(%2), %%mm2 \n\t" \
    "packsswb 40 + "#off"(%2), %%mm3 \n\t" \
    "packsswb 56 + "#off"(%2), %%mm4 \n\t" \
    "paddb %%mm0, %%mm1 \n\t" \
    "paddb %%mm0, %%mm2 \n\t" \
    "paddb %%mm0, %%mm3 \n\t" \
    "paddb %%mm0, %%mm4 \n\t" \
    "movq %%mm1, (%0) \n\t" \
    "movq %%mm2, (%0, %3) \n\t" \
    "movq %%mm3, (%0, %3, 2) \n\t" \
    "movq %%mm4, (%0, %1) \n\t"

void ff_put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels,
                                      int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_pb_80)", %%mm0 \n\t"
        "lea (%3, %3, 2), %1 \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea (%0, %3, 4), %0 \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        : "+&r"(pixels), "=&r"(line_skip3)
        : "r"(block), "r"(line_skip)
        : "memory");
}

void ff_add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels,
                               int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p   = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile (
            "movq (%2), %%mm0 \n\t"
            "movq 8(%2), %%mm1 \n\t"
            "movq 16(%2), %%mm2 \n\t"
            "movq 24(%2), %%mm3 \n\t"
            "movq %0, %%mm4 \n\t"
            "movq %1, %%mm6 \n\t"
            "movq %%mm4, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm4, %%mm0 \n\t"
            "paddsw %%mm5, %%mm1 \n\t"
            "movq %%mm6, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm6 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm6, %%mm2 \n\t"
            "paddsw %%mm5, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %0 \n\t"
            "movq %%mm2, %1 \n\t"
            : "+m"(*pix), "+m"(*(pix + line_size))
            : "r"(p)
            : "memory");
        pix += line_size * 2;
        p   += 16;
    } while (--i);
}
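
/*
 * Scalar equivalent (sketch): pixels[j] = av_clip_uint8(pixels[j] + block[j])
 * for each of the 64 coefficients; paddsw plus packuswb supply the clamp.
 */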

static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels,
                            int line_size, int h)
{
    __asm__ volatile (
        "lea (%3, %3), %%"REG_a" \n\t"
        ".p2align 3 \n\t"
        "1: \n\t"
        "movd (%1 ), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movd (%1 ), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r"(pixels), "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
                            int line_size, int h)
{
    __asm__ volatile (
        "lea (%3, %3), %%"REG_a" \n\t"
        ".p2align 3 \n\t"
        "1: \n\t"
        "movq (%1 ), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1 ), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r"(pixels), "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels,
                             int line_size, int h)
{
    __asm__ volatile (
        "lea (%3, %3), %%"REG_a" \n\t"
        ".p2align 3 \n\t"
        "1: \n\t"
        "movq (%1 ), %%mm0 \n\t"
        "movq 8(%1 ), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1 ), %%mm0 \n\t"
        "movq 8(%1 ), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r"(pixels), "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels,
                              int line_size, int h)
{
    __asm__ volatile (
        "1: \n\t"
        "movdqu (%1 ), %%xmm0 \n\t"
        "movdqu (%1, %3 ), %%xmm1 \n\t"
        "movdqu (%1, %3, 2), %%xmm2 \n\t"
        "movdqu (%1, %4 ), %%xmm3 \n\t"
        "lea (%1, %3, 4), %1 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2, %3) \n\t"
        "movdqa %%xmm2, (%2, %3, 2) \n\t"
        "movdqa %%xmm3, (%2, %4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%2, %3, 4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r"(pixels), "+r"(block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L * line_size)
        : "memory"
        );
}

static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels,
                              int line_size, int h)
{
    __asm__ volatile (
        "1: \n\t"
        "movdqu (%1 ), %%xmm0 \n\t"
        "movdqu (%1, %3 ), %%xmm1 \n\t"
        "movdqu (%1, %3, 2), %%xmm2 \n\t"
        "movdqu (%1, %4 ), %%xmm3 \n\t"
        "lea (%1, %3, 4), %1 \n\t"
        "pavgb (%2 ), %%xmm0 \n\t"
        "pavgb (%2, %3 ), %%xmm1 \n\t"
        "pavgb (%2, %3, 2), %%xmm2 \n\t"
        "pavgb (%2, %4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2, %3) \n\t"
        "movdqa %%xmm2, (%2, %3, 2) \n\t"
        "movdqa %%xmm3, (%2, %4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%2, %3, 4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r"(pixels), "+r"(block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L * line_size)
        : "memory"
        );
}

#define CLEAR_BLOCKS(name, n) \
static void name(DCTELEM *blocks) \
{ \
    __asm__ volatile ( \
        "pxor %%mm7, %%mm7 \n\t" \
        "mov %1, %%"REG_a" \n\t" \
        "1: \n\t" \
        "movq %%mm7, (%0, %%"REG_a") \n\t" \
        "movq %%mm7, 8(%0, %%"REG_a") \n\t" \
        "movq %%mm7, 16(%0, %%"REG_a") \n\t" \
        "movq %%mm7, 24(%0, %%"REG_a") \n\t" \
        "add $32, %%"REG_a" \n\t" \
        "js 1b \n\t" \
        :: "r"(((uint8_t *)blocks) + 128 * n), \
           "i"(-128 * n) \
        : "%"REG_a \
        ); \
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)
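
/*
 * Functionally, CLEAR_BLOCKS(name, n) is just a vectorized
 * memset(blocks, 0, n * 64 * sizeof(DCTELEM)); counting the index
 * register up from a negative offset lets the loop branch reuse the
 * flags set by the add.
 */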

static void clear_block_sse(DCTELEM *block)
{
    __asm__ volatile (
        "xorps %%xmm0, %%xmm0 \n"
        "movaps %%xmm0, (%0) \n"
        "movaps %%xmm0, 16(%0) \n"
        "movaps %%xmm0, 32(%0) \n"
        "movaps %%xmm0, 48(%0) \n"
        "movaps %%xmm0, 64(%0) \n"
        "movaps %%xmm0, 80(%0) \n"
        "movaps %%xmm0, 96(%0) \n"
        "movaps %%xmm0, 112(%0) \n"
        :: "r"(block)
        : "memory"
        );
}

static void clear_blocks_sse(DCTELEM *blocks)
{
    __asm__ volatile (
        "xorps %%xmm0, %%xmm0 \n"
        "mov %1, %%"REG_a" \n"
        "1: \n"
        "movaps %%xmm0, (%0, %%"REG_a") \n"
        "movaps %%xmm0, 16(%0, %%"REG_a") \n"
        "movaps %%xmm0, 32(%0, %%"REG_a") \n"
        "movaps %%xmm0, 48(%0, %%"REG_a") \n"
        "movaps %%xmm0, 64(%0, %%"REG_a") \n"
        "movaps %%xmm0, 80(%0, %%"REG_a") \n"
        "movaps %%xmm0, 96(%0, %%"REG_a") \n"
        "movaps %%xmm0, 112(%0, %%"REG_a") \n"
        "add $128, %%"REG_a" \n"
        "js 1b \n"
        :: "r"(((uint8_t *)blocks) + 128 * 6),
           "i"(-128 * 6)
        : "%"REG_a
        );
}

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w)
{
    x86_reg i = 0;
    __asm__ volatile (
        "jmp 2f \n\t"
        "1: \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq (%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%2, %0) \n\t"
        "movq 8(%1, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%2, %0) \n\t"
        "add $16, %0 \n\t"
        "2: \n\t"
        "cmp %3, %0 \n\t"
        "js 1b \n\t"
        : "+r"(i)
        : "r"(src), "r"(dst), "r"((x86_reg)w - 15)
        );
    for ( ; i < w; i++)
        dst[i + 0] += src[i + 0];
}

#if HAVE_7REGS
static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top,
                                            const uint8_t *diff, int w,
                                            int *left, int *left_top)
{
    x86_reg w2 = -w;
    x86_reg x;
    int l  = *left     & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile (
        "mov %7, %3 \n"
        "1: \n"
        "movzbl (%3, %4), %2 \n"
        "mov %2, %k3 \n"
        "sub %b1, %b3 \n"
        "add %b0, %b3 \n"
        "mov %2, %1 \n"
        "cmp %0, %2 \n"
        "cmovg %0, %2 \n"
        "cmovg %1, %0 \n"
        "cmp %k3, %0 \n"
        "cmovg %k3, %0 \n"
        "mov %7, %3 \n"
        "cmp %2, %0 \n"
        "cmovl %2, %0 \n"
        "add (%6, %4), %b0 \n"
        "mov %b0, (%5, %4) \n"
        "inc %4 \n"
        "jl 1b \n"
        : "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        : "r"(dst + w), "r"(diff + w), "rm"(top + w)
        );
    *left     = l;
    *left_top = tl;
}
#endif
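
/*
 * Scalar reference for the cmov loop above, following the usual HuffYUV
 * median predictor (a sketch, not part of the original file, hence the
 * #if 0; mid_pred() is the median-of-three helper from mathops.h):
 */
#if 0
static void add_hfyu_median_prediction_c(uint8_t *dst, const uint8_t *top,
                                         const uint8_t *diff, int w,
                                         int *left, int *left_top)
{
    int i;
    uint8_t l  = *left;
    uint8_t lt = *left_top;
    for (i = 0; i < w; i++) {
        /* predict from left, top and top-left, then add the residual */
        l      = mid_pred(l, top[i], (l + top[i] - lt) & 0xFF) + diff[i];
        lt     = top[i];
        dst[i] = l;
    }
    *left     = l;
    *left_top = lt;
}
#endif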

#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7 \n\t" \
    "movq %0, %%mm0 \n\t" \
    "movq %0, %%mm1 \n\t" \
    "movq %3, %%mm2 \n\t" \
    "movq %3, %%mm3 \n\t" \
    "punpcklbw %%mm7, %%mm0 \n\t" \
    "punpckhbw %%mm7, %%mm1 \n\t" \
    "punpcklbw %%mm7, %%mm2 \n\t" \
    "punpckhbw %%mm7, %%mm3 \n\t" \
    "psubw %%mm2, %%mm0 \n\t" \
    "psubw %%mm3, %%mm1 \n\t" \
    "movq %1, %%mm2 \n\t" \
    "movq %1, %%mm3 \n\t" \
    "movq %2, %%mm4 \n\t" \
    "movq %2, %%mm5 \n\t" \
    "punpcklbw %%mm7, %%mm2 \n\t" \
    "punpckhbw %%mm7, %%mm3 \n\t" \
    "punpcklbw %%mm7, %%mm4 \n\t" \
    "punpckhbw %%mm7, %%mm5 \n\t" \
    "psubw %%mm2, %%mm4 \n\t" \
    "psubw %%mm3, %%mm5 \n\t" \
    "psllw $2, %%mm4 \n\t" \
    "psllw $2, %%mm5 \n\t" \
    "paddw %%mm0, %%mm4 \n\t" \
    "paddw %%mm1, %%mm5 \n\t" \
    "pxor %%mm6, %%mm6 \n\t" \
    "pcmpgtw %%mm4, %%mm6 \n\t" \
    "pcmpgtw %%mm5, %%mm7 \n\t" \
    "pxor %%mm6, %%mm4 \n\t" \
    "pxor %%mm7, %%mm5 \n\t" \
    "psubw %%mm6, %%mm4 \n\t" \
    "psubw %%mm7, %%mm5 \n\t" \
    "psrlw $3, %%mm4 \n\t" \
    "psrlw $3, %%mm5 \n\t" \
    "packuswb %%mm5, %%mm4 \n\t" \
    "packsswb %%mm7, %%mm6 \n\t" \
    "pxor %%mm7, %%mm7 \n\t" \
    "movd %4, %%mm2 \n\t" \
    "punpcklbw %%mm2, %%mm2 \n\t" \
    "punpcklbw %%mm2, %%mm2 \n\t" \
    "punpcklbw %%mm2, %%mm2 \n\t" \
    "psubusb %%mm4, %%mm2 \n\t" \
    "movq %%mm2, %%mm3 \n\t" \
    "psubusb %%mm4, %%mm3 \n\t" \
    "psubb %%mm3, %%mm2 \n\t" \
    "movq %1, %%mm3 \n\t" \
    "movq %2, %%mm4 \n\t" \
    "pxor %%mm6, %%mm3 \n\t" \
    "pxor %%mm6, %%mm4 \n\t" \
    "paddusb %%mm2, %%mm3 \n\t" \
    "psubusb %%mm2, %%mm4 \n\t" \
    "pxor %%mm6, %%mm3 \n\t" \
    "pxor %%mm6, %%mm4 \n\t" \
    "paddusb %%mm2, %%mm2 \n\t" \
    "packsswb %%mm1, %%mm0 \n\t" \
    "pcmpgtb %%mm0, %%mm7 \n\t" \
    "pxor %%mm7, %%mm0 \n\t" \
    "psubb %%mm7, %%mm0 \n\t" \
    "movq %%mm0, %%mm1 \n\t" \
    "psubusb %%mm2, %%mm0 \n\t" \
    "psubb %%mm0, %%mm1 \n\t" \
    "pand %5, %%mm1 \n\t" \
    "psrlw $2, %%mm1 \n\t" \
    "pxor %%mm7, %%mm1 \n\t" \
    "psubb %%mm7, %%mm1 \n\t" \
    "movq %0, %%mm5 \n\t" \
    "movq %3, %%mm6 \n\t" \
    "psubb %%mm1, %%mm5 \n\t" \
    "paddb %%mm1, %%mm6 \n\t"

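/*
 * H263_LOOP_FILTER applies the H.263/MPEG-4 deblocking filter across one
 * 8-pixel-wide block edge: %0..%3 are the four lines (or transposed
 * columns) straddling the boundary, %4 is 2*strength and %5 the 0xFC
 * mask that turns psrlw $2 into a per-byte shift. On exit the filtered
 * %0..%3 are left in mm5, mm3, mm4 and mm6 respectively.
 */
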
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale)
{
    if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
        const int strength = ff_h263_loop_filter_strength[qscale];

        __asm__ volatile (
            H263_LOOP_FILTER

            "movq %%mm3, %1 \n\t"
            "movq %%mm4, %2 \n\t"
            "movq %%mm5, %0 \n\t"
            "movq %%mm6, %3 \n\t"
            : "+m"(*(uint64_t*)(src - 2 * stride)),
              "+m"(*(uint64_t*)(src - 1 * stride)),
              "+m"(*(uint64_t*)(src + 0 * stride)),
              "+m"(*(uint64_t*)(src + 1 * stride))
            : "g"(2 * strength), "m"(ff_pb_FC)
            );
    }
}

static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale)
{
    if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
        const int strength = ff_h263_loop_filter_strength[qscale];
        DECLARE_ALIGNED(8, uint64_t, temp)[4];
        uint8_t *btemp = (uint8_t*)temp;

        src -= 2;

        transpose4x4(btemp, src, 8, stride);
        transpose4x4(btemp + 4, src + 4 * stride, 8, stride);
        __asm__ volatile (
            H263_LOOP_FILTER // 5 3 4 6

            : "+m"(temp[0]),
              "+m"(temp[1]),
              "+m"(temp[2]),
              "+m"(temp[3])
            : "g"(2 * strength), "m"(ff_pb_FC)
            );

        __asm__ volatile (
            "movq %%mm5, %%mm1 \n\t"
            "movq %%mm4, %%mm0 \n\t"
            "punpcklbw %%mm3, %%mm5 \n\t"
            "punpcklbw %%mm6, %%mm4 \n\t"
            "punpckhbw %%mm3, %%mm1 \n\t"
            "punpckhbw %%mm6, %%mm0 \n\t"
            "movq %%mm5, %%mm3 \n\t"
            "movq %%mm1, %%mm6 \n\t"
            "punpcklwd %%mm4, %%mm5 \n\t"
            "punpcklwd %%mm0, %%mm1 \n\t"
            "punpckhwd %%mm4, %%mm3 \n\t"
            "punpckhwd %%mm0, %%mm6 \n\t"
            "movd %%mm5, (%0) \n\t"
            "punpckhdq %%mm5, %%mm5 \n\t"
            "movd %%mm5, (%0, %2) \n\t"
            "movd %%mm3, (%0, %2, 2) \n\t"
            "punpckhdq %%mm3, %%mm3 \n\t"
            "movd %%mm3, (%0, %3) \n\t"
            "movd %%mm1, (%1) \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movd %%mm1, (%1, %2) \n\t"
            "movd %%mm6, (%1, %2, 2) \n\t"
            "punpckhdq %%mm6, %%mm6 \n\t"
            "movd %%mm6, (%1, %3) \n\t"
            :: "r"(src),
               "r"(src + 4 * stride),
               "r"((x86_reg)stride),
               "r"((x86_reg)(3 * stride))
            );
    }
}

/* Draw the edges of width 'w' of an image of size width x height.
 * This MMX version can only handle w == 8 || w == 16. */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
                           int w, int h, int sides)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if (w == 8) {
        __asm__ volatile (
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            "jb 1b \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    } else {
        __asm__ volatile (
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq %%mm0, -16(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "movq %%mm1, 8(%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            "jb 1b \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    }

    /* top and bottom (and hopefully also the corners) */
    if (sides & EDGE_TOP) {
        for (i = 0; i < h; i += 4) {
            ptr = buf - (i + 1) * wrap - w;
            __asm__ volatile (
                "1: \n\t"
                "movq (%1, %0), %%mm0 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm0, (%0, %2) \n\t"
                "movq %%mm0, (%0, %2, 2) \n\t"
                "movq %%mm0, (%0, %3) \n\t"
                "add $8, %0 \n\t"
                "cmp %4, %0 \n\t"
                "jb 1b \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)buf - (x86_reg)ptr - w), "r"((x86_reg) -wrap),
                  "r"((x86_reg) -wrap * 3), "r"(ptr + width + 2 * w)
                );
        }
    }

    if (sides & EDGE_BOTTOM) {
        for (i = 0; i < h; i += 4) {
            ptr = last_line + (i + 1) * wrap - w;
            __asm__ volatile (
                "1: \n\t"
                "movq (%1, %0), %%mm0 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm0, (%0, %2) \n\t"
                "movq %%mm0, (%0, %2, 2) \n\t"
                "movq %%mm0, (%0, %3) \n\t"
                "add $8, %0 \n\t"
                "cmp %4, %0 \n\t"
                "jb 1b \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)last_line - (x86_reg)ptr - w),
                  "r"((x86_reg)wrap), "r"((x86_reg)wrap * 3),
                  "r"(ptr + width + 2 * w)
                );
        }
    }
}
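
/*
 * Scalar sketch of the left/right part of draw_edges_mmx() above
 * (illustration only, hence the #if 0; memset() needs <string.h>):
 */
#if 0
static void draw_edges_lr_c(uint8_t *buf, int wrap, int width, int height,
                            int w)
{
    int y;
    for (y = 0; y < height; y++) {
        /* replicate the first/last pixel of each row into the borders */
        memset(buf + y * wrap - w,     buf[y * wrap],             w);
        memset(buf + y * wrap + width, buf[y * wrap + width - 1], w);
    }
}
#endif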

#define QPEL_V_LOW(m3, m4, m5, m6, pw_20, pw_3, rnd, \
                   in0, in1, in2, in7, out, OP) \
    "paddw "#m4", "#m3" \n\t" /* x1 */ \
    "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */ \
    "pmullw "#m3", %%mm4 \n\t" /* 20x1 */ \
    "movq "#in7", "#m3" \n\t" /* d */ \
    "movq "#in0", %%mm5 \n\t" /* D */ \
    "paddw "#m3", %%mm5 \n\t" /* x4 */ \
    "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */ \
    "movq "#in1", %%mm5 \n\t" /* C */ \
    "movq "#in2", %%mm6 \n\t" /* B */ \
    "paddw "#m6", %%mm5 \n\t" /* x3 */ \
    "paddw "#m5", %%mm6 \n\t" /* x2 */ \
    "paddw %%mm6, %%mm6 \n\t" /* 2x2 */ \
    "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */ \
    "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */ \
    "paddw "#rnd", %%mm4 \n\t" /* x2 */ \
    "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */ \
    "psraw $5, %%mm5 \n\t" \
    "packuswb %%mm5, %%mm5 \n\t" \
    OP(%%mm5, out, %%mm7, d)

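/*
 * QPEL_V_LOW evaluates one output row of the 8-tap filter used for
 * MPEG-4 quarter-pel interpolation:
 *   out = clip8((20*x1 - 6*x2 + 3*x3 - x4 + rnd) >> 5)
 * where x1..x4 are the pairwise sums of taps 0..3 away from the
 * interpolation point (cf. the scalar temp[] expressions in the
 * 3DNow! h_lowpass versions below).
 */
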
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW) \
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, \
                                                  uint8_t *src, \
                                                  int dstStride, \
                                                  int srcStride, \
                                                  int h) \
{ \
    uint64_t temp; \
\
    __asm__ volatile ( \
        "pxor %%mm7, %%mm7 \n\t" \
        "1: \n\t" \
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */ \
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */ \
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */ \
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */ \
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */ \
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */ \
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */ \
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */ \
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */ \
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */ \
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */ \
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */ \
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */ \
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */ \
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */ \
        "paddw %%mm3, %%mm5 \n\t" /* b */ \
        "paddw %%mm2, %%mm6 \n\t" /* c */ \
        "paddw %%mm5, %%mm5 \n\t" /* 2b */ \
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */ \
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */ \
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */ \
        "paddw %%mm4, %%mm0 \n\t" /* a */ \
        "paddw %%mm1, %%mm5 \n\t" /* d */ \
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */ \
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */ \
        "paddw %6, %%mm6 \n\t" \
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */ \
        "psraw $5, %%mm0 \n\t" \
        "movq %%mm0, %5 \n\t" \
        /* mm1 = EFGH, mm2 = DEFG, mm3 = CDEF, mm4 = BCDE, mm7 = 0 */ \
\
        "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */ \
        "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */ \
        "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */ \
        "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */ \
        "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */ \
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */ \
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */ \
        "paddw %%mm0, %%mm2 \n\t" /* b */ \
        "paddw %%mm5, %%mm3 \n\t" /* c */ \
        "paddw %%mm2, %%mm2 \n\t" /* 2b */ \
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */ \
        "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */ \
        "psrlq $24, %%mm6 \n\t" /* IJKLM000 */ \
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */ \
        "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */ \
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */ \
        "paddw %%mm2, %%mm1 \n\t" /* a */ \
        "paddw %%mm6, %%mm4 \n\t" /* d */ \
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */ \
        "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */ \
        "paddw %6, %%mm1 \n\t" \
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */ \
        "psraw $5, %%mm3 \n\t" \
        "movq %5, %%mm1 \n\t" \
        "packuswb %%mm3, %%mm1 \n\t" \
        OP_MMX2(%%mm1, (%1), %%mm4, q) \
        /* mm0 = GHIJ, mm2 = FGHI, mm5 = HIJK, mm6 = IJKL, mm7 = 0 */ \
\
        "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */ \
        "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */ \
        "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */ \
        "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */ \
        "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */ \
        "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */ \
        "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */ \
        "paddw %%mm1, %%mm5 \n\t" /* b */ \
        "paddw %%mm4, %%mm0 \n\t" /* c */ \
        "paddw %%mm5, %%mm5 \n\t" /* 2b */ \
        "psubw %%mm5, %%mm0 \n\t" /* c - 2b */ \
        "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */ \
        "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */ \
        "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */ \
        "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */ \
        "paddw %%mm3, %%mm2 \n\t" /* d */ \
        "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */ \
        "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */ \
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */ \
        "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */ \
        "paddw %%mm2, %%mm6 \n\t" /* a */ \
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */ \
        "paddw %6, %%mm0 \n\t" \
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */ \
        "psraw $5, %%mm0 \n\t" \
        /* mm1 = KLMN, mm2 = JKLM, mm3 = MNOP, */ \
        /* mm4 = LMNO, mm5 = NOPQ mm7 = 0 */ \
\
        "paddw %%mm5, %%mm3 \n\t" /* a */ \
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */ \
        "paddw %%mm4, %%mm6 \n\t" /* b */ \
        "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */ \
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */ \
        "paddw %%mm1, %%mm4 \n\t" /* c */ \
        "paddw %%mm2, %%mm5 \n\t" /* d */ \
        "paddw %%mm6, %%mm6 \n\t" /* 2b */ \
        "psubw %%mm6, %%mm4 \n\t" /* c - 2b */ \
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */ \
        "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */ \
        "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */ \
        "paddw %6, %%mm4 \n\t" \
        "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */ \
        "psraw $5, %%mm4 \n\t" \
        "packuswb %%mm4, %%mm0 \n\t" \
        OP_MMX2(%%mm0, 8(%1), %%mm4, q) \
\
        "add %3, %0 \n\t" \
        "add %4, %1 \n\t" \
        "decl %2 \n\t" \
        "jnz 1b \n\t" \
        : "+a"(src), "+c"(dst), "+D"(h) \
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), \
          /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(temp), "m"(ROUNDER) \
        : "memory" \
        ); \
} \
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, \
                                                   uint8_t *src, \
                                                   int dstStride, \
                                                   int srcStride, \
                                                   int h) \
{ \
    int i; \
    int16_t temp[16]; \
    /* quick HACK, XXX FIXME MUST be optimized */ \
    for (i = 0; i < h; i++) { \
        temp[ 0] = (src[ 0] + src[ 1]) * 20 - (src[ 0] + src[ 2]) * 6 + \
                   (src[ 1] + src[ 3]) * 3 - (src[ 2] + src[ 4]); \
        temp[ 1] = (src[ 1] + src[ 2]) * 20 - (src[ 0] + src[ 3]) * 6 + \
                   (src[ 0] + src[ 4]) * 3 - (src[ 1] + src[ 5]); \
        temp[ 2] = (src[ 2] + src[ 3]) * 20 - (src[ 1] + src[ 4]) * 6 + \
                   (src[ 0] + src[ 5]) * 3 - (src[ 0] + src[ 6]); \
        temp[ 3] = (src[ 3] + src[ 4]) * 20 - (src[ 2] + src[ 5]) * 6 + \
                   (src[ 1] + src[ 6]) * 3 - (src[ 0] + src[ 7]); \
        temp[ 4] = (src[ 4] + src[ 5]) * 20 - (src[ 3] + src[ 6]) * 6 + \
                   (src[ 2] + src[ 7]) * 3 - (src[ 1] + src[ 8]); \
        temp[ 5] = (src[ 5] + src[ 6]) * 20 - (src[ 4] + src[ 7]) * 6 + \
                   (src[ 3] + src[ 8]) * 3 - (src[ 2] + src[ 9]); \
        temp[ 6] = (src[ 6] + src[ 7]) * 20 - (src[ 5] + src[ 8]) * 6 + \
                   (src[ 4] + src[ 9]) * 3 - (src[ 3] + src[10]); \
        temp[ 7] = (src[ 7] + src[ 8]) * 20 - (src[ 6] + src[ 9]) * 6 + \
                   (src[ 5] + src[10]) * 3 - (src[ 4] + src[11]); \
        temp[ 8] = (src[ 8] + src[ 9]) * 20 - (src[ 7] + src[10]) * 6 + \
                   (src[ 6] + src[11]) * 3 - (src[ 5] + src[12]); \
        temp[ 9] = (src[ 9] + src[10]) * 20 - (src[ 8] + src[11]) * 6 + \
                   (src[ 7] + src[12]) * 3 - (src[ 6] + src[13]); \
        temp[10] = (src[10] + src[11]) * 20 - (src[ 9] + src[12]) * 6 + \
                   (src[ 8] + src[13]) * 3 - (src[ 7] + src[14]); \
        temp[11] = (src[11] + src[12]) * 20 - (src[10] + src[13]) * 6 + \
                   (src[ 9] + src[14]) * 3 - (src[ 8] + src[15]); \
        temp[12] = (src[12] + src[13]) * 20 - (src[11] + src[14]) * 6 + \
                   (src[10] + src[15]) * 3 - (src[ 9] + src[16]); \
        temp[13] = (src[13] + src[14]) * 20 - (src[12] + src[15]) * 6 + \
                   (src[11] + src[16]) * 3 - (src[10] + src[16]); \
        temp[14] = (src[14] + src[15]) * 20 - (src[13] + src[16]) * 6 + \
                   (src[12] + src[16]) * 3 - (src[11] + src[15]); \
        temp[15] = (src[15] + src[16]) * 20 - (src[14] + src[16]) * 6 + \
                   (src[13] + src[15]) * 3 - (src[12] + src[14]); \
        __asm__ volatile ( \
            "movq (%0), %%mm0 \n\t" \
            "movq 8(%0), %%mm1 \n\t" \
            "paddw %2, %%mm0 \n\t" \
            "paddw %2, %%mm1 \n\t" \
            "psraw $5, %%mm0 \n\t" \
            "psraw $5, %%mm1 \n\t" \
            "packuswb %%mm1, %%mm0 \n\t" \
            OP_3DNOW(%%mm0, (%1), %%mm1, q) \
            "movq 16(%0), %%mm0 \n\t" \
            "movq 24(%0), %%mm1 \n\t" \
            "paddw %2, %%mm0 \n\t" \
            "paddw %2, %%mm1 \n\t" \
            "psraw $5, %%mm0 \n\t" \
            "psraw $5, %%mm1 \n\t" \
            "packuswb %%mm1, %%mm0 \n\t" \
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q) \
            :: "r"(temp), "r"(dst), "m"(ROUNDER) \
            : "memory" \
            ); \
        dst += dstStride; \
        src += srcStride; \
    } \
} \
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, \
                                                 uint8_t *src, \
                                                 int dstStride, \
                                                 int srcStride, \
                                                 int h) \
{ \
    __asm__ volatile ( \
        "pxor %%mm7, %%mm7 \n\t" \
        "1: \n\t" \
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */ \
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */ \
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */ \
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */ \
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */ \
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */ \
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */ \
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */ \
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */ \
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */ \
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */ \
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */ \
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */ \
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */ \
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */ \
        "paddw %%mm3, %%mm5 \n\t" /* b */ \
        "paddw %%mm2, %%mm6 \n\t" /* c */ \
        "paddw %%mm5, %%mm5 \n\t" /* 2b */ \
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */ \
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */ \
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */ \
        "paddw %%mm4, %%mm0 \n\t" /* a */ \
        "paddw %%mm1, %%mm5 \n\t" /* d */ \
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */ \
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */ \
        "paddw %5, %%mm6 \n\t" \
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */ \
        "psraw $5, %%mm0 \n\t" \
        /* mm1 = EFGH, mm2 = DEFG, mm3 = CDEF, mm4 = BCDE, mm7 = 0 */ \
\
        "movd 5(%0), %%mm5 \n\t" /* FGHI */ \
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */ \
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */ \
        "paddw %%mm5, %%mm1 \n\t" /* a */ \
        "paddw %%mm6, %%mm2 \n\t" /* b */ \
        "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */ \
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */ \
        "paddw %%mm6, %%mm3 \n\t" /* c */ \
        "paddw %%mm5, %%mm4 \n\t" /* d */ \
        "paddw %%mm2, %%mm2 \n\t" /* 2b */ \
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */ \
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */ \
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */ \
        "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */ \
        "paddw %5, %%mm1 \n\t" \
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */ \
        "psraw $5, %%mm3 \n\t" \
        "packuswb %%mm3, %%mm0 \n\t" \
        OP_MMX2(%%mm0, (%1), %%mm4, q) \
\
        "add %3, %0 \n\t" \
        "add %4, %1 \n\t" \
        "decl %2 \n\t" \
        "jnz 1b \n\t" \
        : "+a"(src), "+c"(dst), "+d"(h) \
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), \
          /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(ROUNDER) \
        : "memory" \
        ); \
} \
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, \
                                                  uint8_t *src, \
                                                  int dstStride, \
                                                  int srcStride, \
                                                  int h) \
{ \
    int i; \
    int16_t temp[8]; \
    /* quick HACK, XXX FIXME MUST be optimized */ \
    for (i = 0; i < h; i++) { \
        temp[0] = (src[0] + src[1]) * 20 - (src[0] + src[2]) * 6 + \
                  (src[1] + src[3]) * 3 - (src[2] + src[4]); \
        temp[1] = (src[1] + src[2]) * 20 - (src[0] + src[3]) * 6 + \
                  (src[0] + src[4]) * 3 - (src[1] + src[5]); \
        temp[2] = (src[2] + src[3]) * 20 - (src[1] + src[4]) * 6 + \
                  (src[0] + src[5]) * 3 - (src[0] + src[6]); \
        temp[3] = (src[3] + src[4]) * 20 - (src[2] + src[5]) * 6 + \
                  (src[1] + src[6]) * 3 - (src[0] + src[7]); \
        temp[4] = (src[4] + src[5]) * 20 - (src[3] + src[6]) * 6 + \
                  (src[2] + src[7]) * 3 - (src[1] + src[8]); \
        temp[5] = (src[5] + src[6]) * 20 - (src[4] + src[7]) * 6 + \
                  (src[3] + src[8]) * 3 - (src[2] + src[8]); \
        temp[6] = (src[6] + src[7]) * 20 - (src[5] + src[8]) * 6 + \
                  (src[4] + src[8]) * 3 - (src[3] + src[7]); \
        temp[7] = (src[7] + src[8]) * 20 - (src[6] + src[8]) * 6 + \
                  (src[5] + src[7]) * 3 - (src[4] + src[6]); \
        __asm__ volatile ( \
            "movq (%0), %%mm0 \n\t" \
            "movq 8(%0), %%mm1 \n\t" \
            "paddw %2, %%mm0 \n\t" \
            "paddw %2, %%mm1 \n\t" \
            "psraw $5, %%mm0 \n\t" \
            "psraw $5, %%mm1 \n\t" \
            "packuswb %%mm1, %%mm0 \n\t" \
            OP_3DNOW(%%mm0, (%1), %%mm1, q) \
            :: "r"(temp), "r"(dst), "m"(ROUNDER) \
            : "memory" \
            ); \
        dst += dstStride; \
        src += srcStride; \
    } \
}

#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX) \
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, \
                                                     uint8_t *src, \
                                                     int dstStride, \
                                                     int srcStride) \
{ \
    uint64_t temp[17 * 4]; \
    uint64_t *temp_ptr = temp; \
    int count = 17; \
\
    /* FIXME unroll */ \
    __asm__ volatile ( \
        "pxor %%mm7, %%mm7 \n\t" \
        "1: \n\t" \
        "movq (%0), %%mm0 \n\t" \
        "movq (%0), %%mm1 \n\t" \
        "movq 8(%0), %%mm2 \n\t" \
        "movq 8(%0), %%mm3 \n\t" \
        "punpcklbw %%mm7, %%mm0 \n\t" \
        "punpckhbw %%mm7, %%mm1 \n\t" \
        "punpcklbw %%mm7, %%mm2 \n\t" \
        "punpckhbw %%mm7, %%mm3 \n\t" \
        "movq %%mm0, (%1) \n\t" \
        "movq %%mm1, 17 * 8(%1) \n\t" \
        "movq %%mm2, 2 * 17 * 8(%1) \n\t" \
        "movq %%mm3, 3 * 17 * 8(%1) \n\t" \
        "add $8, %1 \n\t" \
        "add %3, %0 \n\t" \
        "decl %2 \n\t" \
        "jnz 1b \n\t" \
        : "+r"(src), "+r"(temp_ptr), "+r"(count) \
        : "r"((x86_reg)srcStride) \
        : "memory" \
        ); \
\
    temp_ptr = temp; \
    count = 4; \
\
    /* FIXME reorder for speed */ \
    __asm__ volatile ( \
        /* "pxor %%mm7, %%mm7 \n\t" */ \
        "1: \n\t" \
        "movq (%0), %%mm0 \n\t" \
        "movq 8(%0), %%mm1 \n\t" \
        "movq 16(%0), %%mm2 \n\t" \
        "movq 24(%0), %%mm3 \n\t" \
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP) \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP) \
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP) \
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP) \
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP) \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP) \
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP) \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP) \
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP) \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0), 104(%0), (%1, %3), OP) \
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0), 112(%0), (%1), OP) \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0), 120(%0), (%1, %3), OP) \
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0), 128(%0), (%1), OP) \
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0), 128(%0), (%1, %3), OP) \
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0), 104(%0), 120(%0), (%1), OP) \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0), 104(%0), 112(%0), 112(%0), (%1, %3), OP) \
\
        "add $136, %0 \n\t" \
        "add %6, %1 \n\t" \
        "decl %2 \n\t" \
        "jnz 1b \n\t" \
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count) \
        : "r"((x86_reg)dstStride), "r"(2 * (x86_reg)dstStride), \
          /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(ROUNDER), \
          "g"(4 - 14 * (x86_reg)dstStride) \
        : "memory" \
        ); \
} \
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, \
                                                    uint8_t *src, \
                                                    int dstStride, \
                                                    int srcStride) \
{ \
    uint64_t temp[9 * 2]; \
    uint64_t *temp_ptr = temp; \
    int count = 9; \
\
    /* FIXME unroll */ \
    __asm__ volatile ( \
        "pxor %%mm7, %%mm7 \n\t" \
        "1: \n\t" \
        "movq (%0), %%mm0 \n\t" \
        "movq (%0), %%mm1 \n\t" \
        "punpcklbw %%mm7, %%mm0 \n\t" \
        "punpckhbw %%mm7, %%mm1 \n\t" \
        "movq %%mm0, (%1) \n\t" \
        "movq %%mm1, 9*8(%1) \n\t" \
        "add $8, %1 \n\t" \
        "add %3, %0 \n\t" \
        "decl %2 \n\t" \
        "jnz 1b \n\t" \
        : "+r"(src), "+r"(temp_ptr), "+r"(count) \
        : "r"((x86_reg)srcStride) \
        : "memory" \
        ); \
\
    temp_ptr = temp; \
    count = 2; \
\
    /* FIXME reorder for speed */ \
    __asm__ volatile ( \
        /* "pxor %%mm7, %%mm7 \n\t" */ \
        "1: \n\t" \
        "movq (%0), %%mm0 \n\t" \
        "movq 8(%0), %%mm1 \n\t" \
        "movq 16(%0), %%mm2 \n\t" \
        "movq 24(%0), %%mm3 \n\t" \
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP) \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP) \
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP) \
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP) \
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP) \
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP) \
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP) \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP) \
\
        "add $72, %0 \n\t" \
        "add %6, %1 \n\t" \
        "decl %2 \n\t" \
        "jnz 1b \n\t" \
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count) \
        : "r"((x86_reg)dstStride), "r"(2 * (x86_reg)dstStride), \
          /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(ROUNDER), \
          "g"(4 - 6 * (x86_reg)dstStride) \
        : "memory" \
        ); \
} \
\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, \
                                          int stride) \
{ \
    OPNAME ## pixels8_ ## MMX(dst, src, stride, 8); \
} \
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, \
                                         int stride) \
{ \
    uint64_t temp[8]; \
    uint8_t * const half = (uint8_t*)temp; \
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, \
                                                stride, 8); \
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8); \
} \
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, \
                                         int stride) \
{ \
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, \
                                            stride, 8); \
} \
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, \
                                         int stride) \
{ \
    uint64_t temp[8]; \
    uint8_t * const half = (uint8_t*)temp; \
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, \
                                                stride, 8); \
    OPNAME ## pixels8_l2_ ## MMX(dst, src + 1, half, stride, \
                                 stride, 8); \
} \
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, \
                                         int stride) \
{ \
    uint64_t temp[8]; \
    uint8_t * const half = (uint8_t*)temp; \
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride); \
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8); \
} \
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, \
                                         int stride) \
{ \
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride); \
} \
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, \
                                         int stride) \
{ \
    uint64_t temp[8]; \
    uint8_t * const half = (uint8_t*)temp; \
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride); \
    OPNAME ## pixels8_l2_ ## MMX(dst, src + stride, half, stride, \
                                 stride, 8); \
} \
\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, \
                                         int stride) \
{ \
    uint64_t half[8 + 9]; \
    uint8_t * const halfH = ((uint8_t*)half) + 64; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
                                                stride, 9); \
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9); \
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8); \
} \
\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, \
                                         int stride) \
{ \
    uint64_t half[8 + 9]; \
    uint8_t * const halfH = ((uint8_t*)half) + 64; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
                                                stride, 9); \
    put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, \
                                     stride, 9); \
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8); \
} \
\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, \
                                         int stride) \
{ \
    uint64_t half[8 + 9]; \
    uint8_t * const halfH = ((uint8_t*)half) + 64; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
                                                stride, 9); \
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9); \
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, stride, 8, 8); \
} \
\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, \
                                         int stride) \
{ \
    uint64_t half[8 + 9]; \
    uint8_t * const halfH = ((uint8_t*)half) + 64; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
                                                stride, 9); \
    put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, \
                                     stride, 9); \
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, stride, 8, 8); \
} \
\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, \
                                         int stride) \
{ \
    uint64_t half[8 + 9]; \
    uint8_t * const halfH = ((uint8_t*)half) + 64; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
                                                stride, 9); \
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8); \
} \
\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, \
                                         int stride) \
{ \
    uint64_t half[8 + 9]; \
    uint8_t * const halfH = ((uint8_t*)half) + 64; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
                                                stride, 9); \
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, stride, 8, 8); \
} \
\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, \
                                         int stride) \
{ \
    uint64_t half[8 + 9]; \
    uint8_t * const halfH = ((uint8_t*)half); \
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
                                                stride, 9); \
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9); \
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8); \
} \
\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, \
                                         int stride) \
{ \
    uint64_t half[8 + 9]; \
    uint8_t * const halfH = ((uint8_t*)half); \
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
                                                stride, 9); \
    put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, \
                                     stride, 9); \
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8); \
} \
\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, \
                                         int stride) \
{ \
    uint64_t half[9]; \
    uint8_t * const halfH = ((uint8_t*)half); \
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
                                                stride, 9); \
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8); \
} \
\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, \
                                           int stride) \
{ \
    OPNAME ## pixels16_ ## MMX(dst, src, stride, 16); \
} \
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, \
                                          int stride) \
{ \
    uint64_t temp[32]; \
    uint8_t * const half = (uint8_t*)temp; \
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, \
                                                 stride, 16); \
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16); \
} \
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, \
                                          int stride) \
{ \
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, \
                                             stride, stride, 16); \
} \
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, \
                                          int stride) \
{ \
    uint64_t temp[32]; \
    uint8_t * const half = (uint8_t*)temp; \
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, \
                                                 stride, 16); \
    OPNAME ## pixels16_l2_ ## MMX(dst, src + 1, half, \
                                  stride, stride, 16); \
} \
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, \
                                          int stride) \
{ \
    uint64_t temp[32]; \
    uint8_t * const half = (uint8_t*)temp; \
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, \
                                                 stride); \
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16); \
} \
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, \
                                          int stride) \
{ \
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride); \
} \
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, \
                                          int stride) \
{ \
    uint64_t temp[32]; \
    uint8_t * const half = (uint8_t*)temp; \
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, \
                                                 stride); \
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, \
                                  stride, stride, 16); \
} \
\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, \
                                          int stride) \
{ \
    uint64_t half[16 * 2 + 17 * 2]; \
    uint8_t * const halfH = ((uint8_t*)half) + 256; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
                                                 stride, 17); \
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, \
                                      stride, 17); \
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
                                                 16, 16); \
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16); \
} \
\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, \
                                          int stride) \
{ \
    uint64_t half[16 * 2 + 17 * 2]; \
    uint8_t * const halfH = ((uint8_t*)half) + 256; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
                                                 stride, 17); \
    put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, \
                                      stride, 17); \
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
                                                 16, 16); \
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16); \
} \
\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, \
                                          int stride) \
{ \
    uint64_t half[16 * 2 + 17 * 2]; \
    uint8_t * const halfH = ((uint8_t*)half) + 256; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
                                                 stride, 17); \
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, \
                                      stride, 17); \
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
                                                 16, 16); \
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, stride, \
                                  16, 16); \
} \
\
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, \
                                          int stride) \
{ \
    uint64_t half[16 * 2 + 17 * 2]; \
    uint8_t * const halfH = ((uint8_t*)half) + 256; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
                                                 stride, 17); \
    put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, \
                                      stride, 17); \
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
                                                 16, 16); \
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, stride, \
                                  16, 16); \
} \
\
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, \
                                          int stride) \
{ \
    uint64_t half[16 * 2 + 17 * 2]; \
    uint8_t * const halfH = ((uint8_t*)half) + 256; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
                                                 stride, 17); \
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
                                                 16, 16); \
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16); \
} \
\
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, \
                                          int stride) \
{ \
    uint64_t half[16 * 2 + 17 * 2]; \
    uint8_t * const halfH = ((uint8_t*)half) + 256; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
                                                 stride, 17); \
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
                                                 16, 16); \
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, stride, \
                                  16, 16); \
} \
\
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, \
                                          int stride) \
{ \
    uint64_t half[17 * 2]; \
    uint8_t * const halfH = ((uint8_t*)half); \
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
                                                 stride, 17); \
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, \
                                      stride, 17); \
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16); \
} \
\
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, \
                                          int stride) \
{ \
    uint64_t half[17 * 2]; \
    uint8_t * const halfH = ((uint8_t*)half); \
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
                                                 stride, 17); \
    put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, \
                                      stride, 17); \
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16); \
} \
\
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, \
                                          int stride) \
{ \
    uint64_t half[17 * 2]; \
    uint8_t * const halfH = ((uint8_t*)half); \
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
                                                 stride, 17); \
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16); \
}
1709
1710 #define PUT_OP(a, b, temp, size) \
1711 "mov"#size" "#a", "#b" \n\t"
1712
1713 #define AVG_3DNOW_OP(a, b, temp, size) \
1714 "mov"#size" "#b", "#temp" \n\t" \
1715 "pavgusb "#temp", "#a" \n\t" \
1716 "mov"#size" "#a", "#b" \n\t"
1717
1718 #define AVG_MMX2_OP(a, b, temp, size) \
1719 "mov"#size" "#b", "#temp" \n\t" \
1720 "pavgb "#temp", "#a" \n\t" \
1721 "mov"#size" "#a", "#b" \n\t"
1722
1723 QPEL_BASE(put_, ff_pw_16, _, PUT_OP, PUT_OP)
1724 QPEL_BASE(avg_, ff_pw_16, _, AVG_MMX2_OP, AVG_3DNOW_OP)
1725 QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
1726 QPEL_OP(put_, ff_pw_16, _, PUT_OP, 3dnow)
1727 QPEL_OP(avg_, ff_pw_16, _, AVG_3DNOW_OP, 3dnow)
1728 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
1729 QPEL_OP(put_, ff_pw_16, _, PUT_OP, mmx2)
1730 QPEL_OP(avg_, ff_pw_16, _, AVG_MMX2_OP, mmx2)
1731 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
1732
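/* The instantiations above expand to functions such as
 * put_qpel16_mc31_mmx2(): in mcXY, X is the horizontal and Y the vertical
 * quarter-pel offset. The _no_rnd_ variants pass ff_pw_15 instead of
 * ff_pw_16 as the rounding constant, biasing the lowpass filter down as
 * MPEG-4 no-rounding mode requires. */
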
1733 /***********************************/
1734 /* bilinear qpel: not compliant with any spec, only for -lavdopts fast */
1735
1736 #define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL) \
1737 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, \
1738 uint8_t *src, \
1739 int stride) \
1740 { \
1741 OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE); \
1742 }
1743
1744 #define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2) \
1745 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, \
1746 uint8_t *src, \
1747 int stride) \
1748 { \
1749 OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src + S0, stride, SIZE, \
1750 S1, S2); \
1751 }
1752
1753 #define QPEL_2TAP(OPNAME, SIZE, MMX) \
1754 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX) \
1755 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX) \
1756 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx) \
1757 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX = \
1758 OPNAME ## qpel ## SIZE ## _mc00_ ## MMX; \
1759 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX = \
1760 OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX; \
1761 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX = \
1762 OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX; \
1763 static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, \
1764 uint8_t *src, \
1765 int stride) \
1766 { \
1767 OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src + 1, stride, SIZE); \
1768 } \
1769 static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, \
1770 uint8_t *src, \
1771 int stride) \
1772 { \
1773 OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src + stride, \
1774 stride, SIZE); \
1775 } \
1776 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0, 1, 0) \
1777 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1, -1, 0) \
1778 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0, stride, 0) \
1779 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride, -stride, 0) \
1780 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0, stride, 1) \
1781 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1, stride, -1) \
1782 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride, -stride, 1) \
1783 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride + 1, -stride, -1) \
1784
1785 QPEL_2TAP(put_, 16, mmx2)
1786 QPEL_2TAP(avg_, 16, mmx2)
1787 QPEL_2TAP(put_, 8, mmx2)
1788 QPEL_2TAP(avg_, 8, mmx2)
1789 QPEL_2TAP(put_, 16, 3dnow)
1790 QPEL_2TAP(avg_, 16, 3dnow)
1791 QPEL_2TAP(put_, 8, 3dnow)
1792 QPEL_2TAP(avg_, 8, 3dnow)
1793
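/* The 2-tap scheme approximates quarter-pel positions using only halfpel
 * averages and shifted sources: mc20/mc02/mc22 above map straight onto the
 * pixels_x2/_y2/_xy2 routines, whose rounding variants compute (scalar
 * sketch):
 *
 *     x2:  dst[i] = (src[i] + src[i + 1] + 1) >> 1;
 *     xy2: dst[i] = (src[i] + src[i + 1] +
 *                    src[i + stride] + src[i + stride + 1] + 2) >> 2;
 */
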
1794 void ff_put_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
1795 {
1796 put_pixels8_xy2_mmx(dst, src, stride, 8);
1797 }
1798 void ff_put_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
1799 {
1800 put_pixels16_xy2_mmx(dst, src, stride, 16);
1801 }
1802 void ff_avg_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
1803 {
1804 avg_pixels8_xy2_mmx(dst, src, stride, 8);
1805 }
1806 void ff_avg_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
1807 {
1808 avg_pixels16_xy2_mmx(dst, src, stride, 16);
1809 }
1810
1811 #if HAVE_YASM
1812 typedef void emu_edge_core_func(uint8_t *buf, const uint8_t *src,
1813 x86_reg linesize, x86_reg start_y,
1814 x86_reg end_y, x86_reg block_h,
1815 x86_reg start_x, x86_reg end_x,
1816 x86_reg block_w);
1817 extern emu_edge_core_func ff_emu_edge_core_mmx;
1818 extern emu_edge_core_func ff_emu_edge_core_sse;
1819
1820 static av_always_inline void emulated_edge_mc(uint8_t *buf, const uint8_t *src,
1821 int linesize,
1822 int block_w, int block_h,
1823 int src_x, int src_y,
1824 int w, int h,
1825 emu_edge_core_func *core_fn)
1826 {
1827 int start_y, start_x, end_y, end_x, src_y_add = 0;
1828
1829 if (src_y >= h) {
1830 src_y_add = h - 1 - src_y;
1831 src_y = h - 1;
1832 } else if (src_y <= -block_h) {
1833 src_y_add = 1 - block_h - src_y;
1834 src_y = 1 - block_h;
1835 }
1836 if (src_x >= w) {
1837 src += w - 1 - src_x;
1838 src_x = w - 1;
1839 } else if (src_x <= -block_w) {
1840 src += 1 - block_w - src_x;
1841 src_x = 1 - block_w;
1842 }
1843
1844 start_y = FFMAX(0, -src_y);
1845 start_x = FFMAX(0, -src_x);
1846 end_y = FFMIN(block_h, h-src_y);
1847 end_x = FFMIN(block_w, w-src_x);
1848 assert(start_x < end_x && block_w > 0);
1849 assert(start_y < end_y && block_h > 0);
1850
1851 // fill in the to-be-copied part plus all rows above/below it
1852 src += (src_y_add + start_y) * linesize + start_x;
1853 buf += start_x;
1854 core_fn(buf, src, linesize, start_y, end_y,
1855 block_h, start_x, end_x, block_w);
1856 }
1857
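/* emulated_edge_mc() clamps the block position into the w x h plane and
 * lets the asm core replicate border pixels. Conceptually (scalar sketch,
 * ignoring the clamping already performed above; cf. av_clip()):
 *
 *     for (y = 0; y < block_h; y++)
 *         for (x = 0; x < block_w; x++)
 *             buf[y * linesize + x] =
 *                 src[av_clip(src_y + y, 0, h - 1) * linesize +
 *                     av_clip(src_x + x, 0, w - 1)];
 */
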
1858 #if ARCH_X86_32
1859 static av_noinline void emulated_edge_mc_mmx(uint8_t *buf, const uint8_t *src,
1860 int linesize,
1861 int block_w, int block_h,
1862 int src_x, int src_y, int w, int h)
1863 {
1864 emulated_edge_mc(buf, src, linesize, block_w, block_h, src_x, src_y,
1865 w, h, &ff_emu_edge_core_mmx);
1866 }
1867 #endif
1868
1869 static av_noinline void emulated_edge_mc_sse(uint8_t *buf, const uint8_t *src,
1870 int linesize,
1871 int block_w, int block_h,
1872 int src_x, int src_y, int w, int h)
1873 {
1874 emulated_edge_mc(buf, src, linesize, block_w, block_h, src_x, src_y,
1875 w, h, &ff_emu_edge_core_sse);
1876 }
1877 #endif /* HAVE_YASM */
1878
1879 typedef void emulated_edge_mc_func(uint8_t *dst, const uint8_t *src,
1880 int linesize, int block_w, int block_h,
1881 int src_x, int src_y, int w, int h);
1882
1883 static av_always_inline void gmc(uint8_t *dst, uint8_t *src,
1884 int stride, int h, int ox, int oy,
1885 int dxx, int dxy, int dyx, int dyy,
1886 int shift, int r, int width, int height,
1887 emulated_edge_mc_func *emu_edge_fn)
1888 {
1889 const int w = 8;
1890 const int ix = ox >> (16 + shift);
1891 const int iy = oy >> (16 + shift);
1892 const int oxs = ox >> 4;
1893 const int oys = oy >> 4;
1894 const int dxxs = dxx >> 4;
1895 const int dxys = dxy >> 4;
1896 const int dyxs = dyx >> 4;
1897 const int dyys = dyy >> 4;
1898 const uint16_t r4[4] = { r, r, r, r };
1899 const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
1900 const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
1901 const uint64_t shift2 = 2 * shift;
1902 uint8_t edge_buf[(h + 1) * stride];
1903 int x, y;
1904
1905 const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
1906 const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
1907 const int dxh = dxy * (h - 1);
1908 const int dyw = dyx * (w - 1);
1909 if ( // non-constant fullpel offset (3% of blocks)
1910 ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
1911 (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift)
1912 // uses more than 16 bits of subpel mv (only at huge resolution)
1913 || (dxx | dxy | dyx | dyy) & 15) {
1914 // FIXME: we could still use MMX for some of the rows
1915 ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
1916 shift, r, width, height);
1917 return;
1918 }
1919
1920 src += ix + iy * stride;
1921 if ((unsigned)ix >= width - w ||
1922 (unsigned)iy >= height - h) {
1923 emu_edge_fn(edge_buf, src, stride, w + 1, h + 1, ix, iy, width, height);
1924 src = edge_buf;
1925 }
1926
1927 __asm__ volatile (
1928 "movd %0, %%mm6 \n\t"
1929 "pxor %%mm7, %%mm7 \n\t"
1930 "punpcklwd %%mm6, %%mm6 \n\t"
1931 "punpcklwd %%mm6, %%mm6 \n\t"
1932 :: "r"(1<<shift)
1933 );
1934
1935 for (x = 0; x < w; x += 4) {
1936 uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
1937 oxs - dxys + dxxs * (x + 1),
1938 oxs - dxys + dxxs * (x + 2),
1939 oxs - dxys + dxxs * (x + 3) };
1940 uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
1941 oys - dyys + dyxs * (x + 1),
1942 oys - dyys + dyxs * (x + 2),
1943 oys - dyys + dyxs * (x + 3) };
1944
1945 for (y = 0; y < h; y++) {
1946 __asm__ volatile (
1947 "movq %0, %%mm4 \n\t"
1948 "movq %1, %%mm5 \n\t"
1949 "paddw %2, %%mm4 \n\t"
1950 "paddw %3, %%mm5 \n\t"
1951 "movq %%mm4, %0 \n\t"
1952 "movq %%mm5, %1 \n\t"
1953 "psrlw $12, %%mm4 \n\t"
1954 "psrlw $12, %%mm5 \n\t"
1955 : "+m"(*dx4), "+m"(*dy4)
1956 : "m"(*dxy4), "m"(*dyy4)
1957 );
1958
1959 __asm__ volatile (
1960 "movq %%mm6, %%mm2 \n\t"
1961 "movq %%mm6, %%mm1 \n\t"
1962 "psubw %%mm4, %%mm2 \n\t"
1963 "psubw %%mm5, %%mm1 \n\t"
1964 "movq %%mm2, %%mm0 \n\t"
1965 "movq %%mm4, %%mm3 \n\t"
1966 "pmullw %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
1967 "pmullw %%mm5, %%mm3 \n\t" // dx * dy
1968 "pmullw %%mm5, %%mm2 \n\t" // (s - dx) * dy
1969 "pmullw %%mm4, %%mm1 \n\t" // dx * (s - dy)
1970
1971 "movd %4, %%mm5 \n\t"
1972 "movd %3, %%mm4 \n\t"
1973 "punpcklbw %%mm7, %%mm5 \n\t"
1974 "punpcklbw %%mm7, %%mm4 \n\t"
1975 "pmullw %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
1976 "pmullw %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy
1977
1978 "movd %2, %%mm5 \n\t"
1979 "movd %1, %%mm4 \n\t"
1980 "punpcklbw %%mm7, %%mm5 \n\t"
1981 "punpcklbw %%mm7, %%mm4 \n\t"
1982 "pmullw %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
1983 "pmullw %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
1984 "paddw %5, %%mm1 \n\t"
1985 "paddw %%mm3, %%mm2 \n\t"
1986 "paddw %%mm1, %%mm0 \n\t"
1987 "paddw %%mm2, %%mm0 \n\t"
1988
1989 "psrlw %6, %%mm0 \n\t"
1990 "packuswb %%mm0, %%mm0 \n\t"
1991 "movd %%mm0, %0 \n\t"
1992
1993 : "=m"(dst[x + y * stride])
1994 : "m"(src[0]), "m"(src[1]),
1995 "m"(src[stride]), "m"(src[stride + 1]),
1996 "m"(*r4), "m"(shift2)
1997 );
1998 src += stride;
1999 }
2000 src += 4 - h * stride;
2001 }
2002 }
2003
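/* gmc() above implements MPEG-4 global motion compensation for one
 * 8-pixel-wide block: output pixel (x, y) samples the source at the affine
 * position
 *     ((ox + dxx*x + dxy*y) >> (16 + shift),
 *      (oy + dyx*x + dyy*y) >> (16 + shift))
 * with bilinear interpolation. With s = 1 << shift and dx/dy the subpel
 * fractions, the inner loop computes (cf. the asm comments):
 *
 *     dst[x] = (src[0]          * (s - dx) * (s - dy) +
 *               src[1]          * dx       * (s - dy) +
 *               src[stride]     * (s - dx) * dy       +
 *               src[stride + 1] * dx       * dy       + r) >> (2 * shift);
 */
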
2004 #if HAVE_YASM
2005 #if ARCH_X86_32
2006 static void gmc_mmx(uint8_t *dst, uint8_t *src,
2007 int stride, int h, int ox, int oy,
2008 int dxx, int dxy, int dyx, int dyy,
2009 int shift, int r, int width, int height)
2010 {
2011 gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
2012 width, height, &emulated_edge_mc_mmx);
2013 }
2014 #endif
2015 static void gmc_sse(uint8_t *dst, uint8_t *src,
2016 int stride, int h, int ox, int oy,
2017 int dxx, int dxy, int dyx, int dyy,
2018 int shift, int r, int width, int height)
2019 {
2020 gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
2021 width, height, &emulated_edge_mc_sse);
2022 }
2023 #else
2024 static void gmc_mmx(uint8_t *dst, uint8_t *src,
2025 int stride, int h, int ox, int oy,
2026 int dxx, int dxy, int dyx, int dyy,
2027 int shift, int r, int width, int height)
2028 {
2029 gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
2030 width, height, &ff_emulated_edge_mc_8);
2031 }
2032 #endif
2033
2034 #define PREFETCH(name, op) \
2035 static void name(void *mem, int stride, int h) \
2036 { \
2037 const uint8_t *p = mem; \
2038 do { \
2039 __asm__ volatile (#op" %0" :: "m"(*p)); \
2040 p += stride; \
2041 } while (--h); \
2042 }
2043
2044 PREFETCH(prefetch_mmx2, prefetcht0)
2045 PREFETCH(prefetch_3dnow, prefetch)
2046 #undef PREFETCH
2047
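/* Both expansions above hint one cache line per row into the cache
 * hierarchy ahead of use: prefetcht0 is the SSE instruction, prefetch its
 * 3DNow! counterpart. Callers prime the next reference block, e.g.
 * (illustrative):
 *
 *     c->prefetch(pix + 8 * stride, stride, 8);
 */
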
2048 #include "h264_qpel_mmx.c"
2049
2050 void ff_put_h264_chroma_mc8_mmx_rnd (uint8_t *dst, uint8_t *src,
2051 int stride, int h, int x, int y);
2052 void ff_avg_h264_chroma_mc8_mmx2_rnd (uint8_t *dst, uint8_t *src,
2053 int stride, int h, int x, int y);
2054 void ff_avg_h264_chroma_mc8_3dnow_rnd(uint8_t *dst, uint8_t *src,
2055 int stride, int h, int x, int y);
2056
2057 void ff_put_h264_chroma_mc4_mmx (uint8_t *dst, uint8_t *src,
2058 int stride, int h, int x, int y);
2059 void ff_avg_h264_chroma_mc4_mmx2 (uint8_t *dst, uint8_t *src,
2060 int stride, int h, int x, int y);
2061 void ff_avg_h264_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src,
2062 int stride, int h, int x, int y);
2063
2064 void ff_put_h264_chroma_mc2_mmx2 (uint8_t *dst, uint8_t *src,
2065 int stride, int h, int x, int y);
2066 void ff_avg_h264_chroma_mc2_mmx2 (uint8_t *dst, uint8_t *src,
2067 int stride, int h, int x, int y);
2068
2069 void ff_put_h264_chroma_mc8_ssse3_rnd(uint8_t *dst, uint8_t *src,
2070 int stride, int h, int x, int y);
2071 void ff_put_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
2072 int stride, int h, int x, int y);
2073
2074 void ff_avg_h264_chroma_mc8_ssse3_rnd(uint8_t *dst, uint8_t *src,
2075 int stride, int h, int x, int y);
2076 void ff_avg_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
2077 int stride, int h, int x, int y);
2078
2079 #define CHROMA_MC(OP, NUM, DEPTH, OPT) \
2080 void ff_ ## OP ## _h264_chroma_mc ## NUM ## _ ## DEPTH ## _ ## OPT \
2081 (uint8_t *dst, uint8_t *src, \
2082 int stride, int h, int x, int y);
2083
2084 CHROMA_MC(put, 2, 10, mmxext)
2085 CHROMA_MC(avg, 2, 10, mmxext)
2086 CHROMA_MC(put, 4, 10, mmxext)
2087 CHROMA_MC(avg, 4, 10, mmxext)
2088 CHROMA_MC(put, 8, 10, sse2)
2089 CHROMA_MC(avg, 8, 10, sse2)
2090 CHROMA_MC(put, 8, 10, avx)
2091 CHROMA_MC(avg, 8, 10, avx)
2092
2093 /* CAVS-specific */
2094 void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride)
2095 {
2096 put_pixels8_mmx(dst, src, stride, 8);
2097 }
2098
2099 void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride)
2100 {
2101 avg_pixels8_mmx(dst, src, stride, 8);
2102 }
2103
2104 void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride)
2105 {
2106 put_pixels16_mmx(dst, src, stride, 16);
2107 }
2108
2109 void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride)
2110 {
2111 avg_pixels16_mmx(dst, src, stride, 16);
2112 }
2113
2114 /* VC-1-specific */
2115 void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src,
2116 int stride, int rnd)
2117 {
2118 put_pixels8_mmx(dst, src, stride, 8);
2119 }
2120
2121 void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src,
2122 int stride, int rnd)
2123 {
2124 avg_pixels8_mmx2(dst, src, stride, 8);
2125 }
2126
2127 /* XXX: These functions should be removed as soon as all IDCTs are
2128  * converted. */
2129 #if CONFIG_GPL
2130 static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size,
2131 DCTELEM *block)
2132 {
2133 ff_mmx_idct(block);
2134 ff_put_pixels_clamped_mmx(block, dest, line_size);
2135 }
2136
2137 static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size,
2138 DCTELEM *block)
2139 {
2140 ff_mmx_idct(block);
2141 ff_add_pixels_clamped_mmx(block, dest, line_size);
2142 }
2143
2144 static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size,
2145 DCTELEM *block)
2146 {
2147 ff_mmxext_idct(block);
2148 ff_put_pixels_clamped_mmx(block, dest, line_size);
2149 }
2150
2151 static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size,
2152 DCTELEM *block)
2153 {
2154 ff_mmxext_idct(block);
2155 ff_add_pixels_clamped_mmx(block, dest, line_size);
2156 }
2157 #endif
2158
2159 static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
2160 {
2161 ff_idct_xvid_mmx(block);
2162 ff_put_pixels_clamped_mmx(block, dest, line_size);
2163 }
2164
2165 static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
2166 {
2167 ff_idct_xvid_mmx(block);
2168 ff_add_pixels_clamped_mmx(block, dest, line_size);
2169 }
2170
2171 static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
2172 {
2173 ff_idct_xvid_mmx2(block);
2174 ff_put_pixels_clamped_mmx(block, dest, line_size);
2175 }
2176
2177 static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
2178 {
2179 ff_idct_xvid_mmx2(block);
2180 ff_add_pixels_clamped_mmx(block, dest, line_size);
2181 }
2182
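/* In all of the IDCT wrappers above, the _put variants overwrite the
 * destination with the clamped result while the _add variants sum into the
 * existing prediction (scalar sketch):
 *
 *     put: dest[i] = av_clip_uint8(block[i]);
 *     add: dest[i] = av_clip_uint8(dest[i] + block[i]);
 */
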
2183 static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
2184 {
2185 int i;
2186 __asm__ volatile ("pxor %%mm7, %%mm7":);
2187 for (i = 0; i < blocksize; i += 2) {
2188 __asm__ volatile (
2189 "movq %0, %%mm0 \n\t"
2190 "movq %1, %%mm1 \n\t"
2191 "movq %%mm0, %%mm2 \n\t"
2192 "movq %%mm1, %%mm3 \n\t"
2193 "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
2194 "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
2195 "pslld $31, %%mm2 \n\t" // keep only the sign bit
2196 "pxor %%mm2, %%mm1 \n\t"
2197 "movq %%mm3, %%mm4 \n\t"
2198 "pand %%mm1, %%mm3 \n\t"
2199 "pandn %%mm1, %%mm4 \n\t"
2200 "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a < 0) & (a ^ sign(m)))
2201 "pfsub %%mm4, %%mm0 \n\t" // m = m + ((a > 0) & (a ^ sign(m)))
2202 "movq %%mm3, %1 \n\t"
2203 "movq %%mm0, %0 \n\t"
2204 : "+m"(mag[i]), "+m"(ang[i])
2205 :: "memory"
2206 );
2207 }
2208 __asm__ volatile ("femms");
2209 }
2210
2211 static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
2212 {
2213 int i;
2214
2215 __asm__ volatile (
2216 "movaps %0, %%xmm5 \n\t"
2217 :: "m"(ff_pdw_80000000[0])
2218 );
2219 for (i = 0; i < blocksize; i += 4) {
2220 __asm__ volatile (
2221 "movaps %0, %%xmm0 \n\t"
2222 "movaps %1, %%xmm1 \n\t"
2223 "xorps %%xmm2, %%xmm2 \n\t"
2224 "xorps %%xmm3, %%xmm3 \n\t"
2225 "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
2226 "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
2227 "andps %%xmm5, %%xmm2 \n\t" // keep only the sign bit
2228 "xorps %%xmm2, %%xmm1 \n\t"
2229 "movaps %%xmm3, %%xmm4 \n\t"
2230 "andps %%xmm1, %%xmm3 \n\t"
2231 "andnps %%xmm1, %%xmm4 \n\t"
2232 "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a < 0) & (a ^ sign(m)))
2233 "subps %%xmm4, %%xmm0 \n\t" // m = m + ((a > 0) & (a ^ sign(m)))
2234 "movaps %%xmm3, %1 \n\t"
2235 "movaps %%xmm0, %0 \n\t"
2236 : "+m"(mag[i]), "+m"(ang[i])
2237 :: "memory"
2238 );
2239 }
2240 }
2241
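/* Scalar logic both SIMD versions above implement (a sketch; the function
 * name is illustrative and the branchless asm folds the sign tests into
 * masks, cf. the C version in the Vorbis decoder): */
#if 0
static void vorbis_inverse_coupling_sketch(float *mag, float *ang,
                                           int blocksize)
{
    int i;
    for (i = 0; i < blocksize; i++) {
        float m = mag[i], a = ang[i];
        if (m > 0.0f) {
            if (a > 0.0f)   ang[i] = m - a;
            else          { ang[i] = m; mag[i] = m + a; }
        } else {
            if (a > 0.0f)   ang[i] = m + a;
            else          { ang[i] = m; mag[i] = m - a; }
        }
    }
}
#endif
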
2242 #define IF1(x) x
2243 #define IF0(x)
2244
2245 #define MIX5(mono, stereo) \
2246 __asm__ volatile ( \
2247 "movss 0(%2), %%xmm5 \n" \
2248 "movss 8(%2), %%xmm6 \n" \
2249 "movss 24(%2), %%xmm7 \n" \
2250 "shufps $0, %%xmm5, %%xmm5 \n" \
2251 "shufps $0, %%xmm6, %%xmm6 \n" \
2252 "shufps $0, %%xmm7, %%xmm7 \n" \
2253 "1: \n" \
2254 "movaps (%0, %1), %%xmm0 \n" \
2255 "movaps 0x400(%0, %1), %%xmm1 \n" \
2256 "movaps 0x800(%0, %1), %%xmm2 \n" \
2257 "movaps 0xc00(%0, %1), %%xmm3 \n" \
2258 "movaps 0x1000(%0, %1), %%xmm4 \n" \
2259 "mulps %%xmm5, %%xmm0 \n" \
2260 "mulps %%xmm6, %%xmm1 \n" \
2261 "mulps %%xmm5, %%xmm2 \n" \
2262 "mulps %%xmm7, %%xmm3 \n" \
2263 "mulps %%xmm7, %%xmm4 \n" \
2264 stereo("addps %%xmm1, %%xmm0 \n") \
2265 "addps %%xmm1, %%xmm2 \n" \
2266 "addps %%xmm3, %%xmm0 \n" \
2267 "addps %%xmm4, %%xmm2 \n" \
2268 mono("addps %%xmm2, %%xmm0 \n") \
2269 "movaps %%xmm0, (%0, %1) \n" \
2270 stereo("movaps %%xmm2, 0x400(%0, %1) \n") \
2271 "add $16, %0 \n" \
2272 "jl 1b \n" \
2273 : "+&r"(i) \
2274 : "r"(samples[0] + len), "r"(matrix) \
2275 : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \
2276 "%xmm4", "%xmm5", "%xmm6", "%xmm7",) \
2277 "memory" \
2278 );
2279
2280 #define MIX_MISC(stereo) \
2281 __asm__ volatile ( \
2282 "1: \n" \
2283 "movaps (%3, %0), %%xmm0 \n" \
2284 stereo("movaps %%xmm0, %%xmm1 \n") \
2285 "mulps %%xmm4, %%xmm0 \n" \
2286 stereo("mulps %%xmm5, %%xmm1 \n") \
2287 "lea 1024(%3, %0), %1 \n" \
2288 "mov %5, %2 \n" \
2289 "2: \n" \
2290 "movaps (%1), %%xmm2 \n" \
2291 stereo("movaps %%xmm2, %%xmm3 \n") \
2292 "mulps (%4, %2), %%xmm2 \n" \
2293 stereo("mulps 16(%4, %2), %%xmm3 \n") \
2294 "addps %%xmm2, %%xmm0 \n" \
2295 stereo("addps %%xmm3, %%xmm1 \n") \
2296 "add $1024, %1 \n" \
2297 "add $32, %2 \n" \
2298 "jl 2b \n" \
2299 "movaps %%xmm0, (%3, %0) \n" \
2300 stereo("movaps %%xmm1, 1024(%3, %0) \n") \
2301 "add $16, %0 \n" \
2302 "jl 1b \n" \
2303 : "+&r"(i), "=&r"(j), "=&r"(k) \
2304 : "r"(samples[0] + len), "r"(matrix_simd + in_ch), \
2305 "g"((intptr_t) - 32 * (in_ch - 1)) \
2306 : "memory" \
2307 );
2308
2309 static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2],
2310 int out_ch, int in_ch, int len)
2311 {
2312 int (*matrix_cmp)[2] = (int(*)[2])matrix;
2313 intptr_t i, j, k;
2314
2315 i = -len * sizeof(float);
2316 if (in_ch == 5 && out_ch == 2 &&
2317 !(matrix_cmp[0][1] | matrix_cmp[2][0] |
2318 matrix_cmp[3][1] | matrix_cmp[4][0] |
2319 (matrix_cmp[1][0] ^ matrix_cmp[1][1]) |
2320 (matrix_cmp[0][0] ^ matrix_cmp[2][1]))) {
2321 MIX5(IF0, IF1);
2322 } else if (in_ch == 5 && out_ch == 1 &&
2323 matrix_cmp[0][0] == matrix_cmp[2][0] &&
2324 matrix_cmp[3][0] == matrix_cmp[4][0]) {
2325 MIX5(IF1, IF0);
2326 } else {
2327 DECLARE_ALIGNED(16, float, matrix_simd)[AC3_MAX_CHANNELS][2][4];
2328 j = 2 * in_ch * sizeof(float);
2329 __asm__ volatile (
2330 "1: \n"
2331 "sub $8, %0 \n"
2332 "movss (%2, %0), %%xmm4 \n"
2333 "movss 4(%2, %0), %%xmm5 \n"
2334 "shufps $0, %%xmm4, %%xmm4 \n"
2335 "shufps $0, %%xmm5, %%xmm5 \n"
2336 "movaps %%xmm4, (%1, %0, 4) \n"
2337 "movaps %%xmm5, 16(%1, %0, 4) \n"
2338 "jg 1b \n"
2339 : "+&r"(j)
2340 : "r"(matrix_simd), "r"(matrix)
2341 : "memory"
2342 );
2343 if (out_ch == 2) {
2344 MIX_MISC(IF1);
2345 } else {
2346 MIX_MISC(IF0);
2347 }
2348 }
2349 }
2350
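/* The integer compares above test raw float bit patterns, so the fast path
 * triggers only for exact zeros and exactly equal coefficients. When the
 * 5.0 matrix has the shape
 *     L = c0*ch0 + c1*ch1 + c3*ch3,  R = c0*ch2 + c1*ch1 + c3*ch4
 * (c0 = matrix[0][0], c1 = matrix[1][0], c3 = matrix[3][0]), MIX5 is used.
 * Scalar sketch of its stereo branch: */
#if 0
for (i = 0; i < len; i++) {
    float c = samples[1][i] * c1;
    samples[0][i] = samples[0][i] * c0 + c + samples[3][i] * c3;
    samples[1][i] = samples[2][i] * c0 + c + samples[4][i] * c3;
}
#endif
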
2351 #if HAVE_6REGS
2352 static void vector_fmul_window_3dnow2(float *dst, const float *src0,
2353 const float *src1, const float *win,
2354 int len)
2355 {
2356 x86_reg i = -len * 4;
2357 x86_reg j = len * 4 - 8;
2358 __asm__ volatile (
2359 "1: \n"
2360 "pswapd (%5, %1), %%mm1 \n"
2361 "movq (%5, %0), %%mm0 \n"
2362 "pswapd (%4, %1), %%mm5 \n"
2363 "movq (%3, %0), %%mm4 \n"
2364 "movq %%mm0, %%mm2 \n"
2365 "movq %%mm1, %%mm3 \n"
2366 "pfmul %%mm4, %%mm2 \n" // src0[len + i] * win[len + i]
2367 "pfmul %%mm5, %%mm3 \n" // src1[j] * win[len + j]
2368 "pfmul %%mm4, %%mm1 \n" // src0[len + i] * win[len + j]
2369 "pfmul %%mm5, %%mm0 \n" // src1[j] * win[len + i]
2370 "pfadd %%mm3, %%mm2 \n"
2371 "pfsub %%mm0, %%mm1 \n"
2372 "pswapd %%mm2, %%mm2 \n"
2373 "movq %%mm1, (%2, %0) \n"
2374 "movq %%mm2, (%2, %1) \n"
2375 "sub $8, %1 \n"
2376 "add $8, %0 \n"
2377 "jl 1b \n"
2378 "femms \n"
2379 : "+r"(i), "+r"(j)
2380 : "r"(dst + len), "r"(src0 + len), "r"(src1), "r"(win + len)
2381 );
2382 }
2383
2384 static void vector_fmul_window_sse(float *dst, const float *src0,
2385 const float *src1, const float *win, int len)
2386 {
2387 x86_reg i = -len * 4;
2388 x86_reg j = len * 4 - 16;
2389 __asm__ volatile (
2390 "1: \n"
2391 "movaps (%5, %1), %%xmm1 \n"
2392 "movaps (%5, %0), %%xmm0 \n"
2393 "movaps (%4, %1), %%xmm5 \n"
2394 "movaps (%3, %0), %%xmm4 \n"
2395 "shufps $0x1b, %%xmm1, %%xmm1 \n"
2396 "shufps $0x1b, %%xmm5, %%xmm5 \n"
2397 "movaps %%xmm0, %%xmm2 \n"
2398 "movaps %%xmm1, %%xmm3 \n"
2399 "mulps %%xmm4, %%xmm2 \n" // src0[len + i] * win[len + i]
2400 "mulps %%xmm5, %%xmm3 \n" // src1[j] * win[len + j]
2401 "mulps %%xmm4, %%xmm1 \n" // src0[len + i] * win[len + j]
2402 "mulps %%xmm5, %%xmm0 \n" // src1[j] * win[len + i]
2403 "addps %%xmm3, %%xmm2 \n"
2404 "subps %%xmm0, %%xmm1 \n"
2405 "shufps $0x1b, %%xmm2, %%xmm2 \n"
2406 "movaps %%xmm1, (%2, %0) \n"
2407 "movaps %%xmm2, (%2, %1) \n"
2408 "sub $16, %1 \n"
2409 "add $16, %0 \n"
2410 "jl 1b \n"
2411 : "+r"(i), "+r"(j)
2412 : "r"(dst + len), "r"(src0 + len), "r"(src1), "r"(win + len)
2413 );
2414 }
2415 #endif /* HAVE_6REGS */
2416
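/* Windowed overlap-add computed by both versions above (scalar sketch,
 * matching the per-element asm comments; the name is illustrative): */
#if 0
static void vector_fmul_window_sketch(float *dst, const float *src0,
                                      const float *src1, const float *win,
                                      int len)
{
    int i, j;
    dst  += len;
    win  += len;
    src0 += len;
    for (i = -len, j = len - 1; i < 0; i++, j--) {
        float s0 = src0[i];
        float s1 = src1[j];
        dst[i] = s0 * win[j] - s1 * win[i];
        dst[j] = s0 * win[i] + s1 * win[j];
    }
}
#endif
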
2417 static void vector_clipf_sse(float *dst, const float *src,
2418 float min, float max, int len)
2419 {
2420 x86_reg i = (len - 16) * 4;
2421 __asm__ volatile (
2422 "movss %3, %%xmm4 \n\t"
2423 "movss %4, %%xmm5 \n\t"
2424 "shufps $0, %%xmm4, %%xmm4 \n\t"
2425 "shufps $0, %%xmm5, %%xmm5 \n\t"
2426 "1: \n\t"
2427 "movaps (%2, %0), %%xmm0 \n\t" // 3/1 on intel
2428 "movaps 16(%2, %0), %%xmm1 \n\t"
2429 "movaps 32(%2, %0), %%xmm2 \n\t"
2430 "movaps 48(%2, %0), %%xmm3 \n\t"
2431 "maxps %%xmm4, %%xmm0 \n\t"
2432 "maxps %%xmm4, %%xmm1 \n\t"
2433 "maxps %%xmm4, %%xmm2 \n\t"
2434 "maxps %%xmm4, %%xmm3 \n\t"
2435 "minps %%xmm5, %%xmm0 \n\t"
2436 "minps %%xmm5, %%xmm1 \n\t"
2437 "minps %%xmm5, %%xmm2 \n\t"
2438 "minps %%xmm5, %%xmm3 \n\t"
2439 "movaps %%xmm0, (%1, %0) \n\t"
2440 "movaps %%xmm1, 16(%1, %0) \n\t"
2441 "movaps %%xmm2, 32(%1, %0) \n\t"
2442 "movaps %%xmm3, 48(%1, %0) \n\t"
2443 "sub $64, %0 \n\t"
2444 "jge 1b \n\t"
2445 : "+&r"(i)
2446 : "r"(dst), "r"(src), "m"(min), "m"(max)
2447 : "memory"
2448 );
2449 }
2450
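/* vector_clipf_sse() above processes 16 floats per iteration with aligned
 * loads, so it assumes len is a multiple of 16 and dst/src are 16-byte
 * aligned. Scalar equivalent (sketch):
 *
 *     for (i = 0; i < len; i++)
 *         dst[i] = FFMIN(FFMAX(src[i], min), max);
 */
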
2451 void ff_vp3_idct_mmx(int16_t *input_data);
2452 void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block);
2453 void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block);
2454
2455 void ff_vp3_idct_dc_add_mmx2(uint8_t *dest, int line_size,
2456 const DCTELEM *block);
2457
2458 void ff_vp3_v_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);
2459 void ff_vp3_h_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);
2460
2461 void ff_vp3_idct_sse2(int16_t *input_data);
2462 void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block);
2463 void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block);
2464
2465 int32_t ff_scalarproduct_int16_mmx2(const int16_t *v1, const int16_t *v2,
2466 int order);
2467 int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2,
2468 int order);
2469 int32_t ff_scalarproduct_and_madd_int16_mmx2(int16_t *v1, const int16_t *v2,
2470 const int16_t *v3,
2471 int order, int mul);
2472 int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2,
2473 const int16_t *v3,
2474 int order, int mul);
2475 int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2,
2476 const int16_t *v3,
2477 int order, int mul);
2478
2479 void ff_apply_window_int16_mmxext (int16_t *output, const int16_t *input,
2480 const int16_t *window, unsigned int len);
2481 void ff_apply_window_int16_mmxext_ba (int16_t *output, const int16_t *input,
2482 const int16_t *window, unsigned int len);
2483 void ff_apply_window_int16_sse2 (int16_t *output, const int16_t *input,
2484 const int16_t *window, unsigned int len);
2485 void ff_apply_window_int16_sse2_ba (int16_t *output, const int16_t *input,
2486 const int16_t *window, unsigned int len);
2487 void ff_apply_window_int16_ssse3 (int16_t *output, const int16_t *input,
2488 const int16_t *window, unsigned int len);
2489 void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
2490 const int16_t *window, unsigned int len);
2491
2492 void ff_bswap32_buf_ssse3(uint32_t *dst, const uint32_t *src, int w);
2493 void ff_bswap32_buf_sse2(uint32_t *dst, const uint32_t *src, int w);
2494
2495 void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top,
2496 const uint8_t *diff, int w,
2497 int *left, int *left_top);
2498 int ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src,
2499 int w, int left);
2500 int ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src,
2501 int w, int left);
2502
2503 float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);
2504
2505 void ff_vector_fmul_reverse_sse(float *dst, const float *src0,
2506 const float *src1, int len);
2507 void ff_vector_fmul_reverse_avx(float *dst, const float *src0,
2508 const float *src1, int len);
2509
2510 void ff_vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
2511 const float *src2, int len);
2512 void ff_vector_fmul_add_avx(float *dst, const float *src0, const float *src1,
2513 const float *src2, int len);
2514
2515 void ff_vector_clip_int32_mmx (int32_t *dst, const int32_t *src,
2516 int32_t min, int32_t max, unsigned int len);
2517 void ff_vector_clip_int32_sse2 (int32_t *dst, const int32_t *src,
2518 int32_t min, int32_t max, unsigned int len);
2519 void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src,
2520 int32_t min, int32_t max, unsigned int len);
2521 void ff_vector_clip_int32_sse4 (int32_t *dst, const int32_t *src,
2522 int32_t min, int32_t max, unsigned int len);
2523
2524 extern void ff_butterflies_float_interleave_sse(float *dst, const float *src0,
2525 const float *src1, int len);
2526 extern void ff_butterflies_float_interleave_avx(float *dst, const float *src0,
2527 const float *src1, int len);
2528
2529 #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX) \
2530 do { \
2531 c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
2532 c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
2533 c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
2534 c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
2535 c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
2536 c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
2537 c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
2538 c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
2539 c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
2540 c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
2541 c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
2542 c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
2543 c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
2544 c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
2545 c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
2546 c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \
2547 } while (0)
2548
2549 #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
2550 do { \
2551 c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
2552 c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
2553 c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
2554 c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU; \
2555 } while (0)
2556
2557 #define H264_QPEL_FUNCS(x, y, CPU) \
2558 do { \
2559 c->put_h264_qpel_pixels_tab[0][x + y * 4] = put_h264_qpel16_mc ## x ## y ## _ ## CPU; \
2560 c->put_h264_qpel_pixels_tab[1][x + y * 4] = put_h264_qpel8_mc ## x ## y ## _ ## CPU; \
2561 c->avg_h264_qpel_pixels_tab[0][x + y * 4] = avg_h264_qpel16_mc ## x ## y ## _ ## CPU; \
2562 c->avg_h264_qpel_pixels_tab[1][x + y * 4] = avg_h264_qpel8_mc ## x ## y ## _ ## CPU; \
2563 } while (0)
2564
2565 #define H264_QPEL_FUNCS_10(x, y, CPU) \
2566 do { \
2567 c->put_h264_qpel_pixels_tab[0][x + y * 4] = ff_put_h264_qpel16_mc ## x ## y ## _10_ ## CPU; \
2568 c->put_h264_qpel_pixels_tab[1][x + y * 4] = ff_put_h264_qpel8_mc ## x ## y ## _10_ ## CPU; \
2569 c->avg_h264_qpel_pixels_tab[0][x + y * 4] = ff_avg_h264_qpel16_mc ## x ## y ## _10_ ## CPU; \
2570 c->avg_h264_qpel_pixels_tab[1][x + y * 4] = ff_avg_h264_qpel8_mc ## x ## y ## _10_ ## CPU; \
2571 } while (0)
2572
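/* In the three macros above, the function-pointer tables are indexed by
 * x + y * 4, matching the mcXY naming (index 7 is mc31, i.e. x + 3/4,
 * y + 1/4); the hpel table uses indices 0..3 for mc00/_x2/_y2/_xy2. */
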
2573 static void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx, int mm_flags)
2574 {
2575 const int high_bit_depth = avctx->bits_per_raw_sample > 8;
2576
2577 c->put_pixels_clamped = ff_put_pixels_clamped_mmx;
2578 c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
2579 c->add_pixels_clamped = ff_add_pixels_clamped_mmx;
2580
2581 if (!high_bit_depth) {
2582 c->clear_block = clear_block_mmx;
2583 c->clear_blocks = clear_blocks_mmx;
2584 c->draw_edges = draw_edges_mmx;
2585
2586 SET_HPEL_FUNCS(put, 0, 16, mmx);
2587 SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
2588 SET_HPEL_FUNCS(avg, 0, 16, mmx);
2589 SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
2590 SET_HPEL_FUNCS(put, 1, 8, mmx);
2591 SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
2592 SET_HPEL_FUNCS(avg, 1, 8, mmx);
2593 SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);
2594 }
2595
2596 #if ARCH_X86_32 || !HAVE_YASM
2597 c->gmc = gmc_mmx;
2598 #endif
2599 #if ARCH_X86_32 && HAVE_YASM
2600 if (!high_bit_depth)
2601 c->emulated_edge_mc = emulated_edge_mc_mmx;
2602 #endif
2603
2604 c->add_bytes = add_bytes_mmx;
2605
2606 if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
2607 c->h263_v_loop_filter = h263_v_loop_filter_mmx;
2608 c->h263_h_loop_filter = h263_h_loop_filter_mmx;
2609 }
2610
2611 #if HAVE_YASM
2612 if (!high_bit_depth && CONFIG_H264CHROMA) {
2613 c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_mmx_rnd;
2614 c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_mmx;
2615 }
2616
2617 c->vector_clip_int32 = ff_vector_clip_int32_mmx;
2618 #endif
2619
2620 }
2621
2622 static void dsputil_init_mmx2(DSPContext *c, AVCodecContext *avctx,
2623 int mm_flags)
2624 {
2625 const int bit_depth = avctx->bits_per_raw_sample;
2626 const int high_bit_depth = bit_depth > 8;
2627
2628 c->prefetch = prefetch_mmx2;
2629
2630 if (!high_bit_depth) {
2631 c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
2632 c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
2633
2634 c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
2635 c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
2636 c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
2637
2638 c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
2639 c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
2640
2641 c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
2642 c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
2643 c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
2644 }
2645
2646 if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
2647 if (!high_bit_depth) {
2648 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
2649 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
2650 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
2651 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
2652
2653 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
2654 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
2655 }
2656
2657 if (CONFIG_VP3_DECODER && HAVE_YASM) {
2658 c->vp3_v_loop_filter = ff_vp3_v_loop_filter_mmx2;
2659 c->vp3_h_loop_filter = ff_vp3_h_loop_filter_mmx2;
2660 }
2661 }
2662 if (CONFIG_VP3_DECODER && HAVE_YASM)
2663 c->vp3_idct_dc_add = ff_vp3_idct_dc_add_mmx2;
2664
2665 if (CONFIG_VP3_DECODER && (avctx->codec_id == CODEC_ID_VP3 ||
2666 avctx->codec_id == CODEC_ID_THEORA)) {
2667 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmx2;
2668 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmx2;
2669 }
2670
2671 if (CONFIG_H264QPEL) {
2672 SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2, );
2673 SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2, );
2674 SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2, );
2675 SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2, );
2676 SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2, );
2677 SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2, );
2678
2679 if (!high_bit_depth) {
2680 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2, );
2681 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2, );
2682 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2, );
2683 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2, );
2684 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2, );
2685 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2, );
2686 } else if (bit_depth == 10) {
2687 #if HAVE_YASM
2688 #if !ARCH_X86_64
2689 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_mmxext, ff_);
2690 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_mmxext, ff_);
2691 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 10_mmxext, ff_);
2692 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 10_mmxext, ff_);
2693 #endif
2694 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 10_mmxext, ff_);
2695 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 10_mmxext, ff_);
2696 #endif
2697 }
2698
2699 SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2, );
2700 SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2, );
2701 SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2, );
2702 SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2, );
2703 }
2704
2705 #if HAVE_YASM
2706 if (!high_bit_depth && CONFIG_H264CHROMA) {
2707 c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_mmx2_rnd;
2708 c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_mmx2;
2709 c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_mmx2;
2710 c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_mmx2;
2711 }
2712 if (bit_depth == 10 && CONFIG_H264CHROMA) {
2713 c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_10_mmxext;
2714 c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_10_mmxext;
2715 c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_10_mmxext;
2716 c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_10_mmxext;
2717 }
2718
2719 c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
2720
2721 c->scalarproduct_int16 = ff_scalarproduct_int16_mmx2;
2722 c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2;
2723
2724 if (avctx->flags & CODEC_FLAG_BITEXACT) {
2725 c->apply_window_int16 = ff_apply_window_int16_mmxext_ba;
2726 } else {
2727 c->apply_window_int16 = ff_apply_window_int16_mmxext;
2728 }
2729 #endif
2730 }
2731
2732 static void dsputil_init_3dnow(DSPContext *c, AVCodecContext *avctx,
2733 int mm_flags)
2734 {
2735 const int high_bit_depth = avctx->bits_per_raw_sample > 8;
2736
2737 c->prefetch = prefetch_3dnow;
2738
2739 if (!high_bit_depth) {
2740 c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
2741 c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
2742
2743 c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
2744 c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
2745 c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
2746
2747 c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
2748 c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
2749
2750 c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
2751 c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
2752 c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
2753
2754 if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
2755 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
2756 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
2757 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
2758 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
2759
2760 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
2761 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
2762 }
2763 }
2764
2765 if (CONFIG_VP3_DECODER && (avctx->codec_id == CODEC_ID_VP3 ||
2766 avctx->codec_id == CODEC_ID_THEORA)) {
2767 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_3dnow;
2768 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_3dnow;
2769 }
2770
2771 if (CONFIG_H264QPEL) {
2772 SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow, );
2773 SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow, );
2774 SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow, );
2775 SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow, );
2776 SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow, );
2777 SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow, );
2778
2779 if (!high_bit_depth) {
2780 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow, );
2781 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow, );
2782 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow, );
2783 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow, );
2784 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow, );
2785 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow, );
2786 }
2787
2788 SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow, );
2789 SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow, );
2790 SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow, );
2791 SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow, );
2792 }
2793
2794 #if HAVE_YASM
2795 if (!high_bit_depth && CONFIG_H264CHROMA) {
2796 c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_3dnow_rnd;
2797 c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_3dnow;
2798 }
2799 #endif
2800
2801 c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
2802
2803 #if HAVE_7REGS
2804 c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
2805 #endif
2806 }
2807
2808 static void dsputil_init_3dnow2(DSPContext *c, AVCodecContext *avctx,
2809 int mm_flags)
2810 {
2811 #if HAVE_6REGS
2812 c->vector_fmul_window = vector_fmul_window_3dnow2;
2813 #endif
2814 }
2815
2816 static void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx, int mm_flags)
2817 {
2818 const int high_bit_depth = avctx->bits_per_raw_sample > 8;
2819
2820 if (!high_bit_depth) {
2821 if (!(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)) {
2822 /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
2823 c->clear_block = clear_block_sse;
2824 c->clear_blocks = clear_blocks_sse;
2825 }
2826 }
2827
2828 c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
2829 c->ac3_downmix = ac3_downmix_sse;
2830 #if HAVE_YASM
2831 c->vector_fmul_reverse = ff_vector_fmul_reverse_sse;
2832 c->vector_fmul_add = ff_vector_fmul_add_sse;
2833 #endif
2834
2835 #if HAVE_6REGS
2836 c->vector_fmul_window = vector_fmul_window_sse;
2837 #endif
2838
2839 c->vector_clipf = vector_clipf_sse;
2840
2841 #if HAVE_YASM
2842 c->scalarproduct_float = ff_scalarproduct_float_sse;
2843 c->butterflies_float_interleave = ff_butterflies_float_interleave_sse;
2844
2845 if (!high_bit_depth)
2846 c->emulated_edge_mc = emulated_edge_mc_sse;
2847 c->gmc = gmc_sse;
2848 #endif
2849 }
2850
2851 static void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
2852 int mm_flags)
2853 {
2854 const int bit_depth = avctx->bits_per_raw_sample;
2855 const int high_bit_depth = bit_depth > 8;
2856
2857 if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
2858 // these functions are slower than the MMX versions on AMD, but faster on Intel
2859 if (!high_bit_depth) {
2860 c->put_pixels_tab[0][0] = put_pixels16_sse2;
2861 c->put_no_rnd_pixels_tab[0][0] = put_pixels16_sse2;
2862 c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
2863 if (CONFIG_H264QPEL)
2864 H264_QPEL_FUNCS(0, 0, sse2);
2865 }
2866 }
2867
2868 if (!high_bit_depth && CONFIG_H264QPEL) {
2869 H264_QPEL_FUNCS(0, 1, sse2);
2870 H264_QPEL_FUNCS(0, 2, sse2);
2871 H264_QPEL_FUNCS(0, 3, sse2);
2872 H264_QPEL_FUNCS(1, 1, sse2);
2873 H264_QPEL_FUNCS(1, 2, sse2);
2874 H264_QPEL_FUNCS(1, 3, sse2);
2875 H264_QPEL_FUNCS(2, 1, sse2);
2876 H264_QPEL_FUNCS(2, 2, sse2);
2877 H264_QPEL_FUNCS(2, 3, sse2);
2878 H264_QPEL_FUNCS(3, 1, sse2);
2879 H264_QPEL_FUNCS(3, 2, sse2);
2880 H264_QPEL_FUNCS(3, 3, sse2);
2881 }
2882
2883 #if HAVE_YASM
2884 if (bit_depth == 10) {
2885 if (CONFIG_H264QPEL) {
2886 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_sse2, ff_);
2887 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 10_sse2, ff_);
2888 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_sse2, ff_);
2889 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 10_sse2, ff_);
2890 H264_QPEL_FUNCS_10(1, 0, sse2_cache64);
2891 H264_QPEL_FUNCS_10(2, 0, sse2_cache64);
2892 H264_QPEL_FUNCS_10(3, 0, sse2_cache64);
2893 }
2894 if (CONFIG_H264CHROMA) {
2895 c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_sse2;
2896 c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_sse2;
2897 }
2898 }
2899
2900 c->scalarproduct_int16 = ff_scalarproduct_int16_sse2;
2901 c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
2902 if (mm_flags & AV_CPU_FLAG_ATOM) {
2903 c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
2904 } else {
2905 c->vector_clip_int32 = ff_vector_clip_int32_sse2;
2906 }
2907 if (avctx->flags & CODEC_FLAG_BITEXACT) {
2908 c->apply_window_int16 = ff_apply_window_int16_sse2_ba;
2909 } else if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
2910 c->apply_window_int16 = ff_apply_window_int16_sse2;
2911 }
2912 c->bswap_buf = ff_bswap32_buf_sse2;
2913 #endif
2914 }
2915
2916 static void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx,
2917 int mm_flags)
2918 {
2919 #if HAVE_SSSE3
2920 const int high_bit_depth = avctx->bits_per_raw_sample > 8;
2921 const int bit_depth = avctx->bits_per_raw_sample;
2922
2923 if (!high_bit_depth && CONFIG_H264QPEL) {
2924 H264_QPEL_FUNCS(1, 0, ssse3);
2925 H264_QPEL_FUNCS(1, 1, ssse3);
2926 H264_QPEL_FUNCS(1, 2, ssse3);
2927 H264_QPEL_FUNCS(1, 3, ssse3);
2928 H264_QPEL_FUNCS(2, 0, ssse3);
2929 H264_QPEL_FUNCS(2, 1, ssse3);
2930 H264_QPEL_FUNCS(2, 2, ssse3);
2931 H264_QPEL_FUNCS(2, 3, ssse3);
2932 H264_QPEL_FUNCS(3, 0, ssse3);
2933 H264_QPEL_FUNCS(3, 1, ssse3);
2934 H264_QPEL_FUNCS(3, 2, ssse3);
2935 H264_QPEL_FUNCS(3, 3, ssse3);
2936 }
2937 #if HAVE_YASM
2938 else if (bit_depth == 10 && CONFIG_H264QPEL) {
2939 H264_QPEL_FUNCS_10(1, 0, ssse3_cache64);
2940 H264_QPEL_FUNCS_10(2, 0, ssse3_cache64);
2941 H264_QPEL_FUNCS_10(3, 0, ssse3_cache64);
2942 }
2943 if (!high_bit_depth && CONFIG_H264CHROMA) {
2944 c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_ssse3_rnd;
2945 c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_ssse3_rnd;
2946 c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_ssse3;
2947 c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_ssse3;
2948 }
2949 c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
2950 if (mm_flags & AV_CPU_FLAG_SSE4) // not actually an SSE4 routine; the flag just excludes Conroe, where it is slow
2951 c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
2952
2953 if (mm_flags & AV_CPU_FLAG_ATOM)
2954 c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
2955 else
2956 c->apply_window_int16 = ff_apply_window_int16_ssse3;
2957 if (!(mm_flags & (AV_CPU_FLAG_SSE42|AV_CPU_FLAG_3DNOW))) // only wins on CPUs with slow cacheline-split loads
2958 c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
2959 c->bswap_buf = ff_bswap32_buf_ssse3;
2960 #endif
2961 #endif
2962 }
2963
2964 static void dsputil_init_sse4(DSPContext *c, AVCodecContext *avctx,
2965 int mm_flags)
2966 {
2967 #if HAVE_YASM
2968 c->vector_clip_int32 = ff_vector_clip_int32_sse4;
2969 #endif
2970 }
2971
2972 static void dsputil_init_avx(DSPContext *c, AVCodecContext *avctx, int mm_flags)
2973 {
2974 #if HAVE_AVX && HAVE_YASM
2975 const int bit_depth = avctx->bits_per_raw_sample;
2976
2977 if (bit_depth == 10) {
2978 // AVX implies !cache64.
2979 // TODO: Port cache(32|64) detection from x264.
2980 if (CONFIG_H264QPEL) {
2981 H264_QPEL_FUNCS_10(1, 0, sse2);
2982 H264_QPEL_FUNCS_10(2, 0, sse2);
2983 H264_QPEL_FUNCS_10(3, 0, sse2);
2984 }
2985
2986 if (CONFIG_H264CHROMA) {
2987 c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_avx;
2988 c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_avx;
2989 }
2990 }
2991 c->butterflies_float_interleave = ff_butterflies_float_interleave_avx;
2992 c->vector_fmul_reverse = ff_vector_fmul_reverse_avx;
2993 c->vector_fmul_add = ff_vector_fmul_add_avx;
2994 #endif
2995 }
2996
2997 void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
2998 {
2999 int mm_flags = av_get_cpu_flags();
3000
3001 #if 0
3002 av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
3003 if (mm_flags & AV_CPU_FLAG_MMX)
3004 av_log(avctx, AV_LOG_INFO, " mmx");
3005 if (mm_flags & AV_CPU_FLAG_MMX2)
3006 av_log(avctx, AV_LOG_INFO, " mmx2");
3007 if (mm_flags & AV_CPU_FLAG_3DNOW)
3008 av_log(avctx, AV_LOG_INFO, " 3dnow");
3009 if (mm_flags & AV_CPU_FLAG_SSE)
3010 av_log(avctx, AV_LOG_INFO, " sse");
3011 if (mm_flags & AV_CPU_FLAG_SSE2)
3012 av_log(avctx, AV_LOG_INFO, " sse2");
3013 av_log(avctx, AV_LOG_INFO, "\n");
3014 #endif
3015
3016 if (mm_flags & AV_CPU_FLAG_MMX) {
3017 const int idct_algo = avctx->idct_algo;
3018
3019 if (avctx->bits_per_raw_sample <= 8) {
3020 if (idct_algo == FF_IDCT_AUTO || idct_algo == FF_IDCT_SIMPLEMMX) {
3021 c->idct_put = ff_simple_idct_put_mmx;
3022 c->idct_add = ff_simple_idct_add_mmx;
3023 c->idct = ff_simple_idct_mmx;
3024 c->idct_permutation_type = FF_SIMPLE_IDCT_PERM;
3025 #if CONFIG_GPL
3026 } else if (idct_algo == FF_IDCT_LIBMPEG2MMX) {
3027 if (mm_flags & AV_CPU_FLAG_MMX2) {
3028 c->idct_put = ff_libmpeg2mmx2_idct_put;
3029 c->idct_add = ff_libmpeg2mmx2_idct_add;
3030 c->idct = ff_mmxext_idct;
3031 } else {
3032 c->idct_put = ff_libmpeg2mmx_idct_put;
3033 c->idct_add = ff_libmpeg2mmx_idct_add;
3034 c->idct = ff_mmx_idct;
3035 }
3036 c->idct_permutation_type = FF_LIBMPEG2_IDCT_PERM;
3037 #endif
3038 } else if ((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER ||
3039 CONFIG_VP6_DECODER) &&
3040 idct_algo == FF_IDCT_VP3 && HAVE_YASM) {
3041 if (mm_flags & AV_CPU_FLAG_SSE2) {
3042 c->idct_put = ff_vp3_idct_put_sse2;
3043 c->idct_add = ff_vp3_idct_add_sse2;
3044 c->idct = ff_vp3_idct_sse2;
3045 c->idct_permutation_type = FF_TRANSPOSE_IDCT_PERM;
3046 } else {
3047 c->idct_put = ff_vp3_idct_put_mmx;
3048 c->idct_add = ff_vp3_idct_add_mmx;
3049 c->idct = ff_vp3_idct_mmx;
3050 c->idct_permutation_type = FF_PARTTRANS_IDCT_PERM;
3051 }
3052 } else if (idct_algo == FF_IDCT_CAVS) {
3053 c->idct_permutation_type = FF_TRANSPOSE_IDCT_PERM;
3054 } else if (idct_algo == FF_IDCT_XVIDMMX) {
3055 if (mm_flags & AV_CPU_FLAG_SSE2) {
3056 c->idct_put = ff_idct_xvid_sse2_put;
3057 c->idct_add = ff_idct_xvid_sse2_add;
3058 c->idct = ff_idct_xvid_sse2;
3059 c->idct_permutation_type = FF_SSE2_IDCT_PERM;
3060 } else if (mm_flags & AV_CPU_FLAG_MMX2) {
3061 c->idct_put = ff_idct_xvid_mmx2_put;
3062 c->idct_add = ff_idct_xvid_mmx2_add;
3063 c->idct = ff_idct_xvid_mmx2;
3064 } else {
3065 c->idct_put = ff_idct_xvid_mmx_put;
3066 c->idct_add = ff_idct_xvid_mmx_add;
3067 c->idct = ff_idct_xvid_mmx;
3068 }
3069 }
3070 }
3071
3072 dsputil_init_mmx(c, avctx, mm_flags);
3073 }
3074
3075 if (mm_flags & AV_CPU_FLAG_MMX2)
3076 dsputil_init_mmx2(c, avctx, mm_flags);
3077
3078 if (mm_flags & AV_CPU_FLAG_3DNOW && HAVE_AMD3DNOW)
3079 dsputil_init_3dnow(c, avctx, mm_flags);
3080
3081 if (mm_flags & AV_CPU_FLAG_3DNOWEXT && HAVE_AMD3DNOWEXT)
3082 dsputil_init_3dnow2(c, avctx, mm_flags);
3083
3084 if (mm_flags & AV_CPU_FLAG_SSE && HAVE_SSE)
3085 dsputil_init_sse(c, avctx, mm_flags);
3086
3087 if (mm_flags & AV_CPU_FLAG_SSE2)
3088 dsputil_init_sse2(c, avctx, mm_flags);
3089
3090 if (mm_flags & AV_CPU_FLAG_SSSE3)
3091 dsputil_init_ssse3(c, avctx, mm_flags);
3092
3093 if (mm_flags & AV_CPU_FLAG_SSE4 && HAVE_SSE)
3094 dsputil_init_sse4(c, avctx, mm_flags);
3095
3096 if (mm_flags & AV_CPU_FLAG_AVX)
3097 dsputil_init_avx(c, avctx, mm_flags);
3098
3099 if (CONFIG_ENCODERS)
3100 ff_dsputilenc_init_mmx(c, avctx);
3101 }