/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h263.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "vp3dsp_mmx.h"
#include "vp3dsp_sse2.h"
#include "vp6dsp_mmx.h"
#include "vp6dsp_sse2.h"
#include "idct_xvid.h"

//#undef NDEBUG
//#include <assert.h>

int mm_flags; /* multimedia extension flags */

/* pixel operations */
DECLARE_ALIGNED_8 (const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED_16(const uint64_t, ff_pdw_80000000[2]) =
{0x8000000080000000ULL, 0x8000000080000000ULL};

DECLARE_ALIGNED_8 (const uint64_t, ff_pw_3  ) = 0x0003000300030003ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_4  ) = 0x0004000400040004ULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_5  ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_8  ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_64 ) = {0x0040004000400040ULL, 0x0040004000400040ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;

DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1  ) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3  ) = 0x0303030303030303ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_7  ) = 0x0707070707070707ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;

DECLARE_ALIGNED_16(const double, ff_pd_1[2]) = { 1.0, 1.0 };
DECLARE_ALIGNED_16(const double, ff_pd_2[2]) = { 2.0, 2.0 };

#define JUMPALIGN() __asm__ volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
// in a shared library it is better to generate constants in registers
// this way instead of loading them from memory
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)

#endif

// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "por " #regb ", " #regr "   \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"

// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pand " #regd ", " #regp "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "por " #regb ", " #regr "   \n\t"\
    "por " #regd ", " #regp "   \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"

/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND  MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX_NO_RND(a, b, c, e)
#define OP_AVG(a, b, c, e)              PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND  MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx

/***********************************/
/* standard MMX */

void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile(
        "movq   %3, %%mm0           \n\t"
        "movq   8%3, %%mm1          \n\t"
        "movq   16%3, %%mm2         \n\t"
        "movq   24%3, %%mm3         \n\t"
        "movq   32%3, %%mm4         \n\t"
        "movq   40%3, %%mm5         \n\t"
        "movq   48%3, %%mm6         \n\t"
        "movq   56%3, %%mm7         \n\t"
        "packuswb %%mm1, %%mm0      \n\t"
        "packuswb %%mm3, %%mm2      \n\t"
        "packuswb %%mm5, %%mm4      \n\t"
        "packuswb %%mm7, %%mm6      \n\t"
        "movq   %%mm0, (%0)         \n\t"
        "movq   %%mm2, (%0, %1)     \n\t"
        "movq   %%mm4, (%0, %1, 2)  \n\t"
        "movq   %%mm6, (%0, %2)     \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
        :"memory");
    pix += line_size*4;
    p += 32;

    // if this were an exact copy of the code above, the compiler
    // would generate some very strange code, hence the "r" constraint
    // for the block pointer
    __asm__ volatile(
        "movq   (%3), %%mm0         \n\t"
        "movq   8(%3), %%mm1        \n\t"
        "movq   16(%3), %%mm2       \n\t"
        "movq   24(%3), %%mm3       \n\t"
        "movq   32(%3), %%mm4       \n\t"
        "movq   40(%3), %%mm5       \n\t"
        "movq   48(%3), %%mm6       \n\t"
        "movq   56(%3), %%mm7       \n\t"
        "packuswb %%mm1, %%mm0      \n\t"
        "packuswb %%mm3, %%mm2      \n\t"
        "packuswb %%mm5, %%mm4      \n\t"
        "packuswb %%mm7, %%mm6      \n\t"
        "movq   %%mm0, (%0)         \n\t"
        "movq   %%mm2, (%0, %1)     \n\t"
        "movq   %%mm4, (%0, %1, 2)  \n\t"
        "movq   %%mm6, (%0, %2)     \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
        :"memory");
}
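
/* Scalar equivalent of put_pixels_clamped_mmx above, for reference only;
 * the packuswb instructions perform the unsigned saturation:
 *
 *     for (i = 0; i < 8; i++, pixels += line_size, block += 8)
 *         for (j = 0; j < 8; j++)
 *             pixels[j] = av_clip_uint8(block[j]);
 */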

DECLARE_ASM_CONST(8, uint8_t, ff_vector128[8]) =
  { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

#define put_signed_pixels_clamped_mmx_half(off) \
    "movq    "#off"(%2), %%mm1          \n\t"\
    "movq 16+"#off"(%2), %%mm2          \n\t"\
    "movq 32+"#off"(%2), %%mm3          \n\t"\
    "movq 48+"#off"(%2), %%mm4          \n\t"\
    "packsswb  8+"#off"(%2), %%mm1      \n\t"\
    "packsswb 24+"#off"(%2), %%mm2      \n\t"\
    "packsswb 40+"#off"(%2), %%mm3      \n\t"\
    "packsswb 56+"#off"(%2), %%mm4      \n\t"\
    "paddb %%mm0, %%mm1                 \n\t"\
    "paddb %%mm0, %%mm2                 \n\t"\
    "paddb %%mm0, %%mm3                 \n\t"\
    "paddb %%mm0, %%mm4                 \n\t"\
    "movq %%mm1, (%0)                   \n\t"\
    "movq %%mm2, (%0, %3)               \n\t"\
    "movq %%mm3, (%0, %3, 2)            \n\t"\
    "movq %%mm4, (%0, %1)               \n\t"

void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_vector128)", %%mm0 \n\t"
        "lea (%3, %3, 2), %1                \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea (%0, %3, 4), %0                \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        :"+&r" (pixels), "=&r" (line_skip3)
        :"r" (block), "r"(line_skip)
        :"memory");
}
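
/* Scalar equivalent of put_signed_pixels_clamped_mmx, for reference only:
 * packsswb saturates each coefficient to [-128, 127] and the paddb of
 * ff_vector128 then biases the result into [0, 255]:
 *
 *     pixels[j] = av_clip(block[j], -128, 127) + 128;
 */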

void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile(
            "movq   (%2), %%mm0     \n\t"
            "movq   8(%2), %%mm1    \n\t"
            "movq   16(%2), %%mm2   \n\t"
            "movq   24(%2), %%mm3   \n\t"
            "movq   %0, %%mm4       \n\t"
            "movq   %1, %%mm6       \n\t"
            "movq   %%mm4, %%mm5    \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm4, %%mm0    \n\t"
            "paddsw %%mm5, %%mm1    \n\t"
            "movq   %%mm6, %%mm5    \n\t"
            "punpcklbw %%mm7, %%mm6 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm6, %%mm2    \n\t"
            "paddsw %%mm5, %%mm3    \n\t"
            "packuswb %%mm1, %%mm0  \n\t"
            "packuswb %%mm3, %%mm2  \n\t"
            "movq   %%mm0, %0       \n\t"
            "movq   %%mm2, %1       \n\t"
            :"+m"(*pix), "+m"(*(pix+line_size))
            :"r"(p)
            :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}
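
/* Scalar equivalent of add_pixels_clamped_mmx, for reference only: widen
 * each pixel to 16 bits, add the (possibly negative) coefficient, then
 * pack back with unsigned saturation:
 *
 *     pixels[j] = av_clip_uint8(pixels[j] + block[j]);
 */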

static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a"    \n\t"
        ASMALIGN(3)
        "1:                         \n\t"
        "movd (%1), %%mm0           \n\t"
        "movd (%1, %3), %%mm1       \n\t"
        "movd %%mm0, (%2)           \n\t"
        "movd %%mm1, (%2, %3)       \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "movd (%1), %%mm0           \n\t"
        "movd (%1, %3), %%mm1       \n\t"
        "movd %%mm0, (%2)           \n\t"
        "movd %%mm1, (%2, %3)       \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "subl $4, %0                \n\t"
        "jnz 1b                     \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a"    \n\t"
        ASMALIGN(3)
        "1:                         \n\t"
        "movq (%1), %%mm0           \n\t"
        "movq (%1, %3), %%mm1       \n\t"
        "movq %%mm0, (%2)           \n\t"
        "movq %%mm1, (%2, %3)       \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "movq (%1), %%mm0           \n\t"
        "movq (%1, %3), %%mm1       \n\t"
        "movq %%mm0, (%2)           \n\t"
        "movq %%mm1, (%2, %3)       \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "subl $4, %0                \n\t"
        "jnz 1b                     \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a"    \n\t"
        ASMALIGN(3)
        "1:                         \n\t"
        "movq (%1), %%mm0           \n\t"
        "movq 8(%1), %%mm4          \n\t"
        "movq (%1, %3), %%mm1       \n\t"
        "movq 8(%1, %3), %%mm5      \n\t"
        "movq %%mm0, (%2)           \n\t"
        "movq %%mm4, 8(%2)          \n\t"
        "movq %%mm1, (%2, %3)       \n\t"
        "movq %%mm5, 8(%2, %3)      \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "movq (%1), %%mm0           \n\t"
        "movq 8(%1), %%mm4          \n\t"
        "movq (%1, %3), %%mm1       \n\t"
        "movq 8(%1, %3), %%mm5      \n\t"
        "movq %%mm0, (%2)           \n\t"
        "movq %%mm4, 8(%2)          \n\t"
        "movq %%mm1, (%2, %3)       \n\t"
        "movq %%mm5, 8(%2, %3)      \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "subl $4, %0                \n\t"
        "jnz 1b                     \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1:                         \n\t"
        "movdqu (%1), %%xmm0        \n\t"
        "movdqu (%1,%3), %%xmm1     \n\t"
        "movdqu (%1,%3,2), %%xmm2   \n\t"
        "movdqu (%1,%4), %%xmm3     \n\t"
        "movdqa %%xmm0, (%2)        \n\t"
        "movdqa %%xmm1, (%2,%3)     \n\t"
        "movdqa %%xmm2, (%2,%3,2)   \n\t"
        "movdqa %%xmm3, (%2,%4)     \n\t"
        "subl $4, %0                \n\t"
        "lea (%1,%3,4), %1          \n\t"
        "lea (%2,%3,4), %2          \n\t"
        "jnz 1b                     \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
        );
}

static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1:                         \n\t"
        "movdqu (%1), %%xmm0        \n\t"
        "movdqu (%1,%3), %%xmm1     \n\t"
        "movdqu (%1,%3,2), %%xmm2   \n\t"
        "movdqu (%1,%4), %%xmm3     \n\t"
        "pavgb  (%2), %%xmm0        \n\t"
        "pavgb  (%2,%3), %%xmm1     \n\t"
        "pavgb  (%2,%3,2), %%xmm2   \n\t"
        "pavgb  (%2,%4), %%xmm3     \n\t"
        "movdqa %%xmm0, (%2)        \n\t"
        "movdqa %%xmm1, (%2,%3)     \n\t"
        "movdqa %%xmm2, (%2,%3,2)   \n\t"
        "movdqa %%xmm3, (%2,%4)     \n\t"
        "subl $4, %0                \n\t"
        "lea (%1,%3,4), %1          \n\t"
        "lea (%2,%3,4), %2          \n\t"
        "jnz 1b                     \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
        );
}

#define CLEAR_BLOCKS(name,n) \
static void name(DCTELEM *blocks)\
{\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "mov  %1, %%"REG_a"             \n\t"\
        "1:                             \n\t"\
        "movq %%mm7, (%0, %%"REG_a")    \n\t"\
        "movq %%mm7, 8(%0, %%"REG_a")   \n\t"\
        "movq %%mm7, 16(%0, %%"REG_a")  \n\t"\
        "movq %%mm7, 24(%0, %%"REG_a")  \n\t"\
        "add $32, %%"REG_a"             \n\t"\
        " js 1b                         \n\t"\
        : : "r" (((uint8_t *)blocks)+128*n),\
            "i" (-128*n)\
        : "%"REG_a\
    );\
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)

static void clear_block_sse(DCTELEM *block)
{
    __asm__ volatile(
        "xorps  %%xmm0, %%xmm0          \n"
        "movaps %%xmm0,    (%0)         \n"
        "movaps %%xmm0,  16(%0)         \n"
        "movaps %%xmm0,  32(%0)         \n"
        "movaps %%xmm0,  48(%0)         \n"
        "movaps %%xmm0,  64(%0)         \n"
        "movaps %%xmm0,  80(%0)         \n"
        "movaps %%xmm0,  96(%0)         \n"
        "movaps %%xmm0, 112(%0)         \n"
        :: "r"(block)
        : "memory"
    );
}

static void clear_blocks_sse(DCTELEM *blocks)
{
    __asm__ volatile(
        "xorps  %%xmm0, %%xmm0                  \n"
        "mov     %1, %%"REG_a"                  \n"
        "1:                                     \n"
        "movaps %%xmm0,    (%0, %%"REG_a")      \n"
        "movaps %%xmm0,  16(%0, %%"REG_a")      \n"
        "movaps %%xmm0,  32(%0, %%"REG_a")      \n"
        "movaps %%xmm0,  48(%0, %%"REG_a")      \n"
        "movaps %%xmm0,  64(%0, %%"REG_a")      \n"
        "movaps %%xmm0,  80(%0, %%"REG_a")      \n"
        "movaps %%xmm0,  96(%0, %%"REG_a")      \n"
        "movaps %%xmm0, 112(%0, %%"REG_a")      \n"
        "add $128, %%"REG_a"                    \n"
        " js 1b                                 \n"
        : : "r" (((uint8_t *)blocks)+128*6),
            "i" (-128*6)
        : "%"REG_a
    );
}
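
/* Both clear_block(s) variants are specialised zero-fills; a portable
 * fallback (reference only) would be
 *
 *     memset(blocks, 0, sizeof(DCTELEM) * 64 * 6);
 *
 * the SIMD versions rely on the blocks being 16-byte aligned so a short
 * run of wide aligned stores can avoid a generic memset call. */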

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f                         \n\t"
        "1:                             \n\t"
        "movq  (%1, %0), %%mm0          \n\t"
        "movq  (%2, %0), %%mm1          \n\t"
        "paddb %%mm0, %%mm1             \n\t"
        "movq %%mm1, (%2, %0)           \n\t"
        "movq 8(%1, %0), %%mm0          \n\t"
        "movq 8(%2, %0), %%mm1          \n\t"
        "paddb %%mm0, %%mm1             \n\t"
        "movq %%mm1, 8(%2, %0)          \n\t"
        "add $16, %0                    \n\t"
        "2:                             \n\t"
        "cmp %3, %0                     \n\t"
        " js 1b                         \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f                         \n\t"
        "1:                             \n\t"
        "movq   (%2, %0), %%mm0         \n\t"
        "movq  8(%2, %0), %%mm1         \n\t"
        "paddb  (%3, %0), %%mm0         \n\t"
        "paddb 8(%3, %0), %%mm1         \n\t"
        "movq %%mm0,  (%1, %0)          \n\t"
        "movq %%mm1, 8(%1, %0)          \n\t"
        "add $16, %0                    \n\t"
        "2:                             \n\t"
        "cmp %4, %0                     \n\t"
        " js 1b                         \n\t"
        : "+r" (i)
        : "r"(dst), "r"(src1), "r"(src2), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i] = src1[i] + src2[i];
}

#if HAVE_7REGS && HAVE_TEN_OPERANDS
static void add_hfyu_median_prediction_cmov(uint8_t *dst, uint8_t *top, uint8_t *diff, int w, int *left, int *left_top) {
    x86_reg w2 = -w;
    x86_reg x;
    int l = *left     & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile(
        "mov    %7, %3                  \n"
        "1:                             \n"
        "movzx (%3,%4), %2              \n"
        "mov    %2, %k3                 \n"
        "sub   %b1, %b3                 \n"
        "add   %b0, %b3                 \n"
        "mov    %2, %1                  \n"
        "cmp    %0, %2                  \n"
        "cmovg  %0, %2                  \n"
        "cmovg  %1, %0                  \n"
        "cmp   %k3, %0                  \n"
        "cmovg %k3, %0                  \n"
        "mov    %7, %3                  \n"
        "cmp    %2, %0                  \n"
        "cmovl  %2, %0                  \n"
        "add (%6,%4), %b0               \n"
        "mov   %b0, (%5,%4)             \n"
        "inc    %4                      \n"
        "jl 1b                          \n"
        :"+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        :"r"(dst+w), "r"(diff+w), "rm"(top+w)
    );
    *left = l;
    *left_top = tl;
}
#endif
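
/* The cmov sequence above computes the HuffYUV median predictor byte by
 * byte; a scalar sketch (reference only), with l and tl the running left
 * and top-left samples:
 *
 *     for (i = 0; i < w; i++) {
 *         int pred = mid_pred(l, top[i], (l + top[i] - tl) & 0xFF);
 *         l  = dst[i] = (diff[i] + pred) & 0xFF;
 *         tl = top[i];
 *     }
 *
 * mid_pred() is the codebase's median-of-three helper. */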

#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7              \n\t"\
    "movq  %0, %%mm0                \n\t"\
    "movq  %0, %%mm1                \n\t"\
    "movq  %3, %%mm2                \n\t"\
    "movq  %3, %%mm3                \n\t"\
    "punpcklbw %%mm7, %%mm0         \n\t"\
    "punpckhbw %%mm7, %%mm1         \n\t"\
    "punpcklbw %%mm7, %%mm2         \n\t"\
    "punpckhbw %%mm7, %%mm3         \n\t"\
    "psubw %%mm2, %%mm0             \n\t"\
    "psubw %%mm3, %%mm1             \n\t"\
    "movq  %1, %%mm2                \n\t"\
    "movq  %1, %%mm3                \n\t"\
    "movq  %2, %%mm4                \n\t"\
    "movq  %2, %%mm5                \n\t"\
    "punpcklbw %%mm7, %%mm2         \n\t"\
    "punpckhbw %%mm7, %%mm3         \n\t"\
    "punpcklbw %%mm7, %%mm4         \n\t"\
    "punpckhbw %%mm7, %%mm5         \n\t"\
    "psubw %%mm2, %%mm4             \n\t"\
    "psubw %%mm3, %%mm5             \n\t"\
    "psllw $2, %%mm4                \n\t"\
    "psllw $2, %%mm5                \n\t"\
    "paddw %%mm0, %%mm4             \n\t"\
    "paddw %%mm1, %%mm5             \n\t"\
    "pxor %%mm6, %%mm6              \n\t"\
    "pcmpgtw %%mm4, %%mm6           \n\t"\
    "pcmpgtw %%mm5, %%mm7           \n\t"\
    "pxor %%mm6, %%mm4              \n\t"\
    "pxor %%mm7, %%mm5              \n\t"\
    "psubw %%mm6, %%mm4             \n\t"\
    "psubw %%mm7, %%mm5             \n\t"\
    "psrlw $3, %%mm4                \n\t"\
    "psrlw $3, %%mm5                \n\t"\
    "packuswb %%mm5, %%mm4          \n\t"\
    "packsswb %%mm7, %%mm6          \n\t"\
    "pxor %%mm7, %%mm7              \n\t"\
    "movd %4, %%mm2                 \n\t"\
    "punpcklbw %%mm2, %%mm2         \n\t"\
    "punpcklbw %%mm2, %%mm2         \n\t"\
    "punpcklbw %%mm2, %%mm2         \n\t"\
    "psubusb %%mm4, %%mm2           \n\t"\
    "movq %%mm2, %%mm3              \n\t"\
    "psubusb %%mm4, %%mm3           \n\t"\
    "psubb %%mm3, %%mm2             \n\t"\
    "movq %1, %%mm3                 \n\t"\
    "movq %2, %%mm4                 \n\t"\
    "pxor %%mm6, %%mm3              \n\t"\
    "pxor %%mm6, %%mm4              \n\t"\
    "paddusb %%mm2, %%mm3           \n\t"\
    "psubusb %%mm2, %%mm4           \n\t"\
    "pxor %%mm6, %%mm3              \n\t"\
    "pxor %%mm6, %%mm4              \n\t"\
    "paddusb %%mm2, %%mm2           \n\t"\
    "packsswb %%mm1, %%mm0          \n\t"\
    "pcmpgtb %%mm0, %%mm7           \n\t"\
    "pxor %%mm7, %%mm0              \n\t"\
    "psubb %%mm7, %%mm0             \n\t"\
    "movq %%mm0, %%mm1              \n\t"\
    "psubusb %%mm2, %%mm0           \n\t"\
    "psubb %%mm0, %%mm1             \n\t"\
    "pand %5, %%mm1                 \n\t"\
    "psrlw $2, %%mm1                \n\t"\
    "pxor %%mm7, %%mm1              \n\t"\
    "psubb %%mm7, %%mm1             \n\t"\
    "movq %0, %%mm5                 \n\t"\
    "movq %3, %%mm6                 \n\t"\
    "psubb %%mm1, %%mm5             \n\t"\
    "paddb %%mm1, %%mm6             \n\t"

static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_ANY_H263) {
    const int strength= ff_h263_loop_filter_strength[qscale];

    __asm__ volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1             \n\t"
        "movq %%mm4, %2             \n\t"
        "movq %%mm5, %0             \n\t"
        "movq %%mm6, %3             \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
    }
}

static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    __asm__ volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd  %4, %%mm0                \n\t"
        "movd  %5, %%mm1                \n\t"
        "movd  %6, %%mm2                \n\t"
        "movd  %7, %%mm3                \n\t"
        "punpcklbw %%mm1, %%mm0         \n\t"
        "punpcklbw %%mm3, %%mm2         \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "punpcklwd %%mm2, %%mm0         \n\t"
        "punpckhwd %%mm2, %%mm1         \n\t"
        "movd  %%mm0, %0                \n\t"
        "punpckhdq %%mm0, %%mm0         \n\t"
        "movd  %%mm0, %1                \n\t"
        "movd  %%mm1, %2                \n\t"
        "punpckhdq %%mm1, %%mm1         \n\t"
        "movd  %%mm1, %3                \n\t"

        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        :  "m" (*(uint32_t*)(src + 0*src_stride)),
           "m" (*(uint32_t*)(src + 1*src_stride)),
           "m" (*(uint32_t*)(src + 2*src_stride)),
           "m" (*(uint32_t*)(src + 3*src_stride))
    );
}
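
/* transpose4x4 moves a 4x4 block of bytes from src to dst with rows and
 * columns exchanged; the scalar meaning (reference only) is simply
 *
 *     for (i = 0; i < 4; i++)
 *         for (j = 0; j < 4; j++)
 *             dst[i*dst_stride + j] = src[j*src_stride + i];
 *
 * done here with two punpcklbw/punpcklwd interleave stages instead of
 * sixteen byte loads. */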

static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_ANY_H263) {
    const int strength= ff_h263_loop_filter_strength[qscale];
    DECLARE_ALIGNED(8, uint64_t, temp[4]);
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp  , src           , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    __asm__ volatile(
        H263_LOOP_FILTER // 5 3 4 6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    __asm__ volatile(
        "movq %%mm5, %%mm1          \n\t"
        "movq %%mm4, %%mm0          \n\t"
        "punpcklbw %%mm3, %%mm5     \n\t"
        "punpcklbw %%mm6, %%mm4     \n\t"
        "punpckhbw %%mm3, %%mm1     \n\t"
        "punpckhbw %%mm6, %%mm0     \n\t"
        "movq %%mm5, %%mm3          \n\t"
        "movq %%mm1, %%mm6          \n\t"
        "punpcklwd %%mm4, %%mm5     \n\t"
        "punpcklwd %%mm0, %%mm1     \n\t"
        "punpckhwd %%mm4, %%mm3     \n\t"
        "punpckhwd %%mm0, %%mm6     \n\t"
        "movd %%mm5, (%0)           \n\t"
        "punpckhdq %%mm5, %%mm5     \n\t"
        "movd %%mm5, (%0,%2)        \n\t"
        "movd %%mm3, (%0,%2,2)      \n\t"
        "punpckhdq %%mm3, %%mm3     \n\t"
        "movd %%mm3, (%0,%3)        \n\t"
        "movd %%mm1, (%1)           \n\t"
        "punpckhdq %%mm1, %%mm1     \n\t"
        "movd %%mm1, (%1,%2)        \n\t"
        "movd %%mm6, (%1,%2,2)      \n\t"
        "punpckhdq %%mm6, %%mm6     \n\t"
        "movd %%mm6, (%1,%3)        \n\t"
        :: "r" (src),
           "r" (src + 4*stride),
           "r" ((x86_reg)   stride ),
           "r" ((x86_reg)(3*stride))
    );
    }
}

/* Draw the edges of width 'w' of an image of size width x height.
   This MMX version can only handle w == 8 or w == 16. */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if(w==8)
    {
        __asm__ volatile(
            "1:                             \n\t"
            "movd (%0), %%mm0               \n\t"
            "punpcklbw %%mm0, %%mm0         \n\t"
            "punpcklwd %%mm0, %%mm0         \n\t"
            "punpckldq %%mm0, %%mm0         \n\t"
            "movq %%mm0, -8(%0)             \n\t"
            "movq -8(%0, %2), %%mm1         \n\t"
            "punpckhbw %%mm1, %%mm1         \n\t"
            "punpckhwd %%mm1, %%mm1         \n\t"
            "punpckhdq %%mm1, %%mm1         \n\t"
            "movq %%mm1, (%0, %2)           \n\t"
            "add %1, %0                     \n\t"
            "cmp %3, %0                     \n\t"
            " jb 1b                         \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }
    else
    {
        __asm__ volatile(
            "1:                             \n\t"
            "movd (%0), %%mm0               \n\t"
            "punpcklbw %%mm0, %%mm0         \n\t"
            "punpcklwd %%mm0, %%mm0         \n\t"
            "punpckldq %%mm0, %%mm0         \n\t"
            "movq %%mm0, -8(%0)             \n\t"
            "movq %%mm0, -16(%0)            \n\t"
            "movq -8(%0, %2), %%mm1         \n\t"
            "punpckhbw %%mm1, %%mm1         \n\t"
            "punpckhwd %%mm1, %%mm1         \n\t"
            "punpckhdq %%mm1, %%mm1         \n\t"
            "movq %%mm1,  (%0, %2)          \n\t"
            "movq %%mm1, 8(%0, %2)          \n\t"
            "add %1, %0                     \n\t"
            "cmp %3, %0                     \n\t"
            " jb 1b                         \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }

    for(i=0;i<w;i+=4) {
        /* top and bottom (and hopefully also the corners) */
        ptr= buf - (i + 1) * wrap - w;
        __asm__ volatile(
            "1:                             \n\t"
            "movq (%1, %0), %%mm0           \n\t"
            "movq %%mm0, (%0)               \n\t"
            "movq %%mm0, (%0, %2)           \n\t"
            "movq %%mm0, (%0, %2, 2)        \n\t"
            "movq %%mm0, (%0, %3)           \n\t"
            "add $8, %0                     \n\t"
            "cmp %4, %0                     \n\t"
            " jb 1b                         \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
        );
        ptr= last_line + (i + 1) * wrap - w;
        __asm__ volatile(
            "1:                             \n\t"
            "movq (%1, %0), %%mm0           \n\t"
            "movq %%mm0, (%0)               \n\t"
            "movq %%mm0, (%0, %2)           \n\t"
            "movq %%mm0, (%0, %2, 2)        \n\t"
            "movq %%mm0, (%0, %3)           \n\t"
            "add $8, %0                     \n\t"
            "cmp %4, %0                     \n\t"
            " jb 1b                         \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
        );
    }
}
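
/* draw_edges replicates the outermost picture samples into the surrounding
 * border so that motion compensation may read slightly outside the picture;
 * conceptually (reference only):
 *
 *     left/right, per row y:
 *         buf[y*wrap - 1 - k]         = buf[y*wrap];           // 0 <= k < w
 *         buf[y*wrap + width + k]     = buf[y*wrap + width-1];
 *     top/bottom: whole padded rows are copied outward, corners included.
 */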

#define PAETH(cpu, abs3)\
static void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
{\
    x86_reg i = -bpp;\
    x86_reg end = w-3;\
    __asm__ volatile(\
        "pxor      %%mm7, %%mm7 \n"\
        "movd    (%1,%0), %%mm0 \n"\
        "movd    (%2,%0), %%mm1 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "add       %4, %0 \n"\
        "1: \n"\
        "movq      %%mm1, %%mm2 \n"\
        "movd    (%2,%0), %%mm1 \n"\
        "movq      %%mm2, %%mm3 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "movq      %%mm2, %%mm4 \n"\
        "psubw     %%mm1, %%mm3 \n"\
        "psubw     %%mm0, %%mm4 \n"\
        "movq      %%mm3, %%mm5 \n"\
        "paddw     %%mm4, %%mm5 \n"\
        abs3\
        "movq      %%mm4, %%mm6 \n"\
        "pminsw    %%mm5, %%mm6 \n"\
        "pcmpgtw   %%mm6, %%mm3 \n"\
        "pcmpgtw   %%mm5, %%mm4 \n"\
        "movq      %%mm4, %%mm6 \n"\
        "pand      %%mm3, %%mm4 \n"\
        "pandn     %%mm3, %%mm6 \n"\
        "pandn     %%mm0, %%mm3 \n"\
        "movd    (%3,%0), %%mm0 \n"\
        "pand      %%mm1, %%mm6 \n"\
        "pand      %%mm4, %%mm2 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "movq      %6,    %%mm5 \n"\
        "paddw     %%mm6, %%mm0 \n"\
        "paddw     %%mm2, %%mm3 \n"\
        "paddw     %%mm3, %%mm0 \n"\
        "pand      %%mm5, %%mm0 \n"\
        "movq      %%mm0, %%mm3 \n"\
        "packuswb  %%mm3, %%mm3 \n"\
        "movd      %%mm3, (%1,%0) \n"\
        "add       %4, %0 \n"\
        "cmp       %5, %0 \n"\
        "jle 1b \n"\
        :"+r"(i)\
        :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\
         "m"(ff_pw_255)\
        :"memory"\
    );\
}

#define ABS3_MMX2\
        "psubw     %%mm5, %%mm7 \n"\
        "pmaxsw    %%mm7, %%mm5 \n"\
        "pxor      %%mm6, %%mm6 \n"\
        "pxor      %%mm7, %%mm7 \n"\
        "psubw     %%mm3, %%mm6 \n"\
        "psubw     %%mm4, %%mm7 \n"\
        "pmaxsw    %%mm6, %%mm3 \n"\
        "pmaxsw    %%mm7, %%mm4 \n"\
        "pxor      %%mm7, %%mm7 \n"

#define ABS3_SSSE3\
        "pabsw     %%mm3, %%mm3 \n"\
        "pabsw     %%mm4, %%mm4 \n"\
        "pabsw     %%mm5, %%mm5 \n"

PAETH(mmx2, ABS3_MMX2)
#if HAVE_SSSE3
PAETH(ssse3, ABS3_SSSE3)
#endif
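
/* The PAETH macro implements the PNG Paeth predictor; in scalar form
 * (reference only, following the PNG specification):
 *
 *     p  = left + top - topleft;
 *     pa = FFABS(p - left);
 *     pb = FFABS(p - top);
 *     pc = FFABS(p - topleft);
 *     pred = (pa <= pb && pa <= pc) ? left : (pb <= pc) ? top : topleft;
 *     dst[i] = src[i] + pred;   // src holds the filtered residuals
 *
 * The abs3 argument supplies the three absolute differences, via pmaxsw
 * on MMX2 or pabsw on SSSE3. */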

#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
    "paddw " #m4 ", " #m3 "             \n\t" /* x1 */\
    "movq "MANGLE(ff_pw_20)", %%mm4     \n\t" /* 20 */\
    "pmullw " #m3 ", %%mm4              \n\t" /* 20x1 */\
    "movq "#in7", " #m3 "               \n\t" /* d */\
    "movq "#in0", %%mm5                 \n\t" /* D */\
    "paddw " #m3 ", %%mm5               \n\t" /* x4 */\
    "psubw %%mm5, %%mm4                 \n\t" /* 20x1 - x4 */\
    "movq "#in1", %%mm5                 \n\t" /* C */\
    "movq "#in2", %%mm6                 \n\t" /* B */\
    "paddw " #m6 ", %%mm5               \n\t" /* x3 */\
    "paddw " #m5 ", %%mm6               \n\t" /* x2 */\
    "paddw %%mm6, %%mm6                 \n\t" /* 2x2 */\
    "psubw %%mm6, %%mm5                 \n\t" /* -2x2 + x3 */\
    "pmullw "MANGLE(ff_pw_3)", %%mm5    \n\t" /* -6x2 + 3x3 */\
    "paddw " #rnd ", %%mm4              \n\t" /* x2 */\
    "paddw %%mm4, %%mm5                 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
    "psraw $5, %%mm5                    \n\t"\
    "packuswb %%mm5, %%mm5              \n\t"\
    OP(%%mm5, out, %%mm7, d)
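
/* QPEL_V_LOW evaluates one row of the MPEG-4 quarter-pel half-sample
 * filter, a symmetric 8-tap with coefficients (-1, 3, -6, 20, 20, -6, 3, -1)
 * scaled by 1/32. With x1..x4 the sums of the sample pairs at increasing
 * distance from the centre, the scalar form (reference only) is
 *
 *     out = av_clip_uint8((20*x1 - 6*x2 + 3*x3 - x4 + rnd) >> 5);
 *
 * where rnd is 16 for rounding and 15 for the no_rnd variants. */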

#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                               \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5               \n\t" /* b */\
        "paddw %%mm2, %%mm6               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0               \n\t" /* a */\
        "paddw %%mm1, %%mm5               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
        "paddw %6, %%mm6                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        "movq %%mm0, %5                   \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movq 5(%0), %%mm0                \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5                \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6                \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0                  \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5                 \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2               \n\t" /* b */\
        "paddw %%mm5, %%mm3               \n\t" /* c */\
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2                \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6                 \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6           \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1               \n\t" /* a */\
        "paddw %%mm6, %%mm4               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3               \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1                  \n\t"\
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3                  \n\t"\
        "movq %5, %%mm1                   \n\t"\
        "packuswb %%mm3, %%mm1            \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
\
        "movq 9(%0), %%mm1                \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4                \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3                \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1                  \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4                 \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1           \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4           \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5               \n\t" /* b */\
        "paddw %%mm4, %%mm0               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm0               \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5                \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3                 \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0  \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3           \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2               \n\t" /* d */\
        "psubw %%mm2, %%mm0               \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2                \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5           \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6               \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
\
        "paddw %%mm5, %%mm3               \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6               \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4       \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4               \n\t" /* c */\
        "paddw %%mm2, %%mm5               \n\t" /* d */\
        "paddw %%mm6, %%mm6               \n\t" /* 2b */\
        "psubw %%mm6, %%mm4               \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4  \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3               \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm4                  \n\t"\
        "paddw %%mm3, %%mm4               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4                  \n\t"\
        "packuswb %%mm4, %%mm0            \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
\
        "add %3, %0                       \n\t"\
        "add %4, %1                       \n\t"\
        "decl %2                          \n\t"\
        " jnz 1b                          \n\t"\
        : "+a"(src), "+c"(dst), "+D"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        __asm__ volatile(\
            "movq (%0), %%mm0               \n\t"\
            "movq 8(%0), %%mm1              \n\t"\
            "paddw %2, %%mm0                \n\t"\
            "paddw %2, %%mm1                \n\t"\
            "psraw $5, %%mm0                \n\t"\
            "psraw $5, %%mm1                \n\t"\
            "packuswb %%mm1, %%mm0          \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0             \n\t"\
            "movq 24(%0), %%mm1             \n\t"\
            "paddw %2, %%mm0                \n\t"\
            "paddw %2, %%mm1                \n\t"\
            "psraw $5, %%mm0                \n\t"\
            "psraw $5, %%mm1                \n\t"\
            "packuswb %%mm1, %%mm0          \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                               \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5               \n\t" /* b */\
        "paddw %%mm2, %%mm6               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0               \n\t" /* a */\
        "paddw %%mm1, %%mm5               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
        "paddw %5, %%mm6                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movd 5(%0), %%mm5                \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1               \n\t" /* a */\
        "paddw %%mm6, %%mm2               \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6       \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3               \n\t" /* c */\
        "paddw %%mm5, %%mm4               \n\t" /* d */\
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3               \n\t" /* -6b + 3c - d */\
        "paddw %5, %%mm1                  \n\t"\
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3                  \n\t"\
        "packuswb %%mm3, %%mm0            \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
\
        "add %3, %0                       \n\t"\
        "add %4, %1                       \n\t"\
        "decl %2                          \n\t"\
        " jnz 1b                          \n\t"\
        : "+a"(src), "+c"(dst), "+d"(h)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        __asm__ volatile(\
            "movq (%0), %%mm0           \n\t"\
            "movq 8(%0), %%mm1          \n\t"\
            "paddw %2, %%mm0            \n\t"\
            "paddw %2, %%mm1            \n\t"\
            "psraw $5, %%mm0            \n\t"\
            "psraw $5, %%mm1            \n\t"\
            "packuswb %%mm1, %%mm0      \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}

#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq (%0), %%mm1               \n\t"\
        "movq 8(%0), %%mm2              \n\t"\
        "movq 8(%0), %%mm3              \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpckhbw %%mm7, %%mm3         \n\t"\
        "movq %%mm0, (%1)               \n\t"\
        "movq %%mm1, 17*8(%1)           \n\t"\
        "movq %%mm2, 2*17*8(%1)         \n\t"\
        "movq %%mm3, 3*17*8(%1)         \n\t"\
        "add $8, %1                     \n\t"\
        "add %3, %0                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=4;\
\
    /*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7            \n\t"*/\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq 8(%0), %%mm1              \n\t"\
        "movq 16(%0), %%mm2             \n\t"\
        "movq 24(%0), %%mm3             \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"  \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
\
        "add $136, %0                   \n\t"\
        "add %6, %1                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
        :"memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq (%0), %%mm1               \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "movq %%mm0, (%1)               \n\t"\
        "movq %%mm1, 9*8(%1)            \n\t"\
        "add $8, %1                     \n\t"\
        "add %3, %0                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=2;\
\
    /*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7            \n\t"*/\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq 8(%0), %%mm1              \n\t"\
        "movq 16(%0), %%mm2             \n\t"\
        "movq 24(%0), %%mm3             \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
\
        "add $72, %0                    \n\t"\
        "add %6, %1                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
}\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
}\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}
1616
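/* Naming: qpelN_mcXY computes the quarter-pel position (X/4, Y/4) relative to
 * the full-pel sample: mc00 is a plain copy, mc20 the horizontal half-pel,
 * mc02 the vertical half-pel, mc22 the centre.  The pixels_tab slot for mcXY
 * is X + 4*Y (see SET_QPEL_FUNCS in dsputil_init_mmx below). */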
1617 #define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
1618 #define AVG_3DNOW_OP(a,b,temp, size) \
1619 "mov" #size " " #b ", " #temp " \n\t"\
1620 "pavgusb " #temp ", " #a " \n\t"\
1621 "mov" #size " " #a ", " #b " \n\t"
1622 #define AVG_MMX2_OP(a,b,temp, size) \
1623 "mov" #size " " #b ", " #temp " \n\t"\
1624 "pavgb " #temp ", " #a " \n\t"\
1625 "mov" #size " " #a ", " #b " \n\t"
1626
1627 QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP)
1628 QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP)
1629 QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
1630 QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow)
1631 QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow)
1632 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
1633 QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2)
1634 QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2)
1635 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
1636
1637 /***********************************/
1638 /* bilinear qpel: not compliant with any spec, only for use with -lavdopts fast */
1639
1640 #define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
1641 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1642 OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
1643 }
1644 #define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
1645 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1646 OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
1647 }
1648
1649 #define QPEL_2TAP(OPNAME, SIZE, MMX)\
1650 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
1651 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
1652 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
1653 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
1654 OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
1655 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
1656 OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
1657 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
1658 OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
1659 static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1660 OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
1661 }\
1662 static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1663 OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
1664 }\
1665 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0, 1, 0)\
1666 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1, -1, 0)\
1667 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0, stride, 0)\
1668 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride, -stride, 0)\
1669 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0, stride, 1)\
1670 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1, stride, -1)\
1671 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride, -stride, 1)\
1672 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\
1673
1674 QPEL_2TAP(put_, 16, mmx2)
1675 QPEL_2TAP(avg_, 16, mmx2)
1676 QPEL_2TAP(put_, 8, mmx2)
1677 QPEL_2TAP(avg_, 8, mmx2)
1678 QPEL_2TAP(put_, 16, 3dnow)
1679 QPEL_2TAP(avg_, 16, 3dnow)
1680 QPEL_2TAP(put_, 8, 3dnow)
1681 QPEL_2TAP(avg_, 8, 3dnow)
1682
1683
1684 #if 0
1685 static void just_return(void) { return; }
1686 #endif
1687
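/* Global motion compensation: samples an 8-pixel-wide, h-row block along the
 * affine motion field
 *     x(i,j) = ox + dxx*i + dxy*j
 *     y(i,j) = oy + dyx*i + dyy*j
 * (fixed point, 16+shift fractional bits) with bilinear interpolation and
 * rounding constant r, mirroring ff_gmc_c().  Blocks with a non-constant
 * full-pel offset or over-wide subpel vectors fall back to the C version. */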
1688 static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
1689 int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){
1690 const int w = 8;
1691 const int ix = ox>>(16+shift);
1692 const int iy = oy>>(16+shift);
1693 const int oxs = ox>>4;
1694 const int oys = oy>>4;
1695 const int dxxs = dxx>>4;
1696 const int dxys = dxy>>4;
1697 const int dyxs = dyx>>4;
1698 const int dyys = dyy>>4;
1699 const uint16_t r4[4] = {r,r,r,r};
1700 const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
1701 const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
1702 const uint64_t shift2 = 2*shift;
1703 uint8_t edge_buf[(h+1)*stride];
1704 int x, y;
1705
1706 const int dxw = (dxx-(1<<(16+shift)))*(w-1);
1707 const int dyh = (dyy-(1<<(16+shift)))*(h-1);
1708 const int dxh = dxy*(h-1);
1709 const int dyw = dyx*(w-1);
1710 if( // non-constant fullpel offset (3% of blocks)
1711 ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
1712 (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
1713 // uses more than 16 bits of subpel mv (only at huge resolution)
1714 || (dxx|dxy|dyx|dyy)&15 )
1715 {
1716 //FIXME could still use mmx for some of the rows
1717 ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
1718 return;
1719 }
1720
1721 src += ix + iy*stride;
1722 if( (unsigned)ix >= width-w ||
1723 (unsigned)iy >= height-h )
1724 {
1725 ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
1726 src = edge_buf;
1727 }
1728
1729 __asm__ volatile(
1730 "movd %0, %%mm6 \n\t"
1731 "pxor %%mm7, %%mm7 \n\t"
1732 "punpcklwd %%mm6, %%mm6 \n\t"
1733 "punpcklwd %%mm6, %%mm6 \n\t"
1734 :: "r"(1<<shift)
1735 );
1736
1737 for(x=0; x<w; x+=4){
1738 uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
1739 oxs - dxys + dxxs*(x+1),
1740 oxs - dxys + dxxs*(x+2),
1741 oxs - dxys + dxxs*(x+3) };
1742 uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
1743 oys - dyys + dyxs*(x+1),
1744 oys - dyys + dyxs*(x+2),
1745 oys - dyys + dyxs*(x+3) };
1746
1747 for(y=0; y<h; y++){
1748 __asm__ volatile(
1749 "movq %0, %%mm4 \n\t"
1750 "movq %1, %%mm5 \n\t"
1751 "paddw %2, %%mm4 \n\t"
1752 "paddw %3, %%mm5 \n\t"
1753 "movq %%mm4, %0 \n\t"
1754 "movq %%mm5, %1 \n\t"
1755 "psrlw $12, %%mm4 \n\t"
1756 "psrlw $12, %%mm5 \n\t"
1757 : "+m"(*dx4), "+m"(*dy4)
1758 : "m"(*dxy4), "m"(*dyy4)
1759 );
1760
1761 __asm__ volatile(
1762 "movq %%mm6, %%mm2 \n\t"
1763 "movq %%mm6, %%mm1 \n\t"
1764 "psubw %%mm4, %%mm2 \n\t"
1765 "psubw %%mm5, %%mm1 \n\t"
1766 "movq %%mm2, %%mm0 \n\t"
1767 "movq %%mm4, %%mm3 \n\t"
1768 "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
1769 "pmullw %%mm5, %%mm3 \n\t" // dx*dy
1770 "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
1771 "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)
1772
1773 "movd %4, %%mm5 \n\t"
1774 "movd %3, %%mm4 \n\t"
1775 "punpcklbw %%mm7, %%mm5 \n\t"
1776 "punpcklbw %%mm7, %%mm4 \n\t"
1777 "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
1778 "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy
1779
1780 "movd %2, %%mm5 \n\t"
1781 "movd %1, %%mm4 \n\t"
1782 "punpcklbw %%mm7, %%mm5 \n\t"
1783 "punpcklbw %%mm7, %%mm4 \n\t"
1784 "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
1785 "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
1786 "paddw %5, %%mm1 \n\t"
1787 "paddw %%mm3, %%mm2 \n\t"
1788 "paddw %%mm1, %%mm0 \n\t"
1789 "paddw %%mm2, %%mm0 \n\t"
1790
1791 "psrlw %6, %%mm0 \n\t"
1792 "packuswb %%mm0, %%mm0 \n\t"
1793 "movd %%mm0, %0 \n\t"
1794
1795 : "=m"(dst[x+y*stride])
1796 : "m"(src[0]), "m"(src[1]),
1797 "m"(src[stride]), "m"(src[stride+1]),
1798 "m"(*r4), "m"(shift2)
1799 );
1800 src += stride;
1801 }
1802 src += 4-h*stride;
1803 }
1804 }
1805
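/* Walk one cache line per row so the pixels are resident before motion
 * compensation reads them: prefetcht0 (MMX2/SSE) pulls into the whole cache
 * hierarchy, the 3DNow! prefetch at least into L1. */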
1806 #define PREFETCH(name, op) \
1807 static void name(void *mem, int stride, int h){\
1808 const uint8_t *p= mem;\
1809 do{\
1810 __asm__ volatile(#op" %0" :: "m"(*p));\
1811 p+= stride;\
1812 }while(--h);\
1813 }
1814 PREFETCH(prefetch_mmx2, prefetcht0)
1815 PREFETCH(prefetch_3dnow, prefetch)
1816 #undef PREFETCH
1817
1818 #include "h264dsp_mmx.c"
1819 #include "rv40dsp_mmx.c"
1820
1821 /* CAVS specific */
1822 void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx);
1823 void ff_cavsdsp_init_3dnow(DSPContext* c, AVCodecContext *avctx);
1824
1825 void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1826 put_pixels8_mmx(dst, src, stride, 8);
1827 }
1828 void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1829 avg_pixels8_mmx(dst, src, stride, 8);
1830 }
1831 void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1832 put_pixels16_mmx(dst, src, stride, 16);
1833 }
1834 void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1835 avg_pixels16_mmx(dst, src, stride, 16);
1836 }
1837
1838 /* VC1 specific */
1839 void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx);
1840
1841 void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
1842 put_pixels8_mmx(dst, src, stride, 8);
1843 }
1844 void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
1845 avg_pixels8_mmx2(dst, src, stride, 8);
1846 }
1847
1848 /* external functions, from idct_mmx.c */
1849 void ff_mmx_idct(DCTELEM *block);
1850 void ff_mmxext_idct(DCTELEM *block);
1851
1852 /* XXX: these functions should be removed as soon as all IDCTs are
1853 converted */
1854 #if CONFIG_GPL
1855 static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
1856 {
1857 ff_mmx_idct (block);
1858 put_pixels_clamped_mmx(block, dest, line_size);
1859 }
1860 static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
1861 {
1862 ff_mmx_idct (block);
1863 add_pixels_clamped_mmx(block, dest, line_size);
1864 }
1865 static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
1866 {
1867 ff_mmxext_idct (block);
1868 put_pixels_clamped_mmx(block, dest, line_size);
1869 }
1870 static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
1871 {
1872 ff_mmxext_idct (block);
1873 add_pixels_clamped_mmx(block, dest, line_size);
1874 }
1875 #endif
1876 static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
1877 {
1878 ff_idct_xvid_mmx (block);
1879 put_pixels_clamped_mmx(block, dest, line_size);
1880 }
1881 static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
1882 {
1883 ff_idct_xvid_mmx (block);
1884 add_pixels_clamped_mmx(block, dest, line_size);
1885 }
1886 static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
1887 {
1888 ff_idct_xvid_mmx2 (block);
1889 put_pixels_clamped_mmx(block, dest, line_size);
1890 }
1891 static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
1892 {
1893 ff_idct_xvid_mmx2 (block);
1894 add_pixels_clamped_mmx(block, dest, line_size);
1895 }
1896
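/* Branchless inverse channel coupling for Vorbis.  The scalar logic being
 * vectorized is essentially the following (reference sketch only, kept out
 * of the build; boundary behaviour at exactly 0.0 may differ): */
#if 0
static void vorbis_inverse_coupling_ref(float *mag, float *ang, int blocksize)
{
    int i;
    for (i = 0; i < blocksize; i++) {
        if (mag[i] > 0.0) {
            if (ang[i] > 0.0) {
                ang[i] = mag[i] - ang[i];
            } else {
                float temp = ang[i];
                ang[i]  = mag[i];
                mag[i] += temp;
            }
        } else {
            if (ang[i] > 0.0) {
                ang[i] += mag[i];
            } else {
                float temp = ang[i];
                ang[i]  = mag[i];
                mag[i] -= temp;
            }
        }
    }
}
#endif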
1897 static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
1898 {
1899 int i;
1900 __asm__ volatile("pxor %%mm7, %%mm7":);
1901 for(i=0; i<blocksize; i+=2) {
1902 __asm__ volatile(
1903 "movq %0, %%mm0 \n\t"
1904 "movq %1, %%mm1 \n\t"
1905 "movq %%mm0, %%mm2 \n\t"
1906 "movq %%mm1, %%mm3 \n\t"
1907 "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
1908 "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
1909 "pslld $31, %%mm2 \n\t" // keep only the sign bit
1910 "pxor %%mm2, %%mm1 \n\t"
1911 "movq %%mm3, %%mm4 \n\t"
1912 "pand %%mm1, %%mm3 \n\t"
1913 "pandn %%mm1, %%mm4 \n\t"
1914 "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
1915 "pfsub %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
1916 "movq %%mm3, %1 \n\t"
1917 "movq %%mm0, %0 \n\t"
1918 :"+m"(mag[i]), "+m"(ang[i])
1919 ::"memory"
1920 );
1921 }
1922 __asm__ volatile("femms");
1923 }
1924 static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
1925 {
1926 int i;
1927
1928 __asm__ volatile(
1929 "movaps %0, %%xmm5 \n\t"
1930 ::"m"(ff_pdw_80000000[0])
1931 );
1932 for(i=0; i<blocksize; i+=4) {
1933 __asm__ volatile(
1934 "movaps %0, %%xmm0 \n\t"
1935 "movaps %1, %%xmm1 \n\t"
1936 "xorps %%xmm2, %%xmm2 \n\t"
1937 "xorps %%xmm3, %%xmm3 \n\t"
1938 "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
1939 "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
1940 "andps %%xmm5, %%xmm2 \n\t" // keep only the sign bit
1941 "xorps %%xmm2, %%xmm1 \n\t"
1942 "movaps %%xmm3, %%xmm4 \n\t"
1943 "andps %%xmm1, %%xmm3 \n\t"
1944 "andnps %%xmm1, %%xmm4 \n\t"
1945 "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
1946 "subps %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
1947 "movaps %%xmm3, %1 \n\t"
1948 "movaps %%xmm0, %0 \n\t"
1949 :"+m"(mag[i]), "+m"(ang[i])
1950 ::"memory"
1951 );
1952 }
1953 }
1954
1955 #define IF1(x) x
1956 #define IF0(x)
1957
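/* IF1(x) expands to x, IF0(x) to nothing, so the mono()/stereo() hooks below
 * splice instructions into the asm at compile time.  MIX5 is the fast path
 * for 5-channel downmixes with the usual L/R-symmetric matrix; the three
 * distinct coefficients are loaded from byte offsets 0, 8 and 24 of the
 * matrix (i.e. matrix[0][0], matrix[1][0], matrix[3][0]) and broadcast with
 * shufps $0. */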
1958 #define MIX5(mono,stereo)\
1959 __asm__ volatile(\
1960 "movss 0(%2), %%xmm5 \n"\
1961 "movss 8(%2), %%xmm6 \n"\
1962 "movss 24(%2), %%xmm7 \n"\
1963 "shufps $0, %%xmm5, %%xmm5 \n"\
1964 "shufps $0, %%xmm6, %%xmm6 \n"\
1965 "shufps $0, %%xmm7, %%xmm7 \n"\
1966 "1: \n"\
1967 "movaps (%0,%1), %%xmm0 \n"\
1968 "movaps 0x400(%0,%1), %%xmm1 \n"\
1969 "movaps 0x800(%0,%1), %%xmm2 \n"\
1970 "movaps 0xc00(%0,%1), %%xmm3 \n"\
1971 "movaps 0x1000(%0,%1), %%xmm4 \n"\
1972 "mulps %%xmm5, %%xmm0 \n"\
1973 "mulps %%xmm6, %%xmm1 \n"\
1974 "mulps %%xmm5, %%xmm2 \n"\
1975 "mulps %%xmm7, %%xmm3 \n"\
1976 "mulps %%xmm7, %%xmm4 \n"\
1977 stereo("addps %%xmm1, %%xmm0 \n")\
1978 "addps %%xmm1, %%xmm2 \n"\
1979 "addps %%xmm3, %%xmm0 \n"\
1980 "addps %%xmm4, %%xmm2 \n"\
1981 mono("addps %%xmm2, %%xmm0 \n")\
1982 "movaps %%xmm0, (%0,%1) \n"\
1983 stereo("movaps %%xmm2, 0x400(%0,%1) \n")\
1984 "add $16, %0 \n"\
1985 "jl 1b \n"\
1986 :"+&r"(i)\
1987 :"r"(samples[0]+len), "r"(matrix)\
1988 :"memory"\
1989 );
1990
1991 #define MIX_MISC(stereo)\
1992 __asm__ volatile(\
1993 "1: \n"\
1994 "movaps (%3,%0), %%xmm0 \n"\
1995 stereo("movaps %%xmm0, %%xmm1 \n")\
1996 "mulps %%xmm6, %%xmm0 \n"\
1997 stereo("mulps %%xmm7, %%xmm1 \n")\
1998 "lea 1024(%3,%0), %1 \n"\
1999 "mov %5, %2 \n"\
2000 "2: \n"\
2001 "movaps (%1), %%xmm2 \n"\
2002 stereo("movaps %%xmm2, %%xmm3 \n")\
2003 "mulps (%4,%2), %%xmm2 \n"\
2004 stereo("mulps 16(%4,%2), %%xmm3 \n")\
2005 "addps %%xmm2, %%xmm0 \n"\
2006 stereo("addps %%xmm3, %%xmm1 \n")\
2007 "add $1024, %1 \n"\
2008 "add $32, %2 \n"\
2009 "jl 2b \n"\
2010 "movaps %%xmm0, (%3,%0) \n"\
2011 stereo("movaps %%xmm1, 1024(%3,%0) \n")\
2012 "add $16, %0 \n"\
2013 "jl 1b \n"\
2014 :"+&r"(i), "=&r"(j), "=&r"(k)\
2015 :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\
2016 :"memory"\
2017 );
2018
2019 static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
2020 {
2021 int (*matrix_cmp)[2] = (int(*)[2])matrix;
2022 intptr_t i,j,k;
2023
2024 i = -len*sizeof(float);
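    /* Coefficients are compared bitwise via matrix_cmp, so exact zeros and
     * exactly equal entries can be detected with integer ops; only such
     * "standard" matrices take the MIX5 fast paths. */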
2025 if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) {
2026 MIX5(IF0,IF1);
2027 } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
2028 MIX5(IF1,IF0);
2029 } else {
2030 DECLARE_ALIGNED_16(float, matrix_simd[in_ch][2][4]);
2031 j = 2*in_ch*sizeof(float);
2032 __asm__ volatile(
2033 "1: \n"
2034 "sub $8, %0 \n"
2035 "movss (%2,%0), %%xmm6 \n"
2036 "movss 4(%2,%0), %%xmm7 \n"
2037 "shufps $0, %%xmm6, %%xmm6 \n"
2038 "shufps $0, %%xmm7, %%xmm7 \n"
2039 "movaps %%xmm6, (%1,%0,4) \n"
2040 "movaps %%xmm7, 16(%1,%0,4) \n"
2041 "jg 1b \n"
2042 :"+&r"(j)
2043 :"r"(matrix_simd), "r"(matrix)
2044 :"memory"
2045 );
2046 if(out_ch == 2) {
2047 MIX_MISC(IF1);
2048 } else {
2049 MIX_MISC(IF0);
2050 }
2051 }
2052 }
2053
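/* dst[i] *= src[i] for i in [0,len).  Both versions walk backwards from the
 * end; len is assumed to be a multiple of 4 (3DNow!) or 8 (SSE), and the SSE
 * version additionally needs 16-byte-aligned buffers (movaps). */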
2054 static void vector_fmul_3dnow(float *dst, const float *src, int len){
2055 x86_reg i = (len-4)*4;
2056 __asm__ volatile(
2057 "1: \n\t"
2058 "movq (%1,%0), %%mm0 \n\t"
2059 "movq 8(%1,%0), %%mm1 \n\t"
2060 "pfmul (%2,%0), %%mm0 \n\t"
2061 "pfmul 8(%2,%0), %%mm1 \n\t"
2062 "movq %%mm0, (%1,%0) \n\t"
2063 "movq %%mm1, 8(%1,%0) \n\t"
2064 "sub $16, %0 \n\t"
2065 "jge 1b \n\t"
2066 "femms \n\t"
2067 :"+r"(i)
2068 :"r"(dst), "r"(src)
2069 :"memory"
2070 );
2071 }
2072 static void vector_fmul_sse(float *dst, const float *src, int len){
2073 x86_reg i = (len-8)*4;
2074 __asm__ volatile(
2075 "1: \n\t"
2076 "movaps (%1,%0), %%xmm0 \n\t"
2077 "movaps 16(%1,%0), %%xmm1 \n\t"
2078 "mulps (%2,%0), %%xmm0 \n\t"
2079 "mulps 16(%2,%0), %%xmm1 \n\t"
2080 "movaps %%xmm0, (%1,%0) \n\t"
2081 "movaps %%xmm1, 16(%1,%0) \n\t"
2082 "sub $32, %0 \n\t"
2083 "jge 1b \n\t"
2084 :"+r"(i)
2085 :"r"(dst), "r"(src)
2086 :"memory"
2087 );
2088 }
2089
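/* dst[i] = src0[i] * src1[len-1-i]: src1 is consumed back to front, with
 * pswapd (3DNow!ext) or shufps $0x1b (SSE) reversing the element order
 * inside each register. */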
2090 static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
2091 x86_reg i = len*4-16;
2092 __asm__ volatile(
2093 "1: \n\t"
2094 "pswapd 8(%1), %%mm0 \n\t"
2095 "pswapd (%1), %%mm1 \n\t"
2096 "pfmul (%3,%0), %%mm0 \n\t"
2097 "pfmul 8(%3,%0), %%mm1 \n\t"
2098 "movq %%mm0, (%2,%0) \n\t"
2099 "movq %%mm1, 8(%2,%0) \n\t"
2100 "add $16, %1 \n\t"
2101 "sub $16, %0 \n\t"
2102 "jge 1b \n\t"
2103 :"+r"(i), "+r"(src1)
2104 :"r"(dst), "r"(src0)
2105 );
2106 __asm__ volatile("femms");
2107 }
2108 static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
2109 x86_reg i = len*4-32;
2110 __asm__ volatile(
2111 "1: \n\t"
2112 "movaps 16(%1), %%xmm0 \n\t"
2113 "movaps (%1), %%xmm1 \n\t"
2114 "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
2115 "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
2116 "mulps (%3,%0), %%xmm0 \n\t"
2117 "mulps 16(%3,%0), %%xmm1 \n\t"
2118 "movaps %%xmm0, (%2,%0) \n\t"
2119 "movaps %%xmm1, 16(%2,%0) \n\t"
2120 "add $32, %1 \n\t"
2121 "sub $32, %0 \n\t"
2122 "jge 1b \n\t"
2123 :"+r"(i), "+r"(src1)
2124 :"r"(dst), "r"(src0)
2125 );
2126 }
2127
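/* dst[i*step] = src0[i]*src1[i] + src2[i].  Only the bias-free (src3 == 0)
 * cases with step 1 (contiguous) or step 2 (interleaved output) are handled
 * in asm; everything else falls back to ff_vector_fmul_add_add_c(). */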
2128 static void vector_fmul_add_add_3dnow(float *dst, const float *src0, const float *src1,
2129 const float *src2, int src3, int len, int step){
2130 x86_reg i = (len-4)*4;
2131 if(step == 2 && src3 == 0){
2132 dst += (len-4)*2;
2133 __asm__ volatile(
2134 "1: \n\t"
2135 "movq (%2,%0), %%mm0 \n\t"
2136 "movq 8(%2,%0), %%mm1 \n\t"
2137 "pfmul (%3,%0), %%mm0 \n\t"
2138 "pfmul 8(%3,%0), %%mm1 \n\t"
2139 "pfadd (%4,%0), %%mm0 \n\t"
2140 "pfadd 8(%4,%0), %%mm1 \n\t"
2141 "movd %%mm0, (%1) \n\t"
2142 "movd %%mm1, 16(%1) \n\t"
2143 "psrlq $32, %%mm0 \n\t"
2144 "psrlq $32, %%mm1 \n\t"
2145 "movd %%mm0, 8(%1) \n\t"
2146 "movd %%mm1, 24(%1) \n\t"
2147 "sub $32, %1 \n\t"
2148 "sub $16, %0 \n\t"
2149 "jge 1b \n\t"
2150 :"+r"(i), "+r"(dst)
2151 :"r"(src0), "r"(src1), "r"(src2)
2152 :"memory"
2153 );
2154 }
2155 else if(step == 1 && src3 == 0){
2156 __asm__ volatile(
2157 "1: \n\t"
2158 "movq (%2,%0), %%mm0 \n\t"
2159 "movq 8(%2,%0), %%mm1 \n\t"
2160 "pfmul (%3,%0), %%mm0 \n\t"
2161 "pfmul 8(%3,%0), %%mm1 \n\t"
2162 "pfadd (%4,%0), %%mm0 \n\t"
2163 "pfadd 8(%4,%0), %%mm1 \n\t"
2164 "movq %%mm0, (%1,%0) \n\t"
2165 "movq %%mm1, 8(%1,%0) \n\t"
2166 "sub $16, %0 \n\t"
2167 "jge 1b \n\t"
2168 :"+r"(i)
2169 :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
2170 :"memory"
2171 );
2172 }
2173 else
2174 ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
2175 __asm__ volatile("femms");
2176 }
2177 static void vector_fmul_add_add_sse(float *dst, const float *src0, const float *src1,
2178 const float *src2, int src3, int len, int step){
2179 x86_reg i = (len-8)*4;
2180 if(step == 2 && src3 == 0){
2181 dst += (len-8)*2;
2182 __asm__ volatile(
2183 "1: \n\t"
2184 "movaps (%2,%0), %%xmm0 \n\t"
2185 "movaps 16(%2,%0), %%xmm1 \n\t"
2186 "mulps (%3,%0), %%xmm0 \n\t"
2187 "mulps 16(%3,%0), %%xmm1 \n\t"
2188 "addps (%4,%0), %%xmm0 \n\t"
2189 "addps 16(%4,%0), %%xmm1 \n\t"
2190 "movss %%xmm0, (%1) \n\t"
2191 "movss %%xmm1, 32(%1) \n\t"
2192 "movhlps %%xmm0, %%xmm2 \n\t"
2193 "movhlps %%xmm1, %%xmm3 \n\t"
2194 "movss %%xmm2, 16(%1) \n\t"
2195 "movss %%xmm3, 48(%1) \n\t"
2196 "shufps $0xb1, %%xmm0, %%xmm0 \n\t"
2197 "shufps $0xb1, %%xmm1, %%xmm1 \n\t"
2198 "movss %%xmm0, 8(%1) \n\t"
2199 "movss %%xmm1, 40(%1) \n\t"
2200 "movhlps %%xmm0, %%xmm2 \n\t"
2201 "movhlps %%xmm1, %%xmm3 \n\t"
2202 "movss %%xmm2, 24(%1) \n\t"
2203 "movss %%xmm3, 56(%1) \n\t"
2204 "sub $64, %1 \n\t"
2205 "sub $32, %0 \n\t"
2206 "jge 1b \n\t"
2207 :"+r"(i), "+r"(dst)
2208 :"r"(src0), "r"(src1), "r"(src2)
2209 :"memory"
2210 );
2211 }
2212 else if(step == 1 && src3 == 0){
2213 __asm__ volatile(
2214 "1: \n\t"
2215 "movaps (%2,%0), %%xmm0 \n\t"
2216 "movaps 16(%2,%0), %%xmm1 \n\t"
2217 "mulps (%3,%0), %%xmm0 \n\t"
2218 "mulps 16(%3,%0), %%xmm1 \n\t"
2219 "addps (%4,%0), %%xmm0 \n\t"
2220 "addps 16(%4,%0), %%xmm1 \n\t"
2221 "movaps %%xmm0, (%1,%0) \n\t"
2222 "movaps %%xmm1, 16(%1,%0) \n\t"
2223 "sub $32, %0 \n\t"
2224 "jge 1b \n\t"
2225 :"+r"(i)
2226 :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
2227 :"memory"
2228 );
2229 }
2230 else
2231 ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
2232 }
2233
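/* MDCT overlap-add windowing.  With i over [-len,0) and j = -i-1 this
 * computes, like ff_vector_fmul_window_c,
 *     dst[i] = src0[i]*win[j] - src1[j]*win[i]
 *     dst[j] = src0[i]*win[i] + src1[j]*win[j]
 * where dst, src0 and win are pre-offset by len.  Only add_bias == 0 is
 * vectorized; pswapd / shufps $0x1b supply the reversed-order operands. */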
2234 static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
2235 const float *win, float add_bias, int len){
2236 #if HAVE_6REGS
2237 if(add_bias == 0){
2238 x86_reg i = -len*4;
2239 x86_reg j = len*4-8;
2240 __asm__ volatile(
2241 "1: \n"
2242 "pswapd (%5,%1), %%mm1 \n"
2243 "movq (%5,%0), %%mm0 \n"
2244 "pswapd (%4,%1), %%mm5 \n"
2245 "movq (%3,%0), %%mm4 \n"
2246 "movq %%mm0, %%mm2 \n"
2247 "movq %%mm1, %%mm3 \n"
2248 "pfmul %%mm4, %%mm2 \n" // src0[len+i]*win[len+i]
2249 "pfmul %%mm5, %%mm3 \n" // src1[ j]*win[len+j]
2250 "pfmul %%mm4, %%mm1 \n" // src0[len+i]*win[len+j]
2251 "pfmul %%mm5, %%mm0 \n" // src1[ j]*win[len+i]
2252 "pfadd %%mm3, %%mm2 \n"
2253 "pfsub %%mm0, %%mm1 \n"
2254 "pswapd %%mm2, %%mm2 \n"
2255 "movq %%mm1, (%2,%0) \n"
2256 "movq %%mm2, (%2,%1) \n"
2257 "sub $8, %1 \n"
2258 "add $8, %0 \n"
2259 "jl 1b \n"
2260 "femms \n"
2261 :"+r"(i), "+r"(j)
2262 :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
2263 );
2264 }else
2265 #endif
2266 ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
2267 }
2268
2269 static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1,
2270 const float *win, float add_bias, int len){
2271 #if HAVE_6REGS
2272 if(add_bias == 0){
2273 x86_reg i = -len*4;
2274 x86_reg j = len*4-16;
2275 __asm__ volatile(
2276 "1: \n"
2277 "movaps (%5,%1), %%xmm1 \n"
2278 "movaps (%5,%0), %%xmm0 \n"
2279 "movaps (%4,%1), %%xmm5 \n"
2280 "movaps (%3,%0), %%xmm4 \n"
2281 "shufps $0x1b, %%xmm1, %%xmm1 \n"
2282 "shufps $0x1b, %%xmm5, %%xmm5 \n"
2283 "movaps %%xmm0, %%xmm2 \n"
2284 "movaps %%xmm1, %%xmm3 \n"
2285 "mulps %%xmm4, %%xmm2 \n" // src0[len+i]*win[len+i]
2286 "mulps %%xmm5, %%xmm3 \n" // src1[ j]*win[len+j]
2287 "mulps %%xmm4, %%xmm1 \n" // src0[len+i]*win[len+j]
2288 "mulps %%xmm5, %%xmm0 \n" // src1[ j]*win[len+i]
2289 "addps %%xmm3, %%xmm2 \n"
2290 "subps %%xmm0, %%xmm1 \n"
2291 "shufps $0x1b, %%xmm2, %%xmm2 \n"
2292 "movaps %%xmm1, (%2,%0) \n"
2293 "movaps %%xmm2, (%2,%1) \n"
2294 "sub $16, %1 \n"
2295 "add $16, %0 \n"
2296 "jl 1b \n"
2297 :"+r"(i), "+r"(j)
2298 :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
2299 );
2300 }else
2301 #endif
2302 ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
2303 }
2304
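/* dst[i] = src[i] * mul: int32 -> float conversion fused with a scalar gain.
 * The SSE version converts 64 bits at a time with cvtpi2ps and merges the
 * halves with movlhps; the SSE2 version uses full-width cvtdq2ps.  len is
 * assumed to be a multiple of 8 and the buffers 16-byte aligned. */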
2305 static void int32_to_float_fmul_scalar_sse(float *dst, const int *src, float mul, int len)
2306 {
2307 x86_reg i = -4*len;
2308 __asm__ volatile(
2309 "movss %3, %%xmm4 \n"
2310 "shufps $0, %%xmm4, %%xmm4 \n"
2311 "1: \n"
2312 "cvtpi2ps (%2,%0), %%xmm0 \n"
2313 "cvtpi2ps 8(%2,%0), %%xmm1 \n"
2314 "cvtpi2ps 16(%2,%0), %%xmm2 \n"
2315 "cvtpi2ps 24(%2,%0), %%xmm3 \n"
2316 "movlhps %%xmm1, %%xmm0 \n"
2317 "movlhps %%xmm3, %%xmm2 \n"
2318 "mulps %%xmm4, %%xmm0 \n"
2319 "mulps %%xmm4, %%xmm2 \n"
2320 "movaps %%xmm0, (%1,%0) \n"
2321 "movaps %%xmm2, 16(%1,%0) \n"
2322 "add $32, %0 \n"
2323 "jl 1b \n"
2324 :"+r"(i)
2325 :"r"(dst+len), "r"(src+len), "m"(mul)
2326 );
2327 }
2328
2329 static void int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len)
2330 {
2331 x86_reg i = -4*len;
2332 __asm__ volatile(
2333 "movss %3, %%xmm4 \n"
2334 "shufps $0, %%xmm4, %%xmm4 \n"
2335 "1: \n"
2336 "cvtdq2ps (%2,%0), %%xmm0 \n"
2337 "cvtdq2ps 16(%2,%0), %%xmm1 \n"
2338 "mulps %%xmm4, %%xmm0 \n"
2339 "mulps %%xmm4, %%xmm1 \n"
2340 "movaps %%xmm0, (%1,%0) \n"
2341 "movaps %%xmm1, 16(%1,%0) \n"
2342 "add $32, %0 \n"
2343 "jl 1b \n"
2344 :"+r"(i)
2345 :"r"(dst+len), "r"(src+len), "m"(mul)
2346 );
2347 }
2348
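/* float -> int16 with saturation (packssdw).  The prologue doubles len into
 * the output byte count, advances dst and src to their ends and negates the
 * index, so the loop counts a negative offset up to zero, 8 samples per
 * iteration; len must be a multiple of 8. */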
2349 static void float_to_int16_3dnow(int16_t *dst, const float *src, long len){
2350 x86_reg reglen = len;
2351 // not bit-exact: pf2id uses different rounding than C and SSE
2352 __asm__ volatile(
2353 "add %0 , %0 \n\t"
2354 "lea (%2,%0,2) , %2 \n\t"
2355 "add %0 , %1 \n\t"
2356 "neg %0 \n\t"
2357 "1: \n\t"
2358 "pf2id (%2,%0,2) , %%mm0 \n\t"
2359 "pf2id 8(%2,%0,2) , %%mm1 \n\t"
2360 "pf2id 16(%2,%0,2) , %%mm2 \n\t"
2361 "pf2id 24(%2,%0,2) , %%mm3 \n\t"
2362 "packssdw %%mm1 , %%mm0 \n\t"
2363 "packssdw %%mm3 , %%mm2 \n\t"
2364 "movq %%mm0 , (%1,%0) \n\t"
2365 "movq %%mm2 , 8(%1,%0) \n\t"
2366 "add $16 , %0 \n\t"
2367 " js 1b \n\t"
2368 "femms \n\t"
2369 :"+r"(reglen), "+r"(dst), "+r"(src)
2370 );
2371 }
2372 static void float_to_int16_sse(int16_t *dst, const float *src, long len){
2373 x86_reg reglen = len;
2374 __asm__ volatile(
2375 "add %0 , %0 \n\t"
2376 "lea (%2,%0,2) , %2 \n\t"
2377 "add %0 , %1 \n\t"
2378 "neg %0 \n\t"
2379 "1: \n\t"
2380 "cvtps2pi (%2,%0,2) , %%mm0 \n\t"
2381 "cvtps2pi 8(%2,%0,2) , %%mm1 \n\t"
2382 "cvtps2pi 16(%2,%0,2) , %%mm2 \n\t"
2383 "cvtps2pi 24(%2,%0,2) , %%mm3 \n\t"
2384 "packssdw %%mm1 , %%mm0 \n\t"
2385 "packssdw %%mm3 , %%mm2 \n\t"
2386 "movq %%mm0 , (%1,%0) \n\t"
2387 "movq %%mm2 , 8(%1,%0) \n\t"
2388 "add $16 , %0 \n\t"
2389 " js 1b \n\t"
2390 "emms \n\t"
2391 :"+r"(reglen), "+r"(dst), "+r"(src)
2392 );
2393 }
2394
2395 static void float_to_int16_sse2(int16_t *dst, const float *src, long len){
2396 x86_reg reglen = len;
2397 __asm__ volatile(
2398 "add %0 , %0 \n\t"
2399 "lea (%2,%0,2) , %2 \n\t"
2400 "add %0 , %1 \n\t"
2401 "neg %0 \n\t"
2402 "1: \n\t"
2403 "cvtps2dq (%2,%0,2) , %%xmm0 \n\t"
2404 "cvtps2dq 16(%2,%0,2) , %%xmm1 \n\t"
2405 "packssdw %%xmm1 , %%xmm0 \n\t"
2406 "movdqa %%xmm0 , (%1,%0) \n\t"
2407 "add $16 , %0 \n\t"
2408 " js 1b \n\t"
2409 :"+r"(reglen), "+r"(dst), "+r"(src)
2410 );
2411 }
2412
2413 #if HAVE_YASM
2414 void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len);
2415 void ff_float_to_int16_interleave6_3dnow(int16_t *dst, const float **src, int len);
2416 void ff_float_to_int16_interleave6_3dn2(int16_t *dst, const float **src, int len);
2417 void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *top, uint8_t *diff, int w, int *left, int *left_top);
2418 void ff_x264_deblock_v_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
2419 void ff_x264_deblock_h_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
2420 void ff_x264_deblock_v8_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta);
2421 void ff_x264_deblock_h_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta);
2422 #if ARCH_X86_32
2423 static void ff_x264_deblock_v_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta)
2424 {
2425 ff_x264_deblock_v8_luma_intra_mmxext(pix+0, stride, alpha, beta);
2426 ff_x264_deblock_v8_luma_intra_mmxext(pix+8, stride, alpha, beta);
2427 }
2428 #endif
2429 void ff_x264_deblock_v_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta);
2430 void ff_x264_deblock_h_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta);
2431 #else
2432 #define ff_float_to_int16_interleave6_sse(a,b,c) float_to_int16_interleave_misc_sse(a,b,c,6)
2433 #define ff_float_to_int16_interleave6_3dnow(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6)
2434 #define ff_float_to_int16_interleave6_3dn2(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6)
2435 #endif
2436 #define ff_float_to_int16_interleave6_sse2 ff_float_to_int16_interleave6_sse
2437
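/* Per CPU flavour this instantiates a generic any-channel-count version
 * (convert into a temporary, scatter with a C loop) plus a fast 2-channel
 * path whose asm body converts both channels and zips them word-wise with
 * punpcklwd/punpckhwd (movhlps+punpcklwd in the sse2 body). */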
2438 #define FLOAT_TO_INT16_INTERLEAVE(cpu, body) \
2439 /* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2 */\
2440 static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\
2441 DECLARE_ALIGNED_16(int16_t, tmp[len]);\
2442 int i,j,c;\
2443 for(c=0; c<channels; c++){\
2444 float_to_int16_##cpu(tmp, src[c], len);\
2445 for(i=0, j=c; i<len; i++, j+=channels)\
2446 dst[j] = tmp[i];\
2447 }\
2448 }\
2449 \
2450 static void float_to_int16_interleave_##cpu(int16_t *dst, const float **src, long len, int channels){\
2451 if(channels==1)\
2452 float_to_int16_##cpu(dst, src[0], len);\
2453 else if(channels==2){\
2454 x86_reg reglen = len; \
2455 const float *src0 = src[0];\
2456 const float *src1 = src[1];\
2457 __asm__ volatile(\
2458 "shl $2, %0 \n"\
2459 "add %0, %1 \n"\
2460 "add %0, %2 \n"\
2461 "add %0, %3 \n"\
2462 "neg %0 \n"\
2463 body\
2464 :"+r"(reglen), "+r"(dst), "+r"(src0), "+r"(src1)\
2465 );\
2466 }else if(channels==6){\
2467 ff_float_to_int16_interleave6_##cpu(dst, src, len);\
2468 }else\
2469 float_to_int16_interleave_misc_##cpu(dst, src, len, channels);\
2470 }
2471
2472 FLOAT_TO_INT16_INTERLEAVE(3dnow,
2473 "1: \n"
2474 "pf2id (%2,%0), %%mm0 \n"
2475 "pf2id 8(%2,%0), %%mm1 \n"
2476 "pf2id (%3,%0), %%mm2 \n"
2477 "pf2id 8(%3,%0), %%mm3 \n"
2478 "packssdw %%mm1, %%mm0 \n"
2479 "packssdw %%mm3, %%mm2 \n"
2480 "movq %%mm0, %%mm1 \n"
2481 "punpcklwd %%mm2, %%mm0 \n"
2482 "punpckhwd %%mm2, %%mm1 \n"
2483 "movq %%mm0, (%1,%0)\n"
2484 "movq %%mm1, 8(%1,%0)\n"
2485 "add $16, %0 \n"
2486 "js 1b \n"
2487 "femms \n"
2488 )
2489
2490 FLOAT_TO_INT16_INTERLEAVE(sse,
2491 "1: \n"
2492 "cvtps2pi (%2,%0), %%mm0 \n"
2493 "cvtps2pi 8(%2,%0), %%mm1 \n"
2494 "cvtps2pi (%3,%0), %%mm2 \n"
2495 "cvtps2pi 8(%3,%0), %%mm3 \n"
2496 "packssdw %%mm1, %%mm0 \n"
2497 "packssdw %%mm3, %%mm2 \n"
2498 "movq %%mm0, %%mm1 \n"
2499 "punpcklwd %%mm2, %%mm0 \n"
2500 "punpckhwd %%mm2, %%mm1 \n"
2501 "movq %%mm0, (%1,%0)\n"
2502 "movq %%mm1, 8(%1,%0)\n"
2503 "add $16, %0 \n"
2504 "js 1b \n"
2505 "emms \n"
2506 )
2507
2508 FLOAT_TO_INT16_INTERLEAVE(sse2,
2509 "1: \n"
2510 "cvtps2dq (%2,%0), %%xmm0 \n"
2511 "cvtps2dq (%3,%0), %%xmm1 \n"
2512 "packssdw %%xmm1, %%xmm0 \n"
2513 "movhlps %%xmm0, %%xmm1 \n"
2514 "punpcklwd %%xmm1, %%xmm0 \n"
2515 "movdqa %%xmm0, (%1,%0) \n"
2516 "add $16, %0 \n"
2517 "js 1b \n"
2518 )
2519
2520 static void float_to_int16_interleave_3dn2(int16_t *dst, const float **src, long len, int channels){
2521 if(channels==6)
2522 ff_float_to_int16_interleave6_3dn2(dst, src, len);
2523 else
2524 float_to_int16_interleave_3dnow(dst, src, len, channels);
2525 }
2526
2527
2528 void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width);
2529 void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, int width);
2530 void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
2531 void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
2532 void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
2533 int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
2534 void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
2535 int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
2536
2537
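/* v1[i] += v2[i] over `order` int16 elements.  v1 must be 16-byte aligned
 * (movdqa stores, aligned paddw memory operands); v2 may be unaligned
 * (movdqu); order is assumed to be a multiple of 16. */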
2538 static void add_int16_sse2(int16_t * v1, int16_t * v2, int order)
2539 {
2540 x86_reg o = -(order << 1);
2541 v1 += order;
2542 v2 += order;
2543 __asm__ volatile(
2544 "1: \n\t"
2545 "movdqu (%1,%2), %%xmm0 \n\t"
2546 "movdqu 16(%1,%2), %%xmm1 \n\t"
2547 "paddw (%0,%2), %%xmm0 \n\t"
2548 "paddw 16(%0,%2), %%xmm1 \n\t"
2549 "movdqa %%xmm0, (%0,%2) \n\t"
2550 "movdqa %%xmm1, 16(%0,%2) \n\t"
2551 "add $32, %2 \n\t"
2552 "js 1b \n\t"
2553 : "+r"(v1), "+r"(v2), "+r"(o)
2554 );
2555 }
2556
2557 static void sub_int16_sse2(int16_t * v1, int16_t * v2, int order)
2558 {
2559 x86_reg o = -(order << 1);
2560 v1 += order;
2561 v2 += order;
2562 __asm__ volatile(
2563 "1: \n\t"
2564 "movdqa (%0,%2), %%xmm0 \n\t"
2565 "movdqa 16(%0,%2), %%xmm2 \n\t"
2566 "movdqu (%1,%2), %%xmm1 \n\t"
2567 "movdqu 16(%1,%2), %%xmm3 \n\t"
2568 "psubw %%xmm1, %%xmm0 \n\t"
2569 "psubw %%xmm3, %%xmm2 \n\t"
2570 "movdqa %%xmm0, (%0,%2) \n\t"
2571 "movdqa %%xmm2, 16(%0,%2) \n\t"
2572 "add $32, %2 \n\t"
2573 "js 1b \n\t"
2574 : "+r"(v1), "+r"(v2), "+r"(o)
2575 );
2576 }
2577
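/* Returns roughly (sum of v1[i]*v2[i]) >> shift; a scalar sketch of the
 * intent (reference only -- the asm shifts partially reduced sums rather
 * than each product, so rounding can differ): */
#if 0
static int32_t scalarproduct_int16_ref(int16_t *v1, int16_t *v2, int order, int shift)
{
    int32_t res = 0;
    while (order--)
        res += (*v1++ * *v2++) >> shift;
    return res;
}
#endif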
2578 static int32_t scalarproduct_int16_sse2(int16_t * v1, int16_t * v2, int order, int shift)
2579 {
2580 int res = 0;
2581 DECLARE_ALIGNED_16(xmm_reg, sh);
2582 x86_reg o = -(order << 1);
2583
2584 v1 += order;
2585 v2 += order;
2586 sh.a = shift;
2587 __asm__ volatile(
2588 "pxor %%xmm7, %%xmm7 \n\t"
2589 "1: \n\t"
2590 "movdqu (%0,%3), %%xmm0 \n\t"
2591 "movdqu 16(%0,%3), %%xmm1 \n\t"
2592 "pmaddwd (%1,%3), %%xmm0 \n\t"
2593 "pmaddwd 16(%1,%3), %%xmm1 \n\t"
2594 "paddd %%xmm0, %%xmm7 \n\t"
2595 "paddd %%xmm1, %%xmm7 \n\t"
2596 "add $32, %3 \n\t"
2597 "js 1b \n\t"
2598 "movhlps %%xmm7, %%xmm2 \n\t"
2599 "paddd %%xmm2, %%xmm7 \n\t"
2600 "psrad %4, %%xmm7 \n\t"
2601 "pshuflw $0x4E, %%xmm7,%%xmm2 \n\t"
2602 "paddd %%xmm2, %%xmm7 \n\t"
2603 "movd %%xmm7, %2 \n\t"
2604 : "+r"(v1), "+r"(v2), "=r"(res), "+r"(o)
2605 : "m"(sh)
2606 );
2607 return res;
2608 }
2609
2610 void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
2611 {
2612 mm_flags = mm_support();
2613
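    /* avctx->dsp_mask lets the caller override CPU detection: with
     * FF_MM_FORCE the masked flags are forced on, otherwise forced off. */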
2614 if (avctx->dsp_mask) {
2615 if (avctx->dsp_mask & FF_MM_FORCE)
2616 mm_flags |= (avctx->dsp_mask & 0xffff);
2617 else
2618 mm_flags &= ~(avctx->dsp_mask & 0xffff);
2619 }
2620
2621 #if 0
2622 av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
2623 if (mm_flags & FF_MM_MMX)
2624 av_log(avctx, AV_LOG_INFO, " mmx");
2625 if (mm_flags & FF_MM_MMX2)
2626 av_log(avctx, AV_LOG_INFO, " mmx2");
2627 if (mm_flags & FF_MM_3DNOW)
2628 av_log(avctx, AV_LOG_INFO, " 3dnow");
2629 if (mm_flags & FF_MM_SSE)
2630 av_log(avctx, AV_LOG_INFO, " sse");
2631 if (mm_flags & FF_MM_SSE2)
2632 av_log(avctx, AV_LOG_INFO, " sse2");
2633 av_log(avctx, AV_LOG_INFO, "\n");
2634 #endif
2635
2636 if (mm_flags & FF_MM_MMX) {
2637 const int idct_algo= avctx->idct_algo;
2638
2639 if(avctx->lowres==0){
2640 if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
2641 c->idct_put= ff_simple_idct_put_mmx;
2642 c->idct_add= ff_simple_idct_add_mmx;
2643 c->idct = ff_simple_idct_mmx;
2644 c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
2645 #if CONFIG_GPL
2646 }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
2647 if(mm_flags & FF_MM_MMX2){
2648 c->idct_put= ff_libmpeg2mmx2_idct_put;
2649 c->idct_add= ff_libmpeg2mmx2_idct_add;
2650 c->idct = ff_mmxext_idct;
2651 }else{
2652 c->idct_put= ff_libmpeg2mmx_idct_put;
2653 c->idct_add= ff_libmpeg2mmx_idct_add;
2654 c->idct = ff_mmx_idct;
2655 }
2656 c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
2657 #endif
2658 }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER || CONFIG_THEORA_DECODER) &&
2659 idct_algo==FF_IDCT_VP3){
2660 if(mm_flags & FF_MM_SSE2){
2661 c->idct_put= ff_vp3_idct_put_sse2;
2662 c->idct_add= ff_vp3_idct_add_sse2;
2663 c->idct = ff_vp3_idct_sse2;
2664 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
2665 }else{
2666 c->idct_put= ff_vp3_idct_put_mmx;
2667 c->idct_add= ff_vp3_idct_add_mmx;
2668 c->idct = ff_vp3_idct_mmx;
2669 c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
2670 }
2671 }else if(idct_algo==FF_IDCT_CAVS){
2672 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
2673 }else if(idct_algo==FF_IDCT_XVIDMMX){
2674 if(mm_flags & FF_MM_SSE2){
2675 c->idct_put= ff_idct_xvid_sse2_put;
2676 c->idct_add= ff_idct_xvid_sse2_add;
2677 c->idct = ff_idct_xvid_sse2;
2678 c->idct_permutation_type= FF_SSE2_IDCT_PERM;
2679 }else if(mm_flags & FF_MM_MMX2){
2680 c->idct_put= ff_idct_xvid_mmx2_put;
2681 c->idct_add= ff_idct_xvid_mmx2_add;
2682 c->idct = ff_idct_xvid_mmx2;
2683 }else{
2684 c->idct_put= ff_idct_xvid_mmx_put;
2685 c->idct_add= ff_idct_xvid_mmx_add;
2686 c->idct = ff_idct_xvid_mmx;
2687 }
2688 }
2689 }
2690
2691 c->put_pixels_clamped = put_pixels_clamped_mmx;
2692 c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
2693 c->add_pixels_clamped = add_pixels_clamped_mmx;
2694 c->clear_block = clear_block_mmx;
2695 c->clear_blocks = clear_blocks_mmx;
2696 if (mm_flags & FF_MM_SSE){
2697 c->clear_block = clear_block_sse;
2698 c->clear_blocks = clear_blocks_sse;
2699 }
2700
2701 #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
2702 c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
2703 c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
2704 c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
2705 c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU
2706
2707 SET_HPEL_FUNCS(put, 0, 16, mmx);
2708 SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
2709 SET_HPEL_FUNCS(avg, 0, 16, mmx);
2710 SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
2711 SET_HPEL_FUNCS(put, 1, 8, mmx);
2712 SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
2713 SET_HPEL_FUNCS(avg, 1, 8, mmx);
2714 SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);
2715
2716 c->gmc= gmc_mmx;
2717
2718 c->add_bytes= add_bytes_mmx;
2719 c->add_bytes_l2= add_bytes_l2_mmx;
2720
2721 c->draw_edges = draw_edges_mmx;
2722
2723 if (CONFIG_ANY_H263) {
2724 c->h263_v_loop_filter= h263_v_loop_filter_mmx;
2725 c->h263_h_loop_filter= h263_h_loop_filter_mmx;
2726 }
2727 c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_rnd;
2728 c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx;
2729 c->put_no_rnd_vc1_chroma_pixels_tab[0]= put_vc1_chroma_mc8_mmx_nornd;
2730
2731 c->put_rv40_chroma_pixels_tab[0]= put_rv40_chroma_mc8_mmx;
2732 c->put_rv40_chroma_pixels_tab[1]= put_rv40_chroma_mc4_mmx;
2733
2734 c->h264_idct_dc_add=
2735 c->h264_idct_add= ff_h264_idct_add_mmx;
2736 c->h264_idct8_dc_add=
2737 c->h264_idct8_add= ff_h264_idct8_add_mmx;
2738
2739 c->h264_idct_add16 = ff_h264_idct_add16_mmx;
2740 c->h264_idct8_add4 = ff_h264_idct8_add4_mmx;
2741 c->h264_idct_add8 = ff_h264_idct_add8_mmx;
2742 c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx;
2743
2744 if (CONFIG_VP6_DECODER) {
2745 c->vp6_filter_diag4 = ff_vp6_filter_diag4_mmx;
2746 }
2747
2748 if (mm_flags & FF_MM_MMX2) {
2749 c->prefetch = prefetch_mmx2;
2750
2751 c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
2752 c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
2753
2754 c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
2755 c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
2756 c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
2757
2758 c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
2759 c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
2760
2761 c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
2762 c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
2763 c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
2764
2765 c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
2766 c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;
2767 c->h264_idct_add16 = ff_h264_idct_add16_mmx2;
2768 c->h264_idct8_add4 = ff_h264_idct8_add4_mmx2;
2769 c->h264_idct_add8 = ff_h264_idct_add8_mmx2;
2770 c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx2;
2771
2772 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
2773 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
2774 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
2775 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
2776 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
2777 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
2778 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
2779
2780 if (CONFIG_VP3_DECODER || CONFIG_THEORA_DECODER) {
2781 c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2;
2782 c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2;
2783 }
2784 }
2785
2786 #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \
2787 c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \
2788 c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \
2789 c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \
2790 c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \
2791 c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \
2792 c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \
2793 c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \
2794 c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \
2795 c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \
2796 c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \
2797 c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \
2798 c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \
2799 c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \
2800 c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \
2801 c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \
2802 c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU
2803
2804 SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2);
2805 SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2);
2806 SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2);
2807 SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2);
2808 SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2);
2809 SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2);
2810
2811 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2);
2812 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2);
2813 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2);
2814 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2);
2815 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2);
2816 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2);
2817
2818 SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2);
2819 SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2);
2820 SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2);
2821 SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2);
2822
2823 c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_mmx2;
2824 c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_mmx2;
2825
2826 c->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_vc1_chroma_mc8_mmx2_nornd;
2827
2828 c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2_rnd;
2829 c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2;
2830 c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2;
2831 c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2;
2832 c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
2833 c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
2834 c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
2835 c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
2836 c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
2837 c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;
2838 c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;
2839
2840 c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
2841 c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
2842 c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
2843 c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
2844 c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
2845 c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
2846 c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
2847 c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;
2848
2849 c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
2850 c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
2851 c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
2852 c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
2853 c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
2854 c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
2855 c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
2856 c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;
2857
2858 #if HAVE_YASM
2859 c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
2860 #endif
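        /* 3DNow! support doubles as an "is AMD" heuristic for choosing the
         * cmov-based median predictor here. */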
2861 #if HAVE_7REGS && HAVE_TEN_OPERANDS
2862 if( mm_flags&FF_MM_3DNOW )
2863 c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
2864 #endif
2865
2866 if (CONFIG_CAVS_DECODER)
2867 ff_cavsdsp_init_mmx2(c, avctx);
2868
2869 if (CONFIG_VC1_DECODER || CONFIG_WMV3_DECODER)
2870 ff_vc1dsp_init_mmx(c, avctx);
2871
2872 c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
2873 } else if (mm_flags & FF_MM_3DNOW) {
2874 c->prefetch = prefetch_3dnow;
2875
2876 c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
2877 c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
2878
2879 c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
2880 c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
2881 c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
2882
2883 c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
2884 c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
2885
2886 c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
2887 c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
2888 c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
2889
2890 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
2891 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
2892 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
2893 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
2894 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
2895 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
2896 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
2897 }
2898
2899 SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow);
2900 SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow);
2901 SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow);
2902 SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow);
2903 SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
2904 SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);
2905
2906 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
2907 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
2908 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
2909 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow);
2910 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow);
2911 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow);
2912
2913 SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow);
2914 SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow);
2915 SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
2916 SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);
2917
2918 c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow_rnd;
2919 c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;
2920
2921 c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_3dnow;
2922 c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_3dnow;
2923
2924 if (CONFIG_CAVS_DECODER)
2925 ff_cavsdsp_init_3dnow(c, avctx);
2926 }
2927
2928
2929 #define H264_QPEL_FUNCS(x, y, CPU)\
2930 c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
2931 c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
2932 c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
2933 c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
2934 if((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW)){
2935 // these functions are slower than mmx on AMD, but faster on Intel
2936 /* FIXME works in most codecs, but crashes svq1 due to unaligned chroma
2937 c->put_pixels_tab[0][0] = put_pixels16_sse2;
2938 c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
2939 */
2940 H264_QPEL_FUNCS(0, 0, sse2);
2941 }
2942 if(mm_flags & FF_MM_SSE2){
2943 c->h264_idct8_add = ff_h264_idct8_add_sse2;
2944 c->h264_idct8_add4= ff_h264_idct8_add4_sse2;
2945
2946 H264_QPEL_FUNCS(0, 1, sse2);
2947 H264_QPEL_FUNCS(0, 2, sse2);
2948 H264_QPEL_FUNCS(0, 3, sse2);
2949 H264_QPEL_FUNCS(1, 1, sse2);
2950 H264_QPEL_FUNCS(1, 2, sse2);
2951 H264_QPEL_FUNCS(1, 3, sse2);
2952 H264_QPEL_FUNCS(2, 1, sse2);
2953 H264_QPEL_FUNCS(2, 2, sse2);
2954 H264_QPEL_FUNCS(2, 3, sse2);
2955 H264_QPEL_FUNCS(3, 1, sse2);
2956 H264_QPEL_FUNCS(3, 2, sse2);
2957 H264_QPEL_FUNCS(3, 3, sse2);
2958
2959 if (CONFIG_VP6_DECODER) {
2960 c->vp6_filter_diag4 = ff_vp6_filter_diag4_sse2;
2961 }
2962 }
2963 #if HAVE_SSSE3
2964 if(mm_flags & FF_MM_SSSE3){
2965 H264_QPEL_FUNCS(1, 0, ssse3);
2966 H264_QPEL_FUNCS(1, 1, ssse3);
2967 H264_QPEL_FUNCS(1, 2, ssse3);
2968 H264_QPEL_FUNCS(1, 3, ssse3);
2969 H264_QPEL_FUNCS(2, 0, ssse3);
2970 H264_QPEL_FUNCS(2, 1, ssse3);
2971 H264_QPEL_FUNCS(2, 2, ssse3);
2972 H264_QPEL_FUNCS(2, 3, ssse3);
2973 H264_QPEL_FUNCS(3, 0, ssse3);
2974 H264_QPEL_FUNCS(3, 1, ssse3);
2975 H264_QPEL_FUNCS(3, 2, ssse3);
2976 H264_QPEL_FUNCS(3, 3, ssse3);
2977 c->put_no_rnd_vc1_chroma_pixels_tab[0]= put_vc1_chroma_mc8_ssse3_nornd;
2978 c->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_vc1_chroma_mc8_ssse3_nornd;
2979 c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_rnd;
2980 c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_ssse3_rnd;
2981 c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_ssse3;
2982 c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_ssse3;
2983 c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
2984 }
2985 #endif
2986
2987 #if CONFIG_GPL && HAVE_YASM
2988 if (mm_flags & FF_MM_MMX2){
2989 #if ARCH_X86_32
2990 c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_mmxext;
2991 c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_mmxext;
2992 #endif
2993 if( mm_flags&FF_MM_SSE2 ){
2994 #if ARCH_X86_64 || !defined(__ICC) || __ICC > 1100
2995 c->h264_v_loop_filter_luma = ff_x264_deblock_v_luma_sse2;
2996 c->h264_h_loop_filter_luma = ff_x264_deblock_h_luma_sse2;
2997 c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_sse2;
2998 c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_sse2;
2999 #endif
3000 c->h264_idct_add16 = ff_h264_idct_add16_sse2;
3001 c->h264_idct_add8 = ff_h264_idct_add8_sse2;
3002 c->h264_idct_add16intra = ff_h264_idct_add16intra_sse2;
3003 }
3004 }
3005 #endif
3006
3007 #if CONFIG_SNOW_DECODER
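    /* the "& 0" makes this condition always false, keeping the SSE2 snow
     * path disabled */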
3008 if(mm_flags & FF_MM_SSE2 & 0){
3009 c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
3010 #if HAVE_7REGS
3011 c->vertical_compose97i = ff_snow_vertical_compose97i_sse2;
3012 #endif
3013 c->inner_add_yblock = ff_snow_inner_add_yblock_sse2;
3014 }
3015 else{
3016 if(mm_flags & FF_MM_MMX2){
3017 c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx;
3018 #if HAVE_7REGS
3019 c->vertical_compose97i = ff_snow_vertical_compose97i_mmx;
3020 #endif
3021 }
3022 c->inner_add_yblock = ff_snow_inner_add_yblock_mmx;
3023 }
3024 #endif
3025
3026 if(mm_flags & FF_MM_3DNOW){
3027 c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
3028 c->vector_fmul = vector_fmul_3dnow;
3029 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
3030 c->float_to_int16 = float_to_int16_3dnow;
3031 c->float_to_int16_interleave = float_to_int16_interleave_3dnow;
3032 }
3033 }
3034 if(mm_flags & FF_MM_3DNOWEXT){
3035 c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
3036 c->vector_fmul_window = vector_fmul_window_3dnow2;
3037 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
3038 c->float_to_int16_interleave = float_to_int16_interleave_3dn2;
3039 }
3040 }
3041 if(mm_flags & FF_MM_SSE){
3042 c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
3043 c->ac3_downmix = ac3_downmix_sse;
3044 c->vector_fmul = vector_fmul_sse;
3045 c->vector_fmul_reverse = vector_fmul_reverse_sse;
3046 c->vector_fmul_add_add = vector_fmul_add_add_sse;
3047 c->vector_fmul_window = vector_fmul_window_sse;
3048 c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse;
3049 c->float_to_int16 = float_to_int16_sse;
3050 c->float_to_int16_interleave = float_to_int16_interleave_sse;
3051 }
3052 if(mm_flags & FF_MM_3DNOW)
3053 c->vector_fmul_add_add = vector_fmul_add_add_3dnow; // faster than sse
3054 if(mm_flags & FF_MM_SSE2){
3055 c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2;
3056 c->float_to_int16 = float_to_int16_sse2;
3057 c->float_to_int16_interleave = float_to_int16_interleave_sse2;
3058 c->add_int16 = add_int16_sse2;
3059 c->sub_int16 = sub_int16_sse2;
3060 c->scalarproduct_int16 = scalarproduct_int16_sse2;
3061 }
3062 }
3063
3064 if (CONFIG_ENCODERS)
3065 dsputilenc_init_mmx(c, avctx);
3066
3067 #if 0
3068 // for speed testing
3069 get_pixels = just_return;
3070 put_pixels_clamped = just_return;
3071 add_pixels_clamped = just_return;
3072
3073 pix_abs16x16 = just_return;
3074 pix_abs16x16_x2 = just_return;
3075 pix_abs16x16_y2 = just_return;
3076 pix_abs16x16_xy2 = just_return;
3077
3078 put_pixels_tab[0] = just_return;
3079 put_pixels_tab[1] = just_return;
3080 put_pixels_tab[2] = just_return;
3081 put_pixels_tab[3] = just_return;
3082
3083 put_no_rnd_pixels_tab[0] = just_return;
3084 put_no_rnd_pixels_tab[1] = just_return;
3085 put_no_rnd_pixels_tab[2] = just_return;
3086 put_no_rnd_pixels_tab[3] = just_return;
3087
3088 avg_pixels_tab[0] = just_return;
3089 avg_pixels_tab[1] = just_return;
3090 avg_pixels_tab[2] = just_return;
3091 avg_pixels_tab[3] = just_return;
3092
3093 avg_no_rnd_pixels_tab[0] = just_return;
3094 avg_no_rnd_pixels_tab[1] = just_return;
3095 avg_no_rnd_pixels_tab[2] = just_return;
3096 avg_no_rnd_pixels_tab[3] = just_return;
3097
3098 //av_fdct = just_return;
3099 //ff_idct = just_return;
3100 #endif
3101 }