/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h263.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "mmx.h"
#include "vp3dsp_mmx.h"
#include "vp3dsp_sse2.h"
#include "idct_xvid.h"

//#undef NDEBUG
//#include <assert.h>

int mm_flags; /* multimedia extension flags */

/* pixel operations */
DECLARE_ALIGNED_8 (const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED_16(const uint64_t, ff_pdw_80000000[2]) =
{0x8000000080000000ULL, 0x8000000080000000ULL};

DECLARE_ALIGNED_8 (const uint64_t, ff_pw_3  ) = 0x0003000300030003ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_4  ) = 0x0004000400040004ULL;
DECLARE_ALIGNED_16(const xmm_t,    ff_pw_5  ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED_16(const xmm_t,    ff_pw_8  ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED_16(const xmm_t,    ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED_16(const xmm_t,    ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
DECLARE_ALIGNED_16(const xmm_t,    ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_64 ) = 0x0040004000400040ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;

DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1  ) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3  ) = 0x0303030303030303ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_7  ) = 0x0707070707070707ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;

DECLARE_ALIGNED_16(const double, ff_pd_1[2]) = { 1.0, 1.0 };
DECLARE_ALIGNED_16(const double, ff_pd_2[2]) = { 2.0, 2.0 };

#define JUMPALIGN() asm volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd) asm volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_BFE(regd) \
    asm volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) asm volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd) asm volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
// for shared libraries (PIC) it is better to generate these constants
// in registers than to load them from memory
// pcmpeqd -> -1
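// derivation of the register-only constant generation below:
//   pcmpeqd reg, reg  ->  0xFFFFFFFFFFFFFFFF (all ones)
//   psrlw   $15, reg  ->  0x0001000100010001 (one per 16-bit word)
// MOVQ_BONE then packs the words down to bytes (0x0101010101010101),
// while MOVQ_WTWO shifts each word left by 1 (0x0002000200020002)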
#define MOVQ_BONE(regd) \
    asm volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    asm volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)

#endif

// regr is used as a temporary and holds the output result
// the first argument is unmodified and the second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
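// both variants rely on the bytewise averaging identities
//   (a + b)     >> 1  ==  (a & b) + (((a ^ b) & 0xfe) >> 1)   (no rounding)
//   (a + b + 1) >> 1  ==  (a | b) - (((a ^ b) & 0xfe) >> 1)   (rounding up)
// the 0xfe mask clears the bits that the 64-bit psrlq would otherwise
// shift across byte boundaries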
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe ", " #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe ", " #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"

// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pand " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "por " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"

/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx

/***********************************/
/* standard MMX */

void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
    asm volatile(
        "movq %3, %%mm0 \n\t"
        "movq 8%3, %%mm1 \n\t"
        "movq 16%3, %%mm2 \n\t"
        "movq 24%3, %%mm3 \n\t"
        "movq 32%3, %%mm4 \n\t"
        "movq 40%3, %%mm5 \n\t"
        "movq 48%3, %%mm6 \n\t"
        "movq 56%3, %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
        :"memory");
    pix += line_size*4;
    p += 32;

    // if this were an exact copy of the code above, the compiler
    // would generate some very strange code, thus using "r"
    asm volatile(
        "movq (%3), %%mm0 \n\t"
        "movq 8(%3), %%mm1 \n\t"
        "movq 16(%3), %%mm2 \n\t"
        "movq 24(%3), %%mm3 \n\t"
        "movq 32(%3), %%mm4 \n\t"
        "movq 40(%3), %%mm5 \n\t"
        "movq 48(%3), %%mm6 \n\t"
        "movq 56(%3), %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
        :"memory");
}

static DECLARE_ALIGNED_8(const unsigned char, vector128[8]) =
  { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

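/* packsswb saturates the DCT coefficients to the signed byte range
   [-128,127]; adding the 0x80 bias from vector128 then maps them into
   the unsigned pixel range [0,255] */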
void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i;

    movq_m2r(*vector128, mm1);
    for (i = 0; i < 8; i++) {
        movq_m2r(*(block), mm0);
        packsswb_m2r(*(block + 4), mm0);
        block += 8;
        paddb_r2r(mm1, mm0);
        movq_r2m(mm0, *pixels);
        pixels += line_size;
    }
}

void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        asm volatile(
            "movq (%2), %%mm0 \n\t"
            "movq 8(%2), %%mm1 \n\t"
            "movq 16(%2), %%mm2 \n\t"
            "movq 24(%2), %%mm3 \n\t"
            "movq %0, %%mm4 \n\t"
            "movq %1, %%mm6 \n\t"
            "movq %%mm4, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm4, %%mm0 \n\t"
            "paddsw %%mm5, %%mm1 \n\t"
            "movq %%mm6, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm6 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm6, %%mm2 \n\t"
            "paddsw %%mm5, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %0 \n\t"
            "movq %%mm2, %1 \n\t"
            :"+m"(*pix), "+m"(*(pix+line_size))
            :"r"(p)
            :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}

static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

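/* SSE2 versions: the source rows may be unaligned (movdqu) while the
   16-byte destination rows are assumed aligned (movdqa) */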
static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
        );
}

static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "pavgb (%2), %%xmm0 \n\t"
        "pavgb (%2,%3), %%xmm1 \n\t"
        "pavgb (%2,%3,2), %%xmm2 \n\t"
        "pavgb (%2,%4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
        );
}

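/* zero 6 blocks of 64 DCTELEMs (6*128 bytes), 32 bytes per iteration,
   counting the offset register up from -768 to 0 */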
static void clear_blocks_mmx(DCTELEM *blocks)
{
    asm volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "mov $-128*6, %%"REG_a" \n\t"
        "1: \n\t"
        "movq %%mm7, (%0, %%"REG_a") \n\t"
        "movq %%mm7, 8(%0, %%"REG_a") \n\t"
        "movq %%mm7, 16(%0, %%"REG_a") \n\t"
        "movq %%mm7, 24(%0, %%"REG_a") \n\t"
        "add $32, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "r" (((uint8_t *)blocks)+128*6)
        : "%"REG_a
        );
}

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    x86_reg i=0;
    asm volatile(
        "jmp 2f \n\t"
        "1: \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq (%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%2, %0) \n\t"
        "movq 8(%1, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%2, %0) \n\t"
        "add $16, %0 \n\t"
        "2: \n\t"
        "cmp %3, %0 \n\t"
        " js 1b \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    x86_reg i=0;
    asm volatile(
        "jmp 2f \n\t"
        "1: \n\t"
        "movq (%2, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb (%3, %0), %%mm0 \n\t"
        "paddb 8(%3, %0), %%mm1 \n\t"
        "movq %%mm0, (%1, %0) \n\t"
        "movq %%mm1, 8(%1, %0) \n\t"
        "add $16, %0 \n\t"
        "2: \n\t"
        "cmp %4, %0 \n\t"
        " js 1b \n\t"
        : "+r" (i)
        : "r"(dst), "r"(src1), "r"(src2), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i] = src1[i] + src2[i];
}

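/* H.263 loop filter (Annex J): for each column the filter value is
   d = (p0 - 4*p1 + 4*p2 - p3) / 8, clipped against the qscale-dependent
   strength; the two pixels nearest the edge (p1, p2) are then corrected
   by the clipped value. %0..%3 hold the four pixel rows (or transposed
   columns for the horizontal filter). */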
#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7 \n\t"\
    "movq %0, %%mm0 \n\t"\
    "movq %0, %%mm1 \n\t"\
    "movq %3, %%mm2 \n\t"\
    "movq %3, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm0 \n\t"\
    "punpckhbw %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "psubw %%mm2, %%mm0 \n\t"\
    "psubw %%mm3, %%mm1 \n\t"\
    "movq %1, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "movq %2, %%mm5 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm7, %%mm5 \n\t"\
    "psubw %%mm2, %%mm4 \n\t"\
    "psubw %%mm3, %%mm5 \n\t"\
    "psllw $2, %%mm4 \n\t"\
    "psllw $2, %%mm5 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpgtw %%mm4, %%mm6 \n\t"\
    "pcmpgtw %%mm5, %%mm7 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "pxor %%mm7, %%mm5 \n\t"\
    "psubw %%mm6, %%mm4 \n\t"\
    "psubw %%mm7, %%mm5 \n\t"\
    "psrlw $3, %%mm4 \n\t"\
    "psrlw $3, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm4 \n\t"\
    "packsswb %%mm7, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "movd %4, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "psubusb %%mm4, %%mm2 \n\t"\
    "movq %%mm2, %%mm3 \n\t"\
    "psubusb %%mm4, %%mm3 \n\t"\
    "psubb %%mm3, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm3 \n\t"\
    "psubusb %%mm2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm2 \n\t"\
    "packsswb %%mm1, %%mm0 \n\t"\
    "pcmpgtb %%mm0, %%mm7 \n\t"\
    "pxor %%mm7, %%mm0 \n\t"\
    "psubb %%mm7, %%mm0 \n\t"\
    "movq %%mm0, %%mm1 \n\t"\
    "psubusb %%mm2, %%mm0 \n\t"\
    "psubb %%mm0, %%mm1 \n\t"\
    "pand %5, %%mm1 \n\t"\
    "psrlw $2, %%mm1 \n\t"\
    "pxor %%mm7, %%mm1 \n\t"\
    "psubb %%mm7, %%mm1 \n\t"\
    "movq %0, %%mm5 \n\t"\
    "movq %3, %%mm6 \n\t"\
    "psubb %%mm1, %%mm5 \n\t"\
    "paddb %%mm1, %%mm6 \n\t"

static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(ENABLE_ANY_H263) {
    const int strength= ff_h263_loop_filter_strength[qscale];

    asm volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1 \n\t"
        "movq %%mm4, %2 \n\t"
        "movq %%mm5, %0 \n\t"
        "movq %%mm6, %3 \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
    }
}

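/* 4x4 byte transpose: interleave the rows pairwise with punpcklbw,
   then interleave the resulting word pairs with punpcklwd/punpckhwd */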
static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd %4, %%mm0 \n\t"
        "movd %5, %%mm1 \n\t"
        "movd %6, %%mm2 \n\t"
        "movd %7, %%mm3 \n\t"
        "punpcklbw %%mm1, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "punpcklwd %%mm2, %%mm0 \n\t"
        "punpckhwd %%mm2, %%mm1 \n\t"
        "movd %%mm0, %0 \n\t"
        "punpckhdq %%mm0, %%mm0 \n\t"
        "movd %%mm0, %1 \n\t"
        "movd %%mm1, %2 \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, %3 \n\t"

        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        : "m" (*(uint32_t*)(src + 0*src_stride)),
          "m" (*(uint32_t*)(src + 1*src_stride)),
          "m" (*(uint32_t*)(src + 2*src_stride)),
          "m" (*(uint32_t*)(src + 3*src_stride))
    );
}

static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(ENABLE_ANY_H263) {
    const int strength= ff_h263_loop_filter_strength[qscale];
    DECLARE_ALIGNED(8, uint64_t, temp[4]);
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp  , src           , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    asm volatile(
        H263_LOOP_FILTER // 5 3 4 6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    asm volatile(
        "movq %%mm5, %%mm1 \n\t"
        "movq %%mm4, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm5 \n\t"
        "punpcklbw %%mm6, %%mm4 \n\t"
        "punpckhbw %%mm3, %%mm1 \n\t"
        "punpckhbw %%mm6, %%mm0 \n\t"
        "movq %%mm5, %%mm3 \n\t"
        "movq %%mm1, %%mm6 \n\t"
        "punpcklwd %%mm4, %%mm5 \n\t"
        "punpcklwd %%mm0, %%mm1 \n\t"
        "punpckhwd %%mm4, %%mm3 \n\t"
        "punpckhwd %%mm0, %%mm6 \n\t"
        "movd %%mm5, (%0) \n\t"
        "punpckhdq %%mm5, %%mm5 \n\t"
        "movd %%mm5, (%0,%2) \n\t"
        "movd %%mm3, (%0,%2,2) \n\t"
        "punpckhdq %%mm3, %%mm3 \n\t"
        "movd %%mm3, (%0,%3) \n\t"
        "movd %%mm1, (%1) \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, (%1,%2) \n\t"
        "movd %%mm6, (%1,%2,2) \n\t"
        "punpckhdq %%mm6, %%mm6 \n\t"
        "movd %%mm6, (%1,%3) \n\t"
        :: "r" (src),
           "r" (src + 4*stride),
           "r" ((x86_reg) stride ),
           "r" ((x86_reg)(3*stride))
    );
    }
}

/* draw the edges of width 'w' of an image of size width x height;
   this MMX version can only handle w==8 || w==16 */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if(w==8)
    {
        asm volatile(
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }
    else
    {
        asm volatile(
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq %%mm0, -16(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "movq %%mm1, 8(%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }

    for(i=0;i<w;i+=4) {
        /* top and bottom (and hopefully also the corners) */
        ptr= buf - (i + 1) * wrap - w;
        asm volatile(
            "1: \n\t"
            "movq (%1, %0), %%mm0 \n\t"
            "movq %%mm0, (%0) \n\t"
            "movq %%mm0, (%0, %2) \n\t"
            "movq %%mm0, (%0, %2, 2) \n\t"
            "movq %%mm0, (%0, %3) \n\t"
            "add $8, %0 \n\t"
            "cmp %4, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
        );
        ptr= last_line + (i + 1) * wrap - w;
        asm volatile(
            "1: \n\t"
            "movq (%1, %0), %%mm0 \n\t"
            "movq %%mm0, (%0) \n\t"
            "movq %%mm0, (%0, %2) \n\t"
            "movq %%mm0, (%0, %2, 2) \n\t"
            "movq %%mm0, (%0, %3) \n\t"
            "add $8, %0 \n\t"
            "cmp %4, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
        );
    }
}

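/* scalar reference for the Paeth predictor (PNG spec): per byte,
   p = left + top - topleft; the predictor is whichever of left, top,
   topleft is closest to p, and dst = src + predictor. The MMX code
   below computes the three absolute differences in parallel via the
   abs3 macro argument. */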
#define PAETH(cpu, abs3)\
static void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
{\
    x86_reg i = -bpp;\
    x86_reg end = w-3;\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n"\
        "movd (%1,%0), %%mm0 \n"\
        "movd (%2,%0), %%mm1 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "add %4, %0 \n"\
        "1: \n"\
        "movq %%mm1, %%mm2 \n"\
        "movd (%2,%0), %%mm1 \n"\
        "movq %%mm2, %%mm3 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "movq %%mm2, %%mm4 \n"\
        "psubw %%mm1, %%mm3 \n"\
        "psubw %%mm0, %%mm4 \n"\
        "movq %%mm3, %%mm5 \n"\
        "paddw %%mm4, %%mm5 \n"\
        abs3\
        "movq %%mm4, %%mm6 \n"\
        "pminsw %%mm5, %%mm6 \n"\
        "pcmpgtw %%mm6, %%mm3 \n"\
        "pcmpgtw %%mm5, %%mm4 \n"\
        "movq %%mm4, %%mm6 \n"\
        "pand %%mm3, %%mm4 \n"\
        "pandn %%mm3, %%mm6 \n"\
        "pandn %%mm0, %%mm3 \n"\
        "movd (%3,%0), %%mm0 \n"\
        "pand %%mm1, %%mm6 \n"\
        "pand %%mm4, %%mm2 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "movq %6, %%mm5 \n"\
        "paddw %%mm6, %%mm0 \n"\
        "paddw %%mm2, %%mm3 \n"\
        "paddw %%mm3, %%mm0 \n"\
        "pand %%mm5, %%mm0 \n"\
        "movq %%mm0, %%mm3 \n"\
        "packuswb %%mm3, %%mm3 \n"\
        "movd %%mm3, (%1,%0) \n"\
        "add %4, %0 \n"\
        "cmp %5, %0 \n"\
        "jle 1b \n"\
        :"+r"(i)\
        :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\
         "m"(ff_pw_255)\
        :"memory"\
    );\
}

#define ABS3_MMX2\
        "psubw %%mm5, %%mm7 \n"\
        "pmaxsw %%mm7, %%mm5 \n"\
        "pxor %%mm6, %%mm6 \n"\
        "pxor %%mm7, %%mm7 \n"\
        "psubw %%mm3, %%mm6 \n"\
        "psubw %%mm4, %%mm7 \n"\
        "pmaxsw %%mm6, %%mm3 \n"\
        "pmaxsw %%mm7, %%mm4 \n"\
        "pxor %%mm7, %%mm7 \n"

#define ABS3_SSSE3\
        "pabsw %%mm3, %%mm3 \n"\
        "pabsw %%mm4, %%mm4 \n"\
        "pabsw %%mm5, %%mm5 \n"

PAETH(mmx2, ABS3_MMX2)
#ifdef HAVE_SSSE3
PAETH(ssse3, ABS3_SSSE3)
#endif

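/* MPEG-4 quarter-pel lowpass filter: each half-sample is
   ((A+B)*20 - (C+D)*6 + (E+F)*3 - (G+H) + rounder) >> 5, where A,B are
   the two nearest and G,H the two farthest full samples (cf. the scalar
   3dnow fallbacks below); QPEL_V_LOW computes one row of the vertical
   version of this filter */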
#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
    "paddw " #m4 ", " #m3 " \n\t" /* x1 */\
    "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\
    "pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\
    "movq "#in7", " #m3 " \n\t" /* d */\
    "movq "#in0", %%mm5 \n\t" /* D */\
    "paddw " #m3 ", %%mm5 \n\t" /* x4 */\
    "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\
    "movq "#in1", %%mm5 \n\t" /* C */\
    "movq "#in2", %%mm6 \n\t" /* B */\
    "paddw " #m6 ", %%mm5 \n\t" /* x3 */\
    "paddw " #m5 ", %%mm6 \n\t" /* x2 */\
    "paddw %%mm6, %%mm6 \n\t" /* 2x2 */\
    "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\
    "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\
    "paddw " #rnd ", %%mm4 \n\t" /* x2 */\
    "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
    "psraw $5, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    OP(%%mm5, out, %%mm7, d)

#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %6, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        "movq %%mm0, %5 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2 \n\t" /* b */\
        "paddw %%mm5, %%mm3 \n\t" /* c */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6 \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm4 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "movq %5, %%mm1 \n\t"\
        "packuswb %%mm3, %%mm1 \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
\
        "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5 \n\t" /* b */\
        "paddw %%mm4, %%mm0 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm0 \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2 \n\t" /* d */\
        "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6 \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
\
        "paddw %%mm5, %%mm3 \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4 \n\t" /* c */\
        "paddw %%mm2, %%mm5 \n\t" /* d */\
        "paddw %%mm6, %%mm6 \n\t" /* 2b */\
        "psubw %%mm6, %%mm4 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm4 \n\t"\
        "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4 \n\t"\
        "packuswb %%mm4, %%mm0 \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+D"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        asm volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0 \n\t"\
            "movq 24(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %5, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movd 5(%0), %%mm5 \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm2 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3 \n\t" /* c */\
        "paddw %%mm5, %%mm4 \n\t" /* d */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %5, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "packuswb %%mm3, %%mm0 \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+d"(h)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        asm volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}

#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    /*FIXME unroll */\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "movq 8(%0), %%mm2 \n\t"\
        "movq 8(%0), %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 17*8(%1) \n\t"\
        "movq %%mm2, 2*17*8(%1) \n\t"\
        "movq %%mm3, 3*17*8(%1) \n\t"\
        "add $8, %1 \n\t"\
        "add %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=4;\
\
    /*FIXME reorder for speed */\
    asm volatile(\
        /*"pxor %%mm7, %%mm7 \n\t"*/\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
\
        "add $136, %0 \n\t"\
        "add %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
        :"memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    /*FIXME unroll */\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 9*8(%1) \n\t"\
        "add $8, %1 \n\t"\
        "add %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=2;\
\
    /*FIXME reorder for speed */\
    asm volatile(\
        /*"pxor %%mm7, %%mm7 \n\t"*/\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
\
        "add $72, %0 \n\t"\
        "add %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
}\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
}\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}
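/* the OP macros select between a plain store (put) and averaging with the
   existing destination (avg), using pavgusb on 3DNow! and pavgb on MMX2 */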

#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgusb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"

QPEL_BASE(put_       , ff_pw_16, _       , PUT_OP, PUT_OP)
QPEL_BASE(avg_       , ff_pw_16, _       , AVG_MMX2_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, 3dnow)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, mmx2)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_MMX2_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)

/***********************************/
1532 /* bilinear qpel: not compliant with any spec, only used with -lavdopts fast */
1533
1534 #define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
1535 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1536 OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
1537 }
1538 #define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
1539 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1540 OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
1541 }
1542
1543 #define QPEL_2TAP(OPNAME, SIZE, MMX)\
1544 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
1545 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
1546 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
1547 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
1548 OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
1549 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
1550 OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
1551 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
1552 OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
1553 static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1554 OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
1555 }\
1556 static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1557 OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
1558 }\
1559 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0, 1, 0)\
1560 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1, -1, 0)\
1561 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0, stride, 0)\
1562 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride, -stride, 0)\
1563 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0, stride, 1)\
1564 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1, stride, -1)\
1565 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride, -stride, 1)\
1566 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\
1567
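/* Sketch of the 2-tap scheme above: the pure half-pel positions reuse the
 * hpel copy/average routines directly (mc22 always uses the plain MMX xy2
 * routine), mc21/mc12 simply alias mc20/mc02 via the const function
 * pointers, and the remaining positions go through the _l3_ helpers
 * (defined earlier in this file), which blend the pixel at src+S0 with its
 * neighbours at offsets S1 and S2, roughly a (2,1,1)/4 weighting rather
 * than a spec-compliant filter. */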
1568 QPEL_2TAP(put_, 16, mmx2)
1569 QPEL_2TAP(avg_, 16, mmx2)
1570 QPEL_2TAP(put_, 8, mmx2)
1571 QPEL_2TAP(avg_, 8, mmx2)
1572 QPEL_2TAP(put_, 16, 3dnow)
1573 QPEL_2TAP(avg_, 16, 3dnow)
1574 QPEL_2TAP(put_, 8, 3dnow)
1575 QPEL_2TAP(avg_, 8, 3dnow)
1576
1577
1578 #if 0
1579 static void just_return() { return; }
1580 #endif
1581
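/* Global motion compensation: each output pixel is a bilinear blend of its
 * four fullpel neighbours.  Scalar sketch of the per-pixel formula
 * implemented by the inner asm below, with s = 1<<shift and dx, dy the
 * subpel weights in [0, s):
 *
 *   dst = ( src[0]        * (s-dx) * (s-dy)
 *         + src[1]        *  dx    * (s-dy)
 *         + src[stride]   * (s-dx) *  dy
 *         + src[stride+1] *  dx    *  dy
 *         + r ) >> (2*shift);
 */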
1582 static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
1583 int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){
1584 const int w = 8;
1585 const int ix = ox>>(16+shift);
1586 const int iy = oy>>(16+shift);
1587 const int oxs = ox>>4;
1588 const int oys = oy>>4;
1589 const int dxxs = dxx>>4;
1590 const int dxys = dxy>>4;
1591 const int dyxs = dyx>>4;
1592 const int dyys = dyy>>4;
1593 const uint16_t r4[4] = {r,r,r,r};
1594 const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
1595 const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
1596 const uint64_t shift2 = 2*shift;
1597 uint8_t edge_buf[(h+1)*stride];
1598 int x, y;
1599
1600 const int dxw = (dxx-(1<<(16+shift)))*(w-1);
1601 const int dyh = (dyy-(1<<(16+shift)))*(h-1);
1602 const int dxh = dxy*(h-1);
1603 const int dyw = dyx*(w-1);
1604 if( // non-constant fullpel offset (3% of blocks)
1605 ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
1606 (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
1607 // uses more than 16 bits of subpel mv (only at huge resolution)
1608 || (dxx|dxy|dyx|dyy)&15 )
1609 {
1610 //FIXME could still use mmx for some of the rows
1611 ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
1612 return;
1613 }
1614
1615 src += ix + iy*stride;
1616 if( (unsigned)ix >= width-w ||
1617 (unsigned)iy >= height-h )
1618 {
1619 ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
1620 src = edge_buf;
1621 }
1622
1623 asm volatile(
1624 "movd %0, %%mm6 \n\t"
1625 "pxor %%mm7, %%mm7 \n\t"
1626 "punpcklwd %%mm6, %%mm6 \n\t"
1627 "punpcklwd %%mm6, %%mm6 \n\t"
1628 :: "r"(1<<shift)
1629 );
1630
1631 for(x=0; x<w; x+=4){
1632 uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
1633 oxs - dxys + dxxs*(x+1),
1634 oxs - dxys + dxxs*(x+2),
1635 oxs - dxys + dxxs*(x+3) };
1636 uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
1637 oys - dyys + dyxs*(x+1),
1638 oys - dyys + dyxs*(x+2),
1639 oys - dyys + dyxs*(x+3) };
1640
1641 for(y=0; y<h; y++){
1642 asm volatile(
1643 "movq %0, %%mm4 \n\t"
1644 "movq %1, %%mm5 \n\t"
1645 "paddw %2, %%mm4 \n\t"
1646 "paddw %3, %%mm5 \n\t"
1647 "movq %%mm4, %0 \n\t"
1648 "movq %%mm5, %1 \n\t"
1649 "psrlw $12, %%mm4 \n\t"
1650 "psrlw $12, %%mm5 \n\t"
1651 : "+m"(*dx4), "+m"(*dy4)
1652 : "m"(*dxy4), "m"(*dyy4)
1653 );
1654
1655 asm volatile(
1656 "movq %%mm6, %%mm2 \n\t"
1657 "movq %%mm6, %%mm1 \n\t"
1658 "psubw %%mm4, %%mm2 \n\t"
1659 "psubw %%mm5, %%mm1 \n\t"
1660 "movq %%mm2, %%mm0 \n\t"
1661 "movq %%mm4, %%mm3 \n\t"
1662 "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
1663 "pmullw %%mm5, %%mm3 \n\t" // dx*dy
1664 "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
1665 "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)
1666
1667 "movd %4, %%mm5 \n\t"
1668 "movd %3, %%mm4 \n\t"
1669 "punpcklbw %%mm7, %%mm5 \n\t"
1670 "punpcklbw %%mm7, %%mm4 \n\t"
1671 "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
1672 "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy
1673
1674 "movd %2, %%mm5 \n\t"
1675 "movd %1, %%mm4 \n\t"
1676 "punpcklbw %%mm7, %%mm5 \n\t"
1677 "punpcklbw %%mm7, %%mm4 \n\t"
1678 "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
1679 "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
1680 "paddw %5, %%mm1 \n\t"
1681 "paddw %%mm3, %%mm2 \n\t"
1682 "paddw %%mm1, %%mm0 \n\t"
1683 "paddw %%mm2, %%mm0 \n\t"
1684
1685 "psrlw %6, %%mm0 \n\t"
1686 "packuswb %%mm0, %%mm0 \n\t"
1687 "movd %%mm0, %0 \n\t"
1688
1689 : "=m"(dst[x+y*stride])
1690 : "m"(src[0]), "m"(src[1]),
1691 "m"(src[stride]), "m"(src[stride+1]),
1692 "m"(*r4), "m"(shift2)
1693 );
1694 src += stride;
1695 }
1696 src += 4-h*stride;
1697 }
1698 }
1699
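/* Generate simple cache-prefetch helpers: one cache line per row is
 * touched, using SSE prefetcht0 (fetch into all cache levels) for MMX2
 * CPUs and the 3DNow! prefetch instruction for older AMD CPUs. */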
1700 #define PREFETCH(name, op) \
1701 static void name(void *mem, int stride, int h){\
1702 const uint8_t *p= mem;\
1703 do{\
1704 asm volatile(#op" %0" :: "m"(*p));\
1705 p+= stride;\
1706 }while(--h);\
1707 }
1708 PREFETCH(prefetch_mmx2, prefetcht0)
1709 PREFETCH(prefetch_3dnow, prefetch)
1710 #undef PREFETCH
1711
1712 #include "h264dsp_mmx.c"
1713
1714 /* CAVS specific */
1715 void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx);
1716 void ff_cavsdsp_init_3dnow(DSPContext* c, AVCodecContext *avctx);
1717
1718 void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1719 put_pixels8_mmx(dst, src, stride, 8);
1720 }
1721 void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1722 avg_pixels8_mmx(dst, src, stride, 8);
1723 }
1724 void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1725 put_pixels16_mmx(dst, src, stride, 16);
1726 }
1727 void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1728 avg_pixels16_mmx(dst, src, stride, 16);
1729 }
1730
1731 /* VC1 specific */
1732 void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx);
1733
1734 void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
1735 put_pixels8_mmx(dst, src, stride, 8);
1736 }
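/* mc00 is the fullpel position and needs no interpolation, so the CAVS and
 * VC-1 wrappers above just copy or average whole blocks with the plain MMX
 * pixel routines. */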
1737
1738 /* external functions, from idct_mmx.c */
1739 void ff_mmx_idct(DCTELEM *block);
1740 void ff_mmxext_idct(DCTELEM *block);
1741
1742 /* XXX: these functions should be removed as soon as all IDCTs are
1743 converted */
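/* Wrapper convention: idct_put overwrites dest with the clamped IDCT
 * result (intra blocks), while idct_add clamps the result and adds it to
 * the existing prediction in dest (inter blocks). */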
1744 #ifdef CONFIG_GPL
1745 static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
1746 {
1747 ff_mmx_idct (block);
1748 put_pixels_clamped_mmx(block, dest, line_size);
1749 }
1750 static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
1751 {
1752 ff_mmx_idct (block);
1753 add_pixels_clamped_mmx(block, dest, line_size);
1754 }
1755 static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
1756 {
1757 ff_mmxext_idct (block);
1758 put_pixels_clamped_mmx(block, dest, line_size);
1759 }
1760 static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
1761 {
1762 ff_mmxext_idct (block);
1763 add_pixels_clamped_mmx(block, dest, line_size);
1764 }
1765 #endif
1766 static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
1767 {
1768 ff_idct_xvid_mmx (block);
1769 put_pixels_clamped_mmx(block, dest, line_size);
1770 }
1771 static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
1772 {
1773 ff_idct_xvid_mmx (block);
1774 add_pixels_clamped_mmx(block, dest, line_size);
1775 }
1776 static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
1777 {
1778 ff_idct_xvid_mmx2 (block);
1779 put_pixels_clamped_mmx(block, dest, line_size);
1780 }
1781 static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
1782 {
1783 ff_idct_xvid_mmx2 (block);
1784 add_pixels_clamped_mmx(block, dest, line_size);
1785 }
1786
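/* Vorbis square-polar decoupling: mag holds the magnitude channel, ang the
 * angle (residual) channel.  Both routines below are branchless forms of
 * this scalar sketch, reconstructed from the asm comments (a' is ang[i]
 * with its sign flipped wherever mag[i] >= 0):
 *
 *   if (ang[i] >= 0) ang[i] = mag[i] + a';
 *   else           { ang[i] = mag[i]; mag[i] -= a'; }
 */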
1787 static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
1788 {
1789 int i;
1790 asm volatile("pxor %%mm7, %%mm7":);
1791 for(i=0; i<blocksize; i+=2) {
1792 asm volatile(
1793 "movq %0, %%mm0 \n\t"
1794 "movq %1, %%mm1 \n\t"
1795 "movq %%mm0, %%mm2 \n\t"
1796 "movq %%mm1, %%mm3 \n\t"
1797 "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
1798 "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
1799 "pslld $31, %%mm2 \n\t" // keep only the sign bit
1800 "pxor %%mm2, %%mm1 \n\t"
1801 "movq %%mm3, %%mm4 \n\t"
1802 "pand %%mm1, %%mm3 \n\t"
1803 "pandn %%mm1, %%mm4 \n\t"
1804 "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
1805 "pfsub %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
1806 "movq %%mm3, %1 \n\t"
1807 "movq %%mm0, %0 \n\t"
1808 :"+m"(mag[i]), "+m"(ang[i])
1809 ::"memory"
1810 );
1811 }
1812 asm volatile("femms");
1813 }
1814 static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
1815 {
1816 int i;
1817
1818 asm volatile(
1819 "movaps %0, %%xmm5 \n\t"
1820 ::"m"(ff_pdw_80000000[0])
1821 );
1822 for(i=0; i<blocksize; i+=4) {
1823 asm volatile(
1824 "movaps %0, %%xmm0 \n\t"
1825 "movaps %1, %%xmm1 \n\t"
1826 "xorps %%xmm2, %%xmm2 \n\t"
1827 "xorps %%xmm3, %%xmm3 \n\t"
1828 "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
1829 "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
1830 "andps %%xmm5, %%xmm2 \n\t" // keep only the sign bit
1831 "xorps %%xmm2, %%xmm1 \n\t"
1832 "movaps %%xmm3, %%xmm4 \n\t"
1833 "andps %%xmm1, %%xmm3 \n\t"
1834 "andnps %%xmm1, %%xmm4 \n\t"
1835 "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
1836 "subps %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
1837 "movaps %%xmm3, %1 \n\t"
1838 "movaps %%xmm0, %0 \n\t"
1839 :"+m"(mag[i]), "+m"(ang[i])
1840 ::"memory"
1841 );
1842 }
1843 }
1844
1845 #define IF1(x) x
1846 #define IF0(x)
1847
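/* AC-3 downmix helpers.  samples[] holds in_ch channels of 256 floats
 * each, so channel n of the current block lives n*0x400 bytes apart in
 * MIX5.  matrix rows are float[2] (column 0 feeds the left output, column
 * 1 the right), so matrix[i][j] sits at byte offset 8*i + 4*j: the movss
 * loads at 0, 8 and 24 pick up the front, center and surround coefficients
 * of the symmetric 5-channel fast path.  MIX_MISC is the generic loop for
 * arbitrary matrices, using coefficients the caller pre-splats to all four
 * SSE lanes. */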
1848 #define MIX5(mono,stereo)\
1849 asm volatile(\
1850 "movss 0(%2), %%xmm5 \n"\
1851 "movss 8(%2), %%xmm6 \n"\
1852 "movss 24(%2), %%xmm7 \n"\
1853 "shufps $0, %%xmm5, %%xmm5 \n"\
1854 "shufps $0, %%xmm6, %%xmm6 \n"\
1855 "shufps $0, %%xmm7, %%xmm7 \n"\
1856 "1: \n"\
1857 "movaps (%0,%1), %%xmm0 \n"\
1858 "movaps 0x400(%0,%1), %%xmm1 \n"\
1859 "movaps 0x800(%0,%1), %%xmm2 \n"\
1860 "movaps 0xc00(%0,%1), %%xmm3 \n"\
1861 "movaps 0x1000(%0,%1), %%xmm4 \n"\
1862 "mulps %%xmm5, %%xmm0 \n"\
1863 "mulps %%xmm6, %%xmm1 \n"\
1864 "mulps %%xmm5, %%xmm2 \n"\
1865 "mulps %%xmm7, %%xmm3 \n"\
1866 "mulps %%xmm7, %%xmm4 \n"\
1867 stereo("addps %%xmm1, %%xmm0 \n")\
1868 "addps %%xmm1, %%xmm2 \n"\
1869 "addps %%xmm3, %%xmm0 \n"\
1870 "addps %%xmm4, %%xmm2 \n"\
1871 mono("addps %%xmm2, %%xmm0 \n")\
1872 "movaps %%xmm0, (%0,%1) \n"\
1873 stereo("movaps %%xmm2, 0x400(%0,%1) \n")\
1874 "add $16, %0 \n"\
1875 "jl 1b \n"\
1876 :"+&r"(i)\
1877 :"r"(samples[0]+len), "r"(matrix)\
1878 :"memory"\
1879 );
1880
1881 #define MIX_MISC(stereo)\
1882 asm volatile(\
1883 "1: \n"\
1884 "movaps (%3,%0), %%xmm0 \n"\
1885 stereo("movaps %%xmm0, %%xmm1 \n")\
1886 "mulps %%xmm6, %%xmm0 \n"\
1887 stereo("mulps %%xmm7, %%xmm1 \n")\
1888 "lea 1024(%3,%0), %1 \n"\
1889 "mov %5, %2 \n"\
1890 "2: \n"\
1891 "movaps (%1), %%xmm2 \n"\
1892 stereo("movaps %%xmm2, %%xmm3 \n")\
1893 "mulps (%4,%2), %%xmm2 \n"\
1894 stereo("mulps 16(%4,%2), %%xmm3 \n")\
1895 "addps %%xmm2, %%xmm0 \n"\
1896 stereo("addps %%xmm3, %%xmm1 \n")\
1897 "add $1024, %1 \n"\
1898 "add $32, %2 \n"\
1899 "jl 2b \n"\
1900 "movaps %%xmm0, (%3,%0) \n"\
1901 stereo("movaps %%xmm1, 1024(%3,%0) \n")\
1902 "add $16, %0 \n"\
1903 "jl 1b \n"\
1904 :"+&r"(i), "=&r"(j), "=&r"(k)\
1905 :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\
1906 :"memory"\
1907 );
1908
1909 static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
1910 {
1911 int (*matrix_cmp)[2] = (int(*)[2])matrix;
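/* compare the coefficients as raw bit patterns: the fast paths below only
 * apply when entries are exactly zero or exactly equal */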
1912 intptr_t i,j,k;
1913
1914 i = -len*sizeof(float);
1915 if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) {
1916 MIX5(IF0,IF1);
1917 } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
1918 MIX5(IF1,IF0);
1919 } else {
1920 DECLARE_ALIGNED_16(float, matrix_simd[in_ch][2][4]);
1921 j = 2*in_ch*sizeof(float);
1922 asm volatile(
1923 "1: \n"
1924 "sub $8, %0 \n"
1925 "movss (%2,%0), %%xmm6 \n"
1926 "movss 4(%2,%0), %%xmm7 \n"
1927 "shufps $0, %%xmm6, %%xmm6 \n"
1928 "shufps $0, %%xmm7, %%xmm7 \n"
1929 "movaps %%xmm6, (%1,%0,4) \n"
1930 "movaps %%xmm7, 16(%1,%0,4) \n"
1931 "jg 1b \n"
1932 :"+&r"(j)
1933 :"r"(matrix_simd), "r"(matrix)
1934 :"memory"
1935 );
1936 if(out_ch == 2) {
1937 MIX_MISC(IF1);
1938 } else {
1939 MIX_MISC(IF0);
1940 }
1941 }
1942 }
1943
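/* dst[i] *= src[i] for i in [0,len).  Both loops walk backwards from the
 * end of the arrays; len must be a multiple of 4 (3DNow!) or 8 (SSE), and
 * the SSE version assumes 16-byte alignment (movaps). */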
1944 static void vector_fmul_3dnow(float *dst, const float *src, int len){
1945 x86_reg i = (len-4)*4;
1946 asm volatile(
1947 "1: \n\t"
1948 "movq (%1,%0), %%mm0 \n\t"
1949 "movq 8(%1,%0), %%mm1 \n\t"
1950 "pfmul (%2,%0), %%mm0 \n\t"
1951 "pfmul 8(%2,%0), %%mm1 \n\t"
1952 "movq %%mm0, (%1,%0) \n\t"
1953 "movq %%mm1, 8(%1,%0) \n\t"
1954 "sub $16, %0 \n\t"
1955 "jge 1b \n\t"
1956 "femms \n\t"
1957 :"+r"(i)
1958 :"r"(dst), "r"(src)
1959 :"memory"
1960 );
1961 }
1962 static void vector_fmul_sse(float *dst, const float *src, int len){
1963 x86_reg i = (len-8)*4;
1964 asm volatile(
1965 "1: \n\t"
1966 "movaps (%1,%0), %%xmm0 \n\t"
1967 "movaps 16(%1,%0), %%xmm1 \n\t"
1968 "mulps (%2,%0), %%xmm0 \n\t"
1969 "mulps 16(%2,%0), %%xmm1 \n\t"
1970 "movaps %%xmm0, (%1,%0) \n\t"
1971 "movaps %%xmm1, 16(%1,%0) \n\t"
1972 "sub $32, %0 \n\t"
1973 "jge 1b \n\t"
1974 :"+r"(i)
1975 :"r"(dst), "r"(src)
1976 :"memory"
1977 );
1978 }
1979
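/* dst[i] = src0[i] * src1[len-1-i]: src1 is walked forwards while dst and
 * src0 are walked backwards, with pswapd (3DNow!ext) or shufps $0x1b (SSE)
 * reversing the element order inside each register. */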
1980 static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
1981 x86_reg i = len*4-16;
1982 asm volatile(
1983 "1: \n\t"
1984 "pswapd 8(%1), %%mm0 \n\t"
1985 "pswapd (%1), %%mm1 \n\t"
1986 "pfmul (%3,%0), %%mm0 \n\t"
1987 "pfmul 8(%3,%0), %%mm1 \n\t"
1988 "movq %%mm0, (%2,%0) \n\t"
1989 "movq %%mm1, 8(%2,%0) \n\t"
1990 "add $16, %1 \n\t"
1991 "sub $16, %0 \n\t"
1992 "jge 1b \n\t"
1993 :"+r"(i), "+r"(src1)
1994 :"r"(dst), "r"(src0)
1995 );
1996 asm volatile("femms");
1997 }
1998 static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
1999 x86_reg i = len*4-32;
2000 asm volatile(
2001 "1: \n\t"
2002 "movaps 16(%1), %%xmm0 \n\t"
2003 "movaps (%1), %%xmm1 \n\t"
2004 "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
2005 "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
2006 "mulps (%3,%0), %%xmm0 \n\t"
2007 "mulps 16(%3,%0), %%xmm1 \n\t"
2008 "movaps %%xmm0, (%2,%0) \n\t"
2009 "movaps %%xmm1, 16(%2,%0) \n\t"
2010 "add $32, %1 \n\t"
2011 "sub $32, %0 \n\t"
2012 "jge 1b \n\t"
2013 :"+r"(i), "+r"(src1)
2014 :"r"(dst), "r"(src0)
2015 );
2016 }
2017
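/* dst[i*step] = src0[i]*src1[i] + src2[i] + src3.  Only the common
 * src3 == 0 cases with step 1 (contiguous) or step 2 (results spread to
 * every other float) are done in asm; everything else falls back to
 * ff_vector_fmul_add_add_c(). */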
2018 static void vector_fmul_add_add_3dnow(float *dst, const float *src0, const float *src1,
2019 const float *src2, int src3, int len, int step){
2020 x86_reg i = (len-4)*4;
2021 if(step == 2 && src3 == 0){
2022 dst += (len-4)*2;
2023 asm volatile(
2024 "1: \n\t"
2025 "movq (%2,%0), %%mm0 \n\t"
2026 "movq 8(%2,%0), %%mm1 \n\t"
2027 "pfmul (%3,%0), %%mm0 \n\t"
2028 "pfmul 8(%3,%0), %%mm1 \n\t"
2029 "pfadd (%4,%0), %%mm0 \n\t"
2030 "pfadd 8(%4,%0), %%mm1 \n\t"
2031 "movd %%mm0, (%1) \n\t"
2032 "movd %%mm1, 16(%1) \n\t"
2033 "psrlq $32, %%mm0 \n\t"
2034 "psrlq $32, %%mm1 \n\t"
2035 "movd %%mm0, 8(%1) \n\t"
2036 "movd %%mm1, 24(%1) \n\t"
2037 "sub $32, %1 \n\t"
2038 "sub $16, %0 \n\t"
2039 "jge 1b \n\t"
2040 :"+r"(i), "+r"(dst)
2041 :"r"(src0), "r"(src1), "r"(src2)
2042 :"memory"
2043 );
2044 }
2045 else if(step == 1 && src3 == 0){
2046 asm volatile(
2047 "1: \n\t"
2048 "movq (%2,%0), %%mm0 \n\t"
2049 "movq 8(%2,%0), %%mm1 \n\t"
2050 "pfmul (%3,%0), %%mm0 \n\t"
2051 "pfmul 8(%3,%0), %%mm1 \n\t"
2052 "pfadd (%4,%0), %%mm0 \n\t"
2053 "pfadd 8(%4,%0), %%mm1 \n\t"
2054 "movq %%mm0, (%1,%0) \n\t"
2055 "movq %%mm1, 8(%1,%0) \n\t"
2056 "sub $16, %0 \n\t"
2057 "jge 1b \n\t"
2058 :"+r"(i)
2059 :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
2060 :"memory"
2061 );
2062 }
2063 else
2064 ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
2065 asm volatile("femms");
2066 }
2067 static void vector_fmul_add_add_sse(float *dst, const float *src0, const float *src1,
2068 const float *src2, int src3, int len, int step){
2069 x86_reg i = (len-8)*4;
2070 if(step == 2 && src3 == 0){
2071 dst += (len-8)*2;
2072 asm volatile(
2073 "1: \n\t"
2074 "movaps (%2,%0), %%xmm0 \n\t"
2075 "movaps 16(%2,%0), %%xmm1 \n\t"
2076 "mulps (%3,%0), %%xmm0 \n\t"
2077 "mulps 16(%3,%0), %%xmm1 \n\t"
2078 "addps (%4,%0), %%xmm0 \n\t"
2079 "addps 16(%4,%0), %%xmm1 \n\t"
2080 "movss %%xmm0, (%1) \n\t"
2081 "movss %%xmm1, 32(%1) \n\t"
2082 "movhlps %%xmm0, %%xmm2 \n\t"
2083 "movhlps %%xmm1, %%xmm3 \n\t"
2084 "movss %%xmm2, 16(%1) \n\t"
2085 "movss %%xmm3, 48(%1) \n\t"
2086 "shufps $0xb1, %%xmm0, %%xmm0 \n\t"
2087 "shufps $0xb1, %%xmm1, %%xmm1 \n\t"
2088 "movss %%xmm0, 8(%1) \n\t"
2089 "movss %%xmm1, 40(%1) \n\t"
2090 "movhlps %%xmm0, %%xmm2 \n\t"
2091 "movhlps %%xmm1, %%xmm3 \n\t"
2092 "movss %%xmm2, 24(%1) \n\t"
2093 "movss %%xmm3, 56(%1) \n\t"
2094 "sub $64, %1 \n\t"
2095 "sub $32, %0 \n\t"
2096 "jge 1b \n\t"
2097 :"+r"(i), "+r"(dst)
2098 :"r"(src0), "r"(src1), "r"(src2)
2099 :"memory"
2100 );
2101 }
2102 else if(step == 1 && src3 == 0){
2103 asm volatile(
2104 "1: \n\t"
2105 "movaps (%2,%0), %%xmm0 \n\t"
2106 "movaps 16(%2,%0), %%xmm1 \n\t"
2107 "mulps (%3,%0), %%xmm0 \n\t"
2108 "mulps 16(%3,%0), %%xmm1 \n\t"
2109 "addps (%4,%0), %%xmm0 \n\t"
2110 "addps 16(%4,%0), %%xmm1 \n\t"
2111 "movaps %%xmm0, (%1,%0) \n\t"
2112 "movaps %%xmm1, 16(%1,%0) \n\t"
2113 "sub $32, %0 \n\t"
2114 "jge 1b \n\t"
2115 :"+r"(i)
2116 :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
2117 :"memory"
2118 );
2119 }
2120 else
2121 ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
2122 }
2123
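/* Windowed overlap of two input vectors (overlap-add windowing as used
 * after an IMDCT).  Scalar equivalent of the add_bias == 0 fast path, with
 * dst, src0 and win advanced by len exactly as in the asm:
 *
 *   for (i = -len, j = len-1; i < 0; i++, j--) {
 *       dst[i] = src0[i]*win[j] - src1[j]*win[i];
 *       dst[j] = src0[i]*win[i] + src1[j]*win[j];
 *   }
 */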
2124 static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
2125 const float *win, float add_bias, int len){
2126 #ifdef HAVE_6REGS
2127 if(add_bias == 0){
2128 x86_reg i = -len*4;
2129 x86_reg j = len*4-8;
2130 asm volatile(
2131 "1: \n"
2132 "pswapd (%5,%1), %%mm1 \n"
2133 "movq (%5,%0), %%mm0 \n"
2134 "pswapd (%4,%1), %%mm5 \n"
2135 "movq (%3,%0), %%mm4 \n"
2136 "movq %%mm0, %%mm2 \n"
2137 "movq %%mm1, %%mm3 \n"
2138 "pfmul %%mm4, %%mm2 \n" // src0[len+i]*win[len+i]
2139 "pfmul %%mm5, %%mm3 \n" // src1[ j]*win[len+j]
2140 "pfmul %%mm4, %%mm1 \n" // src0[len+i]*win[len+j]
2141 "pfmul %%mm5, %%mm0 \n" // src1[ j]*win[len+i]
2142 "pfadd %%mm3, %%mm2 \n"
2143 "pfsub %%mm0, %%mm1 \n"
2144 "pswapd %%mm2, %%mm2 \n"
2145 "movq %%mm1, (%2,%0) \n"
2146 "movq %%mm2, (%2,%1) \n"
2147 "sub $8, %1 \n"
2148 "add $8, %0 \n"
2149 "jl 1b \n"
2150 "femms \n"
2151 :"+r"(i), "+r"(j)
2152 :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
2153 );
2154 }else
2155 #endif
2156 ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
2157 }
2158
2159 static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1,
2160 const float *win, float add_bias, int len){
2161 #ifdef HAVE_6REGS
2162 if(add_bias == 0){
2163 x86_reg i = -len*4;
2164 x86_reg j = len*4-16;
2165 asm volatile(
2166 "1: \n"
2167 "movaps (%5,%1), %%xmm1 \n"
2168 "movaps (%5,%0), %%xmm0 \n"
2169 "movaps (%4,%1), %%xmm5 \n"
2170 "movaps (%3,%0), %%xmm4 \n"
2171 "shufps $0x1b, %%xmm1, %%xmm1 \n"
2172 "shufps $0x1b, %%xmm5, %%xmm5 \n"
2173 "movaps %%xmm0, %%xmm2 \n"
2174 "movaps %%xmm1, %%xmm3 \n"
2175 "mulps %%xmm4, %%xmm2 \n" // src0[len+i]*win[len+i]
2176 "mulps %%xmm5, %%xmm3 \n" // src1[ j]*win[len+j]
2177 "mulps %%xmm4, %%xmm1 \n" // src0[len+i]*win[len+j]
2178 "mulps %%xmm5, %%xmm0 \n" // src1[ j]*win[len+i]
2179 "addps %%xmm3, %%xmm2 \n"
2180 "subps %%xmm0, %%xmm1 \n"
2181 "shufps $0x1b, %%xmm2, %%xmm2 \n"
2182 "movaps %%xmm1, (%2,%0) \n"
2183 "movaps %%xmm2, (%2,%1) \n"
2184 "sub $16, %1 \n"
2185 "add $16, %0 \n"
2186 "jl 1b \n"
2187 :"+r"(i), "+r"(j)
2188 :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
2189 );
2190 }else
2191 #endif
2192 ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
2193 }
2194
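/* dst[i] = src[i] * mul: int32 -> float conversion with scaling.  The SSE
 * version converts two ints at a time with cvtpi2ps and pairs the halves
 * with movlhps; the SSE2 version converts four at once with cvtdq2ps.
 * len must be a multiple of 8. */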
2195 static void int32_to_float_fmul_scalar_sse(float *dst, const int *src, float mul, int len)
2196 {
2197 x86_reg i = -4*len;
2198 asm volatile(
2199 "movss %3, %%xmm4 \n"
2200 "shufps $0, %%xmm4, %%xmm4 \n"
2201 "1: \n"
2202 "cvtpi2ps (%2,%0), %%xmm0 \n"
2203 "cvtpi2ps 8(%2,%0), %%xmm1 \n"
2204 "cvtpi2ps 16(%2,%0), %%xmm2 \n"
2205 "cvtpi2ps 24(%2,%0), %%xmm3 \n"
2206 "movlhps %%xmm1, %%xmm0 \n"
2207 "movlhps %%xmm3, %%xmm2 \n"
2208 "mulps %%xmm4, %%xmm0 \n"
2209 "mulps %%xmm4, %%xmm2 \n"
2210 "movaps %%xmm0, (%1,%0) \n"
2211 "movaps %%xmm2, 16(%1,%0) \n"
2212 "add $32, %0 \n"
2213 "jl 1b \n"
2214 :"+r"(i)
2215 :"r"(dst+len), "r"(src+len), "m"(mul)
2216 );
2217 }
2218
2219 static void int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len)
2220 {
2221 x86_reg i = -4*len;
2222 asm volatile(
2223 "movss %3, %%xmm4 \n"
2224 "shufps $0, %%xmm4, %%xmm4 \n"
2225 "1: \n"
2226 "cvtdq2ps (%2,%0), %%xmm0 \n"
2227 "cvtdq2ps 16(%2,%0), %%xmm1 \n"
2228 "mulps %%xmm4, %%xmm0 \n"
2229 "mulps %%xmm4, %%xmm1 \n"
2230 "movaps %%xmm0, (%1,%0) \n"
2231 "movaps %%xmm1, 16(%1,%0) \n"
2232 "add $32, %0 \n"
2233 "jl 1b \n"
2234 :"+r"(i)
2235 :"r"(dst+len), "r"(src+len), "m"(mul)
2236 );
2237 }
2238
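/* float -> int16 conversion with saturation (via packssdw).  Each version
 * pre-scales the pointers so one negative index register walks dst
 * (2 bytes/sample) and src (4 bytes/sample) together; len must be a
 * multiple of 8. */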
2239 static void float_to_int16_3dnow(int16_t *dst, const float *src, long len){
2240 // not bit-exact: pf2id uses different rounding than C and SSE
2241 asm volatile(
2242 "add %0 , %0 \n\t"
2243 "lea (%2,%0,2) , %2 \n\t"
2244 "add %0 , %1 \n\t"
2245 "neg %0 \n\t"
2246 "1: \n\t"
2247 "pf2id (%2,%0,2) , %%mm0 \n\t"
2248 "pf2id 8(%2,%0,2) , %%mm1 \n\t"
2249 "pf2id 16(%2,%0,2) , %%mm2 \n\t"
2250 "pf2id 24(%2,%0,2) , %%mm3 \n\t"
2251 "packssdw %%mm1 , %%mm0 \n\t"
2252 "packssdw %%mm3 , %%mm2 \n\t"
2253 "movq %%mm0 , (%1,%0) \n\t"
2254 "movq %%mm2 , 8(%1,%0) \n\t"
2255 "add $16 , %0 \n\t"
2256 " js 1b \n\t"
2257 "femms \n\t"
2258 :"+r"(len), "+r"(dst), "+r"(src)
2259 );
2260 }
2261 static void float_to_int16_sse(int16_t *dst, const float *src, long len){
2262 asm volatile(
2263 "add %0 , %0 \n\t"
2264 "lea (%2,%0,2) , %2 \n\t"
2265 "add %0 , %1 \n\t"
2266 "neg %0 \n\t"
2267 "1: \n\t"
2268 "cvtps2pi (%2,%0,2) , %%mm0 \n\t"
2269 "cvtps2pi 8(%2,%0,2) , %%mm1 \n\t"
2270 "cvtps2pi 16(%2,%0,2) , %%mm2 \n\t"
2271 "cvtps2pi 24(%2,%0,2) , %%mm3 \n\t"
2272 "packssdw %%mm1 , %%mm0 \n\t"
2273 "packssdw %%mm3 , %%mm2 \n\t"
2274 "movq %%mm0 , (%1,%0) \n\t"
2275 "movq %%mm2 , 8(%1,%0) \n\t"
2276 "add $16 , %0 \n\t"
2277 " js 1b \n\t"
2278 "emms \n\t"
2279 :"+r"(len), "+r"(dst), "+r"(src)
2280 );
2281 }
2282
2283 static void float_to_int16_sse2(int16_t *dst, const float *src, long len){
2284 asm volatile(
2285 "add %0 , %0 \n\t"
2286 "lea (%2,%0,2) , %2 \n\t"
2287 "add %0 , %1 \n\t"
2288 "neg %0 \n\t"
2289 "1: \n\t"
2290 "cvtps2dq (%2,%0,2) , %%xmm0 \n\t"
2291 "cvtps2dq 16(%2,%0,2) , %%xmm1 \n\t"
2292 "packssdw %%xmm1 , %%xmm0 \n\t"
2293 "movdqa %%xmm0 , (%1,%0) \n\t"
2294 "add $16 , %0 \n\t"
2295 " js 1b \n\t"
2296 :"+r"(len), "+r"(dst), "+r"(src)
2297 );
2298 }
2299
2300 #ifdef HAVE_YASM
2301 void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len);
2302 void ff_float_to_int16_interleave6_3dnow(int16_t *dst, const float **src, int len);
2303 void ff_float_to_int16_interleave6_3dn2(int16_t *dst, const float **src, int len);
2304 #else
2305 #define ff_float_to_int16_interleave6_sse(a,b,c) float_to_int16_interleave_misc_sse(a,b,c,6)
2306 #define ff_float_to_int16_interleave6_3dnow(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6)
2307 #define ff_float_to_int16_interleave6_3dn2(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6)
2308 #endif
2309 #define ff_float_to_int16_interleave6_sse2 ff_float_to_int16_interleave6_sse
2310
2311 #define FLOAT_TO_INT16_INTERLEAVE(cpu, body) \
2312 /* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2 */\
2313 static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\
2314 DECLARE_ALIGNED_16(int16_t, tmp[len]);\
2315 int i,j,c;\
2316 for(c=0; c<channels; c++){\
2317 float_to_int16_##cpu(tmp, src[c], len);\
2318 for(i=0, j=c; i<len; i++, j+=channels)\
2319 dst[j] = tmp[i];\
2320 }\
2321 }\
2322 \
2323 static void float_to_int16_interleave_##cpu(int16_t *dst, const float **src, long len, int channels){\
2324 if(channels==1)\
2325 float_to_int16_##cpu(dst, src[0], len);\
2326 else if(channels==2){\
2327 const float *src0 = src[0];\
2328 const float *src1 = src[1];\
2329 asm volatile(\
2330 "shl $2, %0 \n"\
2331 "add %0, %1 \n"\
2332 "add %0, %2 \n"\
2333 "add %0, %3 \n"\
2334 "neg %0 \n"\
2335 body\
2336 :"+r"(len), "+r"(dst), "+r"(src0), "+r"(src1)\
2337 );\
2338 }else if(channels==6){\
2339 ff_float_to_int16_interleave6_##cpu(dst, src, len);\
2340 }else\
2341 float_to_int16_interleave_misc_##cpu(dst, src, len, channels);\
2342 }
2343
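/* The per-CPU "body" below converts and interleaves two channels at a
 * time: %0 is the negated byte count, %1 the interleaved dst, %2/%3 the
 * two source channels.  punpcklwd/punpckhwd (or movhlps + punpcklwd in the
 * SSE2 version) zip the converted samples into L R L R order. */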
2344 FLOAT_TO_INT16_INTERLEAVE(3dnow,
2345 "1: \n"
2346 "pf2id (%2,%0), %%mm0 \n"
2347 "pf2id 8(%2,%0), %%mm1 \n"
2348 "pf2id (%3,%0), %%mm2 \n"
2349 "pf2id 8(%3,%0), %%mm3 \n"
2350 "packssdw %%mm1, %%mm0 \n"
2351 "packssdw %%mm3, %%mm2 \n"
2352 "movq %%mm0, %%mm1 \n"
2353 "punpcklwd %%mm2, %%mm0 \n"
2354 "punpckhwd %%mm2, %%mm1 \n"
2355 "movq %%mm0, (%1,%0)\n"
2356 "movq %%mm1, 8(%1,%0)\n"
2357 "add $16, %0 \n"
2358 "js 1b \n"
2359 "femms \n"
2360 )
2361
2362 FLOAT_TO_INT16_INTERLEAVE(sse,
2363 "1: \n"
2364 "cvtps2pi (%2,%0), %%mm0 \n"
2365 "cvtps2pi 8(%2,%0), %%mm1 \n"
2366 "cvtps2pi (%3,%0), %%mm2 \n"
2367 "cvtps2pi 8(%3,%0), %%mm3 \n"
2368 "packssdw %%mm1, %%mm0 \n"
2369 "packssdw %%mm3, %%mm2 \n"
2370 "movq %%mm0, %%mm1 \n"
2371 "punpcklwd %%mm2, %%mm0 \n"
2372 "punpckhwd %%mm2, %%mm1 \n"
2373 "movq %%mm0, (%1,%0)\n"
2374 "movq %%mm1, 8(%1,%0)\n"
2375 "add $16, %0 \n"
2376 "js 1b \n"
2377 "emms \n"
2378 )
2379
2380 FLOAT_TO_INT16_INTERLEAVE(sse2,
2381 "1: \n"
2382 "cvtps2dq (%2,%0), %%xmm0 \n"
2383 "cvtps2dq (%3,%0), %%xmm1 \n"
2384 "packssdw %%xmm1, %%xmm0 \n"
2385 "movhlps %%xmm0, %%xmm1 \n"
2386 "punpcklwd %%xmm1, %%xmm0 \n"
2387 "movdqa %%xmm0, (%1,%0) \n"
2388 "add $16, %0 \n"
2389 "js 1b \n"
2390 )
2391
2392 static void float_to_int16_interleave_3dn2(int16_t *dst, const float **src, long len, int channels){
2393 if(channels==6)
2394 ff_float_to_int16_interleave6_3dn2(dst, src, len);
2395 else
2396 float_to_int16_interleave_3dnow(dst, src, len, channels);
2397 }
2398
2399
2400 extern void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width);
2401 extern void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, int width);
2402 extern void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
2403 extern void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
2404 extern void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
2405 int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
2406 extern void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
2407 int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
2408
2409
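/* v1[i] += v2[i] (resp. v1[i] -= v2[i]) for "order" int16s: v1 is accessed
 * with aligned movdqa, so it must be 16-byte aligned and order a multiple
 * of 16; v2 is loaded with movdqu and may be unaligned. */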
2410 static void add_int16_sse2(int16_t * v1, int16_t * v2, int order)
2411 {
2412 x86_reg o = -(order << 1);
2413 v1 += order;
2414 v2 += order;
2415 asm volatile(
2416 "1: \n\t"
2417 "movdqu (%1,%2), %%xmm0 \n\t"
2418 "movdqu 16(%1,%2), %%xmm1 \n\t"
2419 "paddw (%0,%2), %%xmm0 \n\t"
2420 "paddw 16(%0,%2), %%xmm1 \n\t"
2421 "movdqa %%xmm0, (%0,%2) \n\t"
2422 "movdqa %%xmm1, 16(%0,%2) \n\t"
2423 "add $32, %2 \n\t"
2424 "js 1b \n\t"
2425 : "+r"(v1), "+r"(v2), "+r"(o)
2426 );
2427 }
2428
2429 static void sub_int16_sse2(int16_t * v1, int16_t * v2, int order)
2430 {
2431 x86_reg o = -(order << 1);
2432 v1 += order;
2433 v2 += order;
2434 asm volatile(
2435 "1: \n\t"
2436 "movdqa (%0,%2), %%xmm0 \n\t"
2437 "movdqa 16(%0,%2), %%xmm2 \n\t"
2438 "movdqu (%1,%2), %%xmm1 \n\t"
2439 "movdqu 16(%1,%2), %%xmm3 \n\t"
2440 "psubw %%xmm1, %%xmm0 \n\t"
2441 "psubw %%xmm3, %%xmm2 \n\t"
2442 "movdqa %%xmm0, (%0,%2) \n\t"
2443 "movdqa %%xmm2, 16(%0,%2) \n\t"
2444 "add $32, %2 \n\t"
2445 "js 1b \n\t"
2446 : "+r"(v1), "+r"(v2), "+r"(o)
2447 );
2448 }
2449
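/* Dot product of two int16 vectors: pmaddwd accumulates pairwise products
 * into four dword sums, which are folded horizontally (movhlps + paddd),
 * arithmetically shifted right by "shift", folded once more (pshuflw $0x4E
 * swaps the two low dwords) and returned in the low dword. */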
2450 static int32_t scalarproduct_int16_sse2(int16_t * v1, int16_t * v2, int order, int shift)
2451 {
2452 int res = 0;
2453 DECLARE_ALIGNED_16(int64_t, sh);
2454 x86_reg o = -(order << 1);
2455
2456 v1 += order;
2457 v2 += order;
2458 sh = shift;
2459 asm volatile(
2460 "pxor %%xmm7, %%xmm7 \n\t"
2461 "1: \n\t"
2462 "movdqu (%0,%3), %%xmm0 \n\t"
2463 "movdqu 16(%0,%3), %%xmm1 \n\t"
2464 "pmaddwd (%1,%3), %%xmm0 \n\t"
2465 "pmaddwd 16(%1,%3), %%xmm1 \n\t"
2466 "paddd %%xmm0, %%xmm7 \n\t"
2467 "paddd %%xmm1, %%xmm7 \n\t"
2468 "add $32, %3 \n\t"
2469 "js 1b \n\t"
2470 "movhlps %%xmm7, %%xmm2 \n\t"
2471 "paddd %%xmm2, %%xmm7 \n\t"
2472 "psrad %4, %%xmm7 \n\t"
2473 "pshuflw $0x4E, %%xmm7,%%xmm2 \n\t"
2474 "paddd %%xmm2, %%xmm7 \n\t"
2475 "movd %%xmm7, %2 \n\t"
2476 : "+r"(v1), "+r"(v2), "=r"(res), "+r"(o)
2477 : "m"(sh)
2478 );
2479 return res;
2480 }
2481
2482 void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
2483 {
2484 mm_flags = mm_support();
2485
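/* dsp_mask lets the caller force CPU-extension bits on (FF_MM_FORCE) or
 * mask detected ones off, mainly for testing and benchmarking. */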
2486 if (avctx->dsp_mask) {
2487 if (avctx->dsp_mask & FF_MM_FORCE)
2488 mm_flags |= (avctx->dsp_mask & 0xffff);
2489 else
2490 mm_flags &= ~(avctx->dsp_mask & 0xffff);
2491 }
2492
2493 #if 0
2494 av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
2495 if (mm_flags & MM_MMX)
2496 av_log(avctx, AV_LOG_INFO, " mmx");
2497 if (mm_flags & MM_MMXEXT)
2498 av_log(avctx, AV_LOG_INFO, " mmxext");
2499 if (mm_flags & MM_3DNOW)
2500 av_log(avctx, AV_LOG_INFO, " 3dnow");
2501 if (mm_flags & MM_SSE)
2502 av_log(avctx, AV_LOG_INFO, " sse");
2503 if (mm_flags & MM_SSE2)
2504 av_log(avctx, AV_LOG_INFO, " sse2");
2505 av_log(avctx, AV_LOG_INFO, "\n");
2506 #endif
2507
2508 if (mm_flags & MM_MMX) {
2509 const int idct_algo= avctx->idct_algo;
2510
2511 if(avctx->lowres==0){
2512 if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
2513 c->idct_put= ff_simple_idct_put_mmx;
2514 c->idct_add= ff_simple_idct_add_mmx;
2515 c->idct = ff_simple_idct_mmx;
2516 c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
2517 #ifdef CONFIG_GPL
2518 }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
2519 if(mm_flags & MM_MMXEXT){
2520 c->idct_put= ff_libmpeg2mmx2_idct_put;
2521 c->idct_add= ff_libmpeg2mmx2_idct_add;
2522 c->idct = ff_mmxext_idct;
2523 }else{
2524 c->idct_put= ff_libmpeg2mmx_idct_put;
2525 c->idct_add= ff_libmpeg2mmx_idct_add;
2526 c->idct = ff_mmx_idct;
2527 }
2528 c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
2529 #endif
2530 }else if((ENABLE_VP3_DECODER || ENABLE_VP5_DECODER || ENABLE_VP6_DECODER || ENABLE_THEORA_DECODER) &&
2531 idct_algo==FF_IDCT_VP3){
2532 if(mm_flags & MM_SSE2){
2533 c->idct_put= ff_vp3_idct_put_sse2;
2534 c->idct_add= ff_vp3_idct_add_sse2;
2535 c->idct = ff_vp3_idct_sse2;
2536 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
2537 }else{
2538 c->idct_put= ff_vp3_idct_put_mmx;
2539 c->idct_add= ff_vp3_idct_add_mmx;
2540 c->idct = ff_vp3_idct_mmx;
2541 c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
2542 }
2543 }else if(idct_algo==FF_IDCT_CAVS){
2544 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
2545 }else if(idct_algo==FF_IDCT_XVIDMMX){
2546 if(mm_flags & MM_SSE2){
2547 c->idct_put= ff_idct_xvid_sse2_put;
2548 c->idct_add= ff_idct_xvid_sse2_add;
2549 c->idct = ff_idct_xvid_sse2;
2550 c->idct_permutation_type= FF_SSE2_IDCT_PERM;
2551 }else if(mm_flags & MM_MMXEXT){
2552 c->idct_put= ff_idct_xvid_mmx2_put;
2553 c->idct_add= ff_idct_xvid_mmx2_add;
2554 c->idct = ff_idct_xvid_mmx2;
2555 }else{
2556 c->idct_put= ff_idct_xvid_mmx_put;
2557 c->idct_add= ff_idct_xvid_mmx_add;
2558 c->idct = ff_idct_xvid_mmx;
2559 }
2560 }
2561 }
2562
2563 c->put_pixels_clamped = put_pixels_clamped_mmx;
2564 c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
2565 c->add_pixels_clamped = add_pixels_clamped_mmx;
2566 c->clear_blocks = clear_blocks_mmx;
2567
2568 #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
2569 c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
2570 c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
2571 c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
2572 c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU
2573
2574 SET_HPEL_FUNCS(put, 0, 16, mmx);
2575 SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
2576 SET_HPEL_FUNCS(avg, 0, 16, mmx);
2577 SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
2578 SET_HPEL_FUNCS(put, 1, 8, mmx);
2579 SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
2580 SET_HPEL_FUNCS(avg, 1, 8, mmx);
2581 SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);
2582
2583 c->gmc= gmc_mmx;
2584
2585 c->add_bytes= add_bytes_mmx;
2586 c->add_bytes_l2= add_bytes_l2_mmx;
2587
2588 c->draw_edges = draw_edges_mmx;
2589
2590 if (ENABLE_ANY_H263) {
2591 c->h263_v_loop_filter= h263_v_loop_filter_mmx;
2592 c->h263_h_loop_filter= h263_h_loop_filter_mmx;
2593 }
2594 c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_rnd;
2595 c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx;
2596 c->put_no_rnd_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_nornd;
2597
2598 c->h264_idct_dc_add=
2599 c->h264_idct_add= ff_h264_idct_add_mmx;
2600 c->h264_idct8_dc_add=
2601 c->h264_idct8_add= ff_h264_idct8_add_mmx;
2602 if (mm_flags & MM_SSE2)
2603 c->h264_idct8_add= ff_h264_idct8_add_sse2;
2604
2605 if (mm_flags & MM_MMXEXT) {
2606 c->prefetch = prefetch_mmx2;
2607
2608 c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
2609 c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
2610
2611 c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
2612 c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
2613 c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
2614
2615 c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
2616 c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
2617
2618 c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
2619 c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
2620 c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
2621
2622 c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
2623 c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;
2624
2625 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
2626 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
2627 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
2628 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
2629 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
2630 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
2631 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
2632 }
2633
2634 #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \
2635 c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \
2636 c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \
2637 c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \
2638 c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \
2639 c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \
2640 c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \
2641 c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \
2642 c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \
2643 c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \
2644 c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \
2645 c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \
2646 c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \
2647 c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \
2648 c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \
2649 c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \
2650 c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU
2651
2652 SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2);
2653 SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2);
2654 SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2);
2655 SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2);
2656 SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2);
2657 SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2);
2658
2659 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2);
2660 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2);
2661 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2);
2662 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2);
2663 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2);
2664 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2);
2665
2666 SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2);
2667 SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2);
2668 SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2);
2669 SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2);
2670
2671 c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2_rnd;
2672 c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2;
2673 c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2;
2674 c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2;
2675 c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
2676 c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
2677 c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
2678 c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
2679 c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
2680 c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;
2681 c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;
2682
2683 c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
2684 c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
2685 c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
2686 c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
2687 c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
2688 c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
2689 c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
2690 c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;
2691
2692 c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
2693 c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
2694 c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
2695 c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
2696 c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
2697 c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
2698 c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
2699 c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;
2700
2701 if (ENABLE_CAVS_DECODER)
2702 ff_cavsdsp_init_mmx2(c, avctx);
2703
2704 if (ENABLE_VC1_DECODER || ENABLE_WMV3_DECODER)
2705 ff_vc1dsp_init_mmx(c, avctx);
2706
2707 c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
2708 } else if (mm_flags & MM_3DNOW) {
2709 c->prefetch = prefetch_3dnow;
2710
2711 c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
2712 c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
2713
2714 c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
2715 c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
2716 c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
2717
2718 c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
2719 c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
2720
2721 c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
2722 c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
2723 c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
2724
2725 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
2726 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
2727 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
2728 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
2729 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
2730 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
2731 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
2732 }
2733
2734 SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow);
2735 SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow);
2736 SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow);
2737 SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow);
2738 SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
2739 SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);
2740
2741 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
2742 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
2743 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
2744 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow);
2745 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow);
2746 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow);
2747
2748 SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow);
2749 SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow);
2750 SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
2751 SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);
2752
2753 c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow_rnd;
2754 c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;
2755
2756 if (ENABLE_CAVS_DECODER)
2757 ff_cavsdsp_init_3dnow(c, avctx);
2758 }
2759
2760
2761 #define H264_QPEL_FUNCS(x, y, CPU)\
2762 c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
2763 c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
2764 c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
2765 c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
2766 if((mm_flags & MM_SSE2) && !(mm_flags & MM_3DNOW)){
2767 // these functions are slower than mmx on AMD, but faster on Intel
2768 /* FIXME works in most codecs, but crashes svq1 due to unaligned chroma
2769 c->put_pixels_tab[0][0] = put_pixels16_sse2;
2770 c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
2771 */
2772 H264_QPEL_FUNCS(0, 0, sse2);
2773 }
2774 if(mm_flags & MM_SSE2){
2775 H264_QPEL_FUNCS(0, 1, sse2);
2776 H264_QPEL_FUNCS(0, 2, sse2);
2777 H264_QPEL_FUNCS(0, 3, sse2);
2778 H264_QPEL_FUNCS(1, 1, sse2);
2779 H264_QPEL_FUNCS(1, 2, sse2);
2780 H264_QPEL_FUNCS(1, 3, sse2);
2781 H264_QPEL_FUNCS(2, 1, sse2);
2782 H264_QPEL_FUNCS(2, 2, sse2);
2783 H264_QPEL_FUNCS(2, 3, sse2);
2784 H264_QPEL_FUNCS(3, 1, sse2);
2785 H264_QPEL_FUNCS(3, 2, sse2);
2786 H264_QPEL_FUNCS(3, 3, sse2);
2787 }
2788 #ifdef HAVE_SSSE3
2789 if(mm_flags & MM_SSSE3){
2790 H264_QPEL_FUNCS(1, 0, ssse3);
2791 H264_QPEL_FUNCS(1, 1, ssse3);
2792 H264_QPEL_FUNCS(1, 2, ssse3);
2793 H264_QPEL_FUNCS(1, 3, ssse3);
2794 H264_QPEL_FUNCS(2, 0, ssse3);
2795 H264_QPEL_FUNCS(2, 1, ssse3);
2796 H264_QPEL_FUNCS(2, 2, ssse3);
2797 H264_QPEL_FUNCS(2, 3, ssse3);
2798 H264_QPEL_FUNCS(3, 0, ssse3);
2799 H264_QPEL_FUNCS(3, 1, ssse3);
2800 H264_QPEL_FUNCS(3, 2, ssse3);
2801 H264_QPEL_FUNCS(3, 3, ssse3);
2802 c->put_no_rnd_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_nornd;
2803 c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_rnd;
2804 c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_ssse3_rnd;
2805 c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_ssse3;
2806 c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_ssse3;
2807 c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
2808 }
2809 #endif
2810
2811 #ifdef CONFIG_SNOW_DECODER
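/* note: the "& 0" makes this condition always false, so the SSE2 snow
 * functions are currently disabled and the MMX path below is used
 * instead */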
2812 if(mm_flags & MM_SSE2 & 0){
2813 c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
2814 #ifdef HAVE_7REGS
2815 c->vertical_compose97i = ff_snow_vertical_compose97i_sse2;
2816 #endif
2817 c->inner_add_yblock = ff_snow_inner_add_yblock_sse2;
2818 }
2819 else{
2820 if(mm_flags & MM_MMXEXT){
2821 c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx;
2822 #ifdef HAVE_7REGS
2823 c->vertical_compose97i = ff_snow_vertical_compose97i_mmx;
2824 #endif
2825 }
2826 c->inner_add_yblock = ff_snow_inner_add_yblock_mmx;
2827 }
2828 #endif
2829
2830 if(mm_flags & MM_3DNOW){
2831 c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
2832 c->vector_fmul = vector_fmul_3dnow;
2833 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
2834 c->float_to_int16 = float_to_int16_3dnow;
2835 c->float_to_int16_interleave = float_to_int16_interleave_3dnow;
2836 }
2837 }
2838 if(mm_flags & MM_3DNOWEXT){
2839 c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
2840 c->vector_fmul_window = vector_fmul_window_3dnow2;
2841 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
2842 c->float_to_int16_interleave = float_to_int16_interleave_3dn2;
2843 }
2844 }
2845 if(mm_flags & MM_SSE){
2846 c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
2847 c->ac3_downmix = ac3_downmix_sse;
2848 c->vector_fmul = vector_fmul_sse;
2849 c->vector_fmul_reverse = vector_fmul_reverse_sse;
2850 c->vector_fmul_add_add = vector_fmul_add_add_sse;
2851 c->vector_fmul_window = vector_fmul_window_sse;
2852 c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse;
2853 c->float_to_int16 = float_to_int16_sse;
2854 c->float_to_int16_interleave = float_to_int16_interleave_sse;
2855 }
2856 if(mm_flags & MM_3DNOW)
2857 c->vector_fmul_add_add = vector_fmul_add_add_3dnow; // faster than sse
2858 if(mm_flags & MM_SSE2){
2859 c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2;
2860 c->float_to_int16 = float_to_int16_sse2;
2861 c->float_to_int16_interleave = float_to_int16_interleave_sse2;
2862 c->add_int16 = add_int16_sse2;
2863 c->sub_int16 = sub_int16_sse2;
2864 c->scalarproduct_int16 = scalarproduct_int16_sse2;
2865 }
2866 }
2867
2868 if (ENABLE_ENCODERS)
2869 dsputilenc_init_mmx(c, avctx);
2870
2871 #if 0
2872 // for speed testing
2873 get_pixels = just_return;
2874 put_pixels_clamped = just_return;
2875 add_pixels_clamped = just_return;
2876
2877 pix_abs16x16 = just_return;
2878 pix_abs16x16_x2 = just_return;
2879 pix_abs16x16_y2 = just_return;
2880 pix_abs16x16_xy2 = just_return;
2881
2882 put_pixels_tab[0] = just_return;
2883 put_pixels_tab[1] = just_return;
2884 put_pixels_tab[2] = just_return;
2885 put_pixels_tab[3] = just_return;
2886
2887 put_no_rnd_pixels_tab[0] = just_return;
2888 put_no_rnd_pixels_tab[1] = just_return;
2889 put_no_rnd_pixels_tab[2] = just_return;
2890 put_no_rnd_pixels_tab[3] = just_return;
2891
2892 avg_pixels_tab[0] = just_return;
2893 avg_pixels_tab[1] = just_return;
2894 avg_pixels_tab[2] = just_return;
2895 avg_pixels_tab[3] = just_return;
2896
2897 avg_no_rnd_pixels_tab[0] = just_return;
2898 avg_no_rnd_pixels_tab[1] = just_return;
2899 avg_no_rnd_pixels_tab[2] = just_return;
2900 avg_no_rnd_pixels_tab[3] = just_return;
2901
2902 //av_fdct = just_return;
2903 //ff_idct = just_return;
2904 #endif
2905 }