/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h263.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "mmx.h" /* movq_m2r()/movq_r2m() macros used below */
#include "vp3dsp_mmx.h"
#include "vp3dsp_sse2.h"
#include "idct_xvid.h"
int mm_flags; /* multimedia extension flags */
/* pixel operations */
DECLARE_ALIGNED_8 (const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED_16(const uint64_t, ff_pdw_80000000[2]) =
{0x8000000080000000ULL, 0x8000000080000000ULL};

DECLARE_ALIGNED_8 (const uint64_t, ff_pw_3  ) = 0x0003000300030003ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_4  ) = 0x0004000400040004ULL;
DECLARE_ALIGNED_16(const xmm_t,    ff_pw_5  ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_8  ) = 0x0008000800080008ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED_16(const xmm_t,    ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED_16(const xmm_t,    ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
DECLARE_ALIGNED_16(const xmm_t,    ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_64 ) = 0x0040004000400040ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;

DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1  ) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3  ) = 0x0303030303030303ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_7  ) = 0x0707070707070707ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;

DECLARE_ALIGNED_16(const double, ff_pd_1[2]) = { 1.0, 1.0 };
DECLARE_ALIGNED_16(const double, ff_pd_2[2]) = { 2.0, 2.0 };
#define JUMPALIGN() asm volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd)  asm volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_BFE(regd) \
    asm volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd "   \n\t"\
    "paddb %%" #regd ", %%" #regd "     \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd)  asm volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd)  asm volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
// for shared library it's better to use this way for accessing constants
#define MOVQ_BONE(regd) \
    asm volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd "   \n\t" \
    "psrlw $15, %%" #regd "             \n\t" \
    "packuswb %%" #regd ", %%" #regd "  \n\t" ::)

#define MOVQ_WTWO(regd) \
    asm volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd "   \n\t" \
    "psrlw $15, %%" #regd "             \n\t" \
    "psllw $1, %%" #regd "              \n\t"::)

#endif
// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"
#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "por  " #regb ", " #regr "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"
// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr,  regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pand " #regd ", " #regp "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "por  " #regb ", " #regr "  \n\t"\
    "por  " #regd ", " #regp "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"
/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND  MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX_NO_RND(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND  MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB
#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx
/***********************************/
/* standard MMX */

void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
    asm volatile(
            "movq   %3, %%mm0           \n\t"
            "movq   8%3, %%mm1          \n\t"
            "movq   16%3, %%mm2         \n\t"
            "movq   24%3, %%mm3         \n\t"
            "movq   32%3, %%mm4         \n\t"
            "movq   40%3, %%mm5         \n\t"
            "movq   48%3, %%mm6         \n\t"
            "movq   56%3, %%mm7         \n\t"
            "packuswb %%mm1, %%mm0      \n\t"
            "packuswb %%mm3, %%mm2      \n\t"
            "packuswb %%mm5, %%mm4      \n\t"
            "packuswb %%mm7, %%mm6      \n\t"
            "movq   %%mm0, (%0)         \n\t"
            "movq   %%mm2, (%0, %1)     \n\t"
            "movq   %%mm4, (%0, %1, 2)  \n\t"
            "movq   %%mm6, (%0, %2)     \n\t"
            ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
            :"memory");
    pix += line_size*4;
    p += 32;

    // if here would be an exact copy of the code above
    // compiler would generate some very strange code
    // thus using "r"
    asm volatile(
            "movq       (%3), %%mm0     \n\t"
            "movq       8(%3), %%mm1    \n\t"
            "movq       16(%3), %%mm2   \n\t"
            "movq       24(%3), %%mm3   \n\t"
            "movq       32(%3), %%mm4   \n\t"
            "movq       40(%3), %%mm5   \n\t"
            "movq       48(%3), %%mm6   \n\t"
            "movq       56(%3), %%mm7   \n\t"
            "packuswb %%mm1, %%mm0      \n\t"
            "packuswb %%mm3, %%mm2      \n\t"
            "packuswb %%mm5, %%mm4      \n\t"
            "packuswb %%mm7, %%mm6      \n\t"
            "movq   %%mm0, (%0)         \n\t"
            "movq   %%mm2, (%0, %1)     \n\t"
            "movq   %%mm4, (%0, %1, 2)  \n\t"
            "movq   %%mm6, (%0, %2)     \n\t"
            ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
            :"memory");
}
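/* Illustrative only (not in the original): the packuswb-based store above is
 * equivalent to clamping each 16-bit coefficient to 0..255 row by row, e.g.: */
#if 0
static void put_pixels_clamped_ref(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++)
        for (j = 0; j < 8; j++)
            pixels[i*line_size + j] = av_clip_uint8(block[i*8 + j]);
}
#endif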
static DECLARE_ALIGNED_8(const unsigned char, vector128[8]) =
  { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i;

    movq_m2r(*vector128, mm1);
    for (i = 0; i < 8; i++) {
        movq_m2r(*(block), mm0);
        packsswb_m2r(*(block + 4), mm0);
        block += 8;
        paddb_r2r(mm1, mm0);
        movq_r2m(mm0, *pixels);
        pixels += line_size;
    }
}
void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        asm volatile(
                "movq   (%2), %%mm0     \n\t"
                "movq   8(%2), %%mm1    \n\t"
                "movq   16(%2), %%mm2   \n\t"
                "movq   24(%2), %%mm3   \n\t"
                "movq   %0, %%mm4       \n\t"
                "movq   %1, %%mm6       \n\t"
                "movq   %%mm4, %%mm5    \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "punpckhbw %%mm7, %%mm5 \n\t"
                "paddsw %%mm4, %%mm0    \n\t"
                "paddsw %%mm5, %%mm1    \n\t"
                "movq   %%mm6, %%mm5    \n\t"
                "punpcklbw %%mm7, %%mm6 \n\t"
                "punpckhbw %%mm7, %%mm5 \n\t"
                "paddsw %%mm6, %%mm2    \n\t"
                "paddsw %%mm5, %%mm3    \n\t"
                "packuswb %%mm1, %%mm0  \n\t"
                "packuswb %%mm3, %%mm2  \n\t"
                "movq   %%mm0, %0       \n\t"
                "movq   %%mm2, %1       \n\t"
                :"+m"(*pix), "+m"(*(pix+line_size))
                :"r"(p)
                :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}
static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ASMALIGN(3)
         "1:                            \n\t"
         "movd (%1), %%mm0              \n\t"
         "movd (%1, %3), %%mm1          \n\t"
         "movd %%mm0, (%2)              \n\t"
         "movd %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movd (%1), %%mm0              \n\t"
         "movd (%1, %3), %%mm1          \n\t"
         "movd %%mm0, (%2)              \n\t"
         "movd %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}
static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ASMALIGN(3)
         "1:                            \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}
static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ASMALIGN(3)
         "1:                            \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq 8(%1), %%mm4             \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq 8(%1, %3), %%mm5         \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm4, 8(%2)             \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "movq %%mm5, 8(%2, %3)         \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq 8(%1), %%mm4             \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq 8(%1, %3), %%mm5         \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm4, 8(%2)             \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "movq %%mm5, 8(%2, %3)         \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}
static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
         "1:                            \n\t"
         "movdqu (%1), %%xmm0           \n\t"
         "movdqu (%1,%3), %%xmm1        \n\t"
         "movdqu (%1,%3,2), %%xmm2      \n\t"
         "movdqu (%1,%4), %%xmm3        \n\t"
         "movdqa %%xmm0, (%2)           \n\t"
         "movdqa %%xmm1, (%2,%3)        \n\t"
         "movdqa %%xmm2, (%2,%3,2)      \n\t"
         "movdqa %%xmm3, (%2,%4)        \n\t"
         "subl $4, %0                   \n\t"
         "lea (%1,%3,4), %1             \n\t"
         "lea (%2,%3,4), %2             \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
         : "memory"
        );
}
static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
         "1:                            \n\t"
         "movdqu (%1), %%xmm0           \n\t"
         "movdqu (%1,%3), %%xmm1        \n\t"
         "movdqu (%1,%3,2), %%xmm2      \n\t"
         "movdqu (%1,%4), %%xmm3        \n\t"
         "pavgb  (%2), %%xmm0           \n\t"
         "pavgb  (%2,%3), %%xmm1        \n\t"
         "pavgb  (%2,%3,2), %%xmm2      \n\t"
         "pavgb  (%2,%4), %%xmm3        \n\t"
         "movdqa %%xmm0, (%2)           \n\t"
         "movdqa %%xmm1, (%2,%3)        \n\t"
         "movdqa %%xmm2, (%2,%3,2)      \n\t"
         "movdqa %%xmm3, (%2,%4)        \n\t"
         "subl $4, %0                   \n\t"
         "lea (%1,%3,4), %1             \n\t"
         "lea (%2,%3,4), %2             \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
         : "memory"
        );
}
static void clear_blocks_mmx(DCTELEM *blocks)
{
    asm volatile(
                "pxor %%mm7, %%mm7              \n\t"
                "mov $-128*6, %%"REG_a"         \n\t"
                "1:                             \n\t"
                "movq %%mm7, (%0, %%"REG_a")    \n\t"
                "movq %%mm7, 8(%0, %%"REG_a")   \n\t"
                "movq %%mm7, 16(%0, %%"REG_a")  \n\t"
                "movq %%mm7, 24(%0, %%"REG_a")  \n\t"
                "add $32, %%"REG_a"             \n\t"
                " js 1b                         \n\t"
                : : "r" (((uint8_t *)blocks)+128*6)
                : "%"REG_a
        );
}
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    x86_reg i=0;
    asm volatile(
        "1:                         \n\t"
        "movq  (%1, %0), %%mm0      \n\t"
        "movq  (%2, %0), %%mm1      \n\t"
        "paddb %%mm0, %%mm1         \n\t"
        "movq %%mm1, (%2, %0)       \n\t"
        "movq 8(%1, %0), %%mm0      \n\t"
        "movq 8(%2, %0), %%mm1      \n\t"
        "paddb %%mm0, %%mm1         \n\t"
        "movq %%mm1, 8(%2, %0)      \n\t"
        "add $16, %0                \n\t"
        "cmp %3, %0                 \n\t"
        " jb 1b                     \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    x86_reg i=0;
    asm volatile(
        "1:                         \n\t"
        "movq   (%2, %0), %%mm0     \n\t"
        "movq  8(%2, %0), %%mm1     \n\t"
        "paddb  (%3, %0), %%mm0     \n\t"
        "paddb 8(%3, %0), %%mm1     \n\t"
        "movq %%mm0,  (%1, %0)      \n\t"
        "movq %%mm1, 8(%1, %0)      \n\t"
        "add $16, %0                \n\t"
        "cmp %4, %0                 \n\t"
        " jb 1b                     \n\t"
        : "+r" (i)
        : "r"(dst), "r"(src1), "r"(src2), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i] = src1[i] + src2[i];
}
#define H263_LOOP_FILTER \
"pxor %%mm7, %%mm7              \n\t"\
"movq  %0, %%mm0                \n\t"\
"movq  %0, %%mm1                \n\t"\
"movq  %3, %%mm2                \n\t"\
"movq  %3, %%mm3                \n\t"\
"punpcklbw %%mm7, %%mm0         \n\t"\
"punpckhbw %%mm7, %%mm1         \n\t"\
"punpcklbw %%mm7, %%mm2         \n\t"\
"punpckhbw %%mm7, %%mm3         \n\t"\
"psubw %%mm2, %%mm0             \n\t"\
"psubw %%mm3, %%mm1             \n\t"\
"movq  %1, %%mm2                \n\t"\
"movq  %1, %%mm3                \n\t"\
"movq  %2, %%mm4                \n\t"\
"movq  %2, %%mm5                \n\t"\
"punpcklbw %%mm7, %%mm2         \n\t"\
"punpckhbw %%mm7, %%mm3         \n\t"\
"punpcklbw %%mm7, %%mm4         \n\t"\
"punpckhbw %%mm7, %%mm5         \n\t"\
"psubw %%mm2, %%mm4             \n\t"\
"psubw %%mm3, %%mm5             \n\t"\
"psllw $2, %%mm4                \n\t"\
"psllw $2, %%mm5                \n\t"\
"paddw %%mm0, %%mm4             \n\t"\
"paddw %%mm1, %%mm5             \n\t"\
"pxor %%mm6, %%mm6              \n\t"\
"pcmpgtw %%mm4, %%mm6           \n\t"\
"pcmpgtw %%mm5, %%mm7           \n\t"\
"pxor %%mm6, %%mm4              \n\t"\
"pxor %%mm7, %%mm5              \n\t"\
"psubw %%mm6, %%mm4             \n\t"\
"psubw %%mm7, %%mm5             \n\t"\
"psrlw $3, %%mm4                \n\t"\
"psrlw $3, %%mm5                \n\t"\
"packuswb %%mm5, %%mm4          \n\t"\
"packsswb %%mm7, %%mm6          \n\t"\
"pxor %%mm7, %%mm7              \n\t"\
"movd %4, %%mm2                 \n\t"\
"punpcklbw %%mm2, %%mm2         \n\t"\
"punpcklbw %%mm2, %%mm2         \n\t"\
"punpcklbw %%mm2, %%mm2         \n\t"\
"psubusb %%mm4, %%mm2           \n\t"\
"movq %%mm2, %%mm3              \n\t"\
"psubusb %%mm4, %%mm3           \n\t"\
"psubb %%mm3, %%mm2             \n\t"\
"movq %1, %%mm3                 \n\t"\
"movq %2, %%mm4                 \n\t"\
"pxor %%mm6, %%mm3              \n\t"\
"pxor %%mm6, %%mm4              \n\t"\
"paddusb %%mm2, %%mm3           \n\t"\
"psubusb %%mm2, %%mm4           \n\t"\
"pxor %%mm6, %%mm3              \n\t"\
"pxor %%mm6, %%mm4              \n\t"\
"paddusb %%mm2, %%mm2           \n\t"\
"packsswb %%mm1, %%mm0          \n\t"\
"pcmpgtb %%mm0, %%mm7           \n\t"\
"pxor %%mm7, %%mm0              \n\t"\
"psubb %%mm7, %%mm0             \n\t"\
"movq %%mm0, %%mm1              \n\t"\
"psubusb %%mm2, %%mm0           \n\t"\
"psubb %%mm0, %%mm1             \n\t"\
"pand %5, %%mm1                 \n\t"\
"psrlw $2, %%mm1                \n\t"\
"pxor %%mm7, %%mm1              \n\t"\
"psubb %%mm7, %%mm1             \n\t"\
"movq %0, %%mm5                 \n\t"\
"movq %3, %%mm6                 \n\t"\
"psubb %%mm1, %%mm5             \n\t"\
"paddb %%mm1, %%mm6             \n\t"
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(ENABLE_ANY_H263) {
    const int strength= ff_h263_loop_filter_strength[qscale];

    asm volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1             \n\t"
        "movq %%mm4, %2             \n\t"
        "movq %%mm5, %0             \n\t"
        "movq %%mm6, %3             \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
    }
}
static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd  %4, %%mm0                \n\t"
        "movd  %5, %%mm1                \n\t"
        "movd  %6, %%mm2                \n\t"
        "movd  %7, %%mm3                \n\t"
        "punpcklbw %%mm1, %%mm0         \n\t"
        "punpcklbw %%mm3, %%mm2         \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "punpcklwd %%mm2, %%mm0         \n\t"
        "punpckhwd %%mm2, %%mm1         \n\t"
        "movd  %%mm0, %0                \n\t"
        "punpckhdq %%mm0, %%mm0         \n\t"
        "movd  %%mm0, %1                \n\t"
        "movd  %%mm1, %2                \n\t"
        "punpckhdq %%mm1, %%mm1         \n\t"
        "movd  %%mm1, %3                \n\t"

        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        :  "m" (*(uint32_t*)(src + 0*src_stride)),
           "m" (*(uint32_t*)(src + 1*src_stride)),
           "m" (*(uint32_t*)(src + 2*src_stride)),
           "m" (*(uint32_t*)(src + 3*src_stride))
    );
}
static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(ENABLE_ANY_H263) {
    const int strength= ff_h263_loop_filter_strength[qscale];
    DECLARE_ALIGNED(8, uint64_t, temp[4]);
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp  , src,              8, stride);
    transpose4x4(btemp+4, src + 4*stride,   8, stride);
    asm volatile(
        H263_LOOP_FILTER // 5 3 4 6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    asm volatile(
        "movq %%mm5, %%mm1          \n\t"
        "movq %%mm4, %%mm0          \n\t"
        "punpcklbw %%mm3, %%mm5     \n\t"
        "punpcklbw %%mm6, %%mm4     \n\t"
        "punpckhbw %%mm3, %%mm1     \n\t"
        "punpckhbw %%mm6, %%mm0     \n\t"
        "movq %%mm5, %%mm3          \n\t"
        "movq %%mm1, %%mm6          \n\t"
        "punpcklwd %%mm4, %%mm5     \n\t"
        "punpcklwd %%mm0, %%mm1     \n\t"
        "punpckhwd %%mm4, %%mm3     \n\t"
        "punpckhwd %%mm0, %%mm6     \n\t"
        "movd %%mm5, (%0)           \n\t"
        "punpckhdq %%mm5, %%mm5     \n\t"
        "movd %%mm5, (%0,%2)        \n\t"
        "movd %%mm3, (%0,%2,2)      \n\t"
        "punpckhdq %%mm3, %%mm3     \n\t"
        "movd %%mm3, (%0,%3)        \n\t"
        "movd %%mm1, (%1)           \n\t"
        "punpckhdq %%mm1, %%mm1     \n\t"
        "movd %%mm1, (%1,%2)        \n\t"
        "movd %%mm6, (%1,%2,2)      \n\t"
        "punpckhdq %%mm6, %%mm6     \n\t"
        "movd %%mm6, (%1,%3)        \n\t"
        :: "r" (src),
           "r" (src + 4*stride),
           "r" ((x86_reg) stride ),
           "r" ((x86_reg)(3*stride))
    );
    }
}
/* draw the edges of width 'w' of an image of size width, height
   this mmx version can only handle w==8 || w==16 */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if(w==8)
    {
        asm volatile(
                "1:                             \n\t"
                "movd (%0), %%mm0               \n\t"
                "punpcklbw %%mm0, %%mm0         \n\t"
                "punpcklwd %%mm0, %%mm0         \n\t"
                "punpckldq %%mm0, %%mm0         \n\t"
                "movq %%mm0, -8(%0)             \n\t"
                "movq -8(%0, %2), %%mm1         \n\t"
                "punpckhbw %%mm1, %%mm1         \n\t"
                "punpckhwd %%mm1, %%mm1         \n\t"
                "punpckhdq %%mm1, %%mm1         \n\t"
                "movq %%mm1, (%0, %2)           \n\t"
                "add %1, %0                     \n\t"
                "cmp %3, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }
    else
    {
        asm volatile(
                "1:                             \n\t"
                "movd (%0), %%mm0               \n\t"
                "punpcklbw %%mm0, %%mm0         \n\t"
                "punpcklwd %%mm0, %%mm0         \n\t"
                "punpckldq %%mm0, %%mm0         \n\t"
                "movq %%mm0, -8(%0)             \n\t"
                "movq %%mm0, -16(%0)            \n\t"
                "movq -8(%0, %2), %%mm1         \n\t"
                "punpckhbw %%mm1, %%mm1         \n\t"
                "punpckhwd %%mm1, %%mm1         \n\t"
                "punpckhdq %%mm1, %%mm1         \n\t"
                "movq %%mm1, (%0, %2)           \n\t"
                "movq %%mm1, 8(%0, %2)          \n\t"
                "add %1, %0                     \n\t"
                "cmp %3, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }

    for(i=0;i<w;i+=4) {
        /* top and bottom (and hopefully also the corners) */
        ptr= buf - (i + 1) * wrap - w;
        asm volatile(
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq %%mm0, (%0)               \n\t"
                "movq %%mm0, (%0, %2)           \n\t"
                "movq %%mm0, (%0, %2, 2)        \n\t"
                "movq %%mm0, (%0, %3)           \n\t"
                "add $8, %0                     \n\t"
                "cmp %4, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
        );
        ptr= last_line + (i + 1) * wrap - w;
        asm volatile(
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq %%mm0, (%0)               \n\t"
                "movq %%mm0, (%0, %2)           \n\t"
                "movq %%mm0, (%0, %2, 2)        \n\t"
                "movq %%mm0, (%0, %3)           \n\t"
                "add $8, %0                     \n\t"
                "cmp %4, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
        );
    }
}
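
/* Illustrative only (not in the original): in scalar terms draw_edges_mmx
 * replicates the first/last pixel of every line into the left/right border
 * of width w, then copies the padded first/last line into the rows above and
 * below the picture.  A rough C equivalent: */
#if 0
static void draw_edges_ref(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *last_line = buf + (height - 1) * wrap;
    int y;
    for (y = 0; y < height; y++) {                      /* left and right */
        memset(buf + y*wrap - w,     buf[y*wrap],             w);
        memset(buf + y*wrap + width, buf[y*wrap + width - 1], w);
    }
    for (y = 1; y <= w; y++) {                          /* top and bottom */
        memcpy(buf       - y*wrap - w, buf       - w, width + 2*w);
        memcpy(last_line + y*wrap - w, last_line - w, width + 2*w);
    }
}
#endif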
#define PAETH(cpu, abs3)\
void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
{\
    x86_reg i = -bpp;\
    x86_reg end = w-3;\
    asm volatile(\
        "pxor      %%mm7, %%mm7 \n"\
        "movd    (%1,%0), %%mm0 \n"\
        "movd    (%2,%0), %%mm1 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "add       %4, %0 \n"\
        "1: \n"\
        "movq      %%mm1, %%mm2 \n"\
        "movd    (%2,%0), %%mm1 \n"\
        "movq      %%mm2, %%mm3 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "movq      %%mm2, %%mm4 \n"\
        "psubw     %%mm1, %%mm3 \n"\
        "psubw     %%mm0, %%mm4 \n"\
        "movq      %%mm3, %%mm5 \n"\
        "paddw     %%mm4, %%mm5 \n"\
        abs3\
        "movq      %%mm4, %%mm6 \n"\
        "pminsw    %%mm5, %%mm6 \n"\
        "pcmpgtw   %%mm6, %%mm3 \n"\
        "pcmpgtw   %%mm5, %%mm4 \n"\
        "movq      %%mm4, %%mm6 \n"\
        "pand      %%mm3, %%mm4 \n"\
        "pandn     %%mm3, %%mm6 \n"\
        "pandn     %%mm0, %%mm3 \n"\
        "movd    (%3,%0), %%mm0 \n"\
        "pand      %%mm1, %%mm6 \n"\
        "pand      %%mm4, %%mm2 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "movq      %6,    %%mm5 \n"\
        "paddw     %%mm6, %%mm0 \n"\
        "paddw     %%mm2, %%mm3 \n"\
        "paddw     %%mm3, %%mm0 \n"\
        "pand      %%mm5, %%mm0 \n"\
        "movq      %%mm0, %%mm3 \n"\
        "packuswb  %%mm3, %%mm3 \n"\
        "movd      %%mm3, (%1,%0) \n"\
        "add       %4, %0 \n"\
        "cmp       %5, %0 \n"\
        "jle 1b \n"\
        :"+r"(i)\
        :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\
         "m"(ff_pw_255)\
        :"memory"\
    );\
}

#define ABS3_MMX2\
        "psubw     %%mm5, %%mm7 \n"\
        "pmaxsw    %%mm7, %%mm5 \n"\
        "pxor      %%mm6, %%mm6 \n"\
        "pxor      %%mm7, %%mm7 \n"\
        "psubw     %%mm3, %%mm6 \n"\
        "psubw     %%mm4, %%mm7 \n"\
        "pmaxsw    %%mm6, %%mm3 \n"\
        "pmaxsw    %%mm7, %%mm4 \n"\
        "pxor      %%mm7, %%mm7 \n"

#define ABS3_SSSE3\
        "pabsw     %%mm3, %%mm3 \n"\
        "pabsw     %%mm4, %%mm4 \n"\
        "pabsw     %%mm5, %%mm5 \n"

PAETH(mmx2, ABS3_MMX2)
#ifdef HAVE_SSSE3
PAETH(ssse3, ABS3_SSSE3)
#endif
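
/* Illustrative only (not in the original): the selection logic above is the
 * standard PNG Paeth predictor, which in scalar C reads: */
#if 0
static int paeth_predict(int a, int b, int c)   /* a=left, b=top, c=topleft */
{
    int p  = a + b - c;
    int pa = FFABS(p - a);
    int pb = FFABS(p - b);
    int pc = FFABS(p - c);
    if (pa <= pb && pa <= pc) return a;
    else if (pb <= pc)        return b;
    else                      return c;
}
#endif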
#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
    "paddw " #m4 ", " #m3 "           \n\t" /* x1 */\
    "movq "MANGLE(ff_pw_20)", %%mm4   \n\t" /* 20 */\
    "pmullw " #m3 ", %%mm4            \n\t" /* 20x1 */\
    "movq "#in7", " #m3 "             \n\t" /* d */\
    "movq "#in0", %%mm5               \n\t" /* D */\
    "paddw " #m3 ", %%mm5             \n\t" /* x4 */\
    "psubw %%mm5, %%mm4               \n\t" /* 20x1 - x4 */\
    "movq "#in1", %%mm5               \n\t" /* C */\
    "movq "#in2", %%mm6               \n\t" /* B */\
    "paddw " #m6 ", %%mm5             \n\t" /* x3 */\
    "paddw " #m5 ", %%mm6             \n\t" /* x2 */\
    "paddw %%mm6, %%mm6               \n\t" /* 2x2 */\
    "psubw %%mm6, %%mm5               \n\t" /* -2x2 + x3 */\
    "pmullw "MANGLE(ff_pw_3)", %%mm5  \n\t" /* -6x2 + 3x3 */\
    "paddw " #rnd ", %%mm4            \n\t" /* x2 */\
    "paddw %%mm4, %%mm5               \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
    "psraw $5, %%mm5                  \n\t"\
    "packuswb %%mm5, %%mm5            \n\t"\
    OP(%%mm5, out, %%mm7, d)
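
/* Illustrative only (not in the original): QPEL_V_LOW evaluates the MPEG-4
 * quarter-pel half-sample filter (-1, 3, -6, 20, 20, -6, 3, -1)/32 for four
 * output pixels at once.  With x1..x4 the symmetric pair sums named in the
 * comments above, one output sample is computed roughly as: */
#if 0
static inline uint8_t qpel_half_sample(int x1, int x2, int x3, int x4, int rnd)
{
    return av_clip_uint8((20*x1 - 6*x2 + 3*x3 - x4 + rnd) >> 5);
}
#endif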
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                               \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5               \n\t" /* b */\
        "paddw %%mm2, %%mm6               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0               \n\t" /* a */\
        "paddw %%mm1, %%mm5               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
        "paddw %6, %%mm6                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        "movq %%mm0, %5                   \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movq 5(%0), %%mm0                \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5                \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6                \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0                  \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5                 \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2               \n\t" /* b */\
        "paddw %%mm5, %%mm3               \n\t" /* c */\
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2                \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6                 \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6           \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1               \n\t" /* a */\
        "paddw %%mm6, %%mm4               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3               \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1                  \n\t"\
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3                  \n\t"\
        "movq %5, %%mm1                   \n\t"\
        "packuswb %%mm3, %%mm1            \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
\
        "movq 9(%0), %%mm1                \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4                \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3                \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1                  \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4                 \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1           \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4           \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5               \n\t" /* b */\
        "paddw %%mm4, %%mm0               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm0               \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5                \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3                 \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0  \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3           \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2               \n\t" /* d */\
        "psubw %%mm2, %%mm0               \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2                \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5           \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6               \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
\
        "paddw %%mm5, %%mm3               \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6               \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4       \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4               \n\t" /* c */\
        "paddw %%mm2, %%mm5               \n\t" /* d */\
        "paddw %%mm6, %%mm6               \n\t" /* 2b */\
        "psubw %%mm6, %%mm4               \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4  \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3               \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm4                  \n\t"\
        "paddw %%mm3, %%mm4               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4                  \n\t"\
        "packuswb %%mm4, %%mm0            \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
\
        "add %3, %0                       \n\t"\
        "add %4, %1                       \n\t"\
        "decl %2                          \n\t"\
        " jnz 1b                          \n\t"\
        : "+a"(src), "+c"(dst), "+D"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        asm volatile(\
            "movq (%0), %%mm0               \n\t"\
            "movq 8(%0), %%mm1              \n\t"\
            "paddw %2, %%mm0                \n\t"\
            "paddw %2, %%mm1                \n\t"\
            "psraw $5, %%mm0                \n\t"\
            "psraw $5, %%mm1                \n\t"\
            "packuswb %%mm1, %%mm0          \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0             \n\t"\
            "movq 24(%0), %%mm1             \n\t"\
            "paddw %2, %%mm0                \n\t"\
            "paddw %2, %%mm1                \n\t"\
            "psraw $5, %%mm0                \n\t"\
            "psraw $5, %%mm1                \n\t"\
            "packuswb %%mm1, %%mm0          \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    asm volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                               \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5               \n\t" /* b */\
        "paddw %%mm2, %%mm6               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0               \n\t" /* a */\
        "paddw %%mm1, %%mm5               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
        "paddw %5, %%mm6                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movd 5(%0), %%mm5                \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1               \n\t" /* a */\
        "paddw %%mm6, %%mm2               \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6       \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3               \n\t" /* c */\
        "paddw %%mm5, %%mm4               \n\t" /* d */\
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3               \n\t" /* -6b + 3c - d */\
        "paddw %5, %%mm1                  \n\t"\
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3                  \n\t"\
        "packuswb %%mm3, %%mm0            \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
\
        "add %3, %0                       \n\t"\
        "add %4, %1                       \n\t"\
        "decl %2                          \n\t"\
        " jnz 1b                          \n\t"\
        : "+a"(src), "+c"(dst), "+d"(h)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        asm volatile(\
            "movq (%0), %%mm0               \n\t"\
            "movq 8(%0), %%mm1              \n\t"\
            "paddw %2, %%mm0                \n\t"\
            "paddw %2, %%mm1                \n\t"\
            "psraw $5, %%mm0                \n\t"\
            "psraw $5, %%mm1                \n\t"\
            "packuswb %%mm1, %%mm0          \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq (%0), %%mm1               \n\t"\
        "movq 8(%0), %%mm2              \n\t"\
        "movq 8(%0), %%mm3              \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpckhbw %%mm7, %%mm3         \n\t"\
        "movq %%mm0, (%1)               \n\t"\
        "movq %%mm1, 17*8(%1)           \n\t"\
        "movq %%mm2, 2*17*8(%1)         \n\t"\
        "movq %%mm3, 3*17*8(%1)         \n\t"\
        "add $8, %1                     \n\t"\
        "add %3, %0                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count= 4;\
\
/*FIXME reorder for speed */\
    asm volatile(\
        /*"pxor %%mm7, %%mm7              \n\t"*/\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq 8(%0), %%mm1              \n\t"\
        "movq 16(%0), %%mm2             \n\t"\
        "movq 24(%0), %%mm3             \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
\
        "add $136, %0                   \n\t"\
        "add %6, %1                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq (%0), %%mm1               \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "movq %%mm0, (%1)               \n\t"\
        "movq %%mm1, 9*8(%1)            \n\t"\
        "add $8, %1                     \n\t"\
        "add %3, %0                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count= 2;\
\
/*FIXME reorder for speed */\
    asm volatile(\
        /*"pxor %%mm7, %%mm7              \n\t"*/\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq 8(%0), %%mm1              \n\t"\
        "movq 16(%0), %%mm2             \n\t"\
        "movq 24(%0), %%mm3             \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
\
        "add $72, %0                    \n\t"\
        "add %6, %1                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
}\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
}\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}

#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgusb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "      \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgb " #temp ", " #a "          \n\t"\
"mov" #size " " #a ", " #b "      \n\t"

QPEL_BASE(put_       , ff_pw_16, _       , PUT_OP, PUT_OP)
QPEL_BASE(avg_       , ff_pw_16, _       , AVG_MMX2_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, 3dnow)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, mmx2)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_MMX2_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
/***********************************/
/* bilinear qpel: not compliant to any spec, only for -lavdopts fast */

#define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
}

#define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
}

#define QPEL_2TAP(OPNAME, SIZE, MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
                          OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
                          OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
                          OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
}\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
}\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0,         1,       0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1,        -1,       0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0,         stride,  0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride,   -stride,  0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0,         stride,  1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1,         stride, -1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride,   -stride,  1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\
QPEL_2TAP(put_, 16, mmx2)
QPEL_2TAP(avg_, 16, mmx2)
QPEL_2TAP(put_,  8, mmx2)
QPEL_2TAP(avg_,  8, mmx2)
QPEL_2TAP(put_, 16, 3dnow)
QPEL_2TAP(avg_, 16, 3dnow)
QPEL_2TAP(put_,  8, 3dnow)
QPEL_2TAP(avg_,  8, 3dnow)

static void just_return() { return; }
static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){
    const int w=8;
    const int ix = ox>>(16+shift);
    const int iy = oy>>(16+shift);
    const int oxs = ox>>4;
    const int oys = oy>>4;
    const int dxxs = dxx>>4;
    const int dxys = dxy>>4;
    const int dyxs = dyx>>4;
    const int dyys = dyy>>4;
    const uint16_t r4[4] = {r,r,r,r};
    const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
    const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
    const uint64_t shift2 = 2*shift;
    uint8_t edge_buf[(h+1)*stride];
    int x, y;

    const int dxw = (dxx-(1<<(16+shift)))*(w-1);
    const int dyh = (dyy-(1<<(16+shift)))*(h-1);
    const int dxh = dxy*(h-1);
    const int dyw = dyx*(w-1);
    if( // non-constant fullpel offset (3% of blocks)
        ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
         (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
        // uses more than 16 bits of subpel mv (only at huge resolution)
        || (dxx|dxy|dyx|dyy)&15 )
    {
        //FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
        return;
    }

    src += ix + iy*stride;
    if( (unsigned)ix >= width-w ||
        (unsigned)iy >= height-h )
    {
        ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
        src= edge_buf;
    }

    asm volatile(
        "movd         %0, %%mm6 \n\t"
        "pxor      %%mm7, %%mm7 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        :: "r"(1<<shift)
    );

    for(x=0; x<w; x+=4){
        uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
                            oxs - dxys + dxxs*(x+1),
                            oxs - dxys + dxxs*(x+2),
                            oxs - dxys + dxxs*(x+3) };
        uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
                            oys - dyys + dyxs*(x+1),
                            oys - dyys + dyxs*(x+2),
                            oys - dyys + dyxs*(x+3) };

        for(y=0; y<h; y++){
            asm volatile(
                "movq %0, %%mm4         \n\t"
                "movq %1, %%mm5         \n\t"
                "paddw %2, %%mm4        \n\t"
                "paddw %3, %%mm5        \n\t"
                "movq %%mm4, %0         \n\t"
                "movq %%mm5, %1         \n\t"
                "psrlw $12, %%mm4       \n\t"
                "psrlw $12, %%mm5       \n\t"
                : "+m"(*dx4), "+m"(*dy4)
                : "m"(*dxy4), "m"(*dyy4)
            );

            asm volatile(
                "movq %%mm6, %%mm2      \n\t"
                "movq %%mm6, %%mm1      \n\t"
                "psubw %%mm4, %%mm2     \n\t"
                "psubw %%mm5, %%mm1     \n\t"
                "movq %%mm2, %%mm0      \n\t"
                "movq %%mm4, %%mm3      \n\t"
                "pmullw %%mm1, %%mm0    \n\t" // (s-dx)*(s-dy)
                "pmullw %%mm5, %%mm3    \n\t" // dx*dy
                "pmullw %%mm5, %%mm2    \n\t" // (s-dx)*dy
                "pmullw %%mm4, %%mm1    \n\t" // dx*(s-dy)

                "movd %4, %%mm5         \n\t"
                "movd %3, %%mm4         \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm3    \n\t" // src[1,1] * dx*dy
                "pmullw %%mm4, %%mm2    \n\t" // src[0,1] * (s-dx)*dy

                "movd %2, %%mm5         \n\t"
                "movd %1, %%mm4         \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm1    \n\t" // src[1,0] * dx*(s-dy)
                "pmullw %%mm4, %%mm0    \n\t" // src[0,0] * (s-dx)*(s-dy)
                "paddw %5, %%mm1        \n\t"
                "paddw %%mm3, %%mm2     \n\t"
                "paddw %%mm1, %%mm0     \n\t"
                "paddw %%mm2, %%mm0     \n\t"

                "psrlw %6, %%mm0        \n\t"
                "packuswb %%mm0, %%mm0  \n\t"
                "movd %%mm0, %0         \n\t"

                : "=m"(dst[x+y*stride])
                : "m"(src[0]), "m"(src[1]),
                  "m"(src[stride]), "m"(src[stride+1]),
                  "m"(*r4), "m"(shift2)
            );
            src += stride;
        }
        src += 4-h*stride;
    }
}
#define PREFETCH(name, op) \
static void name(void *mem, int stride, int h){\
    const uint8_t *p= mem;\
    do{\
        asm volatile(#op" %0" :: "m"(*p));\
        p+= stride;\
    }while(--h);\
}
PREFETCH(prefetch_mmx2,  prefetcht0)
PREFETCH(prefetch_3dnow, prefetch)
#undef PREFETCH
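
/* Editor's note: each PREFETCH() instantiation above defines a function that
 * walks the block one line at a time and issues the named prefetch
 * instruction per line.  Illustrative expansion of the first instance:
 *
 *   static void prefetch_mmx2(void *mem, int stride, int h){
 *       const uint8_t *p = mem;
 *       do{
 *           asm volatile("prefetcht0 %0" :: "m"(*p));
 *           p += stride;
 *       }while(--h);
 *   }
 */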
#include "h264dsp_mmx.c"

/* CAVS specific */
void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx);
void ff_cavsdsp_init_3dnow(DSPContext* c, AVCodecContext *avctx);

void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels8_mmx(dst, src, stride, 8);
}
void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels8_mmx(dst, src, stride, 8);
}
void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels16_mmx(dst, src, stride, 16);
}
void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels16_mmx(dst, src, stride, 16);
}

/* VC1 specific */
void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx);

void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    put_pixels8_mmx(dst, src, stride, 8);
}
/* external functions, from idct_mmx.c */
void ff_mmx_idct(DCTELEM *block);
void ff_mmxext_idct(DCTELEM *block);

/* XXX: those functions should be suppressed ASAP when all IDCTs are
   converted to the new API */
static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
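
/* Editor's note: the *_put wrappers above write the clamped IDCT result
 * straight to 'dest' (intra blocks), while the *_add wrappers add it to the
 * prediction already stored there (inter blocks).  Both reuse the MMX
 * clamp/store helpers registered in dsputil_init_mmx() below. */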
static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
{
    int i;
    asm volatile("pxor %%mm7, %%mm7":);
    for(i=0; i<blocksize; i+=2) {
        asm volatile(
            "movq    %0,    %%mm0 \n\t"
            "movq    %1,    %%mm1 \n\t"
            "movq    %%mm0, %%mm2 \n\t"
            "movq    %%mm1, %%mm3 \n\t"
            "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
            "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
            "pslld   $31,   %%mm2 \n\t" // keep only the sign bit
            "pxor    %%mm2, %%mm1 \n\t"
            "movq    %%mm3, %%mm4 \n\t"
            "pand    %%mm1, %%mm3 \n\t"
            "pandn   %%mm1, %%mm4 \n\t"
            "pfadd   %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "pfsub   %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movq    %%mm3, %1    \n\t"
            "movq    %%mm0, %0    \n\t"
            :"+m"(mag[i]), "+m"(ang[i])
            ::"memory"
        );
    }
    asm volatile("femms");
}
static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
{
    int i;

    asm volatile(
        "movaps  %0, %%xmm5 \n\t"
        ::"m"(ff_pdw_80000000[0])
    );
    for(i=0; i<blocksize; i+=4) {
        asm volatile(
            "movaps  %0,     %%xmm0 \n\t"
            "movaps  %1,     %%xmm1 \n\t"
            "xorps   %%xmm2, %%xmm2 \n\t"
            "xorps   %%xmm3, %%xmm3 \n\t"
            "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
            "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
            "andps   %%xmm5, %%xmm2 \n\t" // keep only the sign bit
            "xorps   %%xmm2, %%xmm1 \n\t"
            "movaps  %%xmm3, %%xmm4 \n\t"
            "andps   %%xmm1, %%xmm3 \n\t"
            "andnps  %%xmm1, %%xmm4 \n\t"
            "addps   %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "subps   %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movaps  %%xmm3, %1     \n\t"
            "movaps  %%xmm0, %0     \n\t"
            :"+m"(mag[i]), "+m"(ang[i])
            ::"memory"
        );
    }
}
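
/* Editor's sketch (not from the original file): scalar equivalent of the
 * magnitude/angle uncoupling done by the two routines above, matching the
 * comments in the asm; illustrative only. */
#if 0
static void vorbis_inverse_coupling_sketch(float *mag, float *ang, int blocksize)
{
    int i;
    for(i=0; i<blocksize; i++){
        float m = mag[i], a = ang[i];
        if(m > 0){
            if(a > 0){ mag[i] = m;  ang[i] = m - a; }
            else     { ang[i] = m;  mag[i] = m + a; }
        }else{
            if(a > 0){ mag[i] = m;  ang[i] = m + a; }
            else     { ang[i] = m;  mag[i] = m - a; }
        }
    }
}
#endif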
static void vector_fmul_3dnow(float *dst, const float *src, int len){
    x86_reg i = (len-4)*4;
    asm volatile(
        "1: \n\t"
        "movq     (%1,%0), %%mm0 \n\t"
        "movq    8(%1,%0), %%mm1 \n\t"
        "pfmul    (%2,%0), %%mm0 \n\t"
        "pfmul   8(%2,%0), %%mm1 \n\t"
        "movq    %%mm0,  (%1,%0) \n\t"
        "movq    %%mm1, 8(%1,%0) \n\t"
        "sub  $16, %0 \n\t"
        "jge  1b      \n\t"
        "femms        \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src)
        :"memory"
    );
}
static void vector_fmul_sse(float *dst, const float *src, int len){
    x86_reg i = (len-8)*4;
    asm volatile(
        "1: \n\t"
        "movaps    (%1,%0), %%xmm0 \n\t"
        "movaps  16(%1,%0), %%xmm1 \n\t"
        "mulps     (%2,%0), %%xmm0 \n\t"
        "mulps   16(%2,%0), %%xmm1 \n\t"
        "movaps  %%xmm0,   (%1,%0) \n\t"
        "movaps  %%xmm1, 16(%1,%0) \n\t"
        "sub  $32, %0 \n\t"
        "jge  1b      \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src)
        :"memory"
    );
}
static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = len*4-16;
    asm volatile(
        "1: \n\t"
        "pswapd   8(%1), %%mm0 \n\t"
        "pswapd    (%1), %%mm1 \n\t"
        "pfmul  (%3,%0), %%mm0 \n\t"
        "pfmul 8(%3,%0), %%mm1 \n\t"
        "movq  %%mm0,  (%2,%0) \n\t"
        "movq  %%mm1, 8(%2,%0) \n\t"
        "add   $16, %1 \n\t"
        "sub   $16, %0 \n\t"
        "jge   1b      \n\t"
        :"+r"(i), "+r"(src1)
        :"r"(dst), "r"(src0)
    );
    asm volatile("femms");
}
static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = len*4-32;
    asm volatile(
        "1: \n\t"
        "movaps        16(%1), %%xmm0 \n\t"
        "movaps          (%1), %%xmm1 \n\t"
        "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
        "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
        "mulps        (%3,%0), %%xmm0 \n\t"
        "mulps      16(%3,%0), %%xmm1 \n\t"
        "movaps     %%xmm0,   (%2,%0) \n\t"
        "movaps     %%xmm1, 16(%2,%0) \n\t"
        "add    $32, %1 \n\t"
        "sub    $32, %0 \n\t"
        "jge    1b      \n\t"
        :"+r"(i), "+r"(src1)
        :"r"(dst), "r"(src0)
    );
}
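
/* Editor's sketch (not from the original file): scalar equivalent of
 * vector_fmul_reverse_* above, dst[i] = src0[i] * src1[len-1-i].  The SIMD
 * versions walk src1 forwards and reverse each register with pswapd/shufps.
 * Illustrative only. */
#if 0
static void vector_fmul_reverse_sketch(float *dst, const float *src0,
                                       const float *src1, int len)
{
    int i;
    for(i=0; i<len; i++)
        dst[i] = src0[i] * src1[len-1-i];
}
#endif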
static void vector_fmul_add_add_3dnow(float *dst, const float *src0, const float *src1,
                                      const float *src2, int src3, int len, int step){
    x86_reg i = (len-4)*4;
    if(step == 2 && src3 == 0){
        dst += (len-4)*2;
        asm volatile(
            "1: \n\t"
            "movq    (%2,%0), %%mm0 \n\t"
            "movq   8(%2,%0), %%mm1 \n\t"
            "pfmul   (%3,%0), %%mm0 \n\t"
            "pfmul  8(%3,%0), %%mm1 \n\t"
            "pfadd   (%4,%0), %%mm0 \n\t"
            "pfadd  8(%4,%0), %%mm1 \n\t"
            "movd     %%mm0,    (%1) \n\t"
            "movd     %%mm1,  16(%1) \n\t"
            "psrlq    $32,     %%mm0 \n\t"
            "psrlq    $32,     %%mm1 \n\t"
            "movd     %%mm0,   8(%1) \n\t"
            "movd     %%mm1,  24(%1) \n\t"
            "sub  $32, %1 \n\t"
            "sub  $16, %0 \n\t"
            "jge  1b      \n\t"
            :"+r"(i), "+r"(dst)
            :"r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else if(step == 1 && src3 == 0){
        asm volatile(
            "1: \n\t"
            "movq    (%2,%0), %%mm0 \n\t"
            "movq   8(%2,%0), %%mm1 \n\t"
            "pfmul   (%3,%0), %%mm0 \n\t"
            "pfmul  8(%3,%0), %%mm1 \n\t"
            "pfadd   (%4,%0), %%mm0 \n\t"
            "pfadd  8(%4,%0), %%mm1 \n\t"
            "movq  %%mm0,   (%1,%0) \n\t"
            "movq  %%mm1,  8(%1,%0) \n\t"
            "sub  $16, %0 \n\t"
            "jge  1b      \n\t"
            :"+r"(i)
            :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else
        ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
    asm volatile("femms");
}
static void vector_fmul_add_add_sse(float *dst, const float *src0, const float *src1,
                                    const float *src2, int src3, int len, int step){
    x86_reg i = (len-8)*4;
    if(step == 2 && src3 == 0){
        dst += (len-8)*2;
        asm volatile(
            "1: \n\t"
            "movaps   (%2,%0), %%xmm0 \n\t"
            "movaps 16(%2,%0), %%xmm1 \n\t"
            "mulps    (%3,%0), %%xmm0 \n\t"
            "mulps  16(%3,%0), %%xmm1 \n\t"
            "addps    (%4,%0), %%xmm0 \n\t"
            "addps  16(%4,%0), %%xmm1 \n\t"
            "movss     %%xmm0,   (%1) \n\t"
            "movss     %%xmm1, 32(%1) \n\t"
            "movhlps   %%xmm0, %%xmm2 \n\t"
            "movhlps   %%xmm1, %%xmm3 \n\t"
            "movss     %%xmm2, 16(%1) \n\t"
            "movss     %%xmm3, 48(%1) \n\t"
            "shufps $0xb1, %%xmm0, %%xmm0 \n\t"
            "shufps $0xb1, %%xmm1, %%xmm1 \n\t"
            "movss     %%xmm0,  8(%1) \n\t"
            "movss     %%xmm1, 40(%1) \n\t"
            "movhlps   %%xmm0, %%xmm2 \n\t"
            "movhlps   %%xmm1, %%xmm3 \n\t"
            "movss     %%xmm2, 24(%1) \n\t"
            "movss     %%xmm3, 56(%1) \n\t"
            "sub  $64, %1 \n\t"
            "sub  $32, %0 \n\t"
            "jge  1b      \n\t"
            :"+r"(i), "+r"(dst)
            :"r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else if(step == 1 && src3 == 0){
        asm volatile(
            "1: \n\t"
            "movaps   (%2,%0), %%xmm0 \n\t"
            "movaps 16(%2,%0), %%xmm1 \n\t"
            "mulps    (%3,%0), %%xmm0 \n\t"
            "mulps  16(%3,%0), %%xmm1 \n\t"
            "addps    (%4,%0), %%xmm0 \n\t"
            "addps  16(%4,%0), %%xmm1 \n\t"
            "movaps  %%xmm0,   (%1,%0) \n\t"
            "movaps  %%xmm1, 16(%1,%0) \n\t"
            "sub  $32, %0 \n\t"
            "jge  1b      \n\t"
            :"+r"(i)
            :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else
        ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
}
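
/* Editor's sketch (not from the original file): scalar reference for the
 * routines above, mirroring ff_vector_fmul_add_add_c().  The SIMD paths only
 * handle src3 == 0; step == 2 scatters the results to every second output,
 * which is why those paths store with movd/movss.  Illustrative only. */
#if 0
static void vector_fmul_add_add_sketch(float *dst, const float *src0,
                                       const float *src1, const float *src2,
                                       int src3, int len, int step)
{
    int i;
    for(i=0; i<len; i++)
        dst[i*step] = src0[i]*src1[i] + src2[i] + src3;
}
#endif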
static void float_to_int16_3dnow(int16_t *dst, const float *src, int len){
    // not bit-exact: pf2id uses different rounding than C and SSE
    int i;
    for(i=0; i<len; i+=4) {
        asm volatile(
            "pf2id       %1, %%mm0 \n\t"
            "pf2id       %2, %%mm1 \n\t"
            "packssdw %%mm1, %%mm0 \n\t"
            "movq     %%mm0, %0    \n\t"
            :"=m"(dst[i])
            :"m"(src[i]), "m"(src[i+2])
        );
    }
    asm volatile("femms");
}
static void float_to_int16_sse(int16_t *dst, const float *src, int len){
    int i;
    for(i=0; i<len; i+=4) {
        asm volatile(
            "cvtps2pi    %1, %%mm0 \n\t"
            "cvtps2pi    %2, %%mm1 \n\t"
            "packssdw %%mm1, %%mm0 \n\t"
            "movq     %%mm0, %0    \n\t"
            :"=m"(dst[i])
            :"m"(src[i]), "m"(src[i+2])
        );
    }
    asm volatile("emms");
}
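
/* Editor's sketch (not from the original file): scalar reference for the two
 * converters above.  cvtps2pi follows the MXCSR rounding mode (round to
 * nearest by default), whereas 3DNow!'s pf2id truncates, hence the
 * "not bit-exact" note and the CODEC_FLAG_BITEXACT check at init time.
 * Requires <math.h> for lrintf(); illustrative only. */
#if 0
static void float_to_int16_sketch(int16_t *dst, const float *src, int len)
{
    int i;
    for(i=0; i<len; i++){
        int v = lrintf(src[i]);               // round to nearest
        dst[i] = av_clip(v, -32768, 32767);   // packssdw saturates likewise
    }
}
#endif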
extern void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width);
extern void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, int width);
extern void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
extern void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
extern void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
                                          int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
extern void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
                                         int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
    mm_flags = mm_support();

    if (avctx->dsp_mask) {
        if (avctx->dsp_mask & FF_MM_FORCE)
            mm_flags |= (avctx->dsp_mask & 0xffff);
        else
            mm_flags &= ~(avctx->dsp_mask & 0xffff);
    }
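
    /* Editor's note (illustrative, not in the original): dsp_mask lets the
     * caller override CPU detection before init, e.g.
     *
     *   avctx->dsp_mask = MM_SSE2;                 // mask the SSE2 paths off
     *   avctx->dsp_mask = FF_MM_FORCE | MM_MMXEXT; // force the MMX2 paths on
     *
     * Only the low 16 bits of the mask are applied to mm_flags above. */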
#if 0
    av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
    if (mm_flags & MM_MMX)
        av_log(avctx, AV_LOG_INFO, " mmx");
    if (mm_flags & MM_MMXEXT)
        av_log(avctx, AV_LOG_INFO, " mmxext");
    if (mm_flags & MM_3DNOW)
        av_log(avctx, AV_LOG_INFO, " 3dnow");
    if (mm_flags & MM_SSE)
        av_log(avctx, AV_LOG_INFO, " sse");
    if (mm_flags & MM_SSE2)
        av_log(avctx, AV_LOG_INFO, " sse2");
    av_log(avctx, AV_LOG_INFO, "\n");
#endif
    if (mm_flags & MM_MMX) {
        const int idct_algo= avctx->idct_algo;

        if(avctx->lowres==0){
            if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
                c->idct_put= ff_simple_idct_put_mmx;
                c->idct_add= ff_simple_idct_add_mmx;
                c->idct    = ff_simple_idct_mmx;
                c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
            }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
                if(mm_flags & MM_MMXEXT){
                    c->idct_put= ff_libmpeg2mmx2_idct_put;
                    c->idct_add= ff_libmpeg2mmx2_idct_add;
                    c->idct    = ff_mmxext_idct;
                }else{
                    c->idct_put= ff_libmpeg2mmx_idct_put;
                    c->idct_add= ff_libmpeg2mmx_idct_add;
                    c->idct    = ff_mmx_idct;
                }
                c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
            }else if((ENABLE_VP3_DECODER || ENABLE_VP5_DECODER || ENABLE_VP6_DECODER) &&
                     idct_algo==FF_IDCT_VP3 &&
                     avctx->codec->id!=CODEC_ID_THEORA &&
                     !(avctx->flags & CODEC_FLAG_BITEXACT)){
                if(mm_flags & MM_SSE2){
                    c->idct_put= ff_vp3_idct_put_sse2;
                    c->idct_add= ff_vp3_idct_add_sse2;
                    c->idct    = ff_vp3_idct_sse2;
                    c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
                }else{
                    ff_vp3_dsp_init_mmx();
                    c->idct_put= ff_vp3_idct_put_mmx;
                    c->idct_add= ff_vp3_idct_add_mmx;
                    c->idct    = ff_vp3_idct_mmx;
                    c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
                }
            }else if(idct_algo==FF_IDCT_CAVS){
                c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
            }else if(idct_algo==FF_IDCT_XVIDMMX){
                if(mm_flags & MM_SSE2){
                    c->idct_put= ff_idct_xvid_sse2_put;
                    c->idct_add= ff_idct_xvid_sse2_add;
                    c->idct    = ff_idct_xvid_sse2;
                    c->idct_permutation_type= FF_SSE2_IDCT_PERM;
                }else if(mm_flags & MM_MMXEXT){
                    c->idct_put= ff_idct_xvid_mmx2_put;
                    c->idct_add= ff_idct_xvid_mmx2_add;
                    c->idct    = ff_idct_xvid_mmx2;
                }else{
                    c->idct_put= ff_idct_xvid_mmx_put;
                    c->idct_add= ff_idct_xvid_mmx_add;
                    c->idct    = ff_idct_xvid_mmx;
                }
            }
        }

        c->put_pixels_clamped = put_pixels_clamped_mmx;
        c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
        c->add_pixels_clamped = add_pixels_clamped_mmx;
        c->clear_blocks = clear_blocks_mmx;
#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
        c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
        c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU

        SET_HPEL_FUNCS(put, 0, 16, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(avg, 0, 16, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(put, 1, 8, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
        SET_HPEL_FUNCS(avg, 1, 8, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);
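
        /* Editor's note: each SET_HPEL_FUNCS() line fills one row of the
         * half-pel table; e.g. SET_HPEL_FUNCS(put, 1, 8, mmx) expands to
         * (illustrative expansion):
         *
         *   c->put_pixels_tab[1][0] = put_pixels8_mmx;
         *   c->put_pixels_tab[1][1] = put_pixels8_x2_mmx;
         *   c->put_pixels_tab[1][2] = put_pixels8_y2_mmx;
         *   c->put_pixels_tab[1][3] = put_pixels8_xy2_mmx;
         *
         * Index 0 is the full-pel copy, 1/2 the horizontal/vertical half-pel
         * cases, and 3 the diagonal (xy) half-pel case. */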

        c->gmc= gmc_mmx;

        c->add_bytes= add_bytes_mmx;
        c->add_bytes_l2= add_bytes_l2_mmx;

        c->draw_edges = draw_edges_mmx;

        if (ENABLE_ANY_H263) {
            c->h263_v_loop_filter= h263_v_loop_filter_mmx;
            c->h263_h_loop_filter= h263_h_loop_filter_mmx;
        }
        c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_rnd;
        c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx;
        c->put_no_rnd_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_nornd;

        c->h264_idct_dc_add=
        c->h264_idct_add= ff_h264_idct_add_mmx;
        c->h264_idct8_dc_add=
        c->h264_idct8_add= ff_h264_idct8_add_mmx;
        if (mm_flags & MM_SSE2)
            c->h264_idct8_add= ff_h264_idct8_add_sse2;

        if (mm_flags & MM_MMXEXT) {
            c->prefetch = prefetch_mmx2;

            c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
            c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;

            c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;

            c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
            c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;

            c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;

            c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
            c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
            }
#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \
            c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU

            SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2);

            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2_rnd;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2;
            c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2;
            c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2;
            c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
            c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
            c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
            c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
            c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
            c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;
            c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;

            c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
            c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
            c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
            c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
            c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
            c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
            c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
            c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;

            c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
            c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
            c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
            c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
            c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
            c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
            c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
            c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;

            if (ENABLE_CAVS_DECODER)
                ff_cavsdsp_init_mmx2(c, avctx);

            if (ENABLE_VC1_DECODER || ENABLE_WMV3_DECODER)
                ff_vc1dsp_init_mmx(c, avctx);

            c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
        } else if (mm_flags & MM_3DNOW) {
            c->prefetch = prefetch_3dnow;

            c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
            c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

            c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

            c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
            c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;

            c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
            }

            SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);

            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow_rnd;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;

            if (ENABLE_CAVS_DECODER)
                ff_cavsdsp_init_3dnow(c, avctx);
        }
#define H264_QPEL_FUNCS(x, y, CPU)\
            c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
            c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
            c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
            c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;

        if((mm_flags & MM_SSE2) && !(mm_flags & MM_3DNOW)){
            // these functions are slower than mmx on AMD, but faster on Intel
/* FIXME works in most codecs, but crashes svq1 due to unaligned chroma
            c->put_pixels_tab[0][0] = put_pixels16_sse2;
            c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
*/
            H264_QPEL_FUNCS(0, 0, sse2);
        }
        if(mm_flags & MM_SSE2){
            H264_QPEL_FUNCS(0, 1, sse2);
            H264_QPEL_FUNCS(0, 2, sse2);
            H264_QPEL_FUNCS(0, 3, sse2);
            H264_QPEL_FUNCS(1, 1, sse2);
            H264_QPEL_FUNCS(1, 2, sse2);
            H264_QPEL_FUNCS(1, 3, sse2);
            H264_QPEL_FUNCS(2, 1, sse2);
            H264_QPEL_FUNCS(2, 2, sse2);
            H264_QPEL_FUNCS(2, 3, sse2);
            H264_QPEL_FUNCS(3, 1, sse2);
            H264_QPEL_FUNCS(3, 2, sse2);
            H264_QPEL_FUNCS(3, 3, sse2);
        }
        if(mm_flags & MM_SSSE3){
            H264_QPEL_FUNCS(1, 0, ssse3);
            H264_QPEL_FUNCS(1, 1, ssse3);
            H264_QPEL_FUNCS(1, 2, ssse3);
            H264_QPEL_FUNCS(1, 3, ssse3);
            H264_QPEL_FUNCS(2, 0, ssse3);
            H264_QPEL_FUNCS(2, 1, ssse3);
            H264_QPEL_FUNCS(2, 2, ssse3);
            H264_QPEL_FUNCS(2, 3, ssse3);
            H264_QPEL_FUNCS(3, 0, ssse3);
            H264_QPEL_FUNCS(3, 1, ssse3);
            H264_QPEL_FUNCS(3, 2, ssse3);
            H264_QPEL_FUNCS(3, 3, ssse3);
            c->put_no_rnd_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_nornd;
            c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_rnd;
            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_ssse3_rnd;
            c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_ssse3;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_ssse3;
            c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
        }
#ifdef CONFIG_SNOW_DECODER
        if(mm_flags & MM_SSE2 & 0){
            c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
            c->vertical_compose97i = ff_snow_vertical_compose97i_sse2;
            c->inner_add_yblock = ff_snow_inner_add_yblock_sse2;
        }
        else{
            if(mm_flags & MM_MMXEXT){
                c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx;
                c->vertical_compose97i = ff_snow_vertical_compose97i_mmx;
            }
            c->inner_add_yblock = ff_snow_inner_add_yblock_mmx;
        }
#endif

        if(mm_flags & MM_3DNOW){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
            c->vector_fmul = vector_fmul_3dnow;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT))
                c->float_to_int16 = float_to_int16_3dnow;
        }
        if(mm_flags & MM_3DNOWEXT)
            c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
        if(mm_flags & MM_SSE){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
            c->vector_fmul = vector_fmul_sse;
            c->float_to_int16 = float_to_int16_sse;
            c->vector_fmul_reverse = vector_fmul_reverse_sse;
            c->vector_fmul_add_add = vector_fmul_add_add_sse;
        }
        if(mm_flags & MM_3DNOW)
            c->vector_fmul_add_add = vector_fmul_add_add_3dnow; // faster than sse
    }

    if (ENABLE_ENCODERS)
        dsputilenc_init_mmx(c, avctx);
#if 0
    // for speed testing
    get_pixels = just_return;
    put_pixels_clamped = just_return;
    add_pixels_clamped = just_return;

    pix_abs16x16 = just_return;
    pix_abs16x16_x2 = just_return;
    pix_abs16x16_y2 = just_return;
    pix_abs16x16_xy2 = just_return;

    put_pixels_tab[0] = just_return;
    put_pixels_tab[1] = just_return;
    put_pixels_tab[2] = just_return;
    put_pixels_tab[3] = just_return;

    put_no_rnd_pixels_tab[0] = just_return;
    put_no_rnd_pixels_tab[1] = just_return;
    put_no_rnd_pixels_tab[2] = just_return;
    put_no_rnd_pixels_tab[3] = just_return;

    avg_pixels_tab[0] = just_return;
    avg_pixels_tab[1] = just_return;
    avg_pixels_tab[2] = just_return;
    avg_pixels_tab[3] = just_return;

    avg_no_rnd_pixels_tab[0] = just_return;
    avg_no_rnd_pixels_tab[1] = just_return;
    avg_no_rnd_pixels_tab[2] = just_return;
    avg_no_rnd_pixels_tab[3] = just_return;

    //av_fdct = just_return;
    //ff_idct = just_return;
#endif
}