x86: conditionally compile H.264 QPEL optimizations
libavcodec/x86/dsputil_mmx.c
/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/cpu.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264dsp.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "libavcodec/ac3dec.h"
#include "dsputil_mmx.h"
#include "idct_xvid.h"

//#undef NDEBUG
//#include <assert.h>

/* pixel operations */
DECLARE_ALIGNED(8,  const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED(16, const uint64_t, ff_pdw_80000000)[2] =
{0x8000000080000000ULL, 0x8000000080000000ULL};

DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_1  ) = {0x0001000100010001ULL, 0x0001000100010001ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_2  ) = {0x0002000200020002ULL, 0x0002000200020002ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_3  ) = {0x0003000300030003ULL, 0x0003000300030003ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_4  ) = {0x0004000400040004ULL, 0x0004000400040004ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_5  ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_8  ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_9  ) = {0x0009000900090009ULL, 0x0009000900090009ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_17 ) = {0x0011001100110011ULL, 0x0011001100110011ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_18 ) = {0x0012001200120012ULL, 0x0012001200120012ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_27 ) = {0x001B001B001B001BULL, 0x001B001B001B001BULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_53 ) = 0x0035003500350035ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_63 ) = {0x003F003F003F003FULL, 0x003F003F003F003FULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_64 ) = {0x0040004000400040ULL, 0x0040004000400040ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_512) = {0x0200020002000200ULL, 0x0200020002000200ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_1019)= {0x03FB03FB03FB03FBULL, 0x03FB03FB03FB03FBULL};

DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_0  ) = {0x0000000000000000ULL, 0x0000000000000000ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_1  ) = {0x0101010101010101ULL, 0x0101010101010101ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_3  ) = {0x0303030303030303ULL, 0x0303030303030303ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_4  ) = {0x0404040404040404ULL, 0x0404040404040404ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_7  ) = 0x0707070707070707ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_80 ) = {0x8080808080808080ULL, 0x8080808080808080ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_A1 ) = {0xA1A1A1A1A1A1A1A1ULL, 0xA1A1A1A1A1A1A1A1ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_F8 ) = {0xF8F8F8F8F8F8F8F8ULL, 0xF8F8F8F8F8F8F8F8ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_FE ) = {0xFEFEFEFEFEFEFEFEULL, 0xFEFEFEFEFEFEFEFEULL};

DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };

#define JUMPALIGN()     __asm__ volatile (".p2align 3"::)
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
// For shared libraries it is better to synthesize these constants
// in-register than to load them through the GOT: pcmpeqd -> -1.
#define MOVQ_BONE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)

#endif
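
/* Editor's note: the PIC variants above build the constants without any
 * memory reference: pcmpeqd sets all bits (-1), psrlw $15 leaves 0x0001
 * in each 16-bit lane, then psllw $1 gives 0x0002 per word (ff_wtwo)
 * while packuswb saturates the words down to 0x01 bytes (ff_bone). */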

// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe ", " #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe ", " #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"

// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pand " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "por " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"

/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ## _mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)
#define OP_AVG(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ## _mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx

/***********************************/
/* standard MMX */

void ff_put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile(
        "movq %3, %%mm0 \n\t"
        "movq 8%3, %%mm1 \n\t"
        "movq 16%3, %%mm2 \n\t"
        "movq 24%3, %%mm3 \n\t"
        "movq 32%3, %%mm4 \n\t"
        "movq 40%3, %%mm5 \n\t"
        "movq 48%3, %%mm6 \n\t"
        "movq 56%3, %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
        :"memory");
    pix += line_size*4;
    p += 32;

    // If this were an exact copy of the code above, the compiler would
    // generate some very strange code, hence the "r" constraint here.
    __asm__ volatile(
        "movq (%3), %%mm0 \n\t"
        "movq 8(%3), %%mm1 \n\t"
        "movq 16(%3), %%mm2 \n\t"
        "movq 24(%3), %%mm3 \n\t"
        "movq 32(%3), %%mm4 \n\t"
        "movq 40(%3), %%mm5 \n\t"
        "movq 48(%3), %%mm6 \n\t"
        "movq 56(%3), %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
        :"memory");
}
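
#if 0
/* Editor's sketch (not compiled): scalar reference for
 * ff_put_pixels_clamped_mmx.  packuswb performs the unsigned saturation,
 * i.e. each 16-bit coefficient is clamped to [0,255]; av_clip_uint8()
 * from libavutil is the scalar equivalent. */
static void put_pixels_clamped_ref(const DCTELEM *block, uint8_t *pixels,
                                   int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            pixels[j] = av_clip_uint8(block[j]);
        pixels += line_size;
        block  += 8;
    }
}
#endif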

#define put_signed_pixels_clamped_mmx_half(off) \
    "movq "#off"(%2), %%mm1 \n\t"\
    "movq 16+"#off"(%2), %%mm2 \n\t"\
    "movq 32+"#off"(%2), %%mm3 \n\t"\
    "movq 48+"#off"(%2), %%mm4 \n\t"\
    "packsswb 8+"#off"(%2), %%mm1 \n\t"\
    "packsswb 24+"#off"(%2), %%mm2 \n\t"\
    "packsswb 40+"#off"(%2), %%mm3 \n\t"\
    "packsswb 56+"#off"(%2), %%mm4 \n\t"\
    "paddb %%mm0, %%mm1 \n\t"\
    "paddb %%mm0, %%mm2 \n\t"\
    "paddb %%mm0, %%mm3 \n\t"\
    "paddb %%mm0, %%mm4 \n\t"\
    "movq %%mm1, (%0) \n\t"\
    "movq %%mm2, (%0, %3) \n\t"\
    "movq %%mm3, (%0, %3, 2) \n\t"\
    "movq %%mm4, (%0, %1) \n\t"

void ff_put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_pb_80)", %%mm0 \n\t"
        "lea (%3, %3, 2), %1 \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea (%0, %3, 4), %0 \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        :"+&r" (pixels), "=&r" (line_skip3)
        :"r" (block), "r"(line_skip)
        :"memory");
}

void ff_add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile(
            "movq (%2), %%mm0 \n\t"
            "movq 8(%2), %%mm1 \n\t"
            "movq 16(%2), %%mm2 \n\t"
            "movq 24(%2), %%mm3 \n\t"
            "movq %0, %%mm4 \n\t"
            "movq %1, %%mm6 \n\t"
            "movq %%mm4, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm4, %%mm0 \n\t"
            "paddsw %%mm5, %%mm1 \n\t"
            "movq %%mm6, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm6 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm6, %%mm2 \n\t"
            "paddsw %%mm5, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %0 \n\t"
            "movq %%mm2, %1 \n\t"
            :"+m"(*pix), "+m"(*(pix+line_size))
            :"r"(p)
            :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}
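
#if 0
/* Editor's sketch (not compiled): scalar reference for
 * ff_add_pixels_clamped_mmx -- the IDCT residual is added to the current
 * pixels and the sum is saturated back to [0,255]. */
static void add_pixels_clamped_ref(const DCTELEM *block, uint8_t *pixels,
                                   int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            pixels[j] = av_clip_uint8(pixels[j] + block[j]);
        pixels += line_size;
        block  += 8;
    }
}
#endif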

static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ".p2align 3 \n\t"
        "1: \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ".p2align 3 \n\t"
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ".p2align 3 \n\t"
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}
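
#if 0
/* Editor's sketch (not compiled, assumes <string.h>): every
 * put_pixelsN_mmx variant above is a plain row copy, unrolled four rows
 * per loop iteration (h is assumed to be a multiple of 4). */
static void put_pixels_ref(uint8_t *block, const uint8_t *pixels,
                           int line_size, int h, int n)
{
    while (h--) {
        memcpy(block, pixels, n); /* n = 4, 8 or 16 bytes per row */
        block  += line_size;
        pixels += line_size;
    }
}
#endif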

static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
        );
}

static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "pavgb (%2), %%xmm0 \n\t"
        "pavgb (%2,%3), %%xmm1 \n\t"
        "pavgb (%2,%3,2), %%xmm2 \n\t"
        "pavgb (%2,%4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
        );
}
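
/* Editor's note: pavgb computes the rounded byte average (a + b + 1) >> 1,
 * so avg_pixels16_sse2 averages 16 unaligned source bytes into the
 * 16-byte-aligned destination, four rows per loop iteration. */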

#define CLEAR_BLOCKS(name,n) \
static void name(DCTELEM *blocks)\
{\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "mov %1, %%"REG_a" \n\t"\
        "1: \n\t"\
        "movq %%mm7, (%0, %%"REG_a") \n\t"\
        "movq %%mm7, 8(%0, %%"REG_a") \n\t"\
        "movq %%mm7, 16(%0, %%"REG_a") \n\t"\
        "movq %%mm7, 24(%0, %%"REG_a") \n\t"\
        "add $32, %%"REG_a" \n\t"\
        " js 1b \n\t"\
        : : "r" (((uint8_t *)blocks)+128*n),\
            "i" (-128*n)\
        : "%"REG_a\
    );\
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)
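
#if 0
/* Editor's sketch (not compiled, assumes <string.h>): CLEAR_BLOCKS is a
 * memset of n consecutive 64-coefficient DCT blocks (128 bytes each),
 * walking back from the end via the negative index held in REG_a. */
static void clear_blocks_ref(DCTELEM *blocks, int n)
{
    memset(blocks, 0, n * 64 * sizeof(DCTELEM));
}
#endif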

static void clear_block_sse(DCTELEM *block)
{
    __asm__ volatile(
        "xorps %%xmm0, %%xmm0 \n"
        "movaps %%xmm0, (%0) \n"
        "movaps %%xmm0, 16(%0) \n"
        "movaps %%xmm0, 32(%0) \n"
        "movaps %%xmm0, 48(%0) \n"
        "movaps %%xmm0, 64(%0) \n"
        "movaps %%xmm0, 80(%0) \n"
        "movaps %%xmm0, 96(%0) \n"
        "movaps %%xmm0, 112(%0) \n"
        :: "r"(block)
        : "memory"
    );
}

static void clear_blocks_sse(DCTELEM *blocks)
{
    __asm__ volatile(
        "xorps %%xmm0, %%xmm0 \n"
        "mov %1, %%"REG_a" \n"
        "1: \n"
        "movaps %%xmm0, (%0, %%"REG_a") \n"
        "movaps %%xmm0, 16(%0, %%"REG_a") \n"
        "movaps %%xmm0, 32(%0, %%"REG_a") \n"
        "movaps %%xmm0, 48(%0, %%"REG_a") \n"
        "movaps %%xmm0, 64(%0, %%"REG_a") \n"
        "movaps %%xmm0, 80(%0, %%"REG_a") \n"
        "movaps %%xmm0, 96(%0, %%"REG_a") \n"
        "movaps %%xmm0, 112(%0, %%"REG_a") \n"
        "add $128, %%"REG_a" \n"
        " js 1b \n"
        : : "r" (((uint8_t *)blocks)+128*6),
            "i" (-128*6)
        : "%"REG_a
    );
}

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f \n\t"
        "1: \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq (%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%2, %0) \n\t"
        "movq 8(%1, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%2, %0) \n\t"
        "add $16, %0 \n\t"
        "2: \n\t"
        "cmp %3, %0 \n\t"
        " js 1b \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

#if HAVE_7REGS
static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top) {
    x86_reg w2 = -w;
    x86_reg x;
    int l = *left & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile(
        "mov %7, %3 \n"
        "1: \n"
        "movzbl (%3,%4), %2 \n"
        "mov %2, %k3 \n"
        "sub %b1, %b3 \n"
        "add %b0, %b3 \n"
        "mov %2, %1 \n"
        "cmp %0, %2 \n"
        "cmovg %0, %2 \n"
        "cmovg %1, %0 \n"
        "cmp %k3, %0 \n"
        "cmovg %k3, %0 \n"
        "mov %7, %3 \n"
        "cmp %2, %0 \n"
        "cmovl %2, %0 \n"
        "add (%6,%4), %b0 \n"
        "mov %b0, (%5,%4) \n"
        "inc %4 \n"
        "jl 1b \n"
        :"+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        :"r"(dst+w), "r"(diff+w), "rm"(top+w)
    );
    *left = l;
    *left_top = tl;
}
#endif
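
#if 0
/* Editor's sketch (not compiled): scalar reference for the HuffYUV median
 * predictor implemented with cmov above; mid_pred() is the libavutil
 * median-of-three helper.  The prediction is the median of left, top and
 * (left + top - topleft), to which the coded difference is added. */
static void add_hfyu_median_prediction_ref(uint8_t *dst, const uint8_t *top,
                                           const uint8_t *diff, int w,
                                           int *left, int *left_top)
{
    int i;
    uint8_t l = *left, lt = *left_top;
    for (i = 0; i < w; i++) {
        l      = mid_pred(l, top[i], (l + top[i] - lt) & 0xff) + diff[i];
        lt     = top[i];
        dst[i] = l;
    }
    *left     = l;
    *left_top = lt;
}
#endif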

#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7 \n\t"\
    "movq %0, %%mm0 \n\t"\
    "movq %0, %%mm1 \n\t"\
    "movq %3, %%mm2 \n\t"\
    "movq %3, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm0 \n\t"\
    "punpckhbw %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "psubw %%mm2, %%mm0 \n\t"\
    "psubw %%mm3, %%mm1 \n\t"\
    "movq %1, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "movq %2, %%mm5 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm7, %%mm5 \n\t"\
    "psubw %%mm2, %%mm4 \n\t"\
    "psubw %%mm3, %%mm5 \n\t"\
    "psllw $2, %%mm4 \n\t"\
    "psllw $2, %%mm5 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpgtw %%mm4, %%mm6 \n\t"\
    "pcmpgtw %%mm5, %%mm7 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "pxor %%mm7, %%mm5 \n\t"\
    "psubw %%mm6, %%mm4 \n\t"\
    "psubw %%mm7, %%mm5 \n\t"\
    "psrlw $3, %%mm4 \n\t"\
    "psrlw $3, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm4 \n\t"\
    "packsswb %%mm7, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "movd %4, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "psubusb %%mm4, %%mm2 \n\t"\
    "movq %%mm2, %%mm3 \n\t"\
    "psubusb %%mm4, %%mm3 \n\t"\
    "psubb %%mm3, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm3 \n\t"\
    "psubusb %%mm2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm2 \n\t"\
    "packsswb %%mm1, %%mm0 \n\t"\
    "pcmpgtb %%mm0, %%mm7 \n\t"\
    "pxor %%mm7, %%mm0 \n\t"\
    "psubb %%mm7, %%mm0 \n\t"\
    "movq %%mm0, %%mm1 \n\t"\
    "psubusb %%mm2, %%mm0 \n\t"\
    "psubb %%mm0, %%mm1 \n\t"\
    "pand %5, %%mm1 \n\t"\
    "psrlw $2, %%mm1 \n\t"\
    "pxor %%mm7, %%mm1 \n\t"\
    "psubb %%mm7, %%mm1 \n\t"\
    "movq %0, %%mm5 \n\t"\
    "movq %3, %%mm6 \n\t"\
    "psubb %%mm1, %%mm5 \n\t"\
    "paddb %%mm1, %%mm6 \n\t"

static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
    const int strength= ff_h263_loop_filter_strength[qscale];

    __asm__ volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1 \n\t"
        "movq %%mm4, %2 \n\t"
        "movq %%mm5, %0 \n\t"
        "movq %%mm6, %3 \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
    }
}

static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
    const int strength= ff_h263_loop_filter_strength[qscale];
    DECLARE_ALIGNED(8, uint64_t, temp)[4];
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp , src , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    __asm__ volatile(
        H263_LOOP_FILTER // 5 3 4 6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    __asm__ volatile(
        "movq %%mm5, %%mm1 \n\t"
        "movq %%mm4, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm5 \n\t"
        "punpcklbw %%mm6, %%mm4 \n\t"
        "punpckhbw %%mm3, %%mm1 \n\t"
        "punpckhbw %%mm6, %%mm0 \n\t"
        "movq %%mm5, %%mm3 \n\t"
        "movq %%mm1, %%mm6 \n\t"
        "punpcklwd %%mm4, %%mm5 \n\t"
        "punpcklwd %%mm0, %%mm1 \n\t"
        "punpckhwd %%mm4, %%mm3 \n\t"
        "punpckhwd %%mm0, %%mm6 \n\t"
        "movd %%mm5, (%0) \n\t"
        "punpckhdq %%mm5, %%mm5 \n\t"
        "movd %%mm5, (%0,%2) \n\t"
        "movd %%mm3, (%0,%2,2) \n\t"
        "punpckhdq %%mm3, %%mm3 \n\t"
        "movd %%mm3, (%0,%3) \n\t"
        "movd %%mm1, (%1) \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, (%1,%2) \n\t"
        "movd %%mm6, (%1,%2,2) \n\t"
        "punpckhdq %%mm6, %%mm6 \n\t"
        "movd %%mm6, (%1,%3) \n\t"
        :: "r" (src),
           "r" (src + 4*stride),
           "r" ((x86_reg) stride ),
           "r" ((x86_reg)(3*stride))
    );
    }
}

/* Draw the edges of width 'w' of an image of size width x height;
   this MMX version can only handle w == 8 or w == 16. */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w, int h, int sides)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if(w==8)
    {
        __asm__ volatile(
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }
    else
    {
        __asm__ volatile(
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq %%mm0, -16(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "movq %%mm1, 8(%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }

    /* top and bottom (and hopefully also the corners) */
    if (sides&EDGE_TOP) {
        for(i = 0; i < h; i += 4) {
            ptr= buf - (i + 1) * wrap - w;
            __asm__ volatile(
                "1: \n\t"
                "movq (%1, %0), %%mm0 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm0, (%0, %2) \n\t"
                "movq %%mm0, (%0, %2, 2) \n\t"
                "movq %%mm0, (%0, %3) \n\t"
                "add $8, %0 \n\t"
                "cmp %4, %0 \n\t"
                " jb 1b \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
            );
        }
    }

    if (sides&EDGE_BOTTOM) {
        for(i = 0; i < h; i += 4) {
            ptr= last_line + (i + 1) * wrap - w;
            __asm__ volatile(
                "1: \n\t"
                "movq (%1, %0), %%mm0 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm0, (%0, %2) \n\t"
                "movq %%mm0, (%0, %2, 2) \n\t"
                "movq %%mm0, (%0, %3) \n\t"
                "add $8, %0 \n\t"
                "cmp %4, %0 \n\t"
                " jb 1b \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
            );
        }
    }
}
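
/* Editor's note: draw_edges replicates the outermost picture samples into
 * the surrounding border: each row's first and last pixel are smeared
 * sideways with punpck + movq, then whole edge rows (including the
 * corners) are copied upwards/downwards, four border rows per iteration. */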

#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
    "paddw " #m4 ", " #m3 " \n\t" /* x1 */\
    "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\
    "pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\
    "movq "#in7", " #m3 " \n\t" /* d */\
    "movq "#in0", %%mm5 \n\t" /* D */\
    "paddw " #m3 ", %%mm5 \n\t" /* x4 */\
    "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\
    "movq "#in1", %%mm5 \n\t" /* C */\
    "movq "#in2", %%mm6 \n\t" /* B */\
    "paddw " #m6 ", %%mm5 \n\t" /* x3 */\
    "paddw " #m5 ", %%mm6 \n\t" /* x2 */\
    "paddw %%mm6, %%mm6 \n\t" /* 2x2 */\
    "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\
    "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\
    "paddw " #rnd ", %%mm4 \n\t" /* x2 */\
    "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
    "psraw $5, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    OP(%%mm5, out, %%mm7, d)
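
/* Editor's note: QPEL_V_LOW evaluates one step of the MPEG-4 quarter-pel
 * FIR filter, (20*x1 - 6*x2 + 3*x3 - x4 + rounder) >> 5, where x1..x4
 * are sums of symmetric tap pairs around the interpolated position. */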

#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %6, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        "movq %%mm0, %5 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2 \n\t" /* b */\
        "paddw %%mm5, %%mm3 \n\t" /* c */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6 \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm4 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "movq %5, %%mm1 \n\t"\
        "packuswb %%mm3, %%mm1 \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
\
        "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5 \n\t" /* b */\
        "paddw %%mm4, %%mm0 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm0 \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2 \n\t" /* d */\
        "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6 \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
\
        "paddw %%mm5, %%mm3 \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4 \n\t" /* c */\
        "paddw %%mm2, %%mm5 \n\t" /* d */\
        "paddw %%mm6, %%mm6 \n\t" /* 2b */\
        "psubw %%mm6, %%mm4 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm4 \n\t"\
        "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4 \n\t"\
        "packuswb %%mm4, %%mm0 \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+D"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        __asm__ volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0 \n\t"\
            "movq 24(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %5, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movd 5(%0), %%mm5 \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm2 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3 \n\t" /* c */\
        "paddw %%mm5, %%mm4 \n\t" /* d */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %5, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "packuswb %%mm3, %%mm0 \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+d"(h)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        __asm__ volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}
1143
1144 #define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
1145 \
1146 static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1147 uint64_t temp[17*4];\
1148 uint64_t *temp_ptr= temp;\
1149 int count= 17;\
1150 \
1151 /*FIXME unroll */\
1152 __asm__ volatile(\
1153 "pxor %%mm7, %%mm7 \n\t"\
1154 "1: \n\t"\
1155 "movq (%0), %%mm0 \n\t"\
1156 "movq (%0), %%mm1 \n\t"\
1157 "movq 8(%0), %%mm2 \n\t"\
1158 "movq 8(%0), %%mm3 \n\t"\
1159 "punpcklbw %%mm7, %%mm0 \n\t"\
1160 "punpckhbw %%mm7, %%mm1 \n\t"\
1161 "punpcklbw %%mm7, %%mm2 \n\t"\
1162 "punpckhbw %%mm7, %%mm3 \n\t"\
1163 "movq %%mm0, (%1) \n\t"\
1164 "movq %%mm1, 17*8(%1) \n\t"\
1165 "movq %%mm2, 2*17*8(%1) \n\t"\
1166 "movq %%mm3, 3*17*8(%1) \n\t"\
1167 "add $8, %1 \n\t"\
1168 "add %3, %0 \n\t"\
1169 "decl %2 \n\t"\
1170 " jnz 1b \n\t"\
1171 : "+r" (src), "+r" (temp_ptr), "+r"(count)\
1172 : "r" ((x86_reg)srcStride)\
1173 : "memory"\
1174 );\
1175 \
1176 temp_ptr= temp;\
1177 count=4;\
1178 \
1179 /*FIXME reorder for speed */\
1180 __asm__ volatile(\
1181 /*"pxor %%mm7, %%mm7 \n\t"*/\
1182 "1: \n\t"\
1183 "movq (%0), %%mm0 \n\t"\
1184 "movq 8(%0), %%mm1 \n\t"\
1185 "movq 16(%0), %%mm2 \n\t"\
1186 "movq 24(%0), %%mm3 \n\t"\
1187 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
1188 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
1189 "add %4, %1 \n\t"\
1190 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
1191 \
1192 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
1193 "add %4, %1 \n\t"\
1194 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
1195 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
1196 "add %4, %1 \n\t"\
1197 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
1198 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
1199 "add %4, %1 \n\t"\
1200 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
1201 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
1202 "add %4, %1 \n\t"\
1203 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
1204 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
1205 "add %4, %1 \n\t"\
1206 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
1207 \
1208 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
1209 "add %4, %1 \n\t" \
1210 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
1211 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
1212 \
1213 "add $136, %0 \n\t"\
1214 "add %6, %1 \n\t"\
1215 "decl %2 \n\t"\
1216 " jnz 1b \n\t"\
1217 \
1218 : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
1219 : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
1220 :"memory"\
1221 );\
1222 }\
1223 \
1224 static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1225 uint64_t temp[9*2];\
1226 uint64_t *temp_ptr= temp;\
1227 int count= 9;\
1228 \
1229 /*FIXME unroll */\
1230 __asm__ volatile(\
1231 "pxor %%mm7, %%mm7 \n\t"\
1232 "1: \n\t"\
1233 "movq (%0), %%mm0 \n\t"\
1234 "movq (%0), %%mm1 \n\t"\
1235 "punpcklbw %%mm7, %%mm0 \n\t"\
1236 "punpckhbw %%mm7, %%mm1 \n\t"\
1237 "movq %%mm0, (%1) \n\t"\
1238 "movq %%mm1, 9*8(%1) \n\t"\
1239 "add $8, %1 \n\t"\
1240 "add %3, %0 \n\t"\
1241 "decl %2 \n\t"\
1242 " jnz 1b \n\t"\
1243 : "+r" (src), "+r" (temp_ptr), "+r"(count)\
1244 : "r" ((x86_reg)srcStride)\
1245 : "memory"\
1246 );\
1247 \
1248 temp_ptr= temp;\
1249 count=2;\
1250 \
1251 /*FIXME reorder for speed */\
1252 __asm__ volatile(\
1253 /*"pxor %%mm7, %%mm7 \n\t"*/\
1254 "1: \n\t"\
1255 "movq (%0), %%mm0 \n\t"\
1256 "movq 8(%0), %%mm1 \n\t"\
1257 "movq 16(%0), %%mm2 \n\t"\
1258 "movq 24(%0), %%mm3 \n\t"\
1259 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
1260 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
1261 "add %4, %1 \n\t"\
1262 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
1263 \
1264 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
1265 "add %4, %1 \n\t"\
1266 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
1267 \
1268 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
1269 "add %4, %1 \n\t"\
1270 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
1271 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
1272 \
1273 "add $72, %0 \n\t"\
1274 "add %6, %1 \n\t"\
1275 "decl %2 \n\t"\
1276 " jnz 1b \n\t"\
1277 \
1278 : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
1279 : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
1280 : "memory"\
1281 );\
1282 }\
1283 \
1284 static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
1285 OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
1286 }\
1287 \
1288 static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1289 uint64_t temp[8];\
1290 uint8_t * const half= (uint8_t*)temp;\
1291 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
1292 OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
1293 }\
1294 \
1295 static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1296 OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
1297 }\
1298 \
1299 static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1300 uint64_t temp[8];\
1301 uint8_t * const half= (uint8_t*)temp;\
1302 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
1303 OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
1304 }\
1305 \
1306 static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1307 uint64_t temp[8];\
1308 uint8_t * const half= (uint8_t*)temp;\
1309 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
1310 OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
1311 }\
1312 \
1313 static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1314 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
1315 }\
1316 \
1317 static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1318 uint64_t temp[8];\
1319 uint8_t * const half= (uint8_t*)temp;\
1320 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
1321 OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
1322 }\
1323 static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1324 uint64_t half[8 + 9];\
1325 uint8_t * const halfH= ((uint8_t*)half) + 64;\
1326 uint8_t * const halfHV= ((uint8_t*)half);\
1327 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1328 put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
1329 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
1330 OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
1331 }\
1332 static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1333 uint64_t half[8 + 9];\
1334 uint8_t * const halfH= ((uint8_t*)half) + 64;\
1335 uint8_t * const halfHV= ((uint8_t*)half);\
1336 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1337 put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
1338 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
1339 OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
1340 }\
1341 static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1342 uint64_t half[8 + 9];\
1343 uint8_t * const halfH= ((uint8_t*)half) + 64;\
1344 uint8_t * const halfHV= ((uint8_t*)half);\
1345 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1346 put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
1347 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
1348 OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
1349 }\
1350 static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1351 uint64_t half[8 + 9];\
1352 uint8_t * const halfH= ((uint8_t*)half) + 64;\
1353 uint8_t * const halfHV= ((uint8_t*)half);\
1354 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1355 put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
1356 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
1357 OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
1358 }\
1359 static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1360 uint64_t half[8 + 9];\
1361 uint8_t * const halfH= ((uint8_t*)half) + 64;\
1362 uint8_t * const halfHV= ((uint8_t*)half);\
1363 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1364 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
1365 OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
1366 }\
1367 static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1368 uint64_t half[8 + 9];\
1369 uint8_t * const halfH= ((uint8_t*)half) + 64;\
1370 uint8_t * const halfHV= ((uint8_t*)half);\
1371 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1372 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
1373 OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
1374 }\
1375 static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1376 uint64_t half[8 + 9];\
1377 uint8_t * const halfH= ((uint8_t*)half);\
1378 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1379 put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
1380 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
1381 }\
1382 static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1383 uint64_t half[8 + 9];\
1384 uint8_t * const halfH= ((uint8_t*)half);\
1385 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1386 put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
1387 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
1388 }\
1389 static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1390 uint64_t half[9];\
1391 uint8_t * const halfH= ((uint8_t*)half);\
1392 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1393 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
1394 }\
1395 static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
1396 OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
1397 }\
1398 \
1399 static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1400 uint64_t temp[32];\
1401 uint8_t * const half= (uint8_t*)temp;\
1402 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
1403 OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
1404 }\
1405 \
1406 static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1407 OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
1408 }\
1409 \
1410 static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1411 uint64_t temp[32];\
1412 uint8_t * const half= (uint8_t*)temp;\
1413 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
1414 OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
1415 }\
1416 \
1417 static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1418 uint64_t temp[32];\
1419 uint8_t * const half= (uint8_t*)temp;\
1420 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
1421 OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
1422 }\
1423 \
1424 static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1425 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
1426 }\
1427 \
1428 static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1429 uint64_t temp[32];\
1430 uint8_t * const half= (uint8_t*)temp;\
1431 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
1432 OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
1433 }\
1434 static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1435 uint64_t half[16*2 + 17*2];\
1436 uint8_t * const halfH= ((uint8_t*)half) + 256;\
1437 uint8_t * const halfHV= ((uint8_t*)half);\
1438 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
1439 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
1440 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
1441 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
1442 }\
1443 static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1444 uint64_t half[16*2 + 17*2];\
1445 uint8_t * const halfH= ((uint8_t*)half) + 256;\
1446 uint8_t * const halfHV= ((uint8_t*)half);\
1447 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
1448 put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
1449 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
1450 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
1451 }\
1452 static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1453 uint64_t half[16*2 + 17*2];\
1454 uint8_t * const halfH= ((uint8_t*)half) + 256;\
1455 uint8_t * const halfHV= ((uint8_t*)half);\
1456 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
1457 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
1458 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
1459 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
1460 }\
1461 static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1462 uint64_t half[16*2 + 17*2];\
1463 uint8_t * const halfH= ((uint8_t*)half) + 256;\
1464 uint8_t * const halfHV= ((uint8_t*)half);\
1465 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
1466 put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
1467 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
1468 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
1469 }\
1470 static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1471 uint64_t half[16*2 + 17*2];\
1472 uint8_t * const halfH= ((uint8_t*)half) + 256;\
1473 uint8_t * const halfHV= ((uint8_t*)half);\
1474 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
1475 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
1476 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
1477 }\
1478 static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1479 uint64_t half[16*2 + 17*2];\
1480 uint8_t * const halfH= ((uint8_t*)half) + 256;\
1481 uint8_t * const halfHV= ((uint8_t*)half);\
1482 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
1483 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
1484 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
1485 }\
1486 static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1487 uint64_t half[17*2];\
1488 uint8_t * const halfH= ((uint8_t*)half);\
1489 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
1490 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
1491 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
1492 }\
1493 static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1494 uint64_t half[17*2];\
1495 uint8_t * const halfH= ((uint8_t*)half);\
1496 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
1497 put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
1498 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
1499 }\
1500 static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1501 uint64_t half[17*2];\
1502 uint8_t * const halfH= ((uint8_t*)half);\
1503 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
1504 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
1505 }
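/*
 * Annotation (not part of the original file): in the mcXY names, X and Y are
 * the quarter-pel offsets in x and y (in 1/4-pel units).  Each function above
 * carves two scratch planes out of half[]: halfH, the horizontally
 * lowpass-filtered source (17 rows, one extra for the vertical filter), and
 * halfHV, which is halfH filtered vertically.  A rough scalar sketch of mc11,
 * with illustrative helper names:
 */
#if 0
static void put_qpel16_mc11_sketch(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t halfH[16 * 17], halfHV[16 * 16];
    h_lowpass(halfH, src, 16, stride, 17);       /* x offset 1/2            */
    average(halfH, src, halfH, 16, stride, 17);  /* blend with x=0: x = 1/4 */
    v_lowpass(halfHV, halfH, 16, 16);            /* y offset 1/2            */
    average(dst, halfH, halfHV, stride, 16, 16); /* blend with y=0: y = 1/4 */
}
/* mc31/mc33 blend against src+1 instead of src (x = 3/4); mc13/mc33 read
 * halfH+16, i.e. one row further down, in the final blend (y = 3/4). */
#endif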
1506
1507 #define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
1508 #define AVG_3DNOW_OP(a,b,temp, size) \
1509 "mov" #size " " #b ", " #temp " \n\t"\
1510 "pavgusb " #temp ", " #a " \n\t"\
1511 "mov" #size " " #a ", " #b " \n\t"
1512 #define AVG_MMX2_OP(a,b,temp, size) \
1513 "mov" #size " " #b ", " #temp " \n\t"\
1514 "pavgb " #temp ", " #a " \n\t"\
1515 "mov" #size " " #a ", " #b " \n\t"
1516
1517 QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP)
1518 QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP)
1519 QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
1520 QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow)
1521 QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow)
1522 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
1523 QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2)
1524 QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2)
1525 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
1526
1527 /***********************************/
1528 /* bilinear qpel: not compliant with any spec; only used via "-lavdopts fast" */
1529
1530 #define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
1531 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1532 OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
1533 }
1534 #define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
1535 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1536 OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
1537 }
1538
1539 #define QPEL_2TAP(OPNAME, SIZE, MMX)\
1540 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
1541 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
1542 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
1543 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
1544 OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
1545 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
1546 OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
1547 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
1548 OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
1549 static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1550 OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
1551 }\
1552 static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1553 OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
1554 }\
1555 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0, 1, 0)\
1556 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1, -1, 0)\
1557 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0, stride, 0)\
1558 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride, -stride, 0)\
1559 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0, stride, 1)\
1560 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1, stride, -1)\
1561 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride, -stride, 1)\
1562 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\
1563
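/*
 * Annotation (an assumption inferred from the S0/S1/S2 arguments above): the
 * _l3_ helper, defined earlier in this file, averages the pixel at src+S0
 * with the average of the pixels at offsets S1 and S2 from it, giving a 3:1
 * bilinear blend.  A scalar model of mc10 under that assumption (the exact
 * rounding of the nested averages differs slightly):
 */
#if 0
static void put_2tap_qpel8_mc10_sketch(uint8_t *dst, uint8_t *src, int stride)
{
    int x, y;
    for (y = 0; y < 8; y++, dst += stride, src += stride)
        for (x = 0; x < 8; x++)
            dst[x] = (3 * src[x] + src[x + 1] + 2) >> 2;
}
#endif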
1564 QPEL_2TAP(put_, 16, mmx2)
1565 QPEL_2TAP(avg_, 16, mmx2)
1566 QPEL_2TAP(put_, 8, mmx2)
1567 QPEL_2TAP(avg_, 8, mmx2)
1568 QPEL_2TAP(put_, 16, 3dnow)
1569 QPEL_2TAP(avg_, 16, 3dnow)
1570 QPEL_2TAP(put_, 8, 3dnow)
1571 QPEL_2TAP(avg_, 8, 3dnow)
1572
1573
1574 #if HAVE_YASM
1575 typedef void emu_edge_core_func (uint8_t *buf, const uint8_t *src,
1576 x86_reg linesize, x86_reg start_y,
1577 x86_reg end_y, x86_reg block_h,
1578 x86_reg start_x, x86_reg end_x,
1579 x86_reg block_w);
1580 extern emu_edge_core_func ff_emu_edge_core_mmx;
1581 extern emu_edge_core_func ff_emu_edge_core_sse;
1582
1583 static av_always_inline
1584 void emulated_edge_mc(uint8_t *buf, const uint8_t *src, int linesize,
1585 int block_w, int block_h,
1586 int src_x, int src_y, int w, int h,
1587 emu_edge_core_func *core_fn)
1588 {
1589 int start_y, start_x, end_y, end_x, src_y_add=0;
1590
1591 if(src_y>= h){
1592 src_y_add = h-1-src_y;
1593 src_y=h-1;
1594 }else if(src_y<=-block_h){
1595 src_y_add = 1-block_h-src_y;
1596 src_y=1-block_h;
1597 }
1598 if(src_x>= w){
1599 src+= (w-1-src_x);
1600 src_x=w-1;
1601 }else if(src_x<=-block_w){
1602 src+= (1-block_w-src_x);
1603 src_x=1-block_w;
1604 }
1605
1606 start_y= FFMAX(0, -src_y);
1607 start_x= FFMAX(0, -src_x);
1608 end_y= FFMIN(block_h, h-src_y);
1609 end_x= FFMIN(block_w, w-src_x);
1610 assert(start_x < end_x && block_w > 0);
1611 assert(start_y < end_y && block_h > 0);
1612
1613 // fill in the to-be-copied region plus everything above/below it
1614 src += (src_y_add+start_y)*linesize + start_x;
1615 buf += start_x;
1616 core_fn(buf, src, linesize, start_y, end_y, block_h, start_x, end_x, block_w);
1617 }
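/*
 * Annotation (not part of the original file): emulated_edge_mc() clamps the
 * block position so all reads stay inside the w x h picture, then core_fn
 * copies the valid region and pads the rest of buf by edge replication.
 * A scalar model of that padding, taking buf as the block origin (the real
 * core function receives buf pre-offset by start_x):
 */
#if 0
static void emu_edge_core_sketch(uint8_t *buf, const uint8_t *src, int linesize,
                                 int start_y, int end_y, int block_h,
                                 int start_x, int end_x, int block_w)
{
    int x, y;
    for (y = start_y; y < end_y; y++) {
        memcpy(buf + y * linesize + start_x,
               src + (y - start_y) * linesize, end_x - start_x);
        for (x = 0; x < start_x; x++)         /* replicate left edge  */
            buf[y * linesize + x] = buf[y * linesize + start_x];
        for (x = end_x; x < block_w; x++)     /* replicate right edge */
            buf[y * linesize + x] = buf[y * linesize + end_x - 1];
    }
    for (y = 0; y < start_y; y++)             /* replicate top rows    */
        memcpy(buf + y * linesize, buf + start_y * linesize, block_w);
    for (y = end_y; y < block_h; y++)         /* replicate bottom rows */
        memcpy(buf + y * linesize, buf + (end_y - 1) * linesize, block_w);
}
#endif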
1618
1619 #if ARCH_X86_32
1620 static av_noinline
1621 void emulated_edge_mc_mmx(uint8_t *buf, const uint8_t *src, int linesize,
1622 int block_w, int block_h,
1623 int src_x, int src_y, int w, int h)
1624 {
1625 emulated_edge_mc(buf, src, linesize, block_w, block_h, src_x, src_y,
1626 w, h, &ff_emu_edge_core_mmx);
1627 }
1628 #endif
1629 static av_noinline
1630 void emulated_edge_mc_sse(uint8_t *buf, const uint8_t *src, int linesize,
1631 int block_w, int block_h,
1632 int src_x, int src_y, int w, int h)
1633 {
1634 emulated_edge_mc(buf, src, linesize, block_w, block_h, src_x, src_y,
1635 w, h, &ff_emu_edge_core_sse);
1636 }
1637 #endif /* HAVE_YASM */
1638
1639 typedef void emulated_edge_mc_func (uint8_t *dst, const uint8_t *src,
1640 int linesize, int block_w, int block_h,
1641 int src_x, int src_y, int w, int h);
1642
1643 static av_always_inline
1644 void gmc(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
1645 int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height,
1646 emulated_edge_mc_func *emu_edge_fn)
1647 {
1648 const int w = 8;
1649 const int ix = ox>>(16+shift);
1650 const int iy = oy>>(16+shift);
1651 const int oxs = ox>>4;
1652 const int oys = oy>>4;
1653 const int dxxs = dxx>>4;
1654 const int dxys = dxy>>4;
1655 const int dyxs = dyx>>4;
1656 const int dyys = dyy>>4;
1657 const uint16_t r4[4] = {r,r,r,r};
1658 const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
1659 const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
1660 const uint64_t shift2 = 2*shift;
1661 uint8_t edge_buf[(h+1)*stride];
1662 int x, y;
1663
1664 const int dxw = (dxx-(1<<(16+shift)))*(w-1);
1665 const int dyh = (dyy-(1<<(16+shift)))*(h-1);
1666 const int dxh = dxy*(h-1);
1667 const int dyw = dyx*(w-1);
1668 if( // non-constant fullpel offset (3% of blocks)
1669 ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
1670 (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
1671 // uses more than 16 bits of subpel mv (only at huge resolution)
1672 || (dxx|dxy|dyx|dyy)&15 )
1673 {
1674 //FIXME could still use mmx for some of the rows
1675 ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
1676 return;
1677 }
1678
1679 src += ix + iy*stride;
1680 if( (unsigned)ix >= width-w ||
1681 (unsigned)iy >= height-h )
1682 {
1683 emu_edge_fn(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
1684 src = edge_buf;
1685 }
1686
1687 __asm__ volatile(
1688 "movd %0, %%mm6 \n\t"
1689 "pxor %%mm7, %%mm7 \n\t"
1690 "punpcklwd %%mm6, %%mm6 \n\t"
1691 "punpcklwd %%mm6, %%mm6 \n\t"
1692 :: "r"(1<<shift)
1693 );
1694
1695 for(x=0; x<w; x+=4){
1696 uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
1697 oxs - dxys + dxxs*(x+1),
1698 oxs - dxys + dxxs*(x+2),
1699 oxs - dxys + dxxs*(x+3) };
1700 uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
1701 oys - dyys + dyxs*(x+1),
1702 oys - dyys + dyxs*(x+2),
1703 oys - dyys + dyxs*(x+3) };
1704
1705 for(y=0; y<h; y++){
1706 __asm__ volatile(
1707 "movq %0, %%mm4 \n\t"
1708 "movq %1, %%mm5 \n\t"
1709 "paddw %2, %%mm4 \n\t"
1710 "paddw %3, %%mm5 \n\t"
1711 "movq %%mm4, %0 \n\t"
1712 "movq %%mm5, %1 \n\t"
1713 "psrlw $12, %%mm4 \n\t"
1714 "psrlw $12, %%mm5 \n\t"
1715 : "+m"(*dx4), "+m"(*dy4)
1716 : "m"(*dxy4), "m"(*dyy4)
1717 );
1718
1719 __asm__ volatile(
1720 "movq %%mm6, %%mm2 \n\t"
1721 "movq %%mm6, %%mm1 \n\t"
1722 "psubw %%mm4, %%mm2 \n\t"
1723 "psubw %%mm5, %%mm1 \n\t"
1724 "movq %%mm2, %%mm0 \n\t"
1725 "movq %%mm4, %%mm3 \n\t"
1726 "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
1727 "pmullw %%mm5, %%mm3 \n\t" // dx*dy
1728 "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
1729 "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)
1730
1731 "movd %4, %%mm5 \n\t"
1732 "movd %3, %%mm4 \n\t"
1733 "punpcklbw %%mm7, %%mm5 \n\t"
1734 "punpcklbw %%mm7, %%mm4 \n\t"
1735 "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
1736 "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy
1737
1738 "movd %2, %%mm5 \n\t"
1739 "movd %1, %%mm4 \n\t"
1740 "punpcklbw %%mm7, %%mm5 \n\t"
1741 "punpcklbw %%mm7, %%mm4 \n\t"
1742 "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
1743 "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
1744 "paddw %5, %%mm1 \n\t"
1745 "paddw %%mm3, %%mm2 \n\t"
1746 "paddw %%mm1, %%mm0 \n\t"
1747 "paddw %%mm2, %%mm0 \n\t"
1748
1749 "psrlw %6, %%mm0 \n\t"
1750 "packuswb %%mm0, %%mm0 \n\t"
1751 "movd %%mm0, %0 \n\t"
1752
1753 : "=m"(dst[x+y*stride])
1754 : "m"(src[0]), "m"(src[1]),
1755 "m"(src[stride]), "m"(src[stride+1]),
1756 "m"(*r4), "m"(shift2)
1757 );
1758 src += stride;
1759 }
1760 src += 4-h*stride;
1761 }
1762 }
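/*
 * Annotation (not part of the original file): per output pixel the loop above
 * performs a bilinear blend of the four neighbouring source pixels.  With
 * s = 1<<shift and dx, dy the shift-bit fractional offsets kept in mm4/mm5,
 * each pixel is, in scalar form:
 *
 *   dst[x] = (src[0]        * (s-dx) * (s-dy)
 *           + src[1]        *  dx    * (s-dy)
 *           + src[stride]   * (s-dx) *  dy
 *           + src[stride+1] *  dx    *  dy
 *           + r) >> (2*shift);
 */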
1763
1764 #if HAVE_YASM
1765 #if ARCH_X86_32
1766 static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
1767 int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
1768 {
1769 gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
1770 width, height, &emulated_edge_mc_mmx);
1771 }
1772 #endif
1773 static void gmc_sse(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
1774 int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
1775 {
1776 gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
1777 width, height, &emulated_edge_mc_sse);
1778 }
1779 #else
1780 static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
1781 int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
1782 {
1783 gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
1784 width, height, &ff_emulated_edge_mc_8);
1785 }
1786 #endif
1787
1788 #define PREFETCH(name, op) \
1789 static void name(void *mem, int stride, int h){\
1790 const uint8_t *p= mem;\
1791 do{\
1792 __asm__ volatile(#op" %0" :: "m"(*p));\
1793 p+= stride;\
1794 }while(--h);\
1795 }
1796 PREFETCH(prefetch_mmx2, prefetcht0)
1797 PREFETCH(prefetch_3dnow, prefetch)
1798 #undef PREFETCH
1799
1800 #include "h264_qpel_mmx.c"
1801
1802 void ff_put_h264_chroma_mc8_mmx_rnd (uint8_t *dst, uint8_t *src,
1803 int stride, int h, int x, int y);
1804 void ff_avg_h264_chroma_mc8_mmx2_rnd (uint8_t *dst, uint8_t *src,
1805 int stride, int h, int x, int y);
1806 void ff_avg_h264_chroma_mc8_3dnow_rnd (uint8_t *dst, uint8_t *src,
1807 int stride, int h, int x, int y);
1808
1809 void ff_put_h264_chroma_mc4_mmx (uint8_t *dst, uint8_t *src,
1810 int stride, int h, int x, int y);
1811 void ff_avg_h264_chroma_mc4_mmx2 (uint8_t *dst, uint8_t *src,
1812 int stride, int h, int x, int y);
1813 void ff_avg_h264_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src,
1814 int stride, int h, int x, int y);
1815
1816 void ff_put_h264_chroma_mc2_mmx2 (uint8_t *dst, uint8_t *src,
1817 int stride, int h, int x, int y);
1818 void ff_avg_h264_chroma_mc2_mmx2 (uint8_t *dst, uint8_t *src,
1819 int stride, int h, int x, int y);
1820
1821 void ff_put_h264_chroma_mc8_ssse3_rnd (uint8_t *dst, uint8_t *src,
1822 int stride, int h, int x, int y);
1823 void ff_put_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
1824 int stride, int h, int x, int y);
1825
1826 void ff_avg_h264_chroma_mc8_ssse3_rnd (uint8_t *dst, uint8_t *src,
1827 int stride, int h, int x, int y);
1828 void ff_avg_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
1829 int stride, int h, int x, int y);
1830
1831 #define CHROMA_MC(OP, NUM, DEPTH, OPT) \
1832 void ff_ ## OP ## _h264_chroma_mc ## NUM ## _ ## DEPTH ## _ ## OPT \
1833 (uint8_t *dst, uint8_t *src,\
1834 int stride, int h, int x, int y);
1835
1836 CHROMA_MC(put, 2, 10, mmxext)
1837 CHROMA_MC(avg, 2, 10, mmxext)
1838 CHROMA_MC(put, 4, 10, mmxext)
1839 CHROMA_MC(avg, 4, 10, mmxext)
1840 CHROMA_MC(put, 8, 10, sse2)
1841 CHROMA_MC(avg, 8, 10, sse2)
1842 CHROMA_MC(put, 8, 10, avx)
1843 CHROMA_MC(avg, 8, 10, avx)
1844
1845 /* CAVS specific */
1846 void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1847 put_pixels8_mmx(dst, src, stride, 8);
1848 }
1849 void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1850 avg_pixels8_mmx(dst, src, stride, 8);
1851 }
1852 void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1853 put_pixels16_mmx(dst, src, stride, 16);
1854 }
1855 void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1856 avg_pixels16_mmx(dst, src, stride, 16);
1857 }
1858
1859 /* VC1 specific */
1860 void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
1861 put_pixels8_mmx(dst, src, stride, 8);
1862 }
1863 void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
1864 avg_pixels8_mmx2(dst, src, stride, 8);
1865 }
1866
1867 /* XXX: these functions should be removed as soon as all IDCTs have been
1868 converted */
1869 #if CONFIG_GPL
1870 static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
1871 {
1872 ff_mmx_idct (block);
1873 ff_put_pixels_clamped_mmx(block, dest, line_size);
1874 }
1875 static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
1876 {
1877 ff_mmx_idct (block);
1878 ff_add_pixels_clamped_mmx(block, dest, line_size);
1879 }
1880 static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
1881 {
1882 ff_mmxext_idct (block);
1883 ff_put_pixels_clamped_mmx(block, dest, line_size);
1884 }
1885 static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
1886 {
1887 ff_mmxext_idct (block);
1888 ff_add_pixels_clamped_mmx(block, dest, line_size);
1889 }
1890 #endif
1891 static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
1892 {
1893 ff_idct_xvid_mmx (block);
1894 ff_put_pixels_clamped_mmx(block, dest, line_size);
1895 }
1896 static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
1897 {
1898 ff_idct_xvid_mmx (block);
1899 ff_add_pixels_clamped_mmx(block, dest, line_size);
1900 }
1901 static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
1902 {
1903 ff_idct_xvid_mmx2 (block);
1904 ff_put_pixels_clamped_mmx(block, dest, line_size);
1905 }
1906 static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
1907 {
1908 ff_idct_xvid_mmx2 (block);
1909 ff_add_pixels_clamped_mmx(block, dest, line_size);
1910 }
1911
1912 static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
1913 {
1914 int i;
1915 __asm__ volatile("pxor %%mm7, %%mm7":);
1916 for(i=0; i<blocksize; i+=2) {
1917 __asm__ volatile(
1918 "movq %0, %%mm0 \n\t"
1919 "movq %1, %%mm1 \n\t"
1920 "movq %%mm0, %%mm2 \n\t"
1921 "movq %%mm1, %%mm3 \n\t"
1922 "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
1923 "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
1924 "pslld $31, %%mm2 \n\t" // keep only the sign bit
1925 "pxor %%mm2, %%mm1 \n\t"
1926 "movq %%mm3, %%mm4 \n\t"
1927 "pand %%mm1, %%mm3 \n\t"
1928 "pandn %%mm1, %%mm4 \n\t"
1929 "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
1930 "pfsub %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
1931 "movq %%mm3, %1 \n\t"
1932 "movq %%mm0, %0 \n\t"
1933 :"+m"(mag[i]), "+m"(ang[i])
1934 ::"memory"
1935 );
1936 }
1937 __asm__ volatile("femms");
1938 }
1939 static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
1940 {
1941 int i;
1942
1943 __asm__ volatile(
1944 "movaps %0, %%xmm5 \n\t"
1945 ::"m"(ff_pdw_80000000[0])
1946 );
1947 for(i=0; i<blocksize; i+=4) {
1948 __asm__ volatile(
1949 "movaps %0, %%xmm0 \n\t"
1950 "movaps %1, %%xmm1 \n\t"
1951 "xorps %%xmm2, %%xmm2 \n\t"
1952 "xorps %%xmm3, %%xmm3 \n\t"
1953 "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
1954 "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
1955 "andps %%xmm5, %%xmm2 \n\t" // keep only the sign bit
1956 "xorps %%xmm2, %%xmm1 \n\t"
1957 "movaps %%xmm3, %%xmm4 \n\t"
1958 "andps %%xmm1, %%xmm3 \n\t"
1959 "andnps %%xmm1, %%xmm4 \n\t"
1960 "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
1961 "subps %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
1962 "movaps %%xmm3, %1 \n\t"
1963 "movaps %%xmm0, %0 \n\t"
1964 :"+m"(mag[i]), "+m"(ang[i])
1965 ::"memory"
1966 );
1967 }
1968 }
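/*
 * Annotation (not part of the original file): both versions above implement
 * Vorbis square-polar decoupling.  A scalar reference under the usual
 * semantics (the SIMD code may differ from this at exact zeroes, since its
 * masks test m >= 0 rather than m > 0):
 */
#if 0
static void vorbis_inverse_coupling_scalar(float *mag, float *ang, int blocksize)
{
    int i;
    for (i = 0; i < blocksize; i++) {
        if (mag[i] > 0.0f) {
            if (ang[i] > 0.0f) { ang[i] = mag[i] - ang[i]; }
            else { float t = ang[i]; ang[i] = mag[i]; mag[i] += t; }
        } else {
            if (ang[i] > 0.0f) { ang[i] += mag[i]; }
            else { float t = ang[i]; ang[i] = mag[i]; mag[i] -= t; }
        }
    }
}
#endif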
1969
1970 #define IF1(x) x
1971 #define IF0(x)
1972
1973 #define MIX5(mono,stereo)\
1974 __asm__ volatile(\
1975 "movss 0(%2), %%xmm5 \n"\
1976 "movss 8(%2), %%xmm6 \n"\
1977 "movss 24(%2), %%xmm7 \n"\
1978 "shufps $0, %%xmm5, %%xmm5 \n"\
1979 "shufps $0, %%xmm6, %%xmm6 \n"\
1980 "shufps $0, %%xmm7, %%xmm7 \n"\
1981 "1: \n"\
1982 "movaps (%0,%1), %%xmm0 \n"\
1983 "movaps 0x400(%0,%1), %%xmm1 \n"\
1984 "movaps 0x800(%0,%1), %%xmm2 \n"\
1985 "movaps 0xc00(%0,%1), %%xmm3 \n"\
1986 "movaps 0x1000(%0,%1), %%xmm4 \n"\
1987 "mulps %%xmm5, %%xmm0 \n"\
1988 "mulps %%xmm6, %%xmm1 \n"\
1989 "mulps %%xmm5, %%xmm2 \n"\
1990 "mulps %%xmm7, %%xmm3 \n"\
1991 "mulps %%xmm7, %%xmm4 \n"\
1992 stereo("addps %%xmm1, %%xmm0 \n")\
1993 "addps %%xmm1, %%xmm2 \n"\
1994 "addps %%xmm3, %%xmm0 \n"\
1995 "addps %%xmm4, %%xmm2 \n"\
1996 mono("addps %%xmm2, %%xmm0 \n")\
1997 "movaps %%xmm0, (%0,%1) \n"\
1998 stereo("movaps %%xmm2, 0x400(%0,%1) \n")\
1999 "add $16, %0 \n"\
2000 "jl 1b \n"\
2001 :"+&r"(i)\
2002 :"r"(samples[0]+len), "r"(matrix)\
2003 :XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \
2004 "%xmm4", "%xmm5", "%xmm6", "%xmm7",)\
2005 "memory"\
2006 );
2007
2008 #define MIX_MISC(stereo)\
2009 __asm__ volatile(\
2010 "1: \n"\
2011 "movaps (%3,%0), %%xmm0 \n"\
2012 stereo("movaps %%xmm0, %%xmm1 \n")\
2013 "mulps %%xmm4, %%xmm0 \n"\
2014 stereo("mulps %%xmm5, %%xmm1 \n")\
2015 "lea 1024(%3,%0), %1 \n"\
2016 "mov %5, %2 \n"\
2017 "2: \n"\
2018 "movaps (%1), %%xmm2 \n"\
2019 stereo("movaps %%xmm2, %%xmm3 \n")\
2020 "mulps (%4,%2), %%xmm2 \n"\
2021 stereo("mulps 16(%4,%2), %%xmm3 \n")\
2022 "addps %%xmm2, %%xmm0 \n"\
2023 stereo("addps %%xmm3, %%xmm1 \n")\
2024 "add $1024, %1 \n"\
2025 "add $32, %2 \n"\
2026 "jl 2b \n"\
2027 "movaps %%xmm0, (%3,%0) \n"\
2028 stereo("movaps %%xmm1, 1024(%3,%0) \n")\
2029 "add $16, %0 \n"\
2030 "jl 1b \n"\
2031 :"+&r"(i), "=&r"(j), "=&r"(k)\
2032 :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\
2033 :"memory"\
2034 );
2035
2036 static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
2037 {
2038 int (*matrix_cmp)[2] = (int(*)[2])matrix;
2039 intptr_t i,j,k;
2040
2041 i = -len*sizeof(float);
2042 if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) {
2043 MIX5(IF0,IF1);
2044 } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
2045 MIX5(IF1,IF0);
2046 } else {
2047 DECLARE_ALIGNED(16, float, matrix_simd)[AC3_MAX_CHANNELS][2][4];
2048 j = 2*in_ch*sizeof(float);
2049 __asm__ volatile(
2050 "1: \n"
2051 "sub $8, %0 \n"
2052 "movss (%2,%0), %%xmm4 \n"
2053 "movss 4(%2,%0), %%xmm5 \n"
2054 "shufps $0, %%xmm4, %%xmm4 \n"
2055 "shufps $0, %%xmm5, %%xmm5 \n"
2056 "movaps %%xmm4, (%1,%0,4) \n"
2057 "movaps %%xmm5, 16(%1,%0,4) \n"
2058 "jg 1b \n"
2059 :"+&r"(j)
2060 :"r"(matrix_simd), "r"(matrix)
2061 :"memory"
2062 );
2063 if(out_ch == 2) {
2064 MIX_MISC(IF1);
2065 } else {
2066 MIX_MISC(IF0);
2067 }
2068 }
2069 }
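/*
 * Annotation (an assumption inferred from the asm above): samples[] holds
 * per-channel planes of 256 floats (0x400 bytes apart).  Assuming AC-3
 * channel order L, C, R, SL, SR and writing a = matrix[0][0],
 * b = matrix[1][0], c = matrix[3][0], MIX5 computes
 *
 *   stereo: L' = a*L + b*C + c*SL
 *           R' = a*R + b*C + c*SR
 *   mono:   M  = a*(L+R) + b*C + c*(SL+SR)
 *
 * The matrix_cmp tests above take these fast paths only when the matrix
 * really has that shape; MIX_MISC handles an arbitrary in_ch x out_ch matrix.
 */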
2070
2071 static void vector_fmul_3dnow(float *dst, const float *src0, const float *src1, int len){
2072 x86_reg i = (len-4)*4;
2073 __asm__ volatile(
2074 "1: \n\t"
2075 "movq (%2,%0), %%mm0 \n\t"
2076 "movq 8(%2,%0), %%mm1 \n\t"
2077 "pfmul (%3,%0), %%mm0 \n\t"
2078 "pfmul 8(%3,%0), %%mm1 \n\t"
2079 "movq %%mm0, (%1,%0) \n\t"
2080 "movq %%mm1, 8(%1,%0) \n\t"
2081 "sub $16, %0 \n\t"
2082 "jge 1b \n\t"
2083 "femms \n\t"
2084 :"+r"(i)
2085 :"r"(dst), "r"(src0), "r"(src1)
2086 :"memory"
2087 );
2088 }
2089 static void vector_fmul_sse(float *dst, const float *src0, const float *src1, int len){
2090 x86_reg i = (len-8)*4;
2091 __asm__ volatile(
2092 "1: \n\t"
2093 "movaps (%2,%0), %%xmm0 \n\t"
2094 "movaps 16(%2,%0), %%xmm1 \n\t"
2095 "mulps (%3,%0), %%xmm0 \n\t"
2096 "mulps 16(%3,%0), %%xmm1 \n\t"
2097 "movaps %%xmm0, (%1,%0) \n\t"
2098 "movaps %%xmm1, 16(%1,%0) \n\t"
2099 "sub $32, %0 \n\t"
2100 "jge 1b \n\t"
2101 :"+r"(i)
2102 :"r"(dst), "r"(src0), "r"(src1)
2103 :"memory"
2104 );
2105 }
2106
2107 static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
2108 x86_reg i = len*4-16;
2109 __asm__ volatile(
2110 "1: \n\t"
2111 "pswapd 8(%1), %%mm0 \n\t"
2112 "pswapd (%1), %%mm1 \n\t"
2113 "pfmul (%3,%0), %%mm0 \n\t"
2114 "pfmul 8(%3,%0), %%mm1 \n\t"
2115 "movq %%mm0, (%2,%0) \n\t"
2116 "movq %%mm1, 8(%2,%0) \n\t"
2117 "add $16, %1 \n\t"
2118 "sub $16, %0 \n\t"
2119 "jge 1b \n\t"
2120 :"+r"(i), "+r"(src1)
2121 :"r"(dst), "r"(src0)
2122 );
2123 __asm__ volatile("femms");
2124 }
2125 static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
2126 x86_reg i = len*4-32;
2127 __asm__ volatile(
2128 "1: \n\t"
2129 "movaps 16(%1), %%xmm0 \n\t"
2130 "movaps (%1), %%xmm1 \n\t"
2131 "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
2132 "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
2133 "mulps (%3,%0), %%xmm0 \n\t"
2134 "mulps 16(%3,%0), %%xmm1 \n\t"
2135 "movaps %%xmm0, (%2,%0) \n\t"
2136 "movaps %%xmm1, 16(%2,%0) \n\t"
2137 "add $32, %1 \n\t"
2138 "sub $32, %0 \n\t"
2139 "jge 1b \n\t"
2140 :"+r"(i), "+r"(src1)
2141 :"r"(dst), "r"(src0)
2142 );
2143 }
2144
2145 static void vector_fmul_add_3dnow(float *dst, const float *src0, const float *src1,
2146 const float *src2, int len){
2147 x86_reg i = (len-4)*4;
2148 __asm__ volatile(
2149 "1: \n\t"
2150 "movq (%2,%0), %%mm0 \n\t"
2151 "movq 8(%2,%0), %%mm1 \n\t"
2152 "pfmul (%3,%0), %%mm0 \n\t"
2153 "pfmul 8(%3,%0), %%mm1 \n\t"
2154 "pfadd (%4,%0), %%mm0 \n\t"
2155 "pfadd 8(%4,%0), %%mm1 \n\t"
2156 "movq %%mm0, (%1,%0) \n\t"
2157 "movq %%mm1, 8(%1,%0) \n\t"
2158 "sub $16, %0 \n\t"
2159 "jge 1b \n\t"
2160 :"+r"(i)
2161 :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
2162 :"memory"
2163 );
2164 __asm__ volatile("femms");
2165 }
2166 static void vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
2167 const float *src2, int len){
2168 x86_reg i = (len-8)*4;
2169 __asm__ volatile(
2170 "1: \n\t"
2171 "movaps (%2,%0), %%xmm0 \n\t"
2172 "movaps 16(%2,%0), %%xmm1 \n\t"
2173 "mulps (%3,%0), %%xmm0 \n\t"
2174 "mulps 16(%3,%0), %%xmm1 \n\t"
2175 "addps (%4,%0), %%xmm0 \n\t"
2176 "addps 16(%4,%0), %%xmm1 \n\t"
2177 "movaps %%xmm0, (%1,%0) \n\t"
2178 "movaps %%xmm1, 16(%1,%0) \n\t"
2179 "sub $32, %0 \n\t"
2180 "jge 1b \n\t"
2181 :"+r"(i)
2182 :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
2183 :"memory"
2184 );
2185 }
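/*
 * Annotation (not part of the original file): scalar equivalents of the three
 * routines above, assuming len is a multiple of the SIMD width and the
 * buffers are aligned as the movq/movaps forms require:
 */
#if 0
static void vector_fmul_scalar(float *dst, const float *src0,
                               const float *src1, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = src0[i] * src1[i];
}
static void vector_fmul_reverse_scalar(float *dst, const float *src0,
                                       const float *src1, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = src0[i] * src1[len - 1 - i];
}
static void vector_fmul_add_scalar(float *dst, const float *src0,
                                   const float *src1, const float *src2,
                                   int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = src0[i] * src1[i] + src2[i];
}
#endif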
2186
2187 #if HAVE_6REGS
2188 static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
2189 const float *win, int len){
2190 x86_reg i = -len*4;
2191 x86_reg j = len*4-8;
2192 __asm__ volatile(
2193 "1: \n"
2194 "pswapd (%5,%1), %%mm1 \n"
2195 "movq (%5,%0), %%mm0 \n"
2196 "pswapd (%4,%1), %%mm5 \n"
2197 "movq (%3,%0), %%mm4 \n"
2198 "movq %%mm0, %%mm2 \n"
2199 "movq %%mm1, %%mm3 \n"
2200 "pfmul %%mm4, %%mm2 \n" // src0[len+i]*win[len+i]
2201 "pfmul %%mm5, %%mm3 \n" // src1[ j]*win[len+j]
2202 "pfmul %%mm4, %%mm1 \n" // src0[len+i]*win[len+j]
2203 "pfmul %%mm5, %%mm0 \n" // src1[ j]*win[len+i]
2204 "pfadd %%mm3, %%mm2 \n"
2205 "pfsub %%mm0, %%mm1 \n"
2206 "pswapd %%mm2, %%mm2 \n"
2207 "movq %%mm1, (%2,%0) \n"
2208 "movq %%mm2, (%2,%1) \n"
2209 "sub $8, %1 \n"
2210 "add $8, %0 \n"
2211 "jl 1b \n"
2212 "femms \n"
2213 :"+r"(i), "+r"(j)
2214 :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
2215 );
2216 }
2217
2218 static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1,
2219 const float *win, int len){
2220 x86_reg i = -len*4;
2221 x86_reg j = len*4-16;
2222 __asm__ volatile(
2223 "1: \n"
2224 "movaps (%5,%1), %%xmm1 \n"
2225 "movaps (%5,%0), %%xmm0 \n"
2226 "movaps (%4,%1), %%xmm5 \n"
2227 "movaps (%3,%0), %%xmm4 \n"
2228 "shufps $0x1b, %%xmm1, %%xmm1 \n"
2229 "shufps $0x1b, %%xmm5, %%xmm5 \n"
2230 "movaps %%xmm0, %%xmm2 \n"
2231 "movaps %%xmm1, %%xmm3 \n"
2232 "mulps %%xmm4, %%xmm2 \n" // src0[len+i]*win[len+i]
2233 "mulps %%xmm5, %%xmm3 \n" // src1[ j]*win[len+j]
2234 "mulps %%xmm4, %%xmm1 \n" // src0[len+i]*win[len+j]
2235 "mulps %%xmm5, %%xmm0 \n" // src1[ j]*win[len+i]
2236 "addps %%xmm3, %%xmm2 \n"
2237 "subps %%xmm0, %%xmm1 \n"
2238 "shufps $0x1b, %%xmm2, %%xmm2 \n"
2239 "movaps %%xmm1, (%2,%0) \n"
2240 "movaps %%xmm2, (%2,%1) \n"
2241 "sub $16, %1 \n"
2242 "add $16, %0 \n"
2243 "jl 1b \n"
2244 :"+r"(i), "+r"(j)
2245 :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
2246 );
2247 }
2248 #endif /* HAVE_6REGS */
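/*
 * Annotation (not part of the original file): scalar form of the windowed
 * overlap-add above, matching the per-element comments in the asm:
 */
#if 0
static void vector_fmul_window_scalar(float *dst, const float *src0,
                                      const float *src1, const float *win,
                                      int len)
{
    int i, j;
    dst += len; win += len; src0 += len;
    for (i = -len, j = len - 1; i < 0; i++, j--) {
        float s0 = src0[i], s1 = src1[j];
        float wi = win[i],  wj = win[j];
        dst[i] = s0 * wj - s1 * wi;
        dst[j] = s0 * wi + s1 * wj;
    }
}
#endif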
2249
2250 static void vector_clipf_sse(float *dst, const float *src, float min, float max,
2251 int len)
2252 {
2253 x86_reg i = (len-16)*4;
2254 __asm__ volatile(
2255 "movss %3, %%xmm4 \n"
2256 "movss %4, %%xmm5 \n"
2257 "shufps $0, %%xmm4, %%xmm4 \n"
2258 "shufps $0, %%xmm5, %%xmm5 \n"
2259 "1: \n\t"
2260 "movaps (%2,%0), %%xmm0 \n\t" // 3/1 on intel
2261 "movaps 16(%2,%0), %%xmm1 \n\t"
2262 "movaps 32(%2,%0), %%xmm2 \n\t"
2263 "movaps 48(%2,%0), %%xmm3 \n\t"
2264 "maxps %%xmm4, %%xmm0 \n\t"
2265 "maxps %%xmm4, %%xmm1 \n\t"
2266 "maxps %%xmm4, %%xmm2 \n\t"
2267 "maxps %%xmm4, %%xmm3 \n\t"
2268 "minps %%xmm5, %%xmm0 \n\t"
2269 "minps %%xmm5, %%xmm1 \n\t"
2270 "minps %%xmm5, %%xmm2 \n\t"
2271 "minps %%xmm5, %%xmm3 \n\t"
2272 "movaps %%xmm0, (%1,%0) \n\t"
2273 "movaps %%xmm1, 16(%1,%0) \n\t"
2274 "movaps %%xmm2, 32(%1,%0) \n\t"
2275 "movaps %%xmm3, 48(%1,%0) \n\t"
2276 "sub $64, %0 \n\t"
2277 "jge 1b \n\t"
2278 :"+&r"(i)
2279 :"r"(dst), "r"(src), "m"(min), "m"(max)
2280 :"memory"
2281 );
2282 }
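/*
 * Annotation (not part of the original file): scalar equivalent; the SSE loop
 * handles 16 floats per iteration, so len is assumed to be a multiple of 16:
 */
#if 0
static void vector_clipf_scalar(float *dst, const float *src,
                                float min, float max, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = FFMIN(FFMAX(src[i], min), max);
}
#endif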
2283
2284 void ff_vp3_idct_mmx(int16_t *input_data);
2285 void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block);
2286 void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block);
2287
2288 void ff_vp3_idct_dc_add_mmx2(uint8_t *dest, int line_size, const DCTELEM *block);
2289
2290 void ff_vp3_v_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);
2291 void ff_vp3_h_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);
2292
2293 void ff_vp3_idct_sse2(int16_t *input_data);
2294 void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block);
2295 void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block);
2296
2297 int32_t ff_scalarproduct_int16_mmx2(const int16_t *v1, const int16_t *v2, int order, int shift);
2298 int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2, int order, int shift);
2299 int32_t ff_scalarproduct_and_madd_int16_mmx2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
2300 int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
2301 int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
2302
2303 void ff_apply_window_int16_mmxext (int16_t *output, const int16_t *input,
2304 const int16_t *window, unsigned int len);
2305 void ff_apply_window_int16_mmxext_ba (int16_t *output, const int16_t *input,
2306 const int16_t *window, unsigned int len);
2307 void ff_apply_window_int16_sse2 (int16_t *output, const int16_t *input,
2308 const int16_t *window, unsigned int len);
2309 void ff_apply_window_int16_sse2_ba (int16_t *output, const int16_t *input,
2310 const int16_t *window, unsigned int len);
2311 void ff_apply_window_int16_ssse3 (int16_t *output, const int16_t *input,
2312 const int16_t *window, unsigned int len);
2313 void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
2314 const int16_t *window, unsigned int len);
2315
2316 void ff_bswap32_buf_ssse3(uint32_t *dst, const uint32_t *src, int w);
2317 void ff_bswap32_buf_sse2(uint32_t *dst, const uint32_t *src, int w);
2318
2319 void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top);
2320 int ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src, int w, int left);
2321 int ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src, int w, int left);
2322
2323 float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);
2324
2325 void ff_vector_clip_int32_mmx (int32_t *dst, const int32_t *src, int32_t min,
2326 int32_t max, unsigned int len);
2327 void ff_vector_clip_int32_sse2 (int32_t *dst, const int32_t *src, int32_t min,
2328 int32_t max, unsigned int len);
2329 void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src, int32_t min,
2330 int32_t max, unsigned int len);
2331 void ff_vector_clip_int32_sse4 (int32_t *dst, const int32_t *src, int32_t min,
2332 int32_t max, unsigned int len);
2333
2334 extern void ff_butterflies_float_interleave_sse(float *dst, const float *src0,
2335 const float *src1, int len);
2336 extern void ff_butterflies_float_interleave_avx(float *dst, const float *src0,
2337 const float *src1, int len);
2338
2339 #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX) \
2340 do { \
2341 c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
2342 c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
2343 c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
2344 c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
2345 c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
2346 c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
2347 c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
2348 c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
2349 c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
2350 c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
2351 c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
2352 c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
2353 c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
2354 c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
2355 c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
2356 c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \
2357 } while (0)
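/*
 * Annotation (not part of the original file): for example,
 * SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2, ) expands its first entry to
 *
 *   c->put_qpel_pixels_tab[0][0] = put_qpel16_mc00_mmx2;
 *
 * The table index walks the 4x4 grid of quarter-pel positions row by row,
 * i.e. index = x + y*4.
 */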
2358
2359 #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
2360 do { \
2361 c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
2362 c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
2363 c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
2364 c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU;\
2365 } while (0)
2366
2367 #define H264_QPEL_FUNCS(x, y, CPU) \
2368 do { \
2369 c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU; \
2370 c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU; \
2371 c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU; \
2372 c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU; \
2373 } while (0)
2374
2375 #define H264_QPEL_FUNCS_10(x, y, CPU) \
2376 do { \
2377 c->put_h264_qpel_pixels_tab[0][x+y*4] = ff_put_h264_qpel16_mc##x##y##_10_##CPU; \
2378 c->put_h264_qpel_pixels_tab[1][x+y*4] = ff_put_h264_qpel8_mc##x##y##_10_##CPU; \
2379 c->avg_h264_qpel_pixels_tab[0][x+y*4] = ff_avg_h264_qpel16_mc##x##y##_10_##CPU; \
2380 c->avg_h264_qpel_pixels_tab[1][x+y*4] = ff_avg_h264_qpel8_mc##x##y##_10_##CPU; \
2381 } while (0)
2382
2383 static void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx, int mm_flags)
2384 {
2385 const int high_bit_depth = avctx->bits_per_raw_sample > 8;
2386
2387 c->put_pixels_clamped = ff_put_pixels_clamped_mmx;
2388 c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
2389 c->add_pixels_clamped = ff_add_pixels_clamped_mmx;
2390
2391 if (!high_bit_depth) {
2392 c->clear_block = clear_block_mmx;
2393 c->clear_blocks = clear_blocks_mmx;
2394 c->draw_edges = draw_edges_mmx;
2395
2396 SET_HPEL_FUNCS(put, 0, 16, mmx);
2397 SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
2398 SET_HPEL_FUNCS(avg, 0, 16, mmx);
2399 SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
2400 SET_HPEL_FUNCS(put, 1, 8, mmx);
2401 SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
2402 SET_HPEL_FUNCS(avg, 1, 8, mmx);
2403 SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);
2404 }
2405
2406 #if ARCH_X86_32 || !HAVE_YASM
2407 c->gmc= gmc_mmx;
2408 #endif
2409 #if ARCH_X86_32 && HAVE_YASM
2410 if (!high_bit_depth)
2411 c->emulated_edge_mc = emulated_edge_mc_mmx;
2412 #endif
2413
2414 c->add_bytes = add_bytes_mmx;
2415
2416 if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
2417 c->h263_v_loop_filter = h263_v_loop_filter_mmx;
2418 c->h263_h_loop_filter = h263_h_loop_filter_mmx;
2419 }
2420
2421 #if HAVE_YASM
2422 if (!high_bit_depth && CONFIG_H264CHROMA) {
2423 c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_mmx_rnd;
2424 c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_mmx;
2425 }
2426
2427 c->vector_clip_int32 = ff_vector_clip_int32_mmx;
2428 #endif
2429
2430 }
2431
2432 static void dsputil_init_mmx2(DSPContext *c, AVCodecContext *avctx,
2433 int mm_flags)
2434 {
2435 const int bit_depth = avctx->bits_per_raw_sample;
2436 const int high_bit_depth = bit_depth > 8;
2437
2438 c->prefetch = prefetch_mmx2;
2439
2440 if (!high_bit_depth) {
2441 c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
2442 c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
2443
2444 c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
2445 c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
2446 c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
2447
2448 c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
2449 c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
2450
2451 c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
2452 c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
2453 c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
2454 }
2455
2456 if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
2457 if (!high_bit_depth) {
2458 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
2459 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
2460 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
2461 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
2462
2463 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
2464 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
2465 }
2466
2467 if (CONFIG_VP3_DECODER && HAVE_YASM) {
2468 c->vp3_v_loop_filter = ff_vp3_v_loop_filter_mmx2;
2469 c->vp3_h_loop_filter = ff_vp3_h_loop_filter_mmx2;
2470 }
2471 }
2472 if (CONFIG_VP3_DECODER && HAVE_YASM) {
2473 c->vp3_idct_dc_add = ff_vp3_idct_dc_add_mmx2;
2474 }
2475
2476 if (CONFIG_VP3_DECODER
2477 && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
2478 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmx2;
2479 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmx2;
2480 }
2481
2482 if (CONFIG_H264QPEL) {
2483 SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2, );
2484 SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2, );
2485 SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2, );
2486 SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2, );
2487 SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2, );
2488 SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2, );
2489
2490 if (!high_bit_depth) {
2491 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2, );
2492 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2, );
2493 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2, );
2494 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2, );
2495 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2, );
2496 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2, );
2497 } else if (bit_depth == 10) {
2498 #if HAVE_YASM
2499 #if !ARCH_X86_64
2500 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_mmxext, ff_);
2501 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_mmxext, ff_);
2502 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 10_mmxext, ff_);
2503 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 10_mmxext, ff_);
2504 #endif
2505 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 10_mmxext, ff_);
2506 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 10_mmxext, ff_);
2507 #endif
2508 }
2509
2510 SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2, );
2511 SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2, );
2512 SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2, );
2513 SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2, );
2514 }
2515
2516 #if HAVE_YASM
2517 if (!high_bit_depth && CONFIG_H264CHROMA) {
2518 c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_mmx2_rnd;
2519 c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_mmx2;
2520 c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_mmx2;
2521 c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_mmx2;
2522 }
2523 if (bit_depth == 10 && CONFIG_H264CHROMA) {
2524 c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_10_mmxext;
2525 c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_10_mmxext;
2526 c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_10_mmxext;
2527 c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_10_mmxext;
2528 }
2529
2530 c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
2531
2532 c->scalarproduct_int16 = ff_scalarproduct_int16_mmx2;
2533 c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2;
2534
2535 if (avctx->flags & CODEC_FLAG_BITEXACT) {
2536 c->apply_window_int16 = ff_apply_window_int16_mmxext_ba;
2537 } else {
2538 c->apply_window_int16 = ff_apply_window_int16_mmxext;
2539 }
2540 #endif
2541 }
2542
2543 static void dsputil_init_3dnow(DSPContext *c, AVCodecContext *avctx,
2544 int mm_flags)
2545 {
2546 const int high_bit_depth = avctx->bits_per_raw_sample > 8;
2547
2548 c->prefetch = prefetch_3dnow;
2549
2550 if (!high_bit_depth) {
2551 c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
2552 c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
2553
2554 c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
2555 c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
2556 c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
2557
2558 c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
2559 c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
2560
2561 c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
2562 c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
2563 c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
2564
2565 if (!(avctx->flags & CODEC_FLAG_BITEXACT)){
2566 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
2567 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
2568 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
2569 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
2570
2571 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
2572 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
2573 }
2574 }
2575
2576 if (CONFIG_VP3_DECODER
2577 && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
2578 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_3dnow;
2579 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_3dnow;
2580 }
2581
2582 if (CONFIG_H264QPEL) {
2583 SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow, );
2584 SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow, );
2585 SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow, );
2586 SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow, );
2587 SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow, );
2588 SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow, );
2589
2590 if (!high_bit_depth) {
2591 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow, );
2592 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow, );
2593 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow, );
2594 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow, );
2595 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow, );
2596 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow, );
2597 }
2598
2599 SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow, );
2600 SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow, );
2601 SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow, );
2602 SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow, );
2603 }
2604
2605 #if HAVE_YASM
2606 if (!high_bit_depth && CONFIG_H264CHROMA) {
2607 c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_3dnow_rnd;
2608 c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_3dnow;
2609 }
2610 #endif
2611
2612 c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
2613 c->vector_fmul = vector_fmul_3dnow;
2614 c->vector_fmul_add = vector_fmul_add_3dnow;
2615
2616 #if HAVE_7REGS
2617 c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
2618 #endif
2619 }
2620
2621 static void dsputil_init_3dnow2(DSPContext *c, AVCodecContext *avctx,
2622 int mm_flags)
2623 {
2624 c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
2625 #if HAVE_6REGS
2626 c->vector_fmul_window = vector_fmul_window_3dnow2;
2627 #endif
2628 }
2629
2630 static void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx, int mm_flags)
2631 {
2632 const int high_bit_depth = avctx->bits_per_raw_sample > 8;
2633
2634 if (!high_bit_depth) {
2635 if (!(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)){
2636 /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
2637 c->clear_block = clear_block_sse;
2638 c->clear_blocks = clear_blocks_sse;
2639 }
2640 }
2641
2642 c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
2643 c->ac3_downmix = ac3_downmix_sse;
2644 c->vector_fmul = vector_fmul_sse;
2645 c->vector_fmul_reverse = vector_fmul_reverse_sse;
2646
2647 if (!(mm_flags & AV_CPU_FLAG_3DNOW))
2648 c->vector_fmul_add = vector_fmul_add_sse;
2649
2650 #if HAVE_6REGS
2651 c->vector_fmul_window = vector_fmul_window_sse;
2652 #endif
2653
2654 c->vector_clipf = vector_clipf_sse;
2655
2656 #if HAVE_YASM
2657 c->scalarproduct_float = ff_scalarproduct_float_sse;
2658 c->butterflies_float_interleave = ff_butterflies_float_interleave_sse;
2659
2660 if (!high_bit_depth)
2661 c->emulated_edge_mc = emulated_edge_mc_sse;
2662 c->gmc = gmc_sse;
2663 #endif
2664 }
2665
2666 static void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
2667 int mm_flags)
2668 {
2669 const int bit_depth = avctx->bits_per_raw_sample;
2670 const int high_bit_depth = bit_depth > 8;
2671
2672 if (mm_flags & AV_CPU_FLAG_3DNOW) {
2673 // these functions are slower than the MMX equivalents on AMD, but faster on Intel
2674 if (!high_bit_depth) {
2675 c->put_pixels_tab[0][0] = put_pixels16_sse2;
2676 c->put_no_rnd_pixels_tab[0][0] = put_pixels16_sse2;
2677 c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
2678 if (CONFIG_H264QPEL)
2679 H264_QPEL_FUNCS(0, 0, sse2);
2680 }
2681 }
2682
2683 if (!high_bit_depth && CONFIG_H264QPEL) {
2684 H264_QPEL_FUNCS(0, 1, sse2);
2685 H264_QPEL_FUNCS(0, 2, sse2);
2686 H264_QPEL_FUNCS(0, 3, sse2);
2687 H264_QPEL_FUNCS(1, 1, sse2);
2688 H264_QPEL_FUNCS(1, 2, sse2);
2689 H264_QPEL_FUNCS(1, 3, sse2);
2690 H264_QPEL_FUNCS(2, 1, sse2);
2691 H264_QPEL_FUNCS(2, 2, sse2);
2692 H264_QPEL_FUNCS(2, 3, sse2);
2693 H264_QPEL_FUNCS(3, 1, sse2);
2694 H264_QPEL_FUNCS(3, 2, sse2);
2695 H264_QPEL_FUNCS(3, 3, sse2);
2696 }
2697
2698 #if HAVE_YASM
2699 if (bit_depth == 10) {
2700 if (CONFIG_H264QPEL) {
2701 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_sse2, ff_);
2702 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 10_sse2, ff_);
2703 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_sse2, ff_);
2704 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 10_sse2, ff_);
2705 H264_QPEL_FUNCS_10(1, 0, sse2_cache64);
2706 H264_QPEL_FUNCS_10(2, 0, sse2_cache64);
2707 H264_QPEL_FUNCS_10(3, 0, sse2_cache64);
2708 }
2709 if (CONFIG_H264CHROMA) {
2710 c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_sse2;
2711 c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_sse2;
2712 }
2713 }
2714
2715 c->scalarproduct_int16 = ff_scalarproduct_int16_sse2;
2716 c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
2717 if (mm_flags & AV_CPU_FLAG_ATOM) {
2718 c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
2719 } else {
2720 c->vector_clip_int32 = ff_vector_clip_int32_sse2;
2721 }
2722 if (avctx->flags & CODEC_FLAG_BITEXACT) {
2723 c->apply_window_int16 = ff_apply_window_int16_sse2_ba;
2724 } else if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
2725 c->apply_window_int16 = ff_apply_window_int16_sse2;
2726 }
2727 c->bswap_buf = ff_bswap32_buf_sse2;
2728 #endif
2729 }
2730
2731 static void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx,
2732 int mm_flags)
2733 {
2734 #if HAVE_SSSE3
2735 const int high_bit_depth = avctx->bits_per_raw_sample > 8;
2736 const int bit_depth = avctx->bits_per_raw_sample;
2737
2738 if (!high_bit_depth && CONFIG_H264QPEL) {
2739 H264_QPEL_FUNCS(1, 0, ssse3);
2740 H264_QPEL_FUNCS(1, 1, ssse3);
2741 H264_QPEL_FUNCS(1, 2, ssse3);
2742 H264_QPEL_FUNCS(1, 3, ssse3);
2743 H264_QPEL_FUNCS(2, 0, ssse3);
2744 H264_QPEL_FUNCS(2, 1, ssse3);
2745 H264_QPEL_FUNCS(2, 2, ssse3);
2746 H264_QPEL_FUNCS(2, 3, ssse3);
2747 H264_QPEL_FUNCS(3, 0, ssse3);
2748 H264_QPEL_FUNCS(3, 1, ssse3);
2749 H264_QPEL_FUNCS(3, 2, ssse3);
2750 H264_QPEL_FUNCS(3, 3, ssse3);
2751 }
2752 #if HAVE_YASM
2753 else if (bit_depth == 10 && CONFIG_H264QPEL) {
2754 H264_QPEL_FUNCS_10(1, 0, ssse3_cache64);
2755 H264_QPEL_FUNCS_10(2, 0, ssse3_cache64);
2756 H264_QPEL_FUNCS_10(3, 0, ssse3_cache64);
2757 }
2758 if (!high_bit_depth && CONFIG_H264CHROMA) {
2759 c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_ssse3_rnd;
2760 c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_ssse3_rnd;
2761 c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_ssse3;
2762 c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_ssse3;
2763 }
2764 c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
2765 if (mm_flags & AV_CPU_FLAG_SSE4) // not really SSE4; the flag merely excludes Conroe, where this version is slow
2766 c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
2767
2768 if (mm_flags & AV_CPU_FLAG_ATOM) {
2769 c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
2770 } else {
2771 c->apply_window_int16 = ff_apply_window_int16_ssse3;
2772 }
2773 if (!(mm_flags & (AV_CPU_FLAG_SSE42|AV_CPU_FLAG_3DNOW))) { // use the SSSE3 version only where cacheline-split loads are expensive
2774 c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
2775 }
2776 c->bswap_buf = ff_bswap32_buf_ssse3;
2777 #endif
2778 #endif
2779 }
2780
2781 static void dsputil_init_sse4(DSPContext *c, AVCodecContext *avctx,
2782 int mm_flags)
2783 {
2784 #if HAVE_YASM
2785 c->vector_clip_int32 = ff_vector_clip_int32_sse4;
2786 #endif
2787 }
2788
2789 static void dsputil_init_avx(DSPContext *c, AVCodecContext *avctx, int mm_flags)
2790 {
2791 #if HAVE_AVX && HAVE_YASM
2792 const int bit_depth = avctx->bits_per_raw_sample;
2793
2794 if (bit_depth == 10) {
2795 // AVX implies !cache64.
2796 // TODO: Port cache(32|64) detection from x264.
2797 if (CONFIG_H264QPEL) {
2798 H264_QPEL_FUNCS_10(1, 0, sse2);
2799 H264_QPEL_FUNCS_10(2, 0, sse2);
2800 H264_QPEL_FUNCS_10(3, 0, sse2);
2801 }
2802
2803 if (CONFIG_H264CHROMA) {
2804 c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_avx;
2805 c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_avx;
2806 }
2807 }
2808 c->butterflies_float_interleave = ff_butterflies_float_interleave_avx;
2809 #endif
2810 }
2811
2812 void ff_dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
2813 {
2814 int mm_flags = av_get_cpu_flags();
2815
2816 if (avctx->dsp_mask) {
2817 if (avctx->dsp_mask & AV_CPU_FLAG_FORCE)
2818 mm_flags |= (avctx->dsp_mask & 0xffff);
2819 else
2820 mm_flags &= ~(avctx->dsp_mask & 0xffff);
2821 }
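/*
 * Annotation (not part of the original file): with AV_CPU_FLAG_FORCE set,
 * dsp_mask adds the listed CPU flags, otherwise it removes them.  E.g. a
 * hypothetical caller could disable all SSE2 code paths with
 *
 *   avctx->dsp_mask = AV_CPU_FLAG_SSE2;
 *
 * or force them on with AV_CPU_FLAG_FORCE | AV_CPU_FLAG_SSE2.
 */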
2822
2823 #if 0
2824 av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
2825 if (mm_flags & AV_CPU_FLAG_MMX)
2826 av_log(avctx, AV_LOG_INFO, " mmx");
2827 if (mm_flags & AV_CPU_FLAG_MMX2)
2828 av_log(avctx, AV_LOG_INFO, " mmx2");
2829 if (mm_flags & AV_CPU_FLAG_3DNOW)
2830 av_log(avctx, AV_LOG_INFO, " 3dnow");
2831 if (mm_flags & AV_CPU_FLAG_SSE)
2832 av_log(avctx, AV_LOG_INFO, " sse");
2833 if (mm_flags & AV_CPU_FLAG_SSE2)
2834 av_log(avctx, AV_LOG_INFO, " sse2");
2835 av_log(avctx, AV_LOG_INFO, "\n");
2836 #endif
2837
2838 if (mm_flags & AV_CPU_FLAG_MMX) {
2839 const int idct_algo= avctx->idct_algo;
2840
2841 if (avctx->lowres == 0 && avctx->bits_per_raw_sample <= 8) {
2842 if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
2843 c->idct_put= ff_simple_idct_put_mmx;
2844 c->idct_add= ff_simple_idct_add_mmx;
2845 c->idct = ff_simple_idct_mmx;
2846 c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
2847 #if CONFIG_GPL
2848 }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
2849 if(mm_flags & AV_CPU_FLAG_MMX2){