relicense h264 deblock sse2 to lgpl
[libav.git] / libavcodec / x86 / dsputil_mmx.c
/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264dsp.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "vp3dsp_mmx.h"
#include "vp3dsp_sse2.h"
#include "vp6dsp_mmx.h"
#include "vp6dsp_sse2.h"
#include "idct_xvid.h"

//#undef NDEBUG
//#include <assert.h>
int mm_flags; /* multimedia extension flags */

/* pixel operations */
DECLARE_ALIGNED(8,  const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED(16, const uint64_t, ff_pdw_80000000)[2] =
{0x8000000080000000ULL, 0x8000000080000000ULL};

DECLARE_ALIGNED(8,  const uint64_t, ff_pw_3  ) = 0x0003000300030003ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_4  ) = {0x0004000400040004ULL, 0x0004000400040004ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_5  ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_8  ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_9  ) = {0x0009000900090009ULL, 0x0009000900090009ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_18 ) = {0x0012001200120012ULL, 0x0012001200120012ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_27 ) = {0x001B001B001B001BULL, 0x001B001B001B001BULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_53 ) = 0x0035003500350035ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_63 ) = {0x003F003F003F003FULL, 0x003F003F003F003FULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_64 ) = {0x0040004000400040ULL, 0x0040004000400040ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;

DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_1  ) = {0x0101010101010101ULL, 0x0101010101010101ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_3  ) = {0x0303030303030303ULL, 0x0303030303030303ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_4  ) = {0x0404040404040404ULL, 0x0404040404040404ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_7  ) = 0x0707070707070707ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_80 ) = {0x8080808080808080ULL, 0x8080808080808080ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_F8 ) = {0xF8F8F8F8F8F8F8F8ULL, 0xF8F8F8F8F8F8F8F8ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_FE ) = {0xFEFEFEFEFEFEFEFEULL, 0xFEFEFEFEFEFEFEFEULL};

DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };
#define JUMPALIGN() __asm__ volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd)  __asm__ volatile ("pxor %%" #regd ", %%" #regd " ::)

#define MOVQ_BFE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd)  __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd)  __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
// for shared libraries it is better to synthesize these constants in
// registers instead of loading them from memory: pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)

#endif

// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "por  " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"

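/* The two macros above are branch-free byte averages built on the standard
 * identities (the 0xFE mask keeps the per-byte shift from pulling bits in
 * from the neighbouring byte, since MMX has no per-byte shift):
 *   truncating:  (a + b)     >> 1 == (a & b) + (((a ^ b) & 0xFE) >> 1)
 *   rounding up: (a + b + 1) >> 1 == (a | b) - (((a ^ b) & 0xFE) >> 1)
 */
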
// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pand " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "por  " #regb ", " #regr " \n\t"\
    "por  " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"

/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND  MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX_NO_RND(a, b, c, e)
#define OP_AVG(a, b, c, e)              PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND  MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx

/***********************************/
/* standard MMX */

void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile(
        "movq   %3, %%mm0 \n\t"
        "movq   8%3, %%mm1 \n\t"
        "movq   16%3, %%mm2 \n\t"
        "movq   24%3, %%mm3 \n\t"
        "movq   32%3, %%mm4 \n\t"
        "movq   40%3, %%mm5 \n\t"
        "movq   48%3, %%mm6 \n\t"
        "movq   56%3, %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq   %%mm0, (%0) \n\t"
        "movq   %%mm2, (%0, %1) \n\t"
        "movq   %%mm4, (%0, %1, 2) \n\t"
        "movq   %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
        :"memory");
    pix += line_size*4;
    p   += 32;
    // if the block above were simply repeated verbatim here, the compiler
    // would generate some very strange code, so pass the block pointer
    // through an "r" constraint instead of "m"
    __asm__ volatile(
        "movq   (%3), %%mm0 \n\t"
        "movq   8(%3), %%mm1 \n\t"
        "movq   16(%3), %%mm2 \n\t"
        "movq   24(%3), %%mm3 \n\t"
        "movq   32(%3), %%mm4 \n\t"
        "movq   40(%3), %%mm5 \n\t"
        "movq   48(%3), %%mm6 \n\t"
        "movq   56(%3), %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq   %%mm0, (%0) \n\t"
        "movq   %%mm2, (%0, %1) \n\t"
        "movq   %%mm4, (%0, %1, 2) \n\t"
        "movq   %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
        :"memory");
}

DECLARE_ASM_CONST(8, uint8_t, ff_vector128)[8] =
  { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

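/* The macro below relies on packsswb saturating each DCT coefficient to the
 * signed byte range [-128,127]; adding the 0x80 bytes from ff_vector128
 * (with byte-wise wraparound) then rebiases the result into the unsigned
 * pixel range [0,255]. */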
#define put_signed_pixels_clamped_mmx_half(off) \
    "movq    "#off"(%2), %%mm1 \n\t"\
    "movq 16+"#off"(%2), %%mm2 \n\t"\
    "movq 32+"#off"(%2), %%mm3 \n\t"\
    "movq 48+"#off"(%2), %%mm4 \n\t"\
    "packsswb  8+"#off"(%2), %%mm1 \n\t"\
    "packsswb 24+"#off"(%2), %%mm2 \n\t"\
    "packsswb 40+"#off"(%2), %%mm3 \n\t"\
    "packsswb 56+"#off"(%2), %%mm4 \n\t"\
    "paddb %%mm0, %%mm1 \n\t"\
    "paddb %%mm0, %%mm2 \n\t"\
    "paddb %%mm0, %%mm3 \n\t"\
    "paddb %%mm0, %%mm4 \n\t"\
    "movq %%mm1, (%0) \n\t"\
    "movq %%mm2, (%0, %3) \n\t"\
    "movq %%mm3, (%0, %3, 2) \n\t"\
    "movq %%mm4, (%0, %1) \n\t"

void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_vector128)", %%mm0 \n\t"
        "lea (%3, %3, 2), %1 \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea (%0, %3, 4), %0 \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        :"+&r" (pixels), "=&r" (line_skip3)
        :"r" (block), "r"(line_skip)
        :"memory");
}

void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile(
            "movq   (%2), %%mm0 \n\t"
            "movq   8(%2), %%mm1 \n\t"
            "movq   16(%2), %%mm2 \n\t"
            "movq   24(%2), %%mm3 \n\t"
            "movq   %0, %%mm4 \n\t"
            "movq   %1, %%mm6 \n\t"
            "movq   %%mm4, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm4, %%mm0 \n\t"
            "paddsw %%mm5, %%mm1 \n\t"
            "movq   %%mm6, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm6 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm6, %%mm2 \n\t"
            "paddsw %%mm5, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq   %%mm0, %0 \n\t"
            "movq   %%mm2, %1 \n\t"
            :"+m"(*pix), "+m"(*(pix+line_size))
            :"r"(p)
            :"memory");
        pix += line_size*2;
        p   += 16;
    } while (--i);
}

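/* Scalar reference for add_pixels_clamped (a sketch of what the unpack,
 * paddsw and packuswb sequence above computes per pixel; av_clip_uint8 is
 * the libavutil clamp):
 *     pix[i] = av_clip_uint8(pix[i] + block[i]);
 * with the small difference that paddsw also saturates the 16-bit
 * intermediate sum. */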
static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

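/* The SSE2 versions below use unaligned loads (movdqu) from the source but
 * aligned stores (movdqa) to the destination, i.e. they assume the caller
 * passes a 16-byte aligned block pointer and stride. */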
static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
        );
}

static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "pavgb  (%2), %%xmm0 \n\t"
        "pavgb  (%2,%3), %%xmm1 \n\t"
        "pavgb  (%2,%3,2), %%xmm2 \n\t"
        "pavgb  (%2,%4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
        );
}

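/* CLEAR_BLOCKS biases the block pointer by +128*n and runs the index
 * register from -128*n up to 0, so the "js" conditional jump doubles as the
 * loop-termination test without a separate compare instruction. */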
#define CLEAR_BLOCKS(name,n) \
static void name(DCTELEM *blocks)\
{\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "mov  %1, %%"REG_a" \n\t"\
        "1: \n\t"\
        "movq %%mm7, (%0, %%"REG_a") \n\t"\
        "movq %%mm7, 8(%0, %%"REG_a") \n\t"\
        "movq %%mm7, 16(%0, %%"REG_a") \n\t"\
        "movq %%mm7, 24(%0, %%"REG_a") \n\t"\
        "add $32, %%"REG_a" \n\t"\
        " js 1b \n\t"\
        : : "r" (((uint8_t *)blocks)+128*n),\
            "i" (-128*n)\
        : "%"REG_a\
    );\
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)

static void clear_block_sse(DCTELEM *block)
{
    __asm__ volatile(
        "xorps  %%xmm0, %%xmm0 \n"
        "movaps %%xmm0,    (%0) \n"
        "movaps %%xmm0,  16(%0) \n"
        "movaps %%xmm0,  32(%0) \n"
        "movaps %%xmm0,  48(%0) \n"
        "movaps %%xmm0,  64(%0) \n"
        "movaps %%xmm0,  80(%0) \n"
        "movaps %%xmm0,  96(%0) \n"
        "movaps %%xmm0, 112(%0) \n"
        :: "r"(block)
        : "memory"
    );
}

static void clear_blocks_sse(DCTELEM *blocks)
{
    __asm__ volatile(
        "xorps  %%xmm0, %%xmm0 \n"
        "mov  %1, %%"REG_a" \n"
        "1: \n"
        "movaps %%xmm0,    (%0, %%"REG_a") \n"
        "movaps %%xmm0,  16(%0, %%"REG_a") \n"
        "movaps %%xmm0,  32(%0, %%"REG_a") \n"
        "movaps %%xmm0,  48(%0, %%"REG_a") \n"
        "movaps %%xmm0,  64(%0, %%"REG_a") \n"
        "movaps %%xmm0,  80(%0, %%"REG_a") \n"
        "movaps %%xmm0,  96(%0, %%"REG_a") \n"
        "movaps %%xmm0, 112(%0, %%"REG_a") \n"
        "add $128, %%"REG_a" \n"
        " js 1b \n"
        : : "r" (((uint8_t *)blocks)+128*6),
            "i" (-128*6)
        : "%"REG_a
    );
}

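/* The two functions below add 16 bytes per loop iteration while the index
 * stays below w-15; the trailing scalar C loop then handles widths that are
 * not a multiple of 16. */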
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f \n\t"
        "1: \n\t"
        "movq  (%1, %0), %%mm0 \n\t"
        "movq  (%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%2, %0) \n\t"
        "movq 8(%1, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%2, %0) \n\t"
        "add $16, %0 \n\t"
        "2: \n\t"
        "cmp %3, %0 \n\t"
        " js 1b \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f \n\t"
        "1: \n\t"
        "movq   (%2, %0), %%mm0 \n\t"
        "movq  8(%2, %0), %%mm1 \n\t"
        "paddb  (%3, %0), %%mm0 \n\t"
        "paddb 8(%3, %0), %%mm1 \n\t"
        "movq %%mm0,  (%1, %0) \n\t"
        "movq %%mm1, 8(%1, %0) \n\t"
        "add $16, %0 \n\t"
        "2: \n\t"
        "cmp %4, %0 \n\t"
        " js 1b \n\t"
        : "+r" (i)
        : "r"(dst), "r"(src1), "r"(src2), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i] = src1[i] + src2[i];
}

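/* HuffYUV median prediction: for each byte,
 *     pred = median(left, top, left + top - topleft)
 * and dst = diff + pred (mod 256). The version below computes the median
 * branchlessly with cmov; it needs 7 general-purpose registers and 10 asm
 * operands, hence the configuration guards. */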
#if HAVE_7REGS && HAVE_TEN_OPERANDS
static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top) {
    x86_reg w2 = -w;
    x86_reg x;
    int l = *left & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile(
        "mov    %7, %3 \n"
        "1: \n"
        "movzx (%3,%4), %2 \n"
        "mov    %2, %k3 \n"
        "sub   %b1, %b3 \n"
        "add   %b0, %b3 \n"
        "mov    %2, %1 \n"
        "cmp    %0, %2 \n"
        "cmovg  %0, %2 \n"
        "cmovg  %1, %0 \n"
        "cmp   %k3, %0 \n"
        "cmovg %k3, %0 \n"
        "mov    %7, %3 \n"
        "cmp    %2, %0 \n"
        "cmovl  %2, %0 \n"
        "add (%6,%4), %b0 \n"
        "mov   %b0, (%5,%4) \n"
        "inc    %4 \n"
        "jl 1b \n"
        :"+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        :"r"(dst+w), "r"(diff+w), "rm"(top+w)
    );
    *left = l;
    *left_top = tl;
}
#endif

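/* H263_LOOP_FILTER applies the H.263 deblocking filter to four 8-pixel
 * lines: %0/%3 are the outer lines, %1/%2 the two lines adjacent to the
 * block edge, %4 is 2*strength and %5 the 0xFC byte mask. On exit the
 * filtered lines are left in mm3 (%1), mm4 (%2), mm5 (%0) and mm6 (%3),
 * which is what the "5 3 4 6" note at the second use site refers to. */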
#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7 \n\t"\
    "movq  %0, %%mm0 \n\t"\
    "movq  %0, %%mm1 \n\t"\
    "movq  %3, %%mm2 \n\t"\
    "movq  %3, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm0 \n\t"\
    "punpckhbw %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "psubw %%mm2, %%mm0 \n\t"\
    "psubw %%mm3, %%mm1 \n\t"\
    "movq  %1, %%mm2 \n\t"\
    "movq  %1, %%mm3 \n\t"\
    "movq  %2, %%mm4 \n\t"\
    "movq  %2, %%mm5 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm7, %%mm5 \n\t"\
    "psubw %%mm2, %%mm4 \n\t"\
    "psubw %%mm3, %%mm5 \n\t"\
    "psllw $2, %%mm4 \n\t"\
    "psllw $2, %%mm5 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpgtw %%mm4, %%mm6 \n\t"\
    "pcmpgtw %%mm5, %%mm7 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "pxor %%mm7, %%mm5 \n\t"\
    "psubw %%mm6, %%mm4 \n\t"\
    "psubw %%mm7, %%mm5 \n\t"\
    "psrlw $3, %%mm4 \n\t"\
    "psrlw $3, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm4 \n\t"\
    "packsswb %%mm7, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "movd %4, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "psubusb %%mm4, %%mm2 \n\t"\
    "movq %%mm2, %%mm3 \n\t"\
    "psubusb %%mm4, %%mm3 \n\t"\
    "psubb %%mm3, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm3 \n\t"\
    "psubusb %%mm2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm2 \n\t"\
    "packsswb %%mm1, %%mm0 \n\t"\
    "pcmpgtb %%mm0, %%mm7 \n\t"\
    "pxor %%mm7, %%mm0 \n\t"\
    "psubb %%mm7, %%mm0 \n\t"\
    "movq %%mm0, %%mm1 \n\t"\
    "psubusb %%mm2, %%mm0 \n\t"\
    "psubb %%mm0, %%mm1 \n\t"\
    "pand %5, %%mm1 \n\t"\
    "psrlw $2, %%mm1 \n\t"\
    "pxor %%mm7, %%mm1 \n\t"\
    "psubb %%mm7, %%mm1 \n\t"\
    "movq %0, %%mm5 \n\t"\
    "movq %3, %%mm6 \n\t"\
    "psubb %%mm1, %%mm5 \n\t"\
    "paddb %%mm1, %%mm6 \n\t"

static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
    const int strength= ff_h263_loop_filter_strength[qscale];

    __asm__ volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1 \n\t"
        "movq %%mm4, %2 \n\t"
        "movq %%mm5, %0 \n\t"
        "movq %%mm6, %3 \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
    }
}

static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    __asm__ volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd  %4, %%mm0 \n\t"
        "movd  %5, %%mm1 \n\t"
        "movd  %6, %%mm2 \n\t"
        "movd  %7, %%mm3 \n\t"
        "punpcklbw %%mm1, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "punpcklwd %%mm2, %%mm0 \n\t"
        "punpckhwd %%mm2, %%mm1 \n\t"
        "movd  %%mm0, %0 \n\t"
        "punpckhdq %%mm0, %%mm0 \n\t"
        "movd  %%mm0, %1 \n\t"
        "movd  %%mm1, %2 \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd  %%mm1, %3 \n\t"

        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        :  "m" (*(uint32_t*)(src + 0*src_stride)),
           "m" (*(uint32_t*)(src + 1*src_stride)),
           "m" (*(uint32_t*)(src + 2*src_stride)),
           "m" (*(uint32_t*)(src + 3*src_stride))
    );
}

static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
    const int strength= ff_h263_loop_filter_strength[qscale];
    DECLARE_ALIGNED(8, uint64_t, temp)[4];
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp  , src           , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    __asm__ volatile(
        H263_LOOP_FILTER // 5  3  4  6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    __asm__ volatile(
        "movq %%mm5, %%mm1 \n\t"
        "movq %%mm4, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm5 \n\t"
        "punpcklbw %%mm6, %%mm4 \n\t"
        "punpckhbw %%mm3, %%mm1 \n\t"
        "punpckhbw %%mm6, %%mm0 \n\t"
        "movq %%mm5, %%mm3 \n\t"
        "movq %%mm1, %%mm6 \n\t"
        "punpcklwd %%mm4, %%mm5 \n\t"
        "punpcklwd %%mm0, %%mm1 \n\t"
        "punpckhwd %%mm4, %%mm3 \n\t"
        "punpckhwd %%mm0, %%mm6 \n\t"
        "movd %%mm5, (%0) \n\t"
        "punpckhdq %%mm5, %%mm5 \n\t"
        "movd %%mm5, (%0,%2) \n\t"
        "movd %%mm3, (%0,%2,2) \n\t"
        "punpckhdq %%mm3, %%mm3 \n\t"
        "movd %%mm3, (%0,%3) \n\t"
        "movd %%mm1, (%1) \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, (%1,%2) \n\t"
        "movd %%mm6, (%1,%2,2) \n\t"
        "punpckhdq %%mm6, %%mm6 \n\t"
        "movd %%mm6, (%1,%3) \n\t"
        :: "r" (src),
           "r" (src + 4*stride),
           "r" ((x86_reg)   stride ),
           "r" ((x86_reg)(3*stride))
    );
    }
}

/* draw the edges of width 'w' of an image of size width x height;
   this MMX version can only handle w==8 || w==16 */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if(w==8)
    {
        __asm__ volatile(
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }
    else
    {
        __asm__ volatile(
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq %%mm0, -16(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "movq %%mm1, 8(%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }

    for(i=0;i<w;i+=4) {
        /* top and bottom (and hopefully also the corners) */
        ptr= buf - (i + 1) * wrap - w;
        __asm__ volatile(
            "1: \n\t"
            "movq (%1, %0), %%mm0 \n\t"
            "movq %%mm0, (%0) \n\t"
            "movq %%mm0, (%0, %2) \n\t"
            "movq %%mm0, (%0, %2, 2) \n\t"
            "movq %%mm0, (%0, %3) \n\t"
            "add $8, %0 \n\t"
            "cmp %4, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
        );
        ptr= last_line + (i + 1) * wrap - w;
        __asm__ volatile(
            "1: \n\t"
            "movq (%1, %0), %%mm0 \n\t"
            "movq %%mm0, (%0) \n\t"
            "movq %%mm0, (%0, %2) \n\t"
            "movq %%mm0, (%0, %2, 2) \n\t"
            "movq %%mm0, (%0, %3) \n\t"
            "add $8, %0 \n\t"
            "cmp %4, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
        );
    }
}

#define PAETH(cpu, abs3)\
static void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
{\
    x86_reg i = -bpp;\
    x86_reg end = w-3;\
    __asm__ volatile(\
        "pxor      %%mm7, %%mm7 \n"\
        "movd    (%1,%0), %%mm0 \n"\
        "movd    (%2,%0), %%mm1 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "add       %4, %0 \n"\
        "1: \n"\
        "movq      %%mm1, %%mm2 \n"\
        "movd    (%2,%0), %%mm1 \n"\
        "movq      %%mm2, %%mm3 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "movq      %%mm2, %%mm4 \n"\
        "psubw     %%mm1, %%mm3 \n"\
        "psubw     %%mm0, %%mm4 \n"\
        "movq      %%mm3, %%mm5 \n"\
        "paddw     %%mm4, %%mm5 \n"\
        abs3\
        "movq      %%mm4, %%mm6 \n"\
        "pminsw    %%mm5, %%mm6 \n"\
        "pcmpgtw   %%mm6, %%mm3 \n"\
        "pcmpgtw   %%mm5, %%mm4 \n"\
        "movq      %%mm4, %%mm6 \n"\
        "pand      %%mm3, %%mm4 \n"\
        "pandn     %%mm3, %%mm6 \n"\
        "pandn     %%mm0, %%mm3 \n"\
        "movd    (%3,%0), %%mm0 \n"\
        "pand      %%mm1, %%mm6 \n"\
        "pand      %%mm4, %%mm2 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "movq      %6, %%mm5 \n"\
        "paddw     %%mm6, %%mm0 \n"\
        "paddw     %%mm2, %%mm3 \n"\
        "paddw     %%mm3, %%mm0 \n"\
        "pand      %%mm5, %%mm0 \n"\
        "movq      %%mm0, %%mm3 \n"\
        "packuswb  %%mm3, %%mm3 \n"\
        "movd      %%mm3, (%1,%0) \n"\
        "add       %4, %0 \n"\
        "cmp       %5, %0 \n"\
        "jle 1b \n"\
        :"+r"(i)\
        :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\
         "m"(ff_pw_255)\
        :"memory"\
    );\
}

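/* PNG Paeth prediction (RFC 2083): with a = left, b = top, c = top-left,
 * compute p = a + b - c and predict from whichever of a, b, c is closest to
 * p; the predictor is then added to the decoded byte modulo 256. The MMX
 * code above selects the winner branchlessly from the pairwise distances
 * produced by the abs3 block supplied by the macro parameter. */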
#define ABS3_MMX2\
        "psubw     %%mm5, %%mm7 \n"\
        "pmaxsw    %%mm7, %%mm5 \n"\
        "pxor      %%mm6, %%mm6 \n"\
        "pxor      %%mm7, %%mm7 \n"\
        "psubw     %%mm3, %%mm6 \n"\
        "psubw     %%mm4, %%mm7 \n"\
        "pmaxsw    %%mm6, %%mm3 \n"\
        "pmaxsw    %%mm7, %%mm4 \n"\
        "pxor      %%mm7, %%mm7 \n"

#define ABS3_SSSE3\
        "pabsw     %%mm3, %%mm3 \n"\
        "pabsw     %%mm4, %%mm4 \n"\
        "pabsw     %%mm5, %%mm5 \n"

PAETH(mmx2, ABS3_MMX2)
#if HAVE_SSSE3
PAETH(ssse3, ABS3_SSSE3)
#endif

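/* QPEL_V_LOW evaluates one output of the MPEG-4 quarter-pel lowpass, the
 * 8-tap filter (-1, 3, -6, 20, 20, -6, 3, -1)/32: with the symmetric pair
 * sums x1..x4 (innermost to outermost) it computes
 *     (20*x1 - 6*x2 + 3*x3 - x4 + rnd) >> 5
 * and then packs and stores the result through OP. */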
#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
    "paddw " #m4 ", " #m3 " \n\t" /* x1 */\
    "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\
    "pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\
    "movq "#in7", " #m3 " \n\t" /* d */\
    "movq "#in0", %%mm5 \n\t" /* D */\
    "paddw " #m3 ", %%mm5 \n\t" /* x4 */\
    "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\
    "movq "#in1", %%mm5 \n\t" /* C */\
    "movq "#in2", %%mm6 \n\t" /* B */\
    "paddw " #m6 ", %%mm5 \n\t" /* x3 */\
    "paddw " #m5 ", %%mm6 \n\t" /* x2 */\
    "paddw %%mm6, %%mm6 \n\t" /* 2x2 */\
    "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\
    "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\
    "paddw " #rnd ", %%mm4 \n\t" /* x2 */\
    "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
    "psraw $5, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    OP(%%mm5, out, %%mm7, d)

#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq  (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %6, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        "movq %%mm0, %5 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2 \n\t" /* b */\
        "paddw %%mm5, %%mm3 \n\t" /* c */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6 \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm4 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "movq %5, %%mm1 \n\t"\
        "packuswb %%mm3, %%mm1 \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
\
        "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5 \n\t" /* b */\
        "paddw %%mm4, %%mm0 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm0 \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2 \n\t" /* d */\
        "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6 \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
\
        "paddw %%mm5, %%mm3 \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4 \n\t" /* c */\
        "paddw %%mm2, %%mm5 \n\t" /* d */\
        "paddw %%mm6, %%mm6 \n\t" /* 2b */\
        "psubw %%mm6, %%mm4 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm4 \n\t"\
        "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4 \n\t"\
        "packuswb %%mm4, %%mm0 \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+D"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        __asm__ volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0 \n\t"\
            "movq 24(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq  (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %5, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movd 5(%0), %%mm5 \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm2 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3 \n\t" /* c */\
        "paddw %%mm5, %%mm4 \n\t" /* d */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %5, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "packuswb %%mm3, %%mm0 \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+d"(h)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        __asm__ volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}

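/* QPEL_OP instantiates the full set of quarter-pel motion-compensation
 * functions. The mcXY suffix encodes the quarter-pel offset (X, Y in 0..3):
 * mc00 is a plain pixel copy, mc20/mc02 apply the horizontal/vertical
 * lowpass directly, and the remaining positions average the lowpass
 * outputs with the source, or with each other, via the pixels*_l2
 * helpers. */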
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "movq 8(%0), %%mm2 \n\t"\
        "movq 8(%0), %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 17*8(%1) \n\t"\
        "movq %%mm2, 2*17*8(%1) \n\t"\
        "movq %%mm3, 3*17*8(%1) \n\t"\
        "add $8, %1 \n\t"\
        "add %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=4;\
\
    /*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7 \n\t"*/\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"  \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
\
        "add $136, %0 \n\t"\
        "add %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
        :"memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 9*8(%1) \n\t"\
        "add $8, %1 \n\t"\
        "add %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=2;\
\
    /*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7 \n\t"*/\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
\
        "add $72, %0 \n\t"\
        "add %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
}\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
}\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
1624 }
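/* Naming guide (a reading aid): qpelW_mcXY produces the WxW prediction at
 * quarter-pel offset (X/4, Y/4); X/Y = 0 is fullpel, 2 is halfpel, 1 and 3
 * are the quarter positions. Each position is built from the h/v lowpass
 * filters plus the _l2 pixel-averaging helpers; the halfH/halfHV scratch
 * buffers hold the horizontally resp. horizontally+vertically filtered
 * intermediates. */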
1625
1626 #define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
1627 #define AVG_3DNOW_OP(a,b,temp, size) \
1628 "mov" #size " " #b ", " #temp " \n\t"\
1629 "pavgusb " #temp ", " #a " \n\t"\
1630 "mov" #size " " #a ", " #b " \n\t"
1631 #define AVG_MMX2_OP(a,b,temp, size) \
1632 "mov" #size " " #b ", " #temp " \n\t"\
1633 "pavgb " #temp ", " #a " \n\t"\
1634 "mov" #size " " #a ", " #b " \n\t"
1635
1636 QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP)
1637 QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP)
1638 QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
1639 QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow)
1640 QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow)
1641 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
1642 QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2)
1643 QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2)
1644 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
1645
1646 /***********************************/
1647 /* bilinear qpel: not compliant with any spec; used only with -lavdopts fast */
1648
1649 #define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
1650 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1651 OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
1652 }
1653 #define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
1654 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1655 OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
1656 }
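/* The 2-tap scheme trades the full lowpass filters for cheap bilinear
 * blends: mc20/mc02/mc22 simply reuse the hpel x2/y2/xy2 kernels, while the
 * remaining positions go through the _l3 kernels, which blend source pixels
 * selected by the S0/S1/S2 byte offsets (a sketch of the idea; the exact
 * _l3 weights live with the kernels themselves, defined elsewhere). */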
1657
1658 #define QPEL_2TAP(OPNAME, SIZE, MMX)\
1659 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
1660 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
1661 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
1662 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
1663 OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
1664 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
1665 OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
1666 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
1667 OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
1668 static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1669 OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
1670 }\
1671 static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1672 OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
1673 }\
1674 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0, 1, 0)\
1675 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1, -1, 0)\
1676 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0, stride, 0)\
1677 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride, -stride, 0)\
1678 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0, stride, 1)\
1679 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1, stride, -1)\
1680 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride, -stride, 1)\
1681 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\
1682
1683 QPEL_2TAP(put_, 16, mmx2)
1684 QPEL_2TAP(avg_, 16, mmx2)
1685 QPEL_2TAP(put_, 8, mmx2)
1686 QPEL_2TAP(avg_, 8, mmx2)
1687 QPEL_2TAP(put_, 16, 3dnow)
1688 QPEL_2TAP(avg_, 16, 3dnow)
1689 QPEL_2TAP(put_, 8, 3dnow)
1690 QPEL_2TAP(avg_, 8, 3dnow)
1691
1692
1693 #if 0
1694 static void just_return(void) { return; }
1695 #endif
1696
1697 static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
1698 int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){
1699 const int w = 8;
1700 const int ix = ox>>(16+shift);
1701 const int iy = oy>>(16+shift);
1702 const int oxs = ox>>4;
1703 const int oys = oy>>4;
1704 const int dxxs = dxx>>4;
1705 const int dxys = dxy>>4;
1706 const int dyxs = dyx>>4;
1707 const int dyys = dyy>>4;
1708 const uint16_t r4[4] = {r,r,r,r};
1709 const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
1710 const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
1711 const uint64_t shift2 = 2*shift;
1712 uint8_t edge_buf[(h+1)*stride];
1713 int x, y;
1714
1715 const int dxw = (dxx-(1<<(16+shift)))*(w-1);
1716 const int dyh = (dyy-(1<<(16+shift)))*(h-1);
1717 const int dxh = dxy*(h-1);
1718 const int dyw = dyx*(w-1);
1719 if( // non-constant fullpel offset (3% of blocks)
1720 ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
1721 (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
1722 // uses more than 16 bits of subpel mv (only at huge resolution)
1723 || (dxx|dxy|dyx|dyy)&15 )
1724 {
1725 //FIXME could still use mmx for some of the rows
1726 ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
1727 return;
1728 }
1729
1730 src += ix + iy*stride;
1731 if( (unsigned)ix >= width-w ||
1732 (unsigned)iy >= height-h )
1733 {
1734 ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
1735 src = edge_buf;
1736 }
1737
1738 __asm__ volatile(
1739 "movd %0, %%mm6 \n\t"
1740 "pxor %%mm7, %%mm7 \n\t"
1741 "punpcklwd %%mm6, %%mm6 \n\t"
1742 "punpcklwd %%mm6, %%mm6 \n\t"
1743 :: "r"(1<<shift)
1744 );
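    /* mm6 now holds (1<<shift) broadcast to all four 16-bit lanes, and mm7
       stays zero for the byte->word unpacking below */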
1745
1746 for(x=0; x<w; x+=4){
1747 uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
1748 oxs - dxys + dxxs*(x+1),
1749 oxs - dxys + dxxs*(x+2),
1750 oxs - dxys + dxxs*(x+3) };
1751 uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
1752 oys - dyys + dyxs*(x+1),
1753 oys - dyys + dyxs*(x+2),
1754 oys - dyys + dyxs*(x+3) };
1755
1756 for(y=0; y<h; y++){
1757 __asm__ volatile(
1758 "movq %0, %%mm4 \n\t"
1759 "movq %1, %%mm5 \n\t"
1760 "paddw %2, %%mm4 \n\t"
1761 "paddw %3, %%mm5 \n\t"
1762 "movq %%mm4, %0 \n\t"
1763 "movq %%mm5, %1 \n\t"
1764 "psrlw $12, %%mm4 \n\t"
1765 "psrlw $12, %%mm5 \n\t"
1766 : "+m"(*dx4), "+m"(*dy4)
1767 : "m"(*dxy4), "m"(*dyy4)
1768 );
1769
1770 __asm__ volatile(
1771 "movq %%mm6, %%mm2 \n\t"
1772 "movq %%mm6, %%mm1 \n\t"
1773 "psubw %%mm4, %%mm2 \n\t"
1774 "psubw %%mm5, %%mm1 \n\t"
1775 "movq %%mm2, %%mm0 \n\t"
1776 "movq %%mm4, %%mm3 \n\t"
1777 "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
1778 "pmullw %%mm5, %%mm3 \n\t" // dx*dy
1779 "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
1780 "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)
1781
1782 "movd %4, %%mm5 \n\t"
1783 "movd %3, %%mm4 \n\t"
1784 "punpcklbw %%mm7, %%mm5 \n\t"
1785 "punpcklbw %%mm7, %%mm4 \n\t"
1786 "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
1787 "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy
1788
1789 "movd %2, %%mm5 \n\t"
1790 "movd %1, %%mm4 \n\t"
1791 "punpcklbw %%mm7, %%mm5 \n\t"
1792 "punpcklbw %%mm7, %%mm4 \n\t"
1793 "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
1794 "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
1795 "paddw %5, %%mm1 \n\t"
1796 "paddw %%mm3, %%mm2 \n\t"
1797 "paddw %%mm1, %%mm0 \n\t"
1798 "paddw %%mm2, %%mm0 \n\t"
1799
1800 "psrlw %6, %%mm0 \n\t"
1801 "packuswb %%mm0, %%mm0 \n\t"
1802 "movd %%mm0, %0 \n\t"
1803
1804 : "=m"(dst[x+y*stride])
1805 : "m"(src[0]), "m"(src[1]),
1806 "m"(src[stride]), "m"(src[stride+1]),
1807 "m"(*r4), "m"(shift2)
1808 );
1809 src += stride;
1810 }
1811 src += 4-h*stride;
1812 }
1813 }
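/* For reference, each output pixel above is the classic bilinear blend in
 * the spirit of ff_gmc_c, with s = 1<<shift and dx/dy the subpel fractions
 * recovered by the psrlw $12 above (a sketch, kept out of the build;
 * gmc_blend_ref is a hypothetical name): */
#if 0
static inline uint8_t gmc_blend_ref(const uint8_t *src, int stride,
                                    int dx, int dy, int s, int r, int shift)
{
    int v = src[0]       *(s-dx)*(s-dy) + src[1]       *dx*(s-dy)
          + src[stride]  *(s-dx)*dy     + src[stride+1]*dx*dy;
    return av_clip_uint8((v + r) >> (2*shift)); /* packuswb saturates likewise */
}
#endif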
1814
1815 #define PREFETCH(name, op) \
1816 static void name(void *mem, int stride, int h){\
1817 const uint8_t *p= mem;\
1818 do{\
1819 __asm__ volatile(#op" %0" :: "m"(*p));\
1820 p+= stride;\
1821 }while(--h);\
1822 }
1823 PREFETCH(prefetch_mmx2, prefetcht0)
1824 PREFETCH(prefetch_3dnow, prefetch)
1825 #undef PREFETCH
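/* prefetcht0 is the SSE hint (fetch into all cache levels); the plain
 * "prefetch" opcode is the 3DNow! flavour for pre-SSE AMD parts. */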
1826
1827 #include "h264dsp_mmx.c"
1828 #include "rv40dsp_mmx.c"
1829
1830 /* CAVS specific */
1831 void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1832 put_pixels8_mmx(dst, src, stride, 8);
1833 }
1834 void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1835 avg_pixels8_mmx(dst, src, stride, 8);
1836 }
1837 void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1838 put_pixels16_mmx(dst, src, stride, 16);
1839 }
1840 void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1841 avg_pixels16_mmx(dst, src, stride, 16);
1842 }
1843
1844 /* VC1 specific */
1845 void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
1846 put_pixels8_mmx(dst, src, stride, 8);
1847 }
1848 void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
1849 avg_pixels8_mmx2(dst, src, stride, 8);
1850 }
1851
1852 /* XXX: these functions should be removed as soon as all IDCTs are
1853    converted */
1854 #if CONFIG_GPL
1855 static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
1856 {
1857 ff_mmx_idct (block);
1858 put_pixels_clamped_mmx(block, dest, line_size);
1859 }
1860 static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
1861 {
1862 ff_mmx_idct (block);
1863 add_pixels_clamped_mmx(block, dest, line_size);
1864 }
1865 static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
1866 {
1867 ff_mmxext_idct (block);
1868 put_pixels_clamped_mmx(block, dest, line_size);
1869 }
1870 static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
1871 {
1872 ff_mmxext_idct (block);
1873 add_pixels_clamped_mmx(block, dest, line_size);
1874 }
1875 #endif
1876 static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
1877 {
1878 ff_idct_xvid_mmx (block);
1879 put_pixels_clamped_mmx(block, dest, line_size);
1880 }
1881 static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
1882 {
1883 ff_idct_xvid_mmx (block);
1884 add_pixels_clamped_mmx(block, dest, line_size);
1885 }
1886 static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
1887 {
1888 ff_idct_xvid_mmx2 (block);
1889 put_pixels_clamped_mmx(block, dest, line_size);
1890 }
1891 static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
1892 {
1893 ff_idct_xvid_mmx2 (block);
1894 add_pixels_clamped_mmx(block, dest, line_size);
1895 }
1896
1897 static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
1898 {
1899 int i;
1900 __asm__ volatile("pxor %%mm7, %%mm7":);
1901 for(i=0; i<blocksize; i+=2) {
1902 __asm__ volatile(
1903 "movq %0, %%mm0 \n\t"
1904 "movq %1, %%mm1 \n\t"
1905 "movq %%mm0, %%mm2 \n\t"
1906 "movq %%mm1, %%mm3 \n\t"
1907 "pfcmpge %%mm7, %%mm2 \n\t" // m >= 0.0
1908 "pfcmpge %%mm7, %%mm3 \n\t" // a >= 0.0
1909 "pslld $31, %%mm2 \n\t" // keep only the sign bit
1910 "pxor %%mm2, %%mm1 \n\t"
1911 "movq %%mm3, %%mm4 \n\t"
1912 "pand %%mm1, %%mm3 \n\t"
1913 "pandn %%mm1, %%mm4 \n\t"
1914 "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a >= 0) & (a ^ sign(m)))
1915 "pfsub %%mm4, %%mm0 \n\t" // m = m - ((a <  0) & (a ^ sign(m)))
1916 "movq %%mm3, %1 \n\t"
1917 "movq %%mm0, %0 \n\t"
1918 :"+m"(mag[i]), "+m"(ang[i])
1919 ::"memory"
1920 );
1921 }
1922 __asm__ volatile("femms");
1923 }
1924 static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
1925 {
1926 int i;
1927
1928 __asm__ volatile(
1929 "movaps %0, %%xmm5 \n\t"
1930 ::"m"(ff_pdw_80000000[0])
1931 );
1932 for(i=0; i<blocksize; i+=4) {
1933 __asm__ volatile(
1934 "movaps %0, %%xmm0 \n\t"
1935 "movaps %1, %%xmm1 \n\t"
1936 "xorps %%xmm2, %%xmm2 \n\t"
1937 "xorps %%xmm3, %%xmm3 \n\t"
1938 "cmpleps %%xmm0, %%xmm2 \n\t" // m >= 0.0
1939 "cmpleps %%xmm1, %%xmm3 \n\t" // a >= 0.0
1940 "andps %%xmm5, %%xmm2 \n\t" // keep only the sign bit
1941 "xorps %%xmm2, %%xmm1 \n\t"
1942 "movaps %%xmm3, %%xmm4 \n\t"
1943 "andps %%xmm1, %%xmm3 \n\t"
1944 "andnps %%xmm1, %%xmm4 \n\t"
1945 "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a >= 0) & (a ^ sign(m)))
1946 "subps %%xmm4, %%xmm0 \n\t" // m = m - ((a <  0) & (a ^ sign(m)))
1947 "movaps %%xmm3, %1 \n\t"
1948 "movaps %%xmm0, %0 \n\t"
1949 :"+m"(mag[i]), "+m"(ang[i])
1950 ::"memory"
1951 );
1952 }
1953 }
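/* Scalar equivalent of the branchless magnitude/angle reconstruction in
 * both versions above (a sketch, kept out of the build; the computed mask
 * flips ang's sign wherever mag >= 0): */
#if 0
static void vorbis_inverse_coupling_ref(float *mag, float *ang, int blocksize)
{
    int i;
    for(i=0; i<blocksize; i++){
        float m = mag[i], a = ang[i];
        if(m >= 0){
            if(a >= 0) ang[i] = m - a;
            else     { ang[i] = m; mag[i] = m + a; }
        }else{
            if(a >= 0) ang[i] = m + a;
            else     { ang[i] = m; mag[i] = m - a; }
        }
    }
}
#endif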
1954
1955 #define IF1(x) x
1956 #define IF0(x)
1957
1958 #define MIX5(mono,stereo)\
1959 __asm__ volatile(\
1960 "movss 0(%2), %%xmm5 \n"\
1961 "movss 8(%2), %%xmm6 \n"\
1962 "movss 24(%2), %%xmm7 \n"\
1963 "shufps $0, %%xmm5, %%xmm5 \n"\
1964 "shufps $0, %%xmm6, %%xmm6 \n"\
1965 "shufps $0, %%xmm7, %%xmm7 \n"\
1966 "1: \n"\
1967 "movaps (%0,%1), %%xmm0 \n"\
1968 "movaps 0x400(%0,%1), %%xmm1 \n"\
1969 "movaps 0x800(%0,%1), %%xmm2 \n"\
1970 "movaps 0xc00(%0,%1), %%xmm3 \n"\
1971 "movaps 0x1000(%0,%1), %%xmm4 \n"\
1972 "mulps %%xmm5, %%xmm0 \n"\
1973 "mulps %%xmm6, %%xmm1 \n"\
1974 "mulps %%xmm5, %%xmm2 \n"\
1975 "mulps %%xmm7, %%xmm3 \n"\
1976 "mulps %%xmm7, %%xmm4 \n"\
1977 stereo("addps %%xmm1, %%xmm0 \n")\
1978 "addps %%xmm1, %%xmm2 \n"\
1979 "addps %%xmm3, %%xmm0 \n"\
1980 "addps %%xmm4, %%xmm2 \n"\
1981 mono("addps %%xmm2, %%xmm0 \n")\
1982 "movaps %%xmm0, (%0,%1) \n"\
1983 stereo("movaps %%xmm2, 0x400(%0,%1) \n")\
1984 "add $16, %0 \n"\
1985 "jl 1b \n"\
1986 :"+&r"(i)\
1987 :"r"(samples[0]+len), "r"(matrix)\
1988 :"memory"\
1989 );
1990
1991 #define MIX_MISC(stereo)\
1992 __asm__ volatile(\
1993 "1: \n"\
1994 "movaps (%3,%0), %%xmm0 \n"\
1995 stereo("movaps %%xmm0, %%xmm1 \n")\
1996 "mulps %%xmm6, %%xmm0 \n"\
1997 stereo("mulps %%xmm7, %%xmm1 \n")\
1998 "lea 1024(%3,%0), %1 \n"\
1999 "mov %5, %2 \n"\
2000 "2: \n"\
2001 "movaps (%1), %%xmm2 \n"\
2002 stereo("movaps %%xmm2, %%xmm3 \n")\
2003 "mulps (%4,%2), %%xmm2 \n"\
2004 stereo("mulps 16(%4,%2), %%xmm3 \n")\
2005 "addps %%xmm2, %%xmm0 \n"\
2006 stereo("addps %%xmm3, %%xmm1 \n")\
2007 "add $1024, %1 \n"\
2008 "add $32, %2 \n"\
2009 "jl 2b \n"\
2010 "movaps %%xmm0, (%3,%0) \n"\
2011 stereo("movaps %%xmm1, 1024(%3,%0) \n")\
2012 "add $16, %0 \n"\
2013 "jl 1b \n"\
2014 :"+&r"(i), "=&r"(j), "=&r"(k)\
2015 :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\
2016 :"memory"\
2017 );
2018
2019 static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
2020 {
2021 int (*matrix_cmp)[2] = (int(*)[2])matrix;
2022 intptr_t i,j,k;
2023
2024 i = -len*sizeof(float);
2025 if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) {
2026 MIX5(IF0,IF1);
2027 } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
2028 MIX5(IF1,IF0);
2029 } else {
2030 DECLARE_ALIGNED(16, float, matrix_simd)[in_ch][2][4];
2031 j = 2*in_ch*sizeof(float);
2032 __asm__ volatile(
2033 "1: \n"
2034 "sub $8, %0 \n"
2035 "movss (%2,%0), %%xmm6 \n"
2036 "movss 4(%2,%0), %%xmm7 \n"
2037 "shufps $0, %%xmm6, %%xmm6 \n"
2038 "shufps $0, %%xmm7, %%xmm7 \n"
2039 "movaps %%xmm6, (%1,%0,4) \n"
2040 "movaps %%xmm7, 16(%1,%0,4) \n"
2041 "jg 1b \n"
2042 :"+&r"(j)
2043 :"r"(matrix_simd), "r"(matrix)
2044 :"memory"
2045 );
2046 if(out_ch == 2) {
2047 MIX_MISC(IF1);
2048 } else {
2049 MIX_MISC(IF0);
2050 }
2051 }
2052 }
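/* What the downmix computes per output channel, independent of the MIX5
 * fast paths (a scalar sketch, kept out of the build; ac3_downmix_ref is a
 * hypothetical name): */
#if 0
static void ac3_downmix_ref(float (*samples)[256], float (*matrix)[2],
                            int out_ch, int in_ch, int len)
{
    int i, c;
    for(i=0; i<len; i++){
        float v0 = 0, v1 = 0;
        for(c=0; c<in_ch; c++){
            v0 += samples[c][i] * matrix[c][0];
            v1 += samples[c][i] * matrix[c][1];
        }
        samples[0][i] = v0;
        if(out_ch == 2)
            samples[1][i] = v1;
    }
}
#endif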
2053
2054 static void vector_fmul_3dnow(float *dst, const float *src, int len){
2055 x86_reg i = (len-4)*4;
2056 __asm__ volatile(
2057 "1: \n\t"
2058 "movq (%1,%0), %%mm0 \n\t"
2059 "movq 8(%1,%0), %%mm1 \n\t"
2060 "pfmul (%2,%0), %%mm0 \n\t"
2061 "pfmul 8(%2,%0), %%mm1 \n\t"
2062 "movq %%mm0, (%1,%0) \n\t"
2063 "movq %%mm1, 8(%1,%0) \n\t"
2064 "sub $16, %0 \n\t"
2065 "jge 1b \n\t"
2066 "femms \n\t"
2067 :"+r"(i)
2068 :"r"(dst), "r"(src)
2069 :"memory"
2070 );
2071 }
2072 static void vector_fmul_sse(float *dst, const float *src, int len){
2073 x86_reg i = (len-8)*4;
2074 __asm__ volatile(
2075 "1: \n\t"
2076 "movaps (%1,%0), %%xmm0 \n\t"
2077 "movaps 16(%1,%0), %%xmm1 \n\t"
2078 "mulps (%2,%0), %%xmm0 \n\t"
2079 "mulps 16(%2,%0), %%xmm1 \n\t"
2080 "movaps %%xmm0, (%1,%0) \n\t"
2081 "movaps %%xmm1, 16(%1,%0) \n\t"
2082 "sub $32, %0 \n\t"
2083 "jge 1b \n\t"
2084 :"+r"(i)
2085 :"r"(dst), "r"(src)
2086 :"memory"
2087 );
2088 }
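/* Both versions implement the plain elementwise product below; the
 * unrolling assumes len is a multiple of 4 (3DNow!) resp. 8 (SSE). */
#if 0
    for(i=0; i<len; i++) dst[i] *= src[i];
#endif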
2089
2090 static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
2091 x86_reg i = len*4-16;
2092 __asm__ volatile(
2093 "1: \n\t"
2094 "pswapd 8(%1), %%mm0 \n\t"
2095 "pswapd (%1), %%mm1 \n\t"
2096 "pfmul (%3,%0), %%mm0 \n\t"
2097 "pfmul 8(%3,%0), %%mm1 \n\t"
2098 "movq %%mm0, (%2,%0) \n\t"
2099 "movq %%mm1, 8(%2,%0) \n\t"
2100 "add $16, %1 \n\t"
2101 "sub $16, %0 \n\t"
2102 "jge 1b \n\t"
2103 :"+r"(i), "+r"(src1)
2104 :"r"(dst), "r"(src0)
2105 );
2106 __asm__ volatile("femms");
2107 }
2108 static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
2109 x86_reg i = len*4-32;
2110 __asm__ volatile(
2111 "1: \n\t"
2112 "movaps 16(%1), %%xmm0 \n\t"
2113 "movaps (%1), %%xmm1 \n\t"
2114 "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
2115 "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
2116 "mulps (%3,%0), %%xmm0 \n\t"
2117 "mulps 16(%3,%0), %%xmm1 \n\t"
2118 "movaps %%xmm0, (%2,%0) \n\t"
2119 "movaps %%xmm1, 16(%2,%0) \n\t"
2120 "add $32, %1 \n\t"
2121 "sub $32, %0 \n\t"
2122 "jge 1b \n\t"
2123 :"+r"(i), "+r"(src1)
2124 :"r"(dst), "r"(src0)
2125 );
2126 }
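/* i.e. multiply by a reversed second operand: */
#if 0
    for(i=0; i<len; i++) dst[i] = src0[i] * src1[len-1-i];
#endif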
2127
2128 static void vector_fmul_add_3dnow(float *dst, const float *src0, const float *src1,
2129 const float *src2, int len){
2130 x86_reg i = (len-4)*4;
2131 __asm__ volatile(
2132 "1: \n\t"
2133 "movq (%2,%0), %%mm0 \n\t"
2134 "movq 8(%2,%0), %%mm1 \n\t"
2135 "pfmul (%3,%0), %%mm0 \n\t"
2136 "pfmul 8(%3,%0), %%mm1 \n\t"
2137 "pfadd (%4,%0), %%mm0 \n\t"
2138 "pfadd 8(%4,%0), %%mm1 \n\t"
2139 "movq %%mm0, (%1,%0) \n\t"
2140 "movq %%mm1, 8(%1,%0) \n\t"
2141 "sub $16, %0 \n\t"
2142 "jge 1b \n\t"
2143 :"+r"(i)
2144 :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
2145 :"memory"
2146 );
2147 __asm__ volatile("femms");
2148 }
2149 static void vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
2150 const float *src2, int len){
2151 x86_reg i = (len-8)*4;
2152 __asm__ volatile(
2153 "1: \n\t"
2154 "movaps (%2,%0), %%xmm0 \n\t"
2155 "movaps 16(%2,%0), %%xmm1 \n\t"
2156 "mulps (%3,%0), %%xmm0 \n\t"
2157 "mulps 16(%3,%0), %%xmm1 \n\t"
2158 "addps (%4,%0), %%xmm0 \n\t"
2159 "addps 16(%4,%0), %%xmm1 \n\t"
2160 "movaps %%xmm0, (%1,%0) \n\t"
2161 "movaps %%xmm1, 16(%1,%0) \n\t"
2162 "sub $32, %0 \n\t"
2163 "jge 1b \n\t"
2164 :"+r"(i)
2165 :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
2166 :"memory"
2167 );
2168 }
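/* i.e. an elementwise multiply-add: */
#if 0
    for(i=0; i<len; i++) dst[i] = src0[i]*src1[i] + src2[i];
#endif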
2169
2170 static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
2171 const float *win, float add_bias, int len){
2172 #if HAVE_6REGS
2173 if(add_bias == 0){
2174 x86_reg i = -len*4;
2175 x86_reg j = len*4-8;
2176 __asm__ volatile(
2177 "1: \n"
2178 "pswapd (%5,%1), %%mm1 \n"
2179 "movq (%5,%0), %%mm0 \n"
2180 "pswapd (%4,%1), %%mm5 \n"
2181 "movq (%3,%0), %%mm4 \n"
2182 "movq %%mm0, %%mm2 \n"
2183 "movq %%mm1, %%mm3 \n"
2184 "pfmul %%mm4, %%mm2 \n" // src0[len+i]*win[len+i]
2185 "pfmul %%mm5, %%mm3 \n" // src1[ j]*win[len+j]
2186 "pfmul %%mm4, %%mm1 \n" // src0[len+i]*win[len+j]
2187 "pfmul %%mm5, %%mm0 \n" // src1[ j]*win[len+i]
2188 "pfadd %%mm3, %%mm2 \n"
2189 "pfsub %%mm0, %%mm1 \n"
2190 "pswapd %%mm2, %%mm2 \n"
2191 "movq %%mm1, (%2,%0) \n"
2192 "movq %%mm2, (%2,%1) \n"
2193 "sub $8, %1 \n"
2194 "add $8, %0 \n"
2195 "jl 1b \n"
2196 "femms \n"
2197 :"+r"(i), "+r"(j)
2198 :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
2199 );
2200 }else
2201 #endif
2202 ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
2203 }
2204
2205 static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1,
2206 const float *win, float add_bias, int len){
2207 #if HAVE_6REGS
2208 if(add_bias == 0){
2209 x86_reg i = -len*4;
2210 x86_reg j = len*4-16;
2211 __asm__ volatile(
2212 "1: \n"
2213 "movaps (%5,%1), %%xmm1 \n"
2214 "movaps (%5,%0), %%xmm0 \n"
2215 "movaps (%4,%1), %%xmm5 \n"
2216 "movaps (%3,%0), %%xmm4 \n"
2217 "shufps $0x1b, %%xmm1, %%xmm1 \n"
2218 "shufps $0x1b, %%xmm5, %%xmm5 \n"
2219 "movaps %%xmm0, %%xmm2 \n"
2220 "movaps %%xmm1, %%xmm3 \n"
2221 "mulps %%xmm4, %%xmm2 \n" // src0[len+i]*win[len+i]
2222 "mulps %%xmm5, %%xmm3 \n" // src1[ j]*win[len+j]
2223 "mulps %%xmm4, %%xmm1 \n" // src0[len+i]*win[len+j]
2224 "mulps %%xmm5, %%xmm0 \n" // src1[ j]*win[len+i]
2225 "addps %%xmm3, %%xmm2 \n"
2226 "subps %%xmm0, %%xmm1 \n"
2227 "shufps $0x1b, %%xmm2, %%xmm2 \n"
2228 "movaps %%xmm1, (%2,%0) \n"
2229 "movaps %%xmm2, (%2,%1) \n"
2230 "sub $16, %1 \n"
2231 "add $16, %0 \n"
2232 "jl 1b \n"
2233 :"+r"(i), "+r"(j)
2234 :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
2235 );
2236 }else
2237 #endif
2238 ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
2239 }
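/* The add_bias==0 fast paths above mirror ff_vector_fmul_window_c, which
 * overlap-adds the two windowed halves (scalar sketch, kept out of the
 * build): */
#if 0
    int i, j;
    dst += len; win += len; src0 += len;
    for(i=-len, j=len-1; i<0; i++, j--){
        float s0 = src0[i], s1 = src1[j], wi = win[i], wj = win[j];
        dst[i] = s0*wj - s1*wi; /* first half  */
        dst[j] = s0*wi + s1*wj; /* second half */
    }
#endif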
2240
2241 static void int32_to_float_fmul_scalar_sse(float *dst, const int *src, float mul, int len)
2242 {
2243 x86_reg i = -4*len;
2244 __asm__ volatile(
2245 "movss %3, %%xmm4 \n"
2246 "shufps $0, %%xmm4, %%xmm4 \n"
2247 "1: \n"
2248 "cvtpi2ps (%2,%0), %%xmm0 \n"
2249 "cvtpi2ps 8(%2,%0), %%xmm1 \n"
2250 "cvtpi2ps 16(%2,%0), %%xmm2 \n"
2251 "cvtpi2ps 24(%2,%0), %%xmm3 \n"
2252 "movlhps %%xmm1, %%xmm0 \n"
2253 "movlhps %%xmm3, %%xmm2 \n"
2254 "mulps %%xmm4, %%xmm0 \n"
2255 "mulps %%xmm4, %%xmm2 \n"
2256 "movaps %%xmm0, (%1,%0) \n"
2257 "movaps %%xmm2, 16(%1,%0) \n"
2258 "add $32, %0 \n"
2259 "jl 1b \n"
2260 :"+r"(i)
2261 :"r"(dst+len), "r"(src+len), "m"(mul)
2262 );
2263 }
2264
2265 static void int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len)
2266 {
2267 x86_reg i = -4*len;
2268 __asm__ volatile(
2269 "movss %3, %%xmm4 \n"
2270 "shufps $0, %%xmm4, %%xmm4 \n"
2271 "1: \n"
2272 "cvtdq2ps (%2,%0), %%xmm0 \n"
2273 "cvtdq2ps 16(%2,%0), %%xmm1 \n"
2274 "mulps %%xmm4, %%xmm0 \n"
2275 "mulps %%xmm4, %%xmm1 \n"
2276 "movaps %%xmm0, (%1,%0) \n"
2277 "movaps %%xmm1, 16(%1,%0) \n"
2278 "add $32, %0 \n"
2279 "jl 1b \n"
2280 :"+r"(i)
2281 :"r"(dst+len), "r"(src+len), "m"(mul)
2282 );
2283 }
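/* Both variants are the scalar loop below; cvtpi2ps/cvtdq2ps perform the
 * int->float conversion on the fly. */
#if 0
    for(i=0; i<len; i++) dst[i] = src[i] * mul;
#endif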
2284
2285 static void vector_clipf_sse(float *dst, const float *src, float min, float max,
2286 int len)
2287 {
2288 x86_reg i = (len-16)*4;
2289 __asm__ volatile(
2290 "movss %3, %%xmm4 \n"
2291 "movss %4, %%xmm5 \n"
2292 "shufps $0, %%xmm4, %%xmm4 \n"
2293 "shufps $0, %%xmm5, %%xmm5 \n"
2294 "1: \n\t"
2295 "movaps (%2,%0), %%xmm0 \n\t" // 3/1 (latency/throughput) on Intel
2296 "movaps 16(%2,%0), %%xmm1 \n\t"
2297 "movaps 32(%2,%0), %%xmm2 \n\t"
2298 "movaps 48(%2,%0), %%xmm3 \n\t"
2299 "maxps %%xmm4, %%xmm0 \n\t"
2300 "maxps %%xmm4, %%xmm1 \n\t"
2301 "maxps %%xmm4, %%xmm2 \n\t"
2302 "maxps %%xmm4, %%xmm3 \n\t"
2303 "minps %%xmm5, %%xmm0 \n\t"
2304 "minps %%xmm5, %%xmm1 \n\t"
2305 "minps %%xmm5, %%xmm2 \n\t"
2306 "minps %%xmm5, %%xmm3 \n\t"
2307 "movaps %%xmm0, (%1,%0) \n\t"
2308 "movaps %%xmm1, 16(%1,%0) \n\t"
2309 "movaps %%xmm2, 32(%1,%0) \n\t"
2310 "movaps %%xmm3, 48(%1,%0) \n\t"
2311 "sub $64, %0 \n\t"
2312 "jge 1b \n\t"
2313 :"+&r"(i)
2314 :"r"(dst), "r"(src), "m"(min), "m"(max)
2315 :"memory"
2316 );
2317 }
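/* i.e. dst[i] = FFMIN(FFMAX(src[i], min), max); the unrolling handles 16
 * floats per iteration, so len is assumed to be a multiple of 16. */
#if 0
    for(i=0; i<len; i++) dst[i] = FFMIN(FFMAX(src[i], min), max);
#endif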
2318
2319 static void float_to_int16_3dnow(int16_t *dst, const float *src, long len){
2320 x86_reg reglen = len;
2321 // not bit-exact: pf2id uses different rounding than C and SSE
2322 __asm__ volatile(
2323 "add %0 , %0 \n\t"
2324 "lea (%2,%0,2) , %2 \n\t"
2325 "add %0 , %1 \n\t"
2326 "neg %0 \n\t"
2327 "1: \n\t"
2328 "pf2id (%2,%0,2) , %%mm0 \n\t"
2329 "pf2id 8(%2,%0,2) , %%mm1 \n\t"
2330 "pf2id 16(%2,%0,2) , %%mm2 \n\t"
2331 "pf2id 24(%2,%0,2) , %%mm3 \n\t"
2332 "packssdw %%mm1 , %%mm0 \n\t"
2333 "packssdw %%mm3 , %%mm2 \n\t"
2334 "movq %%mm0 , (%1,%0) \n\t"
2335 "movq %%mm2 , 8(%1,%0) \n\t"
2336 "add $16 , %0 \n\t"
2337 " js 1b \n\t"
2338 "femms \n\t"
2339 :"+r"(reglen), "+r"(dst), "+r"(src)
2340 );
2341 }
2342 static void float_to_int16_sse(int16_t *dst, const float *src, long len){
2343 x86_reg reglen = len;
2344 __asm__ volatile(
2345 "add %0 , %0 \n\t"
2346 "lea (%2,%0,2) , %2 \n\t"
2347 "add %0 , %1 \n\t"
2348 "neg %0 \n\t"
2349 "1: \n\t"
2350 "cvtps2pi (%2,%0,2) , %%mm0 \n\t"
2351 "cvtps2pi 8(%2,%0,2) , %%mm1 \n\t"
2352 "cvtps2pi 16(%2,%0,2) , %%mm2 \n\t"
2353 "cvtps2pi 24(%2,%0,2) , %%mm3 \n\t"
2354 "packssdw %%mm1 , %%mm0 \n\t"
2355 "packssdw %%mm3 , %%mm2 \n\t"
2356 "movq %%mm0 , (%1,%0) \n\t"
2357 "movq %%mm2 , 8(%1,%0) \n\t"
2358 "add $16 , %0 \n\t"
2359 " js 1b \n\t"
2360 "emms \n\t"
2361 :"+r"(reglen), "+r"(dst), "+r"(src)
2362 );
2363 }
2364
2365 static void float_to_int16_sse2(int16_t *dst, const float *src, long len){
2366 x86_reg reglen = len;
2367 __asm__ volatile(
2368 "add %0 , %0 \n\t"
2369 "lea (%2,%0,2) , %2 \n\t"
2370 "add %0 , %1 \n\t"
2371 "neg %0 \n\t"
2372 "1: \n\t"
2373 "cvtps2dq (%2,%0,2) , %%xmm0 \n\t"
2374 "cvtps2dq 16(%2,%0,2) , %%xmm1 \n\t"
2375 "packssdw %%xmm1 , %%xmm0 \n\t"
2376 "movdqa %%xmm0 , (%1,%0) \n\t"
2377 "add $16 , %0 \n\t"
2378 " js 1b \n\t"
2379 :"+r"(reglen), "+r"(dst), "+r"(src)
2380 );
2381 }
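/* cvtps2dq rounds to nearest (per MXCSR) and packssdw saturates to the
 * int16_t range, so the SSE2 version needs no explicit clipping. */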
2382
2383 void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len);
2384 void ff_float_to_int16_interleave6_3dnow(int16_t *dst, const float **src, int len);
2385 void ff_float_to_int16_interleave6_3dn2(int16_t *dst, const float **src, int len);
2386 int32_t ff_scalarproduct_int16_mmx2(const int16_t *v1, const int16_t *v2, int order, int shift);
2387 int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2, int order, int shift);
2388 int32_t ff_scalarproduct_and_madd_int16_mmx2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
2389 int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
2390 int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
2391 void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top);
2392 int ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src, int w, int left);
2393 int ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src, int w, int left);
2394 void ff_x264_deblock_v_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
2395 void ff_x264_deblock_h_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
2396 void ff_x264_deblock_h_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta);
2397 void ff_x264_deblock_v_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta);
2398 void ff_x264_deblock_h_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta);
2399
2400 #if HAVE_YASM && ARCH_X86_32
2401 void ff_x264_deblock_v8_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta);
2402 static void ff_x264_deblock_v_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta)
2403 {
2404 ff_x264_deblock_v8_luma_intra_mmxext(pix+0, stride, alpha, beta);
2405 ff_x264_deblock_v8_luma_intra_mmxext(pix+8, stride, alpha, beta);
2406 }
2407 #elif !HAVE_YASM
2408 #define ff_float_to_int16_interleave6_sse(a,b,c) float_to_int16_interleave_misc_sse(a,b,c,6)
2409 #define ff_float_to_int16_interleave6_3dnow(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6)
2410 #define ff_float_to_int16_interleave6_3dn2(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6)
2411 #endif
2412 #define ff_float_to_int16_interleave6_sse2 ff_float_to_int16_interleave6_sse
2413
2414 #define FLOAT_TO_INT16_INTERLEAVE(cpu, body) \
2415 /* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2 */\
2416 static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\
2417 DECLARE_ALIGNED(16, int16_t, tmp)[len];\
2418 int i,j,c;\
2419 for(c=0; c<channels; c++){\
2420 float_to_int16_##cpu(tmp, src[c], len);\
2421 for(i=0, j=c; i<len; i++, j+=channels)\
2422 dst[j] = tmp[i];\
2423 }\
2424 }\
2425 \
2426 static void float_to_int16_interleave_##cpu(int16_t *dst, const float **src, long len, int channels){\
2427 if(channels==1)\
2428 float_to_int16_##cpu(dst, src[0], len);\
2429 else if(channels==2){\
2430 x86_reg reglen = len; \
2431 const float *src0 = src[0];\
2432 const float *src1 = src[1];\
2433 __asm__ volatile(\
2434 "shl $2, %0 \n"\
2435 "add %0, %1 \n"\
2436 "add %0, %2 \n"\
2437 "add %0, %3 \n"\
2438 "neg %0 \n"\
2439 body\
2440 :"+r"(reglen), "+r"(dst), "+r"(src0), "+r"(src1)\
2441 );\
2442 }else if(channels==6){\
2443 ff_float_to_int16_interleave6_##cpu(dst, src, len);\
2444 }else\
2445 float_to_int16_interleave_misc_##cpu(dst, src, len, channels);\
2446 }
2447
2448 FLOAT_TO_INT16_INTERLEAVE(3dnow,
2449 "1: \n"
2450 "pf2id (%2,%0), %%mm0 \n"
2451 "pf2id 8(%2,%0), %%mm1 \n"
2452 "pf2id (%3,%0), %%mm2 \n"
2453 "pf2id 8(%3,%0), %%mm3 \n"
2454 "packssdw %%mm1, %%mm0 \n"
2455 "packssdw %%mm3, %%mm2 \n"
2456 "movq %%mm0, %%mm1 \n"
2457 "punpcklwd %%mm2, %%mm0 \n"
2458 "punpckhwd %%mm2, %%mm1 \n"
2459 "movq %%mm0, (%1,%0)\n"
2460 "movq %%mm1, 8(%1,%0)\n"
2461 "add $16, %0 \n"
2462 "js 1b \n"
2463 "femms \n"
2464 )
2465
2466 FLOAT_TO_INT16_INTERLEAVE(sse,
2467 "1: \n"
2468 "cvtps2pi (%2,%0), %%mm0 \n"
2469 "cvtps2pi 8(%2,%0), %%mm1 \n"
2470 "cvtps2pi (%3,%0), %%mm2 \n"
2471 "cvtps2pi 8(%3,%0), %%mm3 \n"
2472 "packssdw %%mm1, %%mm0 \n"
2473 "packssdw %%mm3, %%mm2 \n"
2474 "movq %%mm0, %%mm1 \n"
2475 "punpcklwd %%mm2, %%mm0 \n"
2476 "punpckhwd %%mm2, %%mm1 \n"
2477 "movq %%mm0, (%1,%0)\n"
2478 "movq %%mm1, 8(%1,%0)\n"
2479 "add $16, %0 \n"
2480 "js 1b \n"
2481 "emms \n"
2482 )
2483
2484 FLOAT_TO_INT16_INTERLEAVE(sse2,
2485 "1: \n"
2486 "cvtps2dq (%2,%0), %%xmm0 \n"
2487 "cvtps2dq (%3,%0), %%xmm1 \n"
2488 "packssdw %%xmm1, %%xmm0 \n"
2489 "movhlps %%xmm0, %%xmm1 \n"
2490 "punpcklwd %%xmm1, %%xmm0 \n"
2491 "movdqa %%xmm0, (%1,%0) \n"
2492 "add $16, %0 \n"
2493 "js 1b \n"
2494 )
2495
2496 static void float_to_int16_interleave_3dn2(int16_t *dst, const float **src, long len, int channels){
2497 if(channels==6)
2498 ff_float_to_int16_interleave6_3dn2(dst, src, len);
2499 else
2500 float_to_int16_interleave_3dnow(dst, src, len, channels);
2501 }
2502
2503 float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);
2504
2505 void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
2506 {
2507 mm_flags = mm_support();
2508
2509 if (avctx->dsp_mask) {
2510 if (avctx->dsp_mask & FF_MM_FORCE)
2511 mm_flags |= (avctx->dsp_mask & 0xffff);
2512 else
2513 mm_flags &= ~(avctx->dsp_mask & 0xffff);
2514 }
2515
2516 #if 0
2517 av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
2518 if (mm_flags & FF_MM_MMX)
2519 av_log(avctx, AV_LOG_INFO, " mmx");
2520 if (mm_flags & FF_MM_MMX2)
2521 av_log(avctx, AV_LOG_INFO, " mmx2");
2522 if (mm_flags & FF_MM_3DNOW)
2523 av_log(avctx, AV_LOG_INFO, " 3dnow");
2524 if (mm_flags & FF_MM_SSE)
2525 av_log(avctx, AV_LOG_INFO, " sse");
2526 if (mm_flags & FF_MM_SSE2)
2527 av_log(avctx, AV_LOG_INFO, " sse2");
2528 av_log(avctx, AV_LOG_INFO, "\n");
2529 #endif
2530
2531 if (mm_flags & FF_MM_MMX) {
2532 const int idct_algo= avctx->idct_algo;
2533
2534 if(avctx->lowres==0){
2535 if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
2536 c->idct_put= ff_simple_idct_put_mmx;
2537 c->idct_add= ff_simple_idct_add_mmx;
2538 c->idct = ff_simple_idct_mmx;
2539 c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
2540 #if CONFIG_GPL
2541 }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
2542 if(mm_flags & FF_MM_MMX2){
2543 c->idct_put= ff_libmpeg2mmx2_idct_put;
2544 c->idct_add= ff_libmpeg2mmx2_idct_add;
2545 c->idct = ff_mmxext_idct;
2546 }else{
2547 c->idct_put= ff_libmpeg2mmx_idct_put;
2548 c->idct_add= ff_libmpeg2mmx_idct_add;
2549 c->idct = ff_mmx_idct;
2550 }
2551 c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
2552 #endif
2553 }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER) &&
2554 idct_algo==FF_IDCT_VP3){
2555 if(mm_flags & FF_MM_SSE2){
2556 c->idct_put= ff_vp3_idct_put_sse2;
2557 c->idct_add= ff_vp3_idct_add_sse2;
2558 c->idct = ff_vp3_idct_sse2;
2559 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
2560 }else{
2561 c->idct_put= ff_vp3_idct_put_mmx;
2562 c->idct_add= ff_vp3_idct_add_mmx;
2563 c->idct = ff_vp3_idct_mmx;
2564 c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
2565 }
2566 }else if(idct_algo==FF_IDCT_CAVS){
2567 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
2568 }else if(idct_algo==FF_IDCT_XVIDMMX){
2569 if(mm_flags & FF_MM_SSE2){
2570 c->idct_put= ff_idct_xvid_sse2_put;
2571 c->idct_add= ff_idct_xvid_sse2_add;
2572 c->idct = ff_idct_xvid_sse2;
2573 c->idct_permutation_type= FF_SSE2_IDCT_PERM;
2574 }else if(mm_flags & FF_MM_MMX2){
2575 c->idct_put= ff_idct_xvid_mmx2_put;
2576 c->idct_add= ff_idct_xvid_mmx2_add;
2577 c->idct = ff_idct_xvid_mmx2;
2578 }else{
2579 c->idct_put= ff_idct_xvid_mmx_put;
2580 c->idct_add= ff_idct_xvid_mmx_add;
2581 c->idct = ff_idct_xvid_mmx;
2582 }
2583 }
2584 }
2585
2586 c->put_pixels_clamped = put_pixels_clamped_mmx;
2587 c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
2588 c->add_pixels_clamped = add_pixels_clamped_mmx;
2589 c->clear_block = clear_block_mmx;
2590 c->clear_blocks = clear_blocks_mmx;
2591 if ((mm_flags & FF_MM_SSE) &&
2592 !(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)){
2593 /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
2594 c->clear_block = clear_block_sse;
2595 c->clear_blocks = clear_blocks_sse;
2596 }
2597
2598 #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
2599 c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
2600 c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
2601 c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
2602 c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU
2603
2604 SET_HPEL_FUNCS(put, 0, 16, mmx);
2605 SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
2606 SET_HPEL_FUNCS(avg, 0, 16, mmx);
2607 SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
2608 SET_HPEL_FUNCS(put, 1, 8, mmx);
2609 SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
2610 SET_HPEL_FUNCS(avg, 1, 8, mmx);
2611 SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);
2612
2613 c->gmc= gmc_mmx;
2614
2615 c->add_bytes= add_bytes_mmx;
2616 c->add_bytes_l2= add_bytes_l2_mmx;
2617
2618 c->draw_edges = draw_edges_mmx;
2619
2620 if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
2621 c->h263_v_loop_filter= h263_v_loop_filter_mmx;
2622 c->h263_h_loop_filter= h263_h_loop_filter_mmx;
2623 }
2624 c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_rnd;
2625 c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx;
2626 c->put_no_rnd_vc1_chroma_pixels_tab[0]= put_vc1_chroma_mc8_mmx_nornd;
2627
2628 c->put_rv40_chroma_pixels_tab[0]= put_rv40_chroma_mc8_mmx;
2629 c->put_rv40_chroma_pixels_tab[1]= put_rv40_chroma_mc4_mmx;
2630
2631 if (CONFIG_VP6_DECODER) {
2632 c->vp6_filter_diag4 = ff_vp6_filter_diag4_mmx;
2633 }
2634
2635 if (mm_flags & FF_MM_MMX2) {
2636 c->prefetch = prefetch_mmx2;
2637
2638 c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
2639 c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
2640
2641 c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
2642 c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
2643 c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
2644
2645 c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
2646 c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
2647
2648 c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
2649 c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
2650 c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
2651
2652 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
2653 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
2654 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
2655 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
2656 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
2657 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
2658 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
2659
2660 if (CONFIG_VP3_DECODER) {
2661 c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2;
2662 c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2;
2663 }
2664 }
2665 if (CONFIG_VP3_DECODER) {
2666 c->vp3_idct_dc_add = ff_vp3_idct_dc_add_mmx2;
2667 }
2668
2669 if (CONFIG_VP3_DECODER
2670 && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
2671 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmx2;
2672 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmx2;
2673 }
2674
2675 #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \
2676 c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \
2677 c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \
2678 c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \
2679 c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \
2680 c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \
2681 c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \
2682 c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \
2683 c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \
2684 c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \
2685 c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \
2686 c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \
2687 c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \
2688 c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \
2689 c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \
2690 c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \
2691 c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU
2692
2693 SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2);
2694 SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2);
2695 SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2);
2696 SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2);
2697 SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2);
2698 SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2);
2699
2700 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2);
2701 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2);
2702 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2);
2703 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2);
2704 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2);
2705 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2);
2706
2707 SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2);
2708 SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2);
2709 SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2);
2710 SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2);
2711
2712 c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_mmx2;
2713 c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_mmx2;
2714
2715 c->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_vc1_chroma_mc8_mmx2_nornd;
2716
2717 c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2_rnd;
2718 c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2;
2719 c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2;
2720 c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2;
2721
2722 #if HAVE_YASM
2723 c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
2724 #endif
2725 #if HAVE_7REGS && HAVE_TEN_OPERANDS
2726 if( mm_flags&FF_MM_3DNOW )
2727 c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
2728 #endif
2729
2730 if (CONFIG_CAVS_DECODER)
2731 ff_cavsdsp_init_mmx2(c, avctx);
2732
2733 if (CONFIG_VC1_DECODER)
2734 ff_vc1dsp_init_mmx(c, avctx);
2735
2736 c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
2737 } else if (mm_flags & FF_MM_3DNOW) {
2738 c->prefetch = prefetch_3dnow;
2739
2740 c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
2741 c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
2742
2743 c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
2744 c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
2745 c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
2746
2747 c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
2748 c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
2749
2750 c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
2751 c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
2752 c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
2753
2754 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
2755 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
2756 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
2757 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
2758 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
2759 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
2760 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
2761 }
2762
2763 if (CONFIG_VP3_DECODER
2764 && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
2765 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_3dnow;
2766 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_3dnow;
2767 }
2768
2769 SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow);
2770 SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow);
2771 SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow);
2772 SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow);
2773 SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
2774 SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);
2775
2776 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
2777 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
2778 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
2779 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow);
2780 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow);
2781 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow);
2782
2783 SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow);
2784 SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow);
2785 SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
2786 SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);
2787
2788 c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow_rnd;
2789 c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;
2790
2791 c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_3dnow;
2792 c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_3dnow;
2793
2794 if (CONFIG_CAVS_DECODER)
2795 ff_cavsdsp_init_3dnow(c, avctx);
2796 }
2797
2798
2799 #define H264_QPEL_FUNCS(x, y, CPU)\
2800 c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
2801 c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
2802 c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
2803 c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
2804 if((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW)){
2805 // these functions are slower than mmx on AMD, but faster on Intel
2806 c->put_pixels_tab[0][0] = put_pixels16_sse2;
2807 c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
2808 H264_QPEL_FUNCS(0, 0, sse2);
2809 }
2810 if(mm_flags & FF_MM_SSE2){
2811 H264_QPEL_FUNCS(0, 1, sse2);
2812 H264_QPEL_FUNCS(0, 2, sse2);
2813 H264_QPEL_FUNCS(0, 3, sse2);
2814 H264_QPEL_FUNCS(1, 1, sse2);
2815 H264_QPEL_FUNCS(1, 2, sse2);
2816 H264_QPEL_FUNCS(1, 3, sse2);
2817 H264_QPEL_FUNCS(2, 1, sse2);
2818 H264_QPEL_FUNCS(2, 2, sse2);
2819 H264_QPEL_FUNCS(2, 3, sse2);
2820 H264_QPEL_FUNCS(3, 1, sse2);
2821 H264_QPEL_FUNCS(3, 2, sse2);
2822 H264_QPEL_FUNCS(3, 3, sse2);
2823
2824 if (CONFIG_VP6_DECODER) {
2825 c->vp6_filter_diag4 = ff_vp6_filter_diag4_sse2;
2826 }
2827 }
2828 #if HAVE_SSSE3
2829 if(mm_flags & FF_MM_SSSE3){
2830 H264_QPEL_FUNCS(1, 0, ssse3);
2831 H264_QPEL_FUNCS(1, 1, ssse3);
2832 H264_QPEL_FUNCS(1, 2, ssse3);
2833 H264_QPEL_FUNCS(1, 3, ssse3);
2834 H264_QPEL_FUNCS(2, 0, ssse3);
2835 H264_QPEL_FUNCS(2, 1, ssse3);
2836 H264_QPEL_FUNCS(2, 2, ssse3);
2837 H264_QPEL_FUNCS(2, 3, ssse3);
2838 H264_QPEL_FUNCS(3, 0, ssse3);
2839 H264_QPEL_FUNCS(3, 1, ssse3);
2840 H264_QPEL_FUNCS(3, 2, ssse3);
2841 H264_QPEL_FUNCS(3, 3, ssse3);
2842 c->put_no_rnd_vc1_chroma_pixels_tab[0]= put_vc1_chroma_mc8_ssse3_nornd;
2843 c->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_vc1_chroma_mc8_ssse3_nornd;
2844 c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_rnd;
2845 c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_ssse3_rnd;
2846 c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_ssse3;
2847 c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_ssse3;
2848 c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
2849 #if HAVE_YASM
2850 c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
2851         if (mm_flags & FF_MM_SSE4) // not really SSE4; the flag is a proxy to skip Conroe, where this version is slow
2852 c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
2853 #endif
2854 }
2855 #endif
2856
2857 if(mm_flags & FF_MM_3DNOW){
2858 c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
2859 c->vector_fmul = vector_fmul_3dnow;
2860 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
2861 c->float_to_int16 = float_to_int16_3dnow;
2862 c->float_to_int16_interleave = float_to_int16_interleave_3dnow;