x86: fdct: Initialize optimized fdct implementations in the standard way
libavcodec/x86/dsputilenc_mmx.c
/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <assert.h>

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/dct.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/mathops.h"
#include "dsputil_x86.h"

void ff_get_pixels_mmx(int16_t *block, const uint8_t *pixels, int line_size);
void ff_get_pixels_sse2(int16_t *block, const uint8_t *pixels, int line_size);
void ff_diff_pixels_mmx(int16_t *block, const uint8_t *s1, const uint8_t *s2,
                        int stride);
int ff_pix_sum16_mmx(uint8_t *pix, int line_size);
int ff_pix_norm1_mmx(uint8_t *pix, int line_size);

#if HAVE_INLINE_ASM

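/* sse8/sse16: sum of squared errors over an 8- or 16-pixel-wide block.
 * The loop computes |a - b| per byte as (a - b, saturated to 0) OR
 * (b - a, saturated to 0), widens the bytes to words, then squares and
 * accumulates with pmaddwd. Scalar reference for the 8-wide case
 * (illustrative sketch only, not part of the build):
 *
 *     int sum = 0;
 *     for (int y = 0; y < h; y++, pix1 += line_size, pix2 += line_size)
 *         for (int x = 0; x < 8; x++) {
 *             int d = pix1[x] - pix2[x];
 *             sum  += d * d;
 *         }
 *     return sum;
 */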
static int sse8_mmx(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int tmp;

    __asm__ volatile (
        "movl %4,%%ecx\n"
        "shr $1,%%ecx\n"
        "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
        "1:\n"
        "movq (%0),%%mm1\n"       /* mm1 = pix1[0][0-7] */
        "movq (%1),%%mm2\n"       /* mm2 = pix2[0][0-7] */
        "movq (%0,%3),%%mm3\n"    /* mm3 = pix1[1][0-7] */
        "movq (%1,%3),%%mm4\n"    /* mm4 = pix2[1][0-7] */

        /* algo: subtract mm1 from mm2 with saturation and vice versa,
         *       then OR the results to get the absolute difference */
        "movq %%mm1,%%mm5\n"
        "movq %%mm3,%%mm6\n"
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"

        "por %%mm1,%%mm2\n"
        "por %%mm3,%%mm4\n"

        /* now convert to 16-bit vectors so we can square them */
        "movq %%mm2,%%mm1\n"
        "movq %%mm4,%%mm3\n"

        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm3 now spread over (mm3,mm4) */

        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"

        "lea (%0,%3,2), %0\n"     /* pix1 += 2 * line_size */
        "lea (%1,%3,2), %1\n"     /* pix2 += 2 * line_size */

        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"

        "decl %%ecx\n"
        "jnz 1b\n"

        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r" (tmp)
        : "r" ((x86_reg)line_size), "m" (h)
        : "%ecx");

    return tmp;
}

static int sse16_mmx(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int tmp;

    __asm__ volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
        "1:\n"
        "movq (%0),%%mm1\n"       /* mm1 = pix1[0-7] */
        "movq (%1),%%mm2\n"       /* mm2 = pix2[0-7] */
        "movq 8(%0),%%mm3\n"      /* mm3 = pix1[8-15] */
        "movq 8(%1),%%mm4\n"      /* mm4 = pix2[8-15] */

        /* algo: subtract mm1 from mm2 with saturation and vice versa,
         *       then OR the results to get the absolute difference */
        "movq %%mm1,%%mm5\n"
        "movq %%mm3,%%mm6\n"
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"

        "por %%mm1,%%mm2\n"
        "por %%mm3,%%mm4\n"

        /* now convert to 16-bit vectors so we can square them */
        "movq %%mm2,%%mm1\n"
        "movq %%mm4,%%mm3\n"

        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm3 now spread over (mm3,mm4) */

        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"

        "add %3,%0\n"
        "add %3,%1\n"

        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"

        "decl %%ecx\n"
        "jnz 1b\n"

        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r" (tmp)
        : "r" ((x86_reg)line_size), "m" (h)
        : "%ecx");

    return tmp;
}

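/* hf_noise8/hf_noise16: high-frequency "noise" estimate used by the nsse
 * comparators below: the sum of absolute vertical differences of the
 * horizontal first-order differences. Scalar sketch of the 8-wide case
 * (illustrative only, not part of the build):
 *
 *     int score = 0;
 *     for (int y = 0; y < h - 1; y++)
 *         for (int x = 0; x < 7; x++)
 *             score += FFABS((pix[y][x]     - pix[y][x + 1]) -
 *                            (pix[y + 1][x] - pix[y + 1][x + 1]));
 *
 * The asm is unrolled over two rows per iteration and folds the sign away
 * with the pcmpgtw/pxor/psubw absolute-value idiom. */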
static int hf_noise8_mmx(uint8_t *pix1, int line_size, int h)
{
    int tmp;

    __asm__ volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm7,%%mm7\n"
        "pxor %%mm6,%%mm6\n"

        "movq (%0),%%mm0\n"
        "movq %%mm0, %%mm1\n"
        "psllq $8, %%mm0\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm0\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq %%mm4, %%mm1\n"
        "psllq $8, %%mm4\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm4\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n"
        "pcmpgtw %%mm2, %%mm1\n"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "1:\n"

        "movq (%0),%%mm0\n"
        "movq %%mm0, %%mm1\n"
        "psllq $8, %%mm0\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm0\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n"
        "pcmpgtw %%mm5, %%mm1\n"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq %%mm4, %%mm1\n"
        "psllq $8, %%mm4\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm4\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n"
        "pcmpgtw %%mm2, %%mm1\n"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "subl $2, %%ecx\n"
        " jnz 1b\n"

        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix1), "=r" (tmp)
        : "r" ((x86_reg)line_size), "g" (h - 2)
        : "%ecx");

    return tmp;
}

static int hf_noise16_mmx(uint8_t *pix1, int line_size, int h)
{
    int tmp;
    uint8_t *pix = pix1;

    __asm__ volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm7,%%mm7\n"
        "pxor %%mm6,%%mm6\n"

        "movq (%0),%%mm0\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n"
        "pcmpgtw %%mm2, %%mm1\n"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "1:\n"

        "movq (%0),%%mm0\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n"
        "pcmpgtw %%mm5, %%mm1\n"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n"
        "pcmpgtw %%mm2, %%mm1\n"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "subl $2, %%ecx\n"
        " jnz 1b\n"

        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix1), "=r" (tmp)
        : "r" ((x86_reg)line_size), "g" (h - 2)
        : "%ecx");

    return tmp + hf_noise8_mmx(pix + 8, line_size, h);
}

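/* nsse: noise-preserving SSE. The score is the plain SSE plus a penalty
 * proportional to how much high-frequency energy differs between the two
 * blocks:
 *
 *     score = sse + nsse_weight * |hf_noise(pix1) - hf_noise(pix2)|
 *
 * which biases motion estimation towards candidates that preserve the
 * source texture instead of merely minimizing the squared error. */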
static int nsse16_mmx(void *p, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    MpegEncContext *c = p;
    int score1, score2;

    if (c)
        score1 = c->dsp.sse[0](c, pix1, pix2, line_size, h);
    else
        score1 = sse16_mmx(c, pix1, pix2, line_size, h);
    score2 = hf_noise16_mmx(pix1, line_size, h) -
             hf_noise16_mmx(pix2, line_size, h);

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}

static int nsse8_mmx(void *p, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    MpegEncContext *c = p;
    int score1 = sse8_mmx(c, pix1, pix2, line_size, h);
    int score2 = hf_noise8_mmx(pix1, line_size, h) -
                 hf_noise8_mmx(pix2, line_size, h);

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}

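/* vsad_intra16: sum of absolute differences between vertically adjacent
 * rows within a single 16-pixel-wide block, i.e. sum |pix[y][x] -
 * pix[y + 1][x]|. The dummy argument only exists to match the comparator
 * signature. Accumulation happens in 16-bit lanes, hence the final
 * "& 0xFFFF". */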
static int vsad_intra16_mmx(void *v, uint8_t *pix, uint8_t *dummy, int line_size, int h)
{
    int tmp;

    assert(((intptr_t)pix & 7) == 0);
    assert((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), %%mm2\n"\
    "movq 8(%0), %%mm3\n"\
    "add %2,%0\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pxor %%mm7,%%mm7\n"
        "movq (%0),%%mm0\n"
        "movq 8(%0),%%mm1\n"
        "add %2,%0\n"
        "jmp 2f\n"
        "1:\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6,%%mm0\n"
        "movq %%mm0,%%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix), "=r" (tmp)
        : "r" ((x86_reg)line_size), "m" (h)
        : "%ecx");

    return tmp & 0xFFFF;
}
#undef SUM

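/* Same metric with MMXEXT: psadbw computes the byte-wise absolute
 * difference and its horizontal sum in a single instruction, replacing the
 * whole unpack/abs/add sequence of the MMX version. */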
static int vsad_intra16_mmxext(void *v, uint8_t *pix, uint8_t *dummy,
                               int line_size, int h)
{
    int tmp;

    assert(((intptr_t)pix & 7) == 0);
    assert((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), " #out0 "\n"\
    "movq 8(%0), " #out1 "\n"\
    "add %2,%0\n"\
    "psadbw " #out0 ", " #in0 "\n"\
    "psadbw " #out1 ", " #in1 "\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pxor %%mm7,%%mm7\n"
        "movq (%0),%%mm0\n"
        "movq 8(%0),%%mm1\n"
        "add %2,%0\n"
        "jmp 2f\n"
        "1:\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movd %%mm6,%1\n"
        : "+r" (pix), "=r" (tmp)
        : "r" ((x86_reg)line_size), "m" (h)
        : "%ecx");

    return tmp;
}
#undef SUM

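/* vsad16: the inter variant runs on the difference signal pix1 - pix2.
 * psubb yields signed bytes, so each byte is XORed with 0x80 (built in mm7
 * via pcmpeqw/psllw/packsswb) to bias it into unsigned range before the
 * unsigned absolute-difference trick is applied. */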
static int vsad16_mmx(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int tmp;

    assert(((intptr_t)pix1 & 7) == 0);
    assert(((intptr_t)pix2 & 7) == 0);
    assert((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0),%%mm2\n"\
    "movq (%1)," #out0 "\n"\
    "movq 8(%0),%%mm3\n"\
    "movq 8(%1)," #out1 "\n"\
    "add %3,%0\n"\
    "add %3,%1\n"\
    "psubb " #out0 ", %%mm2\n"\
    "psubb " #out1 ", %%mm3\n"\
    "pxor %%mm7, %%mm2\n"\
    "pxor %%mm7, %%mm3\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0),%%mm0\n"
        "movq (%1),%%mm2\n"
        "movq 8(%0),%%mm1\n"
        "movq 8(%1),%%mm3\n"
        "add %3,%0\n"
        "add %3,%1\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        "jmp 2f\n"
        "1:\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6,%%mm0\n"
        "movq %%mm0,%%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6,%%mm0\n"
        "movd %%mm0,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r" (tmp)
        : "r" ((x86_reg)line_size), "m" (h)
        : "%ecx");

    return tmp & 0x7FFF;
}
#undef SUM

static int vsad16_mmxext(void *v, uint8_t *pix1, uint8_t *pix2,
                         int line_size, int h)
{
    int tmp;

    assert(((intptr_t)pix1 & 7) == 0);
    assert(((intptr_t)pix2 & 7) == 0);
    assert((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0)," #out0 "\n"\
    "movq (%1),%%mm2\n"\
    "movq 8(%0)," #out1 "\n"\
    "movq 8(%1),%%mm3\n"\
    "add %3,%0\n"\
    "add %3,%1\n"\
    "psubb %%mm2, " #out0 "\n"\
    "psubb %%mm3, " #out1 "\n"\
    "pxor %%mm7, " #out0 "\n"\
    "pxor %%mm7, " #out1 "\n"\
    "psadbw " #out0 ", " #in0 "\n"\
    "psadbw " #out1 ", " #in1 "\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0),%%mm0\n"
        "movq (%1),%%mm2\n"
        "movq 8(%0),%%mm1\n"
        "movq 8(%1),%%mm3\n"
        "add %3,%0\n"
        "add %3,%1\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        "jmp 2f\n"
        "1:\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movd %%mm6,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r" (tmp)
        : "r" ((x86_reg)line_size), "m" (h)
        : "%ecx");

    return tmp;
}
#undef SUM

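/* diff_bytes: dst[i] = src1[i] - src2[i]. The asm loop handles 16 bytes
 * per iteration; the C loop afterwards picks up the remaining bytes when
 * w is not a multiple of 16. */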
static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w)
{
    x86_reg i = 0;

    __asm__ volatile (
        "1:                             \n\t"
        "movq (%2, %0), %%mm0           \n\t"
        "movq (%1, %0), %%mm1           \n\t"
        "psubb %%mm0, %%mm1             \n\t"
        "movq %%mm1, (%3, %0)           \n\t"
        "movq 8(%2, %0), %%mm0          \n\t"
        "movq 8(%1, %0), %%mm1          \n\t"
        "psubb %%mm0, %%mm1             \n\t"
        "movq %%mm1, 8(%3, %0)          \n\t"
        "add $16, %0                    \n\t"
        "cmp %4, %0                     \n\t"
        " jb 1b                         \n\t"
        : "+r" (i)
        : "r" (src1), "r" (src2), "r" (dst), "r" ((x86_reg)w - 15));

    for (; i < w; i++)
        dst[i] = src1[i] - src2[i];
}

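/* HuffYUV median prediction: the predictor for each byte is
 * mid_pred(L, T, L + T - LT) with L = left, T = top and LT = top-left
 * neighbour, and the residual dst = src - pred is stored. pminub/pmaxub
 * select the median of the three candidates. The first byte and the
 * carried left/left_top state are fixed up in C after the loop. */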
static void sub_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *src1,
                                              const uint8_t *src2, int w,
                                              int *left, int *left_top)
{
    x86_reg i = 0;
    uint8_t l, lt;

    __asm__ volatile (
        "movq (%1, %0), %%mm0           \n\t" // LT
        "psllq $8, %%mm0                \n\t"
        "1:                             \n\t"
        "movq (%1, %0), %%mm1           \n\t" // T
        "movq -1(%2, %0), %%mm2         \n\t" // L
        "movq (%2, %0), %%mm3           \n\t" // X
        "movq %%mm2, %%mm4              \n\t" // L
        "psubb %%mm0, %%mm2             \n\t"
        "paddb %%mm1, %%mm2             \n\t" // L + T - LT
        "movq %%mm4, %%mm5              \n\t" // L
        "pmaxub %%mm1, %%mm4            \n\t" // max(T, L)
        "pminub %%mm5, %%mm1            \n\t" // min(T, L)
        "pminub %%mm2, %%mm4            \n\t"
        "pmaxub %%mm1, %%mm4            \n\t"
        "psubb %%mm4, %%mm3             \n\t" // dst - pred
        "movq %%mm3, (%3, %0)           \n\t"
        "add $8, %0                     \n\t"
        "movq -1(%1, %0), %%mm0         \n\t" // LT
        "cmp %4, %0                     \n\t"
        " jb 1b                         \n\t"
        : "+r" (i)
        : "r" (src1), "r" (src2), "r" (dst), "r" ((x86_reg)w));

    l  = *left;
    lt = *left_top;

    dst[0] = src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt) & 0xFF);

    *left_top = src1[w - 1];
    *left     = src2[w - 1];
}

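/* Absolute value of packed words, in three generations of the same idiom:
 * MMX masks the sign with pcmpgtw/pxor/psubw, MMXEXT takes pmaxsw of the
 * value and its negation, and SSSE3 has the dedicated pabsw instruction. */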
#define MMABS_MMX(a, z)\
    "pxor " #z ", " #z "              \n\t"\
    "pcmpgtw " #a ", " #z "           \n\t"\
    "pxor " #z ", " #a "              \n\t"\
    "psubw " #z ", " #a "             \n\t"

#define MMABS_MMXEXT(a, z) \
    "pxor " #z ", " #z "              \n\t"\
    "psubw " #a ", " #z "             \n\t"\
    "pmaxsw " #z ", " #a "            \n\t"

#define MMABS_SSSE3(a, z)\
    "pabsw " #a ", " #a "             \n\t"

#define MMABS_SUM(a, z, sum)\
    MMABS(a, z)\
    "paddusw " #a ", " #sum "         \n\t"

/* FIXME: HSUM_* saturates at 64k, while an 8x8 hadamard or dct block can
 * get up to about 100k on extreme inputs. But that's very unlikely to
 * occur in natural video, and it's even more unlikely to not have any
 * alternative mvs/modes with lower cost. */
#define HSUM_MMX(a, t, dst)\
    "movq " #a ", " #t "              \n\t"\
    "psrlq $32, " #a "                \n\t"\
    "paddusw " #t ", " #a "           \n\t"\
    "movq " #a ", " #t "              \n\t"\
    "psrlq $16, " #a "                \n\t"\
    "paddusw " #t ", " #a "           \n\t"\
    "movd " #a ", " #dst "            \n\t"

#define HSUM_MMXEXT(a, t, dst) \
    "pshufw $0x0E, " #a ", " #t "     \n\t"\
    "paddusw " #t ", " #a "           \n\t"\
    "pshufw $0x01, " #a ", " #t "     \n\t"\
    "paddusw " #t ", " #a "           \n\t"\
    "movd " #a ", " #dst "            \n\t"

#define HSUM_SSE2(a, t, dst)\
    "movhlps " #a ", " #t "           \n\t"\
    "paddusw " #t ", " #a "           \n\t"\
    "pshuflw $0x0E, " #a ", " #t "    \n\t"\
    "paddusw " #t ", " #a "           \n\t"\
    "pshuflw $0x01, " #a ", " #t "    \n\t"\
    "paddusw " #t ", " #a "           \n\t"\
    "movd " #a ", " #dst "            \n\t"

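/* sum_abs_dctelem: sum of |coefficient| over an 8x8 block of int16_t DCT
 * coefficients, used as a cheap cost estimate. DCT_SAD4 accumulates four
 * register-wide loads at a time; the byte offsets 0/8/64/72 (MMX) and
 * 0/64 (SSE2) together cover all 128 bytes of the block. */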
#define DCT_SAD4(m, mm, o)\
    "mov" #m " " #o "+ 0(%1), " #mm "2 \n\t"\
    "mov" #m " " #o "+16(%1), " #mm "3 \n\t"\
    "mov" #m " " #o "+32(%1), " #mm "4 \n\t"\
    "mov" #m " " #o "+48(%1), " #mm "5 \n\t"\
    MMABS_SUM(mm##2, mm##6, mm##0)\
    MMABS_SUM(mm##3, mm##7, mm##1)\
    MMABS_SUM(mm##4, mm##6, mm##0)\
    MMABS_SUM(mm##5, mm##7, mm##1)

#define DCT_SAD_MMX\
    "pxor %%mm0, %%mm0                \n\t"\
    "pxor %%mm1, %%mm1                \n\t"\
    DCT_SAD4(q, %%mm, 0)\
    DCT_SAD4(q, %%mm, 8)\
    DCT_SAD4(q, %%mm, 64)\
    DCT_SAD4(q, %%mm, 72)\
    "paddusw %%mm1, %%mm0             \n\t"\
    HSUM(%%mm0, %%mm1, %0)

#define DCT_SAD_SSE2\
    "pxor %%xmm0, %%xmm0              \n\t"\
    "pxor %%xmm1, %%xmm1              \n\t"\
    DCT_SAD4(dqa, %%xmm, 0)\
    DCT_SAD4(dqa, %%xmm, 64)\
    "paddusw %%xmm1, %%xmm0           \n\t"\
    HSUM(%%xmm0, %%xmm1, %0)

#define DCT_SAD_FUNC(cpu) \
static int sum_abs_dctelem_##cpu(int16_t *block)\
{\
    int sum;\
    __asm__ volatile (\
        DCT_SAD\
        : "=r" (sum)\
        : "r" (block));\
    return sum & 0xFFFF;\
}

#define DCT_SAD         DCT_SAD_MMX
#define HSUM(a, t, dst) HSUM_MMX(a, t, dst)
#define MMABS(a, z)     MMABS_MMX(a, z)
DCT_SAD_FUNC(mmx)
#undef MMABS
#undef HSUM

#define HSUM(a, t, dst) HSUM_MMXEXT(a, t, dst)
#define MMABS(a, z)     MMABS_MMXEXT(a, z)
DCT_SAD_FUNC(mmxext)
#undef HSUM
#undef DCT_SAD

#define DCT_SAD         DCT_SAD_SSE2
#define HSUM(a, t, dst) HSUM_SSE2(a, t, dst)
DCT_SAD_FUNC(sse2)
#undef MMABS

#if HAVE_SSSE3_INLINE
#define MMABS(a, z)     MMABS_SSSE3(a, z)
DCT_SAD_FUNC(ssse3)
#undef MMABS
#endif
#undef HSUM
#undef DCT_SAD

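/* ssd_int8_vs_int16: sum of squared differences between an int8_t basis
 * vector and an int16_t block, used by the quantization noise shaping
 * (QNS) search. Scalar equivalent (illustrative only):
 *
 *     int sum = 0;
 *     for (int i = 0; i < size; i++) {
 *         int d = pix2[i] - pix1[i];
 *         sum  += d * d;
 *     }
 *     return sum;
 */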
static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int size)
{
    int sum;
    x86_reg i = size;

    __asm__ volatile (
        "pxor %%mm4, %%mm4              \n"
        "1:                             \n"
        "sub $8, %0                     \n"
        "movq (%2, %0), %%mm2           \n"
        "movq (%3, %0, 2), %%mm0        \n"
        "movq 8(%3, %0, 2), %%mm1       \n"
        "punpckhbw %%mm2, %%mm3         \n"
        "punpcklbw %%mm2, %%mm2         \n"
        "psraw $8, %%mm3                \n"
        "psraw $8, %%mm2                \n"
        "psubw %%mm3, %%mm1             \n"
        "psubw %%mm2, %%mm0             \n"
        "pmaddwd %%mm1, %%mm1           \n"
        "pmaddwd %%mm0, %%mm0           \n"
        "paddd %%mm1, %%mm4             \n"
        "paddd %%mm0, %%mm4             \n"
        "jg 1b                          \n"
        "movq %%mm4, %%mm3              \n"
        "psrlq $32, %%mm3               \n"
        "paddd %%mm3, %%mm4             \n"
        "movd %%mm4, %1                 \n"
        : "+r" (i), "=r" (sum)
        : "r" (pix1), "r" (pix2));

    return sum;
}

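/* QNS basis-projection helpers (try_8x8basis/add_8x8basis). The template
 * in dsputil_qns_template.c is included once per instruction set below;
 * DEF names the generated functions, while SET_RND, SCALE_OFFSET and
 * PMULHRW supply the flavor-specific rounding and fixed-point multiply. */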
#define PHADDD(a, t)\
    "movq " #a ", " #t "              \n\t"\
    "psrlq $32, " #a "                \n\t"\
    "paddd " #t ", " #a "             \n\t"

/*
 * pmulhw:   dst[0-15] = (src[0-15] * dst[0-15])[16-31]
 * pmulhrw:  dst[0-15] = (src[0-15] * dst[0-15] + 0x8000)[16-31]
 * pmulhrsw: dst[0-15] = (src[0-15] * dst[0-15] + 0x4000)[15-30]
 */
#define PMULHRW(x, y, s, o)\
    "pmulhw " #s ", " #x "            \n\t"\
    "pmulhw " #s ", " #y "            \n\t"\
    "paddw " #o ", " #x "             \n\t"\
    "paddw " #o ", " #y "             \n\t"\
    "psraw $1, " #x "                 \n\t"\
    "psraw $1, " #y "                 \n\t"
#define DEF(x) x ## _mmx
#define SET_RND MOVQ_WONE
#define SCALE_OFFSET 1

#include "dsputil_qns_template.c"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW

#define DEF(x) x ## _3dnow
#define SET_RND(x)
#define SCALE_OFFSET 0
#define PMULHRW(x, y, s, o)\
    "pmulhrw " #s ", " #x "           \n\t"\
    "pmulhrw " #s ", " #y "           \n\t"

#include "dsputil_qns_template.c"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW

#if HAVE_SSSE3_INLINE
#undef PHADDD
#define DEF(x) x ## _ssse3
#define SET_RND(x)
#define SCALE_OFFSET -1
#define PHADDD(a, t)\
    "pshufw $0x0E, " #a ", " #t "     \n\t"\
    "paddd " #t ", " #a "             \n\t" /* faster than phaddd on core2 */
#define PMULHRW(x, y, s, o)\
    "pmulhrsw " #s ", " #x "          \n\t"\
    "pmulhrsw " #s ", " #y "          \n\t"

#include "dsputil_qns_template.c"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW
#undef PHADDD
#endif /* HAVE_SSSE3_INLINE */

#endif /* HAVE_INLINE_ASM */

int ff_sse16_sse2(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);

#define hadamard_func(cpu) \
int ff_hadamard8_diff_##cpu(void *s, uint8_t *src1, uint8_t *src2, \
                            int stride, int h); \
int ff_hadamard8_diff16_##cpu(void *s, uint8_t *src1, uint8_t *src2, \
                              int stride, int h);

hadamard_func(mmx)
hadamard_func(mmxext)
hadamard_func(sse2)
hadamard_func(ssse3)

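/* Runtime dispatch: function pointers are assigned in increasing order of
 * capability (MMX first, then MMXEXT/SSE2/SSSE3 and the external asm
 * versions), so a later, faster implementation overrides an earlier one on
 * CPUs that support it. The fdct pointers are only set for 8-bit content
 * and only when dct_algo allows the x86 DCT. */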
av_cold void ff_dsputilenc_init_mmx(DSPContext *c, AVCodecContext *avctx)
{
    int cpu_flags = av_get_cpu_flags();
    const int dct_algo = avctx->dct_algo;

#if HAVE_YASM
    int bit_depth = avctx->bits_per_raw_sample;

    if (EXTERNAL_MMX(cpu_flags)) {
        if (bit_depth <= 8)
            c->get_pixels = ff_get_pixels_mmx;
        c->diff_pixels = ff_diff_pixels_mmx;
        c->pix_sum     = ff_pix_sum16_mmx;
        c->pix_norm1   = ff_pix_norm1_mmx;
    }
    if (EXTERNAL_SSE2(cpu_flags))
        if (bit_depth <= 8)
            c->get_pixels = ff_get_pixels_sse2;
#endif /* HAVE_YASM */

#if HAVE_INLINE_ASM
    if (INLINE_MMX(cpu_flags)) {
        if (avctx->bits_per_raw_sample <= 8 &&
            (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX))
            c->fdct = ff_fdct_mmx;

        c->diff_bytes      = diff_bytes_mmx;
        c->sum_abs_dctelem = sum_abs_dctelem_mmx;

        c->sse[0]  = sse16_mmx;
        c->sse[1]  = sse8_mmx;
        c->vsad[4] = vsad_intra16_mmx;

        c->nsse[0] = nsse16_mmx;
        c->nsse[1] = nsse8_mmx;

        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
            c->vsad[0]      = vsad16_mmx;
            c->try_8x8basis = try_8x8basis_mmx;
        }
        c->add_8x8basis = add_8x8basis_mmx;

        c->ssd_int8_vs_int16 = ssd_int8_vs_int16_mmx;
    }

    if (INLINE_MMXEXT(cpu_flags)) {
        if (avctx->bits_per_raw_sample <= 8 &&
            (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX))
            c->fdct = ff_fdct_mmxext;

        c->sum_abs_dctelem = sum_abs_dctelem_mmxext;
        c->vsad[4]         = vsad_intra16_mmxext;

        if (!(avctx->flags & CODEC_FLAG_BITEXACT))
            c->vsad[0] = vsad16_mmxext;

        c->sub_hfyu_median_prediction = sub_hfyu_median_prediction_mmxext;
    }

    if (INLINE_SSE2(cpu_flags)) {
        if (avctx->bits_per_raw_sample <= 8 &&
            (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX))
            c->fdct = ff_fdct_sse2;

        c->sum_abs_dctelem = sum_abs_dctelem_sse2;
    }

#if HAVE_SSSE3_INLINE
    if (INLINE_SSSE3(cpu_flags)) {
        if (!(avctx->flags & CODEC_FLAG_BITEXACT))
            c->try_8x8basis = try_8x8basis_ssse3;
        c->add_8x8basis    = add_8x8basis_ssse3;
        c->sum_abs_dctelem = sum_abs_dctelem_ssse3;
    }
#endif /* HAVE_SSSE3_INLINE */

    if (INLINE_AMD3DNOW(cpu_flags)) {
        if (!(avctx->flags & CODEC_FLAG_BITEXACT))
            c->try_8x8basis = try_8x8basis_3dnow;
        c->add_8x8basis = add_8x8basis_3dnow;
    }
#endif /* HAVE_INLINE_ASM */

    if (EXTERNAL_MMX(cpu_flags)) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_mmx;
        c->hadamard8_diff[1] = ff_hadamard8_diff_mmx;
    }

    if (EXTERNAL_MMXEXT(cpu_flags)) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_mmxext;
        c->hadamard8_diff[1] = ff_hadamard8_diff_mmxext;
    }

    if (EXTERNAL_SSE2(cpu_flags)) {
        c->sse[0] = ff_sse16_sse2;

#if HAVE_ALIGNED_STACK
        c->hadamard8_diff[0] = ff_hadamard8_diff16_sse2;
        c->hadamard8_diff[1] = ff_hadamard8_diff_sse2;
#endif
    }

    if (EXTERNAL_SSSE3(cpu_flags) && HAVE_ALIGNED_STACK) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_ssse3;
        c->hadamard8_diff[1] = ff_hadamard8_diff_ssse3;
    }

    ff_dsputil_init_pix_mmx(c, avctx);
}