/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */
#include "../dsputil.h"
#include "../simple_idct.h"

extern const uint8_t ff_h263_loop_filter_strength[32];

int mm_flags; /* multimedia extension flags */
/* pixel operations */
static const uint64_t mm_bone  __attribute__ ((aligned(8))) = 0x0101010101010101ULL;
static const uint64_t mm_wone  __attribute__ ((aligned(8))) = 0x0001000100010001ULL;
static const uint64_t mm_wtwo  __attribute__ ((aligned(8))) = 0x0002000200020002ULL;

static const uint64_t ff_pw_20 __attribute__ ((aligned(8))) = 0x0014001400140014ULL;
static const uint64_t ff_pw_3  __attribute__ ((aligned(8))) = 0x0003000300030003ULL;
static const uint64_t ff_pw_16 __attribute__ ((aligned(8))) = 0x0010001000100010ULL;
static const uint64_t ff_pw_15 __attribute__ ((aligned(8))) = 0x000F000F000F000FULL;

static const uint64_t ff_pb_FC __attribute__ ((aligned(8))) = 0xFCFCFCFCFCFCFCFCULL;
#define JUMPALIGN() __asm __volatile (".balign 8"::)
#define MOVQ_ZERO(regd) __asm __volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_WONE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_bone))
#define MOVQ_WTWO(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_wtwo))
#else
// for a shared library it's better to use this way of accessing constants
#define MOVQ_BONE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)
#endif
// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "por  " #regb ", " #regr "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"
// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pand " #regd ", " #regp "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "por  " #regb ", " #regr "  \n\t"\
    "por  " #regd ", " #regp "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"
/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND  MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f)    PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)           PAVGB_MMX_NO_RND(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND  MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f)    PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)           PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
/* for Athlons PAVGUSB is preferred */
#define PAVGB "pavgusb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB
179 /***********************************/
182 #ifdef CONFIG_ENCODERS
183 static void get_pixels_mmx(DCTELEM
*block
, const uint8_t *pixels
, int line_size
)
186 "movl $-128, %%eax \n\t"
187 "pxor %%mm7, %%mm7 \n\t"
190 "movq (%0), %%mm0 \n\t"
191 "movq (%0, %2), %%mm2 \n\t"
192 "movq %%mm0, %%mm1 \n\t"
193 "movq %%mm2, %%mm3 \n\t"
194 "punpcklbw %%mm7, %%mm0 \n\t"
195 "punpckhbw %%mm7, %%mm1 \n\t"
196 "punpcklbw %%mm7, %%mm2 \n\t"
197 "punpckhbw %%mm7, %%mm3 \n\t"
198 "movq %%mm0, (%1, %%eax)\n\t"
199 "movq %%mm1, 8(%1, %%eax)\n\t"
200 "movq %%mm2, 16(%1, %%eax)\n\t"
201 "movq %%mm3, 24(%1, %%eax)\n\t"
203 "addl $32, %%eax \n\t"
206 : "r" (block
+64), "r" (line_size
), "r" (line_size
*2)
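
/* Added for reference, a scalar sketch of what get_pixels_mmx computes
 * (not part of the original file): expand an 8x8 block of bytes into a
 * 64-entry array of 16-bit DCT coefficients, two rows per loop iteration.
 *
 *   static void get_pixels_c(DCTELEM *block, const uint8_t *pixels, int line_size)
 *   {
 *       int i, j;
 *       for (i = 0; i < 8; i++) {
 *           for (j = 0; j < 8; j++)
 *               block[i*8 + j] = pixels[j];
 *           pixels += line_size;
 *       }
 *   }
 */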
static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
    asm volatile(
        "pxor %%mm7, %%mm7          \n\t"
        "movl $-128, %%eax          \n\t"
        "1:                         \n\t"
        "movq (%0), %%mm0           \n\t"
        "movq (%1), %%mm2           \n\t"
        "movq %%mm0, %%mm1          \n\t"
        "movq %%mm2, %%mm3          \n\t"
        "punpcklbw %%mm7, %%mm0     \n\t"
        "punpckhbw %%mm7, %%mm1     \n\t"
        "punpcklbw %%mm7, %%mm2     \n\t"
        "punpckhbw %%mm7, %%mm3     \n\t"
        "psubw %%mm2, %%mm0         \n\t"
        "psubw %%mm3, %%mm1         \n\t"
        "movq %%mm0, (%2, %%eax)    \n\t"
        "movq %%mm1, 8(%2, %%eax)   \n\t"
        "addl %3, %0                \n\t"
        "addl %3, %1                \n\t"
        "addl $16, %%eax            \n\t"
        "jnz 1b                     \n\t"
        : "+r" (s1), "+r" (s2)
        : "r" (block+64), "r" (stride)
        : "%eax"
    );
}
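
/* Added for reference, a scalar sketch of what diff_pixels_mmx computes
 * (not part of the original file): per-pixel difference of two 8x8 byte
 * blocks, widened to 16-bit DCTELEMs.
 *
 *   static void diff_pixels_c(DCTELEM *block, const uint8_t *s1,
 *                             const uint8_t *s2, int stride)
 *   {
 *       int i, j;
 *       for (i = 0; i < 8; i++) {
 *           for (j = 0; j < 8; j++)
 *               block[i*8 + j] = s1[j] - s2[j];
 *           s1 += stride;
 *           s2 += stride;
 *       }
 *   }
 */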
#endif //CONFIG_ENCODERS
void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p   = block;
    pix = pixels;
    /* unrolled loop */
    __asm __volatile(
        "movq   %3, %%mm0\n\t"
        "movq   8%3, %%mm1\n\t"
        "movq   16%3, %%mm2\n\t"
        "movq   24%3, %%mm3\n\t"
        "movq   32%3, %%mm4\n\t"
        "movq   40%3, %%mm5\n\t"
        "movq   48%3, %%mm6\n\t"
        "movq   56%3, %%mm7\n\t"
        "packuswb %%mm1, %%mm0\n\t"
        "packuswb %%mm3, %%mm2\n\t"
        "packuswb %%mm5, %%mm4\n\t"
        "packuswb %%mm7, %%mm6\n\t"
        "movq   %%mm0, (%0)\n\t"
        "movq   %%mm2, (%0, %1)\n\t"
        "movq   %%mm4, (%0, %1, 2)\n\t"
        "movq   %%mm6, (%0, %2)\n\t"
        ::"r" (pix), "r" (line_size), "r" (line_size*3), "m"(*p)
        :"memory");
    pix += line_size*4;
    p   += 32;

    // an exact copy of the code above here would make the compiler
    // generate some very strange code, hence the "r" constraint
    __asm __volatile(
        "movq   (%3), %%mm0\n\t"
        "movq   8(%3), %%mm1\n\t"
        "movq   16(%3), %%mm2\n\t"
        "movq   24(%3), %%mm3\n\t"
        "movq   32(%3), %%mm4\n\t"
        "movq   40(%3), %%mm5\n\t"
        "movq   48(%3), %%mm6\n\t"
        "movq   56(%3), %%mm7\n\t"
        "packuswb %%mm1, %%mm0\n\t"
        "packuswb %%mm3, %%mm2\n\t"
        "packuswb %%mm5, %%mm4\n\t"
        "packuswb %%mm7, %%mm6\n\t"
        "movq   %%mm0, (%0)\n\t"
        "movq   %%mm2, (%0, %1)\n\t"
        "movq   %%mm4, (%0, %1, 2)\n\t"
        "movq   %%mm6, (%0, %2)\n\t"
        ::"r" (pix), "r" (line_size), "r" (line_size*3), "r"(p)
        :"memory");
}
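
/* Added note: packuswb performs the [0,255] clamping for free -- it packs
 * the signed 16-bit DCT coefficients down to bytes with unsigned saturation,
 * so no per-pixel compare or branch is needed. */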
void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p   = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm __volatile(
            "movq   (%2), %%mm0\n\t"
            "movq   8(%2), %%mm1\n\t"
            "movq   16(%2), %%mm2\n\t"
            "movq   24(%2), %%mm3\n\t"
            "movq   %0, %%mm4\n\t"
            "movq   %1, %%mm6\n\t"
            "movq   %%mm4, %%mm5\n\t"
            "punpcklbw %%mm7, %%mm4\n\t"
            "punpckhbw %%mm7, %%mm5\n\t"
            "paddsw %%mm4, %%mm0\n\t"
            "paddsw %%mm5, %%mm1\n\t"
            "movq   %%mm6, %%mm5\n\t"
            "punpcklbw %%mm7, %%mm6\n\t"
            "punpckhbw %%mm7, %%mm5\n\t"
            "paddsw %%mm6, %%mm2\n\t"
            "paddsw %%mm5, %%mm3\n\t"
            "packuswb %%mm1, %%mm0\n\t"
            "packuswb %%mm3, %%mm2\n\t"
            "movq   %%mm0, %0\n\t"
            "movq   %%mm2, %1\n\t"
            :"+m"(*pix), "+m"(*(pix+line_size))
            :"r"(p)
            :"memory");
        pix += line_size*2;
        p   += 16;
    } while (--i);
}
static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm __volatile(
        "lea (%3, %3), %%eax        \n\t"
        ".balign 8                  \n\t"
        "1:                         \n\t"
        "movq (%1), %%mm0           \n\t"
        "movq (%1, %3), %%mm1       \n\t"
        "movq %%mm0, (%2)           \n\t"
        "movq %%mm1, (%2, %3)       \n\t"
        "addl %%eax, %1             \n\t"
        "addl %%eax, %2             \n\t"
        "movq (%1), %%mm0           \n\t"
        "movq (%1, %3), %%mm1       \n\t"
        "movq %%mm0, (%2)           \n\t"
        "movq %%mm1, (%2, %3)       \n\t"
        "addl %%eax, %1             \n\t"
        "addl %%eax, %2             \n\t"
        "subl $4, %0                \n\t"
        "jnz 1b                     \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r" (line_size)
        : "%eax", "memory"
    );
}
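
/* Added for reference, a scalar sketch of put_pixels8 (not part of the
 * original file): each movq above copies one 8-byte row.
 *
 *   static void put_pixels8_c(uint8_t *block, const uint8_t *pixels,
 *                             int line_size, int h)
 *   {
 *       int i;
 *       for (i = 0; i < h; i++) {
 *           memcpy(block, pixels, 8);
 *           pixels += line_size;
 *           block  += line_size;
 *       }
 *   }
 */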
static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm __volatile(
        "lea (%3, %3), %%eax        \n\t"
        ".balign 8                  \n\t"
        "1:                         \n\t"
        "movq (%1), %%mm0           \n\t"
        "movq 8(%1), %%mm4          \n\t"
        "movq (%1, %3), %%mm1       \n\t"
        "movq 8(%1, %3), %%mm5      \n\t"
        "movq %%mm0, (%2)           \n\t"
        "movq %%mm4, 8(%2)          \n\t"
        "movq %%mm1, (%2, %3)       \n\t"
        "movq %%mm5, 8(%2, %3)      \n\t"
        "addl %%eax, %1             \n\t"
        "addl %%eax, %2             \n\t"
        "movq (%1), %%mm0           \n\t"
        "movq 8(%1), %%mm4          \n\t"
        "movq (%1, %3), %%mm1       \n\t"
        "movq 8(%1, %3), %%mm5      \n\t"
        "movq %%mm0, (%2)           \n\t"
        "movq %%mm4, 8(%2)          \n\t"
        "movq %%mm1, (%2, %3)       \n\t"
        "movq %%mm5, 8(%2, %3)      \n\t"
        "addl %%eax, %1             \n\t"
        "addl %%eax, %2             \n\t"
        "subl $4, %0                \n\t"
        "jnz 1b                     \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r" (line_size)
        : "%eax", "memory"
    );
}
static void clear_blocks_mmx(DCTELEM *blocks)
{
    __asm __volatile(
        "pxor %%mm7, %%mm7          \n\t"
        "movl $-128*6, %%eax        \n\t"
        "1:                         \n\t"
        "movq %%mm7, (%0, %%eax)    \n\t"
        "movq %%mm7, 8(%0, %%eax)   \n\t"
        "movq %%mm7, 16(%0, %%eax)  \n\t"
        "movq %%mm7, 24(%0, %%eax)  \n\t"
        "addl $32, %%eax            \n\t"
        " js 1b                     \n\t"
        : : "r" (((int)blocks)+128*6)
        : "%eax"
    );
}
#ifdef CONFIG_ENCODERS
static int pix_sum16_mmx(uint8_t * pix, int line_size){
    const int h=16;
    int sum;
    int index= -line_size*h;

    __asm __volatile(
        "pxor %%mm7, %%mm7          \n\t"
        "pxor %%mm6, %%mm6          \n\t"
        "1:                         \n\t"
        "movq (%2, %1), %%mm0       \n\t"
        "movq (%2, %1), %%mm1       \n\t"
        "movq 8(%2, %1), %%mm2      \n\t"
        "movq 8(%2, %1), %%mm3      \n\t"
        "punpcklbw %%mm7, %%mm0     \n\t"
        "punpckhbw %%mm7, %%mm1     \n\t"
        "punpcklbw %%mm7, %%mm2     \n\t"
        "punpckhbw %%mm7, %%mm3     \n\t"
        "paddw %%mm0, %%mm1         \n\t"
        "paddw %%mm2, %%mm3         \n\t"
        "paddw %%mm1, %%mm3         \n\t"
        "paddw %%mm3, %%mm6         \n\t"
        "addl %3, %1                \n\t"
        " js 1b                     \n\t"
        "movq %%mm6, %%mm5          \n\t"
        "psrlq $32, %%mm6           \n\t"
        "paddw %%mm5, %%mm6         \n\t"
        "movq %%mm6, %%mm5          \n\t"
        "psrlq $16, %%mm6           \n\t"
        "paddw %%mm5, %%mm6         \n\t"
        "movd %%mm6, %0             \n\t"
        "andl $0xFFFF, %0           \n\t"
        : "=&r" (sum), "+r" (index)
        : "r" (pix - index), "r" (line_size)
    );

    return sum;
}
#endif //CONFIG_ENCODERS
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    int i=0;
    asm volatile(
        "1:                         \n\t"
        "movq  (%1, %0), %%mm0      \n\t"
        "movq  (%2, %0), %%mm1      \n\t"
        "paddb %%mm0, %%mm1         \n\t"
        "movq %%mm1, (%2, %0)       \n\t"
        "movq 8(%1, %0), %%mm0      \n\t"
        "movq 8(%2, %0), %%mm1      \n\t"
        "paddb %%mm0, %%mm1         \n\t"
        "movq %%mm1, 8(%2, %0)      \n\t"
        "addl $16, %0               \n\t"
        "cmpl %3, %0                \n\t"
        " jb 1b                     \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"(w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}
#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7          \n\t"\
    "movq  %0, %%mm0            \n\t"\
    "movq  %0, %%mm1            \n\t"\
    "movq  %3, %%mm2            \n\t"\
    "movq  %3, %%mm3            \n\t"\
    "punpcklbw %%mm7, %%mm0     \n\t"\
    "punpckhbw %%mm7, %%mm1     \n\t"\
    "punpcklbw %%mm7, %%mm2     \n\t"\
    "punpckhbw %%mm7, %%mm3     \n\t"\
    "psubw %%mm2, %%mm0         \n\t"\
    "psubw %%mm3, %%mm1         \n\t"\
    "movq  %1, %%mm2            \n\t"\
    "movq  %1, %%mm3            \n\t"\
    "movq  %2, %%mm4            \n\t"\
    "movq  %2, %%mm5            \n\t"\
    "punpcklbw %%mm7, %%mm2     \n\t"\
    "punpckhbw %%mm7, %%mm3     \n\t"\
    "punpcklbw %%mm7, %%mm4     \n\t"\
    "punpckhbw %%mm7, %%mm5     \n\t"\
    "psubw %%mm2, %%mm4         \n\t"\
    "psubw %%mm3, %%mm5         \n\t"\
    "psllw $2, %%mm4            \n\t"\
    "psllw $2, %%mm5            \n\t"\
    "paddw %%mm0, %%mm4         \n\t"\
    "paddw %%mm1, %%mm5         \n\t"\
    "pxor %%mm6, %%mm6          \n\t"\
    "pcmpgtw %%mm4, %%mm6       \n\t"\
    "pcmpgtw %%mm5, %%mm7       \n\t"\
    "pxor %%mm6, %%mm4          \n\t"\
    "pxor %%mm7, %%mm5          \n\t"\
    "psubw %%mm6, %%mm4         \n\t"\
    "psubw %%mm7, %%mm5         \n\t"\
    "psrlw $3, %%mm4            \n\t"\
    "psrlw $3, %%mm5            \n\t"\
    "packuswb %%mm5, %%mm4      \n\t"\
    "packsswb %%mm7, %%mm6      \n\t"\
    "pxor %%mm7, %%mm7          \n\t"\
    "movd %4, %%mm2             \n\t"\
    "punpcklbw %%mm2, %%mm2     \n\t"\
    "punpcklbw %%mm2, %%mm2     \n\t"\
    "punpcklbw %%mm2, %%mm2     \n\t"\
    "psubusb %%mm4, %%mm2       \n\t"\
    "movq %%mm2, %%mm3          \n\t"\
    "psubusb %%mm4, %%mm3       \n\t"\
    "psubb %%mm3, %%mm2         \n\t"\
    "movq %1, %%mm3             \n\t"\
    "movq %2, %%mm4             \n\t"\
    "pxor %%mm6, %%mm3          \n\t"\
    "pxor %%mm6, %%mm4          \n\t"\
    "paddusb %%mm2, %%mm3       \n\t"\
    "psubusb %%mm2, %%mm4       \n\t"\
    "pxor %%mm6, %%mm3          \n\t"\
    "pxor %%mm6, %%mm4          \n\t"\
    "paddusb %%mm2, %%mm2       \n\t"\
    "packsswb %%mm1, %%mm0      \n\t"\
    "pcmpgtb %%mm0, %%mm7       \n\t"\
    "pxor %%mm7, %%mm0          \n\t"\
    "psubb %%mm7, %%mm0         \n\t"\
    "movq %%mm0, %%mm1          \n\t"\
    "psubusb %%mm2, %%mm0       \n\t"\
    "psubb %%mm0, %%mm1         \n\t"\
    "pand %5, %%mm1             \n\t"\
    "psrlw $2, %%mm1            \n\t"\
    "pxor %%mm7, %%mm1          \n\t"\
    "psubb %%mm7, %%mm1         \n\t"\
    "movq %0, %%mm5             \n\t"\
    "movq %3, %%mm6             \n\t"\
    "psubb %%mm1, %%mm5         \n\t"\
    "paddb %%mm1, %%mm6         \n\t"
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    const int strength= ff_h263_loop_filter_strength[qscale];

    asm volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1             \n\t"
        "movq %%mm4, %2             \n\t"
        "movq %%mm5, %0             \n\t"
        "movq %%mm6, %3             \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
}
static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd  %4, %%mm0            \n\t"
        "movd  %5, %%mm1            \n\t"
        "movd  %6, %%mm2            \n\t"
        "movd  %7, %%mm3            \n\t"
        "punpcklbw %%mm1, %%mm0     \n\t"
        "punpcklbw %%mm3, %%mm2     \n\t"
        "movq %%mm0, %%mm1          \n\t"
        "punpcklwd %%mm2, %%mm0     \n\t"
        "punpckhwd %%mm2, %%mm1     \n\t"
        "movd  %%mm0, %0            \n\t"
        "punpckhdq %%mm0, %%mm0     \n\t"
        "movd  %%mm0, %1            \n\t"
        "movd  %%mm1, %2            \n\t"
        "punpckhdq %%mm1, %%mm1     \n\t"
        "movd  %%mm1, %3            \n\t"

        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        : "m" (*(uint32_t*)(src + 0*src_stride)),
          "m" (*(uint32_t*)(src + 1*src_stride)),
          "m" (*(uint32_t*)(src + 2*src_stride)),
          "m" (*(uint32_t*)(src + 3*src_stride))
    );
}
static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    const int strength= ff_h263_loop_filter_strength[qscale];
    uint64_t temp[4] __attribute__ ((aligned(8)));
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp  , src           , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    asm volatile(
        H263_LOOP_FILTER // 5 3 4 6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    asm volatile(
        "movq %%mm5, %%mm1          \n\t"
        "movq %%mm4, %%mm0          \n\t"
        "punpcklbw %%mm3, %%mm5     \n\t"
        "punpcklbw %%mm6, %%mm4     \n\t"
        "punpckhbw %%mm3, %%mm1     \n\t"
        "punpckhbw %%mm6, %%mm0     \n\t"
        "movq %%mm5, %%mm3          \n\t"
        "movq %%mm1, %%mm6          \n\t"
        "punpcklwd %%mm4, %%mm5     \n\t"
        "punpcklwd %%mm0, %%mm1     \n\t"
        "punpckhwd %%mm4, %%mm3     \n\t"
        "punpckhwd %%mm0, %%mm6     \n\t"
        "movd %%mm5, %0             \n\t"
        "punpckhdq %%mm5, %%mm5     \n\t"
        "movd %%mm5, %1             \n\t"
        "movd %%mm3, %2             \n\t"
        "punpckhdq %%mm3, %%mm3     \n\t"
        "movd %%mm3, %3             \n\t"
        "movd %%mm1, %4             \n\t"
        "punpckhdq %%mm1, %%mm1     \n\t"
        "movd %%mm1, %5             \n\t"
        "movd %%mm6, %6             \n\t"
        "punpckhdq %%mm6, %%mm6     \n\t"
        "movd %%mm6, %7             \n\t"
        : "=m" (*(uint32_t*)(src + 0*stride)),
          "=m" (*(uint32_t*)(src + 1*stride)),
          "=m" (*(uint32_t*)(src + 2*stride)),
          "=m" (*(uint32_t*)(src + 3*stride)),
          "=m" (*(uint32_t*)(src + 4*stride)),
          "=m" (*(uint32_t*)(src + 5*stride)),
          "=m" (*(uint32_t*)(src + 6*stride)),
          "=m" (*(uint32_t*)(src + 7*stride))
    );
}
#ifdef CONFIG_ENCODERS
static int pix_norm1_mmx(uint8_t *pix, int line_size) {
    int tmp;
    asm volatile (
        "movl $16,%%ecx\n"
        "pxor %%mm0,%%mm0\n"
        "pxor %%mm7,%%mm7\n"
        "1:\n"
        "movq (%0),%%mm2\n"       /* mm2 = pix[0-7] */
        "movq 8(%0),%%mm3\n"      /* mm3 = pix[8-15] */

        "movq %%mm2,%%mm1\n"      /* mm1 = mm2 = pix[0-7] */

        "punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */
        "punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */

        "movq %%mm3,%%mm4\n"      /* mm4 = mm3 = pix[8-15] */
        "punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */
        "punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */

        "pmaddwd %%mm1,%%mm1\n"   /* mm1 = (pix4^2+pix5^2,pix6^2+pix7^2) */
        "pmaddwd %%mm2,%%mm2\n"   /* mm2 = (pix0^2+pix1^2,pix2^2+pix3^2) */

        "pmaddwd %%mm3,%%mm3\n"
        "pmaddwd %%mm4,%%mm4\n"

        "paddd %%mm1,%%mm2\n"     /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
                                            pix2^2+pix3^2+pix6^2+pix7^2) */
        "paddd %%mm3,%%mm4\n"
        "paddd %%mm2,%%mm7\n"

        "addl %2, %0\n"
        "paddd %%mm4,%%mm7\n"
        "decl %%ecx\n"
        "jnz 1b\n"

        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%1\n"
        : "+r" (pix), "=r"(tmp) : "r" (line_size) : "%ecx" );
    return tmp;
}
static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    asm volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
        "1:\n"
        "movq (%0),%%mm1\n"       /* mm1 = pix1[0-7] */
        "movq (%1),%%mm2\n"       /* mm2 = pix2[0-7] */
        "movq 8(%0),%%mm3\n"      /* mm3 = pix1[8-15] */
        "movq 8(%1),%%mm4\n"      /* mm4 = pix2[8-15] */

        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /*       OR the results to get absolute difference */
        "movq %%mm1,%%mm5\n"
        "movq %%mm3,%%mm6\n"
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"

        "por %%mm1,%%mm2\n"
        "por %%mm3,%%mm4\n"

        /* now convert to 16-bit vectors so we can square them */
        "movq %%mm2,%%mm1\n"
        "movq %%mm4,%%mm3\n"

        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */

        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"

        "addl %3,%0\n"
        "addl %3,%1\n"

        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"

        "decl %%ecx\n"
        "jnz 1b\n"

        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" (line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    int tmp;

    assert( (((int)pix) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
      "movq (%0), %%mm2\n"\
      "movq 8(%0), %%mm3\n"\
      "addl %2,%0\n"\
      "movq %%mm2, " #out0 "\n"\
      "movq %%mm3, " #out1 "\n"\
      "psubusb " #in0 ", %%mm2\n"\
      "psubusb " #in1 ", %%mm3\n"\
      "psubusb " #out0 ", " #in0 "\n"\
      "psubusb " #out1 ", " #in1 "\n"\
      "por %%mm2, " #in0 "\n"\
      "por %%mm3, " #in1 "\n"\
      "movq " #in0 ", %%mm2\n"\
      "movq " #in1 ", %%mm3\n"\
      "punpcklbw %%mm7, " #in0 "\n"\
      "punpcklbw %%mm7, " #in1 "\n"\
      "punpckhbw %%mm7, %%mm2\n"\
      "punpckhbw %%mm7, %%mm3\n"\
      "paddw " #in1 ", " #in0 "\n"\
      "paddw %%mm3, %%mm2\n"\
      "paddw %%mm2, " #in0 "\n"\
      "paddw " #in0 ", %%mm6\n"

  asm volatile (
      "movl %3,%%ecx\n"
      "pxor %%mm6,%%mm6\n"
      "pxor %%mm7,%%mm7\n"
      "movq (%0),%%mm0\n"
      "movq 8(%0),%%mm1\n"
      "addl %2,%0\n"
      "subl $2, %%ecx\n"
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
      "1:\n"

      SUM(%%mm4, %%mm5, %%mm0, %%mm1)

      SUM(%%mm0, %%mm1, %%mm4, %%mm5)

      "subl $2, %%ecx\n"
      "jnz 1b\n"

      "movq %%mm6,%%mm0\n"
      "psrlq $32, %%mm6\n"
      "paddw %%mm6,%%mm0\n"
      "movq %%mm0,%%mm6\n"
      "psrlq $16, %%mm6\n"
      "paddw %%mm6,%%mm0\n"
      "movd %%mm0,%1\n"
      : "+r" (pix), "=r"(tmp)
      : "r" (line_size) , "m" (h)
      : "%ecx");
    return tmp & 0xFFFF;
}
#undef SUM
static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    int tmp;

    assert( (((int)pix) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
      "movq (%0), " #out0 "\n"\
      "movq 8(%0), " #out1 "\n"\
      "addl %2,%0\n"\
      "psadbw " #out0 ", " #in0 "\n"\
      "psadbw " #out1 ", " #in1 "\n"\
      "paddw " #in1 ", " #in0 "\n"\
      "paddw " #in0 ", %%mm6\n"

  asm volatile (
      "movl %3,%%ecx\n"
      "pxor %%mm6,%%mm6\n"
      "pxor %%mm7,%%mm7\n"
      "movq (%0),%%mm0\n"
      "movq 8(%0),%%mm1\n"
      "addl %2,%0\n"
      "subl $2, %%ecx\n"
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
      "1:\n"

      SUM(%%mm4, %%mm5, %%mm0, %%mm1)

      SUM(%%mm0, %%mm1, %%mm4, %%mm5)

      "subl $2, %%ecx\n"
      "jnz 1b\n"

      "movd %%mm6,%1\n"
      : "+r" (pix), "=r"(tmp)
      : "r" (line_size) , "m" (h)
      : "%ecx");
    return tmp;
}
#undef SUM
static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;

    assert( (((int)pix1) & 7) == 0);
    assert( (((int)pix2) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
      "movq (%0),%%mm2\n"\
      "movq (%1)," #out0 "\n"\
      "movq 8(%0),%%mm3\n"\
      "movq 8(%1)," #out1 "\n"\
      "addl %3,%0\n"\
      "addl %3,%1\n"\
      "psubb " #out0 ", %%mm2\n"\
      "psubb " #out1 ", %%mm3\n"\
      "pxor %%mm7, %%mm2\n"\
      "pxor %%mm7, %%mm3\n"\
      "movq %%mm2, " #out0 "\n"\
      "movq %%mm3, " #out1 "\n"\
      "psubusb " #in0 ", %%mm2\n"\
      "psubusb " #in1 ", %%mm3\n"\
      "psubusb " #out0 ", " #in0 "\n"\
      "psubusb " #out1 ", " #in1 "\n"\
      "por %%mm2, " #in0 "\n"\
      "por %%mm3, " #in1 "\n"\
      "movq " #in0 ", %%mm2\n"\
      "movq " #in1 ", %%mm3\n"\
      "punpcklbw %%mm7, " #in0 "\n"\
      "punpcklbw %%mm7, " #in1 "\n"\
      "punpckhbw %%mm7, %%mm2\n"\
      "punpckhbw %%mm7, %%mm3\n"\
      "paddw " #in1 ", " #in0 "\n"\
      "paddw %%mm3, %%mm2\n"\
      "paddw %%mm2, " #in0 "\n"\
      "paddw " #in0 ", %%mm6\n"

  asm volatile (
      "movl %4,%%ecx\n"
      "pxor %%mm6,%%mm6\n"
      "pcmpeqw %%mm7,%%mm7\n"
      "psllw $15, %%mm7\n"
      "packsswb %%mm7, %%mm7\n"
      "movq (%0),%%mm0\n"
      "movq (%1),%%mm2\n"
      "movq 8(%0),%%mm1\n"
      "movq 8(%1),%%mm3\n"
      "addl %3,%0\n"
      "addl %3,%1\n"
      "subl $2, %%ecx\n"
      "psubb %%mm2, %%mm0\n"
      "psubb %%mm3, %%mm1\n"
      "pxor %%mm7, %%mm0\n"
      "pxor %%mm7, %%mm1\n"
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
      "1:\n"

      SUM(%%mm4, %%mm5, %%mm0, %%mm1)

      SUM(%%mm0, %%mm1, %%mm4, %%mm5)

      "subl $2, %%ecx\n"
      "jnz 1b\n"

      "movq %%mm6,%%mm0\n"
      "psrlq $32, %%mm6\n"
      "paddw %%mm6,%%mm0\n"
      "movq %%mm0,%%mm6\n"
      "psrlq $16, %%mm6\n"
      "paddw %%mm6,%%mm0\n"
      "movd %%mm0,%2\n"
      : "+r" (pix1), "+r" (pix2), "=r"(tmp)
      : "r" (line_size) , "m" (h)
      : "%ecx");
    return tmp & 0x7FFF;
}
#undef SUM
static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;

    assert( (((int)pix1) & 7) == 0);
    assert( (((int)pix2) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
      "movq (%0)," #out0 "\n"\
      "movq (%1),%%mm2\n"\
      "movq 8(%0)," #out1 "\n"\
      "movq 8(%1),%%mm3\n"\
      "addl %3,%0\n"\
      "addl %3,%1\n"\
      "psubb %%mm2, " #out0 "\n"\
      "psubb %%mm3, " #out1 "\n"\
      "pxor %%mm7, " #out0 "\n"\
      "pxor %%mm7, " #out1 "\n"\
      "psadbw " #out0 ", " #in0 "\n"\
      "psadbw " #out1 ", " #in1 "\n"\
      "paddw " #in1 ", " #in0 "\n"\
      "paddw " #in0 ", %%mm6\n"

  asm volatile (
      "movl %4,%%ecx\n"
      "pxor %%mm6,%%mm6\n"
      "pcmpeqw %%mm7,%%mm7\n"
      "psllw $15, %%mm7\n"
      "packsswb %%mm7, %%mm7\n"
      "movq (%0),%%mm0\n"
      "movq (%1),%%mm2\n"
      "movq 8(%0),%%mm1\n"
      "movq 8(%1),%%mm3\n"
      "addl %3,%0\n"
      "addl %3,%1\n"
      "subl $2, %%ecx\n"
      "psubb %%mm2, %%mm0\n"
      "psubb %%mm3, %%mm1\n"
      "pxor %%mm7, %%mm0\n"
      "pxor %%mm7, %%mm1\n"
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
      "1:\n"

      SUM(%%mm4, %%mm5, %%mm0, %%mm1)

      SUM(%%mm0, %%mm1, %%mm4, %%mm5)

      "subl $2, %%ecx\n"
      "jnz 1b\n"

      "movd %%mm6,%2\n"
      : "+r" (pix1), "+r" (pix2), "=r"(tmp)
      : "r" (line_size) , "m" (h)
      : "%ecx");
    return tmp;
}
#undef SUM
static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    int i=0;
    asm volatile(
        "1:                         \n\t"
        "movq  (%2, %0), %%mm0      \n\t"
        "movq  (%1, %0), %%mm1      \n\t"
        "psubb %%mm0, %%mm1         \n\t"
        "movq %%mm1, (%3, %0)       \n\t"
        "movq 8(%2, %0), %%mm0      \n\t"
        "movq 8(%1, %0), %%mm1      \n\t"
        "psubb %%mm0, %%mm1         \n\t"
        "movq %%mm1, 8(%3, %0)      \n\t"
        "addl $16, %0               \n\t"
        "cmpl %4, %0                \n\t"
        " jb 1b                     \n\t"
        : "+r" (i)
        : "r"(src1), "r"(src2), "r"(dst), "r"(w-15)
    );
    for(; i<w; i++)
        dst[i+0] = src1[i+0]-src2[i+0];
}
static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){
    int i=0;
    uint8_t l, lt;

    asm volatile(
        "1:                         \n\t"
        "movq  -1(%1, %0), %%mm0    \n\t" // LT
        "movq  (%1, %0), %%mm1      \n\t" // T
        "movq  -1(%2, %0), %%mm2    \n\t" // L
        "movq  (%2, %0), %%mm3      \n\t" // X
        "movq %%mm2, %%mm4          \n\t" // L
        "psubb %%mm0, %%mm2         \n\t"
        "paddb %%mm1, %%mm2         \n\t" // L + T - LT
        "movq %%mm4, %%mm5          \n\t" // L
        "pmaxub %%mm1, %%mm4        \n\t" // max(T, L)
        "pminub %%mm5, %%mm1        \n\t" // min(T, L)
        "pminub %%mm2, %%mm4        \n\t"
        "pmaxub %%mm1, %%mm4        \n\t"
        "psubb %%mm4, %%mm3         \n\t" // dst - pred
        "movq %%mm3, (%3, %0)       \n\t"
        "addl $8, %0                \n\t"
        "cmpl %4, %0                \n\t"
        " jb 1b                     \n\t"
        : "+r" (i)
        : "r"(src1), "r"(src2), "r"(dst), "r"(w)
    );

    l= *left;
    lt= *left_top;

    dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF);

    *left_top= src1[w-1];
    *left    = src2[w-1];
}
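
/* Added note: the pmaxub/pminub sequence above computes the HuffYUV median
 * predictor median(L, T, L+T-LT) branch-free: with mn = min(L,T) and
 * mx = max(L,T), the median of the three values is max(mn, min(mx, L+T-LT)).
 */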
#define LBUTTERFLY2(a1,b1,a2,b2)\
    "paddw " #b1 ", " #a1 " \n\t"\
    "paddw " #b2 ", " #a2 " \n\t"\
    "paddw " #b1 ", " #b1 " \n\t"\
    "paddw " #b2 ", " #b2 " \n\t"\
    "psubw " #a1 ", " #b1 " \n\t"\
    "psubw " #a2 ", " #b2 " \n\t"
#define HADAMARD48\
        LBUTTERFLY2(%%mm0, %%mm1, %%mm2, %%mm3)\
        LBUTTERFLY2(%%mm4, %%mm5, %%mm6, %%mm7)\
        LBUTTERFLY2(%%mm0, %%mm2, %%mm1, %%mm3)\
        LBUTTERFLY2(%%mm4, %%mm6, %%mm5, %%mm7)\
        LBUTTERFLY2(%%mm0, %%mm4, %%mm1, %%mm5)\
        LBUTTERFLY2(%%mm2, %%mm6, %%mm3, %%mm7)
1069 "pxor " #z ", " #z " \n\t"\
1070 "pcmpgtw " #a ", " #z " \n\t"\
1071 "pxor " #z ", " #a " \n\t"\
1072 "psubw " #z ", " #a " \n\t"
#define MMABS_SUM(a,z, sum)\
    "pxor " #z ", " #z "        \n\t"\
    "pcmpgtw " #a ", " #z "     \n\t"\
    "pxor " #z ", " #a "        \n\t"\
    "psubw " #z ", " #a "       \n\t"\
    "paddusw " #a ", " #sum "   \n\t"

#define MMABS_MMX2(a,z)\
    "pxor " #z ", " #z "        \n\t"\
    "psubw " #a ", " #z "       \n\t"\
    "pmaxsw " #z ", " #a "      \n\t"

#define MMABS_SUM_MMX2(a,z, sum)\
    "pxor " #z ", " #z "        \n\t"\
    "psubw " #a ", " #z "       \n\t"\
    "pmaxsw " #z ", " #a "      \n\t"\
    "paddusw " #a ", " #sum "   \n\t"
#define SBUTTERFLY(a,b,t,n)\
    "movq " #a ", " #t "        \n\t" /* abcd */\
    "punpckl" #n " " #b ", " #a " \n\t" /* aebf */\
    "punpckh" #n " " #b ", " #t " \n\t" /* cgdh */\

#define TRANSPOSE4(a,b,c,d,t)\
    SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\
    SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\
    SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\
    SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */

#define LOAD4(o, a, b, c, d)\
    "movq "#o"(%1), " #a "      \n\t"\
    "movq "#o"+16(%1), " #b "   \n\t"\
    "movq "#o"+32(%1), " #c "   \n\t"\
    "movq "#o"+48(%1), " #d "   \n\t"

#define STORE4(o, a, b, c, d)\
    "movq "#a", "#o"(%1)        \n\t"\
    "movq "#b", "#o"+16(%1)     \n\t"\
    "movq "#c", "#o"+32(%1)     \n\t"\
    "movq "#d", "#o"+48(%1)     \n\t"\
static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
    uint64_t temp[16] __align8;
    int sum=0;

    assert(h==8);

    diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);

    asm volatile(
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 112(%1)        \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 112(%1), %%mm7        \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)

        LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 120(%1)        \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 120(%1), %%mm7        \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        "movq %%mm7, %%mm5          \n\t"//FIXME remove
        "movq %%mm6, %%mm7          \n\t"
        "movq %%mm0, %%mm6          \n\t"
//        STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove

        LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
//        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, 64(%1)         \n\t"
        MMABS(%%mm0, %%mm7)
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        MMABS_SUM(%%mm2, %%mm7, %%mm0)
        MMABS_SUM(%%mm3, %%mm7, %%mm0)
        MMABS_SUM(%%mm4, %%mm7, %%mm0)
        MMABS_SUM(%%mm5, %%mm7, %%mm0)
        MMABS_SUM(%%mm6, %%mm7, %%mm0)
        "movq 64(%1), %%mm1         \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        "movq %%mm0, 64(%1)         \n\t"

        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, (%1)           \n\t"
        MMABS(%%mm0, %%mm7)
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        MMABS_SUM(%%mm2, %%mm7, %%mm0)
        MMABS_SUM(%%mm3, %%mm7, %%mm0)
        MMABS_SUM(%%mm4, %%mm7, %%mm0)
        MMABS_SUM(%%mm5, %%mm7, %%mm0)
        MMABS_SUM(%%mm6, %%mm7, %%mm0)
        "movq (%1), %%mm1           \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        "movq 64(%1), %%mm1         \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)

        "movq %%mm0, %%mm1          \n\t"
        "psrlq $32, %%mm0           \n\t"
        "paddusw %%mm1, %%mm0       \n\t"
        "movq %%mm0, %%mm1          \n\t"
        "psrlq $16, %%mm0           \n\t"
        "paddusw %%mm1, %%mm0       \n\t"
        "movd %%mm0, %0             \n\t"

        : "=r" (sum)
        : "r"(temp)
    );
    return sum&0xFFFF;
}
static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
    uint64_t temp[16] __align8;
    int sum=0;

    assert(h==8);

    diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);

    asm volatile(
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 112(%1)        \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 112(%1), %%mm7        \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)

        LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 120(%1)        \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 120(%1), %%mm7        \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        "movq %%mm7, %%mm5          \n\t"//FIXME remove
        "movq %%mm6, %%mm7          \n\t"
        "movq %%mm0, %%mm6          \n\t"
//        STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove

        LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
//        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, 64(%1)         \n\t"
        MMABS_MMX2(%%mm0, %%mm7)
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
        "movq 64(%1), %%mm1         \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        "movq %%mm0, 64(%1)         \n\t"

        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, (%1)           \n\t"
        MMABS_MMX2(%%mm0, %%mm7)
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
        "movq (%1), %%mm1           \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        "movq 64(%1), %%mm1         \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)

        "movq %%mm0, %%mm1          \n\t"
        "psrlq $32, %%mm0           \n\t"
        "paddusw %%mm1, %%mm0       \n\t"
        "movq %%mm0, %%mm1          \n\t"
        "psrlq $16, %%mm0           \n\t"
        "paddusw %%mm1, %%mm0       \n\t"
        "movd %%mm0, %0             \n\t"

        : "=r" (sum)
        : "r"(temp)
    );
    return sum&0xFFFF;
}
WARPER8_16_SQ(hadamard8_diff_mmx, hadamard8_diff16_mmx)
WARPER8_16_SQ(hadamard8_diff_mmx2, hadamard8_diff16_mmx2)
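
/* Added note: WARPER8_16_SQ (from dsputil.h) builds a 16-pixel-wide
 * comparison function out of an 8x8 one by summing the 8x8 function over
 * the 8x8 sub-blocks of the 16x8 or 16x16 area. */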
#endif //CONFIG_ENCODERS

#define put_no_rnd_pixels8_mmx(a,b,c,d) put_pixels8_mmx(a,b,c,d)
#define put_no_rnd_pixels16_mmx(a,b,c,d) put_pixels16_mmx(a,b,c,d)
#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
    "paddw " #m4 ", " #m3 "           \n\t" /* x1 */\
    "movq "MANGLE(ff_pw_20)", %%mm4   \n\t" /* 20 */\
    "pmullw " #m3 ", %%mm4            \n\t" /* 20x1 */\
    "movq "#in7", " #m3 "             \n\t" /* d */\
    "movq "#in0", %%mm5               \n\t" /* D */\
    "paddw " #m3 ", %%mm5             \n\t" /* x4 */\
    "psubw %%mm5, %%mm4               \n\t" /* 20x1 - x4 */\
    "movq "#in1", %%mm5               \n\t" /* C */\
    "movq "#in2", %%mm6               \n\t" /* B */\
    "paddw " #m6 ", %%mm5             \n\t" /* x3 */\
    "paddw " #m5 ", %%mm6             \n\t" /* x2 */\
    "paddw %%mm6, %%mm6               \n\t" /* 2x2 */\
    "psubw %%mm6, %%mm5               \n\t" /* -2x2 + x3 */\
    "pmullw "MANGLE(ff_pw_3)", %%mm5  \n\t" /* -6x2 + 3x3 */\
    "paddw " #rnd ", %%mm4            \n\t" /* 20x1 - x4 + rnd */\
    "paddw %%mm4, %%mm5               \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
    "psraw $5, %%mm5                  \n\t"\
    "packuswb %%mm5, %%mm5            \n\t"\
    OP(%%mm5, out, %%mm7, d)
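
/* Added note: QPEL_V_LOW evaluates one row of the MPEG-4 quarter-pel
 * half-sample filter with tap weights (20, -6, 3, -1)/32:
 *   out = (20*x1 - 6*x2 + 3*x3 - x4 + rnd) >> 5
 * where x1..x4 are sums of symmetric tap pairs; -6x2 and 3x3 share one
 * pmullw by ff_pw_3, since 3*x3 - 6*x2 = 3*(x3 - 2*x2). */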
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                               \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5               \n\t" /* b */\
        "paddw %%mm2, %%mm6               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0               \n\t" /* a */\
        "paddw %%mm1, %%mm5               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
        "paddw %6, %%mm6                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        "movq %%mm0, %5                   \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
        \
        "movq 5(%0), %%mm0                \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5                \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6                \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0                  \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5                 \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2               \n\t" /* b */\
        "paddw %%mm5, %%mm3               \n\t" /* c */\
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2                \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6                 \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6           \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1               \n\t" /* a */\
        "paddw %%mm6, %%mm4               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3               \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1                  \n\t"\
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3                  \n\t"\
        "movq %5, %%mm1                   \n\t"\
        "packuswb %%mm3, %%mm1            \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
        \
        "movq 9(%0), %%mm1                \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4                \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3                \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1                  \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4                 \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1           \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4           \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5               \n\t" /* b */\
        "paddw %%mm4, %%mm0               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm0               \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5                \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3                 \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0  \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3           \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2               \n\t" /* d */\
        "psubw %%mm2, %%mm0               \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2                \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5           \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6               \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
        \
        "paddw %%mm5, %%mm3               \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6               \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4       \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4               \n\t" /* c */\
        "paddw %%mm2, %%mm5               \n\t" /* d */\
        "paddw %%mm6, %%mm6               \n\t" /* 2b */\
        "psubw %%mm6, %%mm4               \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4  \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3               \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm4                  \n\t"\
        "paddw %%mm3, %%mm4               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4                  \n\t"\
        "packuswb %%mm4, %%mm0            \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
        \
        "addl %3, %0                      \n\t"\
        "addl %4, %1                      \n\t"\
        "decl %2                          \n\t"\
        " jnz 1b                          \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "d"(srcStride), "S"(dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        asm volatile(\
            "movq (%0), %%mm0           \n\t"\
            "movq 8(%0), %%mm1          \n\t"\
            "paddw %2, %%mm0            \n\t"\
            "paddw %2, %%mm1            \n\t"\
            "psraw $5, %%mm0            \n\t"\
            "psraw $5, %%mm1            \n\t"\
            "packuswb %%mm1, %%mm0      \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0         \n\t"\
            "movq 24(%0), %%mm1         \n\t"\
            "paddw %2, %%mm0            \n\t"\
            "paddw %2, %%mm1            \n\t"\
            "psraw $5, %%mm0            \n\t"\
            "psraw $5, %%mm1            \n\t"\
            "packuswb %%mm1, %%mm0      \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                               \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5               \n\t" /* b */\
        "paddw %%mm2, %%mm6               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0               \n\t" /* a */\
        "paddw %%mm1, %%mm5               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
        "paddw %6, %%mm6                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
        \
        "movd 5(%0), %%mm5                \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1               \n\t" /* a */\
        "paddw %%mm6, %%mm2               \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6       \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3               \n\t" /* c */\
        "paddw %%mm5, %%mm4               \n\t" /* d */\
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3               \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm1                  \n\t"\
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3                  \n\t"\
        "packuswb %%mm3, %%mm0            \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
        \
        "addl %3, %0                      \n\t"\
        "addl %4, %1                      \n\t"\
        "decl %2                          \n\t"\
        " jnz 1b                          \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "S"(srcStride), "D"(dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        asm volatile(\
            "movq (%0), %%mm0           \n\t"\
            "movq 8(%0), %%mm1          \n\t"\
            "paddw %2, %%mm0            \n\t"\
            "paddw %2, %%mm1            \n\t"\
            "psraw $5, %%mm0            \n\t"\
            "psraw $5, %%mm1            \n\t"\
            "packuswb %%mm1, %%mm0      \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    /*FIXME unroll */\
    asm volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "1:                         \n\t"\
        "movq (%0), %%mm0           \n\t"\
        "movq (%0), %%mm1           \n\t"\
        "movq 8(%0), %%mm2          \n\t"\
        "movq 8(%0), %%mm3          \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpckhbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "movq %%mm0, (%1)           \n\t"\
        "movq %%mm1, 17*8(%1)       \n\t"\
        "movq %%mm2, 2*17*8(%1)     \n\t"\
        "movq %%mm3, 3*17*8(%1)     \n\t"\
        "addl $8, %1                \n\t"\
        "addl %3, %0                \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" (srcStride)\
        : "memory"\
    );\
    \
    temp_ptr= temp;\
    count=4;\
    \
/*FIXME reorder for speed */\
    asm volatile(\
        /*"pxor %%mm7, %%mm7              \n\t"*/\
        "1:                         \n\t"\
        "movq (%0), %%mm0           \n\t"\
        "movq 8(%0), %%mm1          \n\t"\
        "movq 16(%0), %%mm2         \n\t"\
        "movq 24(%0), %%mm3         \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "addl %4, %1                \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "addl %4, %1                \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "addl %4, %1                \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "addl %4, %1                \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "addl %4, %1                \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "addl %4, %1                \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "addl %4, %1                \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
        \
        "addl $136, %0              \n\t"\
        "addl %6, %1                \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        \
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"(dstStride), "r"(2*dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*4];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    /*FIXME unroll */\
    asm volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "1:                         \n\t"\
        "movq (%0), %%mm0           \n\t"\
        "movq (%0), %%mm1           \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpckhbw %%mm7, %%mm1     \n\t"\
        "movq %%mm0, (%1)           \n\t"\
        "movq %%mm1, 9*8(%1)        \n\t"\
        "addl $8, %1                \n\t"\
        "addl %3, %0                \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" (srcStride)\
        : "memory"\
    );\
    \
    temp_ptr= temp;\
    count=2;\
    \
/*FIXME reorder for speed */\
    asm volatile(\
        /*"pxor %%mm7, %%mm7              \n\t"*/\
        "1:                         \n\t"\
        "movq (%0), %%mm0           \n\t"\
        "movq 8(%0), %%mm1          \n\t"\
        "movq 16(%0), %%mm2         \n\t"\
        "movq 24(%0), %%mm3         \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "addl %4, %1                \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "addl %4, %1                \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "addl %4, %1                \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
        \
        "addl $72, %0               \n\t"\
        "addl %6, %1                \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        \
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"(dstStride), "r"(2*dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*dstStride)\
        : "memory"\
    );\
}\
\
1723 static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
1724 OPNAME ## pixels8_mmx(dst, src, stride, 8);\
1727 static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1729 uint8_t * const half= (uint8_t*)temp;\
1730 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
1731 OPNAME ## pixels8_l2_mmx(dst, src, half, stride, stride, 8);\
1734 static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1735 OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
1738 static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1740 uint8_t * const half= (uint8_t*)temp;\
1741 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
1742 OPNAME ## pixels8_l2_mmx(dst, src+1, half, stride, stride, 8);\
1745 static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1747 uint8_t * const half= (uint8_t*)temp;\
1748 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
1749 OPNAME ## pixels8_l2_mmx(dst, src, half, stride, stride, 8);\
1752 static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1753 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
1756 static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1758 uint8_t * const half= (uint8_t*)temp;\
1759 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
1760 OPNAME ## pixels8_l2_mmx(dst, src+stride, half, stride, stride, 8);\
1762 static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1763 uint64_t half[8 + 9];\
1764 uint8_t * const halfH= ((uint8_t*)half) + 64;\
1765 uint8_t * const halfHV= ((uint8_t*)half);\
1766 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1767 put ## RND ## pixels8_l2_mmx(halfH, src, halfH, 8, stride, 9);\
1768 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
1769 OPNAME ## pixels8_l2_mmx(dst, halfH, halfHV, stride, 8, 8);\
1771 static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1772 uint64_t half[8 + 9];\
1773 uint8_t * const halfH= ((uint8_t*)half) + 64;\
1774 uint8_t * const halfHV= ((uint8_t*)half);\
1775 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1776 put ## RND ## pixels8_l2_mmx(halfH, src+1, halfH, 8, stride, 9);\
1777 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
1778 OPNAME ## pixels8_l2_mmx(dst, halfH, halfHV, stride, 8, 8);\
1780 static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1781 uint64_t half[8 + 9];\
1782 uint8_t * const halfH= ((uint8_t*)half) + 64;\
1783 uint8_t * const halfHV= ((uint8_t*)half);\
1784 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1785 put ## RND ## pixels8_l2_mmx(halfH, src, halfH, 8, stride, 9);\
1786 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
1787 OPNAME ## pixels8_l2_mmx(dst, halfH+8, halfHV, stride, 8, 8);\
1789 static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1790 uint64_t half[8 + 9];\
1791 uint8_t * const halfH= ((uint8_t*)half) + 64;\
1792 uint8_t * const halfHV= ((uint8_t*)half);\
1793 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1794 put ## RND ## pixels8_l2_mmx(halfH, src+1, halfH, 8, stride, 9);\
1795 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
1796 OPNAME ## pixels8_l2_mmx(dst, halfH+8, halfHV, stride, 8, 8);\
1798 static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1799 uint64_t half[8 + 9];\
1800 uint8_t * const halfH= ((uint8_t*)half) + 64;\
1801 uint8_t * const halfHV= ((uint8_t*)half);\
1802 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1803 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
1804 OPNAME ## pixels8_l2_mmx(dst, halfH, halfHV, stride, 8, 8);\
1806 static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1807 uint64_t half[8 + 9];\
1808 uint8_t * const halfH= ((uint8_t*)half) + 64;\
1809 uint8_t * const halfHV= ((uint8_t*)half);\
1810 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1811 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
1812 OPNAME ## pixels8_l2_mmx(dst, halfH+8, halfHV, stride, 8, 8);\
1814 static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1815 uint64_t half[8 + 9];\
1816 uint8_t * const halfH= ((uint8_t*)half);\
1817 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1818 put ## RND ## pixels8_l2_mmx(halfH, src, halfH, 8, stride, 9);\
1819 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
1821 static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1822 uint64_t half[8 + 9];\
1823 uint8_t * const halfH= ((uint8_t*)half);\
1824 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1825 put ## RND ## pixels8_l2_mmx(halfH, src+1, halfH, 8, stride, 9);\
1826 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
1828 static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1830 uint8_t * const halfH= ((uint8_t*)half);\
1831 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1832 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
1834 static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
1835 OPNAME ## pixels16_mmx(dst, src, stride, 16);\
1838 static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1840 uint8_t * const half= (uint8_t*)temp;\
1841 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
1842 OPNAME ## pixels16_l2_mmx(dst, src, half, stride, stride, 16);\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_mmx(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_mmx(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_mmx(dst, src+stride, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH, halfHV, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH, halfHV, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH+16, halfHV, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH+16, halfHV, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH, halfHV, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH+16, halfHV, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src+1, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}
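
/*
 * The OP argument of these macros supplies the final write-back step of
 * each generated function: PUT_OP below is a plain store, while the AVG
 * variants first load the current destination pixels into a scratch
 * register and average them in (pavgb on MMX2, pavgusb on 3DNow!; both
 * compute the rounded byte-wise average (a+b+1)>>1).  The "size" argument
 * is pasted onto "mov", so the same macro serves movd- and movq-sized
 * writes.
 */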
#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"

#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgusb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"

#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"
QPEL_BASE(put_       , ff_pw_16, _       , PUT_OP, PUT_OP)
QPEL_BASE(avg_       , ff_pw_16, _       , AVG_MMX2_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, 3dnow)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, mmx2)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_MMX2_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
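
/*
 * Each line above instantiates the complete family of quarter-pel motion
 * compensation functions for one rounding mode and one instruction set;
 * e.g. QPEL_OP(put_, ff_pw_16, _, PUT_OP, mmx2) produces
 * put_qpel16_mc20_mmx2 ... put_qpel16_mc33_mmx2 plus the qpel8 versions.
 * In the mcXY names, X is the horizontal and Y the vertical offset in
 * quarter-pel units; the ff_pw_16 vs ff_pw_15 rounder picks rounding vs
 * truncation in the lowpass filters.
 */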
static void just_return() { return; }
#define SET_QPEL_FUNC(postfix1, postfix2) \
    c->put_ ## postfix1 = put_ ## postfix2;\
    c->put_no_rnd_ ## postfix1 = put_no_rnd_ ## postfix2;\
    c->avg_ ## postfix1 = avg_ ## postfix2;
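
/*
 * Example expansion, for illustration:
 *     SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_mmx2)
 * becomes
 *     c->put_qpel_pixels_tab[0][ 5]        = put_qpel16_mc11_mmx2;
 *     c->put_no_rnd_qpel_pixels_tab[0][ 5] = put_no_rnd_qpel16_mc11_mmx2;
 *     c->avg_qpel_pixels_tab[0][ 5]        = avg_qpel16_mc11_mmx2;
 * i.e. one invocation wires the put, put_no_rnd and avg variants of a
 * single table slot at once.
 */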
/* external functions, from idct_mmx.c */
void ff_mmx_idct(DCTELEM *block);
void ff_mmxext_idct(DCTELEM *block);
/* XXX: those functions should be suppressed ASAP when all IDCTs are
   converted */
static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
    mm_flags = mm_support();

    if (avctx->dsp_mask) {
        if (avctx->dsp_mask & FF_MM_FORCE)
            mm_flags |= (avctx->dsp_mask & 0xffff);
        else
            mm_flags &= ~(avctx->dsp_mask & 0xffff);
    }
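
    /*
     * dsp_mask thus lets the application override autodetection: with
     * FF_MM_FORCE set, the low 16 bits are OR'd into mm_flags
     * (force-enable); without it, the given bits are cleared.  For
     * example:
     *     avctx->dsp_mask = FF_MM_FORCE | MM_MMX;  // force plain MMX on
     *     avctx->dsp_mask = MM_MMXEXT;             // disable the MMX2 paths
     */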
#if 0
    fprintf(stderr, "libavcodec: CPU flags:");
    if (mm_flags & MM_MMX)
        fprintf(stderr, " mmx");
    if (mm_flags & MM_MMXEXT)
        fprintf(stderr, " mmxext");
    if (mm_flags & MM_3DNOW)
        fprintf(stderr, " 3dnow");
    if (mm_flags & MM_SSE)
        fprintf(stderr, " sse");
    if (mm_flags & MM_SSE2)
        fprintf(stderr, " sse2");
    fprintf(stderr, "\n");
#endif
    if (mm_flags & MM_MMX) {
        const int dct_algo = avctx->dct_algo;
        const int idct_algo= avctx->idct_algo;

#ifdef CONFIG_ENCODERS
        if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
            if(mm_flags & MM_SSE2){
                c->fdct = ff_fdct_sse2;
            }else if(mm_flags & MM_MMXEXT){
                c->fdct = ff_fdct_mmx2;
            }else{
                c->fdct = ff_fdct_mmx;
            }
        }
#endif //CONFIG_ENCODERS
        if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
            c->idct_put= ff_simple_idct_put_mmx;
            c->idct_add= ff_simple_idct_add_mmx;
            c->idct    = ff_simple_idct_mmx;
            c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
        }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
            if(mm_flags & MM_MMXEXT){
                c->idct_put= ff_libmpeg2mmx2_idct_put;
                c->idct_add= ff_libmpeg2mmx2_idct_add;
                c->idct    = ff_mmxext_idct;
            }else{
                c->idct_put= ff_libmpeg2mmx_idct_put;
                c->idct_add= ff_libmpeg2mmx_idct_add;
                c->idct    = ff_mmx_idct;
            }
            c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
        }
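
        /*
         * idct_permutation_type records the coefficient order the selected
         * IDCT expects, so the generic init code can permute scantables and
         * quantizer matrices once at setup instead of reordering every
         * block at decode time.
         */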

#ifdef CONFIG_ENCODERS
        c->get_pixels = get_pixels_mmx;
        c->diff_pixels = diff_pixels_mmx;
#endif //CONFIG_ENCODERS
        c->put_pixels_clamped = put_pixels_clamped_mmx;
        c->add_pixels_clamped = add_pixels_clamped_mmx;
        c->clear_blocks = clear_blocks_mmx;
#ifdef CONFIG_ENCODERS
        c->pix_sum = pix_sum16_mmx;
#endif //CONFIG_ENCODERS

        c->put_pixels_tab[0][0] = put_pixels16_mmx;
        c->put_pixels_tab[0][1] = put_pixels16_x2_mmx;
        c->put_pixels_tab[0][2] = put_pixels16_y2_mmx;
        c->put_pixels_tab[0][3] = put_pixels16_xy2_mmx;

        c->put_no_rnd_pixels_tab[0][0] = put_pixels16_mmx;
        c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx;
        c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx;
        c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_mmx;

        c->avg_pixels_tab[0][0] = avg_pixels16_mmx;
        c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx;
        c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx;
        c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx;

        c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_mmx;
        c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x2_mmx;
        c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_mmx;
        c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_mmx;

        c->put_pixels_tab[1][0] = put_pixels8_mmx;
        c->put_pixels_tab[1][1] = put_pixels8_x2_mmx;
        c->put_pixels_tab[1][2] = put_pixels8_y2_mmx;
        c->put_pixels_tab[1][3] = put_pixels8_xy2_mmx;

        c->put_no_rnd_pixels_tab[1][0] = put_pixels8_mmx;
        c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx;
        c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx;
        c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_mmx;

        c->avg_pixels_tab[1][0] = avg_pixels8_mmx;
        c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx;
        c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx;
        c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx;

        c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_mmx;
        c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x2_mmx;
        c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y2_mmx;
        c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy2_mmx;

        c->add_bytes = add_bytes_mmx;
#ifdef CONFIG_ENCODERS
        c->diff_bytes = diff_bytes_mmx;

        c->hadamard8_diff[0]= hadamard8_diff16_mmx;
        c->hadamard8_diff[1]= hadamard8_diff_mmx;

        c->pix_norm1 = pix_norm1_mmx;
        c->sse[0] = sse16_mmx;
        c->vsad[4]= vsad_intra16_mmx;

        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->vsad[0] = vsad16_mmx;
        }
#endif //CONFIG_ENCODERS

        c->h263_v_loop_filter= h263_v_loop_filter_mmx;
        c->h263_h_loop_filter= h263_h_loop_filter_mmx;

        if (mm_flags & MM_MMXEXT) {
            c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
            c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;

            c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;

            c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
            c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;

            c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;

#ifdef CONFIG_ENCODERS
            c->hadamard8_diff[0]= hadamard8_diff16_mmx2;
            c->hadamard8_diff[1]= hadamard8_diff_mmx2;
            c->vsad[4]= vsad_intra16_mmx2;
#endif //CONFIG_ENCODERS

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
                c->vsad[0] = vsad16_mmx2;
            }

            SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_mmx2)

#ifdef CONFIG_ENCODERS
            c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
#endif //CONFIG_ENCODERS
        } else if (mm_flags & MM_3DNOW) {
            c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
            c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

            c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

            c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
            c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;

            c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
            }

            SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_3dnow)
        }
    }

#ifdef CONFIG_ENCODERS
    dsputil_init_pix_mmx(c, avctx);
#endif //CONFIG_ENCODERS

#if 0
    // for speed testing
    get_pixels = just_return;
    put_pixels_clamped = just_return;
    add_pixels_clamped = just_return;

    pix_abs16x16 = just_return;
    pix_abs16x16_x2 = just_return;
    pix_abs16x16_y2 = just_return;
    pix_abs16x16_xy2 = just_return;

    put_pixels_tab[0] = just_return;
    put_pixels_tab[1] = just_return;
    put_pixels_tab[2] = just_return;
    put_pixels_tab[3] = just_return;

    put_no_rnd_pixels_tab[0] = just_return;
    put_no_rnd_pixels_tab[1] = just_return;
    put_no_rnd_pixels_tab[2] = just_return;
    put_no_rnd_pixels_tab[3] = just_return;

    avg_pixels_tab[0] = just_return;
    avg_pixels_tab[1] = just_return;
    avg_pixels_tab[2] = just_return;
    avg_pixels_tab[3] = just_return;

    avg_no_rnd_pixels_tab[0] = just_return;
    avg_no_rnd_pixels_tab[1] = just_return;
    avg_no_rnd_pixels_tab[2] = just_return;
    avg_no_rnd_pixels_tab[3] = just_return;

    //av_fdct = just_return;
    //ff_idct = just_return;
#endif
}