/*	MultiMedia eXtensions GCC interface library for IA32.

	To use this library, simply include this header file
	and compile with GCC.  You MUST have inlining enabled
	in order for mmx_ok() to work; this can be done by
	simply using -O on the GCC command line.

	Compiling with -DMMX_TRACE will cause detailed trace
	output to be sent to stderr for each mmx operation.
	This adds lots of code, and obviously slows execution to
	a crawl, but can be very useful for debugging.
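	For example (an illustrative sketch; the file name is
	hypothetical):

		gcc -O -c my_filter.c			normal build
		gcc -O -DMMX_TRACE -c my_filter.c	traced build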
	THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY
	EXPRESS OR IMPLIED WARRANTIES, INCLUDING, WITHOUT
	LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTABILITY
	AND FITNESS FOR ANY PARTICULAR PURPOSE.

	1997-99 by H. Dietz and R. Fisher

	It appears that the latest gas has the pand problem fixed, therefore
	  I'll undefine BROKEN_PAND by default.
*/
/*	Warning: at this writing, the version of GAS packaged
	with most Linux distributions does not handle the
	parallel AND operation mnemonic correctly.  If the
	symbol BROKEN_PAND is defined, a slower alternative
	coding will be used.  If execution of mmxtest results
	in an illegal instruction fault, define this symbol.
*/
#undef	BROKEN_PAND
/*	The type of a value that fits in an MMX register
	(note that long long constant values MUST be suffixed
	 by LL and unsigned long long values by ULL, lest
	 they be truncated by the compiler)
*/
typedef	union {
	long long		q;	/* Quadword (64-bit) value */
	unsigned long long	uq;	/* Unsigned Quadword */
	int			d[2];	/* 2 Doubleword (32-bit) values */
	unsigned int		ud[2];	/* 2 Unsigned Doubleword */
	short			w[4];	/* 4 Word (16-bit) values */
	unsigned short		uw[4];	/* 4 Unsigned Word */
	char			b[8];	/* 8 Byte (8-bit) values */
	unsigned char		ub[8];	/* 8 Unsigned Byte */
	float			s[2];	/* 2 Single-precision (32-bit) values */
} __attribute__ ((aligned (8))) mmx_t;	/* On an 8-byte (64-bit) boundary */
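
/*	For instance (an illustrative sketch): the suffix rule above in
	action when filling an mmx_t by hand:

		mmx_t four_ones;
		four_ones.q = 0x0001000100010001LL;	LL required, lest the
							upper 32 bits be lost
*/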
/*	Helper functions for the instruction macros that follow...
	(note that memory-to-register, m2r, instructions are nearly
	 as efficient as register-to-register, r2r, instructions;
	 however, memory-to-memory instructions are really simulated
	 as a convenience, and are only 1/3 as efficient)
*/
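
/*	For instance (an illustrative sketch of that tradeoff): the
	one-line convenience form

		paddw(a, b);		b += a: load, add, store

	costs three instructions every time, while keeping the operand
	register-resident pays the load and store only once:

		movq_m2r(b, mm0);
		paddw_m2r(a, mm0);	one instruction per add
		movq_r2m(mm0, b);
*/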
#ifdef	MMX_TRACE

/*	Include the stuff for printing a trace to stderr...
*/

#include <stdio.h>
#define	mmx_i2r(op, imm, reg) \
	{ \
		mmx_t mmx_trace; \
		mmx_trace.uq = (imm); \
		fprintf(stderr, #op "_i2r(" #imm "=0x%08x%08x, ", \
			mmx_trace.d[1], mmx_trace.d[0]); \
		__asm__ __volatile__ ("movq %%" #reg ", %0" \
				      : "=X" (mmx_trace) \
				      : /* nothing */ ); \
		fprintf(stderr, #reg "=0x%08x%08x) => ", \
			mmx_trace.d[1], mmx_trace.d[0]); \
		__asm__ __volatile__ (#op " %0, %%" #reg \
				      : /* nothing */ \
				      : "X" (imm)); \
		__asm__ __volatile__ ("movq %%" #reg ", %0" \
				      : "=X" (mmx_trace) \
				      : /* nothing */ ); \
		fprintf(stderr, #reg "=0x%08x%08x\n", \
			mmx_trace.d[1], mmx_trace.d[0]); \
	}
#define	mmx_m2r(op, mem, reg) \
	{ \
		mmx_t mmx_trace; \
		mmx_trace = (mem); \
		fprintf(stderr, #op "_m2r(" #mem "=0x%08x%08x, ", \
			mmx_trace.d[1], mmx_trace.d[0]); \
		__asm__ __volatile__ ("movq %%" #reg ", %0" \
				      : "=X" (mmx_trace) \
				      : /* nothing */ ); \
		fprintf(stderr, #reg "=0x%08x%08x) => ", \
			mmx_trace.d[1], mmx_trace.d[0]); \
		__asm__ __volatile__ (#op " %0, %%" #reg \
				      : /* nothing */ \
				      : "X" (mem)); \
		__asm__ __volatile__ ("movq %%" #reg ", %0" \
				      : "=X" (mmx_trace) \
				      : /* nothing */ ); \
		fprintf(stderr, #reg "=0x%08x%08x\n", \
			mmx_trace.d[1], mmx_trace.d[0]); \
	}
#define	mmx_r2m(op, reg, mem) \
	{ \
		mmx_t mmx_trace; \
		__asm__ __volatile__ ("movq %%" #reg ", %0" \
				      : "=X" (mmx_trace) \
				      : /* nothing */ ); \
		fprintf(stderr, #op "_r2m(" #reg "=0x%08x%08x, ", \
			mmx_trace.d[1], mmx_trace.d[0]); \
		mmx_trace = (mem); \
		fprintf(stderr, #mem "=0x%08x%08x) => ", \
			mmx_trace.d[1], mmx_trace.d[0]); \
		__asm__ __volatile__ (#op " %%" #reg ", %0" \
				      : "=X" (mem) \
				      : /* nothing */ ); \
		mmx_trace = (mem); \
		fprintf(stderr, #mem "=0x%08x%08x\n", \
			mmx_trace.d[1], mmx_trace.d[0]); \
	}
#define	mmx_r2r(op, regs, regd) \
	{ \
		mmx_t mmx_trace; \
		__asm__ __volatile__ ("movq %%" #regs ", %0" \
				      : "=X" (mmx_trace) \
				      : /* nothing */ ); \
		fprintf(stderr, #op "_r2r(" #regs "=0x%08x%08x, ", \
			mmx_trace.d[1], mmx_trace.d[0]); \
		__asm__ __volatile__ ("movq %%" #regd ", %0" \
				      : "=X" (mmx_trace) \
				      : /* nothing */ ); \
		fprintf(stderr, #regd "=0x%08x%08x) => ", \
			mmx_trace.d[1], mmx_trace.d[0]); \
		__asm__ __volatile__ (#op " %" #regs ", %" #regd); \
		__asm__ __volatile__ ("movq %%" #regd ", %0" \
				      : "=X" (mmx_trace) \
				      : /* nothing */ ); \
		fprintf(stderr, #regd "=0x%08x%08x\n", \
			mmx_trace.d[1], mmx_trace.d[0]); \
	}
#define	mmx_m2m(op, mems, memd) \
	{ \
		mmx_t mmx_trace; \
		mmx_trace = (mems); \
		fprintf(stderr, #op "_m2m(" #mems "=0x%08x%08x, ", \
			mmx_trace.d[1], mmx_trace.d[0]); \
		mmx_trace = (memd); \
		fprintf(stderr, #memd "=0x%08x%08x) => ", \
			mmx_trace.d[1], mmx_trace.d[0]); \
		__asm__ __volatile__ ("movq %0, %%mm0\n\t" \
				      #op " %1, %%mm0\n\t" \
				      "movq %%mm0, %0" \
				      : "=X" (memd) \
				      : "X" (mems)); \
		mmx_trace = (memd); \
		fprintf(stderr, #memd "=0x%08x%08x\n", \
			mmx_trace.d[1], mmx_trace.d[0]); \
	}
#else

/*	These macros are a lot simpler without the tracing...
*/
#define	mmx_i2r(op, imm, reg) \
	__asm__ __volatile__ (#op " %0, %%" #reg \
			      : /* nothing */ \
			      : "X" (imm))
#define	mmx_m2r(op, mem, reg) \
	__asm__ __volatile__ (#op " %0, %%" #reg \
			      : /* nothing */ \
			      : "X" (mem))
#define	mmx_r2m(op, reg, mem) \
	__asm__ __volatile__ (#op " %%" #reg ", %0" \
			      : "=X" (mem) \
			      : /* nothing */ )
#define	mmx_r2r(op, regs, regd) \
	__asm__ __volatile__ (#op " %" #regs ", %" #regd)
#define	mmx_m2m(op, mems, memd) \
	__asm__ __volatile__ ("movq %0, %%mm0\n\t" \
			      #op " %1, %%mm0\n\t" \
			      "movq %%mm0, %0" \
			      : "=X" (memd) \
			      : "X" (mems))
#endif


/*	1x64 MOVe Quadword
	(this is both a load and a store...
	 in fact, it is the only way to store)
*/
#define	movq_m2r(var, reg)	mmx_m2r(movq, var, reg)
#define	movq_r2m(reg, var)	mmx_r2m(movq, reg, var)
#define	movq_r2r(regs, regd)	mmx_r2r(movq, regs, regd)
#define	movq(vars, vard) \
	__asm__ __volatile__ ("movq %1, %%mm0\n\t" \
			      "movq %%mm0, %0" \
			      : "=X" (vard) \
			      : "X" (vars))
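
/*	For instance (an illustrative sketch): with mmx_t variables
	src and dst,

		movq_m2r(src, mm1);	load 64 bits into mm1
		movq_r2m(mm1, dst);	store them back to memory

	or, in one step (clobbering mm0):

		movq(src, dst);		dst = src
*/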
/*	1x32 MOVe Doubleword
	(like movq, this is both load and store...
	 but is most useful for moving things between
	 mmx registers and ordinary registers)
*/
#define	movd_m2r(var, reg)	mmx_m2r(movd, var, reg)
#define	movd_r2m(reg, var)	mmx_r2m(movd, reg, var)
#define	movd_r2r(regs, regd)	mmx_r2r(movd, regs, regd)
#define	movd(vars, vard) \
	__asm__ __volatile__ ("movd %1, %%mm0\n\t" \
			      "movd %%mm0, %0" \
			      : "=X" (vard) \
			      : "X" (vars))
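
/*	For instance (an illustrative sketch): movd is the usual way to
	get a 32-bit integer into the low half of an MMX register (a
	movd load zeroes the high half):

		int pixel = 0x00ff00ff;
		movd_m2r(pixel, mm2);	mm2 = 0x0000000000ff00ff
*/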
/*	2x32, 4x16, and 8x8 Parallel ADDs
*/
#define	paddd_m2r(var, reg)	mmx_m2r(paddd, var, reg)
#define	paddd_r2r(regs, regd)	mmx_r2r(paddd, regs, regd)
#define	paddd(vars, vard)	mmx_m2m(paddd, vars, vard)

#define	paddw_m2r(var, reg)	mmx_m2r(paddw, var, reg)
#define	paddw_r2r(regs, regd)	mmx_r2r(paddw, regs, regd)
#define	paddw(vars, vard)	mmx_m2m(paddw, vars, vard)

#define	paddb_m2r(var, reg)	mmx_m2r(paddb, var, reg)
#define	paddb_r2r(regs, regd)	mmx_r2r(paddb, regs, regd)
#define	paddb(vars, vard)	mmx_m2m(paddb, vars, vard)
/*	4x16 and 8x8 Parallel ADDs using Saturation arithmetic
*/
#define	paddsw_m2r(var, reg)	mmx_m2r(paddsw, var, reg)
#define	paddsw_r2r(regs, regd)	mmx_r2r(paddsw, regs, regd)
#define	paddsw(vars, vard)	mmx_m2m(paddsw, vars, vard)

#define	paddsb_m2r(var, reg)	mmx_m2r(paddsb, var, reg)
#define	paddsb_r2r(regs, regd)	mmx_r2r(paddsb, regs, regd)
#define	paddsb(vars, vard)	mmx_m2m(paddsb, vars, vard)
/*	4x16 and 8x8 Parallel ADDs using Unsigned Saturation arithmetic
*/
#define	paddusw_m2r(var, reg)	mmx_m2r(paddusw, var, reg)
#define	paddusw_r2r(regs, regd)	mmx_r2r(paddusw, regs, regd)
#define	paddusw(vars, vard)	mmx_m2m(paddusw, vars, vard)

#define	paddusb_m2r(var, reg)	mmx_m2r(paddusb, var, reg)
#define	paddusb_r2r(regs, regd)	mmx_r2r(paddusb, regs, regd)
#define	paddusb(vars, vard)	mmx_m2m(paddusb, vars, vard)
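
/*	For instance (an illustrative worked example, per byte):
	100 + 100 = 200 wraps to -56 with paddb, but clamps to 127 with
	paddsb (signed range -128..127); 200 + 100 = 300 wraps to 44
	with paddb, but clamps to 255 with paddusb (unsigned range
	0..255).
*/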
/*	2x32, 4x16, and 8x8 Parallel SUBs
*/
#define	psubd_m2r(var, reg)	mmx_m2r(psubd, var, reg)
#define	psubd_r2r(regs, regd)	mmx_r2r(psubd, regs, regd)
#define	psubd(vars, vard)	mmx_m2m(psubd, vars, vard)

#define	psubw_m2r(var, reg)	mmx_m2r(psubw, var, reg)
#define	psubw_r2r(regs, regd)	mmx_r2r(psubw, regs, regd)
#define	psubw(vars, vard)	mmx_m2m(psubw, vars, vard)

#define	psubb_m2r(var, reg)	mmx_m2r(psubb, var, reg)
#define	psubb_r2r(regs, regd)	mmx_r2r(psubb, regs, regd)
#define	psubb(vars, vard)	mmx_m2m(psubb, vars, vard)
/*	4x16 and 8x8 Parallel SUBs using Saturation arithmetic
*/
#define	psubsw_m2r(var, reg)	mmx_m2r(psubsw, var, reg)
#define	psubsw_r2r(regs, regd)	mmx_r2r(psubsw, regs, regd)
#define	psubsw(vars, vard)	mmx_m2m(psubsw, vars, vard)

#define	psubsb_m2r(var, reg)	mmx_m2r(psubsb, var, reg)
#define	psubsb_r2r(regs, regd)	mmx_r2r(psubsb, regs, regd)
#define	psubsb(vars, vard)	mmx_m2m(psubsb, vars, vard)
/*	4x16 and 8x8 Parallel SUBs using Unsigned Saturation arithmetic
*/
#define	psubusw_m2r(var, reg)	mmx_m2r(psubusw, var, reg)
#define	psubusw_r2r(regs, regd)	mmx_r2r(psubusw, regs, regd)
#define	psubusw(vars, vard)	mmx_m2m(psubusw, vars, vard)

#define	psubusb_m2r(var, reg)	mmx_m2r(psubusb, var, reg)
#define	psubusb_r2r(regs, regd)	mmx_r2r(psubusb, regs, regd)
#define	psubusb(vars, vard)	mmx_m2m(psubusb, vars, vard)
/*	4x16 Parallel MULs giving Low 4x16 portions of results
*/
#define	pmullw_m2r(var, reg)	mmx_m2r(pmullw, var, reg)
#define	pmullw_r2r(regs, regd)	mmx_r2r(pmullw, regs, regd)
#define	pmullw(vars, vard)	mmx_m2m(pmullw, vars, vard)
/*	4x16 Parallel MULs giving High 4x16 portions of results
*/
#define	pmulhw_m2r(var, reg)	mmx_m2r(pmulhw, var, reg)
#define	pmulhw_r2r(regs, regd)	mmx_r2r(pmulhw, regs, regd)
#define	pmulhw(vars, vard)	mmx_m2m(pmulhw, vars, vard)
/*	4x16->2x32 Parallel Mul-ADD
	(muls like pmullw, then adds adjacent 16-bit fields
	 in the multiply result to make the final 2x32 result)
*/
#define	pmaddwd_m2r(var, reg)	mmx_m2r(pmaddwd, var, reg)
#define	pmaddwd_r2r(regs, regd)	mmx_r2r(pmaddwd, regs, regd)
#define	pmaddwd(vars, vard)	mmx_m2m(pmaddwd, vars, vard)
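
/*	For instance (an illustrative sketch): pmaddwd is the core of a
	16-bit dot product.  With a.w = {1, 2, 3, 4} and b.w = {5, 6, 7, 8},

		movq_m2r(a, mm0);
		pmaddwd_m2r(b, mm0);

	leaves mm0.d = {1*5 + 2*6, 3*7 + 4*8} = {17, 53}; one more add of
	the two doublewords finishes the dot product (70).
*/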
/*	1x64 bitwise AND
*/
#ifdef	BROKEN_PAND
#define	pand_m2r(var, reg) \
	{ \
		mmx_m2r(pandn, (mmx_t) -1LL, reg); \
		mmx_m2r(pandn, var, reg); \
	}
#define	pand_r2r(regs, regd) \
	{ \
		mmx_m2r(pandn, (mmx_t) -1LL, regd); \
		mmx_r2r(pandn, regs, regd); \
	}
#define	pand(vars, vard) \
	{ \
		movq_m2r(vard, mm0); \
		mmx_m2r(pandn, (mmx_t) -1LL, mm0); \
		mmx_m2r(pandn, vars, mm0); \
		movq_r2m(mm0, vard); \
	}
#else
#define	pand_m2r(var, reg)	mmx_m2r(pand, var, reg)
#define	pand_r2r(regs, regd)	mmx_r2r(pand, regs, regd)
#define	pand(vars, vard)	mmx_m2m(pand, vars, vard)
#endif
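
/*	Why the BROKEN_PAND workaround above is correct (an illustrative
	note): pandn computes dest = ~dest & src, so the first pandn
	against the all-ones constant -1LL just complements the register
	(~reg & ~0 == ~reg), and the second pandn then yields
	~(~reg) & src == reg & src, i.e. a plain pand, at the cost of one
	extra operation.
*/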
/*	1x64 bitwise AND with Not the destination
*/
#define	pandn_m2r(var, reg)	mmx_m2r(pandn, var, reg)
#define	pandn_r2r(regs, regd)	mmx_r2r(pandn, regs, regd)
#define	pandn(vars, vard)	mmx_m2m(pandn, vars, vard)
/*	1x64 bitwise OR
*/
#define	por_m2r(var, reg)	mmx_m2r(por, var, reg)
#define	por_r2r(regs, regd)	mmx_r2r(por, regs, regd)
#define	por(vars, vard)	mmx_m2m(por, vars, vard)
/*	1x64 bitwise eXclusive OR
*/
#define	pxor_m2r(var, reg)	mmx_m2r(pxor, var, reg)
#define	pxor_r2r(regs, regd)	mmx_r2r(pxor, regs, regd)
#define	pxor(vars, vard)	mmx_m2m(pxor, vars, vard)
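
/*	A common idiom (illustrative): XORing a register with itself is
	the cheapest way to zero it, with no memory operand needed:

		pxor_r2r(mm7, mm7);	mm7 = 0
*/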
/*	2x32, 4x16, and 8x8 Parallel CoMPare for EQuality
	(resulting fields are either 0 or -1)
*/
#define	pcmpeqd_m2r(var, reg)	mmx_m2r(pcmpeqd, var, reg)
#define	pcmpeqd_r2r(regs, regd)	mmx_r2r(pcmpeqd, regs, regd)
#define	pcmpeqd(vars, vard)	mmx_m2m(pcmpeqd, vars, vard)

#define	pcmpeqw_m2r(var, reg)	mmx_m2r(pcmpeqw, var, reg)
#define	pcmpeqw_r2r(regs, regd)	mmx_r2r(pcmpeqw, regs, regd)
#define	pcmpeqw(vars, vard)	mmx_m2m(pcmpeqw, vars, vard)

#define	pcmpeqb_m2r(var, reg)	mmx_m2r(pcmpeqb, var, reg)
#define	pcmpeqb_r2r(regs, regd)	mmx_r2r(pcmpeqb, regs, regd)
#define	pcmpeqb(vars, vard)	mmx_m2m(pcmpeqb, vars, vard)
/*	2x32, 4x16, and 8x8 Parallel CoMPare for Greater Than
	(resulting fields are either 0 or -1)
*/
#define	pcmpgtd_m2r(var, reg)	mmx_m2r(pcmpgtd, var, reg)
#define	pcmpgtd_r2r(regs, regd)	mmx_r2r(pcmpgtd, regs, regd)
#define	pcmpgtd(vars, vard)	mmx_m2m(pcmpgtd, vars, vard)

#define	pcmpgtw_m2r(var, reg)	mmx_m2r(pcmpgtw, var, reg)
#define	pcmpgtw_r2r(regs, regd)	mmx_r2r(pcmpgtw, regs, regd)
#define	pcmpgtw(vars, vard)	mmx_m2m(pcmpgtw, vars, vard)

#define	pcmpgtb_m2r(var, reg)	mmx_m2r(pcmpgtb, var, reg)
#define	pcmpgtb_r2r(regs, regd)	mmx_r2r(pcmpgtb, regs, regd)
#define	pcmpgtb(vars, vard)	mmx_m2m(pcmpgtb, vars, vard)
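
/*	For instance (an illustrative sketch): the 0/-1 masks combine
	with pand/pandn to select fields without branches; a 4x16 signed
	maximum of mm1 and mm2, result in mm3 (mm1 is clobbered):

		movq_r2r(mm1, mm3);
		pcmpgtw_r2r(mm2, mm3);	mm3 = (mm1 > mm2) ? -1 : 0
		pand_r2r(mm3, mm1);	keep mm1 fields where mm1 > mm2
		pandn_r2r(mm2, mm3);	keep mm2 fields elsewhere
		por_r2r(mm1, mm3);	mm3 = max(mm1, mm2)
*/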
/*	1x64, 2x32, and 4x16 Parallel Shift Left Logical
*/
#define	psllq_i2r(imm, reg)	mmx_i2r(psllq, imm, reg)
#define	psllq_m2r(var, reg)	mmx_m2r(psllq, var, reg)
#define	psllq_r2r(regs, regd)	mmx_r2r(psllq, regs, regd)
#define	psllq(vars, vard)	mmx_m2m(psllq, vars, vard)

#define	pslld_i2r(imm, reg)	mmx_i2r(pslld, imm, reg)
#define	pslld_m2r(var, reg)	mmx_m2r(pslld, var, reg)
#define	pslld_r2r(regs, regd)	mmx_r2r(pslld, regs, regd)
#define	pslld(vars, vard)	mmx_m2m(pslld, vars, vard)

#define	psllw_i2r(imm, reg)	mmx_i2r(psllw, imm, reg)
#define	psllw_m2r(var, reg)	mmx_m2r(psllw, var, reg)
#define	psllw_r2r(regs, regd)	mmx_r2r(psllw, regs, regd)
#define	psllw(vars, vard)	mmx_m2m(psllw, vars, vard)
/*	1x64, 2x32, and 4x16 Parallel Shift Right Logical
*/
#define	psrlq_i2r(imm, reg)	mmx_i2r(psrlq, imm, reg)
#define	psrlq_m2r(var, reg)	mmx_m2r(psrlq, var, reg)
#define	psrlq_r2r(regs, regd)	mmx_r2r(psrlq, regs, regd)
#define	psrlq(vars, vard)	mmx_m2m(psrlq, vars, vard)

#define	psrld_i2r(imm, reg)	mmx_i2r(psrld, imm, reg)
#define	psrld_m2r(var, reg)	mmx_m2r(psrld, var, reg)
#define	psrld_r2r(regs, regd)	mmx_r2r(psrld, regs, regd)
#define	psrld(vars, vard)	mmx_m2m(psrld, vars, vard)

#define	psrlw_i2r(imm, reg)	mmx_i2r(psrlw, imm, reg)
#define	psrlw_m2r(var, reg)	mmx_m2r(psrlw, var, reg)
#define	psrlw_r2r(regs, regd)	mmx_r2r(psrlw, regs, regd)
#define	psrlw(vars, vard)	mmx_m2m(psrlw, vars, vard)
/*	2x32 and 4x16 Parallel Shift Right Arithmetic
*/
#define	psrad_i2r(imm, reg)	mmx_i2r(psrad, imm, reg)
#define	psrad_m2r(var, reg)	mmx_m2r(psrad, var, reg)
#define	psrad_r2r(regs, regd)	mmx_r2r(psrad, regs, regd)
#define	psrad(vars, vard)	mmx_m2m(psrad, vars, vard)

#define	psraw_i2r(imm, reg)	mmx_i2r(psraw, imm, reg)
#define	psraw_m2r(var, reg)	mmx_m2r(psraw, var, reg)
#define	psraw_r2r(regs, regd)	mmx_r2r(psraw, regs, regd)
#define	psraw(vars, vard)	mmx_m2m(psraw, vars, vard)
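
/*	For instance (an illustrative worked example): arithmetic shifts
	copy the sign bit in from the left, so psraw_i2r(1, mm0) halves
	each signed word rounding toward minus infinity: 6 becomes 3,
	-6 becomes -3, and -5 becomes -3; the logical psrlw would shift
	a zero in and wreck negative values instead.
*/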
/*	2x32->4x16 and 4x16->8x8 PACK and Signed Saturate
	(packs source and dest fields into dest in that order)
*/
#define	packssdw_m2r(var, reg)	mmx_m2r(packssdw, var, reg)
#define	packssdw_r2r(regs, regd) mmx_r2r(packssdw, regs, regd)
#define	packssdw(vars, vard)	mmx_m2m(packssdw, vars, vard)

#define	packsswb_m2r(var, reg)	mmx_m2r(packsswb, var, reg)
#define	packsswb_r2r(regs, regd) mmx_r2r(packsswb, regs, regd)
#define	packsswb(vars, vard)	mmx_m2m(packsswb, vars, vard)
/*	4x16->8x8 PACK and Unsigned Saturate
	(packs source and dest fields into dest in that order)
*/
#define	packuswb_m2r(var, reg)	mmx_m2r(packuswb, var, reg)
#define	packuswb_r2r(regs, regd) mmx_r2r(packuswb, regs, regd)
#define	packuswb(vars, vard)	mmx_m2m(packuswb, vars, vard)
/*	2x32->1x64, 4x16->2x32, and 8x8->4x16 UNPaCK Low
	(interleaves low half of dest with low half of source
	 as padding in each result field)
*/
#define	punpckldq_m2r(var, reg)	mmx_m2r(punpckldq, var, reg)
#define	punpckldq_r2r(regs, regd) mmx_r2r(punpckldq, regs, regd)
#define	punpckldq(vars, vard)	mmx_m2m(punpckldq, vars, vard)

#define	punpcklwd_m2r(var, reg)	mmx_m2r(punpcklwd, var, reg)
#define	punpcklwd_r2r(regs, regd) mmx_r2r(punpcklwd, regs, regd)
#define	punpcklwd(vars, vard)	mmx_m2m(punpcklwd, vars, vard)

#define	punpcklbw_m2r(var, reg)	mmx_m2r(punpcklbw, var, reg)
#define	punpcklbw_r2r(regs, regd) mmx_r2r(punpcklbw, regs, regd)
#define	punpcklbw(vars, vard)	mmx_m2m(punpcklbw, vars, vard)
/*	2x32->1x64, 4x16->2x32, and 8x8->4x16 UNPaCK High
	(interleaves high half of dest with high half of source
	 as padding in each result field)
*/
#define	punpckhdq_m2r(var, reg)	mmx_m2r(punpckhdq, var, reg)
#define	punpckhdq_r2r(regs, regd) mmx_r2r(punpckhdq, regs, regd)
#define	punpckhdq(vars, vard)	mmx_m2m(punpckhdq, vars, vard)

#define	punpckhwd_m2r(var, reg)	mmx_m2r(punpckhwd, var, reg)
#define	punpckhwd_r2r(regs, regd) mmx_r2r(punpckhwd, regs, regd)
#define	punpckhwd(vars, vard)	mmx_m2m(punpckhwd, vars, vard)

#define	punpckhbw_m2r(var, reg)	mmx_m2r(punpckhbw, var, reg)
#define	punpckhbw_r2r(regs, regd) mmx_r2r(punpckhbw, regs, regd)
#define	punpckhbw(vars, vard)	mmx_m2m(punpckhbw, vars, vard)
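
/*	For instance (an illustrative sketch): the usual way to widen
	8-bit pixels to 16 bits for arithmetic, then narrow them back,
	is to interleave with a zeroed register and repack:

		pxor_r2r(mm7, mm7);		mm7 = 0
		movq_m2r(pixels, mm0);		8 unsigned bytes
		movq_r2r(mm0, mm1);
		punpcklbw_r2r(mm7, mm0);	low 4 bytes -> 4 words
		punpckhbw_r2r(mm7, mm1);	high 4 bytes -> 4 words
		... 16-bit arithmetic on mm0 and mm1 ...
		packuswb_r2r(mm1, mm0);		back to 8 bytes in mm0
*/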
/*	Empty the multimedia state
	(used to clean-up when going from mmx to float use
	 of the registers that are shared by both; note that
	 there is no float-to-mmx operation needed, because
	 only the float tag word info is corruptible)
*/
#ifdef	MMX_TRACE

#define	emms() \
	{ \
		fprintf(stderr, "emms()\n"); \
		__asm__ __volatile__ ("emms"); \
	}

#else
#define	emms()	__asm__ __volatile__ ("emms")

#endif
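
/*	For instance (an illustrative sketch): call emms() once after a
	run of MMX work and before any floating-point code touches the
	shared register file:

		movq_m2r(a, mm0);
		paddusb_m2r(b, mm0);
		movq_r2m(mm0, a);
		emms();			now FP code is safe again
*/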