
#include <stdio.h>
#include <stdlib.h>

#define HAVE_SSE2 1

/* DO NOT COMPILE WITH -O/-O2/-O3!  GENERATES INVALID ASSEMBLY. */


/*   mmx.h

   MultiMedia eXtensions GCC interface library for IA32.

   To use this library, simply include this header file
   and compile with GCC.  You MUST have inlining enabled
   in order for mmx_ok() to work; this can be done by
   simply using -O on the GCC command line.

   Compiling with -DMMX_TRACE will cause detailed trace
   output to be sent to stderr for each mmx operation.
   This adds lots of code, and obviously slows execution to
   a crawl, but can be very useful for debugging.

   THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY
   EXPRESS OR IMPLIED WARRANTIES, INCLUDING, WITHOUT
   LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTABILITY
   AND FITNESS FOR ANY PARTICULAR PURPOSE.

   June 11, 1998 by H. Dietz and R. Fisher
*/


/*   The type of a value that fits in an MMX register
   (note that long long constant values MUST be suffixed
    by LL and unsigned long long values by ULL, lest
    they be truncated by the compiler)
*/
typedef   union {
   long long            q;   /* Quadword (64-bit) value */
   unsigned long long   uq;   /* Unsigned Quadword */
   int                  d[2];   /* 2 Doubleword (32-bit) values */
   unsigned int         ud[2];   /* 2 Unsigned Doubleword */
   short                w[4];   /* 4 Word (16-bit) values */
   unsigned short       uw[4];   /* 4 Unsigned Word */
   char                 b[8];   /* 8 Byte (8-bit) values */
   unsigned char        ub[8];   /* 8 Unsigned Byte */
} mmx_t;
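
/* Illustrative sketch (added commentary, not part of the original header):
   the union members all alias the same 64 bits, so a quadword written
   through .q -- note the LL suffix, as warned above -- can be read back as
   smaller fields.  Assuming a little-endian IA32 layout:

      mmx_t v;
      v.q = 0x0001000200030004LL;
      // v.uw[0] == 0x0004, v.uw[1] == 0x0003,
      // v.uw[2] == 0x0002, v.uw[3] == 0x0001
*/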


/*   Function to test if mmx instructions are supported...
*/
inline extern int
mmx_ok(void)
{
   /* Returns 1 if mmx instructions are ok,
      0 if hardware does not support mmx
   */
   register int ok = 0;

   __asm__ __volatile__ (
      /* Get CPU version information */
      "movl $1, %%eax\n\t"
      "cpuid\n\t"
      "movl %%edx, %0"
      : "=a" (ok)
      : /* no input */
      : "ebx", "ecx", "edx"   /* cpuid also overwrites these registers */
   );
   return((ok & 0x800000) == 0x800000);
}
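
/* A minimal usage sketch (added commentary): callers would normally gate any
   use of the macros below on this CPUID check, e.g.

      if (!mmx_ok()) {
         fprintf(stderr, "MMX not supported on this CPU\n");
         exit(1);
      }

   The test driver in main() below leaves its mmx_ok() call commented out and
   simply assumes MMX is present. */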


/*   Helper functions for the instruction macros that follow...
   (note that memory-to-register, m2r, instructions are nearly
    as efficient as register-to-register, r2r, instructions;
    however, memory-to-memory instructions are really simulated
    as a convenience, and are only 1/3 as efficient)
*/
#ifdef   MMX_TRACE

/*   Include the stuff for printing a trace to stderr...
*/

#include <stdio.h>

#define   mmx_m2r(op, mem, reg) \
   { \
      mmx_t mmx_trace; \
      mmx_trace = (mem); \
      fprintf(stderr, #op "_m2r(" #mem "=0x%016llx, ", mmx_trace.q); \
      __asm__ __volatile__ ("movq %%" #reg ", %0" \
                  : "=X" (mmx_trace) \
                  : /* nothing */ ); \
      fprintf(stderr, #reg "=0x%016llx) => ", mmx_trace.q); \
      __asm__ __volatile__ (#op " %0, %%" #reg \
                  : /* nothing */ \
                  : "X" (mem)); \
      __asm__ __volatile__ ("movq %%" #reg ", %0" \
                  : "=X" (mmx_trace) \
                  : /* nothing */ ); \
      fprintf(stderr, #reg "=0x%016llx\n", mmx_trace.q); \
   }

#define   mmx_r2m(op, reg, mem) \
   { \
      mmx_t mmx_trace; \
      __asm__ __volatile__ ("movq %%" #reg ", %0" \
                  : "=X" (mmx_trace) \
                  : /* nothing */ ); \
      fprintf(stderr, #op "_r2m(" #reg "=0x%016llx, ", mmx_trace.q); \
      mmx_trace = (mem); \
      fprintf(stderr, #mem "=0x%016llx) => ", mmx_trace.q); \
      __asm__ __volatile__ (#op " %%" #reg ", %0" \
                  : "=X" (mem) \
                  : /* nothing */ ); \
      mmx_trace = (mem); \
      fprintf(stderr, #mem "=0x%016llx\n", mmx_trace.q); \
   }

#define   mmx_r2r(op, regs, regd) \
   { \
      mmx_t mmx_trace; \
      __asm__ __volatile__ ("movq %%" #regs ", %0" \
                  : "=X" (mmx_trace) \
                  : /* nothing */ ); \
      fprintf(stderr, #op "_r2r(" #regs "=0x%016llx, ", mmx_trace.q); \
      __asm__ __volatile__ ("movq %%" #regd ", %0" \
                  : "=X" (mmx_trace) \
                  : /* nothing */ ); \
      fprintf(stderr, #regd "=0x%016llx) => ", mmx_trace.q); \
      __asm__ __volatile__ (#op " %" #regs ", %" #regd); \
      __asm__ __volatile__ ("movq %%" #regd ", %0" \
                  : "=X" (mmx_trace) \
                  : /* nothing */ ); \
      fprintf(stderr, #regd "=0x%016llx\n", mmx_trace.q); \
   }

#define   mmx_m2m(op, mems, memd) \
   { \
      mmx_t mmx_trace; \
      mmx_trace = (mems); \
      fprintf(stderr, #op "_m2m(" #mems "=0x%016llx, ", mmx_trace.q); \
      mmx_trace = (memd); \
      fprintf(stderr, #memd "=0x%016llx) => ", mmx_trace.q); \
      __asm__ __volatile__ ("movq %0, %%mm0\n\t" \
                  #op " %1, %%mm0\n\t" \
                  "movq %%mm0, %0" \
                  : "=X" (memd) \
                  : "X" (mems)); \
      mmx_trace = (memd); \
      fprintf(stderr, #memd "=0x%016llx\n", mmx_trace.q); \
   }

#else

/*   These macros are a lot simpler without the tracing...
*/

#define   mmx_m2r(op, mem, reg) \
   __asm__ __volatile__ (#op " %0, %%" #reg \
               : /* nothing */ \
               : "X" (mem))

#define   mmx_r2m(op, reg, mem) \
   __asm__ __volatile__ (#op " %%" #reg ", %0" \
               : "=X" (mem) \
               : /* nothing */ )

#define   mmx_r2r(op, regs, regd) \
   __asm__ __volatile__ (#op " %" #regs ", %" #regd)

#define   mmx_m2m(op, mems, memd) \
   __asm__ __volatile__ ("movq %0, %%mm0\n\t" \
               #op " %1, %%mm0\n\t" \
               "movq %%mm0, %0" \
               : "=X" (memd) \
               : "X" (mems))

#endif
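
/* Illustrative note (added commentary): with tracing disabled, an m2m
   "instruction" such as paddw(a, b) expands roughly to the three-instruction
   sequence

      movq  b, %mm0
      paddw a, %mm0
      movq  %mm0, b

   i.e. it loads the destination operand into mm0, applies the op with the
   source as a memory operand, and stores mm0 back.  This is why the comment
   above rates m2m at only 1/3 the efficiency of the other forms, and why the
   result of every mmx_m2m-based macro lands in its second (destination)
   argument. */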


/*   1x64 MOVe Quadword
   (this is both a load and a store...
    in fact, it is the only way to store)
*/
#define   movq_m2r(var, reg)     mmx_m2r(movq, var, reg)
#define   movq_r2m(reg, var)     mmx_r2m(movq, reg, var)
#define   movq_r2r(regs, regd)   mmx_r2r(movq, regs, regd)
#define   movq(vars, vard) \
   __asm__ __volatile__ ("movq %1, %%mm0\n\t" \
               "movq %%mm0, %0" \
               : "=X" (vard) \
               : "X" (vars))

/*   1x32 MOVe Doubleword
   (like movq, this is both load and store...
    but is most useful for moving things between
    mmx registers and ordinary registers)
*/
#define   movd_m2r(var, reg)     mmx_m2r(movd, var, reg)
#define   movd_r2m(reg, var)     mmx_r2m(movd, reg, var)
#define   movd_r2r(regs, regd)   mmx_r2r(movd, regs, regd)
#define   movd(vars, vard) \
   __asm__ __volatile__ ("movd %1, %%mm0\n\t" \
               "movd %%mm0, %0" \
               : "=X" (vard) \
               : "X" (vars))


/*   2x32, 4x16, and 8x8 Parallel ADDs
*/
#define   paddd_m2r(var, reg)     mmx_m2r(paddd, var, reg)
#define   paddd_r2r(regs, regd)   mmx_r2r(paddd, regs, regd)
#define   paddd(vars, vard)       mmx_m2m(paddd, vars, vard)

#define   paddw_m2r(var, reg)     mmx_m2r(paddw, var, reg)
#define   paddw_r2r(regs, regd)   mmx_r2r(paddw, regs, regd)
#define   paddw(vars, vard)       mmx_m2m(paddw, vars, vard)

#define   paddb_m2r(var, reg)     mmx_m2r(paddb, var, reg)
#define   paddb_r2r(regs, regd)   mmx_r2r(paddb, regs, regd)
#define   paddb(vars, vard)       mmx_m2m(paddb, vars, vard)


/*   4x16 and 8x8 Parallel ADDs using Saturation arithmetic
*/
#define   paddsw_m2r(var, reg)     mmx_m2r(paddsw, var, reg)
#define   paddsw_r2r(regs, regd)   mmx_r2r(paddsw, regs, regd)
#define   paddsw(vars, vard)       mmx_m2m(paddsw, vars, vard)

#define   paddsb_m2r(var, reg)     mmx_m2r(paddsb, var, reg)
#define   paddsb_r2r(regs, regd)   mmx_r2r(paddsb, regs, regd)
#define   paddsb(vars, vard)       mmx_m2m(paddsb, vars, vard)
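
/* Worked example (added commentary): with wrap-around paddw, adding the
   16-bit words 0x7000 + 0x7000 gives 0xE000 (the sum wraps into the sign
   bit).  With signed saturation, paddsw clamps the same sum to 0x7FFF, and
   with unsigned saturation paddusw clamps 0xF000 + 0xF000 to 0xFFFF instead
   of wrapping to 0xE000. */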


/*   4x16 and 8x8 Parallel ADDs using Unsigned Saturation arithmetic
*/
#define   paddusw_m2r(var, reg)     mmx_m2r(paddusw, var, reg)
#define   paddusw_r2r(regs, regd)   mmx_r2r(paddusw, regs, regd)
#define   paddusw(vars, vard)       mmx_m2m(paddusw, vars, vard)

#define   paddusb_m2r(var, reg)     mmx_m2r(paddusb, var, reg)
#define   paddusb_r2r(regs, regd)   mmx_r2r(paddusb, regs, regd)
#define   paddusb(vars, vard)       mmx_m2m(paddusb, vars, vard)


/*   2x32, 4x16, and 8x8 Parallel SUBs
*/
#define   psubd_m2r(var, reg)     mmx_m2r(psubd, var, reg)
#define   psubd_r2r(regs, regd)   mmx_r2r(psubd, regs, regd)
#define   psubd(vars, vard)       mmx_m2m(psubd, vars, vard)

#define   psubw_m2r(var, reg)     mmx_m2r(psubw, var, reg)
#define   psubw_r2r(regs, regd)   mmx_r2r(psubw, regs, regd)
#define   psubw(vars, vard)       mmx_m2m(psubw, vars, vard)

#define   psubb_m2r(var, reg)     mmx_m2r(psubb, var, reg)
#define   psubb_r2r(regs, regd)   mmx_r2r(psubb, regs, regd)
#define   psubb(vars, vard)       mmx_m2m(psubb, vars, vard)


/*   4x16 and 8x8 Parallel SUBs using Saturation arithmetic
*/
#define   psubsw_m2r(var, reg)     mmx_m2r(psubsw, var, reg)
#define   psubsw_r2r(regs, regd)   mmx_r2r(psubsw, regs, regd)
#define   psubsw(vars, vard)       mmx_m2m(psubsw, vars, vard)

#define   psubsb_m2r(var, reg)     mmx_m2r(psubsb, var, reg)
#define   psubsb_r2r(regs, regd)   mmx_r2r(psubsb, regs, regd)
#define   psubsb(vars, vard)       mmx_m2m(psubsb, vars, vard)


/*   4x16 and 8x8 Parallel SUBs using Unsigned Saturation arithmetic
*/
#define   psubusw_m2r(var, reg)     mmx_m2r(psubusw, var, reg)
#define   psubusw_r2r(regs, regd)   mmx_r2r(psubusw, regs, regd)
#define   psubusw(vars, vard)       mmx_m2m(psubusw, vars, vard)

#define   psubusb_m2r(var, reg)     mmx_m2r(psubusb, var, reg)
#define   psubusb_r2r(regs, regd)   mmx_r2r(psubusb, regs, regd)
#define   psubusb(vars, vard)       mmx_m2m(psubusb, vars, vard)


/*   4x16 Parallel MULs giving Low 4x16 portions of results
*/
#define   pmullw_m2r(var, reg)     mmx_m2r(pmullw, var, reg)
#define   pmullw_r2r(regs, regd)   mmx_r2r(pmullw, regs, regd)
#define   pmullw(vars, vard)       mmx_m2m(pmullw, vars, vard)


/*   4x16 Parallel MULs giving High 4x16 portions of results
*/
#define   pmulhw_m2r(var, reg)     mmx_m2r(pmulhw, var, reg)
#define   pmulhw_r2r(regs, regd)   mmx_r2r(pmulhw, regs, regd)
#define   pmulhw(vars, vard)       mmx_m2m(pmulhw, vars, vard)


/*   4x16->2x32 Parallel Mul-ADD
   (muls like pmullw, then adds adjacent 16-bit fields
    in the multiply result to make the final 2x32 result)
*/
#define   pmaddwd_m2r(var, reg)     mmx_m2r(pmaddwd, var, reg)
#define   pmaddwd_r2r(regs, regd)   mmx_r2r(pmaddwd, regs, regd)
#define   pmaddwd(vars, vard)       mmx_m2m(pmaddwd, vars, vard)
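
/* Worked example (added commentary): with signed word lanes a = {a0,a1,a2,a3}
   and b = {b0,b1,b2,b3} (a0 in the lowest 16 bits), pmaddwd produces the two
   32-bit doublewords a0*b0 + a1*b1 (low) and a2*b2 + a3*b3 (high).
   E.g. a = {1,2,3,4}, b = {5,6,7,8}:  low = 1*5 + 2*6 = 17,
   high = 3*7 + 4*8 = 53. */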


/*   1x64 bitwise AND
*/
#define   pand_m2r(var, reg)     mmx_m2r(pand, var, reg)
#define   pand_r2r(regs, regd)   mmx_r2r(pand, regs, regd)
#define   pand(vars, vard)       mmx_m2m(pand, vars, vard)

/*   1x64 bitwise AND with NOT of the destination
   (pandn computes dest = (~dest) & src)
*/
#define   pandn_m2r(var, reg)     mmx_m2r(pandn, var, reg)
#define   pandn_r2r(regs, regd)   mmx_r2r(pandn, regs, regd)
#define   pandn(vars, vard)       mmx_m2m(pandn, vars, vard)


/*   1x64 bitwise OR
*/
#define   por_m2r(var, reg)     mmx_m2r(por, var, reg)
#define   por_r2r(regs, regd)   mmx_r2r(por, regs, regd)
#define   por(vars, vard)       mmx_m2m(por, vars, vard)


/*   1x64 bitwise eXclusive OR
*/
#define   pxor_m2r(var, reg)     mmx_m2r(pxor, var, reg)
#define   pxor_r2r(regs, regd)   mmx_r2r(pxor, regs, regd)
#define   pxor(vars, vard)       mmx_m2m(pxor, vars, vard)


/*   2x32, 4x16, and 8x8 Parallel CoMPare for EQuality
   (resulting fields are either 0 or -1)
*/
#define   pcmpeqd_m2r(var, reg)     mmx_m2r(pcmpeqd, var, reg)
#define   pcmpeqd_r2r(regs, regd)   mmx_r2r(pcmpeqd, regs, regd)
#define   pcmpeqd(vars, vard)       mmx_m2m(pcmpeqd, vars, vard)

#define   pcmpeqw_m2r(var, reg)     mmx_m2r(pcmpeqw, var, reg)
#define   pcmpeqw_r2r(regs, regd)   mmx_r2r(pcmpeqw, regs, regd)
#define   pcmpeqw(vars, vard)       mmx_m2m(pcmpeqw, vars, vard)

#define   pcmpeqb_m2r(var, reg)     mmx_m2r(pcmpeqb, var, reg)
#define   pcmpeqb_r2r(regs, regd)   mmx_r2r(pcmpeqb, regs, regd)
#define   pcmpeqb(vars, vard)       mmx_m2m(pcmpeqb, vars, vard)


/*   2x32, 4x16, and 8x8 Parallel CoMPare for Greater Than
   (resulting fields are either 0 or -1)
*/
#define   pcmpgtd_m2r(var, reg)   mmx_m2r(pcmpgtd, var, reg)
#define   pcmpgtd_r2r(regs, regd)   mmx_r2r(pcmpgtd, regs, regd)
#define   pcmpgtd(vars, vard)   mmx_m2m(pcmpgtd, vars, vard)

#define   pcmpgtw_m2r(var, reg)   mmx_m2r(pcmpgtw, var, reg)
#define   pcmpgtw_r2r(regs, regd)   mmx_r2r(pcmpgtw, regs, regd)
#define   pcmpgtw(vars, vard)   mmx_m2m(pcmpgtw, vars, vard)

#define   pcmpgtb_m2r(var, reg)   mmx_m2r(pcmpgtb, var, reg)
#define   pcmpgtb_r2r(regs, regd)   mmx_r2r(pcmpgtb, regs, regd)
#define   pcmpgtb(vars, vard)   mmx_m2m(pcmpgtb, vars, vard)


/*   1x64, 2x32, and 4x16 Parallel Shift Left Logical
*/
#define   psllq_m2r(var, reg)   mmx_m2r(psllq, var, reg)
#define   psllq_r2r(regs, regd)   mmx_r2r(psllq, regs, regd)
#define   psllq(vars, vard)   mmx_m2m(psllq, vars, vard)

#define   pslld_m2r(var, reg)   mmx_m2r(pslld, var, reg)
#define   pslld_r2r(regs, regd)   mmx_r2r(pslld, regs, regd)
#define   pslld(vars, vard)   mmx_m2m(pslld, vars, vard)

#define   psllw_m2r(var, reg)   mmx_m2r(psllw, var, reg)
#define   psllw_r2r(regs, regd)   mmx_r2r(psllw, regs, regd)
#define   psllw(vars, vard)   mmx_m2m(psllw, vars, vard)


/*   1x64, 2x32, and 4x16 Parallel Shift Right Logical
*/
#define   psrlq_m2r(var, reg)   mmx_m2r(psrlq, var, reg)
#define   psrlq_r2r(regs, regd)   mmx_r2r(psrlq, regs, regd)
#define   psrlq(vars, vard)   mmx_m2m(psrlq, vars, vard)

#define   psrld_m2r(var, reg)   mmx_m2r(psrld, var, reg)
#define   psrld_r2r(regs, regd)   mmx_r2r(psrld, regs, regd)
#define   psrld(vars, vard)   mmx_m2m(psrld, vars, vard)

#define   psrlw_m2r(var, reg)   mmx_m2r(psrlw, var, reg)
#define   psrlw_r2r(regs, regd)   mmx_r2r(psrlw, regs, regd)
#define   psrlw(vars, vard)   mmx_m2m(psrlw, vars, vard)


/*   2x32 and 4x16 Parallel Shift Right Arithmetic
*/
#define   psrad_m2r(var, reg)   mmx_m2r(psrad, var, reg)
#define   psrad_r2r(regs, regd)   mmx_r2r(psrad, regs, regd)
#define   psrad(vars, vard)   mmx_m2m(psrad, vars, vard)

#define   psraw_m2r(var, reg)   mmx_m2r(psraw, var, reg)
#define   psraw_r2r(regs, regd)   mmx_r2r(psraw, regs, regd)
#define   psraw(vars, vard)   mmx_m2m(psraw, vars, vard)


/*   2x32->4x16 and 4x16->8x8 PACK and Signed Saturate
   (packs source and dest fields into dest in that order)
*/
#define   packssdw_m2r(var, reg)   mmx_m2r(packssdw, var, reg)
#define   packssdw_r2r(regs, regd) mmx_r2r(packssdw, regs, regd)
#define   packssdw(vars, vard)   mmx_m2m(packssdw, vars, vard)

#define   packsswb_m2r(var, reg)   mmx_m2r(packsswb, var, reg)
#define   packsswb_r2r(regs, regd) mmx_r2r(packsswb, regs, regd)
#define   packsswb(vars, vard)   mmx_m2m(packsswb, vars, vard)
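
/* Worked example (added commentary): packssdw narrows the two signed
   doublewords of the destination into the low two word lanes and the two of
   the source into the high two, each with signed saturation.  E.g.
   destination doublewords {100000, -7} and source doublewords {3, -100000}
   pack to the words 0x7FFF, 0xFFF9, 0x0003, 0x8000 (low lane first);
   out-of-range values clamp to 0x7FFF or 0x8000. */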


/*   4x16->8x8 PACK and Unsigned Saturate
   (packs source and dest fields into dest in that order)
*/
#define   packuswb_m2r(var, reg)   mmx_m2r(packuswb, var, reg)
#define   packuswb_r2r(regs, regd) mmx_r2r(packuswb, regs, regd)
#define   packuswb(vars, vard)   mmx_m2m(packuswb, vars, vard)


/*   2x32->1x64, 4x16->2x32, and 8x8->4x16 UNPaCK Low
   (interleaves low half of dest with low half of source
    as padding in each result field)
*/
#define   punpckldq_m2r(var, reg)   mmx_m2r(punpckldq, var, reg)
#define   punpckldq_r2r(regs, regd) mmx_r2r(punpckldq, regs, regd)
#define   punpckldq(vars, vard)   mmx_m2m(punpckldq, vars, vard)

#define   punpcklwd_m2r(var, reg)   mmx_m2r(punpcklwd, var, reg)
#define   punpcklwd_r2r(regs, regd) mmx_r2r(punpcklwd, regs, regd)
#define   punpcklwd(vars, vard)   mmx_m2m(punpcklwd, vars, vard)

#define   punpcklbw_m2r(var, reg)   mmx_m2r(punpcklbw, var, reg)
#define   punpcklbw_r2r(regs, regd) mmx_r2r(punpcklbw, regs, regd)
#define   punpcklbw(vars, vard)   mmx_m2m(punpcklbw, vars, vard)
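
/* Worked example (added commentary): punpcklbw interleaves the low four
   bytes of the destination with the low four bytes of the source.  With
   destination bytes {d0..d7} and source bytes {s0..s7} (d0 lowest), the
   result bytes, low to high, are {d0, s0, d1, s1, d2, s2, d3, s3}; the high
   halves of both operands are discarded. */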


/*   2x32->1x64, 4x16->2x32, and 8x8->4x16 UNPaCK High
   (interleaves high half of dest with high half of source
    as padding in each result field)
*/
#define   punpckhdq_m2r(var, reg)   mmx_m2r(punpckhdq, var, reg)
#define   punpckhdq_r2r(regs, regd) mmx_r2r(punpckhdq, regs, regd)
#define   punpckhdq(vars, vard)   mmx_m2m(punpckhdq, vars, vard)

#define   punpckhwd_m2r(var, reg)   mmx_m2r(punpckhwd, var, reg)
#define   punpckhwd_r2r(regs, regd) mmx_r2r(punpckhwd, regs, regd)
#define   punpckhwd(vars, vard)   mmx_m2m(punpckhwd, vars, vard)

#define   punpckhbw_m2r(var, reg)   mmx_m2r(punpckhbw, var, reg)
#define   punpckhbw_r2r(regs, regd) mmx_r2r(punpckhbw, regs, regd)
#define   punpckhbw(vars, vard)   mmx_m2m(punpckhbw, vars, vard)


/* 1x64 add/sub -- this is in sse2, not in mmx. */
#define   paddq_m2r(var, reg)     mmx_m2r(paddq, var, reg)
#define   paddq_r2r(regs, regd)   mmx_r2r(paddq, regs, regd)
#define   paddq(vars, vard)       mmx_m2m(paddq, vars, vard)

#define   psubq_m2r(var, reg)     mmx_m2r(psubq, var, reg)
#define   psubq_r2r(regs, regd)   mmx_r2r(psubq, regs, regd)
#define   psubq(vars, vard)       mmx_m2m(psubq, vars, vard)



/*   Empty MMX State
   (used to clean up when going from mmx to float use
    of the registers that are shared by both; note that
    there is no float-to-mmx operation needed, because
    only the float tag word info is corruptible)
*/
#ifdef   MMX_TRACE

#define   emms() \
   { \
      fprintf(stderr, "emms()\n"); \
      __asm__ __volatile__ ("emms"); \
   }

#else

#define   emms()         __asm__ __volatile__ ("emms")

#endif

void mkRand( mmx_t* mm )
{
  mm->uw[0] = 0xFFFF & (random() >> 7);
  mm->uw[1] = 0xFFFF & (random() >> 7);
  mm->uw[2] = 0xFFFF & (random() >> 7);
  mm->uw[3] = 0xFFFF & (random() >> 7);
}



int main( void )
{
   int i;
   //   int rval;
   mmx_t ma;
   mmx_t mb;
   mmx_t ma0, mb0;
   movq_r2r(mm0, mm1);

//   rval = mmx_ok();

   /* Announce return value of mmx_ok() */
//   printf("Value returned from init was %x.", rval);
//   printf(" (Indicates MMX %s available)\n\n",(rval)? "is" : "not");
//   fflush(stdout); fflush(stdout);

//   if(rval)

#define do_test(_name, _operation) \
   for (i = 0; i < 25000; i++) {                                 \
      mkRand(&ma);                                               \
      mkRand(&mb);                                               \
      ma0 = ma; mb0 = mb;                                        \
      _operation;                                                \
      fprintf(stdout, "%s ( %016llx, %016llx ) -> %016llx\n",    \
                     _name, ma0.q, mb0.q, mb.q);                 \
      fflush(stdout);                                            \
   }
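
/* Note (added commentary): each _operation writes its result into mb, since
   the mmx_m2m-based macros store back into their second (destination)
   argument.  So each printed line shows the two random inputs ma0 and mb0
   followed by the value left in mb, 25000 lines per instruction.  For
   hypothetical inputs, a paddd line would look like:

      paddd ( 0000123400005678, 0000001100002200 ) -> 0000124500007878
*/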


   {
     do_test("paddd", paddd(ma,mb));
     do_test("paddw", paddw(ma,mb));
     do_test("paddb", paddb(ma,mb));

     do_test("paddsw", paddsw(ma,mb));
     do_test("paddsb", paddsb(ma,mb));

     do_test("paddusw", paddusw(ma,mb));
     do_test("paddusb", paddusb(ma,mb));

     do_test("psubd", psubd(ma,mb));
     do_test("psubw", psubw(ma,mb));
     do_test("psubb", psubb(ma,mb));

     do_test("psubsw", psubsw(ma,mb));
     do_test("psubsb", psubsb(ma,mb));

     do_test("psubusw", psubusw(ma,mb));
     do_test("psubusb", psubusb(ma,mb));

     do_test("pmulhw", pmulhw(ma,mb));
     do_test("pmullw", pmullw(ma,mb));

     do_test("pmaddwd", pmaddwd(ma,mb));

     do_test("pcmpeqd", pcmpeqd(ma,mb));
     do_test("pcmpeqw", pcmpeqw(ma,mb));
     do_test("pcmpeqb", pcmpeqb(ma,mb));

     do_test("pcmpgtd", pcmpgtd(ma,mb));
     do_test("pcmpgtw", pcmpgtw(ma,mb));
     do_test("pcmpgtb", pcmpgtb(ma,mb));

     do_test("packssdw", packssdw(ma,mb));
     do_test("packsswb", packsswb(ma,mb));
     do_test("packuswb", packuswb(ma,mb));

     do_test("punpckhdq", punpckhdq(ma,mb));
     do_test("punpckhwd", punpckhwd(ma,mb));
     do_test("punpckhbw", punpckhbw(ma,mb));

     do_test("punpckldq", punpckldq(ma,mb));
     do_test("punpcklwd", punpcklwd(ma,mb));
     do_test("punpcklbw", punpcklbw(ma,mb));

     do_test("pand", pand(ma,mb));
     do_test("pandn", pandn(ma,mb));
     do_test("por", por(ma,mb));
     do_test("pxor", pxor(ma,mb));

     do_test("psllq", psllq(ma,mb));
     do_test("pslld", pslld(ma,mb));
     do_test("psllw", psllw(ma,mb));

     do_test("psrlq", psrlq(ma,mb));
     do_test("psrld", psrld(ma,mb));
     do_test("psrlw", psrlw(ma,mb));

     do_test("psrad", psrad(ma,mb));
     do_test("psraw", psraw(ma,mb));

#if HAVE_SSE2
     do_test("paddq", paddq(ma,mb));
     do_test("psubq", psubq(ma,mb));
#endif

     emms();
   }

   /* Clean-up and exit nicely */
   exit(0);
}