
/*---------------------------------------------------------------*/
/*--- begin                                guest_amd64_defs.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2012 OpenWorks LLP
      info (at) open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

/* Only to be used within the guest-amd64 directory. */

#ifndef __VEX_GUEST_AMD64_DEFS_H
#define __VEX_GUEST_AMD64_DEFS_H


/*---------------------------------------------------------*/
/*--- amd64 to IR conversion                            ---*/
/*---------------------------------------------------------*/

/* Convert one amd64 insn to IR.  See the type DisOneInstrFn in
   bb_to_IR.h. */
extern
DisResult disInstr_AMD64 ( IRSB*        irbb,
                           Bool         (*resteerOkFn) ( void*, Addr64 ),
                           Bool         resteerCisOk,
                           void*        callback_opaque,
                           UChar*       guest_code,
                           Long         delta,
                           Addr64       guest_IP,
                           VexArch      guest_arch,
                           VexArchInfo* archinfo,
                           VexAbiInfo*  abiinfo,
                           Bool         host_bigendian );

/* Used by the optimiser to specialise calls to helpers. */
extern
IRExpr* guest_amd64_spechelper ( HChar*   function_name,
                                 IRExpr** args,
                                 IRStmt** precedingStmts,
                                 Int      n_precedingStmts );

/* Describes to the optimiser which parts of the guest state require
   precise memory exceptions.  This is logically part of the guest
   state description. */
extern
Bool guest_amd64_state_requires_precise_mem_exns ( Int, Int );

extern
VexGuestLayout amd64guest_layout;


/*---------------------------------------------------------*/
/*--- amd64 guest helpers                               ---*/
/*---------------------------------------------------------*/

/* --- CLEAN HELPERS --- */

extern ULong amd64g_calculate_rflags_all (
                ULong cc_op,
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
             );

extern ULong amd64g_calculate_rflags_c (
                ULong cc_op,
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
             );

extern ULong amd64g_calculate_condition (
                ULong/*AMD64Condcode*/ cond,
                ULong cc_op,
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
             );
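
/* Usage sketch (illustrative only): given a thunk describing a 64-bit
   ADD of 7 and -1 (the CC_OP and condition-code constants are defined
   further down in this file), the flags can be recovered lazily: */
// ULong c  = amd64g_calculate_rflags_c( AMD64G_CC_OP_ADDQ,
//                                       7, (ULong)-1LL, 0 );
//    /* carry: expected nonzero, since 7 + 0xFFFF..FF carries out of bit 63 */
// ULong nz = amd64g_calculate_condition( AMD64CondNZ, AMD64G_CC_OP_ADDQ,
//                                        7, (ULong)-1LL, 0 );
//    /* expected 1, since the 64-bit result (6) is nonzero */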

extern ULong amd64g_calculate_FXAM ( ULong tag, ULong dbl );

extern ULong amd64g_calculate_RCR  (
                ULong arg, ULong rot_amt, ULong rflags_in, Long sz
             );

extern ULong amd64g_calculate_RCL  (
                ULong arg, ULong rot_amt, ULong rflags_in, Long sz
             );

extern ULong amd64g_calculate_pclmul(ULong s1, ULong s2, ULong which);

extern ULong amd64g_check_fldcw ( ULong fpucw );

extern ULong amd64g_create_fpucw ( ULong fpround );

extern ULong amd64g_check_ldmxcsr ( ULong mxcsr );

extern ULong amd64g_create_mxcsr ( ULong sseround );
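
/* Background note (an assumption stated for the reader's benefit, not
   taken from this file): the architectural rounding-control field sits
   in bits [11:10] of the x87 control word and bits [14:13] of MXCSR,
   encoded as 0=nearest, 1=-inf, 2=+inf, 3=zero.  Assuming fpround and
   sseround use that same 2-bit encoding, round-to-nearest control
   words could be requested as: */
// ULong fpucw = amd64g_create_fpucw( 0 /* assumed: round to nearest */ );
// ULong mxcsr = amd64g_create_mxcsr( 0 /* assumed: round to nearest */ );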

extern VexEmWarn amd64g_dirtyhelper_FLDENV  ( VexGuestAMD64State*, HWord );
extern VexEmWarn amd64g_dirtyhelper_FRSTOR  ( VexGuestAMD64State*, HWord );
extern VexEmWarn amd64g_dirtyhelper_FRSTORS ( VexGuestAMD64State*, HWord );

extern void amd64g_dirtyhelper_FSTENV  ( VexGuestAMD64State*, HWord );
extern void amd64g_dirtyhelper_FNSAVE  ( VexGuestAMD64State*, HWord );
extern void amd64g_dirtyhelper_FNSAVES ( VexGuestAMD64State*, HWord );

/* Translate a guest virtual_addr into a guest linear address by
   consulting the supplied LDT/GDT structures.  Their representation
   must be as specified in pub/libvex_guest_amd64.h.  To indicate a
   translation failure, 1<<32 is returned.  On success, the lower 32
   bits of the returned result indicate the linear address.
*/
//extern
//ULong amd64g_use_seg_selector ( HWord ldt, HWord gdt,
//                              UInt seg_selector, UInt virtual_addr );

extern ULong amd64g_calculate_mmx_pmaddwd  ( ULong, ULong );
extern ULong amd64g_calculate_mmx_psadbw   ( ULong, ULong );
extern ULong amd64g_calculate_mmx_pmovmskb ( ULong );
extern ULong amd64g_calculate_sse_pmovmskb ( ULong w64hi, ULong w64lo );

extern ULong amd64g_calculate_sse_phminposuw ( ULong sLo, ULong sHi );

extern ULong amd64g_calc_crc32b ( ULong crcIn, ULong b );
extern ULong amd64g_calc_crc32w ( ULong crcIn, ULong w );
extern ULong amd64g_calc_crc32l ( ULong crcIn, ULong l );
extern ULong amd64g_calc_crc32q ( ULong crcIn, ULong q );
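
/* Minimal reference sketch (not the actual implementation): assuming
   these helpers follow the SSE4.2 CRC32 instruction semantics, i.e.
   the CRC-32C (Castagnoli) polynomial in bit-reflected form
   (0x82F63B78), the byte variant amounts to: */
// static ULong ref_crc32b ( ULong crcIn, ULong b )
// {
//    UInt crc = ((UInt)crcIn) ^ ((UInt)(b & 0xFF));
//    Int  i;
//    for (i = 0; i < 8; i++)
//       crc = (crc >> 1) ^ (0x82F63B78U & (0U - (crc & 1)));
//    return (ULong)crc;
// }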

extern ULong amd64g_calc_mpsadbw ( ULong sHi, ULong sLo,
                                   ULong dHi, ULong dLo,
                                   ULong imm_and_return_control_bit );

/* --- DIRTY HELPERS --- */

extern ULong amd64g_dirtyhelper_loadF80le  ( ULong/*addr*/ );

extern void  amd64g_dirtyhelper_storeF80le ( ULong/*addr*/, ULong/*data*/ );

extern void  amd64g_dirtyhelper_CPUID_baseline ( VexGuestAMD64State* st );
extern void  amd64g_dirtyhelper_CPUID_sse3_and_cx16 ( VexGuestAMD64State* st );
extern void  amd64g_dirtyhelper_CPUID_sse42_and_cx16 ( VexGuestAMD64State* st );
extern void  amd64g_dirtyhelper_CPUID_avx_and_cx16 ( VexGuestAMD64State* st );

extern void  amd64g_dirtyhelper_FINIT ( VexGuestAMD64State* );

extern void      amd64g_dirtyhelper_FXSAVE  ( VexGuestAMD64State*, HWord );
extern VexEmWarn amd64g_dirtyhelper_FXRSTOR ( VexGuestAMD64State*, HWord );

extern ULong amd64g_dirtyhelper_RDTSC ( void );

extern ULong amd64g_dirtyhelper_IN  ( ULong portno, ULong sz/*1,2 or 4*/ );
extern void  amd64g_dirtyhelper_OUT ( ULong portno, ULong data,
                                      ULong sz/*1,2 or 4*/ );

extern void amd64g_dirtyhelper_SxDT ( void* address,
                                      ULong op /* 0 or 1 */ );

/* Helps with PCMP{I,E}STR{I,M}.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 2 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state for the xSTRM cases, no
   accesses of memory, is a pure function.

   opc_and_imm contains (4th byte of opcode << 8) | the-imm8-byte so
   the callee knows which I/E and I/M variant it is dealing with and
   what the specific operation is.  4th byte of opcode is in the range
   0x60 to 0x63:
       istri  66 0F 3A 63
       istrm  66 0F 3A 62
       estri  66 0F 3A 61
       estrm  66 0F 3A 60

   gstOffL and gstOffR are the guest state offsets for the two XMM
   register inputs.  We never have to deal with the memory case since
   that is handled by pre-loading the relevant value into the fake
   XMM16 register.

   For ESTRx variants, edxIN and eaxIN hold the values of those two
   registers.

   In all cases, the bottom 16 bits of the result contain the new
   OSZACP %rflags values.  For xSTRI variants, bits[31:16] of the
   result hold the new %ecx value.  For xSTRM variants, the helper
   writes the result directly to the guest XMM0.

   Declarable side effects: in all cases, reads guest state at
   [gstOffL, +16) and [gstOffR, +16).  For xSTRM variants, also writes
   guest_XMM0.

   Is expected to be called with opc_and_imm combinations which have
   actually been validated, and will assert if otherwise.  The front
   end should ensure we're only called with verified values.
*/
extern ULong amd64g_dirtyhelper_PCMPxSTRx (
          VexGuestAMD64State*,
          HWord opc4_and_imm,
          HWord gstOffL, HWord gstOffR,
          HWord edxIN, HWord eaxIN
       );
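
/* Call/unpack sketch (illustrative only; 'st', 'offL' and 'offR' are
   hypothetical placeholders for a guest state pointer and two XMM
   guest state offsets).  For a PCMPISTRI (4th opcode byte 0x63) with
   an imm8 of, say, 0x0C, and with the result unpacked as documented
   above: */
// HWord opc_and_imm = (0x63 << 8) | 0x0C;
// ULong res    = amd64g_dirtyhelper_PCMPxSTRx( st, opc_and_imm,
//                                              offL, offR,
//                                              0/*edxIN*/, 0/*eaxIN*/ );
// ULong oszacp = res & 0xFFFF;          /* new OSZACP flag bits        */
// ULong newECX = (res >> 16) & 0xFFFF;  /* new %ecx (xSTRI variants)   */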

/* Implementation of Intel AES instructions as described in
   Intel  Advanced Vector Extensions
          Programming Reference
          MARCH 2008
          319433-002.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 2 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state, no
   accesses of memory, is a pure function.

   opc4 contains the 4th byte of the opcode.  The front end should only
   give opcodes corresponding to AESENC/AESENCLAST/AESDEC/AESDECLAST/AESIMC
   (will assert otherwise).

   gstOffL and gstOffR are the guest state offsets for the two XMM
   register inputs, gstOffD is the guest state offset for the XMM register
   output.  We never have to deal with the memory case since that is handled
   by pre-loading the relevant value into the fake XMM16 register.

*/
extern void amd64g_dirtyhelper_AES (
          VexGuestAMD64State* gst,
          HWord opc4, HWord gstOffD,
          HWord gstOffL, HWord gstOffR
       );
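
/* Call sketch (illustrative only; 'st' and the offsets are hypothetical
   placeholders).  The 4th opcode byte is 0xDC for AESENC, 0xDD for
   AESENCLAST, 0xDE for AESDEC, 0xDF for AESDECLAST and 0xDB for AESIMC,
   so one AESENC round between two XMM registers would be requested as: */
// amd64g_dirtyhelper_AES( st, 0xDC/*AESENC*/, offD, offL, offR );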

/* Implementation of AESKEYGENASSIST.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 1 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state, no
   accesses of memory, is a pure function.

   imm8 is the Round Key constant.

   gstOffL and gstOffR are the guest state offsets for the XMM register
   input and output respectively.  We never have to deal with the memory case since
   that is handled by pre-loading the relevant value into the fake
   XMM16 register.

*/
extern void amd64g_dirtyhelper_AESKEYGENASSIST (
          VexGuestAMD64State* gst,
          HWord imm8,
          HWord gstOffL, HWord gstOffR
       );

//extern void  amd64g_dirtyhelper_CPUID_sse0 ( VexGuestAMD64State* );
//extern void  amd64g_dirtyhelper_CPUID_sse1 ( VexGuestAMD64State* );
//extern void  amd64g_dirtyhelper_CPUID_sse2 ( VexGuestAMD64State* );

//extern void  amd64g_dirtyhelper_FSAVE ( VexGuestAMD64State*, HWord );

//extern VexEmWarn
//            amd64g_dirtyhelper_FRSTOR ( VexGuestAMD64State*, HWord );

//extern void amd64g_dirtyhelper_FSTENV ( VexGuestAMD64State*, HWord );

//extern VexEmWarn
//            amd64g_dirtyhelper_FLDENV ( VexGuestAMD64State*, HWord );



/*---------------------------------------------------------*/
/*--- Condition code stuff                              ---*/
/*---------------------------------------------------------*/

/* rflags masks */
#define AMD64G_CC_SHIFT_O   11
#define AMD64G_CC_SHIFT_S   7
#define AMD64G_CC_SHIFT_Z   6
#define AMD64G_CC_SHIFT_A   4
#define AMD64G_CC_SHIFT_C   0
#define AMD64G_CC_SHIFT_P   2

#define AMD64G_CC_MASK_O    (1ULL << AMD64G_CC_SHIFT_O)
#define AMD64G_CC_MASK_S    (1ULL << AMD64G_CC_SHIFT_S)
#define AMD64G_CC_MASK_Z    (1ULL << AMD64G_CC_SHIFT_Z)
#define AMD64G_CC_MASK_A    (1ULL << AMD64G_CC_SHIFT_A)
#define AMD64G_CC_MASK_C    (1ULL << AMD64G_CC_SHIFT_C)
#define AMD64G_CC_MASK_P    (1ULL << AMD64G_CC_SHIFT_P)
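
/* The shift amounts above are the architectural bit positions of O, S,
   Z, A, C and P in %rflags, so the masks can be OR'd together directly.
   For example, a flags value with just Z and C set is: */
// ULong zc = AMD64G_CC_MASK_Z | AMD64G_CC_MASK_C;   /* == 0x41 */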

/* FPU flag masks */
#define AMD64G_FC_SHIFT_C3   14
#define AMD64G_FC_SHIFT_C2   10
#define AMD64G_FC_SHIFT_C1   9
#define AMD64G_FC_SHIFT_C0   8

#define AMD64G_FC_MASK_C3    (1ULL << AMD64G_FC_SHIFT_C3)
#define AMD64G_FC_MASK_C2    (1ULL << AMD64G_FC_SHIFT_C2)
#define AMD64G_FC_MASK_C1    (1ULL << AMD64G_FC_SHIFT_C1)
#define AMD64G_FC_MASK_C0    (1ULL << AMD64G_FC_SHIFT_C0)


/* %RFLAGS thunk descriptors.  A four-word thunk is used to record
   details of the most recent flag-setting operation, so the flags can
   be computed later if needed.  It is possible to do this a little
   more efficiently using a 3-word thunk, but that makes it impossible
   to describe the flag data dependencies sufficiently accurately for
   Memcheck.  Hence 4 words are used, with minimal loss of efficiency.

   The four words are:

      CC_OP, which describes the operation.

      CC_DEP1 and CC_DEP2.  These are arguments to the operation.
         We want Memcheck to believe that the resulting flags are
         data-dependent on both CC_DEP1 and CC_DEP2, hence the
         name DEP.

      CC_NDEP.  This is a 3rd argument to the operation which is
         sometimes needed.  We arrange things so that Memcheck does
         not believe the resulting flags are data-dependent on CC_NDEP
         ("not dependent").

   To make Memcheck believe that (the definedness of) the encoded
   flags depends only on (the definedness of) CC_DEP1 and CC_DEP2
   requires two things:

   (1) In the guest state layout info (amd64guest_layout), CC_OP and
       CC_NDEP are marked as always defined.

   (2) When passing the thunk components to an evaluation function
       (calculate_condition, calculate_rflags_all, calculate_rflags_c)
       the IRCallee's mcx_mask must be set so as to exclude from
       consideration all passed args except CC_DEP1 and CC_DEP2.

   Strictly speaking only (2) is necessary for correctness.  However,
   (1) helps efficiency in that since (2) means we never ask about the
   definedness of CC_OP or CC_NDEP, we may as well not even bother to
   track their definedness.

   When building the thunk, it is always necessary to write words into
   CC_DEP1 and CC_DEP2, even if those args are not used given the
   CC_OP field (eg, CC_DEP2 is not used if CC_OP is CC_LOGICB/W/L/Q).
   This is important because otherwise Memcheck could give false
   positives as it does not understand the relationship between the
   CC_OP field and CC_DEP1 and CC_DEP2, and so believes that the
   definedness of the stored flags always depends on both CC_DEP1 and
   CC_DEP2.

   However, it is only necessary to set CC_NDEP when the CC_OP value
   requires it, because Memcheck ignores CC_NDEP, and the evaluation
   functions do understand the CC_OP fields and will only examine
   CC_NDEP for suitable values of CC_OP.

   A summary of the field usages is:

   Operation          DEP1               DEP2               NDEP
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

   add/sub/mul        first arg          second arg         unused

   adc/sbb            first arg          (second arg)
                                         XOR old_carry      old_carry

   and/or/xor         result             zero               unused

   inc/dec            result             zero               old_carry

   shl/shr/sar        result             subshifted-        unused
                                         result

   rol/ror            result             zero               old_flags

   copy               old_flags          zero               unused.


   Therefore Memcheck will believe the following:

   * add/sub/mul -- definedness of result flags depends on definedness
     of both args.

   * adc/sbb -- definedness of result flags depends on definedness of
     both args and definedness of the old C flag.  Because only two
     DEP fields are available, the old C flag is XOR'd into the second
     arg so that Memcheck sees the data dependency on it.  That means
     the NDEP field must contain a second copy of the old C flag
     so that the evaluation functions can correctly recover the second
     arg.

   * and/or/xor are straightforward -- definedness of result flags
     depends on definedness of result value.

   * inc/dec -- definedness of result flags depends only on
     definedness of result.  This isn't really true -- it also depends
     on the old C flag.  However, we don't want Memcheck to see that,
     and so the old C flag must be passed in NDEP and not in DEP2.
     It's inconceivable that a compiler would generate code that puts
     the C flag in an undefined state, then does an inc/dec, which
     leaves C unchanged, and then makes a conditional jump/move based
     on C.  So our fiction seems a good approximation.

   * shl/shr/sar -- straightforward, again, definedness of result
     flags depends on definedness of result value.  The subshifted
     value (value shifted one less) is also needed, but its
     definedness is the same as the definedness of the shifted value.

   * rol/ror -- these only set O and C, and leave S Z A P alone.
     However it seems prudent (as per inc/dec) to say the definedness
     of all resulting flags depends on the definedness of the result,
     hence the old flags must go in as NDEP and not DEP2.

   * rcl/rcr are too difficult to do in-line, and so are done by a
     helper function.  They are not part of this scheme.  The helper
     function takes the value to be rotated, the rotate amount and the
     old flags, and returns the new flags and the rotated value.
     Since the helper's mcx_mask does not have any set bits, Memcheck
     will lazily propagate undefinedness from any of the 3 args into
     both results (flags and actual value).
*/
enum {
    AMD64G_CC_OP_COPY=0,  /* DEP1 = current flags, DEP2 = 0, NDEP = unused */
                          /* just copy DEP1 to output */

    AMD64G_CC_OP_ADDB,    /* 1 */
    AMD64G_CC_OP_ADDW,    /* 2 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_ADDL,    /* 3 */
    AMD64G_CC_OP_ADDQ,    /* 4 */

    AMD64G_CC_OP_SUBB,    /* 5 */
    AMD64G_CC_OP_SUBW,    /* 6 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_SUBL,    /* 7 */
    AMD64G_CC_OP_SUBQ,    /* 8 */

    AMD64G_CC_OP_ADCB,    /* 9 */
    AMD64G_CC_OP_ADCW,    /* 10 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
    AMD64G_CC_OP_ADCL,    /* 11 */
    AMD64G_CC_OP_ADCQ,    /* 12 */

    AMD64G_CC_OP_SBBB,    /* 13 */
    AMD64G_CC_OP_SBBW,    /* 14 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
    AMD64G_CC_OP_SBBL,    /* 15 */
    AMD64G_CC_OP_SBBQ,    /* 16 */

    AMD64G_CC_OP_LOGICB,  /* 17 */
    AMD64G_CC_OP_LOGICW,  /* 18 DEP1 = result, DEP2 = 0, NDEP = unused */
    AMD64G_CC_OP_LOGICL,  /* 19 */
    AMD64G_CC_OP_LOGICQ,  /* 20 */

    AMD64G_CC_OP_INCB,    /* 21 */
    AMD64G_CC_OP_INCW,    /* 22 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
    AMD64G_CC_OP_INCL,    /* 23 */
    AMD64G_CC_OP_INCQ,    /* 24 */

    AMD64G_CC_OP_DECB,    /* 25 */
    AMD64G_CC_OP_DECW,    /* 26 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
    AMD64G_CC_OP_DECL,    /* 27 */
    AMD64G_CC_OP_DECQ,    /* 28 */

    AMD64G_CC_OP_SHLB,    /* 29 DEP1 = res, DEP2 = res', NDEP = unused */
    AMD64G_CC_OP_SHLW,    /* 30 where res' is like res but shifted one bit less */
    AMD64G_CC_OP_SHLL,    /* 31 */
    AMD64G_CC_OP_SHLQ,    /* 32 */

    AMD64G_CC_OP_SHRB,    /* 33 DEP1 = res, DEP2 = res', NDEP = unused */
    AMD64G_CC_OP_SHRW,    /* 34 where res' is like res but shifted one bit less */
    AMD64G_CC_OP_SHRL,    /* 35 */
    AMD64G_CC_OP_SHRQ,    /* 36 */

    AMD64G_CC_OP_ROLB,    /* 37 */
    AMD64G_CC_OP_ROLW,    /* 38 DEP1 = res, DEP2 = 0, NDEP = old flags */
    AMD64G_CC_OP_ROLL,    /* 39 */
    AMD64G_CC_OP_ROLQ,    /* 40 */

    AMD64G_CC_OP_RORB,    /* 41 */
    AMD64G_CC_OP_RORW,    /* 42 DEP1 = res, DEP2 = 0, NDEP = old flags */
    AMD64G_CC_OP_RORL,    /* 43 */
    AMD64G_CC_OP_RORQ,    /* 44 */

    AMD64G_CC_OP_UMULB,   /* 45 */
    AMD64G_CC_OP_UMULW,   /* 46 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_UMULL,   /* 47 */
    AMD64G_CC_OP_UMULQ,   /* 48 */

    AMD64G_CC_OP_SMULB,   /* 49 */
    AMD64G_CC_OP_SMULW,   /* 50 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_SMULL,   /* 51 */
    AMD64G_CC_OP_SMULQ,   /* 52 */

    AMD64G_CC_OP_NUMBER
};
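
/* Worked sketch (illustrative only, not the actual implementation):
   following the conventions above, the OSZACP flags for the ADDQ case
   could be recovered from DEP1 (= argL) and DEP2 (= argR) like this.
   Parity of the low result byte is omitted for brevity. */
// static ULong sketch_rflags_ADDQ ( ULong argL, ULong argR )
// {
//    ULong res = argL + argR;
//    ULong cf  = res < argL;                              /* carry out of bit 63 */
//    ULong zf  = res == 0;
//    ULong sf  = res >> 63;
//    ULong af  = ((res ^ argL ^ argR) >> 4) & 1;          /* carry out of bit 3  */
//    ULong of  = ((~(argL ^ argR) & (argL ^ res)) >> 63) & 1;
//    return (of << AMD64G_CC_SHIFT_O) | (sf << AMD64G_CC_SHIFT_S)
//           | (zf << AMD64G_CC_SHIFT_Z) | (af << AMD64G_CC_SHIFT_A)
//           | (cf << AMD64G_CC_SHIFT_C);
// }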

typedef
   enum {
      AMD64CondO      = 0,  /* overflow           */
      AMD64CondNO     = 1,  /* no overflow        */

      AMD64CondB      = 2,  /* below              */
      AMD64CondNB     = 3,  /* not below          */

      AMD64CondZ      = 4,  /* zero               */
      AMD64CondNZ     = 5,  /* not zero           */

      AMD64CondBE     = 6,  /* below or equal     */
      AMD64CondNBE    = 7,  /* not below or equal */

      AMD64CondS      = 8,  /* negative           */
      AMD64CondNS     = 9,  /* not negative       */

      AMD64CondP      = 10, /* parity even        */
      AMD64CondNP     = 11, /* not parity even    */

      AMD64CondL      = 12, /* less               */
      AMD64CondNL     = 13, /* not less           */

      AMD64CondLE     = 14, /* less or equal      */
      AMD64CondNLE    = 15, /* not less or equal  */

      AMD64CondAlways = 16  /* HACK */
   }
   AMD64Condcode;
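
/* Note (an observation about the encoding above): each odd-numbered
   condition is the negation of the even-numbered one preceding it, so
   for the 16 real conditions (AMD64CondAlways excluded) the opposite
   condition is obtained by flipping the bottom bit: */
// static AMD64Condcode invertCond ( AMD64Condcode c )
// {
//    return (AMD64Condcode)(c ^ 1);   /* e.g. AMD64CondZ <-> AMD64CondNZ */
// }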

#endif /* ndef __VEX_GUEST_AMD64_DEFS_H */

/*---------------------------------------------------------------*/
/*--- end                                  guest_amd64_defs.h ---*/
/*---------------------------------------------------------------*/