
/*---------------------------------------------------------------*/
/*--- begin                                guest_amd64_defs.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2013 OpenWorks LLP
      info (at) open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

/* Only to be used within the guest-amd64 directory. */

#ifndef __VEX_GUEST_AMD64_DEFS_H
#define __VEX_GUEST_AMD64_DEFS_H

#include "libvex_basictypes.h"
#include "libvex_emnote.h"              // VexEmNote
#include "libvex_guest_amd64.h"         // VexGuestAMD64State
#include "guest_generic_bb_to_IR.h"     // DisResult

/*---------------------------------------------------------*/
/*--- amd64 to IR conversion                            ---*/
/*---------------------------------------------------------*/

/* Convert one amd64 insn to IR.  See the type DisOneInstrFn in
   guest_generic_bb_to_IR.h. */
extern
DisResult disInstr_AMD64 ( IRSB*        irbb,
                           Bool         (*resteerOkFn) ( void*, Addr ),
                           Bool         resteerCisOk,
                           void*        callback_opaque,
                           const UChar* guest_code,
                           Long         delta,
                           Addr         guest_IP,
                           VexArch      guest_arch,
                           const VexArchInfo* archinfo,
                           const VexAbiInfo*  abiinfo,
                           VexEndness   host_endness,
                           Bool         sigill_diag );

/* Used by the optimiser to specialise calls to helpers. */
extern
IRExpr* guest_amd64_spechelper ( const HChar* function_name,
                                 IRExpr** args,
                                 IRStmt** precedingStmts,
                                 Int      n_precedingStmts );

/* Describes to the optimiser which parts of the guest state require
   precise memory exceptions.  This is logically part of the guest
   state description. */
extern
Bool guest_amd64_state_requires_precise_mem_exns ( Int, Int,
                                                   VexRegisterUpdates );

extern
VexGuestLayout amd64guest_layout;


/*---------------------------------------------------------*/
/*--- amd64 guest helpers                               ---*/
/*---------------------------------------------------------*/

/* --- CLEAN HELPERS --- */

extern ULong amd64g_calculate_rflags_all (
                ULong cc_op,
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
             );

extern ULong amd64g_calculate_rflags_c (
                ULong cc_op,
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
             );

extern ULong amd64g_calculate_condition (
                ULong/*AMD64Condcode*/ cond,
                ULong cc_op,
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
             );

extern ULong amd64g_calculate_FXAM ( ULong tag, ULong dbl );

extern ULong amd64g_calculate_RCR  (
                ULong arg, ULong rot_amt, ULong rflags_in, Long sz
             );

extern ULong amd64g_calculate_RCL  (
                ULong arg, ULong rot_amt, ULong rflags_in, Long sz
             );

extern ULong amd64g_calculate_pclmul(ULong s1, ULong s2, ULong which);

extern ULong amd64g_check_fldcw ( ULong fpucw );

extern ULong amd64g_create_fpucw ( ULong fpround );

extern ULong amd64g_check_ldmxcsr ( ULong mxcsr );

extern ULong amd64g_create_mxcsr ( ULong sseround );

extern VexEmNote amd64g_dirtyhelper_FLDENV  ( VexGuestAMD64State*, HWord );
extern VexEmNote amd64g_dirtyhelper_FRSTOR  ( VexGuestAMD64State*, HWord );
extern VexEmNote amd64g_dirtyhelper_FRSTORS ( VexGuestAMD64State*, HWord );

extern void amd64g_dirtyhelper_FSTENV  ( VexGuestAMD64State*, HWord );
extern void amd64g_dirtyhelper_FNSAVE  ( VexGuestAMD64State*, HWord );
extern void amd64g_dirtyhelper_FNSAVES ( VexGuestAMD64State*, HWord );

/* Translate a guest virtual_addr into a guest linear address by
   consulting the supplied LDT/GDT structures.  Their representation
   must be as specified in pub/libvex_guest_amd64.h.  To indicate a
   translation failure, 1<<32 is returned.  On success, the lower 32
   bits of the returned result indicate the linear address.
*/
//extern
//ULong amd64g_use_seg_selector ( HWord ldt, HWord gdt,
//                              UInt seg_selector, UInt virtual_addr );

extern ULong amd64g_calculate_mmx_pmaddwd  ( ULong, ULong );
extern ULong amd64g_calculate_mmx_psadbw   ( ULong, ULong );

extern ULong amd64g_calculate_sse_phminposuw ( ULong sLo, ULong sHi );

extern ULong amd64g_calc_crc32b ( ULong crcIn, ULong b );
extern ULong amd64g_calc_crc32w ( ULong crcIn, ULong w );
extern ULong amd64g_calc_crc32l ( ULong crcIn, ULong l );
extern ULong amd64g_calc_crc32q ( ULong crcIn, ULong q );

extern ULong amd64g_calc_mpsadbw ( ULong sHi, ULong sLo,
                                   ULong dHi, ULong dLo,
                                   ULong imm_and_return_control_bit );

extern ULong amd64g_calculate_pext  ( ULong, ULong );
extern ULong amd64g_calculate_pdep  ( ULong, ULong );

/* --- DIRTY HELPERS --- */

extern ULong amd64g_dirtyhelper_loadF80le  ( Addr/*addr*/ );

extern void  amd64g_dirtyhelper_storeF80le ( Addr/*addr*/, ULong/*data*/ );

extern void  amd64g_dirtyhelper_CPUID_baseline ( VexGuestAMD64State* st );
extern void  amd64g_dirtyhelper_CPUID_sse3_and_cx16 ( VexGuestAMD64State* st );
extern void  amd64g_dirtyhelper_CPUID_sse42_and_cx16 ( VexGuestAMD64State* st );
extern void  amd64g_dirtyhelper_CPUID_avx_and_cx16 ( VexGuestAMD64State* st );

extern void  amd64g_dirtyhelper_FINIT ( VexGuestAMD64State* );

extern void      amd64g_dirtyhelper_FXSAVE_ALL_EXCEPT_XMM
                    ( VexGuestAMD64State*, HWord );
extern VexEmNote amd64g_dirtyhelper_FXRSTOR_ALL_EXCEPT_XMM
                    ( VexGuestAMD64State*, HWord );

extern ULong amd64g_dirtyhelper_RDTSC ( void );
extern void  amd64g_dirtyhelper_RDTSCP ( VexGuestAMD64State* st );

extern ULong amd64g_dirtyhelper_IN  ( ULong portno, ULong sz/*1,2 or 4*/ );
extern void  amd64g_dirtyhelper_OUT ( ULong portno, ULong data,
                                      ULong sz/*1,2 or 4*/ );

extern void amd64g_dirtyhelper_SxDT ( void* address,
                                      ULong op /* 0 or 1 */ );

/* Helps with PCMP{I,E}STR{I,M}.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 2 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state for the xSTRM cases, no
   accesses of memory, is a pure function.

   opc4_and_imm contains (4th byte of opcode << 8) | the imm8 byte, so
   the callee knows which I/E and I/M variant it is dealing with and
   what the specific operation is.  The 4th byte of the opcode is in the
   range 0x60 to 0x63:
       istri  66 0F 3A 63
       istrm  66 0F 3A 62
       estri  66 0F 3A 61
       estrm  66 0F 3A 60

   gstOffL and gstOffR are the guest state offsets for the two XMM
   register inputs.  We never have to deal with the memory case since
   that is handled by pre-loading the relevant value into the fake
   XMM16 register.

   For ESTRx variants, edxIN and eaxIN hold the values of those two
   registers.

   In all cases, the bottom 16 bits of the result contain the new
   OSZACP %rflags values.  For xSTRI variants, bits[31:16] of the
   result hold the new %ecx value.  For xSTRM variants, the helper
   writes the result directly to the guest XMM0.

   Declarable side effects: in all cases, reads guest state at
   [gstOffL, +16) and [gstOffR, +16).  For xSTRM variants, also writes
   guest_XMM0.

   This helper expects to be called only with opc4_and_imm combinations
   that have actually been validated, and will assert otherwise.  The
   front end should ensure it is only called with verified values.
*/
extern ULong amd64g_dirtyhelper_PCMPxSTRx (
          VexGuestAMD64State*,
          HWord opc4_and_imm,
          HWord gstOffL, HWord gstOffR,
          HWord edxIN, HWord eaxIN
       );
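
/* Illustrative sketch (an editorial addition, not part of the original
   interface): how a front end might pack opc4_and_imm and unpack the
   result for an ISTRI-style use (4th opcode byte 0x63, per the table
   above).  The guest state offsets and the example_* name are
   placeholders, and the block is disabled with #if 0. */
#if 0
static inline void example_pcmpistri ( VexGuestAMD64State* gst,
                                       HWord gstOffL, HWord gstOffR,
                                       UChar imm8 )
{
   /* (4th byte of opcode << 8) | imm8, as described above. */
   HWord opc4_and_imm = (0x63 << 8) | imm8;
   ULong res = amd64g_dirtyhelper_PCMPxSTRx(gst, opc4_and_imm,
                                            gstOffL, gstOffR,
                                            0/*edxIN: ESTRx only*/,
                                            0/*eaxIN: ESTRx only*/);
   ULong newOSZACP = res & 0xFFFFULL;          /* bottom 16 bits */
   ULong newECX    = (res >> 16) & 0xFFFFULL;  /* bits[31:16], xSTRI only */
   (void)newOSZACP; (void)newECX;
}
#endif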

/* Implementation of Intel AES instructions as described in
   Intel  Advanced Vector Extensions
          Programming Reference
          MARCH 2008
          319433-002.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 2 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state, no
   accesses of memory, is a pure function.

   opc4 contains the 4th byte of the opcode.  The front end should only
   supply opcodes corresponding to AESENC/AESENCLAST/AESDEC/AESDECLAST/AESIMC
   (the helper will assert otherwise).

   gstOffL and gstOffR are the guest state offsets for the two XMM
   register inputs, gstOffD is the guest state offset for the XMM register
   output.  We never have to deal with the memory case since that is handled
   by pre-loading the relevant value into the fake XMM16 register.

*/
extern void amd64g_dirtyhelper_AES (
          VexGuestAMD64State* gst,
          HWord opc4, HWord gstOffD,
          HWord gstOffL, HWord gstOffR
       );

/* Implementation of AESKEYGENASSIST.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 1 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state, no
   accesses of memory, is a pure function.

   imm8 is the Round Key constant.

   gstOffL and gstOffR are the guest state offsets of the XMM register
   input and the XMM register output respectively.  We never have to
   deal with the memory case since that is handled by pre-loading the
   relevant value into the fake XMM16 register.

*/
extern void amd64g_dirtyhelper_AESKEYGENASSIST (
          VexGuestAMD64State* gst,
          HWord imm8,
          HWord gstOffL, HWord gstOffR
       );

//extern void  amd64g_dirtyhelper_CPUID_sse0 ( VexGuestAMD64State* );
//extern void  amd64g_dirtyhelper_CPUID_sse1 ( VexGuestAMD64State* );
//extern void  amd64g_dirtyhelper_CPUID_sse2 ( VexGuestAMD64State* );

//extern void  amd64g_dirtyhelper_FSAVE ( VexGuestAMD64State*, HWord );

//extern VexEmNote
//            amd64g_dirtyhelper_FRSTOR ( VexGuestAMD64State*, HWord );

//extern void amd64g_dirtyhelper_FSTENV ( VexGuestAMD64State*, HWord );

//extern VexEmNote
//            amd64g_dirtyhelper_FLDENV ( VexGuestAMD64State*, HWord );



/*---------------------------------------------------------*/
/*--- Condition code stuff                              ---*/
/*---------------------------------------------------------*/

/* rflags masks */
#define AMD64G_CC_SHIFT_O   11
#define AMD64G_CC_SHIFT_S   7
#define AMD64G_CC_SHIFT_Z   6
#define AMD64G_CC_SHIFT_A   4
#define AMD64G_CC_SHIFT_C   0
#define AMD64G_CC_SHIFT_P   2

#define AMD64G_CC_MASK_O    (1ULL << AMD64G_CC_SHIFT_O)
#define AMD64G_CC_MASK_S    (1ULL << AMD64G_CC_SHIFT_S)
#define AMD64G_CC_MASK_Z    (1ULL << AMD64G_CC_SHIFT_Z)
#define AMD64G_CC_MASK_A    (1ULL << AMD64G_CC_SHIFT_A)
#define AMD64G_CC_MASK_C    (1ULL << AMD64G_CC_SHIFT_C)
#define AMD64G_CC_MASK_P    (1ULL << AMD64G_CC_SHIFT_P)
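
/* Illustrative sketch (an editorial addition, not part of the original
   interface): the masks above pick individual OSZACP bits out of a value
   computed by the rflags helpers declared earlier.  The example_* name is
   a placeholder and the block is disabled with #if 0. */
#if 0
static inline Bool example_carry_is_set ( ULong cc_op, ULong cc_dep1,
                                          ULong cc_dep2, ULong cc_ndep )
{
   /* Compute all of O S Z A C P, then isolate the carry bit. */
   ULong rflags
      = amd64g_calculate_rflags_all(cc_op, cc_dep1, cc_dep2, cc_ndep);
   return (rflags & AMD64G_CC_MASK_C) != 0 ? True : False;
}
#endif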

/* FPU flag masks */
#define AMD64G_FC_SHIFT_C3   14
#define AMD64G_FC_SHIFT_C2   10
#define AMD64G_FC_SHIFT_C1   9
#define AMD64G_FC_SHIFT_C0   8

#define AMD64G_FC_MASK_C3    (1ULL << AMD64G_FC_SHIFT_C3)
#define AMD64G_FC_MASK_C2    (1ULL << AMD64G_FC_SHIFT_C2)
#define AMD64G_FC_MASK_C1    (1ULL << AMD64G_FC_SHIFT_C1)
#define AMD64G_FC_MASK_C0    (1ULL << AMD64G_FC_SHIFT_C0)
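
/* Illustrative sketch (an editorial addition, not part of the original
   interface): assuming amd64g_calculate_FXAM reports its C3..C0 bits in
   the positions given by these masks (an assumption, not stated in this
   header), a caller could test an individual FPU condition bit like this.
   The example_* name is a placeholder and the block is disabled with
   #if 0. */
#if 0
static inline Bool example_fxam_c3_is_set ( ULong tag, ULong dbl )
{
   ULong c3210 = amd64g_calculate_FXAM(tag, dbl);
   return (c3210 & AMD64G_FC_MASK_C3) != 0 ? True : False;
}
#endif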


/* %RFLAGS thunk descriptors.  A four-word thunk is used to record
   details of the most recent flag-setting operation, so the flags can
   be computed later if needed.  It is possible to do this a little
   more efficiently using a 3-word thunk, but that makes it impossible
   to describe the flag data dependencies sufficiently accurately for
   Memcheck.  Hence 4 words are used, with minimal loss of efficiency.

   The four words are:

      CC_OP, which describes the operation.

      CC_DEP1 and CC_DEP2.  These are arguments to the operation.
         We want Memcheck to believe that the resulting flags are
         data-dependent on both CC_DEP1 and CC_DEP2, hence the
         name DEP.

      CC_NDEP.  This is a 3rd argument to the operation which is
         sometimes needed.  We arrange things so that Memcheck does
         not believe the resulting flags are data-dependent on CC_NDEP
         ("not dependent").

   To make Memcheck believe that (the definedness of) the encoded
   flags depends only on (the definedness of) CC_DEP1 and CC_DEP2
   requires two things:

   (1) In the guest state layout info (amd64guest_layout), CC_OP and
       CC_NDEP are marked as always defined.

   (2) When passing the thunk components to an evaluation function
       (calculate_condition, calculate_eflags, calculate_eflags_c) the
       IRCallee's mcx_mask must be set so as to exclude from
       consideration all passed args except CC_DEP1 and CC_DEP2.

   Strictly speaking only (2) is necessary for correctness.  However,
   (1) helps efficiency in that since (2) means we never ask about the
   definedness of CC_OP or CC_NDEP, we may as well not even bother to
   track their definedness.

   When building the thunk, it is always necessary to write words into
   CC_DEP1 and CC_DEP2, even if those args are not used given the
   CC_OP field (eg, CC_DEP2 is not used by the LOGICB/W/L/Q operations).
   This is important because otherwise Memcheck could give false
   positives as it does not understand the relationship between the
   CC_OP field and CC_DEP1 and CC_DEP2, and so believes that the
   definedness of the stored flags always depends on both CC_DEP1 and
   CC_DEP2.

   However, it is only necessary to set CC_NDEP when the CC_OP value
   requires it, because Memcheck ignores CC_NDEP, and the evaluation
   functions do understand the CC_OP fields and will only examine
   CC_NDEP for suitable values of CC_OP.

   A summary of the field usages is:

   Operation          DEP1               DEP2               NDEP
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

   add/sub/mul        first arg          second arg         unused

   adc/sbb            first arg          (second arg)
                                         XOR old_carry      old_carry

   and/or/xor         result             zero               unused

   inc/dec            result             zero               old_carry

   shl/shr/sar        result             subshifted-        unused
                                         result

   rol/ror            result             zero               old_flags

   copy               old_flags          zero               unused.


   Therefore Memcheck will believe the following:

   * add/sub/mul -- definedness of result flags depends on definedness
     of both args.

   * adc/sbb -- definedness of result flags depends on definedness of
     both args and definedness of the old C flag.  Because only two
     DEP fields are available, the old C flag is XOR'd into the second
     arg so that Memcheck sees the data dependency on it.  That means
     the NDEP field must contain a second copy of the old C flag
     so that the evaluation functions can correctly recover the second
     arg.

   * and/or/xor are straightforward -- definedness of result flags
     depends on definedness of result value.

   * inc/dec -- definedness of result flags depends only on
     definedness of result.  This isn't really true -- it also depends
     on the old C flag.  However, we don't want Memcheck to see that,
     and so the old C flag must be passed in NDEP and not in DEP2.
     It's inconceivable that a compiler would generate code that puts
     the C flag in an undefined state, then does an inc/dec, which
     leaves C unchanged, and then makes a conditional jump/move based
     on C.  So our fiction seems a good approximation.

   * shl/shr/sar -- straightforward, again, definedness of result
     flags depends on definedness of result value.  The subshifted
     value (value shifted one less) is also needed, but its
     definedness is the same as the definedness of the shifted value.

   * rol/ror -- these only set O and C, and leave S Z A P alone.
     However it seems prudent (as per inc/dec) to say the definedness
     of all resulting flags depends on the definedness of the result,
     hence the old flags must go in as NDEP and not DEP2.

   * rcl/rcr are too difficult to do in-line, and so are done by a
     helper function.  They are not part of this scheme.  The helper
     function takes the value to be rotated, the rotate amount and the
     old flags, and returns the new flags and the rotated value.
     Since the helper's mcx_mask does not have any set bits, Memcheck
     will lazily propagate undefinedness from any of the 3 args into
     both results (flags and actual value).
*/
enum {
    AMD64G_CC_OP_COPY=0,  /* DEP1 = current flags, DEP2 = 0, NDEP = unused */
                          /* just copy DEP1 to output */

    AMD64G_CC_OP_ADDB,    /* 1 */
    AMD64G_CC_OP_ADDW,    /* 2 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_ADDL,    /* 3 */
    AMD64G_CC_OP_ADDQ,    /* 4 */

    AMD64G_CC_OP_SUBB,    /* 5 */
    AMD64G_CC_OP_SUBW,    /* 6 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_SUBL,    /* 7 */
    AMD64G_CC_OP_SUBQ,    /* 8 */

    AMD64G_CC_OP_ADCB,    /* 9 */
    AMD64G_CC_OP_ADCW,    /* 10 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
    AMD64G_CC_OP_ADCL,    /* 11 */
    AMD64G_CC_OP_ADCQ,    /* 12 */

    AMD64G_CC_OP_SBBB,    /* 13 */
    AMD64G_CC_OP_SBBW,    /* 14 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
    AMD64G_CC_OP_SBBL,    /* 15 */
    AMD64G_CC_OP_SBBQ,    /* 16 */

    AMD64G_CC_OP_LOGICB,  /* 17 */
    AMD64G_CC_OP_LOGICW,  /* 18 DEP1 = result, DEP2 = 0, NDEP = unused */
    AMD64G_CC_OP_LOGICL,  /* 19 */
    AMD64G_CC_OP_LOGICQ,  /* 20 */

    AMD64G_CC_OP_INCB,    /* 21 */
    AMD64G_CC_OP_INCW,    /* 22 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
    AMD64G_CC_OP_INCL,    /* 23 */
    AMD64G_CC_OP_INCQ,    /* 24 */

    AMD64G_CC_OP_DECB,    /* 25 */
    AMD64G_CC_OP_DECW,    /* 26 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
    AMD64G_CC_OP_DECL,    /* 27 */
    AMD64G_CC_OP_DECQ,    /* 28 */

    AMD64G_CC_OP_SHLB,    /* 29 DEP1 = res, DEP2 = res', NDEP = unused */
    AMD64G_CC_OP_SHLW,    /* 30 where res' is like res but shifted one bit less */
    AMD64G_CC_OP_SHLL,    /* 31 */
    AMD64G_CC_OP_SHLQ,    /* 32 */

    AMD64G_CC_OP_SHRB,    /* 33 DEP1 = res, DEP2 = res', NDEP = unused */
    AMD64G_CC_OP_SHRW,    /* 34 where res' is like res but shifted one bit less */
    AMD64G_CC_OP_SHRL,    /* 35 */
    AMD64G_CC_OP_SHRQ,    /* 36 */

    AMD64G_CC_OP_ROLB,    /* 37 */
    AMD64G_CC_OP_ROLW,    /* 38 DEP1 = res, DEP2 = 0, NDEP = old flags */
    AMD64G_CC_OP_ROLL,    /* 39 */
    AMD64G_CC_OP_ROLQ,    /* 40 */

    AMD64G_CC_OP_RORB,    /* 41 */
    AMD64G_CC_OP_RORW,    /* 42 DEP1 = res, DEP2 = 0, NDEP = old flags */
    AMD64G_CC_OP_RORL,    /* 43 */
    AMD64G_CC_OP_RORQ,    /* 44 */

    AMD64G_CC_OP_UMULB,   /* 45 */
    AMD64G_CC_OP_UMULW,   /* 46 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_UMULL,   /* 47 */
    AMD64G_CC_OP_UMULQ,   /* 48 */

    AMD64G_CC_OP_SMULB,   /* 49 */
    AMD64G_CC_OP_SMULW,   /* 50 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_SMULL,   /* 51 */
    AMD64G_CC_OP_SMULQ,   /* 52 */

    AMD64G_CC_OP_ANDN32,  /* 53 */
    AMD64G_CC_OP_ANDN64,  /* 54 DEP1 = res, DEP2 = 0, NDEP = unused */

    AMD64G_CC_OP_BLSI32,  /* 55 */
    AMD64G_CC_OP_BLSI64,  /* 56 DEP1 = res, DEP2 = arg, NDEP = unused */

    AMD64G_CC_OP_BLSMSK32,/* 57 */
    AMD64G_CC_OP_BLSMSK64,/* 58 DEP1 = res, DEP2 = arg, NDEP = unused */

    AMD64G_CC_OP_BLSR32,  /* 59 */
    AMD64G_CC_OP_BLSR64,  /* 60 DEP1 = res, DEP2 = arg, NDEP = unused */

    AMD64G_CC_OP_NUMBER
};
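
/* Illustrative sketch (an editorial addition, not part of the original
   interface): per the table above, a 64-bit ADD is described by the thunk
   (CC_OP = AMD64G_CC_OP_ADDQ, CC_DEP1 = argL, CC_DEP2 = argR, CC_NDEP
   unused but still written), and a later flag query is answered by the
   clean helpers declared earlier.  The example_* name is a placeholder
   and the block is disabled with #if 0. */
#if 0
static inline ULong example_carry_after_addq ( ULong argL, ULong argR )
{
   /* Evaluate just the carry flag resulting from argL + argR,
      directly from the thunk fields. */
   return amd64g_calculate_rflags_c(AMD64G_CC_OP_ADDQ, argL, argR, 0);
}
#endif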

typedef
   enum {
      AMD64CondO      = 0,  /* overflow           */
      AMD64CondNO     = 1,  /* no overflow        */

      AMD64CondB      = 2,  /* below              */
      AMD64CondNB     = 3,  /* not below          */

      AMD64CondZ      = 4,  /* zero               */
      AMD64CondNZ     = 5,  /* not zero           */

      AMD64CondBE     = 6,  /* below or equal     */
      AMD64CondNBE    = 7,  /* not below or equal */

      AMD64CondS      = 8,  /* negative           */
      AMD64CondNS     = 9,  /* not negative       */

      AMD64CondP      = 10, /* parity even        */
      AMD64CondNP     = 11, /* not parity even    */

      AMD64CondL      = 12, /* less               */
      AMD64CondNL     = 13, /* not less           */

      AMD64CondLE     = 14, /* less or equal      */
      AMD64CondNLE    = 15, /* not less or equal  */

      AMD64CondAlways = 16  /* HACK */
   }
   AMD64Condcode;
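
/* Illustrative sketch (an editorial addition, not part of the original
   interface): condition codes are evaluated against the flags thunk with
   amd64g_calculate_condition, eg testing "zero" after a 64-bit SUB.  The
   example_* name is a placeholder and the block is disabled with #if 0. */
#if 0
static inline Bool example_subq_gives_zero ( ULong argL, ULong argR )
{
   /* Nonzero iff the AMD64CondZ condition holds for argL - argR. */
   ULong holds = amd64g_calculate_condition(AMD64CondZ, AMD64G_CC_OP_SUBQ,
                                            argL, argR, 0);
   return holds != 0 ? True : False;
}
#endif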

#endif /* ndef __VEX_GUEST_AMD64_DEFS_H */

/*---------------------------------------------------------------*/
/*--- end                                  guest_amd64_defs.h ---*/
/*---------------------------------------------------------------*/
