// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
#define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_

namespace v8 {
namespace internal {
namespace compiler {

// X64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
  V(X64Add)                        \
  V(X64Add32)                      \
  V(X64And)                        \
  V(X64And32)                      \
  V(X64Cmp)                        \
  V(X64Cmp32)                      \
  V(X64Cmp16)                      \
  V(X64Cmp8)                       \
  V(X64Test)                       \
  V(X64Test32)                     \
  V(X64Test16)                     \
  V(X64Test8)                      \
  V(X64Or)                         \
  V(X64Or32)                       \
  V(X64Xor)                        \
  V(X64Xor32)                      \
  V(X64Sub)                        \
  V(X64Sub32)                      \
  V(X64Imul)                       \
  V(X64Imul32)                     \
  V(X64ImulHigh32)                 \
  V(X64UmulHigh32)                 \
  V(X64Idiv)                       \
  V(X64Idiv32)                     \
  V(X64Udiv)                       \
  V(X64Udiv32)                     \
  V(X64Not)                        \
  V(X64Not32)                      \
  V(X64Neg)                        \
  V(X64Neg32)                      \
  V(X64Shl)                        \
  V(X64Shl32)                      \
  V(X64Shr)                        \
  V(X64Shr32)                      \
  V(X64Sar)                        \
  V(X64Sar32)                      \
  V(X64Ror)                        \
  V(X64Ror32)                      \
  V(X64Lzcnt)                      \
  V(X64Lzcnt32)                    \
  V(X64Tzcnt)                      \
  V(X64Tzcnt32)                    \
  V(X64Popcnt)                     \
  V(X64Popcnt32)                   \
  V(SSEFloat32Cmp)                 \
  V(SSEFloat32Add)                 \
  V(SSEFloat32Sub)                 \
  V(SSEFloat32Mul)                 \
  V(SSEFloat32Div)                 \
  V(SSEFloat32Abs)                 \
  V(SSEFloat32Neg)                 \
  V(SSEFloat32Sqrt)                \
  V(SSEFloat32ToFloat64)           \
  V(SSEFloat32ToInt32)             \
  V(SSEFloat32ToUint32)            \
  V(SSEFloat32Round)               \
  V(SSEFloat64Cmp)                 \
  V(SSEFloat64Add)                 \
  V(SSEFloat64Sub)                 \
  V(SSEFloat64Mul)                 \
  V(SSEFloat64Div)                 \
  V(SSEFloat64Mod)                 \
  V(SSEFloat64Abs)                 \
  V(SSEFloat64Neg)                 \
  V(SSEFloat64Sqrt)                \
  V(SSEFloat64Round)               \
  V(SSEFloat32Max)                 \
  V(SSEFloat64Max)                 \
  V(SSEFloat32Min)                 \
  V(SSEFloat64Min)                 \
  V(SSEFloat64ToFloat32)           \
  V(SSEFloat64ToInt32)             \
  V(SSEFloat64ToUint32)            \
  V(SSEFloat32ToInt64)             \
  V(SSEFloat64ToInt64)             \
  V(SSEFloat32ToUint64)            \
  V(SSEFloat64ToUint64)            \
  V(SSEInt32ToFloat64)             \
  V(SSEInt32ToFloat32)             \
  V(SSEInt64ToFloat32)             \
  V(SSEInt64ToFloat64)             \
  V(SSEUint64ToFloat32)            \
  V(SSEUint64ToFloat64)            \
  V(SSEUint32ToFloat64)            \
  V(SSEUint32ToFloat32)            \
  V(SSEFloat64ExtractLowWord32)    \
  V(SSEFloat64ExtractHighWord32)   \
  V(SSEFloat64InsertLowWord32)     \
  V(SSEFloat64InsertHighWord32)    \
  V(SSEFloat64LoadLowWord32)       \
  V(SSEFloat64SilenceNaN)          \
  V(AVXFloat32Cmp)                 \
  V(AVXFloat32Add)                 \
  V(AVXFloat32Sub)                 \
  V(AVXFloat32Mul)                 \
  V(AVXFloat32Div)                 \
  V(AVXFloat64Cmp)                 \
  V(AVXFloat64Add)                 \
  V(AVXFloat64Sub)                 \
  V(AVXFloat64Mul)                 \
  V(AVXFloat64Div)                 \
  V(AVXFloat64Abs)                 \
  V(AVXFloat64Neg)                 \
  V(AVXFloat32Abs)                 \
  V(AVXFloat32Neg)                 \
  V(X64Movsxbl)                    \
  V(X64Movzxbl)                    \
  V(X64Movsxbq)                    \
  V(X64Movzxbq)                    \
  V(X64Movb)                       \
  V(X64Movsxwl)                    \
  V(X64Movzxwl)                    \
  V(X64Movsxwq)                    \
  V(X64Movzxwq)                    \
  V(X64Movw)                       \
  V(X64Movl)                       \
  V(X64Movsxlq)                    \
  V(X64Movq)                       \
  V(X64Movsd)                      \
  V(X64Movss)                      \
  V(X64BitcastFI)                  \
  V(X64BitcastDL)                  \
  V(X64BitcastIF)                  \
  V(X64BitcastLD)                  \
  V(X64Lea32)                      \
  V(X64Lea)                        \
  V(X64Dec32)                      \
  V(X64Inc32)                      \
  V(X64Push)                       \
  V(X64Poke)                       \
  V(X64StackCheck)                 \
  V(X64Xchgb)                      \
  V(X64Xchgw)                      \
  V(X64Xchgl)                      \
  V(X64Int32x4Create)              \
  V(X64Int32x4ExtractLane)         \
  V(X64Int32x4ReplaceLane)         \
  V(X64Int32x4Add)                 \
  V(X64Int32x4Sub)

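// A minimal sketch (not part of this header) of how an X-macro list like the
// one above is typically consumed; in V8 the real expansion happens elsewhere
// in the compiler (instruction-codes.h) and may differ in detail:
//
//   #define DECLARE_ARCH_OPCODE(Name) k##Name,
//   enum ArchOpcode { TARGET_ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE) };
//   #undef DECLARE_ARCH_OPCODE
//
// Each V(...) entry above then becomes an enumerator such as kX64Add or
// kSSEFloat64Mod that the instruction selector emits and the code generator
// switches over.
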
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// M = memory operand
// R = base register
// N = index register * N for N in {1, 2, 4, 8}
// I = immediate displacement (32-bit signed integer)

#define TARGET_ADDRESSING_MODE_LIST(V) \
  V(MR)   /* [%r1            ] */      \
  V(MRI)  /* [%r1         + K] */      \
  V(MR1)  /* [%r1 + %r2*1    ] */      \
  V(MR2)  /* [%r1 + %r2*2    ] */      \
  V(MR4)  /* [%r1 + %r2*4    ] */      \
  V(MR8)  /* [%r1 + %r2*8    ] */      \
  V(MR1I) /* [%r1 + %r2*1 + K] */      \
  V(MR2I) /* [%r1 + %r2*2 + K] */      \
  V(MR4I) /* [%r1 + %r2*4 + K] */      \
  V(MR8I) /* [%r1 + %r2*8 + K] */      \
  V(M1)   /* [      %r2*1    ] */      \
  V(M2)   /* [      %r2*2    ] */      \
  V(M4)   /* [      %r2*4    ] */      \
  V(M8)   /* [      %r2*8    ] */      \
  V(M1I)  /* [      %r2*1 + K] */      \
  V(M2I)  /* [      %r2*2 + K] */      \
  V(M4I)  /* [      %r2*4 + K] */      \
  V(M8I)  /* [      %r2*8 + K] */      \
  V(Root) /* [%root       + K] */

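// For illustration only (the field helper and kMode_* names below come from
// the shared instruction-codes machinery, not from this header, so treat them
// as assumptions): the selected addressing mode is packed into the same
// InstructionCode word as the opcode, so that after register allocation the
// code generator knows which assembler operand form to build, e.g.:
//
//   // kX64Movl with mode MR4I reads from [%r1 + %r2*4 + K]:
//   InstructionCode code =
//       kX64Movl | AddressingModeField::encode(kMode_MR4I);
//
// The kMode_* enumerators are generated from TARGET_ADDRESSING_MODE_LIST in
// the same X-macro fashion as the opcodes above.
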
enum X64MemoryProtection { kUnprotected = 0, kProtected = 1 };

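// A sketch, for illustration only, of how this flag is typically attached to
// a memory-access instruction during instruction selection (MiscField is part
// of the shared instruction-codes machinery, not this header, so its use here
// is an assumption): an access that must be covered by a trap handler, e.g. a
// WebAssembly load whose out-of-bounds fault should be caught, is marked
// kProtected:
//
//   InstructionCode opcode = kX64Movl;
//   opcode |= MiscField::encode(X64MemoryProtection::kProtected);
//
// kUnprotected is the default for ordinary accesses.
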
}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_