// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
#define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_

namespace v8 {
namespace internal {
namespace compiler {

// X64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
  V(X64Add)                        \
  V(X64Add32)                      \
  V(X64And)                        \
  V(X64And32)                      \
  V(X64Cmp)                        \
  V(X64Cmp32)                      \
  V(X64Test)                       \
  V(X64Test32)                     \
  V(X64Or)                         \
  V(X64Or32)                       \
  V(X64Xor)                        \
  V(X64Xor32)                      \
  V(X64Sub)                        \
  V(X64Sub32)                      \
  V(X64Imul)                       \
  V(X64Imul32)                     \
  V(X64Idiv)                       \
  V(X64Idiv32)                     \
  V(X64Udiv)                       \
  V(X64Udiv32)                     \
  V(X64Not)                        \
  V(X64Not32)                      \
  V(X64Neg)                        \
  V(X64Neg32)                      \
  V(X64Shl)                        \
  V(X64Shl32)                      \
  V(X64Shr)                        \
  V(X64Shr32)                      \
  V(X64Sar)                        \
  V(X64Sar32)                      \
  V(X64Ror)                        \
  V(X64Ror32)                      \
  V(SSEFloat64Cmp)                 \
  V(SSEFloat64Add)                 \
  V(SSEFloat64Sub)                 \
  V(SSEFloat64Mul)                 \
  V(SSEFloat64Div)                 \
  V(SSEFloat64Mod)                 \
  V(SSEFloat64Sqrt)                \
  V(SSEFloat64ToInt32)             \
  V(SSEFloat64ToUint32)            \
  V(SSEInt32ToFloat64)             \
  V(SSEUint32ToFloat64)            \
  V(X64Movsxbl)                    \
  V(X64Movzxbl)                    \
  V(X64Movb)                       \
  V(X64Movsxwl)                    \
  V(X64Movzxwl)                    \
  V(X64Movw)                       \
  V(X64Movl)                       \
  V(X64Movsxlq)                    \
  V(X64Movq)                       \
  V(X64Movsd)                      \
  V(X64Movss)                      \
  V(X64Push)                       \
  V(X64StoreWriteBarrier)

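// A minimal sketch of how an X-macro list like the one above is typically
// consumed (illustrative only; the real enum is generated in the shared
// instruction-codes header, and X64OpcodeExample is a hypothetical name):
//
//   #define DECLARE_ARCH_OPCODE(Name) k##Name,
//   enum X64OpcodeExample {
//     TARGET_ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE)
//   };
//   #undef DECLARE_ARCH_OPCODE
//
// Each V(X64Add) entry expands to an enumerator kX64Add, so the opcode
// list and the generated enum stay in sync from a single definition.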

// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// R = register
// O = register or stack slot
// D = double register
// I = immediate (handle, external, int32)
// MR = [register]
// MI = [immediate]
// MRN = [register + register * N in {1, 2, 4, 8}]
// MRI = [register + immediate]
// MRNI = [register + register * N in {1, 2, 4, 8} + immediate]
#define TARGET_ADDRESSING_MODE_LIST(V) \
  V(MR)   /* [%r1] */                  \
  V(MRI)  /* [%r1 + K] */              \
  V(MR1I) /* [%r1 + %r2 + K] */        \
  V(MR2I) /* [%r1 + %r2*2 + K] */      \
  V(MR4I) /* [%r1 + %r2*4 + K] */      \
  V(MR8I) /* [%r1 + %r2*8 + K] */
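
// A minimal sketch of how the code generator might use an addressing mode
// after register allocation (illustrative only; the exact emission code and
// input layout are assumptions, not the real generator). The mode is decoded
// from the InstructionCode and determines how the instruction's inputs are
// folded into a single x64 memory operand:
//
//   MR:   inputs (base)              -> Operand(base, 0)
//   MRI:  inputs (base, disp)        -> Operand(base, disp)
//   MR8I: inputs (base, index, disp) -> Operand(base, index, times_8, disp)
//
// so, e.g., an X64Movl load with mode MR8I would be emitted roughly as
// movl(dst, Operand(base, index, times_8, disp)).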

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_