// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-scheduler.h"

namespace v8 {
namespace internal {
namespace compiler {

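// Instruction scheduling is supported on x64.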
bool InstructionScheduler::SchedulerSupported() { return true; }


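// Computes the scheduler flags for an x64-specific opcode: kIsLoadOperation
// marks instructions that read from memory, kHasSideEffect marks those whose
// effects (e.g. stores) must not be reordered, and kNoOpcodeFlags marks pure
// register operations. The scheduler uses these flags to build ordering
// dependencies between instructions.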
int InstructionScheduler::GetTargetInstructionFlags(
    const Instruction* instr) const {
  switch (instr->arch_opcode()) {
    case kX64Add:
    case kX64Add32:
    case kX64And:
    case kX64And32:
    case kX64Cmp:
    case kX64Cmp32:
    case kX64Test:
    case kX64Test32:
    case kX64Or:
    case kX64Or32:
    case kX64Xor:
    case kX64Xor32:
    case kX64Sub:
    case kX64Sub32:
    case kX64Imul:
    case kX64Imul32:
    case kX64ImulHigh32:
    case kX64UmulHigh32:
    case kX64Idiv:
    case kX64Idiv32:
    case kX64Udiv:
    case kX64Udiv32:
    case kX64Not:
    case kX64Not32:
    case kX64Neg:
    case kX64Neg32:
    case kX64Shl:
    case kX64Shl32:
    case kX64Shr:
    case kX64Shr32:
    case kX64Sar:
    case kX64Sar32:
    case kX64Ror:
    case kX64Ror32:
    case kX64Lzcnt:
    case kX64Lzcnt32:
    case kX64Tzcnt:
    case kX64Tzcnt32:
    case kX64Popcnt:
    case kX64Popcnt32:
    case kSSEFloat32Cmp:
    case kSSEFloat32Add:
    case kSSEFloat32Sub:
    case kSSEFloat32Mul:
    case kSSEFloat32Div:
    case kSSEFloat32Abs:
    case kSSEFloat32Neg:
    case kSSEFloat32Sqrt:
    case kSSEFloat32Round:
    case kSSEFloat32Max:
    case kSSEFloat32Min:
    case kSSEFloat32ToFloat64:
    case kSSEFloat64Cmp:
    case kSSEFloat64Add:
    case kSSEFloat64Sub:
    case kSSEFloat64Mul:
    case kSSEFloat64Div:
    case kSSEFloat64Mod:
    case kSSEFloat64Abs:
    case kSSEFloat64Neg:
    case kSSEFloat64Sqrt:
    case kSSEFloat64Round:
    case kSSEFloat64Max:
    case kSSEFloat64Min:
    case kSSEFloat64ToFloat32:
    case kSSEFloat64ToInt32:
    case kSSEFloat64ToUint32:
    case kSSEFloat64ToInt64:
    case kSSEFloat32ToInt64:
    case kSSEFloat64ToUint64:
    case kSSEFloat32ToUint64:
    case kSSEInt32ToFloat64:
    case kSSEInt64ToFloat32:
    case kSSEInt64ToFloat64:
    case kSSEUint64ToFloat32:
    case kSSEUint64ToFloat64:
    case kSSEUint32ToFloat64:
    case kSSEFloat64ExtractLowWord32:
    case kSSEFloat64ExtractHighWord32:
    case kSSEFloat64InsertLowWord32:
    case kSSEFloat64InsertHighWord32:
    case kSSEFloat64LoadLowWord32:
    case kAVXFloat32Cmp:
    case kAVXFloat32Add:
    case kAVXFloat32Sub:
    case kAVXFloat32Mul:
    case kAVXFloat32Div:
    case kAVXFloat32Max:
    case kAVXFloat32Min:
    case kAVXFloat64Cmp:
    case kAVXFloat64Add:
    case kAVXFloat64Sub:
    case kAVXFloat64Mul:
    case kAVXFloat64Div:
    case kAVXFloat64Max:
    case kAVXFloat64Min:
    case kAVXFloat64Abs:
    case kAVXFloat64Neg:
    case kAVXFloat32Abs:
    case kAVXFloat32Neg:
    case kX64BitcastFI:
    case kX64BitcastDL:
    case kX64BitcastIF:
    case kX64BitcastLD:
    case kX64Lea32:
    case kX64Lea:
    case kX64Dec32:
    case kX64Inc32:
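      // These operations only access memory when a memory operand is folded
      // in (i.e. the addressing mode is not kMode_None). In that case they
      // are conservatively treated as both a load and a side effect.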
      return (instr->addressing_mode() == kMode_None)
          ? kNoOpcodeFlags
          : kIsLoadOperation | kHasSideEffect;

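    // Sign- and zero-extending moves always have a register output; they are
    // loads whenever the source operand comes from memory.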
    case kX64Movsxbl:
    case kX64Movzxbl:
    case kX64Movsxwl:
    case kX64Movzxwl:
    case kX64Movsxlq:
      DCHECK_GE(instr->InputCount(), 1);
      return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
                                             : kIsLoadOperation;

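    // Byte and word moves never have an output here: loads of those widths
    // use the extending moves above, so these are always stores.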
    case kX64Movb:
    case kX64Movw:
      return kHasSideEffect;

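    // A 32-bit move is a load when it produces a result and a store
    // otherwise.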
    case kX64Movl:
      if (instr->HasOutput()) {
        DCHECK_GE(instr->InputCount(), 1);
        return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
                                               : kIsLoadOperation;
      } else {
        return kHasSideEffect;
      }

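    // Likewise, 64-bit integer and floating-point moves are loads when they
    // produce a result and stores otherwise.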
    case kX64Movq:
    case kX64Movsd:
    case kX64Movss:
      return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;

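    // The stack check reads the stack limit from memory.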
    case kX64StackCheck:
      return kIsLoadOperation;

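    // Pushes and pokes write to the stack.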
    case kX64Push:
    case kX64Poke:
      return kHasSideEffect;

#define CASE(Name) case k##Name:
    COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
      // Already covered in architecture independent code.
      UNREACHABLE();
  }

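  // Unreachable for valid opcodes; the return value keeps builds where
  // UNREACHABLE() is not marked noreturn free of missing-return warnings.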
  UNREACHABLE();
  return kNoOpcodeFlags;
}


int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
  // TODO(all): Add instruction cost modeling.
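  // For now every instruction is assumed to have unit latency.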
  return 1;
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8