// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_INSTRUCTION_SCHEDULER_H_
#define V8_COMPILER_INSTRUCTION_SCHEDULER_H_

#include "src/compiler/instruction.h"
#include "src/zone/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {

// A set of flags describing properties of the instructions so that the
// scheduler is aware of dependencies between instructions.
enum ArchOpcodeFlags {
  kNoOpcodeFlags = 0,
  kHasSideEffect = 1,    // The instruction has some side effects (memory
                         // store, function call...)
  kIsLoadOperation = 2,  // The instruction is a memory load.
  kMayNeedDeoptOrTrapCheck = 4,  // The instruction may be associated with a
                                 // deopt or trap check which must be run
                                 // before the instruction, e.g. div on Intel
                                 // platforms, which raises an exception when
                                 // the divisor is zero.
};
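// A minimal sketch (not part of the original header) of how these flags are
// meant to be used: the backend reports a bitwise-or of ArchOpcodeFlags for
// each instruction, and the scheduler queries individual properties by
// masking, as the helper methods further down in this file do. 'instr' is a
// hypothetical Instruction*.
//
//   int flags = GetInstructionFlags(instr);
//   bool has_side_effect = (flags & kHasSideEffect) != 0;
//   bool is_load = (flags & kIsLoadOperation) != 0;
//   bool may_trap = (flags & kMayNeedDeoptOrTrapCheck) != 0;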

class InstructionScheduler final : public ZoneObject {
 public:
  InstructionScheduler(Zone* zone, InstructionSequence* sequence);

  void StartBlock(RpoNumber rpo);
  void EndBlock(RpoNumber rpo);

  void AddInstruction(Instruction* instr);
  void AddTerminator(Instruction* instr);

  static bool SchedulerSupported();

 private:
  // A scheduling graph node.
  // Represents an instruction and its dependencies.
  class ScheduleGraphNode : public ZoneObject {
   public:
    ScheduleGraphNode(Zone* zone, Instruction* instr);

    // Mark the instruction represented by 'node' as depending on this one.
    // The current instruction will be registered as an unscheduled predecessor
    // of 'node' (i.e. it must be scheduled before 'node').
    void AddSuccessor(ScheduleGraphNode* node);

    // Check whether this node still has predecessors that have not been
    // scheduled yet.
    bool HasUnscheduledPredecessor() {
      return unscheduled_predecessors_count_ != 0;
    }

    // Record that we have scheduled one of the predecessors of this node.
    void DropUnscheduledPredecessor() {
      DCHECK_LT(0, unscheduled_predecessors_count_);
      unscheduled_predecessors_count_--;
    }

    Instruction* instruction() { return instr_; }
    ZoneDeque<ScheduleGraphNode*>& successors() { return successors_; }
    int latency() const { return latency_; }

    int total_latency() const { return total_latency_; }
    void set_total_latency(int latency) { total_latency_ = latency; }

    int start_cycle() const { return start_cycle_; }
    void set_start_cycle(int start_cycle) { start_cycle_ = start_cycle; }

   private:
    Instruction* instr_;
    ZoneDeque<ScheduleGraphNode*> successors_;

    // Number of unscheduled predecessors for this node.
    int unscheduled_predecessors_count_;

    // Estimate of the instruction latency (the number of cycles it takes for
    // the instruction to complete).
    int latency_;

    // The sum of all the latencies on the path from this node to the end of
    // the graph (i.e. a node with no successor).
    int total_latency_;

    // The scheduler keeps a nominal cycle count to keep track of when the
    // result of an instruction is available. This field is updated by the
    // scheduler to indicate when the value of all the operands of this
    // instruction will be available.
    int start_cycle_;
  };
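  // Illustrative sketch (an assumption about how the graph builder uses this
  // class, not part of the original header): if 'store' must execute before
  // 'load', the builder calls
  //
  //   store_node->AddSuccessor(load_node);
  //
  // so 'load_node' appears in store_node's successor list and keeps an
  // unscheduled predecessor. Once 'store' is scheduled,
  // load_node->DropUnscheduledPredecessor() is called; when
  // HasUnscheduledPredecessor() becomes false, 'load_node' is ready to be
  // scheduled.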

  // Keep track of all nodes ready to be scheduled (i.e. all their dependencies
  // have been scheduled). Note that this class is intended to be extended by
  // concrete implementations of the scheduling queue which define the policy
  // used to pop nodes from the queue.
  class SchedulingQueueBase {
   public:
    explicit SchedulingQueueBase(InstructionScheduler* scheduler)
      : scheduler_(scheduler),
        nodes_(scheduler->zone()) {
    }

    void AddNode(ScheduleGraphNode* node);

    bool IsEmpty() const {
      return nodes_.empty();
    }

   protected:
    InstructionScheduler* scheduler_;
    ZoneLinkedList<ScheduleGraphNode*> nodes_;
  };

  // A scheduling queue which prioritizes nodes on the critical path (we look
  // for the instruction with the highest latency on the path to reach the end
  // of the graph).
  class CriticalPathFirstQueue : public SchedulingQueueBase {
   public:
    explicit CriticalPathFirstQueue(InstructionScheduler* scheduler)
      : SchedulingQueueBase(scheduler) { }

    // Look for the best candidate to schedule, remove it from the queue and
    // return it.
    ScheduleGraphNode* PopBestCandidate(int cycle);
  };
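  // A hedged sketch of the selection policy (an assumption about the
  // implementation in the .cc file, not part of this header): among the ready
  // nodes in 'nodes_', PopBestCandidate() is expected to favour the node with
  // the largest total_latency(), i.e. the one with the longest chain of
  // dependent work left, preferring nodes whose start_cycle() is not later
  // than the current cycle.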

  // A queue which pops a random node from the queue to perform stress tests
  // on the scheduler.
  class StressSchedulerQueue : public SchedulingQueueBase {
   public:
    explicit StressSchedulerQueue(InstructionScheduler* scheduler)
      : SchedulingQueueBase(scheduler) { }

    ScheduleGraphNode* PopBestCandidate(int cycle);

   private:
    Isolate* isolate() {
      return scheduler_->isolate();
    }
  };

  // Perform scheduling for the current block specifying the queue type to
  // use to determine the next best candidate.
  template <typename QueueType>
  void ScheduleBlock();

  // Return the scheduling properties of the given instruction.
  int GetInstructionFlags(const Instruction* instr) const;
  int GetTargetInstructionFlags(const Instruction* instr) const;

  // Check whether the given instruction has side effects (e.g. function call,
  // memory store).
  bool HasSideEffect(const Instruction* instr) const {
    return (GetInstructionFlags(instr) & kHasSideEffect) != 0;
  }

  // Return true if the instruction is a memory load.
  bool IsLoadOperation(const Instruction* instr) const {
    return (GetInstructionFlags(instr) & kIsLoadOperation) != 0;
  }

  // The scheduler will not move the following instructions before the last
  // deopt/trap check:
  //  * loads (this is conservative)
  //  * instructions with side effects
  //  * other deopts/traps
  // Any other instruction can be moved, apart from those that raise exceptions
  // on specific inputs - these are filtered out by the deopt/trap check.
  bool MayNeedDeoptOrTrapCheck(const Instruction* instr) const {
    return (GetInstructionFlags(instr) & kMayNeedDeoptOrTrapCheck) != 0;
  }

  // Return true if the instruction cannot be moved before the last deopt or
  // trap point we encountered.
  bool DependsOnDeoptOrTrap(const Instruction* instr) const {
    return MayNeedDeoptOrTrapCheck(instr) || instr->IsDeoptimizeCall() ||
           instr->IsTrap() || HasSideEffect(instr) || IsLoadOperation(instr);
  }
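  // Illustrative sketch (an assumption about how the graph builder in the .cc
  // file uses this predicate, not part of the original header): instructions
  // that answer true are expected to get the last recorded deopt/trap as a
  // predecessor, e.g.
  //
  //   if (last_deopt_or_trap_ != nullptr && DependsOnDeoptOrTrap(instr)) {
  //     // 'instr_node' is the (hypothetical) graph node built for 'instr'.
  //     last_deopt_or_trap_->AddSuccessor(instr_node);
  //   }
  //
  // so, for instance, a load cannot be hoisted above a potentially trapping
  // integer division.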

  // Identify nops used as a definition point for live-in registers at
  // function entry.
  bool IsFixedRegisterParameter(const Instruction* instr) const {
    return (instr->arch_opcode() == kArchNop) && (instr->OutputCount() == 1) &&
           (instr->OutputAt(0)->IsUnallocated()) &&
           (UnallocatedOperand::cast(instr->OutputAt(0))
                ->HasFixedRegisterPolicy() ||
            UnallocatedOperand::cast(instr->OutputAt(0))
                ->HasFixedFPRegisterPolicy());
  }

  void ComputeTotalLatencies();

  static int GetInstructionLatency(const Instruction* instr);

  Zone* zone() { return zone_; }
  InstructionSequence* sequence() { return sequence_; }
  Isolate* isolate() { return sequence()->isolate(); }

  Zone* zone_;
  InstructionSequence* sequence_;
  ZoneVector<ScheduleGraphNode*> graph_;

  friend class InstructionSchedulerTester;

  // Last side effect instruction encountered while building the graph.
  ScheduleGraphNode* last_side_effect_instr_;

  // Set of load instructions encountered since the last side effect
  // instruction which will be added as predecessors of the next instruction
  // with side effects.
  ZoneVector<ScheduleGraphNode*> pending_loads_;

  // Live-in register markers are nop instructions which are emitted at the
  // beginning of a basic block so that the register allocator will find a
  // defining instruction for live-in values. They must not be moved.
  // All these nops are chained together and added as a predecessor of every
  // other instruction in the basic block.
  ScheduleGraphNode* last_live_in_reg_marker_;

  // Last deoptimization or trap instruction encountered while building the
  // graph.
  ScheduleGraphNode* last_deopt_or_trap_;

  // Keep track of definition points for virtual registers. This is used to
  // record operand dependencies in the scheduling graph.
  ZoneMap<int32_t, ScheduleGraphNode*> operands_map_;
};
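
// A minimal driving sketch built only from the public interface above (the
// real call sites live elsewhere in the compiler; the locals below are
// hypothetical). Instructions are collected per basic block; presumably the
// reordering itself happens when the block is closed.
//
//   InstructionScheduler scheduler(zone, sequence);
//   scheduler.StartBlock(rpo);
//   for (Instruction* instr : body) scheduler.AddInstruction(instr);
//   scheduler.AddTerminator(terminator);
//   scheduler.EndBlock(rpo);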

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_INSTRUCTION_SCHEDULER_H_