// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_H_
#define V8_COMPILER_INSTRUCTION_SELECTOR_H_

#include <map>

#include "src/compiler/common-operator.h"
#include "src/compiler/instruction.h"
#include "src/compiler/instruction-scheduler.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {

// Forward declarations.
class BasicBlock;
struct CallBuffer;  // TODO(bmeurer): Remove this.
class FlagsContinuation;
class Linkage;
class OperandGenerator;
struct SwitchInfo;

// This class connects nodes of parameters which are going to be pushed on the
// call stack with their parameter index in the call descriptor of the callee.
class PushParameter {
 public:
  PushParameter() : node_(nullptr), type_(MachineType::None()) {}
  PushParameter(Node* node, MachineType type) : node_(node), type_(type) {}

  Node* node() const { return node_; }
  MachineType type() const { return type_; }

 private:
  Node* node_;
  MachineType type_;
};

// Instruction selection generates an InstructionSequence for a given Schedule.
class InstructionSelector final {
 public:
  // Forward declarations.
  class Features;

  // Controls whether source position information is recorded for all
  // instructions or only for call sites.
  enum SourcePositionMode { kCallSourcePositions, kAllSourcePositions };

  InstructionSelector(
      Zone* zone, size_t node_count, Linkage* linkage,
      InstructionSequence* sequence, Schedule* schedule,
      SourcePositionTable* source_positions, Frame* frame,
      SourcePositionMode source_position_mode = kCallSourcePositions,
      Features features = SupportedFeatures());

  // Visit code for the entire graph with the included schedule.
  void SelectInstructions();

  // Block-level emission protocol: StartBlock/EndBlock bracket the
  // instructions added via AddInstruction for the block identified by {rpo}.
  void StartBlock(RpoNumber rpo);
  void EndBlock(RpoNumber rpo);
  void AddInstruction(Instruction* instr);

  // ===========================================================================
  // ============= Architecture-independent code emission methods. =============
  // ===========================================================================

  // Convenience Emit() overloads for instructions with a single output and
  // zero to six fixed input operands; each optionally takes temp operands.
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    InstructionOperand e, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    InstructionOperand e, InstructionOperand f,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  // Fully general form: arbitrary output/input/temp operand arrays.
  Instruction* Emit(InstructionCode opcode, size_t output_count,
                    InstructionOperand* outputs, size_t input_count,
                    InstructionOperand* inputs, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(Instruction* instr);

  // ===========================================================================
  // ===== Architecture-independent deoptimization exit emission methods. ======
  // ===========================================================================

  // Like Emit(), but additionally attaches the deoptimization state described
  // by {frame_state} so the instruction can act as a deoptimization exit.
  Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
                              InstructionOperand a, InstructionOperand b,
                              Node* frame_state);
  Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
                              InstructionOperand* outputs, size_t input_count,
                              InstructionOperand* inputs, Node* frame_state);

  // ===========================================================================
  // ============== Architecture-independent CPU feature methods. ==============
  // ===========================================================================

  // A lightweight bitset of CpuFeature values used to parameterize instruction
  // selection on the capabilities of the target CPU.
  class Features final {
   public:
    Features() : bits_(0) {}
    explicit Features(unsigned bits) : bits_(bits) {}
    explicit Features(CpuFeature f) : bits_(1u << f) {}
    Features(CpuFeature f1, CpuFeature f2) : bits_((1u << f1) | (1u << f2)) {}

    bool Contains(CpuFeature f) const { return (bits_ & (1u << f)); }

   private:
    unsigned bits_;
  };

  bool IsSupported(CpuFeature feature) const {
    return features_.Contains(feature);
  }

  // Returns the features supported on the target platform.
  static Features SupportedFeatures() {
    return Features(CpuFeatures::SupportedFeatures());
  }

  // TODO(sigurds) This should take a CpuFeatures argument.
  static MachineOperatorBuilder::Flags SupportedMachineOperatorFlags();

  static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();

  // ===========================================================================
  // ============ Architecture-independent graph covering methods. =============
  // ===========================================================================

  // Used in pattern matching during code generation.
  // Check if {node} can be covered while generating code for the current
  // instruction. A node can be covered if the {user} of the node has the only
  // edge and the two are in the same basic block.
  bool CanCover(Node* user, Node* node) const;

  // Checks if {node} was already defined, and therefore code was already
  // generated for it.
  bool IsDefined(Node* node) const;

  // Checks if {node} has any uses, and therefore code has to be generated for
  // it.
  bool IsUsed(Node* node) const;

  // Checks if {node} is currently live, i.e. used but not yet defined.
  bool IsLive(Node* node) const { return !IsDefined(node) && IsUsed(node); }

  // Gets the effect level of {node}.
  int GetEffectLevel(Node* node) const;

  int GetVirtualRegister(const Node* node);
  // NOTE(review): returns the map by value; the top-level const on the return
  // type is ineffective but kept to match the out-of-line definition.
  const std::map<NodeId, int> GetVirtualRegistersForTesting() const;

  Isolate* isolate() const { return sequence()->isolate(); }

 private:
  friend class OperandGenerator;

  // Emit the dispatch for a switch: either a jump table indexed by
  // {index_operand} or a sequence of compares against {value_operand}.
  void EmitTableSwitch(const SwitchInfo& sw, InstructionOperand& index_operand);
  void EmitLookupSwitch(const SwitchInfo& sw,
                        InstructionOperand& value_operand);

  // Inform the instruction selection that {node} was just defined.
  void MarkAsDefined(Node* node);

  // Inform the instruction selection that {node} has at least one use and we
  // will need to generate code for it.
  void MarkAsUsed(Node* node);

  // Sets the effect level of {node}.
  void SetEffectLevel(Node* node, int effect_level);

  // Inform the register allocation of the representation of the value produced
  // by {node}.
  void MarkAsRepresentation(MachineRepresentation rep, Node* node);
  void MarkAsWord32(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kWord32, node);
  }
  void MarkAsWord64(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kWord64, node);
  }
  void MarkAsFloat32(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kFloat32, node);
  }
  void MarkAsFloat64(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kFloat64, node);
  }
  void MarkAsReference(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kTagged, node);
  }

  // Inform the register allocation of the representation of the unallocated
  // operand {op}.
  void MarkAsRepresentation(MachineRepresentation rep,
                            const InstructionOperand& op);

  enum CallBufferFlag {
    kCallCodeImmediate = 1u << 0,
    kCallAddressImmediate = 1u << 1,
    kCallTail = 1u << 2
  };
  typedef base::Flags<CallBufferFlag> CallBufferFlags;

  // Initialize the call buffer with the InstructionOperands, nodes, etc,
  // corresponding
  // to the inputs and outputs of the call.
  // {call_code_immediate} to generate immediate operands to calls of code.
  // {call_address_immediate} to generate immediate operands to address calls.
  void InitializeCallBuffer(Node* call, CallBuffer* buffer,
                            CallBufferFlags flags, int stack_param_delta = 0);
  bool IsTailCallAddressImmediate();
  int GetTempsCountForTailCallFromJSFunction();

  FrameStateDescriptor* GetFrameStateDescriptor(Node* node);

  // ===========================================================================
  // ============= Architecture-specific graph covering methods. ===============
  // ===========================================================================

  // Visit nodes in the given block and generate code.
  void VisitBlock(BasicBlock* block);

  // Visit the node for the control flow at the end of the block, generating
  // code if necessary.
  void VisitControl(BasicBlock* block);

  // Visit the node and generate code, if any.
  void VisitNode(Node* node);

  // Visit the node and generate code for IEEE 754 functions.
  void VisitFloat64Ieee754Binop(Node*, InstructionCode code);
  void VisitFloat64Ieee754Unop(Node*, InstructionCode code);

// Declares one Visit##x(Node*) method per machine operator; the definitions
// live in the architecture-specific backends.
#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
  MACHINE_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR

  // Visitors for the remaining (non-machine) operators and control nodes.
  void VisitFinishRegion(Node* node);
  void VisitParameter(Node* node);
  void VisitIfException(Node* node);
  void VisitOsrValue(Node* node);
  void VisitPhi(Node* node);
  void VisitProjection(Node* node);
  void VisitConstant(Node* node);
  void VisitCall(Node* call, BasicBlock* handler = nullptr);
  void VisitDeoptimizeIf(Node* node);
  void VisitDeoptimizeUnless(Node* node);
  void VisitTailCall(Node* call);
  void VisitGoto(BasicBlock* target);
  void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
  void VisitSwitch(Node* node, const SwitchInfo& sw);
  void VisitDeoptimize(DeoptimizeKind kind, Node* value);
  void VisitReturn(Node* ret);
  void VisitThrow(Node* value);

  void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
                            const CallDescriptor* descriptor, Node* node);

  void EmitIdentity(Node* node);
  bool CanProduceSignalingNaN(Node* node);

  // ===========================================================================

  Schedule* schedule() const { return schedule_; }
  Linkage* linkage() const { return linkage_; }
  InstructionSequence* sequence() const { return sequence_; }
  Zone* instruction_zone() const { return sequence()->zone(); }
  Zone* zone() const { return zone_; }

  // ===========================================================================

  Zone* const zone_;
  Linkage* const linkage_;
  InstructionSequence* const sequence_;
  SourcePositionTable* const source_positions_;
  SourcePositionMode const source_position_mode_;
  Features features_;
  Schedule* const schedule_;
  BasicBlock* current_block_;  // Block currently being visited.
  ZoneVector<Instruction*> instructions_;
  BoolVector defined_;         // Tracks IsDefined()/MarkAsDefined() state.
  BoolVector used_;            // Tracks IsUsed()/MarkAsUsed() state.
  IntVector effect_level_;     // Per-node effect level; see GetEffectLevel().
  IntVector virtual_registers_;
  InstructionScheduler* scheduler_;
  Frame* frame_;
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_INSTRUCTION_SELECTOR_H_