//===-- SelectionDAGBuilder.h - Selection-DAG building --------*- C++ -*---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#ifndef SELECTIONDAGBUILDER_H
#define SELECTIONDAGBUILDER_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/Support/ErrorHandling.h"
#include <vector>

namespace llvm {

class AddrSpaceCastInst;
class AliasAnalysis;
class AllocaInst;
class BasicBlock;
class BitCastInst;
class BranchInst;
class CallInst;
class DbgValueInst;
class ExtractElementInst;
class ExtractValueInst;
class FCmpInst;
class FPExtInst;
class FPToSIInst;
class FPToUIInst;
class FPTruncInst;
class Function;
class FunctionLoweringInfo;
class GetElementPtrInst;
class GCFunctionInfo;
class ICmpInst;
class IntToPtrInst;
class IndirectBrInst;
class InvokeInst;
class InsertElementInst;
class InsertValueInst;
class Instruction;
class LoadInst;
class MachineBasicBlock;
class MachineInstr;
class MachineRegisterInfo;
class MDNode;
class MVT;
class PHINode;
class PtrToIntInst;
class ReturnInst;
class SDDbgValue;
class SExtInst;
class SelectInst;
class ShuffleVectorInst;
class SIToFPInst;
class StoreInst;
class SwitchInst;
class DataLayout;
class TargetLibraryInfo;
class TargetLowering;
class TruncInst;
class UIToFPInst;
class UnreachableInst;
class VAArgInst;
class ZExtInst;

//===----------------------------------------------------------------------===//
/// SelectionDAGBuilder - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
///
class SelectionDAGBuilder {
  /// CurInst - The current instruction being visited.
  const Instruction *CurInst;

  DenseMap<const Value*, SDValue> NodeMap;

  /// UnusedArgNodeMap - Maps argument values for unused arguments. This is
  /// used to preserve debug information for incoming arguments.
  DenseMap<const Value*, SDValue> UnusedArgNodeMap;

  /// DanglingDebugInfo - Helper type for DanglingDebugInfoMap.
  class DanglingDebugInfo {
    const DbgValueInst* DI;
    DebugLoc dl;
    unsigned SDNodeOrder;
  public:
    DanglingDebugInfo() : DI(nullptr), dl(DebugLoc()), SDNodeOrder(0) { }
    DanglingDebugInfo(const DbgValueInst *di, DebugLoc DL, unsigned SDNO) :
      DI(di), dl(DL), SDNodeOrder(SDNO) { }
    const DbgValueInst* getDI() { return DI; }
    DebugLoc getdl() { return dl; }
    unsigned getSDNodeOrder() { return SDNodeOrder; }
  };

  /// DanglingDebugInfoMap - Keeps track of dbg_values for which we have not
  /// yet seen the referent. We defer handling these until we do see it.
  DenseMap<const Value*, DanglingDebugInfo> DanglingDebugInfoMap;

public:
  /// PendingLoads - Loads are not emitted to the program immediately. We bunch
  /// them up and then emit token factor nodes when possible. This allows us to
  /// get simple disambiguation between loads without worrying about alias
  /// analysis.
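  ///
  /// For intuition, getRoot() eventually merges everything accumulated here
  /// into a single token factor; roughly (a simplified sketch, not the exact
  /// implementation):
  /// \code
  ///   SDValue Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
  ///                              PendingLoads);
  ///   PendingLoads.clear();
  ///   DAG.setRoot(Root);
  /// \endcode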
  SmallVector<SDValue, 8> PendingLoads;

private:

  /// PendingExports - CopyToReg nodes that copy values to virtual registers
  /// for export to other blocks need to be emitted before any terminator
  /// instruction, but they have no other ordering requirements. We bunch them
  /// up and then emit a single token factor for them just before terminator
  /// instructions.
  SmallVector<SDValue, 8> PendingExports;

  /// SDNodeOrder - A unique monotonically increasing number used to order the
  /// SDNodes we create.
  unsigned SDNodeOrder;

  /// Case - A struct to record the Value for a switch case, and the
  /// case's target basic block.
  struct Case {
    const Constant *Low;
    const Constant *High;
    MachineBasicBlock* BB;
    uint32_t ExtraWeight;

    Case() : Low(nullptr), High(nullptr), BB(nullptr), ExtraWeight(0) { }
    Case(const Constant *low, const Constant *high, MachineBasicBlock *bb,
         uint32_t extraweight) : Low(low), High(high), BB(bb),
         ExtraWeight(extraweight) { }

    APInt size() const {
      const APInt &rHigh = cast<ConstantInt>(High)->getValue();
      const APInt &rLow = cast<ConstantInt>(Low)->getValue();
      return (rHigh - rLow + 1ULL);
    }
  };

  struct CaseBits {
    uint64_t Mask;
    MachineBasicBlock* BB;
    unsigned Bits;
    uint32_t ExtraWeight;

    CaseBits(uint64_t mask, MachineBasicBlock* bb, unsigned bits,
             uint32_t Weight):
      Mask(mask), BB(bb), Bits(bits), ExtraWeight(Weight) { }
  };

  typedef std::vector<Case> CaseVector;
  typedef std::vector<CaseBits> CaseBitsVector;
  typedef CaseVector::iterator CaseItr;
  typedef std::pair<CaseItr, CaseItr> CaseRange;

  /// CaseRec - A struct with ctor used in lowering switches to a binary tree
  /// of conditional branches.
  struct CaseRec {
    CaseRec(MachineBasicBlock *bb, const Constant *lt, const Constant *ge,
            CaseRange r) :
      CaseBB(bb), LT(lt), GE(ge), Range(r) {}

    /// CaseBB - The MBB in which to emit the compare and branch.
    MachineBasicBlock *CaseBB;
    /// LT, GE - If nonzero, we know the current case value must be less-than
    /// or greater-than-or-equal-to these Constants.
    const Constant *LT;
    const Constant *GE;
    /// Range - A pair of iterators representing the range of case values to be
    /// processed at this point in the binary search tree.
    CaseRange Range;
  };

  typedef std::vector<CaseRec> CaseRecVector;

  /// The comparison function for sorting the switch case values in the vector.
  /// WARNING: Case ranges should be disjoint!
  struct CaseCmp {
    bool operator()(const Case &C1, const Case &C2) {
      assert(isa<ConstantInt>(C1.Low) && isa<ConstantInt>(C2.High));
      const ConstantInt* CI1 = cast<const ConstantInt>(C1.Low);
      const ConstantInt* CI2 = cast<const ConstantInt>(C2.High);
      return CI1->getValue().slt(CI2->getValue());
    }
  };

  struct CaseBitsCmp {
    bool operator()(const CaseBits &C1, const CaseBits &C2) {
      return C1.Bits > C2.Bits;
    }
  };

  size_t Clusterify(CaseVector &Cases, const SwitchInst &SI);
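
  // For intuition, Clusterify coalesces adjacent case values that share a
  // destination into one Case range. A hypothetical example (illustrative
  // only, not taken from the implementation):
  //
  //   // switch (i) { case 1: case 2: case 3: goto BB1; case 7: goto BB2; }
  //   // clusters into two disjoint Case entries:
  //   //   { Low = 1, High = 3, BB = BB1 }
  //   //   { Low = 7, High = 7, BB = BB2 }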

  /// CaseBlock - This structure is used to communicate between
  /// SelectionDAGBuilder and SDISel for the code generation of additional
  /// basic blocks needed by multi-case switch statements.
  struct CaseBlock {
    CaseBlock(ISD::CondCode cc, const Value *cmplhs, const Value *cmprhs,
              const Value *cmpmiddle,
              MachineBasicBlock *truebb, MachineBasicBlock *falsebb,
              MachineBasicBlock *me,
              uint32_t trueweight = 0, uint32_t falseweight = 0)
      : CC(cc), CmpLHS(cmplhs), CmpMHS(cmpmiddle), CmpRHS(cmprhs),
        TrueBB(truebb), FalseBB(falsebb), ThisBB(me),
        TrueWeight(trueweight), FalseWeight(falseweight) { }

    // CC - the condition code to use for the case block's setcc node.
    ISD::CondCode CC;

    // CmpLHS/CmpRHS/CmpMHS - The LHS/MHS/RHS of the comparison to emit.
    // Emit by default LHS op RHS. MHS is used for range comparisons:
    // If MHS is not null: (LHS <= MHS) and (MHS <= RHS).
    const Value *CmpLHS, *CmpMHS, *CmpRHS;

    // TrueBB/FalseBB - the block to branch to if the setcc is true/false.
    MachineBasicBlock *TrueBB, *FalseBB;

    // ThisBB - the block into which to emit the code for the setcc and
    // branches.
    MachineBasicBlock *ThisBB;

    // TrueWeight/FalseWeight - branch weights.
    uint32_t TrueWeight, FalseWeight;
  };

  struct JumpTable {
    JumpTable(unsigned R, unsigned J, MachineBasicBlock *M,
              MachineBasicBlock *D): Reg(R), JTI(J), MBB(M), Default(D) {}

    /// Reg - the virtual register containing the index of the jump table entry
    /// to jump to.
    unsigned Reg;
    /// JTI - the JumpTableIndex for this jump table in the function.
    unsigned JTI;
    /// MBB - the MBB into which to emit the code for the indirect jump.
    MachineBasicBlock *MBB;
    /// Default - the MBB of the default bb, which is a successor of the range
    /// check MBB. This is used when updating PHI nodes in successors.
    MachineBasicBlock *Default;
  };
  struct JumpTableHeader {
    JumpTableHeader(APInt F, APInt L, const Value *SV, MachineBasicBlock *H,
                    bool E = false):
      First(F), Last(L), SValue(SV), HeaderBB(H), Emitted(E) {}
    APInt First;
    APInt Last;
    const Value *SValue;
    MachineBasicBlock *HeaderBB;
    bool Emitted;
  };
  typedef std::pair<JumpTableHeader, JumpTable> JumpTableBlock;

  struct BitTestCase {
    BitTestCase(uint64_t M, MachineBasicBlock* T, MachineBasicBlock* Tr,
                uint32_t Weight):
      Mask(M), ThisBB(T), TargetBB(Tr), ExtraWeight(Weight) { }
    uint64_t Mask;
    MachineBasicBlock *ThisBB;
    MachineBasicBlock *TargetBB;
    uint32_t ExtraWeight;
  };

  typedef SmallVector<BitTestCase, 3> BitTestInfo;

  struct BitTestBlock {
    BitTestBlock(APInt F, APInt R, const Value* SV,
                 unsigned Rg, MVT RgVT, bool E,
                 MachineBasicBlock* P, MachineBasicBlock* D,
                 const BitTestInfo& C):
      First(F), Range(R), SValue(SV), Reg(Rg), RegVT(RgVT), Emitted(E),
      Parent(P), Default(D), Cases(C) { }
    APInt First;
    APInt Range;
    const Value *SValue;
    unsigned Reg;
    MVT RegVT;
    bool Emitted;
    MachineBasicBlock *Parent;
    MachineBasicBlock *Default;
    BitTestInfo Cases;
  };
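
  // To make the bit-test lowering concrete, a hypothetical example
  // (illustrative only): for a dense switch over {0, 2, 4, 6} -> BB1 and
  // {1, 5} -> BB2, the cases can be checked with per-destination masks
  // instead of a jump table:
  //
  //   BitTestCase { Mask = 0b01010101, TargetBB = BB1 }
  //   BitTestCase { Mask = 0b00100010, TargetBB = BB2 }
  //
  // The header block computes Bit = 1 << (SValue - First), and each
  // BitTestCase block branches to its TargetBB if (Bit & Mask) != 0.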

  /// A class which encapsulates all of the information needed to generate a
  /// stack protector check and signals to isel via its state being initialized
  /// that a stack protector needs to be generated.
  ///
  /// *NOTE* The following is a high level documentation of SelectionDAG Stack
  /// Protector Generation. The reason that it is placed here is for a lack of
  /// other good places to stick it.
  ///
  /// High Level Overview of SelectionDAG Stack Protector Generation:
  ///
  /// Previously, generation of stack protectors was done exclusively in the
  /// pre-SelectionDAG Codegen LLVM IR Pass "Stack Protector". This necessitated
  /// splitting basic blocks at the IR level to create the success/failure basic
  /// blocks in the tail of the basic block in question. As a result of this,
  /// calls that would have qualified for the sibling call optimization were no
  /// longer eligible for optimization since said calls were no longer right in
  /// the "tail position" (i.e. the immediate predecessor of a ReturnInst
  /// instruction).
  ///
  /// Then it was noticed that since the sibling call optimization causes the
  /// callee to reuse the caller's stack, if we could delay the generation of
  /// the stack protector check until later in CodeGen after the sibling call
  /// decision was made, we get both the tail call optimization and the stack
  /// protector check!
  ///
  /// A few goals in solving this problem were:
  ///
  ///   1. Preserve the architecture independence of stack protector generation.
  ///
  ///   2. Preserve the normal IR level stack protector check for platforms like
  ///      OpenBSD for which we support platform-specific stack protector
  ///      generation.
  ///
  /// The main problem that guided the present solution is that one can not
  /// solve this problem in an architecture independent manner at the IR level
  /// only. This is because:
  ///
  ///   1. The decision on whether or not to perform a sibling call on certain
  ///      platforms (for instance i386) requires lower level information
  ///      related to available registers that can not be known at the IR level.
  ///
  ///   2. Even if the previous point were not true, the decision on whether to
  ///      perform a tail call is done in LowerCallTo in SelectionDAG which
  ///      occurs after the Stack Protector Pass. As a result, one would need to
  ///      put the relevant callinst into the stack protector check success
  ///      basic block (where the return inst is placed) and then move it back
  ///      later at SelectionDAG/MI time before the stack protector check if the
  ///      tail call optimization failed. The MI level option was nixed
  ///      immediately since it would require platform-specific pattern
  ///      matching. The SelectionDAG level option was nixed because
  ///      SelectionDAG only processes one IR level basic block at a time
  ///      implying one could not create a DAG Combine to move the callinst.
  ///
  /// To get around this problem a few things were realized:
  ///
  ///   1. While one can not handle multiple IR level basic blocks at the
  ///      SelectionDAG Level, one can generate multiple machine basic blocks
  ///      for one IR level basic block. This is how we handle bit tests and
  ///      switches.
  ///
  ///   2. At the MI level, tail calls are represented via a special return
  ///      MIInst called "tcreturn". Thus if we know the basic block in which we
  ///      wish to insert the stack protector check, we get the correct behavior
  ///      by always inserting the stack protector check right before the return
  ///      statement. This is a "magical transformation" since no matter where
  ///      the stack protector check intrinsic is, we always insert the stack
  ///      protector check code at the end of the BB.
  ///
  /// Given the aforementioned constraints, the following solution was devised:
  ///
  ///   1. On platforms that do not support SelectionDAG stack protector check
  ///      generation, allow for the normal IR level stack protector check
  ///      generation to continue.
  ///
  ///   2. On platforms that do support SelectionDAG stack protector check
  ///      generation:
  ///
  ///     a. Use the IR level stack protector pass to decide if a stack
  ///        protector is required/which BB we insert the stack protector check
  ///        in by reusing the logic already therein. If we wish to generate a
  ///        stack protector check in a basic block, we place a special IR
  ///        intrinsic called llvm.stackprotectorcheck right before the BB's
  ///        returninst or if there is a callinst that could potentially be
  ///        sibling call optimized, before the call inst.
  ///
  ///     b. Then when a BB with said intrinsic is processed, we codegen the BB
  ///        normally via SelectBasicBlock. In said process, when we visit the
  ///        stack protector check, we do not actually emit anything into the
  ///        BB. Instead, we just initialize the stack protector descriptor
  ///        class (which involves stashing information/creating the success
  ///        mbb and the failure mbb if we have not created one for this
  ///        function yet) and export the guard variable that we are going to
  ///        compare.
  ///
  ///     c. After we finish selecting the basic block, in FinishBasicBlock if
  ///        the StackProtectorDescriptor attached to the SelectionDAGBuilder is
  ///        initialized, we first find a splice point in the parent basic block
  ///        before the terminator and then splice the terminator of said basic
  ///        block into the success basic block. Then we code-gen a new tail for
  ///        the parent basic block consisting of the two loads, the comparison,
  ///        and finally two branches to the success/failure basic blocks. We
  ///        conclude by code-gening the failure basic block if we have not
  ///        code-gened it already (all stack protector checks we generate in
  ///        the same function use the same failure basic block).
  class StackProtectorDescriptor {
  public:
    StackProtectorDescriptor() : ParentMBB(nullptr), SuccessMBB(nullptr),
                                 FailureMBB(nullptr), Guard(nullptr) { }
    ~StackProtectorDescriptor() { }

    /// Returns true if all fields of the stack protector descriptor are
    /// initialized implying that we should/are ready to emit a stack
    /// protector.
    bool shouldEmitStackProtector() const {
      return ParentMBB && SuccessMBB && FailureMBB && Guard;
    }

    /// Initialize the stack protector descriptor structure for a new basic
    /// block.
    void initialize(const BasicBlock *BB,
                    MachineBasicBlock *MBB,
                    const CallInst &StackProtCheckCall) {
      // Make sure we are not initialized yet.
      assert(!shouldEmitStackProtector() && "Stack Protector Descriptor is "
             "already initialized!");
      ParentMBB = MBB;
      SuccessMBB = AddSuccessorMBB(BB, MBB);
      FailureMBB = AddSuccessorMBB(BB, MBB, FailureMBB);
      if (!Guard)
        Guard = StackProtCheckCall.getArgOperand(0);
    }
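
    // Intended lifecycle, sketched with hypothetical call sites (the actual
    // drivers are SelectBasicBlock/FinishBasicBlock, as described above):
    //
    //   SPD.initialize(BB, MBB, StackProtCheckCall); // while visiting the BB
    //   if (SPD.shouldEmitStackProtector()) {
    //     // ... emit the compare/branch tail and the success/failure MBBs ...
    //   }
    //   SPD.resetPerBBState();       // after each basic block
    //   SPD.resetPerFunctionState(); // when moving on to the next function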

    /// Reset state that changes when we handle different basic blocks.
    ///
    /// This currently includes:
    ///
    /// 1. The specific basic block we are generating a stack protector for
    ///    (ParentMBB).
    ///
    /// 2. The successor machine basic block that will contain the tail of the
    ///    parent mbb after we create the stack protector check (SuccessMBB).
    ///    This BB is visited only on stack protector check success.
    void resetPerBBState() {
      ParentMBB = nullptr;
      SuccessMBB = nullptr;
    }

    /// Reset state that only changes when we switch functions.
    ///
    /// This currently includes:
    ///
    /// 1. FailureMBB since we reuse the failure code path for all stack
    ///    protector checks created in an individual function.
    ///
    /// 2. The guard variable since the guard variable we are checking against
    ///    is always the same.
    void resetPerFunctionState() {
      FailureMBB = nullptr;
      Guard = nullptr;
    }

    MachineBasicBlock *getParentMBB() { return ParentMBB; }
    MachineBasicBlock *getSuccessMBB() { return SuccessMBB; }
    MachineBasicBlock *getFailureMBB() { return FailureMBB; }
    const Value *getGuard() { return Guard; }

  private:
    /// The basic block for which we are generating the stack protector.
    ///
    /// As a result of stack protector generation, we will splice the
    /// terminators of this basic block into the successor mbb SuccessMBB and
    /// replace it with a compare/branch to the successor mbbs
    /// SuccessMBB/FailureMBB depending on whether or not the stack protector
    /// was violated.
    MachineBasicBlock *ParentMBB;

    /// A basic block visited on stack protector check success that contains
    /// the terminators of ParentMBB.
    MachineBasicBlock *SuccessMBB;

    /// A basic block visited on stack protector check failure that will
    /// contain a call to __stack_chk_fail().
    MachineBasicBlock *FailureMBB;

    /// The guard variable which we will compare against the stored value in
    /// the stack protector stack slot.
    const Value *Guard;

    /// Add a successor machine basic block to ParentMBB. If the successor mbb
    /// has not been created yet (i.e. if SuccMBB == nullptr), then the machine
    /// basic block will be created.
    MachineBasicBlock *AddSuccessorMBB(const BasicBlock *BB,
                                       MachineBasicBlock *ParentMBB,
                                       MachineBasicBlock *SuccMBB = nullptr);
  };

private:
  const TargetMachine &TM;
public:
  /// Lowest valid SDNodeOrder. The special case 0 is reserved for scheduling
  /// nodes without a corresponding SDNode.
  static const unsigned LowestSDNodeOrder = 1;

  SelectionDAG &DAG;
  const DataLayout *DL;
  AliasAnalysis *AA;
  const TargetLibraryInfo *LibInfo;

  /// SwitchCases - Vector of CaseBlock structures used to communicate
  /// SwitchInst code generation information.
  std::vector<CaseBlock> SwitchCases;
  /// JTCases - Vector of JumpTable structures used to communicate
  /// SwitchInst code generation information.
  std::vector<JumpTableBlock> JTCases;
  /// BitTestCases - Vector of BitTestBlock structures used to communicate
  /// SwitchInst code generation information.
  std::vector<BitTestBlock> BitTestCases;
  /// A StackProtectorDescriptor structure used to communicate stack protector
  /// information in between SelectBasicBlock and FinishBasicBlock.
  StackProtectorDescriptor SPDescriptor;

  // Emit PHI-node-operand constants only once even if used by multiple
  // PHI nodes.
  DenseMap<const Constant *, unsigned> ConstantsOut;

  /// FuncInfo - Information about the function as a whole.
  ///
  FunctionLoweringInfo &FuncInfo;

  /// OptLevel - What optimization level we're generating code for.
  ///
  CodeGenOpt::Level OptLevel;

  /// GFI - Garbage collection metadata for the function.
  GCFunctionInfo *GFI;

  /// LPadToCallSiteMap - Map a landing pad to the call site indexes.
  DenseMap<MachineBasicBlock*, SmallVector<unsigned, 4> > LPadToCallSiteMap;

  /// HasTailCall - This is set to true if a call in the current
  /// block has been translated as a tail call. In this case,
  /// no subsequent DAG nodes should be created.
  ///
  bool HasTailCall;

  LLVMContext *Context;

  SelectionDAGBuilder(SelectionDAG &dag, FunctionLoweringInfo &funcinfo,
                      CodeGenOpt::Level ol)
    : CurInst(nullptr), SDNodeOrder(LowestSDNodeOrder), TM(dag.getTarget()),
      DAG(dag), FuncInfo(funcinfo), OptLevel(ol),
      HasTailCall(false) {
  }

  void init(GCFunctionInfo *gfi, AliasAnalysis &aa,
            const TargetLibraryInfo *li);

  /// clear - Clear out the current SelectionDAG and the associated
  /// state and prepare this SelectionDAGBuilder object to be used
  /// for a new block. This doesn't clear out information about
  /// additional blocks that are needed to complete switch lowering
  /// or PHI node updating; that information is cleared out as it is
  /// consumed.
  void clear();

  /// clearDanglingDebugInfo - Clear the dangling debug information
  /// map. This function is separated from the clear so that debug
  /// information that is dangling in a basic block can be properly
  /// resolved in a different basic block. This allows the
  /// SelectionDAG to resolve dangling debug information attached
  /// to PHI nodes.
  void clearDanglingDebugInfo();

  /// getRoot - Return the current virtual root of the Selection DAG,
  /// flushing any PendingLoad items. This must be done before emitting
  /// a store or any other node that may need to be ordered after any
  /// prior load instructions.
  ///
  SDValue getRoot();

  /// getControlRoot - Similar to getRoot, but instead of flushing all the
  /// PendingLoad items, flush all the PendingExports items. It is necessary
  /// to do this before emitting a terminator instruction.
  ///
  SDValue getControlRoot();

  SDLoc getCurSDLoc() const {
    return SDLoc(CurInst, SDNodeOrder);
  }

  DebugLoc getCurDebugLoc() const {
    return CurInst ? CurInst->getDebugLoc() : DebugLoc();
  }

  unsigned getSDNodeOrder() const { return SDNodeOrder; }

  void CopyValueToVirtualRegister(const Value *V, unsigned Reg);

  void visit(const Instruction &I);

  void visit(unsigned Opcode, const User &I);
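
  // The dispatch from visit(const Instruction &) to the per-opcode handlers
  // declared below conceptually looks like the following (a simplified sketch
  // based on Instruction.def; the real code lives in SelectionDAGBuilder.cpp):
  //
  //   switch (Opcode) {
  //   default: llvm_unreachable("Unknown instruction type encountered!");
  //   #define HANDLE_INST(NUM, OPCODE, CLASS) \
  //     case Instruction::OPCODE: visit##OPCODE((const CLASS &)I); break;
  //   #include "llvm/IR/Instruction.def"
  //   }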

  // resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
  // generate the debug data structures now that we've seen its definition.
  void resolveDanglingDebugInfo(const Value *V, SDValue Val);
  SDValue getValue(const Value *V);
  SDValue getNonRegisterValue(const Value *V);
  SDValue getValueImpl(const Value *V);

  void setValue(const Value *V, SDValue NewN) {
    SDValue &N = NodeMap[V];
    assert(!N.getNode() && "Already set a value for this node!");
    N = NewN;
  }

  void setUnusedArgValue(const Value *V, SDValue NewN) {
    SDValue &N = UnusedArgNodeMap[V];
    assert(!N.getNode() && "Already set a value for this node!");
    N = NewN;
  }

  void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
                            MachineBasicBlock *SwitchBB, unsigned Opc,
                            uint32_t TW, uint32_t FW);
  void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
                                    MachineBasicBlock *FBB,
                                    MachineBasicBlock *CurBB,
                                    MachineBasicBlock *SwitchBB,
                                    uint32_t TW, uint32_t FW);
  bool ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases);
  bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB);
  void CopyToExportRegsIfNeeded(const Value *V);
  void ExportFromCurrentBlock(const Value *V);
  void LowerCallTo(ImmutableCallSite CS, SDValue Callee, bool IsTailCall,
                   MachineBasicBlock *LandingPad = nullptr);

  std::pair<SDValue, SDValue> LowerCallOperands(const CallInst &CI,
                                                unsigned ArgIdx,
                                                unsigned NumArgs,
                                                SDValue Callee,
                                                bool useVoidTy = false);

  /// UpdateSplitBlock - When an MBB was split during scheduling, update the
  /// references that need to refer to the last resulting block.
  void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last);

private:
  // Terminator instructions.
  void visitRet(const ReturnInst &I);
  void visitBr(const BranchInst &I);
  void visitSwitch(const SwitchInst &I);
  void visitIndirectBr(const IndirectBrInst &I);
  void visitUnreachable(const UnreachableInst &I);

  // Helpers for visitSwitch
  bool handleSmallSwitchRange(CaseRec& CR,
                              CaseRecVector& WorkList,
                              const Value* SV,
                              MachineBasicBlock* Default,
                              MachineBasicBlock *SwitchBB);
  bool handleJTSwitchCase(CaseRec& CR,
                          CaseRecVector& WorkList,
                          const Value* SV,
                          MachineBasicBlock* Default,
                          MachineBasicBlock *SwitchBB);
  bool handleBTSplitSwitchCase(CaseRec& CR,
                               CaseRecVector& WorkList,
                               const Value* SV,
                               MachineBasicBlock* Default,
                               MachineBasicBlock *SwitchBB);
  bool handleBitTestsSwitchCase(CaseRec& CR,
                                CaseRecVector& WorkList,
                                const Value* SV,
                                MachineBasicBlock* Default,
                                MachineBasicBlock *SwitchBB);

  uint32_t getEdgeWeight(const MachineBasicBlock *Src,
                         const MachineBasicBlock *Dst) const;
  void addSuccessorWithWeight(MachineBasicBlock *Src, MachineBasicBlock *Dst,
                              uint32_t Weight = 0);
public:
  void visitSwitchCase(CaseBlock &CB,
                       MachineBasicBlock *SwitchBB);
  void visitSPDescriptorParent(StackProtectorDescriptor &SPD,
                               MachineBasicBlock *ParentBB);
  void visitSPDescriptorFailure(StackProtectorDescriptor &SPD);
  void visitBitTestHeader(BitTestBlock &B, MachineBasicBlock *SwitchBB);
  void visitBitTestCase(BitTestBlock &BB,
                        MachineBasicBlock* NextMBB,
                        uint32_t BranchWeightToNext,
                        unsigned Reg,
                        BitTestCase &B,
                        MachineBasicBlock *SwitchBB);
  void visitJumpTable(JumpTable &JT);
  void visitJumpTableHeader(JumpTable &JT, JumpTableHeader &JTH,
                            MachineBasicBlock *SwitchBB);
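
  // For orientation, a rough sketch of the jump table lowering these two hooks
  // cooperate on (illustrative pseudo-code, not the emitted DAG):
  //
  //   // In the header MBB (visitJumpTableHeader):
  //   //   Idx = SValue - First;
  //   //   if (Idx > Last - First) goto Default;
  //   // In the jump table MBB (visitJumpTable):
  //   //   goto *JumpTable[JTI][Idx];  // indirect branch via the index in Reg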

private:
  // These all get lowered before this pass.
  void visitInvoke(const InvokeInst &I);
  void visitResume(const ResumeInst &I);

  void visitBinary(const User &I, unsigned OpCode);
  void visitShift(const User &I, unsigned Opcode);
  void visitAdd(const User &I)  { visitBinary(I, ISD::ADD); }
  void visitFAdd(const User &I) { visitBinary(I, ISD::FADD); }
  void visitSub(const User &I)  { visitBinary(I, ISD::SUB); }
  void visitFSub(const User &I);
  void visitMul(const User &I)  { visitBinary(I, ISD::MUL); }
  void visitFMul(const User &I) { visitBinary(I, ISD::FMUL); }
  void visitURem(const User &I) { visitBinary(I, ISD::UREM); }
  void visitSRem(const User &I) { visitBinary(I, ISD::SREM); }
  void visitFRem(const User &I) { visitBinary(I, ISD::FREM); }
  void visitUDiv(const User &I) { visitBinary(I, ISD::UDIV); }
  void visitSDiv(const User &I);
  void visitFDiv(const User &I) { visitBinary(I, ISD::FDIV); }
  void visitAnd (const User &I) { visitBinary(I, ISD::AND); }
  void visitOr  (const User &I) { visitBinary(I, ISD::OR); }
  void visitXor (const User &I) { visitBinary(I, ISD::XOR); }
  void visitShl (const User &I) { visitShift(I, ISD::SHL); }
  void visitLShr(const User &I) { visitShift(I, ISD::SRL); }
  void visitAShr(const User &I) { visitShift(I, ISD::SRA); }
  void visitICmp(const User &I);
  void visitFCmp(const User &I);
  // Visit the conversion instructions.
  void visitTrunc(const User &I);
  void visitZExt(const User &I);
  void visitSExt(const User &I);
  void visitFPTrunc(const User &I);
  void visitFPExt(const User &I);
  void visitFPToUI(const User &I);
  void visitFPToSI(const User &I);
  void visitUIToFP(const User &I);
  void visitSIToFP(const User &I);
  void visitPtrToInt(const User &I);
  void visitIntToPtr(const User &I);
  void visitBitCast(const User &I);
  void visitAddrSpaceCast(const User &I);

  void visitExtractElement(const User &I);
  void visitInsertElement(const User &I);
  void visitShuffleVector(const User &I);

  void visitExtractValue(const ExtractValueInst &I);
  void visitInsertValue(const InsertValueInst &I);
  void visitLandingPad(const LandingPadInst &I);

  void visitGetElementPtr(const User &I);
  void visitSelect(const User &I);

  void visitAlloca(const AllocaInst &I);
  void visitLoad(const LoadInst &I);
  void visitStore(const StoreInst &I);
  void visitAtomicCmpXchg(const AtomicCmpXchgInst &I);
  void visitAtomicRMW(const AtomicRMWInst &I);
  void visitFence(const FenceInst &I);
  void visitPHI(const PHINode &I);
  void visitCall(const CallInst &I);
  bool visitMemCmpCall(const CallInst &I);
  bool visitMemChrCall(const CallInst &I);
  bool visitStrCpyCall(const CallInst &I, bool isStpcpy);
  bool visitStrCmpCall(const CallInst &I);
  bool visitStrLenCall(const CallInst &I);
  bool visitStrNLenCall(const CallInst &I);
  bool visitUnaryFloatCall(const CallInst &I, unsigned Opcode);
  void visitAtomicLoad(const LoadInst &I);
  void visitAtomicStore(const StoreInst &I);

  void visitInlineAsm(ImmutableCallSite CS);
  const char *visitIntrinsicCall(const CallInst &I, unsigned Intrinsic);
  void visitTargetIntrinsic(const CallInst &I, unsigned Intrinsic);

  void visitVAStart(const CallInst &I);
  void visitVAArg(const VAArgInst &I);
  void visitVAEnd(const CallInst &I);
  void visitVACopy(const CallInst &I);
  void visitStackmap(const CallInst &I);
  void visitPatchpoint(const CallInst &I);

  void visitUserOp1(const Instruction &I) {
    llvm_unreachable("UserOp1 should not exist at instruction selection time!");
  }
  void visitUserOp2(const Instruction &I) {
    llvm_unreachable("UserOp2 should not exist at instruction selection time!");
  }

  void processIntegerCallValue(const Instruction &I,
                               SDValue Value, bool IsSigned);

  void HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);

  /// EmitFuncArgumentDbgValue - If V is a function argument then create the
  /// corresponding DBG_VALUE machine instruction for it now. At the end of
  /// instruction selection, these instructions are inserted into the entry BB.
  bool EmitFuncArgumentDbgValue(const Value *V, MDNode *Variable,
                                int64_t Offset, bool IsIndirect,
                                const SDValue &N);
};

} // end namespace llvm

#endif