//===-- SelectionDAGBuilder.h - Selection-DAG building --------*- C++ -*---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H
#define LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H

#include "StatepointLowering.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetLowering.h"
#include <utility>
#include <vector>

namespace llvm {

class AddrSpaceCastInst;
class AllocaInst;
class BasicBlock;
class BitCastInst;
class BranchInst;
class CallInst;
class DbgValueInst;
class ExtractElementInst;
class ExtractValueInst;
class FCmpInst;
class FPExtInst;
class FPToSIInst;
class FPToUIInst;
class FPTruncInst;
class Function;
class FunctionLoweringInfo;
class GetElementPtrInst;
class GCFunctionInfo;
class ICmpInst;
class IntToPtrInst;
class IndirectBrInst;
class InvokeInst;
class InsertElementInst;
class InsertValueInst;
class Instruction;
class LoadInst;
class MachineBasicBlock;
class MachineInstr;
class MachineRegisterInfo;
class MDNode;
class MVT;
class PHINode;
class PtrToIntInst;
class ReturnInst;
class SDDbgValue;
class SExtInst;
class SelectInst;
class ShuffleVectorInst;
class SIToFPInst;
class StoreInst;
class SwitchInst;
class DataLayout;
class TargetLibraryInfo;
class TargetLowering;
class TruncInst;
class UIToFPInst;
class UnreachableInst;
class VAArgInst;
class ZExtInst;

//===----------------------------------------------------------------------===//
/// SelectionDAGBuilder - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
///
class SelectionDAGBuilder {
  /// CurInst - The current instruction being visited.
  const Instruction *CurInst;

  DenseMap<const Value*, SDValue> NodeMap;

  /// UnusedArgNodeMap - Maps argument values for unused arguments. This is
  /// used to preserve debug information for incoming arguments.
  DenseMap<const Value*, SDValue> UnusedArgNodeMap;

  /// DanglingDebugInfo - Helper type for DanglingDebugInfoMap.
  class DanglingDebugInfo {
    const DbgValueInst* DI;
    DebugLoc dl;
    unsigned SDNodeOrder;
  public:
    DanglingDebugInfo() : DI(nullptr), dl(DebugLoc()), SDNodeOrder(0) { }
    DanglingDebugInfo(const DbgValueInst *di, DebugLoc DL, unsigned SDNO)
        : DI(di), dl(std::move(DL)), SDNodeOrder(SDNO) {}
    const DbgValueInst* getDI() { return DI; }
    DebugLoc getdl() { return dl; }
    unsigned getSDNodeOrder() { return SDNodeOrder; }
  };

  /// DanglingDebugInfoMap - Keeps track of dbg_values for which we have not
  /// yet seen the referent. We defer handling these until we do see it.
  DenseMap<const Value*, DanglingDebugInfo> DanglingDebugInfoMap;
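
  // For illustration only (a hedged sketch, not the actual implementation;
  // V, DI and dl are placeholders for the value, intrinsic and location):
  // when a dbg.value's operand has no SDValue yet, the builder can park the
  // record here and emit the real SDDbgValue once the operand's node exists:
  //
  //   if (!NodeMap.count(V))
  //     DanglingDebugInfoMap[V] = DanglingDebugInfo(DI, dl, SDNodeOrder);
  //   ...
  //   resolveDanglingDebugInfo(V, getValue(V)); // flushes the entry for V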

public:
  /// PendingLoads - Loads are not emitted to the program immediately. We bunch
  /// them up and then emit token factor nodes when possible. This allows us to
  /// get simple disambiguation between loads without worrying about alias
  /// analysis.
  SmallVector<SDValue, 8> PendingLoads;

  /// State used while lowering a statepoint sequence (gc_statepoint,
  /// gc_relocate, and gc_result). See StatepointLowering.h/cpp for details.
  StatepointLoweringState StatepointLowering;
private:

  /// PendingExports - CopyToReg nodes that copy values to virtual registers
  /// for export to other blocks need to be emitted before any terminator
  /// instruction, but they have no other ordering requirements. We bunch them
  /// up and then emit a single tokenfactor for them just before terminator
  /// instructions.
  SmallVector<SDValue, 8> PendingExports;

  /// SDNodeOrder - A unique monotonically increasing number used to order the
  /// SDNodes we create.
  unsigned SDNodeOrder;

  enum CaseClusterKind {
    /// A cluster of adjacent case labels with the same destination, or just
    /// one case.
    CC_Range,
    /// A cluster of cases suitable for jump table lowering.
    CC_JumpTable,
    /// A cluster of cases suitable for bit test lowering.
    CC_BitTests
  };

  /// A cluster of case labels.
  struct CaseCluster {
    CaseClusterKind Kind;
    const ConstantInt *Low, *High;
    union {
      MachineBasicBlock *MBB;
      unsigned JTCasesIndex;
      unsigned BTCasesIndex;
    };
    BranchProbability Prob;

    static CaseCluster range(const ConstantInt *Low, const ConstantInt *High,
                             MachineBasicBlock *MBB, BranchProbability Prob) {
      CaseCluster C;
      C.Kind = CC_Range;
      C.Low = Low;
      C.High = High;
      C.MBB = MBB;
      C.Prob = Prob;
      return C;
    }

    static CaseCluster jumpTable(const ConstantInt *Low,
                                 const ConstantInt *High, unsigned JTCasesIndex,
                                 BranchProbability Prob) {
      CaseCluster C;
      C.Kind = CC_JumpTable;
      C.Low = Low;
      C.High = High;
      C.JTCasesIndex = JTCasesIndex;
      C.Prob = Prob;
      return C;
    }

    static CaseCluster bitTests(const ConstantInt *Low, const ConstantInt *High,
                                unsigned BTCasesIndex, BranchProbability Prob) {
      CaseCluster C;
      C.Kind = CC_BitTests;
      C.Low = Low;
      C.High = High;
      C.BTCasesIndex = BTCasesIndex;
      C.Prob = Prob;
      return C;
    }
  };

  typedef std::vector<CaseCluster> CaseClusterVector;
  typedef CaseClusterVector::iterator CaseClusterIt;

  struct CaseBits {
    uint64_t Mask;
    MachineBasicBlock* BB;
    unsigned Bits;
    BranchProbability ExtraProb;

    CaseBits(uint64_t mask, MachineBasicBlock* bb, unsigned bits,
             BranchProbability Prob):
      Mask(mask), BB(bb), Bits(bits), ExtraProb(Prob) { }

    CaseBits() : Mask(0), BB(nullptr), Bits(0) {}
  };

  typedef std::vector<CaseBits> CaseBitsVector;

  /// Sort Clusters and merge adjacent cases.
  void sortAndRangeify(CaseClusterVector &Clusters);
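
  // For illustration (hypothetical operands): three adjacent case labels
  // "case 3: case 4: case 5:" sharing one destination collapse into a single
  // range cluster, where CI3/CI5 are the ConstantInts for 3 and 5:
  //
  //   CaseCluster C = CaseCluster::range(CI3, CI5, DestMBB, Prob);
  //
  // sortAndRangeify performs this kind of merging across a whole vector.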

  /// CaseBlock - This structure is used to communicate between
  /// SelectionDAGBuilder and SDISel for the code generation of additional
  /// basic blocks needed by multi-case switch statements.
  struct CaseBlock {
    CaseBlock(ISD::CondCode cc, const Value *cmplhs, const Value *cmprhs,
              const Value *cmpmiddle, MachineBasicBlock *truebb,
              MachineBasicBlock *falsebb, MachineBasicBlock *me,
              BranchProbability trueprob = BranchProbability::getUnknown(),
              BranchProbability falseprob = BranchProbability::getUnknown())
        : CC(cc), CmpLHS(cmplhs), CmpMHS(cmpmiddle), CmpRHS(cmprhs),
          TrueBB(truebb), FalseBB(falsebb), ThisBB(me), TrueProb(trueprob),
          FalseProb(falseprob) {}

    // CC - the condition code to use for the case block's setcc node.
    ISD::CondCode CC;

    // CmpLHS/CmpRHS/CmpMHS - The LHS/MHS/RHS of the comparison to emit.
    // By default we emit "LHS op RHS". MHS is used for range comparisons:
    // if MHS is not null, we emit "(LHS <= MHS) and (MHS <= RHS)".
    const Value *CmpLHS, *CmpMHS, *CmpRHS;

    // TrueBB/FalseBB - the block to branch to if the setcc is true/false.
    MachineBasicBlock *TrueBB, *FalseBB;

    // ThisBB - the block into which to emit the code for the setcc and
    // branches.
    MachineBasicBlock *ThisBB;

    // TrueProb/FalseProb - branch weights.
    BranchProbability TrueProb, FalseProb;
  };

  struct JumpTable {
    JumpTable(unsigned R, unsigned J, MachineBasicBlock *M,
              MachineBasicBlock *D): Reg(R), JTI(J), MBB(M), Default(D) {}

    /// Reg - the virtual register containing the index of the jump table entry
    /// to jump to.
    unsigned Reg;
    /// JTI - the JumpTableIndex for this jump table in the function.
    unsigned JTI;
    /// MBB - the MBB into which to emit the code for the indirect jump.
    MachineBasicBlock *MBB;
    /// Default - the MBB of the default bb, which is a successor of the range
    /// check MBB. This is used when updating PHI nodes in successors.
    MachineBasicBlock *Default;
  };
  struct JumpTableHeader {
    JumpTableHeader(APInt F, APInt L, const Value *SV, MachineBasicBlock *H,
                    bool E = false)
        : First(std::move(F)), Last(std::move(L)), SValue(SV), HeaderBB(H),
          Emitted(E) {}
    APInt First;
    APInt Last;
    const Value *SValue;
    MachineBasicBlock *HeaderBB;
    bool Emitted;
  };
  typedef std::pair<JumpTableHeader, JumpTable> JumpTableBlock;

  struct BitTestCase {
    BitTestCase(uint64_t M, MachineBasicBlock* T, MachineBasicBlock* Tr,
                BranchProbability Prob):
      Mask(M), ThisBB(T), TargetBB(Tr), ExtraProb(Prob) { }
    uint64_t Mask;
    MachineBasicBlock *ThisBB;
    MachineBasicBlock *TargetBB;
    BranchProbability ExtraProb;
  };

  typedef SmallVector<BitTestCase, 3> BitTestInfo;

  struct BitTestBlock {
    BitTestBlock(APInt F, APInt R, const Value *SV, unsigned Rg, MVT RgVT,
                 bool E, bool CR, MachineBasicBlock *P, MachineBasicBlock *D,
                 BitTestInfo C, BranchProbability Pr)
        : First(std::move(F)), Range(std::move(R)), SValue(SV), Reg(Rg),
          RegVT(RgVT), Emitted(E), ContiguousRange(CR), Parent(P), Default(D),
          Cases(std::move(C)), Prob(Pr) {}
    APInt First;
    APInt Range;
    const Value *SValue;
    unsigned Reg;
    MVT RegVT;
    bool Emitted;
    bool ContiguousRange;
    MachineBasicBlock *Parent;
    MachineBasicBlock *Default;
    BitTestInfo Cases;
    BranchProbability Prob;
    BranchProbability DefaultProb;
  };

  /// Check whether a range of clusters is dense enough for a jump table.
  bool isDense(const CaseClusterVector &Clusters, unsigned *TotalCases,
               unsigned First, unsigned Last, unsigned MinDensity);
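
  // For illustration of the density test (a sketch; the real heuristic lives
  // in the implementation): a candidate range covering 100 values with 45
  // cases has 45% density, so it passes a MinDensity of 40 but fails 50:
  //
  //   uint64_t Range = 100;                      // High - Low + 1
  //   unsigned NumCases = 45;
  //   bool Dense = NumCases * 100 >= Range * 40; // 4500 >= 4000 -> dense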

  /// Build a jump table cluster from Clusters[First..Last]. Returns false if
  /// it decides it's not a good idea.
  bool buildJumpTable(CaseClusterVector &Clusters, unsigned First,
                      unsigned Last, const SwitchInst *SI,
                      MachineBasicBlock *DefaultMBB, CaseCluster &JTCluster);

  /// Find clusters of cases suitable for jump table lowering.
  void findJumpTables(CaseClusterVector &Clusters, const SwitchInst *SI,
                      MachineBasicBlock *DefaultMBB);

  /// Check whether the range [Low,High] fits in a machine word.
  bool rangeFitsInWord(const APInt &Low, const APInt &High);

  /// Check whether these clusters are suitable for lowering with bit tests
  /// based on the number of destinations, comparison metric, and range.
  bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
                             const APInt &Low, const APInt &High);

  /// Build a bit test cluster from Clusters[First..Last]. Returns false if it
  /// decides it's not a good idea.
  bool buildBitTests(CaseClusterVector &Clusters, unsigned First, unsigned Last,
                     const SwitchInst *SI, CaseCluster &BTCluster);

  /// Find clusters of cases suitable for bit test lowering.
  void findBitTestClusters(CaseClusterVector &Clusters, const SwitchInst *SI);

  struct SwitchWorkListItem {
    MachineBasicBlock *MBB;
    CaseClusterIt FirstCluster;
    CaseClusterIt LastCluster;
    const ConstantInt *GE;
    const ConstantInt *LT;
    BranchProbability DefaultProb;
  };
  typedef SmallVector<SwitchWorkListItem, 4> SwitchWorkList;

  /// Determine the rank by weight of CC in [First,Last]. If CC has more weight
  /// than each cluster in the range, its rank is 0.
  static unsigned caseClusterRank(const CaseCluster &CC, CaseClusterIt First,
                                  CaseClusterIt Last);

  /// Emit comparison and split W into two subtrees.
  void splitWorkItem(SwitchWorkList &WorkList, const SwitchWorkListItem &W,
                     Value *Cond, MachineBasicBlock *SwitchMBB);

  /// Lower W.
  void lowerWorkItem(SwitchWorkListItem W, Value *Cond,
                     MachineBasicBlock *SwitchMBB,
                     MachineBasicBlock *DefaultMBB);
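
  // For illustration (a hedged sketch with placeholder names): lowering a
  // switch starts from a single work item that covers every cluster and has
  // no known bounds on the condition value:
  //
  //   SwitchWorkListItem W = {SwitchMBB, Clusters.begin(), Clusters.end() - 1,
  //                           nullptr /*GE*/, nullptr /*LT*/, DefaultProb};
  //
  // lowerWorkItem either emits the clusters directly or calls splitWorkItem,
  // which pushes two narrower items with tightened GE/LT bounds back onto the
  // SwitchWorkList.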

  /// A class which encapsulates all of the information needed to generate a
  /// stack protector check and signals to isel via its state being initialized
  /// that a stack protector needs to be generated.
  ///
  /// *NOTE* The following is a high level documentation of SelectionDAG Stack
  /// Protector Generation. The reason that it is placed here is for a lack of
  /// other good places to stick it.
  ///
  /// High Level Overview of SelectionDAG Stack Protector Generation:
  ///
  /// Previously, generation of stack protectors was done exclusively in the
  /// pre-SelectionDAG Codegen LLVM IR Pass "Stack Protector". This necessitated
  /// splitting basic blocks at the IR level to create the success/failure
  /// basic blocks in the tail of the basic block in question. As a result of
  /// this, calls that would have qualified for the sibling call optimization
  /// were no longer eligible for optimization since said calls were no longer
  /// right in the "tail position" (i.e. the immediate predecessor of a
  /// ReturnInst instruction).
  ///
  /// Then it was noticed that since the sibling call optimization causes the
  /// callee to reuse the caller's stack, if we could delay the generation of
  /// the stack protector check until later in CodeGen after the sibling call
  /// decision was made, we get both the tail call optimization and the stack
  /// protector check!
  ///
  /// A few goals in solving this problem were:
  ///
  ///   1. Preserve the architecture independence of stack protector
  ///      generation.
  ///
  ///   2. Preserve the normal IR level stack protector check for platforms
  ///      like OpenBSD for which we support platform-specific stack protector
  ///      generation.
  ///
  /// The main problem that guided the present solution is that one can not
  /// solve this problem in an architecture independent manner at the IR level
  /// only. This is because:
  ///
  ///   1. The decision on whether or not to perform a sibling call on certain
  ///      platforms (for instance i386) requires lower level information
  ///      related to available registers that can not be known at the IR
  ///      level.
  ///
  ///   2. Even if the previous point were not true, the decision on whether to
  ///      perform a tail call is done in LowerCallTo in SelectionDAG which
  ///      occurs after the Stack Protector Pass. As a result, one would need
  ///      to put the relevant callinst into the stack protector check success
  ///      basic block (where the return inst is placed) and then move it back
  ///      later at SelectionDAG/MI time before the stack protector check if
  ///      the tail call optimization failed. The MI level option was nixed
  ///      immediately since it would require platform-specific pattern
  ///      matching. The SelectionDAG level option was nixed because
  ///      SelectionDAG only processes one IR level basic block at a time,
  ///      implying one could not create a DAG Combine to move the callinst.
  ///
  /// To get around this problem a few things were realized:
  ///
  ///   1. While one can not handle multiple IR level basic blocks at the
  ///      SelectionDAG Level, one can generate multiple machine basic blocks
  ///      for one IR level basic block. This is how we handle bit tests and
  ///      switches.
  ///
  ///   2. At the MI level, tail calls are represented via a special return
  ///      MIInst called "tcreturn". Thus if we know the basic block in which
  ///      we wish to insert the stack protector check, we get the correct
  ///      behavior by always inserting the stack protector check right before
  ///      the return statement. This is a "magical transformation" since no
  ///      matter where the stack protector check intrinsic is, we always
  ///      insert the stack protector check code at the end of the BB.
  ///
  /// Given the aforementioned constraints, the following solution was devised:
  ///
  ///   1. On platforms that do not support SelectionDAG stack protector check
  ///      generation, allow for the normal IR level stack protector check
  ///      generation to continue.
  ///
  ///   2. On platforms that do support SelectionDAG stack protector check
  ///      generation:
  ///
  ///     a. Use the IR level stack protector pass to decide if a stack
  ///        protector is required/which BB we insert the stack protector check
  ///        in by reusing the logic already therein. If we wish to generate a
  ///        stack protector check in a basic block, we place a special IR
  ///        intrinsic called llvm.stackprotectorcheck right before the BB's
  ///        returninst or if there is a callinst that could potentially be
  ///        sibling call optimized, before the call inst.
  ///
  ///     b. Then when a BB with said intrinsic is processed, we codegen the BB
  ///        normally via SelectBasicBlock. In said process, when we visit the
  ///        stack protector check, we do not actually emit anything into the
  ///        BB.
  ///        Instead, we just initialize the stack protector descriptor class
  ///        (which involves stashing information/creating the success mbb and
  ///        the failure mbb if we have not created one for this function yet)
  ///        and export the guard variable that we are going to compare.
  ///
  ///     c. After we finish selecting the basic block, in FinishBasicBlock if
  ///        the StackProtectorDescriptor attached to the SelectionDAGBuilder
  ///        is initialized, we produce the validation code with one of these
  ///        techniques:
  ///          1) with a call to a guard check function
  ///          2) with inlined instrumentation
  ///
  ///        1) We insert a call to the check function before the terminator.
  ///
  ///        2) We first find a splice point in the parent basic block before
  ///        the terminator and then splice the terminator of said basic block
  ///        into the success basic block. Then we code-gen a new tail for the
  ///        parent basic block consisting of the two loads, the comparison,
  ///        and finally two branches to the success/failure basic blocks. We
  ///        conclude by code-gening the failure basic block if we have not
  ///        code-gened it already (all stack protector checks we generate in
  ///        the same function use the same failure basic block).
  class StackProtectorDescriptor {
  public:
    StackProtectorDescriptor()
        : ParentMBB(nullptr), SuccessMBB(nullptr), FailureMBB(nullptr) {}

    /// Returns true if all fields of the stack protector descriptor are
    /// initialized, implying that we should/are ready to emit a stack
    /// protector.
    bool shouldEmitStackProtector() const {
      return ParentMBB && SuccessMBB && FailureMBB;
    }

    bool shouldEmitFunctionBasedCheckStackProtector() const {
      return ParentMBB && !SuccessMBB && !FailureMBB;
    }

    /// Initialize the stack protector descriptor structure for a new basic
    /// block.
    void initialize(const BasicBlock *BB, MachineBasicBlock *MBB,
                    bool FunctionBasedInstrumentation) {
      // Make sure we are not initialized yet.
      assert(!shouldEmitStackProtector() && "Stack Protector Descriptor is "
             "already initialized!");
      ParentMBB = MBB;
      if (!FunctionBasedInstrumentation) {
        SuccessMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ true);
        FailureMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ false, FailureMBB);
      }
    }

    /// Reset state that changes when we handle different basic blocks.
    ///
    /// This currently includes:
    ///
    /// 1. The specific basic block we are generating a
    ///    stack protector for (ParentMBB).
    ///
    /// 2. The successor machine basic block that will contain the tail of
    ///    parent mbb after we create the stack protector check (SuccessMBB).
    ///    This BB is visited only on stack protector check success.
    void resetPerBBState() {
      ParentMBB = nullptr;
      SuccessMBB = nullptr;
    }
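
    // For illustration (a sketch of the per-block lifecycle, where SDB stands
    // for the active SelectionDAGBuilder): FinishBasicBlock emits the check
    // once the descriptor is initialized, then clears the per-block state:
    //
    //   if (SDB->SPDescriptor.shouldEmitStackProtector()) {
    //     // ...emit the compare/branch to SuccessMBB/FailureMBB...
    //     SDB->SPDescriptor.resetPerBBState();
    //   }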

    /// Reset state that only changes when we switch functions.
    ///
    /// This currently includes:
    ///
    /// 1. FailureMBB, since we reuse the failure code path for all stack
    ///    protector checks created in an individual function.
    ///
    /// 2. The guard variable, since the guard variable we are checking
    ///    against is always the same.
    void resetPerFunctionState() {
      FailureMBB = nullptr;
    }

    MachineBasicBlock *getParentMBB() { return ParentMBB; }
    MachineBasicBlock *getSuccessMBB() { return SuccessMBB; }
    MachineBasicBlock *getFailureMBB() { return FailureMBB; }

  private:
    /// The basic block for which we are generating the stack protector.
    ///
    /// As a result of stack protector generation, we will splice the
    /// terminators of this basic block into the successor mbb SuccessMBB and
    /// replace it with a compare/branch to the successor mbbs
    /// SuccessMBB/FailureMBB depending on whether or not the stack protector
    /// was violated.
    MachineBasicBlock *ParentMBB;

    /// A basic block visited on stack protector check success that contains
    /// the terminators of ParentMBB.
    MachineBasicBlock *SuccessMBB;

    /// A basic block visited on stack protector check failure that will
    /// contain a call to __stack_chk_fail().
    MachineBasicBlock *FailureMBB;

    /// Add a successor machine basic block to ParentMBB. If the successor mbb
    /// has not been created yet (i.e. if SuccMBB == nullptr), then the machine
    /// basic block will be created. Assign a large weight if IsLikely is true.
    MachineBasicBlock *AddSuccessorMBB(const BasicBlock *BB,
                                       MachineBasicBlock *ParentMBB,
                                       bool IsLikely,
                                       MachineBasicBlock *SuccMBB = nullptr);
  };

private:
  const TargetMachine &TM;
public:
  /// Lowest valid SDNodeOrder. The special case 0 is reserved for scheduling
  /// nodes without a corresponding SDNode.
  static const unsigned LowestSDNodeOrder = 1;

  SelectionDAG &DAG;
  const DataLayout *DL;
  AliasAnalysis *AA;
  const TargetLibraryInfo *LibInfo;

  /// SwitchCases - Vector of CaseBlock structures used to communicate
  /// SwitchInst code generation information.
  std::vector<CaseBlock> SwitchCases;
  /// JTCases - Vector of JumpTable structures used to communicate
  /// SwitchInst code generation information.
  std::vector<JumpTableBlock> JTCases;
  /// BitTestCases - Vector of BitTestBlock structures used to communicate
  /// SwitchInst code generation information.
  std::vector<BitTestBlock> BitTestCases;
  /// A StackProtectorDescriptor structure used to communicate stack protector
  /// information in between SelectBasicBlock and FinishBasicBlock.
  StackProtectorDescriptor SPDescriptor;
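
  // For illustration (a hedged sketch of the hand-off, not the exact driver
  // code): SelectBasicBlock fills these vectors while visiting a SwitchInst,
  // and FinishBasicBlock drains them, roughly:
  //
  //   for (CaseBlock &CB : SDB->SwitchCases)
  //     SDB->visitSwitchCase(CB, CB.ThisBB);
  //   SDB->SwitchCases.clear();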

  // Emit PHI-node-operand constants only once even if used by multiple
  // PHI nodes.
  DenseMap<const Constant *, unsigned> ConstantsOut;

  /// FuncInfo - Information about the function as a whole.
  ///
  FunctionLoweringInfo &FuncInfo;

  /// GFI - Garbage collection metadata for the function.
  GCFunctionInfo *GFI;

  /// LPadToCallSiteMap - Map a landing pad to the call site indexes.
  DenseMap<MachineBasicBlock*, SmallVector<unsigned, 4> > LPadToCallSiteMap;

  /// HasTailCall - This is set to true if a call in the current
  /// block has been translated as a tail call. In this case,
  /// no subsequent DAG nodes should be created.
  ///
  bool HasTailCall;

  LLVMContext *Context;

  SelectionDAGBuilder(SelectionDAG &dag, FunctionLoweringInfo &funcinfo,
                      CodeGenOpt::Level ol)
      : CurInst(nullptr), SDNodeOrder(LowestSDNodeOrder), TM(dag.getTarget()),
        DAG(dag), FuncInfo(funcinfo), HasTailCall(false) {
  }

  void init(GCFunctionInfo *gfi, AliasAnalysis &aa,
            const TargetLibraryInfo *li);

  /// clear - Clear out the current SelectionDAG and the associated
  /// state and prepare this SelectionDAGBuilder object to be used
  /// for a new block. This doesn't clear out information about
  /// additional blocks that are needed to complete switch lowering
  /// or PHI node updating; that information is cleared out as it is
  /// consumed.
  void clear();

  /// clearDanglingDebugInfo - Clear the dangling debug information
  /// map. This function is separated from the clear so that debug
  /// information that is dangling in a basic block can be properly
  /// resolved in a different basic block. This allows the
  /// SelectionDAG to resolve dangling debug information attached
  /// to PHI nodes.
  void clearDanglingDebugInfo();

  /// getRoot - Return the current virtual root of the Selection DAG,
  /// flushing any PendingLoads items. This must be done before emitting
  /// a store or any other node that may need to be ordered after any
  /// prior load instructions.
  ///
  SDValue getRoot();

  /// getControlRoot - Similar to getRoot, but instead of flushing all the
  /// PendingLoads items, flush all the PendingExports items. It is necessary
  /// to do this before emitting a terminator instruction.
  ///
  SDValue getControlRoot();
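
  // For illustration (a sketch of the ordering discipline; Val, Ptr and
  // PtrInfo are placeholders): a store must be chained after pending loads,
  // so its lowering first calls getRoot() to flush them into a token factor:
  //
  //   SDValue Chain = getRoot();        // orders the store after the loads
  //   SDValue St = DAG.getStore(Chain, getCurSDLoc(), Val, Ptr, PtrInfo);
  //   DAG.setRoot(St);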

  SDLoc getCurSDLoc() const {
    return SDLoc(CurInst, SDNodeOrder);
  }

  DebugLoc getCurDebugLoc() const {
    return CurInst ? CurInst->getDebugLoc() : DebugLoc();
  }

  unsigned getSDNodeOrder() const { return SDNodeOrder; }

  void CopyValueToVirtualRegister(const Value *V, unsigned Reg);

  void visit(const Instruction &I);

  void visit(unsigned Opcode, const User &I);

  /// getCopyFromRegs - If there was a virtual register allocated for the value
  /// V, emit a CopyFromReg of the specified type Ty. Return an empty SDValue()
  /// otherwise.
  SDValue getCopyFromRegs(const Value *V, Type *Ty);

  // resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
  // generate the debug data structures now that we've seen its definition.
  void resolveDanglingDebugInfo(const Value *V, SDValue Val);
  SDValue getValue(const Value *V);
  bool findValue(const Value *V) const;

  SDValue getNonRegisterValue(const Value *V);
  SDValue getValueImpl(const Value *V);

  void setValue(const Value *V, SDValue NewN) {
    SDValue &N = NodeMap[V];
    assert(!N.getNode() && "Already set a value for this node!");
    N = NewN;
  }

  void setUnusedArgValue(const Value *V, SDValue NewN) {
    SDValue &N = UnusedArgNodeMap[V];
    assert(!N.getNode() && "Already set a value for this node!");
    N = NewN;
  }
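
  // For illustration (a sketch of the NodeMap discipline): each visit method
  // computes an SDValue for its instruction and records it exactly once, and
  // later visitors fetch operands back through getValue:
  //
  //   SDValue Sum = DAG.getNode(ISD::ADD, getCurSDLoc(), VT, Op1, Op2);
  //   setValue(&I, Sum);                     // asserts if I already has a node
  //   ...
  //   SDValue V = getValue(I.getOperand(0)); // in a later visitor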

  void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
                            MachineBasicBlock *SwitchBB,
                            Instruction::BinaryOps Opc, BranchProbability TW,
                            BranchProbability FW);
  void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
                                    MachineBasicBlock *FBB,
                                    MachineBasicBlock *CurBB,
                                    MachineBasicBlock *SwitchBB,
                                    BranchProbability TW, BranchProbability FW);
  bool ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases);
  bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB);
  void CopyToExportRegsIfNeeded(const Value *V);
  void ExportFromCurrentBlock(const Value *V);
  void LowerCallTo(ImmutableCallSite CS, SDValue Callee, bool IsTailCall,
                   const BasicBlock *EHPadBB = nullptr);

  // Lower range metadata from 0 to N to assert zext to an integer of nearest
  // floor power of two.
  SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I,
                                 SDValue Op);

  void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI,
                                ImmutableCallSite CS, unsigned ArgIdx,
                                unsigned NumArgs, SDValue Callee,
                                Type *ReturnTy, bool IsPatchPoint);

  std::pair<SDValue, SDValue>
  lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
                 const BasicBlock *EHPadBB = nullptr);

  /// UpdateSplitBlock - When an MBB was split during scheduling, update the
  /// references that need to refer to the last resulting block.
  void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last);

  /// Describes a gc.statepoint or a gc.statepoint-like thing for the purposes
  /// of lowering into a STATEPOINT node.
  struct StatepointLoweringInfo {
    /// Bases[i] is the base pointer for Ptrs[i]. Together they denote the set
    /// of gc pointers this STATEPOINT has to relocate.
    SmallVector<const Value *, 16> Bases;
    SmallVector<const Value *, 16> Ptrs;

    /// The set of gc.relocate calls associated with this gc.statepoint.
    SmallVector<const GCRelocateInst *, 16> GCRelocates;

    /// The full list of gc arguments to the gc.statepoint being lowered.
    ArrayRef<const Use> GCArgs;

    /// The gc.statepoint instruction.
    const Instruction *StatepointInstr = nullptr;

    /// The list of gc transition arguments present in the gc.statepoint being
    /// lowered.
    ArrayRef<const Use> GCTransitionArgs;

    /// The ID that the resulting STATEPOINT instruction has to report.
    unsigned ID = -1;

    /// Information regarding the underlying call instruction.
    TargetLowering::CallLoweringInfo CLI;

    /// The deoptimization state associated with this gc.statepoint call, if
    /// any.
    ArrayRef<const Use> DeoptState;

    /// Flags associated with the meta arguments being lowered.
    uint64_t StatepointFlags = -1;

    /// The number of patchable bytes the call needs to get lowered into.
    unsigned NumPatchBytes = -1;

    /// The exception handling unwind destination, in case this represents an
    /// invoke of gc.statepoint.
    const BasicBlock *EHPadBB = nullptr;

    explicit StatepointLoweringInfo(SelectionDAG &DAG) : CLI(DAG) {}
  };

  /// Lower \p SLI into a STATEPOINT instruction.
  SDValue LowerAsSTATEPOINT(StatepointLoweringInfo &SLI);

  // This function is responsible for the whole statepoint lowering process.
  // It uniformly handles invoke and call statepoints.
  void LowerStatepoint(ImmutableStatepoint Statepoint,
                       const BasicBlock *EHPadBB = nullptr);

  void LowerCallSiteWithDeoptBundle(ImmutableCallSite CS, SDValue Callee,
                                    const BasicBlock *EHPadBB);

  void LowerDeoptimizeCall(const CallInst *CI);
  void LowerDeoptimizingReturn();

  void LowerCallSiteWithDeoptBundleImpl(ImmutableCallSite CS, SDValue Callee,
                                        const BasicBlock *EHPadBB,
                                        bool VarArgDisallowed,
                                        bool ForceVoidReturnTy);
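
  // For illustration (a hedged sketch of how a caller might drive
  // LowerAsSTATEPOINT; the field names are from StatepointLoweringInfo above
  // and the values are placeholders):
  //
  //   StatepointLoweringInfo SI(DAG);
  //   SI.StatepointInstr = &I;   // the gc.statepoint being lowered
  //   SI.ID = StatepointID;
  //   // ...populate Bases, Ptrs, GCRelocates, CLI and the flags...
  //   SDValue ReturnValue = LowerAsSTATEPOINT(SI);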

private:
  // Terminator instructions.
  void visitRet(const ReturnInst &I);
  void visitBr(const BranchInst &I);
  void visitSwitch(const SwitchInst &I);
  void visitIndirectBr(const IndirectBrInst &I);
  void visitUnreachable(const UnreachableInst &I);
  void visitCleanupRet(const CleanupReturnInst &I);
  void visitCatchSwitch(const CatchSwitchInst &I);
  void visitCatchRet(const CatchReturnInst &I);
  void visitCatchPad(const CatchPadInst &I);
  void visitCleanupPad(const CleanupPadInst &CPI);

  BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
                                       const MachineBasicBlock *Dst) const;
  void addSuccessorWithProb(
      MachineBasicBlock *Src, MachineBasicBlock *Dst,
      BranchProbability Prob = BranchProbability::getUnknown());

public:
  void visitSwitchCase(CaseBlock &CB,
                       MachineBasicBlock *SwitchBB);
  void visitSPDescriptorParent(StackProtectorDescriptor &SPD,
                               MachineBasicBlock *ParentBB);
  void visitSPDescriptorFailure(StackProtectorDescriptor &SPD);
  void visitBitTestHeader(BitTestBlock &B, MachineBasicBlock *SwitchBB);
  void visitBitTestCase(BitTestBlock &BB,
                        MachineBasicBlock* NextMBB,
                        BranchProbability BranchProbToNext,
                        unsigned Reg,
                        BitTestCase &B,
                        MachineBasicBlock *SwitchBB);
  void visitJumpTable(JumpTable &JT);
  void visitJumpTableHeader(JumpTable &JT, JumpTableHeader &JTH,
                            MachineBasicBlock *SwitchBB);

private:
  // These all get lowered before this pass.
  void visitInvoke(const InvokeInst &I);
  void visitResume(const ResumeInst &I);

  void visitBinary(const User &I, unsigned OpCode);
  void visitShift(const User &I, unsigned Opcode);
  void visitAdd(const User &I)  { visitBinary(I, ISD::ADD); }
  void visitFAdd(const User &I) { visitBinary(I, ISD::FADD); }
  void visitSub(const User &I)  { visitBinary(I, ISD::SUB); }
  void visitFSub(const User &I);
  void visitMul(const User &I)  { visitBinary(I, ISD::MUL); }
  void visitFMul(const User &I) { visitBinary(I, ISD::FMUL); }
  void visitURem(const User &I) { visitBinary(I, ISD::UREM); }
  void visitSRem(const User &I) { visitBinary(I, ISD::SREM); }
  void visitFRem(const User &I) { visitBinary(I, ISD::FREM); }
  void visitUDiv(const User &I) { visitBinary(I, ISD::UDIV); }
  void visitSDiv(const User &I);
  void visitFDiv(const User &I) { visitBinary(I, ISD::FDIV); }
  void visitAnd (const User &I) { visitBinary(I, ISD::AND); }
  void visitOr  (const User &I) { visitBinary(I, ISD::OR); }
  void visitXor (const User &I) { visitBinary(I, ISD::XOR); }
  void visitShl (const User &I) { visitShift(I, ISD::SHL); }
  void visitLShr(const User &I) { visitShift(I, ISD::SRL); }
  void visitAShr(const User &I) { visitShift(I, ISD::SRA); }
  void visitICmp(const User &I);
  void visitFCmp(const User &I);
  // Visit the conversion instructions.
  void visitTrunc(const User &I);
  void visitZExt(const User &I);
  void visitSExt(const User &I);
  void visitFPTrunc(const User &I);
  void visitFPExt(const User &I);
  void visitFPToUI(const User &I);
  void visitFPToSI(const User &I);
  void visitUIToFP(const User &I);
  void visitSIToFP(const User &I);
  void visitPtrToInt(const User &I);
  void visitIntToPtr(const User &I);
  void visitBitCast(const User &I);
  void visitAddrSpaceCast(const User &I);

  void visitExtractElement(const User &I);
  void visitInsertElement(const User &I);
  void visitShuffleVector(const User &I);

  void visitExtractValue(const ExtractValueInst &I);
  void visitInsertValue(const InsertValueInst &I);
  void visitLandingPad(const LandingPadInst &I);

  void visitGetElementPtr(const User &I);
  void visitSelect(const User &I);

  void visitAlloca(const AllocaInst &I);
  void visitLoad(const LoadInst &I);
  void visitStore(const StoreInst &I);
  void visitMaskedLoad(const CallInst &I);
  void visitMaskedStore(const CallInst &I);
  void visitMaskedGather(const CallInst &I);
  void visitMaskedScatter(const CallInst &I);
  void visitAtomicCmpXchg(const AtomicCmpXchgInst &I);
  void visitAtomicRMW(const AtomicRMWInst &I);
  void visitFence(const FenceInst &I);
  void visitPHI(const PHINode &I);
  void visitCall(const CallInst &I);
  bool visitMemCmpCall(const CallInst &I);
  bool visitMemChrCall(const CallInst &I);
  bool visitStrCpyCall(const CallInst &I, bool isStpcpy);
  bool visitStrCmpCall(const CallInst &I);
  bool visitStrLenCall(const CallInst &I);
  bool visitStrNLenCall(const CallInst &I);
  bool visitUnaryFloatCall(const CallInst &I, unsigned Opcode);
  bool visitBinaryFloatCall(const CallInst &I, unsigned Opcode);
  void visitAtomicLoad(const LoadInst &I);
  void visitAtomicStore(const StoreInst &I);
  void visitLoadFromSwiftError(const LoadInst &I);
  void visitStoreToSwiftError(const StoreInst &I);

  void visitInlineAsm(ImmutableCallSite CS);
  const char *visitIntrinsicCall(const CallInst &I, unsigned Intrinsic);
  void visitTargetIntrinsic(const CallInst &I, unsigned Intrinsic);

  void visitVAStart(const CallInst &I);
  void visitVAArg(const VAArgInst &I);
  void visitVAEnd(const CallInst &I);
  void visitVACopy(const CallInst &I);
  void visitStackmap(const CallInst &I);
  void visitPatchpoint(ImmutableCallSite CS,
                       const BasicBlock *EHPadBB = nullptr);

  // These two are implemented in StatepointLowering.cpp
  void visitGCRelocate(const GCRelocateInst &I);
  void visitGCResult(const GCResultInst &I);

  void visitUserOp1(const Instruction &I) {
    llvm_unreachable("UserOp1 should not exist at instruction selection time!");
  }
  void visitUserOp2(const Instruction &I) {
    llvm_unreachable("UserOp2 should not exist at instruction selection time!");
  }

  void processIntegerCallValue(const Instruction &I,
                               SDValue Value, bool IsSigned);

  void HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);

  void emitInlineAsmError(ImmutableCallSite CS, const Twine &Message);

  /// EmitFuncArgumentDbgValue - If V is a function argument then create a
  /// corresponding DBG_VALUE machine instruction for it now. At the end of
  /// instruction selection, they will be inserted into the entry BB.
  bool EmitFuncArgumentDbgValue(const Value *V, DILocalVariable *Variable,
                                DIExpression *Expr, DILocation *DL,
                                int64_t Offset, bool IsIndirect,
                                const SDValue &N);

  /// Return the next block after MBB, or nullptr if there is none.
  MachineBasicBlock *NextBlock(MachineBasicBlock *MBB);

  /// Update the DAG and DAG builder with the relevant information after
  /// a new root node has been created which could be a tail call.
  void updateDAGForMaybeTailCall(SDValue MaybeTC);
};

/// RegsForValue - This struct represents the registers (physical or virtual)
/// that a particular set of values is assigned, and the type information about
/// the value. The most common situation is to represent one value at a time,
/// but struct or array values are handled element-wise as multiple values. The
/// splitting of aggregates is performed recursively, so that we never have
/// aggregate-typed registers. The values at this point do not necessarily have
/// legal types, so each value may require one or more registers of some legal
/// type.
///
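/// For illustration (a sketch; Reg, Ty and dl are placeholders): copying a
/// value out of its assigned registers looks roughly like
/// \code
///   RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), Reg, Ty);
///   SDValue Chain = DAG.getEntryNode();
///   SDValue Result = RFV.getCopyFromRegs(DAG, FuncInfo, dl, Chain, nullptr);
/// \endcode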
struct RegsForValue {
  /// ValueVTs - The value types of the values, which may not be legal, and
  /// may need to be promoted or synthesized from one or more registers.
  ///
  SmallVector<EVT, 4> ValueVTs;

  /// RegVTs - The value types of the registers. This is the same size as
  /// ValueVTs and it records, for each value, what the type of the assigned
  /// register or registers are. (Individual values are never synthesized
  /// from more than one type of register.)
  ///
  /// With virtual registers, the contents of RegVTs is redundant with TLI's
  /// getRegisterType member function, however with physical registers it is
  /// necessary to have a separate record of the types.
  ///
  SmallVector<MVT, 4> RegVTs;

  /// Regs - This list holds the registers assigned to the values.
  /// Each legal or promoted value requires one register, and each
  /// expanded value requires multiple registers.
  ///
  SmallVector<unsigned, 4> Regs;

  RegsForValue();

  RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt, EVT valuevt);

  RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
               const DataLayout &DL, unsigned Reg, Type *Ty);

  /// append - Add the specified values to this one.
  void append(const RegsForValue &RHS) {
    ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
    RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
    Regs.append(RHS.Regs.begin(), RHS.Regs.end());
  }

  /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copy from this
  /// value and return the result as a ValueVTs value. This uses Chain/Flag as
  /// the input and updates them for the output Chain/Flag. If the Flag
  /// pointer is nullptr, no flag is used.
  SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo,
                          const SDLoc &dl, SDValue &Chain, SDValue *Flag,
                          const Value *V = nullptr) const;

  /// getCopyToRegs - Emit a series of CopyToReg nodes that copy the specified
  /// value into the registers specified by this object. This uses Chain/Flag
  /// as the input and updates them for the output Chain/Flag. If the Flag
  /// pointer is nullptr, no flag is used. If V is not nullptr, then it is used
  /// in printing better diagnostic messages on error.
  void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl,
                     SDValue &Chain, SDValue *Flag, const Value *V = nullptr,
                     ISD::NodeType PreferredExtendType = ISD::ANY_EXTEND) const;

  /// AddInlineAsmOperands - Add this value to the specified inlineasm node
  /// operand list. This adds the code marker, matching input operand index
  /// (if applicable), and includes the number of values added into it.
  void AddInlineAsmOperands(unsigned Kind, bool HasMatching,
                            unsigned MatchingIdx, const SDLoc &dl,
                            SelectionDAG &DAG, std::vector<SDValue> &Ops) const;
};

} // end namespace llvm

#endif