//===- PlaceSafepoints.cpp - Place GC Safepoints --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Place garbage collection safepoints at appropriate locations in the IR. This
// does not make relocation semantics or variable liveness explicit.  That's
// done by RewriteStatepointsForGC.
//
// Terminology:
// - A call is said to be "parseable" if there is a stack map generated for the
// return PC of the call.  A runtime can determine where values listed in the
// deopt arguments and (after RewriteStatepointsForGC) gc arguments are located
// on the stack when the code is suspended inside such a call.  Every parse
// point is represented by a call wrapped in a gc.statepoint intrinsic.
// - A "poll" is an explicit check in the generated code to determine if the
// runtime needs the generated code to cooperate by calling a helper routine
// and thus suspending its execution at a known state. The call to the helper
// routine will be parseable.  The (gc & runtime specific) logic of a poll is
// assumed to be provided in a function of the name "gc.safepoint_poll".
//
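// As an illustration only (this is not code produced by this pass), a minimal
// gc.safepoint_poll provided by the frontend might look roughly like the
// following, where @__poll_flag and @__do_safepoint are hypothetical runtime
// symbols and the call to @__do_safepoint becomes the parseable slow path:
//
//   @__poll_flag = external global i8
//   declare void @__do_safepoint()
//
//   define void @gc.safepoint_poll() {
//   entry:
//     %flag = load i8, i8* @__poll_flag
//     %need.poll = icmp ne i8 %flag, 0
//     br i1 %need.poll, label %slow_path, label %cont
//   slow_path:
//     call void @__do_safepoint()
//     br label %cont
//   cont:
//     ret void
//   }
//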
// We aim to insert polls such that running code can quickly be brought to a
// well defined state for inspection by the collector.  In the current
// implementation, this is done via the insertion of poll sites at method entry
// and the backedge of most loops.  We try to avoid inserting more polls than
// are necessary to ensure a finite period between poll sites.  This is not
// because the poll itself is expensive in the generated code; it's not.  Polls
// do tend to impact the optimizer itself in negative ways; we'd like to avoid
// perturbing the optimization of the method as much as we can.
//
// We also need to make most call sites parseable.  The callee might execute a
// poll (or otherwise be inspected by the GC).  If so, the entire stack
// (including the suspended frame of the current method) must be parseable.
//
// This pass will insert:
// - Call parse points ("call safepoints") for any call which may need to
// reach a safepoint during the execution of the callee function.
// - Backedge safepoint polls and entry safepoint polls to ensure that
// executing code reaches a safepoint poll in a finite amount of time.
//
// We do not currently support return statepoints, but adding them would not
// be hard.  They are not required for correctness - entry safepoints are an
// alternative - but some GCs may prefer them.  Patches welcome.
//
//===----------------------------------------------------------------------===//

#include "llvm/Pass.h"

#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"

#define DEBUG_TYPE "safepoint-placement"

STATISTIC(NumEntrySafepoints, "Number of entry safepoints inserted");
STATISTIC(NumBackedgeSafepoints, "Number of backedge safepoints inserted");

STATISTIC(CallInLoop,
          "Number of loops without safepoints due to calls in loop");
STATISTIC(FiniteExecution,
          "Number of loops without safepoints due to finite execution");

using namespace llvm;

// Ignore opportunities to avoid placing safepoints on backedges, useful for
// validation.
static cl::opt<bool> AllBackedges("spp-all-backedges", cl::Hidden,
                                  cl::init(false));

/// How narrow does the trip count of a loop have to be for the loop to be
/// considered "counted"?  Counted loops do not get safepoints at backedges.
static cl::opt<int> CountedLoopTripWidth("spp-counted-loop-trip-width",
                                         cl::Hidden, cl::init(32));

// If true, split the backedge of a loop when placing the safepoint; otherwise
// split the latch block itself.  Both are worth supporting for
// experimentation, but in practice splitting the backedge appears to optimize
// better.
static cl::opt<bool> SplitBackedge("spp-split-backedge", cl::Hidden,
                                   cl::init(false));

namespace {

/// An analysis pass whose purpose is to identify each of the backedges in
/// the function which require a safepoint poll to be inserted.
struct PlaceBackedgeSafepointsImpl : public FunctionPass {
  static char ID;

  /// The output of the pass - a list of the backedges (each described by its
  /// terminating branch) which need a poll inserted.
  std::vector<TerminatorInst *> PollLocations;

  /// True unless we're running spp-no-call, in which case we need to disable
  /// the call-dependent placement opts.
  bool CallSafepointsEnabled;

  ScalarEvolution *SE = nullptr;
  DominatorTree *DT = nullptr;
  LoopInfo *LI = nullptr;
  TargetLibraryInfo *TLI = nullptr;

  PlaceBackedgeSafepointsImpl(bool CallSafepoints = false)
      : FunctionPass(ID), CallSafepointsEnabled(CallSafepoints) {
    initializePlaceBackedgeSafepointsImplPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *);
  void runOnLoopAndSubLoops(Loop *L) {
    // Visit all the subloops
    for (Loop *I : *L)
      runOnLoopAndSubLoops(I);
    runOnLoop(L);
  }

  bool runOnFunction(Function &F) override {
    SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    for (Loop *I : *LI) {
      runOnLoopAndSubLoops(I);
    }
    return false;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    // We no longer modify the IR at all in this pass.  Thus all
    // analyses are preserved.
    AU.setPreservesAll();
  }
};
}

static cl::opt<bool> NoEntry("spp-no-entry", cl::Hidden, cl::init(false));
static cl::opt<bool> NoCall("spp-no-call", cl::Hidden, cl::init(false));
static cl::opt<bool> NoBackedge("spp-no-backedge", cl::Hidden, cl::init(false));

namespace {
struct PlaceSafepoints : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid

  PlaceSafepoints() : FunctionPass(ID) {
    initializePlaceSafepointsPass(*PassRegistry::getPassRegistry());
  }
  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // We modify the graph wholesale (inlining, block insertion, etc).  We
    // preserve nothing at the moment.  We could potentially preserve dom tree
    // if that was worth doing
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }
};
}

// Insert a safepoint poll immediately before the given instruction.  Does
// not handle making the state at the runtime call parseable; that's the
// caller's job.
static void
InsertSafepointPoll(Instruction *InsertBefore,
                    std::vector<CallSite> &ParsePointsNeeded /*rval*/,
                    const TargetLibraryInfo &TLI);

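/// Returns true if the given call site may reach a safepoint in the callee
/// and therefore needs to be made parseable (i.e. wrapped in a gc.statepoint
/// by RewriteStatepointsForGC).  Known GC-leaf calls, inline asm, and the
/// statepoint/gc.relocate/gc.result intrinsics themselves never do.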
static bool needsStatepoint(const CallSite &CS, const TargetLibraryInfo &TLI) {
  if (callsGCLeafFunction(CS, TLI))
    return false;
  if (CS.isCall()) {
    CallInst *call = cast<CallInst>(CS.getInstruction());
    if (call->isInlineAsm())
      return false;
  }

  return !(isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS));
}

/// Returns true if this loop is known to contain a call safepoint which
/// must unconditionally execute on any iteration of the loop which returns
/// to the loop header via an edge from Pred.  Returns a conservatively
/// correct answer; i.e. false is always valid.
static bool containsUnconditionalCallSafepoint(Loop *L, BasicBlock *Header,
                                               BasicBlock *Pred,
                                               DominatorTree &DT,
                                               const TargetLibraryInfo &TLI) {
  // In general, we're looking for any cut of the graph which ensures
  // there's a call safepoint along every edge between Header and Pred.
  // For the moment, we look only for the 'cuts' that consist of a single call
  // instruction in a block which is dominated by the Header and dominates the
  // loop latch (Pred) block.  Somewhat surprisingly, walking the entire chain
  // of such dominating blocks finds substantially more cases than just
  // checking the Pred and Header blocks themselves.  This may be due to the
  // density of loop exit conditions caused by range and null checks.
  // TODO: structure this as an analysis pass, cache the result for subloops,
  // avoid dom tree recalculations
  assert(DT.dominates(Header, Pred) && "loop latch not dominated by header?");

  BasicBlock *Current = Pred;
  while (true) {
    for (Instruction &I : *Current) {
      if (auto CS = CallSite(&I))
        // Note: Technically, needing a safepoint isn't quite the right
        // condition here.  We should instead be checking if the target method
        // has an unconditional poll.  In practice, this is only a theoretical
        // concern since we don't have any methods with conditional-only
        // safepoint polls.
        if (needsStatepoint(CS, TLI))
          return true;
    }

    if (Current == Header)
      break;
    Current = DT.getNode(Current)->getIDom()->getBlock();
  }

  return false;
}

/// Returns true if this loop is known to terminate in a finite number of
/// iterations.  Note that this function may return false for a loop which
/// does in fact terminate in a finite constant number of iterations, due to
/// conservatism in the analysis.
static bool mustBeFiniteCountedLoop(Loop *L, ScalarEvolution *SE,
                                    BasicBlock *Pred) {
  // A conservative bound on the loop as a whole.
  const SCEV *MaxTrips = SE->getMaxBackedgeTakenCount(L);
  if (MaxTrips != SE->getCouldNotCompute() &&
      SE->getUnsignedRange(MaxTrips).getUnsignedMax().isIntN(
          CountedLoopTripWidth))
    return true;

  // If this is a conditional branch to the header with the alternate path
  // being outside the loop, we can ask questions about the execution frequency
  // of the exit block.
  if (L->isLoopExiting(Pred)) {
    // This returns an exact expression only.  TODO: We really only need an
    // upper bound here, but SE doesn't expose that.
    const SCEV *MaxExec = SE->getExitCount(L, Pred);
    if (MaxExec != SE->getCouldNotCompute() &&
        SE->getUnsignedRange(MaxExec).getUnsignedMax().isIntN(
            CountedLoopTripWidth))
        return true;
  }

  return /* not finite */ false;
}

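/// Scan the instructions of a single basic block starting at Start and
/// stopping at End or the block's terminator, whichever comes first.  Any
/// call instructions encountered are appended to Calls; if the terminator is
/// reached, successor blocks not yet in Seen are queued onto Worklist.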
static void scanOneBB(Instruction *Start, Instruction *End,
                      std::vector<CallInst *> &Calls,
                      DenseSet<BasicBlock *> &Seen,
                      std::vector<BasicBlock *> &Worklist) {
  for (BasicBlock::iterator BBI(Start), BBE0 = Start->getParent()->end(),
                                        BBE1 = BasicBlock::iterator(End);
       BBI != BBE0 && BBI != BBE1; BBI++) {
    if (CallInst *CI = dyn_cast<CallInst>(&*BBI))
      Calls.push_back(CI);

    // FIXME: This code does not handle invokes
    assert(!isa<InvokeInst>(&*BBI) &&
           "support for invokes in poll code needed");

    // Only add the successor blocks if we reach the terminator instruction
    // without encountering End first.
    if (BBI->isTerminator()) {
      BasicBlock *BB = BBI->getParent();
      for (BasicBlock *Succ : successors(BB)) {
        if (Seen.insert(Succ).second) {
          Worklist.push_back(Succ);
        }
      }
    }
  }
}

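/// Collect all call instructions in the code reachable from Start without
/// passing End.  Used after inlining gc.safepoint_poll to locate the runtime
/// call(s) on the poll's slow path which must be made parseable.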
static void scanInlinedCode(Instruction *Start, Instruction *End,
                            std::vector<CallInst *> &Calls,
                            DenseSet<BasicBlock *> &Seen) {
  Calls.clear();
  std::vector<BasicBlock *> Worklist;
  Seen.insert(Start->getParent());
  scanOneBB(Start, End, Calls, Seen, Worklist);
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.back();
    Worklist.pop_back();
    scanOneBB(&*BB->begin(), End, Calls, Seen, Worklist);
  }
}

bool PlaceBackedgeSafepointsImpl::runOnLoop(Loop *L) {
  // Loop through all loop latches (branches controlling backedges).  We need
  // to place a safepoint on every backedge (potentially).
  // Note: In common usage, there will be only one edge due to LoopSimplify
  // having run sometime earlier in the pipeline, but this code must be correct
  // w.r.t. loops with multiple backedges.
  BasicBlock *Header = L->getHeader();
  SmallVector<BasicBlock*, 16> LoopLatches;
  L->getLoopLatches(LoopLatches);
  for (BasicBlock *Pred : LoopLatches) {
    assert(L->contains(Pred));

    // Make a policy decision about whether this loop needs a safepoint or
    // not.  Note that this is about unburdening the optimizer in loops, not
    // avoiding the runtime cost of the actual safepoint.
    if (!AllBackedges) {
      if (mustBeFiniteCountedLoop(L, SE, Pred)) {
        LLVM_DEBUG(dbgs() << "skipping safepoint placement in finite loop\n");
        FiniteExecution++;
        continue;
      }
      if (CallSafepointsEnabled &&
          containsUnconditionalCallSafepoint(L, Header, Pred, *DT, *TLI)) {
        // Note: This is only semantically legal because we won't do any
        // further IPO or inlining before the actual call insertion.  If we
        // did, we might later lose this call safepoint.
        LLVM_DEBUG(
            dbgs()
            << "skipping safepoint placement due to unconditional call\n");
        CallInLoop++;
        continue;
      }
    }

    // TODO: We can create an inner loop which runs a finite number of
    // iterations with an outer loop which contains a safepoint.  This would
    // not help runtime performance that much, but it might help our ability to
    // optimize the inner loop.

    // Safepoint insertion would involve creating a new basic block (as the
    // target of the current backedge) which does the safepoint (of all live
    // variables) and branches to the true header.
    TerminatorInst *Term = Pred->getTerminator();

    LLVM_DEBUG(dbgs() << "[LSP] terminator instruction: " << *Term);

    PollLocations.push_back(Term);
  }

  return false;
}

/// Returns true if an entry safepoint is not required before this callsite in
/// the caller function.
static bool doesNotRequireEntrySafepointBefore(const CallSite &CS) {
  Instruction *Inst = CS.getInstruction();
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::experimental_gc_statepoint:
    case Intrinsic::experimental_patchpoint_void:
    case Intrinsic::experimental_patchpoint_i64:
      // These can wrap an actual call which may grow the stack by an unbounded
      // amount or run forever.
      return false;
    default:
      // Most LLVM intrinsics do not expand to actual calls, or, if they do,
      // expand to leaf functions that cause only finite stack growth.  In
      // particular, the optimizer likes to form things like memsets out of
      // stores in the original IR.  Another important example is
      // llvm.localescape which must occur in the entry block.  Inserting a
      // safepoint before it is not legal since it could push the localescape
      // out of the entry block.
      return true;
    }
  }
  return false;
}

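/// Returns the instruction before which the entry safepoint poll should be
/// inserted.  We walk the "straight line" sequence of instructions from the
/// start of the entry block and stop at the first call which might grow the
/// stack or run for an unbounded amount of time (or at the end of that
/// straight-line region).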
static Instruction *findLocationForEntrySafepoint(Function &F,
                                                  DominatorTree &DT) {

  // Conceptually, this poll needs to be on method entry, but in
  // practice, we place it as late in the entry block as possible.  We
  // can place it as late as we want as long as it dominates all calls
  // that can grow the stack.  This, combined with backedge polls,
  // gives us all the progress guarantees we need.

  // HasNextInstruction and NextInstruction are used to iterate
  // through a "straight line" execution sequence.

  auto HasNextInstruction = [](Instruction *I) {
    if (!I->isTerminator())
      return true;

    BasicBlock *nextBB = I->getParent()->getUniqueSuccessor();
    return nextBB && (nextBB->getUniquePredecessor() != nullptr);
  };

  auto NextInstruction = [&](Instruction *I) {
    assert(HasNextInstruction(I) &&
           "first check if there is a next instruction!");

    if (I->isTerminator())
      return &I->getParent()->getUniqueSuccessor()->front();
    return &*++I->getIterator();
  };

  Instruction *Cursor = nullptr;
  for (Cursor = &F.getEntryBlock().front(); HasNextInstruction(Cursor);
       Cursor = NextInstruction(Cursor)) {

    // We need to ensure a safepoint poll occurs before any 'real' call.  The
    // easiest way to ensure finite execution between safepoints in the face of
    // recursive and mutually recursive functions is to enforce that each takes
    // a safepoint.  Additionally, we need to ensure a poll before any call
    // which can grow the stack by an unbounded amount.  This isn't required
    // for GC semantics per se, but is a common requirement for languages
    // which detect stack overflow via guard pages and then throw exceptions.
    if (auto CS = CallSite(Cursor)) {
      if (doesNotRequireEntrySafepointBefore(CS))
        continue;
      break;
    }
  }

  assert((HasNextInstruction(Cursor) || Cursor->isTerminator()) &&
         "either we stopped because of a call, or because of a terminator");

  return Cursor;
}

static const char *const GCSafepointPollName = "gc.safepoint_poll";

static bool isGCSafepointPoll(Function &F) {
  return F.getName().equals(GCSafepointPollName);
}

/// Returns true if this function should be rewritten to include safepoint
/// polls and parseable call sites.  The main point of this function is to be
/// an extension point for custom logic.
static bool shouldRewriteFunction(Function &F) {
  // TODO: This should check the GCStrategy
  if (F.hasGC()) {
    const auto &FunctionGCName = F.getGC();
    const StringRef StatepointExampleName("statepoint-example");
    const StringRef CoreCLRName("coreclr");
    return (StatepointExampleName == FunctionGCName) ||
           (CoreCLRName == FunctionGCName);
  } else
    return false;
}

// TODO: These should become properties of the GCStrategy, possibly with
// command line overrides.
static bool enableEntrySafepoints(Function &F) { return !NoEntry; }
static bool enableBackedgeSafepoints(Function &F) { return !NoBackedge; }
static bool enableCallSafepoints(Function &F) { return !NoCall; }

bool PlaceSafepoints::runOnFunction(Function &F) {
  if (F.isDeclaration() || F.empty()) {
    // This is a declaration; nothing to do.  Must exit early to avoid a crash
    // in dom tree calculation.
    return false;
  }

  if (isGCSafepointPoll(F)) {
    // Given that we inline this function as part of safepoint poll insertion,
    // rewriting it here doesn't make any sense.  Note that we do make any
    // calls it contains parseable once a poll is inlined.
    return false;
  }

  if (!shouldRewriteFunction(F))
    return false;

  const TargetLibraryInfo &TLI =
      getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

  bool Modified = false;

  // In various bits below, we rely on the fact that uses are reachable from
  // defs.  When there are basic blocks unreachable from the entry, dominance
  // and reachability queries return nonsensical results.  Thus, we preprocess
  // the function to ensure these properties hold.
  Modified |= removeUnreachableBlocks(F);

  // STEP 1 - Insert the safepoint polling locations.  We do not need to
  // actually insert parse points yet.  That will be done for all polls and
  // calls in a single pass.

  DominatorTree DT;
  DT.recalculate(F);

  SmallVector<Instruction *, 16> PollsNeeded;
  std::vector<CallSite> ParsePointNeeded;

  if (enableBackedgeSafepoints(F)) {
    // Construct a pass manager to run the LoopPass backedge logic.  We
    // need the pass manager to handle scheduling all the loop passes
    // appropriately.  Doing this by hand is painful and just not worth messing
    // with for the moment.
    legacy::FunctionPassManager FPM(F.getParent());
    bool CanAssumeCallSafepoints = enableCallSafepoints(F);
    auto *PBS = new PlaceBackedgeSafepointsImpl(CanAssumeCallSafepoints);
    FPM.add(PBS);
    FPM.run(F);

    // We preserve dominance information when inserting the poll; otherwise
    // we'd have to recalculate it on every insert.
    DT.recalculate(F);

    auto &PollLocations = PBS->PollLocations;

    auto OrderByBBName = [](Instruction *a, Instruction *b) {
      return a->getParent()->getName() < b->getParent()->getName();
    };
    // We need the order of the list to be stable so that naming ends up stable
    // when we split edges.  This makes test cases much easier to write.
    llvm::sort(PollLocations.begin(), PollLocations.end(), OrderByBBName);

    // We can sometimes end up with duplicate poll locations.  This happens if
    // a single loop is visited more than once.  The fact that this happens
    // seems wrong, but it does happen for the split-backedge.ll test case.
    PollLocations.erase(std::unique(PollLocations.begin(),
                                    PollLocations.end()),
                        PollLocations.end());

    // Insert a poll at each point the analysis pass identified.
    // The poll location must be the terminator of a loop latch block.
    for (TerminatorInst *Term : PollLocations) {
      // We are inserting a poll; the function is modified.
      Modified = true;

      if (SplitBackedge) {
        // Split the backedge of the loop and insert the poll within that new
        // basic block.  This creates a loop with two latches per original
        // latch (which is non-ideal), but this appears to be easier to
        // optimize in practice than inserting the poll immediately before the
        // latch test.

        // Since this is a latch, at least one of the successors must dominate
        // it.  It's possible that we have a) duplicate edges to the same
        // header and b) edges to distinct loop headers.  We need to insert
        // polls on each.
        SetVector<BasicBlock *> Headers;
        for (unsigned i = 0; i < Term->getNumSuccessors(); i++) {
          BasicBlock *Succ = Term->getSuccessor(i);
          if (DT.dominates(Succ, Term->getParent())) {
            Headers.insert(Succ);
          }
        }
        assert(!Headers.empty() && "poll location is not a loop latch?");

        // The split loop structure here is so that we only need to recalculate
        // the dominator tree once.  Alternatively, we could just keep it up to
        // date and use a more natural merged loop.
        SetVector<BasicBlock *> SplitBackedges;
        for (BasicBlock *Header : Headers) {
          BasicBlock *NewBB = SplitEdge(Term->getParent(), Header, &DT);
          PollsNeeded.push_back(NewBB->getTerminator());
          NumBackedgeSafepoints++;
        }
      } else {
        // Split the latch block itself, right before the terminator.
        PollsNeeded.push_back(Term);
        NumBackedgeSafepoints++;
      }
    }
  }

  if (enableEntrySafepoints(F)) {
    if (Instruction *Location = findLocationForEntrySafepoint(F, DT)) {
      PollsNeeded.push_back(Location);
      Modified = true;
      NumEntrySafepoints++;
    }
    // TODO: else we should assert that there was, in fact, a policy choice to
    // not insert an entry safepoint poll.
  }

  // Now that we've identified all the needed safepoint poll locations, insert
  // the safepoint polls themselves.
  for (Instruction *PollLocation : PollsNeeded) {
    std::vector<CallSite> RuntimeCalls;
    InsertSafepointPoll(PollLocation, RuntimeCalls, TLI);
    ParsePointNeeded.insert(ParsePointNeeded.end(), RuntimeCalls.begin(),
                            RuntimeCalls.end());
  }

  return Modified;
}

char PlaceBackedgeSafepointsImpl::ID = 0;
char PlaceSafepoints::ID = 0;

FunctionPass *llvm::createPlaceSafepointsPass() {
  return new PlaceSafepoints();
}
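
// A minimal usage sketch (assumptions: a Module *M whose functions use the
// "statepoint-example" or "coreclr" GC strategy and which defines a
// gc.safepoint_poll function); this pass is typically followed by
// RewriteStatepointsForGC:
//
//   legacy::PassManager PM;
//   PM.add(createPlaceSafepointsPass());
//   PM.run(*M);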

INITIALIZE_PASS_BEGIN(PlaceBackedgeSafepointsImpl,
                      "place-backedge-safepoints-impl",
                      "Place Backedge Safepoints", false, false)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(PlaceBackedgeSafepointsImpl,
                    "place-backedge-safepoints-impl",
                    "Place Backedge Safepoints", false, false)

INITIALIZE_PASS_BEGIN(PlaceSafepoints, "place-safepoints", "Place Safepoints",
                      false, false)
INITIALIZE_PASS_END(PlaceSafepoints, "place-safepoints", "Place Safepoints",
                    false, false)

static void
InsertSafepointPoll(Instruction *InsertBefore,
                    std::vector<CallSite> &ParsePointsNeeded /*rval*/,
                    const TargetLibraryInfo &TLI) {
  BasicBlock *OrigBB = InsertBefore->getParent();
  Module *M = InsertBefore->getModule();
  assert(M && "must be part of a module");

  // Inline the safepoint poll implementation - this will get all the branches,
  // control flow, etc.  Most importantly, it will introduce the actual slow
  // path call - where we need to insert a safepoint (parse point).

  auto *F = M->getFunction(GCSafepointPollName);
  assert(F && "gc.safepoint_poll function is missing");
  assert(F->getValueType() ==
         FunctionType::get(Type::getVoidTy(M->getContext()), false) &&
         "gc.safepoint_poll declared with wrong type");
  assert(!F->empty() && "gc.safepoint_poll must be a non-empty function");
  CallInst *PollCall = CallInst::Create(F, "", InsertBefore);

  // Record some information about the call site we're replacing
  BasicBlock::iterator Before(PollCall), After(PollCall);
  bool IsBegin = false;
  if (Before == OrigBB->begin())
    IsBegin = true;
  else
    Before--;

  After++;
  assert(After != OrigBB->end() && "must have successor");

  // Do the actual inlining
  InlineFunctionInfo IFI;
  bool InlineStatus = InlineFunction(PollCall, IFI);
  assert(InlineStatus && "inline must succeed");
  (void)InlineStatus; // suppress warning in release-asserts

  // Check post-conditions
  assert(IFI.StaticAllocas.empty() && "can't have allocs");

  std::vector<CallInst *> Calls; // new calls
  DenseSet<BasicBlock *> BBs;    // new BBs + insertee

  // Include only the newly inserted instructions.  Note: Before may not be a
  // valid iterator if we inserted at the beginning of the basic block.
  BasicBlock::iterator Start = IsBegin ? OrigBB->begin() : std::next(Before);

  // If your poll function includes an unreachable at the end, that's not
  // valid.  Bugpoint likes to create this, so check for it.
  assert(isPotentiallyReachable(&*Start, &*After) &&
         "malformed poll function");

  scanInlinedCode(&*Start, &*After, Calls, BBs);
  assert(!Calls.empty() && "slow path not found for safepoint poll");

  // Record the fact that we need a parseable state at the runtime call
  // contained in the poll function.  This is required so that the runtime
  // knows how to parse the last frame when we actually take the safepoint
  // (i.e. execute the slow path).
  assert(ParsePointsNeeded.empty());
  for (auto *CI : Calls) {
    // No safepoint needed or wanted
    if (!needsStatepoint(CI, TLI))
      continue;

    // These are likely runtime calls.  Should we assert that via calling
    // convention or something?
    ParsePointsNeeded.push_back(CallSite(CI));
  }
  assert(ParsePointsNeeded.size() <= Calls.size());
}