//===- PlaceSafepoints.cpp - Place GC Safepoints --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Place garbage collection safepoints at appropriate locations in the IR. This
// does not make relocation semantics or variable liveness explicit.  That's
// done by RewriteStatepointsForGC.
//
// Terminology:
// - A call is said to be "parseable" if there is a stack map generated for the
// return PC of the call.  A runtime can determine where values listed in the
// deopt arguments and (after RewriteStatepointsForGC) gc arguments are located
// on the stack when the code is suspended inside such a call.  Every parse
// point is represented by a call wrapped in a gc.statepoint intrinsic.
// - A "poll" is an explicit check in the generated code to determine if the
// runtime needs the generated code to cooperate by calling a helper routine
// and thus suspending its execution at a known state. The call to the helper
// routine will be parseable.  The (gc & runtime specific) logic of a poll is
// assumed to be provided in a function of the name "gc.safepoint_poll".
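//
// For illustration, a minimal gc.safepoint_poll body might look like the
// following LLVM IR.  This is only a sketch; the flag and slow-path callee
// names are hypothetical, and the real logic is GC and runtime specific:
//
//   define void @gc.safepoint_poll() {
//   entry:
//     %flag = load atomic i8, i8* @__safepoint_flag acquire, align 1
//     %do_poll = icmp ne i8 %flag, 0
//     br i1 %do_poll, label %slow, label %done
//   slow:
//     ; this call is the one this pass later makes parseable
//     call void @__runtime_do_safepoint()
//     br label %done
//   done:
//     ret void
//   }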
//
// We aim to insert polls such that running code can quickly be brought to a
// well-defined state for inspection by the collector.  In the current
// implementation, this is done via the insertion of poll sites at method entry
// and the backedge of most loops.  We try to avoid inserting more polls than
// are necessary to ensure a finite period between poll sites.  This is not
// because the poll itself is expensive in the generated code; it's not.  Polls
// do tend to impact the optimizer itself in negative ways; we'd like to avoid
// perturbing the optimization of the method as much as we can.
//
// We also need to make most call sites parseable.  The callee might execute a
// poll (or otherwise be inspected by the GC).  If so, the entire stack
// (including the suspended frame of the current method) must be parseable.
//
// This pass will insert:
// - Call parse points ("call safepoints") for any call which may need to
// reach a safepoint during the execution of the callee function.
// - Backedge safepoint polls and entry safepoint polls to ensure that
// executing code reaches a safepoint poll in a finite amount of time.
//
// We do not currently support return statepoints, but adding them would not
// be hard.  They are not required for correctness - entry safepoints are an
// alternative - but some GCs may prefer them.  Patches welcome.
//
//===----------------------------------------------------------------------===//

#include "llvm/Pass.h"

#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"

#define DEBUG_TYPE "safepoint-placement"

STATISTIC(NumEntrySafepoints, "Number of entry safepoints inserted");
STATISTIC(NumBackedgeSafepoints, "Number of backedge safepoints inserted");

STATISTIC(CallInLoop,
          "Number of loops without safepoints due to calls in loop");
STATISTIC(FiniteExecution,
          "Number of loops without safepoints due to finite execution");

using namespace llvm;

// Ignore opportunities to avoid placing safepoints on backedges, useful for
// validation
static cl::opt<bool> AllBackedges("spp-all-backedges", cl::Hidden,
                                  cl::init(false));

/// How narrow does the trip count of a loop have to be for the loop to be
/// considered "counted"?  Counted loops do not get safepoints at backedges.
static cl::opt<int> CountedLoopTripWidth("spp-counted-loop-trip-width",
                                         cl::Hidden, cl::init(32));

// If true, split the backedge of a loop when placing the safepoint; otherwise,
// split the latch block itself.  Both are worth supporting for
// experimentation, but in practice, it looks like splitting the backedge
// optimizes better.
static cl::opt<bool> SplitBackedge("spp-split-backedge", cl::Hidden,
                                   cl::init(false));
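
// Schematically, for a latch block L with a backedge to header H:
//   spp-split-backedge=false: the poll lands in L itself, immediately
//     before L's terminator (L -> H unchanged).
//   spp-split-backedge=true:  a new block P is split out of the edge
//     (L -> P -> H) and the poll lands in P.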

namespace {

/// An analysis pass whose purpose is to identify each of the backedges in
/// the function which require a safepoint poll to be inserted.
struct PlaceBackedgeSafepointsImpl : public FunctionPass {
  static char ID;

  /// The output of the pass - gives a list of each backedge (described by
  /// pointing at the branch) which needs a poll inserted.
  std::vector<TerminatorInst *> PollLocations;

  /// True unless we're running spp-no-call, in which case we need to disable
  /// the call-dependent placement opts.
  bool CallSafepointsEnabled;

  ScalarEvolution *SE = nullptr;
  DominatorTree *DT = nullptr;
  LoopInfo *LI = nullptr;

  PlaceBackedgeSafepointsImpl(bool CallSafepoints = false)
      : FunctionPass(ID), CallSafepointsEnabled(CallSafepoints) {
    initializePlaceBackedgeSafepointsImplPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *);
  void runOnLoopAndSubLoops(Loop *L) {
    // Visit all the subloops
    for (Loop *I : *L)
      runOnLoopAndSubLoops(I);
    runOnLoop(L);
  }

  bool runOnFunction(Function &F) override {
    SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    for (Loop *I : *LI) {
      runOnLoopAndSubLoops(I);
    }
    return false;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    // We no longer modify the IR at all in this pass.  Thus all
    // analyses are preserved.
    AU.setPreservesAll();
  }
};
}

static cl::opt<bool> NoEntry("spp-no-entry", cl::Hidden, cl::init(false));
static cl::opt<bool> NoCall("spp-no-call", cl::Hidden, cl::init(false));
static cl::opt<bool> NoBackedge("spp-no-backedge", cl::Hidden, cl::init(false));

namespace {
struct PlaceSafepoints : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid

  PlaceSafepoints() : FunctionPass(ID) {
    initializePlaceSafepointsPass(*PassRegistry::getPassRegistry());
  }
  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // We modify the graph wholesale (inlining, block insertion, etc.).  We
    // preserve nothing at the moment.  We could potentially preserve the dom
    // tree if that were worth doing.
  }
};
}

// Insert a safepoint poll immediately before the given instruction.  Does
// not handle the parsability of state at the runtime call; that's the
// caller's job.
static void
InsertSafepointPoll(Instruction *InsertBefore,
                    std::vector<CallSite> &ParsePointsNeeded /*rval*/);

static bool needsStatepoint(const CallSite &CS) {
  if (callsGCLeafFunction(CS))
    return false;
  if (CS.isCall()) {
    CallInst *call = cast<CallInst>(CS.getInstruction());
    if (call->isInlineAsm())
      return false;
  }

  return !(isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS));
}

/// Returns true if this loop is known to contain a call safepoint which
/// must unconditionally execute on any iteration of the loop which returns
/// to the loop header via an edge from Pred.  Returns a conservatively
/// correct answer; i.e. false is always valid.
static bool containsUnconditionalCallSafepoint(Loop *L, BasicBlock *Header,
                                               BasicBlock *Pred,
                                               DominatorTree &DT) {
  // In general, we're looking for any cut of the graph which ensures
  // there's a call safepoint along every edge between Header and Pred.
  // For the moment, we look only for the 'cuts' that consist of a single call
  // instruction in a block which is dominated by the Header and dominates the
  // loop latch (Pred) block.  Somewhat surprisingly, walking the entire chain
// of such dominating blocks catches substantially more cases than just
// checking the Pred and Header blocks themselves.  This may be due to the
// density of loop exit conditions caused by range and null checks.
// TODO: structure this as an analysis pass, cache the result for subloops,
// avoid dom tree recalculations
  assert(DT.dominates(Header, Pred) && "loop latch not dominated by header?");

  BasicBlock *Current = Pred;
  while (true) {
    for (Instruction &I : *Current) {
      if (auto CS = CallSite(&I))
        // Note: Technically, needing a safepoint isn't quite the right
        // condition here.  We should instead be checking if the target method
        // has an unconditional poll.  In practice, this is only a theoretical
        // concern since we don't have any methods with conditional-only
        // safepoint polls.
        if (needsStatepoint(CS))
          return true;
    }

    if (Current == Header)
      break;
    Current = DT.getNode(Current)->getIDom()->getBlock();
  }

  return false;
}
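
// For example, given a loop shaped like
//   Header -> A -> B (contains a call) -> Pred (latch) -> Header
// where B is dominated by Header and dominates Pred, the call in B is found
// by walking up Pred's immediate-dominator chain until Header is reached.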

/// Returns true if this loop is known to terminate in a finite number of
/// iterations.  Note that this function may return false for a loop which
/// does actually terminate in a finite constant number of iterations due to
/// conservatism in the analysis.
static bool mustBeFiniteCountedLoop(Loop *L, ScalarEvolution *SE,
                                    BasicBlock *Pred) {
  // A conservative bound on the loop as a whole.
  const SCEV *MaxTrips = SE->getMaxBackedgeTakenCount(L);
  if (MaxTrips != SE->getCouldNotCompute() &&
      SE->getUnsignedRange(MaxTrips).getUnsignedMax().isIntN(
          CountedLoopTripWidth))
    return true;

  // If this is a conditional branch to the header with the alternate path
  // being outside the loop, we can ask questions about the execution frequency
  // of the exit block.
  if (L->isLoopExiting(Pred)) {
    // This returns an exact expression only.  TODO: We really only need an
    // upper bound here, but SE doesn't expose that.
    const SCEV *MaxExec = SE->getExitCount(L, Pred);
    if (MaxExec != SE->getCouldNotCompute() &&
        SE->getUnsignedRange(MaxExec).getUnsignedMax().isIntN(
            CountedLoopTripWidth))
      return true;
  }

  return /* not finite */ false;
}
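
// For example, with the default CountedLoopTripWidth of 32, a loop such as
//   for (uint32_t i = 0; i < n; ++i) { ... }
// has a max backedge-taken count which fits in 32 bits (assuming SCEV can
// compute it), so it is treated as counted and gets no backedge poll.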

static void scanOneBB(Instruction *Start, Instruction *End,
                      std::vector<CallInst *> &Calls,
                      DenseSet<BasicBlock *> &Seen,
                      std::vector<BasicBlock *> &Worklist) {
  for (BasicBlock::iterator BBI(Start), BBE0 = Start->getParent()->end(),
                                        BBE1 = BasicBlock::iterator(End);
       BBI != BBE0 && BBI != BBE1; BBI++) {
    if (CallInst *CI = dyn_cast<CallInst>(&*BBI))
      Calls.push_back(CI);

    // FIXME: This code does not handle invokes
    assert(!isa<InvokeInst>(&*BBI) &&
           "support for invokes in poll code needed");

    // Only add the successor blocks if we reach the terminator instruction
    // without encountering end first
    if (BBI->isTerminator()) {
      BasicBlock *BB = BBI->getParent();
      for (BasicBlock *Succ : successors(BB)) {
        if (Seen.insert(Succ).second) {
          Worklist.push_back(Succ);
        }
      }
    }
  }
}

static void scanInlinedCode(Instruction *Start, Instruction *End,
                            std::vector<CallInst *> &Calls,
                            DenseSet<BasicBlock *> &Seen) {
  Calls.clear();
  std::vector<BasicBlock *> Worklist;
  Seen.insert(Start->getParent());
  scanOneBB(Start, End, Calls, Seen, Worklist);
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.back();
    Worklist.pop_back();
    scanOneBB(&*BB->begin(), End, Calls, Seen, Worklist);
  }
}

bool PlaceBackedgeSafepointsImpl::runOnLoop(Loop *L) {
  // Loop through all loop latches (branches controlling backedges).  We need
  // to place a safepoint on every backedge (potentially).
  // Note: In common usage, there will be only one edge due to LoopSimplify
  // having run sometime earlier in the pipeline, but this code must be correct
  // w.r.t. loops with multiple backedges.
  BasicBlock *Header = L->getHeader();
  SmallVector<BasicBlock *, 16> LoopLatches;
  L->getLoopLatches(LoopLatches);
  for (BasicBlock *Pred : LoopLatches) {
    assert(L->contains(Pred));

    // Make a policy decision about whether this loop needs a safepoint or
    // not.  Note that this is about unburdening the optimizer in loops, not
    // avoiding the runtime cost of the actual safepoint.
    if (!AllBackedges) {
      if (mustBeFiniteCountedLoop(L, SE, Pred)) {
        DEBUG(dbgs() << "skipping safepoint placement in finite loop\n");
        FiniteExecution++;
        continue;
      }
      if (CallSafepointsEnabled &&
          containsUnconditionalCallSafepoint(L, Header, Pred, *DT)) {
        // Note: This is only semantically legal since we won't do any further
        // IPO or inlining before the actual call insertion.  If we did, we
        // might later lose this call safepoint.
        DEBUG(dbgs() << "skipping safepoint placement due to unconditional "
                        "call\n");
        CallInLoop++;
        continue;
      }
    }

    // TODO: We can create an inner loop which runs a finite number of
    // iterations with an outer loop which contains a safepoint.  This would
    // not help runtime performance that much, but it might help our ability to
    // optimize the inner loop.

    // Safepoint insertion would involve creating a new basic block (as the
    // target of the current backedge) which does the safepoint (of all live
    // variables) and branches to the true header
    TerminatorInst *Term = Pred->getTerminator();

    DEBUG(dbgs() << "[LSP] terminator instruction: " << *Term);

    PollLocations.push_back(Term);
  }

  return false;
}

/// Returns true if an entry safepoint is not required before this callsite in
/// the caller function.
static bool doesNotRequireEntrySafepointBefore(const CallSite &CS) {
  Instruction *Inst = CS.getInstruction();
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::experimental_gc_statepoint:
    case Intrinsic::experimental_patchpoint_void:
    case Intrinsic::experimental_patchpoint_i64:
      // These can wrap an actual call which may grow the stack by an unbounded
      // amount or run forever.
      return false;
    default:
      // Most LLVM intrinsics are things which do not expand to actual calls, or
      // at least if they do, are leaf functions that cause only finite stack
      // growth.  In particular, the optimizer likes to form things like memsets
      // out of stores in the original IR.  Another important example is
      // llvm.localescape which must occur in the entry block.  Inserting a
      // safepoint before it is not legal since it could push the localescape
      // out of the entry block.
      return true;
    }
  }
  return false;
}

static Instruction *findLocationForEntrySafepoint(Function &F,
                                                  DominatorTree &DT) {

  // Conceptually, this poll needs to be on method entry, but in
  // practice, we place it as late in the entry block as possible.  We
  // can place it as late as we want as long as it dominates all calls
  // that can grow the stack.  This, combined with backedge polls,
  // gives us all the progress guarantees we need.

  // HasNextInstruction and NextInstruction are used to iterate
  // through a "straight line" execution sequence.

  auto HasNextInstruction = [](Instruction *I) {
    if (!I->isTerminator())
      return true;

    BasicBlock *nextBB = I->getParent()->getUniqueSuccessor();
    return nextBB && (nextBB->getUniquePredecessor() != nullptr);
  };

  auto NextInstruction = [&](Instruction *I) {
    assert(HasNextInstruction(I) &&
           "first check if there is a next instruction!");

    if (I->isTerminator())
      return &I->getParent()->getUniqueSuccessor()->front();
    return &*++I->getIterator();
  };

  Instruction *Cursor = nullptr;
  for (Cursor = &F.getEntryBlock().front(); HasNextInstruction(Cursor);
       Cursor = NextInstruction(Cursor)) {

    // We need to ensure a safepoint poll occurs before any 'real' call.  The
    // easiest way to ensure finite execution between safepoints in the face of
    // recursive and mutually recursive functions is to enforce that each take
    // a safepoint.  Additionally, we need to ensure a poll before any call
    // which can grow the stack by an unbounded amount.  This isn't required
    // for GC semantics per se, but is a common requirement for languages
    // which detect stack overflow via guard pages and then throw exceptions.
    if (auto CS = CallSite(Cursor)) {
      if (doesNotRequireEntrySafepointBefore(CS))
        continue;
      break;
    }
  }

  assert((HasNextInstruction(Cursor) || Cursor->isTerminator()) &&
         "either we stopped because of a call, or because of terminator");

  return Cursor;
}
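
// For example, given an entry block of the form (names are arbitrary)
//   %a = alloca i32
//   %v = load i32, i32* @g
//   call void @foo()
// the cursor advances past the alloca and the load and stops at the call,
// so the entry poll is inserted immediately before the call to @foo.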

static const char *const GCSafepointPollName = "gc.safepoint_poll";

static bool isGCSafepointPoll(Function &F) {
  return F.getName().equals(GCSafepointPollName);
}

/// Returns true if this function should be rewritten to include safepoint
/// polls and parseable call sites.  The main point of this function is to be
/// an extension point for custom logic.
static bool shouldRewriteFunction(Function &F) {
  // TODO: This should check the GCStrategy
  if (F.hasGC()) {
    const auto &FunctionGCName = F.getGC();
    const StringRef StatepointExampleName("statepoint-example");
    const StringRef CoreCLRName("coreclr");
    return (StatepointExampleName == FunctionGCName) ||
           (CoreCLRName == FunctionGCName);
  } else
    return false;
}
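
// For example, a function opts in by naming one of the supported strategies
// in its IR-level gc attribute:
//   define void @f() gc "statepoint-example" { ... }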

// TODO: These should become properties of the GCStrategy, possibly with
// command line overrides.
static bool enableEntrySafepoints(Function &F) { return !NoEntry; }
static bool enableBackedgeSafepoints(Function &F) { return !NoBackedge; }
static bool enableCallSafepoints(Function &F) { return !NoCall; }

bool PlaceSafepoints::runOnFunction(Function &F) {
  if (F.isDeclaration() || F.empty()) {
    // This is a declaration, nothing to do.  Must exit early to avoid crash in
    // dom tree calculation
    return false;
  }

  if (isGCSafepointPoll(F)) {
    // Given that we inline this function as part of safepoint poll insertion,
    // rewriting it here doesn't make any sense.  Note that we do make any
    // calls it contains parseable after we inline the poll.
    return false;
  }

  if (!shouldRewriteFunction(F))
    return false;

  bool Modified = false;

  // In various bits below, we rely on the fact that uses are reachable from
  // defs.  When there are basic blocks unreachable from the entry, dominance
  // and reachability queries return nonsensical results.  Thus, we preprocess
  // the function to ensure these properties hold.
  Modified |= removeUnreachableBlocks(F);

  // STEP 1 - Insert the safepoint polling locations.  We do not need to
  // actually insert parse points yet.  That will be done for all polls and
  // calls in a single pass.

  DominatorTree DT;
  DT.recalculate(F);

  SmallVector<Instruction *, 16> PollsNeeded;
  std::vector<CallSite> ParsePointNeeded;

  if (enableBackedgeSafepoints(F)) {
    // Construct a pass manager to run the LoopPass backedge logic.  We
    // need the pass manager to handle scheduling all the loop passes
    // appropriately.  Doing this by hand is painful and just not worth messing
    // with for the moment.
    legacy::FunctionPassManager FPM(F.getParent());
    bool CanAssumeCallSafepoints = enableCallSafepoints(F);
    auto *PBS = new PlaceBackedgeSafepointsImpl(CanAssumeCallSafepoints);
    FPM.add(PBS);
    FPM.run(F);

    // We preserve dominance information when inserting the poll, otherwise
    // we'd have to recalculate this on every insert
    DT.recalculate(F);

    auto &PollLocations = PBS->PollLocations;

    auto OrderByBBName = [](Instruction *a, Instruction *b) {
      return a->getParent()->getName() < b->getParent()->getName();
    };
    // We need the order of the list to be stable so that naming ends up stable
    // when we split edges.  This makes test cases much easier to write.
    std::sort(PollLocations.begin(), PollLocations.end(), OrderByBBName);

    // We can sometimes end up with duplicate poll locations.  This happens if
    // a single loop is visited more than once.  That seems wrong, but it does
    // happen for the split-backedge.ll test case.
    PollLocations.erase(std::unique(PollLocations.begin(),
                                    PollLocations.end()),
                        PollLocations.end());

    // Insert a poll at each point the analysis pass identified
    // The poll location must be the terminator of a loop latch block.
    for (TerminatorInst *Term : PollLocations) {
      // We are inserting a poll, the function is modified
      Modified = true;

      if (SplitBackedge) {
        // Split the backedge of the loop and insert the poll within that new
        // basic block.  This creates a loop with two latches per original
        // latch (which is non-ideal), but this appears to be easier to
        // optimize in practice than inserting the poll immediately before the
        // latch test.

        // Since this is a latch, at least one of the successors must dominate
        // it.  It's possible that we have a) duplicate edges to the same
        // header and b) edges to distinct loop headers.  We need to insert
        // polls on each.
        SetVector<BasicBlock *> Headers;
        for (unsigned i = 0; i < Term->getNumSuccessors(); i++) {
          BasicBlock *Succ = Term->getSuccessor(i);
          if (DT.dominates(Succ, Term->getParent())) {
            Headers.insert(Succ);
          }
        }
        assert(!Headers.empty() && "poll location is not a loop latch?");

        // The split loop structure here is so that we only need to recalculate
        // the dominator tree once.  Alternatively, we could just keep it up to
        // date and use a more natural merged loop.
        for (BasicBlock *Header : Headers) {
          BasicBlock *NewBB = SplitEdge(Term->getParent(), Header, &DT);
          PollsNeeded.push_back(NewBB->getTerminator());
          NumBackedgeSafepoints++;
        }
      } else {
        // Split the latch block itself, right before the terminator.
        PollsNeeded.push_back(Term);
        NumBackedgeSafepoints++;
      }
    }
  }

  if (enableEntrySafepoints(F)) {
    if (Instruction *Location = findLocationForEntrySafepoint(F, DT)) {
      PollsNeeded.push_back(Location);
      Modified = true;
      NumEntrySafepoints++;
    }
    // TODO: else we should assert that there was, in fact, a policy choice to
    // not insert an entry safepoint poll.
  }

  // Now that we've identified all the needed safepoint poll locations, insert
  // safepoint polls themselves.
  for (Instruction *PollLocation : PollsNeeded) {
    std::vector<CallSite> RuntimeCalls;
    InsertSafepointPoll(PollLocation, RuntimeCalls);
    ParsePointNeeded.insert(ParsePointNeeded.end(), RuntimeCalls.begin(),
                            RuntimeCalls.end());
  }

  return Modified;
}

char PlaceBackedgeSafepointsImpl::ID = 0;
char PlaceSafepoints::ID = 0;

FunctionPass *llvm::createPlaceSafepointsPass() {
  return new PlaceSafepoints();
}

INITIALIZE_PASS_BEGIN(PlaceBackedgeSafepointsImpl,
                      "place-backedge-safepoints-impl",
                      "Place Backedge Safepoints", false, false)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(PlaceBackedgeSafepointsImpl,
                    "place-backedge-safepoints-impl",
                    "Place Backedge Safepoints", false, false)

INITIALIZE_PASS_BEGIN(PlaceSafepoints, "place-safepoints", "Place Safepoints",
                      false, false)
INITIALIZE_PASS_END(PlaceSafepoints, "place-safepoints", "Place Safepoints",
                    false, false)
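
// With the legacy pass manager, this pass can be exercised directly via
// "opt -place-safepoints -S input.ll", typically followed by
// -rewrite-statepoints-for-gc to make relocation semantics explicit.  This
// is one plausible pipeline, not the only valid one.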

static void
InsertSafepointPoll(Instruction *InsertBefore,
                    std::vector<CallSite> &ParsePointsNeeded /*rval*/) {
  BasicBlock *OrigBB = InsertBefore->getParent();
  Module *M = InsertBefore->getModule();
  assert(M && "must be part of a module");

  // Inline the safepoint poll implementation - this will get all the branch,
  // control flow, etc.  Most importantly, it will introduce the actual slow
  // path call - where we need to insert a safepoint (parsepoint).

  auto *F = M->getFunction(GCSafepointPollName);
  assert(F && "gc.safepoint_poll function is missing");
  assert(F->getValueType() ==
         FunctionType::get(Type::getVoidTy(M->getContext()), false) &&
         "gc.safepoint_poll declared with wrong type");
  assert(!F->empty() && "gc.safepoint_poll must be a non-empty function");
  CallInst *PollCall = CallInst::Create(F, "", InsertBefore);

  // Record some information about the call site we're replacing
  BasicBlock::iterator Before(PollCall), After(PollCall);
  bool IsBegin = false;
  if (Before == OrigBB->begin())
    IsBegin = true;
  else
    Before--;

  After++;
  assert(After != OrigBB->end() && "must have successor");

  // Do the actual inlining
  InlineFunctionInfo IFI;
  bool InlineStatus = InlineFunction(PollCall, IFI);
  assert(InlineStatus && "inline must succeed");
  (void)InlineStatus; // suppress warning in release-asserts

  // Check post-conditions
  assert(IFI.StaticAllocas.empty() && "can't have allocs");

  std::vector<CallInst *> Calls; // new calls
  DenseSet<BasicBlock *> BBs;    // new BBs + insertee

  // Include only the newly inserted instructions.  Note: Begin may not be
  // valid if we inserted at the beginning of the basic block.
  BasicBlock::iterator Start = IsBegin ? OrigBB->begin() : std::next(Before);

  // If your poll function includes an unreachable at the end, that's not
  // valid.  Bugpoint likes to create this, so check for it.
  assert(isPotentiallyReachable(&*Start, &*After) &&
         "malformed poll function");

  scanInlinedCode(&*Start, &*After, Calls, BBs);
  assert(!Calls.empty() && "slow path not found for safepoint poll");

  // Record the fact we need a parsable state at the runtime call contained in
  // the poll function.  This is required so that the runtime knows how to
  // parse the last frame when we actually take the safepoint (i.e. execute
  // the slow path)
  assert(ParsePointsNeeded.empty());
  for (auto *CI : Calls) {
    // No safepoint needed or wanted
    if (!needsStatepoint(CI))
      continue;

    // These are likely runtime calls.  Should we assert that via calling
    // convention or something?
    ParsePointsNeeded.push_back(CallSite(CI));
  }
  assert(ParsePointsNeeded.size() <= Calls.size());
}
    681