//===- InlineFunction.cpp - Code to perform function inlining -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CommandLine.h"
#include <algorithm>

using namespace llvm;

static cl::opt<bool>
EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
  cl::Hidden,
  cl::desc("Convert noalias attributes to metadata during inlining."));

static cl::opt<bool>
PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
  cl::init(true), cl::Hidden,
  cl::desc("Convert align attributes to assumptions during inlining."));

bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
                          AAResults *CalleeAAR, bool InsertLifetime) {
  return InlineFunction(CallSite(CI), IFI, CalleeAAR, InsertLifetime);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
                          AAResults *CalleeAAR, bool InsertLifetime) {
  return InlineFunction(CallSite(II), IFI, CalleeAAR, InsertLifetime);
}

namespace {
  /// A class for recording information about inlining a landing pad.
  class LandingPadInliningInfo {
    BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
    BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
    LandingPadInst *CallerLPad;  ///< LandingPadInst associated with the invoke.
    PHINode *InnerEHValuesPHI;   ///< PHI for EH values from landingpad insts.
    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    LandingPadInliningInfo(InvokeInst *II)
      : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr),
        CallerLPad(nullptr), InnerEHValuesPHI(nullptr) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      llvm::BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// Forward the 'resume' instruction to the caller's landing pad block.
    /// When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

    /// Add incoming-PHI values to the unwind destination block for the given
    /// basic block, using the values for the original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
} // anonymous namespace

/// Get or create a target for the branch from ResumeInsts.
BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  Instruction *InsertPoint = &InnerResumeDest->front();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// Forward the 'resume' instruction to the caller's landing pad block.
/// When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
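///
/// As an illustrative sketch (hand-written IR, not taken from this code), a
/// 'resume' in the inlined body such as:
///
///   resume { i8*, i32 } %lpad.val
///
/// is replaced by an unconditional branch to the inner resume destination,
/// with %lpad.val fed into the PHI that merges exception values there:
///
///   br label %lpad.body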
void LandingPadInliningInfo::forwardResume(
    ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static Value *getParentPad(Value *EHPad) {
  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
    return FPI->getParentPad();
  return cast<CatchSwitchInst>(EHPad)->getParentPad();
}

typedef DenseMap<Instruction *, Value *> UnwindDestMemoTy;

/// Helper for getUnwindDestToken that does the descendant-ward part of
/// the search.
static Value *getUnwindDestTokenHelper(Instruction *EHPad,
                                       UnwindDestMemoTy &MemoMap) {
  SmallVector<Instruction *, 8> Worklist(1, EHPad);

  while (!Worklist.empty()) {
    Instruction *CurrentPad = Worklist.pop_back_val();
    // We only put pads on the worklist that aren't in the MemoMap.  When
    // we find an unwind dest for a pad we may update its ancestors, but
    // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
    // so they should never get updated while queued on the worklist.
    assert(!MemoMap.count(CurrentPad));
    Value *UnwindDestToken = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
      if (CatchSwitch->hasUnwindDest()) {
        UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
      } else {
        // Catchswitch doesn't have a 'nounwind' variant, and one might be
        // annotated as "unwinds to caller" when really it's nounwind (see
        // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
        // parent's unwind dest from this.  We can check its catchpads'
        // descendants, since they might include a cleanuppad with an
        // "unwinds to caller" cleanupret, which can be trusted.
        for (auto HI = CatchSwitch->handler_begin(),
                  HE = CatchSwitch->handler_end();
             HI != HE && !UnwindDestToken; ++HI) {
          BasicBlock *HandlerBlock = *HI;
          auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
          for (User *Child : CatchPad->users()) {
            // Intentionally ignore invokes here -- since the catchswitch is
            // marked "unwind to caller", it would be a verifier error if it
            // contained an invoke which unwinds out of it, so any invoke we'd
            // encounter must unwind to some child of the catch.
            if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
              continue;

            Instruction *ChildPad = cast<Instruction>(Child);
            auto Memo = MemoMap.find(ChildPad);
            if (Memo == MemoMap.end()) {
              // Haven't figured out this child pad yet; queue it.
              Worklist.push_back(ChildPad);
              continue;
            }
            // We've already checked this child, but might have found that
            // it offers no proof either way.
            Value *ChildUnwindDestToken = Memo->second;
            if (!ChildUnwindDestToken)
              continue;
            // We already know the child's unwind dest, which can either
            // be ConstantTokenNone to indicate unwind to caller, or can
            // be another child of the catchpad.  Only the former indicates
            // the unwind dest of the catchswitch.
            if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
              UnwindDestToken = ChildUnwindDestToken;
              break;
            }
            assert(getParentPad(ChildUnwindDestToken) == CatchPad);
          }
        }
      }
    } else {
      auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
      for (User *U : CleanupPad->users()) {
        if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
          if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
            UnwindDestToken = RetUnwindDest->getFirstNonPHI();
          else
            UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
          break;
        }
        Value *ChildUnwindDestToken;
        if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
          ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
        } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
          Instruction *ChildPad = cast<Instruction>(U);
          auto Memo = MemoMap.find(ChildPad);
          if (Memo == MemoMap.end()) {
            // Haven't resolved this child yet; queue it and keep searching.
            Worklist.push_back(ChildPad);
            continue;
          }
          // We've checked this child, but still need to ignore it if it
          // had no proof either way.
          ChildUnwindDestToken = Memo->second;
          if (!ChildUnwindDestToken)
            continue;
        } else {
          // Not a relevant user of the cleanuppad
          continue;
        }
        // In a well-formed program, the child/invoke must either unwind to
        // an(other) child of the cleanup, or exit the cleanup.  In the
        // first case, continue searching.
        if (isa<Instruction>(ChildUnwindDestToken) &&
            getParentPad(ChildUnwindDestToken) == CleanupPad)
          continue;
        UnwindDestToken = ChildUnwindDestToken;
        break;
      }
    }
    // If we haven't found an unwind dest for CurrentPad, we may have queued its
    // children, so move on to the next in the worklist.
    if (!UnwindDestToken)
      continue;

    // Now we know that CurrentPad unwinds to UnwindDestToken.  It also exits
    // any ancestors of CurrentPad up to but not including UnwindDestToken's
    // parent pad.  Record this in the memo map, and check to see if the
    // original EHPad being queried is one of the ones exited.
    Value *UnwindParent;
    if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
      UnwindParent = getParentPad(UnwindPad);
    else
      UnwindParent = nullptr;
    bool ExitedOriginalPad = false;
    for (Instruction *ExitedPad = CurrentPad;
         ExitedPad && ExitedPad != UnwindParent;
         ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
      // Skip over catchpads since they just follow their catchswitches.
      if (isa<CatchPadInst>(ExitedPad))
        continue;
      MemoMap[ExitedPad] = UnwindDestToken;
      ExitedOriginalPad |= (ExitedPad == EHPad);
    }

    if (ExitedOriginalPad)
      return UnwindDestToken;

    // Continue the search.
  }

  // No definitive information is contained within this funclet.
  return nullptr;
}

/// Given an EH pad, find where it unwinds.  If it unwinds to an EH pad,
/// return that pad instruction.  If it unwinds to caller, return
/// ConstantTokenNone.  If it does not have a definitive unwind destination,
/// return nullptr.
///
/// This routine gets invoked for calls in funclets in inlinees when inlining
/// an invoke.  Since many funclets don't have calls inside them, it's queried
/// on-demand rather than building a map of pads to unwind dests up front.
/// Determining a funclet's unwind dest may require recursively searching its
/// descendants, and also ancestors and cousins if the descendants don't provide
/// an answer.  Since most funclets will have their unwind dest immediately
/// available as the unwind dest of a catchswitch or cleanupret, this routine
/// searches top-down from the given pad and then up. To avoid worst-case
/// quadratic run-time given that approach, it uses a memo map to avoid
/// re-processing funclet trees.  The callers that rewrite the IR as they go
/// take advantage of this, for correctness, by checking/forcing rewritten
/// pads' entries to match the original callee view.
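///
/// For illustration (hand-written IR, not from a test case), given:
///
///   %cleanup = cleanuppad within none []
///   ...
///   cleanupret from %cleanup unwind label %next.pad
///
/// the unwind dest token of %cleanup is the first non-PHI instruction of
/// %next.pad; had the cleanupret instead been "unwind to caller", the result
/// would be ConstantTokenNone.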
static Value *getUnwindDestToken(Instruction *EHPad,
                                 UnwindDestMemoTy &MemoMap) {
  // Catchpads unwind to the same place as their catchswitch;
  // redirect any queries on catchpads so the code below can
  // deal with just catchswitches and cleanuppads.
  if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
    EHPad = CPI->getCatchSwitch();

  // Check if we've already determined the unwind dest for this pad.
  auto Memo = MemoMap.find(EHPad);
  if (Memo != MemoMap.end())
    return Memo->second;

  // Search EHPad and, if necessary, its descendants.
  Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
  assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
  if (UnwindDestToken)
    return UnwindDestToken;

  // No information is available for this EHPad from itself or any of its
  // descendants.  An unwind all the way out to a pad in the caller would
  // need also to agree with the unwind dest of the parent funclet, so
  // search up the chain to try to find a funclet with information.  Put
  // null entries in the memo map to avoid re-processing as we go up.
  MemoMap[EHPad] = nullptr;
  Instruction *LastUselessPad = EHPad;
  Value *AncestorToken;
  for (AncestorToken = getParentPad(EHPad);
       auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
       AncestorToken = getParentPad(AncestorToken)) {
    // Skip over catchpads since they just follow their catchswitches.
    if (isa<CatchPadInst>(AncestorPad))
      continue;
    assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
    auto AncestorMemo = MemoMap.find(AncestorPad);
    if (AncestorMemo == MemoMap.end()) {
      UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
    } else {
      UnwindDestToken = AncestorMemo->second;
    }
    if (UnwindDestToken)
      break;
    LastUselessPad = AncestorPad;
  }

  // Since the whole tree under LastUselessPad has no information, it all must
  // match UnwindDestToken; record that to avoid repeating the search.
  SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
  while (!Worklist.empty()) {
    Instruction *UselessPad = Worklist.pop_back_val();
    assert(!MemoMap.count(UselessPad) || MemoMap[UselessPad] == nullptr);
    MemoMap[UselessPad] = UnwindDestToken;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
      for (BasicBlock *HandlerBlock : CatchSwitch->handlers())
        for (User *U : HandlerBlock->getFirstNonPHI()->users())
          if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
            Worklist.push_back(cast<Instruction>(U));
    } else {
      assert(isa<CleanupPadInst>(UselessPad));
      for (User *U : UselessPad->users())
        if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
          Worklist.push_back(cast<Instruction>(U));
    }
  }

  return UnwindDestToken;
}

/// When we inline a basic block into an invoke,
/// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to UnwindEdge, returning the
/// block that now ends in the rewritten invoke (or nullptr if no call was
/// rewritten).
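///
/// As a rough sketch (illustrative IR only, block names hypothetical), a
/// throwing call such as:
///
///   %r = call i32 @f(i32 %x)
///
/// is rewritten into:
///
///   %r = invoke i32 @f(i32 %x)
///           to label %r.noexc unwind label %unwind.edge
///
/// where %r.noexc holds the remainder of the original block and
/// %unwind.edge is the UnwindEdge argument.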
static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
    BasicBlock *BB, BasicBlock *UnwindEdge,
    UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = &*BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    431     // to @llvm.experimental_deoptimize (resp. @llvm.experimental.guard) into
    432     // invokes.  The caller's "segment" of the deoptimization continuation
    433     // attached to the newly inlined @llvm.experimental_deoptimize
    434     // (resp. @llvm.experimental.guard) call should contain the exception
    435     // handling logic, if any.
    436     if (auto *F = CI->getCalledFunction())
    437       if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
    438           F->getIntrinsicID() == Intrinsic::experimental_guard)
    439         continue;
    440 
    if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
      // This call is nested inside a funclet.  If that funclet has an unwind
      // destination within the inlinee, then unwinding out of this call would
      // be UB.  Rewriting this call to an invoke which targets the inlined
      // invoke's unwind dest would give the call's parent funclet multiple
      // unwind destinations, which is something that subsequent EH table
      // generation can't handle and that the verifier rejects.  So when we
      // see such a call, leave it as a call.
      auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
      Value *UnwindDestToken =
          getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
      if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
        continue;
#ifndef NDEBUG
      Instruction *MemoKey;
      if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
        MemoKey = CatchPad->getCatchSwitch();
      else
        MemoKey = FuncletPad;
      assert(FuncletUnwindMap->count(MemoKey) &&
             (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
             "must get memoized to avoid confusing later searches");
#endif // NDEBUG
    }

    // Convert this function call into an invoke instruction.  First, split the
    // basic block.
    BasicBlock *Split =
        BB->splitBasicBlock(CI->getIterator(), CI->getName() + ".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock
    BB->getInstList().pop_back();

    // Create the new invoke instruction.
    SmallVector<Value*, 8> InvokeArgs(CI->arg_begin(), CI->arg_end());
    SmallVector<OperandBundleDef, 1> OpBundles;

    CI->getOperandBundlesAsDefs(OpBundles);

    // Note: we're round tripping operand bundles through memory here, and that
    // can potentially be avoided with a cleverer API design that we do not have
    // as of this time.

    InvokeInst *II =
        InvokeInst::Create(CI->getCalledValue(), Split, UnwindEdge, InvokeArgs,
                           OpBundles, CI->getName(), BB);
    II->setDebugLoc(CI->getDebugLoc());
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke!  This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the original call
    Split->getInstList().pop_front();
    return BB;
  }
  return nullptr;
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlinedCodeInfo is information about the code that got inlined.
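///
/// One effect worth illustrating (hand-written IR, type-id names
/// hypothetical): the clauses of the caller's landingpad are appended to
/// every inlined landingpad, so an inlined "catch i8* @TidA" pad under an
/// outer "catch i8* @TidB" pad becomes:
///
///   %lp = landingpad { i8*, i32 } catch i8* @TidA catch i8* @TidB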
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                                    ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  LandingPadInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
       I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, Invoke.getOuterResumeDest()))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        Invoke.addIncomingPHIValuesFor(NewBB);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlinedCodeInfo is information about the code that got inlined.
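///
/// For example (an illustrative sketch in hand-written IR, with
/// %invoke.unwind.dest as a placeholder name), an inlined cleanupret that
/// used to unwind to the caller:
///
///   cleanupret from %cleanup unwind to caller
///
/// is rewritten to unwind to the inlined invoke's unwind destination:
///
///   cleanupret from %cleanup unwind label %invoke.unwind.dest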
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                               ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *UnwindDest = II->getUnwindDest();
  Function *Caller = FirstNewBlock->getParent();

  assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");

  // If there are PHI nodes in the unwind destination block, we need to keep
  // track of which values came into them from the invoke before removing the
  // edge from this block.
  SmallVector<Value *, 8> UnwindDestPHIValues;
  llvm::BasicBlock *InvokeBB = II->getParent();
  for (Instruction &I : *UnwindDest) {
    // Save the value to use for this edge.
    PHINode *PHI = dyn_cast<PHINode>(&I);
    if (!PHI)
      break;
    UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
  }

  // Add incoming-PHI values to the unwind destination block for the given basic
  // block, using the values for the original invoke's source block.
  auto UpdatePHINodes = [&](BasicBlock *Src) {
    BasicBlock::iterator I = UnwindDest->begin();
    for (Value *V : UnwindDestPHIValues) {
      PHINode *PHI = cast<PHINode>(I);
      PHI->addIncoming(V, Src);
      ++I;
    }
  };

  // This connects all the instructions which 'unwind to caller' to the invoke
  // destination.
  UnwindDestMemoTy FuncletUnwindMap;
  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
      if (CRI->unwindsToCaller()) {
        auto *CleanupPad = CRI->getCleanupPad();
        CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
        CRI->eraseFromParent();
        UpdatePHINodes(&*BB);
        // Finding a cleanupret with an unwind destination would confuse
        // subsequent calls to getUnwindDestToken, so map the cleanuppad
        // to short-circuit any such calls and recognize this as an "unwind
        // to caller" cleanup.
        assert(!FuncletUnwindMap.count(CleanupPad) ||
               isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
        FuncletUnwindMap[CleanupPad] =
            ConstantTokenNone::get(Caller->getContext());
      }
    }

    Instruction *I = BB->getFirstNonPHI();
    if (!I->isEHPad())
      continue;

    Instruction *Replacement = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
      if (CatchSwitch->unwindsToCaller()) {
        Value *UnwindDestToken;
        if (auto *ParentPad =
                dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
          // This catchswitch is nested inside another funclet.  If that
          // funclet has an unwind destination within the inlinee, then
          // unwinding out of this catchswitch would be UB.  Rewriting this
          // catchswitch to unwind to the inlined invoke's unwind dest would
          // give the parent funclet multiple unwind destinations, which is
          // something that subsequent EH table generation can't handle and
    635           // as "unwind to caller".
    636           UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
    637           if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
    638             continue;
    639         } else {
    640           // This catchswitch has no parent to inherit constraints from, and
    641           // none of its descendants can have an unwind edge that exits it and
    642           // targets another funclet in the inlinee.  It may or may not have a
    643           // descendant that definitively has an unwind to caller.  In either
    644           // case, we'll have to assume that any unwinds out of it may need to
    645           // be routed to the caller, so treat it as though it has a definitive
    646           // unwind to caller.
    647           UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
    648         }
    649         auto *NewCatchSwitch = CatchSwitchInst::Create(
    650             CatchSwitch->getParentPad(), UnwindDest,
    651             CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
    652             CatchSwitch);
    653         for (BasicBlock *PadBB : CatchSwitch->handlers())
    654           NewCatchSwitch->addHandler(PadBB);
    655         // Propagate info for the old catchswitch over to the new one in
    656         // the unwind map.  This also serves to short-circuit any subsequent
    657         // checks for the unwind dest of this catchswitch, which would get
    658         // confused if they found the outer handler in the callee.
    659         FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
    660         Replacement = NewCatchSwitch;
    661       }
    662     } else if (!isa<FuncletPadInst>(I)) {
    663       llvm_unreachable("unexpected EHPad!");
    664     }
    665 
    666     if (Replacement) {
    667       Replacement->takeName(I);
    668       I->replaceAllUsesWith(Replacement);
    669       I->eraseFromParent();
    670       UpdatePHINodes(&*BB);
    671     }
    672   }
    673 
    674   if (InlinedCodeInfo.ContainsCalls)
    675     for (Function::iterator BB = FirstNewBlock->getIterator(),
    676                             E = Caller->end();
    677          BB != E; ++BB)
    678       if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
    679               &*BB, UnwindDest, &FuncletUnwindMap))
    680         // Update any PHI nodes in the exceptional block to indicate that there
    681         // is now a new entry in them.
    682         UpdatePHINodes(NewBB);
    683 
    684   // Now that everything is happy, we have one final detail.  The PHI nodes in
    685   // the exception destination block still have entries due to the original
    686   // invoke instruction. Eliminate these entries (which might even delete the
    687   // PHI node) now.
    688   UnwindDest->removePredecessor(InvokeBB);
    689 }
    690 
/// When inlining a call site that has !llvm.mem.parallel_loop_access metadata,
/// that metadata should be propagated to all memory-accessing cloned
/// instructions.
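///
/// A minimal sketch (illustrative IR): if the call site carries
/// !llvm.mem.parallel_loop_access !0, a cloned load such as
///
///   %v = load i32, i32* %p
///
/// becomes
///
///   %v = load i32, i32* %p, !llvm.mem.parallel_loop_access !0
///
/// and any parallel-loop metadata already on the clone is concatenated with
/// the call site's.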
static void PropagateParallelLoopAccessMetadata(CallSite CS,
                                                ValueToValueMapTy &VMap) {
  MDNode *M =
    CS.getInstruction()->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  if (!M)
    return;

  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *PM =
            NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access)) {
      M = MDNode::concatenate(PM, M);
      NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
    } else if (NI->mayReadOrWriteMemory()) {
      NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
    }
  }
}

/// When inlining a function that contains noalias scope metadata,
/// this metadata needs to be cloned so that the inlined blocks
/// have different "unique scopes" at every call site. Were this not done, then
/// aliasing scopes from a function inlined into a caller multiple times could
/// not be differentiated (and this would lead to miscompiles because the
/// non-aliasing property communicated by the metadata could have
/// call-site-specific control dependencies).
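///
/// Concretely (a hand-written sketch, not from a test case): if a callee whose
/// accesses use scope !1 is inlined at two call sites, both inlined bodies
/// would otherwise share the literal !1; after cloning, each body refers to
/// its own fresh copy of !1, so the two inlined regions remain distinguishable
/// to alias analysis.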
static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
  const Function *CalledFunc = CS.getCalledFunction();
  SetVector<const MDNode *> MD;

  // Note: We could only clone the metadata if it is already used in the
  // caller. I'm omitting that check here because it might confuse
  // inter-procedural alias analysis passes. We can revisit this if it becomes
  // an efficiency or overhead problem.

  for (const BasicBlock &I : *CalledFunc)
    for (const Instruction &J : I) {
      if (const MDNode *M = J.getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = J.getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);
    }

  if (MD.empty())
    return;

  // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
  // the set.
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
      if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
        if (MD.insert(M1))
          Queue.push_back(M1);
  }

  // Now we have a complete set of all metadata in the chains used to specify
  // the noalias scopes and the lists of those scopes.
  SmallVector<TempMDTuple, 16> DummyNodes;
  DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
  for (const MDNode *I : MD) {
    DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
    MDMap[I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  for (const MDNode *I : MD) {
    SmallVector<Metadata *, 4> NewOps;
    for (unsigned i = 0, ie = I->getNumOperands(); i != ie; ++i) {
      const Metadata *V = I->getOperand(i);
      if (const MDNode *M = dyn_cast<MDNode>(V))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(V));
    }

    MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
  }

  // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had alias scope metadata (a list of scopes to
      // which instructions inside it might belong), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NI->setMetadata(LLVMContext::MD_alias_scope, M);
    }

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had noalias metadata (a list of scopes with
      // which instructions inside it don't alias), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_noalias, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NI->setMetadata(LLVMContext::MD_noalias, M);
    }
  }
}

/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
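///
/// As an illustrative sketch (hand-written IR, metadata numbers arbitrary),
/// for a callee
///
///   define void @f(i8* noalias %a, i8* noalias %b) { ... }
///
/// the inlined accesses end up tagged roughly as:
///
///   store i8 0, i8* %a, !alias.scope !5, !noalias !6
///   store i8 0, i8* %b, !alias.scope !6, !noalias !5
///
/// where !5 and !6 list fresh scopes (one per noalias argument) in a new
/// domain created for this inlined body.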
static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
                                  const DataLayout &DL, AAResults *CalleeAAR) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CS.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (const Argument &Arg : CalledFunc->args())
    if (Arg.hasNoAliasAttr() && !Arg.use_empty())
      NoAliasArgs.push_back(&Arg);

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
    MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = CalledFunc->getName();
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI)
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (ICS.doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (CalleeAAR) {
          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(ICS);
          if (MRB == FMRB_OnlyAccessesArgumentPointees ||
              MRB == FMRB_OnlyReadsArgumentPointees)
            IsArgMemOnlyCall = true;
        }

        for (Value *Arg : ICS.args()) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
          // However, if we know that the call only accesses pointer arguments,
          // then we only need to check the pointer arguments.
          if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(Arg);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (const Value *V : PtrArgs) {
        SmallVector<Value *, 4> Objects;
        GetUnderlyingObjects(const_cast<Value*>(V),
                             Objects, DL, /* LI = */ nullptr);

        for (Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols).
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!A->hasNoAliasAttr())
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
                                 // It might be tempting to skip the
                                 // PointerMayBeCapturedBefore check if
                                 // A->hasNoCaptureAttr() is true, but this is
                                 // incorrect because nocapture only guarantees
                                 // that no copies outlive the function, not
                                 // that the value cannot be locally captured.
                                 !PointerMayBeCapturedBefore(A,
                                   /* ReturnCaptures */ false,
                                   /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                        MDNode::concatenate(
                            NI->getMetadata(LLVMContext::MD_noalias),
                            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}

/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
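///
/// Roughly (an illustrative sketch of the kind of IR this produces; the exact
/// shape is whatever IRBuilder::CreateAlignmentAssumption emits), for a
/// parameter "i32* align 32 %ptr" the inlined body is preceded by a
/// mask-and-assume pattern:
///
///   %ptrint = ptrtoint i32* %ptr to i64
///   %maskedptr = and i64 %ptrint, 31
///   %maskcond = icmp eq i64 %maskedptr, 0
///   call void @llvm.assume(i1 %maskcond)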
static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
  if (!PreserveAlignmentAssumptions)
    return;
  auto &DL = CS.getCaller()->getParent()->getDataLayout();

  // To avoid inserting redundant assumptions, we should check for assumptions
  // already in the caller. To do this, we might need a DT of the caller.
  DominatorTree DT;
  bool DTCalculated = false;

  Function *CalledFunc = CS.getCalledFunction();
  for (Function::arg_iterator I = CalledFunc->arg_begin(),
                              E = CalledFunc->arg_end();
       I != E; ++I) {
    unsigned Align = I->getType()->isPointerTy() ? I->getParamAlignment() : 0;
    if (Align && !I->hasByValOrInAllocaAttr() && !I->hasNUses(0)) {
      if (!DTCalculated) {
        DT.recalculate(const_cast<Function&>(*CS.getInstruction()->getParent()
                                               ->getParent()));
        DTCalculated = true;
      }

      // If we can already prove the asserted alignment in the context of the
      // caller, then don't bother inserting the assumption.
      Value *Arg = CS.getArgument(I->getArgNo());
      if (getKnownAlignment(Arg, DL, CS.getInstruction(),
                            &IFI.ACT->getAssumptionCache(*CS.getCaller()),
                            &DT) >= Align)
        continue;

      IRBuilder<>(CS.getInstruction())
          .CreateAlignmentAssumption(DL, Arg, Align);
    }
  }
}

/// Once we have cloned code over from a callee into the caller,
/// update the specified callgraph to reflect the changes we made.
/// Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
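///
/// For instance, if the callee contained a call to @g that was cloned into
/// the caller, an edge Caller->@g is added (via addCalledFunction); the edge
/// for the now-inlined Caller->Callee call site is then removed with
/// removeCallEdgeFor.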
   1095 static void UpdateCallGraphAfterInlining(CallSite CS,
   1096                                          Function::iterator FirstNewBlock,
   1097                                          ValueToValueMapTy &VMap,
   1098                                          InlineFunctionInfo &IFI) {
   1099   CallGraph &CG = *IFI.CG;
   1100   const Function *Caller = CS.getInstruction()->getParent()->getParent();
   1101   const Function *Callee = CS.getCalledFunction();
   1102   CallGraphNode *CalleeNode = CG[Callee];
   1103   CallGraphNode *CallerNode = CG[Caller];
   1104 
   1105   // Since we inlined some uninlined call sites in the callee into the caller,
   1106   // add edges from the caller to all of the callees of the callee.
   1107   CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();
   1108 
   1109   // Consider the case where CalleeNode == CallerNode.
   1110   CallGraphNode::CalledFunctionsVector CallCache;
   1111   if (CalleeNode == CallerNode) {
   1112     CallCache.assign(I, E);
   1113     I = CallCache.begin();
   1114     E = CallCache.end();
   1115   }
   1116 
   1117   for (; I != E; ++I) {
   1118     const Value *OrigCall = I->first;
   1119 
   1120     ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
   1121     // Only copy the edge if the call was inlined!
   1122     if (VMI == VMap.end() || VMI->second == nullptr)
   1123       continue;
   1124 
   1125     // If the call was inlined, but then constant folded, there is no edge to
   1126     // add.  Check for this case.
   1127     Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
   1128     if (!NewCall)
   1129       continue;
   1130 
   1131     // We do not treat intrinsic calls like real function calls because we
   1132     // expect them to become inline code; do not add an edge for an intrinsic.
   1133     CallSite CS = CallSite(NewCall);
   1134     if (CS && CS.getCalledFunction() && CS.getCalledFunction()->isIntrinsic())
   1135       continue;
   1136 
   1137     // Remember that this call site got inlined for the client of
   1138     // InlineFunction.
   1139     IFI.InlinedCalls.push_back(NewCall);
   1140 
   1141     // It's possible that inlining the callsite will cause it to go from an
   1142     // indirect to a direct call by resolving a function pointer.  If this
   1143     // happens, set the callee of the new call site to a more precise
   1144     // destination.  This can also happen if the call graph node of the caller
   1145     // was just unnecessarily imprecise.
   1146     if (!I->second->getFunction())
   1147       if (Function *F = CallSite(NewCall).getCalledFunction()) {
   1148         // Indirect call site resolved to direct call.
   1149         CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);
   1150 
   1151         continue;
   1152       }
   1153 
   1154     CallerNode->addCalledFunction(CallSite(NewCall), I->second);
   1155   }
   1156 
   1157   // Update the call graph by deleting the edge from Callee to Caller.  We must
   1158   // do this after the loop above in case Caller and Callee are the same.
   1159   CallerNode->removeCallEdgeFor(CS);
   1160 }
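
// Example: inlining a call to @g into @f, where @g itself calls @h, adds an
// f->h edge for the cloned call (unless it constant-folded away) and finally
// removes the original f->g edge.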

static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
                                    BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI) {
  Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
  IRBuilder<> Builder(InsertBlock, InsertBlock->begin());

  Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer.  Other optimizations can infer
  // better alignment.
  Builder.CreateMemCpy(Dst, Src, Size, /*Align=*/1);
}

/// When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  PointerType *ArgTy = cast<PointerType>(Arg->getType());
  Type *AggTy = ArgTy->getElementType();

  Function *Caller = TheCall->getParent()->getParent();

  // If the called function is readonly, then it cannot mutate the caller's
  // copy of the byval'd memory.  In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    const DataLayout &DL = Caller->getParent()->getDataLayout();

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall,
                                   &IFI.ACT->getAssumptionCache(*Caller)) >=
        ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment.  This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  // Create the alloca, using the preferred alignment for the aggregate type
  // from the DataLayout.
  unsigned Align =
      Caller->getParent()->getDataLayout().getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Value *NewAlloca = new AllocaInst(AggTy, nullptr, Align, Arg->getName(),
                                    &*Caller->begin()->begin());
  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}
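
// A hedged sketch of the overall byval rewrite these two helpers implement
// (illustrative IR; pointer bitcasts and the memcpy intrinsic's mangled name
// are elided):
//   call void @callee(%struct.S* byval align 8 %p)
// becomes, in the caller, a private copy that the inlined body then uses:
//   %p.copy = alloca %struct.S, align 8
//   call void @llvm.memcpy...(i8* %p.copy, i8* %p, i64 <size>, i32 1, i1 false)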

// Check whether this Value is used by a lifetime intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users()) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
        return true;
      }
    }
  }
  return false;
}

// Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
                                       Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}
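
// The markers being tested for are the usual pair (illustrative IR):
//   call void @llvm.lifetime.start(i64 <size>, i8* %ptr)
//   ...
//   call void @llvm.lifetime.end(i64 <size>, i8* %ptr)
// where %ptr is the alloca, possibly through a bitcast to i8*.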

/// Rebuild the entire inlined-at chain for this instruction so that the top of
/// the chain now is inlined-at the new call site.
static DebugLoc
updateInlinedAtInfo(const DebugLoc &DL, DILocation *InlinedAtNode,
                    LLVMContext &Ctx,
                    DenseMap<const DILocation *, DILocation *> &IANodes) {
  SmallVector<DILocation *, 3> InlinedAtLocations;
  DILocation *Last = InlinedAtNode;
  DILocation *CurInlinedAt = DL;

  // Gather all the inlined-at nodes.
  while (DILocation *IA = CurInlinedAt->getInlinedAt()) {
    // Skip any nodes we've already built.
    if (DILocation *Found = IANodes[IA]) {
      Last = Found;
      break;
    }

    InlinedAtLocations.push_back(IA);
    CurInlinedAt = IA;
  }

  // Starting from the top, rebuild the nodes to point to the new inlined-at
  // location (then rebuilding the rest of the chain behind it) and update the
  // map of already-constructed inlined-at nodes.
  for (const DILocation *MD : reverse(InlinedAtLocations)) {
    Last = IANodes[MD] = DILocation::getDistinct(
        Ctx, MD->getLine(), MD->getColumn(), MD->getScope(), Last);
  }

  // And finally create the normal location for this instruction, referring to
  // the new inlined-at chain.
  return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(), Last);
}
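
// Example (metadata numbering illustrative): an instruction that carried
//   !dbg !10, !10 = !DILocation(line: 7, scope: !S)
// comes out of this rebuild with an equivalent location whose inlined-at
// chain ends at the distinct node for the new call site, so consumers see the
// full stack of inline frames.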

/// Update inlined instructions' line numbers to
/// encode the location where these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall) {
  const DebugLoc &TheCallDL = TheCall->getDebugLoc();
  if (!TheCallDL)
    return;

  auto &Ctx = Fn->getContext();
  DILocation *InlinedAtNode = TheCallDL;

  // Create a unique call site, not to be confused with any other call from the
  // same location.
  InlinedAtNode = DILocation::getDistinct(
      Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
      InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());

  // Cache the inlined-at nodes as they're built so they are reused; without
  // this, every instruction's inlined-at chain would become distinct from
  // every other's.
  DenseMap<const DILocation *, DILocation *> IANodes;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      DebugLoc DL = BI->getDebugLoc();
      if (!DL) {
        // If the inlined instruction has no line number, make it look as if it
        // originates from the call location. This is important for
        // __attribute__((__always_inline__, __nodebug__)) functions which must
        // use the caller's location for all instructions in their body.

        // Don't update static allocas, as they may get moved later.
        if (auto *AI = dyn_cast<AllocaInst>(BI))
          if (isa<Constant>(AI->getArraySize()))
            continue;

        BI->setDebugLoc(TheCallDL);
      } else {
        BI->setDebugLoc(
            updateInlinedAtInfo(DL, InlinedAtNode, BI->getContext(), IANodes));
      }
    }
  }
}
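
// Example: an instruction from line 12 of the callee, inlined at line 40 of
// the caller, keeps its line-12 DebugLoc but gains an inlined-at chain ending
// at the distinct node for the line-40 call site (numbers illustrative).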

/// This function inlines the called function into the basic block of the
/// caller. This returns false if it is not possible to inline this call.
/// The program is still in a well-defined state if this occurs though.
///
/// Note that this only does one level of inlining.  For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream.  Similarly this will inline a recursive
/// function by one level.
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
                          AAResults *CalleeAAR, bool InsertLifetime) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (!CalledFunc ||              // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;

  // The inliner does not know how to inline through calls with operand bundles
  // in general ...
  if (CS.hasOperandBundles()) {
    for (int i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
      uint32_t Tag = CS.getOperandBundleAt(i).getTagID();
      // ... but it knows how to inline through "deopt" operand bundles ...
      if (Tag == LLVMContext::OB_deopt)
        continue;
      // ... and "funclet" operand bundles.
      if (Tag == LLVMContext::OB_funclet)
        continue;

      return false;
    }
  }
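
  // For example, a call site such as (illustrative IR)
  //   call void @f() [ "deopt"(i32 0, i32 1) ]
  // can be inlined, whereas a call carrying any other bundle tag is rejected
  // above.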

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get the personality function from the callee if it contains a landing pad.
  Constant *CalledPersonality =
      CalledFunc->hasPersonalityFn()
          ? CalledFunc->getPersonalityFn()->stripPointerCasts()
          : nullptr;

  // Find the personality function used by the landing pads of the caller. If it
  // exists, then check to see that it matches the personality function used in
  // the callee.
  Constant *CallerPersonality =
      Caller->hasPersonalityFn()
          ? Caller->getPersonalityFn()->stripPointerCasts()
          : nullptr;
  if (CalledPersonality) {
    if (!CallerPersonality)
      Caller->setPersonalityFn(CalledPersonality);
    // If the personality functions match, then we can perform the
    // inlining. Otherwise, we can't inline.
    // TODO: This isn't 100% true. Some personality functions are proper
    //       supersets of others and can be used in place of the other.
    else if (CalledPersonality != CallerPersonality)
      return false;
  }

  // We need to figure out which funclet the callsite was in so that we may
  // properly nest the callee.
  Instruction *CallSiteEHPad = nullptr;
  if (CallerPersonality) {
    EHPersonality Personality = classifyEHPersonality(CallerPersonality);
    if (isFuncletEHPersonality(Personality)) {
      Optional<OperandBundleUse> ParentFunclet =
          CS.getOperandBundle(LLVMContext::OB_funclet);
      if (ParentFunclet)
        CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());

      // OK, the inlining site is legal.  What about the target function?

      if (CallSiteEHPad) {
        if (Personality == EHPersonality::MSVC_CXX) {
          // The MSVC personality cannot tolerate catches getting inlined into
          // cleanup funclets.
          if (isa<CleanupPadInst>(CallSiteEHPad)) {
            // Ok, the call site is within a cleanuppad.  Let's check the callee
            // for catchpads.
            for (const BasicBlock &CalledBB : *CalledFunc) {
              if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
                return false;
            }
          }
        } else if (isAsynchronousEHPersonality(Personality)) {
          // SEH is even less tolerant: there may not be any sort of exceptional
          // funclet in the callee.
          for (const BasicBlock &CalledBB : *CalledFunc) {
            if (CalledBB.isEHPad())
              return false;
          }
        }
      }
    }
  }

  // Determine if we are dealing with a call in an EHPad which does not unwind
  // to caller.
  bool EHPadForCallUnwindsLocally = false;
  if (CallSiteEHPad && CS.isCall()) {
    UnwindDestMemoTy FuncletUnwindMap;
    Value *CallSiteUnwindDestToken =
        getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);

    EHPadForCallUnwindsLocally =
        CallSiteUnwindDestToken &&
        !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = --Caller->end();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;
    // Keep a list of pair (dst, src) to emit byval initializations.
    SmallVector<std::pair<Value*, Value*>, 4> ByValInit;

    auto &DL = Caller->getParent()->getDataLayout();

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner, which
    // matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit.  However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CS.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));
        if (ActualArg != *AI)
          ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
      }

      VMap[&*I] = ActualArg;
    }

    // Add alignment assumptions if necessary. We do this before the inlined
    // instructions are actually cloned into the caller so that we can easily
    // check what will be known at the start of the inlined code.
    AddAlignmentAssumptions(CS, IFI);

    // We want the inliner to prune the code as it copies.  We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Inject byval arguments initialization.
    for (std::pair<Value*, Value*> &Init : ByValInit)
      HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
                              &*FirstNewBlock, IFI);

    Optional<OperandBundleUse> ParentDeopt =
        CS.getOperandBundle(LLVMContext::OB_deopt);
    if (ParentDeopt) {
      SmallVector<OperandBundleDef, 2> OpDefs;

      for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
        Instruction *I = dyn_cast_or_null<Instruction>(VH);
        if (!I) continue;  // instruction was DCE'd or RAUW'ed to undef

        OpDefs.clear();

        CallSite ICS(I);
        OpDefs.reserve(ICS.getNumOperandBundles());

        for (unsigned i = 0, e = ICS.getNumOperandBundles(); i < e; ++i) {
          auto ChildOB = ICS.getOperandBundleAt(i);
          if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
            // If the inlined call has other operand bundles, let them be.
            OpDefs.emplace_back(ChildOB);
            continue;
          }

          // It may be useful to separate this logic (of handling operand
          // bundles) out to a separate "policy" component if this gets crowded.
          // Prepend the parent's deoptimization continuation to the newly
          // inlined call's deoptimization continuation.
          std::vector<Value *> MergedDeoptArgs;
          MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
                                  ChildOB.Inputs.size());

          MergedDeoptArgs.insert(MergedDeoptArgs.end(),
                                 ParentDeopt->Inputs.begin(),
                                 ParentDeopt->Inputs.end());
          MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(),
                                 ChildOB.Inputs.end());

          OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
        }

        Instruction *NewI = nullptr;
        if (isa<CallInst>(I))
          NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I);
        else
          NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I);

        // Note: the RAUW does the appropriate fixup in VMap, so we need to do
        // this even if the call returns void.
        I->replaceAllUsesWith(NewI);

        VH = nullptr;
        I->eraseFromParent();
      }
    }
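
    // E.g. inlining through a site carrying [ "deopt"(i32 1) ] rewrites an
    // inlined call that carried [ "deopt"(i32 2) ] to carry
    // [ "deopt"(i32 1, i32 2) ]: the caller's continuation state is prepended
    // to the inlinee's (operand values are illustrative).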

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // Update inlined instructions' line number information.
    fixupLineNumbers(Caller, FirstNewBlock, TheCall);

    // Clone existing noalias metadata if necessary.
    CloneAliasScopeMetadata(CS, VMap);

    // Add noalias metadata if necessary.
    AddAliasScopeMetadata(CS, VMap, DL, CalleeAAR);

    // Propagate llvm.mem.parallel_loop_access if necessary.
    PropagateParallelLoopAccessMetadata(CS, VMap);

    // FIXME: We could register any cloned assumptions instead of clearing the
    // whole function's cache.
    if (IFI.ACT)
      IFI.ACT->getAssumptionCache(*Caller).clear();
  }

  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller.  First
  // calculate which instruction they should be inserted before.  We insert the
  // instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (!AI) continue;

      // If the alloca is now dead, remove it.  This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block.  Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(
          InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
    }
    // Move any dbg.declares describing the allocas into the entry basic block.
    DIBuilder DIB(*Caller->getParent());
    for (auto &AI : IFI.StaticAllocas)
      replaceDbgDeclareForAlloca(AI, AI, DIB, /*Deref=*/false);
  }

  bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
  if (InlinedFunctionInfo.ContainsCalls) {
    CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
    if (CallInst *CI = dyn_cast<CallInst>(TheCall))
      CallSiteTailKind = CI->getTailCallKind();

    for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
         ++BB) {
      for (Instruction &I : *BB) {
        CallInst *CI = dyn_cast<CallInst>(&I);
        if (!CI)
          continue;

        if (Function *F = CI->getCalledFunction())
          InlinedDeoptimizeCalls |=
              F->getIntrinsicID() == Intrinsic::experimental_deoptimize;

        // We need to reduce the strength of any inlined tail calls.  For
        // musttail, we have to avoid introducing potential unbounded stack
        // growth.  For example, if functions 'f' and 'g' are mutually recursive
        // with musttail, we can inline 'g' into 'f' so long as we preserve
        // musttail on the cloned call to 'f'.  If either the inlined call site
        // or the cloned call site is *not* musttail, the program already has
        // one frame of stack growth, so it's safe to remove musttail.  Here is
        // a table of example transformations:
        //
        //    f -> musttail g -> musttail f  ==>  f -> musttail f
        //    f -> musttail g ->     tail f  ==>  f ->     tail f
        //    f ->          g -> musttail f  ==>  f ->          f
        //    f ->          g ->     tail f  ==>  f ->          f
        CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
        ChildTCK = std::min(CallSiteTailKind, ChildTCK);
        CI->setTailCallKind(ChildTCK);
        InlinedMustTailCalls |= CI->isMustTailCall();

        // Calls inlined through a 'nounwind' call site should be marked
        // 'nounwind'.
        if (MarkNoUnwind)
          CI->setDoesNotThrow();
      }
    }
  }

  // Leave lifetime markers for the static allocas, scoping them to the
  // function we just inlined.
  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(&FirstNewBlock->front());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = nullptr;
      if (ConstantInt *AIArraySize =
          dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto &DL = Caller->getParent()->getDataLayout();
        Type *AllocaType = AI->getAllocatedType();
        uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
        uint64_t AllocaArraySize = AIArraySize->getLimitedValue();

        // Don't add markers for zero-sized allocas.
        if (AllocaArraySize == 0)
          continue;

        // Check that the array size doesn't saturate uint64_t and doesn't
        // overflow when multiplied by the type size.
        if (AllocaArraySize != ~0ULL &&
            UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
          AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                        AllocaArraySize * AllocaTypeSize);
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (ReturnInst *RI : Returns) {
        // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
        // call and a return.  The return kills all local allocas.
        if (InlinedMustTailCalls &&
            RI->getParent()->getTerminatingMustTailCall())
          continue;
        if (InlinedDeoptimizeCalls &&
            RI->getParent()->getTerminatingDeoptimizeCall())
          continue;
        IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
        Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
                             .CreateCall(StackSave, {}, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (ReturnInst *RI : Returns) {
      // Don't insert llvm.stackrestore calls between a musttail or deoptimize
      // call and a return.  The return will restore the stack pointer.
      if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
        continue;
      if (InlinedDeoptimizeCalls &&
          RI->getParent()->getTerminatingDeoptimizeCall())
        continue;
      IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
    }
  }
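
  // Net effect for the dynamic-alloca case (illustrative IR):
  //   %savedstack = call i8* @llvm.stacksave()
  //   ... inlined body, including its dynamic allocas ...
  //   call void @llvm.stackrestore(i8* %savedstack)
  // so the caller's stack pointer is restored on every return path.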

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions.  This is sensitive to which
  // funclet pads were top-level in the inlinee, so must be done before
  // rewriting the "parent pad" links.
  if (auto *II = dyn_cast<InvokeInst>(TheCall)) {
    BasicBlock *UnwindDest = II->getUnwindDest();
    Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
    if (isa<LandingPadInst>(FirstNonPHI)) {
      HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    } else {
      HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    }
  }

  // Update the lexical scopes of the new funclets and callsites.
  // Anything that had 'none' as its parent is now nested inside the callsite's
  // EHPad.

  if (CallSiteEHPad) {
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB) {
      // Add bundle operands to any top-level call sites.
      SmallVector<OperandBundleDef, 1> OpBundles;
      for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
        Instruction *I = &*BBI++;
        CallSite CS(I);
        if (!CS)
          continue;

        // Skip call sites which are nounwind intrinsics.
        auto *CalledFn =
            dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
        if (CalledFn && CalledFn->isIntrinsic() && CS.doesNotThrow())
          continue;

        // Skip call sites which already have a "funclet" bundle.
        if (CS.getOperandBundle(LLVMContext::OB_funclet))
          continue;

        CS.getOperandBundlesAsDefs(OpBundles);
        OpBundles.emplace_back("funclet", CallSiteEHPad);

        Instruction *NewInst;
        if (CS.isCall())
          NewInst = CallInst::Create(cast<CallInst>(I), OpBundles, I);
        else
          NewInst = InvokeInst::Create(cast<InvokeInst>(I), OpBundles, I);
        NewInst->takeName(I);
        I->replaceAllUsesWith(NewInst);
        I->eraseFromParent();

        OpBundles.clear();
      }

      // It is problematic if the inlinee has a cleanupret which unwinds to
      // caller and we inline it into a call site which doesn't unwind but into
      // an EH pad that does.  Such an edge must be dynamically unreachable.
      // As such, we replace the cleanupret with unreachable.
      if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
        if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
          changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false);

      Instruction *I = BB->getFirstNonPHI();
      if (!I->isEHPad())
        continue;

      if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
        if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
          CatchSwitch->setParentPad(CallSiteEHPad);
      } else {
        auto *FPI = cast<FuncletPadInst>(I);
        if (isa<ConstantTokenNone>(FPI->getParentPad()))
          FPI->setParentPad(CallSiteEHPad);
      }
    }
  }

  if (InlinedDeoptimizeCalls) {
    // We need to at least remove the deoptimizing returns from the Return set,
    // so that the control flow from those returns does not get merged into the
    // caller (but terminates it instead).  If the caller's return type does not
    // match the callee's return type, we also need to change the return type of
    // the intrinsic.
    if (Caller->getReturnType() == TheCall->getType()) {
      auto NewEnd = remove_if(Returns, [](ReturnInst *RI) {
        return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
      });
      Returns.erase(NewEnd, Returns.end());
    } else {
      SmallVector<ReturnInst *, 8> NormalReturns;
      Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
          Caller->getParent(), Intrinsic::experimental_deoptimize,
          {Caller->getReturnType()});

      for (ReturnInst *RI : Returns) {
        CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
        if (!DeoptCall) {
          NormalReturns.push_back(RI);
          continue;
        }

        // The calling convention on the deoptimize call itself may be bogus,
        // since the code we're inlining may have undefined behavior (and may
        // never actually execute at runtime); but all
        // @llvm.experimental.deoptimize declarations have to have the same
        // calling convention in a well-formed module.
        auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
        NewDeoptIntrinsic->setCallingConv(CallingConv);
        auto *CurBB = RI->getParent();
        RI->eraseFromParent();

        SmallVector<Value *, 4> CallArgs(DeoptCall->arg_begin(),
                                         DeoptCall->arg_end());

        SmallVector<OperandBundleDef, 1> OpBundles;
        DeoptCall->getOperandBundlesAsDefs(OpBundles);
        DeoptCall->eraseFromParent();
        assert(!OpBundles.empty() &&
               "Expected at least the deopt operand bundle");

        IRBuilder<> Builder(CurBB);
        CallInst *NewDeoptCall =
            Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
        NewDeoptCall->setCallingConv(CallingConv);
        if (NewDeoptCall->getType()->isVoidTy())
          Builder.CreateRetVoid();
        else
          Builder.CreateRet(NewDeoptCall);
      }

      // Leave behind the normal returns so we can merge control flow.
      std::swap(Returns, NormalReturns);
    }
  }
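
  // E.g. (illustrative IR; intrinsic name suffix elided) a cloned block ending
  // in
  //   call void @llvm.experimental.deoptimize...() [ "deopt"(...) ]
  //   ret void
  // is rewritten, when the caller returns i32, to call an i32-returning
  // variant of the intrinsic and 'ret' its result, so the deoptimizing path
  // exits the caller directly.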

  // Handle any inlined musttail call sites.  In order for a new call site to be
  // musttail, the source of the clone and the inlined call site must have been
  // musttail.  Therefore it's safe to return without merging control into the
  // phi below.
  if (InlinedMustTailCalls) {
    // Check if we need to bitcast the result of any musttail calls.
    Type *NewRetTy = Caller->getReturnType();
    bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;

    // Handle the returns preceded by musttail calls separately.
    SmallVector<ReturnInst *, 8> NormalReturns;
    for (ReturnInst *RI : Returns) {
      CallInst *ReturnedMustTail =
          RI->getParent()->getTerminatingMustTailCall();
      if (!ReturnedMustTail) {
        NormalReturns.push_back(RI);
        continue;
      }
      if (!NeedBitCast)
        continue;

      // Delete the old return and any preceding bitcast.
      BasicBlock *CurBB = RI->getParent();
      auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
      RI->eraseFromParent();
      if (OldCast)
        OldCast->eraseFromParent();

      // Insert a new bitcast and return with the right type.
      IRBuilder<> Builder(CurBB);
      Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
    }

    // Leave behind the normal returns so we can merge control flow.
    std::swap(Returns, NormalReturns);
  }

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall->getIterator(),
                                 FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call with
    // uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks.  How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = nullptr;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block.  This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB =
        OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
                                CalledFunc->getName() + ".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall->getIterator(),
                                          CalledFunc->getName() + ".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, &*FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer.  In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
                                     Caller->getBasicBlockList(), FirstNewBlock,
                                     Caller->end());

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = nullptr;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            &AfterCallBB->front());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge point and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst *BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call.  Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it.  It can only contain a bitcast and a ret.
  if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the entry block of the callee into the calling block, right before
  // the unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef).  If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    auto &DL = Caller->getParent()->getDataLayout();
    if (Value *V = SimplifyInstruction(PHI, DL, nullptr, nullptr,
                                       &IFI.ACT->getAssumptionCache(*Caller))) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return true;
}