//===- InlineFunction.cpp - Code to perform function inlining -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CommandLine.h"
#include <algorithm>
using namespace llvm;

static cl::opt<bool>
EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
  cl::Hidden,
  cl::desc("Convert noalias attributes to metadata during inlining."));

static cl::opt<bool>
PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
  cl::init(true), cl::Hidden,
  cl::desc("Convert align attributes to assumptions during inlining."));

bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(CI), IFI, InsertLifetime);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(II), IFI, InsertLifetime);
}

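// A minimal usage sketch (hypothetical driver code; in-tree, the Inliner
// passes drive this for each candidate call site):
//
//   InlineFunctionInfo IFI(/*cg=*/&CG, /*ACT=*/&ACT);
//   if (InlineFunction(CS, IFI, /*InsertLifetime=*/true)) {
//     // The callee's body now replaces the call/invoke at CS.
//   }
//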
namespace {
  /// A class for recording information about inlining through an invoke.
  class InvokeInliningInfo {
    BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
    BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
    LandingPadInst *CallerLPad;  ///< LandingPadInst associated with the invoke.
    PHINode *InnerEHValuesPHI;   ///< PHI for EH values from landingpad insts.
    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    InvokeInliningInfo(InvokeInst *II)
      : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr),
        CallerLPad(nullptr), InnerEHValuesPHI(nullptr) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      llvm::BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// Forward the 'resume' instruction to the caller's landing pad block.
    /// When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

    /// Add incoming-PHI values to the unwind destination block for the given
    /// basic block, using the values for the original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
}

/// Get or create a target for the branch from ResumeInsts.
BasicBlock *InvokeInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = CallerLPad; ++SplitPoint;
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}
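
// After getInnerResumeDest() splits the caller's landing pad block, the CFG
// is shaped roughly like this (an illustrative sketch):
//
//   lpad:                                ; OuterResumeDest
//     %lp = landingpad ...               ; plus PHIs feeding the inner block
//     br label %lpad.body
//   lpad.body:                           ; InnerResumeDest
//     %eh.lpad-body = phi ... [ %lp, %lpad ], ...
//     ...code that originally followed the landingpad...
//
// Inlined 'resume' instructions are forwarded (below) as branches to
// %lpad.body, feeding their operands into the EH-values PHI.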

/// Forward the 'resume' instruction to the caller's landing pad block.
/// When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void InvokeInliningInfo::forwardResume(ResumeInst *RI,
                               SmallPtrSetImpl<LandingPadInst*> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// When we inline a basic block into an invoke, we have to turn all of the
/// calls that can throw into invokes. This function analyzes BB to see if
/// there are any such calls, and if so, it rewrites them to be invokes that
/// jump to the caller's unwind destination (recorded in Invoke) and fills in
/// the PHI nodes of that destination block accordingly.
static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   InvokeInliningInfo &Invoke) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    // If this call cannot unwind, don't convert it to an invoke.
    // Inline asm calls cannot throw.
    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    // Convert this function call into an invoke instruction.  First, split the
    // basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock
    BB->getInstList().pop_back();

    // Create the new invoke instruction.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split,
                                        Invoke.getOuterResumeDest(),
                                        InvokeArgs, CI->getName(), BB);
    II->setDebugLoc(CI->getDebugLoc());
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke!  This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the original call
    Split->getInstList().pop_front();

    // Update any PHI nodes in the exceptional block to indicate that there is
    // now a new entry in them.
    Invoke.addIncomingPHIValuesFor(BB);
    return;
  }
}

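// For illustration, rewriting a may-throw call looks roughly like this
// (a sketch; value names are invented):
//
//   ; before
//   %r = call i32 @callee(i32 %x)
//   ...rest of block...
//
//   ; after
//   %r = invoke i32 @callee(i32 %x)
//           to label %r.noexc unwind label %lpad  ; %lpad: outer resume dest
//   r.noexc:                                      ; the split-off remainder
//   ...rest of block...
//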
/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlinedCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function; scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  InvokeInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock, E = Caller->end(); I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB){
    if (InlinedCodeInfo.ContainsCalls)
      HandleCallsInBlockInlinedThroughInvoke(BB, Invoke);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// When inlining a function that contains noalias scope metadata,
/// this metadata needs to be cloned so that the inlined blocks
/// have different "unique scopes" at every call site. Were this not done, then
/// aliasing scopes from a function inlined into a caller multiple times could
/// not be differentiated (and this would lead to miscompiles because the
/// non-aliasing property communicated by the metadata could have
/// call-site-specific control dependencies).
static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
  const Function *CalledFunc = CS.getCalledFunction();
  SetVector<const MDNode *> MD;

  // Note: We could only clone the metadata if it is already used in the
  // caller. I'm omitting that check here because it might confuse
  // inter-procedural alias analysis passes. We can revisit this if it becomes
  // an efficiency or overhead problem.

  for (Function::const_iterator I = CalledFunc->begin(), IE = CalledFunc->end();
       I != IE; ++I)
    for (BasicBlock::const_iterator J = I->begin(), JE = I->end(); J != JE; ++J) {
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);
    }

  if (MD.empty())
    return;

  // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
  // the set.
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
      if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
        if (MD.insert(M1))
          Queue.push_back(M1);
  }

  // Now we have a complete set of all metadata in the chains used to specify
  // the noalias scopes and the lists of those scopes.
  SmallVector<TempMDTuple, 16> DummyNodes;
  DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
    MDMap[*I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    SmallVector<Metadata *, 4> NewOps;
    for (unsigned i = 0, ie = (*I)->getNumOperands(); i != ie; ++i) {
      const Metadata *V = (*I)->getOperand(i);
      if (const MDNode *M = dyn_cast<MDNode>(V))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(V));
    }

    MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[*I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
  }

  // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had alias scope metadata (a list of scopes to
      // which instructions inside it might belong), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NI->setMetadata(LLVMContext::MD_alias_scope, M);
    }

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had noalias metadata (a list of scopes with
      // which instructions inside it don't alias), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_noalias, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NI->setMetadata(LLVMContext::MD_noalias, M);
    }
  }
}
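
// The scope metadata being cloned is typically self-referential, e.g.
// (a sketch; numbering invented):
//
//   !0 = distinct !{!0, !"callee"}           ; scope domain
//   !1 = distinct !{!1, !0, !"callee: %p"}   ; a scope in that domain
//   !2 = !{!1}                               ; scope list on an instruction
//
// The cycles are why temporary nodes are created first and then RAUW'd:
// they cannot be rebuilt in a single bottom-up pass.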

/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
                                  const DataLayout &DL, AliasAnalysis *AA) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CS.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
       E = CalledFunc->arg_end(); I != E; ++I) {
    if (I->hasNoAliasAttr() && !I->hasNUses(0))
      NoAliasArgs.push_back(I);
  }

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
    MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = CalledFunc->getName();
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI)
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (ICS.doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (AA) {
          AliasAnalysis::ModRefBehavior MRB = AA->getModRefBehavior(ICS);
          if (MRB == AliasAnalysis::OnlyAccessesArgumentPointees ||
              MRB == AliasAnalysis::OnlyReadsArgumentPointees)
            IsArgMemOnlyCall = true;
        }

        for (ImmutableCallSite::arg_iterator AI = ICS.arg_begin(),
             AE = ICS.arg_end(); AI != AE; ++AI) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
          // However, if we know that the call only accesses pointer arguments,
          // then we only need to check the pointer arguments.
          if (IsArgMemOnlyCall && !(*AI)->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(*AI);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, then we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // The same underlying object can be reached through several PHIs and
      // may therefore appear more than once in the Objects list; use a set
      // to deduplicate.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (unsigned i = 0, ie = PtrArgs.size(); i != ie; ++i) {
        SmallVector<Value *, 4> Objects;
        GetUnderlyingObjects(const_cast<Value*>(PtrArgs[i]),
                             Objects, DL, /* MaxLookup = */ 0);

        for (Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value? (We need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols.)
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!A->hasNoAliasAttr())
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
                                 // It might be tempting to skip the
                                 // PointerMayBeCapturedBefore check if
                                 // A->hasNoCaptureAttr() is true, but this is
                                 // incorrect because nocapture only guarantees
                                 // that no copies outlive the function, not
                                 // that the value cannot be locally captured.
                                 !PointerMayBeCapturedBefore(A,
                                   /* ReturnCaptures */ false,
                                   /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                        MDNode::concatenate(
                            NI->getMetadata(LLVMContext::MD_noalias),
                            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}
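
// Net effect, illustrated (a sketch; metadata numbering invented): after
// inlining a callee with noalias arguments %a and %b, an inlined load whose
// address derives only from %a might become
//
//   %v = load i32, i32* %a.i, !alias.scope !3, !noalias !4
//   !3 = !{!1}   ; !1 = the scope created for %a
//   !4 = !{!2}   ; !2 = the scope created for %b
//
// telling later passes that the access lies in %a's scope and does not
// alias accesses tagged only with %b's scope.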

/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
  if (!PreserveAlignmentAssumptions)
    return;
  auto &DL = CS.getCaller()->getParent()->getDataLayout();

  // To avoid inserting redundant assumptions, we should check for assumptions
  // already in the caller. To do this, we might need a DT of the caller.
  DominatorTree DT;
  bool DTCalculated = false;

  Function *CalledFunc = CS.getCalledFunction();
  for (Function::arg_iterator I = CalledFunc->arg_begin(),
                              E = CalledFunc->arg_end();
       I != E; ++I) {
    unsigned Align = I->getType()->isPointerTy() ? I->getParamAlignment() : 0;
    if (Align && !I->hasByValOrInAllocaAttr() && !I->hasNUses(0)) {
      if (!DTCalculated) {
        DT.recalculate(const_cast<Function&>(*CS.getInstruction()->getParent()
                                               ->getParent()));
        DTCalculated = true;
      }

      // If we can already prove the asserted alignment in the context of the
      // caller, then don't bother inserting the assumption.
      Value *Arg = CS.getArgument(I->getArgNo());
      if (getKnownAlignment(Arg, DL, CS.getInstruction(),
                            &IFI.ACT->getAssumptionCache(*CalledFunc),
                            &DT) >= Align)
        continue;

      IRBuilder<>(CS.getInstruction())
          .CreateAlignmentAssumption(DL, Arg, Align);
    }
  }
}
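
// CreateAlignmentAssumption expands to the usual mask-and-test pattern
// feeding @llvm.assume; e.g. for a 32-byte-aligned pointer argument
// (a sketch):
//
//   %ptrint    = ptrtoint i32* %arg to i64
//   %maskedptr = and i64 %ptrint, 31
//   %maskcond  = icmp eq i64 %maskedptr, 0
//   call void @llvm.assume(i1 %maskcond)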

/// Once we have cloned code over from a callee into the caller,
/// update the specified callgraph to reflect the changes we made.
/// Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == nullptr)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add.  Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (!NewCall)
      continue;

    // We do not treat intrinsic calls like real function calls because we
    // expect them to become inline code; do not add an edge for an intrinsic.
    CallSite CS = CallSite(NewCall);
    if (CS && CS.getCalledFunction() && CS.getCalledFunction()->isIntrinsic())
      continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer.  If this
    // happens, set the callee of the new call site to a more precise
    // destination.  This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (!I->second->getFunction())
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller.  We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
                                    BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI) {
  Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
  IRBuilder<> Builder(InsertBlock->begin());

  Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer.  Other optimizations can infer
  // better alignment.
  Builder.CreateMemCpy(Dst, Src, Size, /*Align=*/1);
}
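
// The emitted copy is shaped like this (a sketch; bitcasts to i8* elided):
//
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src,
//                                        i64 <store size>, i32 1, i1 false)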

/// When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  PointerType *ArgTy = cast<PointerType>(Arg->getType());
  Type *AggTy = ArgTy->getElementType();

  Function *Caller = TheCall->getParent()->getParent();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory.  In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // known alignment of the passed-in pointer, then we either have to round up
    // the input pointer or give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    const DataLayout &DL = Caller->getParent()->getDataLayout();

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall,
                                   &IFI.ACT->getAssumptionCache(*Caller)) >=
        ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment.  This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  // Create the alloca.  If we have DataLayout, use nice alignment.
  unsigned Align =
      Caller->getParent()->getDataLayout().getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Value *NewAlloca = new AllocaInst(AggTy, nullptr, Align, Arg->getName(),
                                    &*Caller->begin()->begin());
  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}
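
// E.g. (a sketch) a byval argument of type %struct.S with a 16-byte
// alignment requirement becomes, in the caller's entry block:
//
//   %s.byval = alloca %struct.S, align 16
//
// followed by the explicit memcpy emitted by HandleByValArgumentInit.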

// Check whether this Value is used by a lifetime intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users()) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
        return true;
      }
    }
  }
  return false;
}

// Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
                                       Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}

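// The markers being looked for here (and inserted by InlineFunction below)
// are shaped like this (a sketch):
//
//   %a = alloca [16 x i8]
//   %p = bitcast [16 x i8]* %a to i8*
//   call void @llvm.lifetime.start(i64 16, i8* %p)
//   ...
//   call void @llvm.lifetime.end(i64 16, i8* %p)
//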
/// Rebuild the entire inlined-at chain for this instruction so that the top of
/// the chain now is inlined-at the new call site.
static DebugLoc
updateInlinedAtInfo(DebugLoc DL, MDLocation *InlinedAtNode,
                    LLVMContext &Ctx,
                    DenseMap<const MDLocation *, MDLocation *> &IANodes) {
  SmallVector<MDLocation*, 3> InlinedAtLocations;
  MDLocation *Last = InlinedAtNode;
  MDLocation *CurInlinedAt = DL;

  // Gather all the inlined-at nodes
  while (MDLocation *IA = CurInlinedAt->getInlinedAt()) {
    // Skip any we've already built nodes for
    if (MDLocation *Found = IANodes[IA]) {
      Last = Found;
      break;
    }

    InlinedAtLocations.push_back(IA);
    CurInlinedAt = IA;
  }

  // Starting from the top, rebuild the nodes to point to the new inlined-at
  // location (then rebuilding the rest of the chain behind it) and update the
  // map of already-constructed inlined-at nodes.
  for (auto I = InlinedAtLocations.rbegin(), E = InlinedAtLocations.rend();
       I != E; ++I) {
    const MDLocation *MD = *I;
    Last = IANodes[MD] = MDLocation::getDistinct(
        Ctx, MD->getLine(), MD->getColumn(), MD->getScope(), Last);
  }

  // And finally create the normal location for this instruction, referring to
  // the new inlined-at chain.
  return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(), Last);
}
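
// After the update, a location inside the inlined body carries a chain like
// (an illustrative sketch in this era's textual IR):
//
//   !10 = !MDLocation(line: 5, scope: !callee_sp, inlinedAt: !11)
//   !11 = distinct !MDLocation(line: 42, scope: !caller_sp) ; the call site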

/// Update inlined instructions' line numbers to encode the location where
/// these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall) {
  DebugLoc TheCallDL = TheCall->getDebugLoc();
  if (!TheCallDL)
    return;

  auto &Ctx = Fn->getContext();
  MDLocation *InlinedAtNode = TheCallDL;

  // Create a unique call site, not to be confused with any other call from the
  // same location.
  InlinedAtNode = MDLocation::getDistinct(
      Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
      InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());

  // Cache the inlined-at nodes as they're built so they are reused; without
  // this, every instruction's inlined-at chain would become distinct from
  // the others.
  DenseMap<const MDLocation *, MDLocation *> IANodes;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      DebugLoc DL = BI->getDebugLoc();
      if (!DL) {
        // If the inlined instruction has no line number, make it look as if it
        // originates from the call location. This is important for
        // ((__always_inline__, __nodebug__)) functions which must use caller
        // location for all instructions in their function body.

        // Don't update static allocas, as they may get moved later.
        if (auto *AI = dyn_cast<AllocaInst>(BI))
          if (isa<Constant>(AI->getArraySize()))
            continue;

        BI->setDebugLoc(TheCallDL);
      } else {
        BI->setDebugLoc(
            updateInlinedAtInfo(DL, InlinedAtNode, BI->getContext(), IANodes));
      }
    }
  }
}

/// This function inlines the called function into the basic block of the
/// caller. This returns false if it is not possible to inline this call.
/// The program is still in a well defined state if this occurs though.
///
/// Note that this only does one level of inlining.  For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream.  Similarly this will inline a recursive
/// function by one level.
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (!CalledFunc ||              // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get the personality function from the callee if it contains a landing pad.
  Value *CalleePersonality = nullptr;
  for (Function::const_iterator I = CalledFunc->begin(), E = CalledFunc->end();
       I != E; ++I)
    if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
      const BasicBlock *BB = II->getUnwindDest();
      const LandingPadInst *LP = BB->getLandingPadInst();
      CalleePersonality = LP->getPersonalityFn();
      break;
    }

  // Find the personality function used by the landing pads of the caller. If it
  // exists, then check to see that it matches the personality function used in
  // the callee.
  if (CalleePersonality) {
    for (Function::const_iterator I = Caller->begin(), E = Caller->end();
         I != E; ++I)
      if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
        const BasicBlock *BB = II->getUnwindDest();
        const LandingPadInst *LP = BB->getLandingPadInst();

        // If the personality functions match, then we can perform the
        // inlining. Otherwise, we can't inline.
        // TODO: This isn't 100% true. Some personality functions are proper
        //       supersets of others and can be used in place of the other.
        if (LP->getPersonalityFn() != CalleePersonality)
          return false;

        break;
      }
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;
    // Keep a list of (dst, src) pairs to emit byval initializations.
    SmallVector<std::pair<Value*, Value*>, 4> ByValInit;

    auto &DL = Caller->getParent()->getDataLayout();

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner, which
    // matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit.  However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CS.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));
        if (ActualArg != *AI)
          ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
      }

      VMap[I] = ActualArg;
    }

    // Add alignment assumptions if necessary. We do this before the inlined
    // instructions are actually cloned into the caller so that we can easily
    // check what will be known at the start of the inlined code.
    AddAlignmentAssumptions(CS, IFI);

    // We want the inliner to prune the code as it copies.  We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Inject byval arguments initialization.
    for (std::pair<Value*, Value*> &Init : ByValInit)
      HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
                              FirstNewBlock, IFI);

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // Update inlined instructions' line number information.
    fixupLineNumbers(Caller, FirstNewBlock, TheCall);

    // Clone existing noalias metadata if necessary.
    CloneAliasScopeMetadata(CS, VMap);

    // Add noalias metadata if necessary.
    AddAliasScopeMetadata(CS, VMap, DL, IFI.AA);

    // FIXME: We could register any cloned assumptions instead of clearing the
    // whole function's cache.
    if (IFI.ACT)
      IFI.ACT->getAssumptionCache(*Caller).clear();
  }

  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller.  First
  // calculate which instruction they should be inserted before.  We insert the
  // instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (!AI) continue;

      // If the alloca is now dead, remove it.  This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block.  Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
    // Move any dbg.declares describing the allocas into the entry basic block.
    DIBuilder DIB(*Caller->getParent());
    for (auto &AI : IFI.StaticAllocas)
      replaceDbgDeclareForAlloca(AI, AI, DIB, /*Deref=*/false);
  }

  bool InlinedMustTailCalls = false;
  if (InlinedFunctionInfo.ContainsCalls) {
    CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
    if (CallInst *CI = dyn_cast<CallInst>(TheCall))
      CallSiteTailKind = CI->getTailCallKind();

    for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
         ++BB) {
      for (Instruction &I : *BB) {
        CallInst *CI = dyn_cast<CallInst>(&I);
        if (!CI)
          continue;

        // We need to reduce the strength of any inlined tail calls.  For
        // musttail, we have to avoid introducing potential unbounded stack
        // growth.  For example, if functions 'f' and 'g' are mutually recursive
        // with musttail, we can inline 'g' into 'f' so long as we preserve
        // musttail on the cloned call to 'f'.  If either the inlined call site
        // or the cloned call site is *not* musttail, the program already has
        // one frame of stack growth, so it's safe to remove musttail.  Here is
        // a table of example transformations:
        //
        //    f -> musttail g -> musttail f  ==>  f -> musttail f
        //    f -> musttail g ->     tail f  ==>  f ->     tail f
        //    f ->          g -> musttail f  ==>  f ->          f
        //    f ->          g ->     tail f  ==>  f ->          f
        CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
        ChildTCK = std::min(CallSiteTailKind, ChildTCK);
        CI->setTailCallKind(ChildTCK);
        InlinedMustTailCalls |= CI->isMustTailCall();

        // Calls inlined through a 'nounwind' call site should be marked
        // 'nounwind'.
        if (MarkNoUnwind)
          CI->setDoesNotThrow();
      }
    }
  }

  // Leave lifetime markers for the static allocas, scoping them to the
  // function we just inlined.
  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(FirstNewBlock->begin());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = nullptr;
      if (ConstantInt *AIArraySize =
          dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto &DL = Caller->getParent()->getDataLayout();
        Type *AllocaType = AI->getAllocatedType();
        uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
        uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
        assert(AllocaArraySize > 0 && "array size of AllocaInst is zero");
        // Check that the array size doesn't saturate uint64_t and doesn't
        // overflow when multiplied by the type size.
        if (AllocaArraySize != ~0ULL &&
            UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
          AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                        AllocaArraySize * AllocaTypeSize);
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (ReturnInst *RI : Returns) {
        // Don't insert llvm.lifetime.end calls between a musttail call and a
        // return.  The return kills all local allocas.
        if (InlinedMustTailCalls &&
            RI->getParent()->getTerminatingMustTailCall())
          continue;
        IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }
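  // For illustration only (hypothetical IR): for a 16-byte static alloca
  // %buf, the markers inserted above look roughly like
  //   %p = bitcast [16 x i8]* %buf to i8*
  //   call void @llvm.lifetime.start(i64 16, i8* %p)
  //   ...inlined body...
  //   call void @llvm.lifetime.end(i64 16, i8* %p)
  // (IRBuilder materializes the bitcast to i8* when the alloca has another
  // type).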

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
        Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(FirstNewBlock, FirstNewBlock->begin())
      .CreateCall(StackSave, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (ReturnInst *RI : Returns) {
      // Don't insert llvm.stackrestore calls between a musttail call and a
      // return.  The return will restore the stack pointer.
      if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
        continue;
      IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
    }
  }
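  // For illustration only (hypothetical IR): the wrapped region looks like
  //   %savedstack = call i8* @llvm.stacksave()
  //   ...inlined body containing dynamic allocas...
  //   call void @llvm.stackrestore(i8* %savedstack)
  // so stack space allocated by the inlined code is released on every return
  // path.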

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // Handle any inlined musttail call sites.  In order for a new call site to be
  // musttail, the source of the clone and the inlined call site must have been
  // musttail.  Therefore it's safe to return without merging control into the
  // phi below.
  if (InlinedMustTailCalls) {
    // Check if we need to bitcast the result of any musttail calls.
    Type *NewRetTy = Caller->getReturnType();
    bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;

    // Handle the returns preceded by musttail calls separately.
    SmallVector<ReturnInst *, 8> NormalReturns;
    for (ReturnInst *RI : Returns) {
      CallInst *ReturnedMustTail =
          RI->getParent()->getTerminatingMustTailCall();
      if (!ReturnedMustTail) {
        NormalReturns.push_back(RI);
        continue;
      }
      if (!NeedBitCast)
        continue;

      // Delete the old return and any preceding bitcast.
      BasicBlock *CurBB = RI->getParent();
      auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
      RI->eraseFromParent();
      if (OldCast)
        OldCast->eraseFromParent();

      // Insert a new bitcast and return with the right type.
      IRBuilder<> Builder(CurBB);
      Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
    }

    // Leave behind the normal returns so we can merge control flow.
    std::swap(Returns, NormalReturns);
  }
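  // For illustration only (hypothetical IR): if the caller returns i8* while
  // the inlined callee returned i32*, a musttail block such as
  //   %v = musttail call i32* @g()
  //   ret i32* %v
  // is rewritten above into
  //   %v = musttail call i32* @g()
  //   %c = bitcast i32* %v to i8*
  //   ret i8* %c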

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call with
    // uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }
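  // For illustration only (hypothetical IR): in this fast path a block
  //   BB:  ...  %r = call i32 @callee()  ...
  // simply becomes
  //   BB:  ...  <inlined body>  ...
  // with uses of %r rewritten to the callee's return value.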

  // Otherwise, we have the normal case of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks.  How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = nullptr;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block.  This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(CreatedBranchToNormalDest,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer.  In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());
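  // The caller's layout is now, schematically:
  //   OrigBB -> FirstNewBlock ... (inlined blocks whose returns will be
  //   merged) -> AfterCallBB
  // which is the shape the return-handling code below expects.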

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = nullptr;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as its operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }
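    // For illustration only (hypothetical IR): with two returns the merge
    // point looks like
    //   exit:
    //     %phi = phi i32 [ %r1, %retblock1 ], [ %r2, %retblock2 ]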

    // Add a branch to the merge point and remove the return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst *BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call.  Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it.  It can only contain a bitcast and ret.
  if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code of the callee's entry block into the calling block, right
  // before the unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef).  If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    auto &DL = Caller->getParent()->getDataLayout();
    if (Value *V = SimplifyInstruction(PHI, DL, nullptr, nullptr,
                                       &IFI.ACT->getAssumptionCache(*Caller))) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }
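  // For illustration only (hypothetical IR): a merge phi whose incoming values
  // are all identical, e.g.
  //   %phi = phi i32 [ %v, %a ], [ %v, %b ]
  // simplifies to %v, and the phi is erased.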

  return true;
}