//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InlineCost.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "inline-cost"

STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");

namespace {

class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
  typedef InstVisitor<CallAnalyzer, bool> Base;
  friend class InstVisitor<CallAnalyzer, bool>;

  // DataLayout if available, or null.
  const DataLayout *const DL;

  /// The TargetTransformInfo available for this compilation.
  const TargetTransformInfo &TTI;

  // The called function.
  Function &F;

  int Threshold;
  int Cost;

  bool IsCallerRecursive;
  bool IsRecursiveCall;
  bool ExposesReturnsTwice;
  bool HasDynamicAlloca;
  bool ContainsNoDuplicateCall;
  bool HasReturn;
  bool HasIndirectBr;

  /// Number of bytes allocated statically by the callee.
  uint64_t AllocatedSize;
  unsigned NumInstructions, NumVectorInstructions;
  int FiftyPercentVectorBonus, TenPercentVectorBonus;
  int VectorBonus;

  // While we walk the potentially-inlined instructions, we build up and
  // maintain a mapping of simplified values specific to this callsite. The
  // idea is to propagate any special information we have about arguments to
  // this call through the inlinable section of the function, and account for
  // likely simplifications post-inlining. The most important aspect we track
  // is CFG altering simplifications -- when we prove a basic block dead, that
  // can cause dramatic shifts in the cost of inlining a function.
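  // For example, if the caller passes the constant 42 for an argument %x, a
  // callee-side 'icmp eq i32 %x, 42' will fold to true below, which may in
  // turn prove a conditional branch -- and one of its successor blocks --
  // dead for the purpose of this analysis.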
  DenseMap<Value *, Constant *> SimplifiedValues;

  // Keep track of the values which map back (through function arguments) to
  // allocas on the caller stack which could be simplified through SROA.
  DenseMap<Value *, Value *> SROAArgValues;

  // The mapping of caller Alloca values to their accumulated cost savings. If
  // we have to disable SROA for one of the allocas, this tells us how much
  // cost must be added.
  DenseMap<Value *, int> SROAArgCosts;

  // Keep track of values which map to a pointer base and constant offset.
  DenseMap<Value *, std::pair<Value *, APInt> > ConstantOffsetPtrs;

  // Custom simplification helper routines.
  bool isAllocaDerivedArg(Value *V);
  bool lookupSROAArgAndCost(Value *V, Value *&Arg,
                            DenseMap<Value *, int>::iterator &CostIt);
  void disableSROA(DenseMap<Value *, int>::iterator CostIt);
  void disableSROA(Value *V);
  void accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                          int InstructionCost);
  bool isGEPOffsetConstant(GetElementPtrInst &GEP);
  bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
  bool simplifyCallSite(Function *F, CallSite CS);
  ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);

  // Custom analysis routines.
  bool analyzeBlock(BasicBlock *BB);

  // Disable several entry points to the visitor so we don't accidentally use
  // them by declaring but not defining them here.
  void visit(Module *);     void visit(Module &);
  void visit(Function *);   void visit(Function &);
  void visit(BasicBlock *); void visit(BasicBlock &);

  // Provide base case for our instruction visit.
  bool visitInstruction(Instruction &I);

  // Our visit overrides.
  bool visitAlloca(AllocaInst &I);
  bool visitPHI(PHINode &I);
  bool visitGetElementPtr(GetElementPtrInst &I);
  bool visitBitCast(BitCastInst &I);
  bool visitPtrToInt(PtrToIntInst &I);
  bool visitIntToPtr(IntToPtrInst &I);
  bool visitCastInst(CastInst &I);
  bool visitUnaryInstruction(UnaryInstruction &I);
  bool visitCmpInst(CmpInst &I);
  bool visitSub(BinaryOperator &I);
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoad(LoadInst &I);
  bool visitStore(StoreInst &I);
  bool visitExtractValue(ExtractValueInst &I);
  bool visitInsertValue(InsertValueInst &I);
  bool visitCallSite(CallSite CS);
  bool visitReturnInst(ReturnInst &RI);
  bool visitBranchInst(BranchInst &BI);
  bool visitSwitchInst(SwitchInst &SI);
  bool visitIndirectBrInst(IndirectBrInst &IBI);
  bool visitResumeInst(ResumeInst &RI);
  bool visitUnreachableInst(UnreachableInst &I);

public:
  CallAnalyzer(const DataLayout *DL, const TargetTransformInfo &TTI,
               Function &Callee, int Threshold)
      : DL(DL), TTI(TTI), F(Callee), Threshold(Threshold), Cost(0),
        IsCallerRecursive(false), IsRecursiveCall(false),
        ExposesReturnsTwice(false), HasDynamicAlloca(false),
        ContainsNoDuplicateCall(false), HasReturn(false), HasIndirectBr(false),
        AllocatedSize(0), NumInstructions(0), NumVectorInstructions(0),
        FiftyPercentVectorBonus(0), TenPercentVectorBonus(0), VectorBonus(0),
        NumConstantArgs(0), NumConstantOffsetPtrArgs(0), NumAllocaArgs(0),
        NumConstantPtrCmps(0), NumConstantPtrDiffs(0),
        NumInstructionsSimplified(0), SROACostSavings(0),
        SROACostSavingsLost(0) {}

  bool analyzeCall(CallSite CS);

  int getThreshold() { return Threshold; }
  int getCost() { return Cost; }

  // Keep a bunch of stats about the cost savings found so we can print them
  // out when debugging.
  unsigned NumConstantArgs;
  unsigned NumConstantOffsetPtrArgs;
  unsigned NumAllocaArgs;
  unsigned NumConstantPtrCmps;
  unsigned NumConstantPtrDiffs;
  unsigned NumInstructionsSimplified;
  unsigned SROACostSavings;
  unsigned SROACostSavingsLost;

  void dump();
};

} // namespace

/// \brief Test whether the given value is an Alloca-derived function argument.
bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
  return SROAArgValues.count(V);
}

/// \brief Lookup the SROA-candidate argument and cost iterator which V maps to.
/// Returns false if V does not map to a SROA-candidate.
bool CallAnalyzer::lookupSROAArgAndCost(
    Value *V, Value *&Arg, DenseMap<Value *, int>::iterator &CostIt) {
  if (SROAArgValues.empty() || SROAArgCosts.empty())
    return false;

  DenseMap<Value *, Value *>::iterator ArgIt = SROAArgValues.find(V);
  if (ArgIt == SROAArgValues.end())
    return false;

  Arg = ArgIt->second;
  CostIt = SROAArgCosts.find(Arg);
  return CostIt != SROAArgCosts.end();
}

/// \brief Disable SROA for the candidate marked by this cost iterator.
///
/// This marks the candidate as no longer viable for SROA, and adds the cost
/// savings associated with it back into the inline cost measurement.
void CallAnalyzer::disableSROA(DenseMap<Value *, int>::iterator CostIt) {
  // If we're no longer able to perform SROA we need to undo its cost savings
  // and prevent subsequent analysis.
  Cost += CostIt->second;
  SROACostSavings -= CostIt->second;
  SROACostSavingsLost += CostIt->second;
  SROAArgCosts.erase(CostIt);
}

/// \brief If 'V' maps to a SROA candidate, disable SROA for it.
void CallAnalyzer::disableSROA(Value *V) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(V, SROAArg, CostIt))
    disableSROA(CostIt);
}

/// \brief Accumulate the given cost for a particular SROA candidate.
void CallAnalyzer::accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                                      int InstructionCost) {
  CostIt->second += InstructionCost;
  SROACostSavings += InstructionCost;
}

/// \brief Check whether a GEP's indices are all constant.
///
/// Respects any simplified values known during the analysis of this callsite.
bool CallAnalyzer::isGEPOffsetConstant(GetElementPtrInst &GEP) {
  for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
    if (!isa<Constant>(*I) && !SimplifiedValues.lookup(*I))
      return false;

  return true;
}

/// \brief Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
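///
/// For example, assuming 64-bit pointers and default struct layout,
/// accumulating the offset of
///   %p = getelementptr inbounds {i32, i64}* %base, i64 1, i32 1
/// adds 1 * 16 (one whole struct) plus 8 (the field offset of the i64) for a
/// total of 24 bytes.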
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
  if (!DL)
    return false;

  unsigned IntPtrWidth = DL->getPointerSizeInBits();
  assert(IntPtrWidth == Offset.getBitWidth());

  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
        OpC = dyn_cast<ConstantInt>(SimpleOp);
    if (!OpC)
      return false;
    if (OpC->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = DL->getStructLayout(STy);
      Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
      continue;
    }

    APInt TypeSize(IntPtrWidth, DL->getTypeAllocSize(GTI.getIndexedType()));
    Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
  }
  return true;
}

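// As an illustration of the size accounting below: a static
// 'alloca [64 x i32]' contributes 256 bytes to AllocatedSize when DataLayout
// is available.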
bool CallAnalyzer::visitAlloca(AllocaInst &I) {
  // Check whether inlining will turn a dynamic alloca into a static
  // alloca, and handle that case.
  if (I.isArrayAllocation()) {
    if (Constant *Size = SimplifiedValues.lookup(I.getArraySize())) {
      ConstantInt *AllocSize = dyn_cast<ConstantInt>(Size);
      assert(AllocSize && "Allocation size not a constant int?");
      Type *Ty = I.getAllocatedType();
      AllocatedSize += Ty->getPrimitiveSizeInBits() * AllocSize->getZExtValue();
      return Base::visitAlloca(I);
    }
  }

  // Accumulate the allocated size.
  if (I.isStaticAlloca()) {
    Type *Ty = I.getAllocatedType();
    AllocatedSize += (DL ? DL->getTypeAllocSize(Ty) :
                      Ty->getPrimitiveSizeInBits());
  }

  // We will happily inline static alloca instructions.
  if (I.isStaticAlloca())
    return Base::visitAlloca(I);

  // FIXME: This is overly conservative. Dynamic allocas are inefficient for
  // a variety of reasons, and so we would like to not inline them into
  // functions which don't currently have a dynamic alloca. This simply
  // disables inlining altogether in the presence of a dynamic alloca.
  HasDynamicAlloca = true;
  return false;
}

bool CallAnalyzer::visitPHI(PHINode &I) {
  // FIXME: We should potentially be tracking values through phi nodes,
  // especially when they collapse to a single value due to deleted CFG edges
  // during inlining.

  // FIXME: We need to propagate SROA *disabling* through phi nodes, even
  // though we don't want to propagate its bonuses. The idea is to disable
  // SROA if it *might* be used in an inappropriate manner.

  // Phi nodes are always zero-cost.
  return true;
}

bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  bool SROACandidate = lookupSROAArgAndCost(I.getPointerOperand(),
                                            SROAArg, CostIt);

  // Try to fold GEPs of constant-offset call site argument pointers. This
  // requires target data and inbounds GEPs.
  if (DL && I.isInBounds()) {
    // Check if we have a base + offset for the pointer.
    Value *Ptr = I.getPointerOperand();
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
    if (BaseAndOffset.first) {
      // Check if the offset of this GEP is constant, and if so accumulate it
      // into Offset.
      if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second)) {
        // Non-constant GEPs aren't folded, and disable SROA.
        if (SROACandidate)
          disableSROA(CostIt);
        return false;
      }

      // Add the result as a new mapping to Base + Offset.
      ConstantOffsetPtrs[&I] = BaseAndOffset;

      // Also handle SROA candidates here; we already know that the GEP is
      // all-constant indexed.
      if (SROACandidate)
        SROAArgValues[&I] = SROAArg;

      return true;
    }
  }

  if (isGEPOffsetConstant(I)) {
    if (SROACandidate)
      SROAArgValues[&I] = SROAArg;

    // Constant GEPs are modeled as free.
    return true;
  }

  // Variable GEPs will require math and will disable SROA.
  if (SROACandidate)
    disableSROA(CostIt);
  return false;
}

bool CallAnalyzer::visitBitCast(BitCastInst &I) {
  // Propagate constants through bitcasts.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getBitCast(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offsets through casts.
  std::pair<Value *, APInt> BaseAndOffset
    = ConstantOffsetPtrs.lookup(I.getOperand(0));
  // Casts don't change the offset, just wrap it up.
  if (BaseAndOffset.first)
    ConstantOffsetPtrs[&I] = BaseAndOffset;

  // Also look for SROA candidates here.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  // Bitcasts are always zero cost.
  return true;
}

bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
  const DataLayout *DL = I.getDataLayout();
  // Propagate constants through ptrtoint.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getPtrToInt(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when converted to a plain integer provided the
  // integer is large enough to represent the pointer.
  unsigned IntegerSize = I.getType()->getScalarSizeInBits();
  if (DL && IntegerSize >= DL->getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset
      = ConstantOffsetPtrs.lookup(I.getOperand(0));
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // This is really weird. Technically, ptrtoint will disable SROA. However,
  // unless that ptrtoint is *used* somewhere in the live basic blocks after
  // inlining, it will be nuked, and SROA should proceed. All of the uses which
  // would block SROA would also block SROA if applied directly to a pointer,
  // and so we can just add the integer in here. The only places where SROA is
  // preserved either cannot fire on an integer, or won't in-and-of themselves
  // disable SROA (ext) w/o some later use that we would see and disable.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}

bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
  const DataLayout *DL = I.getDataLayout();
  // Propagate constants through inttoptr.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getIntToPtr(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when round-tripped through a pointer without
  // modifications provided the integer is not too large.
  Value *Op = I.getOperand(0);
  unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
  if (DL && IntegerSize <= DL->getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // "Propagate" SROA here in the same manner as we do for ptrtoint above.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(Op, SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}

bool CallAnalyzer::visitCastInst(CastInst &I) {
  // Propagate constants through casts.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getCast(I.getOpcode(), COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable SROA in the face of arbitrary casts we don't whitelist elsewhere.
  disableSROA(I.getOperand(0));

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}

bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
  Value *Operand = I.getOperand(0);
  Constant *COp = dyn_cast<Constant>(Operand);
  if (!COp)
    COp = SimplifiedValues.lookup(Operand);
  if (COp)
    if (Constant *C = ConstantFoldInstOperands(I.getOpcode(), I.getType(),
                                               COp, DL)) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable any SROA on the argument to arbitrary unary operators.
  disableSROA(Operand);

  return false;
}

bool CallAnalyzer::visitCmpInst(CmpInst &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  // First try to handle simplified comparisons.
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      if (Constant *C = ConstantExpr::getCompare(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        return true;
      }
  }

  if (I.getOpcode() == Instruction::FCmp)
    return false;

  // Otherwise look for a comparison between constant offset pointers with
  // a common base.
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the icmp to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrCmps;
        return true;
      }
    }
  }
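  // For instance, if LHS and RHS both map to the same base with constant
  // offsets 4 and 8 respectively, an 'icmp ult' between them folds to true
  // above.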

  // If the comparison is an equality comparison with null, we can simplify it
  // for any alloca-derived argument.
  if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)))
    if (isAllocaDerivedArg(I.getOperand(0))) {
      // We can actually predict the result of comparisons between an
      // alloca-derived value and null. Note that this fires regardless of
      // SROA firing.
      bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
      SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
                                        : ConstantInt::getFalse(I.getType());
      return true;
    }
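  // Illustrative: an 'icmp eq' against null on a pointer derived from a
  // caller-side alloca folds to false here, since an alloca is never null.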

  // Finally check for SROA candidates in comparisons.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (isa<ConstantPointerNull>(I.getOperand(1))) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitSub(BinaryOperator &I) {
  // Try to handle a special case: we can fold computing the difference of two
  // constant-related pointers.
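  // For example, if both pointers map to a common base with constant offsets
  // 12 and 4, the subtraction folds to the constant 8.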
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the subtract to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrDiffs;
        return true;
      }
    }
  }

  // Otherwise, fall back to the generic logic for simplifying and handling
  // instructions.
  return Base::visitSub(I);
}

bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  Value *SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, DL);
  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) {
    SimplifiedValues[&I] = C;
    return true;
  }

  // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
  disableSROA(LHS);
  disableSROA(RHS);

  return false;
}

bool CallAnalyzer::visitLoad(LoadInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitStore(StoreInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
  // Constant folding for extract value is trivial.
  Constant *C = dyn_cast<Constant>(I.getAggregateOperand());
  if (!C)
    C = SimplifiedValues.lookup(I.getAggregateOperand());
  if (C) {
    SimplifiedValues[&I] = ConstantExpr::getExtractValue(C, I.getIndices());
    return true;
  }

  // SROA can look through these, but we still give them a cost.
  return false;
}

bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
  // Constant folding for insert value is trivial.
  Constant *AggC = dyn_cast<Constant>(I.getAggregateOperand());
  if (!AggC)
    AggC = SimplifiedValues.lookup(I.getAggregateOperand());
  Constant *InsertedC = dyn_cast<Constant>(I.getInsertedValueOperand());
  if (!InsertedC)
    InsertedC = SimplifiedValues.lookup(I.getInsertedValueOperand());
  if (AggC && InsertedC) {
    SimplifiedValues[&I] = ConstantExpr::getInsertValue(AggC, InsertedC,
                                                        I.getIndices());
    return true;
  }

  // SROA can look through these, but we still give them a cost.
  return false;
}

/// \brief Try to simplify a call site.
///
/// Takes a concrete function and callsite and tries to actually simplify it by
/// analyzing the arguments and call itself with instsimplify. Returns true if
/// it has simplified the callsite to some other entity (a constant), making it
/// free.
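///
/// For example, if the callee is a constant-foldable intrinsic such as
/// llvm.ctpop.i32 and its lone argument maps to a constant at this call site,
/// ConstantFoldCall produces the folded result and the call is modeled as
/// free.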
bool CallAnalyzer::simplifyCallSite(Function *F, CallSite CS) {
  // FIXME: Using the instsimplify logic directly for this is inefficient
  // because we have to continually rebuild the argument list even when no
  // simplifications can be performed. Until that is fixed with remapping
  // inside of instsimplify, directly constant fold calls here.
  if (!canConstantFoldCallTo(F))
    return false;

  // Try to re-map the arguments to constants.
  SmallVector<Constant *, 4> ConstantArgs;
  ConstantArgs.reserve(CS.arg_size());
  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I) {
    Constant *C = dyn_cast<Constant>(*I);
    if (!C)
      C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(*I));
    if (!C)
      return false; // This argument doesn't map to a constant.

    ConstantArgs.push_back(C);
  }
  if (Constant *C = ConstantFoldCall(F, ConstantArgs)) {
    SimplifiedValues[CS.getInstruction()] = C;
    return true;
  }

  return false;
}

bool CallAnalyzer::visitCallSite(CallSite CS) {
  if (CS.hasFnAttr(Attribute::ReturnsTwice) &&
      !F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                      Attribute::ReturnsTwice)) {
    // This aborts the entire analysis.
    ExposesReturnsTwice = true;
    return false;
  }
  if (CS.isCall() &&
      cast<CallInst>(CS.getInstruction())->cannotDuplicate())
    ContainsNoDuplicateCall = true;

  if (Function *F = CS.getCalledFunction()) {
    // When we have a concrete function, first try to simplify it directly.
    if (simplifyCallSite(F, CS))
      return true;

    // Next check if it is an intrinsic we know about.
    // FIXME: Lift this into part of the InstVisitor.
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
      switch (II->getIntrinsicID()) {
      default:
        return Base::visitCallSite(CS);

      case Intrinsic::memset:
      case Intrinsic::memcpy:
      case Intrinsic::memmove:
        // SROA can usually chew through these intrinsics, but they aren't free.
        return false;
      }
    }

    if (F == CS.getInstruction()->getParent()->getParent()) {
      // This flag will fully abort the analysis, so don't bother with anything
      // else.
      IsRecursiveCall = true;
      return false;
    }

    if (TTI.isLoweredToCall(F)) {
      // We account for an average of one instruction per call argument for
      // setup here.
      Cost += CS.arg_size() * InlineConstants::InstrCost;

      // Everything other than inline ASM will also have a significant cost
      // merely from making the call.
      if (!isa<InlineAsm>(CS.getCalledValue()))
        Cost += InlineConstants::CallPenalty;
    }

    return Base::visitCallSite(CS);
  }

  // Otherwise we're in a very special case -- an indirect function call. See
  // if we can be particularly clever about this.
  Value *Callee = CS.getCalledValue();

  // First, pay the price of the argument setup. We account for an average of
  // one instruction per call argument here.
  Cost += CS.arg_size() * InlineConstants::InstrCost;

  // Next, check if this happens to be an indirect function call to a known
  // function in this inline context. If not, we've done all we can.
  Function *F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
  if (!F)
    return Base::visitCallSite(CS);

  // If we have a constant that we are calling as a function, we can peer
  // through it and see the function target. This happens not infrequently
  // during devirtualization and so we want to give it a hefty bonus for
  // inlining, but cap that bonus in the event that inlining wouldn't pan
  // out. Pretend to inline the function, with a custom threshold.
  CallAnalyzer CA(DL, TTI, *F, InlineConstants::IndirectCallThreshold);
  if (CA.analyzeCall(CS)) {
    // We were able to inline the indirect call! Subtract the cost from the
    // bonus we want to apply, but don't go below zero.
    Cost -= std::max(0, InlineConstants::IndirectCallThreshold - CA.getCost());
  }

  return Base::visitCallSite(CS);
}

bool CallAnalyzer::visitReturnInst(ReturnInst &RI) {
  // At least one return instruction will be free after inlining.
  bool Free = !HasReturn;
  HasReturn = true;
  return Free;
}

bool CallAnalyzer::visitBranchInst(BranchInst &BI) {
  // We model unconditional branches as essentially free -- they really
  // shouldn't exist at all, but handling them makes the behavior of the
  // inliner more regular and predictable. Interestingly, conditional branches
  // which will fold away are also free.
  return BI.isUnconditional() || isa<ConstantInt>(BI.getCondition()) ||
         dyn_cast_or_null<ConstantInt>(
             SimplifiedValues.lookup(BI.getCondition()));
}

bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) {
  // We model switches on a constant condition as free, matching the handling
  // of branches above.
  if (isa<ConstantInt>(SI.getCondition()))
    return true;
  if (Value *V = SimplifiedValues.lookup(SI.getCondition()))
    if (isa<ConstantInt>(V))
      return true;

  // Otherwise, we need to accumulate a cost proportional to the number of
  // distinct successor blocks. This fan-out in the CFG cannot be represented
  // for free even if we can represent the core switch as a jumptable that
  // takes a single instruction.
  //
  // NB: We convert large switches which are just used to initialize large phi
  // nodes to lookup tables instead in simplify-cfg, so this shouldn't prevent
  // inlining those. It will prevent inlining in cases where the optimization
  // does not (yet) fire.
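  //
  // Illustrative arithmetic: a switch with four cases targeting four distinct
  // blocks plus a separate default block has five distinct successors, so the
  // code below adds 4 * InlineConstants::InstrCost.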
  SmallPtrSet<BasicBlock *, 8> SuccessorBlocks;
  SuccessorBlocks.insert(SI.getDefaultDest());
  for (auto I = SI.case_begin(), E = SI.case_end(); I != E; ++I)
    SuccessorBlocks.insert(I.getCaseSuccessor());
  // Add a cost corresponding to the number of distinct destinations; we model
  // the first as free because of fallthrough.
  Cost += (SuccessorBlocks.size() - 1) * InlineConstants::InstrCost;
  return false;
}

bool CallAnalyzer::visitIndirectBrInst(IndirectBrInst &IBI) {
  // We never want to inline functions that contain an indirectbr. Inlining
  // one would be incorrect because all the blockaddress's (in static global
  // initializers for example) would still refer to the original function, and
  // the indirect jump would jump from the inlined copy of the function into
  // the original function, which would be undefined behavior.
  // FIXME: This logic isn't really right; we can safely inline functions with
  // indirectbr's as long as no other function or global references the
  // blockaddress of a block within the current function.
  HasIndirectBr = true;
  return false;
}

bool CallAnalyzer::visitResumeInst(ResumeInst &RI) {
  // FIXME: It's not clear that a single instruction is an accurate model for
  // the inline cost of a resume instruction.
  return false;
}

bool CallAnalyzer::visitUnreachableInst(UnreachableInst &I) {
  // FIXME: It might be reasonable to discount the cost of instructions leading
  // to unreachable as they have the lowest possible impact on both runtime and
  // code size.
  return true; // No actual code is needed for unreachable.
}

bool CallAnalyzer::visitInstruction(Instruction &I) {
  // Some instructions are free. All of the free intrinsics can also be
  // handled by SROA, etc.
  if (TargetTransformInfo::TCC_Free == TTI.getUserCost(&I))
    return true;

  // We found something we don't understand or can't handle. Mark any SROA-able
  // values in the operand list as no longer viable.
  for (User::op_iterator OI = I.op_begin(), OE = I.op_end(); OI != OE; ++OI)
    disableSROA(*OI);

  return false;
}

/// \brief Analyze a basic block for its contribution to the inline cost.
///
/// This method walks the analyzer over every instruction in the given basic
/// block and accounts for their cost during inlining at this callsite. It
/// aborts early if the threshold has been exceeded or an impossible-to-inline
/// construct has been detected. It returns false if inlining is no longer
/// viable, and true if inlining remains viable.
bool CallAnalyzer::analyzeBlock(BasicBlock *BB) {
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    // FIXME: Currently, the number of instructions in a function, regardless
    // of our ability to simplify them during inlining to constants or dead
    // code, is actually used by the vector bonus heuristic. As long as that's
    // true, we have to special case debug intrinsics here to prevent
    // differences in inlining due to debug symbols. Eventually, the number of
    // unsimplified instructions shouldn't factor into the cost computation,
    // but until then, hack around it here.
    if (isa<DbgInfoIntrinsic>(I))
      continue;

    ++NumInstructions;
    if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
      ++NumVectorInstructions;

    // If the instruction simplified to a constant, there is no cost to this
    // instruction. Visit the instructions using our InstVisitor to account for
    // all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's base
    // cost should count against inlining.
    if (Base::visit(I))
      ++NumInstructionsSimplified;
    else
      Cost += InlineConstants::InstrCost;

    // If visiting this instruction detected an uninlinable pattern, abort.
    if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca ||
        HasIndirectBr)
      return false;

    // If the caller is a recursive function then we don't want to inline
    // functions which allocate a lot of stack space because it would increase
    // the caller stack usage dramatically.
    if (IsCallerRecursive &&
        AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
      return false;

    if (NumVectorInstructions > NumInstructions/2)
      VectorBonus = FiftyPercentVectorBonus;
    else if (NumVectorInstructions > NumInstructions/10)
      VectorBonus = TenPercentVectorBonus;
    else
      VectorBonus = 0;

    // Check if we've passed the threshold so we don't spin in huge basic
    // blocks that will never inline.
    if (Cost > (Threshold + VectorBonus))
      return false;
  }

  return true;
}

/// \brief Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
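///
/// For example, given '%p = getelementptr inbounds i32* %base, i64 3', this
/// strips %p down to %base and returns the constant 12 (assuming 64-bit
/// pointers and a 4-byte i32).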
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
  if (!DL || !V->getType()->isPointerTy())
    return nullptr;

  unsigned IntPtrWidth = DL->getPointerSizeInBits();
  APInt Offset = APInt::getNullValue(IntPtrWidth);

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(V);
  do {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
        return nullptr;
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->mayBeOverridden())
        break;
      V = GA->getAliasee();
    } else {
      break;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(V));

  Type *IntPtrTy = DL->getIntPtrType(V->getContext());
  return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}

/// \brief Analyze a call site for potential inlining.
///
/// Returns true if inlining this call is viable, and false if it is not
/// viable. It computes the cost and adjusts the threshold based on numerous
/// factors and heuristics. If this method returns false but the computed cost
/// is below the computed threshold, then inlining was forcibly disabled by
/// some artifact of the routine.
bool CallAnalyzer::analyzeCall(CallSite CS) {
  ++NumCallsAnalyzed;

  // Track whether the post-inlining function would have more than one basic
  // block. A single basic block is often intended for inlining. Balloon the
  // threshold by 50% until we pass the single-BB phase.
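  // For example, with a threshold of 225 the working threshold becomes 337,
  // dropping back once a block with multiple live successors is reached.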
  bool SingleBB = true;
  int SingleBBBonus = Threshold / 2;
  Threshold += SingleBBBonus;

  // Perform some tweaks to the cost and threshold based on the direct
  // callsite information.

  // We want to more aggressively inline vector-dense kernels, so up the
  // threshold, and we'll lower it if the % of vector instructions gets too
  // low.
  assert(NumInstructions == 0);
  assert(NumVectorInstructions == 0);
  FiftyPercentVectorBonus = Threshold;
  TenPercentVectorBonus = Threshold / 2;

  // Give out bonuses per argument, as the instructions setting them up will
  // be gone after inlining.
  for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
    if (DL && CS.isByValArgument(I)) {
      // We approximate the number of loads and stores needed by dividing the
      // size of the byval type by the target's pointer size.
      PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
      unsigned TypeSize = DL->getTypeSizeInBits(PTy->getElementType());
      unsigned PointerSize = DL->getPointerSizeInBits();
      // Ceiling division.
      unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;

      // If it generates more than 8 stores it is likely to be expanded as an
      // inline memcpy so we take that as an upper bound. Otherwise we assume
      // one load and one store per word copied.
      // FIXME: The maxStoresPerMemcpy setting from the target should be used
      // here instead of a magic number of 8, but it's not available via
      // DataLayout.
      NumStores = std::min(NumStores, 8U);
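      // Illustrative arithmetic: a 256-byte (2048-bit) byval type with 64-bit
      // pointers yields NumStores = ceil(2048 / 64) = 32, capped at 8, so the
      // credit below is 2 * 8 * InlineConstants::InstrCost.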

      Cost -= 2 * NumStores * InlineConstants::InstrCost;
    } else {
      // For non-byval arguments subtract off one instruction per call
      // argument.
      Cost -= InlineConstants::InstrCost;
    }
  }

  // If there is only one call of the function, and it has internal linkage,
  // the cost of inlining it drops dramatically.
  bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() &&
    &F == CS.getCalledFunction();
  if (OnlyOneCallAndLocalLinkage)
    Cost += InlineConstants::LastCallToStaticBonus;

  // If the instruction after the call, or the normal destination of an
  // invoke, is an unreachable instruction, the function is effectively
  // noreturn. As such, there is little point in inlining this unless there is
  // literally zero cost.
  Instruction *Instr = CS.getInstruction();
  if (InvokeInst *II = dyn_cast<InvokeInst>(Instr)) {
    if (isa<UnreachableInst>(II->getNormalDest()->begin()))
      Threshold = 1;
  } else if (isa<UnreachableInst>(++BasicBlock::iterator(Instr)))
    Threshold = 1;

  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (F.getCallingConv() == CallingConv::Cold)
    Cost += InlineConstants::ColdccPenalty;

  // Check if we're done. This can happen due to bonuses and penalties.
  if (Cost > Threshold)
    return false;

  if (F.empty())
    return true;

  Function *Caller = CS.getInstruction()->getParent()->getParent();
  // Check if the caller function is recursive itself.
  for (User *U : Caller->users()) {
    CallSite Site(U);
    if (!Site)
      continue;
    Instruction *I = Site.getInstruction();
    if (I->getParent()->getParent() == Caller) {
      IsCallerRecursive = true;
      break;
    }
  }

  // Populate our simplified values by mapping from function arguments to call
  // arguments with known important simplifications.
  CallSite::arg_iterator CAI = CS.arg_begin();
  for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
       FAI != FAE; ++FAI, ++CAI) {
    assert(CAI != CS.arg_end());
    if (Constant *C = dyn_cast<Constant>(CAI))
      SimplifiedValues[FAI] = C;

    Value *PtrArg = *CAI;
    if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
      ConstantOffsetPtrs[FAI] = std::make_pair(PtrArg, C->getValue());

      // We can SROA any pointer arguments derived from alloca instructions.
      if (isa<AllocaInst>(PtrArg)) {
        SROAArgValues[FAI] = PtrArg;
        SROAArgCosts[PtrArg] = 0;
      }
    }
  }
  NumConstantArgs = SimplifiedValues.size();
  NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
  NumAllocaArgs = SROAArgValues.size();

  // The worklist of live basic blocks in the callee *after* inlining. We avoid
  // adding basic blocks of the callee which can be proven to be dead for this
  // particular call site in order to get more accurate cost estimates. This
  // requires a somewhat heavyweight iteration pattern: we need to walk the
  // basic blocks in a breadth-first order as we insert live successors. We
  // accomplish this with a small-size optimized SetVector, favoring small
  // iteration counts because we exit early once we cross our threshold.
  typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
                                  SmallPtrSet<BasicBlock *, 16> > BBSetVector;
  BBSetVector BBWorklist;
  BBWorklist.insert(&F.getEntryBlock());
  // Note that we *must not* cache the size, this loop grows the worklist.
  for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
    // Bail out the moment we cross the threshold. This means we'll under-count
    // the cost, but only when undercounting doesn't matter.
    if (Cost > (Threshold + VectorBonus))
      break;

    BasicBlock *BB = BBWorklist[Idx];
    if (BB->empty())
      continue;

    // Disallow inlining a blockaddress. A blockaddress only has defined
    // behavior for an indirect branch in the same function, and we do not
    // currently support inlining indirect branches. But, the inliner may not
    // see an indirect branch that ends up being dead code at a particular call
    // site. If the blockaddress escapes the function, e.g., via a global
    // variable, inlining may lead to an invalid cross-function reference.
    if (BB->hasAddressTaken())
      return false;

    // Analyze the cost of this block. If we blow through the threshold, this
    // returns false, and we can bail out.
    if (!analyzeBlock(BB)) {
      if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca ||
          HasIndirectBr)
        return false;

      // If the caller is a recursive function then we don't want to inline
      // functions which allocate a lot of stack space because it would increase
      // the caller stack usage dramatically.
      if (IsCallerRecursive &&
          AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
        return false;

      break;
    }

    TerminatorInst *TI = BB->getTerminator();

    // Add in the live successors by first checking whether we have a
    // terminator that may be simplified based on the values simplified by
    // this call.
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional()) {
        Value *Cond = BI->getCondition();
        if (ConstantInt *SimpleCond
              = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
          BBWorklist.insert(BI->getSuccessor(SimpleCond->isZero() ? 1 : 0));
          continue;
        }
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      Value *Cond = SI->getCondition();
      if (ConstantInt *SimpleCond
            = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
        BBWorklist.insert(SI->findCaseValue(SimpleCond).getCaseSuccessor());
        continue;
      }
    }

    // If we're unable to select a particular successor, just count all of
    // them.
    for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
         ++TIdx)
      BBWorklist.insert(TI->getSuccessor(TIdx));

    // If we had any successors at this point, then post-inlining is likely to
    // have them as well. Note that we assume any basic blocks which existed
    // due to branches or switches which folded above will also fold after
    // inlining.
    if (SingleBB && TI->getNumSuccessors() > 1) {
      // Take off the bonus we applied to the threshold.
      Threshold -= SingleBBBonus;
      SingleBB = false;
    }
  }

  // If this is a noduplicate call, we can still inline as long as
  // inlining this would cause the removal of the callee (so the instruction
  // is not actually duplicated, just moved).
  if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
    return false;

  Threshold += VectorBonus;

  return Cost < Threshold;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// \brief Dump stats about this call's analysis.
void CallAnalyzer::dump() {
#define DEBUG_PRINT_STAT(x) dbgs() << "      " #x ": " << x << "\n"
  DEBUG_PRINT_STAT(NumConstantArgs);
  DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
  DEBUG_PRINT_STAT(NumAllocaArgs);
  DEBUG_PRINT_STAT(NumConstantPtrCmps);
  DEBUG_PRINT_STAT(NumConstantPtrDiffs);
  DEBUG_PRINT_STAT(NumInstructionsSimplified);
  DEBUG_PRINT_STAT(SROACostSavings);
  DEBUG_PRINT_STAT(SROACostSavingsLost);
  DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
  DEBUG_PRINT_STAT(Cost);
  DEBUG_PRINT_STAT(Threshold);
  DEBUG_PRINT_STAT(VectorBonus);
#undef DEBUG_PRINT_STAT
}
#endif

INITIALIZE_PASS_BEGIN(InlineCostAnalysis, "inline-cost", "Inline Cost Analysis",
                      true, true)
INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
INITIALIZE_PASS_END(InlineCostAnalysis, "inline-cost", "Inline Cost Analysis",
                    true, true)

char InlineCostAnalysis::ID = 0;

InlineCostAnalysis::InlineCostAnalysis() : CallGraphSCCPass(ID) {}

InlineCostAnalysis::~InlineCostAnalysis() {}

void InlineCostAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<TargetTransformInfo>();
  CallGraphSCCPass::getAnalysisUsage(AU);
}

bool InlineCostAnalysis::runOnSCC(CallGraphSCC &SCC) {
  TTI = &getAnalysis<TargetTransformInfo>();
  return false;
}

InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, int Threshold) {
  return getInlineCost(CS, CS.getCalledFunction(), Threshold);
}

/// \brief Test whether two functions either both have or both lack the given
///        attribute.
static bool attributeMatches(Function *F1, Function *F2,
                             Attribute::AttrKind Attr) {
  return F1->hasFnAttribute(Attr) == F2->hasFnAttribute(Attr);
}

/// \brief Test that there are no attribute conflicts between Caller and Callee
///        that prevent inlining.
static bool functionsHaveCompatibleAttributes(Function *Caller,
                                              Function *Callee) {
  return attributeMatches(Caller, Callee, Attribute::SanitizeAddress) &&
         attributeMatches(Caller, Callee, Attribute::SanitizeMemory) &&
         attributeMatches(Caller, Callee, Attribute::SanitizeThread);
}

InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, Function *Callee,
                                             int Threshold) {
  // Cannot inline indirect calls.
  if (!Callee)
    return llvm::InlineCost::getNever();

  // Calls to functions with always-inline attributes should be inlined
  // whenever possible.
  if (CS.hasFnAttr(Attribute::AlwaysInline)) {
    if (isInlineViable(*Callee))
      return llvm::InlineCost::getAlways();
    return llvm::InlineCost::getNever();
  }

  // Never inline functions with conflicting attributes (unless callee has
  // always-inline attribute).
  if (!functionsHaveCompatibleAttributes(CS.getCaller(), Callee))
    return llvm::InlineCost::getNever();

  // Don't inline this call if the caller has the optnone attribute.
  if (CS.getCaller()->hasFnAttribute(Attribute::OptimizeNone))
    return llvm::InlineCost::getNever();

  // Don't inline functions which can be redefined at link-time to mean
  // something else. Don't inline functions marked noinline or call sites
  // marked noinline.
  if (Callee->mayBeOverridden() ||
      Callee->hasFnAttribute(Attribute::NoInline) || CS.isNoInline())
    return llvm::InlineCost::getNever();

  DEBUG(llvm::dbgs() << "      Analyzing call of " << Callee->getName()
        << "...\n");

  CallAnalyzer CA(Callee->getDataLayout(), *TTI, *Callee, Threshold);
  bool ShouldInline = CA.analyzeCall(CS);

  DEBUG(CA.dump());

  // Check if there was a reason to force inlining or to forbid it.
  if (!ShouldInline && CA.getCost() < CA.getThreshold())
    return InlineCost::getNever();
  if (ShouldInline && CA.getCost() >= CA.getThreshold())
    return InlineCost::getAlways();

  return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
}

bool InlineCostAnalysis::isInlineViable(Function &F) {
  bool ReturnsTwice =
    F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                   Attribute::ReturnsTwice);
  for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
    // Disallow inlining of functions which contain indirect branches or
    // blockaddresses.
    if (isa<IndirectBrInst>(BI->getTerminator()) || BI->hasAddressTaken())
      return false;

    for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
         ++II) {
      CallSite CS(II);
      if (!CS)
        continue;

      // Disallow recursive calls.
      if (&F == CS.getCalledFunction())
        return false;

      // Disallow calls which expose returns-twice to a function not previously
      // attributed as such.
      if (!ReturnsTwice && CS.isCall() &&
          cast<CallInst>(CS.getInstruction())->canReturnTwice())
        return false;
    }
  }

  return true;
}
   1337