//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "inline-cost"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/CallingConv.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Operator.h"
#include "llvm/GlobalAlias.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"

using namespace llvm;

STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");

namespace {

class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
  typedef InstVisitor<CallAnalyzer, bool> Base;
  friend class InstVisitor<CallAnalyzer, bool>;

  // TargetData if available, or null.
  const TargetData *const TD;

  // The called function.
  Function &F;

  int Threshold;
  int Cost;
  const bool AlwaysInline;

  bool IsRecursive;
  bool ExposesReturnsTwice;
  bool HasDynamicAlloca;
  unsigned NumInstructions, NumVectorInstructions;
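  // Bonuses applied to the threshold when the inlined code is dense with
  // vector instructions: analyzeCall sizes these from the threshold, and
  // analyzeBlock selects between them based on the fraction of vector
  // instructions seen so far.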
  int FiftyPercentVectorBonus, TenPercentVectorBonus;
  int VectorBonus;

  // While we walk the potentially-inlined instructions, we build up and
  // maintain a mapping of simplified values specific to this callsite. The
  // idea is to propagate any special information we have about arguments to
  // this call through the inlinable section of the function, and account for
  // likely simplifications post-inlining. The most important aspect we track
  // is CFG altering simplifications -- when we prove a basic block dead, that
  // can cause dramatic shifts in the cost of inlining a function.
  DenseMap<Value *, Constant *> SimplifiedValues;

  // Keep track of the values which map back (through function arguments) to
  // allocas on the caller stack which could be simplified through SROA.
  DenseMap<Value *, Value *> SROAArgValues;

  // The mapping of caller Alloca values to their accumulated cost savings. If
  // we have to disable SROA for one of the allocas, this tells us how much
  // cost must be added.
  DenseMap<Value *, int> SROAArgCosts;

  // Keep track of values which map to a pointer base and constant offset.
  DenseMap<Value *, std::pair<Value *, APInt> > ConstantOffsetPtrs;

  // Custom simplification helper routines.
  bool isAllocaDerivedArg(Value *V);
  bool lookupSROAArgAndCost(Value *V, Value *&Arg,
                            DenseMap<Value *, int>::iterator &CostIt);
  void disableSROA(DenseMap<Value *, int>::iterator CostIt);
  void disableSROA(Value *V);
  void accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                          int InstructionCost);
  bool handleSROACandidate(bool IsSROAValid,
                           DenseMap<Value *, int>::iterator CostIt,
                           int InstructionCost);
  bool isGEPOffsetConstant(GetElementPtrInst &GEP);
  bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
  ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);

  // Custom analysis routines.
  bool analyzeBlock(BasicBlock *BB);

  // Disable several entry points to the visitor so we don't accidentally use
  // them by declaring but not defining them here.
  void visit(Module *);     void visit(Module &);
  void visit(Function *);   void visit(Function &);
  void visit(BasicBlock *); void visit(BasicBlock &);

  // Provide base case for our instruction visit.
  bool visitInstruction(Instruction &I);

  // Our visit overrides.
  bool visitAlloca(AllocaInst &I);
  bool visitPHI(PHINode &I);
  bool visitGetElementPtr(GetElementPtrInst &I);
  bool visitBitCast(BitCastInst &I);
  bool visitPtrToInt(PtrToIntInst &I);
  bool visitIntToPtr(IntToPtrInst &I);
  bool visitCastInst(CastInst &I);
  bool visitUnaryInstruction(UnaryInstruction &I);
  bool visitICmp(ICmpInst &I);
  bool visitSub(BinaryOperator &I);
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoad(LoadInst &I);
  bool visitStore(StoreInst &I);
  bool visitCallSite(CallSite CS);

public:
  CallAnalyzer(const TargetData *TD, Function &Callee, int Threshold)
    : TD(TD), F(Callee), Threshold(Threshold), Cost(0),
      AlwaysInline(F.hasFnAttr(Attribute::AlwaysInline)),
      IsRecursive(false), ExposesReturnsTwice(false), HasDynamicAlloca(false),
      NumInstructions(0), NumVectorInstructions(0),
      FiftyPercentVectorBonus(0), TenPercentVectorBonus(0), VectorBonus(0),
      NumConstantArgs(0), NumConstantOffsetPtrArgs(0), NumAllocaArgs(0),
      NumConstantPtrCmps(0), NumConstantPtrDiffs(0),
      NumInstructionsSimplified(0), SROACostSavings(0), SROACostSavingsLost(0) {
  }

  bool analyzeCall(CallSite CS);

  int getThreshold() { return Threshold; }
  int getCost() { return Cost; }

  // Keep a bunch of stats about the cost savings found so we can print them
  // out when debugging.
  unsigned NumConstantArgs;
  unsigned NumConstantOffsetPtrArgs;
  unsigned NumAllocaArgs;
  unsigned NumConstantPtrCmps;
  unsigned NumConstantPtrDiffs;
  unsigned NumInstructionsSimplified;
  unsigned SROACostSavings;
  unsigned SROACostSavingsLost;

  void dump();
};

} // namespace

/// \brief Test whether the given value is an Alloca-derived function argument.
bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
  return SROAArgValues.count(V);
}

/// \brief Look up the SROA-candidate argument and cost iterator which V maps
/// to. Returns false if V does not map to a SROA-candidate.
bool CallAnalyzer::lookupSROAArgAndCost(
    Value *V, Value *&Arg, DenseMap<Value *, int>::iterator &CostIt) {
  if (SROAArgValues.empty() || SROAArgCosts.empty())
    return false;

  DenseMap<Value *, Value *>::iterator ArgIt = SROAArgValues.find(V);
  if (ArgIt == SROAArgValues.end())
    return false;

  Arg = ArgIt->second;
  CostIt = SROAArgCosts.find(Arg);
  return CostIt != SROAArgCosts.end();
}

/// \brief Disable SROA for the candidate marked by this cost iterator.
///
/// This marks the candidate as no longer viable for SROA, and adds the cost
/// savings associated with it back into the inline cost measurement.
void CallAnalyzer::disableSROA(DenseMap<Value *, int>::iterator CostIt) {
  // If we're no longer able to perform SROA we need to undo its cost savings
  // and prevent subsequent analysis.
  Cost += CostIt->second;
  SROACostSavings -= CostIt->second;
  SROACostSavingsLost += CostIt->second;
  SROAArgCosts.erase(CostIt);
}

/// \brief If 'V' maps to a SROA candidate, disable SROA for it.
void CallAnalyzer::disableSROA(Value *V) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(V, SROAArg, CostIt))
    disableSROA(CostIt);
}

/// \brief Accumulate the given cost for a particular SROA candidate.
void CallAnalyzer::accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                                      int InstructionCost) {
  CostIt->second += InstructionCost;
  SROACostSavings += InstructionCost;
}

/// \brief Helper for the common pattern of handling a SROA candidate.
/// Either accumulates the cost savings if the SROA remains valid, or disables
/// SROA for the candidate.
bool CallAnalyzer::handleSROACandidate(bool IsSROAValid,
                                       DenseMap<Value *, int>::iterator CostIt,
                                       int InstructionCost) {
  if (IsSROAValid) {
    accumulateSROACost(CostIt, InstructionCost);
    return true;
  }

  disableSROA(CostIt);
  return false;
}

/// \brief Check whether a GEP's indices are all constant.
///
/// Respects any simplified values known during the analysis of this callsite.
bool CallAnalyzer::isGEPOffsetConstant(GetElementPtrInst &GEP) {
  for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
    if (!isa<Constant>(*I) && !SimplifiedValues.lookup(*I))
      return false;

  return true;
}

/// \brief Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
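///
/// As a hypothetical illustration, given a typical data layout where
/// { i32, i32 } occupies 8 bytes, the GEP
///   getelementptr inbounds { i32, i32 }* %p, i64 1, i32 1
/// accumulates one 8-byte struct stride for the i64 index plus a 4-byte field
/// offset for the i32 index, for a total offset of 12. The exact values
/// always come from the TargetData struct layout queries.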
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
  if (!TD)
    return false;

  unsigned IntPtrWidth = TD->getPointerSizeInBits();
  assert(IntPtrWidth == Offset.getBitWidth());

  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
        OpC = dyn_cast<ConstantInt>(SimpleOp);
    if (!OpC)
      return false;
    if (OpC->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = TD->getStructLayout(STy);
      Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
      continue;
    }

    APInt TypeSize(IntPtrWidth, TD->getTypeAllocSize(GTI.getIndexedType()));
    Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
  }
  return true;
}

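/// \brief Visit an alloca instruction.
///
/// Static allocas fall through to the generic instruction handling, while a
/// dynamic alloca aborts the analysis entirely unless we are always inlining;
/// see the FIXMEs below.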
bool CallAnalyzer::visitAlloca(AllocaInst &I) {
  // FIXME: Check whether inlining will turn a dynamic alloca into a static
  // alloca, and handle that case.

  // We will happily inline static alloca instructions or dynamic alloca
  // instructions in always-inline situations.
  if (AlwaysInline || I.isStaticAlloca())
    return Base::visitAlloca(I);

  // FIXME: This is overly conservative. Dynamic allocas are inefficient for
  // a variety of reasons, and so we would like to not inline them into
  // functions which don't currently have a dynamic alloca. This simply
  // disables inlining altogether in the presence of a dynamic alloca.
  HasDynamicAlloca = true;
  return false;
}

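/// \brief Visit a phi node, which is currently modeled as free; see the
/// FIXMEs below for the tracking we would eventually like to do here.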
bool CallAnalyzer::visitPHI(PHINode &I) {
  // FIXME: We should potentially be tracking values through phi nodes,
  // especially when they collapse to a single value due to deleted CFG edges
  // during inlining.

  // FIXME: We need to propagate SROA *disabling* through phi nodes, even
  // though we don't want to propagate its bonuses. The idea is to disable
  // SROA if it *might* be used in an inappropriate manner.

  // Phi nodes are always zero-cost.
  return true;
}

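/// \brief Visit a GEP, folding constant offsets and tracking SROA candidates.
///
/// GEPs whose indices are all effectively constant are modeled as free; a GEP
/// with a variable index costs an instruction and disqualifies its pointer
/// from SROA.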
bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  bool SROACandidate = lookupSROAArgAndCost(I.getPointerOperand(),
                                            SROAArg, CostIt);

  // Try to fold GEPs of constant-offset call site argument pointers. This
  // requires target data and inbounds GEPs.
  if (TD && I.isInBounds()) {
    // Check if we have a base + offset for the pointer.
    Value *Ptr = I.getPointerOperand();
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
    if (BaseAndOffset.first) {
      // Check if the offset of this GEP is constant, and if so accumulate it
      // into Offset.
      if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second)) {
        // Non-constant GEPs aren't folded, and disable SROA.
        if (SROACandidate)
          disableSROA(CostIt);
        return false;
      }

      // Add the result as a new mapping to Base + Offset.
      ConstantOffsetPtrs[&I] = BaseAndOffset;

      // Also handle SROA candidates here; we already know that the GEP is
      // all-constant indexed.
      if (SROACandidate)
        SROAArgValues[&I] = SROAArg;

      return true;
    }
  }

  if (isGEPOffsetConstant(I)) {
    if (SROACandidate)
      SROAArgValues[&I] = SROAArg;

    // Constant GEPs are modeled as free.
    return true;
  }

  // Variable GEPs will require math and will disable SROA.
  if (SROACandidate)
    disableSROA(CostIt);
  return false;
}

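/// \brief Visit a bitcast, which is modeled as free.
///
/// Constants, base-and-offset pairs, and SROA candidacy all propagate
/// unchanged through bitcasts.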
bool CallAnalyzer::visitBitCast(BitCastInst &I) {
  // Propagate constants through bitcasts.
  if (Constant *COp = dyn_cast<Constant>(I.getOperand(0)))
    if (Constant *C = ConstantExpr::getBitCast(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offsets through casts.
  std::pair<Value *, APInt> BaseAndOffset
    = ConstantOffsetPtrs.lookup(I.getOperand(0));
  // Casts don't change the offset, just wrap it up.
  if (BaseAndOffset.first)
    ConstantOffsetPtrs[&I] = BaseAndOffset;

  // Also look for SROA candidates here.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  // Bitcasts are always zero cost.
  return true;
}

bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
  // Propagate constants through ptrtoint.
  if (Constant *COp = dyn_cast<Constant>(I.getOperand(0)))
    if (Constant *C = ConstantExpr::getPtrToInt(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when converted to a plain integer provided the
  // integer is large enough to represent the pointer.
  unsigned IntegerSize = I.getType()->getScalarSizeInBits();
  if (TD && IntegerSize >= TD->getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset
      = ConstantOffsetPtrs.lookup(I.getOperand(0));
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // This is really weird. Technically, ptrtoint will disable SROA. However,
  // unless that ptrtoint is *used* somewhere in the live basic blocks after
  // inlining, it will be nuked, and SROA should proceed. All of the uses which
  // would block SROA would also block SROA if applied directly to a pointer,
  // and so we can just add the integer in here. The only places where SROA is
  // preserved either cannot fire on an integer, or won't in-and-of themselves
  // disable SROA (ext) w/o some later use that we would see and disable.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  // A ptrtoint cast is free so long as the result is large enough to store the
  // pointer, and a legal integer type.
  return TD && TD->isLegalInteger(IntegerSize) &&
         IntegerSize >= TD->getPointerSizeInBits();
}

bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
  // Propagate constants through inttoptr.
  if (Constant *COp = dyn_cast<Constant>(I.getOperand(0)))
    if (Constant *C = ConstantExpr::getIntToPtr(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when round-tripped through a pointer without
  // modifications provided the integer is not too large.
  Value *Op = I.getOperand(0);
  unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
  if (TD && IntegerSize <= TD->getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // "Propagate" SROA here in the same manner as we do for ptrtoint above.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(Op, SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  // An inttoptr cast is free so long as the input is a legal integer type
  // which doesn't contain values outside the range of a pointer.
  return TD && TD->isLegalInteger(IntegerSize) &&
         IntegerSize <= TD->getPointerSizeInBits();
}

bool CallAnalyzer::visitCastInst(CastInst &I) {
  // Propagate constants through casts.
  if (Constant *COp = dyn_cast<Constant>(I.getOperand(0)))
    if (Constant *C = ConstantExpr::getCast(I.getOpcode(), COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable SROA in the face of arbitrary casts we don't whitelist elsewhere.
  disableSROA(I.getOperand(0));

  // No-op casts don't have any cost.
  if (I.isLosslessCast())
    return true;

  // trunc to a native type is free (assuming the target has compare and
  // shift-right of the same width).
  if (TD && isa<TruncInst>(I) &&
      TD->isLegalInteger(TD->getTypeSizeInBits(I.getType())))
    return true;

  // Result of a cmp instruction is often extended (to be used by other
  // cmp instructions, logical or return instructions). These are usually
  // no-ops on most sane targets.
  if (isa<CmpInst>(I.getOperand(0)))
    return true;

  // Assume the rest of the casts require work.
  return false;
}

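/// \brief Fall-back handling for unary instructions.
///
/// Attempts constant folding through any simplified operand; an unfolded
/// instruction gets its base cost and disables SROA on its operand.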
bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
  Value *Operand = I.getOperand(0);
  Constant *Ops[1] = { dyn_cast<Constant>(Operand) };
  if (Ops[0] || (Ops[0] = SimplifiedValues.lookup(Operand)))
    if (Constant *C = ConstantFoldInstOperands(I.getOpcode(), I.getType(),
                                               Ops, TD)) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable any SROA on the argument to arbitrary unary operators.
  disableSROA(Operand);

  return false;
}

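/// \brief Visit an integer or pointer comparison, folding it where possible.
///
/// Comparisons fold when both sides simplify to constants, when both sides
/// are constant offsets from a common base pointer, or when an alloca-derived
/// value is equality-compared against null.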
bool CallAnalyzer::visitICmp(ICmpInst &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  // First try to handle simplified comparisons.
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        return true;
      }

  // Otherwise look for a comparison between constant offset pointers with
  // a common base.
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  llvm::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    llvm::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases; fold the icmp to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrCmps;
        return true;
      }
    }
  }

  // If the comparison is an equality comparison with null, we can simplify it
  // for any alloca-derived argument.
  if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)))
    if (isAllocaDerivedArg(I.getOperand(0))) {
      // We can actually predict the result of comparisons between an
      // alloca-derived value and null. Note that this fires regardless of
      // SROA firing.
      bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
      SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
                                        : ConstantInt::getFalse(I.getType());
      return true;
    }

  // Finally check for SROA candidates in comparisons.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (isa<ConstantPointerNull>(I.getOperand(1))) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

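/// \brief Visit a subtract, special-casing pointer differences.
///
/// The difference of two constant offsets from a common base folds to a
/// constant; everything else defers to the generic binary operator logic.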
bool CallAnalyzer::visitSub(BinaryOperator &I) {
  // Try to handle a special case: we can fold computing the difference of two
  // constant-related pointers.
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  llvm::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    llvm::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases; fold the subtract to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrDiffs;
        return true;
      }
    }
  }

  // Otherwise, fall back to the generic logic for simplifying and handling
  // instructions.
  return Base::visitSub(I);
}

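/// \brief Fall-back handling for binary operators.
///
/// Tries InstructionSimplify over any simplified operands; an unsimplified
/// operator gets its base cost and disables SROA on both operands.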
bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  Value *SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, TD);
  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) {
    SimplifiedValues[&I] = C;
    return true;
  }

  // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
  disableSROA(LHS);
  disableSROA(RHS);

  return false;
}

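/// \brief Visit a load. A simple load from an SROA candidate counts as a
/// saving; a volatile or atomic load disables SROA for the underlying alloca.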
bool CallAnalyzer::visitLoad(LoadInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

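/// \brief Visit a store, mirroring the load logic above for the pointer
/// operand.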
bool CallAnalyzer::visitStore(StoreInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  // Note that we must look through the pointer operand; operand 0 of a store
  // is the value being stored, not the address.
  if (lookupSROAArgAndCost(I.getPointerOperand(), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

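/// \brief Visit a call or invoke instruction.
///
/// Treats a whitelist of intrinsics as free, charges argument setup and call
/// overhead for other calls, aborts the analysis on direct recursion or on
/// calls exposing returns-twice semantics, and caps the bonus applied to a
/// devirtualized indirect call by recursively analyzing the discovered
/// callee.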
bool CallAnalyzer::visitCallSite(CallSite CS) {
  if (CS.isCall() && cast<CallInst>(CS.getInstruction())->canReturnTwice() &&
      !F.hasFnAttr(Attribute::ReturnsTwice)) {
    // This aborts the entire analysis.
    ExposesReturnsTwice = true;
    return false;
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
    switch (II->getIntrinsicID()) {
    default:
      return Base::visitCallSite(CS);

    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_value:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
    case Intrinsic::objectsize:
    case Intrinsic::ptr_annotation:
    case Intrinsic::var_annotation:
      // SROA can usually chew through these intrinsics and they have no cost
      // so don't pay the price of analyzing them in detail.
      return true;
    }
  }

  if (Function *F = CS.getCalledFunction()) {
    if (F == CS.getInstruction()->getParent()->getParent()) {
      // This flag will fully abort the analysis, so don't bother with anything
      // else.
      IsRecursive = true;
      return false;
    }

    if (!callIsSmall(F)) {
      // We account for the average 1 instruction per call argument setup
      // here.
      Cost += CS.arg_size() * InlineConstants::InstrCost;

      // Everything other than inline ASM will also have a significant cost
      // merely from making the call.
      if (!isa<InlineAsm>(CS.getCalledValue()))
        Cost += InlineConstants::CallPenalty;
    }

    return Base::visitCallSite(CS);
  }

  // Otherwise we're in a very special case -- an indirect function call. See
  // if we can be particularly clever about this.
  Value *Callee = CS.getCalledValue();

  // First, pay the price of the argument setup. We account for the average
  // 1 instruction per call argument setup here.
  Cost += CS.arg_size() * InlineConstants::InstrCost;

  // Next, check if this happens to be an indirect function call to a known
  // function in this inline context. If not, we've done all we can.
  Function *F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
  if (!F)
    return Base::visitCallSite(CS);

  // If we have a constant that we are calling as a function, we can peer
  // through it and see the function target. This happens not infrequently
  // during devirtualization and so we want to give it a hefty bonus for
  // inlining, but cap that bonus in the event that inlining wouldn't pan
  // out. Pretend to inline the function, with a custom threshold.
  CallAnalyzer CA(TD, *F, InlineConstants::IndirectCallThreshold);
  if (CA.analyzeCall(CS)) {
    // We were able to inline the indirect call! Subtract the cost from the
    // bonus we want to apply, but don't go below zero.
    Cost -= std::max(0, InlineConstants::IndirectCallThreshold - CA.getCost());
  }

  return Base::visitCallSite(CS);
}

bool CallAnalyzer::visitInstruction(Instruction &I) {
  // We found something we don't understand or can't handle. Mark any SROA-able
  // values in the operand list as no longer viable.
  for (User::op_iterator OI = I.op_begin(), OE = I.op_end(); OI != OE; ++OI)
    disableSROA(*OI);

  return false;
}

/// \brief Analyze a basic block for its contribution to the inline cost.
///
/// This method walks the analyzer over every instruction in the given basic
/// block and accounts for their cost during inlining at this callsite. It
/// aborts early if the threshold has been exceeded or an impossible-to-inline
/// construct has been detected. It returns false if inlining is no longer
/// viable, and true if inlining remains viable.
bool CallAnalyzer::analyzeBlock(BasicBlock *BB) {
  for (BasicBlock::iterator I = BB->begin(), E = llvm::prior(BB->end());
       I != E; ++I) {
    ++NumInstructions;
    if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
      ++NumVectorInstructions;

    // If the instruction simplified to a constant, there is no cost to this
    // instruction. Visit the instructions using our InstVisitor to account for
    // all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's base
    // cost should count against inlining.
    if (Base::visit(I))
      ++NumInstructionsSimplified;
    else
      Cost += InlineConstants::InstrCost;

    // If visiting this instruction detected an uninlinable pattern, abort.
    if (IsRecursive || ExposesReturnsTwice || HasDynamicAlloca)
      return false;

    if (NumVectorInstructions > NumInstructions/2)
      VectorBonus = FiftyPercentVectorBonus;
    else if (NumVectorInstructions > NumInstructions/10)
      VectorBonus = TenPercentVectorBonus;
    else
      VectorBonus = 0;

    // Check if we've passed the threshold so we don't spin in huge basic
    // blocks that will never inline.
    if (!AlwaysInline && Cost > (Threshold + VectorBonus))
      return false;
  }

  return true;
}

/// \brief Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
  if (!TD || !V->getType()->isPointerTy())
    return 0;

  unsigned IntPtrWidth = TD->getPointerSizeInBits();
  APInt Offset = APInt::getNullValue(IntPtrWidth);

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(V);
  do {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
        return 0;
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->mayBeOverridden())
        break;
      V = GA->getAliasee();
    } else {
      break;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(V));

  Type *IntPtrTy = TD->getIntPtrType(V->getContext());
  return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}

/// \brief Analyze a call site for potential inlining.
///
/// Returns true if inlining this call is viable, and false if it is not
/// viable. It computes the cost and adjusts the threshold based on numerous
/// factors and heuristics. If this method returns false but the computed cost
/// is below the computed threshold, then inlining was forcibly disabled by
/// some artifact of the routine.
bool CallAnalyzer::analyzeCall(CallSite CS) {
  ++NumCallsAnalyzed;

  // Track whether the post-inlining function would have more than one basic
  // block. A single basic block is often intended for inlining. Balloon the
  // threshold by 50% until we pass the single-BB phase.
  bool SingleBB = true;
  int SingleBBBonus = Threshold / 2;
  Threshold += SingleBBBonus;

  // Unless we are always-inlining, perform some tweaks to the cost and
  // threshold based on the direct callsite information.
  if (!AlwaysInline) {
    // We want to more aggressively inline vector-dense kernels, so up the
    // threshold, and we'll lower it if the % of vector instructions gets too
    // low.
    assert(NumInstructions == 0);
    assert(NumVectorInstructions == 0);
    FiftyPercentVectorBonus = Threshold;
    TenPercentVectorBonus = Threshold / 2;

    // Subtract off one instruction per call argument as those will be free
    // after inlining.
    Cost -= CS.arg_size() * InlineConstants::InstrCost;

    // If there is only one call of the function, and it has internal linkage,
    // the cost of inlining it drops dramatically.
    if (F.hasLocalLinkage() && F.hasOneUse() && &F == CS.getCalledFunction())
      Cost += InlineConstants::LastCallToStaticBonus;

    // If the instruction after the call, or the normal destination of the
    // invoke, is an unreachable instruction, the function is noreturn. As
    // such, there is little point in inlining this unless there is literally
    // zero cost.
    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      if (isa<UnreachableInst>(II->getNormalDest()->begin()))
        Threshold = 1;
    } else if (isa<UnreachableInst>(++BasicBlock::iterator(CS.getInstruction())))
      Threshold = 1;

    // If this function uses the coldcc calling convention, prefer not to
    // inline it.
    if (F.getCallingConv() == CallingConv::Cold)
      Cost += InlineConstants::ColdccPenalty;

    // Check if we're done. This can happen due to bonuses and penalties.
    if (Cost > Threshold)
      return false;
  }

  if (F.empty())
    return true;

  // Track whether we've seen a return instruction. The first return
  // instruction is free, as at least one will usually disappear in inlining.
  bool HasReturn = false;

  // Populate our simplified values by mapping from function arguments to call
  // arguments with known important simplifications.
  CallSite::arg_iterator CAI = CS.arg_begin();
  for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
       FAI != FAE; ++FAI, ++CAI) {
    assert(CAI != CS.arg_end());
    if (Constant *C = dyn_cast<Constant>(CAI))
      SimplifiedValues[FAI] = C;

    Value *PtrArg = *CAI;
    if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
      ConstantOffsetPtrs[FAI] = std::make_pair(PtrArg, C->getValue());

      // We can SROA any pointer arguments derived from alloca instructions.
      if (isa<AllocaInst>(PtrArg)) {
        SROAArgValues[FAI] = PtrArg;
        SROAArgCosts[PtrArg] = 0;
      }
    }
  }
  NumConstantArgs = SimplifiedValues.size();
  NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
  NumAllocaArgs = SROAArgValues.size();

  // The worklist of live basic blocks in the callee *after* inlining. We avoid
  // adding basic blocks of the callee which can be proven to be dead for this
  // particular call site in order to get more accurate cost estimates. This
  // requires a somewhat heavyweight iteration pattern: we need to walk the
  // basic blocks in a breadth-first order as we insert live successors. To
  // accomplish this, and because we exit early once we cross the threshold, we
  // use a small-size optimized SetVector.
  typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
                                  SmallPtrSet<BasicBlock *, 16> > BBSetVector;
  BBSetVector BBWorklist;
  BBWorklist.insert(&F.getEntryBlock());
  // Note that we *must not* cache the size; this loop grows the worklist.
  for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
    // Bail out the moment we cross the threshold. This means we'll under-count
    // the cost, but only when undercounting doesn't matter.
    if (!AlwaysInline && Cost > (Threshold + VectorBonus))
      break;

    BasicBlock *BB = BBWorklist[Idx];
    if (BB->empty())
      continue;

    // Handle the terminator cost here where we can track returns and other
    // function-wide constructs.
    TerminatorInst *TI = BB->getTerminator();

    // We never want to inline functions that contain an indirectbr. This is
    // incorrect because all the blockaddresses (in static global initializers
    // for example) would be referring to the original function, and this
    // indirect jump would jump from the inlined copy of the function into the
    // original function, which is extremely undefined behavior.
    // FIXME: This logic isn't really right; we can safely inline functions
    // with indirectbr's as long as no other function or global references the
    // blockaddress of a block within the current function. And as a QOI issue,
    // if someone is using a blockaddress without an indirectbr, and that
    // reference somehow ends up in another function or global, we probably
    // don't want to inline this function.
    if (isa<IndirectBrInst>(TI))
      return false;

    if (!HasReturn && isa<ReturnInst>(TI))
      HasReturn = true;
    else
      Cost += InlineConstants::InstrCost;

    // Analyze the cost of this block. If we blow through the threshold, this
    // returns false, and we can bail out.
    if (!analyzeBlock(BB)) {
      if (IsRecursive || ExposesReturnsTwice || HasDynamicAlloca)
        return false;
      break;
    }

    // Add in the live successors by first checking whether we have a
    // terminator that may be simplified based on the values simplified by
    // this call.
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional()) {
        Value *Cond = BI->getCondition();
        if (ConstantInt *SimpleCond
              = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
          BBWorklist.insert(BI->getSuccessor(SimpleCond->isZero() ? 1 : 0));
          continue;
        }
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      Value *Cond = SI->getCondition();
      if (ConstantInt *SimpleCond
            = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
        BBWorklist.insert(SI->findCaseValue(SimpleCond).getCaseSuccessor());
        continue;
      }
    }

    // If we're unable to select a particular successor, just count all of
    // them.
    for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
         ++TIdx)
      BBWorklist.insert(TI->getSuccessor(TIdx));

    // If we had any successors at this point, then post-inlining is likely to
    // have them as well. Note that we assume any basic blocks which existed
    // due to branches or switches which folded above will also fold after
    // inlining.
    if (SingleBB && TI->getNumSuccessors() > 1) {
      // Take off the bonus we applied to the threshold.
      Threshold -= SingleBBBonus;
      SingleBB = false;
    }
  }

  Threshold += VectorBonus;

  return AlwaysInline || Cost < Threshold;
}

/// \brief Dump stats about this call's analysis.
void CallAnalyzer::dump() {
#define DEBUG_PRINT_STAT(x) llvm::dbgs() << "      " #x ": " << x << "\n"
  DEBUG_PRINT_STAT(NumConstantArgs);
  DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
  DEBUG_PRINT_STAT(NumAllocaArgs);
  DEBUG_PRINT_STAT(NumConstantPtrCmps);
  DEBUG_PRINT_STAT(NumConstantPtrDiffs);
  DEBUG_PRINT_STAT(NumInstructionsSimplified);
  DEBUG_PRINT_STAT(SROACostSavings);
  DEBUG_PRINT_STAT(SROACostSavingsLost);
#undef DEBUG_PRINT_STAT
}

InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, int Threshold) {
  return getInlineCost(CS, CS.getCalledFunction(), Threshold);
}

InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, Function *Callee,
                                             int Threshold) {
  // Don't inline functions which can be redefined at link-time to mean
  // something else. Don't inline functions marked noinline or call sites
  // marked noinline.
  if (!Callee || Callee->mayBeOverridden() ||
      Callee->hasFnAttr(Attribute::NoInline) || CS.isNoInline())
    return llvm::InlineCost::getNever();

  DEBUG(llvm::dbgs() << "      Analyzing call of " << Callee->getName()
                     << "...\n");

  CallAnalyzer CA(TD, *Callee, Threshold);
  bool ShouldInline = CA.analyzeCall(CS);

  DEBUG(CA.dump());

  // Check if there was a reason to force inlining or no inlining.
  if (!ShouldInline && CA.getCost() < CA.getThreshold())
    return InlineCost::getNever();
  if (ShouldInline && CA.getCost() >= CA.getThreshold())
    return InlineCost::getAlways();

  return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
}
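
// A minimal sketch of how a client pass might consume this analysis. This is
// hypothetical usage, not code from this file: it assumes the usual InlineCost
// accessors (isAlways, isNever, getCost), assumes an inlineCallSite helper
// supplied by the caller, and compares against the caller's raw threshold as a
// simplification of the adjusted-threshold logic above.
//
//   InlineCostAnalyzer ICA;
//   InlineCost IC = ICA.getInlineCost(CS, InlineThreshold);
//   if (IC.isAlways() || (!IC.isNever() && IC.getCost() < InlineThreshold))
//     inlineCallSite(CS); // hypothetical helper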
   1023