//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "inline-cost"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/InstVisitor.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");

namespace {

class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
  typedef InstVisitor<CallAnalyzer, bool> Base;
  friend class InstVisitor<CallAnalyzer, bool>;

  // DataLayout if available, or null.
  const DataLayout *const TD;

  /// The TargetTransformInfo available for this compilation.
  const TargetTransformInfo &TTI;

  // The called function.
  Function &F;

  int Threshold;
  int Cost;

  bool IsCallerRecursive;
  bool IsRecursiveCall;
  bool ExposesReturnsTwice;
  bool HasDynamicAlloca;
  bool ContainsNoDuplicateCall;

  /// Number of bytes allocated statically by the callee.
  uint64_t AllocatedSize;
  unsigned NumInstructions, NumVectorInstructions;
  int FiftyPercentVectorBonus, TenPercentVectorBonus;
  int VectorBonus;

  // While we walk the potentially-inlined instructions, we build up and
  // maintain a mapping of simplified values specific to this callsite. The
  // idea is to propagate any special information we have about arguments to
  // this call through the inlinable section of the function, and account for
  // likely simplifications post-inlining. The most important aspect we track
  // is CFG altering simplifications -- when we prove a basic block dead, that
  // can cause dramatic shifts in the cost of inlining a function.
  DenseMap<Value *, Constant *> SimplifiedValues;

  // Keep track of the values which map back (through function arguments) to
  // allocas on the caller stack which could be simplified through SROA.
  DenseMap<Value *, Value *> SROAArgValues;

  // The mapping of caller Alloca values to their accumulated cost savings. If
  // we have to disable SROA for one of the allocas, this tells us how much
  // cost must be added.
  DenseMap<Value *, int> SROAArgCosts;

  // Keep track of values which map to a pointer base and constant offset.
  DenseMap<Value *, std::pair<Value *, APInt> > ConstantOffsetPtrs;

  // Custom simplification helper routines.
  bool isAllocaDerivedArg(Value *V);
  bool lookupSROAArgAndCost(Value *V, Value *&Arg,
                            DenseMap<Value *, int>::iterator &CostIt);
  void disableSROA(DenseMap<Value *, int>::iterator CostIt);
  void disableSROA(Value *V);
  void accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                          int InstructionCost);
  bool handleSROACandidate(bool IsSROAValid,
                           DenseMap<Value *, int>::iterator CostIt,
                           int InstructionCost);
  bool isGEPOffsetConstant(GetElementPtrInst &GEP);
  bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
  bool simplifyCallSite(Function *F, CallSite CS);
  ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);

  // Custom analysis routines.
  bool analyzeBlock(BasicBlock *BB);

  // Disable several entry points to the visitor so we don't accidentally use
  // them by declaring but not defining them here.
  void visit(Module *); void visit(Module &);
  void visit(Function *); void visit(Function &);
  void visit(BasicBlock *); void visit(BasicBlock &);

  // Provide base case for our instruction visit.
  bool visitInstruction(Instruction &I);

  // Our visit overrides.
  bool visitAlloca(AllocaInst &I);
  bool visitPHI(PHINode &I);
  bool visitGetElementPtr(GetElementPtrInst &I);
  bool visitBitCast(BitCastInst &I);
  bool visitPtrToInt(PtrToIntInst &I);
  bool visitIntToPtr(IntToPtrInst &I);
  bool visitCastInst(CastInst &I);
  bool visitUnaryInstruction(UnaryInstruction &I);
  bool visitICmp(ICmpInst &I);
  bool visitSub(BinaryOperator &I);
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoad(LoadInst &I);
  bool visitStore(StoreInst &I);
  bool visitExtractValue(ExtractValueInst &I);
  bool visitInsertValue(InsertValueInst &I);
  bool visitCallSite(CallSite CS);

public:
  CallAnalyzer(const DataLayout *TD, const TargetTransformInfo &TTI,
               Function &Callee, int Threshold)
      : TD(TD), TTI(TTI), F(Callee), Threshold(Threshold), Cost(0),
        IsCallerRecursive(false), IsRecursiveCall(false),
        ExposesReturnsTwice(false), HasDynamicAlloca(false),
        ContainsNoDuplicateCall(false), AllocatedSize(0), NumInstructions(0),
        NumVectorInstructions(0), FiftyPercentVectorBonus(0),
        TenPercentVectorBonus(0), VectorBonus(0), NumConstantArgs(0),
        NumConstantOffsetPtrArgs(0), NumAllocaArgs(0), NumConstantPtrCmps(0),
        NumConstantPtrDiffs(0), NumInstructionsSimplified(0),
        SROACostSavings(0), SROACostSavingsLost(0) {}

  bool analyzeCall(CallSite CS);

  int getThreshold() { return Threshold; }
  int getCost() { return Cost; }

  // Keep a bunch of stats about the cost savings found so we can print them
  // out when debugging.
  unsigned NumConstantArgs;
  unsigned NumConstantOffsetPtrArgs;
  unsigned NumAllocaArgs;
  unsigned NumConstantPtrCmps;
  unsigned NumConstantPtrDiffs;
  unsigned NumInstructionsSimplified;
  unsigned SROACostSavings;
  unsigned SROACostSavingsLost;

  void dump();
};

} // namespace

/// \brief Test whether the given value is an Alloca-derived function argument.
bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
  return SROAArgValues.count(V);
}

/// \brief Lookup the SROA-candidate argument and cost iterator which V maps to.
/// Returns false if V does not map to a SROA-candidate.
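/// On success, \p Arg is set to the SROA-candidate argument and \p CostIt to
/// its entry in the accumulated cost-savings map.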
bool CallAnalyzer::lookupSROAArgAndCost(
    Value *V, Value *&Arg, DenseMap<Value *, int>::iterator &CostIt) {
  if (SROAArgValues.empty() || SROAArgCosts.empty())
    return false;

  DenseMap<Value *, Value *>::iterator ArgIt = SROAArgValues.find(V);
  if (ArgIt == SROAArgValues.end())
    return false;

  Arg = ArgIt->second;
  CostIt = SROAArgCosts.find(Arg);
  return CostIt != SROAArgCosts.end();
}

/// \brief Disable SROA for the candidate marked by this cost iterator.
///
/// This marks the candidate as no longer viable for SROA, and adds the cost
/// savings associated with it back into the inline cost measurement.
void CallAnalyzer::disableSROA(DenseMap<Value *, int>::iterator CostIt) {
  // If we're no longer able to perform SROA we need to undo its cost savings
  // and prevent subsequent analysis.
  Cost += CostIt->second;
  SROACostSavings -= CostIt->second;
  SROACostSavingsLost += CostIt->second;
  SROAArgCosts.erase(CostIt);
}

/// \brief If 'V' maps to a SROA candidate, disable SROA for it.
void CallAnalyzer::disableSROA(Value *V) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(V, SROAArg, CostIt))
    disableSROA(CostIt);
}

/// \brief Accumulate the given cost for a particular SROA candidate.
void CallAnalyzer::accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                                      int InstructionCost) {
  CostIt->second += InstructionCost;
  SROACostSavings += InstructionCost;
}

/// \brief Helper for the common pattern of handling a SROA candidate.
/// Either accumulates the cost savings if the SROA remains valid, or disables
/// SROA for the candidate.
bool CallAnalyzer::handleSROACandidate(bool IsSROAValid,
                                       DenseMap<Value *, int>::iterator CostIt,
                                       int InstructionCost) {
  if (IsSROAValid) {
    accumulateSROACost(CostIt, InstructionCost);
    return true;
  }

  disableSROA(CostIt);
  return false;
}

/// \brief Check whether a GEP's indices are all constant.
///
/// Respects any simplified values known during the analysis of this callsite.
bool CallAnalyzer::isGEPOffsetConstant(GetElementPtrInst &GEP) {
  for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
    if (!isa<Constant>(*I) && !SimplifiedValues.lookup(*I))
      return false;

  return true;
}

/// \brief Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
  if (!TD)
    return false;

  unsigned IntPtrWidth = TD->getPointerSizeInBits();
  assert(IntPtrWidth == Offset.getBitWidth());

  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
        OpC = dyn_cast<ConstantInt>(SimpleOp);
    if (!OpC)
      return false;
    if (OpC->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
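    // For example (illustrative, assuming a 4-byte i32): given
    //   getelementptr inbounds {i32, i32}* %p, i32 0, i32 1
    // the leading zero index contributes nothing, and the struct index 1
    // adds the layout's field offset (here 4) to Offset.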
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = TD->getStructLayout(STy);
      Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
      continue;
    }

    APInt TypeSize(IntPtrWidth, TD->getTypeAllocSize(GTI.getIndexedType()));
    Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
  }
  return true;
}

bool CallAnalyzer::visitAlloca(AllocaInst &I) {
  // FIXME: Check whether inlining will turn a dynamic alloca into a static
  // alloca, and handle that case.

  // Accumulate the allocated size.
  if (I.isStaticAlloca()) {
    Type *Ty = I.getAllocatedType();
    AllocatedSize += (TD ? TD->getTypeAllocSize(Ty) :
                           Ty->getPrimitiveSizeInBits());
  }

  // We will happily inline static alloca instructions.
  if (I.isStaticAlloca())
    return Base::visitAlloca(I);

  // FIXME: This is overly conservative. Dynamic allocas are inefficient for
  // a variety of reasons, and so we would like to not inline them into
  // functions which don't currently have a dynamic alloca. This simply
  // disables inlining altogether in the presence of a dynamic alloca.
  HasDynamicAlloca = true;
  return false;
}

bool CallAnalyzer::visitPHI(PHINode &I) {
  // FIXME: We should potentially be tracking values through phi nodes,
  // especially when they collapse to a single value due to deleted CFG edges
  // during inlining.

  // FIXME: We need to propagate SROA *disabling* through phi nodes, even
  // though we don't want to propagate its bonuses. The idea is to disable
  // SROA if it *might* be used in an inappropriate manner.

  // Phi nodes are always zero-cost.
  return true;
}

bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  bool SROACandidate = lookupSROAArgAndCost(I.getPointerOperand(),
                                            SROAArg, CostIt);

  // Try to fold GEPs of constant-offset call site argument pointers. This
  // requires target data and inbounds GEPs.
  if (TD && I.isInBounds()) {
    // Check if we have a base + offset for the pointer.
    Value *Ptr = I.getPointerOperand();
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
    if (BaseAndOffset.first) {
      // Check if the offset of this GEP is constant, and if so accumulate it
      // into Offset.
      if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second)) {
        // Non-constant GEPs aren't folded, and disable SROA.
        if (SROACandidate)
          disableSROA(CostIt);
        return false;
      }

      // Add the result as a new mapping to Base + Offset.
      ConstantOffsetPtrs[&I] = BaseAndOffset;

      // Also handle SROA candidates here, we already know that the GEP is
      // all-constant indexed.
      if (SROACandidate)
        SROAArgValues[&I] = SROAArg;

      return true;
    }
  }

  if (isGEPOffsetConstant(I)) {
    if (SROACandidate)
      SROAArgValues[&I] = SROAArg;

    // Constant GEPs are modeled as free.
    return true;
  }

  // Variable GEPs will require math and will disable SROA.
  if (SROACandidate)
    disableSROA(CostIt);
  return false;
}

bool CallAnalyzer::visitBitCast(BitCastInst &I) {
  // Propagate constants through bitcasts.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getBitCast(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offsets through casts.
  std::pair<Value *, APInt> BaseAndOffset
    = ConstantOffsetPtrs.lookup(I.getOperand(0));
  // Casts don't change the offset, just wrap it up.
  if (BaseAndOffset.first)
    ConstantOffsetPtrs[&I] = BaseAndOffset;

  // Also look for SROA candidates here.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  // Bitcasts are always zero cost.
  return true;
}

bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
  // Propagate constants through ptrtoint.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getPtrToInt(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when converted to a plain integer provided the
  // integer is large enough to represent the pointer.
  unsigned IntegerSize = I.getType()->getScalarSizeInBits();
  if (TD && IntegerSize >= TD->getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset
      = ConstantOffsetPtrs.lookup(I.getOperand(0));
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // This is really weird. Technically, ptrtoint will disable SROA. However,
  // unless that ptrtoint is *used* somewhere in the live basic blocks after
  // inlining, it will be nuked, and SROA should proceed. All of the uses which
  // would block SROA would also block SROA if applied directly to a pointer,
  // and so we can just add the integer in here. The only places where SROA is
  // preserved either cannot fire on an integer, or won't in-and-of themselves
  // disable SROA (ext) w/o some later use that we would see and disable.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}

bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
  // Propagate constants through inttoptr.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getIntToPtr(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when round-tripped through a pointer without
  // modifications provided the integer is not too large.
  Value *Op = I.getOperand(0);
  unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
  if (TD && IntegerSize <= TD->getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // "Propagate" SROA here in the same manner as we do for ptrtoint above.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(Op, SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}

bool CallAnalyzer::visitCastInst(CastInst &I) {
  // Propagate constants through casts.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getCast(I.getOpcode(), COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable SROA in the face of arbitrary casts we don't whitelist elsewhere.
  disableSROA(I.getOperand(0));

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}

bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
  Value *Operand = I.getOperand(0);
  Constant *COp = dyn_cast<Constant>(Operand);
  if (!COp)
    COp = SimplifiedValues.lookup(Operand);
  if (COp)
    if (Constant *C = ConstantFoldInstOperands(I.getOpcode(), I.getType(),
                                               COp, TD)) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable any SROA on the argument to arbitrary unary operators.
  disableSROA(Operand);

  return false;
}

bool CallAnalyzer::visitICmp(ICmpInst &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  // First try to handle simplified comparisons.
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        return true;
      }

  // Otherwise look for a comparison between constant offset pointers with
  // a common base.
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  llvm::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    llvm::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the icmp to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrCmps;
        return true;
      }
    }
  }

  // If the comparison is an equality comparison with null, we can simplify it
  // for any alloca-derived argument.
  if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)))
    if (isAllocaDerivedArg(I.getOperand(0))) {
      // We can actually predict the result of comparisons between an
      // alloca-derived value and null. Note that this fires regardless of
      // SROA firing.
      bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
      SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
                                        : ConstantInt::getFalse(I.getType());
      return true;
    }

  // Finally check for SROA candidates in comparisons.
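  // A null-compare of an SROA candidate folds away once the alloca is
  // scalarized, so it credits InstrCost to the candidate's savings; any other
  // comparison would pin the pointer in memory, so it disables the candidate.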
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (isa<ConstantPointerNull>(I.getOperand(1))) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitSub(BinaryOperator &I) {
  // Try to handle a special case: we can fold computing the difference of two
  // constant-related pointers.
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  llvm::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    llvm::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the subtract to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrDiffs;
        return true;
      }
    }
  }

  // Otherwise, fall back to the generic logic for simplifying and handling
  // instructions.
  return Base::visitSub(I);
}

bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  Value *SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, TD);
  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) {
    SimplifiedValues[&I] = C;
    return true;
  }

  // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
  disableSROA(LHS);
  disableSROA(RHS);

  return false;
}

bool CallAnalyzer::visitLoad(LoadInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getPointerOperand(), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitStore(StoreInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  // SROA candidacy is a property of the address stored to, not the value.
  if (lookupSROAArgAndCost(I.getPointerOperand(), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
  // Constant folding for extract value is trivial.
  Constant *C = dyn_cast<Constant>(I.getAggregateOperand());
  if (!C)
    C = SimplifiedValues.lookup(I.getAggregateOperand());
  if (C) {
    SimplifiedValues[&I] = ConstantExpr::getExtractValue(C, I.getIndices());
    return true;
  }

  // SROA can look through these but give them a cost.
  return false;
}

bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
  // Constant folding for insert value is trivial.
  Constant *AggC = dyn_cast<Constant>(I.getAggregateOperand());
  if (!AggC)
    AggC = SimplifiedValues.lookup(I.getAggregateOperand());
  Constant *InsertedC = dyn_cast<Constant>(I.getInsertedValueOperand());
  if (!InsertedC)
    InsertedC = SimplifiedValues.lookup(I.getInsertedValueOperand());
  if (AggC && InsertedC) {
    SimplifiedValues[&I] = ConstantExpr::getInsertValue(AggC, InsertedC,
                                                        I.getIndices());
    return true;
  }

  // SROA can look through these but give them a cost.
  return false;
}

/// \brief Try to simplify a call site.
///
/// Takes a concrete function and callsite and tries to actually simplify it by
/// analyzing the arguments and call itself with instsimplify. Returns true if
/// it has simplified the callsite to some other entity (a constant), making it
/// free.
bool CallAnalyzer::simplifyCallSite(Function *F, CallSite CS) {
  // FIXME: Using the instsimplify logic directly for this is inefficient
  // because we have to continually rebuild the argument list even when no
  // simplifications can be performed. Until that is fixed with remapping
  // inside of instsimplify, directly constant fold calls here.
  if (!canConstantFoldCallTo(F))
    return false;

  // Try to re-map the arguments to constants.
  SmallVector<Constant *, 4> ConstantArgs;
  ConstantArgs.reserve(CS.arg_size());
  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I) {
    Constant *C = dyn_cast<Constant>(*I);
    if (!C)
      C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(*I));
    if (!C)
      return false; // This argument doesn't map to a constant.

    ConstantArgs.push_back(C);
  }
  if (Constant *C = ConstantFoldCall(F, ConstantArgs)) {
    SimplifiedValues[CS.getInstruction()] = C;
    return true;
  }

  return false;
}

bool CallAnalyzer::visitCallSite(CallSite CS) {
  if (CS.isCall() && cast<CallInst>(CS.getInstruction())->canReturnTwice() &&
      !F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                      Attribute::ReturnsTwice)) {
    // This aborts the entire analysis.
    ExposesReturnsTwice = true;
    return false;
  }
  if (CS.isCall() &&
      cast<CallInst>(CS.getInstruction())->hasFnAttr(Attribute::NoDuplicate))
    ContainsNoDuplicateCall = true;

  if (Function *F = CS.getCalledFunction()) {
    // When we have a concrete function, first try to simplify it directly.
    if (simplifyCallSite(F, CS))
      return true;

    // Next check if it is an intrinsic we know about.
    // FIXME: Lift this into part of the InstVisitor.
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
      switch (II->getIntrinsicID()) {
      default:
        return Base::visitCallSite(CS);

      case Intrinsic::memset:
      case Intrinsic::memcpy:
      case Intrinsic::memmove:
        // SROA can usually chew through these intrinsics, but they aren't
        // free.
        return false;
      }
    }

    if (F == CS.getInstruction()->getParent()->getParent()) {
      // This flag will fully abort the analysis, so don't bother with
      // anything else.
      IsRecursiveCall = true;
      return false;
    }

    if (TTI.isLoweredToCall(F)) {
      // We account for an average of one instruction per call argument setup
      // here.
      Cost += CS.arg_size() * InlineConstants::InstrCost;

      // Everything other than inline ASM will also have a significant cost
      // merely from making the call.
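      // (InlineConstants::CallPenalty approximates the fixed overhead of the
      // call sequence itself, beyond the per-argument setup counted above.)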
      if (!isa<InlineAsm>(CS.getCalledValue()))
        Cost += InlineConstants::CallPenalty;
    }

    return Base::visitCallSite(CS);
  }

  // Otherwise we're in a very special case -- an indirect function call. See
  // if we can be particularly clever about this.
  Value *Callee = CS.getCalledValue();

  // First, pay the price of the argument setup. We account for an average of
  // one instruction per call argument setup here.
  Cost += CS.arg_size() * InlineConstants::InstrCost;

  // Next, check if this happens to be an indirect function call to a known
  // function in this inline context. If not, we've done all we can.
  Function *F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
  if (!F)
    return Base::visitCallSite(CS);

  // If we have a constant that we are calling as a function, we can peer
  // through it and see the function target. This happens not infrequently
  // during devirtualization and so we want to give it a hefty bonus for
  // inlining, but cap that bonus in the event that inlining wouldn't pan
  // out. Pretend to inline the function, with a custom threshold.
  CallAnalyzer CA(TD, TTI, *F, InlineConstants::IndirectCallThreshold);
  if (CA.analyzeCall(CS)) {
    // We were able to inline the indirect call! Subtract the cost from the
    // bonus we want to apply, but don't go below zero.
    Cost -= std::max(0, InlineConstants::IndirectCallThreshold - CA.getCost());
  }

  return Base::visitCallSite(CS);
}

bool CallAnalyzer::visitInstruction(Instruction &I) {
  // Some instructions are free. All of the free intrinsics can also be
  // handled by SROA, etc.
  if (TargetTransformInfo::TCC_Free == TTI.getUserCost(&I))
    return true;

  // We found something we don't understand or can't handle. Mark any SROA-able
  // values in the operand list as no longer viable.
  for (User::op_iterator OI = I.op_begin(), OE = I.op_end(); OI != OE; ++OI)
    disableSROA(*OI);

  return false;
}

/// \brief Analyze a basic block for its contribution to the inline cost.
///
/// This method walks the analyzer over every instruction in the given basic
/// block and accounts for their cost during inlining at this callsite. It
/// aborts early if the threshold has been exceeded or an impossible-to-inline
/// construct has been detected. It returns false if inlining is no longer
/// viable, and true if inlining remains viable.
bool CallAnalyzer::analyzeBlock(BasicBlock *BB) {
  for (BasicBlock::iterator I = BB->begin(), E = llvm::prior(BB->end());
       I != E; ++I) {
    ++NumInstructions;
    if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
      ++NumVectorInstructions;

    // If the instruction simplified to a constant, there is no cost to this
    // instruction. Visit the instructions using our InstVisitor to account for
    // all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's base
    // cost should count against inlining.
    if (Base::visit(I))
      ++NumInstructionsSimplified;
    else
      Cost += InlineConstants::InstrCost;

    // If the visit of this instruction detected an uninlinable pattern, abort.
    if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca)
      return false;

    // If the caller is a recursive function then we don't want to inline
    // functions which allocate a lot of stack space because it would increase
    // the caller stack usage dramatically.
    if (IsCallerRecursive &&
        AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
      return false;

    if (NumVectorInstructions > NumInstructions/2)
      VectorBonus = FiftyPercentVectorBonus;
    else if (NumVectorInstructions > NumInstructions/10)
      VectorBonus = TenPercentVectorBonus;
    else
      VectorBonus = 0;

    // Check if we've passed the threshold so we don't spin in huge basic
    // blocks that will never inline.
    if (Cost > (Threshold + VectorBonus))
      return false;
  }

  return true;
}

/// \brief Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
  if (!TD || !V->getType()->isPointerTy())
    return 0;

  unsigned IntPtrWidth = TD->getPointerSizeInBits();
  APInt Offset = APInt::getNullValue(IntPtrWidth);

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(V);
  do {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
        return 0;
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->mayBeOverridden())
        break;
      V = GA->getAliasee();
    } else {
      break;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(V));

  Type *IntPtrTy = TD->getIntPtrType(V->getContext());
  return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}

/// \brief Analyze a call site for potential inlining.
///
/// Returns true if inlining this call is viable, and false if it is not
/// viable. It computes the cost and adjusts the threshold based on numerous
/// factors and heuristics. If this method returns false but the computed cost
/// is below the computed threshold, then inlining was forcibly disabled by
/// some artifact of the routine.
bool CallAnalyzer::analyzeCall(CallSite CS) {
  ++NumCallsAnalyzed;

  // Track whether the post-inlining function would have more than one basic
  // block. A single basic block is often intended for inlining. Balloon the
  // threshold by 50% until we pass the single-BB phase.
  bool SingleBB = true;
  int SingleBBBonus = Threshold / 2;
  Threshold += SingleBBBonus;

  // Perform some tweaks to the cost and threshold based on the direct
  // callsite information.

  // We want to more aggressively inline vector-dense kernels, so up the
  // threshold, and we'll lower it if the % of vector instructions gets too
  // low.
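  // Concretely, the bonus is staged just below: more than 50% vector
  // instructions earns a bonus equal to the current threshold, and more than
  // 10% earns half of it (the choice between them is made per-block in
  // analyzeBlock).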
  assert(NumInstructions == 0);
  assert(NumVectorInstructions == 0);
  FiftyPercentVectorBonus = Threshold;
  TenPercentVectorBonus = Threshold / 2;

  // Give out bonuses per argument, as the instructions setting them up will
  // be gone after inlining.
  for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
    if (TD && CS.isByValArgument(I)) {
      // We approximate the number of loads and stores needed by dividing the
      // size of the byval type by the target's pointer size.
      PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
      unsigned TypeSize = TD->getTypeSizeInBits(PTy->getElementType());
      unsigned PointerSize = TD->getPointerSizeInBits();
      // Ceiling division.
      unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;

      // If it generates more than 8 stores it is likely to be expanded as an
      // inline memcpy so we take that as an upper bound. Otherwise we assume
      // one load and one store per word copied.
      // FIXME: The maxStoresPerMemcpy setting from the target should be used
      // here instead of a magic number of 8, but it's not available via
      // DataLayout.
      NumStores = std::min(NumStores, 8U);

      Cost -= 2 * NumStores * InlineConstants::InstrCost;
    } else {
      // For non-byval arguments subtract off one instruction per call
      // argument.
      Cost -= InlineConstants::InstrCost;
    }
  }

  // If there is only one call of the function, and it has internal linkage,
  // the cost of inlining it drops dramatically.
  bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() &&
    &F == CS.getCalledFunction();
  if (OnlyOneCallAndLocalLinkage)
    Cost += InlineConstants::LastCallToStaticBonus;

  // If the instruction after the call, or the normal destination of the
  // invoke, is an unreachable instruction, the function is effectively
  // noreturn. As such, there is little point in inlining this unless there is
  // literally zero cost.
  Instruction *Instr = CS.getInstruction();
  if (InvokeInst *II = dyn_cast<InvokeInst>(Instr)) {
    if (isa<UnreachableInst>(II->getNormalDest()->begin()))
      Threshold = 1;
  } else if (isa<UnreachableInst>(++BasicBlock::iterator(Instr)))
    Threshold = 1;

  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (F.getCallingConv() == CallingConv::Cold)
    Cost += InlineConstants::ColdccPenalty;

  // Check if we're done. This can happen due to bonuses and penalties.
  if (Cost > Threshold)
    return false;

  if (F.empty())
    return true;

  Function *Caller = CS.getInstruction()->getParent()->getParent();
  // Check if the caller function is recursive itself.
  for (Value::use_iterator U = Caller->use_begin(), E = Caller->use_end();
       U != E; ++U) {
    CallSite Site(cast<Value>(*U));
    if (!Site)
      continue;
    Instruction *I = Site.getInstruction();
    if (I->getParent()->getParent() == Caller) {
      IsCallerRecursive = true;
      break;
    }
  }

  // Track whether we've seen a return instruction. The first return
  // instruction is free, as at least one will usually disappear in inlining.
  bool HasReturn = false;

  // Populate our simplified values by mapping from function arguments to call
  // arguments with known important simplifications.
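  // For example (illustrative): inlining 'f(i32 42, i8* %buf)' where %buf is
  // derived from a caller alloca maps the first formal to the constant 42 in
  // SimplifiedValues and registers the second as an SROA candidate with zero
  // accumulated savings.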
  CallSite::arg_iterator CAI = CS.arg_begin();
  for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
       FAI != FAE; ++FAI, ++CAI) {
    assert(CAI != CS.arg_end());
    if (Constant *C = dyn_cast<Constant>(CAI))
      SimplifiedValues[FAI] = C;

    Value *PtrArg = *CAI;
    if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
      ConstantOffsetPtrs[FAI] = std::make_pair(PtrArg, C->getValue());

      // We can SROA any pointer arguments derived from alloca instructions.
      if (isa<AllocaInst>(PtrArg)) {
        SROAArgValues[FAI] = PtrArg;
        SROAArgCosts[PtrArg] = 0;
      }
    }
  }
  NumConstantArgs = SimplifiedValues.size();
  NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
  NumAllocaArgs = SROAArgValues.size();

  // The worklist of live basic blocks in the callee *after* inlining. We avoid
  // adding basic blocks of the callee which can be proven to be dead for this
  // particular call site in order to get more accurate cost estimates. This
  // requires a somewhat heavyweight iteration pattern: we need to walk the
  // basic blocks in a breadth-first order as we insert live successors. To
  // accomplish this, and because we exit after crossing our threshold and so
  // expect only a small number of iterations, we use a small-size-optimized
  // SetVector.
  typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
                    SmallPtrSet<BasicBlock *, 16> > BBSetVector;
  BBSetVector BBWorklist;
  BBWorklist.insert(&F.getEntryBlock());
  // Note that we *must not* cache the size, this loop grows the worklist.
  for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
    // Bail out the moment we cross the threshold. This means we'll under-count
    // the cost, but only when undercounting doesn't matter.
    if (Cost > (Threshold + VectorBonus))
      break;

    BasicBlock *BB = BBWorklist[Idx];
    if (BB->empty())
      continue;

    // Handle the terminator cost here where we can track returns and other
    // function-wide constructs.
    TerminatorInst *TI = BB->getTerminator();

    // We never want to inline functions that contain an indirectbr. This is
    // incorrect because all the blockaddresses (in static global initializers
    // for example) would be referring to the original function, and this
    // indirect jump would jump from the inlined copy of the function into the
    // original function which is extremely undefined behavior.
    // FIXME: This logic isn't really right; we can safely inline functions
    // with indirectbr's as long as no other function or global references the
    // blockaddress of a block within the current function. And as a QOI issue,
    // if someone is using a blockaddress without an indirectbr, and that
    // reference somehow ends up in another function or global, we probably
    // don't want to inline this function.
    if (isa<IndirectBrInst>(TI))
      return false;

    if (!HasReturn && isa<ReturnInst>(TI))
      HasReturn = true;
    else
      Cost += InlineConstants::InstrCost;

    // Analyze the cost of this block. If we blow through the threshold, this
    // returns false, and we can bail out.
    if (!analyzeBlock(BB)) {
      if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca)
        return false;

      // If the caller is a recursive function then we don't want to inline
      // functions which allocate a lot of stack space because it would
      // increase the caller stack usage dramatically.
      if (IsCallerRecursive &&
          AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
        return false;

      break;
    }

    // Add in the live successors by first checking whether we have a
    // terminator that may be simplified based on the values simplified by
    // this call.
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional()) {
        Value *Cond = BI->getCondition();
        if (ConstantInt *SimpleCond
              = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
          BBWorklist.insert(BI->getSuccessor(SimpleCond->isZero() ? 1 : 0));
          continue;
        }
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      Value *Cond = SI->getCondition();
      if (ConstantInt *SimpleCond
            = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
        BBWorklist.insert(SI->findCaseValue(SimpleCond).getCaseSuccessor());
        continue;
      }
    }

    // If we're unable to select a particular successor, just count all of
    // them.
    for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
         ++TIdx)
      BBWorklist.insert(TI->getSuccessor(TIdx));

    // If we had any successors at this point, then post-inlining is likely to
    // have them as well. Note that we assume any basic blocks which existed
    // due to branches or switches which folded above will also fold after
    // inlining.
    if (SingleBB && TI->getNumSuccessors() > 1) {
      // Take off the bonus we applied to the threshold.
      Threshold -= SingleBBBonus;
      SingleBB = false;
    }
  }

  // If this is a noduplicate call, we can still inline as long as inlining
  // this would cause the removal of the function being inlined (so the
  // instruction is not actually duplicated, just moved).
  if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
    return false;

  Threshold += VectorBonus;

  return Cost < Threshold;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// \brief Dump stats about this call's analysis.
void CallAnalyzer::dump() {
#define DEBUG_PRINT_STAT(x) llvm::dbgs() << " " #x ": " << x << "\n"
  DEBUG_PRINT_STAT(NumConstantArgs);
  DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
  DEBUG_PRINT_STAT(NumAllocaArgs);
  DEBUG_PRINT_STAT(NumConstantPtrCmps);
  DEBUG_PRINT_STAT(NumConstantPtrDiffs);
  DEBUG_PRINT_STAT(NumInstructionsSimplified);
  DEBUG_PRINT_STAT(SROACostSavings);
  DEBUG_PRINT_STAT(SROACostSavingsLost);
  DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
#undef DEBUG_PRINT_STAT
}
#endif

INITIALIZE_PASS_BEGIN(InlineCostAnalysis, "inline-cost",
                      "Inline Cost Analysis", true, true)
INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
INITIALIZE_PASS_END(InlineCostAnalysis, "inline-cost",
                    "Inline Cost Analysis", true, true)

char InlineCostAnalysis::ID = 0;

InlineCostAnalysis::InlineCostAnalysis() : CallGraphSCCPass(ID), TD(0) {}

InlineCostAnalysis::~InlineCostAnalysis() {}

void InlineCostAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<TargetTransformInfo>();
  CallGraphSCCPass::getAnalysisUsage(AU);
}

bool InlineCostAnalysis::runOnSCC(CallGraphSCC &SCC) {
  TD = getAnalysisIfAvailable<DataLayout>();
  TTI = &getAnalysis<TargetTransformInfo>();
  return false;
}

InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, int Threshold) {
  return getInlineCost(CS, CS.getCalledFunction(), Threshold);
}

InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, Function *Callee,
                                             int Threshold) {
  // Cannot inline indirect calls.
  if (!Callee)
    return llvm::InlineCost::getNever();

  // Calls to functions with always-inline attributes should be inlined
  // whenever possible.
  if (Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                           Attribute::AlwaysInline)) {
    if (isInlineViable(*Callee))
      return llvm::InlineCost::getAlways();
    return llvm::InlineCost::getNever();
  }

  // Don't inline functions which can be redefined at link-time to mean
  // something else. Don't inline functions marked noinline or call sites
  // marked noinline.
  if (Callee->mayBeOverridden() ||
      Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                           Attribute::NoInline) ||
      CS.isNoInline())
    return llvm::InlineCost::getNever();

  DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName()
                     << "...\n");

  CallAnalyzer CA(TD, *TTI, *Callee, Threshold);
  bool ShouldInline = CA.analyzeCall(CS);

  DEBUG(CA.dump());

  // Check if there was a reason to force inlining or no inlining.
  if (!ShouldInline && CA.getCost() < CA.getThreshold())
    return InlineCost::getNever();
  if (ShouldInline && CA.getCost() >= CA.getThreshold())
    return InlineCost::getAlways();

  return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
}

bool InlineCostAnalysis::isInlineViable(Function &F) {
  bool ReturnsTwice =
    F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                   Attribute::ReturnsTwice);
  for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
    // Disallow inlining of functions which contain an indirect branch.
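    // (Cloning a block whose address is taken would leave blockaddress users
    // pointing at the original function; see the matching check and FIXME in
    // CallAnalyzer::analyzeCall.)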
    if (isa<IndirectBrInst>(BI->getTerminator()))
      return false;

    for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
         ++II) {
      CallSite CS(II);
      if (!CS)
        continue;

      // Disallow recursive calls.
      if (&F == CS.getCalledFunction())
        return false;

      // Disallow calls which expose returns-twice to a function not previously
      // attributed as such.
      if (!ReturnsTwice && CS.isCall() &&
          cast<CallInst>(CS.getInstruction())->canReturnTwice())
        return false;
    }
  }

  return true;
}
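
// Illustrative usage (a sketch, not part of this file's interface): a pass
// that declares a dependency on this analysis would typically query it per
// call site, e.g.
//
//   InlineCostAnalysis *ICA = &getAnalysis<InlineCostAnalysis>();
//   InlineCost IC = ICA->getInlineCost(CS, InlineThreshold);
//   if (IC)   // cost below the adjusted threshold, or "always"
//     ... attempt to inline CS ...
//
// where InlineThreshold is the inliner's chosen -inline-threshold value.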