//===- DeadStoreElimination.cpp - Fast Dead Store Elimination ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
//
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal.  Doing so would be pretty trivial.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "dse"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;

STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther , "Number of other instrs removed");

namespace {
  struct DSE : public FunctionPass {
    AliasAnalysis *AA;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;

    static char ID; // Pass identification, replacement for typeid
    DSE() : FunctionPass(ID), AA(0), MD(0), DT(0) {
      initializeDSEPass(*PassRegistry::getPassRegistry());
    }

    virtual bool runOnFunction(Function &F) {
      AA = &getAnalysis<AliasAnalysis>();
      MD = &getAnalysis<MemoryDependenceAnalysis>();
      DT = &getAnalysis<DominatorTree>();

      bool Changed = false;
      for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
        // Only check non-dead blocks.  Dead blocks may have strange pointer
        // cycles that will confuse alias analysis.
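        // (Illustrative, not from the original source: in unreachable code a
        // definition need not dominate its uses, so an instruction can even
        // use itself, e.g. "%p = getelementptr i8* %p, i64 1", leaving alias
        // analysis with no well-defined underlying object to reason about.)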
        if (DT->isReachableFromEntry(I))
          Changed |= runOnBasicBlock(*I);

      AA = 0; MD = 0; DT = 0;
      return Changed;
    }

    bool runOnBasicBlock(BasicBlock &BB);
    bool HandleFree(CallInst *F);
    bool handleEndBlock(BasicBlock &BB);
    void RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
                               SmallPtrSet<Value*, 16> &DeadStackObjects);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<DominatorTree>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }
  };
}

char DSE::ID = 0;
INITIALIZE_PASS_BEGIN(DSE, "dse", "Dead Store Elimination", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(DSE, "dse", "Dead Store Elimination", false, false)

FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

/// DeleteDeadInstruction - Delete this instruction.  Before we do, go through
/// and zero out all the operands of this instruction.  If any of them become
/// dead, delete them and the computation tree that feeds them.
///
/// If ValueSet is non-null, remove any deleted instructions from it as well.
///
static void DeleteDeadInstruction(Instruction *I,
                                  MemoryDependenceAnalysis &MD,
                                  SmallPtrSet<Value*, 16> *ValueSet = 0) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);
  --NumFastOther;

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    ++NumFastOther;

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, 0);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI))
          NowDeadInsts.push_back(OpI);
    }

    DeadInst->eraseFromParent();

    if (ValueSet) ValueSet->erase(DeadInst);
  } while (!NowDeadInsts.empty());
}


/// hasMemoryWrite - Does this instruction write some memory?  This only
/// returns true for things that we can analyze with other helpers below.
static bool hasMemoryWrite(Instruction *I) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
      return true;
    }
  }
  return false;
}

/// getLocForWrite - Return a Location stored to by the specified instruction.
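/// For example (illustrative, not in the original): for "store i32 1, i32* %p"
/// this is the location (%p, 4 bytes) when TargetData is available.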
/// If isRemovable returns true, this function and getLocForRead completely
/// describe the memory operations for this instruction.
static AliasAnalysis::Location
getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return AA.getLocation(SI);

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    AliasAnalysis::Location Loc = AA.getLocationForDest(MI);
    // If we don't have target data around, an unknown size in Location means
    // that we should use the size of the pointee type.  This isn't valid for
    // memset/memcpy, which writes more than an i8.
    if (Loc.Size == AliasAnalysis::UnknownSize && AA.getTargetData() == 0)
      return AliasAnalysis::Location();
    return Loc;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
  if (II == 0) return AliasAnalysis::Location();

  switch (II->getIntrinsicID()) {
  default: return AliasAnalysis::Location(); // Unhandled intrinsic.
  case Intrinsic::init_trampoline:
    // If we don't have target data around, an unknown size in Location means
    // that we should use the size of the pointee type.  This isn't valid for
    // init.trampoline, which writes more than an i8.
    if (AA.getTargetData() == 0) return AliasAnalysis::Location();

    // FIXME: We don't know the size of the trampoline, so we can't really
    // handle it here.
    return AliasAnalysis::Location(II->getArgOperand(0));
  case Intrinsic::lifetime_end: {
    uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
    return AliasAnalysis::Location(II->getArgOperand(1), Len);
  }
  }
}

/// getLocForRead - Return the location read by the specified "hasMemoryWrite"
/// instruction if any.
static AliasAnalysis::Location
getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
  assert(hasMemoryWrite(Inst) && "Unknown instruction case");

  // The only instructions that both read and write are the mem transfer
  // instructions (memcpy/memmove).
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
    return AA.getLocationForSource(MTI);
  return AliasAnalysis::Location();
}


/// isRemovable - If the value of this instruction and the memory it writes to
/// is unused, may we delete this instruction?
static bool isRemovable(Instruction *I) {
  // Don't remove volatile/atomic stores.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isUnordered();

  IntrinsicInst *II = cast<IntrinsicInst>(I);
  switch (II->getIntrinsicID()) {
  default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate");
  case Intrinsic::lifetime_end:
    // Never remove dead lifetime_end's, e.g. because they may be followed by
    // a free.
    return false;
  case Intrinsic::init_trampoline:
    // Always safe to remove init_trampoline.
    return true;

  case Intrinsic::memset:
  case Intrinsic::memmove:
  case Intrinsic::memcpy:
    // Don't remove volatile memory intrinsics.
    return !cast<MemIntrinsic>(II)->isVolatile();
  }
}


/// isShortenable - Returns true if this instruction can be safely shortened
/// in length.
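/// For example (illustrative): a 32-byte memset whose last 16 bytes are
/// completely overwritten by a later store can be trimmed to an equivalent
/// 16-byte memset.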
static bool isShortenable(Instruction *I) {
  // Don't shorten stores for now.
  if (isa<StoreInst>(I))
    return false;

  IntrinsicInst *II = cast<IntrinsicInst>(I);
  switch (II->getIntrinsicID()) {
  default: return false;
  case Intrinsic::memset:
  case Intrinsic::memcpy:
    // Do shorten memory intrinsics.
    return true;
  }
}

/// getStoredPointerOperand - Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return MI->getDest();

  IntrinsicInst *II = cast<IntrinsicInst>(I);
  switch (II->getIntrinsicID()) {
  default: llvm_unreachable("Unexpected intrinsic!");
  case Intrinsic::init_trampoline:
    return II->getArgOperand(0);
  }
}

static uint64_t getPointerSize(const Value *V, AliasAnalysis &AA) {
  const TargetData *TD = AA.getTargetData();

  if (const CallInst *CI = extractMallocCall(V)) {
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CI->getArgOperand(0)))
      return C->getZExtValue();
  }

  if (TD == 0)
    return AliasAnalysis::UnknownSize;

  if (const AllocaInst *A = dyn_cast<AllocaInst>(V)) {
    // Get size information for the alloca.
    if (const ConstantInt *C = dyn_cast<ConstantInt>(A->getArraySize()))
      return C->getZExtValue() * TD->getTypeAllocSize(A->getAllocatedType());
  }

  if (const Argument *A = dyn_cast<Argument>(V)) {
    if (A->hasByValAttr())
      if (PointerType *PT = dyn_cast<PointerType>(A->getType()))
        return TD->getTypeAllocSize(PT->getElementType());
  }

  if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
    if (!GV->mayBeOverridden())
      return TD->getTypeAllocSize(GV->getType()->getElementType());
  }

  return AliasAnalysis::UnknownSize;
}

namespace {
  enum OverwriteResult {
    OverwriteComplete,
    OverwriteEnd,
    OverwriteUnknown
  };
}

/// isOverwrite - Return 'OverwriteComplete' if a store to the 'Later' location
/// completely overwrites a store to the 'Earlier' location, 'OverwriteEnd' if
/// the end of the 'Earlier' location is completely overwritten by 'Later', or
/// 'OverwriteUnknown' if nothing can be determined.
static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
                                   const AliasAnalysis::Location &Earlier,
                                   AliasAnalysis &AA,
                                   int64_t &EarlierOff,
                                   int64_t &LaterOff) {
  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see
  // if the later store was larger than the earlier store.
  if (P1 == P2) {
    // If we don't know the sizes of either access, then we can't do a
    // comparison.
    if (Later.Size == AliasAnalysis::UnknownSize ||
        Earlier.Size == AliasAnalysis::UnknownSize) {
      // If we have no TargetData information around, then the size of the
      // store is inferrable from the pointee type.  If they are the same
      // type, then we know that the store is safe.
      if (AA.getTargetData() == 0 &&
          Later.Ptr->getType() == Earlier.Ptr->getType())
        return OverwriteComplete;

      return OverwriteUnknown;
    }

    // Make sure that the Later size is >= the Earlier size.
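    // (Illustrative example: "store i32 0, i32* %p" followed by
    // "store i64 0, i64* %q", with %q a bitcast of %p, leaves the earlier
    // 4-byte store entirely covered by the later 8-byte store.)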
    if (Later.Size >= Earlier.Size)
      return OverwriteComplete;
  }

  // Otherwise, we have to have size information, and the later store has to
  // be larger than the earlier one.
  if (Later.Size == AliasAnalysis::UnknownSize ||
      Earlier.Size == AliasAnalysis::UnknownSize ||
      AA.getTargetData() == 0)
    return OverwriteUnknown;

  // Check to see if the later store is to the entire object (either a global,
  // an alloca, or a byval argument).  If so, then it clearly overwrites any
  // other store to the same object.
  const TargetData &TD = *AA.getTargetData();

  const Value *UO1 = GetUnderlyingObject(P1, &TD),
              *UO2 = GetUnderlyingObject(P2, &TD);

  // If we can't resolve the same pointers to the same object, then we can't
  // analyze them at all.
  if (UO1 != UO2)
    return OverwriteUnknown;

  // If the "Later" store is to a recognizable object, get its size.
  uint64_t ObjectSize = getPointerSize(UO2, AA);
  if (ObjectSize != AliasAnalysis::UnknownSize)
    if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
      return OverwriteComplete;

  // Okay, we have stores to two completely different pointers.  Try to
  // decompose the pointer into a "base + constant_offset" form.  If the base
  // pointers are equal, then we can reason about the two stores.
  EarlierOff = 0;
  LaterOff = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, TD);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, TD);

  // If the base pointers still differ, we have two completely different
  // stores.
  if (BP1 != BP2)
    return OverwriteUnknown;

  // The later store completely overlaps the earlier store if:
  //
  // 1. Both start at the same offset and the later one's size is greater than
  //    or equal to the earlier one's, or
  //
  //      |--earlier--|
  //      |--   later   --|
  //
  // 2. The earlier store has an offset greater than the later offset, but
  //    which still lies completely within the later store.
  //
  //        |--earlier--|
  //      |-----  later  ------|
  //
  // We have to be careful here as *Off is signed while *.Size is unsigned.
  if (EarlierOff >= LaterOff &&
      Later.Size >= Earlier.Size &&
      uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
    return OverwriteComplete;

  // The other interesting case is if the later store overwrites the end of
  // the earlier store.
  //
  //      |--earlier--|
  //                |--   later   --|
  //
  // In this case we may want to trim the size of earlier to avoid generating
  // writes to addresses which will definitely be overwritten later.
  if (LaterOff > EarlierOff &&
      LaterOff < int64_t(EarlierOff + Earlier.Size) &&
      int64_t(LaterOff + Later.Size) >= int64_t(EarlierOff + Earlier.Size))
    return OverwriteEnd;

  // Otherwise, they don't completely overlap.
  return OverwriteUnknown;
}

/// isPossibleSelfRead - If 'Inst' might be a self read (i.e. a noop copy of a
/// memory region into an identical pointer) then it doesn't actually make its
/// input dead in the traditional sense.  Consider this case:
///
///   memcpy(A <- B)
///   memcpy(A <- A)
///
/// In this case, the second store to A does not make the first store to A
/// dead.  The usual situation isn't an explicit A<-A store like this (which
/// can be trivially removed) but a case where two pointers may alias.
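/// For instance (illustrative), in
///
///   memcpy(A <- B)
///   memcpy(A <- C)
///
/// the second memcpy only kills the first if we can prove C and A do not
/// alias; if C may equal A, the second copy may read the very bytes the
/// first one wrote.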
///
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE-inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
                               const AliasAnalysis::Location &InstStoreLoc,
                               Instruction *DepWrite, AliasAnalysis &AA) {
  // Self reads can only happen for instructions that read memory.  Get the
  // location read.
  AliasAnalysis::Location InstReadLoc = getLocForRead(Inst, AA);
  if (InstReadLoc.Ptr == 0) return false;  // Not a reading instruction.

  // If the read and written loc obviously don't alias, it isn't a read.
  if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;

  // Okay, 'Inst' may copy over itself.  However, we can still remove the
  // DepWrite instruction if we can prove that it reads from the same location
  // as Inst.  This handles useful cases like:
  //   memcpy(A <- B)
  //   memcpy(A <- B)
  // Here we don't know if A/B may alias, but we do know that B/B are must
  // aliases, so removing the first memcpy is safe (assuming it writes <= #
  // bytes as the second one).
  AliasAnalysis::Location DepReadLoc = getLocForRead(DepWrite, AA);

  if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
    return false;

  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
  // then it can't be considered dead.
  return true;
}


//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//

bool DSE::runOnBasicBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Do a top-down walk on the BB.
  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    Instruction *Inst = BBI++;

    // Handle 'free' calls specially.
    if (CallInst *F = isFreeCall(Inst)) {
      MadeChange |= HandleFree(F);
      continue;
    }

    // If we find something that writes memory, get its memory dependence.
    if (!hasMemoryWrite(Inst))
      continue;

    MemDepResult InstDep = MD->getDependency(Inst);

    // Ignore any store where we can't find a local dependence.
    // FIXME: cross-block DSE would be fun. :)
    if (!InstDep.isDef() && !InstDep.isClobber())
      continue;

    // If we're storing the same value back to a pointer that we just
    // loaded from, then the store can be removed.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (LoadInst *DepLoad = dyn_cast<LoadInst>(InstDep.getInst())) {
        if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
            SI->getOperand(0) == DepLoad && isRemovable(SI)) {
          DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n  "
                       << "LOAD: " << *DepLoad << "\n  STORE: " << *SI << '\n');

          // DeleteDeadInstruction can delete the current instruction.  Save
          // BBI in case we need it.
          WeakVH NextInst(BBI);

          DeleteDeadInstruction(SI, *MD);

          if (NextInst == 0)  // Next instruction deleted.
            BBI = BB.begin();
          else if (BBI != BB.begin())  // Revisit this instruction if possible.
            --BBI;
          ++NumFastStores;
          MadeChange = true;
          continue;
        }
      }
    }

    // Figure out what location is being stored to.
    AliasAnalysis::Location Loc = getLocForWrite(Inst, *AA);

    // If we didn't get a useful location, fail.
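    // (For example, getLocForWrite gives up on init_trampoline when no
    // TargetData is available, since the store size is unknowable there.)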
    if (Loc.Ptr == 0)
      continue;

    while (InstDep.isDef() || InstDep.isClobber()) {
      // Get the memory clobbered by the instruction we depend on.  MemDep
      // will skip any instructions that 'Loc' clearly doesn't interact with.
      // If we end up depending on a may- or must-aliased load, then we can't
      // optimize away the store and we bail out.  However, if we depend on
      // something that overwrites the memory location we *can* potentially
      // optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      AliasAnalysis::Location DepLoc = getLocForWrite(DepWrite, *AA);
      // If we didn't get a useful location, bail out.
      if (DepLoc.Ptr == 0)
        break;

      // If we find a write that is a) removable (i.e., non-volatile), b) is
      // completely obliterated by the store to 'Loc', and c) which we know
      // that 'Inst' doesn't load from, then we can remove it.
      if (isRemovable(DepWrite) &&
          !isPossibleSelfRead(Inst, Loc, DepWrite, *AA)) {
        int64_t InstWriteOffset, DepWriteOffset;
        OverwriteResult OR = isOverwrite(Loc, DepLoc, *AA,
                                         DepWriteOffset, InstWriteOffset);
        if (OR == OverwriteComplete) {
          DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
                       << *DepWrite << "\n  KILLER: " << *Inst << '\n');

          // Delete the store and now-dead instructions that feed it.
          DeleteDeadInstruction(DepWrite, *MD);
          ++NumFastStores;
          MadeChange = true;

          // DeleteDeadInstruction can delete the current instruction in loop
          // cases, reset BBI.
          BBI = Inst;
          if (BBI != BB.begin())
            --BBI;
          break;
        } else if (OR == OverwriteEnd && isShortenable(DepWrite)) {
          // TODO: Base this on the target vector size so that if the earlier
          // store was too small to get vector writes anyway then it's likely
          // a good idea to shorten it.
          // Power-of-2 vector writes are probably always a bad idea to
          // optimize, as any store/memset/memcpy is likely using vector
          // instructions, so shortening it below the vector size is likely
          // to be slower.
          MemIntrinsic* DepIntrinsic = cast<MemIntrinsic>(DepWrite);
          unsigned DepWriteAlign = DepIntrinsic->getAlignment();
          if (llvm::isPowerOf2_64(InstWriteOffset) ||
              ((DepWriteAlign != 0) && InstWriteOffset % DepWriteAlign == 0)) {

            DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW END: "
                         << *DepWrite << "\n  KILLER (offset "
                         << InstWriteOffset << ", "
                         << DepLoc.Size << ")"
                         << *Inst << '\n');

            Value* DepWriteLength = DepIntrinsic->getLength();
            Value* TrimmedLength = ConstantInt::get(DepWriteLength->getType(),
                                                    InstWriteOffset -
                                                    DepWriteOffset);
            DepIntrinsic->setLength(TrimmedLength);
            MadeChange = true;
          }
        }
      }

      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that
      // stores to the same location.  For example, in:
      //   store -> P
      //   store -> Q
      //   store -> P
      // we can remove the first store to P even though we don't know if P and
      // Q alias.
      if (DepWrite == &BB.front()) break;

      // Can't look past this instruction if it might read 'Loc'.
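      // (Illustrative: a memcpy whose source may alias 'Loc' reads the very
      // bytes our store would kill, so the backwards scan must stop here.)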
      if (AA->getModRefInfo(DepWrite, Loc) & AliasAnalysis::Ref)
        break;

      InstDep = MD->getPointerDependencyFrom(Loc, false, DepWrite, &BB);
    }
  }

  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB);

  return MadeChange;
}

/// FindUnconditionalPreds - Find all blocks that will unconditionally lead to
/// the block BB and append them to Blocks.
static void FindUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
                                   BasicBlock *BB, DominatorTree *DT) {
  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
    BasicBlock *Pred = *I;
    if (Pred == BB) continue;
    TerminatorInst *PredTI = Pred->getTerminator();
    if (PredTI->getNumSuccessors() != 1)
      continue;

    if (DT->isReachableFromEntry(Pred))
      Blocks.push_back(Pred);
  }
}

/// HandleFree - Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
bool DSE::HandleFree(CallInst *F) {
  bool MadeChange = false;

  AliasAnalysis::Location Loc = AliasAnalysis::Location(F->getOperand(0));
  SmallVector<BasicBlock *, 16> Blocks;
  Blocks.push_back(F->getParent());

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.pop_back_val();
    Instruction *InstPt = BB->getTerminator();
    if (BB == F->getParent()) InstPt = F;

    MemDepResult Dep = MD->getPointerDependencyFrom(Loc, false, InstPt, BB);
    while (Dep.isDef() || Dep.isClobber()) {
      Instruction *Dependency = Dep.getInst();
      if (!hasMemoryWrite(Dependency) || !isRemovable(Dependency))
        break;

      Value *DepPointer =
        GetUnderlyingObject(getStoredPointerOperand(Dependency));

      // Check for aliasing.
      if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
        break;

      Instruction *Next = llvm::next(BasicBlock::iterator(Dependency));

      // DCE instructions only used to calculate that store.
      DeleteDeadInstruction(Dependency, *MD);
      ++NumFastStores;
      MadeChange = true;

      // Inst's old Dependency is now deleted.  Compute the next dependency,
      // which may also be dead, as in
      //   s[0] = 0;
      //   s[1] = 0;  // This has just been deleted.
      //   free(s);
      Dep = MD->getPointerDependencyFrom(Loc, false, Next, BB);
    }

    if (Dep.isNonLocal())
      FindUnconditionalPreds(Blocks, BB, DT);
  }

  return MadeChange;
}

/// handleEndBlock - Remove dead stores to stack-allocated locations in the
/// function end block.  Ex:
///   %A = alloca i32
///   ...
///   store i32 1, i32* %A
///   ret void
bool DSE::handleEndBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallPtrSet<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock *Entry = BB.getParent()->begin();
  for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      DeadStackObjects.insert(AI);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyway.
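    // (Illustrative: a malloc'd buffer that never escapes and is never freed
    // is unreachable after the return, so stores into it can never be
    // observed.)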
    if (CallInst *CI = extractMallocCall(I))
      if (!PointerMayBeCaptured(CI, true, true))
        DeadStackObjects.insert(CI);
  }

  // Treat byval arguments the same; stores to them are dead at the end of the
  // function.
  for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
       AE = BB.getParent()->arg_end(); AI != AE; ++AI)
    if (AI->hasByValAttr())
      DeadStackObjects.insert(AI);

  // Scan the basic block backwards.
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ) {
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasMemoryWrite(BBI) && isRemovable(BBI)) {
      // See through pointer-to-pointer bitcasts.
      Value *Pointer = GetUnderlyingObject(getStoredPointerOperand(BBI));

      // Stores to stack values are valid candidates for removal.
      if (DeadStackObjects.count(Pointer)) {
        Instruction *Dead = BBI++;

        DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                     << *Dead << "\n  Object: " << *Pointer << '\n');

        // DCE instructions only used to calculate that store.
        DeleteDeadInstruction(Dead, *MD, &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(BBI)) {
      Instruction *Inst = BBI++;
      DeleteDeadInstruction(Inst, *MD, &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (AllocaInst *A = dyn_cast<AllocaInst>(BBI)) {
      DeadStackObjects.erase(A);
      continue;
    }

    if (CallInst *CI = extractMallocCall(BBI)) {
      DeadStackObjects.erase(CI);
      continue;
    }

    if (CallSite CS = cast<Value>(BBI)) {
      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      SmallVector<Value*, 8> LiveAllocas;
      for (SmallPtrSet<Value*, 16>::iterator I = DeadStackObjects.begin(),
           E = DeadStackObjects.end(); I != E; ++I) {
        // See if the call site touches it.
        AliasAnalysis::ModRefResult A =
          AA->getModRefInfo(CS, *I, getPointerSize(*I, *AA));

        if (A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref)
          LiveAllocas.push_back(*I);
      }

      for (SmallVector<Value*, 8>::iterator I = LiveAllocas.begin(),
           E = LiveAllocas.end(); I != E; ++I)
        DeadStackObjects.erase(*I);

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        return MadeChange;

      continue;
    }

    AliasAnalysis::Location LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead.
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered())  // Be conservative with atomic/volatile load.
        break;
      LoadedLoc = AA->getLocation(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = AA->getLocation(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      LoadedLoc = AA->getLocationForSource(MTI);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory.  Note that stores that weren't
      // removed above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
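      // (Illustrative: a cmpxchg or atomicrmw reaches this point; either may
      // read from any of the tracked objects, so give up on the rest of the
      // block.)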
      break;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    RemoveAccessedObjects(LoadedLoc, DeadStackObjects);

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}

/// RemoveAccessedObjects - Check to see if the specified location may alias
/// any of the stack objects in the DeadStackObjects set.  If so, they become
/// live because the location is being loaded.
void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
                                SmallPtrSet<Value*, 16> &DeadStackObjects) {
  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr);

  // A constant can't be in the dead pointer set.
  if (isa<Constant>(UnderlyingPointer))
    return;

  // If the kill pointer can be easily reduced to an alloca, don't bother
  // doing extraneous AA queries.
  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    DeadStackObjects.erase(const_cast<Value*>(UnderlyingPointer));
    return;
  }

  SmallVector<Value*, 16> NowLive;
  for (SmallPtrSet<Value*, 16>::iterator I = DeadStackObjects.begin(),
       E = DeadStackObjects.end(); I != E; ++I) {
    // See if the loaded location could alias the stack location.
    AliasAnalysis::Location StackLoc(*I, getPointerSize(*I, *AA));
    if (!AA->isNoAlias(StackLoc, LoadedLoc))
      NowLive.push_back(*I);
  }

  for (SmallVector<Value*, 16>::iterator I = NowLive.begin(),
       E = NowLive.end(); I != E; ++I)
    DeadStackObjects.erase(*I);
}