      1 //===- DeadStoreElimination.cpp - Fast Dead Store Elimination -------------===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This file implements a trivial dead store elimination that only considers
     11 // basic-block local redundant stores.
     12 //
     13 // FIXME: This should eventually be extended to be a post-dominator tree
     14 // traversal.  Doing so would be pretty trivial.
     15 //
     16 //===----------------------------------------------------------------------===//
     17 
     18 #define DEBUG_TYPE "dse"
     19 #include "llvm/Transforms/Scalar.h"
     20 #include "llvm/ADT/STLExtras.h"
     21 #include "llvm/ADT/SetVector.h"
     22 #include "llvm/ADT/Statistic.h"
     23 #include "llvm/Analysis/AliasAnalysis.h"
     24 #include "llvm/Analysis/CaptureTracking.h"
     25 #include "llvm/Analysis/Dominators.h"
     26 #include "llvm/Analysis/MemoryBuiltins.h"
     27 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
     28 #include "llvm/Analysis/ValueTracking.h"
     29 #include "llvm/IR/Constants.h"
     30 #include "llvm/IR/DataLayout.h"
     31 #include "llvm/IR/Function.h"
     32 #include "llvm/IR/GlobalVariable.h"
     33 #include "llvm/IR/Instructions.h"
     34 #include "llvm/IR/IntrinsicInst.h"
     35 #include "llvm/Pass.h"
     36 #include "llvm/Support/Debug.h"
     37 #include "llvm/Target/TargetLibraryInfo.h"
     38 #include "llvm/Transforms/Utils/Local.h"
     39 using namespace llvm;
     40 
     41 STATISTIC(NumFastStores, "Number of stores deleted");
     42 STATISTIC(NumFastOther , "Number of other instrs removed");
     43 
     44 namespace {
     45   struct DSE : public FunctionPass {
     46     AliasAnalysis *AA;
     47     MemoryDependenceAnalysis *MD;
     48     DominatorTree *DT;
     49     const TargetLibraryInfo *TLI;
     50 
     51     static char ID; // Pass identification, replacement for typeid
     52     DSE() : FunctionPass(ID), AA(0), MD(0), DT(0) {
     53       initializeDSEPass(*PassRegistry::getPassRegistry());
     54     }
     55 
     56     virtual bool runOnFunction(Function &F) {
     57       AA = &getAnalysis<AliasAnalysis>();
     58       MD = &getAnalysis<MemoryDependenceAnalysis>();
     59       DT = &getAnalysis<DominatorTree>();
     60       TLI = AA->getTargetLibraryInfo();
     61 
     62       bool Changed = false;
     63       for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
     64         // Only check non-dead blocks.  Dead blocks may have strange pointer
     65         // cycles that will confuse alias analysis.
     66         if (DT->isReachableFromEntry(I))
     67           Changed |= runOnBasicBlock(*I);
     68 
     69       AA = 0; MD = 0; DT = 0;
     70       return Changed;
     71     }
     72 
     73     bool runOnBasicBlock(BasicBlock &BB);
     74     bool HandleFree(CallInst *F);
     75     bool handleEndBlock(BasicBlock &BB);
     76     void RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
     77                                SmallSetVector<Value*, 16> &DeadStackObjects);
     78 
     79     virtual void getAnalysisUsage(AnalysisUsage &AU) const {
     80       AU.setPreservesCFG();
     81       AU.addRequired<DominatorTree>();
     82       AU.addRequired<AliasAnalysis>();
     83       AU.addRequired<MemoryDependenceAnalysis>();
     84       AU.addPreserved<AliasAnalysis>();
     85       AU.addPreserved<DominatorTree>();
     86       AU.addPreserved<MemoryDependenceAnalysis>();
     87     }
     88   };
     89 }
     90 
     91 char DSE::ID = 0;
     92 INITIALIZE_PASS_BEGIN(DSE, "dse", "Dead Store Elimination", false, false)
     93 INITIALIZE_PASS_DEPENDENCY(DominatorTree)
     94 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
     95 INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
     96 INITIALIZE_PASS_END(DSE, "dse", "Dead Store Elimination", false, false)
     97 
     98 FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }
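         // This pass is registered under the name "dse", so (with the legacy pass
         // manager of this era) it can be exercised in isolation via `opt -dse`.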
     99 
    100 //===----------------------------------------------------------------------===//
    101 // Helper functions
    102 //===----------------------------------------------------------------------===//
    103 
    104 /// DeleteDeadInstruction - Delete this instruction.  Before we do, go through
    105 /// and zero out all the operands of this instruction.  If any of them become
    106 /// dead, delete them and the computation tree that feeds them.
    107 ///
    108 /// If ValueSet is non-null, remove any deleted instructions from it as well.
    109 ///
    110 static void DeleteDeadInstruction(Instruction *I,
    111                                   MemoryDependenceAnalysis &MD,
    112                                   const TargetLibraryInfo *TLI,
    113                                   SmallSetVector<Value*, 16> *ValueSet = 0) {
    114   SmallVector<Instruction*, 32> NowDeadInsts;
    115 
    116   NowDeadInsts.push_back(I);
    117   --NumFastOther;
    118 
    119   // Before we touch this instruction, remove it from memdep!
    120   do {
    121     Instruction *DeadInst = NowDeadInsts.pop_back_val();
    122     ++NumFastOther;
    123 
    124     // This instruction is dead, zap it, in stages.  Start by removing it from
    125     // MemDep, which needs to know the operands and needs it to be in the
    126     // function.
    127     MD.removeInstruction(DeadInst);
    128 
    129     for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
    130       Value *Op = DeadInst->getOperand(op);
    131       DeadInst->setOperand(op, 0);
    132 
    133       // If this operand just became dead, add it to the NowDeadInsts list.
    134       if (!Op->use_empty()) continue;
    135 
    136       if (Instruction *OpI = dyn_cast<Instruction>(Op))
    137         if (isInstructionTriviallyDead(OpI, TLI))
    138           NowDeadInsts.push_back(OpI);
    139     }
    140 
    141     DeadInst->eraseFromParent();
    142 
    143     if (ValueSet) ValueSet->remove(DeadInst);
    144   } while (!NowDeadInsts.empty());
    145 }
    146 
    147 
    148 /// hasMemoryWrite - Does this instruction write some memory?  This only returns
    149 /// true for things that we can analyze with other helpers below.
    150 static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo *TLI) {
    151   if (isa<StoreInst>(I))
    152     return true;
    153   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    154     switch (II->getIntrinsicID()) {
    155     default:
    156       return false;
    157     case Intrinsic::memset:
    158     case Intrinsic::memmove:
    159     case Intrinsic::memcpy:
    160     case Intrinsic::init_trampoline:
    161     case Intrinsic::lifetime_end:
    162       return true;
    163     }
    164   }
    165   if (CallSite CS = I) {
    166     if (Function *F = CS.getCalledFunction()) {
    167       if (TLI && TLI->has(LibFunc::strcpy) &&
    168           F->getName() == TLI->getName(LibFunc::strcpy)) {
    169         return true;
    170       }
    171       if (TLI && TLI->has(LibFunc::strncpy) &&
    172           F->getName() == TLI->getName(LibFunc::strncpy)) {
    173         return true;
    174       }
    175       if (TLI && TLI->has(LibFunc::strcat) &&
    176           F->getName() == TLI->getName(LibFunc::strcat)) {
    177         return true;
    178       }
    179       if (TLI && TLI->has(LibFunc::strncat) &&
    180           F->getName() == TLI->getName(LibFunc::strncat)) {
    181         return true;
    182       }
    183     }
    184   }
    185   return false;
    186 }
    187 
    188 /// getLocForWrite - Return a Location stored to by the specified instruction.
    189 /// If isRemovable returns true, this function and getLocForRead completely
    190 /// describe the memory operations for this instruction.
    191 static AliasAnalysis::Location
    192 getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
    193   if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    194     return AA.getLocation(SI);
    195 
    196   if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
    197     // memcpy/memmove/memset.
    198     AliasAnalysis::Location Loc = AA.getLocationForDest(MI);
    199     // If we don't have target data around, an unknown size in Location means
    200     // that we should use the size of the pointee type.  This isn't valid for
     201     // memset/memcpy, which write more than an i8.
    202     if (Loc.Size == AliasAnalysis::UnknownSize && AA.getDataLayout() == 0)
    203       return AliasAnalysis::Location();
    204     return Loc;
    205   }
    206 
    207   IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
    208   if (II == 0) return AliasAnalysis::Location();
    209 
    210   switch (II->getIntrinsicID()) {
    211   default: return AliasAnalysis::Location(); // Unhandled intrinsic.
    212   case Intrinsic::init_trampoline:
    213     // If we don't have target data around, an unknown size in Location means
    214     // that we should use the size of the pointee type.  This isn't valid for
    215     // init.trampoline, which writes more than an i8.
    216     if (AA.getDataLayout() == 0) return AliasAnalysis::Location();
    217 
    218     // FIXME: We don't know the size of the trampoline, so we can't really
    219     // handle it here.
    220     return AliasAnalysis::Location(II->getArgOperand(0));
    221   case Intrinsic::lifetime_end: {
    222     uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
    223     return AliasAnalysis::Location(II->getArgOperand(1), Len);
    224   }
    225   }
    226 }
    227 
    228 /// getLocForRead - Return the location read by the specified "hasMemoryWrite"
    229 /// instruction if any.
    230 static AliasAnalysis::Location
    231 getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
    232   assert(hasMemoryWrite(Inst, AA.getTargetLibraryInfo()) &&
    233          "Unknown instruction case");
    234 
    235   // The only instructions that both read and write are the mem transfer
    236   // instructions (memcpy/memmove).
    237   if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
    238     return AA.getLocationForSource(MTI);
    239   return AliasAnalysis::Location();
    240 }
    241 
    242 
    243 /// isRemovable - If the value of this instruction and the memory it writes to
    244 /// is unused, may we delete this instruction?
    245 static bool isRemovable(Instruction *I) {
    246   // Don't remove volatile/atomic stores.
    247   if (StoreInst *SI = dyn_cast<StoreInst>(I))
    248     return SI->isUnordered();
    249 
    250   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    251     switch (II->getIntrinsicID()) {
    252     default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate");
    253     case Intrinsic::lifetime_end:
     254       // Never remove a dead lifetime_end, e.g. because it may be followed by a
     255       // free.
    256       return false;
    257     case Intrinsic::init_trampoline:
    258       // Always safe to remove init_trampoline.
    259       return true;
    260 
    261     case Intrinsic::memset:
    262     case Intrinsic::memmove:
    263     case Intrinsic::memcpy:
    264       // Don't remove volatile memory intrinsics.
    265       return !cast<MemIntrinsic>(II)->isVolatile();
    266     }
    267   }
    268 
    269   if (CallSite CS = I)
    270     return CS.getInstruction()->use_empty();
    271 
    272   return false;
    273 }
    274 
    275 
    276 /// isShortenable - Returns true if this instruction can be safely shortened in
    277 /// length.
    278 static bool isShortenable(Instruction *I) {
    279   // Don't shorten stores for now
    280   if (isa<StoreInst>(I))
    281     return false;
    282 
    283   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    284     switch (II->getIntrinsicID()) {
    285       default: return false;
    286       case Intrinsic::memset:
    287       case Intrinsic::memcpy:
    288         // Do shorten memory intrinsics.
    289         return true;
    290     }
    291   }
    292 
     293   // Don't shorten library calls for now.
    294 
    295   return false;
    296 }
    297 
    298 /// getStoredPointerOperand - Return the pointer that is being written to.
    299 static Value *getStoredPointerOperand(Instruction *I) {
    300   if (StoreInst *SI = dyn_cast<StoreInst>(I))
    301     return SI->getPointerOperand();
    302   if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    303     return MI->getDest();
    304 
    305   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    306     switch (II->getIntrinsicID()) {
    307     default: llvm_unreachable("Unexpected intrinsic!");
    308     case Intrinsic::init_trampoline:
    309       return II->getArgOperand(0);
    310     }
    311   }
    312 
    313   CallSite CS = I;
    314   // All the supported functions so far happen to have dest as their first
    315   // argument.
    316   return CS.getArgument(0);
    317 }
    318 
    319 static uint64_t getPointerSize(const Value *V, AliasAnalysis &AA) {
    320   uint64_t Size;
    321   if (getObjectSize(V, Size, AA.getDataLayout(), AA.getTargetLibraryInfo()))
    322     return Size;
    323   return AliasAnalysis::UnknownSize;
    324 }
    325 
    326 namespace {
    327   enum OverwriteResult
    328   {
    329     OverwriteComplete,
    330     OverwriteEnd,
    331     OverwriteUnknown
    332   };
    333 }
    334 
     335 /// isOverwrite - Return 'OverwriteComplete' if a store to the 'Later' location
     336 /// completely overwrites a store to the 'Earlier' location, 'OverwriteEnd' if
     337 /// the end of the 'Earlier' location is completely overwritten by 'Later', or
     338 /// 'OverwriteUnknown' if nothing can be determined.
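         /// For illustration (hypothetical IR; assumes DataLayout is available so
         /// the access sizes are known):
         ///   store i16 0, i16* %p        ; Earlier
         ///   store i32 0, i32* %q        ; Later, where %q is %p bitcast to i32*
         /// The later 4-byte store covers the earlier 2-byte store, so the result
         /// is OverwriteComplete.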
    339 static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
    340                                    const AliasAnalysis::Location &Earlier,
    341                                    AliasAnalysis &AA,
    342                                    int64_t &EarlierOff,
    343                                    int64_t &LaterOff) {
    344   const Value *P1 = Earlier.Ptr->stripPointerCasts();
    345   const Value *P2 = Later.Ptr->stripPointerCasts();
    346 
    347   // If the start pointers are the same, we just have to compare sizes to see if
    348   // the later store was larger than the earlier store.
    349   if (P1 == P2) {
    350     // If we don't know the sizes of either access, then we can't do a
    351     // comparison.
    352     if (Later.Size == AliasAnalysis::UnknownSize ||
    353         Earlier.Size == AliasAnalysis::UnknownSize) {
     354       // If we have no DataLayout information around, then the size of the store
     355       // is inferrable from the pointee type.  If both pointers have the same
     356       // type, then we know the later store completely covers the earlier one.
    357       if (AA.getDataLayout() == 0 &&
    358           Later.Ptr->getType() == Earlier.Ptr->getType())
    359         return OverwriteComplete;
    360 
    361       return OverwriteUnknown;
    362     }
    363 
    364     // Make sure that the Later size is >= the Earlier size.
    365     if (Later.Size >= Earlier.Size)
    366       return OverwriteComplete;
    367   }
    368 
    369   // Otherwise, we have to have size information, and the later store has to be
    370   // larger than the earlier one.
    371   if (Later.Size == AliasAnalysis::UnknownSize ||
    372       Earlier.Size == AliasAnalysis::UnknownSize ||
    373       AA.getDataLayout() == 0)
    374     return OverwriteUnknown;
    375 
    376   // Check to see if the later store is to the entire object (either a global,
    377   // an alloca, or a byval argument).  If so, then it clearly overwrites any
    378   // other store to the same object.
    379   const DataLayout *TD = AA.getDataLayout();
    380 
    381   const Value *UO1 = GetUnderlyingObject(P1, TD),
    382               *UO2 = GetUnderlyingObject(P2, TD);
    383 
    384   // If we can't resolve the same pointers to the same object, then we can't
    385   // analyze them at all.
    386   if (UO1 != UO2)
    387     return OverwriteUnknown;
    388 
    389   // If the "Later" store is to a recognizable object, get its size.
    390   uint64_t ObjectSize = getPointerSize(UO2, AA);
    391   if (ObjectSize != AliasAnalysis::UnknownSize)
    392     if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
    393       return OverwriteComplete;
    394 
    395   // Okay, we have stores to two completely different pointers.  Try to
    396   // decompose the pointer into a "base + constant_offset" form.  If the base
    397   // pointers are equal, then we can reason about the two stores.
    398   EarlierOff = 0;
    399   LaterOff = 0;
    400   const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, TD);
    401   const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, TD);
    402 
    403   // If the base pointers still differ, we have two completely different stores.
    404   if (BP1 != BP2)
    405     return OverwriteUnknown;
    406 
    407   // The later store completely overlaps the earlier store if:
    408   //
    409   // 1. Both start at the same offset and the later one's size is greater than
    410   //    or equal to the earlier one's, or
    411   //
    412   //      |--earlier--|
    413   //      |--   later   --|
    414   //
    415   // 2. The earlier store has an offset greater than the later offset, but which
    416   //    still lies completely within the later store.
    417   //
    418   //        |--earlier--|
    419   //    |-----  later  ------|
    420   //
    421   // We have to be careful here as *Off is signed while *.Size is unsigned.
    422   if (EarlierOff >= LaterOff &&
    423       Later.Size >= Earlier.Size &&
    424       uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
    425     return OverwriteComplete;
    426 
    427   // The other interesting case is if the later store overwrites the end of
    428   // the earlier store
    429   //
    430   //      |--earlier--|
    431   //                |--   later   --|
    432   //
     433   // In this case we may want to trim the size of the earlier store to avoid
     434   // generating writes to addresses which will definitely be overwritten later.
    435   if (LaterOff > EarlierOff &&
    436       LaterOff < int64_t(EarlierOff + Earlier.Size) &&
    437       int64_t(LaterOff + Later.Size) >= int64_t(EarlierOff + Earlier.Size))
    438     return OverwriteEnd;
    439 
    440   // Otherwise, they don't completely overlap.
    441   return OverwriteUnknown;
    442 }
    443 
    444 /// isPossibleSelfRead - If 'Inst' might be a self read (i.e. a noop copy of a
    445 /// memory region into an identical pointer) then it doesn't actually make its
    446 /// input dead in the traditional sense.  Consider this case:
    447 ///
    448 ///   memcpy(A <- B)
    449 ///   memcpy(A <- A)
    450 ///
    451 /// In this case, the second store to A does not make the first store to A dead.
    452 /// The usual situation isn't an explicit A<-A store like this (which can be
    453 /// trivially removed) but a case where two pointers may alias.
    454 ///
    455 /// This function detects when it is unsafe to remove a dependent instruction
    456 /// because the DSE inducing instruction may be a self-read.
    457 static bool isPossibleSelfRead(Instruction *Inst,
    458                                const AliasAnalysis::Location &InstStoreLoc,
    459                                Instruction *DepWrite, AliasAnalysis &AA) {
    460   // Self reads can only happen for instructions that read memory.  Get the
    461   // location read.
    462   AliasAnalysis::Location InstReadLoc = getLocForRead(Inst, AA);
    463   if (InstReadLoc.Ptr == 0) return false;  // Not a reading instruction.
    464 
    465   // If the read and written loc obviously don't alias, it isn't a read.
    466   if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;
    467 
     468   // Okay, 'Inst' may copy over itself.  However, we can still remove the
    469   // DepWrite instruction if we can prove that it reads from the same location
    470   // as Inst.  This handles useful cases like:
    471   //   memcpy(A <- B)
    472   //   memcpy(A <- B)
    473   // Here we don't know if A/B may alias, but we do know that B/B are must
     474   // aliases, so removing the first memcpy is safe (assuming it writes no more
     475   // bytes than the second one does).
    476   AliasAnalysis::Location DepReadLoc = getLocForRead(DepWrite, AA);
    477 
    478   if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
    479     return false;
    480 
    481   // If DepWrite doesn't read memory or if we can't prove it is a must alias,
    482   // then it can't be considered dead.
    483   return true;
    484 }
    485 
    486 
    487 //===----------------------------------------------------------------------===//
    488 // DSE Pass
    489 //===----------------------------------------------------------------------===//
    490 
    491 bool DSE::runOnBasicBlock(BasicBlock &BB) {
    492   bool MadeChange = false;
    493 
    494   // Do a top-down walk on the BB.
    495   for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    496     Instruction *Inst = BBI++;
    497 
    498     // Handle 'free' calls specially.
    499     if (CallInst *F = isFreeCall(Inst, TLI)) {
    500       MadeChange |= HandleFree(F);
    501       continue;
    502     }
    503 
    504     // If we find something that writes memory, get its memory dependence.
    505     if (!hasMemoryWrite(Inst, TLI))
    506       continue;
    507 
    508     MemDepResult InstDep = MD->getDependency(Inst);
    509 
    510     // Ignore any store where we can't find a local dependence.
    511     // FIXME: cross-block DSE would be fun. :)
    512     if (!InstDep.isDef() && !InstDep.isClobber())
    513       continue;
    514 
    515     // If we're storing the same value back to a pointer that we just
    516     // loaded from, then the store can be removed.
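             // For example (hypothetical IR):
             //   %v = load i32* %p
             //   store i32 %v, i32* %p   ; dead: it stores back the value just loaded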
    517     if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    518       if (LoadInst *DepLoad = dyn_cast<LoadInst>(InstDep.getInst())) {
    519         if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
    520             SI->getOperand(0) == DepLoad && isRemovable(SI)) {
    521           DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n  "
    522                        << "LOAD: " << *DepLoad << "\n  STORE: " << *SI << '\n');
    523 
    524           // DeleteDeadInstruction can delete the current instruction.  Save BBI
    525           // in case we need it.
    526           WeakVH NextInst(BBI);
    527 
    528           DeleteDeadInstruction(SI, *MD, TLI);
    529 
    530           if (NextInst == 0)  // Next instruction deleted.
    531             BBI = BB.begin();
    532           else if (BBI != BB.begin())  // Revisit this instruction if possible.
    533             --BBI;
    534           ++NumFastStores;
    535           MadeChange = true;
    536           continue;
    537         }
    538       }
    539     }
    540 
    541     // Figure out what location is being stored to.
    542     AliasAnalysis::Location Loc = getLocForWrite(Inst, *AA);
    543 
    544     // If we didn't get a useful location, fail.
    545     if (Loc.Ptr == 0)
    546       continue;
    547 
    548     while (InstDep.isDef() || InstDep.isClobber()) {
    549       // Get the memory clobbered by the instruction we depend on.  MemDep will
    550       // skip any instructions that 'Loc' clearly doesn't interact with.  If we
    551       // end up depending on a may- or must-aliased load, then we can't optimize
     552       // away the store and we bail out.  However, if we depend on something
     553       // that overwrites the memory location, we *can* potentially optimize it.
    554       //
    555       // Find out what memory location the dependent instruction stores.
    556       Instruction *DepWrite = InstDep.getInst();
    557       AliasAnalysis::Location DepLoc = getLocForWrite(DepWrite, *AA);
     558       // If we didn't get a useful location for the dependent write, bail out.
    559       if (DepLoc.Ptr == 0)
    560         break;
    561 
     562       // If we find a write that is a) removable (i.e., non-volatile), b)
     563       // completely obliterated by the store to 'Loc', and c) not loaded from
     564       // by 'Inst', then we can remove it.
    565       if (isRemovable(DepWrite) &&
    566           !isPossibleSelfRead(Inst, Loc, DepWrite, *AA)) {
    567         int64_t InstWriteOffset, DepWriteOffset;
    568         OverwriteResult OR = isOverwrite(Loc, DepLoc, *AA,
    569                                          DepWriteOffset, InstWriteOffset);
    570         if (OR == OverwriteComplete) {
    571           DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
    572                 << *DepWrite << "\n  KILLER: " << *Inst << '\n');
    573 
    574           // Delete the store and now-dead instructions that feed it.
    575           DeleteDeadInstruction(DepWrite, *MD, TLI);
    576           ++NumFastStores;
    577           MadeChange = true;
    578 
    579           // DeleteDeadInstruction can delete the current instruction in loop
    580           // cases, reset BBI.
    581           BBI = Inst;
    582           if (BBI != BB.begin())
    583             --BBI;
    584           break;
    585         } else if (OR == OverwriteEnd && isShortenable(DepWrite)) {
     586           // TODO: Base this on the target vector size so that if the earlier
     587           // store was too small to get vector writes anyway, then it's likely a
     588           // good idea to shorten it.
     589           // Shortening power-of-2-sized writes is probably always a bad idea:
     590           // any such store/memset/memcpy likely uses vector instructions, so
     591           // trimming it to a non-vector size is likely to be slower.
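                   // For example (hypothetical sizes): if the earlier write is a
                   // 32-byte memset and the later store covers its last 16 bytes,
                   // the memset length is trimmed down to 16, which is exactly
                   // InstWriteOffset - DepWriteOffset below.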
    592           MemIntrinsic* DepIntrinsic = cast<MemIntrinsic>(DepWrite);
    593           unsigned DepWriteAlign = DepIntrinsic->getAlignment();
    594           if (llvm::isPowerOf2_64(InstWriteOffset) ||
    595               ((DepWriteAlign != 0) && InstWriteOffset % DepWriteAlign == 0)) {
    596 
    597             DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW END: "
    598                   << *DepWrite << "\n  KILLER (offset "
    599                   << InstWriteOffset << ", "
    600                   << DepLoc.Size << ")"
    601                   << *Inst << '\n');
    602 
    603             Value* DepWriteLength = DepIntrinsic->getLength();
    604             Value* TrimmedLength = ConstantInt::get(DepWriteLength->getType(),
    605                                                     InstWriteOffset -
    606                                                     DepWriteOffset);
    607             DepIntrinsic->setLength(TrimmedLength);
    608             MadeChange = true;
    609           }
    610         }
    611       }
    612 
    613       // If this is a may-aliased store that is clobbering the store value, we
    614       // can keep searching past it for another must-aliased pointer that stores
    615       // to the same location.  For example, in:
    616       //   store -> P
    617       //   store -> Q
    618       //   store -> P
    619       // we can remove the first store to P even though we don't know if P and Q
    620       // alias.
    621       if (DepWrite == &BB.front()) break;
    622 
    623       // Can't look past this instruction if it might read 'Loc'.
    624       if (AA->getModRefInfo(DepWrite, Loc) & AliasAnalysis::Ref)
    625         break;
    626 
    627       InstDep = MD->getPointerDependencyFrom(Loc, false, DepWrite, &BB);
    628     }
    629   }
    630 
    631   // If this block ends in a return, unwind, or unreachable, all allocas are
    632   // dead at its end, which means stores to them are also dead.
    633   if (BB.getTerminator()->getNumSuccessors() == 0)
    634     MadeChange |= handleEndBlock(BB);
    635 
    636   return MadeChange;
    637 }
    638 
    639 /// Find all blocks that will unconditionally lead to the block BB and append
     640 /// them to Blocks.
    641 static void FindUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
    642                                    BasicBlock *BB, DominatorTree *DT) {
    643   for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
    644     BasicBlock *Pred = *I;
    645     if (Pred == BB) continue;
    646     TerminatorInst *PredTI = Pred->getTerminator();
    647     if (PredTI->getNumSuccessors() != 1)
    648       continue;
    649 
    650     if (DT->isReachableFromEntry(Pred))
    651       Blocks.push_back(Pred);
    652   }
    653 }
    654 
    655 /// HandleFree - Handle frees of entire structures whose dependency is a store
    656 /// to a field of that structure.
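         /// For example (hypothetical IR):
         ///   %f = getelementptr inbounds %struct.S* %s, i32 0, i32 1
         ///   store i32 0, i32* %f       ; dead: the whole object is freed below
         ///   call void @free(i8* %p)    ; %p is %s cast to i8*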
    657 bool DSE::HandleFree(CallInst *F) {
    658   bool MadeChange = false;
    659 
    660   AliasAnalysis::Location Loc = AliasAnalysis::Location(F->getOperand(0));
    661   SmallVector<BasicBlock *, 16> Blocks;
    662   Blocks.push_back(F->getParent());
    663 
    664   while (!Blocks.empty()) {
    665     BasicBlock *BB = Blocks.pop_back_val();
    666     Instruction *InstPt = BB->getTerminator();
    667     if (BB == F->getParent()) InstPt = F;
    668 
    669     MemDepResult Dep = MD->getPointerDependencyFrom(Loc, false, InstPt, BB);
    670     while (Dep.isDef() || Dep.isClobber()) {
    671       Instruction *Dependency = Dep.getInst();
    672       if (!hasMemoryWrite(Dependency, TLI) || !isRemovable(Dependency))
    673         break;
    674 
    675       Value *DepPointer =
    676         GetUnderlyingObject(getStoredPointerOperand(Dependency));
    677 
    678       // Check for aliasing.
    679       if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
    680         break;
    681 
    682       Instruction *Next = llvm::next(BasicBlock::iterator(Dependency));
    683 
     684       // DCE instructions only used to calculate that store.
    685       DeleteDeadInstruction(Dependency, *MD, TLI);
    686       ++NumFastStores;
    687       MadeChange = true;
    688 
    689       // Inst's old Dependency is now deleted. Compute the next dependency,
    690       // which may also be dead, as in
    691       //    s[0] = 0;
    692       //    s[1] = 0; // This has just been deleted.
    693       //    free(s);
    694       Dep = MD->getPointerDependencyFrom(Loc, false, Next, BB);
    695     }
    696 
    697     if (Dep.isNonLocal())
    698       FindUnconditionalPreds(Blocks, BB, DT);
    699   }
    700 
    701   return MadeChange;
    702 }
    703 
    704 namespace {
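           // Predicate for SmallSetVector::remove_if below: returns true if the
           // call site CS may read the given stack object, in which case stores to
           // that object before the call cannot be considered dead.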
    705   struct CouldRef {
    706     typedef Value *argument_type;
    707     const CallSite CS;
    708     AliasAnalysis *AA;
    709 
    710     bool operator()(Value *I) {
    711       // See if the call site touches the value.
    712       AliasAnalysis::ModRefResult A =
    713         AA->getModRefInfo(CS, I, getPointerSize(I, *AA));
    714 
    715       return A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref;
    716     }
    717   };
    718 }
    719 
    720 /// handleEndBlock - Remove dead stores to stack-allocated locations in the
    721 /// function end block.  Ex:
    722 /// %A = alloca i32
    723 /// ...
    724 /// store i32 1, i32* %A
    725 /// ret void
    726 bool DSE::handleEndBlock(BasicBlock &BB) {
    727   bool MadeChange = false;
    728 
    729   // Keep track of all of the stack objects that are dead at the end of the
    730   // function.
    731   SmallSetVector<Value*, 16> DeadStackObjects;
    732 
    733   // Find all of the alloca'd pointers in the entry block.
    734   BasicBlock *Entry = BB.getParent()->begin();
    735   for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I) {
    736     if (isa<AllocaInst>(I))
    737       DeadStackObjects.insert(I);
    738 
     739     // Okay, so these are dead heap objects, but if the pointer never escapes
     740     // then the memory is just leaked by this function anyway.
    741     else if (isAllocLikeFn(I, TLI) && !PointerMayBeCaptured(I, true, true))
    742       DeadStackObjects.insert(I);
    743   }
    744 
     745   // Treat byval arguments the same; stores to them are dead at the end of
     746   // the function.
    747   for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
    748        AE = BB.getParent()->arg_end(); AI != AE; ++AI)
    749     if (AI->hasByValAttr())
    750       DeadStackObjects.insert(AI);
    751 
    752   // Scan the basic block backwards
    753   for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
    754     --BBI;
    755 
    756     // If we find a store, check to see if it points into a dead stack value.
    757     if (hasMemoryWrite(BBI, TLI) && isRemovable(BBI)) {
    758       // See through pointer-to-pointer bitcasts
    759       SmallVector<Value *, 4> Pointers;
    760       GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers);
    761 
    762       // Stores to stack values are valid candidates for removal.
    763       bool AllDead = true;
    764       for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
    765            E = Pointers.end(); I != E; ++I)
    766         if (!DeadStackObjects.count(*I)) {
    767           AllDead = false;
    768           break;
    769         }
    770 
    771       if (AllDead) {
    772         Instruction *Dead = BBI++;
    773 
    774         DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
    775                      << *Dead << "\n  Objects: ";
    776               for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
    777                    E = Pointers.end(); I != E; ++I) {
    778                 dbgs() << **I;
    779                 if (llvm::next(I) != E)
    780                   dbgs() << ", ";
    781               }
    782               dbgs() << '\n');
    783 
    784         // DCE instructions only used to calculate that store.
    785         DeleteDeadInstruction(Dead, *MD, TLI, &DeadStackObjects);
    786         ++NumFastStores;
    787         MadeChange = true;
    788         continue;
    789       }
    790     }
    791 
    792     // Remove any dead non-memory-mutating instructions.
    793     if (isInstructionTriviallyDead(BBI, TLI)) {
    794       Instruction *Inst = BBI++;
    795       DeleteDeadInstruction(Inst, *MD, TLI, &DeadStackObjects);
    796       ++NumFastOther;
    797       MadeChange = true;
    798       continue;
    799     }
    800 
    801     if (isa<AllocaInst>(BBI)) {
    802       // Remove allocas from the list of dead stack objects; there can't be
    803       // any references before the definition.
    804       DeadStackObjects.remove(BBI);
    805       continue;
    806     }
    807 
    808     if (CallSite CS = cast<Value>(BBI)) {
    809       // Remove allocation function calls from the list of dead stack objects;
    810       // there can't be any references before the definition.
    811       if (isAllocLikeFn(BBI, TLI))
    812         DeadStackObjects.remove(BBI);
    813 
    814       // If this call does not access memory, it can't be loading any of our
    815       // pointers.
    816       if (AA->doesNotAccessMemory(CS))
    817         continue;
    818 
    819       // If the call might load from any of our allocas, then any store above
    820       // the call is live.
    821       CouldRef Pred = { CS, AA };
    822       DeadStackObjects.remove_if(Pred);
    823 
    824       // If all of the allocas were clobbered by the call then we're not going
    825       // to find anything else to process.
    826       if (DeadStackObjects.empty())
    827         break;
    828 
    829       continue;
    830     }
    831 
    832     AliasAnalysis::Location LoadedLoc;
    833 
    834     // If we encounter a use of the pointer, it is no longer considered dead
    835     if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
    836       if (!L->isUnordered()) // Be conservative with atomic/volatile load
    837         break;
    838       LoadedLoc = AA->getLocation(L);
    839     } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
    840       LoadedLoc = AA->getLocation(V);
    841     } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
    842       LoadedLoc = AA->getLocationForSource(MTI);
    843     } else if (!BBI->mayReadFromMemory()) {
    844       // Instruction doesn't read memory.  Note that stores that weren't removed
    845       // above will hit this case.
    846       continue;
    847     } else {
    848       // Unknown inst; assume it clobbers everything.
    849       break;
    850     }
    851 
     852     // Remove any allocas from the DeadStackObjects set that are loaded, as
     853     // this makes any stores above the access live.
    854     RemoveAccessedObjects(LoadedLoc, DeadStackObjects);
    855 
    856     // If all of the allocas were clobbered by the access then we're not going
    857     // to find anything else to process.
    858     if (DeadStackObjects.empty())
    859       break;
    860   }
    861 
    862   return MadeChange;
    863 }
    864 
    865 namespace {
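           // Predicate for SmallSetVector::remove_if below: returns true if the
           // loaded location may alias the given stack object, which makes stores
           // to that object above the load live again.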
    866   struct CouldAlias {
    867     typedef Value *argument_type;
    868     const AliasAnalysis::Location &LoadedLoc;
    869     AliasAnalysis *AA;
    870 
    871     bool operator()(Value *I) {
    872       // See if the loaded location could alias the stack location.
    873       AliasAnalysis::Location StackLoc(I, getPointerSize(I, *AA));
    874       return !AA->isNoAlias(StackLoc, LoadedLoc);
    875     }
    876   };
    877 }
    878 
    879 /// RemoveAccessedObjects - Check to see if the specified location may alias any
    880 /// of the stack objects in the DeadStackObjects set.  If so, they become live
    881 /// because the location is being loaded.
    882 void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
    883                                 SmallSetVector<Value*, 16> &DeadStackObjects) {
    884   const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr);
    885 
    886   // A constant can't be in the dead pointer set.
    887   if (isa<Constant>(UnderlyingPointer))
    888     return;
    889 
    890   // If the kill pointer can be easily reduced to an alloca, don't bother doing
    891   // extraneous AA queries.
    892   if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    893     DeadStackObjects.remove(const_cast<Value*>(UnderlyingPointer));
    894     return;
    895   }
    896 
    897   // Remove objects that could alias LoadedLoc.
    898   CouldAlias Pred = { LoadedLoc, AA };
    899   DeadStackObjects.remove_if(Pred);
    900 }
    901