//===- DeadStoreElimination.cpp - Fast Dead Store Elimination -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
//
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal.  Doing so would be pretty trivial.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "dse"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;

STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther , "Number of other instrs removed");

namespace {
  struct DSE : public FunctionPass {
    AliasAnalysis *AA;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;

    static char ID; // Pass identification, replacement for typeid
    DSE() : FunctionPass(ID), AA(0), MD(0), DT(0) {
      initializeDSEPass(*PassRegistry::getPassRegistry());
    }

    virtual bool runOnFunction(Function &F) {
      AA = &getAnalysis<AliasAnalysis>();
      MD = &getAnalysis<MemoryDependenceAnalysis>();
      DT = &getAnalysis<DominatorTree>();

      bool Changed = false;
      for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
        // Only check non-dead blocks.  Dead blocks may have strange pointer
        // cycles that will confuse alias analysis.
        if (DT->isReachableFromEntry(I))
          Changed |= runOnBasicBlock(*I);

      AA = 0; MD = 0; DT = 0;
      return Changed;
    }

    bool runOnBasicBlock(BasicBlock &BB);
    bool HandleFree(CallInst *F);
    bool handleEndBlock(BasicBlock &BB);
    void RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
                               SmallSetVector<Value*, 16> &DeadStackObjects);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<DominatorTree>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }
  };
}

char DSE::ID = 0;
INITIALIZE_PASS_BEGIN(DSE, "dse", "Dead Store Elimination", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(DSE, "dse", "Dead Store Elimination", false, false)

FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

/// DeleteDeadInstruction - Delete this instruction.  Before we do, go through
/// and zero out all the operands of this instruction.  If any of them become
/// dead, delete them and the computation tree that feeds them.
///
/// If ValueSet is non-null, remove any deleted instructions from it as well.
///
static void DeleteDeadInstruction(Instruction *I,
                                  MemoryDependenceAnalysis &MD,
                                  const TargetLibraryInfo *TLI,
                                  SmallSetVector<Value*, 16> *ValueSet = 0) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);
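  // Note: the loop below increments NumFastOther for every instruction it
  // erases, including I itself; callers count I in the appropriate statistic
  // themselves, so pre-decrement here to avoid double-counting it.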
  --NumFastOther;

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    ++NumFastOther;

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, 0);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI, TLI))
          NowDeadInsts.push_back(OpI);
    }

    DeadInst->eraseFromParent();

    if (ValueSet) ValueSet->remove(DeadInst);
  } while (!NowDeadInsts.empty());
}


/// hasMemoryWrite - Does this instruction write some memory?  This only returns
/// true for things that we can analyze with other helpers below.
static bool hasMemoryWrite(Instruction *I) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
      return true;
    }
  }
  return false;
}

/// getLocForWrite - Return a Location stored to by the specified instruction.
/// If isRemovable returns true, this function and getLocForRead completely
/// describe the memory operations for this instruction.
static AliasAnalysis::Location
getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return AA.getLocation(SI);

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    AliasAnalysis::Location Loc = AA.getLocationForDest(MI);
    // If we don't have target data around, an unknown size in Location means
    // that we should use the size of the pointee type.  This isn't valid for
    // memset/memcpy, which writes more than an i8.
    if (Loc.Size == AliasAnalysis::UnknownSize && AA.getTargetData() == 0)
      return AliasAnalysis::Location();
    return Loc;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
  if (II == 0) return AliasAnalysis::Location();

  switch (II->getIntrinsicID()) {
  default: return AliasAnalysis::Location(); // Unhandled intrinsic.
  case Intrinsic::init_trampoline:
    // If we don't have target data around, an unknown size in Location means
    // that we should use the size of the pointee type.  This isn't valid for
    // init.trampoline, which writes more than an i8.
    if (AA.getTargetData() == 0) return AliasAnalysis::Location();

    // FIXME: We don't know the size of the trampoline, so we can't really
    // handle it here.
    return AliasAnalysis::Location(II->getArgOperand(0));
  case Intrinsic::lifetime_end: {
    uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
    return AliasAnalysis::Location(II->getArgOperand(1), Len);
  }
  }
}

/// getLocForRead - Return the location read by the specified "hasMemoryWrite"
/// instruction if any.
static AliasAnalysis::Location
getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
  assert(hasMemoryWrite(Inst) && "Unknown instruction case");

  // The only instructions that both read and write are the mem transfer
  // instructions (memcpy/memmove).
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
    return AA.getLocationForSource(MTI);
  return AliasAnalysis::Location();
}


/// isRemovable - If the value of this instruction and the memory it writes to
/// is unused, may we delete this instruction?
static bool isRemovable(Instruction *I) {
  // Don't remove volatile/atomic stores.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isUnordered();

  IntrinsicInst *II = cast<IntrinsicInst>(I);
  switch (II->getIntrinsicID()) {
  default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate");
  case Intrinsic::lifetime_end:
    // Never remove dead lifetime_end's, e.g. because they may be followed by a
    // free.
    return false;
  case Intrinsic::init_trampoline:
    // Always safe to remove init_trampoline.
    return true;

  case Intrinsic::memset:
  case Intrinsic::memmove:
  case Intrinsic::memcpy:
    // Don't remove volatile memory intrinsics.
    return !cast<MemIntrinsic>(II)->isVolatile();
  }
}


/// isShortenable - Returns true if this instruction can be safely shortened in
/// length.
static bool isShortenable(Instruction *I) {
  // Don't shorten stores for now
  if (isa<StoreInst>(I))
    return false;

  IntrinsicInst *II = cast<IntrinsicInst>(I);
  switch (II->getIntrinsicID()) {
    default: return false;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
      // Do shorten memory intrinsics.
      return true;
  }
}

/// getStoredPointerOperand - Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return MI->getDest();

  IntrinsicInst *II = cast<IntrinsicInst>(I);
  switch (II->getIntrinsicID()) {
  default: llvm_unreachable("Unexpected intrinsic!");
  case Intrinsic::init_trampoline:
    return II->getArgOperand(0);
  }
}

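/// getPointerSize - Return the size in bytes of the object pointed to by V,
/// or AliasAnalysis::UnknownSize if it cannot be determined.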
static uint64_t getPointerSize(const Value *V, AliasAnalysis &AA) {
  uint64_t Size;
  if (getObjectSize(V, Size, AA.getTargetData(), AA.getTargetLibraryInfo()))
    return Size;
  return AliasAnalysis::UnknownSize;
}

namespace {
  enum OverwriteResult
  {
    OverwriteComplete,
    OverwriteEnd,
    OverwriteUnknown
  };
}

/// isOverwrite - Return 'OverwriteComplete' if a store to the 'Later' location
/// completely overwrites a store to the 'Earlier' location, 'OverwriteEnd' if
/// the end of the 'Earlier' location is completely overwritten by 'Later', or
/// 'OverwriteUnknown' if nothing can be determined.
static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
                                   const AliasAnalysis::Location &Earlier,
                                   AliasAnalysis &AA,
                                   int64_t &EarlierOff,
                                   int64_t &LaterOff) {
  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see if
  // the later store was larger than the earlier store.
  if (P1 == P2) {
    // If we don't know the sizes of either access, then we can't do a
    // comparison.
    if (Later.Size == AliasAnalysis::UnknownSize ||
        Earlier.Size == AliasAnalysis::UnknownSize) {
      // If we have no TargetData information around, then the size of the store
      // is inferrable from the pointee type.  If they are the same type, then
      // we know that the store is safe.
      if (AA.getTargetData() == 0 &&
          Later.Ptr->getType() == Earlier.Ptr->getType())
        return OverwriteComplete;

      return OverwriteUnknown;
    }

    // Make sure that the Later size is >= the Earlier size.
    if (Later.Size >= Earlier.Size)
      return OverwriteComplete;
  }

  // Otherwise, we have to have size information, and the later store has to be
  // larger than the earlier one.
  if (Later.Size == AliasAnalysis::UnknownSize ||
      Earlier.Size == AliasAnalysis::UnknownSize ||
      AA.getTargetData() == 0)
    return OverwriteUnknown;

  // Check to see if the later store is to the entire object (either a global,
  // an alloca, or a byval argument).  If so, then it clearly overwrites any
  // other store to the same object.
  const TargetData &TD = *AA.getTargetData();

  const Value *UO1 = GetUnderlyingObject(P1, &TD),
              *UO2 = GetUnderlyingObject(P2, &TD);

  // If we can't resolve the same pointers to the same object, then we can't
  // analyze them at all.
  if (UO1 != UO2)
    return OverwriteUnknown;

  // If the "Later" store is to a recognizable object, get its size.
  uint64_t ObjectSize = getPointerSize(UO2, AA);
  if (ObjectSize != AliasAnalysis::UnknownSize)
    if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
      return OverwriteComplete;

  // Okay, we have stores to two completely different pointers.  Try to
  // decompose the pointer into a "base + constant_offset" form.  If the base
  // pointers are equal, then we can reason about the two stores.
  EarlierOff = 0;
  LaterOff = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, TD);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, TD);

  // If the base pointers still differ, we have two completely different stores.
  if (BP1 != BP2)
    return OverwriteUnknown;

  // The later store completely overlaps the earlier store if:
  //
  // 1. Both start at the same offset and the later one's size is greater than
  //    or equal to the earlier one's, or
  //
  //      |--earlier--|
  //      |--   later   --|
  //
  // 2. The earlier store has an offset greater than the later offset, but which
  //    still lies completely within the later store.
  //
  //        |--earlier--|
  //    |-----  later  ------|
  //
  // We have to be careful here as *Off is signed while *.Size is unsigned.
  if (EarlierOff >= LaterOff &&
      Later.Size >= Earlier.Size &&
      uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
    return OverwriteComplete;

  // The other interesting case is if the later store overwrites the end of
  // the earlier store.
  //
  //      |--earlier--|
  //                |--   later   --|
  //
  // In this case we may want to trim the size of earlier to avoid generating
  // writes to addresses which will definitely be overwritten later.
  if (LaterOff > EarlierOff &&
      LaterOff < int64_t(EarlierOff + Earlier.Size) &&
      int64_t(LaterOff + Later.Size) >= int64_t(EarlierOff + Earlier.Size))
    return OverwriteEnd;

  // Otherwise, they don't completely overlap.
  return OverwriteUnknown;
}

/// isPossibleSelfRead - If 'Inst' might be a self read (i.e. a noop copy of a
/// memory region into an identical pointer) then it doesn't actually make its
/// input dead in the traditional sense.  Consider this case:
///
///   memcpy(A <- B)
///   memcpy(A <- A)
///
/// In this case, the second store to A does not make the first store to A dead.
/// The usual situation isn't an explicit A<-A store like this (which can be
/// trivially removed) but a case where two pointers may alias.
///
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
                               const AliasAnalysis::Location &InstStoreLoc,
                               Instruction *DepWrite, AliasAnalysis &AA) {
  // Self reads can only happen for instructions that read memory.  Get the
  // location read.
  AliasAnalysis::Location InstReadLoc = getLocForRead(Inst, AA);
  if (InstReadLoc.Ptr == 0) return false;  // Not a reading instruction.

  // If the read and written loc obviously don't alias, it isn't a read.
  if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;

  // Okay, 'Inst' may copy over itself.  However, we can still remove the
  // DepWrite instruction if we can prove that it reads from the same location
  // as Inst.  This handles useful cases like:
  //   memcpy(A <- B)
  //   memcpy(A <- B)
  // Here we don't know if A/B may alias, but we do know that B/B are must
  // aliases, so removing the first memcpy is safe (assuming it writes <= #
  // bytes as the second one).
  AliasAnalysis::Location DepReadLoc = getLocForRead(DepWrite, AA);

  if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
    return false;

  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
  // then it can't be considered dead.
  return true;
}


//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//

bool DSE::runOnBasicBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Do a top-down walk on the BB.
  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    Instruction *Inst = BBI++;

    // Handle 'free' calls specially.
    if (CallInst *F = isFreeCall(Inst, AA->getTargetLibraryInfo())) {
      MadeChange |= HandleFree(F);
      continue;
    }

    // If we find something that writes memory, get its memory dependence.
    if (!hasMemoryWrite(Inst))
      continue;

    MemDepResult InstDep = MD->getDependency(Inst);

    // Ignore any store where we can't find a local dependence.
    // FIXME: cross-block DSE would be fun. :)
    if (!InstDep.isDef() && !InstDep.isClobber())
      continue;

    // If we're storing the same value back to a pointer that we just
    // loaded from, then the store can be removed.
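    // For example (illustrative IR only):
    //   %x = load i32* %p
    //   store i32 %x, i32* %p   ; this store is a no-op and can be deleted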
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (LoadInst *DepLoad = dyn_cast<LoadInst>(InstDep.getInst())) {
        if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
            SI->getOperand(0) == DepLoad && isRemovable(SI)) {
          DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n  "
                       << "LOAD: " << *DepLoad << "\n  STORE: " << *SI << '\n');

          // DeleteDeadInstruction can delete the current instruction.  Save BBI
          // in case we need it.
          WeakVH NextInst(BBI);

          DeleteDeadInstruction(SI, *MD, AA->getTargetLibraryInfo());

          if (NextInst == 0)  // Next instruction deleted.
            BBI = BB.begin();
          else if (BBI != BB.begin())  // Revisit this instruction if possible.
            --BBI;
          ++NumFastStores;
          MadeChange = true;
          continue;
        }
      }
    }

    // Figure out what location is being stored to.
    AliasAnalysis::Location Loc = getLocForWrite(Inst, *AA);

    // If we didn't get a useful location, fail.
    if (Loc.Ptr == 0)
      continue;

    while (InstDep.isDef() || InstDep.isClobber()) {
      // Get the memory clobbered by the instruction we depend on.  MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with.  If we
      // end up depending on a may- or must-aliased load, then we can't optimize
      // away the store and we bail out.  However, if we depend on something
      // that overwrites the memory location we *can* potentially optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      AliasAnalysis::Location DepLoc = getLocForWrite(DepWrite, *AA);
      // If we didn't get a useful location, bail out.
      if (DepLoc.Ptr == 0)
        break;

      // If we find a write that is a) removable (i.e., non-volatile), b) is
      // completely obliterated by the store to 'Loc', and c) which we know that
      // 'Inst' doesn't load from, then we can remove it.
      if (isRemovable(DepWrite) &&
          !isPossibleSelfRead(Inst, Loc, DepWrite, *AA)) {
        int64_t InstWriteOffset, DepWriteOffset;
        OverwriteResult OR = isOverwrite(Loc, DepLoc, *AA,
                                         DepWriteOffset, InstWriteOffset);
        if (OR == OverwriteComplete) {
          DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
                << *DepWrite << "\n  KILLER: " << *Inst << '\n');

          // Delete the store and now-dead instructions that feed it.
          DeleteDeadInstruction(DepWrite, *MD, AA->getTargetLibraryInfo());
          ++NumFastStores;
          MadeChange = true;

          // DeleteDeadInstruction can delete the current instruction in loop
          // cases, reset BBI.
          BBI = Inst;
          if (BBI != BB.begin())
            --BBI;
          break;
        } else if (OR == OverwriteEnd && isShortenable(DepWrite)) {
          // TODO: Base this on the target vector size so that if the earlier
          // store was too small to get vector writes anyway then it's likely
          // a good idea to shorten it.
          // Writes of power-of-2 size are probably always a bad idea to
          // shorten, as any store/memset/memcpy is likely using vector
          // instructions, so shortening it to a non-vector size is likely to
          // be slower.
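          // For example (illustrative numbers only): if the earlier memset
          // covers bytes [0, 32) and the later store covers bytes [16, 40),
          // the memset's length is trimmed to 16 so it only writes the bytes
          // that the later store does not overwrite.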
          MemIntrinsic* DepIntrinsic = cast<MemIntrinsic>(DepWrite);
          unsigned DepWriteAlign = DepIntrinsic->getAlignment();
          if (llvm::isPowerOf2_64(InstWriteOffset) ||
              ((DepWriteAlign != 0) && InstWriteOffset % DepWriteAlign == 0)) {

            DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW END: "
                  << *DepWrite << "\n  KILLER (offset "
                  << InstWriteOffset << ", "
                  << DepLoc.Size << ")"
                  << *Inst << '\n');

            Value* DepWriteLength = DepIntrinsic->getLength();
            Value* TrimmedLength = ConstantInt::get(DepWriteLength->getType(),
                                                    InstWriteOffset -
                                                    DepWriteOffset);
            DepIntrinsic->setLength(TrimmedLength);
            MadeChange = true;
          }
        }
      }

      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that stores
      // to the same location.  For example, in:
      //   store -> P
      //   store -> Q
      //   store -> P
      // we can remove the first store to P even though we don't know if P and Q
      // alias.
      if (DepWrite == &BB.front()) break;

      // Can't look past this instruction if it might read 'Loc'.
      if (AA->getModRefInfo(DepWrite, Loc) & AliasAnalysis::Ref)
        break;

      InstDep = MD->getPointerDependencyFrom(Loc, false, DepWrite, &BB);
    }
  }

  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB);

  return MadeChange;
}

/// Find all blocks that will unconditionally lead to the block BB and append
/// them to Blocks.
static void FindUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
                                   BasicBlock *BB, DominatorTree *DT) {
  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
    BasicBlock *Pred = *I;
    if (Pred == BB) continue;
    TerminatorInst *PredTI = Pred->getTerminator();
    if (PredTI->getNumSuccessors() != 1)
      continue;

    if (DT->isReachableFromEntry(Pred))
      Blocks.push_back(Pred);
  }
}

/// HandleFree - Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
bool DSE::HandleFree(CallInst *F) {
  bool MadeChange = false;

  AliasAnalysis::Location Loc = AliasAnalysis::Location(F->getOperand(0));
  SmallVector<BasicBlock *, 16> Blocks;
  Blocks.push_back(F->getParent());

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.pop_back_val();
    Instruction *InstPt = BB->getTerminator();
    if (BB == F->getParent()) InstPt = F;

    MemDepResult Dep = MD->getPointerDependencyFrom(Loc, false, InstPt, BB);
    while (Dep.isDef() || Dep.isClobber()) {
      Instruction *Dependency = Dep.getInst();
      if (!hasMemoryWrite(Dependency) || !isRemovable(Dependency))
        break;

      Value *DepPointer =
        GetUnderlyingObject(getStoredPointerOperand(Dependency));

      // Check for aliasing.
      if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
        break;

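      // Grab the iterator to the instruction after Dependency now, since
      // Dependency itself is about to be deleted.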
      Instruction *Next = llvm::next(BasicBlock::iterator(Dependency));

      // DCE instructions only used to calculate that store
      DeleteDeadInstruction(Dependency, *MD, AA->getTargetLibraryInfo());
      ++NumFastStores;
      MadeChange = true;

      // Inst's old Dependency is now deleted. Compute the next dependency,
      // which may also be dead, as in
      //    s[0] = 0;
      //    s[1] = 0; // This has just been deleted.
      //    free(s);
      Dep = MD->getPointerDependencyFrom(Loc, false, Next, BB);
    }

    if (Dep.isNonLocal())
      FindUnconditionalPreds(Blocks, BB, DT);
  }

  return MadeChange;
}

/// handleEndBlock - Remove dead stores to stack-allocated locations in the
/// function end block.  Ex:
/// %A = alloca i32
/// ...
/// store i32 1, i32* %A
/// ret void
bool DSE::handleEndBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallSetVector<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock *Entry = BB.getParent()->begin();
  for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I) {
    if (isa<AllocaInst>(I))
      DeadStackObjects.insert(I);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyway.
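    // (The two 'true' arguments ask PointerMayBeCaptured to treat returning
    // or storing the pointer as a capture.)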
    else if (isAllocLikeFn(I, AA->getTargetLibraryInfo()) &&
             !PointerMayBeCaptured(I, true, true))
      DeadStackObjects.insert(I);
  }

  // Treat byval arguments the same; stores to them are dead at the end of the
  // function.
  for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
       AE = BB.getParent()->arg_end(); AI != AE; ++AI)
    if (AI->hasByValAttr())
      DeadStackObjects.insert(AI);

  // Scan the basic block backwards.
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasMemoryWrite(BBI) && isRemovable(BBI)) {
      // See through pointer-to-pointer bitcasts
      SmallVector<Value *, 4> Pointers;
      GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers);

      // Stores to stack values are valid candidates for removal.
      bool AllDead = true;
      for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
           E = Pointers.end(); I != E; ++I)
        if (!DeadStackObjects.count(*I)) {
          AllDead = false;
          break;
        }

      if (AllDead) {
        Instruction *Dead = BBI++;

        DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                     << *Dead << "\n  Objects: ";
              for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
                   E = Pointers.end(); I != E; ++I) {
                dbgs() << **I;
                if (llvm::next(I) != E)
                  dbgs() << ", ";
              }
              dbgs() << '\n');

        // DCE instructions only used to calculate that store.
        DeleteDeadInstruction(Dead, *MD, AA->getTargetLibraryInfo(),
                              &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(BBI, AA->getTargetLibraryInfo())) {
      Instruction *Inst = BBI++;
      DeleteDeadInstruction(Inst, *MD, AA->getTargetLibraryInfo(),
                            &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (isa<AllocaInst>(BBI)) {
      // Remove allocas from the list of dead stack objects; there can't be
      // any references before the definition.
      DeadStackObjects.remove(BBI);
      continue;
    }

    if (CallSite CS = cast<Value>(BBI)) {
      // Remove allocation function calls from the list of dead stack objects;
      // there can't be any references before the definition.
      if (isAllocLikeFn(BBI, AA->getTargetLibraryInfo()))
        DeadStackObjects.remove(BBI);

      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      SmallVector<Value*, 8> LiveAllocas;
      for (SmallSetVector<Value*, 16>::iterator I = DeadStackObjects.begin(),
           E = DeadStackObjects.end(); I != E; ++I) {
        // See if the call site touches it.
        AliasAnalysis::ModRefResult A =
          AA->getModRefInfo(CS, *I, getPointerSize(*I, *AA));

        if (A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref)
          LiveAllocas.push_back(*I);
      }

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.size() == LiveAllocas.size())
        break;

      for (SmallVector<Value*, 8>::iterator I = LiveAllocas.begin(),
           E = LiveAllocas.end(); I != E; ++I)
        DeadStackObjects.remove(*I);

      continue;
    }

    AliasAnalysis::Location LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead.
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered()) // Be conservative with atomic/volatile load
        break;
      LoadedLoc = AA->getLocation(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = AA->getLocation(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      LoadedLoc = AA->getLocationForSource(MTI);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory.  Note that stores that weren't removed
      // above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
      break;
    }

    // Remove any allocas from the DeadStackObjects set that are loaded, as this
    // makes any stores above the access live.
    RemoveAccessedObjects(LoadedLoc, DeadStackObjects);

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}

/// RemoveAccessedObjects - Check to see if the specified location may alias any
/// of the stack objects in the DeadStackObjects set.  If so, they become live
/// because the location is being loaded.
void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
                                SmallSetVector<Value*, 16> &DeadStackObjects) {
  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr);

  // A constant can't be in the dead pointer set.
  if (isa<Constant>(UnderlyingPointer))
    return;

  // If the kill pointer can be easily reduced to an alloca, don't bother doing
  // extraneous AA queries.
  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    DeadStackObjects.remove(const_cast<Value*>(UnderlyingPointer));
    return;
  }

  SmallVector<Value*, 16> NowLive;
  for (SmallSetVector<Value*, 16>::iterator I = DeadStackObjects.begin(),
       E = DeadStackObjects.end(); I != E; ++I) {
    // See if the loaded location could alias the stack location.
    AliasAnalysis::Location StackLoc(*I, getPointerSize(*I, *AA));
    if (!AA->isNoAlias(StackLoc, LoadedLoc))
      NowLive.push_back(*I);
  }

  for (SmallVector<Value*, 16>::iterator I = NowLive.begin(), E = NowLive.end();
       I != E; ++I)
    DeadStackObjects.remove(*I);
}