//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation  --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on.  It builds on
// alias analysis information, and tries to provide a lazy, caching interface to
// a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memdep"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/PredIteratorCache.h"
using namespace llvm;

STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

STATISTIC(NumCacheNonLocalPtr,
          "Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
          "Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr,
          "Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
          "Number of block queries that were completely cached");

// Limit for the number of instructions to scan in a block.
// FIXME: Figure out what a sane value is for this.
//        (500 is relatively insane.)
static const int BlockScanLimit = 500;

char MemoryDependenceAnalysis::ID = 0;

// Register this pass...
INITIALIZE_PASS_BEGIN(MemoryDependenceAnalysis, "memdep",
                "Memory Dependence Analysis", false, true)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemoryDependenceAnalysis, "memdep",
                      "Memory Dependence Analysis", false, true)

MemoryDependenceAnalysis::MemoryDependenceAnalysis()
: FunctionPass(ID), PredCache(0) {
  initializeMemoryDependenceAnalysisPass(*PassRegistry::getPassRegistry());
}
MemoryDependenceAnalysis::~MemoryDependenceAnalysis() {
}

/// Clean up memory in between runs
void MemoryDependenceAnalysis::releaseMemory() {
  LocalDeps.clear();
  NonLocalDeps.clear();
  NonLocalPointerDeps.clear();
  ReverseLocalDeps.clear();
  ReverseNonLocalDeps.clear();
  ReverseNonLocalPtrDeps.clear();
  PredCache->clear();
}



/// getAnalysisUsage - Does not modify anything.  It uses Alias Analysis.
///
void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AliasAnalysis>();
}

bool MemoryDependenceAnalysis::runOnFunction(Function &) {
  AA = &getAnalysis<AliasAnalysis>();
  TD = getAnalysisIfAvailable<DataLayout>();
  DT = getAnalysisIfAvailable<DominatorTree>();
  if (PredCache == 0)
    PredCache.reset(new PredIteratorCache());
  return false;
}

/// RemoveFromReverseMap - This is a helper function that removes Val from
/// 'Inst's set in ReverseMap.  If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void RemoveFromReverseMap(DenseMap<Instruction*,
                                 SmallPtrSet<KeyTy, 4> > &ReverseMap,
                                 Instruction *Inst, KeyTy Val) {
  typename DenseMap<Instruction*, SmallPtrSet<KeyTy, 4> >::iterator
  InstIt = ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  assert(Found && "Invalid reverse map!"); (void)Found;
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}
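
// For illustration (hypothetical snippet, not part of this file): the reverse
// maps pair a depended-on instruction with every cache key whose cached result
// points at it, so both directions of the bookkeeping always move together:
//
//   ReverseLocalDeps[DepInst].insert(QueryInst);                // cache a result
//   RemoveFromReverseMap(ReverseLocalDeps, DepInst, QueryInst); // and undo it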

/// GetLocation - If the given instruction references a specific memory
/// location, fill in Loc with the details, otherwise set Loc.Ptr to null.
/// Return a ModRefInfo value describing the general behavior of the
/// instruction.
static
AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst,
                                        AliasAnalysis::Location &Loc,
                                        AliasAnalysis *AA) {
  if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    if (LI->isUnordered()) {
      Loc = AA->getLocation(LI);
      return AliasAnalysis::Ref;
    } else if (LI->getOrdering() == Monotonic) {
      Loc = AA->getLocation(LI);
      return AliasAnalysis::ModRef;
    }
    Loc = AliasAnalysis::Location();
    return AliasAnalysis::ModRef;
  }

  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->isUnordered()) {
      Loc = AA->getLocation(SI);
      return AliasAnalysis::Mod;
    } else if (SI->getOrdering() == Monotonic) {
      Loc = AA->getLocation(SI);
      return AliasAnalysis::ModRef;
    }
    Loc = AliasAnalysis::Location();
    return AliasAnalysis::ModRef;
  }

  if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
    Loc = AA->getLocation(V);
    return AliasAnalysis::ModRef;
  }

  if (const CallInst *CI = isFreeCall(Inst, AA->getTargetLibraryInfo())) {
    // calls to free() deallocate the entire structure
    Loc = AliasAnalysis::Location(CI->getArgOperand(0));
    return AliasAnalysis::Mod;
  }

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      Loc = AliasAnalysis::Location(II->getArgOperand(1),
                                    cast<ConstantInt>(II->getArgOperand(0))
                                      ->getZExtValue(),
                                    II->getMetadata(LLVMContext::MD_tbaa));
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return AliasAnalysis::Mod;
    case Intrinsic::invariant_end:
      Loc = AliasAnalysis::Location(II->getArgOperand(2),
                                    cast<ConstantInt>(II->getArgOperand(1))
                                      ->getZExtValue(),
                                    II->getMetadata(LLVMContext::MD_tbaa));
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return AliasAnalysis::Mod;
    default:
      break;
    }

  // Otherwise, just do the coarse-grained thing that always works.
  if (Inst->mayWriteToMemory())
    return AliasAnalysis::ModRef;
  if (Inst->mayReadFromMemory())
    return AliasAnalysis::Ref;
  return AliasAnalysis::NoModRef;
}
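
// For example (illustrative IR, not from this file), GetLocation behaves as
// follows, assuming @f is an arbitrary external function:
//
//   %v = load i32* %p       ; Loc <- location of %p (4 bytes), returns Ref
//   store i32 %v, i32* %q   ; Loc <- location of %q (4 bytes), returns Mod
//   call void @f()          ; Loc.Ptr stays null, returns ModRef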

/// getCallSiteDependencyFrom - Private helper for finding the local
/// dependencies of a call site.
MemDepResult MemoryDependenceAnalysis::
getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
                          BasicBlock::iterator ScanIt, BasicBlock *BB) {
  unsigned Limit = BlockScanLimit;

  // Walk backwards through the block, looking for dependencies
  while (ScanIt != BB->begin()) {
    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    Instruction *Inst = --ScanIt;

    // If this inst is a memory op, get the pointer it accessed
    AliasAnalysis::Location Loc;
    AliasAnalysis::ModRefResult MR = GetLocation(Inst, Loc, AA);
    if (Loc.Ptr) {
      // A simple instruction.
      if (AA->getModRefInfo(CS, Loc) != AliasAnalysis::NoModRef)
        return MemDepResult::getClobber(Inst);
      continue;
    }

    if (CallSite InstCS = cast<Value>(Inst)) {
      // Debug intrinsics don't cause dependences.
      if (isa<DbgInfoIntrinsic>(Inst)) continue;
      // If these two calls do not interfere, look past it.
      switch (AA->getModRefInfo(CS, InstCS)) {
      case AliasAnalysis::NoModRef:
        // If the two calls are the same, return InstCS as a Def, so that
        // CS can be found redundant and eliminated.
        if (isReadOnlyCall && !(MR & AliasAnalysis::Mod) &&
            CS.getInstruction()->isIdenticalToWhenDefined(Inst))
          return MemDepResult::getDef(Inst);

        // Otherwise if the two calls don't interact (e.g. InstCS is readnone)
        // keep scanning.
        continue;
      default:
        return MemDepResult::getClobber(Inst);
      }
    }

    // If we could not obtain a pointer for the instruction and the instruction
    // touches memory then assume that this is a dependency.
    if (MR != AliasAnalysis::NoModRef)
      return MemDepResult::getClobber(Inst);
  }

  // No dependence found.  If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}
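
// Illustrative example (hypothetical IR): scanning backwards from the second
// call below,
//
//   %a = call i32 @g(i32* %p)    ; @g is readonly
//   %b = call i32 @g(i32* %p)    ; the query call site
//
// the calls are identical and neither writes memory, so the scan returns
// getDef(%a), letting a client such as GVN treat %b as redundant.  A store to
// %p between the two calls would instead yield a clobber result.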

/// isLoadLoadClobberIfExtendedToFullWidth - Return true if LI is a load that
/// would fully overlap MemLoc if done as a wider legal integer load.
///
/// MemLocBase and MemLocOffs are lazily computed here the first time the
/// base/offset of MemLoc is needed.
static bool
isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
                                       const Value *&MemLocBase,
                                       int64_t &MemLocOffs,
                                       const LoadInst *LI,
                                       const DataLayout *TD) {
  // If we have no target data, we can't do this.
  if (TD == 0) return false;

  // If we haven't already computed the base/offset of MemLoc, do so now.
  if (MemLocBase == 0)
    MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, TD);

  unsigned Size = MemoryDependenceAnalysis::
    getLoadLoadClobberFullWidthSize(MemLocBase, MemLocOffs, MemLoc.Size,
                                    LI, *TD);
  return Size != 0;
}

/// getLoadLoadClobberFullWidthSize - This is a little bit of analysis that
/// looks at a memory location for a load (specified by MemLocBase,
/// MemLocOffs, and MemLocSize) and compares it against a load.  If the
/// specified load could be safely widened to a larger integer load that is
/// 1) still efficient, 2) safe for the target, and 3) would provide the
/// specified memory location value, then this function returns the size in
/// bytes of the load width to use.  If not, this returns zero.
unsigned MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
                                unsigned MemLocSize, const LoadInst *LI,
                                const DataLayout &TD) {
  // We can only extend simple integer loads.
  if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0;

  // Load widening is hostile to ThreadSanitizer: it may cause false positives
  // or make the reports more cryptic (access sizes are wrong).
  if (LI->getParent()->getParent()->getAttributes().
      hasAttribute(AttributeSet::FunctionIndex, Attribute::SanitizeThread))
    return 0;

  // Get the base of this load.
  int64_t LIOffs = 0;
  const Value *LIBase =
    GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, &TD);

  // If the two pointers are not based on the same pointer, we can't tell that
  // they are related.
  if (LIBase != MemLocBase) return 0;

  // Okay, the two values are based on the same pointer, but returned as
  // no-alias.  This happens when we have things like two byte loads at "P+1"
  // and "P+3".  Check to see if increasing the size of the "LI" load up to its
  // alignment (or the largest native integer type) will allow us to load all
  // the bits required by MemLoc.

  // If MemLoc is before LI, then no widening of LI will help us out.
  if (MemLocOffs < LIOffs) return 0;

  // Get the alignment of the load in bytes.  We assume that it is safe to load
  // any legal integer up to this size without a problem.  For example, if we're
  // looking at an i8 load on x86-32 that is known 1024 byte aligned, we can
  // widen it up to an i32 load.  If it is known 2-byte aligned, we can widen it
  // to i16.
  unsigned LoadAlign = LI->getAlignment();

  int64_t MemLocEnd = MemLocOffs+MemLocSize;

  // If no amount of rounding up will let MemLoc fit into LI, then bail out.
  if (LIOffs+LoadAlign < MemLocEnd) return 0;

  // This is the size of the load to try.  Start with the next larger power of
  // two.
  unsigned NewLoadByteSize = LI->getType()->getPrimitiveSizeInBits()/8U;
  NewLoadByteSize = NextPowerOf2(NewLoadByteSize);

  while (1) {
    // If this load size is bigger than our known alignment or would not fit
    // into a native integer register, then we fail.
    if (NewLoadByteSize > LoadAlign ||
        !TD.fitsInLegalInteger(NewLoadByteSize*8))
      return 0;

    if (LIOffs+NewLoadByteSize > MemLocEnd &&
        LI->getParent()->getParent()->getAttributes().
          hasAttribute(AttributeSet::FunctionIndex, Attribute::SanitizeAddress))
      // We will be reading past the location accessed by the original program.
      // While this is safe in a regular build, Address Safety analysis tools
      // may start reporting false warnings. So, don't do widening.
      return 0;

    // If a load of this width would include all of MemLoc, then we succeed.
    if (LIOffs+NewLoadByteSize >= MemLocEnd)
      return NewLoadByteSize;

    NewLoadByteSize <<= 1;
  }
}
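
// Worked example (hypothetical values): let LI be "load i8* %P, align 4" with
// LIOffs = 0, and let MemLoc be the single byte at %P+3, so MemLocOffs = 3 and
// MemLocEnd = 4.  Starting from NextPowerOf2(1) = 2, a 2-byte load ends at
// offset 2 and misses MemLoc, but a 4-byte load ends at offset 4 and covers
// it; alignment (4) and legality permitting, the function returns 4, meaning
// the i8 load could be widened to an i32 load that produces the queried byte.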

/// getPointerDependencyFrom - Return the instruction on which a memory
/// location depends.  If isLoad is true, this routine ignores may-aliases with
/// read-only operations.  If isLoad is false, this routine ignores may-aliases
/// with reads from read-only locations.  If possible, pass the query
/// instruction as well; this function may take advantage of the metadata
/// annotated to the query instruction to refine the result.
MemDepResult MemoryDependenceAnalysis::
getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
                         BasicBlock::iterator ScanIt, BasicBlock *BB,
                         Instruction *QueryInst) {

  const Value *MemLocBase = 0;
  int64_t MemLocOffset = 0;
  unsigned Limit = BlockScanLimit;
  bool isInvariantLoad = false;
  if (isLoad && QueryInst) {
    LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
    if (LI && LI->getMetadata(LLVMContext::MD_invariant_load) != 0)
      isInvariantLoad = true;
  }

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    Instruction *Inst = --ScanIt;

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      // Debug intrinsics don't (and can't) cause dependences.
      if (isa<DbgInfoIntrinsic>(II)) continue;

      // If we reach a lifetime begin or end marker, then the query ends here
      // because the value is undefined.
      if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
        // FIXME: This only considers queries directly on the invariant-tagged
        // pointer, not on query pointers that are indexed off of them.  It'd
        // be nice to handle that at some point (the right approach is to use
        // GetPointerBaseWithConstantOffset).
        if (AA->isMustAlias(AliasAnalysis::Location(II->getArgOperand(1)),
                            MemLoc))
          return MemDepResult::getDef(II);
        continue;
      }
    }

    // Values depend on loads if the pointers are must aliased.  This means that
    // a load depends on another must aliased load from the same value.
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      // Atomic loads have complications involved.
      // FIXME: This is overly conservative.
      if (!LI->isUnordered())
        return MemDepResult::getClobber(LI);

      AliasAnalysis::Location LoadLoc = AA->getLocation(LI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasAnalysis::AliasResult R = AA->alias(LoadLoc, MemLoc);

      if (isLoad) {
        if (R == AliasAnalysis::NoAlias) {
          // If this is an over-aligned integer load (for example,
          // "load i8* %P, align 4") see if it would obviously overlap with the
          // queried location if widened to a larger load (e.g. if the queried
          // location is 1 byte at P+1).  If so, return it as a load/load
          // clobber result, allowing the client to decide to widen the load if
          // it wants to.
          if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType()))
            if (LI->getAlignment()*8 > ITy->getPrimitiveSizeInBits() &&
                isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase,
                                                       MemLocOffset, LI, TD))
              return MemDepResult::getClobber(Inst);

          continue;
        }

        // Must aliased loads are defs of each other.
        if (R == AliasAnalysis::MustAlias)
          return MemDepResult::getDef(Inst);

#if 0 // FIXME: Temporarily disabled. GVN is cleverly rewriting loads
      // in terms of clobbering loads, but since it does this by looking
      // at the clobbering load directly, it doesn't know about any
      // phi translation that may have happened along the way.

        // If we have a partial alias, then return this as a clobber for the
        // client to handle.
        if (R == AliasAnalysis::PartialAlias)
          return MemDepResult::getClobber(Inst);
#endif

        // A prior load that merely may-aliases does not, by itself, create a
        // dependence for a load query; keep scanning.
        continue;
      }

      // Stores don't depend on other no-aliased accesses.
      if (R == AliasAnalysis::NoAlias)
        continue;

      // Stores don't alias loads from read-only memory.
      if (AA->pointsToConstantMemory(LoadLoc))
        continue;

      // Stores depend on may/must aliased loads.
      return MemDepResult::getDef(Inst);
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // Atomic stores have complications involved.
      // FIXME: This is overly conservative.
      if (!SI->isUnordered())
        return MemDepResult::getClobber(SI);

      // If alias analysis can tell that this store is guaranteed to not modify
      // the query pointer, ignore it.  Use getModRefInfo to handle cases where
      // the query pointer points to constant memory etc.
      if (AA->getModRefInfo(SI, MemLoc) == AliasAnalysis::NoModRef)
        continue;

      // Ok, this store might clobber the query pointer.  Check to see if it is
      // a must alias: in this case, we want to return this as a def.
      AliasAnalysis::Location StoreLoc = AA->getLocation(SI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasAnalysis::AliasResult R = AA->alias(StoreLoc, MemLoc);

      if (R == AliasAnalysis::NoAlias)
        continue;
      if (R == AliasAnalysis::MustAlias)
        return MemDepResult::getDef(Inst);
      if (isInvariantLoad)
       continue;
      return MemDepResult::getClobber(Inst);
    }

    // If this is an allocation, and if we know that the accessed pointer is to
    // the allocation, return Def.  This means that there is no dependence and
    // the access can be optimized based on that.  For example, a load could
    // turn into undef.
    // Note: Only determine this to be a malloc if Inst is the malloc call, not
    // a subsequent bitcast of the malloc call result.  There can be stores to
    // the malloced memory between the malloc call and its bitcast uses, and we
    // need to continue scanning until the malloc call.
    const TargetLibraryInfo *TLI = AA->getTargetLibraryInfo();
    if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, TLI)) {
      const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, TD);

      if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr))
        return MemDepResult::getDef(Inst);
      // Be conservative if the accessed pointer may alias the allocation.
      if (AA->alias(Inst, AccessPtr) != AliasAnalysis::NoAlias)
        return MemDepResult::getClobber(Inst);
      // If the allocation is not aliased and does not read memory (like
      // strdup), it is safe to ignore.
      if (isa<AllocaInst>(Inst) ||
          isMallocLikeFn(Inst, TLI) || isCallocLikeFn(Inst, TLI))
        continue;
    }

    // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
    AliasAnalysis::ModRefResult MR = AA->getModRefInfo(Inst, MemLoc);
    // If necessary, perform additional analysis.
    if (MR == AliasAnalysis::ModRef)
      MR = AA->callCapturesBefore(Inst, MemLoc, DT);
    switch (MR) {
    case AliasAnalysis::NoModRef:
      // If the call has no effect on the queried pointer, just ignore it.
      continue;
    case AliasAnalysis::Mod:
      return MemDepResult::getClobber(Inst);
    case AliasAnalysis::Ref:
      // If the call is known to never store to the pointer, and if this is a
      // load query, we can safely ignore it (scan past it).
      if (isLoad)
        continue;
    default:
      // Otherwise, there is a potential dependence.  Return a clobber.
      return MemDepResult::getClobber(Inst);
    }
  }

  // No dependence found.  If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}
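
// Illustrative example (hypothetical IR) of the backwards scan above:
//
//   store i32 1, i32* %p
//   store i32 2, i32* %q    ; may-alias %p
//   %v = load i32* %p       ; the query
//
// Scanning back from %v, the store to %q is returned as a clobber unless AA
// proves NoAlias; if that store is removed, the store to %p is MustAlias and
// the query returns getDef on it, so a client could forward the stored value.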

/// getDependency - Return the instruction on which a memory operation
/// depends.
MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
  Instruction *ScanPos = QueryInst;

  // Check for a cached result
  MemDepResult &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it.  Note that this depends
  // on MemDepResult's default constructing to 'dirty'.
  if (!LocalCache.isDirty())
    return LocalCache;

  // Otherwise, if we have a dirty entry, we know we can start the scan at that
  // instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getInst()) {
    ScanPos = Inst;

    RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
  }

  BasicBlock *QueryParent = QueryInst->getParent();

  // Do the scan.
  if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
    // No dependence found.  If this is the entry block of the function, it is
    // unknown, otherwise it is non-local.
    if (QueryParent != &QueryParent->getParent()->getEntryBlock())
      LocalCache = MemDepResult::getNonLocal();
    else
      LocalCache = MemDepResult::getNonFuncLocal();
  } else {
    AliasAnalysis::Location MemLoc;
    AliasAnalysis::ModRefResult MR = GetLocation(QueryInst, MemLoc, AA);
    if (MemLoc.Ptr) {
      // If we can do a pointer scan, make it happen.
      bool isLoad = !(MR & AliasAnalysis::Mod);
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
        isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;

      LocalCache = getPointerDependencyFrom(MemLoc, isLoad, ScanPos,
                                            QueryParent, QueryInst);
    } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
      CallSite QueryCS(QueryInst);
      bool isReadOnly = AA->onlyReadsMemory(QueryCS);
      LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,
                                             QueryParent);
    } else
      // Non-memory instruction.
      LocalCache = MemDepResult::getUnknown();
  }

  // Remember the result!
  if (Instruction *I = LocalCache.getInst())
    ReverseLocalDeps[I].insert(QueryInst);

  return LocalCache;
}
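
// Sketch of client usage (hypothetical code, in the style of passes such as
// GVN or DSE):
//
//   MemDepResult Dep = MD->getDependency(Load);
//   if (Dep.isDef())
//     ... // Load reads exactly what Dep.getInst() produced.
//   else if (Dep.isClobber())
//     ... // Dep.getInst() may write the queried location.
//   else if (Dep.isNonLocal())
//     ... // Nothing in this block; use the non-local query APIs below.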

#ifndef NDEBUG
/// AssertSorted - This method is used when -debug is specified to verify that
/// cache arrays are properly kept sorted.
static void AssertSorted(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
                         int Count = -1) {
  if (Count == -1) Count = Cache.size();
  if (Count == 0) return;

  for (unsigned i = 1; i != unsigned(Count); ++i)
    assert(!(Cache[i] < Cache[i-1]) && "Cache isn't sorted!");
}
#endif

/// getNonLocalCallDependency - Perform a full dependency query for the
/// specified call, returning the set of blocks that the value is
/// potentially live across.  The returned set of results will include a
/// "NonLocal" result for all blocks where the value is live across.
///
/// This method assumes the instruction returns a "NonLocal" dependency
/// within its own block.
///
/// This returns a reference to an internal data structure that may be
/// invalidated on the next non-local query or when an instruction is
/// removed.  Clients must copy this data if they want it around longer than
/// that.
const MemoryDependenceAnalysis::NonLocalDepInfo &
MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
  assert(getDependency(QueryCS.getInstruction()).isNonLocal() &&
         "getNonLocalCallDependency should only be used on calls with "
         "non-local deps!");
  PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()];
  NonLocalDepInfo &Cache = CacheP.first;

  /// DirtyBlocks - This is the set of blocks that need to be recomputed.  In
  /// the cached case, this can happen due to instructions being deleted etc. In
  /// the uncached case, this starts out as the set of predecessors we care
  /// about.
  SmallVector<BasicBlock*, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // Okay, we have a cache entry.  If we know it is not dirty, just return it
    // with no computation.
    if (!CacheP.second) {
      ++NumCacheNonLocal;
      return Cache;
    }

    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    for (NonLocalDepInfo::iterator I = Cache.begin(), E = Cache.end();
       I != E; ++I)
      if (I->getResult().isDirty())
        DirtyBlocks.push_back(I->getBB());

    // Sort the cache so that we can do fast binary search lookups below.
    std::sort(Cache.begin(), Cache.end());

    ++NumCacheDirtyNonLocal;
    //cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
    //     << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
    for (BasicBlock **PI = PredCache->GetPreds(QueryBB); *PI; ++PI)
      DirtyBlocks.push_back(*PI);
    ++NumUncacheNonLocal;
  }

  // isReadonlyCall - If this is a read-only call, we can be more aggressive.
  bool isReadonlyCall = AA->onlyReadsMemory(QueryCS);

  SmallPtrSet<BasicBlock*, 64> Visited;

  unsigned NumSortedEntries = Cache.size();
  DEBUG(AssertSorted(Cache));

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.back();
    DirtyBlocks.pop_back();

    // Already processed this block?
    if (!Visited.insert(DirtyBB))
      continue;

    // Do a binary search to see if we already have an entry for this block in
    // the cache set.  If so, find it.
    DEBUG(AssertSorted(Cache, NumSortedEntries));
    NonLocalDepInfo::iterator Entry =
      std::upper_bound(Cache.begin(), Cache.begin()+NumSortedEntries,
                       NonLocalDepEntry(DirtyBB));
    if (Entry != Cache.begin() && prior(Entry)->getBB() == DirtyBB)
      --Entry;

    NonLocalDepEntry *ExistingResult = 0;
    if (Entry != Cache.begin()+NumSortedEntries &&
        Entry->getBB() == DirtyBB) {
      // If we already have an entry, and if it isn't already dirty, the block
      // is done.
      if (!Entry->getResult().isDirty())
        continue;

      // Otherwise, remember this slot so we can update the value.
      ExistingResult = &*Entry;
    }

    // If the dirty entry has a pointer, start scanning from it so we don't have
    // to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (ExistingResult) {
      if (Instruction *Inst = ExistingResult->getResult().getInst()) {
        ScanPos = Inst;
        // We're removing QueryInst's use of Inst.
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
                             QueryCS.getInstruction());
      }
    }

    // Find out if this block has a local dependency for QueryInst.
    MemDepResult Dep;

    if (ScanPos != DirtyBB->begin()) {
      Dep = getCallSiteDependencyFrom(QueryCS, isReadonlyCall, ScanPos,
                                      DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence found.  If this is the entry block of the function, it
      // is unknown, otherwise it is non-local.
      Dep = MemDepResult::getNonLocal();
    } else {
      Dep = MemDepResult::getNonFuncLocal();
    }

    // If we had a dirty entry for the block, update it.  Otherwise, just add
    // a new entry.
    if (ExistingResult)
      ExistingResult->setResult(Dep);
    else
      Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember the association!
    if (!Dep.isNonLocal()) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = Dep.getInst())
        ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction());
    } else {

      // If the block *is* completely transparent to the load, we need to check
      // the predecessors of this block.  Add them to our worklist.
      for (BasicBlock **PI = PredCache->GetPreds(DirtyBB); *PI; ++PI)
        DirtyBlocks.push_back(*PI);
    }
  }

  return Cache;
}
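
// Sketch of client usage (hypothetical).  The returned vector is internal
// cache state, so anything that must survive the next query has to be copied:
//
//   const MemoryDependenceAnalysis::NonLocalDepInfo &Deps =
//       MD->getNonLocalCallDependency(CallSite(Call));
//   for (unsigned i = 0, e = Deps.size(); i != e; ++i)
//     if (Deps[i].getResult().isDef())
//       ... // Call depends on Deps[i].getResult().getInst() in Deps[i].getBB()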

/// getNonLocalPointerDependency - Perform a full dependency query for an
/// access to the specified (non-volatile) memory location, returning the
/// set of instructions that either define or clobber the value.
///
/// This method assumes the pointer has a "NonLocal" dependency within its
/// own block.
///
void MemoryDependenceAnalysis::
getNonLocalPointerDependency(const AliasAnalysis::Location &Loc, bool isLoad,
                             BasicBlock *FromBB,
                             SmallVectorImpl<NonLocalDepResult> &Result) {
  assert(Loc.Ptr->getType()->isPointerTy() &&
         "Can't get pointer deps of a non-pointer!");
  Result.clear();

  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), TD);

  // This is the set of blocks we've inspected, and the pointer we consider in
  // each block.  Because of critical edges, we currently bail out if querying
  // a block with multiple different pointers.  This can happen during PHI
  // translation.
  DenseMap<BasicBlock*, Value*> Visited;
  if (!getNonLocalPointerDepFromBB(Address, Loc, isLoad, FromBB,
                                   Result, Visited, true))
    return;
  Result.clear();
  Result.push_back(NonLocalDepResult(FromBB,
                                     MemDepResult::getUnknown(),
                                     const_cast<Value *>(Loc.Ptr)));
}
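
// Sketch of client usage (hypothetical), e.g. for a load being considered for
// non-local value numbering or PRE:
//
//   SmallVector<NonLocalDepResult, 16> Deps;
//   MD->getNonLocalPointerDependency(AA->getLocation(Load), /*isLoad=*/true,
//                                    Load->getParent(), Deps);
//   // Each entry names a block plus the def/clobber (or unknown) found there
//   // for the phi-translated form of the pointer.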

/// GetNonLocalInfoForBlock - Compute the memdep value for BB with
/// Pointer/PointeeSize using either cached information in Cache or by doing a
/// lookup (which may use dirty cache info if available).  If we do a lookup,
/// add the result to the cache.
MemDepResult MemoryDependenceAnalysis::
GetNonLocalInfoForBlock(const AliasAnalysis::Location &Loc,
                        bool isLoad, BasicBlock *BB,
                        NonLocalDepInfo *Cache, unsigned NumSortedEntries) {

  // Do a binary search to see if we already have an entry for this block in
  // the cache set.  If so, find it.
  NonLocalDepInfo::iterator Entry =
    std::upper_bound(Cache->begin(), Cache->begin()+NumSortedEntries,
                     NonLocalDepEntry(BB));
  if (Entry != Cache->begin() && (Entry-1)->getBB() == BB)
    --Entry;

  NonLocalDepEntry *ExistingResult = 0;
  if (Entry != Cache->begin()+NumSortedEntries && Entry->getBB() == BB)
    ExistingResult = &*Entry;

  // If we have a cached entry, and it is non-dirty, use it as the value for
  // this dependency.
  if (ExistingResult && !ExistingResult->getResult().isDirty()) {
    ++NumCacheNonLocalPtr;
    return ExistingResult->getResult();
  }

  // Otherwise, we have to scan for the value.  If we have a dirty cache
  // entry, start scanning from its position, otherwise we scan from the end
  // of the block.
  BasicBlock::iterator ScanPos = BB->end();
  if (ExistingResult && ExistingResult->getResult().getInst()) {
    assert(ExistingResult->getResult().getInst()->getParent() == BB &&
           "Instruction invalidated?");
    ++NumCacheDirtyNonLocalPtr;
    ScanPos = ExistingResult->getResult().getInst();

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, ScanPos, CacheKey);
  } else {
    ++NumUncacheNonLocalPtr;
  }

  // Scan the block for the dependency.
  MemDepResult Dep = getPointerDependencyFrom(Loc, isLoad, ScanPos, BB);

  // If we had a dirty entry for the block, update it.  Otherwise, just add
  // a new entry.
  if (ExistingResult)
    ExistingResult->setResult(Dep);
  else
    Cache->push_back(NonLocalDepEntry(BB, Dep));

  // If the block has a dependency (i.e. it isn't completely transparent to
  // the value), remember the reverse association because we just added it
  // to Cache!
  if (!Dep.isDef() && !Dep.isClobber())
    return Dep;

  // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
  // update MemDep when we remove instructions.
  Instruction *Inst = Dep.getInst();
  assert(Inst && "Didn't depend on anything?");
  ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
  ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
  return Dep;
}

/// SortNonLocalDepInfoCache - Sort a NonLocalDepInfo cache, given a certain
/// number of elements in the array that are already properly ordered.  This is
/// optimized for the case when only a few entries are added.
static void
SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
                         unsigned NumSortedEntries) {
  switch (Cache.size() - NumSortedEntries) {
  case 0:
    // done, no new entries.
    break;
  case 2: {
    // Two new entries, insert the last one into place.
    NonLocalDepEntry Val = Cache.back();
    Cache.pop_back();
    MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
      std::upper_bound(Cache.begin(), Cache.end()-1, Val);
    Cache.insert(Entry, Val);
    // FALL THROUGH.
  }
  case 1:
    // One new entry, just insert the new value at the appropriate position.
    if (Cache.size() != 1) {
      NonLocalDepEntry Val = Cache.back();
      Cache.pop_back();
      MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.end(), Val);
      Cache.insert(Entry, Val);
    }
    break;
  default:
    // Added many values, do a full scale sort.
    std::sort(Cache.begin(), Cache.end());
    break;
  }
}
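
// For example (hypothetical contents): with NumSortedEntries == 4 and Cache
// holding the sorted entries for blocks {B1, B3, B5, B7} plus one appended
// entry for B4, the "case 1" path performs a single upper_bound and insert to
// produce {B1, B3, B4, B5, B7}; only three or more unsorted entries fall back
// to the full std::sort.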

/// getNonLocalPointerDepFromBB - Perform a dependency query based on
/// pointer/pointeesize starting at the end of StartBB.  Add any clobber/def
/// results to the results vector and keep track of which blocks are visited in
/// 'Visited'.
///
/// This has special behavior for the first block queries (when SkipFirstBlock
/// is true).  In this special case, it ignores the contents of the specified
/// block and starts returning dependence info for its predecessors.
///
/// This function returns false on success, or true to indicate that it could
/// not compute dependence information for some reason.  This should be treated
/// as a clobber dependence on the first instruction in the predecessor block.
bool MemoryDependenceAnalysis::
getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
                            const AliasAnalysis::Location &Loc,
                            bool isLoad, BasicBlock *StartBB,
                            SmallVectorImpl<NonLocalDepResult> &Result,
                            DenseMap<BasicBlock*, Value*> &Visited,
                            bool SkipFirstBlock) {

  // Look up the cached info for Pointer.
  ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);

  // Set up a temporary NLPI value. If the map doesn't yet have an entry for
  // CacheKey, this value will be inserted as the associated value. Otherwise,
  // it'll be ignored, and we'll have to check to see if the cached size and
  // tbaa tag are consistent with the current query.
  NonLocalPointerInfo InitialNLPI;
  InitialNLPI.Size = Loc.Size;
  InitialNLPI.TBAATag = Loc.TBAATag;

  // Get the NLPI for CacheKey, inserting one into the map if it doesn't
  // already have one.
  std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
    NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
  NonLocalPointerInfo *CacheInfo = &Pair.first->second;

  // If we already have a cache entry for this CacheKey, we may need to do some
  // work to reconcile the cache entry and the current query.
  if (!Pair.second) {
    if (CacheInfo->Size < Loc.Size) {
      // The query's Size is greater than the cached one. Throw out the
      // cached data and proceed with the query at the greater size.
      CacheInfo->Pair = BBSkipFirstBlockPair();
      CacheInfo->Size = Loc.Size;
      for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
           DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
        if (Instruction *Inst = DI->getResult().getInst())
          RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
      CacheInfo->NonLocalDeps.clear();
    } else if (CacheInfo->Size > Loc.Size) {
      // This query's Size is less than the cached one. Conservatively restart
      // the query using the greater size.
      return getNonLocalPointerDepFromBB(Pointer,
                                         Loc.getWithNewSize(CacheInfo->Size),
                                         isLoad, StartBB, Result, Visited,
                                         SkipFirstBlock);
    }

    // If the query's TBAATag is inconsistent with the cached one,
    // conservatively throw out the cached data and restart the query with
    // no tag if needed.
    if (CacheInfo->TBAATag != Loc.TBAATag) {
      if (CacheInfo->TBAATag) {
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->TBAATag = 0;
        for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
             DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
          if (Instruction *Inst = DI->getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
      }
      if (Loc.TBAATag)
        return getNonLocalPointerDepFromBB(Pointer, Loc.getWithoutTBAATag(),
                                           isLoad, StartBB, Result, Visited,
                                           SkipFirstBlock);
    }
  }

  NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;

  // If we have valid cached information for exactly the block we are
  // investigating, just return it with no recomputation.
  if (CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
    // If we have a fully cached result for this query, we can just return the
    // cached results and populate the visited set.  However, we have to verify
    // that we don't already have conflicting results for these blocks.  Check
    // to ensure that if a block in the results set is in the visited set that
    // it was for the same pointer query.
    if (!Visited.empty()) {
      for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
           I != E; ++I) {
        DenseMap<BasicBlock*, Value*>::iterator VI = Visited.find(I->getBB());
        if (VI == Visited.end() || VI->second == Pointer.getAddr())
          continue;

        // We have a pointer mismatch in a block.  Just return clobber, saying
        // that something was clobbered in this result.  We could also do a
        // non-fully cached query, but there is little point in doing this.
        return true;
      }
    }

    Value *Addr = Pointer.getAddr();
    for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
         I != E; ++I) {
      Visited.insert(std::make_pair(I->getBB(), Addr));
      if (!I->getResult().isNonLocal() && DT->isReachableFromEntry(I->getBB()))
        Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(), Addr));
    }
    ++NumCacheCompleteNonLocalPtr;
    return false;
  }

  // Otherwise, either this is a new block, a block with an invalid cache
  // pointer or one that we're about to invalidate by putting more info into it
  // than its valid cache info.  If empty, the result will be valid cache info,
  // otherwise it isn't.
  if (Cache->empty())
    CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
  else
    CacheInfo->Pair = BBSkipFirstBlockPair();

  SmallVector<BasicBlock*, 32> Worklist;
  Worklist.push_back(StartBB);

  // PredList used inside loop.
  SmallVector<std::pair<BasicBlock*, PHITransAddr>, 16> PredList;

  // Keep track of the entries that we know are sorted.  Previously cached
  // entries will all be sorted.  The entries we add we only sort on demand (we
  // don't insert every element into its sorted position).  We know that we
  // won't get any reuse from currently inserted values, because we don't
  // revisit blocks after we insert info for them.
  unsigned NumSortedEntries = Cache->size();
  DEBUG(AssertSorted(*Cache));

  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // Skip the first block if we have it.
    if (!SkipFirstBlock) {
      // Analyze the dependency of *Pointer in FromBB.  See if we already have
      // been here.
      assert(Visited.count(BB) && "Should check 'visited' before adding to WL");

      // Get the dependency info for Pointer in BB.  If we have cached
      // information, we will use it, otherwise we compute it.
      DEBUG(AssertSorted(*Cache, NumSortedEntries));
      MemDepResult Dep = GetNonLocalInfoForBlock(Loc, isLoad, BB, Cache,
                                                 NumSortedEntries);

      // If we got a Def or Clobber, add this to the list of results.
      if (!Dep.isNonLocal() && DT->isReachableFromEntry(BB)) {
        Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
        continue;
      }
    }

    // If 'Pointer' is an instruction defined in this block, then we need to do
    // phi translation to change it into a value live in the predecessor block.
    // If not, we just add the predecessors to the worklist and scan them with
    // the same Pointer.
    if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
      SkipFirstBlock = false;
      SmallVector<BasicBlock*, 16> NewBlocks;
      for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
        // Verify that we haven't looked at this block yet.
        std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
          InsertRes = Visited.insert(std::make_pair(*PI, Pointer.getAddr()));
        if (InsertRes.second) {
          // First time we've looked at *PI.
          NewBlocks.push_back(*PI);
          continue;
        }

        // If we have seen this block before, but it was with a different
        // pointer then we have a phi translation failure and we have to treat
        // this as a clobber.
        if (InsertRes.first->second != Pointer.getAddr()) {
          // Make sure to clean up the Visited map before continuing on to
          // PredTranslationFailure.
          for (unsigned i = 0; i < NewBlocks.size(); i++)
            Visited.erase(NewBlocks[i]);
          goto PredTranslationFailure;
        }
      }
      Worklist.append(NewBlocks.begin(), NewBlocks.end());
      continue;
    }
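
    // Illustrative example (hypothetical IR): if the pointer being queried in
    // BB is
    //
    //   %p = phi i8* [ %a, %Pred1 ], [ %b, %Pred2 ]
    //
    // then it needs phi translation: the query continues as a query on %a in
    // %Pred1 and on %b in %Pred2.  The code below performs that translation,
    // bailing out to PredTranslationFailure when no translated pointer exists.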

    // We do need to do phi translation; if we know ahead of time that we
    // can't phi translate this value, don't even try.
    if (!Pointer.IsPotentiallyPHITranslatable())
      goto PredTranslationFailure;

    // We may have added values to the cache list before this PHI translation.
    // If so, we haven't done anything to ensure that the cache remains sorted.
    // Sort it now (if needed) so that recursive invocations of
    // getNonLocalPointerDepFromBB and other routines that could reuse the cache
    // value will only see properly sorted cache arrays.
    if (Cache && NumSortedEntries != Cache->size()) {
      SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      NumSortedEntries = Cache->size();
    }
    Cache = 0;

    PredList.clear();
    for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
      BasicBlock *Pred = *PI;
      PredList.push_back(std::make_pair(Pred, Pointer));

      // Get the PHI translated pointer in this predecessor.  This can fail if
      // not translatable, in which case the getAddr() returns null.
      PHITransAddr &PredPointer = PredList.back().second;
      PredPointer.PHITranslateValue(BB, Pred, 0);

      Value *PredPtrVal = PredPointer.getAddr();

      // Check to see if we have already visited this pred block with another
      // pointer.  If so, we can't do this lookup.  This failure can occur
      // with PHI translation when a critical edge exists and the PHI node in
      // the successor translates to a pointer value different than the
      // pointer the block was first analyzed with.
      std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
        InsertRes = Visited.insert(std::make_pair(Pred, PredPtrVal));

      if (!InsertRes.second) {
        // We found the pred; take it off the list of preds to visit.
        PredList.pop_back();

        // If the predecessor was visited with PredPtr, then we already did
        // the analysis and can ignore it.
        if (InsertRes.first->second == PredPtrVal)
          continue;

        // Otherwise, the block was previously analyzed with a different
        // pointer.  We can't represent the result of this case, so we just
        // treat this as a phi translation failure.

        // Make sure to clean up the Visited map before continuing on to
        // PredTranslationFailure.
        for (unsigned i = 0; i < PredList.size(); i++)
          Visited.erase(PredList[i].first);

        goto PredTranslationFailure;
      }
    }

    // Actually process results here; this needs to be a separate loop to avoid
   1145     // calling getNonLocalPointerDepFromBB for blocks we don't want to return
   1146     // any results for.  (getNonLocalPointerDepFromBB will modify our
   1147     // datastructures in ways the code after the PredTranslationFailure label
   1148     // doesn't expect.)
   1149     for (unsigned i = 0; i < PredList.size(); i++) {
   1150       BasicBlock *Pred = PredList[i].first;
   1151       PHITransAddr &PredPointer = PredList[i].second;
   1152       Value *PredPtrVal = PredPointer.getAddr();
   1153 
   1154       bool CanTranslate = true;
   1155       // If PHI translation was unable to find an available pointer in this
   1156       // predecessor, then we have to assume that the pointer is clobbered in
   1157       // that predecessor.  We can still do PRE of the load, which would insert
   1158       // a computation of the pointer in this predecessor.
   1159       if (PredPtrVal == 0)
   1160         CanTranslate = false;
   1161 
   1162       // FIXME: it is entirely possible that PHI translating will end up with
   1163       // the same value.  Consider PHI translating something like:
   1164       // X = phi [x, bb1], [y, bb2].  PHI translating for bb1 doesn't *need*
   1165       // to recurse here, pedantically speaking.
   1166 
   1167       // If getNonLocalPointerDepFromBB fails here, that means the cached
   1168       // result conflicted with the Visited list; we have to conservatively
   1169       // assume it is unknown, but this also does not block PRE of the load.
   1170       if (!CanTranslate ||
   1171           getNonLocalPointerDepFromBB(PredPointer,
   1172                                       Loc.getWithNewPtr(PredPtrVal),
   1173                                       isLoad, Pred,
   1174                                       Result, Visited)) {
   1175         // Add the entry to the Result list.
   1176         NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal);
   1177         Result.push_back(Entry);
   1178 
   1179         // Since we had a phi translation failure, the cache for CacheKey won't
   1180         // include all of the entries that we need to immediately satisfy future
   1181         // queries.  Mark this in NonLocalPointerDeps by setting the
   1182         // BBSkipFirstBlockPair pointer to null.  This requires reuse of the
   1183         // cached value to do more work but not miss the phi trans failure.
   1184         NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
   1185         NLPI.Pair = BBSkipFirstBlockPair();
   1186         continue;
   1187       }
   1188     }

    // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
    CacheInfo = &NonLocalPointerDeps[CacheKey];
    Cache = &CacheInfo->NonLocalDeps;
    NumSortedEntries = Cache->size();

    // Since we did phi translation, the "Cache" set won't contain all of the
    // results for the query.  This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set".  Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();
    SkipFirstBlock = false;
    continue;

  PredTranslationFailure:
    // The following code is "failure"; we can't produce a sane translation
    // for the given block.  It assumes that we haven't modified any of
    // our data structures while processing the current block.

    if (Cache == 0) {
      // Refresh the CacheInfo/Cache pointer if it got invalidated.
      CacheInfo = &NonLocalPointerDeps[CacheKey];
      Cache = &CacheInfo->NonLocalDeps;
      NumSortedEntries = Cache->size();
    }

    // Since we failed phi translation, the "Cache" set won't contain all of the
    // results for the query.  This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set".  Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();

    // If *nothing* works, mark the pointer as unknown.
    //
    // If this is the magic first block, return this as a clobber of the whole
    // incoming value.  Since we can't phi translate to one of the predecessors,
    // we have to bail out.
    if (SkipFirstBlock)
      return true;

    for (NonLocalDepInfo::reverse_iterator I = Cache->rbegin(); ; ++I) {
      assert(I != Cache->rend() && "Didn't find current block??");
      if (I->getBB() != BB)
        continue;

      assert(I->getResult().isNonLocal() &&
             "Should only be here with transparent block");
      I->setResult(MemDepResult::getUnknown());
      Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(),
                                         Pointer.getAddr()));
      break;
    }
  }

  // Okay, we're done now.  If we added new values to the cache, re-sort it.
  SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
  DEBUG(AssertSorted(*Cache));
  return false;
}

/// RemoveCachedNonLocalPointerDependencies - If P exists in
/// CachedNonLocalPointerInfo, remove it.
void MemoryDependenceAnalysis::
RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P) {
  CachedNonLocalPointerInfo::iterator It =
    NonLocalPointerDeps.find(P);
  if (It == NonLocalPointerDeps.end()) return;

  // Remove all of the entries in the BB->val map.  This involves removing
  // instructions from the reverse map.
  NonLocalDepInfo &PInfo = It->second.NonLocalDeps;

  for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
    Instruction *Target = PInfo[i].getResult().getInst();
    if (Target == 0) continue;  // Ignore non-local dep results.
    assert(Target->getParent() == PInfo[i].getBB());

    // We are eliminating this entry from 'Cache', so update the reverse info.
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
  }

  // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
  NonLocalPointerDeps.erase(It);
}


/// invalidateCachedPointerInfo - This method is used to invalidate cached
/// information about the specified pointer, because it may be too
/// conservative in memdep.  This is an optional call that can be used when
/// the client detects an equivalence between the pointer and some other
/// value and replaces the other value with ptr.  This can make Ptr available
/// in more places than the cached info reflects.
void MemoryDependenceAnalysis::invalidateCachedPointerInfo(Value *Ptr) {
  // If Ptr isn't really a pointer, just ignore it.
  if (!Ptr->getType()->isPointerTy()) return;
  // Flush store info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
  // Flush load info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
}
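
// Illustrative client usage (a hypothetical sketch, not code from this file):
//   OldV->replaceAllUsesWith(Ptr);        // client proved OldV == Ptr
//   MD->invalidateCachedPointerInfo(Ptr); // flush possibly-stale cache
// Without the invalidation, later queries could return results that are
// more conservative than what is now provable for Ptr.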

/// invalidateCachedPredecessors - Clear the PredIteratorCache info.
/// This needs to be done when the CFG changes, e.g., due to splitting
/// critical edges.
void MemoryDependenceAnalysis::invalidateCachedPredecessors() {
  PredCache->clear();
}
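
// For example (hypothetical usage): a client that splits a critical edge
// with SplitCriticalEdge, or otherwise edits the CFG, should follow up with
//   MD->invalidateCachedPredecessors();
// so that stale predecessor lists are not reused.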

/// removeInstruction - Remove an instruction from the dependence analysis,
/// updating the dependence of instructions that previously depended on it.
/// This method attempts to keep the cache coherent using the reverse map.
void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
  // Walk through the Non-local dependencies, removing this one as the value
  // for any cached queries.
  NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
  if (NLDI != NonLocalDeps.end()) {
    NonLocalDepInfo &BlockMap = NLDI->second.first;
    for (NonLocalDepInfo::iterator DI = BlockMap.begin(), DE = BlockMap.end();
         DI != DE; ++DI)
      if (Instruction *Inst = DI->getResult().getInst())
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
    NonLocalDeps.erase(NLDI);
  }

  // If we have a cached local dependence query for this instruction, remove it.
  //
  LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
  if (LocalDepEntry != LocalDeps.end()) {
    // Remove us from DepInst's reverse set now that the local dep info is gone.
    if (Instruction *Inst = LocalDepEntry->second.getInst())
      RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);

    // Remove this local dependency info.
    LocalDeps.erase(LocalDepEntry);
  }

  // If we have any cached pointer dependencies on this instruction, remove
  // them.  If the instruction has non-pointer type, then it can't be a pointer
  // base.

  // Remove it from both the load info and the store info.  The instruction
  // can't be in either of these maps if it is non-pointer.
  if (RemInst->getType()->isPointerTy()) {
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
  }

  // Loop over all of the things that depend on the instruction we're removing.
  //
  SmallVector<std::pair<Instruction*, Instruction*>, 8> ReverseDepsToAdd;

  // If we find RemInst as a clobber or Def in any of the maps for other values,
  // we need to replace its entry with a dirty version of the instruction after
  // it.  If RemInst is a terminator, we use a null dirty value.
  //
  // Using a dirty version of the instruction after RemInst saves having to scan
  // the entire block to get to this point.
  MemDepResult NewDirtyVal;
  if (!RemInst->isTerminator())
    NewDirtyVal = MemDepResult::getDirty(++BasicBlock::iterator(RemInst));
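
  // Illustrative example: if a cached entry for some query pointed at
  // RemInst, it is remapped below to be "dirty at the instruction following
  // RemInst", so the next query for that entry rescans backwards from that
  // point instead of from the end of the block.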

  ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseLocalDeps.end()) {
    SmallPtrSet<Instruction*, 4> &ReverseDeps = ReverseDepIt->second;
    // RemInst can't be the terminator if it has local stuff depending on it.
    assert(!ReverseDeps.empty() && !isa<TerminatorInst>(RemInst) &&
           "Nothing can locally depend on a terminator");

    for (SmallPtrSet<Instruction*, 4>::iterator I = ReverseDeps.begin(),
         E = ReverseDeps.end(); I != E; ++I) {
      Instruction *InstDependingOnRemInst = *I;
      assert(InstDependingOnRemInst != RemInst &&
             "Already removed our local dep info");

      LocalDeps[InstDependingOnRemInst] = NewDirtyVal;

      // Make sure to remember that new things depend on NewDirtyVal's
      // instruction.
      assert(NewDirtyVal.getInst() && "There is no way something else can have "
             "a local dep on this if it is a terminator!");
      ReverseDepsToAdd.push_back(std::make_pair(NewDirtyVal.getInst(),
                                                InstDependingOnRemInst));
    }

    ReverseLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating the
    // 'ReverseDeps' reference.
    while (!ReverseDepsToAdd.empty()) {
      ReverseLocalDeps[ReverseDepsToAdd.back().first]
        .insert(ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseNonLocalDeps.end()) {
    SmallPtrSet<Instruction*, 4> &Set = ReverseDepIt->second;
    for (SmallPtrSet<Instruction*, 4>::iterator I = Set.begin(), E = Set.end();
         I != E; ++I) {
      assert(*I != RemInst && "Already removed NonLocalDep info for RemInst");

      PerInstNLInfo &INLD = NonLocalDeps[*I];
      // The information is now dirty!
      INLD.second = true;

      for (NonLocalDepInfo::iterator DI = INLD.first.begin(),
           DE = INLD.first.end(); DI != DE; ++DI) {
        if (DI->getResult().getInst() != RemInst) continue;

        // Convert to a dirty entry for the subsequent instruction.
        DI->setResult(NewDirtyVal);

        if (Instruction *NextI = NewDirtyVal.getInst())
          ReverseDepsToAdd.push_back(std::make_pair(NextI, *I));
      }
    }

    ReverseNonLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating 'Set'.
    while (!ReverseDepsToAdd.empty()) {
      ReverseNonLocalDeps[ReverseDepsToAdd.back().first]
        .insert(ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
  // value in the NonLocalPointerDeps info.
  ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
    ReverseNonLocalPtrDeps.find(RemInst);
  if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
    SmallPtrSet<ValueIsLoadPair, 4> &Set = ReversePtrDepIt->second;
    SmallVector<std::pair<Instruction*, ValueIsLoadPair>,8> ReversePtrDepsToAdd;

    for (SmallPtrSet<ValueIsLoadPair, 4>::iterator I = Set.begin(),
         E = Set.end(); I != E; ++I) {
      ValueIsLoadPair P = *I;
      assert(P.getPointer() != RemInst &&
             "Already removed NonLocalPointerDeps info for RemInst");

      NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;

      // The cache is not valid for any specific block anymore.
      NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();

      // Update any entries for RemInst to use the instruction after it.
      for (NonLocalDepInfo::iterator DI = NLPDI.begin(), DE = NLPDI.end();
           DI != DE; ++DI) {
        if (DI->getResult().getInst() != RemInst) continue;

        // Convert to a dirty entry for the subsequent instruction.
        DI->setResult(NewDirtyVal);

        if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
          ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
      }

      // Re-sort the NonLocalDepInfo.  Changing the dirty entry to its
      // subsequent value may invalidate the sortedness.
      std::sort(NLPDI.begin(), NLPDI.end());
    }

    ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);

    while (!ReversePtrDepsToAdd.empty()) {
      ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first]
        .insert(ReversePtrDepsToAdd.back().second);
      ReversePtrDepsToAdd.pop_back();
    }
  }

  assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
  AA->deleteValue(RemInst);
  DEBUG(verifyRemoved(RemInst));
}
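
// Illustrative caller pattern (hypothetical sketch): clients that delete an
// instruction should notify memdep before erasing it, e.g.:
//   MD->removeInstruction(DeadInst);
//   DeadInst->eraseFromParent();
// Otherwise cached results could be left pointing at freed memory.
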
/// verifyRemoved - Verify that the specified instruction does not occur
/// in our internal data structures.
void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const {
  for (LocalDepMapType::const_iterator I = LocalDeps.begin(),
       E = LocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    assert(I->second.getInst() != D &&
           "Inst occurs in data structures");
  }

  for (CachedNonLocalPointerInfo::const_iterator
       I = NonLocalPointerDeps.begin(), E = NonLocalPointerDeps.end();
       I != E; ++I) {
    assert(I->first.getPointer() != D && "Inst occurs in NLPD map key");
    const NonLocalDepInfo &Val = I->second.NonLocalDeps;
    for (NonLocalDepInfo::const_iterator II = Val.begin(), EE = Val.end();
         II != EE; ++II)
      assert(II->getResult().getInst() != D && "Inst occurs as NLPD value");
  }

  for (NonLocalDepMapType::const_iterator I = NonLocalDeps.begin(),
       E = NonLocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    const PerInstNLInfo &INLD = I->second;
    for (NonLocalDepInfo::const_iterator II = INLD.first.begin(),
         EE = INLD.first.end(); II != EE; ++II)
      assert(II->getResult().getInst() != D && "Inst occurs in data structures");
  }

  for (ReverseDepMapType::const_iterator I = ReverseLocalDeps.begin(),
       E = ReverseLocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(),
         EE = I->second.end(); II != EE; ++II)
      assert(*II != D && "Inst occurs in data structures");
  }

  for (ReverseDepMapType::const_iterator I = ReverseNonLocalDeps.begin(),
       E = ReverseNonLocalDeps.end();
       I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(),
         EE = I->second.end(); II != EE; ++II)
      assert(*II != D && "Inst occurs in data structures");
  }

  for (ReverseNonLocalPtrDepTy::const_iterator
       I = ReverseNonLocalPtrDeps.begin(),
       E = ReverseNonLocalPtrDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in rev NLPD map");

    for (SmallPtrSet<ValueIsLoadPair, 4>::const_iterator II = I->second.begin(),
         EE = I->second.end(); II != EE; ++II)
      assert(*II != ValueIsLoadPair(D, false) &&
             *II != ValueIsLoadPair(D, true) &&
             "Inst occurs in ReverseNonLocalPtrDeps map");
  }
}