//===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file exposes an interface to building/using memory SSA to
/// walk memory instructions using a use/def graph.
///
/// The MemorySSA class builds an SSA form that links together memory access
/// instructions such as loads, stores, atomics, and calls. Additionally, it
/// does a trivial form of "heap versioning": every time the memory state
/// changes in the program, we generate a new heap version. It generates
/// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
///
/// As a trivial example,
/// define i32 @main() #0 {
/// entry:
///   %call = call noalias i8* @_Znwm(i64 4) #2
///   %0 = bitcast i8* %call to i32*
///   %call1 = call noalias i8* @_Znwm(i64 4) #2
///   %1 = bitcast i8* %call1 to i32*
///   store i32 5, i32* %0, align 4
///   store i32 7, i32* %1, align 4
///   %2 = load i32* %0, align 4
///   %3 = load i32* %1, align 4
///   %add = add nsw i32 %2, %3
///   ret i32 %add
/// }
///
/// Will become
/// define i32 @main() #0 {
/// entry:
///   ; 1 = MemoryDef(0)
///   %call = call noalias i8* @_Znwm(i64 4) #3
///   %2 = bitcast i8* %call to i32*
///   ; 2 = MemoryDef(1)
///   %call1 = call noalias i8* @_Znwm(i64 4) #3
///   %4 = bitcast i8* %call1 to i32*
///   ; 3 = MemoryDef(2)
///   store i32 5, i32* %2, align 4
///   ; 4 = MemoryDef(3)
///   store i32 7, i32* %4, align 4
///   ; MemoryUse(3)
///   %7 = load i32* %2, align 4
///   ; MemoryUse(4)
///   %8 = load i32* %4, align 4
///   %add = add nsw i32 %7, %8
///   ret i32 %add
/// }
///
/// Given this form, all the stores that could ever affect the load at %8 can
/// be found by using the MemoryUse associated with it, and walking from use
/// to def until you hit the top of the function.
///
/// Each def also has a list of users associated with it, so you can walk from
/// both def to users, and users to defs. Note that we disambiguate MemoryUses,
/// but not the RHS of MemoryDefs. You can see this above at %7, which would
/// otherwise be a MemoryUse(4). Being disambiguated means that for a given
/// store, all the MemoryUses on its use lists are may-aliases of that store
/// (but the MemoryDefs on its use list may not be).
///
/// MemoryDefs are not disambiguated because it would require multiple reaching
/// definitions, which would require multiple phis, and multiple MemoryAccesses
/// per instruction.
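///
/// As a minimal sketch of that use-to-def walk (MSSA and Load are
/// illustrative names for an already-built MemorySSA and the load of
/// interest):
/// \code
///   MemoryAccess *MA = MSSA.getMemoryAccess(&Load);
///   while (MA && !MSSA.isLiveOnEntryDef(MA)) {
///     if (auto *UOD = dyn_cast<MemoryUseOrDef>(MA))
///       MA = UOD->getDefiningAccess(); // Step from this access to its def.
///     else
///       break; // Reached a MemoryPhi; a full walk would visit its operands.
///   }
/// \endcode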
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_MEMORYSSA_H
#define LLVM_ANALYSIS_MEMORYSSA_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/simple_ilist.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DerivedUser.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <memory>
#include <utility>

namespace llvm {

class Function;
class Instruction;
class MemoryAccess;
class MemorySSAWalker;
class LLVMContext;
class raw_ostream;

namespace MSSAHelpers {

struct AllAccessTag {};
struct DefsOnlyTag {};

} // end namespace MSSAHelpers

enum : unsigned {
  // Used to signify the default invalid ID returned by a MemoryAccess's
  // getID().
  INVALID_MEMORYACCESS_ID = -1U
};

template <class T> class memoryaccess_def_iterator_base;
using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
using const_memoryaccess_def_iterator =
    memoryaccess_def_iterator_base<const MemoryAccess>;

// The base for all memory accesses. All memory accesses in a block are
// linked together using an intrusive list.
class MemoryAccess
    : public DerivedUser,
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
public:
  using AllAccessType =
      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  using DefsOnlyType =
      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;

  MemoryAccess(const MemoryAccess &) = delete;
  MemoryAccess &operator=(const MemoryAccess &) = delete;

  void *operator new(size_t) = delete;

  // Methods to support type inquiry through isa, cast, and dyn_cast.
  static bool classof(const Value *V) {
    unsigned ID = V->getValueID();
    return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
  }

  BasicBlock *getBlock() const { return Block; }

  void print(raw_ostream &OS) const;
  void dump() const;

  /// The user iterators for a memory access
  using iterator = user_iterator;
  using const_iterator = const_user_iterator;

  /// This iterator walks over all of the defs in a given
  /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
  /// MemoryUse/MemoryDef, this walks the defining access.
  memoryaccess_def_iterator defs_begin();
  const_memoryaccess_def_iterator defs_begin() const;
  memoryaccess_def_iterator defs_end();
  const_memoryaccess_def_iterator defs_end() const;
  /// Get the iterators for the all-access list and the defs-only list.
  /// We default to the all-access list.
  AllAccessType::self_iterator getIterator() {
    return this->AllAccessType::getIterator();
  }
  AllAccessType::const_self_iterator getIterator() const {
    return this->AllAccessType::getIterator();
  }
  AllAccessType::reverse_self_iterator getReverseIterator() {
    return this->AllAccessType::getReverseIterator();
  }
  AllAccessType::const_reverse_self_iterator getReverseIterator() const {
    return this->AllAccessType::getReverseIterator();
  }
  DefsOnlyType::self_iterator getDefsIterator() {
    return this->DefsOnlyType::getIterator();
  }
  DefsOnlyType::const_self_iterator getDefsIterator() const {
    return this->DefsOnlyType::getIterator();
  }
  DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
    return this->DefsOnlyType::getReverseIterator();
  }
  DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
    return this->DefsOnlyType::getReverseIterator();
  }

protected:
  friend class MemoryDef;
  friend class MemoryPhi;
  friend class MemorySSA;
  friend class MemoryUse;
  friend class MemoryUseOrDef;

  /// Used by MemorySSA to change the block of a MemoryAccess when it is
  /// moved.
  void setBlock(BasicBlock *BB) { Block = BB; }

  /// Used for debugging and tracking things about MemoryAccesses.
  /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
  inline unsigned getID() const;

  MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
               BasicBlock *BB, unsigned NumOperands)
      : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
        Block(BB) {}

  // Use deleteValue() to delete a generic MemoryAccess.
  ~MemoryAccess() = default;

private:
  BasicBlock *Block;
};

template <>
struct ilist_alloc_traits<MemoryAccess> {
  static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); }
};

inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
  MA.print(OS);
  return OS;
}

/// Class that has the common methods + fields of memory uses/defs. It's
/// a little awkward to have, but there are many cases where we want either a
/// use or a def, many cases where only uses are acceptable (defs aren't),
/// and vice-versa.
///
/// This class should never be instantiated directly; make a MemoryUse or
/// MemoryDef instead.
class MemoryUseOrDef : public MemoryAccess {
public:
  void *operator new(size_t) = delete;

  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  /// Get the instruction that this MemoryUse/MemoryDef represents.
  Instruction *getMemoryInst() const { return MemoryInstruction; }

  /// Get the access that produces the memory state used by this Use.
  MemoryAccess *getDefiningAccess() const { return getOperand(0); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
  }

  // Sadly, these have to be public because they are needed in some of the
  // iterators.
  inline bool isOptimized() const;
  inline MemoryAccess *getOptimized() const;
  inline void setOptimized(MemoryAccess *);

  // Retrieve the AliasResult type of the optimized access. Ideally this would
  // be returned by the caching walker and may go away in the future.
  Optional<AliasResult> getOptimizedAccessType() const {
    return OptimizedAccessAlias;
  }

  /// Reset the ID of what this access was optimized to, causing it to
  /// be rewalked by the walker if necessary.
  /// This really should only be called by tests.
  inline void resetOptimized();

protected:
  friend class MemorySSA;
  friend class MemorySSAUpdater;

  MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
                 DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB)
      : MemoryAccess(C, Vty, DeleteValue, BB, 1), MemoryInstruction(MI),
        OptimizedAccessAlias(MayAlias) {
    setDefiningAccess(DMA);
  }

  // Use deleteValue() to delete a generic MemoryUseOrDef.
  ~MemoryUseOrDef() = default;

  void setOptimizedAccessType(Optional<AliasResult> AR) {
    OptimizedAccessAlias = AR;
  }

  void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false,
                         Optional<AliasResult> AR = MayAlias) {
    if (!Optimized) {
      setOperand(0, DMA);
      return;
    }
    setOptimized(DMA);
    setOptimizedAccessType(AR);
  }

private:
  Instruction *MemoryInstruction;
  Optional<AliasResult> OptimizedAccessAlias;
};

template <>
struct OperandTraits<MemoryUseOrDef>
    : public FixedNumOperandTraits<MemoryUseOrDef, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)

/// Represents read-only accesses to memory
///
/// In particular, the set of Instructions that will be represented by
/// MemoryUse's is exactly the set of Instructions for which
/// AliasAnalysis::getModRefInfo returns "Ref".
class MemoryUse final : public MemoryUseOrDef {
public:
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
      : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB) {}

  // allocate space for exactly one operand
  void *operator new(size_t s) { return User::operator new(s, 1); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryUseVal;
  }

  void print(raw_ostream &OS) const;

  void setOptimized(MemoryAccess *DMA) {
    OptimizedID = DMA->getID();
    setOperand(0, DMA);
  }

  bool isOptimized() const {
    return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
  }

  MemoryAccess *getOptimized() const {
    return getDefiningAccess();
  }

  void resetOptimized() {
    OptimizedID = INVALID_MEMORYACCESS_ID;
  }

protected:
  friend class MemorySSA;

private:
  static void deleteMe(DerivedUser *Self);

  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
};

template <>
struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)

/// Represents a read-write access to memory, whether it is a must-alias or a
/// may-alias.
///
/// In particular, the set of Instructions that will be represented by
/// MemoryDef's is exactly the set of Instructions for which
/// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
/// Note that, in order to provide def-def chains, all defs also have a use
/// associated with them. This use points to the nearest reaching
/// MemoryDef/MemoryPhi.
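///
/// For example, a sketch of hopping up the def-def chain from a store's
/// MemoryDef (MD is an illustrative name):
/// \code
///   for (MemoryAccess *MA = MD->getDefiningAccess(); MA;) {
///     if (auto *Def = dyn_cast<MemoryDef>(MA))
///       MA = Def->getDefiningAccess(); // Previous reaching def (null at
///                                      // liveOnEntry, which ends the loop).
///     else
///       break; // A MemoryPhi ends the simple def-def chain.
///   }
/// \endcode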
class MemoryDef final : public MemoryUseOrDef {
public:
  friend class MemorySSA;

  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
            unsigned Ver)
      : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB), ID(Ver) {}

  // allocate space for exactly one operand
  void *operator new(size_t s) { return User::operator new(s, 1); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryDefVal;
  }

  void setOptimized(MemoryAccess *MA) {
    Optimized = MA;
    OptimizedID = getDefiningAccess()->getID();
  }

  MemoryAccess *getOptimized() const {
    return cast_or_null<MemoryAccess>(Optimized);
  }

  bool isOptimized() const {
    return getOptimized() && getDefiningAccess() &&
           OptimizedID == getDefiningAccess()->getID();
  }

  void resetOptimized() {
    OptimizedID = INVALID_MEMORYACCESS_ID;
  }

  void print(raw_ostream &OS) const;

  unsigned getID() const { return ID; }

private:
  static void deleteMe(DerivedUser *Self);

  const unsigned ID;
  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
  WeakVH Optimized;
};

template <>
struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)

/// Represents phi nodes for memory accesses.
///
/// These have the same semantics as regular phi nodes, with the exception that
/// only one phi will ever exist in a given basic block.
/// Guaranteeing one phi per block means guaranteeing there is only ever one
/// valid reaching MemoryDef/MemoryPhi along each path to the phi node.
/// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
/// a MemoryPhi's operands.
/// That is, given
/// if (a) {
///   store %a
///   store %b
/// }
/// it *must* be transformed into
/// if (a) {
///    1 = MemoryDef(liveOnEntry)
///    store %a
///    2 = MemoryDef(1)
///    store %b
/// }
/// and *not*
/// if (a) {
///    1 = MemoryDef(liveOnEntry)
///    store %a
///    2 = MemoryDef(liveOnEntry)
///    store %b
/// }
/// even if the two stores do not conflict. Otherwise, both 1 and 2 would reach
/// the end of the branch, and since a phi node merges only one reaching
/// definition per incoming edge, one of them would be completely disconnected
/// from the SSA graph below that point.
/// Because MemoryUse's do not generate new definitions, they do not have this
/// issue.
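///
/// A short sketch of inspecting a MemoryPhi's incoming values (Phi is an
/// illustrative MemoryPhi*):
/// \code
///   for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
///     BasicBlock *Pred = Phi->getIncomingBlock(I);
///     MemoryAccess *Incoming = Phi->getIncomingValue(I);
///     errs() << Pred->getName() << " -> " << *Incoming << "\n";
///   }
/// \endcode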
class MemoryPhi final : public MemoryAccess {
  // allocate space for exactly zero operands
  void *operator new(size_t s) { return User::operator new(s); }

public:
  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
      : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver),
        ReservedSpace(NumPreds) {
    allocHungoffUses(ReservedSpace);
  }

  // Block iterator interface. This provides access to the list of incoming
  // basic blocks, which parallels the list of incoming values.
  using block_iterator = BasicBlock **;
  using const_block_iterator = BasicBlock *const *;

  block_iterator block_begin() {
    auto *Ref = reinterpret_cast<Use::UserRef *>(op_begin() + ReservedSpace);
    return reinterpret_cast<block_iterator>(Ref + 1);
  }

  const_block_iterator block_begin() const {
    const auto *Ref =
        reinterpret_cast<const Use::UserRef *>(op_begin() + ReservedSpace);
    return reinterpret_cast<const_block_iterator>(Ref + 1);
  }

  block_iterator block_end() { return block_begin() + getNumOperands(); }

  const_block_iterator block_end() const {
    return block_begin() + getNumOperands();
  }

  iterator_range<block_iterator> blocks() {
    return make_range(block_begin(), block_end());
  }

  iterator_range<const_block_iterator> blocks() const {
    return make_range(block_begin(), block_end());
  }

  op_range incoming_values() { return operands(); }

  const_op_range incoming_values() const { return operands(); }

  /// Return the number of incoming edges
  unsigned getNumIncomingValues() const { return getNumOperands(); }

  /// Return incoming value number \p I
  MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
  void setIncomingValue(unsigned I, MemoryAccess *V) {
    assert(V && "PHI node got a null value!");
    setOperand(I, V);
  }

  static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
  static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }

  /// Return incoming basic block number \p I.
  BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }

  /// Return incoming basic block corresponding
  /// to an operand of the PHI.
  BasicBlock *getIncomingBlock(const Use &U) const {
    assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
    return getIncomingBlock(unsigned(&U - op_begin()));
  }

  /// Return incoming basic block corresponding
  /// to value use iterator.
  BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
    return getIncomingBlock(I.getUse());
  }

  void setIncomingBlock(unsigned I, BasicBlock *BB) {
    assert(BB && "PHI node got a null basic block!");
    block_begin()[I] = BB;
  }

  /// Add an incoming value to the end of the PHI list
  void addIncoming(MemoryAccess *V, BasicBlock *BB) {
    if (getNumOperands() == ReservedSpace)
      growOperands(); // Get more space!
    // Initialize some new operands.
    setNumHungOffUseOperands(getNumOperands() + 1);
    setIncomingValue(getNumOperands() - 1, V);
    setIncomingBlock(getNumOperands() - 1, BB);
  }

  /// Return the first index of the specified basic
  /// block in the value list for this PHI.  Returns -1 if no instance.
  int getBasicBlockIndex(const BasicBlock *BB) const {
    for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
      if (block_begin()[I] == BB)
        return I;
    return -1;
  }

  MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const {
    int Idx = getBasicBlockIndex(BB);
    assert(Idx >= 0 && "Invalid basic block argument!");
    return getIncomingValue(Idx);
  }

  // After deleting incoming position I, the order of the incoming values may
  // change.
  void unorderedDeleteIncoming(unsigned I) {
    unsigned E = getNumOperands();
    assert(I < E && "Cannot remove out of bounds Phi entry.");
    // MemoryPhi must have at least two incoming values, otherwise the MemoryPhi
    // itself should be deleted.
    assert(E >= 2 && "Can only remove incoming values from MemoryPhis with "
                     "at least 2 values.");
    setIncomingValue(I, getIncomingValue(E - 1));
    setIncomingBlock(I, block_begin()[E - 1]);
    setOperand(E - 1, nullptr);
    block_begin()[E - 1] = nullptr;
    setNumHungOffUseOperands(getNumOperands() - 1);
  }

  // After deleting entries that satisfy Pred, remaining entries may have
  // changed order.
  template <typename Fn> void unorderedDeleteIncomingIf(Fn &&Pred) {
    for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
      if (Pred(getIncomingValue(I), getIncomingBlock(I))) {
        unorderedDeleteIncoming(I);
        E = getNumOperands();
        --I;
      }
    assert(getNumOperands() >= 1 &&
           "Cannot remove all incoming blocks in a MemoryPhi.");
  }

  // After deleting incoming block BB, the incoming blocks order may be changed.
  void unorderedDeleteIncomingBlock(const BasicBlock *BB) {
    unorderedDeleteIncomingIf(
        [&](const MemoryAccess *, const BasicBlock *B) { return BB == B; });
  }

  // After deleting incoming memory access MA, the incoming accesses order may
  // be changed.
  void unorderedDeleteIncomingValue(const MemoryAccess *MA) {
    unorderedDeleteIncomingIf(
        [&](const MemoryAccess *M, const BasicBlock *) { return MA == M; });
  }

  static bool classof(const Value *V) {
    return V->getValueID() == MemoryPhiVal;
  }

  void print(raw_ostream &OS) const;

  unsigned getID() const { return ID; }

protected:
  friend class MemorySSA;

  /// This is more complicated than the generic
  /// User::allocHungoffUses, because we have to allocate Uses for the incoming
  /// values and pointers to the incoming blocks, all in one allocation.
  void allocHungoffUses(unsigned N) {
    User::allocHungoffUses(N, /* IsPhi */ true);
  }

private:
  // For debugging only
  const unsigned ID;
  unsigned ReservedSpace;

  /// This grows the operand list in response to a push_back style of
  /// operation. It grows the number of ops by 1.5 times.
  void growOperands() {
    unsigned E = getNumOperands();
    // 2 op PHI nodes are VERY common, so reserve at least enough for that.
    ReservedSpace = std::max(E + E / 2, 2u);
    growHungoffUses(ReservedSpace, /* IsPhi */ true);
  }

  static void deleteMe(DerivedUser *Self);
};

inline unsigned MemoryAccess::getID() const {
  assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
         "only memory defs and phis have ids");
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->getID();
  return cast<MemoryPhi>(this)->getID();
}

inline bool MemoryUseOrDef::isOptimized() const {
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->isOptimized();
  return cast<MemoryUse>(this)->isOptimized();
}

inline MemoryAccess *MemoryUseOrDef::getOptimized() const {
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->getOptimized();
  return cast<MemoryUse>(this)->getOptimized();
}

inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) {
  if (auto *MD = dyn_cast<MemoryDef>(this))
    MD->setOptimized(MA);
  else
    cast<MemoryUse>(this)->setOptimized(MA);
}

inline void MemoryUseOrDef::resetOptimized() {
  if (auto *MD = dyn_cast<MemoryDef>(this))
    MD->resetOptimized();
  else
    cast<MemoryUse>(this)->resetOptimized();
}

template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)

/// Encapsulates MemorySSA, including all data associated with memory
/// accesses.
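///
/// A minimal sketch of inspecting the accesses in one block (MSSA and BB
/// are illustrative names):
/// \code
///   if (const auto *Accesses = MSSA.getBlockAccesses(&BB))
///     for (const MemoryAccess &MA : *Accesses)
///       errs() << MA << "\n";
/// \endcode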
class MemorySSA {
public:
  MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
  ~MemorySSA();

  MemorySSAWalker *getWalker();

  /// Given a memory Mod/Ref'ing instruction, get the MemorySSA
  /// access associated with it; this will be a MemoryUseOrDef. If passed a
  /// basic block, gets the MemoryPhi node that exists for that block, if
  /// there is one.
  MemoryUseOrDef *getMemoryAccess(const Instruction *) const;
  MemoryPhi *getMemoryAccess(const BasicBlock *BB) const;

  void dump() const;
  void print(raw_ostream &) const;

  /// Return true if \p MA represents the live on entry value
  ///
  /// Loads and stores from pointer arguments and other global values may be
  /// defined by memory operations that do not occur in the current function, so
  /// they may be live on entry to the function. MemorySSA represents such
  /// memory state by the live on entry definition, which is guaranteed to occur
  /// before any other memory access in the function.
  inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
    return MA == LiveOnEntryDef.get();
  }

  inline MemoryAccess *getLiveOnEntryDef() const {
    return LiveOnEntryDef.get();
  }

  // Sadly, iplists, by default, own and delete pointers added to the
  // list. It's not currently possible to have two iplists for the same type,
  // where one owns the pointers, and one does not. This is because the traits
  // are per-type, not per-tag.  If this ever changes, we should make the
  // DefList an iplist.
  using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  using DefsList =
      simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;

  /// Return the list of MemoryAccess's for a given basic block.
  ///
  /// This list is not modifiable by the user.
  const AccessList *getBlockAccesses(const BasicBlock *BB) const {
    return getWritableBlockAccesses(BB);
  }

  /// Return the list of MemoryDef's and MemoryPhi's for a given basic
  /// block.
  ///
  /// This list is not modifiable by the user.
  const DefsList *getBlockDefs(const BasicBlock *BB) const {
    return getWritableBlockDefs(BB);
  }

  /// Given two memory accesses in the same basic block, determine
  /// whether MemoryAccess \p A dominates MemoryAccess \p B.
  bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// Given two memory accesses in potentially different blocks,
  /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
  bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
  /// dominates Use \p B.
  bool dominates(const MemoryAccess *A, const Use &B) const;

  /// Verify that MemorySSA is self-consistent (i.e., definitions dominate
  /// all uses, uses appear in the right places). This is used by unit tests.
  void verifyMemorySSA() const;

  /// Used in various insertion functions to specify whether we are talking
  /// about the beginning or end of a block.
  enum InsertionPlace { Beginning, End };

protected:
  // Used by the MemorySSA annotator, dumpers, and wrapper pass.
  friend class MemorySSAAnnotatedWriter;
  friend class MemorySSAPrinterLegacyPass;
  friend class MemorySSAUpdater;

  void verifyDefUses(Function &F) const;
  void verifyDomination(Function &F) const;
  void verifyOrdering(Function &F) const;
  void verifyDominationNumbers(const Function &F) const;

  // This is used by the use optimizer and updater.
  AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
    auto It = PerBlockAccesses.find(BB);
    return It == PerBlockAccesses.end() ? nullptr : It->second.get();
  }

  // This is used by the use optimizer and updater.
  DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
    auto It = PerBlockDefs.find(BB);
    return It == PerBlockDefs.end() ? nullptr : It->second.get();
  }

  // These are used by the updater to perform various internal MemorySSA
  // machinations. They do not always leave the IR in a correct state, and
  // rely on the updater to fix up what they break, so they are not public.

  void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
  void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point);

  // Rename the dominator tree branch rooted at BB.
  void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
                  SmallPtrSetImpl<BasicBlock *> &Visited) {
    renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
  }

  void removeFromLookups(MemoryAccess *);
  void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
  void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
                               InsertionPlace);
  void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
                             AccessList::iterator);
  MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *);

private:
  class CachingWalker;
  class OptimizeUses;

  CachingWalker *getWalkerImpl();
  void buildMemorySSA();
  void optimizeUses();

  void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;

  using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
  using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;

  void
  determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks);
  void markUnreachableAsLiveOnEntry(BasicBlock *BB);
  bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const;
  MemoryPhi *createMemoryPhi(BasicBlock *BB);
  MemoryUseOrDef *createNewAccess(Instruction *);
  MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
  void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &);
  MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
  void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
  void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
                  SmallPtrSetImpl<BasicBlock *> &Visited,
                  bool SkipVisited = false, bool RenameAllUses = false);
  AccessList *getOrCreateAccessList(const BasicBlock *);
  DefsList *getOrCreateDefsList(const BasicBlock *);
  void renumberBlock(const BasicBlock *) const;
  AliasAnalysis *AA;
  DominatorTree *DT;
  Function &F;

  // Memory SSA mappings
  DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;

  // These two mappings contain the main block to access/def mappings for
  // MemorySSA. The list contained in PerBlockAccesses really owns all the
  // MemoryAccesses.
  // Both maps maintain the invariant that if a block is found in them, the
  // corresponding list is not empty, and if a block is not found in them, the
  // corresponding list is empty.
  AccessMap PerBlockAccesses;
  DefsMap PerBlockDefs;
  std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef;

  // Domination mappings
  // Note that the numbering is local to a block, even though the map is
  // global.
  mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
  mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;

  // Memory SSA building info
  std::unique_ptr<CachingWalker> Walker;
  unsigned NextID;
};

// Internal MemorySSA utils, for use by MemorySSA classes and walkers
class MemorySSAUtil {
protected:
  friend class GVNHoist;
  friend class MemorySSAWalker;

  // This function should not be used by new passes.
  static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                  AliasAnalysis &AA);
};

// This pass does eager building and then printing of MemorySSA. It is used by
// the tests to be able to build, dump, and verify Memory SSA.
class MemorySSAPrinterLegacyPass : public FunctionPass {
public:
  MemorySSAPrinterLegacyPass();

  bool runOnFunction(Function &) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  static char ID;
};

/// An analysis that produces \c MemorySSA for a function.
///
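/// Typical usage from a new-pass-manager pass (a sketch; MyPass is an
/// illustrative name):
/// \code
///   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
///     MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
///     MSSA.verifyMemorySSA(); // For example, sanity-check the result.
///     return PreservedAnalyses::all();
///   }
/// \endcode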
class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
  friend AnalysisInfoMixin<MemorySSAAnalysis>;

  static AnalysisKey Key;

public:
  // Wrap MemorySSA result to ensure address stability of internal MemorySSA
  // pointers after construction.  Use a wrapper class instead of plain
  // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
  struct Result {
    Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}

    MemorySSA &getMSSA() { return *MSSA.get(); }

    std::unique_ptr<MemorySSA> MSSA;
  };

  Result run(Function &F, FunctionAnalysisManager &AM);
};

/// Printer pass for \c MemorySSA.
class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
  raw_ostream &OS;

public:
  explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Verifier pass for \c MemorySSA.
struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Legacy analysis pass which computes \c MemorySSA.
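///
/// Typical usage from a legacy pass (a sketch; MyLegacyPass is an
/// illustrative name):
/// \code
///   void MyLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
///     AU.addRequired<MemorySSAWrapperPass>();
///   }
///   bool MyLegacyPass::runOnFunction(Function &F) {
///     MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
///     return false; // Analysis-only sketch: the IR is not modified.
///   }
/// \endcode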
class MemorySSAWrapperPass : public FunctionPass {
public:
  MemorySSAWrapperPass();

  static char ID;

  bool runOnFunction(Function &) override;
  void releaseMemory() override;
  MemorySSA &getMSSA() { return *MSSA; }
  const MemorySSA &getMSSA() const { return *MSSA; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  void verifyAnalysis() const override;
  void print(raw_ostream &OS, const Module *M = nullptr) const override;

private:
  std::unique_ptr<MemorySSA> MSSA;
};

/// This is the generic walker interface for walkers of MemorySSA.
/// Walkers are used to further disambiguate the def-use chains MemorySSA
/// gives you, or otherwise produce better info than MemorySSA gives you.
/// In particular, while the def-use chains provide basic information, and are
/// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
/// MemoryUse as AliasAnalysis considers it, a user may want better or other
/// information. In particular, they may want to use SCEV info to further
/// disambiguate memory accesses, or they may want the nearest dominating
/// may-aliasing MemoryDef for a call or a store. This API provides a
/// standardized interface for getting and using that info.
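///
/// A sketch of a common query (MSSA and Load are illustrative names):
/// \code
///   MemorySSAWalker *Walker = MSSA.getWalker();
///   MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(&Load);
///   if (MSSA.isLiveOnEntryDef(Clobber)) {
///     // Nothing in this function clobbers the location Load reads.
///   }
/// \endcode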
class MemorySSAWalker {
public:
  MemorySSAWalker(MemorySSA *);
  virtual ~MemorySSAWalker() = default;

  using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;

  /// Given a memory Mod/Ref/ModRef'ing instruction, calling this
  /// will give you the nearest dominating MemoryAccess that Mod's the location
  /// the instruction accesses (by skipping any def which AA can prove does not
  /// alias the location(s) accessed by the instruction given).
  ///
  /// Note that this will return a single access, and it must dominate the
  /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
  /// this will return the MemoryPhi, not the operand. This means that
  /// given:
  /// if (a) {
  ///   1 = MemoryDef(liveOnEntry)
  ///   store %a
  /// } else {
  ///   2 = MemoryDef(liveOnEntry)
  ///   store %b
  /// }
  /// 3 = MemoryPhi(2, 1)
  /// MemoryUse(3)
  /// load %a
  ///
  /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
  /// in the if (a) branch.
  MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
    MemoryAccess *MA = MSSA->getMemoryAccess(I);
    assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
    return getClobberingMemoryAccess(MA);
  }

  /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
  /// but takes a MemoryAccess instead of an Instruction.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;

  /// Given a potentially clobbering memory access and a new location,
  /// calling this will give you the nearest dominating clobbering MemoryAccess
  /// (by skipping non-aliasing def links).
  ///
  /// This version of the function is mainly used to disambiguate phi translated
  /// pointers, where the value of a pointer may have changed from the initial
  /// memory access. Note that this expects to be handed either a MemoryUse,
  /// or an already potentially clobbering access. Unlike the above API, if
  /// given a MemoryDef that clobbers the pointer as the starting access, it
  /// will return that MemoryDef, whereas the above would return the clobber
  /// starting from the use side of the memory def.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                                  const MemoryLocation &) = 0;

  /// Given a memory access, invalidate anything this walker knows about
  /// that access.
  /// This API is used by walkers that store information to perform basic cache
  /// invalidation.  This will be called by MemorySSA at appropriate times for
  /// the walker it uses or returns.
  virtual void invalidateInfo(MemoryAccess *) {}

  virtual void verify(const MemorySSA *MSSA) { assert(MSSA == this->MSSA); }

protected:
  friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
                          // constructor.
  MemorySSA *MSSA;
};

/// A MemorySSAWalker that does no alias queries, or anything else. It
/// simply returns the links as they were constructed by the builder.
class DoNothingMemorySSAWalker final : public MemorySSAWalker {
public:
  // Keep the overrides below from hiding the Instruction overload of
  // getClobberingMemoryAccess.
  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          const MemoryLocation &) override;
};

using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;

/// Iterator base class used to implement const and non-const iterators
/// over the defining accesses of a MemoryAccess.
template <class T>
class memoryaccess_def_iterator_base
    : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
                                  std::forward_iterator_tag, T, ptrdiff_t, T *,
                                  T *> {
  using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;

public:
  memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
  memoryaccess_def_iterator_base() = default;

  bool operator==(const memoryaccess_def_iterator_base &Other) const {
    return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
  }

  // This is a bit ugly, but for MemoryPhis, unlike PHINodes, you can't get the
  // block from the operand in constant time (in a PHINode, the use list has
  // both, so it's just subtraction). We provide it as part of the
  // iterator to avoid callers having to do a linear walk to get the block.
  // If the operation becomes constant time on MemoryPhis, this bit of
  // abstraction breaking should be removed.
  BasicBlock *getPhiArgBlock() const {
    MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
    assert(MP && "Tried to get phi arg block when not iterating over a PHI");
    return MP->getIncomingBlock(ArgNo);
  }

  typename BaseT::iterator::pointer operator*() const {
    assert(Access && "Tried to access past the end of our iterator");
    // Go to the first argument for phis, and the defining access for everything
    // else.
    if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
      return MP->getIncomingValue(ArgNo);
    return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
  }

  using BaseT::operator++;
  memoryaccess_def_iterator &operator++() {
    assert(Access && "Hit end of iterator");
    if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
      if (++ArgNo >= MP->getNumIncomingValues()) {
        ArgNo = 0;
        Access = nullptr;
      }
    } else {
      Access = nullptr;
    }
    return *this;
  }

private:
  T *Access = nullptr;
  unsigned ArgNo = 0;
};

inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
  return memoryaccess_def_iterator(this);
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
  return const_memoryaccess_def_iterator(this);
}

inline memoryaccess_def_iterator MemoryAccess::defs_end() {
  return memoryaccess_def_iterator();
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
  return const_memoryaccess_def_iterator();
}

/// GraphTraits for a MemoryAccess, which walks defs in the normal case,
/// and uses in the inverse case.
template <> struct GraphTraits<MemoryAccess *> {
  using NodeRef = MemoryAccess *;
  using ChildIteratorType = memoryaccess_def_iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
};

template <> struct GraphTraits<Inverse<MemoryAccess *>> {
  using NodeRef = MemoryAccess *;
  using ChildIteratorType = MemoryAccess::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
};
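
// With the above GraphTraits, generic graph algorithms can walk MemorySSA.
// A sketch (requires "llvm/ADT/DepthFirstIterator.h"; Root is an illustrative
// MemoryAccess*):
//   for (MemoryAccess *MA : depth_first(Root))
//     MA->dump(); // Visits Root and the accesses reachable through defs.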

/// Provide an iterator that walks defs, giving both the memory access,
/// and the current pointer location, updating the pointer location as it
/// changes due to phi node translation.
///
/// This iterator, while somewhat specialized, is what most clients actually
/// want when walking upwards through MemorySSA def chains. It takes a pair of
/// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
/// memory location through phi nodes for the user.
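///
/// A sketch of typical use, via the upward_defs() helper declared below
/// (MA and Loc are illustrative names):
/// \code
///   for (const MemoryAccessPair &Pair : upward_defs({MA, Loc})) {
///     MemoryAccess *Def = Pair.first;               // A defining access.
///     const MemoryLocation &TransLoc = Pair.second; // Loc, phi-translated
///                                                   // where necessary.
///   }
/// \endcode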
class upward_defs_iterator
    : public iterator_facade_base<upward_defs_iterator,
                                  std::forward_iterator_tag,
                                  const MemoryAccessPair> {
  using BaseT = upward_defs_iterator::iterator_facade_base;

public:
  upward_defs_iterator(const MemoryAccessPair &Info)
      : DefIterator(Info.first), Location(Info.second),
        OriginalAccess(Info.first) {
    CurrentPair.first = nullptr;

    WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
    fillInCurrentPair();
  }

  upward_defs_iterator() { CurrentPair.first = nullptr; }

  bool operator==(const upward_defs_iterator &Other) const {
    return DefIterator == Other.DefIterator;
  }

  BaseT::iterator::reference operator*() const {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of our iterator");
    return CurrentPair;
  }

  using BaseT::operator++;
  upward_defs_iterator &operator++() {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of the iterator");
    ++DefIterator;
    if (DefIterator != OriginalAccess->defs_end())
      fillInCurrentPair();
    return *this;
  }

  BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }

private:
  void fillInCurrentPair() {
    CurrentPair.first = *DefIterator;
    if (WalkingPhi && Location.Ptr) {
      PHITransAddr Translator(
          const_cast<Value *>(Location.Ptr),
          OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
      if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
                                        DefIterator.getPhiArgBlock(), nullptr,
                                        false))
        if (Translator.getAddr() != Location.Ptr) {
          CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
          return;
        }
    }
    CurrentPair.second = Location;
  }

  MemoryAccessPair CurrentPair;
  memoryaccess_def_iterator DefIterator;
  MemoryLocation Location;
  MemoryAccess *OriginalAccess = nullptr;
  bool WalkingPhi = false;
};

inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
  return upward_defs_iterator(Pair);
}

inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }

inline iterator_range<upward_defs_iterator>
upward_defs(const MemoryAccessPair &Pair) {
  return make_range(upward_defs_begin(Pair), upward_defs_end());
}

/// Walks the defining accesses of MemoryDefs. Stops after we hit something that
/// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
/// comparing against a null def_chain_iterator, this will compare equal only
/// after walking said Phi/liveOnEntry.
///
/// The UseOptimizedChain flag specifies whether to walk the clobbering
/// access chain, or all the accesses.
///
/// Normally, MemoryDefs are all just def/use linked together, so a def_chain on
/// a MemoryDef will walk all MemoryDefs above it in the program until it hits
/// a phi node.  The optimized chain walks the clobbering access of a store.
/// So if you are just trying to find, given a store, what the next
/// thing that would clobber the same memory is, you want the optimized chain.
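///
/// A sketch of walking a def chain via the def_chain() helper declared below
/// (Start is an illustrative MemoryAccess*):
/// \code
///   for (MemoryAccess *MA : def_chain(Start))
///     MA->dump(); // Visits Start, each def above it, and finally the
///                 // MemoryPhi or liveOnEntry that ends the chain.
/// \endcode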
template <class T, bool UseOptimizedChain = false>
struct def_chain_iterator
    : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
                                  std::forward_iterator_tag, MemoryAccess *> {
  def_chain_iterator() : MA(nullptr) {}
  def_chain_iterator(T MA) : MA(MA) {}

  T operator*() const { return MA; }

  def_chain_iterator &operator++() {
    // N.B. liveOnEntry has a null defining access.
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
      if (UseOptimizedChain && MUD->isOptimized())
        MA = MUD->getOptimized();
      else
        MA = MUD->getDefiningAccess();
    } else {
      MA = nullptr;
    }

    return *this;
  }

  bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }

private:
  T MA;
};

template <class T>
inline iterator_range<def_chain_iterator<T>>
def_chain(T MA, MemoryAccess *UpTo = nullptr) {
#ifdef EXPENSIVE_CHECKS
  assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
         "UpTo isn't in the def chain!");
#endif
  return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
}

template <class T>
inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
  return make_range(def_chain_iterator<T, true>(MA),
                    def_chain_iterator<T, true>(nullptr));
}

} // end namespace llvm

#endif // LLVM_ANALYSIS_MEMORYSSA_H