//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include <list>
using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet,    "Number of memcpys converted to memset");

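/// GetOffsetFromIndex - Compute the constant byte offset implied by GEP
/// operands [Idx, NumOperands), assuming any earlier indices are shared with
/// another GEP and can be ignored.  If a non-constant index is encountered,
/// VariableIdxFound is set and the return value is not meaningful.  For
/// example (illustrative, typical layout assumed), with Idx == 1,
///   getelementptr {i32, [4 x i16]}, {i32, [4 x i16]}* %S, i64 0, i32 1, i64 2
/// yields 0 + 4 + 2*2 = 8 bytes.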
static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
                                  bool &VariableIdxFound,
                                  const DataLayout &DL) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!OpC)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// IsPointerOffset - Return true if Ptr2 is provably equal to Ptr1 plus a
/// constant offset, and return that constant offset.  For example, Ptr1 might
/// be &A[42], and Ptr2 might be &A[40].  With 4-byte elements, Offset would
/// then be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            const DataLayout &DL) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();

  // Handle the trivial case first.
  if (Ptr1 == Ptr2) {
    Offset = 0;
    return true;
  }

  GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  bool VariableIdxFound = false;

  // If one pointer is a GEP and the other isn't, then see if the GEP is a
  // constant offset from the base, as in "P" and "gep P, 1".
  if (GEP1 && !GEP2 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
    Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  if (GEP2 && !GEP1 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
    Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
  // base.  After that base, they may have some number of common (and
  // potentially variable) indices.  After that they may have some constant
  // offset, which determines their offset from each other.  At this point, we
  // handle no other case.
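  // For example (illustrative):
  //   %p1 = getelementptr [8 x i32], [8 x i32]* %a, i64 %i, i64 2
  //   %p2 = getelementptr [8 x i32], [8 x i32]* %a, i64 %i, i64 5
  // share the base %a and the variable index %i; only the trailing constant
  // indices differ, so Offset = (5 - 2) * 4 = 12 bytes.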
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, DL);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, DL);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}


/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the range
/// to [0, 2).  The third store (to P+3) makes a new range [3, 4).  The fourth
/// store (to P+2) joins the two ranges into [0, 4), which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A semi-open range describing the span that this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found 4 or more stores to merge, or a span of 16 or more bytes, use
  // memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (unsigned i = 0, e = TheStores.size(); i != e; ++i)
    if (!isa<StoreInst>(TheStores[i]))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size. If so, check to see whether we will end up actually reducing the
  // number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSize();
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume that any remaining bytes are stored a byte at a time.
  unsigned NumByteStores = Bytes - NumPointerStores * MaxIntSize;

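  // For example (illustrative), three stores (an i32 and two i16s) covering 8
  // contiguous bytes on a target whose largest legal integer is i32 give
  // Bytes == 8, NumPointerStores == 2 and NumByteStores == 0; since 3 > 2,
  // the memset is deemed profitable.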
  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}


namespace {
class MemsetRanges {
  /// Ranges - A sorted list of the memset ranges.  We use std::list here
  /// because each element is relatively large and expensive to copy.
  std::list<MemsetRange> Ranges;
  typedef std::list<MemsetRange>::iterator range_iterator;
  const DataLayout &DL;
public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  typedef std::list<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize,
             SI->getPointerOperand(), SI->getAlignment(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);

};

} // end anon namespace


/// addRange - Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
///
/// Do a linear search of the ranges to see if this can be joined and/or to
/// find the insertion point in the list.  We keep the ranges sorted for
/// simplicity here.  This is a linear search of a linked list, which is ugly,
/// however the number of ranges is limited, so this won't get crazy slow.
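///
/// For example (illustrative), with existing ranges [0, 4) and [8, 12),
/// adding a store covering [4, 10) first extends [0, 4) to [0, 10) and then
/// folds the following [8, 12) range in, leaving a single [0, 12) range.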
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;
  range_iterator I = Ranges.begin(), E = Ranges.end();

  while (I != E && Start > I->End)
    ++I;

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
  // to insert a new range.  Handle this now.
  if (I == E || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start        = Start;
    R.End          = End;
    R.StartPtr     = Ptr;
    R.Alignment    = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range.  In this case, it
  // couldn't possibly merge with the prior range, because otherwise we would
  // have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != E && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
  class MemCpyOpt : public FunctionPass {
    MemoryDependenceAnalysis *MD;
    TargetLibraryInfo *TLI;
  public:
    static char ID; // Pass identification, replacement for typeid
    MemCpyOpt() : FunctionPass(ID) {
      initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
      MD = nullptr;
      TLI = nullptr;
    }

    bool runOnFunction(Function &F) override;

  private:
    // This transformation requires dominator info.
    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      AU.addRequired<AssumptionCacheTracker>();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetLibraryInfoWrapperPass>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }

    // Helper functions
    bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
    bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
    bool processMemCpy(MemCpyInst *M);
    bool processMemMove(MemMoveInst *M);
    bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
                              uint64_t cpyLen, unsigned cpyAlign, CallInst *C);
    bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                       uint64_t MSize);
    bool processByValArgument(CallSite CS, unsigned ArgNo);
    Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
                                      Value *ByteVal);

    bool iterateOnFunction(Function &F);
  };

  char MemCpyOpt::ID = 0;
}

// createMemCpyOptPass - The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// tryMergingIntoMemset - When scanning forward over instructions, we look for
/// some other patterns to fold away.  In particular, this looks for stores to
/// neighboring locations of memory.  If it sees enough consecutive ones, it
/// attempts to merge them together into a memcpy/memset.
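///
/// For example (illustrative), the four stores
///   store i8 0, i8* %P
///   store i8 0, i8* %P1    ; %P1 = getelementptr i8, i8* %P, i64 1
///   store i8 0, i8* %P2    ; %P2 = getelementptr i8, i8* %P, i64 2
///   store i8 0, i8* %P3    ; %P3 = getelementptr i8, i8* %P, i64 3
/// can be merged into a single llvm.memset of four bytes starting at %P.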
Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
                                             Value *StartPtr, Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store whose value is byte-splattable.  Scan
  // to find all subsequent stores of the same value at constant offsets from
  // the same pointer.  Join these together into ranges, so we can decide
  // whether contiguous blocks are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI = StartInst;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memset(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this stored value is of the same byte-splattable value.
      if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset,
                           DL))
        break;

      Ranges.addStore(Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, DL))
        break;

      Ranges.addMemSet(Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  As a small compile-time optimization, we avoid adding the
  // starting store until we know there is something to merge it with.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block.  This ensures that the memset is
  // dominated by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      Type *EltType =
        cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = DL.getABITypeAlignment(EltType);
    }

    AMemSet =
      Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);

    DEBUG(dbgs() << "Replace stores:\n";
          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
            dbgs() << *Range.TheStores[i] << '\n';
          dbgs() << "With: " << *AMemSet << '\n');

    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    // Zap all the stores.
    for (SmallVectorImpl<Instruction *>::const_iterator
         SI = Range.TheStores.begin(),
         SE = Range.TheStores.end(); SI != SE; ++SI) {
      MD->removeInstruction(*SI);
      (*SI)->eraseFromParent();
    }
    ++NumMemSetInfer;
  }

  return AMemSet;
}


bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;
  const DataLayout &DL = SI->getModule()->getDataLayout();

  // Detect cases where we're performing call slot forwarding, but
  // happen to be using a load-store pair to implement it, rather than
  // a memcpy.
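  // For example (illustrative):
  //   %V = load i32, i32* %src
  //   store i32 %V, i32* %dest
  // together act as "memcpy(dest, src, 4)"; if %src was just filled in by a
  // call, the call may be able to write its result into %dest directly.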
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = nullptr;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
        AliasAnalysis::Location StoreLoc = AA.getLocation(SI);
        for (BasicBlock::iterator I = --BasicBlock::iterator(SI),
                                  E = C; I != E; --I) {
          if (AA.getModRefInfo(&*I, StoreLoc) != AliasAnalysis::NoModRef) {
            C = nullptr;
            break;
          }
        }
      }

      if (C) {
        unsigned storeAlign = SI->getAlignment();
        if (!storeAlign)
          storeAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
        unsigned loadAlign = LI->getAlignment();
        if (!loadAlign)
          loadAlign = DL.getABITypeAlignment(LI->getType());

        bool changed = performCallSlotOptzn(
            LI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            std::min(storeAlign, loadAlign), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset a byte
  // at a time, such as "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  if (Value *ByteVal = isBytewiseValue(SI->getOperand(0)))
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I;  // Don't invalidate iterator.
      return true;
    }

  return false;
}

bool MemCpyOpt::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I;  // Don't invalidate iterator.
      return true;
    }
  return false;
}


/// performCallSlotOptzn - takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
                                     Value *cpyDest, Value *cpySrc,
                                     uint64_t cpyLen, unsigned cpyAlign,
                                     CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpy->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) *
                        destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    if (A->getDereferenceableBytes() < srcSize) {
      // If the destination is an sret parameter then only accesses that are
      // outside of the returned struct type can trap.
      if (!A->hasStructRetAttr())
        return false;

      Type *StructTy = cast<PointerType>(A->getType())->getElementType();
      if (!StructTy->isSized()) {
        // The call may never return and hence the copy-instruction may never
        // be executed, and therefore it's not safe to say "the destination
        // has at least <cpyLen> bytes, as implied by the copy-instruction".
        return false;
      }

      uint64_t destSize = DL.getTypeAllocSize(StructTy);
      if (destSize < srcSize)
        return false;
    }
  } else {
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  unsigned srcAlign = srcAlloca->getAlignment();
  if (!srcAlign)
    srcAlign = DL.getABITypeAlignment(srcAlloca->getAllocatedType());
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
                                   srcAlloca->user_end());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->getIntrinsicID() == Intrinsic::lifetime_start ||
          IT->getIntrinsicID() == Intrinsic::lifetime_end)
        continue;

    if (U != C && U != cpy)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
    if (CS.getArgument(i) == cpySrc && !CS.doesNotCapture(i))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  AliasAnalysis::ModRefResult MR = AA.getModRefInfo(C, cpyDest, srcSize);
  // If necessary, perform additional analysis.
  if (MR != AliasAnalysis::NoModRef)
    MR = AA.callCapturesBefore(C, cpyDest, srcSize, &DT);
  if (MR != AliasAnalysis::NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType() ?  cpyDest
        : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                      cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == Dest->getType())
        CS.setArgument(i, Dest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(Dest,
                          CS.getArgument(i)->getType(), Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Update AA metadata
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet
  unsigned KnownIDs[] = {
    LLVMContext::MD_tbaa,
    LLVMContext::MD_alias_scope,
    LLVMContext::MD_noalias,
  };
  combineMetadata(C, cpy, KnownIDs);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

/// processMemCpyMemCpyDependence - We've found that the (upward scanning)
/// memory dependence of memcpy 'M' is the memcpy 'MDep'.  Try to simplify M to
/// copy from MDep's input if we can.  MSize is the size of M's copy.
///
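/// For example:
///   memcpy(b <- a)
///   memcpy(c <- b)
/// can be rewritten as
///   memcpy(b <- a)
///   memcpy(c <- a)
/// which exposes the first memcpy as a dead store if b has no other uses.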
bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                              uint64_t MSize) {
  // We can only transform memcpys where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If dep instruction is reading from our current input, then it is a noop
  // transfer and substituting the input won't change this instruction.  Just
  // ignore the input and let someone else zap MDep.  This handles cases like:
  //    memcpy(a <- a)
  //    memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the lengths of the memcpys must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers.  For example, in:
  //    memcpy(a <- b)
  //    *b = 42;
  //    memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AA.getLocationForSource(MDep),
                                 false, M, M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap.  We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.

  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy? For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());

  IRBuilder<> Builder(M);
  if (UseMemMove)
    Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                          Align, M->isVolatile());
  else
    Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                         Align, M->isVolatile());

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}


/// processMemCpy - perform simplification of memcpy's.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  // We can only optimize non-volatile memcpy's.
  if (M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    MD->removeInstruction(M);
    M->eraseFromParent();
    return false;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
        IRBuilder<> Builder(M);
        Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
                             M->getAlignment(), false);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  // The optimizations after this point require the memcpy size.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (!CopySize) return false;

  // There are three possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  //   c) memcpy from freshly alloca'd space or space that has just started its
  //      lifetime copies undefined data, and we can therefore eliminate the
  //      memcpy in favor of the data that was already at the destination.
  MemDepResult DepInfo = MD->getDependency(M);
  if (DepInfo.isClobber()) {
    if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
      if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                               CopySize->getZExtValue(), M->getAlignment(),
                               C)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        return true;
      }
    }
  }

  AliasAnalysis::Location SrcLoc = AliasAnalysis::getLocationForSource(M);
  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(SrcLoc, true,
                                                         M, M->getParent());
  if (SrcDepInfo.isClobber()) {
    if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
      return processMemCpyMemCpyDependence(M, MDep, CopySize->getZExtValue());
  } else if (SrcDepInfo.isDef()) {
    Instruction *I = SrcDepInfo.getInst();
    bool hasUndefContents = false;

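    // This is case (c) above: a copy out of a fresh alloca, or out of memory
    // whose lifetime has only just started (a lifetime.start covering at
    // least CopySize bytes), reads only undefined bytes, so the memcpy
    // itself can simply be deleted.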
    if (isa<AllocaInst>(I)) {
      hasUndefContents = true;
    } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start)
        if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
          if (LTSize->getZExtValue() >= CopySize->getZExtValue())
            hasUndefContents = true;
    }

    if (hasUndefContents) {
      MD->removeInstruction(M);
      M->eraseFromParent();
      ++NumMemCpyInstr;
      return true;
    }
  }

  return false;
}

/// processMemMove - Transforms memmove calls to memcpy calls when the src/dst
/// are guaranteed not to alias.
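/// For example, a memmove between two distinct allocas cannot overlap, so
///   call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, ...)
/// can be rewritten in place as the corresponding llvm.memcpy, which later
/// passes and codegen handle more cheaply.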
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  if (!TLI->has(LibFunc::memmove))
    return false;

  // See if the pointers alias.
  if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(M)))
    return false;

  DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  // If not, then we know we can transform this.
  Module *Mod = M->getParent()->getParent()->getParent();
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(Mod, Intrinsic::memcpy,
                                                 ArgTys));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// processByValArgument - This is called on every byval argument in call sites.
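/// If the byval argument is initialized by a memcpy whose source is still
/// unmodified at the call, the callee's private copy can be made from that
/// source instead.  For example:
///   memcpy(tmp <- src)
///   call @foo(byval tmp)
/// becomes
///   call @foo(byval src)
/// after which the memcpy (and often tmp itself) is dead and removable by
/// later passes.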
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
  const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout();
  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
  MemDepResult DepInfo =
    MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
                                 true, CS.getInstruction(),
                                 CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
  // a memcpy, see if we can pass the source of the memcpy as the byval
  // argument instead of the copy.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (!MDep || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger than or equal to the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval.  If the call doesn't specify the alignment,
  // then it is some target-specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
  if (ByValAlign == 0) return false;

  // If the byval alignment is greater than the memcpy's, then we check to see
  // if we can force the source of the memcpy to have the alignment we need.
  // If we fail, we bail out.
  AssumptionCache &AC =
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
          *CS->getParent()->getParent());
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  if (MDep->getAlignment() < ByValAlign &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL,
                                 CS.getInstruction(), &AC, &DT) < ByValAlign)
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy and
  // the byval call.
  //    memcpy(a <- b)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to transform the call into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AliasAnalysis::getLocationForSource(MDep),
                                 false, CS.getInstruction(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  DEBUG(dbgs() << "MemCpyOpt: Forwarding memcpy to byval:\n"
               << "  " << *MDep << "\n"
               << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good!  Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// iterateOnFunction - Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (auto CS = CallSite(I)) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.isByValArgument(i))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB->begin()) --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

// MemCpyOpt::runOnFunction - This is the main transformation entry point for a
// function.
//
bool MemCpyOpt::runOnFunction(Function &F) {
  if (skipOptnoneFunction(F))
    return false;

  bool MadeChange = false;
  MD = &getAnalysis<MemoryDependenceAnalysis>();
  TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

  // If we don't have at least memset and memcpy, there is little point in
  // doing anything here.  These are required by a freestanding implementation,
  // so if even they are disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc::memset) || !TLI->has(LibFunc::memcpy))
    return false;

  while (1) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = nullptr;
  return MadeChange;
}