      1 //===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This file implements the visit functions for load, store and alloca.
     11 //
     12 //===----------------------------------------------------------------------===//
     13 
     14 #include "InstCombineInternal.h"
     15 #include "llvm/ADT/SmallString.h"
     16 #include "llvm/ADT/Statistic.h"
     17 #include "llvm/Analysis/Loads.h"
     18 #include "llvm/IR/DataLayout.h"
     19 #include "llvm/IR/LLVMContext.h"
     20 #include "llvm/IR/IntrinsicInst.h"
     21 #include "llvm/IR/MDBuilder.h"
     22 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
     23 #include "llvm/Transforms/Utils/Local.h"
     24 using namespace llvm;
     25 
     26 #define DEBUG_TYPE "instcombine"
     27 
     28 STATISTIC(NumDeadStore,    "Number of dead stores eliminated");
     29 STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
     30 
     31 /// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
     32 /// some part of a constant global variable.  This intentionally only accepts
     33 /// constant expressions because we can't rewrite arbitrary instructions.
     34 static bool pointsToConstantGlobal(Value *V) {
     35   if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
     36     return GV->isConstant();
     37 
     38   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
     39     if (CE->getOpcode() == Instruction::BitCast ||
     40         CE->getOpcode() == Instruction::AddrSpaceCast ||
     41         CE->getOpcode() == Instruction::GetElementPtr)
     42       return pointsToConstantGlobal(CE->getOperand(0));
     43   }
     44   return false;
     45 }
     46 
     47 /// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
     48 /// pointer to an alloca.  Ignore any reads of the pointer, return false if we
     49 /// see any stores or other unknown uses.  If we see pointer arithmetic, keep
     50 /// track of whether it moves the pointer (with IsOffset) but otherwise traverse
     51 /// the uses.  If we see a memcpy/memmove that targets an unoffseted pointer to
     52 /// the alloca, and if the source pointer is a pointer to a constant global, we
     53 /// can optimize this.
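         ///
         /// As a rough, illustrative sketch of the pattern this looks for (the names
         /// @cst and %buf are made up for the example):
         ///   %buf = alloca [8 x i32]
         ///   %p = bitcast [8 x i32]* %buf to i8*
         ///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %p,
         ///       i8* bitcast ([8 x i32]* @cst to i8*), i64 32, i32 4, i1 false)
         ///   ; ... only loads of %buf follow ...
         /// Every use of %buf can then be rewritten to use @cst directly.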
     54 static bool
     55 isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
     56                                SmallVectorImpl<Instruction *> &ToDelete) {
     57   // We track lifetime intrinsics as we encounter them.  If we decide to go
     58   // ahead and replace the value with the global, this lets the caller quickly
     59   // eliminate the markers.
     60 
     61   SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
     62   ValuesToInspect.push_back(std::make_pair(V, false));
     63   while (!ValuesToInspect.empty()) {
     64     auto ValuePair = ValuesToInspect.pop_back_val();
     65     const bool IsOffset = ValuePair.second;
     66     for (auto &U : ValuePair.first->uses()) {
     67       Instruction *I = cast<Instruction>(U.getUser());
     68 
     69       if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      70         // Ignore simple (non-volatile, non-atomic) loads; they are always ok.
     71         if (!LI->isSimple()) return false;
     72         continue;
     73       }
     74 
     75       if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
     76         // If uses of the bitcast are ok, we are ok.
     77         ValuesToInspect.push_back(std::make_pair(I, IsOffset));
     78         continue;
     79       }
     80       if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
      81         // If the GEP has all zero indices, it doesn't offset the pointer;
      82         // otherwise it does.
     83         ValuesToInspect.push_back(
     84             std::make_pair(I, IsOffset || !GEP->hasAllZeroIndices()));
     85         continue;
     86       }
     87 
     88       if (auto CS = CallSite(I)) {
     89         // If this is the function being called then we treat it like a load and
     90         // ignore it.
     91         if (CS.isCallee(&U))
     92           continue;
     93 
     94         // Inalloca arguments are clobbered by the call.
     95         unsigned ArgNo = CS.getArgumentNo(&U);
     96         if (CS.isInAllocaArgument(ArgNo))
     97           return false;
     98 
     99         // If this is a readonly/readnone call site, then we know it is just a
    100         // load (but one that potentially returns the value itself), so we can
    101         // ignore it if we know that the value isn't captured.
    102         if (CS.onlyReadsMemory() &&
    103             (CS.getInstruction()->use_empty() || CS.doesNotCapture(ArgNo)))
    104           continue;
    105 
    106         // If this is being passed as a byval argument, the caller is making a
    107         // copy, so it is only a read of the alloca.
    108         if (CS.isByValArgument(ArgNo))
    109           continue;
    110       }
    111 
    112       // Lifetime intrinsics can be handled by the caller.
    113       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    114         if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
    115             II->getIntrinsicID() == Intrinsic::lifetime_end) {
    116           assert(II->use_empty() && "Lifetime markers have no result to use!");
    117           ToDelete.push_back(II);
    118           continue;
    119         }
    120       }
    121 
     122       // If this isn't our memcpy/memmove, reject it as something we can't
     123       // handle.
    124       MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
    125       if (!MI)
    126         return false;
    127 
     128       // If the transfer uses the alloca as its source, ignore it since it is
     129       // a load (unless the transfer is volatile).
    130       if (U.getOperandNo() == 1) {
    131         if (MI->isVolatile()) return false;
    132         continue;
    133       }
    134 
    135       // If we already have seen a copy, reject the second one.
    136       if (TheCopy) return false;
    137 
    138       // If the pointer has been offset from the start of the alloca, we can't
    139       // safely handle this.
    140       if (IsOffset) return false;
    141 
    142       // If the memintrinsic isn't using the alloca as the dest, reject it.
    143       if (U.getOperandNo() != 0) return false;
    144 
    145       // If the source of the memcpy/move is not a constant global, reject it.
    146       if (!pointsToConstantGlobal(MI->getSource()))
    147         return false;
    148 
    149       // Otherwise, the transform is safe.  Remember the copy instruction.
    150       TheCopy = MI;
    151     }
    152   }
    153   return true;
    154 }
    155 
    156 /// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
    157 /// modified by a copy from a constant global.  If we can prove this, we can
    158 /// replace any uses of the alloca with uses of the global directly.
    159 static MemTransferInst *
    160 isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
    161                                SmallVectorImpl<Instruction *> &ToDelete) {
    162   MemTransferInst *TheCopy = nullptr;
    163   if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    164     return TheCopy;
    165   return nullptr;
    166 }
    167 
    168 static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
    169   // Check for array size of 1 (scalar allocation).
    170   if (!AI.isArrayAllocation()) {
    171     // i32 1 is the canonical array size for scalar allocations.
    172     if (AI.getArraySize()->getType()->isIntegerTy(32))
    173       return nullptr;
    174 
    175     // Canonicalize it.
    176     Value *V = IC.Builder->getInt32(1);
    177     AI.setOperand(0, V);
    178     return &AI;
    179   }
    180 
    181   // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
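           // For instance (an illustrative sketch on a 64-bit target; %buf is a
           // made-up name):
           //   %buf = alloca i32, i32 4
           // becomes
           //   %buf = alloca [4 x i32]
           //   %buf.sub = getelementptr inbounds [4 x i32], [4 x i32]* %buf, i64 0, i64 0
           // with all former uses of the original alloca rewritten to use %buf.sub.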
    182   if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    183     Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
    184     AllocaInst *New = IC.Builder->CreateAlloca(NewTy, nullptr, AI.getName());
    185     New->setAlignment(AI.getAlignment());
    186 
     187     // Scan to the end of the allocation instructions, to skip over a block of
     188     // allocas if possible; also skip interleaved debug info.
    189     //
    190     BasicBlock::iterator It(New);
    191     while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
    192       ++It;
    193 
     194     // Now that It is pointing to the first non-allocation-inst in the block,
    195     // insert our getelementptr instruction...
    196     //
    197     Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
    198     Value *NullIdx = Constant::getNullValue(IdxTy);
    199     Value *Idx[2] = {NullIdx, NullIdx};
    200     Instruction *GEP =
    201         GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
    202     IC.InsertNewInstBefore(GEP, *It);
    203 
    204     // Now make everything use the getelementptr instead of the original
    205     // allocation.
    206     return IC.ReplaceInstUsesWith(AI, GEP);
    207   }
    208 
    209   if (isa<UndefValue>(AI.getArraySize()))
    210     return IC.ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
    211 
    212   // Ensure that the alloca array size argument has type intptr_t, so that
    213   // any casting is exposed early.
    214   Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
    215   if (AI.getArraySize()->getType() != IntPtrTy) {
    216     Value *V = IC.Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    217     AI.setOperand(0, V);
    218     return &AI;
    219   }
    220 
    221   return nullptr;
    222 }
    223 
    224 Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
    225   if (auto *I = simplifyAllocaArraySize(*this, AI))
    226     return I;
    227 
    228   if (AI.getAllocatedType()->isSized()) {
    229     // If the alignment is 0 (unspecified), assign it the preferred alignment.
    230     if (AI.getAlignment() == 0)
    231       AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));
    232 
     233     // Move all allocas of zero-byte objects to the entry block and merge them
     234     // together.  Note that we only do this for allocas, because malloc should
     235     // allocate and return a unique pointer, even for a zero-byte allocation.
    236     if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
    237       // For a zero sized alloca there is no point in doing an array allocation.
    238       // This is helpful if the array size is a complicated expression not used
    239       // elsewhere.
    240       if (AI.isArrayAllocation()) {
    241         AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
    242         return &AI;
    243       }
    244 
    245       // Get the first instruction in the entry block.
    246       BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
    247       Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
    248       if (FirstInst != &AI) {
    249         // If the entry block doesn't start with a zero-size alloca then move
    250         // this one to the start of the entry block.  There is no problem with
     251         // dominance, as the array size was already forced to a constant earlier.
    252         AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
    253         if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
    254             DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
    255           AI.moveBefore(FirstInst);
    256           return &AI;
    257         }
    258 
    259         // If the alignment of the entry block alloca is 0 (unspecified),
    260         // assign it the preferred alignment.
    261         if (EntryAI->getAlignment() == 0)
    262           EntryAI->setAlignment(
    263               DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
    264         // Replace this zero-sized alloca with the one at the start of the entry
    265         // block after ensuring that the address will be aligned enough for both
    266         // types.
    267         unsigned MaxAlign = std::max(EntryAI->getAlignment(),
    268                                      AI.getAlignment());
    269         EntryAI->setAlignment(MaxAlign);
    270         if (AI.getType() != EntryAI->getType())
    271           return new BitCastInst(EntryAI, AI.getType());
    272         return ReplaceInstUsesWith(AI, EntryAI);
    273       }
    274     }
    275   }
    276 
    277   if (AI.getAlignment()) {
    278     // Check to see if this allocation is only modified by a memcpy/memmove from
    279     // a constant global whose alignment is equal to or exceeds that of the
    280     // allocation.  If this is the case, we can change all users to use
    281     // the constant global instead.  This is commonly produced by the CFE by
    282     // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    283     // is only subsequently read.
    284     SmallVector<Instruction *, 4> ToDelete;
    285     if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
    286       unsigned SourceAlign = getOrEnforceKnownAlignment(
    287           Copy->getSource(), AI.getAlignment(), DL, &AI, AC, DT);
    288       if (AI.getAlignment() <= SourceAlign) {
    289         DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
    290         DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
    291         for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
    292           EraseInstFromFunction(*ToDelete[i]);
    293         Constant *TheSrc = cast<Constant>(Copy->getSource());
    294         Constant *Cast
    295           = ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, AI.getType());
    296         Instruction *NewI = ReplaceInstUsesWith(AI, Cast);
    297         EraseInstFromFunction(*Copy);
    298         ++NumGlobalCopies;
    299         return NewI;
    300       }
    301     }
    302   }
    303 
     304   // Finally, use the generic allocation site handler to aggressively remove
    305   // unused allocas.
    306   return visitAllocSite(AI);
    307 }
    308 
    309 /// \brief Helper to combine a load to a new type.
    310 ///
    311 /// This just does the work of combining a load to a new type. It handles
    312 /// metadata, etc., and returns the new instruction. The \c NewTy should be the
    313 /// loaded *value* type. This will convert it to a pointer, cast the operand to
    314 /// that pointer type, load it, etc.
    315 ///
    316 /// Note that this will create all of the instructions with whatever insert
    317 /// point the \c InstCombiner currently is using.
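         ///
         /// A minimal sketch of the rewrite (illustrative names, with \c NewTy being
         /// i8* and the original load being of an i64):
         ///   %v = load i64, i64* %p
         /// becomes
         ///   %p.cast = bitcast i64* %p to i8**
         ///   %v = load i8*, i8** %p.cast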
    318 static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewTy,
    319                                       const Twine &Suffix = "") {
    320   Value *Ptr = LI.getPointerOperand();
    321   unsigned AS = LI.getPointerAddressSpace();
    322   SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
    323   LI.getAllMetadata(MD);
    324 
    325   LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
    326       IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
    327       LI.getAlignment(), LI.getName() + Suffix);
    328   MDBuilder MDB(NewLoad->getContext());
    329   for (const auto &MDPair : MD) {
    330     unsigned ID = MDPair.first;
    331     MDNode *N = MDPair.second;
    332     // Note, essentially every kind of metadata should be preserved here! This
    333     // routine is supposed to clone a load instruction changing *only its type*.
    334     // The only metadata it makes sense to drop is metadata which is invalidated
    335     // when the pointer type changes. This should essentially never be the case
    336     // in LLVM, but we explicitly switch over only known metadata to be
    337     // conservatively correct. If you are adding metadata to LLVM which pertains
    338     // to loads, you almost certainly want to add it here.
    339     switch (ID) {
    340     case LLVMContext::MD_dbg:
    341     case LLVMContext::MD_tbaa:
    342     case LLVMContext::MD_prof:
    343     case LLVMContext::MD_fpmath:
    344     case LLVMContext::MD_tbaa_struct:
    345     case LLVMContext::MD_invariant_load:
    346     case LLVMContext::MD_alias_scope:
    347     case LLVMContext::MD_noalias:
    348     case LLVMContext::MD_nontemporal:
    349     case LLVMContext::MD_mem_parallel_loop_access:
    350       // All of these directly apply.
    351       NewLoad->setMetadata(ID, N);
    352       break;
    353 
    354     case LLVMContext::MD_nonnull:
    355       // This only directly applies if the new type is also a pointer.
    356       if (NewTy->isPointerTy()) {
    357         NewLoad->setMetadata(ID, N);
    358         break;
    359       }
    360       // If it's integral now, translate it to !range metadata.
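               // For an integer load of width N this produces, roughly, !range !{iN 1, iN 0},
               // i.e. the wrapping interval [1, 0) that excludes only the null value
               // (a sketch of the intent, not the exact metadata printing).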
    361       if (NewTy->isIntegerTy()) {
    362         auto *ITy = cast<IntegerType>(NewTy);
    363         auto *NullInt = ConstantExpr::getPtrToInt(
    364             ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
    365         auto *NonNullInt =
    366             ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
    367         NewLoad->setMetadata(LLVMContext::MD_range,
    368                              MDB.createRange(NonNullInt, NullInt));
    369       }
    370       break;
    371     case LLVMContext::MD_align:
    372     case LLVMContext::MD_dereferenceable:
    373     case LLVMContext::MD_dereferenceable_or_null:
    374       // These only directly apply if the new type is also a pointer.
    375       if (NewTy->isPointerTy())
    376         NewLoad->setMetadata(ID, N);
    377       break;
    378     case LLVMContext::MD_range:
    379       // FIXME: It would be nice to propagate this in some way, but the type
    380       // conversions make it hard. If the new type is a pointer, we could
    381       // translate it to !nonnull metadata.
    382       break;
    383     }
    384   }
    385   return NewLoad;
    386 }
    387 
    388 /// \brief Combine a store to a new type.
    389 ///
    390 /// Returns the newly created store instruction.
    391 static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value *V) {
    392   Value *Ptr = SI.getPointerOperand();
    393   unsigned AS = SI.getPointerAddressSpace();
    394   SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
    395   SI.getAllMetadata(MD);
    396 
    397   StoreInst *NewStore = IC.Builder->CreateAlignedStore(
    398       V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
    399       SI.getAlignment());
    400   for (const auto &MDPair : MD) {
    401     unsigned ID = MDPair.first;
    402     MDNode *N = MDPair.second;
    403     // Note, essentially every kind of metadata should be preserved here! This
    404     // routine is supposed to clone a store instruction changing *only its
    405     // type*. The only metadata it makes sense to drop is metadata which is
    406     // invalidated when the pointer type changes. This should essentially
    407     // never be the case in LLVM, but we explicitly switch over only known
    408     // metadata to be conservatively correct. If you are adding metadata to
    409     // LLVM which pertains to stores, you almost certainly want to add it
    410     // here.
    411     switch (ID) {
    412     case LLVMContext::MD_dbg:
    413     case LLVMContext::MD_tbaa:
    414     case LLVMContext::MD_prof:
    415     case LLVMContext::MD_fpmath:
    416     case LLVMContext::MD_tbaa_struct:
    417     case LLVMContext::MD_alias_scope:
    418     case LLVMContext::MD_noalias:
    419     case LLVMContext::MD_nontemporal:
    420     case LLVMContext::MD_mem_parallel_loop_access:
    421       // All of these directly apply.
    422       NewStore->setMetadata(ID, N);
    423       break;
    424 
    425     case LLVMContext::MD_invariant_load:
    426     case LLVMContext::MD_nonnull:
    427     case LLVMContext::MD_range:
    428     case LLVMContext::MD_align:
    429     case LLVMContext::MD_dereferenceable:
    430     case LLVMContext::MD_dereferenceable_or_null:
    431       // These don't apply for stores.
    432       break;
    433     }
    434   }
    435 
    436   return NewStore;
    437 }
    438 
     439 /// \brief Combine loads to match the type of the value their uses expect, after
     440 /// looking through intervening bitcasts.
    441 ///
    442 /// The core idea here is that if the result of a load is used in an operation,
    443 /// we should load the type most conducive to that operation. For example, when
    444 /// loading an integer and converting that immediately to a pointer, we should
    445 /// instead directly load a pointer.
    446 ///
    447 /// However, this routine must never change the width of a load or the number of
    448 /// loads as that would introduce a semantic change. This combine is expected to
    449 /// be a semantic no-op which just allows loads to more closely model the types
    450 /// of their consuming operations.
    451 ///
    452 /// Currently, we also refuse to change the precise type used for an atomic load
    453 /// or a volatile load. This is debatable, and might be reasonable to change
    454 /// later. However, it is risky in case some backend or other part of LLVM is
    455 /// relying on the exact type loaded to select appropriate atomic operations.
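         ///
         /// A hedged example of the kind of rewrite performed here (names are
         /// illustrative, assuming 64-bit pointers):
         ///   %i = load i64, i64* %p        ; only used by the inttoptr below
         ///   %q = inttoptr i64 %i to i8*
         /// can become
         ///   %p.cast = bitcast i64* %p to i8**
         ///   %q = load i8*, i8** %p.cast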
    456 static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
    457   // FIXME: We could probably with some care handle both volatile and atomic
    458   // loads here but it isn't clear that this is important.
    459   if (!LI.isSimple())
    460     return nullptr;
    461 
    462   if (LI.use_empty())
    463     return nullptr;
    464 
    465   Type *Ty = LI.getType();
    466   const DataLayout &DL = IC.getDataLayout();
    467 
    468   // Try to canonicalize loads which are only ever stored to operate over
    469   // integers instead of any other type. We only do this when the loaded type
    470   // is sized and has a size exactly the same as its store size and the store
    471   // size is a legal integer type.
    472   if (!Ty->isIntegerTy() && Ty->isSized() &&
    473       DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
    474       DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty)) {
    475     if (std::all_of(LI.user_begin(), LI.user_end(), [&LI](User *U) {
    476           auto *SI = dyn_cast<StoreInst>(U);
    477           return SI && SI->getPointerOperand() != &LI;
    478         })) {
    479       LoadInst *NewLoad = combineLoadToNewType(
    480           IC, LI,
    481           Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
    482       // Replace all the stores with stores of the newly loaded value.
    483       for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
    484         auto *SI = cast<StoreInst>(*UI++);
    485         IC.Builder->SetInsertPoint(SI);
    486         combineStoreToNewValue(IC, *SI, NewLoad);
    487         IC.EraseInstFromFunction(*SI);
    488       }
    489       assert(LI.use_empty() && "Failed to remove all users of the load!");
    490       // Return the old load so the combiner can delete it safely.
    491       return &LI;
    492     }
    493   }
    494 
    495   // Fold away bit casts of the loaded value by loading the desired type.
    496   // We can do this for BitCastInsts as well as casts from and to pointer types,
     497   // as long as those are noops (i.e., the source or dest type has the same
    498   // bitwidth as the target's pointers).
    499   if (LI.hasOneUse())
    500     if (auto* CI = dyn_cast<CastInst>(LI.user_back())) {
    501       if (CI->isNoopCast(DL)) {
    502         LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
    503         CI->replaceAllUsesWith(NewLoad);
    504         IC.EraseInstFromFunction(*CI);
    505         return &LI;
    506       }
    507     }
    508 
    509   // FIXME: We should also canonicalize loads of vectors when their elements are
    510   // cast to other types.
    511   return nullptr;
    512 }
    513 
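         /// \brief Unpack a load of an aggregate into loads of its members.
         ///
         /// A rough sketch of the rewrite for a padding-free struct (illustrative
         /// names):
         ///   %v = load { i32, i32 }, { i32, i32 }* %p
         /// becomes two element loads through inbounds GEPs whose results are
         /// recombined with insertvalue into an equivalent %v.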
    514 static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
    515   // FIXME: We could probably with some care handle both volatile and atomic
     516   // loads here but it isn't clear that this is important.
    517   if (!LI.isSimple())
    518     return nullptr;
    519 
    520   Type *T = LI.getType();
    521   if (!T->isAggregateType())
    522     return nullptr;
    523 
    524   assert(LI.getAlignment() && "Alignment must be set at this point");
    525 
    526   if (auto *ST = dyn_cast<StructType>(T)) {
     527     // If the struct only has one element, we unpack.
    528     unsigned Count = ST->getNumElements();
    529     if (Count == 1) {
    530       LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
    531                                                ".unpack");
    532       return IC.ReplaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
    533         UndefValue::get(T), NewLoad, 0, LI.getName()));
    534     }
    535 
     536     // We don't want to break loads with padding here, as we'd lose
    537     // the knowledge that padding exists for the rest of the pipeline.
    538     const DataLayout &DL = IC.getDataLayout();
    539     auto *SL = DL.getStructLayout(ST);
    540     if (SL->hasPadding())
    541       return nullptr;
    542 
    543     auto Name = LI.getName();
    544     SmallString<16> LoadName = Name;
    545     LoadName += ".unpack";
    546     SmallString<16> EltName = Name;
    547     EltName += ".elt";
    548     auto *Addr = LI.getPointerOperand();
    549     Value *V = UndefValue::get(T);
    550     auto *IdxType = Type::getInt32Ty(ST->getContext());
    551     auto *Zero = ConstantInt::get(IdxType, 0);
    552     for (unsigned i = 0; i < Count; i++) {
    553       Value *Indices[2] = {
    554         Zero,
    555         ConstantInt::get(IdxType, i),
    556       };
    557       auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices), EltName);
    558       auto *L = IC.Builder->CreateLoad(ST->getTypeAtIndex(i), Ptr, LoadName);
    559       V = IC.Builder->CreateInsertValue(V, L, i);
    560     }
    561 
    562     V->setName(Name);
    563     return IC.ReplaceInstUsesWith(LI, V);
    564   }
    565 
    566   if (auto *AT = dyn_cast<ArrayType>(T)) {
     567     // If the array only has one element, we unpack.
    568     if (AT->getNumElements() == 1) {
    569       LoadInst *NewLoad = combineLoadToNewType(IC, LI, AT->getElementType(),
    570                                                ".unpack");
    571       return IC.ReplaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
    572         UndefValue::get(T), NewLoad, 0, LI.getName()));
    573     }
    574   }
    575 
    576   return nullptr;
    577 }
    578 
    579 // If we can determine that all possible objects pointed to by the provided
     580 // pointer value are not only dereferenceable, but also definitively less than
     581 // or equal to the provided maximum size, then return true (constant global
     582 // values and allocas fall into this category). Otherwise, return false.
    583 //
    584 // FIXME: This should probably live in ValueTracking (or similar).
    585 static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
    586                                      const DataLayout &DL) {
    587   SmallPtrSet<Value *, 4> Visited;
    588   SmallVector<Value *, 4> Worklist(1, V);
    589 
    590   do {
    591     Value *P = Worklist.pop_back_val();
    592     P = P->stripPointerCasts();
    593 
    594     if (!Visited.insert(P).second)
    595       continue;
    596 
    597     if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
    598       Worklist.push_back(SI->getTrueValue());
    599       Worklist.push_back(SI->getFalseValue());
    600       continue;
    601     }
    602 
    603     if (PHINode *PN = dyn_cast<PHINode>(P)) {
    604       for (Value *IncValue : PN->incoming_values())
    605         Worklist.push_back(IncValue);
    606       continue;
    607     }
    608 
    609     if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
    610       if (GA->mayBeOverridden())
    611         return false;
    612       Worklist.push_back(GA->getAliasee());
    613       continue;
    614     }
    615 
    616     // If we know how big this object is, and it is less than MaxSize, continue
    617     // searching. Otherwise, return false.
    618     if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
    619       if (!AI->getAllocatedType()->isSized())
    620         return false;
    621 
    622       ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
    623       if (!CS)
    624         return false;
    625 
    626       uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
     627       // Make sure that, even if the multiplication below would wrap as a
     628       // uint64_t, we still do the right thing.
    629       if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
    630         return false;
    631       continue;
    632     }
    633 
    634     if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
    635       if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
    636         return false;
    637 
    638       uint64_t InitSize = DL.getTypeAllocSize(GV->getType()->getElementType());
    639       if (InitSize > MaxSize)
    640         return false;
    641       continue;
    642     }
    643 
    644     return false;
    645   } while (!Worklist.empty());
    646 
    647   return true;
    648 }
    649 
    650 // If we're indexing into an object of a known size, and the outer index is
    651 // not a constant, but having any value but zero would lead to undefined
    652 // behavior, replace it with zero.
    653 //
    654 // For example, if we have:
    655 // @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
    656 // ...
    657 // %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
    658 // ... = load i32* %arrayidx, align 4
    659 // Then we know that we can replace %x in the GEP with i64 0.
    660 //
    661 // FIXME: We could fold any GEP index to zero that would cause UB if it were
     662 // not zero. Currently, we only handle the first such index. We could also
     663 // search through non-zero constant indices if we kept track of the
    664 // offsets those indices implied.
    665 static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
    666                                      Instruction *MemI, unsigned &Idx) {
    667   if (GEPI->getNumOperands() < 2)
    668     return false;
    669 
    670   // Find the first non-zero index of a GEP. If all indices are zero, return
    671   // one past the last index.
    672   auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    673     unsigned I = 1;
    674     for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
    675       Value *V = GEPI->getOperand(I);
    676       if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
    677         if (CI->isZero())
    678           continue;
    679 
    680       break;
    681     }
    682 
    683     return I;
    684   };
    685 
    686   // Skip through initial 'zero' indices, and find the corresponding pointer
    687   // type. See if the next index is not a constant.
    688   Idx = FirstNZIdx(GEPI);
    689   if (Idx == GEPI->getNumOperands())
    690     return false;
    691   if (isa<Constant>(GEPI->getOperand(Idx)))
    692     return false;
    693 
    694   SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
    695   Type *AllocTy = GetElementPtrInst::getIndexedType(
    696       cast<PointerType>(GEPI->getOperand(0)->getType()->getScalarType())
    697           ->getElementType(),
    698       Ops);
    699   if (!AllocTy || !AllocTy->isSized())
    700     return false;
    701   const DataLayout &DL = IC.getDataLayout();
    702   uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);
    703 
    704   // If there are more indices after the one we might replace with a zero, make
    705   // sure they're all non-negative. If any of them are negative, the overall
    706   // address being computed might be before the base address determined by the
    707   // first non-zero index.
    708   auto IsAllNonNegative = [&]() {
    709     for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
    710       bool KnownNonNegative, KnownNegative;
    711       IC.ComputeSignBit(GEPI->getOperand(i), KnownNonNegative,
    712                         KnownNegative, 0, MemI);
    713       if (KnownNonNegative)
    714         continue;
    715       return false;
    716     }
    717 
    718     return true;
    719   };
    720 
    721   // FIXME: If the GEP is not inbounds, and there are extra indices after the
    722   // one we'll replace, those could cause the address computation to wrap
    723   // (rendering the IsAllNonNegative() check below insufficient). We can do
    724   // better, ignoring zero indices (and other indices we can prove small
    725   // enough not to wrap).
    726   if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    727     return false;
    728 
    729   // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
    730   // also known to be dereferenceable.
    731   return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
    732          IsAllNonNegative();
    733 }
    734 
    735 // If we're indexing into an object with a variable index for the memory
    736 // access, but the object has only one element, we can assume that the index
    737 // will always be zero. If we replace the GEP, return it.
    738 template <typename T>
    739 static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
    740                                           T &MemI) {
    741   if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    742     unsigned Idx;
    743     if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
    744       Instruction *NewGEPI = GEPI->clone();
    745       NewGEPI->setOperand(Idx,
    746         ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
    747       NewGEPI->insertBefore(GEPI);
    748       MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
    749       return NewGEPI;
    750     }
    751   }
    752 
    753   return nullptr;
    754 }
    755 
    756 Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
    757   Value *Op = LI.getOperand(0);
    758 
    759   // Try to canonicalize the loaded type.
    760   if (Instruction *Res = combineLoadToOperationType(*this, LI))
    761     return Res;
    762 
    763   // Attempt to improve the alignment.
    764   unsigned KnownAlign = getOrEnforceKnownAlignment(
    765       Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, AC, DT);
    766   unsigned LoadAlign = LI.getAlignment();
    767   unsigned EffectiveLoadAlign =
    768       LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());
    769 
    770   if (KnownAlign > EffectiveLoadAlign)
    771     LI.setAlignment(KnownAlign);
    772   else if (LoadAlign == 0)
    773     LI.setAlignment(EffectiveLoadAlign);
    774 
    775   // Replace GEP indices if possible.
    776   if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    777       Worklist.Add(NewGEPI);
    778       return &LI;
    779   }
    780 
    781   // None of the following transforms are legal for volatile/atomic loads.
    782   // FIXME: Some of it is okay for atomic loads; needs refactoring.
    783   if (!LI.isSimple()) return nullptr;
    784 
    785   if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    786     return Res;
    787 
    788   // Do really simple store-to-load forwarding and load CSE, to catch cases
    789   // where there are several consecutive memory accesses to the same location,
    790   // separated by a few arithmetic operations.
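           // A small sketch of the idea (illustrative only):
           //   store i32 %v, i32* %p
           //   %x = add i32 %v, 1            ; no intervening writes to %p
           //   %y = load i32, i32* %p        ; can be replaced by %v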
    791   BasicBlock::iterator BBI(LI);
    792   AAMDNodes AATags;
    793   if (Value *AvailableVal =
    794       FindAvailableLoadedValue(Op, LI.getParent(), BBI,
    795                                DefMaxInstsToScan, AA, &AATags)) {
    796     if (LoadInst *NLI = dyn_cast<LoadInst>(AvailableVal)) {
    797       unsigned KnownIDs[] = {
    798           LLVMContext::MD_tbaa,            LLVMContext::MD_alias_scope,
    799           LLVMContext::MD_noalias,         LLVMContext::MD_range,
    800           LLVMContext::MD_invariant_load,  LLVMContext::MD_nonnull,
    801           LLVMContext::MD_invariant_group, LLVMContext::MD_align,
    802           LLVMContext::MD_dereferenceable,
    803           LLVMContext::MD_dereferenceable_or_null};
    804       combineMetadata(NLI, &LI, KnownIDs);
    805     };
    806 
    807     return ReplaceInstUsesWith(
    808         LI, Builder->CreateBitOrPointerCast(AvailableVal, LI.getType(),
    809                                             LI.getName() + ".cast"));
    810   }
    811 
    812   // load(gep null, ...) -> unreachable
    813   if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    814     const Value *GEPI0 = GEPI->getOperand(0);
    815     // TODO: Consider a target hook for valid address spaces for this xform.
    816     if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0){
    817       // Insert a new store to null instruction before the load to indicate
    818       // that this code is not reachable.  We do this instead of inserting
    819       // an unreachable instruction directly because we cannot modify the
    820       // CFG.
    821       new StoreInst(UndefValue::get(LI.getType()),
    822                     Constant::getNullValue(Op->getType()), &LI);
    823       return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    824     }
    825   }
    826 
    827   // load null/undef -> unreachable
    828   // TODO: Consider a target hook for valid address spaces for this xform.
    829   if (isa<UndefValue>(Op) ||
    830       (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    831     // Insert a new store to null instruction before the load to indicate that
    832     // this code is not reachable.  We do this instead of inserting an
    833     // unreachable instruction directly because we cannot modify the CFG.
    834     new StoreInst(UndefValue::get(LI.getType()),
    835                   Constant::getNullValue(Op->getType()), &LI);
    836     return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    837   }
    838 
    839   if (Op->hasOneUse()) {
    840     // Change select and PHI nodes to select values instead of addresses: this
     841     // helps alias analysis out a lot, allows many other simplifications, and
    842     // exposes redundancy in the code.
    843     //
    844     // Note that we cannot do the transformation unless we know that the
    845     // introduced loads cannot trap!  Something like this is valid as long as
    846     // the condition is always false: load (select bool %C, int* null, int* %G),
    847     // but it would not be valid if we transformed it to load from null
    848     // unconditionally.
    849     //
    850     if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
    851       // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
    852       unsigned Align = LI.getAlignment();
    853       if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align) &&
    854           isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align)) {
    855         LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
    856                                            SI->getOperand(1)->getName()+".val");
    857         LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
    858                                            SI->getOperand(2)->getName()+".val");
    859         V1->setAlignment(Align);
    860         V2->setAlignment(Align);
    861         return SelectInst::Create(SI->getCondition(), V1, V2);
    862       }
    863 
    864       // load (select (cond, null, P)) -> load P
    865       if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
    866           LI.getPointerAddressSpace() == 0) {
    867         LI.setOperand(0, SI->getOperand(2));
    868         return &LI;
    869       }
    870 
    871       // load (select (cond, P, null)) -> load P
    872       if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
    873           LI.getPointerAddressSpace() == 0) {
    874         LI.setOperand(0, SI->getOperand(1));
    875         return &LI;
    876       }
    877     }
    878   }
    879   return nullptr;
    880 }
    881 
    882 /// \brief Combine stores to match the type of value being stored.
    883 ///
    884 /// The core idea here is that the memory does not have any intrinsic type and
     885 /// where we can, we should match the type of a store to the type of value being
    886 /// stored.
    887 ///
    888 /// However, this routine must never change the width of a store or the number of
    889 /// stores as that would introduce a semantic change. This combine is expected to
    890 /// be a semantic no-op which just allows stores to more closely model the types
    891 /// of their incoming values.
    892 ///
    893 /// Currently, we also refuse to change the precise type used for an atomic or
    894 /// volatile store. This is debatable, and might be reasonable to change later.
    895 /// However, it is risky in case some backend or other part of LLVM is relying
    896 /// on the exact type stored to select appropriate atomic operations.
    897 ///
    898 /// \returns true if the store was successfully combined away. This indicates
    899 /// the caller must erase the store instruction. We have to let the caller erase
    900 /// the store instruction as otherwise there is no way to signal whether it was
    901 /// combined or not: IC.EraseInstFromFunction returns a null pointer.
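         ///
         /// A minimal sketch of the fold performed below (illustrative names):
         ///   %f = bitcast i32 %i to float
         ///   store float %f, float* %p
         /// becomes
         ///   %p.cast = bitcast float* %p to i32*
         ///   store i32 %i, i32* %p.cast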
    902 static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
    903   // FIXME: We could probably with some care handle both volatile and atomic
    904   // stores here but it isn't clear that this is important.
    905   if (!SI.isSimple())
    906     return false;
    907 
    908   Value *V = SI.getValueOperand();
    909 
    910   // Fold away bit casts of the stored value by storing the original type.
    911   if (auto *BC = dyn_cast<BitCastInst>(V)) {
    912     V = BC->getOperand(0);
    913     combineStoreToNewValue(IC, SI, V);
    914     return true;
    915   }
    916 
     917   // FIXME: We should also canonicalize stores of vectors when their elements are
    918   // cast to other types.
    919   return false;
    920 }
    921 
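         /// \brief Unpack a store of an aggregate into stores of its members.
         ///
         /// A rough sketch for a padding-free struct (illustrative names):
         ///   store { i32, i32 } %v, { i32, i32 }* %p
         /// becomes extractvalue instructions on %v whose results are stored through
         /// inbounds GEPs to the individual element addresses.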
    922 static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
    923   // FIXME: We could probably with some care handle both volatile and atomic
    924   // stores here but it isn't clear that this is important.
    925   if (!SI.isSimple())
    926     return false;
    927 
    928   Value *V = SI.getValueOperand();
    929   Type *T = V->getType();
    930 
    931   if (!T->isAggregateType())
    932     return false;
    933 
    934   if (auto *ST = dyn_cast<StructType>(T)) {
     935     // If the struct only has one element, we unpack.
    936     unsigned Count = ST->getNumElements();
    937     if (Count == 1) {
    938       V = IC.Builder->CreateExtractValue(V, 0);
    939       combineStoreToNewValue(IC, SI, V);
    940       return true;
    941     }
    942 
     943     // We don't want to break stores with padding here, as we'd lose
    944     // the knowledge that padding exists for the rest of the pipeline.
    945     const DataLayout &DL = IC.getDataLayout();
    946     auto *SL = DL.getStructLayout(ST);
    947     if (SL->hasPadding())
    948       return false;
    949 
    950     SmallString<16> EltName = V->getName();
    951     EltName += ".elt";
    952     auto *Addr = SI.getPointerOperand();
    953     SmallString<16> AddrName = Addr->getName();
    954     AddrName += ".repack";
    955     auto *IdxType = Type::getInt32Ty(ST->getContext());
    956     auto *Zero = ConstantInt::get(IdxType, 0);
    957     for (unsigned i = 0; i < Count; i++) {
    958       Value *Indices[2] = {
    959         Zero,
    960         ConstantInt::get(IdxType, i),
    961       };
    962       auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices), AddrName);
    963       auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
    964       IC.Builder->CreateStore(Val, Ptr);
    965     }
    966 
    967     return true;
    968   }
    969 
    970   if (auto *AT = dyn_cast<ArrayType>(T)) {
     971     // If the array only has one element, we unpack.
    972     if (AT->getNumElements() == 1) {
    973       V = IC.Builder->CreateExtractValue(V, 0);
    974       combineStoreToNewValue(IC, SI, V);
    975       return true;
    976     }
    977   }
    978 
    979   return false;
    980 }
    981 
    982 /// equivalentAddressValues - Test if A and B will obviously have the same
    983 /// value. This includes recognizing that %t0 and %t1 will have the same
    984 /// value in code like this:
    985 ///   %t0 = getelementptr \@a, 0, 3
    986 ///   store i32 0, i32* %t0
    987 ///   %t1 = getelementptr \@a, 0, 3
    988 ///   %t2 = load i32* %t1
    989 ///
    990 static bool equivalentAddressValues(Value *A, Value *B) {
    991   // Test if the values are trivially equivalent.
    992   if (A == B) return true;
    993 
     994   // Test if the values come from identical arithmetic instructions.
     995   // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
     996   // it's only used to compare two uses within the same basic block, which
    997   // means that they'll always either have the same value or one of them
    998   // will have an undefined value.
    999   if (isa<BinaryOperator>(A) ||
   1000       isa<CastInst>(A) ||
   1001       isa<PHINode>(A) ||
   1002       isa<GetElementPtrInst>(A))
   1003     if (Instruction *BI = dyn_cast<Instruction>(B))
   1004       if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
   1005         return true;
   1006 
   1007   // Otherwise they may not be equivalent.
   1008   return false;
   1009 }
   1010 
   1011 Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
   1012   Value *Val = SI.getOperand(0);
   1013   Value *Ptr = SI.getOperand(1);
   1014 
   1015   // Try to canonicalize the stored type.
   1016   if (combineStoreToValueType(*this, SI))
   1017     return EraseInstFromFunction(SI);
   1018 
   1019   // Attempt to improve the alignment.
   1020   unsigned KnownAlign = getOrEnforceKnownAlignment(
   1021       Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, AC, DT);
   1022   unsigned StoreAlign = SI.getAlignment();
   1023   unsigned EffectiveStoreAlign =
   1024       StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());
   1025 
   1026   if (KnownAlign > EffectiveStoreAlign)
   1027     SI.setAlignment(KnownAlign);
   1028   else if (StoreAlign == 0)
   1029     SI.setAlignment(EffectiveStoreAlign);
   1030 
   1031   // Try to canonicalize the stored type.
   1032   if (unpackStoreToAggregate(*this, SI))
   1033     return EraseInstFromFunction(SI);
   1034 
   1035   // Replace GEP indices if possible.
   1036   if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
   1037       Worklist.Add(NewGEPI);
   1038       return &SI;
   1039   }
   1040 
   1041   // Don't hack volatile/ordered stores.
   1042   // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
   1043   if (!SI.isUnordered()) return nullptr;
   1044 
   1045   // If the RHS is an alloca with a single use, zapify the store, making the
   1046   // alloca dead.
   1047   if (Ptr->hasOneUse()) {
   1048     if (isa<AllocaInst>(Ptr))
   1049       return EraseInstFromFunction(SI);
   1050     if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
   1051       if (isa<AllocaInst>(GEP->getOperand(0))) {
   1052         if (GEP->getOperand(0)->hasOneUse())
   1053           return EraseInstFromFunction(SI);
   1054       }
   1055     }
   1056   }
   1057 
   1058   // Do really simple DSE, to catch cases where there are several consecutive
   1059   // stores to the same location, separated by a few arithmetic operations. This
   1060   // situation often occurs with bitfield accesses.
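           // An illustrative sketch of what this catches:
           //   store i32 %a, i32* %p
           //   %b = or i32 %a, 1             ; a few arithmetic ops, no loads of %p
           //   store i32 %b, i32* %p         ; makes the first store dead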
   1061   BasicBlock::iterator BBI(SI);
   1062   for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
   1063        --ScanInsts) {
   1064     --BBI;
   1065     // Don't count debug info directives, lest they affect codegen,
    1066     // and skip pointer-to-pointer bitcasts, which are NOPs.
   1067     if (isa<DbgInfoIntrinsic>(BBI) ||
   1068         (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
   1069       ScanInsts++;
   1070       continue;
   1071     }
   1072 
   1073     if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
   1074       // Prev store isn't volatile, and stores to the same location?
   1075       if (PrevSI->isUnordered() && equivalentAddressValues(PrevSI->getOperand(1),
   1076                                                         SI.getOperand(1))) {
   1077         ++NumDeadStore;
   1078         ++BBI;
   1079         EraseInstFromFunction(*PrevSI);
   1080         continue;
   1081       }
   1082       break;
   1083     }
   1084 
    1085     // If this is a load, we have to stop.  However, if the load is from the
    1086     // pointer we're storing to and produces the value we're storing,
   1087     // then *this* store is dead (X = load P; store X -> P).
   1088     if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
   1089       if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
   1090         assert(SI.isUnordered() && "can't eliminate ordering operation");
   1091         return EraseInstFromFunction(SI);
   1092       }
   1093 
   1094       // Otherwise, this is a load from some other location.  Stores before it
   1095       // may not be dead.
   1096       break;
   1097     }
   1098 
   1099     // Don't skip over loads or things that can modify memory.
   1100     if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
   1101       break;
   1102   }
   1103 
   1104   // store X, null    -> turns into 'unreachable' in SimplifyCFG
   1105   if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
   1106     if (!isa<UndefValue>(Val)) {
   1107       SI.setOperand(0, UndefValue::get(Val->getType()));
   1108       if (Instruction *U = dyn_cast<Instruction>(Val))
   1109         Worklist.Add(U);  // Dropped a use.
   1110     }
   1111     return nullptr;  // Do not modify these!
   1112   }
   1113 
   1114   // store undef, Ptr -> noop
   1115   if (isa<UndefValue>(Val))
   1116     return EraseInstFromFunction(SI);
   1117 
   1118   // The code below needs to be audited and adjusted for unordered atomics
   1119   if (!SI.isSimple())
   1120     return nullptr;
   1121 
   1122   // If this store is the last instruction in the basic block (possibly
   1123   // excepting debug info instructions), and if the block ends with an
   1124   // unconditional branch, try to move it to the successor block.
   1125   BBI = SI.getIterator();
   1126   do {
   1127     ++BBI;
   1128   } while (isa<DbgInfoIntrinsic>(BBI) ||
   1129            (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
   1130   if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
   1131     if (BI->isUnconditional())
   1132       if (SimplifyStoreAtEndOfBlock(SI))
   1133         return nullptr;  // xform done!
   1134 
   1135   return nullptr;
   1136 }
   1137 
   1138 /// SimplifyStoreAtEndOfBlock - Turn things like:
   1139 ///   if () { *P = v1; } else { *P = v2 }
   1140 /// into a phi node with a store in the successor.
   1141 ///
   1142 /// Simplify things like:
   1143 ///   *P = v1; if () { *P = v2; }
   1144 /// into a phi node with a store in the successor.
   1145 ///
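         /// For example, a sketch of the first case (illustrative block and value names):
         ///   then:  store i32 1, i32* %P;  br label %cont
         ///   else:  store i32 2, i32* %P;  br label %cont
         /// becomes, in %cont:
         ///   %storemerge = phi i32 [ 1, %then ], [ 2, %else ]
         ///   store i32 %storemerge, i32* %P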
   1146 bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
   1147   BasicBlock *StoreBB = SI.getParent();
   1148 
   1149   // Check to see if the successor block has exactly two incoming edges.  If
   1150   // so, see if the other predecessor contains a store to the same location.
    1151   // If so, insert a PHI node (if needed) and move the stores down.
   1152   BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
   1153 
   1154   // Determine whether Dest has exactly two predecessors and, if so, compute
   1155   // the other predecessor.
   1156   pred_iterator PI = pred_begin(DestBB);
   1157   BasicBlock *P = *PI;
   1158   BasicBlock *OtherBB = nullptr;
   1159 
   1160   if (P != StoreBB)
   1161     OtherBB = P;
   1162 
   1163   if (++PI == pred_end(DestBB))
   1164     return false;
   1165 
   1166   P = *PI;
   1167   if (P != StoreBB) {
   1168     if (OtherBB)
   1169       return false;
   1170     OtherBB = P;
   1171   }
   1172   if (++PI != pred_end(DestBB))
   1173     return false;
   1174 
   1175   // Bail out if all the relevant blocks aren't distinct (this can happen,
   1176   // for example, if SI is in an infinite loop)
   1177   if (StoreBB == DestBB || OtherBB == DestBB)
   1178     return false;
   1179 
   1180   // Verify that the other block ends in a branch and is not otherwise empty.
   1181   BasicBlock::iterator BBI(OtherBB->getTerminator());
   1182   BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
   1183   if (!OtherBr || BBI == OtherBB->begin())
   1184     return false;
   1185 
   1186   // If the other block ends in an unconditional branch, check for the 'if then
    1187   // else' case.  There is an instruction before the branch.
   1188   StoreInst *OtherStore = nullptr;
   1189   if (OtherBr->isUnconditional()) {
   1190     --BBI;
   1191     // Skip over debugging info.
   1192     while (isa<DbgInfoIntrinsic>(BBI) ||
   1193            (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
   1194       if (BBI==OtherBB->begin())
   1195         return false;
   1196       --BBI;
   1197     }
   1198     // If this isn't a store, isn't a store to the same location, or is not the
   1199     // right kind of store, bail out.
   1200     OtherStore = dyn_cast<StoreInst>(BBI);
   1201     if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
   1202         !SI.isSameOperationAs(OtherStore))
   1203       return false;
   1204   } else {
   1205     // Otherwise, the other block ended with a conditional branch. If one of the
   1206     // destinations is StoreBB, then we have the if/then case.
   1207     if (OtherBr->getSuccessor(0) != StoreBB &&
   1208         OtherBr->getSuccessor(1) != StoreBB)
   1209       return false;
   1210 
   1211     // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
   1212     // if/then triangle.  See if there is a store to the same ptr as SI that
   1213     // lives in OtherBB.
   1214     for (;; --BBI) {
   1215       // Check to see if we find the matching store.
   1216       if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
   1217         if (OtherStore->getOperand(1) != SI.getOperand(1) ||
   1218             !SI.isSameOperationAs(OtherStore))
   1219           return false;
   1220         break;
   1221       }
   1222       // If we find something that may be using or overwriting the stored
   1223       // value, or if we run out of instructions, we can't do the xform.
   1224       if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
   1225           BBI == OtherBB->begin())
   1226         return false;
   1227     }
   1228 
    1229     // In order to eliminate the store in OtherBB, we have to
   1230     // make sure nothing reads or overwrites the stored value in
   1231     // StoreBB.
   1232     for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
   1233       // FIXME: This should really be AA driven.
   1234       if (I->mayReadFromMemory() || I->mayWriteToMemory())
   1235         return false;
   1236     }
   1237   }
   1238 
   1239   // Insert a PHI node now if we need it.
   1240   Value *MergedVal = OtherStore->getOperand(0);
   1241   if (MergedVal != SI.getOperand(0)) {
   1242     PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
   1243     PN->addIncoming(SI.getOperand(0), SI.getParent());
   1244     PN->addIncoming(OtherStore->getOperand(0), OtherBB);
   1245     MergedVal = InsertNewInstBefore(PN, DestBB->front());
   1246   }
   1247 
   1248   // Advance to a place where it is safe to insert the new store and
   1249   // insert it.
   1250   BBI = DestBB->getFirstInsertionPt();
   1251   StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
   1252                                    SI.isVolatile(),
   1253                                    SI.getAlignment(),
   1254                                    SI.getOrdering(),
   1255                                    SI.getSynchScope());
   1256   InsertNewInstBefore(NewSI, *BBI);
   1257   NewSI->setDebugLoc(OtherStore->getDebugLoc());
   1258 
   1259   // If the two stores had AA tags, merge them.
   1260   AAMDNodes AATags;
   1261   SI.getAAMetadata(AATags);
   1262   if (AATags) {
   1263     OtherStore->getAAMetadata(AATags, /* Merge = */ true);
   1264     NewSI->setAAMetadata(AATags);
   1265   }
   1266 
   1267   // Nuke the old stores.
   1268   EraseInstFromFunction(SI);
   1269   EraseInstFromFunction(*OtherStore);
   1270   return true;
   1271 }
   1272