      1 //===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This file implements the visit functions for load, store and alloca.
     11 //
     12 //===----------------------------------------------------------------------===//
     13 
     14 #include "InstCombineInternal.h"
     15 #include "llvm/ADT/SmallString.h"
     16 #include "llvm/ADT/Statistic.h"
     17 #include "llvm/Analysis/Loads.h"
     18 #include "llvm/IR/DataLayout.h"
     19 #include "llvm/IR/LLVMContext.h"
     20 #include "llvm/IR/IntrinsicInst.h"
     21 #include "llvm/IR/MDBuilder.h"
     22 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
     23 #include "llvm/Transforms/Utils/Local.h"
     24 using namespace llvm;
     25 
     26 #define DEBUG_TYPE "instcombine"
     27 
     28 STATISTIC(NumDeadStore,    "Number of dead stores eliminated");
     29 STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
     30 
     31 /// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
     32 /// some part of a constant global variable.  This intentionally only accepts
     33 /// constant expressions because we can't rewrite arbitrary instructions.
     34 static bool pointsToConstantGlobal(Value *V) {
     35   if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
     36     return GV->isConstant();
     37 
     38   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
     39     if (CE->getOpcode() == Instruction::BitCast ||
     40         CE->getOpcode() == Instruction::AddrSpaceCast ||
     41         CE->getOpcode() == Instruction::GetElementPtr)
     42       return pointsToConstantGlobal(CE->getOperand(0));
     43   }
     44   return false;
     45 }
     46 
     47 /// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
     48 /// pointer to an alloca.  Ignore any reads of the pointer; return false if we
     49 /// see any stores or other unknown uses.  If we see pointer arithmetic, keep
     50 /// track of whether it moves the pointer (with IsOffset) but otherwise traverse
     51 /// the uses.  If we see a memcpy/memmove that targets an unoffseted pointer to
     52 /// the alloca, and if the source pointer is a pointer to a constant global, we
     53 /// can optimize this.
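        ///
        /// As a schematic illustration (hypothetical IR, not taken from an actual
        /// test case), the pattern this recognizes looks like:
        ///
        ///     %buf = alloca [4 x i32]
        ///     call void @llvm.memcpy...(i8* %buf.i8, i8* %src.i8, i64 16, ...)
        ///     %v = load i32, i32* %elt     ; remaining uses of %buf only read it
        ///
        /// where %src.i8 is derived from a constant global.  Every use of %buf is a
        /// read or the destination of the single memcpy, so callers can later
        /// rewrite loads of %buf to read from the global directly.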
     54 static bool
     55 isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
     56                                SmallVectorImpl<Instruction *> &ToDelete) {
     57   // We track lifetime intrinsics as we encounter them.  If we decide to go
     58   // ahead and replace the value with the global, this lets the caller quickly
     59   // eliminate the markers.
     60 
     61   SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
     62   ValuesToInspect.push_back(std::make_pair(V, false));
     63   while (!ValuesToInspect.empty()) {
     64     auto ValuePair = ValuesToInspect.pop_back_val();
     65     const bool IsOffset = ValuePair.second;
     66     for (auto &U : ValuePair.first->uses()) {
     67       Instruction *I = cast<Instruction>(U.getUser());
     68 
     69       if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
     70         // Ignore simple (non-volatile, non-atomic) loads; they are always ok.
     71         if (!LI->isSimple()) return false;
     72         continue;
     73       }
     74 
     75       if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
     76         // If uses of the bitcast are ok, we are ok.
     77         ValuesToInspect.push_back(std::make_pair(I, IsOffset));
     78         continue;
     79       }
     80       if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
     81         // If the GEP has all zero indices, it doesn't offset the pointer;
     82         // otherwise it does.
     83         ValuesToInspect.push_back(
     84             std::make_pair(I, IsOffset || !GEP->hasAllZeroIndices()));
     85         continue;
     86       }
     87 
     88       if (auto CS = CallSite(I)) {
     89         // If this is the function being called then we treat it like a load and
     90         // ignore it.
     91         if (CS.isCallee(&U))
     92           continue;
     93 
     94         unsigned DataOpNo = CS.getDataOperandNo(&U);
     95         bool IsArgOperand = CS.isArgOperand(&U);
     96 
     97         // Inalloca arguments are clobbered by the call.
     98         if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
     99           return false;
    100 
    101         // If this is a readonly/readnone call site, then we know it is just a
    102         // load (but one that potentially returns the value itself), so we can
    103         // ignore it if we know that the value isn't captured.
    104         if (CS.onlyReadsMemory() &&
    105             (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
    106           continue;
    107 
    108         // If this is being passed as a byval argument, the caller is making a
    109         // copy, so it is only a read of the alloca.
    110         if (IsArgOperand && CS.isByValArgument(DataOpNo))
    111           continue;
    112       }
    113 
    114       // Lifetime intrinsics can be handled by the caller.
    115       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    116         if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
    117             II->getIntrinsicID() == Intrinsic::lifetime_end) {
    118           assert(II->use_empty() && "Lifetime markers have no result to use!");
    119           ToDelete.push_back(II);
    120           continue;
    121         }
    122       }
    123 
    124       // If this isn't our memcpy/memmove, reject it as something we can't
    125       // handle.
    126       MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
    127       if (!MI)
    128         return false;
    129 
    130       // If the transfer is using the alloca as its source, then ignore it since
    131       // it is only a read of the alloca (unless the transfer is volatile).
    132       if (U.getOperandNo() == 1) {
    133         if (MI->isVolatile()) return false;
    134         continue;
    135       }
    136 
    137       // If we already have seen a copy, reject the second one.
    138       if (TheCopy) return false;
    139 
    140       // If the pointer has been offset from the start of the alloca, we can't
    141       // safely handle this.
    142       if (IsOffset) return false;
    143 
    144       // If the memintrinsic isn't using the alloca as the dest, reject it.
    145       if (U.getOperandNo() != 0) return false;
    146 
    147       // If the source of the memcpy/move is not a constant global, reject it.
    148       if (!pointsToConstantGlobal(MI->getSource()))
    149         return false;
    150 
    151       // Otherwise, the transform is safe.  Remember the copy instruction.
    152       TheCopy = MI;
    153     }
    154   }
    155   return true;
    156 }
    157 
    158 /// isOnlyCopiedFromConstantGlobal - Return the memcpy/memmove if the specified
    159 /// alloca is only modified by a copy from a constant global.  If we can prove
    160 /// this, we can replace any uses of the alloca with uses of the global directly.
    161 static MemTransferInst *
    162 isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
    163                                SmallVectorImpl<Instruction *> &ToDelete) {
    164   MemTransferInst *TheCopy = nullptr;
    165   if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    166     return TheCopy;
    167   return nullptr;
    168 }
    169 
    170 static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
    171   // Check for array size of 1 (scalar allocation).
    172   if (!AI.isArrayAllocation()) {
    173     // i32 1 is the canonical array size for scalar allocations.
    174     if (AI.getArraySize()->getType()->isIntegerTy(32))
    175       return nullptr;
    176 
    177     // Canonicalize it.
    178     Value *V = IC.Builder->getInt32(1);
    179     AI.setOperand(0, V);
    180     return &AI;
    181   }
    182 
    183   // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
    184   if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    185     Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
    186     AllocaInst *New = IC.Builder->CreateAlloca(NewTy, nullptr, AI.getName());
    187     New->setAlignment(AI.getAlignment());
    188 
    189     // Scan to the end of the allocation instructions, to skip over a block of
    190     // allocas if possible; also skip interleaved debug info.
    191     //
    192     BasicBlock::iterator It(New);
    193     while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
    194       ++It;
    195 
    196     // Now that It is pointing to the first non-allocation-inst in the block,
    197     // insert our getelementptr instruction...
    198     //
    199     Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
    200     Value *NullIdx = Constant::getNullValue(IdxTy);
    201     Value *Idx[2] = {NullIdx, NullIdx};
    202     Instruction *GEP =
    203         GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
    204     IC.InsertNewInstBefore(GEP, *It);
    205 
    206     // Now make everything use the getelementptr instead of the original
    207     // allocation.
    208     return IC.replaceInstUsesWith(AI, GEP);
    209   }
    210 
    211   if (isa<UndefValue>(AI.getArraySize()))
    212     return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
    213 
    214   // Ensure that the alloca array size argument has type intptr_t, so that
    215   // any casting is exposed early.
    216   Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
    217   if (AI.getArraySize()->getType() != IntPtrTy) {
    218     Value *V = IC.Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    219     AI.setOperand(0, V);
    220     return &AI;
    221   }
    222 
    223   return nullptr;
    224 }
    225 
    226 Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
    227   if (auto *I = simplifyAllocaArraySize(*this, AI))
    228     return I;
    229 
    230   if (AI.getAllocatedType()->isSized()) {
    231     // If the alignment is 0 (unspecified), assign it the preferred alignment.
    232     if (AI.getAlignment() == 0)
    233       AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));
    234 
    235     // Move all allocas of zero-byte objects to the entry block and merge them
    236     // together.  Note that we only do this for allocas, because malloc should
    237     // allocate and return a unique pointer, even for a zero-byte allocation.
    238     if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
    239       // For a zero-sized alloca there is no point in doing an array allocation.
    240       // This is helpful if the array size is a complicated expression not used
    241       // elsewhere.
    242       if (AI.isArrayAllocation()) {
    243         AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
    244         return &AI;
    245       }
    246 
    247       // Get the first instruction in the entry block.
    248       BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
    249       Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
    250       if (FirstInst != &AI) {
    251         // If the entry block doesn't start with a zero-size alloca then move
    252         // this one to the start of the entry block.  There is no problem with
    253         // dominance as the array size was forced to a constant earlier already.
    254         AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
    255         if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
    256             DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
    257           AI.moveBefore(FirstInst);
    258           return &AI;
    259         }
    260 
    261         // If the alignment of the entry block alloca is 0 (unspecified),
    262         // assign it the preferred alignment.
    263         if (EntryAI->getAlignment() == 0)
    264           EntryAI->setAlignment(
    265               DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
    266         // Replace this zero-sized alloca with the one at the start of the entry
    267         // block after ensuring that the address will be aligned enough for both
    268         // types.
    269         unsigned MaxAlign = std::max(EntryAI->getAlignment(),
    270                                      AI.getAlignment());
    271         EntryAI->setAlignment(MaxAlign);
    272         if (AI.getType() != EntryAI->getType())
    273           return new BitCastInst(EntryAI, AI.getType());
    274         return replaceInstUsesWith(AI, EntryAI);
    275       }
    276     }
    277   }
    278 
    279   if (AI.getAlignment()) {
    280     // Check to see if this allocation is only modified by a memcpy/memmove from
    281     // a constant global whose alignment is equal to or exceeds that of the
    282     // allocation.  If this is the case, we can change all users to use
    283     // the constant global instead.  This is commonly produced by the CFE from
    284     // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    285     // is only subsequently read.
    286     SmallVector<Instruction *, 4> ToDelete;
    287     if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
    288       unsigned SourceAlign = getOrEnforceKnownAlignment(
    289           Copy->getSource(), AI.getAlignment(), DL, &AI, AC, DT);
    290       if (AI.getAlignment() <= SourceAlign) {
    291         DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
    292         DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
    293         for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
    294           eraseInstFromFunction(*ToDelete[i]);
    295         Constant *TheSrc = cast<Constant>(Copy->getSource());
    296         Constant *Cast
    297           = ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, AI.getType());
    298         Instruction *NewI = replaceInstUsesWith(AI, Cast);
    299         eraseInstFromFunction(*Copy);
    300         ++NumGlobalCopies;
    301         return NewI;
    302       }
    303     }
    304   }
    305 
    306   // Finally, use the generic allocation site handler to aggressively remove
    307   // unused allocas.
    308   return visitAllocSite(AI);
    309 }
    310 
    311 /// \brief Helper to combine a load to a new type.
    312 ///
    313 /// This just does the work of combining a load to a new type. It handles
    314 /// metadata, etc., and returns the new instruction. The \c NewTy should be the
    315 /// loaded *value* type. This will convert it to a pointer, cast the operand to
    316 /// that pointer type, load it, etc.
    317 ///
    318 /// Note that this will create all of the instructions with whatever insert
    319 /// point the \c InstCombiner currently is using.
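        ///
        /// As a schematic illustration (hypothetical IR, assuming an i64 load is
        /// being rewritten to load a double from the same address):
        ///
        ///     %v = load i64, i64* %p
        ///
        /// becomes
        ///
        ///     %p.cast = bitcast i64* %p to double*
        ///     %v = load double, double* %p.cast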
    320 static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewTy,
    321                                       const Twine &Suffix = "") {
    322   Value *Ptr = LI.getPointerOperand();
    323   unsigned AS = LI.getPointerAddressSpace();
    324   SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
    325   LI.getAllMetadata(MD);
    326 
    327   LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
    328       IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
    329       LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
    330   NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
    331   MDBuilder MDB(NewLoad->getContext());
    332   for (const auto &MDPair : MD) {
    333     unsigned ID = MDPair.first;
    334     MDNode *N = MDPair.second;
    335     // Note, essentially every kind of metadata should be preserved here! This
    336     // routine is supposed to clone a load instruction changing *only its type*.
    337     // The only metadata it makes sense to drop is metadata which is invalidated
    338     // when the pointer type changes. This should essentially never be the case
    339     // in LLVM, but we explicitly switch over only known metadata to be
    340     // conservatively correct. If you are adding metadata to LLVM which pertains
    341     // to loads, you almost certainly want to add it here.
    342     switch (ID) {
    343     case LLVMContext::MD_dbg:
    344     case LLVMContext::MD_tbaa:
    345     case LLVMContext::MD_prof:
    346     case LLVMContext::MD_fpmath:
    347     case LLVMContext::MD_tbaa_struct:
    348     case LLVMContext::MD_invariant_load:
    349     case LLVMContext::MD_alias_scope:
    350     case LLVMContext::MD_noalias:
    351     case LLVMContext::MD_nontemporal:
    352     case LLVMContext::MD_mem_parallel_loop_access:
    353       // All of these directly apply.
    354       NewLoad->setMetadata(ID, N);
    355       break;
    356 
    357     case LLVMContext::MD_nonnull:
    358       // This only directly applies if the new type is also a pointer.
    359       if (NewTy->isPointerTy()) {
    360         NewLoad->setMetadata(ID, N);
    361         break;
    362       }
    363       // If it's integral now, translate it to !range metadata.
    364       if (NewTy->isIntegerTy()) {
    365         auto *ITy = cast<IntegerType>(NewTy);
    366         auto *NullInt = ConstantExpr::getPtrToInt(
    367             ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
    368         auto *NonNullInt =
    369             ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
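                // Note: a range whose lower bound exceeds its upper bound wraps, so
                // [1, 0) covers every value except zero, mirroring the !nonnull
                // guarantee of the original load.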
    370         NewLoad->setMetadata(LLVMContext::MD_range,
    371                              MDB.createRange(NonNullInt, NullInt));
    372       }
    373       break;
    374     case LLVMContext::MD_align:
    375     case LLVMContext::MD_dereferenceable:
    376     case LLVMContext::MD_dereferenceable_or_null:
    377       // These only directly apply if the new type is also a pointer.
    378       if (NewTy->isPointerTy())
    379         NewLoad->setMetadata(ID, N);
    380       break;
    381     case LLVMContext::MD_range:
    382       // FIXME: It would be nice to propagate this in some way, but the type
    383       // conversions make it hard. If the new type is a pointer, we could
    384       // translate it to !nonnull metadata.
    385       break;
    386     }
    387   }
    388   return NewLoad;
    389 }
    390 
    391 /// \brief Combine a store to a new type.
    392 ///
    393 /// Returns the newly created store instruction.
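        ///
        /// As a schematic illustration (hypothetical IR), given a replacement value
        /// %v of type double and an original "store i64 %x, i64* %p", this emits
        ///
        ///     %p.cast = bitcast i64* %p to double*
        ///     store double %v, double* %p.cast
        ///
        /// and copies over whatever metadata still applies to the new store.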
    394 static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value *V) {
    395   Value *Ptr = SI.getPointerOperand();
    396   unsigned AS = SI.getPointerAddressSpace();
    397   SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
    398   SI.getAllMetadata(MD);
    399 
    400   StoreInst *NewStore = IC.Builder->CreateAlignedStore(
    401       V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
    402       SI.getAlignment(), SI.isVolatile());
    403   NewStore->setAtomic(SI.getOrdering(), SI.getSynchScope());
    404   for (const auto &MDPair : MD) {
    405     unsigned ID = MDPair.first;
    406     MDNode *N = MDPair.second;
    407     // Note, essentially every kind of metadata should be preserved here! This
    408     // routine is supposed to clone a store instruction changing *only its
    409     // type*. The only metadata it makes sense to drop is metadata which is
    410     // invalidated when the pointer type changes. This should essentially
    411     // never be the case in LLVM, but we explicitly switch over only known
    412     // metadata to be conservatively correct. If you are adding metadata to
    413     // LLVM which pertains to stores, you almost certainly want to add it
    414     // here.
    415     switch (ID) {
    416     case LLVMContext::MD_dbg:
    417     case LLVMContext::MD_tbaa:
    418     case LLVMContext::MD_prof:
    419     case LLVMContext::MD_fpmath:
    420     case LLVMContext::MD_tbaa_struct:
    421     case LLVMContext::MD_alias_scope:
    422     case LLVMContext::MD_noalias:
    423     case LLVMContext::MD_nontemporal:
    424     case LLVMContext::MD_mem_parallel_loop_access:
    425       // All of these directly apply.
    426       NewStore->setMetadata(ID, N);
    427       break;
    428 
    429     case LLVMContext::MD_invariant_load:
    430     case LLVMContext::MD_nonnull:
    431     case LLVMContext::MD_range:
    432     case LLVMContext::MD_align:
    433     case LLVMContext::MD_dereferenceable:
    434     case LLVMContext::MD_dereferenceable_or_null:
    435       // These don't apply for stores.
    436       break;
    437     }
    438   }
    439 
    440   return NewStore;
    441 }
    442 
    443 /// \brief Combine loads to match the type of their uses' value after looking
    444 /// through intervening bitcasts.
    445 ///
    446 /// The core idea here is that if the result of a load is used in an operation,
    447 /// we should load the type most conducive to that operation. For example, when
    448 /// loading an integer and converting that immediately to a pointer, we should
    449 /// instead directly load a pointer.
    450 ///
    451 /// However, this routine must never change the width of a load or the number of
    452 /// loads as that would introduce a semantic change. This combine is expected to
    453 /// be a semantic no-op which just allows loads to more closely model the types
    454 /// of their consuming operations.
    455 ///
    456 /// Currently, we also refuse to change the precise type used for an atomic load
    457 /// or a volatile load. This is debatable, and might be reasonable to change
    458 /// later. However, it is risky in case some backend or other part of LLVM is
    459 /// relying on the exact type loaded to select appropriate atomic operations.
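        ///
        /// As a schematic illustration (hypothetical IR, assuming 64-bit pointers),
        /// a load whose only use is a no-op cast to a pointer
        ///
        ///     %i = load i64, i64* %p
        ///     %q = inttoptr i64 %i to i8*
        ///
        /// is rewritten to load the pointer directly:
        ///
        ///     %p.cast = bitcast i64* %p to i8**
        ///     %q = load i8*, i8** %p.cast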
    460 static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
    461   // FIXME: We could probably with some care handle both volatile and ordered
    462   // atomic loads here but it isn't clear that this is important.
    463   if (!LI.isUnordered())
    464     return nullptr;
    465 
    466   if (LI.use_empty())
    467     return nullptr;
    468 
    469   Type *Ty = LI.getType();
    470   const DataLayout &DL = IC.getDataLayout();
    471 
    472   // Try to canonicalize loads which are only ever stored to operate over
    473   // integers instead of any other type. We only do this when the loaded type
    474   // is sized and has a size exactly the same as its store size and the store
    475   // size is a legal integer type.
    476   if (!Ty->isIntegerTy() && Ty->isSized() &&
    477       DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
    478       DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty)) {
    479     if (std::all_of(LI.user_begin(), LI.user_end(), [&LI](User *U) {
    480           auto *SI = dyn_cast<StoreInst>(U);
    481           return SI && SI->getPointerOperand() != &LI;
    482         })) {
    483       LoadInst *NewLoad = combineLoadToNewType(
    484           IC, LI,
    485           Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
    486       // Replace all the stores with stores of the newly loaded value.
    487       for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
    488         auto *SI = cast<StoreInst>(*UI++);
    489         IC.Builder->SetInsertPoint(SI);
    490         combineStoreToNewValue(IC, *SI, NewLoad);
    491         IC.eraseInstFromFunction(*SI);
    492       }
    493       assert(LI.use_empty() && "Failed to remove all users of the load!");
    494       // Return the old load so the combiner can delete it safely.
    495       return &LI;
    496     }
    497   }
    498 
    499   // Fold away bit casts of the loaded value by loading the desired type.
    500   // We can do this for BitCastInsts as well as casts from and to pointer types,
    501   // as long as those are noops (i.e., the source or dest type has the same
    502   // bitwidth as the target's pointers).
    503   if (LI.hasOneUse())
    504     if (auto* CI = dyn_cast<CastInst>(LI.user_back())) {
    505       if (CI->isNoopCast(DL)) {
    506         LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
    507         CI->replaceAllUsesWith(NewLoad);
    508         IC.eraseInstFromFunction(*CI);
    509         return &LI;
    510       }
    511     }
    512 
    513   // FIXME: We should also canonicalize loads of vectors when their elements are
    514   // cast to other types.
    515   return nullptr;
    516 }
    517 
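        /// Unpack a load of a small aggregate into loads of its elements.
        ///
        /// As a schematic illustration (hypothetical IR), a load of a padding-free
        /// two-element struct
        ///
        ///     %agg = load { i32, i32 }, { i32, i32 }* %p
        ///
        /// is rewritten into a GEP plus load per element, with the results
        /// reassembled into the aggregate via insertvalue.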
    518 static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
    519   // FIXME: We could probably with some care handle both volatile and atomic
    520   // loads here but it isn't clear that this is important.
    521   if (!LI.isSimple())
    522     return nullptr;
    523 
    524   Type *T = LI.getType();
    525   if (!T->isAggregateType())
    526     return nullptr;
    527 
    528   StringRef Name = LI.getName();
    529   assert(LI.getAlignment() && "Alignment must be set at this point");
    530 
    531   if (auto *ST = dyn_cast<StructType>(T)) {
    532     // If the struct only has one element, we unpack.
    533     auto NumElements = ST->getNumElements();
    534     if (NumElements == 1) {
    535       LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
    536                                                ".unpack");
    537       return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
    538         UndefValue::get(T), NewLoad, 0, Name));
    539     }
    540 
    541     // We don't want to break loads with padding here as we'd lose
    542     // the knowledge that padding exists for the rest of the pipeline.
    543     const DataLayout &DL = IC.getDataLayout();
    544     auto *SL = DL.getStructLayout(ST);
    545     if (SL->hasPadding())
    546       return nullptr;
    547 
    548     auto Align = LI.getAlignment();
    549     if (!Align)
    550       Align = DL.getABITypeAlignment(ST);
    551 
    552     auto *Addr = LI.getPointerOperand();
    553     auto *IdxType = Type::getInt32Ty(T->getContext());
    554     auto *Zero = ConstantInt::get(IdxType, 0);
    555 
    556     Value *V = UndefValue::get(T);
    557     for (unsigned i = 0; i < NumElements; i++) {
    558       Value *Indices[2] = {
    559         Zero,
    560         ConstantInt::get(IdxType, i),
    561       };
    562       auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
    563                                                 Name + ".elt");
    564       auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
    565       auto *L = IC.Builder->CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
    566       V = IC.Builder->CreateInsertValue(V, L, i);
    567     }
    568 
    569     V->setName(Name);
    570     return IC.replaceInstUsesWith(LI, V);
    571   }
    572 
    573   if (auto *AT = dyn_cast<ArrayType>(T)) {
    574     auto *ET = AT->getElementType();
    575     auto NumElements = AT->getNumElements();
    576     if (NumElements == 1) {
    577       LoadInst *NewLoad = combineLoadToNewType(IC, LI, ET, ".unpack");
    578       return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
    579         UndefValue::get(T), NewLoad, 0, Name));
    580     }
    581 
    582     const DataLayout &DL = IC.getDataLayout();
    583     auto EltSize = DL.getTypeAllocSize(ET);
    584     auto Align = LI.getAlignment();
    585     if (!Align)
    586       Align = DL.getABITypeAlignment(T);
    587 
    588     auto *Addr = LI.getPointerOperand();
    589     auto *IdxType = Type::getInt64Ty(T->getContext());
    590     auto *Zero = ConstantInt::get(IdxType, 0);
    591 
    592     Value *V = UndefValue::get(T);
    593     uint64_t Offset = 0;
    594     for (uint64_t i = 0; i < NumElements; i++) {
    595       Value *Indices[2] = {
    596         Zero,
    597         ConstantInt::get(IdxType, i),
    598       };
    599       auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
    600                                                 Name + ".elt");
    601       auto *L = IC.Builder->CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
    602                                               Name + ".unpack");
    603       V = IC.Builder->CreateInsertValue(V, L, i);
    604       Offset += EltSize;
    605     }
    606 
    607     V->setName(Name);
    608     return IC.replaceInstUsesWith(LI, V);
    609   }
    610 
    611   return nullptr;
    612 }
    613 
    614 // If we can determine that all possible objects pointed to by the provided
    615 // pointer value are not only dereferenceable but also definitively less than
    616 // or equal to the provided maximum size, then return true (constant global
    617 // values and allocas fall into this category). Otherwise, return false.
    618 //
    619 // FIXME: This should probably live in ValueTracking (or similar).
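        //
        // As a schematic example, if the pointer can only reach
        // "@g = internal constant [4 x i32] ..." or "%a = alloca i64", the largest
        // reachable object is 16 bytes, so this returns true for any MaxSize >= 16.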
    620 static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
    621                                      const DataLayout &DL) {
    622   SmallPtrSet<Value *, 4> Visited;
    623   SmallVector<Value *, 4> Worklist(1, V);
    624 
    625   do {
    626     Value *P = Worklist.pop_back_val();
    627     P = P->stripPointerCasts();
    628 
    629     if (!Visited.insert(P).second)
    630       continue;
    631 
    632     if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
    633       Worklist.push_back(SI->getTrueValue());
    634       Worklist.push_back(SI->getFalseValue());
    635       continue;
    636     }
    637 
    638     if (PHINode *PN = dyn_cast<PHINode>(P)) {
    639       for (Value *IncValue : PN->incoming_values())
    640         Worklist.push_back(IncValue);
    641       continue;
    642     }
    643 
    644     if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
    645       if (GA->isInterposable())
    646         return false;
    647       Worklist.push_back(GA->getAliasee());
    648       continue;
    649     }
    650 
    651     // If we know how big this object is, and it is less than MaxSize, continue
    652     // searching. Otherwise, return false.
    653     if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
    654       if (!AI->getAllocatedType()->isSized())
    655         return false;
    656 
    657       ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
    658       if (!CS)
    659         return false;
    660 
    661       uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
    662       // Make sure that, even if the multiplication below would wrap as an
    663       // Make sure that, even if the multiplication below would wrap as a
    664       if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
    665         return false;
    666       continue;
    667     }
    668 
    669     if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
    670       if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
    671         return false;
    672 
    673       uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
    674       if (InitSize > MaxSize)
    675         return false;
    676       continue;
    677     }
    678 
    679     return false;
    680   } while (!Worklist.empty());
    681 
    682   return true;
    683 }
    684 
    685 // If we're indexing into an object of a known size, and the outer index is
    686 // not a constant, but having any value but zero would lead to undefined
    687 // behavior, replace it with zero.
    688 //
    689 // For example, if we have:
    690 // @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
    691 // ...
    692 // %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
    693 // ... = load i32* %arrayidx, align 4
    694 // Then we know that we can replace %x in the GEP with i64 0.
    695 //
    696 // FIXME: We could fold any GEP index to zero that would cause UB if it were
    697 // not zero. Currently, we only handle the first such index. We could
    698 // also search through non-zero constant indices if we kept track of the
    699 // offsets those indices implied.
    700 static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
    701                                      Instruction *MemI, unsigned &Idx) {
    702   if (GEPI->getNumOperands() < 2)
    703     return false;
    704 
    705   // Find the first non-zero index of a GEP. If all indices are zero, return
    706   // one past the last index.
    707   auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    708     unsigned I = 1;
    709     for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
    710       Value *V = GEPI->getOperand(I);
    711       if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
    712         if (CI->isZero())
    713           continue;
    714 
    715       break;
    716     }
    717 
    718     return I;
    719   };
    720 
    721   // Skip through initial 'zero' indices, and find the corresponding pointer
    722   // type. See if the next index is not a constant.
    723   Idx = FirstNZIdx(GEPI);
    724   if (Idx == GEPI->getNumOperands())
    725     return false;
    726   if (isa<Constant>(GEPI->getOperand(Idx)))
    727     return false;
    728 
    729   SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
    730   Type *AllocTy =
    731     GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
    732   if (!AllocTy || !AllocTy->isSized())
    733     return false;
    734   const DataLayout &DL = IC.getDataLayout();
    735   uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);
    736 
    737   // If there are more indices after the one we might replace with a zero, make
    738   // sure they're all non-negative. If any of them are negative, the overall
    739   // address being computed might be before the base address determined by the
    740   // first non-zero index.
    741   auto IsAllNonNegative = [&]() {
    742     for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
    743       bool KnownNonNegative, KnownNegative;
    744       IC.ComputeSignBit(GEPI->getOperand(i), KnownNonNegative,
    745                         KnownNegative, 0, MemI);
    746       if (KnownNonNegative)
    747         continue;
    748       return false;
    749     }
    750 
    751     return true;
    752   };
    753 
    754   // FIXME: If the GEP is not inbounds, and there are extra indices after the
    755   // one we'll replace, those could cause the address computation to wrap
    756   // (rendering the IsAllNonNegative() check below insufficient). We can do
    757   // better, ignoring zero indices (and other indices we can prove small
    758   // enough not to wrap).
    759   if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    760     return false;
    761 
    762   // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
    763   // also known to be dereferenceable.
    764   return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
    765          IsAllNonNegative();
    766 }
    767 
    768 // If we're indexing into an object with a variable index for the memory
    769 // access, but the object has only one element, we can assume that the index
    770 // will always be zero. If we replace the GEP, return it.
    771 template <typename T>
    772 static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
    773                                           T &MemI) {
    774   if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    775     unsigned Idx;
    776     if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
    777       Instruction *NewGEPI = GEPI->clone();
    778       NewGEPI->setOperand(Idx,
    779         ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
    780       NewGEPI->insertBefore(GEPI);
    781       MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
    782       return NewGEPI;
    783     }
    784   }
    785 
    786   return nullptr;
    787 }
    788 
    789 Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
    790   Value *Op = LI.getOperand(0);
    791 
    792   // Try to canonicalize the loaded type.
    793   if (Instruction *Res = combineLoadToOperationType(*this, LI))
    794     return Res;
    795 
    796   // Attempt to improve the alignment.
    797   unsigned KnownAlign = getOrEnforceKnownAlignment(
    798       Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, AC, DT);
    799   unsigned LoadAlign = LI.getAlignment();
    800   unsigned EffectiveLoadAlign =
    801       LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());
    802 
    803   if (KnownAlign > EffectiveLoadAlign)
    804     LI.setAlignment(KnownAlign);
    805   else if (LoadAlign == 0)
    806     LI.setAlignment(EffectiveLoadAlign);
    807 
    808   // Replace GEP indices if possible.
    809   if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    810       Worklist.Add(NewGEPI);
    811       return &LI;
    812   }
    813 
    814   if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    815     return Res;
    816 
    817   // Do really simple store-to-load forwarding and load CSE, to catch cases
    818   // where there are several consecutive memory accesses to the same location,
    819   // separated by a few arithmetic operations.
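          // As a schematic example, in "store i32 %x, i32* %p ... %y = load i32, i32* %p"
          // the load can be replaced by %x (or by an earlier equivalent load for CSE).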
    820   BasicBlock::iterator BBI(LI);
    821   AAMDNodes AATags;
    822   bool IsLoadCSE = false;
    823   if (Value *AvailableVal =
    824       FindAvailableLoadedValue(&LI, LI.getParent(), BBI,
    825                                DefMaxInstsToScan, AA, &AATags, &IsLoadCSE)) {
    826     if (IsLoadCSE) {
    827       LoadInst *NLI = cast<LoadInst>(AvailableVal);
    828       unsigned KnownIDs[] = {
    829           LLVMContext::MD_tbaa,            LLVMContext::MD_alias_scope,
    830           LLVMContext::MD_noalias,         LLVMContext::MD_range,
    831           LLVMContext::MD_invariant_load,  LLVMContext::MD_nonnull,
    832           LLVMContext::MD_invariant_group, LLVMContext::MD_align,
    833           LLVMContext::MD_dereferenceable,
    834           LLVMContext::MD_dereferenceable_or_null};
    835       combineMetadata(NLI, &LI, KnownIDs);
    836     }
    837 
    838     return replaceInstUsesWith(
    839         LI, Builder->CreateBitOrPointerCast(AvailableVal, LI.getType(),
    840                                             LI.getName() + ".cast"));
    841   }
    842 
    843   // None of the following transforms are legal for volatile/ordered atomic
    844   // loads.  Most of them do apply for unordered atomics.
    845   if (!LI.isUnordered()) return nullptr;
    846 
    847   // load(gep null, ...) -> unreachable
    848   if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    849     const Value *GEPI0 = GEPI->getOperand(0);
    850     // TODO: Consider a target hook for valid address spaces for this xform.
    851     if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0){
    852       // Insert a new store to null instruction before the load to indicate
    853       // that this code is not reachable.  We do this instead of inserting
    854       // an unreachable instruction directly because we cannot modify the
    855       // CFG.
    856       new StoreInst(UndefValue::get(LI.getType()),
    857                     Constant::getNullValue(Op->getType()), &LI);
    858       return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    859     }
    860   }
    861 
    862   // load null/undef -> unreachable
    863   // TODO: Consider a target hook for valid address spaces for this xform.
    864   if (isa<UndefValue>(Op) ||
    865       (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    866     // Insert a new store to null instruction before the load to indicate that
    867     // this code is not reachable.  We do this instead of inserting an
    868     // unreachable instruction directly because we cannot modify the CFG.
    869     new StoreInst(UndefValue::get(LI.getType()),
    870                   Constant::getNullValue(Op->getType()), &LI);
    871     return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    872   }
    873 
    874   if (Op->hasOneUse()) {
    875     // Change select and PHI nodes to select values instead of addresses: this
    876     // helps alias analysis out a lot, allows many other simplifications, and
    877     // exposes redundancy in the code.
    878     //
    879     // Note that we cannot do the transformation unless we know that the
    880     // introduced loads cannot trap!  Something like this is valid as long as
    881     // the condition is always false: load (select bool %C, int* null, int* %G),
    882     // but it would not be valid if we transformed it to load from null
    883     // unconditionally.
    884     //
    885     if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
    886       // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
    887       unsigned Align = LI.getAlignment();
    888       if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) &&
    889           isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) {
    890         LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
    891                                            SI->getOperand(1)->getName()+".val");
    892         LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
    893                                            SI->getOperand(2)->getName()+".val");
    894         assert(LI.isUnordered() && "implied by above");
    895         V1->setAlignment(Align);
    896         V1->setAtomic(LI.getOrdering(), LI.getSynchScope());
    897         V2->setAlignment(Align);
    898         V2->setAtomic(LI.getOrdering(), LI.getSynchScope());
    899         return SelectInst::Create(SI->getCondition(), V1, V2);
    900       }
    901 
    902       // load (select (cond, null, P)) -> load P
    903       if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
    904           LI.getPointerAddressSpace() == 0) {
    905         LI.setOperand(0, SI->getOperand(2));
    906         return &LI;
    907       }
    908 
    909       // load (select (cond, P, null)) -> load P
    910       if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
    911           LI.getPointerAddressSpace() == 0) {
    912         LI.setOperand(0, SI->getOperand(1));
    913         return &LI;
    914       }
    915     }
    916   }
    917   return nullptr;
    918 }
    919 
    920 /// \brief Look for extractelement/insertvalue sequence that acts like a bitcast.
    921 ///
    922 /// \returns underlying value that was "cast", or nullptr otherwise.
    923 ///
    924 /// For example, if we have:
    925 ///
    926 ///     %E0 = extractelement <2 x double> %U, i32 0
    927 ///     %V0 = insertvalue [2 x double] undef, double %E0, 0
    928 ///     %E1 = extractelement <2 x double> %U, i32 1
    929 ///     %V1 = insertvalue [2 x double] %V0, double %E1, 1
    930 ///
    931 /// and the layout of a <2 x double> is isomorphic to a [2 x double],
    932 /// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
    933 /// Note that %U may contain non-undef values where %V1 has undef.
    934 static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) {
    935   Value *U = nullptr;
    936   while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    937     auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    938     if (!E)
    939       return nullptr;
    940     auto *W = E->getVectorOperand();
    941     if (!U)
    942       U = W;
    943     else if (U != W)
    944       return nullptr;
    945     auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    946     if (!CI || IV->getNumIndices() != 1 || CI->getZExtValue() != *IV->idx_begin())
    947       return nullptr;
    948     V = IV->getAggregateOperand();
    949   }
    950   if (!isa<UndefValue>(V) || !U)
    951     return nullptr;
    952 
    953   auto *UT = cast<VectorType>(U->getType());
    954   auto *VT = V->getType();
    955   // Check that types UT and VT are bitwise isomorphic.
    956   const auto &DL = IC.getDataLayout();
    957   if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    958     return nullptr;
    959   }
    960   if (auto *AT = dyn_cast<ArrayType>(VT)) {
    961     if (AT->getNumElements() != UT->getNumElements())
    962       return nullptr;
    963   } else {
    964     auto *ST = cast<StructType>(VT);
    965     if (ST->getNumElements() != UT->getNumElements())
    966       return nullptr;
    967     for (const auto *EltT : ST->elements()) {
    968       if (EltT != UT->getElementType())
    969         return nullptr;
    970     }
    971   }
    972   return U;
    973 }
    974 
    975 /// \brief Combine stores to match the type of value being stored.
    976 ///
    977 /// The core idea here is that the memory does not have any intrinsic type and,
    978 /// where we can, we should match the type of a store to the type of the value
    979 /// being stored.
    980 ///
    981 /// However, this routine must never change the width of a store or the number of
    982 /// stores as that would introduce a semantic change. This combine is expected to
    983 /// be a semantic no-op which just allows stores to more closely model the types
    984 /// of their incoming values.
    985 ///
    986 /// Currently, we also refuse to change the precise type used for an atomic or
    987 /// volatile store. This is debatable, and might be reasonable to change later.
    988 /// However, it is risky in case some backend or other part of LLVM is relying
    989 /// on the exact type stored to select appropriate atomic operations.
    990 ///
    991 /// \returns true if the store was successfully combined away. This indicates
    992 /// the caller must erase the store instruction. We have to let the caller erase
    993 /// the store instruction as otherwise there is no way to signal whether it was
    994 /// combined or not: IC.eraseInstFromFunction returns a null pointer.
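        ///
        /// As a schematic illustration (hypothetical IR), a store of a bitcast value
        ///
        ///     %i = bitcast double %d to i64
        ///     store i64 %i, i64* %p
        ///
        /// is rewritten to store the original double through a cast pointer:
        ///
        ///     %p.cast = bitcast i64* %p to double*
        ///     store double %d, double* %p.cast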
    995 static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
    996   // FIXME: We could probably with some care handle both volatile and ordered
    997   // atomic stores here but it isn't clear that this is important.
    998   if (!SI.isUnordered())
    999     return false;
   1000 
   1001   Value *V = SI.getValueOperand();
   1002 
   1003   // Fold away bit casts of the stored value by storing the original type.
   1004   if (auto *BC = dyn_cast<BitCastInst>(V)) {
   1005     V = BC->getOperand(0);
   1006     combineStoreToNewValue(IC, SI, V);
   1007     return true;
   1008   }
   1009 
   1010   if (Value *U = likeBitCastFromVector(IC, V)) {
   1011     combineStoreToNewValue(IC, SI, U);
   1012     return true;
   1013   }
   1014 
   1015   // FIXME: We should also canonicalize stores of vectors when their elements
   1016   // are cast to other types.
   1017   return false;
   1018 }
   1019 
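        /// Unpack a store of a small aggregate into stores of its elements.
        ///
        /// As a schematic illustration (hypothetical IR), storing a padding-free
        /// two-element struct
        ///
        ///     store { i32, i32 } %agg, { i32, i32 }* %p
        ///
        /// is rewritten into an extractvalue, GEP, and store per element.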
   1020 static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
   1021   // FIXME: We could probably with some care handle both volatile and atomic
   1022   // stores here but it isn't clear that this is important.
   1023   if (!SI.isSimple())
   1024     return false;
   1025 
   1026   Value *V = SI.getValueOperand();
   1027   Type *T = V->getType();
   1028 
   1029   if (!T->isAggregateType())
   1030     return false;
   1031 
   1032   if (auto *ST = dyn_cast<StructType>(T)) {
   1033     // If the struct only has one element, we unpack.
   1034     unsigned Count = ST->getNumElements();
   1035     if (Count == 1) {
   1036       V = IC.Builder->CreateExtractValue(V, 0);
   1037       combineStoreToNewValue(IC, SI, V);
   1038       return true;
   1039     }
   1040 
   1041     // We don't want to break stores with padding here as we'd lose
   1042     // the knowledge that padding exists for the rest of the pipeline.
   1043     const DataLayout &DL = IC.getDataLayout();
   1044     auto *SL = DL.getStructLayout(ST);
   1045     if (SL->hasPadding())
   1046       return false;
   1047 
   1048     auto Align = SI.getAlignment();
   1049     if (!Align)
   1050       Align = DL.getABITypeAlignment(ST);
   1051 
   1052     SmallString<16> EltName = V->getName();
   1053     EltName += ".elt";
   1054     auto *Addr = SI.getPointerOperand();
   1055     SmallString<16> AddrName = Addr->getName();
   1056     AddrName += ".repack";
   1057 
   1058     auto *IdxType = Type::getInt32Ty(ST->getContext());
   1059     auto *Zero = ConstantInt::get(IdxType, 0);
   1060     for (unsigned i = 0; i < Count; i++) {
   1061       Value *Indices[2] = {
   1062         Zero,
   1063         ConstantInt::get(IdxType, i),
   1064       };
   1065       auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
   1066                                                 AddrName);
   1067       auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
   1068       auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
   1069       IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
   1070     }
   1071 
   1072     return true;
   1073   }
   1074 
   1075   if (auto *AT = dyn_cast<ArrayType>(T)) {
   1076     // If the array only has one element, we unpack.
   1077     auto NumElements = AT->getNumElements();
   1078     if (NumElements == 1) {
   1079       V = IC.Builder->CreateExtractValue(V, 0);
   1080       combineStoreToNewValue(IC, SI, V);
   1081       return true;
   1082     }
   1083 
   1084     const DataLayout &DL = IC.getDataLayout();
   1085     auto EltSize = DL.getTypeAllocSize(AT->getElementType());
   1086     auto Align = SI.getAlignment();
   1087     if (!Align)
   1088       Align = DL.getABITypeAlignment(T);
   1089 
   1090     SmallString<16> EltName = V->getName();
   1091     EltName += ".elt";
   1092     auto *Addr = SI.getPointerOperand();
   1093     SmallString<16> AddrName = Addr->getName();
   1094     AddrName += ".repack";
   1095 
   1096     auto *IdxType = Type::getInt64Ty(T->getContext());
   1097     auto *Zero = ConstantInt::get(IdxType, 0);
   1098 
   1099     uint64_t Offset = 0;
   1100     for (uint64_t i = 0; i < NumElements; i++) {
   1101       Value *Indices[2] = {
   1102         Zero,
   1103         ConstantInt::get(IdxType, i),
   1104       };
   1105       auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
   1106                                                 AddrName);
   1107       auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
   1108       auto EltAlign = MinAlign(Align, Offset);
   1109       IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
   1110       Offset += EltSize;
   1111     }
   1112 
   1113     return true;
   1114   }
   1115 
   1116   return false;
   1117 }
   1118 
   1119 /// equivalentAddressValues - Test if A and B will obviously have the same
   1120 /// value. This includes recognizing that %t0 and %t1 will have the same
   1121 /// value in code like this:
   1122 ///   %t0 = getelementptr \@a, 0, 3
   1123 ///   store i32 0, i32* %t0
   1124 ///   %t1 = getelementptr \@a, 0, 3
   1125 ///   %t2 = load i32* %t1
   1126 ///
   1127 static bool equivalentAddressValues(Value *A, Value *B) {
   1128   // Test if the values are trivially equivalent.
   1129   if (A == B) return true;
   1130 
   1131   // Test if the values come from identical arithmetic instructions.
   1132   // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
   1133   // it's only used to compare two uses within the same basic block, which
   1134   // means that they'll always either have the same value or one of them
   1135   // will have an undefined value.
   1136   if (isa<BinaryOperator>(A) ||
   1137       isa<CastInst>(A) ||
   1138       isa<PHINode>(A) ||
   1139       isa<GetElementPtrInst>(A))
   1140     if (Instruction *BI = dyn_cast<Instruction>(B))
   1141       if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
   1142         return true;
   1143 
   1144   // Otherwise they may not be equivalent.
   1145   return false;
   1146 }
   1147 
   1148 Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
   1149   Value *Val = SI.getOperand(0);
   1150   Value *Ptr = SI.getOperand(1);
   1151 
   1152   // Try to canonicalize the stored type.
   1153   if (combineStoreToValueType(*this, SI))
   1154     return eraseInstFromFunction(SI);
   1155 
   1156   // Attempt to improve the alignment.
   1157   unsigned KnownAlign = getOrEnforceKnownAlignment(
   1158       Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, AC, DT);
   1159   unsigned StoreAlign = SI.getAlignment();
   1160   unsigned EffectiveStoreAlign =
   1161       StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());
   1162 
   1163   if (KnownAlign > EffectiveStoreAlign)
   1164     SI.setAlignment(KnownAlign);
   1165   else if (StoreAlign == 0)
   1166     SI.setAlignment(EffectiveStoreAlign);
   1167 
   1168   // Try to unpack the stored aggregate into stores of its elements.
   1169   if (unpackStoreToAggregate(*this, SI))
   1170     return eraseInstFromFunction(SI);
   1171 
   1172   // Replace GEP indices if possible.
   1173   if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
   1174       Worklist.Add(NewGEPI);
   1175       return &SI;
   1176   }
   1177 
   1178   // Don't hack volatile/ordered stores.
   1179   // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
   1180   if (!SI.isUnordered()) return nullptr;
   1181 
   1182   // If the pointer operand is an alloca with a single use, zap the store,
   1183   // making the alloca dead.
   1184   if (Ptr->hasOneUse()) {
   1185     if (isa<AllocaInst>(Ptr))
   1186       return eraseInstFromFunction(SI);
   1187     if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
   1188       if (isa<AllocaInst>(GEP->getOperand(0))) {
   1189         if (GEP->getOperand(0)->hasOneUse())
   1190           return eraseInstFromFunction(SI);
   1191       }
   1192     }
   1193   }
   1194 
   1195   // Do really simple DSE, to catch cases where there are several consecutive
   1196   // stores to the same location, separated by a few arithmetic operations. This
   1197   // situation often occurs with bitfield accesses.
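          // As a schematic example, in "store i32 %a, i32* %p ... store i32 %b, i32* %p"
          // with only arithmetic in between, the earlier store can be deleted.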
   1198   BasicBlock::iterator BBI(SI);
   1199   for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
   1200        --ScanInsts) {
   1201     --BBI;
   1202     // Don't count debug info directives, lest they affect codegen,
   1203     // and skip pointer-to-pointer bitcasts, which are NOPs.
   1204     if (isa<DbgInfoIntrinsic>(BBI) ||
   1205         (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
   1206       ScanInsts++;
   1207       continue;
   1208     }
   1209 
   1210     if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
   1211       // Prev store isn't volatile or ordered, and stores to the same location?
   1212       if (PrevSI->isUnordered() && equivalentAddressValues(PrevSI->getOperand(1),
   1213                                                         SI.getOperand(1))) {
   1214         ++NumDeadStore;
   1215         ++BBI;
   1216         eraseInstFromFunction(*PrevSI);
   1217         continue;
   1218       }
   1219       break;
   1220     }
   1221 
   1222     // If this is a load, we have to stop.  However, if the load is from the
   1223     // pointer we're storing to and produces the value we're storing,
   1224     // then *this* store is dead (X = load P; store X -> P).
   1225     if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
   1226       if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
   1227         assert(SI.isUnordered() && "can't eliminate ordering operation");
   1228         return eraseInstFromFunction(SI);
   1229       }
   1230 
   1231       // Otherwise, this is a load from some other location.  Stores before it
   1232       // may not be dead.
   1233       break;
   1234     }
   1235 
   1236     // Don't skip over anything else that may read from or write to memory.
   1237     if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
   1238       break;
   1239   }
   1240 
   1241   // store X, null    -> turns into 'unreachable' in SimplifyCFG
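          // Storing to a null pointer in address space 0 is undefined behavior; keep
          // the store so SimplifyCFG can turn it into 'unreachable', but replace the
          // stored value with undef to drop the old value's use.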
   1242   if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
   1243     if (!isa<UndefValue>(Val)) {
   1244       SI.setOperand(0, UndefValue::get(Val->getType()));
   1245       if (Instruction *U = dyn_cast<Instruction>(Val))
   1246         Worklist.Add(U);  // Dropped a use.
   1247     }
   1248     return nullptr;  // Do not modify these!
   1249   }
   1250 
   1251   // store undef, Ptr -> noop
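          // (Undef may take any value, so the store conveys no information.)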
   1252   if (isa<UndefValue>(Val))
   1253     return eraseInstFromFunction(SI);
   1254 
   1255   // If this store is the last instruction in the basic block (possibly
   1256   // excepting debug info instructions and no-op pointer bitcasts), and if the
   1257   // unconditional branch, try to move it to the successor block.
   1258   BBI = SI.getIterator();
   1259   do {
   1260     ++BBI;
   1261   } while (isa<DbgInfoIntrinsic>(BBI) ||
   1262            (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
   1263   if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
   1264     if (BI->isUnconditional())
   1265       if (SimplifyStoreAtEndOfBlock(SI))
   1266         return nullptr;  // xform done!
   1267 
   1268   return nullptr;
   1269 }
   1270 
   1271 /// SimplifyStoreAtEndOfBlock - Turn things like:
   1272 ///   if () { *P = v1; } else { *P = v2; }
   1273 /// into a phi node with a store in the successor.
   1274 ///
   1275 /// Simplify things like:
   1276 ///   *P = v1; if () { *P = v2; }
   1277 /// into a phi node with a store in the successor.
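        ///
        /// For example (illustrative IR), with stores of %v1 and %v2 to %P in the
        /// two predecessors of %merge, the merged code in %merge looks like:
        ///   %storemerge = phi i32 [ %v1, %if.then ], [ %v2, %if.else ]
        ///   store i32 %storemerge, i32* %P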
   1278 ///
   1279 bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
   1280   assert(SI.isUnordered() &&
   1281          "this code has not been auditted for volatile or ordered store case");
   1282 
   1283   BasicBlock *StoreBB = SI.getParent();
   1284 
   1285   // Check to see if the successor block has exactly two incoming edges.  If
   1286   // so, see if the other predecessor contains a store to the same location.
   1287   // If so, insert a PHI node (if needed) and move the stores down.
   1288   BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
   1289 
   1290   // Determine whether Dest has exactly two predecessors and, if so, compute
   1291   // the other predecessor.
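          // The scan below only succeeds when DestBB has exactly two predecessors
          // and StoreBB is one of them; OtherBB ends up as the other predecessor.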
   1292   pred_iterator PI = pred_begin(DestBB);
   1293   BasicBlock *P = *PI;
   1294   BasicBlock *OtherBB = nullptr;
   1295 
   1296   if (P != StoreBB)
   1297     OtherBB = P;
   1298 
   1299   if (++PI == pred_end(DestBB))
   1300     return false;
   1301 
   1302   P = *PI;
   1303   if (P != StoreBB) {
   1304     if (OtherBB)
   1305       return false;
   1306     OtherBB = P;
   1307   }
   1308   if (++PI != pred_end(DestBB))
   1309     return false;
   1310 
   1311   // Bail out if the relevant blocks aren't all distinct (this can happen,
   1312   // for example, if SI is in an infinite loop).
   1313   if (StoreBB == DestBB || OtherBB == DestBB)
   1314     return false;
   1315 
   1316   // Verify that the other block ends in a branch and is not otherwise empty.
   1317   BasicBlock::iterator BBI(OtherBB->getTerminator());
   1318   BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
   1319   if (!OtherBr || BBI == OtherBB->begin())
   1320     return false;
   1321 
   1322   // If the other block ends in an unconditional branch, check for the 'if then
   1323   // else' case.  We know there is at least one instruction before the branch.
   1324   StoreInst *OtherStore = nullptr;
   1325   if (OtherBr->isUnconditional()) {
   1326     --BBI;
   1327     // Skip over debugging info.
   1328     while (isa<DbgInfoIntrinsic>(BBI) ||
   1329            (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
   1330       if (BBI==OtherBB->begin())
   1331         return false;
   1332       --BBI;
   1333     }
   1334     // If this isn't a store, doesn't store to the same location, or isn't the
   1335     // same kind of store (e.g. volatility or ordering differ), bail out.
   1336     OtherStore = dyn_cast<StoreInst>(BBI);
   1337     if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
   1338         !SI.isSameOperationAs(OtherStore))
   1339       return false;
   1340   } else {
   1341     // Otherwise, the other block ended with a conditional branch. If one of the
   1342     // destinations is StoreBB, then we have the if/then case.
   1343     if (OtherBr->getSuccessor(0) != StoreBB &&
   1344         OtherBr->getSuccessor(1) != StoreBB)
   1345       return false;
   1346 
   1347     // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
   1348     // if/then triangle.  See if there is a store to the same ptr as SI that
   1349     // lives in OtherBB.
   1350     for (;; --BBI) {
   1351       // Check to see if we find the matching store.
   1352       if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
   1353         if (OtherStore->getOperand(1) != SI.getOperand(1) ||
   1354             !SI.isSameOperationAs(OtherStore))
   1355           return false;
   1356         break;
   1357       }
   1358       // If we find something that may be using or overwriting the stored
   1359       // value, or if we run out of instructions, we can't do the xform.
   1360       if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
   1361           BBI == OtherBB->begin())
   1362         return false;
   1363     }
   1364 
   1365     // In order to eliminate the store in OtherBB, we have to
   1366     // make sure nothing reads or overwrites the stored value in
   1367     // StoreBB.
   1368     for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
   1369       // FIXME: This should really be AA driven.
   1370       if (I->mayReadFromMemory() || I->mayWriteToMemory())
   1371         return false;
   1372     }
   1373   }
   1374 
   1375   // Insert a PHI node now if we need it.
   1376   Value *MergedVal = OtherStore->getOperand(0);
   1377   if (MergedVal != SI.getOperand(0)) {
   1378     PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
   1379     PN->addIncoming(SI.getOperand(0), SI.getParent());
   1380     PN->addIncoming(OtherStore->getOperand(0), OtherBB);
   1381     MergedVal = InsertNewInstBefore(PN, DestBB->front());
   1382   }
   1383 
   1384   // Advance to a place where it is safe to insert the new store and
   1385   // insert it.
   1386   BBI = DestBB->getFirstInsertionPt();
   1387   StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
   1388                                    SI.isVolatile(),
   1389                                    SI.getAlignment(),
   1390                                    SI.getOrdering(),
   1391                                    SI.getSynchScope());
   1392   InsertNewInstBefore(NewSI, *BBI);
   1393   NewSI->setDebugLoc(OtherStore->getDebugLoc());
   1394 
   1395   // If the two stores had AA tags, merge them.
   1396   AAMDNodes AATags;
   1397   SI.getAAMetadata(AATags);
   1398   if (AATags) {
   1399     OtherStore->getAAMetadata(AATags, /* Merge = */ true);
   1400     NewSI->setAAMetadata(AATags);
   1401   }
   1402 
   1403   // Nuke the old stores.
   1404   eraseInstFromFunction(SI);
   1405   eraseInstFromFunction(*OtherStore);
   1406   return true;
   1407 }
   1408