//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well known scalar replacement of
// aggregates transformation.  This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible).  Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because they
// often interact, especially for C++ programs.  As such, iterating between
// SRoA, then Mem2Reg until we run out of things to promote works well.
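//
// For example (an illustrative sketch), an alloca of a two-member struct:
//   %p = alloca { i32, float }
// is split into one alloca per member:
//   %p.0 = alloca i32
//   %p.1 = alloca float
// after which mem2reg can promote each piece to an SSA value.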
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;

#define DEBUG_TYPE "scalarrepl"

STATISTIC(NumReplaced,  "Number of allocas broken up");
STATISTIC(NumPromoted,  "Number of allocas promoted");
STATISTIC(NumAdjusted,  "Number of scalar allocas adjusted to allow promotion");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");

namespace {
  struct SROA : public FunctionPass {
    SROA(int T, bool hasDT, char &ID, int ST, int AT, int SLT)
      : FunctionPass(ID), HasDomTree(hasDT) {
      if (T == -1)
        SRThreshold = 128;
      else
        SRThreshold = T;
      if (ST == -1)
        StructMemberThreshold = 32;
      else
        StructMemberThreshold = ST;
      if (AT == -1)
        ArrayElementThreshold = 8;
      else
        ArrayElementThreshold = AT;
      if (SLT == -1)
        // Do not limit the scalar integer load size if no threshold is given.
        ScalarLoadThreshold = -1;
      else
        ScalarLoadThreshold = SLT;
    }

    bool runOnFunction(Function &F) override;

    bool performScalarRepl(Function &F);
    bool performPromotion(Function &F);

  private:
    bool HasDomTree;
    const DataLayout *DL;

    /// DeadInsts - Keep track of instructions we have made dead, so that
    /// we can remove them after we are done working.
    SmallVector<Value*, 32> DeadInsts;

    /// AllocaInfo - When analyzing uses of an alloca instruction, this captures
    /// information about the uses.  All these fields are initialized to false
    /// and set to true when something is learned.
    struct AllocaInfo {
      /// The alloca to promote.
      AllocaInst *AI;

      /// CheckedPHIs - This is a set of verified PHI nodes, to prevent infinite
      /// looping and avoid redundant work.
      SmallPtrSet<PHINode*, 8> CheckedPHIs;

      /// isUnsafe - This is set to true if the alloca cannot be SROA'd.
      bool isUnsafe : 1;

      /// isMemCpySrc - This is true if this aggregate is memcpy'd from.
      bool isMemCpySrc : 1;

      /// isMemCpyDst - This is true if this aggregate is memcpy'd into.
      bool isMemCpyDst : 1;

      /// hasSubelementAccess - This is true if a subelement of the alloca is
      /// ever accessed, or false if the alloca is only accessed with mem
      /// intrinsics or load/store that only access the entire alloca at once.
      bool hasSubelementAccess : 1;

      /// hasALoadOrStore - This is true if there are any loads or stores to it.
      /// The alloca may just be accessed with memcpy, for example, which would
      /// not set this.
      bool hasALoadOrStore : 1;

      explicit AllocaInfo(AllocaInst *ai)
        : AI(ai), isUnsafe(false), isMemCpySrc(false), isMemCpyDst(false),
          hasSubelementAccess(false), hasALoadOrStore(false) {}
    };

    /// SRThreshold - The maximum alloca size to be considered for SROA.
    unsigned SRThreshold;

    /// StructMemberThreshold - The maximum number of members a struct can
    /// contain to be considered for SROA.
    unsigned StructMemberThreshold;

    /// ArrayElementThreshold - The maximum number of elements an array can
    /// have to be considered for SROA.
    unsigned ArrayElementThreshold;

    /// ScalarLoadThreshold - The maximum size in bits of scalars to load when
    /// converting to scalar.
    unsigned ScalarLoadThreshold;

    void MarkUnsafe(AllocaInfo &I, Instruction *User) {
      I.isUnsafe = true;
      DEBUG(dbgs() << "  Transformation preventing inst: " << *User << '\n');
    }

    bool isSafeAllocaToScalarRepl(AllocaInst *AI);

    void isSafeForScalarRepl(Instruction *I, uint64_t Offset, AllocaInfo &Info);
    void isSafePHISelectUseForScalarRepl(Instruction *User, uint64_t Offset,
                                         AllocaInfo &Info);
    void isSafeGEP(GetElementPtrInst *GEPI, uint64_t &Offset, AllocaInfo &Info);
    void isSafeMemAccess(uint64_t Offset, uint64_t MemSize,
                         Type *MemOpType, bool isStore, AllocaInfo &Info,
                         Instruction *TheAccess, bool AllowWholeAccess);
    bool TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size);
    uint64_t FindElementAndOffset(Type *&T, uint64_t &Offset,
                                  Type *&IdxTy);

    void DoScalarReplacement(AllocaInst *AI,
                             std::vector<AllocaInst*> &WorkList);
    void DeleteDeadInstructions();

    void RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
                              SmallVectorImpl<AllocaInst *> &NewElts);
    void RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
                        SmallVectorImpl<AllocaInst *> &NewElts);
    void RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
                    SmallVectorImpl<AllocaInst *> &NewElts);
    void RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI,
                                  uint64_t Offset,
                                  SmallVectorImpl<AllocaInst *> &NewElts);
    void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
                                      AllocaInst *AI,
                                      SmallVectorImpl<AllocaInst *> &NewElts);
    void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
                                       SmallVectorImpl<AllocaInst *> &NewElts);
    void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
                                      SmallVectorImpl<AllocaInst *> &NewElts);
    bool ShouldAttemptScalarRepl(AllocaInst *AI);
  };

  // SROA_DT - SROA that uses DominatorTree.
  struct SROA_DT : public SROA {
    static char ID;
  public:
    SROA_DT(int T = -1, int ST = -1, int AT = -1, int SLT = -1) :
        SROA(T, true, ID, ST, AT, SLT) {
      initializeSROA_DTPass(*PassRegistry::getPassRegistry());
    }

    // getAnalysisUsage - This pass does not require any passes, but we know it
    // will not alter the CFG, so say so.
    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.setPreservesCFG();
    }
  };

  // SROA_SSAUp - SROA that uses SSAUpdater.
  struct SROA_SSAUp : public SROA {
    static char ID;
  public:
    SROA_SSAUp(int T = -1, int ST = -1, int AT = -1, int SLT = -1) :
        SROA(T, false, ID, ST, AT, SLT) {
      initializeSROA_SSAUpPass(*PassRegistry::getPassRegistry());
    }

    // getAnalysisUsage - This pass does not require any passes, but we know it
    // will not alter the CFG, so say so.
    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
    }
  };

}

char SROA_DT::ID = 0;
char SROA_SSAUp::ID = 0;

INITIALIZE_PASS_BEGIN(SROA_DT, "scalarrepl",
                "Scalar Replacement of Aggregates (DT)", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(SROA_DT, "scalarrepl",
                "Scalar Replacement of Aggregates (DT)", false, false)

INITIALIZE_PASS_BEGIN(SROA_SSAUp, "scalarrepl-ssa",
                      "Scalar Replacement of Aggregates (SSAUp)", false, false)
INITIALIZE_PASS_END(SROA_SSAUp, "scalarrepl-ssa",
                    "Scalar Replacement of Aggregates (SSAUp)", false, false)

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass(int Threshold,
                                                   bool UseDomTree,
                                                   int StructMemberThreshold,
                                                   int ArrayElementThreshold,
                                                   int ScalarLoadThreshold) {
  if (UseDomTree)
    return new SROA_DT(Threshold, StructMemberThreshold, ArrayElementThreshold,
                       ScalarLoadThreshold);
  return new SROA_SSAUp(Threshold, StructMemberThreshold,
                        ArrayElementThreshold, ScalarLoadThreshold);
}
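
// A typical client (a sketch; the default arguments are assumptions taken
// from the declaration in llvm/Transforms/Scalar.h) schedules this pass as:
//   PM.add(llvm::createScalarReplAggregatesPass());           // DomTree-based
//   PM.add(llvm::createScalarReplAggregatesPass(128, false)); // SSAUpdater-based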


//===----------------------------------------------------------------------===//
// Convert To Scalar Optimization.
//===----------------------------------------------------------------------===//

namespace {
/// ConvertToScalarInfo - This class implements the "Convert To Scalar"
/// optimization, which scans the uses of an alloca and determines if it can
/// rewrite it in terms of a single new alloca that can be mem2reg'd.
class ConvertToScalarInfo {
  /// AllocaSize - The size of the alloca being considered in bytes.
  unsigned AllocaSize;
  const DataLayout &DL;
  unsigned ScalarLoadThreshold;

  /// IsNotTrivial - This is set to true if there is some access to the object
  /// which means that mem2reg can't promote it.
  bool IsNotTrivial;

  /// ScalarKind - Tracks the kind of alloca being considered for promotion,
  /// computed based on the uses of the alloca rather than the LLVM type system.
  enum {
    Unknown,

    // Accesses via GEPs that are consistent with element access of a vector
    // type. This will not be converted into a vector unless there is a later
    // access using an actual vector type.
    ImplicitVector,

    // Accesses via vector operations and GEPs that are consistent with the
    // layout of a vector type.
    Vector,

    // An integer bag-of-bits with bitwise operations for insertion and
    // extraction. Any combination of types can be converted into this kind
    // of scalar.
    Integer
  } ScalarKind;

  /// VectorTy - This tracks the type that we should promote the vector to if
  /// it is possible to turn it into a vector.  This starts out null, and if it
  /// isn't possible to turn into a vector type, it gets set to VoidTy.
  VectorType *VectorTy;

  /// HadNonMemTransferAccess - True if there is at least one access to the
  /// alloca that is not a MemTransferInst.  We don't want to turn structs into
  /// large integers unless there is some potential for optimization.
  bool HadNonMemTransferAccess;

  /// HadDynamicAccess - True if some element of this alloca was dynamic.
  /// We don't yet have support for turning a dynamic access into a large
  /// integer.
  bool HadDynamicAccess;

public:
  explicit ConvertToScalarInfo(unsigned Size, const DataLayout &DL,
                               unsigned SLT)
    : AllocaSize(Size), DL(DL), ScalarLoadThreshold(SLT), IsNotTrivial(false),
    ScalarKind(Unknown), VectorTy(nullptr), HadNonMemTransferAccess(false),
    HadDynamicAccess(false) { }

  AllocaInst *TryConvert(AllocaInst *AI);

private:
  bool CanConvertToScalar(Value *V, uint64_t Offset, Value* NonConstantIdx);
  void MergeInTypeForLoadOrStore(Type *In, uint64_t Offset);
  bool MergeInVectorType(VectorType *VInTy, uint64_t Offset);
  void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset,
                           Value *NonConstantIdx);

  Value *ConvertScalar_ExtractValue(Value *NV, Type *ToType,
                                    uint64_t Offset, Value* NonConstantIdx,
                                    IRBuilder<> &Builder);
  Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal,
                                   uint64_t Offset, Value* NonConstantIdx,
                                   IRBuilder<> &Builder);
};
} // end anonymous namespace.


/// TryConvert - Analyze the specified alloca, and if it is safe to do so,
/// rewrite it to be a new alloca which is mem2reg'able.  This returns the new
/// alloca if possible or null if not.
AllocaInst *ConvertToScalarInfo::TryConvert(AllocaInst *AI) {
  // If we can't convert this scalar, or if mem2reg can trivially do it, bail
  // out.
  if (!CanConvertToScalar(AI, 0, nullptr) || !IsNotTrivial)
    return nullptr;

  // If an alloca has only memset / memcpy uses, it may still have an Unknown
  // ScalarKind. Treat it as an Integer below.
  if (ScalarKind == Unknown)
    ScalarKind = Integer;

  if (ScalarKind == Vector && VectorTy->getBitWidth() != AllocaSize * 8)
    ScalarKind = Integer;

  // If we were able to find a vector type that can handle this with
  // insert/extract elements, and if there was at least one use that had
  // a vector type, promote this to a vector.  We don't want to promote
  // random stuff that doesn't use vectors (e.g. <9 x double>) because then
  // we just get a lot of insert/extracts.  If at least one vector is
  // involved, then we probably really do have a union of vector/array.
  Type *NewTy;
  if (ScalarKind == Vector) {
    assert(VectorTy && "Missing type for vector scalar.");
    DEBUG(dbgs() << "CONVERT TO VECTOR: " << *AI << "\n  TYPE = "
          << *VectorTy << '\n');
    NewTy = VectorTy;  // Use the vector type.
  } else {
    unsigned BitWidth = AllocaSize * 8;

    // Do not convert to scalar integer if the alloca size exceeds the
    // scalar load threshold.
    if (BitWidth > ScalarLoadThreshold)
      return nullptr;

    if ((ScalarKind == ImplicitVector || ScalarKind == Integer) &&
        !HadNonMemTransferAccess && !DL.fitsInLegalInteger(BitWidth))
      return nullptr;
    // Dynamic accesses on integers aren't yet supported.  They need us to shift
    // by a dynamic amount which could be difficult to work out as we might not
    // know whether to use a left or right shift.
    if (ScalarKind == Integer && HadDynamicAccess)
      return nullptr;

    DEBUG(dbgs() << "CONVERT TO SCALAR INTEGER: " << *AI << "\n");
    // Create and insert the integer alloca.
    NewTy = IntegerType::get(AI->getContext(), BitWidth);
  }
  AllocaInst *NewAI = new AllocaInst(NewTy, nullptr, "",
                                     AI->getParent()->begin());
  ConvertUsesToScalar(AI, NewAI, 0, nullptr);
  return NewAI;
}

/// MergeInTypeForLoadOrStore - Add the 'In' type to the accumulated vector type
/// (VectorTy) so far at the offset specified by Offset (which is specified in
/// bytes).
///
/// There are two cases we handle here:
///   1) A union of vector types of the same size and potentially its elements.
///      Here we turn element accesses into insert/extract element operations.
///      This promotes a <4 x float> with a store of float to the third element
///      into a <4 x float> that uses insert element.
///   2) A fully general blob of memory, which we turn into some (potentially
///      large) integer type with extract and insert operations where the loads
///      and stores would mutate the memory.  We mark this by setting VectorTy
///      to VoidTy.
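///
/// As a sketch of case 1: for a 16-byte alloca, a store of float at byte
/// offset 8 implies element 2 of a <4 x float> (Offset/EltSize == 8/4 == 2),
/// and the later rewrite in ConvertUsesToScalar emits an insertelement at
/// that index.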
void ConvertToScalarInfo::MergeInTypeForLoadOrStore(Type *In,
                                                    uint64_t Offset) {
  // If we already decided to turn this into a blob of integer memory, there is
  // nothing to be done.
  if (ScalarKind == Integer)
    return;

  // If this could be contributing to a vector, analyze it.

  // If the In type is a vector that is the same size as the alloca, see if it
  // matches the existing VecTy.
  if (VectorType *VInTy = dyn_cast<VectorType>(In)) {
    if (MergeInVectorType(VInTy, Offset))
      return;
  } else if (In->isFloatTy() || In->isDoubleTy() ||
             (In->isIntegerTy() && In->getPrimitiveSizeInBits() >= 8 &&
              isPowerOf2_32(In->getPrimitiveSizeInBits()))) {
    // Full width accesses can be ignored, because they can always be turned
    // into bitcasts.
    unsigned EltSize = In->getPrimitiveSizeInBits()/8;
    if (EltSize == AllocaSize)
      return;

    // If we're accessing something that could be an element of a vector, see
    // if the implied vector agrees with what we already have and if Offset is
    // compatible with it.
    if (Offset % EltSize == 0 && AllocaSize % EltSize == 0 &&
        (!VectorTy || EltSize == VectorTy->getElementType()
                                         ->getPrimitiveSizeInBits()/8)) {
      if (!VectorTy) {
        ScalarKind = ImplicitVector;
        VectorTy = VectorType::get(In, AllocaSize/EltSize);
      }
      return;
    }
  }

  // Otherwise, we have a case that we can't handle with an optimized vector
  // form.  We can still turn this into a large integer.
  ScalarKind = Integer;
}

/// MergeInVectorType - Handles the vector case of MergeInTypeForLoadOrStore,
/// returning true if the type was successfully merged and false otherwise.
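/// E.g. (a sketch): if a <4 x float> access was already recorded and a
/// <2 x double> load of the same 16 bytes arrives at Offset 0, the first type
/// is kept; same-size vector types are reconciled later via bitcasts.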
bool ConvertToScalarInfo::MergeInVectorType(VectorType *VInTy,
                                            uint64_t Offset) {
  if (VInTy->getBitWidth()/8 == AllocaSize && Offset == 0) {
    // If we're storing/loading a vector of the right size, allow it as a
    // vector.  If this is the first vector we see, remember the type so that
    // we know the element size. If this is a subsequent access, ignore it
    // even if it is a different type of the same size; worst case we can
    // bitcast the resultant vectors.
    if (!VectorTy)
      VectorTy = VInTy;
    ScalarKind = Vector;
    return true;
  }

  return false;
}

/// CanConvertToScalar - V is a pointer.  If we can convert the pointee and all
/// its accesses to a single vector type, return true with ScalarKind/VectorTy
/// recording the vector type to use.  If the alloca can instead only be
/// converted to a single promotable integer, return true with ScalarKind set
/// (or later defaulted) to Integer.  Further, if the use is not a completely
/// trivial use that mem2reg could promote, set IsNotTrivial.  Offset is the
/// current offset from the base of the alloca being analyzed.
bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
                                             Value* NonConstantIdx) {
  for (User *U : V->users()) {
    Instruction *UI = cast<Instruction>(U);

    if (LoadInst *LI = dyn_cast<LoadInst>(UI)) {
      // Don't break volatile loads.
      if (!LI->isSimple())
        return false;
      // Don't touch MMX operations.
      if (LI->getType()->isX86_MMXTy())
        return false;
      HadNonMemTransferAccess = true;
      MergeInTypeForLoadOrStore(LI->getType(), Offset);
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
      // Storing the pointer, not into the value?
      if (SI->getOperand(0) == V || !SI->isSimple()) return false;
      // Don't touch MMX operations.
      if (SI->getOperand(0)->getType()->isX86_MMXTy())
        return false;
      HadNonMemTransferAccess = true;
      MergeInTypeForLoadOrStore(SI->getOperand(0)->getType(), Offset);
      continue;
    }

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(UI)) {
      if (!onlyUsedByLifetimeMarkers(BCI))
        IsNotTrivial = true;  // Can't be mem2reg'd.
      if (!CanConvertToScalar(BCI, Offset, NonConstantIdx))
        return false;
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UI)) {
      // A GEP with variable indices is generally not handled; the one
      // exception, a single trailing index into a vector, is checked below.
      PointerType* PtrTy = dyn_cast<PointerType>(GEP->getPointerOperandType());
      if (!PtrTy)
        return false;

      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      Value *GEPNonConstantIdx = nullptr;
      if (!GEP->hasAllConstantIndices()) {
        if (!isa<VectorType>(PtrTy->getElementType()))
          return false;
        if (NonConstantIdx)
          return false;
        GEPNonConstantIdx = Indices.pop_back_val();
        if (!GEPNonConstantIdx->getType()->isIntegerTy(32))
          return false;
        HadDynamicAccess = true;
      } else
        GEPNonConstantIdx = NonConstantIdx;
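      // At this point the only dynamic form accepted is a single trailing
      // non-constant i32 index into a vector-typed alloca, e.g. (a sketch):
      //   %elt = getelementptr <4 x float>* %A, i32 0, i32 %i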
      uint64_t GEPOffset = DL.getIndexedOffset(PtrTy,
                                               Indices);
      // See if all uses can be converted.
      if (!CanConvertToScalar(GEP, Offset+GEPOffset, GEPNonConstantIdx))
        return false;
      IsNotTrivial = true;  // Can't be mem2reg'd.
      HadNonMemTransferAccess = true;
      continue;
    }

    // If this is a constant sized memset of a constant value (e.g. 0) we can
    // handle it.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(UI)) {
      // Store to dynamic index.
      if (NonConstantIdx)
        return false;
      // Store of constant value.
      if (!isa<ConstantInt>(MSI->getValue()))
        return false;

      // Store of constant size.
      ConstantInt *Len = dyn_cast<ConstantInt>(MSI->getLength());
      if (!Len)
        return false;

      // If the size differs from the alloca, we can only convert the alloca to
      // an integer bag-of-bits.
      // FIXME: This should handle all of the cases that are currently accepted
      // as vector element insertions.
      if (Len->getZExtValue() != AllocaSize || Offset != 0)
        ScalarKind = Integer;

      IsNotTrivial = true;  // Can't be mem2reg'd.
      HadNonMemTransferAccess = true;
      continue;
    }

    // If this is a memcpy or memmove into or out of the whole allocation, we
    // can handle it like a load or store of the scalar type.
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(UI)) {
      // Store to dynamic index.
      if (NonConstantIdx)
        return false;
      ConstantInt *Len = dyn_cast<ConstantInt>(MTI->getLength());
      if (!Len || Len->getZExtValue() != AllocaSize || Offset != 0)
        return false;

      IsNotTrivial = true;  // Can't be mem2reg'd.
      continue;
    }

    // If this is a lifetime intrinsic, we can handle it.
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(UI)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
          II->getIntrinsicID() == Intrinsic::lifetime_end) {
        continue;
      }
    }

    // Otherwise, we cannot handle this!
    return false;
  }

  return true;
}

/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
/// directly.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
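///
/// As an illustrative sketch (on a little-endian target), a "load float" at
/// byte offset 4 of an alloca converted to i64 becomes a load of the whole
/// i64 followed by the lshr, trunc and bitcast that
/// ConvertScalar_ExtractValue emits.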
void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
                                              uint64_t Offset,
                                              Value* NonConstantIdx) {
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->user_back());

    if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      ConvertUsesToScalar(CI, NewAI, Offset, NonConstantIdx);
      CI->eraseFromParent();
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      Value* GEPNonConstantIdx = nullptr;
      if (!GEP->hasAllConstantIndices()) {
        assert(!NonConstantIdx &&
               "Dynamic GEP reading from dynamic GEP unsupported");
        GEPNonConstantIdx = Indices.pop_back_val();
      } else
        GEPNonConstantIdx = NonConstantIdx;
      uint64_t GEPOffset = DL.getIndexedOffset(GEP->getPointerOperandType(),
                                               Indices);
      ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8, GEPNonConstantIdx);
      GEP->eraseFromParent();
      continue;
    }

    IRBuilder<> Builder(User);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // The load is a bit extract from NewAI shifted right by Offset bits.
      Value *LoadedVal = Builder.CreateLoad(NewAI);
      Value *NewLoadVal
        = ConvertScalar_ExtractValue(LoadedVal, LI->getType(), Offset,
                                     NonConstantIdx, Builder);
      LI->replaceAllUsesWith(NewLoadVal);
      LI->eraseFromParent();
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");
      Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in");
      Value *New = ConvertScalar_InsertValue(SI->getOperand(0), Old, Offset,
                                             NonConstantIdx, Builder);
      Builder.CreateStore(New, NewAI);
      SI->eraseFromParent();

      // If the load we just inserted is now dead, then the inserted store
      // overwrote the entire thing.
      if (Old->use_empty())
        Old->eraseFromParent();
      continue;
    }

    // If this is a constant sized memset of a constant value (e.g. 0) we can
    // transform it into a store of the expanded constant value.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
      assert(MSI->getRawDest() == Ptr && "Consistency error!");
      assert(!NonConstantIdx && "Cannot replace dynamic memset with insert");
      int64_t SNumBytes = cast<ConstantInt>(MSI->getLength())->getSExtValue();
      if (SNumBytes > 0 && (SNumBytes >> 32) == 0) {
        unsigned NumBytes = static_cast<unsigned>(SNumBytes);
        unsigned Val = cast<ConstantInt>(MSI->getValue())->getZExtValue();

        // Compute the value replicated the right number of times.
        APInt APVal(NumBytes*8, Val);

        // Splat the value if non-zero.
        if (Val)
          for (unsigned i = 1; i != NumBytes; ++i)
            APVal |= APVal << 8;
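        // E.g. (a sketch) a 4-byte memset of 0xAB yields APVal == 0xABABABAB.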

        Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in");
        Value *New = ConvertScalar_InsertValue(
                                    ConstantInt::get(User->getContext(), APVal),
                                               Old, Offset, nullptr, Builder);
        Builder.CreateStore(New, NewAI);

        // If the load we just inserted is now dead, then the memset overwrote
        // the entire thing.
        if (Old->use_empty())
          Old->eraseFromParent();
      }
      MSI->eraseFromParent();
      continue;
    }

    // If this is a memcpy or memmove into or out of the whole allocation, we
    // can handle it like a load or store of the scalar type.
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
      assert(Offset == 0 && "must be store to start of alloca");
      assert(!NonConstantIdx && "Cannot replace dynamic transfer with insert");

      // If the source and destination both refer to the same alloca, then this
      // is a noop copy-to-self; just delete it.  Otherwise, emit a load and
      // store as appropriate.
      AllocaInst *OrigAI = cast<AllocaInst>(GetUnderlyingObject(Ptr, &DL, 0));

      if (GetUnderlyingObject(MTI->getSource(), &DL, 0) != OrigAI) {
        // Dest must be OrigAI, change this to be a load from the original
        // pointer (bitcasted), then a store to our new alloca.
        assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
        Value *SrcPtr = MTI->getSource();
        PointerType* SPTy = cast<PointerType>(SrcPtr->getType());
        PointerType* AIPTy = cast<PointerType>(NewAI->getType());
        if (SPTy->getAddressSpace() != AIPTy->getAddressSpace()) {
          AIPTy = PointerType::get(AIPTy->getElementType(),
                                   SPTy->getAddressSpace());
        }
        SrcPtr = Builder.CreateBitCast(SrcPtr, AIPTy);

        LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
        SrcVal->setAlignment(MTI->getAlignment());
        Builder.CreateStore(SrcVal, NewAI);
      } else if (GetUnderlyingObject(MTI->getDest(), &DL, 0) != OrigAI) {
        // Src must be OrigAI, change this to be a load from NewAI then a store
        // through the original dest pointer (bitcasted).
        assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");
        LoadInst *SrcVal = Builder.CreateLoad(NewAI, "srcval");

        PointerType* DPTy = cast<PointerType>(MTI->getDest()->getType());
        PointerType* AIPTy = cast<PointerType>(NewAI->getType());
        if (DPTy->getAddressSpace() != AIPTy->getAddressSpace()) {
          AIPTy = PointerType::get(AIPTy->getElementType(),
                                   DPTy->getAddressSpace());
        }
        Value *DstPtr = Builder.CreateBitCast(MTI->getDest(), AIPTy);

        StoreInst *NewStore = Builder.CreateStore(SrcVal, DstPtr);
        NewStore->setAlignment(MTI->getAlignment());
      } else {
        // Noop transfer. Src == Dst
      }

      MTI->eraseFromParent();
      continue;
    }

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(User)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
          II->getIntrinsicID() == Intrinsic::lifetime_end) {
        // There's no need to preserve these, as the resulting alloca will be
        // converted to a register anyways.
        II->eraseFromParent();
        continue;
      }
    }

    llvm_unreachable("Unsupported operation!");
  }
}

/// ConvertScalar_ExtractValue - Extract a value of type ToType from an integer
/// or vector value FromVal, extracting the bits from the offset specified by
/// Offset.  This returns the value, which is of type ToType.
///
/// This happens when we are converting an "integer union" to a single
/// integer scalar, or when we are converting a "vector union" to a vector with
/// insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.
Value *ConvertToScalarInfo::
ConvertScalar_ExtractValue(Value *FromVal, Type *ToType,
                           uint64_t Offset, Value* NonConstantIdx,
                           IRBuilder<> &Builder) {
  // If the load is of the whole new alloca, no conversion is needed.
  Type *FromType = FromVal->getType();
  if (FromType == ToType && Offset == 0)
    return FromVal;

  // If the result alloca is a vector type, this is either an element
  // access or a bitcast to another vector type of the same size.
  if (VectorType *VTy = dyn_cast<VectorType>(FromType)) {
    unsigned FromTypeSize = DL.getTypeAllocSize(FromType);
    unsigned ToTypeSize = DL.getTypeAllocSize(ToType);
    if (FromTypeSize == ToTypeSize)
        return Builder.CreateBitCast(FromVal, ToType);

    // Otherwise it must be an element access.
    unsigned Elt = 0;
    if (Offset) {
      unsigned EltSize = DL.getTypeAllocSizeInBits(VTy->getElementType());
      Elt = Offset/EltSize;
      assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
    }
    // Return the element extracted out of it.
    Value *Idx;
    if (NonConstantIdx) {
      if (Elt)
        Idx = Builder.CreateAdd(NonConstantIdx,
                                Builder.getInt32(Elt),
                                "dyn.offset");
      else
        Idx = NonConstantIdx;
    } else
      Idx = Builder.getInt32(Elt);
    Value *V = Builder.CreateExtractElement(FromVal, Idx);
    if (V->getType() != ToType)
      V = Builder.CreateBitCast(V, ToType);
    return V;
  }

  // If ToType is a first class aggregate, extract out each of the pieces and
  // use insertvalue's to form the FCA.
  if (StructType *ST = dyn_cast<StructType>(ToType)) {
    assert(!NonConstantIdx &&
           "Dynamic indexing into struct types not supported");
    const StructLayout &Layout = *DL.getStructLayout(ST);
    Value *Res = UndefValue::get(ST);
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i),
                                        Offset+Layout.getElementOffsetInBits(i),
                                              nullptr, Builder);
      Res = Builder.CreateInsertValue(Res, Elt, i);
    }
    return Res;
  }

  if (ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
    assert(!NonConstantIdx &&
           "Dynamic indexing into array types not supported");
    uint64_t EltSize = DL.getTypeAllocSizeInBits(AT->getElementType());
    Value *Res = UndefValue::get(AT);
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(),
                                              Offset+i*EltSize, nullptr,
                                              Builder);
      Res = Builder.CreateInsertValue(Res, Elt, i);
    }
    return Res;
  }

  // Otherwise, this must be a union that was converted to an integer value.
  IntegerType *NTy = cast<IntegerType>(FromVal->getType());

  // If this is a big-endian system and the load is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (DL.isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits.  This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = DL.getTypeStoreSizeInBits(NTy) -
            DL.getTypeStoreSizeInBits(ToType) - Offset;
  } else {
    ShAmt = Offset;
  }
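
  // Worked example (little-endian, a sketch): extracting an i8 at byte
  // offset 1 from an i32 alloca arrives here with Offset == 8, so ShAmt == 8;
  // the i32 is lshr'd by 8 below and then truncated to i8.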
  // Note: we support negative shift amounts (handled here with shl), which
  // the shift instructions do not define.  We do this to support (f.e.) loads
  // off the end of a structure where only some bits are used.
  if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
    FromVal = Builder.CreateLShr(FromVal,
                                 ConstantInt::get(FromVal->getType(), ShAmt));
  else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
    FromVal = Builder.CreateShl(FromVal,
                                ConstantInt::get(FromVal->getType(), -ShAmt));

  // Finally, unconditionally truncate the integer to the right width.
  unsigned LIBitWidth = DL.getTypeSizeInBits(ToType);
  if (LIBitWidth < NTy->getBitWidth())
    FromVal =
      Builder.CreateTrunc(FromVal, IntegerType::get(FromVal->getContext(),
                                                    LIBitWidth));
  else if (LIBitWidth > NTy->getBitWidth())
    FromVal =
       Builder.CreateZExt(FromVal, IntegerType::get(FromVal->getContext(),
                                                    LIBitWidth));

  // If the result is an integer, this is a trunc or bitcast.
  if (ToType->isIntegerTy()) {
    // Should be done.
  } else if (ToType->isFloatingPointTy() || ToType->isVectorTy()) {
    // Just do a bitcast, we know the sizes match up.
    FromVal = Builder.CreateBitCast(FromVal, ToType);
  } else {
    // Otherwise must be a pointer.
    FromVal = Builder.CreateIntToPtr(FromVal, ToType);
  }
  assert(FromVal->getType() == ToType && "Didn't convert right?");
  return FromVal;
}

/// ConvertScalar_InsertValue - Insert the value "SV" into the existing integer
/// or vector value "Old" at the offset specified by Offset.
///
/// This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.
///
/// NonConstantIdx is an index value if there was a GEP with a non-constant
/// index value.  If this is 0 then all GEPs used to find this insert address
/// are constant.
Value *ConvertToScalarInfo::
ConvertScalar_InsertValue(Value *SV, Value *Old,
                          uint64_t Offset, Value* NonConstantIdx,
                          IRBuilder<> &Builder) {
  // Convert the stored type to the actual type, shift it left to insert
  // then 'or' into place.
  Type *AllocaType = Old->getType();
  LLVMContext &Context = Old->getContext();

  if (VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
    uint64_t VecSize = DL.getTypeAllocSizeInBits(VTy);
    uint64_t ValSize = DL.getTypeAllocSizeInBits(SV->getType());

    // Changing the whole vector with memset or with an access of a different
    // vector type?
    if (ValSize == VecSize)
        return Builder.CreateBitCast(SV, AllocaType);

    // Must be an element insertion.
    Type *EltTy = VTy->getElementType();
    if (SV->getType() != EltTy)
      SV = Builder.CreateBitCast(SV, EltTy);
    uint64_t EltSize = DL.getTypeAllocSizeInBits(EltTy);
    unsigned Elt = Offset/EltSize;
    Value *Idx;
    if (NonConstantIdx) {
      if (Elt)
        Idx = Builder.CreateAdd(NonConstantIdx,
                                Builder.getInt32(Elt),
                                "dyn.offset");
      else
        Idx = NonConstantIdx;
    } else
      Idx = Builder.getInt32(Elt);
    return Builder.CreateInsertElement(Old, SV, Idx);
  }

  // If SV is a first-class aggregate value, insert each value recursively.
  if (StructType *ST = dyn_cast<StructType>(SV->getType())) {
    assert(!NonConstantIdx &&
           "Dynamic indexing into struct types not supported");
    const StructLayout &Layout = *DL.getStructLayout(ST);
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i);
      Old = ConvertScalar_InsertValue(Elt, Old,
                                      Offset+Layout.getElementOffsetInBits(i),
                                      nullptr, Builder);
    }
    return Old;
  }

  if (ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {
    assert(!NonConstantIdx &&
           "Dynamic indexing into array types not supported");
    uint64_t EltSize = DL.getTypeAllocSizeInBits(AT->getElementType());
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i);
      Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, nullptr,
                                      Builder);
    }
    return Old;
  }

  // If SV is a float, convert it to the appropriate integer type.
  // If it is a pointer, do the same.
  unsigned SrcWidth = DL.getTypeSizeInBits(SV->getType());
  unsigned DestWidth = DL.getTypeSizeInBits(AllocaType);
  unsigned SrcStoreWidth = DL.getTypeStoreSizeInBits(SV->getType());
  unsigned DestStoreWidth = DL.getTypeStoreSizeInBits(AllocaType);
  if (SV->getType()->isFloatingPointTy() || SV->getType()->isVectorTy())
    SV = Builder.CreateBitCast(SV, IntegerType::get(SV->getContext(),SrcWidth));
  else if (SV->getType()->isPointerTy())
    SV = Builder.CreatePtrToInt(SV, DL.getIntPtrType(SV->getType()));

  // Zero extend or truncate the value if needed.
  if (SV->getType() != AllocaType) {
    if (SV->getType()->getPrimitiveSizeInBits() <
             AllocaType->getPrimitiveSizeInBits())
      SV = Builder.CreateZExt(SV, AllocaType);
    else {
      // Truncation may be needed if storing more than the alloca can hold
      // (undefined behavior).
      SV = Builder.CreateTrunc(SV, AllocaType);
      SrcWidth = DestWidth;
      SrcStoreWidth = DestStoreWidth;
    }
  }

  // If this is a big-endian system and the store is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (DL.isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits.  This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = DestStoreWidth - SrcStoreWidth - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: we support negative shift amounts (handled here with lshr), which
  // the shift instructions do not define.  We do this to support (f.e.) stores
  // off the end of a structure where only some bits in the structure are set.
  APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
  if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
    SV = Builder.CreateShl(SV, ConstantInt::get(SV->getType(), ShAmt));
    Mask <<= ShAmt;
  } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
    SV = Builder.CreateLShr(SV, ConstantInt::get(SV->getType(), -ShAmt));
    Mask = Mask.lshr(-ShAmt);
  }
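
  // Worked example (little-endian, a sketch): inserting an i8 at byte offset
  // 1 of an i32 alloca gives ShAmt == 8, so SV is shl'd into bits 8..15 and
  // Mask becomes 0x0000FF00; the result below is (Old & ~Mask) | SV.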

  // Mask out the bits we are about to insert from the old value, and or
  // in the new bits.
  if (SrcWidth != DestWidth) {
    assert(DestWidth > SrcWidth);
    Old = Builder.CreateAnd(Old, ConstantInt::get(Context, ~Mask), "mask");
    SV = Builder.CreateOr(Old, SV, "ins");
  }
  return SV;
}


//===----------------------------------------------------------------------===//
// SRoA Driver
//===----------------------------------------------------------------------===//


bool SROA::runOnFunction(Function &F) {
  if (skipOptnoneFunction(F))
    return false;

  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  DL = DLP ? &DLP->getDataLayout() : nullptr;

  bool Changed = performPromotion(F);

  // FIXME: ScalarRepl currently depends on DataLayout more than it
  // theoretically needs to. It should be refactored in order to support
  // target-independent IR. Until this is done, just skip the actual
  // scalar-replacement portion of this pass.
  if (!DL) return Changed;

  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

  return Changed;
}

namespace {
class AllocaPromoter : public LoadAndStorePromoter {
  AllocaInst *AI;
  DIBuilder *DIB;
  SmallVector<DbgDeclareInst *, 4> DDIs;
  SmallVector<DbgValueInst *, 4> DVIs;
public:
  AllocaPromoter(const SmallVectorImpl<Instruction*> &Insts, SSAUpdater &S,
                 DIBuilder *DB)
    : LoadAndStorePromoter(Insts, S), AI(nullptr), DIB(DB) {}

  void run(AllocaInst *AI, const SmallVectorImpl<Instruction*> &Insts) {
    // Remember which alloca we're promoting (for isInstInList).
    this->AI = AI;
    if (MDNode *DebugNode = MDNode::getIfExists(AI->getContext(), AI)) {
      for (User *U : DebugNode->users())
        if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
          DDIs.push_back(DDI);
        else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
          DVIs.push_back(DVI);
    }

    LoadAndStorePromoter::run(Insts);
    AI->eraseFromParent();
    for (SmallVectorImpl<DbgDeclareInst *>::iterator I = DDIs.begin(),
           E = DDIs.end(); I != E; ++I) {
      DbgDeclareInst *DDI = *I;
      DDI->eraseFromParent();
    }
    for (SmallVectorImpl<DbgValueInst *>::iterator I = DVIs.begin(),
           E = DVIs.end(); I != E; ++I) {
      DbgValueInst *DVI = *I;
      DVI->eraseFromParent();
    }
  }

  bool isInstInList(Instruction *I,
                    const SmallVectorImpl<Instruction*> &Insts) const override {
    if (LoadInst *LI = dyn_cast<LoadInst>(I))
      return LI->getOperand(0) == AI;
    return cast<StoreInst>(I)->getPointerOperand() == AI;
  }

  void updateDebugInfo(Instruction *Inst) const override {
    for (SmallVectorImpl<DbgDeclareInst *>::const_iterator I = DDIs.begin(),
           E = DDIs.end(); I != E; ++I) {
      DbgDeclareInst *DDI = *I;
      if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
        ConvertDebugDeclareToDebugValue(DDI, SI, *DIB);
      else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
        ConvertDebugDeclareToDebugValue(DDI, LI, *DIB);
    }
    for (SmallVectorImpl<DbgValueInst *>::const_iterator I = DVIs.begin(),
           E = DVIs.end(); I != E; ++I) {
      DbgValueInst *DVI = *I;
      Value *Arg = nullptr;
      if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        // If an argument is zero extended then use argument directly. The ZExt
        // may be zapped by an optimization pass in future.
        if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
          Arg = dyn_cast<Argument>(ZExt->getOperand(0));
        if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
          Arg = dyn_cast<Argument>(SExt->getOperand(0));
        if (!Arg)
          Arg = SI->getOperand(0);
      } else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        Arg = LI->getOperand(0);
      } else {
        continue;
      }
      Instruction *DbgVal =
        DIB->insertDbgValueIntrinsic(Arg, 0, DIVariable(DVI->getVariable()),
                                     Inst);
      DbgVal->setDebugLoc(DVI->getDebugLoc());
    }
  }
};
} // end anon namespace

/// isSafeSelectToSpeculate - Select instructions that use an alloca and are
/// subsequently loaded can be rewritten to load both input pointers and then
/// select between the result, allowing the load of the alloca to be promoted.
/// From this:
///   %P2 = select i1 %cond, i32* %Alloca, i32* %Other
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   %V2 = load i32* %Other
///   %V = select i1 %cond, i32 %V1, i32 %V2
///
/// We can do this to a select if its only uses are loads and if the operand to
/// the select can be loaded unconditionally.
static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *DL) {
  bool TDerefable = SI->getTrueValue()->isDereferenceablePointer(DL);
  bool FDerefable = SI->getFalseValue()->isDereferenceablePointer(DL);

  for (User *U : SI->users()) {
    LoadInst *LI = dyn_cast<LoadInst>(U);
    if (!LI || !LI->isSimple()) return false;

    // Both operands to the select need to be dereferenceable, either
    // absolutely (e.g. allocas) or at this point because we can see other
    // accesses to them.
    if (!TDerefable && !isSafeToLoadUnconditionally(SI->getTrueValue(), LI,
                                                    LI->getAlignment(), DL))
      return false;
    if (!FDerefable && !isSafeToLoadUnconditionally(SI->getFalseValue(), LI,
                                                    LI->getAlignment(), DL))
      return false;
  }

  return true;
}

/// isSafePHIToSpeculate - PHI instructions that use an alloca and are
/// subsequently loaded can be rewritten to load both input pointers in the pred
/// blocks and then PHI the results, allowing the load of the alloca to be
/// promoted.
/// From this:
///   %P2 = phi [i32* %Alloca, i32* %Other]
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   ...
///   %V2 = load i32* %Other
///   ...
///   %V = phi [i32 %V1, i32 %V2]
///
   1179 /// We can do this to a select if its only uses are loads and if the operand to
   1180 /// the select can be loaded unconditionally.
   1181 static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *DL) {
   1182   // For now, we can only do this promotion if the load is in the same block as
   1183   // the PHI, and if there are no stores between the phi and load.
   1184   // TODO: Allow recursive phi users.
   1185   // TODO: Allow stores.
   1186   BasicBlock *BB = PN->getParent();
   1187   unsigned MaxAlign = 0;
   1188   for (User *U : PN->users()) {
   1189     LoadInst *LI = dyn_cast<LoadInst>(U);
   1190     if (!LI || !LI->isSimple()) return false;
   1191 
   1192     // For now we only allow loads in the same block as the PHI.  This is a
   1193     // common case that happens when instcombine merges two loads through a PHI.
   1194     if (LI->getParent() != BB) return false;
   1195 
   1196     // Ensure that there are no instructions between the PHI and the load that
   1197     // could store.
   1198     for (BasicBlock::iterator BBI = PN; &*BBI != LI; ++BBI)
   1199       if (BBI->mayWriteToMemory())
   1200         return false;
   1201 
   1202     MaxAlign = std::max(MaxAlign, LI->getAlignment());
   1203   }
   1204 
   1205   // Okay, we know that we have one or more loads in the same block as the PHI.
   1206   // We can transform this if it is safe to push the loads into the predecessor
   1207   // blocks.  The only thing to watch out for is that we can't put a possibly
   1208   // trapping load in the predecessor if it is a critical edge.
   1209   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
   1210     BasicBlock *Pred = PN->getIncomingBlock(i);
   1211     Value *InVal = PN->getIncomingValue(i);
   1212 
   1213     // If the terminator of the predecessor has side-effects (an invoke),
   1214     // there is no safe place to put a load in the predecessor.
   1215     if (Pred->getTerminator()->mayHaveSideEffects())
   1216       return false;
   1217 
   1218     // If the value is produced by the terminator of the predecessor
   1219     // (an invoke), there is no valid place to put a load in the predecessor.
   1220     if (Pred->getTerminator() == InVal)
   1221       return false;
   1222 
   1223     // If the predecessor has a single successor, then the edge isn't critical.
   1224     if (Pred->getTerminator()->getNumSuccessors() == 1)
   1225       continue;
   1226 
   1227     // If this pointer is always safe to load, or if we can prove that there is
   1228     // already a load in the block, then we can move the load to the pred block.
   1229     if (InVal->isDereferenceablePointer(DL) ||
   1230         isSafeToLoadUnconditionally(InVal, Pred->getTerminator(), MaxAlign, DL))
   1231       continue;
   1232 
   1233     return false;
   1234   }
   1235 
   1236   return true;
   1237 }


/// tryToMakeAllocaBePromotable - This returns true if the alloca only has
/// direct (non-volatile) loads and stores to it.  If the alloca is close but
/// not quite there, this will transform the code to allow promotion.  As such,
/// it is a non-pure predicate.
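///
/// Illustrative IR sketch (names hypothetical): a select use such as
///   %P = select i1 %cond, i32* %A, i32* %Other
///   %V = load i32* %P
/// blocks promotion of %A; rewriting it as two loads feeding a scalar select
/// leaves %A with only direct loads and stores, which mem2reg can handle.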
static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *DL) {
  SetVector<Instruction*, SmallVector<Instruction*, 4>,
            SmallPtrSet<Instruction*, 4> > InstsToRewrite;
  for (User *U : AI->users()) {
    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (!LI->isSimple())
        return false;
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getOperand(0) == AI || !SI->isSimple())
        return false;   // Don't allow a store OF the AI, only INTO the AI.
      continue;
    }

    if (SelectInst *SI = dyn_cast<SelectInst>(U)) {
      // If the condition being selected on is a constant, fold the select.
      // Yes, this does (rarely) happen early on.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(SI->getCondition())) {
        Value *Result = SI->getOperand(1+CI->isZero());
        SI->replaceAllUsesWith(Result);
        SI->eraseFromParent();

        // This is very rare, and we just scrambled the use list of AI, so
        // start over completely.
        return tryToMakeAllocaBePromotable(AI, DL);
      }

      // If it is safe to turn "load (select c, AI, ptr)" into a select of two
      // loads, then we can transform this by rewriting the select.
      if (!isSafeSelectToSpeculate(SI, DL))
        return false;

      InstsToRewrite.insert(SI);
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(U)) {
      if (PN->use_empty()) {  // Dead PHIs can be stripped.
        InstsToRewrite.insert(PN);
        continue;
      }

      // If it is safe to turn "load (phi [AI, ptr, ...])" into a PHI of loads
      // in the pred blocks, then we can transform this by rewriting the PHI.
      if (!isSafePHIToSpeculate(PN, DL))
        return false;

      InstsToRewrite.insert(PN);
      continue;
    }

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
      if (onlyUsedByLifetimeMarkers(BCI)) {
        InstsToRewrite.insert(BCI);
        continue;
      }
    }

    return false;
  }

  // If there are no instructions to rewrite, then all uses are load/stores and
  // we're done!
  if (InstsToRewrite.empty())
    return true;

  // If we have instructions that need to be rewritten for this to be
  // promotable, take care of it now.
  for (unsigned i = 0, e = InstsToRewrite.size(); i != e; ++i) {
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(InstsToRewrite[i])) {
      // This could only be a bitcast used by nothing but lifetime intrinsics.
      for (BitCastInst::user_iterator I = BCI->user_begin(), E = BCI->user_end();
           I != E;)
        cast<Instruction>(*I++)->eraseFromParent();
      BCI->eraseFromParent();
      continue;
    }

    if (SelectInst *SI = dyn_cast<SelectInst>(InstsToRewrite[i])) {
      // Selects in InstsToRewrite only have load uses.  Rewrite each as two
      // loads with a new select.
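      // Illustrative before/after (IR sketch, names hypothetical):
      //   %P = select i1 %c, i32* %A, i32* %B
      //   %V = load i32* %P
      // becomes:
      //   %V.t = load i32* %A
      //   %V.f = load i32* %B
      //   %V = select i1 %c, i32 %V.t, i32 %V.f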
      while (!SI->use_empty()) {
        LoadInst *LI = cast<LoadInst>(SI->user_back());

        IRBuilder<> Builder(LI);
        LoadInst *TrueLoad =
          Builder.CreateLoad(SI->getTrueValue(), LI->getName()+".t");
        LoadInst *FalseLoad =
          Builder.CreateLoad(SI->getFalseValue(), LI->getName()+".f");

        // Transfer alignment and TBAA info if present.
        TrueLoad->setAlignment(LI->getAlignment());
        FalseLoad->setAlignment(LI->getAlignment());
        if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa)) {
          TrueLoad->setMetadata(LLVMContext::MD_tbaa, Tag);
          FalseLoad->setMetadata(LLVMContext::MD_tbaa, Tag);
        }

        Value *V = Builder.CreateSelect(SI->getCondition(), TrueLoad, FalseLoad);
        V->takeName(LI);
        LI->replaceAllUsesWith(V);
        LI->eraseFromParent();
      }

      // Now that all the loads are gone, the select is gone too.
      SI->eraseFromParent();
      continue;
    }

    // Otherwise, we have a PHI node which allows us to push the loads into the
    // predecessors.
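    // Illustrative before/after (IR sketch, names hypothetical):
    //   %P = phi i32* [ %A, %bb1 ], [ %B, %bb2 ]
    //   %V = load i32* %P
    // becomes a load in each predecessor feeding a new PHI:
    //   bb1: %P.bb1 = load i32* %A
    //   bb2: %P.bb2 = load i32* %B
    //   %P.ld = phi i32 [ %P.bb1, %bb1 ], [ %P.bb2, %bb2 ]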
    PHINode *PN = cast<PHINode>(InstsToRewrite[i]);
    if (PN->use_empty()) {
      PN->eraseFromParent();
      continue;
    }

    Type *LoadTy = cast<PointerType>(PN->getType())->getElementType();
    PHINode *NewPN = PHINode::Create(LoadTy, PN->getNumIncomingValues(),
                                     PN->getName()+".ld", PN);

    // Get the TBAA tag and alignment to use from one of the loads.  It doesn't
    // matter which one we get and if any differ, it doesn't matter.
    LoadInst *SomeLoad = cast<LoadInst>(PN->user_back());
    MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa);
    unsigned Align = SomeLoad->getAlignment();

    // Rewrite all loads of the PN to use the new PHI.
    while (!PN->use_empty()) {
      LoadInst *LI = cast<LoadInst>(PN->user_back());
      LI->replaceAllUsesWith(NewPN);
      LI->eraseFromParent();
    }

    // Inject loads into all of the pred blocks.  Keep track of which blocks we
    // insert them into in case we have multiple edges from the same block.
    DenseMap<BasicBlock*, LoadInst*> InsertedLoads;

    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      BasicBlock *Pred = PN->getIncomingBlock(i);
      LoadInst *&Load = InsertedLoads[Pred];
      if (!Load) {
        Load = new LoadInst(PN->getIncomingValue(i),
                            PN->getName() + "." + Pred->getName(),
                            Pred->getTerminator());
        Load->setAlignment(Align);
        if (TBAATag) Load->setMetadata(LLVMContext::MD_tbaa, TBAATag);
      }

      NewPN->addIncoming(Load, Pred);
    }

    PN->eraseFromParent();
  }

  ++NumAdjusted;
  return true;
}

bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  DominatorTree *DT = nullptr;
  if (HasDomTree)
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function
  DIBuilder DIB(*F.getParent());
  bool Changed = false;
  SmallVector<Instruction*, 64> Insts;
  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions in
    // the entry node
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
        if (tryToMakeAllocaBePromotable(AI, DL))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    if (HasDomTree)
      PromoteMemToReg(Allocas, *DT);
    else {
      SSAUpdater SSA;
      for (unsigned i = 0, e = Allocas.size(); i != e; ++i) {
        AllocaInst *AI = Allocas[i];

        // Build list of instructions to promote.
        for (User *U : AI->users())
          Insts.push_back(cast<Instruction>(U));
        AllocaPromoter(Insts, SSA, &DIB).run(AI, Insts);
        Insts.clear();
      }
    }
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}


/// ShouldAttemptScalarRepl - Decide if an alloca is a good candidate for
/// SROA.  It must be a struct or array type with a small number of elements.
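///
/// For example, an alloca of { i32, float } or [4 x i32] is a reasonable
/// candidate under the pass's default thresholds, while something like
/// [1024 x i8] has far too many elements to split profitably.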
bool SROA::ShouldAttemptScalarRepl(AllocaInst *AI) {
  Type *T = AI->getAllocatedType();
  // Do not promote any struct that has too many members.
  if (StructType *ST = dyn_cast<StructType>(T))
    return ST->getNumElements() <= StructMemberThreshold;
  // Do not promote any array that has too many elements.
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return AT->getNumElements() <= ArrayElementThreshold;
  return false;
}

// performScalarRepl - This is a simple worklist-driven algorithm, which runs
// on all of the alloca instructions in the entry block, breaking up aggregate
// allocas into their elements when possible.
//
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocaInst*> WorkList;

  // Scan the entry basic block, adding allocas to the worklist.
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocaInst *A = dyn_cast<AllocaInst>(I))
      WorkList.push_back(A);

  // Process the worklist.
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocaInst *AI = WorkList.back();
    WorkList.pop_back();

    // Handle dead allocas trivially.  These can be formed by SROA'ing arrays
    // with unused elements.
    if (AI->use_empty()) {
      AI->eraseFromParent();
      Changed = true;
      continue;
    }

    // If this alloca is impossible for us to promote, reject it early.
    if (AI->isArrayAllocation() || !AI->getAllocatedType()->isSized())
      continue;

    // Check to see if we can perform the core SROA transformation.  We cannot
    // transform the allocation instruction if it is an array allocation
    // (allocations OF arrays are OK though), and an allocation of a scalar
    // value cannot be decomposed at all.
    uint64_t AllocaSize = DL->getTypeAllocSize(AI->getAllocatedType());

    // Do not promote [0 x %struct].
    if (AllocaSize == 0) continue;

    // Do not promote any struct whose size is too big.
    if (AllocaSize > SRThreshold) continue;

    // If the alloca looks like a good candidate for scalar replacement, and if
    // all its users can be transformed, then split up the aggregate into its
    // separate elements.
    if (ShouldAttemptScalarRepl(AI) && isSafeAllocaToScalarRepl(AI)) {
      DoScalarReplacement(AI, WorkList);
      Changed = true;
      continue;
    }

    // Otherwise, see if we can turn this aggregate value (potentially with
    // casts) into a simple scalar value that can be mem2reg'd into a register.
    // IsNotTrivial tracks whether this is something that mem2reg could have
    // promoted itself.  If so, we don't want to transform it needlessly.  Note
    // that we can't just check based on the type: the alloca may be of an i32
    // type but have pointer arithmetic that sets byte 3 of it, or something
    // similar.
    if (AllocaInst *NewAI = ConvertToScalarInfo(
              (unsigned)AllocaSize, *DL, ScalarLoadThreshold).TryConvert(AI)) {
      NewAI->takeName(AI);
      AI->eraseFromParent();
      ++NumConverted;
      Changed = true;
      continue;
    }

    // Otherwise, we couldn't process this alloca.
  }

  return Changed;
}

/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
/// predicate; do SROA now.
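///
/// Illustrative result (IR sketch): %agg = alloca { i32, float } becomes
///   %agg.0 = alloca i32
///   %agg.1 = alloca float
/// with all uses of %agg rewritten in terms of the element allocas.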
void SROA::DoScalarReplacement(AllocaInst *AI,
                               std::vector<AllocaInst*> &WorkList) {
  DEBUG(dbgs() << "Found inst to SROA: " << *AI << '\n');
  SmallVector<AllocaInst*, 32> ElementAllocas;
  if (StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
    ElementAllocas.reserve(ST->getNumContainedTypes());
    for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ST->getContainedType(i), nullptr,
                                      AI->getAlignment(),
                                      AI->getName() + "." + Twine(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  } else {
    ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
    ElementAllocas.reserve(AT->getNumElements());
    Type *ElTy = AT->getElementType();
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ElTy, nullptr, AI->getAlignment(),
                                      AI->getName() + "." + Twine(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  }

  // Now that we have created the new alloca instructions, rewrite all the
  // uses of the old alloca.
  RewriteForScalarRepl(AI, AI, 0, ElementAllocas);

  // Now erase any instructions that were made dead while rewriting the alloca.
  DeleteDeadInstructions();
  AI->eraseFromParent();

  ++NumReplaced;
}
/// DeleteDeadInstructions - Erase instructions on the DeadInsts list,
/// recursively including all their operands that become trivially dead.
void SROA::DeleteDeadInstructions() {
  while (!DeadInsts.empty()) {
    Instruction *I = cast<Instruction>(DeadInsts.pop_back_val());

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        // Zero out the operand and see if it becomes trivially dead.
        // (But, don't add allocas to the dead instruction list -- they are
        // already on the worklist and will be deleted separately.)
        *OI = nullptr;
        if (isInstructionTriviallyDead(U) && !isa<AllocaInst>(U))
          DeadInsts.push_back(U);
      }

    I->eraseFromParent();
  }
}

/// isSafeForScalarRepl - Check if instruction I is a safe use with regard to
/// performing scalar replacement of alloca AI.  The results are flagged in
/// the Info parameter.  Offset indicates the position within AI that is
/// referenced by this instruction.
void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
                               AllocaInfo &Info) {
  for (Use &U : I->uses()) {
    Instruction *User = cast<Instruction>(U.getUser());

    if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
      isSafeForScalarRepl(BC, Offset, Info);
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
      uint64_t GEPOffset = Offset;
      isSafeGEP(GEPI, GEPOffset, Info);
      if (!Info.isUnsafe)
        isSafeForScalarRepl(GEPI, GEPOffset, Info);
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
      ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
      if (!Length || Length->isNegative())
        return MarkUnsafe(Info, User);

      isSafeMemAccess(Offset, Length->getZExtValue(), nullptr,
                      U.getOperandNo() == 0, Info, MI,
                      true /*AllowWholeAccess*/);
    } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      if (!LI->isSimple())
        return MarkUnsafe(Info, User);
      Type *LIType = LI->getType();
      isSafeMemAccess(Offset, DL->getTypeAllocSize(LIType),
                      LIType, false, Info, LI, true /*AllowWholeAccess*/);
      Info.hasALoadOrStore = true;

    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // A store is OK if it is storing INTO the pointer, not storing the
      // pointer itself.
      if (!SI->isSimple() || SI->getOperand(0) == I)
        return MarkUnsafe(Info, User);

      Type *SIType = SI->getOperand(0)->getType();
      isSafeMemAccess(Offset, DL->getTypeAllocSize(SIType),
                      SIType, true, Info, SI, true /*AllowWholeAccess*/);
      Info.hasALoadOrStore = true;
    } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(User)) {
      if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
          II->getIntrinsicID() != Intrinsic::lifetime_end)
        return MarkUnsafe(Info, User);
    } else if (isa<PHINode>(User) || isa<SelectInst>(User)) {
      isSafePHISelectUseForScalarRepl(User, Offset, Info);
    } else {
      return MarkUnsafe(Info, User);
    }
    if (Info.isUnsafe) return;
  }
}

/// isSafePHISelectUseForScalarRepl - If we see a PHI node or select using a
/// pointer derived from the alloca, we can often still split the alloca into
/// elements.  This is useful if we have a large alloca where one element is
/// phi'd together somewhere: we can SRoA and promote all the other elements
/// even if we end up not being able to promote this one.
///
/// All we require is that the uses of the PHI do not index into other parts of
/// the alloca.  The most important use case for this is single loads and
/// stores that are PHI'd together, which can happen due to code sinking.
void SROA::isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset,
                                           AllocaInfo &Info) {
  // If we've already checked this PHI, don't do it again.
  if (PHINode *PN = dyn_cast<PHINode>(I))
    if (!Info.CheckedPHIs.insert(PN))
      return;

  for (User *U : I->users()) {
    Instruction *UI = cast<Instruction>(U);

    if (BitCastInst *BC = dyn_cast<BitCastInst>(UI)) {
      isSafePHISelectUseForScalarRepl(BC, Offset, Info);
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) {
      // Only allow "bitcast" GEPs for simplicity.  We could generalize this,
      // but would have to prove that we're staying inside of an element being
      // promoted.
      if (!GEPI->hasAllZeroIndices())
        return MarkUnsafe(Info, UI);
      isSafePHISelectUseForScalarRepl(GEPI, Offset, Info);
    } else if (LoadInst *LI = dyn_cast<LoadInst>(UI)) {
      if (!LI->isSimple())
        return MarkUnsafe(Info, UI);
      Type *LIType = LI->getType();
      isSafeMemAccess(Offset, DL->getTypeAllocSize(LIType),
                      LIType, false, Info, LI, false /*AllowWholeAccess*/);
      Info.hasALoadOrStore = true;

    } else if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
      // A store is OK if it is storing INTO the pointer, not storing the
      // pointer itself.
      if (!SI->isSimple() || SI->getOperand(0) == I)
        return MarkUnsafe(Info, UI);

      Type *SIType = SI->getOperand(0)->getType();
      isSafeMemAccess(Offset, DL->getTypeAllocSize(SIType),
                      SIType, true, Info, SI, false /*AllowWholeAccess*/);
      Info.hasALoadOrStore = true;
    } else if (isa<PHINode>(UI) || isa<SelectInst>(UI)) {
      isSafePHISelectUseForScalarRepl(UI, Offset, Info);
    } else {
      return MarkUnsafe(Info, UI);
    }
    if (Info.isUnsafe) return;
  }
}

/// isSafeGEP - Check if a GEP instruction can be handled for scalar
/// replacement.  It is safe when all the indices are constant, in-bounds
/// references, and when the resulting offset corresponds to an element within
/// the alloca type.  The results are flagged in the Info parameter.  Upon
/// return, Offset is adjusted as specified by the GEP indices.
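///
/// For example (illustrative IR), with %A = alloca { i32, [4 x i8] }, the GEP
///   getelementptr { i32, [4 x i8] }* %A, i32 0, i32 1, i32 2
/// is safe: its constant offset lands on a component element of the alloca.
/// A non-constant or out-of-bounds array index is flagged as unsafe.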
void SROA::isSafeGEP(GetElementPtrInst *GEPI,
                     uint64_t &Offset, AllocaInfo &Info) {
  gep_type_iterator GEPIt = gep_type_begin(GEPI), E = gep_type_end(GEPI);
  if (GEPIt == E)
    return;
  bool NonConstant = false;
  unsigned NonConstantIdxSize = 0;

  // Walk through the GEP type indices, checking the types that this indexes
  // into.
  for (; GEPIt != E; ++GEPIt) {
    // Ignore struct elements, no extra checking needed for these.
    if ((*GEPIt)->isStructTy())
      continue;

    ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPIt.getOperand());
    if (!IdxVal)
      return MarkUnsafe(Info, GEPI);
  }

  // Compute the offset due to this GEP and check if the alloca has a
  // component element at that offset.
  SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end());
  // If this GEP is non-constant then the last operand must have been a
  // dynamic index into a vector.  Pop this now as it has no impact on the
  // constant part of the offset.
  if (NonConstant)
    Indices.pop_back();
  Offset += DL->getIndexedOffset(GEPI->getPointerOperandType(), Indices);
  if (!TypeHasComponent(Info.AI->getAllocatedType(), Offset,
                        NonConstantIdxSize))
    MarkUnsafe(Info, GEPI);
}

/// isHomogeneousAggregate - Check if type T is a struct or array containing
/// elements of the same type (which is always true for arrays).  If so,
/// return true with NumElts and EltTy set to the number of elements and the
/// element type, respectively.
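///
/// For example, { i32, i32, i32 } and [3 x i32] are both homogeneous with
/// NumElts == 3 and EltTy == i32, while { i32, float } is not.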
static bool isHomogeneousAggregate(Type *T, unsigned &NumElts,
                                   Type *&EltTy) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
    NumElts = AT->getNumElements();
    EltTy = (NumElts == 0 ? nullptr : AT->getElementType());
    return true;
  }
  if (StructType *ST = dyn_cast<StructType>(T)) {
    NumElts = ST->getNumContainedTypes();
    EltTy = (NumElts == 0 ? nullptr : ST->getContainedType(0));
    for (unsigned n = 1; n < NumElts; ++n) {
      if (ST->getContainedType(n) != EltTy)
        return false;
    }
    return true;
  }
  return false;
}

/// isCompatibleAggregate - Check if T1 and T2 are either the same type or are
/// "homogeneous" aggregates with the same element type and number of elements.
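///
/// For example, [2 x i32] and { i32, i32 } are compatible: both are
/// homogeneous aggregates of two i32 elements, even though the types differ.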
static bool isCompatibleAggregate(Type *T1, Type *T2) {
  if (T1 == T2)
    return true;

  unsigned NumElts1, NumElts2;
  Type *EltTy1, *EltTy2;
  if (isHomogeneousAggregate(T1, NumElts1, EltTy1) &&
      isHomogeneousAggregate(T2, NumElts2, EltTy2) &&
      NumElts1 == NumElts2 &&
      EltTy1 == EltTy2)
    return true;

  return false;
}

/// isSafeMemAccess - Check if a load/store/memcpy operates on the entire AI
/// alloca or has an offset and size that correspond to a component element
/// within it.  The offset checked here may have been formed from a GEP with a
/// pointer bitcasted to a different type.
///
/// If AllowWholeAccess is true, then this allows uses of the entire alloca as a
/// unit.  If false, it only allows accesses known to be in a single element.
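///
/// For example (assuming a typical 64-bit layout), for an alloca of
/// { i32, i32 } an 8-byte memcpy at offset 0 is a whole-alloca access, while
/// a 4-byte i32 load at offset 4 corresponds to the second element.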
void SROA::isSafeMemAccess(uint64_t Offset, uint64_t MemSize,
                           Type *MemOpType, bool isStore,
                           AllocaInfo &Info, Instruction *TheAccess,
                           bool AllowWholeAccess) {
  // Check if this is a load/store of the entire alloca.
  if (Offset == 0 && AllowWholeAccess &&
      MemSize == DL->getTypeAllocSize(Info.AI->getAllocatedType())) {
    // This can be safe for MemIntrinsics (where MemOpType is null) and integer
    // loads/stores (which are essentially the same as the MemIntrinsics with
    // regard to copying padding between elements).  But, if an alloca is
    // flagged as both a source and destination of such operations, we'll need
    // to check later for padding between elements.
    if (!MemOpType || MemOpType->isIntegerTy()) {
      if (isStore)
        Info.isMemCpyDst = true;
      else
        Info.isMemCpySrc = true;
      return;
    }
    // This is also safe for references using a type that is compatible with
    // the type of the alloca, so that loads/stores can be rewritten using
    // insertvalue/extractvalue.
    if (isCompatibleAggregate(MemOpType, Info.AI->getAllocatedType())) {
      Info.hasSubelementAccess = true;
      return;
    }
  }
  // Check if the offset/size correspond to a component within the alloca type.
  Type *T = Info.AI->getAllocatedType();
  if (TypeHasComponent(T, Offset, MemSize)) {
    Info.hasSubelementAccess = true;
    return;
  }

  return MarkUnsafe(Info, TheAccess);
}

/// TypeHasComponent - Return true if T has a component type with the
/// specified offset and size.  If Size is zero, do not check the size.
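///
/// For example (assuming a typical 64-bit layout), in { i32, [2 x i64] } an
/// access at Offset 8 with Size 8 matches the first i64 element, while an
/// access at Offset 6 falls into padding and matches no component.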
bool SROA::TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size) {
  Type *EltTy;
  uint64_t EltSize;
  if (StructType *ST = dyn_cast<StructType>(T)) {
    const StructLayout *Layout = DL->getStructLayout(ST);
    unsigned EltIdx = Layout->getElementContainingOffset(Offset);
    EltTy = ST->getContainedType(EltIdx);
    EltSize = DL->getTypeAllocSize(EltTy);
    Offset -= Layout->getElementOffset(EltIdx);
  } else if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
    EltTy = AT->getElementType();
    EltSize = DL->getTypeAllocSize(EltTy);
    if (Offset >= AT->getNumElements() * EltSize)
      return false;
    Offset %= EltSize;
  } else if (VectorType *VT = dyn_cast<VectorType>(T)) {
    EltTy = VT->getElementType();
    EltSize = DL->getTypeAllocSize(EltTy);
    if (Offset >= VT->getNumElements() * EltSize)
      return false;
    Offset %= EltSize;
  } else {
    return false;
  }
  if (Offset == 0 && (Size == 0 || EltSize == Size))
    return true;
  // Check if the component spans multiple elements.
  if (Offset + Size > EltSize)
    return false;
  return TypeHasComponent(EltTy, Offset, Size);
}

/// RewriteForScalarRepl - Alloca AI is being split into NewElts, so rewrite
/// the instruction I, which references it, to use the separate elements.
/// Offset indicates the position within AI that is referenced by this
/// instruction.
void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
                                SmallVectorImpl<AllocaInst *> &NewElts) {
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;) {
    Use &TheUse = *UI++;
    Instruction *User = cast<Instruction>(TheUse.getUser());

    if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
      RewriteBitCast(BC, AI, Offset, NewElts);
      continue;
    }

    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
      RewriteGEP(GEPI, AI, Offset, NewElts);
      continue;
    }

    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
      ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
      uint64_t MemSize = Length->getZExtValue();
      if (Offset == 0 &&
          MemSize == DL->getTypeAllocSize(AI->getAllocatedType()))
        RewriteMemIntrinUserOfAlloca(MI, I, AI, NewElts);
      // Otherwise the intrinsic can only touch a single element and the
      // address operand will be updated, so nothing else needs to be done.
      continue;
    }

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(User)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
          II->getIntrinsicID() == Intrinsic::lifetime_end) {
        RewriteLifetimeIntrinsic(II, AI, Offset, NewElts);
      }
      continue;
    }

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      Type *LIType = LI->getType();

      if (isCompatibleAggregate(LIType, AI->getAllocatedType())) {
        // Replace:
        //   %res = load { i32, i32 }* %alloc
        // with:
        //   %load.0 = load i32* %alloc.0
        //   %insert.0 = insertvalue { i32, i32 } undef, i32 %load.0, 0
        //   %load.1 = load i32* %alloc.1
        //   %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1
        // (Also works for arrays instead of structs)
        Value *Insert = UndefValue::get(LIType);
        IRBuilder<> Builder(LI);
        for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
          Value *Load = Builder.CreateLoad(NewElts[i], "load");
          Insert = Builder.CreateInsertValue(Insert, Load, i, "insert");
        }
        LI->replaceAllUsesWith(Insert);
        DeadInsts.push_back(LI);
      } else if (LIType->isIntegerTy() &&
                 DL->getTypeAllocSize(LIType) ==
                 DL->getTypeAllocSize(AI->getAllocatedType())) {
        // If this is a load of the entire alloca to an integer, rewrite it.
        RewriteLoadUserOfWholeAlloca(LI, AI, NewElts);
      }
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      Value *Val = SI->getOperand(0);
      Type *SIType = Val->getType();
      if (isCompatibleAggregate(SIType, AI->getAllocatedType())) {
        // Replace:
        //   store { i32, i32 } %val, { i32, i32 }* %alloc
        // with:
        //   %val.0 = extractvalue { i32, i32 } %val, 0
        //   store i32 %val.0, i32* %alloc.0
        //   %val.1 = extractvalue { i32, i32 } %val, 1
        //   store i32 %val.1, i32* %alloc.1
        // (Also works for arrays instead of structs)
        IRBuilder<> Builder(SI);
        for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
          Value *Extract = Builder.CreateExtractValue(Val, i, Val->getName());
          Builder.CreateStore(Extract, NewElts[i]);
        }
        DeadInsts.push_back(SI);
      } else if (SIType->isIntegerTy() &&
                 DL->getTypeAllocSize(SIType) ==
                 DL->getTypeAllocSize(AI->getAllocatedType())) {
        // If this is a store of the entire alloca from an integer, rewrite it.
        RewriteStoreUserOfWholeAlloca(SI, AI, NewElts);
      }
      continue;
    }

    if (isa<SelectInst>(User) || isa<PHINode>(User)) {
      // If we have a PHI user of the alloca itself (as opposed to a GEP or
      // bitcast) we have to rewrite it.  GEP and bitcast uses will be RAUW'd to
      // the new pointer.
      if (!isa<AllocaInst>(I)) continue;

      assert(Offset == 0 && NewElts[0] &&
             "Direct alloca use should have a zero offset");

      // If we have a use of the alloca, we know the derived uses will be
      // utilizing just the first element of the scalarized result.  Insert a
      // bitcast of the first alloca before the user as required.
      AllocaInst *NewAI = NewElts[0];
      BitCastInst *BCI = new BitCastInst(NewAI, AI->getType(), "", NewAI);
      NewAI->moveBefore(BCI);
      TheUse = BCI;
      continue;
    }
  }
}

/// RewriteBitCast - Update a bitcast reference to the alloca being replaced
/// and recursively continue updating all of its uses.
void SROA::RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
                          SmallVectorImpl<AllocaInst *> &NewElts) {
  RewriteForScalarRepl(BC, AI, Offset, NewElts);
  if (BC->getOperand(0) != AI)
    return;

  // The bitcast references the original alloca.  Replace its uses with
  // references to the alloca containing offset zero (which is normally at
  // index zero, but might not be in cases involving structs with elements
  // of size zero).
  Type *T = AI->getAllocatedType();
  uint64_t EltOffset = 0;
  Type *IdxTy;
  uint64_t Idx = FindElementAndOffset(T, EltOffset, IdxTy);
  Instruction *Val = NewElts[Idx];
  if (Val->getType() != BC->getDestTy()) {
    Val = new BitCastInst(Val, BC->getDestTy(), "", BC);
    Val->takeName(BC);
  }
  BC->replaceAllUsesWith(Val);
  DeadInsts.push_back(BC);
}

/// FindElementAndOffset - Return the index of the element containing Offset
/// within the specified type, which must be a struct, array, or vector type.
/// Sets T to the type of the element and Offset to the offset within that
/// element.  IdxTy is set to the type of the index result to be used in a
/// GEP instruction.
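///
/// Worked example (assuming a typical 64-bit layout): for T = { i32, i64 }
/// and Offset = 12, the i64 field starts at byte 8, so the result is Idx = 1
/// with T set to i64, Offset set to 4, and IdxTy set to i32.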
uint64_t SROA::FindElementAndOffset(Type *&T, uint64_t &Offset,
                                    Type *&IdxTy) {
  uint64_t Idx = 0;
  if (StructType *ST = dyn_cast<StructType>(T)) {
    const StructLayout *Layout = DL->getStructLayout(ST);
    Idx = Layout->getElementContainingOffset(Offset);
    T = ST->getContainedType(Idx);
    Offset -= Layout->getElementOffset(Idx);
    IdxTy = Type::getInt32Ty(T->getContext());
    return Idx;
  } else if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
    T = AT->getElementType();
    uint64_t EltSize = DL->getTypeAllocSize(T);
    Idx = Offset / EltSize;
    Offset -= Idx * EltSize;
    IdxTy = Type::getInt64Ty(T->getContext());
    return Idx;
  }
  VectorType *VT = cast<VectorType>(T);
  T = VT->getElementType();
  uint64_t EltSize = DL->getTypeAllocSize(T);
  Idx = Offset / EltSize;
  Offset -= Idx * EltSize;
  IdxTy = Type::getInt64Ty(T->getContext());
  return Idx;
}

/// RewriteGEP - Check if this GEP instruction moves the pointer across
/// elements of the alloca that are being split apart, and if so, rewrite
/// the GEP to be relative to the new element.
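///
/// For example (illustrative IR), with %A = alloca [2 x i32] split into %A.0
/// and %A.1:
///   %p = getelementptr inbounds [2 x i32]* %A, i32 0, i32 1
/// crosses from element 0 to element 1, so its uses are rewritten to address
/// %A.1 directly and the original GEP becomes dead.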
void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
                      SmallVectorImpl<AllocaInst *> &NewElts) {
  uint64_t OldOffset = Offset;
  SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end());
  // If the GEP was dynamic then it must have been a dynamic vector lookup.
  // In this case, it must be the last GEP operand which is dynamic, so keep
  // that aside until we've found the constant GEP offset, then add it back in
  // at the end.
  Value* NonConstantIdx = nullptr;
  if (!GEPI->hasAllConstantIndices())
    NonConstantIdx = Indices.pop_back_val();
  Offset += DL->getIndexedOffset(GEPI->getPointerOperandType(), Indices);

  RewriteForScalarRepl(GEPI, AI, Offset, NewElts);

  Type *T = AI->getAllocatedType();
  Type *IdxTy;
  uint64_t OldIdx = FindElementAndOffset(T, OldOffset, IdxTy);
  if (GEPI->getOperand(0) == AI)
    OldIdx = ~0ULL; // Force the GEP to be rewritten.

  T = AI->getAllocatedType();
  uint64_t EltOffset = Offset;
  uint64_t Idx = FindElementAndOffset(T, EltOffset, IdxTy);

  // If this GEP does not move the pointer across elements of the alloca
  // being split, then it does not need to be rewritten.
  if (Idx == OldIdx)
    return;

  Type *i32Ty = Type::getInt32Ty(AI->getContext());
  SmallVector<Value*, 8> NewArgs;
  NewArgs.push_back(Constant::getNullValue(i32Ty));
  while (EltOffset != 0) {
    uint64_t EltIdx = FindElementAndOffset(T, EltOffset, IdxTy);
    NewArgs.push_back(ConstantInt::get(IdxTy, EltIdx));
  }
  if (NonConstantIdx) {
    Type* GepTy = T;
    // This GEP has a dynamic index.  We need to add "i32 0" to index through
    // any structs or arrays in the original type until we get to the vector
    // to index.
    while (!isa<VectorType>(GepTy)) {
      NewArgs.push_back(Constant::getNullValue(i32Ty));
      GepTy = cast<CompositeType>(GepTy)->getTypeAtIndex(0U);
    }
    NewArgs.push_back(NonConstantIdx);
  }
  Instruction *Val = NewElts[Idx];
  if (NewArgs.size() > 1) {
    Val = GetElementPtrInst::CreateInBounds(Val, NewArgs, "", GEPI);
    Val->takeName(GEPI);
  }
  if (Val->getType() != GEPI->getType())
    Val = new BitCastInst(Val, GEPI->getType(), Val->getName(), GEPI);
  GEPI->replaceAllUsesWith(Val);
  DeadInsts.push_back(GEPI);
}

/// RewriteLifetimeIntrinsic - II is a lifetime.start/lifetime.end.  Rewrite it
/// to mark the lifetime of the scalarized memory.
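///
/// For example (sizes illustrative): a lifetime.start of 8 bytes covering an
/// alloca split from { i32, i32 } becomes two 4-byte lifetime.start markers,
/// one on each element alloca.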
void SROA::RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI,
                                    uint64_t Offset,
                                    SmallVectorImpl<AllocaInst *> &NewElts) {
  ConstantInt *OldSize = cast<ConstantInt>(II->getArgOperand(0));
  // Put matching lifetime markers on everything from Offset up to
  // Offset+OldSize.
  Type *AIType = AI->getAllocatedType();
  uint64_t NewOffset = Offset;
  Type *IdxTy;
  uint64_t Idx = FindElementAndOffset(AIType, NewOffset, IdxTy);

  IRBuilder<> Builder(II);
  uint64_t Size = OldSize->getLimitedValue();

  if (NewOffset) {
    // Splice the first element and index 'NewOffset' bytes in.  SROA will
    // split the alloca again later.
    unsigned AS = AI->getType()->getAddressSpace();
    Value *V = Builder.CreateBitCast(NewElts[Idx], Builder.getInt8PtrTy(AS));
    V = Builder.CreateGEP(V, Builder.getInt64(NewOffset));

    IdxTy = NewElts[Idx]->getAllocatedType();
    uint64_t EltSize = DL->getTypeAllocSize(IdxTy) - NewOffset;
    if (EltSize > Size) {
      EltSize = Size;
      Size = 0;
    } else {
      Size -= EltSize;
    }
    if (II->getIntrinsicID() == Intrinsic::lifetime_start)
      Builder.CreateLifetimeStart(V, Builder.getInt64(EltSize));
    else
      Builder.CreateLifetimeEnd(V, Builder.getInt64(EltSize));
    ++Idx;
  }

  for (; Idx != NewElts.size() && Size; ++Idx) {
    IdxTy = NewElts[Idx]->getAllocatedType();
    uint64_t EltSize = DL->getTypeAllocSize(IdxTy);
    if (EltSize > Size) {
      EltSize = Size;
      Size = 0;
    } else {
      Size -= EltSize;
    }
    if (II->getIntrinsicID() == Intrinsic::lifetime_start)
      Builder.CreateLifetimeStart(NewElts[Idx],
                                  Builder.getInt64(EltSize));
    else
      Builder.CreateLifetimeEnd(NewElts[Idx],
                                Builder.getInt64(EltSize));
  }
  DeadInsts.push_back(II);
}

/// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI.
/// Rewrite it to copy or set the elements of the scalarized memory.
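///
/// For example (IR sketch, names hypothetical): a memcpy of the whole
/// { i32, i32 } alloca %A, split into %A.0 and %A.1, from some pointer %src
/// becomes an element-wise copy:
///   %elt0 = load i32* %src.0
///   store i32 %elt0, i32* %A.0
///   %elt1 = load i32* %src.1
///   store i32 %elt1, i32* %A.1
/// where %src.0 and %src.1 are GEPs to the corresponding elements of %src.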
void
SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
                                   AllocaInst *AI,
                                   SmallVectorImpl<AllocaInst *> &NewElts) {
  // If this is a memcpy/memmove, construct the other pointer as the
  // appropriate type.  The "Other" pointer is the pointer that goes to memory
  // that doesn't have anything to do with the alloca that we are promoting. For
  // memset, this Value* stays null.
  Value *OtherPtr = nullptr;
  unsigned MemAlignment = MI->getAlignment();
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove/memcpy
    if (Inst == MTI->getRawDest())
      OtherPtr = MTI->getRawSource();
    else {
      assert(Inst == MTI->getRawSource());
      OtherPtr = MTI->getRawDest();
    }
  }

  // If there is an other pointer, we want to convert it to the same pointer
  // type as AI has, so we can GEP through it safely.
  if (OtherPtr) {
    unsigned AddrSpace =
      cast<PointerType>(OtherPtr->getType())->getAddressSpace();

    // Remove bitcasts and all-zero GEPs from OtherPtr.  This is an
    // optimization, but it's also required to detect the corner case where
    // both pointer operands are referencing the same memory, and where
    // OtherPtr may be a bitcast or GEP that is currently being rewritten.
    // (This function is only called for mem intrinsics that access the whole
    // aggregate, so non-zero GEPs are not an issue here.)
    OtherPtr = OtherPtr->stripPointerCasts();

    // Copying the alloca to itself is a no-op: just delete it.
    if (OtherPtr == AI || OtherPtr == NewElts[0]) {
      // This code will run twice for a no-op memcpy -- once for each operand.
      // Put only one reference to MI on the DeadInsts list.
      for (SmallVectorImpl<Value *>::const_iterator I = DeadInsts.begin(),
             E = DeadInsts.end(); I != E; ++I)
        if (*I == MI) return;
      DeadInsts.push_back(MI);
      return;
    }

    // If the pointer is not the right type, insert a bitcast to the right
    // type.
    Type *NewTy =
      PointerType::get(AI->getType()->getElementType(), AddrSpace);

    if (OtherPtr->getType() != NewTy)
      OtherPtr = new BitCastInst(OtherPtr, NewTy, OtherPtr->getName(), MI);
  }

  // Process each element of the aggregate.
  bool SROADest = MI->getRawDest() == Inst;

  Constant *Zero = Constant::getNullValue(Type::getInt32Ty(MI->getContext()));

  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // If this is a memcpy/memmove, emit a GEP of the other element address.
    Value *OtherElt = nullptr;
    unsigned OtherEltAlign = MemAlignment;

    if (OtherPtr) {
      Value *Idx[2] = { Zero,
                      ConstantInt::get(Type::getInt32Ty(MI->getContext()), i) };
      OtherElt = GetElementPtrInst::CreateInBounds(OtherPtr, Idx,
                                              OtherPtr->getName()+"."+Twine(i),
                                                   MI);
      uint64_t EltOffset;
      PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType());
      Type *OtherTy = OtherPtrTy->getElementType();
      if (StructType *ST = dyn_cast<StructType>(OtherTy)) {
        EltOffset = DL->getStructLayout(ST)->getElementOffset(i);
      } else {
        Type *EltTy = cast<SequentialType>(OtherTy)->getElementType();
        EltOffset = DL->getTypeAllocSize(EltTy)*i;
      }

      // The alignment of the other pointer is the guaranteed alignment of the
      // element, which is affected by both the known alignment of the whole
      // mem intrinsic and the alignment of the element.  If the alignment of
      // the memcpy (f.e.) is 32 but the element is at a 4-byte offset, then the
      // known alignment is just 4 bytes.
      OtherEltAlign = (unsigned)MinAlign(OtherEltAlign, EltOffset);
    }

    Value *EltPtr = NewElts[i];
    Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();

    // If we got down to a scalar, insert a load or store as appropriate.
    if (EltTy->isSingleValueType()) {
      if (isa<MemTransferInst>(MI)) {
        if (SROADest) {
          // From Other to Alloca.
          Value *Elt = new LoadInst(OtherElt, "tmp", false, OtherEltAlign, MI);
          new StoreInst(Elt, EltPtr, MI);
        } else {
          // From Alloca to Other.
          Value *Elt = new LoadInst(EltPtr, "tmp", MI);
          new StoreInst(Elt, OtherElt, false, OtherEltAlign, MI);
        }
        continue;
      }
      assert(isa<MemSetInst>(MI));

      // If the stored element is zero (common case), just store a null
      // constant.
      Constant *StoreVal;
      if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getArgOperand(1))) {
        if (CI->isZero()) {
          StoreVal = Constant::getNullValue(EltTy);  // 0.0, null, 0, <0,0>
        } else {
          // If EltTy is a vector type, get the element type.
          Type *ValTy = EltTy->getScalarType();

          // Construct an integer with the right value.
          unsigned EltSize = DL->getTypeSizeInBits(ValTy);
          APInt OneVal(EltSize, CI->getZExtValue());
          APInt TotalVal(OneVal);
          // Set each byte.
          for (unsigned i = 0; 8*i < EltSize; ++i) {
            TotalVal = TotalVal.shl(8);
            TotalVal |= OneVal;
          }
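          // Worked example: a memset byte of 0xAB into an i32 element
          // produces OneVal = 0xAB and, after the shifts above,
          // TotalVal = 0xABABABAB (every byte set to the memset value).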

          // Convert the integer value to the appropriate type.
          StoreVal = ConstantInt::get(CI->getContext(), TotalVal);
          if (ValTy->isPointerTy())
            StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
          else if (ValTy->isFloatingPointTy())
            StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
          assert(StoreVal->getType() == ValTy && "Type mismatch!");

          // If the requested value was a vector constant, create it.
          if (EltTy->isVectorTy()) {
            unsigned NumElts = cast<VectorType>(EltTy)->getNumElements();
            StoreVal = ConstantVector::getSplat(NumElts, StoreVal);
          }
        }
        new StoreInst(StoreVal, EltPtr, MI);
        continue;
      }
      // Otherwise, if we're storing a byte variable, use a memset call for
      // this element.
    }

    unsigned EltSize = DL->getTypeAllocSize(EltTy);
    if (!EltSize)
      continue;

    IRBuilder<> Builder(MI);

    // Finally, insert the meminst for this element.
    if (isa<MemSetInst>(MI)) {
      Builder.CreateMemSet(EltPtr, MI->getArgOperand(1), EltSize,
                           MI->isVolatile());
    } else {
      assert(isa<MemTransferInst>(MI));
      Value *Dst = SROADest ? EltPtr : OtherElt;  // Dest ptr
      Value *Src = SROADest ? OtherElt : EltPtr;  // Src ptr

      if (isa<MemCpyInst>(MI))
        Builder.CreateMemCpy(Dst, Src, EltSize, OtherEltAlign,
                             MI->isVolatile());
      else
        Builder.CreateMemMove(Dst, Src, EltSize, OtherEltAlign,
                              MI->isVolatile());
    }
  }
  DeadInsts.push_back(MI);
}

/// RewriteStoreUserOfWholeAlloca - We found a store of an integer that
/// overwrites the entire allocation.  Extract out the pieces of the stored
/// integer and store them individually.
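///
/// Illustrative result (IR sketch, little-endian, names hypothetical):
/// storing i64 %v over an alloca split from { i32, i32 } becomes
///   %lo = trunc i64 %v to i32            ; stored to %A.0
///   %sh = lshr i64 %v, 32
///   %hi = trunc i64 %sh to i32           ; stored to %A.1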
   2327 void
   2328 SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
   2329                                     SmallVectorImpl<AllocaInst *> &NewElts) {
   2330   // Extract each element out of the integer according to its structure offset
   2331   // and store the element value to the individual alloca.
   2332   Value *SrcVal = SI->getOperand(0);
   2333   Type *AllocaEltTy = AI->getAllocatedType();
   2334   uint64_t AllocaSizeBits = DL->getTypeAllocSizeInBits(AllocaEltTy);
   2335 
   2336   IRBuilder<> Builder(SI);
   2337 
   2338   // Handle tail padding by extending the operand
   2339   if (DL->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
   2340     SrcVal = Builder.CreateZExt(SrcVal,
   2341                             IntegerType::get(SI->getContext(), AllocaSizeBits));
   2342 
   2343   DEBUG(dbgs() << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << '\n' << *SI
   2344                << '\n');
   2345 
   2346   // There are two forms here: AI could be an array or struct.  Both cases
   2347   // have different ways to compute the element offset.
   2348   if (StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
   2349     const StructLayout *Layout = DL->getStructLayout(EltSTy);
   2350 
   2351     for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
   2352       // Get the number of bits to shift SrcVal to get the value.
   2353       Type *FieldTy = EltSTy->getElementType(i);
   2354       uint64_t Shift = Layout->getElementOffsetInBits(i);
   2355 
   2356       if (DL->isBigEndian())
   2357         Shift = AllocaSizeBits-Shift-DL->getTypeAllocSizeInBits(FieldTy);

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = Builder.CreateLShr(EltVal, ShiftVal, "sroa.store.elt");
      }

      // Truncate down to an integer of the right size.
      uint64_t FieldSizeBits = DL->getTypeSizeInBits(FieldTy);

      // Ignore zero-sized fields like {}; they obviously contain no data.
      if (FieldSizeBits == 0) continue;

      if (FieldSizeBits != AllocaSizeBits)
        EltVal = Builder.CreateTrunc(EltVal,
                             IntegerType::get(SI->getContext(), FieldSizeBits));
      Value *DestField = NewElts[i];
      if (EltVal->getType() == FieldTy) {
        // Storing to an integer field of this size, just do it.
      } else if (FieldTy->isFloatingPointTy() || FieldTy->isVectorTy()) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = Builder.CreateBitCast(EltVal, FieldTy);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        DestField = Builder.CreateBitCast(DestField,
                                     PointerType::getUnqual(EltVal->getType()));
      }
      new StoreInst(EltVal, DestField, SI);
    }

  } else {
    ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
    Type *ArrayEltTy = ATy->getElementType();
    // ElementOffset is the bit stride between consecutive elements (the alloc
    // size, including padding); ElementSizeBits is the data size alone.
    uint64_t ElementOffset = DL->getTypeAllocSizeInBits(ArrayEltTy);
    uint64_t ElementSizeBits = DL->getTypeSizeInBits(ArrayEltTy);

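    // On big-endian targets element 0 occupies the most significant bits, so
    // the walk starts at the top of the integer and steps down; on
    // little-endian targets it starts at bit 0 and steps up.  E.g.
    // (illustrative), for [2 x i32] stored as an i64, big-endian uses shifts
    // of 32 then 0, little-endian 0 then 32.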
    uint64_t Shift;

    if (DL->isBigEndian())
      Shift = AllocaSizeBits - ElementOffset;
    else
      Shift = 0;

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Ignore zero-sized elements like {}; they obviously contain no data.
      if (ElementSizeBits == 0) continue;

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = Builder.CreateLShr(EltVal, ShiftVal, "sroa.store.elt");
      }

      // Truncate down to an integer of the right size.
      if (ElementSizeBits != AllocaSizeBits)
        EltVal = Builder.CreateTrunc(EltVal,
                                     IntegerType::get(SI->getContext(),
                                                      ElementSizeBits));
      Value *DestField = NewElts[i];
      if (EltVal->getType() == ArrayEltTy) {
        // Storing to an integer element of this size, just do it.
      } else if (ArrayEltTy->isFloatingPointTy() ||
                 ArrayEltTy->isVectorTy()) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = Builder.CreateBitCast(EltVal, ArrayEltTy);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        DestField = Builder.CreateBitCast(DestField,
                                     PointerType::getUnqual(EltVal->getType()));
      }
      new StoreInst(EltVal, DestField, SI);

      // Step to the next element's bit position.
      if (DL->isBigEndian())
        Shift -= ElementOffset;
      else
        Shift += ElementOffset;
    }
  }

  DeadInsts.push_back(SI);
}

/// RewriteLoadUserOfWholeAlloca - We found a load of the entire allocation to
/// an integer.  Load the individual pieces to form the aggregate value.
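///
/// A minimal sketch of the intended rewrite (illustrative IR only; %A.0 and
/// %A.1 stand for the element allocas of a split {i32, i32} on a
/// little-endian target):
///   %v = load i64* %A            ; original whole-alloca load
/// becomes roughly:
///   %lo = load i32* %A.0
///   %hi = load i32* %A.1
///   %lo.z = zext i32 %lo to i64
///   %hi.z = zext i32 %hi to i64
///   %hi.sh = shl i64 %hi.z, 32
///   %val = or i64 %hi.sh, %lo.z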
void
SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
                                   SmallVectorImpl<AllocaInst *> &NewElts) {
  // Extract each element out of the NewElts according to its structure offset
  // and form the result value.
  Type *AllocaEltTy = AI->getAllocatedType();
  uint64_t AllocaSizeBits = DL->getTypeAllocSizeInBits(AllocaEltTy);

  DEBUG(dbgs() << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << '\n' << *LI
               << '\n');

  // There are two forms here: AI could be an array or struct.  Both cases
  // have different ways to compute the element offset.
  const StructLayout *Layout = nullptr;
  uint64_t ArrayEltBitOffset = 0;
  if (StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    Layout = DL->getStructLayout(EltSTy);
  } else {
    Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
    ArrayEltBitOffset = DL->getTypeAllocSizeInBits(ArrayEltTy);
  }

  Value *ResultVal =
    Constant::getNullValue(IntegerType::get(LI->getContext(), AllocaSizeBits));

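  // Accumulate the elements into ResultVal: each piece is loaded, widened to
  // the full alloca width, shifted to its bit offset, and or'd in.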
  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // Load the value from the alloca.  If the NewElt is an aggregate, cast
    // the pointer to an integer of the same size before doing the load.
    Value *SrcField = NewElts[i];
    Type *FieldTy =
      cast<PointerType>(SrcField->getType())->getElementType();
    uint64_t FieldSizeBits = DL->getTypeSizeInBits(FieldTy);

    // Ignore zero-sized fields like {}; they obviously contain no data.
    if (FieldSizeBits == 0) continue;

    IntegerType *FieldIntTy = IntegerType::get(LI->getContext(),
                                               FieldSizeBits);
    if (!FieldTy->isIntegerTy() && !FieldTy->isFloatingPointTy() &&
        !FieldTy->isVectorTy())
      SrcField = new BitCastInst(SrcField,
                                 PointerType::getUnqual(FieldIntTy),
                                 "", LI);
    SrcField = new LoadInst(SrcField, "sroa.load.elt", LI);

    // If SrcField is an fp or vector type of the right size that isn't an
    // integer, bitcast it to an integer so we can shift it.
    if (SrcField->getType() != FieldIntTy)
      SrcField = new BitCastInst(SrcField, FieldIntTy, "", LI);

    // Zero extend the field to be the same size as the final alloca so that
    // we can shift and insert it.
    if (SrcField->getType() != ResultVal->getType())
      SrcField = new ZExtInst(SrcField, ResultVal->getType(), "", LI);

    // Determine the number of bits to shift SrcField.
    uint64_t Shift;
    if (Layout) // Struct case.
      Shift = Layout->getElementOffsetInBits(i);
    else  // Array case.
      Shift = i * ArrayEltBitOffset;

    // As in the store case, big-endian targets keep field 0 in the most
    // significant bits.
    if (DL->isBigEndian())
      Shift = AllocaSizeBits - Shift - FieldIntTy->getBitWidth();

    if (Shift) {
      Value *ShiftVal = ConstantInt::get(SrcField->getType(), Shift);
      SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI);
    }

    // Don't create an 'or x, 0' on the first iteration.
    if (!isa<Constant>(ResultVal) ||
        !cast<Constant>(ResultVal)->isNullValue())
      ResultVal = BinaryOperator::CreateOr(SrcField, ResultVal, "", LI);
    else
      ResultVal = SrcField;
  }

  // Handle tail padding by truncating the result.
  if (DL->getTypeSizeInBits(LI->getType()) != AllocaSizeBits)
    ResultVal = new TruncInst(ResultVal, LI->getType(), "", LI);

  LI->replaceAllUsesWith(ResultVal);
  DeadInsts.push_back(LI);
}

/// HasPadding - Return true if the specified type has any structure or
/// alignment padding in between the elements that would be split apart
/// by SROA; return false otherwise.
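///
/// E.g. (illustrative, assuming a layout where i32 is 4-byte aligned):
/// {i8, i32} occupies 64 bits but only 40 of them carry data; bits 8-31 are
/// alignment padding before the i32, so HasPadding returns true for it.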
static bool HasPadding(Type *Ty, const DataLayout &DL) {
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Ty = ATy->getElementType();
    return DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty);
  }

  // SROA currently handles only Arrays and Structs.
  StructType *STy = cast<StructType>(Ty);
  const StructLayout *SL = DL.getStructLayout(STy);
  unsigned PrevFieldBitOffset = 0;
  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
    unsigned FieldBitOffset = SL->getElementOffsetInBits(i);

    // Check to see if there is any padding between this element and the
    // previous one.
    if (i) {
      unsigned PrevFieldEnd =
        PrevFieldBitOffset + DL.getTypeSizeInBits(STy->getElementType(i-1));
      if (PrevFieldEnd < FieldBitOffset)
        return true;
    }
    PrevFieldBitOffset = FieldBitOffset;
  }
  // Check for tail padding.
  if (unsigned EltCount = STy->getNumElements()) {
    unsigned PrevFieldEnd = PrevFieldBitOffset +
      DL.getTypeSizeInBits(STy->getElementType(EltCount-1));
    if (PrevFieldEnd < SL->getSizeInBits())
      return true;
  }
  return false;
}

/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of an
/// aggregate can be broken down into elements.  Return false if not, true if
/// it is safe to transform.
bool SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) {
  // Loop over the use list of the alloca.  We can only transform it if all of
  // the users are safe to transform.
  AllocaInfo Info(AI);

  isSafeForScalarRepl(AI, 0, Info);
  if (Info.isUnsafe) {
    DEBUG(dbgs() << "Cannot transform: " << *AI << '\n');
    return false;
  }

  // Okay, we know all the users are promotable.  If the aggregate is both a
  // memcpy source and destination, we have to be careful.  In particular,
  // the memcpy could be moving bytes that live in the structure padding of
  // the LLVM type but are actually used by the program.  In these cases, we
  // refuse to promote the struct.
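  //
  // E.g. (illustrative C): for struct S { char c; int i; }, a memcpy of a
  // whole S also copies the padding bytes between 'c' and 'i'; if the
  // program smuggles data through those bytes (say, via a union or a raw
  // byte view of S), splitting S into scalars would silently drop it.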
  if (Info.isMemCpySrc && Info.isMemCpyDst &&
      HasPadding(AI->getAllocatedType(), *DL))
    return false;

  // If the alloca is never accessed via just *part* of it, but only via
  // whole-alloca loads and stores, then we should use ConvertToScalarInfo to
  // promote it instead of promoting one piece at a time and inserting fission
  // and fusion code.
  if (!Info.hasSubelementAccess && Info.hasALoadOrStore) {
    // If the struct/array just has one element, use basic SRoA.
    if (StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
      if (ST->getNumElements() > 1) return false;
    } else {
      if (cast<ArrayType>(AI->getAllocatedType())->getNumElements() > 1)
        return false;
    }
  }

  return true;
}