//===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This transformation implements the well-known scalar replacement of
/// aggregates transformation. It tries to identify promotable elements of an
/// aggregate alloca, and promote them to registers. It will also try to
/// convert uses of an element (or set of elements) of an alloca into a vector
/// or bitfield-style integer scalar if appropriate.
///
/// It works to do this with minimal slicing of the alloca so that regions
/// which are merely transferred in and out of external memory remain unchanged
/// and are not decomposed to scalar code.
///
/// Because this also performs alloca promotion, it can be thought of as also
/// serving the purpose of SSA formation. The algorithm iterates on the
/// function until all opportunities for promotion have been realized.
///
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "sroa"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/DIBuilder.h"
#include "llvm/DebugInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/InstVisitor.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;

STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed");
STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca");
STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten");
STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition");
STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced");
STATISTIC(NumPromoted, "Number of allocas promoted to SSA values");
STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion");
STATISTIC(NumDeleted, "Number of instructions deleted");
STATISTIC(NumVectorized, "Number of vectorized aggregates");

/// Hidden option to force the pass to not use DomTree and mem2reg, instead
/// forming SSA values through the SSAUpdater infrastructure.
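/// (For example, this can be enabled by passing -force-ssa-updater to opt
/// when running opt -sroa.)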
static cl::opt<bool>
ForceSSAUpdater("force-ssa-updater", cl::init(false), cl::Hidden);

namespace {
/// \brief A custom IRBuilder inserter which prefixes all names if they are
/// preserved.
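///
/// For example (illustrative): with the prefix set to "x.", an instruction
/// that would be named "y" is instead inserted as "x.y"; trivially empty
/// names are left untouched.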
template <bool preserveNames = true>
class IRBuilderPrefixedInserter :
    public IRBuilderDefaultInserter<preserveNames> {
  std::string Prefix;

public:
  void SetNamePrefix(const Twine &P) { Prefix = P.str(); }

protected:
  void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB,
                    BasicBlock::iterator InsertPt) const {
    IRBuilderDefaultInserter<preserveNames>::InsertHelper(
        I, Name.isTriviallyEmpty() ? Name : Prefix + Name, BB, InsertPt);
  }
};

// Specialization for not preserving the name is trivial.
template <>
class IRBuilderPrefixedInserter<false> :
    public IRBuilderDefaultInserter<false> {
public:
  void SetNamePrefix(const Twine &P) {}
};

/// \brief Provide a typedef for IRBuilder that drops names in release builds.
#ifndef NDEBUG
typedef llvm::IRBuilder<true, ConstantFolder,
                        IRBuilderPrefixedInserter<true> > IRBuilderTy;
#else
typedef llvm::IRBuilder<false, ConstantFolder,
                        IRBuilderPrefixedInserter<false> > IRBuilderTy;
#endif
}

namespace {
/// \brief A used slice of an alloca.
///
/// This structure represents a slice of an alloca used by some instruction. It
/// stores both the begin and end offsets of this use, a pointer to the use
/// itself, and a flag indicating whether we can classify the use as splittable
/// or not when forming partitions of the alloca.
class Slice {
  /// \brief The beginning offset of the range.
  uint64_t BeginOffset;

  /// \brief The ending offset, not included in the range.
  uint64_t EndOffset;

  /// \brief Storage for both the use of this slice and whether it can be
  /// split.
  PointerIntPair<Use *, 1, bool> UseAndIsSplittable;

public:
  Slice() : BeginOffset(), EndOffset() {}
  Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable)
      : BeginOffset(BeginOffset), EndOffset(EndOffset),
        UseAndIsSplittable(U, IsSplittable) {}

  uint64_t beginOffset() const { return BeginOffset; }
  uint64_t endOffset() const { return EndOffset; }

  bool isSplittable() const { return UseAndIsSplittable.getInt(); }
  void makeUnsplittable() { UseAndIsSplittable.setInt(false); }

  Use *getUse() const { return UseAndIsSplittable.getPointer(); }

  bool isDead() const { return getUse() == 0; }
  void kill() { UseAndIsSplittable.setPointer(0); }

  /// \brief Support for ordering ranges.
  ///
  /// This provides an ordering over ranges such that start offsets are
  /// always increasing, and within equal start offsets, the end offsets are
  /// decreasing. Thus the spanning range comes first in a cluster with the
  /// same start position.
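  ///
  /// For example (illustrative), slices with (begin, end, splittable) of
  /// (0, 16, false), (0, 16, true), (0, 8, true), and (4, 12, false) sort
  /// in exactly that order: begin offsets ascend, unsplittable slices
  /// precede splittable ones at the same begin offset, and larger end
  /// offsets come first.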
  bool operator<(const Slice &RHS) const {
    if (beginOffset() < RHS.beginOffset()) return true;
    if (beginOffset() > RHS.beginOffset()) return false;
    if (isSplittable() != RHS.isSplittable()) return !isSplittable();
    if (endOffset() > RHS.endOffset()) return true;
    return false;
  }

  /// \brief Support comparison with a single offset to allow binary searches.
  friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS,
                                              uint64_t RHSOffset) {
    return LHS.beginOffset() < RHSOffset;
  }
  friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
                                              const Slice &RHS) {
    return LHSOffset < RHS.beginOffset();
  }

  bool operator==(const Slice &RHS) const {
    return isSplittable() == RHS.isSplittable() &&
           beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset();
  }
  bool operator!=(const Slice &RHS) const { return !operator==(RHS); }
};
} // end anonymous namespace

namespace llvm {
template <typename T> struct isPodLike;
template <> struct isPodLike<Slice> {
   static const bool value = true;
};
}

namespace {
/// \brief Representation of the alloca slices.
///
/// This class represents the slices of an alloca which are formed by its
/// various uses. If a pointer escapes, we can't fully build a representation
/// for the slices used and we reflect that in this structure. The uses are
/// stored, sorted by increasing beginning offset and with unsplittable slices
/// starting at a particular offset before splittable slices.
class AllocaSlices {
public:
  /// \brief Construct the slices of a particular alloca.
  AllocaSlices(const DataLayout &DL, AllocaInst &AI);

  /// \brief Test whether a pointer to the allocation escapes our analysis.
  ///
  /// If this is true, the slices are never fully built and should be
  /// ignored.
  bool isEscaped() const { return PointerEscapingInstr; }

  /// \brief Support for iterating over the slices.
  /// @{
  typedef SmallVectorImpl<Slice>::iterator iterator;
  iterator begin() { return Slices.begin(); }
  iterator end() { return Slices.end(); }

  typedef SmallVectorImpl<Slice>::const_iterator const_iterator;
  const_iterator begin() const { return Slices.begin(); }
  const_iterator end() const { return Slices.end(); }
  /// @}

  /// \brief Allow iterating the dead users for this alloca.
  ///
  /// These are instructions which will never actually use the alloca as they
  /// are outside the allocated range. They are safe to replace with undef and
  /// delete.
  /// @{
  typedef SmallVectorImpl<Instruction *>::const_iterator dead_user_iterator;
  dead_user_iterator dead_user_begin() const { return DeadUsers.begin(); }
  dead_user_iterator dead_user_end() const { return DeadUsers.end(); }
  /// @}

  /// \brief Allow iterating the dead expressions referring to this alloca.
  ///
  /// These are operands which cannot actually be used to refer to the alloca
  /// as they are outside its range and the user doesn't correct for that.
  /// These mostly consist of PHI node inputs and the like which we just need
  /// to replace with undef.
  /// @{
  typedef SmallVectorImpl<Use *>::const_iterator dead_op_iterator;
  dead_op_iterator dead_op_begin() const { return DeadOperands.begin(); }
  dead_op_iterator dead_op_end() const { return DeadOperands.end(); }
  /// @}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream &OS, const_iterator I, StringRef Indent = "  ") const;
  void printSlice(raw_ostream &OS, const_iterator I,
                  StringRef Indent = "  ") const;
  void printUse(raw_ostream &OS, const_iterator I,
                StringRef Indent = "  ") const;
  void print(raw_ostream &OS) const;
  void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump(const_iterator I) const;
  void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump() const;
#endif

private:
  template <typename DerivedT, typename RetT = void> class BuilderBase;
  class SliceBuilder;
  friend class AllocaSlices::SliceBuilder;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// \brief Handle to alloca instruction to simplify method interfaces.
  AllocaInst &AI;
#endif

  /// \brief The instruction responsible for this alloca not having a known set
  /// of slices.
  ///
  /// When an instruction (potentially) escapes the pointer to the alloca, we
  /// store a pointer to that here and abort trying to form slices of the
  /// alloca. This will be null if the alloca slices are analyzed successfully.
  Instruction *PointerEscapingInstr;

  /// \brief The slices of the alloca.
  ///
  /// We store a vector of the slices formed by uses of the alloca here. This
  /// vector is sorted by increasing begin offset, and then the unsplittable
  /// slices before the splittable ones. See the Slice inner class for more
  /// details.
  SmallVector<Slice, 8> Slices;

  /// \brief Instructions which will become dead if we rewrite the alloca.
  ///
  /// Note that these are not separated by slice. This is because we expect an
  /// alloca to be completely rewritten or not rewritten at all. If rewritten,
  /// all these instructions can simply be removed and replaced with undef as
  /// they come from outside of the allocated space.
  SmallVector<Instruction *, 8> DeadUsers;

  /// \brief Operands which will become dead if we rewrite the alloca.
  ///
  /// These are operands that in their particular use can be replaced with
  /// undef when we rewrite the alloca. These show up in out-of-bounds inputs
  /// to PHI nodes and the like. They aren't entirely dead (there might be
  /// a GEP back into the bounds using it elsewhere), nor is the PHI, but we
  /// want to swap this particular input for undef to simplify the use lists of
  /// the alloca.
  SmallVector<Use *, 8> DeadOperands;
};
}

static Value *foldSelectInst(SelectInst &SI) {
  // If the condition being selected on is a constant or the same value is
  // being selected between, fold the select. Yes this does (rarely) happen
  // early on.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
    return SI.getOperand(1+CI->isZero());
  if (SI.getOperand(1) == SI.getOperand(2))
    return SI.getOperand(1);

  return 0;
}
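
// For example (illustrative): foldSelectInst returns %a for both
// "select i1 true, i32* %a, i32* %b" and "select i1 %c, i32* %a, i32* %a",
// and null for any select it cannot fold.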

/// \brief Builder for the alloca slices.
///
/// This class builds a set of alloca slices by recursively visiting the uses
/// of an alloca and making a slice for each load and store at each offset.
class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> {
  friend class PtrUseVisitor<SliceBuilder>;
  friend class InstVisitor<SliceBuilder>;
  typedef PtrUseVisitor<SliceBuilder> Base;

  const uint64_t AllocSize;
  AllocaSlices &S;

  SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap;
  SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes;

  /// \brief Set to de-duplicate dead instructions found in the use walk.
  SmallPtrSet<Instruction *, 4> VisitedDeadInsts;

public:
  SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &S)
      : PtrUseVisitor<SliceBuilder>(DL),
        AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())), S(S) {}

private:
  void markAsDead(Instruction &I) {
    if (VisitedDeadInsts.insert(&I))
      S.DeadUsers.push_back(&I);
  }

  void insertUse(Instruction &I, const APInt &Offset, uint64_t Size,
                 bool IsSplittable = false) {
    // Completely skip uses which have a zero size or start either before or
    // past the end of the allocation.
    if (Size == 0 || Offset.isNegative() || Offset.uge(AllocSize)) {
      DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset
                   << " which has zero size or starts outside of the "
                   << AllocSize << " byte alloca:\n"
                   << "    alloca: " << S.AI << "\n"
                   << "       use: " << I << "\n");
      return markAsDead(I);
    }

    uint64_t BeginOffset = Offset.getZExtValue();
    uint64_t EndOffset = BeginOffset + Size;

    // Clamp the end offset to the end of the allocation. Note that this is
    // formulated to handle even the case where "BeginOffset + Size" overflows.
    // This may appear superficially to be something we could ignore entirely,
    // but that is not so! There may be widened loads or PHI-node uses where
    // some instructions are dead but not others. We can't completely ignore
    // them, and so have to record at least the information here.
    assert(AllocSize >= BeginOffset); // Established above.
    if (Size > AllocSize - BeginOffset) {
      DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
                   << " to remain within the " << AllocSize << " byte alloca:\n"
                   << "    alloca: " << S.AI << "\n"
                   << "       use: " << I << "\n");
      EndOffset = AllocSize;
    }

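    // For example (illustrative): a 16-byte use at offset 8 of a 16-byte
    // alloca is recorded as the clamped slice [8, 16).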
    S.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable));
  }

  void visitBitCastInst(BitCastInst &BC) {
    if (BC.use_empty())
      return markAsDead(BC);

    return Base::visitBitCastInst(BC);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    if (GEPI.use_empty())
      return markAsDead(GEPI);

    return Base::visitGetElementPtrInst(GEPI);
  }

  void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset,
                         uint64_t Size, bool IsVolatile) {
    // We allow splitting of loads and stores where the type is an integer type
    // and covers the entire alloca. This prevents us from splitting
    // over-eagerly.
    // FIXME: In the great blue eventually, we should eagerly split all integer
    // loads and stores, and then have a separate step that merges adjacent
    // alloca partitions into a single partition suitable for integer widening.
    // Or we should skip the merge step and rely on GVN and other passes to
    // merge adjacent loads and stores that survive mem2reg.
    bool IsSplittable =
        Ty->isIntegerTy() && !IsVolatile && Offset == 0 && Size >= AllocSize;
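    // For example (illustrative): an i64 load at offset 0 of an 8-byte alloca
    // is splittable, while any float, volatile, or partial access is not.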

    insertUse(I, Offset, Size, IsSplittable);
  }

  void visitLoadInst(LoadInst &LI) {
    assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
           "All simple FCA loads should have been pre-split");

    if (!IsOffsetKnown)
      return PI.setAborted(&LI);

    uint64_t Size = DL.getTypeStoreSize(LI.getType());
    return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile());
  }

  void visitStoreInst(StoreInst &SI) {
    Value *ValOp = SI.getValueOperand();
    if (ValOp == *U)
      return PI.setEscapedAndAborted(&SI);
    if (!IsOffsetKnown)
      return PI.setAborted(&SI);

    uint64_t Size = DL.getTypeStoreSize(ValOp->getType());

    // If this memory access can be shown to *statically* extend outside the
    // bounds of the allocation, its behavior is undefined, so simply
    // ignore it. Note that this is more strict than the generic clamping
    // behavior of insertUse. We also try to handle cases which might run the
    // risk of overflow.
    // FIXME: We should instead consider the pointer to have escaped if this
    // function is being instrumented for addressing bugs or race conditions.
    if (Offset.isNegative() || Size > AllocSize ||
        Offset.ugt(AllocSize - Size)) {
      DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @" << Offset
                   << " which extends past the end of the " << AllocSize
                   << " byte alloca:\n"
                   << "    alloca: " << S.AI << "\n"
                   << "       use: " << SI << "\n");
      return markAsDead(SI);
    }

    assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
           "All simple FCA stores should have been pre-split");
    handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile());
  }


  void visitMemSetInst(MemSetInst &II) {
    assert(II.getRawDest() == *U && "Pointer use is not the destination?");
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    if ((Length && Length->getValue() == 0) ||
        (IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize)))
      // Zero-length memset intrinsics can be ignored entirely.
      return markAsDead(II);

    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    insertUse(II, Offset,
              Length ? Length->getLimitedValue()
                     : AllocSize - Offset.getLimitedValue(),
              (bool)Length);
  }

  void visitMemTransferInst(MemTransferInst &II) {
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    if ((Length && Length->getValue() == 0) ||
        (IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize)))
      // Zero-length mem transfer intrinsics can be ignored entirely.
      return markAsDead(II);

    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    uint64_t RawOffset = Offset.getLimitedValue();
    uint64_t Size = Length ? Length->getLimitedValue()
                           : AllocSize - RawOffset;

    // Check for the special case where the same exact value is used for both
    // source and dest.
    if (*U == II.getRawDest() && *U == II.getRawSource()) {
      // For non-volatile transfers this is a no-op.
      if (!II.isVolatile())
        return markAsDead(II);

      return insertUse(II, Offset, Size, /*IsSplittable=*/false);
    }

    // If we have seen both source and destination for a mem transfer, then
    // they both point to the same alloca.
    bool Inserted;
    SmallDenseMap<Instruction *, unsigned>::iterator MTPI;
    llvm::tie(MTPI, Inserted) =
        MemTransferSliceMap.insert(std::make_pair(&II, S.Slices.size()));
    unsigned PrevIdx = MTPI->second;
    if (!Inserted) {
      Slice &PrevP = S.Slices[PrevIdx];

      // Check if the begin offsets match and this is a non-volatile transfer.
      // In that case, we can completely elide the transfer.
      if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) {
        PrevP.kill();
        return markAsDead(II);
      }

      // Otherwise we have an offset transfer within the same alloca. We can't
      // split those.
      PrevP.makeUnsplittable();
    }

    // Insert the use now that we've fixed up the splittable nature.
    insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length);

    // Check that we ended up with a valid index in the map.
    assert(S.Slices[PrevIdx].getUse()->getUser() == &II &&
           "Map index doesn't point back to a slice with this user.");
  }

  // Disable SRoA for any intrinsics except for lifetime invariants.
  // FIXME: What about debug intrinsics? This matches old behavior, but
  // doesn't make sense.
  void visitIntrinsicInst(IntrinsicInst &II) {
    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    if (II.getIntrinsicID() == Intrinsic::lifetime_start ||
        II.getIntrinsicID() == Intrinsic::lifetime_end) {
      ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
      uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(),
                               Length->getLimitedValue());
      insertUse(II, Offset, Size, true);
      return;
    }

    Base::visitIntrinsicInst(II);
  }

  Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) {
    // We consider any PHI or select that results in a direct load or store of
    // the same offset to be a viable use for slicing purposes. These uses
    // are considered unsplittable and the size is the maximum loaded or stored
    // size.
    SmallPtrSet<Instruction *, 4> Visited;
    SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses;
    Visited.insert(Root);
    Uses.push_back(std::make_pair(cast<Instruction>(*U), Root));
    // If there are no loads or stores, the access is dead. We mark that as
    // a size zero access.
    Size = 0;
    do {
      Instruction *I, *UsedI;
      llvm::tie(UsedI, I) = Uses.pop_back_val();

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        Size = std::max(Size, DL.getTypeStoreSize(LI->getType()));
        continue;
      }
      if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
        Value *Op = SI->getOperand(0);
        if (Op == UsedI)
          return SI;
        Size = std::max(Size, DL.getTypeStoreSize(Op->getType()));
        continue;
      }

      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
        if (!GEP->hasAllZeroIndices())
          return GEP;
      } else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) &&
                 !isa<SelectInst>(I)) {
        return I;
      }

      for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE;
           ++UI)
        if (Visited.insert(cast<Instruction>(*UI)))
          Uses.push_back(std::make_pair(I, cast<Instruction>(*UI)));
    } while (!Uses.empty());

    return 0;
  }

  void visitPHINode(PHINode &PN) {
    if (PN.use_empty())
      return markAsDead(PN);
    if (!IsOffsetKnown)
      return PI.setAborted(&PN);

    // See if we already have computed info on this node.
    uint64_t &PHISize = PHIOrSelectSizes[&PN];
    if (!PHISize) {
      // This is a new PHI node, check for an unsafe use of the PHI node.
      if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&PN, PHISize))
        return PI.setAborted(UnsafeI);
    }

    // For PHI and select operands outside the alloca, we can't nuke the entire
    // phi or select -- the other side might still be relevant, so we special
    // case them here and use a separate structure to track the operands
    // themselves which should be replaced with undef.
    // FIXME: This should instead be escaped in the event we're instrumenting
    // for address sanitization.
    if ((Offset.isNegative() && (-Offset).uge(PHISize)) ||
        (!Offset.isNegative() && Offset.uge(AllocSize))) {
      S.DeadOperands.push_back(U);
      return;
    }

    insertUse(PN, Offset, PHISize);
  }

  void visitSelectInst(SelectInst &SI) {
    if (SI.use_empty())
      return markAsDead(SI);
    if (Value *Result = foldSelectInst(SI)) {
      if (Result == *U)
        // If the result of the constant fold will be the pointer, recurse
        // through the select as if we had RAUW'ed it.
        enqueueUsers(SI);
      else
        // Otherwise the operand to the select is dead, and we can replace it
        // with undef.
        S.DeadOperands.push_back(U);

      return;
    }
    if (!IsOffsetKnown)
      return PI.setAborted(&SI);

    // See if we already have computed info on this node.
    uint64_t &SelectSize = PHIOrSelectSizes[&SI];
    if (!SelectSize) {
      // This is a new Select, check for an unsafe use of it.
      if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&SI, SelectSize))
        return PI.setAborted(UnsafeI);
    }

    // For PHI and select operands outside the alloca, we can't nuke the entire
    // phi or select -- the other side might still be relevant, so we special
    // case them here and use a separate structure to track the operands
    // themselves which should be replaced with undef.
    // FIXME: This should instead be escaped in the event we're instrumenting
    // for address sanitization.
    if ((Offset.isNegative() && Offset.uge(SelectSize)) ||
        (!Offset.isNegative() && Offset.uge(AllocSize))) {
      S.DeadOperands.push_back(U);
      return;
    }

    insertUse(SI, Offset, SelectSize);
  }

  /// \brief Disable SROA entirely if there are unhandled users of the alloca.
  void visitInstruction(Instruction &I) {
    PI.setAborted(&I);
  }
};

AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
    :
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
      AI(AI),
#endif
      PointerEscapingInstr(0) {
  SliceBuilder PB(DL, AI, *this);
  SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI);
  if (PtrI.isEscaped() || PtrI.isAborted()) {
    // FIXME: We should sink the escape vs. abort info into the caller nicely,
    // possibly by just storing the PtrInfo in the AllocaSlices.
    PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst()
                                                  : PtrI.getAbortingInst();
    assert(PointerEscapingInstr && "Did not track a bad instruction");
    return;
  }

  Slices.erase(std::remove_if(Slices.begin(), Slices.end(),
                              std::mem_fun_ref(&Slice::isDead)),
               Slices.end());

  // Sort the uses. This arranges for the offsets to be in ascending order,
  // and the sizes to be in descending order.
  std::sort(Slices.begin(), Slices.end());
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

void AllocaSlices::print(raw_ostream &OS, const_iterator I,
                         StringRef Indent) const {
  printSlice(OS, I, Indent);
  printUse(OS, I, Indent);
}

void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I,
                              StringRef Indent) const {
  OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")"
     << " slice #" << (I - begin())
     << (I->isSplittable() ? " (splittable)" : "") << "\n";
}

void AllocaSlices::printUse(raw_ostream &OS, const_iterator I,
                            StringRef Indent) const {
  OS << Indent << "  used by: " << *I->getUse()->getUser() << "\n";
}

void AllocaSlices::print(raw_ostream &OS) const {
  if (PointerEscapingInstr) {
    OS << "Can't analyze slices for alloca: " << AI << "\n"
       << "  A pointer to this alloca escaped by:\n"
       << "  " << *PointerEscapingInstr << "\n";
    return;
  }

  OS << "Slices of alloca: " << AI << "\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I)
    print(OS, I);
}

void AllocaSlices::dump(const_iterator I) const { print(dbgs(), I); }
void AllocaSlices::dump() const { print(dbgs()); }

#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

namespace {
/// \brief Implementation of LoadAndStorePromoter for promoting allocas.
///
/// This subclass of LoadAndStorePromoter adds overrides to handle promoting
/// the loads and stores of an alloca instruction, as well as updating its
/// debug information. This is used when a domtree is unavailable and thus
/// mem2reg in its full form can't be used to handle promotion of allocas to
/// scalar values.
class AllocaPromoter : public LoadAndStorePromoter {
  AllocaInst &AI;
  DIBuilder &DIB;

  SmallVector<DbgDeclareInst *, 4> DDIs;
  SmallVector<DbgValueInst *, 4> DVIs;

public:
  AllocaPromoter(const SmallVectorImpl<Instruction*> &Insts, SSAUpdater &S,
                 AllocaInst &AI, DIBuilder &DIB)
    : LoadAndStorePromoter(Insts, S), AI(AI), DIB(DIB) {}

  void run(const SmallVectorImpl<Instruction*> &Insts) {
    // Retain the debug information attached to the alloca for use when
    // rewriting loads and stores.
    if (MDNode *DebugNode = MDNode::getIfExists(AI.getContext(), &AI)) {
      for (Value::use_iterator UI = DebugNode->use_begin(),
                               UE = DebugNode->use_end();
           UI != UE; ++UI)
        if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
          DDIs.push_back(DDI);
        else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(*UI))
          DVIs.push_back(DVI);
    }

    LoadAndStorePromoter::run(Insts);

    // While we have the debug information, clear it off of the alloca. The
    // caller takes care of deleting the alloca.
    while (!DDIs.empty())
      DDIs.pop_back_val()->eraseFromParent();
    while (!DVIs.empty())
      DVIs.pop_back_val()->eraseFromParent();
  }

  virtual bool isInstInList(Instruction *I,
                            const SmallVectorImpl<Instruction*> &Insts) const {
    if (LoadInst *LI = dyn_cast<LoadInst>(I))
      return LI->getOperand(0) == &AI;
    return cast<StoreInst>(I)->getPointerOperand() == &AI;
  }

  virtual void updateDebugInfo(Instruction *Inst) const {
    for (SmallVectorImpl<DbgDeclareInst *>::const_iterator I = DDIs.begin(),
           E = DDIs.end(); I != E; ++I) {
      DbgDeclareInst *DDI = *I;
      if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
        ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
      else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
        ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
    }
    for (SmallVectorImpl<DbgValueInst *>::const_iterator I = DVIs.begin(),
           E = DVIs.end(); I != E; ++I) {
      DbgValueInst *DVI = *I;
      Value *Arg = 0;
      if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        // If an argument is zero extended then use the argument directly. The
        // ZExt may be zapped by an optimization pass in the future.
        if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
          Arg = dyn_cast<Argument>(ZExt->getOperand(0));
        else if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
          Arg = dyn_cast<Argument>(SExt->getOperand(0));
        if (!Arg)
          Arg = SI->getValueOperand();
      } else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        Arg = LI->getPointerOperand();
      } else {
        continue;
      }
      Instruction *DbgVal =
        DIB.insertDbgValueIntrinsic(Arg, 0, DIVariable(DVI->getVariable()),
                                     Inst);
      DbgVal->setDebugLoc(DVI->getDebugLoc());
    }
  }
};
} // end anon namespace


namespace {
/// \brief An optimization pass providing Scalar Replacement of Aggregates.
///
/// This pass takes allocations which can be completely analyzed (that is, they
/// don't escape) and tries to turn them into scalar SSA values. There are
/// a few steps to this process.
///
/// 1) It takes allocations of aggregates and analyzes the ways in which they
///    are used to try to split them into smaller allocations, ideally of
///    a single scalar data type. It will split up memcpy and memset accesses
///    as necessary and try to isolate individual scalar accesses.
/// 2) It will transform accesses into forms which are suitable for SSA value
///    promotion. This can mean replacing a memset with a scalar store of an
///    integer value, or speculating operations on a PHI or select so that it
///    becomes a PHI or select of the results.
/// 3) Finally, this will try to detect a pattern of accesses which map cleanly
///    onto insert and extract operations on a vector value, and convert them to
///    this form. By doing so, it will enable promotion of vector aggregates to
///    SSA vector values.
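///
/// For example (illustrative), given a fully analyzable alloca such as:
///   %a = alloca i32
///   store i32 %v, i32* %a
///   %x = load i32* %a
/// the pass promotes %a so that %x simply becomes %v and the memory
/// operations disappear.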
class SROA : public FunctionPass {
  const bool RequiresDomTree;

  LLVMContext *C;
  const DataLayout *DL;
  DominatorTree *DT;

  /// \brief Worklist of alloca instructions to simplify.
  ///
  /// Each alloca in the function is added to this. Each new alloca formed gets
  /// added to it as well to recursively simplify unless that alloca can be
  /// directly promoted. Finally, each time we rewrite a use of an alloca other
  /// than the one being actively rewritten, we add it back onto the list if
  /// not already present to ensure it is re-visited.
  SetVector<AllocaInst *, SmallVector<AllocaInst *, 16> > Worklist;

  /// \brief A collection of instructions to delete.
  /// We try to batch deletions to simplify code and make things a bit more
  /// efficient.
  SetVector<Instruction *, SmallVector<Instruction *, 8> > DeadInsts;

  /// \brief Post-promotion worklist.
  ///
  /// Sometimes we discover an alloca which has a high probability of becoming
  /// viable for SROA after a round of promotion takes place. In those cases,
  /// the alloca is enqueued here for re-processing.
  ///
  /// Note that we have to be very careful to clear allocas out of this list in
  /// the event they are deleted.
  SetVector<AllocaInst *, SmallVector<AllocaInst *, 16> > PostPromotionWorklist;

  /// \brief A collection of alloca instructions we can directly promote.
  std::vector<AllocaInst *> PromotableAllocas;

  /// \brief A worklist of PHIs to speculate prior to promoting allocas.
  ///
  /// All of these PHIs have been checked for the safety of speculation and by
  /// being speculated will allow promoting allocas currently in the promotable
  /// queue.
  SetVector<PHINode *, SmallVector<PHINode *, 2> > SpeculatablePHIs;

  /// \brief A worklist of select instructions to speculate prior to promoting
  /// allocas.
  ///
  /// All of these select instructions have been checked for the safety of
  /// speculation and by being speculated will allow promoting allocas
  /// currently in the promotable queue.
  SetVector<SelectInst *, SmallVector<SelectInst *, 2> > SpeculatableSelects;

public:
  SROA(bool RequiresDomTree = true)
      : FunctionPass(ID), RequiresDomTree(RequiresDomTree),
        C(0), DL(0), DT(0) {
    initializeSROAPass(*PassRegistry::getPassRegistry());
  }
  bool runOnFunction(Function &F);
  void getAnalysisUsage(AnalysisUsage &AU) const;

  const char *getPassName() const { return "SROA"; }
  static char ID;

private:
  friend class PHIOrSelectSpeculator;
  friend class AllocaSliceRewriter;

  bool rewritePartition(AllocaInst &AI, AllocaSlices &S,
                        AllocaSlices::iterator B, AllocaSlices::iterator E,
                        int64_t BeginOffset, int64_t EndOffset,
                        ArrayRef<AllocaSlices::iterator> SplitUses);
  bool splitAlloca(AllocaInst &AI, AllocaSlices &S);
  bool runOnAlloca(AllocaInst &AI);
  void deleteDeadInstructions(SmallPtrSet<AllocaInst *, 4> &DeletedAllocas);
  bool promoteAllocas(Function &F);
};
}

char SROA::ID = 0;

FunctionPass *llvm::createSROAPass(bool RequiresDomTree) {
  return new SROA(RequiresDomTree);
}

INITIALIZE_PASS_BEGIN(SROA, "sroa", "Scalar Replacement Of Aggregates",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_END(SROA, "sroa", "Scalar Replacement Of Aggregates",
                    false, false)

/// Walk the range of a partitioning looking for a common type to cover this
/// sequence of slices.
static Type *findCommonType(AllocaSlices::const_iterator B,
                            AllocaSlices::const_iterator E,
                            uint64_t EndOffset) {
  Type *Ty = 0;
  for (AllocaSlices::const_iterator I = B; I != E; ++I) {
    Use *U = I->getUse();
    if (isa<IntrinsicInst>(*U->getUser()))
      continue;
    if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset)
      continue;

    Type *UserTy = 0;
    if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser()))
      UserTy = LI->getType();
    else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser()))
      UserTy = SI->getValueOperand()->getType();
    else
      return 0; // Bail if we have weird uses.

    if (IntegerType *ITy = dyn_cast<IntegerType>(UserTy)) {
      // If the type is larger than the partition, skip it. We only encounter
      // this for split integer operations where we want to use the type of the
      // entity causing the split.
      if (ITy->getBitWidth() / 8 > (EndOffset - B->beginOffset()))
        continue;

      // If we have found an integer type use covering the alloca, use that
      // regardless of the other types, as integers are often used for a
      // "bucket of bits" type.
      return ITy;
    }

    if (Ty && Ty != UserTy)
      return 0;

    Ty = UserTy;
  }
  return Ty;
}
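
// For example (illustrative): if one slice loads an i64 covering the whole
// partition, findCommonType returns i64 regardless of other covering types,
// since integers commonly act as a "bucket of bits"; two conflicting
// non-integer types yield null instead.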

/// PHI instructions that use an alloca and are subsequently loaded can be
/// rewritten to load both input pointers in the pred blocks and then PHI the
/// results, allowing the load of the alloca to be promoted.
/// From this:
///   %P2 = phi [i32* %Alloca, i32* %Other]
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   ...
///   %V2 = load i32* %Other
///   ...
///   %V = phi [i32 %V1, i32 %V2]
///
/// We can do this to a select if its only uses are loads and if the operands
/// to the select can be loaded unconditionally.
///
/// FIXME: This should be hoisted into a generic utility, likely in
/// Transforms/Util/Local.h
static bool isSafePHIToSpeculate(PHINode &PN,
                                 const DataLayout *DL = 0) {
  // For now, we can only do this promotion if the load is in the same block
  // as the PHI, and if there are no stores between the phi and load.
  // TODO: Allow recursive phi users.
  // TODO: Allow stores.
  BasicBlock *BB = PN.getParent();
  unsigned MaxAlign = 0;
  bool HaveLoad = false;
  for (Value::use_iterator UI = PN.use_begin(), UE = PN.use_end(); UI != UE;
       ++UI) {
    LoadInst *LI = dyn_cast<LoadInst>(*UI);
    if (LI == 0 || !LI->isSimple())
      return false;

    // For now we only allow loads in the same block as the PHI.  This is
    // a common case that happens when instcombine merges two loads through
    // a PHI.
    if (LI->getParent() != BB)
      return false;

    // Ensure that there are no instructions between the PHI and the load that
    // could store.
    for (BasicBlock::iterator BBI = &PN; &*BBI != LI; ++BBI)
      if (BBI->mayWriteToMemory())
        return false;

    MaxAlign = std::max(MaxAlign, LI->getAlignment());
    HaveLoad = true;
  }

  if (!HaveLoad)
    return false;

  // We can only transform this if it is safe to push the loads into the
  // predecessor blocks. The only thing to watch out for is that we can't put
  // a possibly trapping load in the predecessor if it is a critical edge.
  for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
    TerminatorInst *TI = PN.getIncomingBlock(Idx)->getTerminator();
    Value *InVal = PN.getIncomingValue(Idx);

    // If the value is produced by the terminator of the predecessor (an
    // invoke) or it has side-effects, there is no valid place to put a load
    // in the predecessor.
    if (TI == InVal || TI->mayHaveSideEffects())
      return false;

    // If the predecessor has a single successor, then the edge isn't
    // critical.
    if (TI->getNumSuccessors() == 1)
      continue;

    // If this pointer is always safe to load, or if we can prove that there
    // is already a load in the block, then we can move the load to the pred
    // block.
    if (InVal->isDereferenceablePointer() ||
        isSafeToLoadUnconditionally(InVal, TI, MaxAlign, DL))
      continue;

    return false;
  }

  return true;
}

static void speculatePHINodeLoads(PHINode &PN) {
  DEBUG(dbgs() << "    original: " << PN << "\n");

  Type *LoadTy = cast<PointerType>(PN.getType())->getElementType();
  IRBuilderTy PHIBuilder(&PN);
  PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
                                        PN.getName() + ".sroa.speculated");

  // Get the TBAA tag and alignment to use from one of the loads. It doesn't
  // matter which one we get or whether any differ.
  LoadInst *SomeLoad = cast<LoadInst>(*PN.use_begin());
  MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa);
  unsigned Align = SomeLoad->getAlignment();

  // Rewrite all loads of the PN to use the new PHI.
  while (!PN.use_empty()) {
    LoadInst *LI = cast<LoadInst>(*PN.use_begin());
    LI->replaceAllUsesWith(NewPN);
    LI->eraseFromParent();
  }

  // Inject loads into all of the pred blocks.
  for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
    BasicBlock *Pred = PN.getIncomingBlock(Idx);
    TerminatorInst *TI = Pred->getTerminator();
    Value *InVal = PN.getIncomingValue(Idx);
    IRBuilderTy PredBuilder(TI);

    LoadInst *Load = PredBuilder.CreateLoad(
        InVal, (PN.getName() + ".sroa.speculate.load." + Pred->getName()));
    ++NumLoadsSpeculated;
    Load->setAlignment(Align);
    if (TBAATag)
      Load->setMetadata(LLVMContext::MD_tbaa, TBAATag);
    NewPN->addIncoming(Load, Pred);
  }

  DEBUG(dbgs() << "          speculated to: " << *NewPN << "\n");
  PN.eraseFromParent();
}

/// Select instructions that use an alloca and are subsequently loaded can be
/// rewritten to load both input pointers and then select between the results,
/// allowing the load of the alloca to be promoted.
/// From this:
///   %P2 = select i1 %cond, i32* %Alloca, i32* %Other
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   %V2 = load i32* %Other
///   %V = select i1 %cond, i32 %V1, i32 %V2
///
/// We can do this to a select if its only uses are loads and if the operand
/// to the select can be loaded unconditionally.
static bool isSafeSelectToSpeculate(SelectInst &SI, const DataLayout *DL = 0) {
  Value *TValue = SI.getTrueValue();
  Value *FValue = SI.getFalseValue();
  bool TDerefable = TValue->isDereferenceablePointer();
  bool FDerefable = FValue->isDereferenceablePointer();

  for (Value::use_iterator UI = SI.use_begin(), UE = SI.use_end(); UI != UE;
       ++UI) {
    LoadInst *LI = dyn_cast<LoadInst>(*UI);
    if (LI == 0 || !LI->isSimple())
      return false;

    // Both operands to the select need to be dereferenceable, either
    // absolutely (e.g. allocas) or at this point because we can see other
    // accesses to it.
    if (!TDerefable &&
        !isSafeToLoadUnconditionally(TValue, LI, LI->getAlignment(), DL))
      return false;
    if (!FDerefable &&
        !isSafeToLoadUnconditionally(FValue, LI, LI->getAlignment(), DL))
      return false;
  }

  return true;
}

static void speculateSelectInstLoads(SelectInst &SI) {
  DEBUG(dbgs() << "    original: " << SI << "\n");

  IRBuilderTy IRB(&SI);
  Value *TV = SI.getTrueValue();
  Value *FV = SI.getFalseValue();
  // Replace the loads of the select with a select of two loads.
  while (!SI.use_empty()) {
    LoadInst *LI = cast<LoadInst>(*SI.use_begin());
    assert(LI->isSimple() && "We only speculate simple loads");

    IRB.SetInsertPoint(LI);
    LoadInst *TL =
        IRB.CreateLoad(TV, LI->getName() + ".sroa.speculate.load.true");
    LoadInst *FL =
        IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false");
    NumLoadsSpeculated += 2;

    // Transfer alignment and TBAA info if present.
    TL->setAlignment(LI->getAlignment());
    FL->setAlignment(LI->getAlignment());
    if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa)) {
      TL->setMetadata(LLVMContext::MD_tbaa, Tag);
      FL->setMetadata(LLVMContext::MD_tbaa, Tag);
    }

    Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
                                LI->getName() + ".sroa.speculated");

    DEBUG(dbgs() << "          speculated to: " << *V << "\n");
    LI->replaceAllUsesWith(V);
    LI->eraseFromParent();
  }
  SI.eraseFromParent();
}

/// \brief Build a GEP out of a base pointer and indices.
///
/// This will return the BasePtr if that is valid, or build a new GEP
/// instruction using the IRBuilder if GEP-ing is needed.
static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
                       SmallVectorImpl<Value *> &Indices) {
  if (Indices.empty())
    return BasePtr;

  // A single zero index is a no-op, so check for this and avoid building a GEP
  // in that case.
  if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
    return BasePtr;

  return IRB.CreateInBoundsGEP(BasePtr, Indices, "idx");
}
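
// For example (illustrative): buildGEP returns BasePtr unchanged for an empty
// index list or for the single index "i32 0", and otherwise emits an inbounds
// GEP named "idx".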

/// \brief Get a natural GEP off of the BasePtr walking through Ty toward
/// TargetTy without changing the offset of the pointer.
///
/// This routine assumes we've already established a properly offset GEP with
/// Indices, and arrived at the Ty type. The goal is to continue to GEP with
/// zero-indices down through type layers until we find one the same as
/// TargetTy. If we can't find one with the same type, we at least try to use
/// one with the same size. If none of that works, we just produce the GEP as
/// indicated by Indices to have the correct offset.
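///
/// For example (illustrative): walking [4 x i32] toward i32 appends a single
/// zero index, while walking { { i32 } } toward i32 appends two.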
static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
                                    Value *BasePtr, Type *Ty, Type *TargetTy,
                                    SmallVectorImpl<Value *> &Indices) {
  if (Ty == TargetTy)
    return buildGEP(IRB, BasePtr, Indices);

  // See if we can descend into a struct and locate a field with the correct
  // type.
  unsigned NumLayers = 0;
  Type *ElementTy = Ty;
  do {
    if (ElementTy->isPointerTy())
      break;
    if (SequentialType *SeqTy = dyn_cast<SequentialType>(ElementTy)) {
      ElementTy = SeqTy->getElementType();
      // Note that we use the default address space as this index is over an
      // array or a vector, not a pointer.
      Indices.push_back(IRB.getInt(APInt(DL.getPointerSizeInBits(0), 0)));
    } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
      if (STy->element_begin() == STy->element_end())
        break; // Nothing left to descend into.
      ElementTy = *STy->element_begin();
      Indices.push_back(IRB.getInt32(0));
    } else {
      break;
    }
    ++NumLayers;
  } while (ElementTy != TargetTy);
  if (ElementTy != TargetTy)
    Indices.erase(Indices.end() - NumLayers, Indices.end());

  return buildGEP(IRB, BasePtr, Indices);
}

/// \brief Recursively compute indices for a natural GEP.
///
/// This is the recursive step for getNaturalGEPWithOffset that walks down the
/// element types adding appropriate indices for the GEP.
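///
/// For example (illustrative): an offset of 12 into [10 x i32] skips three
/// i32 elements, pushing index 3 and recursing with a remaining offset of 0.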
   1220 static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
   1221                                        Value *Ptr, Type *Ty, APInt &Offset,
   1222                                        Type *TargetTy,
   1223                                        SmallVectorImpl<Value *> &Indices) {
   1224   if (Offset == 0)
   1225     return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices);
   1226 
   1227   // We can't recurse through pointer types.
   1228   if (Ty->isPointerTy())
   1229     return 0;
   1230 
   1231   // We try to analyze GEPs over vectors here, but note that these GEPs are
   1232   // extremely poorly defined currently. The long-term goal is to remove GEPing
   1233   // over a vector from the IR completely.
   1234   if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
   1235     unsigned ElementSizeInBits = DL.getTypeSizeInBits(VecTy->getScalarType());
   1236     if (ElementSizeInBits % 8)
   1237       return 0; // GEPs over non-multiple of 8 size vector elements are invalid.
   1238     APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
   1239     APInt NumSkippedElements = Offset.sdiv(ElementSize);
   1240     if (NumSkippedElements.ugt(VecTy->getNumElements()))
   1241       return 0;
   1242     Offset -= NumSkippedElements * ElementSize;
   1243     Indices.push_back(IRB.getInt(NumSkippedElements));
   1244     return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(),
   1245                                     Offset, TargetTy, Indices);
   1246   }
   1247 
   1248   if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
   1249     Type *ElementTy = ArrTy->getElementType();
   1250     APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
   1251     APInt NumSkippedElements = Offset.sdiv(ElementSize);
   1252     if (NumSkippedElements.ugt(ArrTy->getNumElements()))
   1253       return 0;
   1254 
   1255     Offset -= NumSkippedElements * ElementSize;
   1256     Indices.push_back(IRB.getInt(NumSkippedElements));
   1257     return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
   1258                                     Indices);
   1259   }
   1260 
   1261   StructType *STy = dyn_cast<StructType>(Ty);
   1262   if (!STy)
   1263     return 0;
   1264 
   1265   const StructLayout *SL = DL.getStructLayout(STy);
   1266   uint64_t StructOffset = Offset.getZExtValue();
   1267   if (StructOffset >= SL->getSizeInBytes())
   1268     return 0;
   1269   unsigned Index = SL->getElementContainingOffset(StructOffset);
   1270   Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
   1271   Type *ElementTy = STy->getElementType(Index);
   1272   if (Offset.uge(DL.getTypeAllocSize(ElementTy)))
   1273     return 0; // The offset points into alignment padding.
   1274 
   1275   Indices.push_back(IRB.getInt32(Index));
   1276   return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
   1277                                   Indices);
   1278 }
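
// Worked example of the decomposition above (illustrative): for
// Ty == [4 x { i32, i32 }] and Offset == 12, the array step skips
// 12 / 8 == 1 element (leaving Offset == 4), and the struct step selects
// element 1 at offset 4 (leaving Offset == 0), so the accumulated indices are
// {1, 1} and the recursion terminates at the inner i32.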

/// \brief Get a natural GEP from a base pointer to a particular offset and
/// resulting in a particular type.
///
/// The goal is to produce a "natural" looking GEP that works with the existing
/// composite types to arrive at the appropriate offset and element type for
/// a pointer. TargetTy is the element type the returned GEP should point-to if
/// possible. We recurse by decreasing Offset, adding the appropriate index to
/// Indices, and setting Ty to the result subtype.
///
/// If no natural GEP can be constructed, this function returns null.
static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
                                      Value *Ptr, APInt Offset, Type *TargetTy,
                                      SmallVectorImpl<Value *> &Indices) {
  PointerType *Ty = cast<PointerType>(Ptr->getType());

  // Don't bother forming a natural GEP from an i8* when the TargetTy is i8;
  // the raw byte-offset fallback in getAdjustedPtr already produces exactly
  // that GEP.
  if (Ty == IRB.getInt8PtrTy() && TargetTy->isIntegerTy(8))
    return 0;

  Type *ElementTy = Ty->getElementType();
  if (!ElementTy->isSized())
    return 0; // We can't GEP through an unsized element.
  APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
  if (ElementSize == 0)
    return 0; // Zero-length arrays can't help us build a natural GEP.
  APInt NumSkippedElements = Offset.sdiv(ElementSize);

  Offset -= NumSkippedElements * ElementSize;
  Indices.push_back(IRB.getInt(NumSkippedElements));
  return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
                                  Indices);
}
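
// For instance (illustrative): with an i32* base and Offset == 8, the leading
// index is 8 /s 4 == 2 and the residual offset is 0, yielding roughly
//   getelementptr inbounds i32* %ptr, i64 2
// whereas Offset == 10 leaves a residual of 2 inside the i32, the recursion
// cannot subdivide it, and null is returned.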

/// \brief Compute an adjusted pointer from Ptr by Offset bytes where the
/// resulting pointer has PointerTy.
///
/// This tries very hard to compute a "natural" GEP which arrives at the offset
/// and produces the pointer type desired. Where it cannot, it will try to use
/// the natural GEP to arrive at the offset and bitcast to the type. Where that
/// fails, it will try to use an existing i8* and GEP to the byte offset and
/// bitcast to the type.
///
/// The strategy for finding the more natural GEPs is to peel off layers of the
/// pointer, walking back through bit casts and GEPs, searching for a base
/// pointer from which we can compute a natural GEP with the desired
/// properties. The algorithm tries to fold as many constant indices into
/// a single GEP as possible, thus making each GEP more independent of the
/// surrounding code.
static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL,
                             Value *Ptr, APInt Offset, Type *PointerTy) {
  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(Ptr);
  SmallVector<Value *, 4> Indices;

  // We may end up computing an offset pointer that has the wrong type. If we
  // never are able to compute one directly that has the correct type, we'll
  // fall back to it, so keep it around here.
  Value *OffsetPtr = 0;

  // Remember any i8 pointer we come across to re-use if we need to do a raw
  // byte offset.
  Value *Int8Ptr = 0;
  APInt Int8PtrOffset(Offset.getBitWidth(), 0);

  Type *TargetTy = PointerTy->getPointerElementType();

  do {
    // First fold any existing GEPs into the offset.
    while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
      APInt GEPOffset(Offset.getBitWidth(), 0);
      if (!GEP->accumulateConstantOffset(DL, GEPOffset))
        break;
      Offset += GEPOffset;
      Ptr = GEP->getPointerOperand();
      if (!Visited.insert(Ptr))
        break;
    }

    // See if we can perform a natural GEP here.
    Indices.clear();
    if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
                                           Indices)) {
      if (P->getType() == PointerTy) {
        // Zap any offset pointer that we ended up computing in previous rounds.
        if (OffsetPtr && OffsetPtr->use_empty())
          if (Instruction *I = dyn_cast<Instruction>(OffsetPtr))
            I->eraseFromParent();
        return P;
      }
      if (!OffsetPtr) {
        OffsetPtr = P;
      }
    }

    // Stash this pointer if we've found an i8*. Note that we test the pointee
    // type here; Ptr itself always has pointer type.
    if (Ptr->getType()->getPointerElementType()->isIntegerTy(8)) {
      Int8Ptr = Ptr;
      Int8PtrOffset = Offset;
    }

    // Peel off a layer of the pointer and update the offset appropriately.
    if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
      Ptr = cast<Operator>(Ptr)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
      if (GA->mayBeOverridden())
        break;
      Ptr = GA->getAliasee();
    } else {
      break;
    }
    assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(Ptr));

  if (!OffsetPtr) {
    if (!Int8Ptr) {
      Int8Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy(), "raw_cast");
      Int8PtrOffset = Offset;
    }

    OffsetPtr = Int8PtrOffset == 0 ? Int8Ptr :
      IRB.CreateInBoundsGEP(Int8Ptr, IRB.getInt(Int8PtrOffset), "raw_idx");
  }
  Ptr = OffsetPtr;

  // On the off chance we were targeting i8*, guard the bitcast here.
  if (Ptr->getType() != PointerTy)
    Ptr = IRB.CreateBitCast(Ptr, PointerTy, "cast");

  return Ptr;
}
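
// Sketch of the folding this enables (illustrative): when asked to adjust
//   %p = bitcast i8* %gep to i32*, where %gep = getelementptr i8* %base, i64 4
// by another 4 bytes, the loop above walks back to %base with an accumulated
// offset of 8 and emits a single GEP from %base rather than stacking a new
// GEP on top of %p.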

/// \brief Test whether we can convert a value from the old to the new type.
///
/// This predicate should be used to guard calls to convertValue in order to
/// ensure that we only try to convert viable values. The strategy is that we
/// will peel off single element struct and array wrappings to get to an
/// underlying value, and convert that value.
static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) {
  if (OldTy == NewTy)
    return true;
  if (IntegerType *OldITy = dyn_cast<IntegerType>(OldTy))
    if (IntegerType *NewITy = dyn_cast<IntegerType>(NewTy))
      if (NewITy->getBitWidth() >= OldITy->getBitWidth())
        return true;
  if (DL.getTypeSizeInBits(NewTy) != DL.getTypeSizeInBits(OldTy))
    return false;
  if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType())
    return false;

  if (NewTy->isPointerTy() || OldTy->isPointerTy()) {
    if (NewTy->isPointerTy() && OldTy->isPointerTy())
      return true;
    if (NewTy->isIntegerTy() || OldTy->isIntegerTy())
      return true;
    return false;
  }

  return true;
}
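
// A few consequences of these rules (assuming 64-bit pointers, illustrative):
// i32 -> i64 is convertible (integer widening), but i64 -> i32 is not (sizes
// differ); <2 x i32> <-> i64 is convertible (same 64 bits, both single value
// types); i8* <-> i64 is convertible; and { i32 } -> i32 is rejected because
// a struct is not a single value type.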

/// \brief Generic routine to convert an SSA value to a value of a different
/// type.
///
/// This will try various different casting techniques, such as bitcasts,
/// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test
/// two types for viability with this routine.
static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
                           Type *Ty) {
  assert(canConvertValue(DL, V->getType(), Ty) &&
         "Value not convertible to type");
  if (V->getType() == Ty)
    return V;
  if (IntegerType *OldITy = dyn_cast<IntegerType>(V->getType()))
    if (IntegerType *NewITy = dyn_cast<IntegerType>(Ty))
      if (NewITy->getBitWidth() > OldITy->getBitWidth())
        return IRB.CreateZExt(V, NewITy);
  if (V->getType()->isIntegerTy() && Ty->isPointerTy())
    return IRB.CreateIntToPtr(V, Ty);
  if (V->getType()->isPointerTy() && Ty->isIntegerTy())
    return IRB.CreatePtrToInt(V, Ty);

  return IRB.CreateBitCast(V, Ty);
}
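
// E.g. (illustrative): converting i16 to i32 emits a zext, i64 to i8*
// (with 64-bit pointers) emits an inttoptr, the reverse emits a ptrtoint,
// and <4 x i16> to i64 falls through to a plain bitcast.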

/// \brief Test whether the given slice use can be promoted to a vector.
///
/// This function is called to test each entry in a partitioning which is
/// slated for a single slice.
static bool isVectorPromotionViableForSlice(
    const DataLayout &DL, AllocaSlices &S, uint64_t SliceBeginOffset,
    uint64_t SliceEndOffset, VectorType *Ty, uint64_t ElementSize,
    AllocaSlices::const_iterator I) {
  // First validate the slice offsets.
  uint64_t BeginOffset =
      std::max(I->beginOffset(), SliceBeginOffset) - SliceBeginOffset;
  uint64_t BeginIndex = BeginOffset / ElementSize;
  if (BeginIndex * ElementSize != BeginOffset ||
      BeginIndex >= Ty->getNumElements())
    return false;
  uint64_t EndOffset =
      std::min(I->endOffset(), SliceEndOffset) - SliceBeginOffset;
  uint64_t EndIndex = EndOffset / ElementSize;
  if (EndIndex * ElementSize != EndOffset || EndIndex > Ty->getNumElements())
    return false;

  assert(EndIndex > BeginIndex && "Empty vector!");
  uint64_t NumElements = EndIndex - BeginIndex;
  Type *SliceTy =
      (NumElements == 1) ? Ty->getElementType()
                         : VectorType::get(Ty->getElementType(), NumElements);

  Type *SplitIntTy =
      Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8);

  Use *U = I->getUse();

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
    if (MI->isVolatile())
      return false;
    if (!I->isSplittable())
      return false; // Skip any unsplittable intrinsics.
  } else if (U->get()->getType()->getPointerElementType()->isStructTy()) {
    // Disable vector promotion when there are loads or stores of an FCA.
    return false;
  } else if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
    if (LI->isVolatile())
      return false;
    Type *LTy = LI->getType();
    if (SliceBeginOffset > I->beginOffset() ||
        SliceEndOffset < I->endOffset()) {
      assert(LTy->isIntegerTy());
      LTy = SplitIntTy;
    }
    if (!canConvertValue(DL, SliceTy, LTy))
      return false;
  } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
    if (SI->isVolatile())
      return false;
    Type *STy = SI->getValueOperand()->getType();
    if (SliceBeginOffset > I->beginOffset() ||
        SliceEndOffset < I->endOffset()) {
      assert(STy->isIntegerTy());
      STy = SplitIntTy;
    }
    if (!canConvertValue(DL, STy, SliceTy))
      return false;
  } else {
    return false;
  }

  return true;
}

/// \brief Test whether the given alloca partitioning and range of slices can
/// be promoted to a vector.
///
/// This is a quick test to check whether we can rewrite a particular alloca
/// partition (and its newly formed alloca) into a vector alloca with only
/// whole-vector loads and stores such that it could be promoted to a vector
/// SSA value. We can only ensure this for a limited set of operations, and we
/// don't want to do the rewrites unless we are confident that the result will
/// be promotable, so we have an early test here.
static bool
isVectorPromotionViable(const DataLayout &DL, Type *AllocaTy, AllocaSlices &S,
                        uint64_t SliceBeginOffset, uint64_t SliceEndOffset,
                        AllocaSlices::const_iterator I,
                        AllocaSlices::const_iterator E,
                        ArrayRef<AllocaSlices::iterator> SplitUses) {
  VectorType *Ty = dyn_cast<VectorType>(AllocaTy);
  if (!Ty)
    return false;

  uint64_t ElementSize = DL.getTypeSizeInBits(Ty->getScalarType());

  // While the definition of LLVM vectors is bitpacked, we don't support sizes
  // that aren't byte sized.
  if (ElementSize % 8)
    return false;
  assert((DL.getTypeSizeInBits(Ty) % 8) == 0 &&
         "vector size not a multiple of element size?");
  ElementSize /= 8;

  for (; I != E; ++I)
    if (!isVectorPromotionViableForSlice(DL, S, SliceBeginOffset,
                                         SliceEndOffset, Ty, ElementSize, I))
      return false;

  for (ArrayRef<AllocaSlices::iterator>::const_iterator SUI = SplitUses.begin(),
                                                        SUE = SplitUses.end();
       SUI != SUE; ++SUI)
    if (!isVectorPromotionViableForSlice(DL, S, SliceBeginOffset,
                                         SliceEndOffset, Ty, ElementSize, *SUI))
      return false;

  return true;
}
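
// Illustrative example: an alloca of <4 x float> (ElementSize == 4 bytes)
// used by one whole-vector load and one store of a float at byte offset 8
// passes this test; the store's slice [8, 12) maps exactly to element index 2,
// and float is convertible to the single-element slice type.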

/// \brief Test whether a slice of an alloca is valid for integer widening.
///
/// This implements the necessary checking for the \c isIntegerWideningViable
/// test below on a single slice of the alloca.
static bool isIntegerWideningViableForSlice(const DataLayout &DL,
                                            Type *AllocaTy,
                                            uint64_t AllocBeginOffset,
                                            uint64_t Size, AllocaSlices &S,
                                            AllocaSlices::const_iterator I,
                                            bool &WholeAllocaOp) {
  uint64_t RelBegin = I->beginOffset() - AllocBeginOffset;
  uint64_t RelEnd = I->endOffset() - AllocBeginOffset;

  // We can't reasonably handle cases where the load or store extends past
  // the end of the alloca's type and into its padding.
  if (RelEnd > Size)
    return false;

  Use *U = I->getUse();

  if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
    if (LI->isVolatile())
      return false;
    if (RelBegin == 0 && RelEnd == Size)
      WholeAllocaOp = true;
    if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
      if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
        return false;
    } else if (RelBegin != 0 || RelEnd != Size ||
               !canConvertValue(DL, AllocaTy, LI->getType())) {
      // Non-integer loads need to be convertible from the alloca type so that
      // they are promotable.
      return false;
    }
  } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
    Type *ValueTy = SI->getValueOperand()->getType();
    if (SI->isVolatile())
      return false;
    if (RelBegin == 0 && RelEnd == Size)
      WholeAllocaOp = true;
    if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) {
      if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
        return false;
    } else if (RelBegin != 0 || RelEnd != Size ||
               !canConvertValue(DL, ValueTy, AllocaTy)) {
      // Non-integer stores need to be convertible to the alloca type so that
      // they are promotable.
      return false;
    }
  } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
    if (MI->isVolatile() || !isa<Constant>(MI->getLength()))
      return false;
    if (!I->isSplittable())
      return false; // Skip any unsplittable intrinsics.
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
    if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
        II->getIntrinsicID() != Intrinsic::lifetime_end)
      return false;
  } else {
    return false;
  }

  return true;
}

/// \brief Test whether the given alloca partition's integer operations can be
/// widened to promotable ones.
///
/// This is a quick test to check whether we can rewrite the integer loads and
/// stores to a particular alloca into wider loads and stores and be able to
/// promote the resulting alloca.
static bool
isIntegerWideningViable(const DataLayout &DL, Type *AllocaTy,
                        uint64_t AllocBeginOffset, AllocaSlices &S,
                        AllocaSlices::const_iterator I,
                        AllocaSlices::const_iterator E,
                        ArrayRef<AllocaSlices::iterator> SplitUses) {
  uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy);
  // Don't create integer types larger than the maximum bitwidth.
  if (SizeInBits > IntegerType::MAX_INT_BITS)
    return false;

  // Don't try to handle allocas with bit-padding.
  if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy))
    return false;

  // We need to ensure that an integer type with the appropriate bitwidth can
  // be converted to the alloca type, whatever that is. We don't want to force
  // the alloca itself to have an integer type if there is a more suitable one.
  Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits);
  if (!canConvertValue(DL, AllocaTy, IntTy) ||
      !canConvertValue(DL, IntTy, AllocaTy))
    return false;

  uint64_t Size = DL.getTypeStoreSize(AllocaTy);

  // While examining uses, we ensure that the alloca has a covering load or
  // store. We don't want to widen the integer operations only to fail to
  // promote due to some other unsplittable entry (which we may make splittable
  // later). However, if there are only splittable uses, go ahead and assume
  // that we cover the alloca.
  bool WholeAllocaOp = (I != E) ? false : DL.isLegalInteger(SizeInBits);

  for (; I != E; ++I)
    if (!isIntegerWideningViableForSlice(DL, AllocaTy, AllocBeginOffset, Size,
                                         S, I, WholeAllocaOp))
      return false;

  for (ArrayRef<AllocaSlices::iterator>::const_iterator SUI = SplitUses.begin(),
                                                        SUE = SplitUses.end();
       SUI != SUE; ++SUI)
    if (!isIntegerWideningViableForSlice(DL, AllocaTy, AllocBeginOffset, Size,
                                         S, *SUI, WholeAllocaOp))
      return false;

  return WholeAllocaOp;
}
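
// Illustrative example: an i64 alloca written by one whole i64 store and read
// by two i32 loads at byte offsets 0 and 4 is viable; the store provides the
// covering WholeAllocaOp. If all uses were splittable (say, a lone memset),
// viability instead falls back on whether the target considers i64 a legal
// integer type.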

static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
                             IntegerType *Ty, uint64_t Offset,
                             const Twine &Name) {
  DEBUG(dbgs() << "       start: " << *V << "\n");
  IntegerType *IntTy = cast<IntegerType>(V->getType());
  assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
         "Element extends past full value");
  uint64_t ShAmt = 8*Offset;
  if (DL.isBigEndian())
    ShAmt = 8*(DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
  if (ShAmt) {
    V = IRB.CreateLShr(V, ShAmt, Name + ".shift");
    DEBUG(dbgs() << "     shifted: " << *V << "\n");
  }
  assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
         "Cannot extract to a larger integer!");
  if (Ty != IntTy) {
    V = IRB.CreateTrunc(V, Ty, Name + ".trunc");
    DEBUG(dbgs() << "     trunced: " << *V << "\n");
  }
  return V;
}
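
// For example (illustrative): extracting an i8 at Offset == 1 from an i32 %v
// on a little-endian target emits roughly
//   %v.shift = lshr i32 %v, 8
//   %v.trunc = trunc i32 %v.shift to i8
// while a big-endian target shifts by 8 * (4 - 1 - 1) == 16 bits instead.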

static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old,
                            Value *V, uint64_t Offset, const Twine &Name) {
  IntegerType *IntTy = cast<IntegerType>(Old->getType());
  IntegerType *Ty = cast<IntegerType>(V->getType());
  assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
         "Cannot insert a larger integer!");
  DEBUG(dbgs() << "       start: " << *V << "\n");
  if (Ty != IntTy) {
    V = IRB.CreateZExt(V, IntTy, Name + ".ext");
    DEBUG(dbgs() << "    extended: " << *V << "\n");
  }
  assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
         "Element store outside of alloca store");
  uint64_t ShAmt = 8*Offset;
  if (DL.isBigEndian())
    ShAmt = 8*(DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
  if (ShAmt) {
    V = IRB.CreateShl(V, ShAmt, Name + ".shift");
    DEBUG(dbgs() << "     shifted: " << *V << "\n");
  }

  if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) {
    APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt);
    Old = IRB.CreateAnd(Old, Mask, Name + ".mask");
    DEBUG(dbgs() << "      masked: " << *Old << "\n");
    V = IRB.CreateOr(Old, V, Name + ".insert");
    DEBUG(dbgs() << "    inserted: " << *V << "\n");
  }
  return V;
}
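
// For example (illustrative): inserting an i8 %b at Offset == 2 into an i32
// %old on a little-endian target zero-extends %b, shifts it left by 16, masks
// %old with ~0x00FF0000, and ors the two pieces together.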

static Value *extractVector(IRBuilderTy &IRB, Value *V,
                            unsigned BeginIndex, unsigned EndIndex,
                            const Twine &Name) {
  VectorType *VecTy = cast<VectorType>(V->getType());
  unsigned NumElements = EndIndex - BeginIndex;
  assert(NumElements <= VecTy->getNumElements() && "Too many elements!");

  if (NumElements == VecTy->getNumElements())
    return V;

  if (NumElements == 1) {
    V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex),
                                 Name + ".extract");
    DEBUG(dbgs() << "     extract: " << *V << "\n");
    return V;
  }

  SmallVector<Constant*, 8> Mask;
  Mask.reserve(NumElements);
  for (unsigned i = BeginIndex; i != EndIndex; ++i)
    Mask.push_back(IRB.getInt32(i));
  V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
                              ConstantVector::get(Mask),
                              Name + ".extract");
  DEBUG(dbgs() << "     shuffle: " << *V << "\n");
  return V;
}
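
// E.g. (illustrative): extracting elements [1, 3) of a <4 x i32> %v emits
//   shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 1, i32 2>
// while a single element uses extractelement and a full-width range returns
// %v unchanged.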

static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
                           unsigned BeginIndex, const Twine &Name) {
  VectorType *VecTy = cast<VectorType>(Old->getType());
  assert(VecTy && "Can only insert a vector into a vector");

  VectorType *Ty = dyn_cast<VectorType>(V->getType());
  if (!Ty) {
    // Single element to insert.
    V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex),
                                Name + ".insert");
    DEBUG(dbgs() << "     insert: " << *V << "\n");
    return V;
  }

  assert(Ty->getNumElements() <= VecTy->getNumElements() &&
         "Too many elements!");
  if (Ty->getNumElements() == VecTy->getNumElements()) {
    assert(V->getType() == VecTy && "Vector type mismatch");
    return V;
  }
  unsigned EndIndex = BeginIndex + Ty->getNumElements();

  // When inserting a smaller vector into the larger one for a store, we first
  // use a shuffle vector to widen it with undef elements, and then
  // a second shuffle vector to select between the loaded vector and the
  // incoming vector.
  SmallVector<Constant*, 8> Mask;
  Mask.reserve(VecTy->getNumElements());
  for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
    if (i >= BeginIndex && i < EndIndex)
      Mask.push_back(IRB.getInt32(i - BeginIndex));
    else
      Mask.push_back(UndefValue::get(IRB.getInt32Ty()));
  V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
                              ConstantVector::get(Mask),
                              Name + ".expand");
  DEBUG(dbgs() << "    shuffle: " << *V << "\n");

  Mask.clear();
  for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
    Mask.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex));

  V = IRB.CreateSelect(ConstantVector::get(Mask), V, Old, Name + ".blend");

  DEBUG(dbgs() << "    blend: " << *V << "\n");
  return V;
}
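
// E.g. (illustrative): inserting a <2 x i32> at BeginIndex == 1 of a
// <4 x i32> first widens the value with the shuffle mask
// <undef, 0, 1, undef> and then selects with <0, 1, 1, 0> between the widened
// value and the old vector, keeping elements 0 and 3 of the original.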

namespace {
/// \brief Visitor to rewrite instructions using a particular slice of an
/// alloca to use a new alloca.
///
/// Also implements the rewriting to vector-based accesses when the partition
/// passes the isVectorPromotionViable predicate. Most of the rewriting logic
/// lives here.
class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> {
  // Befriend the base class so it can delegate to private visit methods.
  friend class llvm::InstVisitor<AllocaSliceRewriter, bool>;
  typedef llvm::InstVisitor<AllocaSliceRewriter, bool> Base;

  const DataLayout &DL;
  AllocaSlices &S;
  SROA &Pass;
  AllocaInst &OldAI, &NewAI;
  const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset;
  Type *NewAllocaTy;

  // If we are rewriting an alloca partition which can be written as pure
  // vector operations, we stash extra information here. When VecTy is
  // non-null, we have some strict guarantees about the rewritten alloca:
  //   - The new alloca is exactly the size of the vector type here.
  //   - The accesses all either map to the entire vector or to a single
  //     element.
  //   - The set of accessing instructions is only one of those handled above
  //     in isVectorPromotionViable. Generally these are the same access kinds
  //     which are promotable via mem2reg.
  VectorType *VecTy;
  Type *ElementTy;
  uint64_t ElementSize;

  // This is a convenience and flag variable that will be null unless the new
  // alloca's integer operations should be widened to this integer type due to
  // passing isIntegerWideningViable above. If it is non-null, the desired
  // integer type will be stored here for easy access during rewriting.
  IntegerType *IntTy;

  // The offset of the slice currently being rewritten.
  uint64_t BeginOffset, EndOffset;
  bool IsSplittable;
  bool IsSplit;
  Use *OldUse;
  Instruction *OldPtr;

  // Output members carrying state about the result of visiting and rewriting
  // the slice of the alloca.
  bool IsUsedByRewrittenSpeculatableInstructions;

  // Utility IR builder, whose name prefix is set up for each visited use, and
  // whose insertion point is set to point to the user.
  IRBuilderTy IRB;

public:
  AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &S, SROA &Pass,
                      AllocaInst &OldAI, AllocaInst &NewAI,
                      uint64_t NewBeginOffset, uint64_t NewEndOffset,
                      bool IsVectorPromotable = false,
                      bool IsIntegerPromotable = false)
      : DL(DL), S(S), Pass(Pass), OldAI(OldAI), NewAI(NewAI),
        NewAllocaBeginOffset(NewBeginOffset), NewAllocaEndOffset(NewEndOffset),
        NewAllocaTy(NewAI.getAllocatedType()),
        VecTy(IsVectorPromotable ? cast<VectorType>(NewAllocaTy) : 0),
        ElementTy(VecTy ? VecTy->getElementType() : 0),
        ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy) / 8 : 0),
        IntTy(IsIntegerPromotable
                  ? Type::getIntNTy(
                        NewAI.getContext(),
                        DL.getTypeSizeInBits(NewAI.getAllocatedType()))
                  : 0),
        BeginOffset(), EndOffset(), IsSplittable(), IsSplit(), OldUse(),
        OldPtr(), IsUsedByRewrittenSpeculatableInstructions(false),
        IRB(NewAI.getContext(), ConstantFolder()) {
    if (VecTy) {
      assert((DL.getTypeSizeInBits(ElementTy) % 8) == 0 &&
             "Only multiple-of-8 sized vector elements are viable");
      ++NumVectorized;
    }
    assert((!IsVectorPromotable && !IsIntegerPromotable) ||
           IsVectorPromotable != IsIntegerPromotable);
  }

  bool visit(AllocaSlices::const_iterator I) {
    bool CanSROA = true;
    BeginOffset = I->beginOffset();
    EndOffset = I->endOffset();
    IsSplittable = I->isSplittable();
    IsSplit =
        BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset;

    OldUse = I->getUse();
    OldPtr = cast<Instruction>(OldUse->get());

    Instruction *OldUserI = cast<Instruction>(OldUse->getUser());
    IRB.SetInsertPoint(OldUserI);
    IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc());
    IRB.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) + ".");

    CanSROA &= visit(cast<Instruction>(OldUse->getUser()));
    if (VecTy || IntTy)
      assert(CanSROA);
    return CanSROA;
  }

  /// \brief Query whether this slice is used by speculatable instructions
  /// after rewriting.
  ///
  /// These instructions (PHIs and Selects currently) require the alloca slice
  /// to run back through the rewriter. Thus, they are promotable, but not on
  /// this iteration. This is distinct from a slice which is unpromotable for
  /// some other reason, in which case we don't even want to perform the
  /// speculation. This can be queried at any time and reflects whether (at
  /// that point) a visit call has rewritten a speculatable instruction on the
  /// current slice.
  bool isUsedByRewrittenSpeculatableInstructions() const {
    return IsUsedByRewrittenSpeculatableInstructions;
  }

private:
  // Make sure the other visit overloads are visible.
  using Base::visit;

  // Every instruction which can end up as a user must have a rewrite rule.
  bool visitInstruction(Instruction &I) {
    DEBUG(dbgs() << "    !!!! Cannot rewrite: " << I << "\n");
    llvm_unreachable("No rewrite rule for this instruction!");
  }

  Value *getAdjustedAllocaPtr(IRBuilderTy &IRB, uint64_t Offset,
                              Type *PointerTy) {
    assert(Offset >= NewAllocaBeginOffset);
    return getAdjustedPtr(IRB, DL, &NewAI, APInt(DL.getPointerSizeInBits(),
                                                 Offset - NewAllocaBeginOffset),
                          PointerTy);
  }

  /// \brief Compute suitable alignment to access an offset into the new
  /// alloca.
  unsigned getOffsetAlign(uint64_t Offset) {
    unsigned NewAIAlign = NewAI.getAlignment();
    if (!NewAIAlign)
      NewAIAlign = DL.getABITypeAlignment(NewAI.getAllocatedType());
    return MinAlign(NewAIAlign, Offset);
  }

  /// \brief Compute suitable alignment to access a type at an offset of the
  /// new alloca.
  ///
  /// \returns zero if the type's ABI alignment is a suitable alignment,
  /// otherwise returns the maximal suitable alignment.
  unsigned getOffsetTypeAlign(Type *Ty, uint64_t Offset) {
    unsigned Align = getOffsetAlign(Offset);
    return Align == DL.getABITypeAlignment(Ty) ? 0 : Align;
  }

  unsigned getIndex(uint64_t Offset) {
    assert(VecTy && "Can only call getIndex when rewriting a vector");
    uint64_t RelOffset = Offset - NewAllocaBeginOffset;
    assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds");
    uint32_t Index = RelOffset / ElementSize;
    assert(Index * ElementSize == RelOffset);
    return Index;
  }
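
  // For instance (illustrative): with ElementSize == 4 and
  // NewAllocaBeginOffset == 16, an Offset of 24 maps to vector index 2.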

  void deleteIfTriviallyDead(Value *V) {
    Instruction *I = cast<Instruction>(V);
    if (isInstructionTriviallyDead(I))
      Pass.DeadInsts.insert(I);
  }

  Value *rewriteVectorizedLoadInst(uint64_t NewBeginOffset,
                                   uint64_t NewEndOffset) {
    unsigned BeginIndex = getIndex(NewBeginOffset);
    unsigned EndIndex = getIndex(NewEndOffset);
    assert(EndIndex > BeginIndex && "Empty vector!");

    Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load");
    return extractVector(IRB, V, BeginIndex, EndIndex, "vec");
  }

  Value *rewriteIntegerLoad(LoadInst &LI, uint64_t NewBeginOffset,
                            uint64_t NewEndOffset) {
    assert(IntTy && "We cannot insert an integer to the alloca");
    assert(!LI.isVolatile());
    Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load");
    V = convertValue(DL, IRB, V, IntTy);
    assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
    uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
    if (Offset > 0 || NewEndOffset < NewAllocaEndOffset)
      V = extractInteger(DL, IRB, V, cast<IntegerType>(LI.getType()), Offset,
                         "extract");
    return V;
  }

  bool visitLoadInst(LoadInst &LI) {
    DEBUG(dbgs() << "    original: " << LI << "\n");
    Value *OldOp = LI.getOperand(0);
    assert(OldOp == OldPtr);

    // Compute the intersecting offset range.
    assert(BeginOffset < NewAllocaEndOffset);
    assert(EndOffset > NewAllocaBeginOffset);
    uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
    uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);

    uint64_t Size = NewEndOffset - NewBeginOffset;

    Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), Size * 8)
                             : LI.getType();
    bool IsPtrAdjusted = false;
    Value *V;
    if (VecTy) {
      V = rewriteVectorizedLoadInst(NewBeginOffset, NewEndOffset);
    } else if (IntTy && LI.getType()->isIntegerTy()) {
      V = rewriteIntegerLoad(LI, NewBeginOffset, NewEndOffset);
    } else if (NewBeginOffset == NewAllocaBeginOffset &&
               canConvertValue(DL, NewAllocaTy, LI.getType())) {
      V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
                                LI.isVolatile(), "load");
    } else {
      Type *LTy = TargetTy->getPointerTo();
      V = IRB.CreateAlignedLoad(
          getAdjustedAllocaPtr(IRB, NewBeginOffset, LTy),
          getOffsetTypeAlign(TargetTy, NewBeginOffset - NewAllocaBeginOffset),
          LI.isVolatile(), "load");
      IsPtrAdjusted = true;
    }
    V = convertValue(DL, IRB, V, TargetTy);

    if (IsSplit) {
      assert(!LI.isVolatile());
      assert(LI.getType()->isIntegerTy() &&
             "Only integer type loads and stores are split");
      assert(Size < DL.getTypeStoreSize(LI.getType()) &&
             "Split load isn't smaller than original load");
      assert(LI.getType()->getIntegerBitWidth() ==
             DL.getTypeStoreSizeInBits(LI.getType()) &&
             "Non-byte-multiple bit width");
      // Move the insertion point just past the load so that we can refer to it.
      IRB.SetInsertPoint(llvm::next(BasicBlock::iterator(&LI)));
      // Create a placeholder value with the same type as LI to use as the
      // basis for the new value. This allows us to replace the uses of LI with
      // the computed value, and then replace the placeholder with LI, leaving
      // LI only used for this computation.
      Value *Placeholder
        = new LoadInst(UndefValue::get(LI.getType()->getPointerTo()));
      V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset,
                        "insert");
      LI.replaceAllUsesWith(V);
      Placeholder->replaceAllUsesWith(&LI);
      delete Placeholder;
    } else {
      LI.replaceAllUsesWith(V);
    }

    Pass.DeadInsts.insert(&LI);
    deleteIfTriviallyDead(OldOp);
    DEBUG(dbgs() << "          to: " << *V << "\n");
    return !LI.isVolatile() && !IsPtrAdjusted;
  }
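
  // Illustrative end-to-end sketch: when a 16-byte alloca has been split into
  // two 8-byte partitions, a load of the full 16 bytes is IsSplit; each
  // partition's rewriter loads its 8-byte half from the new alloca and
  // stitches it back into the original load's integer type via insertInteger.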

  bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp,
                                  uint64_t NewBeginOffset,
                                  uint64_t NewEndOffset) {
    if (V->getType() != VecTy) {
      unsigned BeginIndex = getIndex(NewBeginOffset);
      unsigned EndIndex = getIndex(NewEndOffset);
      assert(EndIndex > BeginIndex && "Empty vector!");
      unsigned NumElements = EndIndex - BeginIndex;
      assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
      Type *SliceTy =
          (NumElements == 1) ? ElementTy
                             : VectorType::get(ElementTy, NumElements);
      if (V->getType() != SliceTy)
        V = convertValue(DL, IRB, V, SliceTy);

      // Mix in the existing elements.
      Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load");
      V = insertVector(IRB, Old, V, BeginIndex, "vec");
    }
    StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
    Pass.DeadInsts.insert(&SI);

    (void)Store;
    DEBUG(dbgs() << "          to: " << *Store << "\n");
    return true;
  }

  bool rewriteIntegerStore(Value *V, StoreInst &SI,
                           uint64_t NewBeginOffset, uint64_t NewEndOffset) {
    assert(IntTy && "We cannot extract an integer from the alloca");
    assert(!SI.isVolatile());
    if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
      Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
                                         "oldload");
      Old = convertValue(DL, IRB, Old, IntTy);
      assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
      uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
      V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset,
                        "insert");
    }
    V = convertValue(DL, IRB, V, NewAllocaTy);
    StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
    Pass.DeadInsts.insert(&SI);
    (void)Store;
    DEBUG(dbgs() << "          to: " << *Store << "\n");
    return true;
  }

  bool visitStoreInst(StoreInst &SI) {
    DEBUG(dbgs() << "    original: " << SI << "\n");
    Value *OldOp = SI.getOperand(1);
    assert(OldOp == OldPtr);

    Value *V = SI.getValueOperand();

    // Strip all inbounds GEPs and pointer casts to try to dig out any root
    // alloca that should be re-examined after promoting this alloca.
    if (V->getType()->isPointerTy())
      if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets()))
        Pass.PostPromotionWorklist.insert(AI);

    // Compute the intersecting offset range.
    assert(BeginOffset < NewAllocaEndOffset);
    assert(EndOffset > NewAllocaBeginOffset);
    uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
    uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);

    uint64_t Size = NewEndOffset - NewBeginOffset;
    if (Size < DL.getTypeStoreSize(V->getType())) {
      assert(!SI.isVolatile());
      assert(V->getType()->isIntegerTy() &&
             "Only integer type loads and stores are split");
      assert(V->getType()->getIntegerBitWidth() ==
             DL.getTypeStoreSizeInBits(V->getType()) &&
             "Non-byte-multiple bit width");
      IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), Size * 8);
      V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset,
                         "extract");
    }

    if (VecTy)
      return rewriteVectorizedStoreInst(V, SI, OldOp, NewBeginOffset,
                                        NewEndOffset);
    if (IntTy && V->getType()->isIntegerTy())
      return rewriteIntegerStore(V, SI, NewBeginOffset, NewEndOffset);

    StoreInst *NewSI;
    if (NewBeginOffset == NewAllocaBeginOffset &&
        NewEndOffset == NewAllocaEndOffset &&
        canConvertValue(DL, V->getType(), NewAllocaTy)) {
      V = convertValue(DL, IRB, V, NewAllocaTy);
      NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
                                     SI.isVolatile());
    } else {
      Value *NewPtr = getAdjustedAllocaPtr(IRB, NewBeginOffset,
                                           V->getType()->getPointerTo());
      NewSI = IRB.CreateAlignedStore(
          V, NewPtr, getOffsetTypeAlign(
                         V->getType(), NewBeginOffset - NewAllocaBeginOffset),
          SI.isVolatile());
    }
    (void)NewSI;
    Pass.DeadInsts.insert(&SI);
    deleteIfTriviallyDead(OldOp);

    DEBUG(dbgs() << "          to: " << *NewSI << "\n");
    return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile();
  }
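
  // Illustrative sketch: a store of an i16 into the middle of an alloca being
  // integer-widened to i64 takes the rewriteIntegerStore path: the old i64 is
  // loaded, the i16 is merged in at the right byte offset with insertInteger,
  // and the combined i64 is stored back.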

  /// \brief Compute an integer value from splatting an i8 across the given
  /// number of bytes.
  ///
  /// Note that this routine assumes an i8 is a byte. If that isn't true, don't
  /// call this routine.
  /// FIXME: Heed the advice above.
  ///
  /// \param V The i8 value to splat.
  /// \param Size The number of bytes in the output (assuming i8 is one byte)
  Value *getIntegerSplat(Value *V, unsigned Size) {
    assert(Size > 0 && "Expected a positive number of bytes.");
    IntegerType *VTy = cast<IntegerType>(V->getType());
    assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte");
    if (Size == 1)
      return V;

    Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size*8);
    V = IRB.CreateMul(IRB.CreateZExt(V, SplatIntTy, "zext"),
                      ConstantExpr::getUDiv(
                        Constant::getAllOnesValue(SplatIntTy),
                        ConstantExpr::getZExt(
                          Constant::getAllOnesValue(V->getType()),
                          SplatIntTy)),
                      "isplat");
    return V;
  }
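
  // For example (illustrative): splatting an i8 %b across 4 bytes computes
  // (zext i8 %b to i32) multiplied by 0x01010101, since
  // udiv(0xFFFFFFFF, 0xFF) == 0x01010101.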

  /// \brief Compute a vector splat for a given element value.
  Value *getVectorSplat(Value *V, unsigned NumElements) {
    V = IRB.CreateVectorSplat(NumElements, V, "vsplat");
    DEBUG(dbgs() << "       splat: " << *V << "\n");
    return V;
  }

  bool visitMemSetInst(MemSetInst &II) {
    DEBUG(dbgs() << "    original: " << II << "\n");
    assert(II.getRawDest() == OldPtr);

    // If the memset has a variable size, it cannot be split, just adjust the
    // pointer to the new alloca.
    if (!isa<Constant>(II.getLength())) {
      assert(!IsSplit);
      assert(BeginOffset >= NewAllocaBeginOffset);
      II.setDest(
          getAdjustedAllocaPtr(IRB, BeginOffset, II.getRawDest()->getType()));
      Type *CstTy = II.getAlignmentCst()->getType();
      II.setAlignment(ConstantInt::get(CstTy, getOffsetAlign(BeginOffset)));

      deleteIfTriviallyDead(OldPtr);
      return false;
    }

    // Record this instruction for deletion.
    Pass.DeadInsts.insert(&II);

    Type *AllocaTy = NewAI.getAllocatedType();
    Type *ScalarTy = AllocaTy->getScalarType();

    // Compute the intersecting offset range.
    assert(BeginOffset < NewAllocaEndOffset);
    assert(EndOffset > NewAllocaBeginOffset);
    uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
    uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
    uint64_t SliceOffset = NewBeginOffset - NewAllocaBeginOffset;

    // If this doesn't map cleanly onto the alloca type, and that type isn't
    // a single value type, just emit a memset.
    if (!VecTy && !IntTy &&
        (BeginOffset > NewAllocaBeginOffset ||
         EndOffset < NewAllocaEndOffset ||
         !AllocaTy->isSingleValueType() ||
         !DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy)) ||
         DL.getTypeSizeInBits(ScalarTy)%8 != 0)) {
      Type *SizeTy = II.getLength()->getType();
      Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
      CallInst *New = IRB.CreateMemSet(
          getAdjustedAllocaPtr(IRB, NewBeginOffset, II.getRawDest()->getType()),
          II.getValue(), Size, getOffsetAlign(SliceOffset), II.isVolatile());
      (void)New;
      DEBUG(dbgs() << "          to: " << *New << "\n");
      return false;
    }

    // If we can represent this as a simple value, we have to build the actual
    // value to store, which requires expanding the byte present in memset to
    // a sensible representation for the alloca type. This is essentially
    // splatting the byte to a sufficiently wide integer, splatting it across
    // any desired vector width, and bitcasting to the final type.
    Value *V;

    if (VecTy) {
      // If this is a memset of a vectorized alloca, insert it.
      assert(ElementTy == ScalarTy);

      unsigned BeginIndex = getIndex(NewBeginOffset);
      unsigned EndIndex = getIndex(NewEndOffset);
      assert(EndIndex > BeginIndex && "Empty vector!");
      unsigned NumElements = EndIndex - BeginIndex;
      assert(NumElements <= VecTy->getNumElements() && "Too many elements!");

      Value *Splat =
          getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ElementTy) / 8);
      Splat = convertValue(DL, IRB, Splat, ElementTy);
      if (NumElements > 1)
        Splat = getVectorSplat(Splat, NumElements);

      Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
                                         "oldload");
      V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
    } else if (IntTy) {
      // If this is a memset on an alloca where we can widen stores, insert the
      // set integer.
      assert(!II.isVolatile());

      uint64_t Size = NewEndOffset - NewBeginOffset;
      V = getIntegerSplat(II.getValue(), Size);

      if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
                    EndOffset != NewAllocaEndOffset)) {
        Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
                                           "oldload");
        Old = convertValue(DL, IRB, Old, IntTy);
        uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
        V = insertInteger(DL, IRB, Old, V, Offset, "insert");
      } else {
        assert(V->getType() == IntTy &&
               "Wrong type for an alloca wide integer!");
      }
      V = convertValue(DL, IRB, V, AllocaTy);
    } else {
      // Established these invariants above.
      assert(NewBeginOffset == NewAllocaBeginOffset);
      assert(NewEndOffset == NewAllocaEndOffset);

      V = getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ScalarTy) / 8);
      if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy))
        V = getVectorSplat(V, AllocaVecTy->getNumElements());

      V = convertValue(DL, IRB, V, AllocaTy);
    }

    Value *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
                                        II.isVolatile());
    (void)New;
    DEBUG(dbgs() << "          to: " << *New << "\n");
    return !II.isVolatile();
  }
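
  // Illustrative sketch: a memset of byte 1 over elements [1, 3) of a
  // <4 x i32> alloca splats 0x01010101 into a <2 x i32>, loads the old
  // vector, blends the splat in with insertVector, and stores the result back.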
   2349 
   2350   bool visitMemTransferInst(MemTransferInst &II) {
   2351     // Rewriting of memory transfer instructions can be a bit tricky. We break
   2352     // them into two categories: split intrinsics and unsplit intrinsics.
   2353 
   2354     DEBUG(dbgs() << "    original: " << II << "\n");
   2355 
   2356     // Compute the intersecting offset range.
   2357     assert(BeginOffset < NewAllocaEndOffset);
   2358     assert(EndOffset > NewAllocaBeginOffset);
   2359     uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
   2360     uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
   2361 
   2362     assert(II.getRawSource() == OldPtr || II.getRawDest() == OldPtr);
   2363     bool IsDest = II.getRawDest() == OldPtr;
   2364 
   2365     // Compute the relative offset within the transfer.
   2366     unsigned IntPtrWidth = DL.getPointerSizeInBits();
   2367     APInt RelOffset(IntPtrWidth, NewBeginOffset - BeginOffset);
   2368 
   2369     unsigned Align = II.getAlignment();
   2370     uint64_t SliceOffset = NewBeginOffset - NewAllocaBeginOffset;
   2371     if (Align > 1)
   2372       Align =
   2373           MinAlign(RelOffset.zextOrTrunc(64).getZExtValue(),
   2374                    MinAlign(II.getAlignment(), getOffsetAlign(SliceOffset)));
   2375 
   2376     // For unsplit intrinsics, we simply modify the source and destination
   2377     // pointers in place. This isn't just an optimization, it is a matter of
   2378     // correctness. With unsplit intrinsics we may be dealing with transfers
   2379     // within a single alloca before SROA ran, or with transfers that have
   2380     // a variable length. We may also be dealing with memmove instead of
   2381     // memcpy, and so simply updating the pointers is the necessary for us to
   2382     // update both source and dest of a single call.
   2383     if (!IsSplittable) {
   2384       Value *OldOp = IsDest ? II.getRawDest() : II.getRawSource();
   2385       if (IsDest)
   2386         II.setDest(
   2387             getAdjustedAllocaPtr(IRB, BeginOffset, II.getRawDest()->getType()));
   2388       else
   2389         II.setSource(getAdjustedAllocaPtr(IRB, BeginOffset,
   2390                                           II.getRawSource()->getType()));
   2391 
   2392       Type *CstTy = II.getAlignmentCst()->getType();
   2393       II.setAlignment(ConstantInt::get(CstTy, Align));
   2394 
   2395       DEBUG(dbgs() << "          to: " << II << "\n");
   2396       deleteIfTriviallyDead(OldOp);
   2397       return false;
   2398     }
   2399     // For split transfer intrinsics we have an incredibly useful assurance:
   2400     // the source and destination do not reside within the same alloca, and at
   2401     // least one of them does not escape. This means that we can replace
   2402     // memmove with memcpy, and we don't need to worry about all manner of
   2403     // downsides to splitting and transforming the operations.
   2404 
   2405     // If this doesn't map cleanly onto the alloca type, or that type isn't
   2406     // a single value type, just emit a memcpy.
   2407     bool EmitMemCpy
   2408       = !VecTy && !IntTy && (BeginOffset > NewAllocaBeginOffset ||
   2409                              EndOffset < NewAllocaEndOffset ||
   2410                              !NewAI.getAllocatedType()->isSingleValueType());
   2411 
   2412     // If we're just going to emit a memcpy and the alloca hasn't changed,
   2413     // this is a no-op aside from possibly rewriting the transfer size down
   2414     // to the analyzed viable range.
   2415     if (EmitMemCpy && &OldAI == &NewAI) {
   2416       // Ensure the start lines up.
   2417       assert(NewBeginOffset == BeginOffset);
   2418 
   2419       // Rewrite the size as needed.
   2420       if (NewEndOffset != EndOffset)
   2421         II.setLength(ConstantInt::get(II.getLength()->getType(),
   2422                                       NewEndOffset - NewBeginOffset));
   2423       return false;
   2424     }
   2425     // Record this instruction for deletion.
   2426     Pass.DeadInsts.insert(&II);
   2427 
   2428     // Strip all inbounds GEPs and pointer casts to try to dig out any root
   2429     // alloca that should be re-examined after rewriting this instruction.
   2430     Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest();
   2431     if (AllocaInst *AI
   2432           = dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets()))
   2433       Pass.Worklist.insert(AI);
   2434 
   2435     if (EmitMemCpy) {
   2436       Type *OtherPtrTy = IsDest ? II.getRawSource()->getType()
   2437                                 : II.getRawDest()->getType();
   2438 
   2439       // Compute the other pointer, folding as much as possible to produce
   2440       // a single, simple GEP in most cases.
   2441       OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, RelOffset, OtherPtrTy);
   2442 
   2443       Value *OurPtr = getAdjustedAllocaPtr(
   2444           IRB, NewBeginOffset,
   2445           IsDest ? II.getRawDest()->getType() : II.getRawSource()->getType());
   2446       Type *SizeTy = II.getLength()->getType();
   2447       Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
   2448 
   2449       CallInst *New = IRB.CreateMemCpy(IsDest ? OurPtr : OtherPtr,
   2450                                        IsDest ? OtherPtr : OurPtr,
   2451                                        Size, Align, II.isVolatile());
   2452       (void)New;
   2453       DEBUG(dbgs() << "          to: " << *New << "\n");
   2454       return false;
   2455     }
   2456 
   2457     // Note that we clamp the alignment to 1 here as a 0 alignment for a memcpy
   2458     // is equivalent to 1, but that isn't true if we end up rewriting this as
   2459     // a load or store.
   2460     if (!Align)
   2461       Align = 1;
   2462 
   2463     bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset &&
   2464                          NewEndOffset == NewAllocaEndOffset;
   2465     uint64_t Size = NewEndOffset - NewBeginOffset;
   2466     unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0;
   2467     unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0;
   2468     unsigned NumElements = EndIndex - BeginIndex;
   2469     IntegerType *SubIntTy
   2470       = IntTy ? Type::getIntNTy(IntTy->getContext(), Size*8) : 0;
   2471 
   2472     Type *OtherPtrTy = NewAI.getType();
   2473     if (VecTy && !IsWholeAlloca) {
   2474       if (NumElements == 1)
   2475         OtherPtrTy = VecTy->getElementType();
   2476       else
   2477         OtherPtrTy = VectorType::get(VecTy->getElementType(), NumElements);
   2478 
   2479       OtherPtrTy = OtherPtrTy->getPointerTo();
   2480     } else if (IntTy && !IsWholeAlloca) {
   2481       OtherPtrTy = SubIntTy->getPointerTo();
   2482     }
   2483 
   2484     Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, RelOffset, OtherPtrTy);
   2485     Value *DstPtr = &NewAI;
   2486     if (!IsDest)
   2487       std::swap(SrcPtr, DstPtr);
   2488 
   2489     Value *Src;
   2490     if (VecTy && !IsWholeAlloca && !IsDest) {
   2491       Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
   2492                                   "load");
   2493       Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec");
   2494     } else if (IntTy && !IsWholeAlloca && !IsDest) {
   2495       Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
   2496                                   "load");
   2497       Src = convertValue(DL, IRB, Src, IntTy);
   2498       uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
   2499       Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract");
   2500     } else {
   2501       Src = IRB.CreateAlignedLoad(SrcPtr, Align, II.isVolatile(),
   2502                                   "copyload");
   2503     }
   2504 
   2505     if (VecTy && !IsWholeAlloca && IsDest) {
   2506       Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
   2507                                          "oldload");
   2508       Src = insertVector(IRB, Old, Src, BeginIndex, "vec");
   2509     } else if (IntTy && !IsWholeAlloca && IsDest) {
   2510       Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
   2511                                          "oldload");
   2512       Old = convertValue(DL, IRB, Old, IntTy);
   2513       uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
   2514       Src = insertInteger(DL, IRB, Old, Src, Offset, "insert");
   2515       Src = convertValue(DL, IRB, Src, NewAllocaTy);
   2516     }
   2517 
   2518     StoreInst *Store = cast<StoreInst>(
   2519       IRB.CreateAlignedStore(Src, DstPtr, Align, II.isVolatile()));
   2520     (void)Store;
   2521     DEBUG(dbgs() << "          to: " << *Store << "\n");
   2522     return !II.isVolatile();
   2523   }
   2524 
   2525   bool visitIntrinsicInst(IntrinsicInst &II) {
   2526     assert(II.getIntrinsicID() == Intrinsic::lifetime_start ||
   2527            II.getIntrinsicID() == Intrinsic::lifetime_end);
   2528     DEBUG(dbgs() << "    original: " << II << "\n");
   2529     assert(II.getArgOperand(1) == OldPtr);
   2530 
   2531     // Compute the intersecting offset range.
   2532     assert(BeginOffset < NewAllocaEndOffset);
   2533     assert(EndOffset > NewAllocaBeginOffset);
   2534     uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
   2535     uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
   2536 
   2537     // Record this instruction for deletion.
   2538     Pass.DeadInsts.insert(&II);
   2539 
   2540     ConstantInt *Size
   2541       = ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()),
   2542                          NewEndOffset - NewBeginOffset);
   2543     Value *Ptr =
   2544         getAdjustedAllocaPtr(IRB, NewBeginOffset, II.getArgOperand(1)->getType());
   2545     Value *New;
   2546     if (II.getIntrinsicID() == Intrinsic::lifetime_start)
   2547       New = IRB.CreateLifetimeStart(Ptr, Size);
   2548     else
   2549       New = IRB.CreateLifetimeEnd(Ptr, Size);
   2550 
   2551     (void)New;
   2552     DEBUG(dbgs() << "          to: " << *New << "\n");
   2553     return true;
   2554   }
   2555 
   2556   bool visitPHINode(PHINode &PN) {
   2557     DEBUG(dbgs() << "    original: " << PN << "\n");
   2558     assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable");
   2559     assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable");
   2560 
   2561     // We would like to compute a new pointer in only one place, but have it be
   2562     // as local as possible to the PHI. To do that, we re-use the location of
   2563     // the old pointer, which necessarily must be in the right position to
   2564     // dominate the PHI.
   2565     IRBuilderTy PtrBuilder(OldPtr);
   2566     PtrBuilder.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) +
   2567                              ".");
   2568 
   2569     Value *NewPtr =
   2570         getAdjustedAllocaPtr(PtrBuilder, BeginOffset, OldPtr->getType());
   2571     // Replace the operands which were using the old pointer.
   2572     std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr);
   2573 
   2574     DEBUG(dbgs() << "          to: " << PN << "\n");
   2575     deleteIfTriviallyDead(OldPtr);
   2576 
   2577     // Check whether we can speculate this PHI node, and if so remember that
   2578     // fact and queue it up for another iteration after the speculation
   2579     // occurs.
   2580     if (isSafePHIToSpeculate(PN, &DL)) {
   2581       Pass.SpeculatablePHIs.insert(&PN);
   2582       IsUsedByRewrittenSpeculatableInstructions = true;
   2583       return true;
   2584     }
   2585 
   2586     return false; // PHIs can't be promoted on their own.
   2587   }
   2588 
   2589   bool visitSelectInst(SelectInst &SI) {
   2590     DEBUG(dbgs() << "    original: " << SI << "\n");
   2591     assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) &&
   2592            "Pointer isn't an operand!");
   2593     assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable");
   2594     assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable");
   2595 
   2596     Value *NewPtr = getAdjustedAllocaPtr(IRB, BeginOffset, OldPtr->getType());
   2597     // Replace the operands which were using the old pointer.
   2598     if (SI.getOperand(1) == OldPtr)
   2599       SI.setOperand(1, NewPtr);
   2600     if (SI.getOperand(2) == OldPtr)
   2601       SI.setOperand(2, NewPtr);
   2602 
   2603     DEBUG(dbgs() << "          to: " << SI << "\n");
   2604     deleteIfTriviallyDead(OldPtr);
   2605 
   2606     // Check whether we can speculate this select instruction, and if so
   2607     // remember that fact and queue it up for another iteration after the
   2608     // speculation occurs.
   2609     if (isSafeSelectToSpeculate(SI, &DL)) {
   2610       Pass.SpeculatableSelects.insert(&SI);
   2611       IsUsedByRewrittenSpeculatableInstructions = true;
   2612       return true;
   2613     }
   2614 
   2615     return false; // Selects can't be promoted on their own.
   2616   }
   2617 
   2618 };
   2619 }
   2620 
   2621 namespace {
   2622 /// \brief Visitor to rewrite aggregate loads and stores as scalar operations.
   2623 ///
   2624 /// This pass aggressively rewrites all aggregate loads and stores on
   2625 /// a particular pointer (or any pointer derived from it which we can identify)
   2626 /// with scalar loads and stores.
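        ///
        /// For example (an illustrative sketch; value names are hypothetical),
        /// an aggregate load such as:
        ///   %v = load { i32, float }* %p
        /// becomes a chain of scalar loads and insertvalues along the lines of:
        ///   %gep0 = getelementptr inbounds { i32, float }* %p, i32 0, i32 0
        ///   %elt0 = load i32* %gep0
        ///   %agg0 = insertvalue { i32, float } undef, i32 %elt0, 0
        ///   %gep1 = getelementptr inbounds { i32, float }* %p, i32 0, i32 1
        ///   %elt1 = load float* %gep1
        ///   %agg1 = insertvalue { i32, float } %agg0, float %elt1, 1
        /// with %agg1 replacing all uses of %v. Aggregate stores are split
        /// analogously using extractvalue.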
   2627 class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> {
   2628   // Befriend the base class so it can delegate to private visit methods.
   2629   friend class llvm::InstVisitor<AggLoadStoreRewriter, bool>;
   2630 
   2631   const DataLayout &DL;
   2632 
   2633   /// Queue of pointer uses to analyze and potentially rewrite.
   2634   SmallVector<Use *, 8> Queue;
   2635 
   2636   /// Set to prevent us from cycling with phi nodes and loops.
   2637   SmallPtrSet<User *, 8> Visited;
   2638 
   2639   /// The current pointer use being rewritten. This is used to dig up the used
   2640   /// value (as opposed to the user).
   2641   Use *U;
   2642 
   2643 public:
   2644   AggLoadStoreRewriter(const DataLayout &DL) : DL(DL) {}
   2645 
   2646   /// Rewrite loads and stores through a pointer and all pointers derived from
   2647   /// it.
   2648   bool rewrite(Instruction &I) {
   2649     DEBUG(dbgs() << "  Rewriting FCA loads and stores...\n");
   2650     enqueueUsers(I);
   2651     bool Changed = false;
   2652     while (!Queue.empty()) {
   2653       U = Queue.pop_back_val();
   2654       Changed |= visit(cast<Instruction>(U->getUser()));
   2655     }
   2656     return Changed;
   2657   }
   2658 
   2659 private:
   2660   /// Enqueue all the users of the given instruction for further processing.
   2661   /// This uses a set to de-duplicate users.
   2662   void enqueueUsers(Instruction &I) {
   2663     for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE;
   2664          ++UI)
   2665       if (Visited.insert(*UI))
   2666         Queue.push_back(&UI.getUse());
   2667   }
   2668 
   2669   // Conservative default is to not rewrite anything.
   2670   bool visitInstruction(Instruction &I) { return false; }
   2671 
   2672   /// \brief Generic recursive split emission class.
   2673   template <typename Derived>
   2674   class OpSplitter {
   2675   protected:
   2676     /// The builder used to form new instructions.
   2677     IRBuilderTy IRB;
   2678     /// The indices to be used with insertvalue or extractvalue to select the
   2679     /// appropriate value within the aggregate.
   2680     SmallVector<unsigned, 4> Indices;
   2681     /// The indices to a GEP instruction which will move Ptr to the correct slot
   2682     /// within the aggregate.
   2683     SmallVector<Value *, 4> GEPIndices;
   2684     /// The base pointer of the original op, used as a base for GEPing the
   2685     /// split operations.
   2686     Value *Ptr;
   2687 
   2688     /// Initialize the splitter with an insertion point and Ptr, starting
   2689     /// with a single zero GEP index.
   2690     OpSplitter(Instruction *InsertionPoint, Value *Ptr)
   2691       : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr) {}
   2692 
   2693   public:
   2694     /// \brief Generic recursive split emission routine.
   2695     ///
   2696     /// This method recursively splits an aggregate op (load or store) into
   2697     /// scalar or vector ops. It splits recursively until it hits a single value
   2698     /// and emits that single value operation via the template argument.
   2699     ///
   2700     /// The logic of this routine relies on GEPs and insertvalue and
   2701     /// extractvalue all operating with the same fundamental index list, merely
   2702     /// formatted differently (GEPs need actual values).
   2703     ///
   2704     /// \param Ty  The type being split recursively into smaller ops.
   2705     /// \param Agg The aggregate value being built up or stored, depending on
   2706     /// whether this is splitting a load or a store respectively.
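            ///
            /// For example (illustrative), for Ty = { i32, [2 x float] } this
            /// recurses down to the leaf types i32, float, and float, emitting
            /// one leaf operation per element with the index lists {0},
            /// {1, 0}, and {1, 1}.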
   2707     void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) {
   2708       if (Ty->isSingleValueType())
   2709         return static_cast<Derived *>(this)->emitFunc(Ty, Agg, Name);
   2710 
   2711       if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
   2712         unsigned OldSize = Indices.size();
   2713         (void)OldSize;
   2714         for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size;
   2715              ++Idx) {
   2716           assert(Indices.size() == OldSize && "Did not return to the old size");
   2717           Indices.push_back(Idx);
   2718           GEPIndices.push_back(IRB.getInt32(Idx));
   2719           emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx));
   2720           GEPIndices.pop_back();
   2721           Indices.pop_back();
   2722         }
   2723         return;
   2724       }
   2725 
   2726       if (StructType *STy = dyn_cast<StructType>(Ty)) {
   2727         unsigned OldSize = Indices.size();
   2728         (void)OldSize;
   2729         for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size;
   2730              ++Idx) {
   2731           assert(Indices.size() == OldSize && "Did not return to the old size");
   2732           Indices.push_back(Idx);
   2733           GEPIndices.push_back(IRB.getInt32(Idx));
   2734           emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx));
   2735           GEPIndices.pop_back();
   2736           Indices.pop_back();
   2737         }
   2738         return;
   2739       }
   2740 
   2741       llvm_unreachable("Only arrays and structs are aggregate loadable types");
   2742     }
   2743   };
   2744 
   2745   struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> {
   2746     LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr)
   2747       : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr) {}
   2748 
   2749     /// Emit a leaf load of a single value. This is called at the leaves of the
   2750     /// recursive emission to actually load values.
   2751     void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
   2752       assert(Ty->isSingleValueType());
   2753       // Load the single value and insert it using the indices.
   2754       Value *GEP = IRB.CreateInBoundsGEP(Ptr, GEPIndices, Name + ".gep");
   2755       Value *Load = IRB.CreateLoad(GEP, Name + ".load");
   2756       Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
   2757       DEBUG(dbgs() << "          to: " << *Load << "\n");
   2758     }
   2759   };
   2760 
   2761   bool visitLoadInst(LoadInst &LI) {
   2762     assert(LI.getPointerOperand() == *U);
   2763     if (!LI.isSimple() || LI.getType()->isSingleValueType())
   2764       return false;
   2765 
   2766     // We have an aggregate being loaded, split it apart.
   2767     DEBUG(dbgs() << "    original: " << LI << "\n");
   2768     LoadOpSplitter Splitter(&LI, *U);
   2769     Value *V = UndefValue::get(LI.getType());
   2770     Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca");
   2771     LI.replaceAllUsesWith(V);
   2772     LI.eraseFromParent();
   2773     return true;
   2774   }
   2775 
   2776   struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> {
   2777     StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr)
   2778       : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr) {}
   2779 
   2780     /// Emit a leaf store of a single value. This is called at the leaves of the
   2781     /// recursive emission to actually produce stores.
   2782     void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
   2783       assert(Ty->isSingleValueType());
   2784       // Extract the single value and store it using the indices.
   2785       Value *Store = IRB.CreateStore(
   2786         IRB.CreateExtractValue(Agg, Indices, Name + ".extract"),
   2787         IRB.CreateInBoundsGEP(Ptr, GEPIndices, Name + ".gep"));
   2788       (void)Store;
   2789       DEBUG(dbgs() << "          to: " << *Store << "\n");
   2790     }
   2791   };
   2792 
   2793   bool visitStoreInst(StoreInst &SI) {
   2794     if (!SI.isSimple() || SI.getPointerOperand() != *U)
   2795       return false;
   2796     Value *V = SI.getValueOperand();
   2797     if (V->getType()->isSingleValueType())
   2798       return false;
   2799 
   2800     // We have an aggregate being stored, split it apart.
   2801     DEBUG(dbgs() << "    original: " << SI << "\n");
   2802     StoreOpSplitter Splitter(&SI, *U);
   2803     Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca");
   2804     SI.eraseFromParent();
   2805     return true;
   2806   }
   2807 
   2808   bool visitBitCastInst(BitCastInst &BC) {
   2809     enqueueUsers(BC);
   2810     return false;
   2811   }
   2812 
   2813   bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
   2814     enqueueUsers(GEPI);
   2815     return false;
   2816   }
   2817 
   2818   bool visitPHINode(PHINode &PN) {
   2819     enqueueUsers(PN);
   2820     return false;
   2821   }
   2822 
   2823   bool visitSelectInst(SelectInst &SI) {
   2824     enqueueUsers(SI);
   2825     return false;
   2826   }
   2827 };
   2828 }
   2829 
   2830 /// \brief Strip aggregate type wrapping.
   2831 ///
   2832 /// This removes no-op aggregate types wrapping an underlying type. It will
   2833 /// strip as many layers of types as it can without changing either the type
   2834 /// size or the allocated size.
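        ///
        /// For example (assuming a typical data layout), { { i32 } } and
        /// [1 x i32] both strip down to i32, while { i32, i32 } is returned
        /// unchanged because its element type does not cover the full
        /// allocated size.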
   2835 static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) {
   2836   if (Ty->isSingleValueType())
   2837     return Ty;
   2838 
   2839   uint64_t AllocSize = DL.getTypeAllocSize(Ty);
   2840   uint64_t TypeSize = DL.getTypeSizeInBits(Ty);
   2841 
   2842   Type *InnerTy;
   2843   if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
   2844     InnerTy = ArrTy->getElementType();
   2845   } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
   2846     const StructLayout *SL = DL.getStructLayout(STy);
   2847     unsigned Index = SL->getElementContainingOffset(0);
   2848     InnerTy = STy->getElementType(Index);
   2849   } else {
   2850     return Ty;
   2851   }
   2852 
   2853   if (AllocSize > DL.getTypeAllocSize(InnerTy) ||
   2854       TypeSize > DL.getTypeSizeInBits(InnerTy))
   2855     return Ty;
   2856 
   2857   return stripAggregateTypeWrapping(DL, InnerTy);
   2858 }
   2859 
   2860 /// \brief Try to find a partition of the aggregate type passed in for a given
   2861 /// offset and size.
   2862 ///
   2863 /// This recurses through the aggregate type and tries to compute a subtype
   2864 /// based on the offset and size. When the offset and size span a sub-section
   2865 /// of an array, it will even compute a new array type for that sub-section,
   2866 /// and the same for structs.
   2867 ///
   2868 /// Note that this routine is very strict and tries to find a partition of the
   2869 /// type which produces the *exact* right offset and size. It is not forgiving
   2870 /// when the size or offset causes either end of the type-based partition to be off.
   2871 /// Also, this is a best-effort routine. It is reasonable to give up and not
   2872 /// return a type if necessary.
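        ///
        /// For example (assuming a typical data layout), with
        /// Ty = { i64, [8 x i8] }: Offset 8 with Size 8 yields [8 x i8],
        /// Offset 8 with Size 4 computes the new type [4 x i8], and Offset 4
        /// with Size 8 yields no type because the range straddles the
        /// boundary between the two elements.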
   2873 static Type *getTypePartition(const DataLayout &DL, Type *Ty,
   2874                               uint64_t Offset, uint64_t Size) {
   2875   if (Offset == 0 && DL.getTypeAllocSize(Ty) == Size)
   2876     return stripAggregateTypeWrapping(DL, Ty);
   2877   if (Offset > DL.getTypeAllocSize(Ty) ||
   2878       (DL.getTypeAllocSize(Ty) - Offset) < Size)
   2879     return 0;
   2880 
   2881   if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) {
   2882     // We can't partition pointers...
   2883     if (SeqTy->isPointerTy())
   2884       return 0;
   2885 
   2886     Type *ElementTy = SeqTy->getElementType();
   2887     uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
   2888     uint64_t NumSkippedElements = Offset / ElementSize;
   2889     if (ArrayType *ArrTy = dyn_cast<ArrayType>(SeqTy)) {
   2890       if (NumSkippedElements >= ArrTy->getNumElements())
   2891         return 0;
   2892     } else if (VectorType *VecTy = dyn_cast<VectorType>(SeqTy)) {
   2893       if (NumSkippedElements >= VecTy->getNumElements())
   2894         return 0;
   2895     }
   2896     Offset -= NumSkippedElements * ElementSize;
   2897 
   2898     // First check if we need to recurse.
   2899     if (Offset > 0 || Size < ElementSize) {
   2900       // Bail if the partition ends in a different array element.
   2901       if ((Offset + Size) > ElementSize)
   2902         return 0;
   2903       // Recurse through the element type trying to peel off offset bytes.
   2904       return getTypePartition(DL, ElementTy, Offset, Size);
   2905     }
   2906     assert(Offset == 0);
   2907 
   2908     if (Size == ElementSize)
   2909       return stripAggregateTypeWrapping(DL, ElementTy);
   2910     assert(Size > ElementSize);
   2911     uint64_t NumElements = Size / ElementSize;
   2912     if (NumElements * ElementSize != Size)
   2913       return 0;
   2914     return ArrayType::get(ElementTy, NumElements);
   2915   }
   2916 
   2917   StructType *STy = dyn_cast<StructType>(Ty);
   2918   if (!STy)
   2919     return 0;
   2920 
   2921   const StructLayout *SL = DL.getStructLayout(STy);
   2922   if (Offset >= SL->getSizeInBytes())
   2923     return 0;
   2924   uint64_t EndOffset = Offset + Size;
   2925   if (EndOffset > SL->getSizeInBytes())
   2926     return 0;
   2927 
   2928   unsigned Index = SL->getElementContainingOffset(Offset);
   2929   Offset -= SL->getElementOffset(Index);
   2930 
   2931   Type *ElementTy = STy->getElementType(Index);
   2932   uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
   2933   if (Offset >= ElementSize)
   2934     return 0; // The offset points into alignment padding.
   2935 
   2936   // See if the partition must be contained within the element.
   2937   if (Offset > 0 || Size < ElementSize) {
   2938     if ((Offset + Size) > ElementSize)
   2939       return 0;
   2940     return getTypePartition(DL, ElementTy, Offset, Size);
   2941   }
   2942   assert(Offset == 0);
   2943 
   2944   if (Size == ElementSize)
   2945     return stripAggregateTypeWrapping(DL, ElementTy);
   2946 
   2947   StructType::element_iterator EI = STy->element_begin() + Index,
   2948                                EE = STy->element_end();
   2949   if (EndOffset < SL->getSizeInBytes()) {
   2950     unsigned EndIndex = SL->getElementContainingOffset(EndOffset);
   2951     if (Index == EndIndex)
   2952       return 0; // Within a single element and its padding.
   2953 
   2954     // Don't try to form "natural" types if the elements don't line up with the
   2955     // expected size.
   2956     // FIXME: We could potentially recurse down through the last element in the
   2957     // sub-struct to find a natural end point.
   2958     if (SL->getElementOffset(EndIndex) != EndOffset)
   2959       return 0;
   2960 
   2961     assert(Index < EndIndex);
   2962     EE = STy->element_begin() + EndIndex;
   2963   }
   2964 
   2965   // Try to build up a sub-structure.
   2966   StructType *SubTy = StructType::get(STy->getContext(), makeArrayRef(EI, EE),
   2967                                       STy->isPacked());
   2968   const StructLayout *SubSL = DL.getStructLayout(SubTy);
   2969   if (Size != SubSL->getSizeInBytes())
   2970     return 0; // The sub-struct doesn't have quite the size needed.
   2971 
   2972   return SubTy;
   2973 }
   2974 
   2975 /// \brief Rewrite an alloca partition's users.
   2976 ///
   2977 /// This routine drives both of the rewriting goals of the SROA pass. It tries
   2978 /// to rewrite uses of an alloca partition to be conducive for SSA value
   2979 /// promotion. If the partition needs a new, more refined alloca, this will
   2980 /// build that new alloca, preserving as much type information as possible, and
   2981 /// rewrite the uses of the old alloca to point at the new one and have the
   2982 /// appropriate new offsets. It also evaluates how successful the rewrite was
   2983 /// at enabling promotion and if it was successful queues the alloca to be
   2984 /// promoted.
   2985 bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &S,
   2986                             AllocaSlices::iterator B, AllocaSlices::iterator E,
   2987                             int64_t BeginOffset, int64_t EndOffset,
   2988                             ArrayRef<AllocaSlices::iterator> SplitUses) {
   2989   assert(BeginOffset < EndOffset);
   2990   uint64_t SliceSize = EndOffset - BeginOffset;
   2991 
   2992   // Try to compute a friendly type for this partition of the alloca. This
   2993   // won't always succeed, in which case we fall back to a legal integer type
   2994   // or an i8 array of an appropriate size.
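          // For example (illustrative and target-dependent): a 4-byte slice
          // with no common use type typically becomes i32, while a 3-byte
          // slice falls back to [3 x i8].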
   2995   Type *SliceTy = 0;
   2996   if (Type *CommonUseTy = findCommonType(B, E, EndOffset))
   2997     if (DL->getTypeAllocSize(CommonUseTy) >= SliceSize)
   2998       SliceTy = CommonUseTy;
   2999   if (!SliceTy)
   3000     if (Type *TypePartitionTy = getTypePartition(*DL, AI.getAllocatedType(),
   3001                                                  BeginOffset, SliceSize))
   3002       SliceTy = TypePartitionTy;
   3003   if ((!SliceTy || (SliceTy->isArrayTy() &&
   3004                     SliceTy->getArrayElementType()->isIntegerTy())) &&
   3005       DL->isLegalInteger(SliceSize * 8))
   3006     SliceTy = Type::getIntNTy(*C, SliceSize * 8);
   3007   if (!SliceTy)
   3008     SliceTy = ArrayType::get(Type::getInt8Ty(*C), SliceSize);
   3009   assert(DL->getTypeAllocSize(SliceTy) >= SliceSize);
   3010 
   3011   bool IsVectorPromotable = isVectorPromotionViable(
   3012       *DL, SliceTy, S, BeginOffset, EndOffset, B, E, SplitUses);
   3013 
   3014   bool IsIntegerPromotable =
   3015       !IsVectorPromotable &&
   3016       isIntegerWideningViable(*DL, SliceTy, BeginOffset, S, B, E, SplitUses);
   3017 
   3018   // Check for the case where we're going to rewrite to a new alloca of the
   3019   // exact same type as the original, and with the same access offsets. In that
   3020   // case, re-use the existing alloca, but still run through the rewriter to
   3021   // perform phi and select speculation.
   3022   AllocaInst *NewAI;
   3023   if (SliceTy == AI.getAllocatedType()) {
   3024     assert(BeginOffset == 0 &&
   3025            "Non-zero begin offset but same alloca type");
   3026     NewAI = &AI;
   3027     // FIXME: We should be able to bail at this point with "nothing changed".
   3028     // FIXME: We might want to defer PHI speculation until after here.
   3029   } else {
   3030     unsigned Alignment = AI.getAlignment();
   3031     if (!Alignment) {
   3032       // The minimum alignment which users can rely on when the explicit
   3033       // alignment is omitted or zero is that required by the ABI for this
   3034       // type.
   3035       Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
   3036     }
   3037     Alignment = MinAlign(Alignment, BeginOffset);
   3038     // If we will get at least this much alignment from the type alone, leave
   3039     // the alloca's alignment unconstrained.
   3040     if (Alignment <= DL->getABITypeAlignment(SliceTy))
   3041       Alignment = 0;
   3042     NewAI = new AllocaInst(SliceTy, 0, Alignment,
   3043                            AI.getName() + ".sroa." + Twine(B - S.begin()), &AI);
   3044     ++NumNewAllocas;
   3045   }
   3046 
   3047   DEBUG(dbgs() << "Rewriting alloca partition "
   3048                << "[" << BeginOffset << "," << EndOffset << ") to: " << *NewAI
   3049                << "\n");
   3050 
   3051   // Track the high watermarks on several worklists that are only relevant
   3052   // for promoted allocas. We will reset them to these points if the alloca
   3053   // is not in fact scheduled for promotion.
   3054   unsigned PPWOldSize = PostPromotionWorklist.size();
   3055   unsigned SPOldSize = SpeculatablePHIs.size();
   3056   unsigned SSOldSize = SpeculatableSelects.size();
   3057   unsigned NumUses = 0;
   3058 
   3059   AllocaSliceRewriter Rewriter(*DL, S, *this, AI, *NewAI, BeginOffset,
   3060                                EndOffset, IsVectorPromotable,
   3061                                IsIntegerPromotable);
   3062   bool Promotable = true;
   3063   for (ArrayRef<AllocaSlices::iterator>::const_iterator SUI = SplitUses.begin(),
   3064                                                         SUE = SplitUses.end();
   3065        SUI != SUE; ++SUI) {
   3066     DEBUG(dbgs() << "  rewriting split ");
   3067     DEBUG(S.printSlice(dbgs(), *SUI, ""));
   3068     Promotable &= Rewriter.visit(*SUI);
   3069     ++NumUses;
   3070   }
   3071   for (AllocaSlices::iterator I = B; I != E; ++I) {
   3072     DEBUG(dbgs() << "  rewriting ");
   3073     DEBUG(S.printSlice(dbgs(), I, ""));
   3074     Promotable &= Rewriter.visit(I);
   3075     ++NumUses;
   3076   }
   3077 
   3078   NumAllocaPartitionUses += NumUses;
   3079   MaxUsesPerAllocaPartition =
   3080       std::max<unsigned>(NumUses, MaxUsesPerAllocaPartition);
   3081 
   3082   if (Promotable && !Rewriter.isUsedByRewrittenSpeculatableInstructions()) {
   3083     DEBUG(dbgs() << "  and queuing for promotion\n");
   3084     PromotableAllocas.push_back(NewAI);
   3085   } else if (NewAI != &AI ||
   3086              (Promotable &&
   3087               Rewriter.isUsedByRewrittenSpeculatableInstructions())) {
   3088     // If we can't promote the alloca, iterate on it to check for new
   3089     // refinements exposed by splitting the current alloca. Don't iterate on an
   3090     // alloca which didn't actually change and didn't get promoted.
   3091     //
   3092     // Alternatively, if we could promote the alloca but have speculatable
   3093     // instructions then we will speculate them after finishing our processing
   3094     // of the original alloca. Mark the new one for re-visiting in the next
   3095     // iteration so the speculated operations can be rewritten.
   3096     //
   3097     // FIXME: We should actually track whether the rewriter changed anything.
   3098     Worklist.insert(NewAI);
   3099   }
   3100 
   3101   // Drop any post-promotion work items if promotion didn't happen.
   3102   if (!Promotable) {
   3103     while (PostPromotionWorklist.size() > PPWOldSize)
   3104       PostPromotionWorklist.pop_back();
   3105     while (SpeculatablePHIs.size() > SPOldSize)
   3106       SpeculatablePHIs.pop_back();
   3107     while (SpeculatableSelects.size() > SSOldSize)
   3108       SpeculatableSelects.pop_back();
   3109   }
   3110 
   3111   return true;
   3112 }
   3113 
   3114 namespace {
   3115 struct IsSliceEndLessOrEqualTo {
   3116   uint64_t UpperBound;
   3117 
   3118   IsSliceEndLessOrEqualTo(uint64_t UpperBound) : UpperBound(UpperBound) {}
   3119 
   3120   bool operator()(const AllocaSlices::iterator &I) {
   3121     return I->endOffset() <= UpperBound;
   3122   }
   3123 };
   3124 }
   3125 
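        /// \brief Remove split uses that end at or before \p Offset, keeping
        /// MaxSplitUseEndOffset in sync. For example (illustrative), after
        /// rewriting a partition ending at offset 8, a split use covering
        /// [0,8) is finished and dropped, while one covering [0,16) is kept
        /// for later partitions.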
   3126 static void
   3127 removeFinishedSplitUses(SmallVectorImpl<AllocaSlices::iterator> &SplitUses,
   3128                         uint64_t &MaxSplitUseEndOffset, uint64_t Offset) {
   3129   if (Offset >= MaxSplitUseEndOffset) {
   3130     SplitUses.clear();
   3131     MaxSplitUseEndOffset = 0;
   3132     return;
   3133   }
   3134 
   3135   size_t SplitUsesOldSize = SplitUses.size();
   3136   SplitUses.erase(std::remove_if(SplitUses.begin(), SplitUses.end(),
   3137                                  IsSliceEndLessOrEqualTo(Offset)),
   3138                   SplitUses.end());
   3139   if (SplitUsesOldSize == SplitUses.size())
   3140     return;
   3141 
   3142   // Recompute the max. While this is linear, so is remove_if.
   3143   MaxSplitUseEndOffset = 0;
   3144   for (SmallVectorImpl<AllocaSlices::iterator>::iterator
   3145            SUI = SplitUses.begin(),
   3146            SUE = SplitUses.end();
   3147        SUI != SUE; ++SUI)
   3148     MaxSplitUseEndOffset = std::max((*SUI)->endOffset(), MaxSplitUseEndOffset);
   3149 }
   3150 
   3151 /// \brief Walks the slices of an alloca and forms partitions based on them,
   3152 /// rewriting each of their uses.
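        ///
        /// For example (an illustrative sketch), given an unsplittable slice
        /// [0,8) (say, an i64 load) and a splittable slice [0,16) (say, a
        /// memset), this forms two partitions, [0,8) and [8,16), with the
        /// splittable slice carried into both as a split use.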
   3153 bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &S) {
   3154   if (S.begin() == S.end())
   3155     return false;
   3156 
   3157   unsigned NumPartitions = 0;
   3158   bool Changed = false;
   3159   SmallVector<AllocaSlices::iterator, 4> SplitUses;
   3160   uint64_t MaxSplitUseEndOffset = 0;
   3161 
   3162   uint64_t BeginOffset = S.begin()->beginOffset();
   3163 
   3164   for (AllocaSlices::iterator SI = S.begin(), SJ = llvm::next(SI), SE = S.end();
   3165        SI != SE; SI = SJ) {
   3166     uint64_t MaxEndOffset = SI->endOffset();
   3167 
   3168     if (!SI->isSplittable()) {
   3169       // When we're forming an unsplittable region, it must always start at the
   3170       // first slice and will extend through its end.
   3171       assert(BeginOffset == SI->beginOffset());
   3172 
   3173       // Form a partition including all of the overlapping slices with this
   3174       // unsplittable slice.
   3175       while (SJ != SE && SJ->beginOffset() < MaxEndOffset) {
   3176         if (!SJ->isSplittable())
   3177           MaxEndOffset = std::max(MaxEndOffset, SJ->endOffset());
   3178         ++SJ;
   3179       }
   3180     } else {
   3181       assert(SI->isSplittable()); // Established above.
   3182 
   3183       // Collect all of the overlapping splittable slices.
   3184       while (SJ != SE && SJ->beginOffset() < MaxEndOffset &&
   3185              SJ->isSplittable()) {
   3186         MaxEndOffset = std::max(MaxEndOffset, SJ->endOffset());
   3187         ++SJ;
   3188       }
   3189 
   3190       // Back up MaxEndOffset if we ended the span early upon encountering
   3191       // an unsplittable slice.
   3192       if (SJ != SE && SJ->beginOffset() < MaxEndOffset) {
   3193         assert(!SJ->isSplittable());
   3194         MaxEndOffset = SJ->beginOffset();
   3195       }
   3196     }
   3197 
   3198     // Check if we have managed to move the end offset forward yet. If so,
   3199     // we'll have to rewrite uses and erase old split uses.
   3200     if (BeginOffset < MaxEndOffset) {
   3201       // Rewrite a sequence of overlapping slices.
   3202       Changed |=
   3203           rewritePartition(AI, S, SI, SJ, BeginOffset, MaxEndOffset, SplitUses);
   3204       ++NumPartitions;
   3205 
   3206       removeFinishedSplitUses(SplitUses, MaxSplitUseEndOffset, MaxEndOffset);
   3207     }
   3208 
   3209     // Accumulate all the splittable slices from the [SI,SJ) region which
   3210     // overlap going forward.
   3211     for (AllocaSlices::iterator SK = SI; SK != SJ; ++SK)
   3212       if (SK->isSplittable() && SK->endOffset() > MaxEndOffset) {
   3213         SplitUses.push_back(SK);
   3214         MaxSplitUseEndOffset = std::max(SK->endOffset(), MaxSplitUseEndOffset);
   3215       }
   3216 
   3217     // If we're already at the end and we have no split uses, we're done.
   3218     if (SJ == SE && SplitUses.empty())
   3219       break;
   3220 
   3221     // If we have no split uses or no gap in offsets, we're ready to move to
   3222     // the next slice.
   3223     if (SplitUses.empty() || (SJ != SE && MaxEndOffset == SJ->beginOffset())) {
   3224       BeginOffset = SJ->beginOffset();
   3225       continue;
   3226     }
   3227 
   3228     // Even if we have split slices, if the next slice is splittable and the
   3229     // split slices reach it, we can simply set up the beginning offset of the
   3230     // next iteration to bridge between them.
   3231     if (SJ != SE && SJ->isSplittable() &&
   3232         MaxSplitUseEndOffset > SJ->beginOffset()) {
   3233       BeginOffset = MaxEndOffset;
   3234       continue;
   3235     }
   3236 
   3237     // Otherwise, we have a tail of split slices. Rewrite them with an empty
   3238     // range of slices.
   3239     uint64_t PostSplitEndOffset =
   3240         SJ == SE ? MaxSplitUseEndOffset : SJ->beginOffset();
   3241 
   3242     Changed |= rewritePartition(AI, S, SJ, SJ, MaxEndOffset, PostSplitEndOffset,
   3243                                 SplitUses);
   3244     ++NumPartitions;
   3245 
   3246     if (SJ == SE)
   3247       break; // Skip the rest, we don't need to do any cleanup.
   3248 
   3249     removeFinishedSplitUses(SplitUses, MaxSplitUseEndOffset,
   3250                             PostSplitEndOffset);
   3251 
   3252     // Now just reset the begin offset for the next iteration.
   3253     BeginOffset = SJ->beginOffset();
   3254   }
   3255 
   3256   NumAllocaPartitions += NumPartitions;
   3257   MaxPartitionsPerAlloca =
   3258       std::max<unsigned>(NumPartitions, MaxPartitionsPerAlloca);
   3259 
   3260   return Changed;
   3261 }
   3262 
   3263 /// \brief Analyze an alloca for SROA.
   3264 ///
   3265 /// This analyzes the alloca to ensure we can reason about it, builds
   3266 /// the slices of the alloca, and then hands it off to be split and
   3267 /// rewritten as needed.
   3268 bool SROA::runOnAlloca(AllocaInst &AI) {
   3269   DEBUG(dbgs() << "SROA alloca: " << AI << "\n");
   3270   ++NumAllocasAnalyzed;
   3271 
   3272   // Special case dead allocas, as they're trivial.
   3273   if (AI.use_empty()) {
   3274     AI.eraseFromParent();
   3275     return true;
   3276   }
   3277 
   3278   // Skip alloca forms that this analysis can't handle.
   3279   if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() ||
   3280       DL->getTypeAllocSize(AI.getAllocatedType()) == 0)
   3281     return false;
   3282 
   3283   bool Changed = false;
   3284 
   3285   // First, split any FCA loads and stores touching this alloca to expose
   3286   // better splitting and promotion opportunities.
   3287   AggLoadStoreRewriter AggRewriter(*DL);
   3288   Changed |= AggRewriter.rewrite(AI);
   3289 
   3290   // Build the slices using a recursive instruction-visiting builder.
   3291   AllocaSlices S(*DL, AI);
   3292   DEBUG(S.print(dbgs()));
   3293   if (S.isEscaped())
   3294     return Changed;
   3295 
   3296   // Delete all the dead users of this alloca before splitting and rewriting it.
   3297   for (AllocaSlices::dead_user_iterator DI = S.dead_user_begin(),
   3298                                         DE = S.dead_user_end();
   3299        DI != DE; ++DI) {
   3300     Changed = true;
   3301     (*DI)->replaceAllUsesWith(UndefValue::get((*DI)->getType()));
   3302     DeadInsts.insert(*DI);
   3303   }
   3304   for (AllocaSlices::dead_op_iterator DO = S.dead_op_begin(),
   3305                                       DE = S.dead_op_end();
   3306        DO != DE; ++DO) {
   3307     Value *OldV = **DO;
   3308     // Clobber the use with an undef value.
   3309     **DO = UndefValue::get(OldV->getType());
   3310     if (Instruction *OldI = dyn_cast<Instruction>(OldV))
   3311       if (isInstructionTriviallyDead(OldI)) {
   3312         Changed = true;
   3313         DeadInsts.insert(OldI);
   3314       }
   3315   }
   3316 
   3317   // No slices to split. Leave the dead alloca for a later pass to clean up.
   3318   if (S.begin() == S.end())
   3319     return Changed;
   3320 
   3321   Changed |= splitAlloca(AI, S);
   3322 
   3323   DEBUG(dbgs() << "  Speculating PHIs\n");
   3324   while (!SpeculatablePHIs.empty())
   3325     speculatePHINodeLoads(*SpeculatablePHIs.pop_back_val());
   3326 
   3327   DEBUG(dbgs() << "  Speculating Selects\n");
   3328   while (!SpeculatableSelects.empty())
   3329     speculateSelectInstLoads(*SpeculatableSelects.pop_back_val());
   3330 
   3331   return Changed;
   3332 }
   3333 
   3334 /// \brief Delete the dead instructions accumulated in this run.
   3335 ///
   3336 /// Recursively deletes the dead instructions we've accumulated. This is done
   3337 /// at the very end to maximize locality of the recursive delete and to
   3338 /// minimize the problems of invalidated instruction pointers as such pointers
   3339 /// are used heavily in the intermediate stages of the algorithm.
   3340 ///
   3341 /// We also record the alloca instructions deleted here so that they aren't
   3342 /// subsequently handed to mem2reg to promote.
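        ///
        /// For example (illustrative), deleting a dead load drops its use of
        /// the address GEP; if that GEP thereby becomes trivially dead it is
        /// queued and deleted in turn, and the cascade continues outward.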
   3343 void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst*, 4> &DeletedAllocas) {
   3344   while (!DeadInsts.empty()) {
   3345     Instruction *I = DeadInsts.pop_back_val();
   3346     DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");
   3347 
   3348     I->replaceAllUsesWith(UndefValue::get(I->getType()));
   3349 
   3350     for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
   3351       if (Instruction *U = dyn_cast<Instruction>(*OI)) {
   3352         // Zero out the operand and see if it becomes trivially dead.
   3353         *OI = 0;
   3354         if (isInstructionTriviallyDead(U))
   3355           DeadInsts.insert(U);
   3356       }
   3357 
   3358     if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
   3359       DeletedAllocas.insert(AI);
   3360 
   3361     ++NumDeleted;
   3362     I->eraseFromParent();
   3363   }
   3364 }
   3365 
   3366 static void enqueueUsersInWorklist(Instruction &I,
   3367                                    SmallVectorImpl<Use *> &UseWorklist,
   3368                                    SmallPtrSet<Use *, 8> &VisitedUses) {
   3369   for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE;
   3370        ++UI)
   3371     if (VisitedUses.insert(&UI.getUse()))
   3372       UseWorklist.push_back(&UI.getUse());
   3373 }
   3374 
   3375 /// \brief Promote the allocas, using the best available technique.
   3376 ///
   3377 /// This attempts to promote whatever allocas have been identified as viable in
   3378 /// the PromotableAllocas list. If that list is empty, there is nothing to do.
   3379 /// If there is a domtree available, we attempt to promote using the full power
   3380 /// of mem2reg. Otherwise, we build and use the AllocaPromoter above which is
   3381 /// based on the SSAUpdater utilities. This function returns whether any
   3382 /// promotion occurred.
   3383 bool SROA::promoteAllocas(Function &F) {
   3384   if (PromotableAllocas.empty())
   3385     return false;
   3386 
   3387   NumPromoted += PromotableAllocas.size();
   3388 
   3389   if (DT && !ForceSSAUpdater) {
   3390     DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
   3391     PromoteMemToReg(PromotableAllocas, *DT, DL);
   3392     PromotableAllocas.clear();
   3393     return true;
   3394   }
   3395 
   3396   DEBUG(dbgs() << "Promoting allocas with SSAUpdater...\n");
   3397   SSAUpdater SSA;
   3398   DIBuilder DIB(*F.getParent());
   3399   SmallVector<Instruction*, 64> Insts;
   3400 
   3401   // We need a worklist to walk the uses of each alloca.
   3402   SmallVector<Use *, 8> UseWorklist;
   3403   SmallPtrSet<Use *, 8> VisitedUses;
   3404   SmallVector<Instruction *, 32> DeadInsts;
   3405 
   3406   for (unsigned Idx = 0, Size = PromotableAllocas.size(); Idx != Size; ++Idx) {
   3407     AllocaInst *AI = PromotableAllocas[Idx];
   3408     UseWorklist.clear();
   3409     VisitedUses.clear();
   3410 
   3411     enqueueUsersInWorklist(*AI, UseWorklist, VisitedUses);
   3412 
   3413     while (!UseWorklist.empty()) {
   3414       Use *U = UseWorklist.pop_back_val();
   3415       Instruction &I = *cast<Instruction>(U->getUser());
   3416 
   3417       // FIXME: Currently the SSAUpdater infrastructure doesn't reason about
   3418       // lifetime intrinsics and so we strip them (and the bitcasts+GEPs
   3419       // leading to them) here. Eventually it should use them to optimize the
   3420       // scalar values produced.
   3421       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
   3422         assert(II->getIntrinsicID() == Intrinsic::lifetime_start ||
   3423                II->getIntrinsicID() == Intrinsic::lifetime_end);
   3424         II->eraseFromParent();
   3425         continue;
   3426       }
   3427 
   3428       // Push the loads and stores we find onto the list. SROA will already
   3429       // have validated that all loads and stores are viable candidates for
   3430       // promotion.
   3431       if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
   3432         assert(LI->getType() == AI->getAllocatedType());
   3433         Insts.push_back(LI);
   3434         continue;
   3435       }
   3436       if (StoreInst *SI = dyn_cast<StoreInst>(&I)) {
   3437         assert(SI->getValueOperand()->getType() == AI->getAllocatedType());
   3438         Insts.push_back(SI);
   3439         continue;
   3440       }
   3441 
   3442       // For everything else, we know that only no-op bitcasts and GEPs will
   3443       // make it this far; just recurse through them and record them for later
   3444       // removal.
   3445       DeadInsts.push_back(&I);
   3446       enqueueUsersInWorklist(I, UseWorklist, VisitedUses);
   3447     }
   3448     AllocaPromoter(Insts, SSA, *AI, DIB).run(Insts);
   3449     Insts.clear();
   3450     while (!DeadInsts.empty())
   3451       DeadInsts.pop_back_val()->eraseFromParent();
   3452     AI->eraseFromParent();
   3453   }
   3454 
   3455   PromotableAllocas.clear();
   3456   return true;
   3457 }
   3458 
   3459 namespace {
   3460   /// \brief A predicate to test whether an alloca belongs to a set.
   3461   class IsAllocaInSet {
   3462     typedef SmallPtrSet<AllocaInst *, 4> SetType;
   3463     const SetType &Set;
   3464 
   3465   public:
   3466     typedef AllocaInst *argument_type;
   3467 
   3468     IsAllocaInSet(const SetType &Set) : Set(Set) {}
   3469     bool operator()(AllocaInst *AI) const { return Set.count(AI); }
   3470   };
   3471 }
   3472 
   3473 bool SROA::runOnFunction(Function &F) {
   3474   DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
   3475   C = &F.getContext();
   3476   DL = getAnalysisIfAvailable<DataLayout>();
   3477   if (!DL) {
   3478     DEBUG(dbgs() << "  Skipping SROA -- no target data!\n");
   3479     return false;
   3480   }
   3481   DT = getAnalysisIfAvailable<DominatorTree>();
   3482 
   3483   BasicBlock &EntryBB = F.getEntryBlock();
   3484   for (BasicBlock::iterator I = EntryBB.begin(), E = llvm::prior(EntryBB.end());
   3485        I != E; ++I)
   3486     if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
   3487       Worklist.insert(AI);
   3488 
   3489   bool Changed = false;
   3490   // A set of deleted alloca instruction pointers which should be removed from
   3491   // the list of promotable allocas.
   3492   SmallPtrSet<AllocaInst *, 4> DeletedAllocas;
   3493 
   3494   do {
   3495     while (!Worklist.empty()) {
   3496       Changed |= runOnAlloca(*Worklist.pop_back_val());
   3497       deleteDeadInstructions(DeletedAllocas);
   3498 
   3499       // Remove the deleted allocas from various lists so that we don't try to
   3500       // continue processing them.
   3501       if (!DeletedAllocas.empty()) {
   3502         Worklist.remove_if(IsAllocaInSet(DeletedAllocas));
   3503         PostPromotionWorklist.remove_if(IsAllocaInSet(DeletedAllocas));
   3504         PromotableAllocas.erase(std::remove_if(PromotableAllocas.begin(),
   3505                                                PromotableAllocas.end(),
   3506                                                IsAllocaInSet(DeletedAllocas)),
   3507                                 PromotableAllocas.end());
   3508         DeletedAllocas.clear();
   3509       }
   3510     }
   3511 
   3512     Changed |= promoteAllocas(F);
   3513 
   3514     Worklist = PostPromotionWorklist;
   3515     PostPromotionWorklist.clear();
   3516   } while (!Worklist.empty());
   3517 
   3518   return Changed;
   3519 }
   3520 
   3521 void SROA::getAnalysisUsage(AnalysisUsage &AU) const {
   3522   if (RequiresDomTree)
   3523     AU.addRequired<DominatorTree>();
   3524   AU.setPreservesCFG();
   3525 }
   3526