//===----- LoadStoreVectorizer.cpp - GPU Load & Store Vectorizer ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
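// This pass merges chains of loads or stores to consecutive memory addresses
// into wider vector loads and stores. As the name suggests, it is aimed
// primarily at GPU targets, where wider memory accesses make better use of
// the available bandwidth.
//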
//===----------------------------------------------------------------------===//

#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Vectorize.h"

using namespace llvm;

#define DEBUG_TYPE "load-store-vectorizer"
STATISTIC(NumVectorInstructions, "Number of vector accesses generated");
STATISTIC(NumScalarsVectorized, "Number of scalar accesses vectorized");

namespace {

// TODO: Remove this
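// Assumed base alignment in bytes. It is used both as the alignment forced
// onto stack objects when a chain would otherwise be misaligned, and as a
// fallback divisor in accessIsMisaligned().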
static const unsigned TargetBaseAlign = 4;

class Vectorizer {
  typedef SmallVector<Value *, 8> ValueList;
  typedef MapVector<Value *, ValueList> ValueListMap;

  Function &F;
  AliasAnalysis &AA;
  DominatorTree &DT;
  ScalarEvolution &SE;
  TargetTransformInfo &TTI;
  const DataLayout &DL;
  IRBuilder<> Builder;
  ValueListMap StoreRefs;
  ValueListMap LoadRefs;

public:
  Vectorizer(Function &F, AliasAnalysis &AA, DominatorTree &DT,
             ScalarEvolution &SE, TargetTransformInfo &TTI)
      : F(F), AA(AA), DT(DT), SE(SE), TTI(TTI),
        DL(F.getParent()->getDataLayout()), Builder(SE.getContext()) {}

  bool run();

private:
  Value *getPointerOperand(Value *I);

  unsigned getPointerAddressSpace(Value *I);

  unsigned getAlignment(LoadInst *LI) const {
    unsigned Align = LI->getAlignment();
    if (Align != 0)
      return Align;

    return DL.getABITypeAlignment(LI->getType());
  }

  unsigned getAlignment(StoreInst *SI) const {
    unsigned Align = SI->getAlignment();
    if (Align != 0)
      return Align;

    return DL.getABITypeAlignment(SI->getValueOperand()->getType());
  }

  bool isConsecutiveAccess(Value *A, Value *B);

  /// After vectorization, reorder the instructions that I depends on
  /// (the instructions defining its operands), to ensure they dominate I.
  void reorder(Instruction *I);

  /// Returns the first and the last instructions in Chain.
  std::pair<BasicBlock::iterator, BasicBlock::iterator>
  getBoundaryInstrs(ArrayRef<Value *> Chain);

  /// Erases the original instructions after vectorizing.
  void eraseInstructions(ArrayRef<Value *> Chain);

  /// "Legalize" the vector type that would be produced by combining \p
  /// ElementSizeBits elements in \p Chain. Break into two pieces such that the
  /// total size of each piece is 1, 2 or a multiple of 4 bytes. \p Chain is
  /// expected to have more than 4 elements.
  std::pair<ArrayRef<Value *>, ArrayRef<Value *>>
  splitOddVectorElts(ArrayRef<Value *> Chain, unsigned ElementSizeBits);

  /// Checks for instructions which may affect the memory accessed
  /// in the chain between \p From and \p To. Returns Index, where
  /// \p Chain[0, Index) is the largest vectorizable chain prefix.
  /// The elements of \p Chain should be all loads or all stores.
  unsigned getVectorizablePrefixEndIdx(ArrayRef<Value *> Chain,
                                       BasicBlock::iterator From,
                                       BasicBlock::iterator To);

  /// Collects load and store instructions to vectorize.
  void collectInstructions(BasicBlock *BB);

  /// Processes the collected instructions in \p Map. The elements of \p Map
  /// should be all loads or all stores.
  bool vectorizeChains(ValueListMap &Map);

  /// Finds loads/stores to consecutive memory addresses and vectorizes them.
  bool vectorizeInstructions(ArrayRef<Value *> Instrs);

  /// Vectorizes the load instructions in Chain.
  bool vectorizeLoadChain(ArrayRef<Value *> Chain,
                          SmallPtrSet<Value *, 16> *InstructionsProcessed);

  /// Vectorizes the store instructions in Chain.
  bool vectorizeStoreChain(ArrayRef<Value *> Chain,
                           SmallPtrSet<Value *, 16> *InstructionsProcessed);

  /// Checks whether this load/store access is misaligned.
  bool accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
                          unsigned Alignment);
};

class LoadStoreVectorizer : public FunctionPass {
public:
  static char ID;

  LoadStoreVectorizer() : FunctionPass(ID) {
    initializeLoadStoreVectorizerPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  const char *getPassName() const override {
    return "GPU Load and Store Vectorizer";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
  }
};
}

INITIALIZE_PASS_BEGIN(LoadStoreVectorizer, DEBUG_TYPE,
                      "Vectorize load and store instructions", false, false)
INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(LoadStoreVectorizer, DEBUG_TYPE,
                    "Vectorize load and store instructions", false, false)

char LoadStoreVectorizer::ID = 0;

Pass *llvm::createLoadStoreVectorizerPass() {
  return new LoadStoreVectorizer();
}

bool LoadStoreVectorizer::runOnFunction(Function &F) {
  // Don't vectorize when the attribute NoImplicitFloat is used.
  if (skipFunction(F) || F.hasFnAttribute(Attribute::NoImplicitFloat))
    return false;

  AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  TargetTransformInfo &TTI =
      getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  Vectorizer V(F, AA, DT, SE, TTI);
  return V.run();
}

// Vectorizer Implementation
bool Vectorizer::run() {
  bool Changed = false;

  // Scan the blocks in the function in post order.
  for (BasicBlock *BB : post_order(&F)) {
    collectInstructions(BB);
    Changed |= vectorizeChains(LoadRefs);
    Changed |= vectorizeChains(StoreRefs);
  }

  return Changed;
}

Value *Vectorizer::getPointerOperand(Value *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

unsigned Vectorizer::getPointerAddressSpace(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}

// FIXME: Merge with llvm::isConsecutiveAccess
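// Returns true if B's address is exactly one element (the store size of A's
// pointee type) past A's address. First strip constant offsets and ask SCEV
// whether the bases differ by exactly that amount; failing that, structurally
// compare GEPs that are identical except for a sign/zero-extended final index.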
bool Vectorizer::isConsecutiveAccess(Value *A, Value *B) {
  Value *PtrA = getPointerOperand(A);
  Value *PtrB = getPointerOperand(B);
  unsigned ASA = getPointerAddressSpace(A);
  unsigned ASB = getPointerAddressSpace(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are distinct pointers to types of the same store
  // size.
  unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA);
  Type *PtrATy = PtrA->getType()->getPointerElementType();
  Type *PtrBTy = PtrB->getType()->getPointerElementType();
  if (PtrA == PtrB ||
      DL.getTypeStoreSize(PtrATy) != DL.getTypeStoreSize(PtrBTy) ||
      DL.getTypeStoreSize(PtrATy->getScalarType()) !=
          DL.getTypeStoreSize(PtrBTy->getScalarType()))
    return false;

  APInt Size(PtrBitWidth, DL.getTypeStoreSize(PtrATy));

  APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);

  APInt OffsetDelta = OffsetB - OffsetA;

  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == Size;

  // Compute the base pointer delta that would make the final delta equal to
  // the element size.
  APInt BaseDelta = Size - OffsetDelta;

  // Compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
  const SCEV *C = SE.getConstant(BaseDelta);
  const SCEV *X = SE.getAddExpr(PtrSCEVA, C);
  if (X == PtrSCEVB)
    return true;

  // Sometimes even this doesn't work, because SCEV can't always see through
  // patterns that look like (gep (ext (add (shl X, C1), C2))). Try checking
  // things the hard way.

  // Look through GEPs after checking they're the same except for the last
  // index.
  GetElementPtrInst *GEPA = dyn_cast<GetElementPtrInst>(getPointerOperand(A));
  GetElementPtrInst *GEPB = dyn_cast<GetElementPtrInst>(getPointerOperand(B));
  if (!GEPA || !GEPB || GEPA->getNumOperands() != GEPB->getNumOperands())
    return false;
  unsigned FinalIndex = GEPA->getNumOperands() - 1;
  for (unsigned i = 0; i < FinalIndex; i++)
    if (GEPA->getOperand(i) != GEPB->getOperand(i))
      return false;

  Instruction *OpA = dyn_cast<Instruction>(GEPA->getOperand(FinalIndex));
  Instruction *OpB = dyn_cast<Instruction>(GEPB->getOperand(FinalIndex));
  if (!OpA || !OpB || OpA->getOpcode() != OpB->getOpcode() ||
      OpA->getType() != OpB->getType())
    return false;

  // Only look through a ZExt/SExt.
  if (!isa<SExtInst>(OpA) && !isa<ZExtInst>(OpA))
    return false;

  bool Signed = isa<SExtInst>(OpA);

  OpA = dyn_cast<Instruction>(OpA->getOperand(0));
  OpB = dyn_cast<Instruction>(OpB->getOperand(0));
  if (!OpA || !OpB || OpA->getType() != OpB->getType())
    return false;

  // Now we need to prove that adding 1 to OpA won't overflow.
  bool Safe = false;
  // First attempt: if OpB is an add of a positive constant carrying the
  // matching no-wrap flag, and the SCEV check below proves OpB == OpA + 1,
  // then OpA + 1 cannot wrap either.
  if (OpB->getOpcode() == Instruction::Add &&
      isa<ConstantInt>(OpB->getOperand(1)) &&
      cast<ConstantInt>(OpB->getOperand(1))->getSExtValue() > 0) {
    if (Signed)
      Safe = cast<BinaryOperator>(OpB)->hasNoSignedWrap();
    else
      Safe = cast<BinaryOperator>(OpB)->hasNoUnsignedWrap();
  }

  unsigned BitWidth = OpA->getType()->getScalarSizeInBits();

  // Second attempt:
  // If any bits are known to be zero other than the sign bit in OpA, we can
  // add 1 to it while guaranteeing no overflow of any sort.
  if (!Safe) {
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    computeKnownBits(OpA, KnownZero, KnownOne, DL, 0, nullptr, OpA, &DT);
    KnownZero &= ~APInt::getHighBitsSet(BitWidth, 1);
    if (KnownZero != 0)
      Safe = true;
  }

  if (!Safe)
    return false;

  const SCEV *OffsetSCEVA = SE.getSCEV(OpA);
  const SCEV *OffsetSCEVB = SE.getSCEV(OpB);
  const SCEV *One = SE.getConstant(APInt(BitWidth, 1));
  const SCEV *X2 = SE.getAddExpr(OffsetSCEVA, One);
  return X2 == OffsetSCEVB;
}

void Vectorizer::reorder(Instruction *I) {
  SmallPtrSet<Instruction *, 16> InstructionsToMove;
  SmallVector<Instruction *, 16> Worklist;

  Worklist.push_back(I);
  while (!Worklist.empty()) {
    Instruction *IW = Worklist.pop_back_val();
    int NumOperands = IW->getNumOperands();
    for (int i = 0; i < NumOperands; i++) {
      Instruction *IM = dyn_cast<Instruction>(IW->getOperand(i));
      if (!IM || IM->getOpcode() == Instruction::PHI)
        continue;

      if (!DT.dominates(IM, I)) {
        InstructionsToMove.insert(IM);
        Worklist.push_back(IM);
        assert(IM->getParent() == IW->getParent() &&
               "Instructions to move should be in the same basic block");
      }
    }
  }

  // All instructions to move should follow I. Start from I, not from begin().
  for (auto BBI = I->getIterator(), E = I->getParent()->end(); BBI != E;
       ++BBI) {
    if (!is_contained(InstructionsToMove, &*BBI))
      continue;
    Instruction *IM = &*BBI;
    // Step back before moving IM so that the loop's ++BBI lands on the
    // instruction that used to follow IM.
    --BBI;
    IM->removeFromParent();
    IM->insertBefore(I);
  }
}

std::pair<BasicBlock::iterator, BasicBlock::iterator>
Vectorizer::getBoundaryInstrs(ArrayRef<Value *> Chain) {
  Instruction *C0 = cast<Instruction>(Chain[0]);
  BasicBlock::iterator FirstInstr = C0->getIterator();
  BasicBlock::iterator LastInstr = C0->getIterator();

  BasicBlock *BB = C0->getParent();
  unsigned NumFound = 0;
  for (Instruction &I : *BB) {
    if (!is_contained(Chain, &I))
      continue;

    ++NumFound;
    if (NumFound == 1) {
      FirstInstr = I.getIterator();
    }
    if (NumFound == Chain.size()) {
      LastInstr = I.getIterator();
      break;
    }
  }

  // Range is [first, last).
  return std::make_pair(FirstInstr, ++LastInstr);
}

void Vectorizer::eraseInstructions(ArrayRef<Value *> Chain) {
  SmallVector<Instruction *, 16> Instrs;
  for (Value *V : Chain) {
    Value *PtrOperand = getPointerOperand(V);
    assert(PtrOperand && "Instruction must have a pointer operand.");
    Instrs.push_back(cast<Instruction>(V));
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(PtrOperand))
      Instrs.push_back(GEP);
  }

  // Erase instructions.
  for (Value *V : Instrs) {
    Instruction *Instr = cast<Instruction>(V);
    if (Instr->use_empty())
      Instr->eraseFromParent();
  }
}

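// Worked example, assuming a chain of five 16-bit elements (10 bytes total):
// NumRight = (10 % 4) / 2 = 1, so the chain splits into a four-element prefix
// (8 bytes, a multiple of 4) and a one-element remainder.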
std::pair<ArrayRef<Value *>, ArrayRef<Value *>>
Vectorizer::splitOddVectorElts(ArrayRef<Value *> Chain,
                               unsigned ElementSizeBits) {
  unsigned ElemSizeInBytes = ElementSizeBits / 8;
  unsigned SizeInBytes = ElemSizeInBytes * Chain.size();
  unsigned NumRight = (SizeInBytes % 4) / ElemSizeInBytes;
  unsigned NumLeft = Chain.size() - NumRight;
  return std::make_pair(Chain.slice(0, NumLeft), Chain.slice(NumLeft));
}

unsigned Vectorizer::getVectorizablePrefixEndIdx(ArrayRef<Value *> Chain,
                                                 BasicBlock::iterator From,
                                                 BasicBlock::iterator To) {
  SmallVector<std::pair<Value *, unsigned>, 16> MemoryInstrs;
  SmallVector<std::pair<Value *, unsigned>, 16> ChainInstrs;

  unsigned InstrIdx = 0;
  for (auto I = From; I != To; ++I, ++InstrIdx) {
    if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
      if (!is_contained(Chain, &*I))
        MemoryInstrs.push_back({&*I, InstrIdx});
      else
        ChainInstrs.push_back({&*I, InstrIdx});
    } else if (I->mayHaveSideEffects()) {
      DEBUG(dbgs() << "LSV: Found side-effecting operation: " << *I << '\n');
      return 0;
    }
  }

  assert(Chain.size() == ChainInstrs.size() &&
         "All instructions in the Chain must exist in [From, To).");

  unsigned ChainIdx = 0;
  for (auto EntryChain : ChainInstrs) {
    Value *ChainInstrValue = EntryChain.first;
    unsigned ChainInstrIdx = EntryChain.second;
    for (auto EntryMem : MemoryInstrs) {
      Value *MemInstrValue = EntryMem.first;
      unsigned MemInstrIdx = EntryMem.second;
      if (isa<LoadInst>(MemInstrValue) && isa<LoadInst>(ChainInstrValue))
        continue;

      // We can ignore the alias as long as the load comes before the store,
      // because that means we won't be moving the load past the store to
      // vectorize it (the vectorized load is inserted at the location of the
      // first load in the chain).
      if (isa<StoreInst>(MemInstrValue) && isa<LoadInst>(ChainInstrValue) &&
          ChainInstrIdx < MemInstrIdx)
        continue;

      // Same case, but in reverse.
      if (isa<LoadInst>(MemInstrValue) && isa<StoreInst>(ChainInstrValue) &&
          ChainInstrIdx > MemInstrIdx)
        continue;

      Instruction *M0 = cast<Instruction>(MemInstrValue);
      Instruction *M1 = cast<Instruction>(ChainInstrValue);

      if (!AA.isNoAlias(MemoryLocation::get(M0), MemoryLocation::get(M1))) {
        DEBUG({
          Value *Ptr0 = getPointerOperand(M0);
          Value *Ptr1 = getPointerOperand(M1);

          dbgs() << "LSV: Found alias.\n"
                    "        Aliasing instruction and pointer:\n"
                 << *MemInstrValue << " aliases " << *Ptr0 << '\n'
                 << "        Aliased instruction and pointer:\n"
                 << *ChainInstrValue << " aliases " << *Ptr1 << '\n';
        });

        return ChainIdx;
      }
    }
    ChainIdx++;
  }
  return Chain.size();
}

void Vectorizer::collectInstructions(BasicBlock *BB) {
  LoadRefs.clear();
  StoreRefs.clear();

  for (Instruction &I : *BB) {
    if (!I.mayReadOrWriteMemory())
      continue;

    if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
      if (!LI->isSimple())
        continue;

      Type *Ty = LI->getType();
      if (!VectorType::isValidElementType(Ty->getScalarType()))
        continue;

      // Skip weird non-byte sizes. They probably aren't worth the effort of
      // handling correctly.
      unsigned TySize = DL.getTypeSizeInBits(Ty);
      if (TySize < 8)
        continue;

      Value *Ptr = LI->getPointerOperand();
      unsigned AS = Ptr->getType()->getPointerAddressSpace();
      unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);

      // No point in looking at these if they're too big to vectorize.
      if (TySize > VecRegSize / 2)
        continue;

      // Make sure all the users of a vector are constant-index extracts.
      if (isa<VectorType>(Ty) && !all_of(LI->users(), [LI](const User *U) {
            const Instruction *UI = cast<Instruction>(U);
            return isa<ExtractElementInst>(UI) &&
                   isa<ConstantInt>(UI->getOperand(1));
          }))
        continue;

      // TODO: Target hook to filter types.

      // Save the load locations.
      Value *ObjPtr = GetUnderlyingObject(Ptr, DL);
      LoadRefs[ObjPtr].push_back(LI);

    } else if (StoreInst *SI = dyn_cast<StoreInst>(&I)) {
      if (!SI->isSimple())
        continue;

      Type *Ty = SI->getValueOperand()->getType();
      if (!VectorType::isValidElementType(Ty->getScalarType()))
        continue;

      // Skip weird non-byte sizes. They probably aren't worth the effort of
      // handling correctly.
      unsigned TySize = DL.getTypeSizeInBits(Ty);
      if (TySize < 8)
        continue;

      Value *Ptr = SI->getPointerOperand();
      unsigned AS = Ptr->getType()->getPointerAddressSpace();
      unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
      if (TySize > VecRegSize / 2)
        continue;

      if (isa<VectorType>(Ty) && !all_of(SI->users(), [SI](const User *U) {
            const Instruction *UI = cast<Instruction>(U);
            return isa<ExtractElementInst>(UI) &&
                   isa<ConstantInt>(UI->getOperand(1));
          }))
        continue;

      // Save store location.
      Value *ObjPtr = GetUnderlyingObject(Ptr, DL);
      StoreRefs[ObjPtr].push_back(SI);
    }
  }
}

bool Vectorizer::vectorizeChains(ValueListMap &Map) {
  bool Changed = false;

  for (const std::pair<Value *, ValueList> &Chain : Map) {
    unsigned Size = Chain.second.size();
    if (Size < 2)
      continue;

    DEBUG(dbgs() << "LSV: Analyzing a chain of length " << Size << ".\n");

    // Process the accesses in chunks of up to 64, matching the fixed-size
    // ConsecutiveChain buffer in vectorizeInstructions().
    for (unsigned CI = 0, CE = Size; CI < CE; CI += 64) {
      unsigned Len = std::min<unsigned>(CE - CI, 64);
      ArrayRef<Value *> Chunk(&Chain.second[CI], Len);
      Changed |= vectorizeInstructions(Chunk);
    }
  }

  return Changed;
}

bool Vectorizer::vectorizeInstructions(ArrayRef<Value *> Instrs) {
  DEBUG(dbgs() << "LSV: Vectorizing " << Instrs.size() << " instructions.\n");
  SmallSetVector<int, 16> Heads, Tails;
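  // ConsecutiveChain[i] == j records that Instrs[j] is the access immediately
  // following Instrs[i] in memory (-1 if no successor is known). Heads holds
  // indices that have such a successor; Tails holds indices that are some
  // other access's successor. Chains are later walked starting from Heads
  // that are not themselves the Tail of a longer, still-unprocessed chain.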
  int ConsecutiveChain[64];

  // Do a quadratic search on all of the given accesses (all loads or all
  // stores) and find all of the pairs of accesses that follow each other.
  for (int i = 0, e = Instrs.size(); i < e; ++i) {
    ConsecutiveChain[i] = -1;
    for (int j = e - 1; j >= 0; --j) {
      if (i == j)
        continue;

      if (isConsecutiveAccess(Instrs[i], Instrs[j])) {
        if (ConsecutiveChain[i] != -1) {
          int CurDistance = std::abs(ConsecutiveChain[i] - i);
          int NewDistance = std::abs(ConsecutiveChain[i] - j);
          if (j < i || NewDistance > CurDistance)
            continue; // Should not insert.
        }

        Tails.insert(j);
        Heads.insert(i);
        ConsecutiveChain[i] = j;
      }
    }
  }

  bool Changed = false;
  SmallPtrSet<Value *, 16> InstructionsProcessed;

  for (int Head : Heads) {
    if (InstructionsProcessed.count(Instrs[Head]))
      continue;
    bool longerChainExists = false;
    for (unsigned TIt = 0; TIt < Tails.size(); TIt++)
      if (Head == Tails[TIt] &&
          !InstructionsProcessed.count(Instrs[Heads[TIt]])) {
        longerChainExists = true;
        break;
      }
    if (longerChainExists)
      continue;

    // We found an instr that starts a chain. Now follow the chain and try to
    // vectorize it.
    SmallVector<Value *, 16> Operands;
    int I = Head;
    while (I != -1 && (Tails.count(I) || Heads.count(I))) {
      if (InstructionsProcessed.count(Instrs[I]))
        break;

      Operands.push_back(Instrs[I]);
      I = ConsecutiveChain[I];
    }

    bool Vectorized = false;
    if (isa<LoadInst>(*Operands.begin()))
      Vectorized = vectorizeLoadChain(Operands, &InstructionsProcessed);
    else
      Vectorized = vectorizeStoreChain(Operands, &InstructionsProcessed);

    Changed |= Vectorized;
  }

  return Changed;
}

bool Vectorizer::vectorizeStoreChain(
    ArrayRef<Value *> Chain, SmallPtrSet<Value *, 16> *InstructionsProcessed) {
  StoreInst *S0 = cast<StoreInst>(Chain[0]);

  // If the vector has an int element, default to int for the whole store.
  Type *StoreTy;
  for (const auto &V : Chain) {
    StoreTy = cast<StoreInst>(V)->getValueOperand()->getType();
    if (StoreTy->isIntOrIntVectorTy())
      break;

    if (StoreTy->isPtrOrPtrVectorTy()) {
      StoreTy = Type::getIntNTy(F.getParent()->getContext(),
                                DL.getTypeSizeInBits(StoreTy));
      break;
    }
  }

  unsigned Sz = DL.getTypeSizeInBits(StoreTy);
  unsigned AS = S0->getPointerAddressSpace();
  unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
  unsigned VF = VecRegSize / Sz;
  unsigned ChainSize = Chain.size();

  if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2) {
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }

  BasicBlock::iterator First, Last;
  std::tie(First, Last) = getBoundaryInstrs(Chain);
  unsigned StopChain = getVectorizablePrefixEndIdx(Chain, First, Last);
  if (StopChain == 0) {
    // There exists a side effect instruction, no vectorization possible.
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }
  if (StopChain == 1) {
    // Failed after the first instruction. Discard it and try the smaller chain.
    InstructionsProcessed->insert(Chain.front());
    return false;
  }

  // Update Chain to the valid vectorizable subchain.
  Chain = Chain.slice(0, StopChain);
  ChainSize = Chain.size();

  // Store size should be 1B, 2B or multiple of 4B.
  // TODO: Target hook for size constraint?
  unsigned SzInBytes = (Sz / 8) * ChainSize;
  if (SzInBytes > 2 && SzInBytes % 4 != 0) {
    DEBUG(dbgs() << "LSV: Size should be 1B, 2B "
                    "or multiple of 4B. Splitting.\n");
    if (SzInBytes == 3)
      return vectorizeStoreChain(Chain.slice(0, ChainSize - 1),
                                 InstructionsProcessed);

    auto Chains = splitOddVectorElts(Chain, Sz);
    return vectorizeStoreChain(Chains.first, InstructionsProcessed) |
           vectorizeStoreChain(Chains.second, InstructionsProcessed);
  }

  VectorType *VecTy;
  VectorType *VecStoreTy = dyn_cast<VectorType>(StoreTy);
  if (VecStoreTy)
    VecTy = VectorType::get(StoreTy->getScalarType(),
                            Chain.size() * VecStoreTy->getNumElements());
  else
    VecTy = VectorType::get(StoreTy, Chain.size());

  // If it's more than the max vector size, break it into two pieces.
  // TODO: Target hook to control types to split to.
  if (ChainSize > VF) {
    DEBUG(dbgs() << "LSV: Vector factor is too big."
                    " Creating two separate arrays.\n");
    return vectorizeStoreChain(Chain.slice(0, VF), InstructionsProcessed) |
           vectorizeStoreChain(Chain.slice(VF), InstructionsProcessed);
  }

  DEBUG({
    dbgs() << "LSV: Stores to vectorize:\n";
    for (Value *V : Chain)
      V->dump();
  });

  // We won't try again to vectorize the elements of the chain, regardless of
  // whether we succeed below.
  InstructionsProcessed->insert(Chain.begin(), Chain.end());

  // Check alignment restrictions.
  unsigned Alignment = getAlignment(S0);

  // If the store is going to be misaligned, don't vectorize it.
  if (accessIsMisaligned(SzInBytes, AS, Alignment)) {
    if (S0->getPointerAddressSpace() != 0)
      return false;

    // If we're storing to an object on the stack, we control its alignment,
    // so we can cheat and change it!
    Value *V = GetUnderlyingObject(S0->getPointerOperand(), DL);
    if (AllocaInst *AI = dyn_cast_or_null<AllocaInst>(V)) {
      AI->setAlignment(TargetBaseAlign);
      Alignment = TargetBaseAlign;
    } else {
      return false;
    }
  }

  // Set insert point.
  Builder.SetInsertPoint(&*Last);

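  // Build the value to store by gathering the scalar operands: for
  // vector-typed elements, extract each lane from every source value and
  // insert it at its flattened position in the combined vector; for scalar
  // elements, insert each operand directly.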
  Value *Vec = UndefValue::get(VecTy);

  if (VecStoreTy) {
    unsigned VecWidth = VecStoreTy->getNumElements();
    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
      StoreInst *Store = cast<StoreInst>(Chain[I]);
      for (unsigned J = 0, NE = VecStoreTy->getNumElements(); J != NE; ++J) {
        unsigned NewIdx = J + I * VecWidth;
        Value *Extract = Builder.CreateExtractElement(Store->getValueOperand(),
                                                      Builder.getInt32(J));
        if (Extract->getType() != StoreTy->getScalarType())
          Extract = Builder.CreateBitCast(Extract, StoreTy->getScalarType());

        Value *Insert =
            Builder.CreateInsertElement(Vec, Extract, Builder.getInt32(NewIdx));
        Vec = Insert;
      }
    }
  } else {
    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
      StoreInst *Store = cast<StoreInst>(Chain[I]);
      Value *Extract = Store->getValueOperand();
      if (Extract->getType() != StoreTy->getScalarType())
        Extract =
            Builder.CreateBitOrPointerCast(Extract, StoreTy->getScalarType());

      Value *Insert =
          Builder.CreateInsertElement(Vec, Extract, Builder.getInt32(I));
      Vec = Insert;
    }
  }

  Value *Bitcast =
      Builder.CreateBitCast(S0->getPointerOperand(), VecTy->getPointerTo(AS));
  StoreInst *SI = cast<StoreInst>(Builder.CreateStore(Vec, Bitcast));
  propagateMetadata(SI, Chain);
  SI->setAlignment(Alignment);

  eraseInstructions(Chain);
  ++NumVectorInstructions;
  NumScalarsVectorized += Chain.size();
  return true;
}

bool Vectorizer::vectorizeLoadChain(
    ArrayRef<Value *> Chain, SmallPtrSet<Value *, 16> *InstructionsProcessed) {
  LoadInst *L0 = cast<LoadInst>(Chain[0]);

  // If the vector has an int element, default to int for the whole load.
  Type *LoadTy;
  for (const auto &V : Chain) {
    LoadTy = cast<LoadInst>(V)->getType();
    if (LoadTy->isIntOrIntVectorTy())
      break;

    if (LoadTy->isPtrOrPtrVectorTy()) {
      LoadTy = Type::getIntNTy(F.getParent()->getContext(),
                               DL.getTypeSizeInBits(LoadTy));
      break;
    }
  }

  unsigned Sz = DL.getTypeSizeInBits(LoadTy);
  unsigned AS = L0->getPointerAddressSpace();
  unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
  unsigned VF = VecRegSize / Sz;
  unsigned ChainSize = Chain.size();

  if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2) {
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }

  BasicBlock::iterator First, Last;
  std::tie(First, Last) = getBoundaryInstrs(Chain);
  unsigned StopChain = getVectorizablePrefixEndIdx(Chain, First, Last);
  if (StopChain == 0) {
    // There exists a side effect instruction, no vectorization possible.
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }
  if (StopChain == 1) {
    // Failed after the first instruction. Discard it and try the smaller chain.
    InstructionsProcessed->insert(Chain.front());
    return false;
  }

  // Update Chain to the valid vectorizable subchain.
  Chain = Chain.slice(0, StopChain);
  ChainSize = Chain.size();

  // Load size should be 1B, 2B or multiple of 4B.
  // TODO: Should size constraint be a target hook?
  unsigned SzInBytes = (Sz / 8) * ChainSize;
  if (SzInBytes > 2 && SzInBytes % 4 != 0) {
    DEBUG(dbgs() << "LSV: Size should be 1B, 2B "
                    "or multiple of 4B. Splitting.\n");
    if (SzInBytes == 3)
      return vectorizeLoadChain(Chain.slice(0, ChainSize - 1),
                                InstructionsProcessed);
    auto Chains = splitOddVectorElts(Chain, Sz);
    return vectorizeLoadChain(Chains.first, InstructionsProcessed) |
           vectorizeLoadChain(Chains.second, InstructionsProcessed);
  }

  VectorType *VecTy;
  VectorType *VecLoadTy = dyn_cast<VectorType>(LoadTy);
  if (VecLoadTy)
    VecTy = VectorType::get(LoadTy->getScalarType(),
                            Chain.size() * VecLoadTy->getNumElements());
  else
    VecTy = VectorType::get(LoadTy, Chain.size());

  // If it's more than the max vector size, break it into two pieces.
  // TODO: Target hook to control types to split to.
  if (ChainSize > VF) {
    DEBUG(dbgs() << "LSV: Vector factor is too big. "
                    "Creating two separate arrays.\n");
    return vectorizeLoadChain(Chain.slice(0, VF), InstructionsProcessed) |
           vectorizeLoadChain(Chain.slice(VF), InstructionsProcessed);
  }

  // We won't try again to vectorize the elements of the chain, regardless of
  // whether we succeed below.
  InstructionsProcessed->insert(Chain.begin(), Chain.end());

  // Check alignment restrictions.
  unsigned Alignment = getAlignment(L0);

  // If the load is going to be misaligned, don't vectorize it.
  if (accessIsMisaligned(SzInBytes, AS, Alignment)) {
    if (L0->getPointerAddressSpace() != 0)
      return false;

    // If we're loading from an object on the stack, we control its alignment,
    // so we can cheat and change it!
    Value *V = GetUnderlyingObject(L0->getPointerOperand(), DL);
    if (AllocaInst *AI = dyn_cast_or_null<AllocaInst>(V)) {
      AI->setAlignment(TargetBaseAlign);
      Alignment = TargetBaseAlign;
    } else {
      return false;
    }
  }

  DEBUG({
    dbgs() << "LSV: Loads to vectorize:\n";
    for (Value *V : Chain)
      V->dump();
  });

  // Set insert point.
  Builder.SetInsertPoint(&*First);

  Value *Bitcast =
      Builder.CreateBitCast(L0->getPointerOperand(), VecTy->getPointerTo(AS));

  LoadInst *LI = cast<LoadInst>(Builder.CreateLoad(Bitcast));
  propagateMetadata(LI, Chain);
  LI->setAlignment(Alignment);

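  // Distribute the wide load back to the original users: each old scalar load
  // (or each constant-index extract from an old vector load) is replaced by an
  // extractelement from the new wide load; reorder() then hoists any operands
  // of the new address computation that no longer dominate it.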
  if (VecLoadTy) {
    SmallVector<Instruction *, 16> InstrsToErase;
    SmallVector<Instruction *, 16> InstrsToReorder;
    InstrsToReorder.push_back(cast<Instruction>(Bitcast));

    unsigned VecWidth = VecLoadTy->getNumElements();
    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
      for (auto Use : Chain[I]->users()) {
        Instruction *UI = cast<Instruction>(Use);
        unsigned Idx = cast<ConstantInt>(UI->getOperand(1))->getZExtValue();
        unsigned NewIdx = Idx + I * VecWidth;
        Value *V = Builder.CreateExtractElement(LI, Builder.getInt32(NewIdx));
        Instruction *Extracted = cast<Instruction>(V);
        if (Extracted->getType() != UI->getType())
          Extracted = cast<Instruction>(
              Builder.CreateBitCast(Extracted, UI->getType()));

        // Replace the old instruction.
        UI->replaceAllUsesWith(Extracted);
        InstrsToErase.push_back(UI);
      }
    }

    for (Instruction *ModUser : InstrsToReorder)
      reorder(ModUser);

    for (auto I : InstrsToErase)
      I->eraseFromParent();
  } else {
    SmallVector<Instruction *, 16> InstrsToReorder;
    InstrsToReorder.push_back(cast<Instruction>(Bitcast));

    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
      Value *V = Builder.CreateExtractElement(LI, Builder.getInt32(I));
      Instruction *Extracted = cast<Instruction>(V);
      Instruction *UI = cast<Instruction>(Chain[I]);
      if (Extracted->getType() != UI->getType()) {
        Extracted = cast<Instruction>(
            Builder.CreateBitOrPointerCast(Extracted, UI->getType()));
      }

      // Replace the old instruction.
      UI->replaceAllUsesWith(Extracted);
    }

    for (Instruction *ModUser : InstrsToReorder)
      reorder(ModUser);
  }

  eraseInstructions(Chain);

  ++NumVectorInstructions;
  NumScalarsVectorized += Chain.size();
  return true;
}

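// An access is acceptable if the target reports fast misaligned support for
// it, or if its alignment is a multiple of the access size, or (as a stopgap)
// a multiple of the 4-byte TargetBaseAlign.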
bool Vectorizer::accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
                                    unsigned Alignment) {
  bool Fast = false;
  bool Allows = TTI.allowsMisalignedMemoryAccesses(SzInBytes * 8, AddressSpace,
                                                   Alignment, &Fast);
  // TODO: Remove TargetBaseAlign
  return !(Allows && Fast) && (Alignment % SzInBytes) != 0 &&
         (Alignment % TargetBaseAlign) != 0;
}