//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Vectorize.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/VectorUtils.h"
#include <algorithm>
#include <map>

using namespace llvm;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number"));

static cl::opt<bool>
ShouldVectorizeHor("slp-vectorize-hor", cl::init(false), cl::Hidden,
                   cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

namespace {

static const unsigned MinVecRegSize = 128;

static const unsigned RecursionMaxDepth = 12;

/// A helper class for numbering instructions in multiple blocks.
/// Numbers start at zero for each basic block.
struct BlockNumbering {

  BlockNumbering(BasicBlock *Bb) : BB(Bb), Valid(false) {}

  void numberInstructions() {
    unsigned Loc = 0;
    InstrIdx.clear();
    InstrVec.clear();
    // Number the instructions in the block.
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
      InstrIdx[it] = Loc++;
      InstrVec.push_back(it);
      assert(InstrVec[InstrIdx[it]] == it && "Invalid allocation");
    }
    Valid = true;
  }

  int getIndex(Instruction *I) {
    assert(I->getParent() == BB && "Invalid instruction");
    if (!Valid)
      numberInstructions();
    assert(InstrIdx.count(I) && "Unknown instruction");
    return InstrIdx[I];
  }

  Instruction *getInstruction(unsigned loc) {
    if (!Valid)
      numberInstructions();
    assert(InstrVec.size() > loc && "Invalid Index");
    return InstrVec[loc];
  }

  void forget() { Valid = false; }

private:
  /// The block we are numbering.
  BasicBlock *BB;
  /// Is the block numbered.
  bool Valid;
  /// Maps instructions to numbers and back.
  SmallDenseMap<Instruction *, int> InstrIdx;
  /// Maps integers to Instructions.
  SmallVector<Instruction *, 32> InstrVec;
};

/// \returns the parent basic block if all of the instructions in \p VL
/// are in the same block or null otherwise.
static BasicBlock *getSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return nullptr;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return nullptr;

    if (BB != I->getParent())
      return nullptr;
  }
  return BB;
}

/// \returns True if all of the values in \p VL are constants.
static bool allConstant(ArrayRef<Value *> VL) {
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    if (!isa<Constant>(VL[i]))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

///\returns Opcode that can be clubbed with \p Op to create an alternate
/// sequence which can later be merged as a ShuffleVector instruction.
static unsigned getAltOpcode(unsigned Op) {
  switch (Op) {
  case Instruction::FAdd:
    return Instruction::FSub;
  case Instruction::FSub:
    return Instruction::FAdd;
  case Instruction::Add:
    return Instruction::Sub;
  case Instruction::Sub:
    return Instruction::Add;
  default:
    return 0;
  }
}

///\returns bool representing if Opcode \p Op can be part
/// of an alternate sequence which can later be merged as
/// a ShuffleVector instruction.
static bool canCombineAsAltInst(unsigned Op) {
  if (Op == Instruction::FAdd || Op == Instruction::FSub ||
      Op == Instruction::Sub || Op == Instruction::Add)
    return true;
  return false;
}

/// \returns the ShuffleVector opcode if the instructions in \p VL form an
/// alternating fadd/fsub, fsub/fadd, add/sub or sub/add sequence
/// (e.g. an opcode sequence of fadd, fsub, fadd, fsub, ...).
static unsigned isAltInst(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  unsigned Opcode = I0->getOpcode();
  unsigned AltOpcode = getAltOpcode(Opcode);
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || I->getOpcode() != ((i & 1) ? AltOpcode : Opcode))
      return 0;
  }
  return Instruction::ShuffleVector;
}
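// For example, a bundle whose scalars alternate between the base opcode and
// its alternate, such as:
//   %a0 = fadd float %x0, %y0
//   %a1 = fsub float %x1, %y1
//   %a2 = fadd float %x2, %y2
//   %a3 = fsub float %x3, %y3
// is reported as vectorizable via ShuffleVector: a full fadd vector and a
// full fsub vector are emitted and the lanes are blended with a shuffle (see
// the ShuffleVector case in vectorizeTree below).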
/// \returns The opcode if all of the Instructions in \p VL have the same
/// opcode, or zero.
static unsigned getSameOpcode(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  unsigned Opcode = I0->getOpcode();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || Opcode != I->getOpcode()) {
      if (canCombineAsAltInst(Opcode) && i == 1)
        return isAltInst(VL);
      return 0;
    }
  }
  return Opcode;
}

/// \returns \p I after propagating metadata from \p VL.
static Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL) {
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (unsigned i = 0, n = Metadata.size(); i != n; ++i) {
    unsigned Kind = Metadata[i].first;
    MDNode *MD = Metadata[i].second;

    for (int i = 1, e = VL.size(); MD && i != e; i++) {
      Instruction *I = cast<Instruction>(VL[i]);
      MDNode *IMD = I->getMetadata(Kind);

      switch (Kind) {
      default:
        MD = nullptr; // Remove unknown metadata
        break;
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      }
    }
    I->setMetadata(Kind, MD);
  }
  return I;
}

/// \returns The type that all of the values in \p VL have or null if there
/// are different types.
static Type* getSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return nullptr;

  return Ty;
}

/// \returns True if the ExtractElement instructions in VL can be vectorized
/// to use the original vector.
static bool CanReuseExtract(ArrayRef<Value *> VL) {
  assert(Instruction::ExtractElement == getSameOpcode(VL) && "Invalid opcode");
  // Check if all of the extracts come from the same vector and from the
  // correct offset.
  Value *VL0 = VL[0];
  ExtractElementInst *E0 = cast<ExtractElementInst>(VL0);
  Value *Vec = E0->getOperand(0);

  // We have to extract from the same vector type.
  unsigned NElts = Vec->getType()->getVectorNumElements();

  if (NElts != VL.size())
    return false;

  // Check that all of the indices extract from the correct offset.
  ConstantInt *CI = dyn_cast<ConstantInt>(E0->getOperand(1));
  if (!CI || CI->getZExtValue())
    return false;

  for (unsigned i = 1, e = VL.size(); i < e; ++i) {
    ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
    ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));

    if (!CI || CI->getZExtValue() != i || E->getOperand(0) != Vec)
      return false;
  }

  return true;
}
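// For example, given a bundle of extracts from the same 4-wide vector at
// offsets 0..3:
//   %e0 = extractelement <4 x float> %v, i32 0
//   %e1 = extractelement <4 x float> %v, i32 1
//   %e2 = extractelement <4 x float> %v, i32 2
//   %e3 = extractelement <4 x float> %v, i32 3
// CanReuseExtract returns true and %v itself can stand in for the vectorized
// bundle (see the ExtractElement case in vectorizeTree below).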
static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                           SmallVectorImpl<Value *> &Left,
                                           SmallVectorImpl<Value *> &Right) {

  SmallVector<Value *, 16> OrigLeft, OrigRight;

  bool AllSameOpcodeLeft = true;
  bool AllSameOpcodeRight = true;
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    Instruction *I = cast<Instruction>(VL[i]);
    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);

    OrigLeft.push_back(V0);
    OrigRight.push_back(V1);

    Instruction *I0 = dyn_cast<Instruction>(V0);
    Instruction *I1 = dyn_cast<Instruction>(V1);

    // Check whether all operands on one side have the same opcode. In this
    // case we want to preserve the original order and not make things worse
    // by reordering.
    AllSameOpcodeLeft = I0;
    AllSameOpcodeRight = I1;

    if (i && AllSameOpcodeLeft) {
      if (Instruction *P0 = dyn_cast<Instruction>(OrigLeft[i-1])) {
        if (P0->getOpcode() != I0->getOpcode())
          AllSameOpcodeLeft = false;
      } else
        AllSameOpcodeLeft = false;
    }
    if (i && AllSameOpcodeRight) {
      if (Instruction *P1 = dyn_cast<Instruction>(OrigRight[i-1])) {
        if (P1->getOpcode() != I1->getOpcode())
          AllSameOpcodeRight = false;
      } else
        AllSameOpcodeRight = false;
    }

    // Sort two opcodes. In the code below we try to preserve the ability to
    // use a broadcast of values instead of individual inserts.
    // vl1 = load
    // vl2 = phi
    // vr1 = load
    // vr2 = vr1
    //     = vl1 x vr1
    //     = vl2 x vr2
    // If we just sorted according to opcode we would leave the first line
    // intact but we would swap vl2 with vr2 because opcode(phi) > opcode(load).
    //     = vl1 x vr1
    //     = vr2 x vl2
    // Because vr2 and vr1 are from the same load we lose the opportunity of a
    // broadcast for the packed right side in the backend: we have [vr1, vl2]
    // instead of [vr1, vr2=vr1].
    if (I0 && I1) {
      if (!i && I0->getOpcode() > I1->getOpcode()) {
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() > I1->getOpcode() && Right[i-1] != I1) {
        // Try not to destroy a broadcast for no apparent benefit.
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() == I1->getOpcode() && Right[i-1] == I0) {
        // Try to preserve broadcasts.
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() == I1->getOpcode() && Left[i-1] == I1) {
        // Try to preserve broadcasts.
        Left.push_back(I1);
        Right.push_back(I0);
      } else {
        Left.push_back(I0);
        Right.push_back(I1);
      }
      continue;
    }
    // One opcode, put the instruction on the right.
    if (I0) {
      Left.push_back(V1);
      Right.push_back(I0);
      continue;
    }
    Left.push_back(V0);
    Right.push_back(V1);
  }

  bool LeftBroadcast = isSplat(Left);
  bool RightBroadcast = isSplat(Right);

  // Don't reorder if the operands were good to begin with.
  if (!(LeftBroadcast || RightBroadcast) &&
      (AllSameOpcodeRight || AllSameOpcodeLeft)) {
    Left = OrigLeft;
    Right = OrigRight;
  }
}
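// A rough sketch of how the class below is driven (the actual call sites live
// in the SLPVectorizer pass later in this file):
//   BoUpSLP R(F, SE, DL, TTI, TLI, AA, LI, DT);
//   R.buildTree(Bundle);         // grow the use-def tree from the roots
//   int Cost = R.getTreeCost();  // negative => vectorization is profitable
//   if (Cost < -SLPCostThreshold)
//     R.vectorizeTree();         // emit the vector code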
/// Bottom Up SLP Vectorizer.
class BoUpSLP {
public:
  typedef SmallVector<Value *, 8> ValueList;
  typedef SmallVector<Instruction *, 16> InstrList;
  typedef SmallPtrSet<Value *, 16> ValueSet;
  typedef SmallVector<StoreInst *, 8> StoreList;

  BoUpSLP(Function *Func, ScalarEvolution *Se, const DataLayout *Dl,
          TargetTransformInfo *Tti, TargetLibraryInfo *TLi, AliasAnalysis *Aa,
          LoopInfo *Li, DominatorTree *Dt)
      : F(Func), SE(Se), DL(Dl), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt),
        Builder(Se->getContext()) {}

  /// \brief Vectorize the tree that was built from the elements passed to
  /// buildTree(). Returns the vectorized root.
  Value *vectorizeTree();

  /// \returns the vectorization cost of the tree that was built by
  /// buildTree(). A negative number means that this is profitable.
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    MemBarrierIgnoreList.clear();
  }

  /// \returns true if the memory operations A and B are consecutive.
  bool isConsecutiveAccess(Value *A, Value *B);

  /// \brief Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

private:
  struct TreeEntry;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth);

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);

  /// \returns the pointer to the vectorized value if \p VL is already
  /// vectorized, or NULL. This may happen in cycles.
  Value *alreadyVectorized(ArrayRef<Value *> VL) const;

  /// \brief Take the pointer operand from the Load/Store instruction.
  /// \returns NULL if this is not a valid Load/Store instruction.
  static Value *getPointerOperand(Value *I);

  /// \brief Take the address space operand from the Load/Store instruction.
  /// \returns -1 if this is not a valid Load/Store instruction.
  static unsigned getAddressSpaceOperand(Value *I);

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty);

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL);

  /// \returns the AA location that is being accessed by the instruction.
  AliasAnalysis::Location getLocation(Instruction *I);

  /// \brief Checks if it is possible to sink an instruction from
  /// \p Src to \p Dst.
  /// \returns the pointer to the barrier instruction if we can't sink.
  Value *getSinkBarrier(Instruction *Src, Instruction *Dst);

  /// \returns the index of the last instruction in the BB from \p VL.
  int getLastIndex(ArrayRef<Value *> VL);

  /// \returns the last Instruction in the bundle \p VL.
  Instruction *getLastInstruction(ArrayRef<Value *> VL);

  /// \brief Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(ArrayRef<Value *> VL);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even when the tree height is tiny.
  bool isFullyVectorizableTinyTree();

  struct TreeEntry {
    TreeEntry() : Scalars(), VectorizedValue(nullptr), LastScalarIndex(0),
                  NeedToGather(0) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      assert(VL.size() == Scalars.size() && "Invalid size");
      return std::equal(VL.begin(), VL.end(), Scalars.begin());
    }

    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
    Value *VectorizedValue;

    /// The index in the basic block of the last scalar.
    int LastScalarIndex;

    /// Do we need to gather this sequence?
    bool NeedToGather;
  };

  /// Create a new VectorizableTree entry.
  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized) {
    VectorizableTree.push_back(TreeEntry());
    int idx = VectorizableTree.size() - 1;
    TreeEntry *Last = &VectorizableTree[idx];
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    if (Vectorized) {
      Last->LastScalarIndex = getLastIndex(VL);
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!ScalarToTreeEntry.count(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = idx;
      }
    } else {
      Last->LastScalarIndex = 0;
      MustGather.insert(VL.begin(), VL.end());
    }
    return Last;
  }

  /// -- Vectorization State --
  /// Holds all of the tree entries.
  std::vector<TreeEntry> VectorizableTree;

  /// Maps a specific scalar to its tree entry.
  SmallDenseMap<Value*, int> ScalarToTreeEntry;

  /// A list of scalars that we found that we need to keep as scalars.
  ValueSet MustGather;

  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser(Value *S, llvm::User *U, int L)
        : Scalar(S), User(U), Lane(L) {}
    // Which scalar in our function.
    Value *Scalar;
    // Which user that uses the scalar.
    llvm::User *User;
    // Which lane does the scalar belong to.
    int Lane;
  };
  typedef SmallVector<ExternalUser, 16> UserList;

  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User).
  UserList ExternalUses;

  /// A list of instructions to ignore while sinking
  /// memory instructions. This map must be reset between runs of getCost.
  ValueSet MemBarrierIgnoreList;

  /// Holds all of the instructions that we gathered.
  SetVector<Instruction *> GatherSeq;
  /// A list of blocks that we are going to CSE.
  SetVector<BasicBlock *> CSEBlocks;
  /// Numbers instructions in different blocks.
  DenseMap<BasicBlock *, BlockNumbering> BlocksNumbers;

  /// \brief Get the corresponding instruction numbering list for a given
  /// BasicBlock. The list is allocated lazily.
  BlockNumbering &getBlockNumbering(BasicBlock *BB) {
    auto I = BlocksNumbers.insert(std::make_pair(BB, BlockNumbering(BB)));
    return I.first->second;
  }

  /// List of users to ignore during scheduling and that don't need extracting.
  ArrayRef<Value *> UserIgnoreList;

  // Analysis and block reference.
  Function *F;
  ScalarEvolution *SE;
  const DataLayout *DL;
  TargetTransformInfo *TTI;
  TargetLibraryInfo *TLI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;
  /// Instruction builder to construct the vectorized tree.
  IRBuilder<> Builder;
};

void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ArrayRef<Value *> UserIgnoreLst) {
  deleteTree();
  UserIgnoreList = UserIgnoreLst;
  if (!getSameType(Roots))
    return;
  buildTree_rec(Roots, 0);

  // Collect the values that we need to extract from the tree.
  for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
    TreeEntry *Entry = &VectorizableTree[EIdx];

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      for (User *U : Scalar->users()) {
        DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");

        // Skip in-tree scalars that become vectors.
        if (ScalarToTreeEntry.count(U)) {
          DEBUG(dbgs() << "SLP: \tInternal user will be removed:" <<
                *U << ".\n");
          int Idx = ScalarToTreeEntry[U]; (void) Idx;
          assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
          continue;
        }
        Instruction *UserInst = dyn_cast<Instruction>(U);
        if (!UserInst)
          continue;

        // Ignore users in the user ignore list.
        if (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), UserInst) !=
            UserIgnoreList.end())
          continue;

        DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " <<
              Lane << " from " << *Scalar << ".\n");
        ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
      }
    }
  }
}
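// For example, if a bundle {%a, %b} is vectorized but %a is also used by a
// scalar instruction outside of the tree, an ExternalUser entry (%a, user,
// lane 0) is recorded above so that vectorizeTree() can later materialize an
// extractelement for that use once the vector value exists.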
void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
  bool SameTy = getSameType(VL); (void)SameTy;
  bool isAltShuffle = false;
  assert(SameTy && "Invalid types!");

  if (Depth == RecursionMaxDepth) {
    DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
    newTreeEntry(VL, false);
    return;
  }

  // Don't handle vectors.
  if (VL[0]->getType()->isVectorTy()) {
    DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
    newTreeEntry(VL, false);
    return;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    if (SI->getValueOperand()->getType()->isVectorTy()) {
      DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
      newTreeEntry(VL, false);
      return;
    }
  unsigned Opcode = getSameOpcode(VL);

  // Check that this shuffle vector refers to the alternate
  // sequence of opcodes.
  if (Opcode == Instruction::ShuffleVector) {
    Instruction *I0 = dyn_cast<Instruction>(VL[0]);
    unsigned Op = I0->getOpcode();
    if (Op != Instruction::ShuffleVector)
      isAltShuffle = true;
  }

  // If all of the operands are identical or constant we have a simple
  // solution.
  if (allConstant(VL) || isSplat(VL) || !getSameBlock(VL) || !Opcode) {
    DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
    newTreeEntry(VL, false);
    return;
  }

  // We now know that this is a vector of instructions of the same type from
  // the same block.

  // Check if this is a duplicate of another entry.
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    for (unsigned i = 0, e = VL.size(); i != e; ++i) {
      DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
      if (E->Scalars[i] != VL[i]) {
        DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
        newTreeEntry(VL, false);
        return;
      }
    }
    DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n");
    return;
  }

  // Check that none of the instructions in the bundle are already in the tree.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is already in tree.\n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // If any of the scalars appears in the table OR it is marked as a value that
  // needs to stay scalar then we need to gather the scalars.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i]) || MustGather.count(VL[i])) {
      DEBUG(dbgs() << "SLP: Gathering due to gathered scalar. \n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // Check that all of the users of the scalars that we want to vectorize are
  // schedulable.
  Instruction *VL0 = cast<Instruction>(VL[0]);
  int MyLastIndex = getLastIndex(VL);
  BasicBlock *BB = cast<Instruction>(VL0)->getParent();

  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    Instruction *Scalar = cast<Instruction>(VL[i]);
    DEBUG(dbgs() << "SLP: Checking users of " << *Scalar << ". \n");
    for (User *U : Scalar->users()) {
      DEBUG(dbgs() << "SLP: \tUser " << *U << ". \n");
      Instruction *UI = dyn_cast<Instruction>(U);
      if (!UI) {
        DEBUG(dbgs() << "SLP: Gathering due to unknown user. \n");
        newTreeEntry(VL, false);
        return;
      }

      // We don't care if the user is in a different basic block.
      BasicBlock *UserBlock = UI->getParent();
      if (UserBlock != BB) {
        DEBUG(dbgs() << "SLP: User from a different basic block "
              << *UI << ". \n");
        continue;
      }

      // If this is a PHINode within this basic block then we can place the
      // extract wherever we want.
      if (isa<PHINode>(*UI)) {
        DEBUG(dbgs() << "SLP: \tWe can schedule PHIs:" << *UI << ". \n");
        continue;
      }

      // Check if this is a safe in-tree user.
      if (ScalarToTreeEntry.count(UI)) {
        int Idx = ScalarToTreeEntry[UI];
        int VecLocation = VectorizableTree[Idx].LastScalarIndex;
        if (VecLocation <= MyLastIndex) {
          DEBUG(dbgs() << "SLP: Gathering due to unschedulable vector.\n");
          newTreeEntry(VL, false);
          return;
        }
        DEBUG(dbgs() << "SLP: In-tree user (" << *UI << ") at #" <<
              VecLocation << " vector value (" << *Scalar << ") at #"
              << MyLastIndex << ".\n");
        continue;
      }

      // Ignore users in the user ignore list.
      if (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), UI) !=
          UserIgnoreList.end())
        continue;

      // Make sure that we can schedule this unknown user.
      BlockNumbering &BN = getBlockNumbering(BB);
      int UserIndex = BN.getIndex(UI);
      if (UserIndex < MyLastIndex) {
        DEBUG(dbgs() << "SLP: Can't schedule extractelement for "
              << *UI << ". \n");
        newTreeEntry(VL, false);
        return;
      }
    }
  }
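  // An illustration of the numbering-based legality check above: if the
  // bundle is {%x, %y} with getLastIndex() == 10, an unknown user of %x at
  // block index 7 would need an extractelement before the vector instruction
  // exists, so the bundle is gathered instead.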
\n"); 761 newTreeEntry(VL, false); 762 return; 763 } 764 DEBUG(dbgs() << "SLP: In-tree user (" << *UI << ") at #" << 765 VecLocation << " vector value (" << *Scalar << ") at #" 766 << MyLastIndex << ".\n"); 767 continue; 768 } 769 770 // Ignore users in the user ignore list. 771 if (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), UI) != 772 UserIgnoreList.end()) 773 continue; 774 775 // Make sure that we can schedule this unknown user. 776 BlockNumbering &BN = getBlockNumbering(BB); 777 int UserIndex = BN.getIndex(UI); 778 if (UserIndex < MyLastIndex) { 779 780 DEBUG(dbgs() << "SLP: Can't schedule extractelement for " 781 << *UI << ". \n"); 782 newTreeEntry(VL, false); 783 return; 784 } 785 } 786 } 787 788 // Check that every instructions appears once in this bundle. 789 for (unsigned i = 0, e = VL.size(); i < e; ++i) 790 for (unsigned j = i+1; j < e; ++j) 791 if (VL[i] == VL[j]) { 792 DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); 793 newTreeEntry(VL, false); 794 return; 795 } 796 797 // Check that instructions in this bundle don't reference other instructions. 798 // The runtime of this check is O(N * N-1 * uses(N)) and a typical N is 4. 799 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 800 for (User *U : VL[i]->users()) { 801 for (unsigned j = 0; j < e; ++j) { 802 if (i != j && U == VL[j]) { 803 DEBUG(dbgs() << "SLP: Intra-bundle dependencies!" << *U << ". \n"); 804 newTreeEntry(VL, false); 805 return; 806 } 807 } 808 } 809 } 810 811 DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); 812 813 // Check if it is safe to sink the loads or the stores. 814 if (Opcode == Instruction::Load || Opcode == Instruction::Store) { 815 Instruction *Last = getLastInstruction(VL); 816 817 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 818 if (VL[i] == Last) 819 continue; 820 Value *Barrier = getSinkBarrier(cast<Instruction>(VL[i]), Last); 821 if (Barrier) { 822 DEBUG(dbgs() << "SLP: Can't sink " << *VL[i] << "\n down to " << *Last 823 << "\n because of " << *Barrier << ". Gathering.\n"); 824 newTreeEntry(VL, false); 825 return; 826 } 827 } 828 } 829 830 switch (Opcode) { 831 case Instruction::PHI: { 832 PHINode *PH = dyn_cast<PHINode>(VL0); 833 834 // Check for terminator values (e.g. invoke). 835 for (unsigned j = 0; j < VL.size(); ++j) 836 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 837 TerminatorInst *Term = dyn_cast<TerminatorInst>( 838 cast<PHINode>(VL[j])->getIncomingValueForBlock(PH->getIncomingBlock(i))); 839 if (Term) { 840 DEBUG(dbgs() << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n"); 841 newTreeEntry(VL, false); 842 return; 843 } 844 } 845 846 newTreeEntry(VL, true); 847 DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n"); 848 849 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 850 ValueList Operands; 851 // Prepare the operand vector. 852 for (unsigned j = 0; j < VL.size(); ++j) 853 Operands.push_back(cast<PHINode>(VL[j])->getIncomingValueForBlock( 854 PH->getIncomingBlock(i))); 855 856 buildTree_rec(Operands, Depth + 1); 857 } 858 return; 859 } 860 case Instruction::ExtractElement: { 861 bool Reuse = CanReuseExtract(VL); 862 if (Reuse) { 863 DEBUG(dbgs() << "SLP: Reusing extract sequence.\n"); 864 } 865 newTreeEntry(VL, Reuse); 866 return; 867 } 868 case Instruction::Load: { 869 // Check if the loads are consecutive or of we need to swizzle them. 
  case Instruction::Load: {
    // Check if the loads are consecutive or if we need to swizzle them.
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
      LoadInst *L = cast<LoadInst>(VL[i]);
      if (!L->isSimple() || !isConsecutiveAccess(VL[i], VL[i + 1])) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Need to swizzle loads.\n");
        return;
      }
    }
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of loads.\n");
    return;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();
    for (unsigned i = 0; i < VL.size(); ++i) {
      Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
      if (Ty != SrcTy || Ty->isAggregateType() || Ty->isVectorTy()) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
        return;
      }
    }
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of casts.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth+1);
    }
    return;
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Check that all of the compares have the same predicate.
    CmpInst::Predicate P0 = dyn_cast<CmpInst>(VL0)->getPredicate();
    Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
    for (unsigned i = 1, e = VL.size(); i < e; ++i) {
      CmpInst *Cmp = cast<CmpInst>(VL[i]);
      if (Cmp->getPredicate() != P0 ||
          Cmp->getOperand(0)->getType() != ComparedTy) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
        return;
      }
    }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of compares.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth+1);
    }
    return;
  }
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of bin op.\n");

    // Sort operands of the instructions so that each side is more likely to
    // have the same opcode.
    if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
      ValueList Left, Right;
      reorderInputsAccordingToOpcode(VL, Left, Right);
      BasicBlock *LeftBB = getSameBlock(Left);
      BasicBlock *RightBB = getSameBlock(Right);
      // If we have common uses on separate paths in the tree make sure we
      // process the one with greater common depth first.
      // We can use block numbering to determine the subtree traversal, as the
      // earlier user has to come in between the common use and the later user.
      if (LeftBB && RightBB && LeftBB == RightBB &&
          getLastIndex(Right) > getLastIndex(Left)) {
        buildTree_rec(Right, Depth + 1);
        buildTree_rec(Left, Depth + 1);
      } else {
        buildTree_rec(Left, Depth + 1);
        buildTree_rec(Right, Depth + 1);
      }
      return;
    }

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth+1);
    }
    return;
  }
  case Instruction::GetElementPtr: {
    // We don't combine GEPs with complicated (nested) indexing.
    for (unsigned j = 0; j < VL.size(); ++j) {
      if (cast<Instruction>(VL[j])->getNumOperands() != 2) {
        DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
        newTreeEntry(VL, false);
        return;
      }
    }

    // We can't combine several GEPs into one vector if they operate on
    // different types.
    Type *Ty0 = cast<Instruction>(VL0)->getOperand(0)->getType();
    for (unsigned j = 0; j < VL.size(); ++j) {
      Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType();
      if (Ty0 != CurTy) {
        DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
        newTreeEntry(VL, false);
        return;
      }
    }

    // We don't combine GEPs with non-constant indexes.
    for (unsigned j = 0; j < VL.size(); ++j) {
      auto Op = cast<Instruction>(VL[j])->getOperand(1);
      if (!isa<ConstantInt>(Op)) {
        DEBUG(
            dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
        newTreeEntry(VL, false);
        return;
      }
    }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
    for (unsigned i = 0, e = 2; i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::Store: {
    // Check if the stores are consecutive or if we need to swizzle them.
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
      if (!isConsecutiveAccess(VL[i], VL[i + 1])) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
        return;
      }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of stores.\n");

    ValueList Operands;
    for (unsigned j = 0; j < VL.size(); ++j)
      Operands.push_back(cast<Instruction>(VL[j])->getOperand(0));

    // We can ignore these values because we are sinking them down.
    MemBarrierIgnoreList.insert(VL.begin(), VL.end());
    buildTree_rec(Operands, Depth + 1);
    return;
  }
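  // For example, four calls to @llvm.sqrt.f32 on independent scalars can be
  // widened to a single call to @llvm.sqrt.v4f32 in the Call case below,
  // while intrinsics like @llvm.powi additionally require the scalar second
  // operand to match across all lanes (checked via
  // hasVectorInstrinsicScalarOpd).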
  case Instruction::Call: {
    // Check if the calls are all to the same vectorizable intrinsic.
    CallInst *CI = cast<CallInst>(VL[0]);
    // Check if this is an Intrinsic call or something that can be
    // represented by an intrinsic call.
    Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
    if (!isTriviallyVectorizable(ID)) {
      newTreeEntry(VL, false);
      DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
      return;
    }
    Function *Int = CI->getCalledFunction();
    Value *A1I = nullptr;
    if (hasVectorInstrinsicScalarOpd(ID, 1))
      A1I = CI->getArgOperand(1);
    for (unsigned i = 1, e = VL.size(); i != e; ++i) {
      CallInst *CI2 = dyn_cast<CallInst>(VL[i]);
      if (!CI2 || CI2->getCalledFunction() != Int ||
          getIntrinsicIDForCall(CI2, TLI) != ID) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
                     << "\n");
        return;
      }
      // ctlz, cttz and powi are special intrinsics whose second argument
      // must be the same in order for them to be vectorized.
      if (hasVectorInstrinsicScalarOpd(ID, 1)) {
        Value *A1J = CI2->getArgOperand(1);
        if (A1I != A1J) {
          newTreeEntry(VL, false);
          DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
                       << " argument " << A1I << "!=" << A1J
                       << "\n");
          return;
        }
      }
    }

    newTreeEntry(VL, true);
    for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j) {
        CallInst *CI2 = dyn_cast<CallInst>(VL[j]);
        Operands.push_back(CI2->getArgOperand(i));
      }
      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::ShuffleVector: {
    // If this is not an alternate sequence of opcodes like add-sub
    // then do not vectorize this instruction.
    if (!isAltShuffle) {
      newTreeEntry(VL, false);
      DEBUG(dbgs() << "SLP: ShuffleVectors are not vectorized.\n");
      return;
    }
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  default:
    newTreeEntry(VL, false);
    DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
    return;
  }
}

int BoUpSLP::getEntryCost(TreeEntry *E) {
  ArrayRef<Value*> VL = E->Scalars;

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  if (E->NeedToGather) {
    if (allConstant(VL))
      return 0;
    if (isSplat(VL)) {
      return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
    }
    return getGatherCost(E->Scalars);
  }
  unsigned Opcode = getSameOpcode(VL);
  assert(Opcode && getSameType(VL) && getSameBlock(VL) && "Invalid VL");
  Instruction *VL0 = cast<Instruction>(VL[0]);
  switch (Opcode) {
  case Instruction::PHI: {
    return 0;
  }
  case Instruction::ExtractElement: {
    if (CanReuseExtract(VL)) {
      int DeadCost = 0;
      for (unsigned i = 0, e = VL.size(); i < e; ++i) {
        ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
        if (E->hasOneUse())
          // Take credit for instruction that will become dead.
          DeadCost +=
              TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
      }
      return -DeadCost;
    }
    return getGatherCost(VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();

    // Calculate the cost of this instruction.
    int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
                                                       VL0->getType(), SrcTy);

    VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
    int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy);
    return VecCost - ScalarCost;
  }
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Calculate the cost of this instruction.
    int ScalarCost = 0;
    int VecCost = 0;
    if (Opcode == Instruction::FCmp || Opcode == Instruction::ICmp ||
        Opcode == Instruction::Select) {
      VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
      ScalarCost = VecTy->getNumElements() *
          TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty());
      VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy);
    } else {
      // Certain instructions can be cheaper to vectorize if they have a
      // constant second vector operand.
      TargetTransformInfo::OperandValueKind Op1VK =
          TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Op2VK =
          TargetTransformInfo::OK_UniformConstantValue;

      // If all operands are exactly the same ConstantInt then set the
      // operand kind to OK_UniformConstantValue.
      // If instead not all operands are constants, then set the operand kind
      // to OK_AnyValue. If all operands are constants but not the same,
      // then set the operand kind to OK_NonUniformConstantValue.
      ConstantInt *CInt = nullptr;
      for (unsigned i = 0; i < VL.size(); ++i) {
        const Instruction *I = cast<Instruction>(VL[i]);
        if (!isa<ConstantInt>(I->getOperand(1))) {
          Op2VK = TargetTransformInfo::OK_AnyValue;
          break;
        }
        if (i == 0) {
          CInt = cast<ConstantInt>(I->getOperand(1));
          continue;
        }
        if (Op2VK == TargetTransformInfo::OK_UniformConstantValue &&
            CInt != cast<ConstantInt>(I->getOperand(1)))
          Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
      }

      ScalarCost =
          VecTy->getNumElements() *
          TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK, Op2VK);
      VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK);
    }
    return VecCost - ScalarCost;
  }
  case Instruction::GetElementPtr: {
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_UniformConstantValue;

    int ScalarCost =
        VecTy->getNumElements() *
        TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK);
    int VecCost =
        TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK);

    return VecCost - ScalarCost;
  }
  case Instruction::Load: {
    // Cost of wide load - cost of scalar loads.
    int ScalarLdCost = VecTy->getNumElements() *
        TTI->getMemoryOpCost(Instruction::Load, ScalarTy, 1, 0);
    int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, 1, 0);
    return VecLdCost - ScalarLdCost;
  }
  case Instruction::Store: {
    // We know that we can merge the stores. Calculate the cost.
    int ScalarStCost = VecTy->getNumElements() *
        TTI->getMemoryOpCost(Instruction::Store, ScalarTy, 1, 0);
    int VecStCost = TTI->getMemoryOpCost(Instruction::Store, VecTy, 1, 0);
    return VecStCost - ScalarStCost;
  }
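  // Illustrative numbers only (the real values come from TTI): on a target
  // where a scalar load and a legal <4 x float> load each cost 1, a bundle of
  // four loads yields VecLdCost - ScalarLdCost = 1 - 4 = -3, i.e. a profit of
  // three units.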
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(VL0);
    Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);

    // Calculate the cost of the scalar and vector calls.
    SmallVector<Type*, 4> ScalarTys, VecTys;
    for (unsigned op = 0, opc = CI->getNumArgOperands(); op != opc; ++op) {
      ScalarTys.push_back(CI->getArgOperand(op)->getType());
      VecTys.push_back(VectorType::get(CI->getArgOperand(op)->getType(),
                                       VecTy->getNumElements()));
    }

    int ScalarCallCost = VecTy->getNumElements() *
        TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys);

    int VecCallCost = TTI->getIntrinsicInstrCost(ID, VecTy, VecTys);

    DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
          << " (" << VecCallCost << "-" << ScalarCallCost << ")"
          << " for " << *CI << "\n");

    return VecCallCost - ScalarCallCost;
  }
  case Instruction::ShuffleVector: {
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_AnyValue;
    int ScalarCost = 0;
    int VecCost = 0;
    for (unsigned i = 0; i < VL.size(); ++i) {
      Instruction *I = cast<Instruction>(VL[i]);
      if (!I)
        break;
      ScalarCost +=
          TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy, Op1VK, Op2VK);
    }
    // VecCost is equal to the sum of the cost of creating 2 vectors
    // and the cost of creating the shuffle.
    Instruction *I0 = cast<Instruction>(VL[0]);
    VecCost =
        TTI->getArithmeticInstrCost(I0->getOpcode(), VecTy, Op1VK, Op2VK);
    Instruction *I1 = cast<Instruction>(VL[1]);
    VecCost +=
        TTI->getArithmeticInstrCost(I1->getOpcode(), VecTy, Op1VK, Op2VK);
    VecCost +=
        TTI->getShuffleCost(TargetTransformInfo::SK_Alternate, VecTy, 0);
    return VecCost - ScalarCost;
  }
  default:
    llvm_unreachable("Unknown instruction");
  }
}

bool BoUpSLP::isFullyVectorizableTinyTree() {
  DEBUG(dbgs() << "SLP: Check whether the tree with height " <<
        VectorizableTree.size() << " is fully vectorizable.\n");

  // We only handle trees of height 2.
  if (VectorizableTree.size() != 2)
    return false;

  // Handle splat stores.
  if (!VectorizableTree[0].NeedToGather && isSplat(VectorizableTree[1].Scalars))
    return true;

  // Gathering cost would be too much for tiny trees.
  if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
    return false;

  return true;
}

int BoUpSLP::getTreeCost() {
  int Cost = 0;
  DEBUG(dbgs() << "SLP: Calculating cost for tree of size " <<
        VectorizableTree.size() << ".\n");

  // We only vectorize tiny trees if they are fully vectorizable.
  if (VectorizableTree.size() < 3 && !isFullyVectorizableTinyTree()) {
    if (!VectorizableTree.size()) {
      assert(!ExternalUses.size() && "We should not have any external users");
    }
    return INT_MAX;
  }

  unsigned BundleWidth = VectorizableTree[0].Scalars.size();

  for (unsigned i = 0, e = VectorizableTree.size(); i != e; ++i) {
    int C = getEntryCost(&VectorizableTree[i]);
    DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with "
          << *VectorizableTree[i].Scalars[0] << ".\n");
    Cost += C;
  }

  SmallSet<Value *, 16> ExtractCostCalculated;
  int ExtractCost = 0;
  for (UserList::iterator I = ExternalUses.begin(), E = ExternalUses.end();
       I != E; ++I) {
    // We only add extract cost once for the same scalar.
    if (!ExtractCostCalculated.insert(I->Scalar))
      continue;

    VectorType *VecTy = VectorType::get(I->Scalar->getType(), BundleWidth);
    ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                           I->Lane);
  }

  DEBUG(dbgs() << "SLP: Total Cost " << Cost + ExtractCost << ".\n");
  return Cost + ExtractCost;
}
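// Continuing the illustrative numbers from the memory-cost comment above: a
// load bundle at -3, a store bundle at -3 and one external use costing +1
// extract would make getTreeCost() return -5; since that is below the
// (default 0) SLPCostThreshold, the driver would vectorize the tree.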
int BoUpSLP::getGatherCost(Type *Ty) {
  int Cost = 0;
  for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
    Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
  return Cost;
}

int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) {
  // Find the type of the operands in VL.
  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
  // Find the cost of inserting/extracting values from the vector.
  return getGatherCost(VecTy);
}

AliasAnalysis::Location BoUpSLP::getLocation(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return AA->getLocation(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return AA->getLocation(LI);
  return AliasAnalysis::Location();
}

Value *BoUpSLP::getPointerOperand(Value *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

unsigned BoUpSLP::getAddressSpaceOperand(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}

bool BoUpSLP::isConsecutiveAccess(Value *A, Value *B) {
  Value *PtrA = getPointerOperand(A);
  Value *PtrB = getPointerOperand(B);
  unsigned ASA = getAddressSpaceOperand(A);
  unsigned ASB = getAddressSpaceOperand(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers of the same type.
  if (PtrA == PtrB || PtrA->getType() != PtrB->getType())
    return false;

  unsigned PtrBitWidth = DL->getPointerSizeInBits(ASA);
  Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();
  APInt Size(PtrBitWidth, DL->getTypeStoreSize(Ty));

  APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetB);

  APInt OffsetDelta = OffsetB - OffsetA;

  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == Size;

  // Compute the base-pointer delta needed to make the final delta equal to
  // the element size.
  APInt BaseDelta = Size - OffsetDelta;

  // Otherwise compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE->getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE->getSCEV(PtrB);
  const SCEV *C = SE->getConstant(BaseDelta);
  const SCEV *X = SE->getAddExpr(PtrSCEVA, C);
  return X == PtrSCEVB;
}
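// For example, with i32 elements (store size 4) and identical base pointers
// after stripping in-bounds constant offsets, the accesses A and B are
// consecutive exactly when OffsetB - OffsetA == 4; when the bases differ, the
// remaining distance is checked symbolically through SCEV instead.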
Value *BoUpSLP::getSinkBarrier(Instruction *Src, Instruction *Dst) {
  assert(Src->getParent() == Dst->getParent() && "Not the same BB");
  BasicBlock::iterator I = Src, E = Dst;
  // Scan all of the instructions from SRC to DST and check if
  // the source may alias.
  for (++I; I != E; ++I) {
    // Ignore store instructions that are marked as 'ignore'.
    if (MemBarrierIgnoreList.count(I))
      continue;
    if (Src->mayWriteToMemory()) /* Write */ {
      if (!I->mayReadOrWriteMemory())
        continue;
    } else /* Read */ {
      if (!I->mayWriteToMemory())
        continue;
    }
    AliasAnalysis::Location A = getLocation(&*I);
    AliasAnalysis::Location B = getLocation(Src);

    if (!A.Ptr || !B.Ptr || AA->alias(A, B))
      return I;
  }
  return nullptr;
}

int BoUpSLP::getLastIndex(ArrayRef<Value *> VL) {
  BasicBlock *BB = cast<Instruction>(VL[0])->getParent();
  assert(BB == getSameBlock(VL) && "Invalid block");
  BlockNumbering &BN = getBlockNumbering(BB);

  int MaxIdx = BN.getIndex(BB->getFirstNonPHI());
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    MaxIdx = std::max(MaxIdx, BN.getIndex(cast<Instruction>(VL[i])));
  return MaxIdx;
}

Instruction *BoUpSLP::getLastInstruction(ArrayRef<Value *> VL) {
  BasicBlock *BB = cast<Instruction>(VL[0])->getParent();
  assert(BB == getSameBlock(VL) && "Invalid block");
  BlockNumbering &BN = getBlockNumbering(BB);

  int MaxIdx = BN.getIndex(cast<Instruction>(VL[0]));
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    MaxIdx = std::max(MaxIdx, BN.getIndex(cast<Instruction>(VL[i])));
  Instruction *I = BN.getInstruction(MaxIdx);
  assert(I && "bad location");
  return I;
}

void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) {
  Instruction *VL0 = cast<Instruction>(VL[0]);
  Instruction *LastInst = getLastInstruction(VL);
  BasicBlock::iterator NextInst = LastInst;
  ++NextInst;
  Builder.SetInsertPoint(VL0->getParent(), NextInst);
  Builder.SetCurrentDebugLocation(VL0->getDebugLoc());
}

Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
  Value *Vec = UndefValue::get(Ty);
  // Generate the 'InsertElement' instructions.
  for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
    Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
    if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
      GatherSeq.insert(Insrt);
      CSEBlocks.insert(Insrt->getParent());

      // Add to our 'need-to-extract' list.
      if (ScalarToTreeEntry.count(VL[i])) {
        int Idx = ScalarToTreeEntry[VL[i]];
        TreeEntry *E = &VectorizableTree[Idx];
        // Find which lane we need to extract.
        int FoundLane = -1;
        for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
          // Is this the lane of the scalar that we are looking for?
          if (E->Scalars[Lane] == VL[i]) {
            FoundLane = Lane;
            break;
          }
        }
        assert(FoundLane >= 0 && "Could not find the correct lane");
        ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
      }
    }
  }

  return Vec;
}
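// For VL = {%a, %b, %c, %d}, Gather() emits the usual insertelement chain:
//   %v0 = insertelement <4 x float> undef, float %a, i32 0
//   %v1 = insertelement <4 x float> %v0, float %b, i32 1
//   ...
// and records each insert in GatherSeq so that optimizeGatherSequence() can
// later hoist and CSE these sequences.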
Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const {
  SmallDenseMap<Value*, int>::const_iterator Entry
      = ScalarToTreeEntry.find(VL[0]);
  if (Entry != ScalarToTreeEntry.end()) {
    int Idx = Entry->second;
    const TreeEntry *En = &VectorizableTree[Idx];
    if (En->isSame(VL) && En->VectorizedValue)
      return En->VectorizedValue;
  }
  return nullptr;
}

Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    if (E->isSame(VL))
      return vectorizeTree(E);
  }

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  return Gather(VL, VecTy);
}

Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
  IRBuilder<>::InsertPointGuard Guard(Builder);

  if (E->VectorizedValue) {
    DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
    return E->VectorizedValue;
  }

  Instruction *VL0 = cast<Instruction>(E->Scalars[0]);
  Type *ScalarTy = VL0->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());

  if (E->NeedToGather) {
    setInsertPointAfterBundle(E->Scalars);
    return Gather(E->Scalars, VecTy);
  }
  unsigned Opcode = getSameOpcode(E->Scalars);

  switch (Opcode) {
  case Instruction::PHI: {
    PHINode *PH = dyn_cast<PHINode>(VL0);
    Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
    Builder.SetCurrentDebugLocation(PH->getDebugLoc());
    PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
    E->VectorizedValue = NewPhi;

    // PHINodes may have multiple entries from the same block. We want to
    // visit every block once.
    SmallSet<BasicBlock*, 4> VisitedBBs;

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      BasicBlock *IBB = PH->getIncomingBlock(i);

      if (!VisitedBBs.insert(IBB)) {
        NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
        continue;
      }

      // Prepare the operand vector.
      for (unsigned j = 0; j < E->Scalars.size(); ++j)
        Operands.push_back(cast<PHINode>(E->Scalars[j])->
                           getIncomingValueForBlock(IBB));

      Builder.SetInsertPoint(IBB->getTerminator());
      Builder.SetCurrentDebugLocation(PH->getDebugLoc());
      Value *Vec = vectorizeTree(Operands);
      NewPhi->addIncoming(Vec, IBB);
    }

    assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
           "Invalid number of incoming values");
    return NewPhi;
  }

  case Instruction::ExtractElement: {
    if (CanReuseExtract(E->Scalars)) {
      Value *V = VL0->getOperand(0);
      E->VectorizedValue = V;
      return V;
    }
    return Gather(E->Scalars, VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    ValueList INVL;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i)
      INVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));

    setInsertPointAfterBundle(E->Scalars);

    Value *InVec = vectorizeTree(INVL);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    CastInst *CI = dyn_cast<CastInst>(VL0);
    Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::FCmp:
  case Instruction::ICmp: {
    ValueList LHSV, RHSV;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
      LHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
      RHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
    }

    setInsertPointAfterBundle(E->Scalars);

    Value *L = vectorizeTree(LHSV);
    Value *R = vectorizeTree(RHSV);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    CmpInst::Predicate P0 = dyn_cast<CmpInst>(VL0)->getPredicate();
    Value *V;
    if (Opcode == Instruction::FCmp)
      V = Builder.CreateFCmp(P0, L, R);
    else
      V = Builder.CreateICmp(P0, L, R);

    E->VectorizedValue = V;
    return V;
  }
  case Instruction::Select: {
    ValueList TrueVec, FalseVec, CondVec;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
      CondVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
      TrueVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
      FalseVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(2));
    }

    setInsertPointAfterBundle(E->Scalars);

    Value *Cond = vectorizeTree(CondVec);
    Value *True = vectorizeTree(TrueVec);
    Value *False = vectorizeTree(FalseVec);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    Value *V = Builder.CreateSelect(Cond, True, False);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
case Instruction::Xor: { 1763 ValueList LHSVL, RHSVL; 1764 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) 1765 reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL); 1766 else 1767 for (int i = 0, e = E->Scalars.size(); i < e; ++i) { 1768 LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0)); 1769 RHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1)); 1770 } 1771 1772 setInsertPointAfterBundle(E->Scalars); 1773 1774 Value *LHS = vectorizeTree(LHSVL); 1775 Value *RHS = vectorizeTree(RHSVL); 1776 1777 if (LHS == RHS && isa<Instruction>(LHS)) { 1778 assert((VL0->getOperand(0) == VL0->getOperand(1)) && "Invalid order"); 1779 } 1780 1781 if (Value *V = alreadyVectorized(E->Scalars)) 1782 return V; 1783 1784 BinaryOperator *BinOp = cast<BinaryOperator>(VL0); 1785 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS); 1786 E->VectorizedValue = V; 1787 1788 if (Instruction *I = dyn_cast<Instruction>(V)) 1789 return propagateMetadata(I, E->Scalars); 1790 1791 return V; 1792 } 1793 case Instruction::Load: { 1794 // Loads are inserted at the head of the tree because we don't want to 1795 // sink them all the way down past store instructions. 1796 setInsertPointAfterBundle(E->Scalars); 1797 1798 LoadInst *LI = cast<LoadInst>(VL0); 1799 unsigned AS = LI->getPointerAddressSpace(); 1800 1801 Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(), 1802 VecTy->getPointerTo(AS)); 1803 unsigned Alignment = LI->getAlignment(); 1804 LI = Builder.CreateLoad(VecPtr); 1805 if (!Alignment) 1806 Alignment = DL->getABITypeAlignment(LI->getPointerOperand()->getType()); 1807 LI->setAlignment(Alignment); 1808 E->VectorizedValue = LI; 1809 return propagateMetadata(LI, E->Scalars); 1810 } 1811 case Instruction::Store: { 1812 StoreInst *SI = cast<StoreInst>(VL0); 1813 unsigned Alignment = SI->getAlignment(); 1814 unsigned AS = SI->getPointerAddressSpace(); 1815 1816 ValueList ValueOp; 1817 for (int i = 0, e = E->Scalars.size(); i < e; ++i) 1818 ValueOp.push_back(cast<StoreInst>(E->Scalars[i])->getValueOperand()); 1819 1820 setInsertPointAfterBundle(E->Scalars); 1821 1822 Value *VecValue = vectorizeTree(ValueOp); 1823 Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(), 1824 VecTy->getPointerTo(AS)); 1825 StoreInst *S = Builder.CreateStore(VecValue, VecPtr); 1826 if (!Alignment) 1827 Alignment = DL->getABITypeAlignment(SI->getPointerOperand()->getType()); 1828 S->setAlignment(Alignment); 1829 E->VectorizedValue = S; 1830 return propagateMetadata(S, E->Scalars); 1831 } 1832 case Instruction::GetElementPtr: { 1833 setInsertPointAfterBundle(E->Scalars); 1834 1835 ValueList Op0VL; 1836 for (int i = 0, e = E->Scalars.size(); i < e; ++i) 1837 Op0VL.push_back(cast<GetElementPtrInst>(E->Scalars[i])->getOperand(0)); 1838 1839 Value *Op0 = vectorizeTree(Op0VL); 1840 1841 std::vector<Value *> OpVecs; 1842 for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e; 1843 ++j) { 1844 ValueList OpVL; 1845 for (int i = 0, e = E->Scalars.size(); i < e; ++i) 1846 OpVL.push_back(cast<GetElementPtrInst>(E->Scalars[i])->getOperand(j)); 1847 1848 Value *OpVec = vectorizeTree(OpVL); 1849 OpVecs.push_back(OpVec); 1850 } 1851 1852 Value *V = Builder.CreateGEP(Op0, OpVecs); 1853 E->VectorizedValue = V; 1854 1855 if (Instruction *I = dyn_cast<Instruction>(V)) 1856 return propagateMetadata(I, E->Scalars); 1857 1858 return V; 1859 } 1860 case Instruction::Call: { 1861 CallInst *CI = cast<CallInst>(VL0); 1862 setInsertPointAfterBundle(E->Scalars); 1863 Function *FI; 1864 
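// Determine up front whether the callee is a known intrinsic; the ID is needed below to spot operands (e.g. the second argument of powi) that must remain scalar rather than be vectorized.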
Intrinsic::ID IID = Intrinsic::not_intrinsic; 1865 if (CI && (FI = CI->getCalledFunction())) { 1866 IID = (Intrinsic::ID) FI->getIntrinsicID(); 1867 } 1868 std::vector<Value *> OpVecs; 1869 for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) { 1870 ValueList OpVL; 1871 // ctlz, cttz and powi are special intrinsics whose second argument is 1872 // a scalar. This argument should not be vectorized. 1873 if (hasVectorInstrinsicScalarOpd(IID, 1) && j == 1) { 1874 CallInst *CEI = cast<CallInst>(E->Scalars[0]); 1875 OpVecs.push_back(CEI->getArgOperand(j)); 1876 continue; 1877 } 1878 for (int i = 0, e = E->Scalars.size(); i < e; ++i) { 1879 CallInst *CEI = cast<CallInst>(E->Scalars[i]); 1880 OpVL.push_back(CEI->getArgOperand(j)); 1881 } 1882 1883 Value *OpVec = vectorizeTree(OpVL); 1884 DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n"); 1885 OpVecs.push_back(OpVec); 1886 } 1887 1888 Module *M = F->getParent(); 1889 Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI); 1890 Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) }; 1891 Function *CF = Intrinsic::getDeclaration(M, ID, Tys); 1892 Value *V = Builder.CreateCall(CF, OpVecs); 1893 E->VectorizedValue = V; 1894 return V; 1895 } 1896 case Instruction::ShuffleVector: { 1897 ValueList LHSVL, RHSVL; 1898 for (int i = 0, e = E->Scalars.size(); i < e; ++i) { 1899 LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0)); 1900 RHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1)); 1901 } 1902 setInsertPointAfterBundle(E->Scalars); 1903 1904 Value *LHS = vectorizeTree(LHSVL); 1905 Value *RHS = vectorizeTree(RHSVL); 1906 1907 if (Value *V = alreadyVectorized(E->Scalars)) 1908 return V; 1909 1910 // Create a vector of LHS op1 RHS 1911 BinaryOperator *BinOp0 = cast<BinaryOperator>(VL0); 1912 Value *V0 = Builder.CreateBinOp(BinOp0->getOpcode(), LHS, RHS); 1913 1914 // Create a vector of LHS op2 RHS 1915 Instruction *VL1 = cast<Instruction>(E->Scalars[1]); 1916 BinaryOperator *BinOp1 = cast<BinaryOperator>(VL1); 1917 Value *V1 = Builder.CreateBinOp(BinOp1->getOpcode(), LHS, RHS); 1918 1919 // Create an appropriate shuffle to take the alternate operations from 1920 // the two vectors. 1921 std::vector<Constant *> Mask(E->Scalars.size()); 1922 unsigned e = E->Scalars.size(); 1923 for (unsigned i = 0; i < e; ++i) { 1924 if (i & 1) 1925 Mask[i] = Builder.getInt32(e + i); 1926 else 1927 Mask[i] = Builder.getInt32(i); 1928 } 1929 1930 Value *ShuffleMask = ConstantVector::get(Mask); 1931 1932 Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask); 1933 E->VectorizedValue = V; 1934 if (Instruction *I = dyn_cast<Instruction>(V)) 1935 return propagateMetadata(I, E->Scalars); 1936 1937 return V; 1938 } 1939 default: 1940 llvm_unreachable("unknown inst"); 1941 } 1942 return nullptr; 1943 } 1944 1945 Value *BoUpSLP::vectorizeTree() { 1946 Builder.SetInsertPoint(F->getEntryBlock().begin()); 1947 vectorizeTree(&VectorizableTree[0]); 1948 1949 DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values.\n"); 1950 1951 // Extract all of the elements with the external uses. 1952 for (UserList::iterator it = ExternalUses.begin(), e = ExternalUses.end(); 1953 it != e; ++it) { 1954 Value *Scalar = it->Scalar; 1955 llvm::User *User = it->User; 1956 1957 // Skip users that we already RAUWed. This happens when one instruction 1958 // has multiple uses of the same value.
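// For example, if one 'add' uses the vectorized scalar %s for both of its operands, the first ExternalUses entry rewrites both uses at once and the second entry finds no remaining use of %s (illustrative example).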
1959 if (std::find(Scalar->user_begin(), Scalar->user_end(), User) == 1960 Scalar->user_end()) 1961 continue; 1962 assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar"); 1963 1964 int Idx = ScalarToTreeEntry[Scalar]; 1965 TreeEntry *E = &VectorizableTree[Idx]; 1966 assert(!E->NeedToGather && "Extracting from a gather list"); 1967 1968 Value *Vec = E->VectorizedValue; 1969 assert(Vec && "Can't find vectorizable value"); 1970 1971 Value *Lane = Builder.getInt32(it->Lane); 1972 // Generate extracts for out-of-tree users. 1973 // Find the insertion point for the extractelement lane. 1974 if (isa<Instruction>(Vec)) { 1975 if (PHINode *PH = dyn_cast<PHINode>(User)) { 1976 for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) { 1977 if (PH->getIncomingValue(i) == Scalar) { 1978 Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator()); 1979 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 1980 CSEBlocks.insert(PH->getIncomingBlock(i)); 1981 PH->setOperand(i, Ex); 1982 } 1983 } 1984 } else { 1985 Builder.SetInsertPoint(cast<Instruction>(User)); 1986 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 1987 CSEBlocks.insert(cast<Instruction>(User)->getParent()); 1988 User->replaceUsesOfWith(Scalar, Ex); 1989 } 1990 } else { 1991 Builder.SetInsertPoint(F->getEntryBlock().begin()); 1992 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 1993 CSEBlocks.insert(&F->getEntryBlock()); 1994 User->replaceUsesOfWith(Scalar, Ex); 1995 } 1996 1997 DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); 1998 } 1999 2000 // For each vectorized value: 2001 for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) { 2002 TreeEntry *Entry = &VectorizableTree[EIdx]; 2003 2004 // For each lane: 2005 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 2006 Value *Scalar = Entry->Scalars[Lane]; 2007 // No need to handle users of gathered values. 2008 if (Entry->NeedToGather) 2009 continue; 2010 2011 assert(Entry->VectorizedValue && "Can't find vectorizable value"); 2012 2013 Type *Ty = Scalar->getType(); 2014 if (!Ty->isVoidTy()) { 2015 #ifndef NDEBUG 2016 for (User *U : Scalar->users()) { 2017 DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); 2018 2019 assert((ScalarToTreeEntry.count(U) || 2020 // It is legal to replace users in the ignorelist by undef. 2021 (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), U) != 2022 UserIgnoreList.end())) && 2023 "Replacing out-of-tree value with undef"); 2024 } 2025 #endif 2026 Value *Undef = UndefValue::get(Ty); 2027 Scalar->replaceAllUsesWith(Undef); 2028 } 2029 DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n"); 2030 cast<Instruction>(Scalar)->eraseFromParent(); 2031 } 2032 } 2033 2034 for (auto &BN : BlocksNumbers) 2035 BN.second.forget(); 2036 2037 Builder.ClearInsertionPoint(); 2038 2039 return VectorizableTree[0].VectorizedValue; 2040 } 2041 2042 void BoUpSLP::optimizeGatherSequence() { 2043 DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size() 2044 << " gather sequence instructions.\n"); 2045 // LICM InsertElementInst sequences. 2046 for (SetVector<Instruction *>::iterator it = GatherSeq.begin(), 2047 e = GatherSeq.end(); it != e; ++it) { 2048 InsertElementInst *Insert = dyn_cast<InsertElementInst>(*it); 2049 2050 if (!Insert) 2051 continue; 2052 2053 // Check if this block is inside a loop. 2054 Loop *L = LI->getLoopFor(Insert->getParent()); 2055 if (!L) 2056 continue; 2057 2058 // Check if it has a preheader.
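// Without a dedicated preheader there is no single block that is guaranteed to execute exactly once before entering the loop, so there is no safe place to materialize the hoisted insertelement just once.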
2059 BasicBlock *PreHeader = L->getLoopPreheader(); 2060 if (!PreHeader) 2061 continue; 2062 2063 // If the vector or the element that we insert into it are 2064 // instructions that are defined in this basic block then we can't 2065 // hoist this instruction. 2066 Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0)); 2067 Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1)); 2068 if (CurrVec && L->contains(CurrVec)) 2069 continue; 2070 if (NewElem && L->contains(NewElem)) 2071 continue; 2072 2073 // We can hoist this instruction. Move it to the pre-header. 2074 Insert->moveBefore(PreHeader->getTerminator()); 2075 } 2076 2077 // Make a list of all reachable blocks in our CSE queue. 2078 SmallVector<const DomTreeNode *, 8> CSEWorkList; 2079 CSEWorkList.reserve(CSEBlocks.size()); 2080 for (BasicBlock *BB : CSEBlocks) 2081 if (DomTreeNode *N = DT->getNode(BB)) { 2082 assert(DT->isReachableFromEntry(N)); 2083 CSEWorkList.push_back(N); 2084 } 2085 2086 // Sort blocks by domination. This ensures we visit a block after all blocks 2087 // dominating it are visited. 2088 std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(), 2089 [this](const DomTreeNode *A, const DomTreeNode *B) { 2090 return DT->properlyDominates(A, B); 2091 }); 2092 2093 // Perform O(N^2) search over the gather sequences and merge identical 2094 // instructions. TODO: We can further optimize this scan if we split the 2095 // instructions into different buckets based on the insert lane. 2096 SmallVector<Instruction *, 16> Visited; 2097 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) { 2098 assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) && 2099 "Worklist not sorted properly!"); 2100 BasicBlock *BB = (*I)->getBlock(); 2101 // For all instructions in blocks containing gather sequences: 2102 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) { 2103 Instruction *In = it++; 2104 if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In)) 2105 continue; 2106 2107 // Check if we can replace this instruction with any of the 2108 // visited instructions. 2109 for (SmallVectorImpl<Instruction *>::iterator v = Visited.begin(), 2110 ve = Visited.end(); 2111 v != ve; ++v) { 2112 if (In->isIdenticalTo(*v) && 2113 DT->dominates((*v)->getParent(), In->getParent())) { 2114 In->replaceAllUsesWith(*v); 2115 In->eraseFromParent(); 2116 In = nullptr; 2117 break; 2118 } 2119 } 2120 if (In) { 2121 assert(std::find(Visited.begin(), Visited.end(), In) == Visited.end()); 2122 Visited.push_back(In); 2123 } 2124 } 2125 } 2126 CSEBlocks.clear(); 2127 GatherSeq.clear(); 2128 } 2129 2130 /// The SLPVectorizer Pass. 2131 struct SLPVectorizer : public FunctionPass { 2132 typedef SmallVector<StoreInst *, 8> StoreList; 2133 typedef MapVector<Value *, StoreList> StoreListMap; 2134 2135 /// Pass identification, replacement for typeid 2136 static char ID; 2137 2138 explicit SLPVectorizer() : FunctionPass(ID) { 2139 initializeSLPVectorizerPass(*PassRegistry::getPassRegistry()); 2140 } 2141 2142 ScalarEvolution *SE; 2143 const DataLayout *DL; 2144 TargetTransformInfo *TTI; 2145 TargetLibraryInfo *TLI; 2146 AliasAnalysis *AA; 2147 LoopInfo *LI; 2148 DominatorTree *DT; 2149 2150 bool runOnFunction(Function &F) override { 2151 if (skipOptnoneFunction(F)) 2152 return false; 2153 2154 SE = &getAnalysis<ScalarEvolution>(); 2155 DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>(); 2156 DL = DLP ? 
&DLP->getDataLayout() : nullptr; 2157 TTI = &getAnalysis<TargetTransformInfo>(); 2158 TLI = getAnalysisIfAvailable<TargetLibraryInfo>(); 2159 AA = &getAnalysis<AliasAnalysis>(); 2160 LI = &getAnalysis<LoopInfo>(); 2161 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2162 2163 StoreRefs.clear(); 2164 bool Changed = false; 2165 2166 // If the target claims to have no vector registers don't attempt 2167 // vectorization. 2168 if (!TTI->getNumberOfRegisters(true)) 2169 return false; 2170 2171 // Must have DataLayout. We can't require it because some tests run w/o 2172 // triple. 2173 if (!DL) 2174 return false; 2175 2176 // Don't vectorize when the attribute NoImplicitFloat is used. 2177 if (F.hasFnAttribute(Attribute::NoImplicitFloat)) 2178 return false; 2179 2180 DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n"); 2181 2182 // Use the bottom-up SLP vectorizer to construct chains that start with 2183 // store instructions. 2184 BoUpSLP R(&F, SE, DL, TTI, TLI, AA, LI, DT); 2185 2186 // Scan the blocks in the function in post order. 2187 for (po_iterator<BasicBlock*> it = po_begin(&F.getEntryBlock()), 2188 e = po_end(&F.getEntryBlock()); it != e; ++it) { 2189 BasicBlock *BB = *it; 2190 // Vectorize trees that end at stores. 2191 if (unsigned count = collectStores(BB, R)) { 2192 (void)count; 2193 DEBUG(dbgs() << "SLP: Found " << count << " stores to vectorize.\n"); 2194 Changed |= vectorizeStoreChains(R); 2195 } 2196 2197 // Vectorize trees that end at reductions. 2198 Changed |= vectorizeChainsInBlock(BB, R); 2199 } 2200 2201 if (Changed) { 2202 R.optimizeGatherSequence(); 2203 DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n"); 2204 DEBUG(verifyFunction(F)); 2205 } 2206 return Changed; 2207 } 2208 2209 void getAnalysisUsage(AnalysisUsage &AU) const override { 2210 FunctionPass::getAnalysisUsage(AU); 2211 AU.addRequired<ScalarEvolution>(); 2212 AU.addRequired<AliasAnalysis>(); 2213 AU.addRequired<TargetTransformInfo>(); 2214 AU.addRequired<LoopInfo>(); 2215 AU.addRequired<DominatorTreeWrapperPass>(); 2216 AU.addPreserved<LoopInfo>(); 2217 AU.addPreserved<DominatorTreeWrapperPass>(); 2218 AU.setPreservesCFG(); 2219 } 2220 2221 private: 2222 2223 /// \brief Collect memory references and sort them according to their base 2224 /// object. We sort the stores to their base objects to reduce the cost of the 2225 /// quadratic search on the stores. TODO: We can further reduce this cost 2226 /// if we flush the chain creation every time we run into a memory barrier. 2227 unsigned collectStores(BasicBlock *BB, BoUpSLP &R); 2228 2229 /// \brief Try to vectorize a chain that starts at two arithmetic instrs. 2230 bool tryToVectorizePair(Value *A, Value *B, BoUpSLP &R); 2231 2232 /// \brief Try to vectorize a list of operands. 2233 /// \param BuildVector A list of users to ignore for the purpose of 2234 /// scheduling and that don't need extracting. 2235 /// \returns true if a value was vectorized. 2236 bool tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, 2237 ArrayRef<Value *> BuildVector = None); 2238 2239 /// \brief Try to vectorize a chain that may start at the operands of \p V. 2240 bool tryToVectorize(BinaryOperator *V, BoUpSLP &R); 2241 2242 /// \brief Vectorize the stores that were collected in StoreRefs. 2243 bool vectorizeStoreChains(BoUpSLP &R); 2244 2245 /// \brief Scan the basic block and look for patterns that are likely to start 2246 /// a vectorization chain.
2247 bool vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R); 2248 2249 bool vectorizeStoreChain(ArrayRef<Value *> Chain, int CostThreshold, 2250 BoUpSLP &R); 2251 2252 bool vectorizeStores(ArrayRef<StoreInst *> Stores, int costThreshold, 2253 BoUpSLP &R); 2254 private: 2255 StoreListMap StoreRefs; 2256 }; 2257 2258 /// \brief Check that the values in the slice of the VL array still exist in 2259 /// the WeakVH array. 2260 /// Vectorization of part of the VL array may cause later values in the VL array 2261 /// to become invalid. We track when this has happened in the WeakVH array. 2262 static bool hasValueBeenRAUWed(ArrayRef<Value *> VL, 2263 SmallVectorImpl<WeakVH> &VH, 2264 unsigned SliceBegin, 2265 unsigned SliceSize) { 2266 for (unsigned i = SliceBegin; i < SliceBegin + SliceSize; ++i) 2267 if (VH[i] != VL[i]) 2268 return true; 2269 2270 return false; 2271 } 2272 2273 bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain, 2274 int CostThreshold, BoUpSLP &R) { 2275 unsigned ChainLen = Chain.size(); 2276 DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen 2277 << "\n"); 2278 Type *StoreTy = cast<StoreInst>(Chain[0])->getValueOperand()->getType(); 2279 unsigned Sz = DL->getTypeSizeInBits(StoreTy); 2280 unsigned VF = MinVecRegSize / Sz; 2281 2282 if (!isPowerOf2_32(Sz) || VF < 2) 2283 return false; 2284 2285 // Keep track of values that were deleted by vectorizing in the loop below. 2286 SmallVector<WeakVH, 8> TrackValues(Chain.begin(), Chain.end()); 2287 2288 bool Changed = false; 2289 // Look for profitable vectorizable trees at all offsets, starting at zero. 2290 for (unsigned i = 0, e = ChainLen; i < e; ++i) { 2291 if (i + VF > e) 2292 break; 2293 2294 // Check that a previous iteration of this loop did not delete the Value. 2295 if (hasValueBeenRAUWed(Chain, TrackValues, i, VF)) 2296 continue; 2297 2298 DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i 2299 << "\n"); 2300 ArrayRef<Value *> Operands = Chain.slice(i, VF); 2301 2302 R.buildTree(Operands); 2303 2304 int Cost = R.getTreeCost(); 2305 2306 DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n"); 2307 if (Cost < CostThreshold) { 2308 DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n"); 2309 R.vectorizeTree(); 2310 2311 // Move to the next bundle. 2312 i += VF - 1; 2313 Changed = true; 2314 } 2315 } 2316 2317 return Changed; 2318 } 2319 2320 bool SLPVectorizer::vectorizeStores(ArrayRef<StoreInst *> Stores, 2321 int costThreshold, BoUpSLP &R) { 2322 SetVector<Value *> Heads, Tails; 2323 SmallDenseMap<Value *, Value *> ConsecutiveChain; 2324 2325 // We may run into multiple chains that merge into a single chain. We mark the 2326 // stores that we vectorized so that we don't visit the same store twice. 2327 BoUpSLP::ValueSet VectorizedStores; 2328 bool Changed = false; 2329 2330 // Do a quadratic search on all of the given stores and find 2331 // all of the pairs of stores that follow each other.
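// For illustration: stores to a[0], a[1], a[2] produce the links a[0]->a[1] and a[1]->a[2]; only a[0] is a head that is not also a tail, so the chain walk below starts there (array names hypothetical).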
2332 for (unsigned i = 0, e = Stores.size(); i < e; ++i) { 2333 for (unsigned j = 0; j < e; ++j) { 2334 if (i == j) 2335 continue; 2336 2337 if (R.isConsecutiveAccess(Stores[i], Stores[j])) { 2338 Tails.insert(Stores[j]); 2339 Heads.insert(Stores[i]); 2340 ConsecutiveChain[Stores[i]] = Stores[j]; 2341 } 2342 } 2343 } 2344 2345 // For stores that start but don't end a link in the chain: 2346 for (SetVector<Value *>::iterator it = Heads.begin(), e = Heads.end(); 2347 it != e; ++it) { 2348 if (Tails.count(*it)) 2349 continue; 2350 2351 // We found a store instr that starts a chain. Now follow the chain and try 2352 // to vectorize it. 2353 BoUpSLP::ValueList Operands; 2354 Value *I = *it; 2355 // Collect the chain into a list. 2356 while (Tails.count(I) || Heads.count(I)) { 2357 if (VectorizedStores.count(I)) 2358 break; 2359 Operands.push_back(I); 2360 // Move to the next value in the chain. 2361 I = ConsecutiveChain[I]; 2362 } 2363 2364 bool Vectorized = vectorizeStoreChain(Operands, costThreshold, R); 2365 2366 // Mark the vectorized stores so that we don't vectorize them again. 2367 if (Vectorized) 2368 VectorizedStores.insert(Operands.begin(), Operands.end()); 2369 Changed |= Vectorized; 2370 } 2371 2372 return Changed; 2373 } 2374 2375 2376 unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) { 2377 unsigned count = 0; 2378 StoreRefs.clear(); 2379 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) { 2380 StoreInst *SI = dyn_cast<StoreInst>(it); 2381 if (!SI) 2382 continue; 2383 2384 // Don't touch volatile stores. 2385 if (!SI->isSimple()) 2386 continue; 2387 2388 // Check that the pointer points to scalars. 2389 Type *Ty = SI->getValueOperand()->getType(); 2390 if (Ty->isAggregateType() || Ty->isVectorTy()) 2391 continue; 2392 2393 // Find the base pointer. 2394 Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), DL); 2395 2396 // Save the store locations. 2397 StoreRefs[Ptr].push_back(SI); 2398 count++; 2399 } 2400 return count; 2401 } 2402 2403 bool SLPVectorizer::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) { 2404 if (!A || !B) 2405 return false; 2406 Value *VL[] = { A, B }; 2407 return tryToVectorizeList(VL, R); 2408 } 2409 2410 bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, 2411 ArrayRef<Value *> BuildVector) { 2412 if (VL.size() < 2) 2413 return false; 2414 2415 DEBUG(dbgs() << "SLP: Vectorizing a list of length = " << VL.size() << ".\n"); 2416 2417 // Check that all of the parts are scalar instructions of the same type. 2418 Instruction *I0 = dyn_cast<Instruction>(VL[0]); 2419 if (!I0) 2420 return false; 2421 2422 unsigned Opcode0 = I0->getOpcode(); 2423 2424 Type *Ty0 = I0->getType(); 2425 unsigned Sz = DL->getTypeSizeInBits(Ty0); 2426 unsigned VF = MinVecRegSize / Sz; 2427 2428 for (int i = 0, e = VL.size(); i < e; ++i) { 2429 Type *Ty = VL[i]->getType(); 2430 if (Ty->isAggregateType() || Ty->isVectorTy()) 2431 return false; 2432 Instruction *Inst = dyn_cast<Instruction>(VL[i]); 2433 if (!Inst || Inst->getOpcode() != Opcode0) 2434 return false; 2435 } 2436 2437 bool Changed = false; 2438 2439 // Keep track of values that were deleted by vectorizing in the loop below. 
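// A WeakVH is cleared when its instruction is deleted, so comparing these tracked handles against the original VL entries (see hasValueBeenRAUWed) detects values that earlier iterations erased.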
2440 SmallVector<WeakVH, 8> TrackValues(VL.begin(), VL.end()); 2441 2442 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 2443 unsigned OpsWidth = 0; 2444 2445 if (i + VF > e) 2446 OpsWidth = e - i; 2447 else 2448 OpsWidth = VF; 2449 2450 if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2) 2451 break; 2452 2453 // Check that a previous iteration of this loop did not delete the Value. 2454 if (hasValueBeenRAUWed(VL, TrackValues, i, OpsWidth)) 2455 continue; 2456 2457 DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations " 2458 << "\n"); 2459 ArrayRef<Value *> Ops = VL.slice(i, OpsWidth); 2460 2461 ArrayRef<Value *> BuildVectorSlice; 2462 if (!BuildVector.empty()) 2463 BuildVectorSlice = BuildVector.slice(i, OpsWidth); 2464 2465 R.buildTree(Ops, BuildVectorSlice); 2466 int Cost = R.getTreeCost(); 2467 2468 if (Cost < -SLPCostThreshold) { 2469 DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n"); 2470 Value *VectorizedRoot = R.vectorizeTree(); 2471 2472 // Reconstruct the build vector by extracting the vectorized root. This 2473 // way we handle the case where some elements of the vector are undefined. 2474 // (return (insertelt <4 x i32> (insertelt undef (opd0) 0) (opd1) 2)) 2475 if (!BuildVectorSlice.empty()) { 2476 // The insert point is the last build vector instruction. The vectorized 2477 // root will precede it. This guarantees that we get an instruction. The 2478 // vectorized tree could have been constant folded. 2479 Instruction *InsertAfter = cast<Instruction>(BuildVectorSlice.back()); 2480 unsigned VecIdx = 0; 2481 for (auto &V : BuildVectorSlice) { 2482 IRBuilder<true, NoFolder> Builder( 2483 ++BasicBlock::iterator(InsertAfter)); 2484 InsertElementInst *IE = cast<InsertElementInst>(V); 2485 Instruction *Extract = cast<Instruction>(Builder.CreateExtractElement( 2486 VectorizedRoot, Builder.getInt32(VecIdx++))); 2487 IE->setOperand(1, Extract); 2488 IE->removeFromParent(); 2489 IE->insertAfter(Extract); 2490 InsertAfter = IE; 2491 } 2492 } 2493 // Move to the next bundle. 2494 i += VF - 1; 2495 Changed = true; 2496 } 2497 } 2498 2499 return Changed; 2500 } 2501 2502 bool SLPVectorizer::tryToVectorize(BinaryOperator *V, BoUpSLP &R) { 2503 if (!V) 2504 return false; 2505 2506 // Try to vectorize V. 2507 if (tryToVectorizePair(V->getOperand(0), V->getOperand(1), R)) 2508 return true; 2509 2510 BinaryOperator *A = dyn_cast<BinaryOperator>(V->getOperand(0)); 2511 BinaryOperator *B = dyn_cast<BinaryOperator>(V->getOperand(1)); 2512 // Try to skip B. 2513 if (B && B->hasOneUse()) { 2514 BinaryOperator *B0 = dyn_cast<BinaryOperator>(B->getOperand(0)); 2515 BinaryOperator *B1 = dyn_cast<BinaryOperator>(B->getOperand(1)); 2516 if (tryToVectorizePair(A, B0, R)) { 2517 B->moveBefore(V); 2518 return true; 2519 } 2520 if (tryToVectorizePair(A, B1, R)) { 2521 B->moveBefore(V); 2522 return true; 2523 } 2524 } 2525 2526 // Try to skip A. 2527 if (A && A->hasOneUse()) { 2528 BinaryOperator *A0 = dyn_cast<BinaryOperator>(A->getOperand(0)); 2529 BinaryOperator *A1 = dyn_cast<BinaryOperator>(A->getOperand(1)); 2530 if (tryToVectorizePair(A0, B, R)) { 2531 A->moveBefore(V); 2532 return true; 2533 } 2534 if (tryToVectorizePair(A1, B, R)) { 2535 A->moveBefore(V); 2536 return true; 2537 } 2538 } 2539 return false; 2540 } 2541 2542 /// \brief Generate a shuffle mask to be used in a reduction tree. 2543 /// 2544 /// \param VecLen The length of the vector to be reduced. 2545 /// \param NumEltsToRdx The number of elements that should be reduced in the 2546 /// vector.
2547 /// \param IsPairwise Whether the reduction is a pairwise or splitting 2548 /// reduction. A pairwise reduction will generate a mask of 2549 /// <0,2,...> or <1,3,...> while a splitting reduction will generate 2550 /// <2,3, undef,undef> for a vector of 4 and NumElts = 2. 2551 /// \param IsLeft If true, generate a mask of even elements; odd otherwise. 2552 static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx, 2553 bool IsPairwise, bool IsLeft, 2554 IRBuilder<> &Builder) { 2555 assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask"); 2556 2557 SmallVector<Constant *, 32> ShuffleMask( 2558 VecLen, UndefValue::get(Builder.getInt32Ty())); 2559 2560 if (IsPairwise) 2561 // Build a mask of 0, 2, ... (left) or 1, 3, ... (right). 2562 for (unsigned i = 0; i != NumEltsToRdx; ++i) 2563 ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft); 2564 else 2565 // Move the upper half of the vector to the lower half. 2566 for (unsigned i = 0; i != NumEltsToRdx; ++i) 2567 ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i); 2568 2569 return ConstantVector::get(ShuffleMask); 2570 } 2571 2572 2573 /// Model horizontal reductions. 2574 /// 2575 /// A horizontal reduction is a tree of reduction operations (currently add and 2576 /// fadd) that has operations that can be put into a vector as its leaves. 2577 /// For example, this tree: 2578 /// 2579 /// mul mul mul mul 2580 /// \ / \ / 2581 /// + + 2582 /// \ / 2583 /// + 2584 /// This tree has "mul" as its reduced values and "+" as its reduction 2585 /// operations. A reduction might be feeding into a store or a binary operation 2586 /// feeding a phi. 2587 /// ... 2588 /// \ / 2589 /// + 2590 /// | 2591 /// phi += 2592 /// 2593 /// Or: 2594 /// ... 2595 /// \ / 2596 /// + 2597 /// | 2598 /// *p = 2599 /// 2600 class HorizontalReduction { 2601 SmallVector<Value *, 16> ReductionOps; 2602 SmallVector<Value *, 32> ReducedVals; 2603 2604 BinaryOperator *ReductionRoot; 2605 PHINode *ReductionPHI; 2606 2607 /// The opcode of the reduction. 2608 unsigned ReductionOpcode; 2609 /// The opcode of the values we perform a reduction on. 2610 unsigned ReducedValueOpcode; 2611 /// The width of one full horizontal reduction operation. 2612 unsigned ReduxWidth; 2613 /// Should we model this reduction as a pairwise reduction tree or a tree that 2614 /// splits the vector in halves and adds those halves. 2615 bool IsPairwiseReduction; 2616 2617 public: 2618 HorizontalReduction() 2619 : ReductionRoot(nullptr), ReductionPHI(nullptr), ReductionOpcode(0), 2620 ReducedValueOpcode(0), ReduxWidth(0), IsPairwiseReduction(false) {} 2621 2622 /// \brief Try to find a reduction tree. 2623 bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B, 2624 const DataLayout *DL) { 2625 assert((!Phi || 2626 std::find(Phi->op_begin(), Phi->op_end(), B) != Phi->op_end()) && 2627 "This phi needs to use the binary operator"); 2628 2629 // We could have an initial reduction that is not an add. 2630 // r *= v1 + v2 + v3 + v4 2631 // In such a case start looking for a tree rooted in the first '+'.
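// That is, when B is the phi's own update operation (the '*' above), we drop the phi and re-root the search at B's other operand, which is the first '+' of the reduction tree.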
2632 if (Phi) { 2633 if (B->getOperand(0) == Phi) { 2634 Phi = nullptr; 2635 B = dyn_cast<BinaryOperator>(B->getOperand(1)); 2636 } else if (B->getOperand(1) == Phi) { 2637 Phi = nullptr; 2638 B = dyn_cast<BinaryOperator>(B->getOperand(0)); 2639 } 2640 } 2641 2642 if (!B) 2643 return false; 2644 2645 Type *Ty = B->getType(); 2646 if (Ty->isVectorTy()) 2647 return false; 2648 2649 ReductionOpcode = B->getOpcode(); 2650 ReducedValueOpcode = 0; 2651 ReduxWidth = MinVecRegSize / DL->getTypeSizeInBits(Ty); 2652 ReductionRoot = B; 2653 ReductionPHI = Phi; 2654 2655 if (ReduxWidth < 4) 2656 return false; 2657 2658 // We currently only support adds. 2659 if (ReductionOpcode != Instruction::Add && 2660 ReductionOpcode != Instruction::FAdd) 2661 return false; 2662 2663 // Post order traverse the reduction tree starting at B. We only handle true 2664 // trees containing only binary operators. 2665 SmallVector<std::pair<BinaryOperator *, unsigned>, 32> Stack; 2666 Stack.push_back(std::make_pair(B, 0)); 2667 while (!Stack.empty()) { 2668 BinaryOperator *TreeN = Stack.back().first; 2669 unsigned EdgeToVisit = Stack.back().second++; 2670 bool IsReducedValue = TreeN->getOpcode() != ReductionOpcode; 2671 2672 // Only handle trees in the current basic block. 2673 if (TreeN->getParent() != B->getParent()) 2674 return false; 2675 2676 // Each tree node needs to have one user except for the ultimate 2677 // reduction. 2678 if (!TreeN->hasOneUse() && TreeN != B) 2679 return false; 2680 2681 // Postorder visit. 2682 if (EdgeToVisit == 2 || IsReducedValue) { 2683 if (IsReducedValue) { 2684 // Make sure that the opcodes of the operations that we are going to 2685 // reduce match. 2686 if (!ReducedValueOpcode) 2687 ReducedValueOpcode = TreeN->getOpcode(); 2688 else if (ReducedValueOpcode != TreeN->getOpcode()) 2689 return false; 2690 ReducedVals.push_back(TreeN); 2691 } else { 2692 // We need to be able to reassociate the adds. 2693 if (!TreeN->isAssociative()) 2694 return false; 2695 ReductionOps.push_back(TreeN); 2696 } 2697 // Retract. 2698 Stack.pop_back(); 2699 continue; 2700 } 2701 2702 // Visit left or right. 2703 Value *NextV = TreeN->getOperand(EdgeToVisit); 2704 BinaryOperator *Next = dyn_cast<BinaryOperator>(NextV); 2705 if (Next) 2706 Stack.push_back(std::make_pair(Next, 0)); 2707 else if (NextV != Phi) 2708 return false; 2709 } 2710 return true; 2711 } 2712 2713 /// \brief Attempt to vectorize the tree found by 2714 /// matchAssociativeReduction. 2715 bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) { 2716 if (ReducedVals.empty()) 2717 return false; 2718 2719 unsigned NumReducedVals = ReducedVals.size(); 2720 if (NumReducedVals < ReduxWidth) 2721 return false; 2722 2723 Value *VectorizedTree = nullptr; 2724 IRBuilder<> Builder(ReductionRoot); 2725 FastMathFlags Unsafe; 2726 Unsafe.setUnsafeAlgebra(); 2727 Builder.SetFastMathFlags(Unsafe); 2728 unsigned i = 0; 2729 2730 for (; i < NumReducedVals - ReduxWidth + 1; i += ReduxWidth) { 2731 ArrayRef<Value *> ValsToReduce(&ReducedVals[i], ReduxWidth); 2732 V.buildTree(ValsToReduce, ReductionOps); 2733 2734 // Estimate cost. 2735 int Cost = V.getTreeCost() + getReductionCost(TTI, ReducedVals[i]); 2736 if (Cost >= -SLPCostThreshold) 2737 break; 2738 2739 DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost 2740 << ". (HorRdx)\n"); 2741 2742 // Vectorize a tree. 2743 DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc(); 2744 Value *VectorizedRoot = V.vectorizeTree(); 2745 2746 // Emit a reduction.
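// emitReduction (defined below) folds the wide vector produced above into a single scalar using log2(ReduxWidth) shuffle-and-binop steps.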
2747 Value *ReducedSubTree = emitReduction(VectorizedRoot, Builder); 2748 if (VectorizedTree) { 2749 Builder.SetCurrentDebugLocation(Loc); 2750 VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree, 2751 ReducedSubTree, "bin.rdx"); 2752 } else 2753 VectorizedTree = ReducedSubTree; 2754 } 2755 2756 if (VectorizedTree) { 2757 // Finish the reduction. 2758 for (; i < NumReducedVals; ++i) { 2759 Builder.SetCurrentDebugLocation( 2760 cast<Instruction>(ReducedVals[i])->getDebugLoc()); 2761 VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree, 2762 ReducedVals[i]); 2763 } 2764 // Update users. 2765 if (ReductionPHI) { 2766 assert(ReductionRoot && "Need a reduction operation"); 2767 ReductionRoot->setOperand(0, VectorizedTree); 2768 ReductionRoot->setOperand(1, ReductionPHI); 2769 } else 2770 ReductionRoot->replaceAllUsesWith(VectorizedTree); 2771 } 2772 return VectorizedTree != nullptr; 2773 } 2774 2775 private: 2776 2777 /// \brief Calculate the cost of a reduction. 2778 int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal) { 2779 Type *ScalarTy = FirstReducedVal->getType(); 2780 Type *VecTy = VectorType::get(ScalarTy, ReduxWidth); 2781 2782 int PairwiseRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, true); 2783 int SplittingRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, false); 2784 2785 IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost; 2786 int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost; 2787 2788 // The scalar cost is ReduxWidth operations of the scalar type. 2789 int ScalarReduxCost = ReduxWidth * TTI->getArithmeticInstrCost(ReductionOpcode, ScalarTy); 2790 2791 DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost 2792 << " for reduction that starts with " << *FirstReducedVal 2793 << " (It is a " 2794 << (IsPairwiseReduction ? "pairwise" : "splitting") 2795 << " reduction)\n"); 2796 2797 return VecReduxCost - ScalarReduxCost; 2798 } 2799 2800 static Value *createBinOp(IRBuilder<> &Builder, unsigned Opcode, Value *L, 2801 Value *R, const Twine &Name = "") { 2802 if (Opcode == Instruction::FAdd) 2803 return Builder.CreateFAdd(L, R, Name); 2804 return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, L, R, Name); 2805 } 2806 2807 /// \brief Emit a horizontal reduction of the vectorized value.
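/// For example, a splitting reduction with ReduxWidth == 4 and fadd emits, roughly: /// %rdx.shuf = shufflevector <4 x T> %v, <4 x T> undef, <2, 3, undef, undef> /// %bin.rdx = fadd <4 x T> %v, %rdx.shuf /// %rdx.shuf1 = shufflevector <4 x T> %bin.rdx, <4 x T> undef, <1, undef, undef, undef> /// %bin.rdx1 = fadd <4 x T> %bin.rdx, %rdx.shuf1 /// %res = extractelement <4 x T> %bin.rdx1, i32 0 /// (illustrative IR; actual value names follow the "rdx.shuf"/"bin.rdx" patterns used below).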
2808 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder) { 2809 assert(VectorizedValue && "Need to have a vectorized tree node"); 2810 Instruction *ValToReduce = dyn_cast<Instruction>(VectorizedValue); 2811 assert(isPowerOf2_32(ReduxWidth) && 2812 "We only handle power-of-two reductions for now"); 2813 2814 Value *TmpVec = ValToReduce; 2815 for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) { 2816 if (IsPairwiseReduction) { 2817 Value *LeftMask = 2818 createRdxShuffleMask(ReduxWidth, i, true, true, Builder); 2819 Value *RightMask = 2820 createRdxShuffleMask(ReduxWidth, i, true, false, Builder); 2821 2822 Value *LeftShuf = Builder.CreateShuffleVector( 2823 TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l"); 2824 Value *RightShuf = Builder.CreateShuffleVector( 2825 TmpVec, UndefValue::get(TmpVec->getType()), RightMask, 2826 "rdx.shuf.r"); 2827 TmpVec = createBinOp(Builder, ReductionOpcode, LeftShuf, RightShuf, 2828 "bin.rdx"); 2829 } else { 2830 Value *UpperHalf = 2831 createRdxShuffleMask(ReduxWidth, i, false, false, Builder); 2832 Value *Shuf = Builder.CreateShuffleVector( 2833 TmpVec, UndefValue::get(TmpVec->getType()), UpperHalf, "rdx.shuf"); 2834 TmpVec = createBinOp(Builder, ReductionOpcode, TmpVec, Shuf, "bin.rdx"); 2835 } 2836 } 2837 2838 // The result is in the first element of the vector. 2839 return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0)); 2840 } 2841 }; 2842 2843 /// \brief Recognize construction of vectors like 2844 /// %ra = insertelement <4 x float> undef, float %s0, i32 0 2845 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 2846 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 2847 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 2848 /// 2849 /// Returns true if it matches. 2850 /// 2851 static bool findBuildVector(InsertElementInst *FirstInsertElem, 2852 SmallVectorImpl<Value *> &BuildVector, 2853 SmallVectorImpl<Value *> &BuildVectorOpds) { 2854 if (!isa<UndefValue>(FirstInsertElem->getOperand(0))) 2855 return false; 2856 2857 InsertElementInst *IE = FirstInsertElem; 2858 while (true) { 2859 BuildVector.push_back(IE); 2860 BuildVectorOpds.push_back(IE->getOperand(1)); 2861 2862 if (IE->use_empty()) 2863 return false; 2864 2865 InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->user_back()); 2866 if (!NextUse) 2867 return true; 2868 2869 // If this isn't the final use, make sure the next insertelement is the only 2870 // use. It's OK if the final constructed vector is used multiple times. 2871 if (!IE->hasOneUse()) 2872 return false; 2873 2874 IE = NextUse; 2875 } 2876 2877 return false; 2878 } 2879 2880 static bool PhiTypeSorterFunc(Value *V, Value *V2) { 2881 return V->getType() < V2->getType(); 2882 } 2883 2884 bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) { 2885 bool Changed = false; 2886 SmallVector<Value *, 4> Incoming; 2887 SmallSet<Value *, 16> VisitedInstrs; 2888 2889 bool HaveVectorizedPhiNodes = true; 2890 while (HaveVectorizedPhiNodes) { 2891 HaveVectorizedPhiNodes = false; 2892 2893 // Collect the incoming values from the PHIs. 2894 Incoming.clear(); 2895 for (BasicBlock::iterator instr = BB->begin(), ie = BB->end(); instr != ie; 2896 ++instr) { 2897 PHINode *P = dyn_cast<PHINode>(instr); 2898 if (!P) 2899 break; 2900 2901 if (!VisitedInstrs.count(P)) 2902 Incoming.push_back(P); 2903 } 2904 2905 // Sort by type. 2906 std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc); 2907 2908 // Try to vectorize elements based on their type.
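// Incoming was just sorted by type, so phis of the same type now form contiguous runs; each run of length >= 2 is handed to tryToVectorizeList as a single candidate list.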
2909 for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(), 2910 E = Incoming.end(); 2911 IncIt != E;) { 2912 2913 // Look for the next elements with the same type. 2914 SmallVector<Value *, 4>::iterator SameTypeIt = IncIt; 2915 while (SameTypeIt != E && 2916 (*SameTypeIt)->getType() == (*IncIt)->getType()) { 2917 VisitedInstrs.insert(*SameTypeIt); 2918 ++SameTypeIt; 2919 } 2920 2921 // Try to vectorize them. 2922 unsigned NumElts = (SameTypeIt - IncIt); 2923 DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs (" << NumElts << ")\n"); 2924 if (NumElts > 1 && 2925 tryToVectorizeList(ArrayRef<Value *>(IncIt, NumElts), R)) { 2926 // Success. Start over because instructions might have been changed. 2927 HaveVectorizedPhiNodes = true; 2928 Changed = true; 2929 break; 2930 } 2931 2932 // Start over at the next instruction of a different type (or the end). 2933 IncIt = SameTypeIt; 2934 } 2935 } 2936 2937 VisitedInstrs.clear(); 2938 2939 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; it++) { 2940 // We may go through BB multiple times so skip the one we have checked. 2941 if (!VisitedInstrs.insert(it)) 2942 continue; 2943 2944 if (isa<DbgInfoIntrinsic>(it)) 2945 continue; 2946 2947 // Try to vectorize reductions that use PHINodes. 2948 if (PHINode *P = dyn_cast<PHINode>(it)) { 2949 // Check that the PHI is a reduction PHI. 2950 if (P->getNumIncomingValues() != 2) 2951 return Changed; 2952 Value *Rdx = 2953 (P->getIncomingBlock(0) == BB 2954 ? (P->getIncomingValue(0)) 2955 : (P->getIncomingBlock(1) == BB ? P->getIncomingValue(1) 2956 : nullptr)); 2957 // Check if this is a binary operator. 2958 BinaryOperator *BI = dyn_cast_or_null<BinaryOperator>(Rdx); 2959 if (!BI) 2960 continue; 2961 2962 // Try to match and vectorize a horizontal reduction. 2963 HorizontalReduction HorRdx; 2964 if (ShouldVectorizeHor && 2965 HorRdx.matchAssociativeReduction(P, BI, DL) && 2966 HorRdx.tryToReduce(R, TTI)) { 2967 Changed = true; 2968 it = BB->begin(); 2969 e = BB->end(); 2970 continue; 2971 } 2972 2973 Value *Inst = BI->getOperand(0); 2974 if (Inst == P) 2975 Inst = BI->getOperand(1); 2976 2977 if (tryToVectorize(dyn_cast<BinaryOperator>(Inst), R)) { 2978 // We would like to start over since some instructions are deleted 2979 // and the iterator may be invalidated. 2980 Changed = true; 2981 it = BB->begin(); 2982 e = BB->end(); 2983 continue; 2984 } 2985 2986 continue; 2987 } 2988 2989 // Try to vectorize horizontal reductions feeding into a store. 2990 if (ShouldStartVectorizeHorAtStore) 2991 if (StoreInst *SI = dyn_cast<StoreInst>(it)) 2992 if (BinaryOperator *BinOp = 2993 dyn_cast<BinaryOperator>(SI->getValueOperand())) { 2994 HorizontalReduction HorRdx; 2995 if (((HorRdx.matchAssociativeReduction(nullptr, BinOp, DL) && 2996 HorRdx.tryToReduce(R, TTI)) || 2997 tryToVectorize(BinOp, R))) { 2998 Changed = true; 2999 it = BB->begin(); 3000 e = BB->end(); 3001 continue; 3002 } 3003 } 3004 3005 // Try to vectorize trees that start at compare instructions. 3006 if (CmpInst *CI = dyn_cast<CmpInst>(it)) { 3007 if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) { 3008 Changed = true; 3009 // We would like to start over since some instructions are deleted 3010 // and the iterator may be invalidated.
3011 it = BB->begin(); 3012 e = BB->end(); 3013 continue; 3014 } 3015 3016 for (int i = 0; i < 2; ++i) { 3017 if (BinaryOperator *BI = dyn_cast<BinaryOperator>(CI->getOperand(i))) { 3018 if (tryToVectorizePair(BI->getOperand(0), BI->getOperand(1), R)) { 3019 Changed = true; 3020 // We would like to start over since some instructions are deleted 3021 // and the iterator may be invalidated. 3022 it = BB->begin(); 3023 e = BB->end(); 3024 } 3025 } 3026 } 3027 continue; 3028 } 3029 3030 // Try to vectorize trees that start at insertelement instructions. 3031 if (InsertElementInst *FirstInsertElem = dyn_cast<InsertElementInst>(it)) { 3032 SmallVector<Value *, 16> BuildVector; 3033 SmallVector<Value *, 16> BuildVectorOpds; 3034 if (!findBuildVector(FirstInsertElem, BuildVector, BuildVectorOpds)) 3035 continue; 3036 3037 // Vectorize starting with the build vector operands ignoring the 3038 // BuildVector instructions for the purpose of scheduling and user 3039 // extraction. 3040 if (tryToVectorizeList(BuildVectorOpds, R, BuildVector)) { 3041 Changed = true; 3042 it = BB->begin(); 3043 e = BB->end(); 3044 } 3045 3046 continue; 3047 } 3048 } 3049 3050 return Changed; 3051 } 3052 3053 bool SLPVectorizer::vectorizeStoreChains(BoUpSLP &R) { 3054 bool Changed = false; 3055 // Attempt to sort and vectorize each of the store-groups. 3056 for (StoreListMap::iterator it = StoreRefs.begin(), e = StoreRefs.end(); 3057 it != e; ++it) { 3058 if (it->second.size() < 2) 3059 continue; 3060 3061 DEBUG(dbgs() << "SLP: Analyzing a store chain of length " 3062 << it->second.size() << ".\n"); 3063 3064 // Process the stores in chunks of 16. 3065 for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI += 16) { 3066 unsigned Len = std::min<unsigned>(CE - CI, 16); 3067 ArrayRef<StoreInst *> Chunk(&it->second[CI], Len); 3068 Changed |= vectorizeStores(Chunk, -SLPCostThreshold, R); 3069 } 3070 } 3071 return Changed; 3072 } 3073 3074 } // end anonymous namespace 3075 3076 char SLPVectorizer::ID = 0; 3077 static const char lv_name[] = "SLP Vectorizer"; 3078 INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false) 3079 INITIALIZE_AG_DEPENDENCY(AliasAnalysis) 3080 INITIALIZE_AG_DEPENDENCY(TargetTransformInfo) 3081 INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) 3082 INITIALIZE_PASS_DEPENDENCY(LoopSimplify) 3083 INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false) 3084 3085 namespace llvm { 3086 Pass *createSLPVectorizerPass() { return new SLPVectorizer(); } 3087 } 3088