1 //===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // The implementation for the loop memory dependence that was originally 11 // developed for the loop vectorizer. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/Analysis/LoopAccessAnalysis.h" 16 #include "llvm/Analysis/LoopInfo.h" 17 #include "llvm/Analysis/LoopPassManager.h" 18 #include "llvm/Analysis/ScalarEvolutionExpander.h" 19 #include "llvm/Analysis/TargetLibraryInfo.h" 20 #include "llvm/Analysis/ValueTracking.h" 21 #include "llvm/Analysis/VectorUtils.h" 22 #include "llvm/IR/DiagnosticInfo.h" 23 #include "llvm/IR/Dominators.h" 24 #include "llvm/IR/IRBuilder.h" 25 #include "llvm/IR/PassManager.h" 26 #include "llvm/Support/Debug.h" 27 #include "llvm/Support/raw_ostream.h" 28 using namespace llvm; 29 30 #define DEBUG_TYPE "loop-accesses" 31 32 static cl::opt<unsigned, true> 33 VectorizationFactor("force-vector-width", cl::Hidden, 34 cl::desc("Sets the SIMD width. Zero is autoselect."), 35 cl::location(VectorizerParams::VectorizationFactor)); 36 unsigned VectorizerParams::VectorizationFactor; 37 38 static cl::opt<unsigned, true> 39 VectorizationInterleave("force-vector-interleave", cl::Hidden, 40 cl::desc("Sets the vectorization interleave count. " 41 "Zero is autoselect."), 42 cl::location( 43 VectorizerParams::VectorizationInterleave)); 44 unsigned VectorizerParams::VectorizationInterleave; 45 46 static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold( 47 "runtime-memory-check-threshold", cl::Hidden, 48 cl::desc("When performing memory disambiguation checks at runtime do not " 49 "generate more than this number of comparisons (default = 8)."), 50 cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8)); 51 unsigned VectorizerParams::RuntimeMemoryCheckThreshold; 52 53 /// \brief The maximum iterations used to merge memory checks 54 static cl::opt<unsigned> MemoryCheckMergeThreshold( 55 "memory-check-merge-threshold", cl::Hidden, 56 cl::desc("Maximum number of comparisons done when trying to merge " 57 "runtime memory checks. (default = 100)"), 58 cl::init(100)); 59 60 /// Maximum SIMD width. 61 const unsigned VectorizerParams::MaxVectorWidth = 64; 62 63 /// \brief We collect dependences up to this threshold. 64 static cl::opt<unsigned> 65 MaxDependences("max-dependences", cl::Hidden, 66 cl::desc("Maximum number of dependences collected by " 67 "loop-access analysis (default = 100)"), 68 cl::init(100)); 69 70 /// This enables versioning on the strides of symbolically striding memory 71 /// accesses in code like the following. 72 /// for (i = 0; i < N; ++i) 73 /// A[i * Stride1] += B[i * Stride2] ... 74 /// 75 /// Will be roughly translated to 76 /// if (Stride1 == 1 && Stride2 == 1) { 77 /// for (i = 0; i < N; i+=4) 78 /// A[i:i+3] += ... 79 /// } else 80 /// ... 81 static cl::opt<bool> EnableMemAccessVersioning( 82 "enable-mem-access-versioning", cl::init(true), cl::Hidden, 83 cl::desc("Enable symbolic stride memory access versioning")); 84 85 /// \brief Enable store-to-load forwarding conflict detection. This option can 86 /// be disabled for correctness testing. 87 static cl::opt<bool> EnableForwardingConflictDetection( 88 "store-to-load-forwarding-conflict-detection", cl::Hidden, 89 cl::desc("Enable conflict detection in loop-access analysis"), 90 cl::init(true)); 91 92 bool VectorizerParams::isInterleaveForced() { 93 return ::VectorizationInterleave.getNumOccurrences() > 0; 94 } 95 96 void LoopAccessReport::emitAnalysis(const LoopAccessReport &Message, 97 const Function *TheFunction, 98 const Loop *TheLoop, 99 const char *PassName) { 100 DebugLoc DL = TheLoop->getStartLoc(); 101 if (const Instruction *I = Message.getInstr()) 102 DL = I->getDebugLoc(); 103 emitOptimizationRemarkAnalysis(TheFunction->getContext(), PassName, 104 *TheFunction, DL, Message.str()); 105 } 106 107 Value *llvm::stripIntegerCast(Value *V) { 108 if (auto *CI = dyn_cast<CastInst>(V)) 109 if (CI->getOperand(0)->getType()->isIntegerTy()) 110 return CI->getOperand(0); 111 return V; 112 } 113 114 const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, 115 const ValueToValueMap &PtrToStride, 116 Value *Ptr, Value *OrigPtr) { 117 const SCEV *OrigSCEV = PSE.getSCEV(Ptr); 118 119 // If there is an entry in the map return the SCEV of the pointer with the 120 // symbolic stride replaced by one. 121 ValueToValueMap::const_iterator SI = 122 PtrToStride.find(OrigPtr ? OrigPtr : Ptr); 123 if (SI != PtrToStride.end()) { 124 Value *StrideVal = SI->second; 125 126 // Strip casts. 127 StrideVal = stripIntegerCast(StrideVal); 128 129 // Replace symbolic stride by one. 130 Value *One = ConstantInt::get(StrideVal->getType(), 1); 131 ValueToValueMap RewriteMap; 132 RewriteMap[StrideVal] = One; 133 134 ScalarEvolution *SE = PSE.getSE(); 135 const auto *U = cast<SCEVUnknown>(SE->getSCEV(StrideVal)); 136 const auto *CT = 137 static_cast<const SCEVConstant *>(SE->getOne(StrideVal->getType())); 138 139 PSE.addPredicate(*SE->getEqualPredicate(U, CT)); 140 auto *Expr = PSE.getSCEV(Ptr); 141 142 DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV << " by: " << *Expr 143 << "\n"); 144 return Expr; 145 } 146 147 // Otherwise, just return the SCEV of the original pointer. 148 return OrigSCEV; 149 } 150 151 void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr, 152 unsigned DepSetId, unsigned ASId, 153 const ValueToValueMap &Strides, 154 PredicatedScalarEvolution &PSE) { 155 // Get the stride replaced scev. 156 const SCEV *Sc = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); 157 ScalarEvolution *SE = PSE.getSE(); 158 159 const SCEV *ScStart; 160 const SCEV *ScEnd; 161 162 if (SE->isLoopInvariant(Sc, Lp)) 163 ScStart = ScEnd = Sc; 164 else { 165 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc); 166 assert(AR && "Invalid addrec expression"); 167 const SCEV *Ex = PSE.getBackedgeTakenCount(); 168 169 ScStart = AR->getStart(); 170 ScEnd = AR->evaluateAtIteration(Ex, *SE); 171 const SCEV *Step = AR->getStepRecurrence(*SE); 172 173 // For expressions with negative step, the upper bound is ScStart and the 174 // lower bound is ScEnd. 175 if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) { 176 if (CStep->getValue()->isNegative()) 177 std::swap(ScStart, ScEnd); 178 } else { 179 // Fallback case: the step is not constant, but the we can still 180 // get the upper and lower bounds of the interval by using min/max 181 // expressions. 182 ScStart = SE->getUMinExpr(ScStart, ScEnd); 183 ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd); 184 } 185 } 186 187 Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc); 188 } 189 190 SmallVector<RuntimePointerChecking::PointerCheck, 4> 191 RuntimePointerChecking::generateChecks() const { 192 SmallVector<PointerCheck, 4> Checks; 193 194 for (unsigned I = 0; I < CheckingGroups.size(); ++I) { 195 for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) { 196 const RuntimePointerChecking::CheckingPtrGroup &CGI = CheckingGroups[I]; 197 const RuntimePointerChecking::CheckingPtrGroup &CGJ = CheckingGroups[J]; 198 199 if (needsChecking(CGI, CGJ)) 200 Checks.push_back(std::make_pair(&CGI, &CGJ)); 201 } 202 } 203 return Checks; 204 } 205 206 void RuntimePointerChecking::generateChecks( 207 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) { 208 assert(Checks.empty() && "Checks is not empty"); 209 groupChecks(DepCands, UseDependencies); 210 Checks = generateChecks(); 211 } 212 213 bool RuntimePointerChecking::needsChecking(const CheckingPtrGroup &M, 214 const CheckingPtrGroup &N) const { 215 for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I) 216 for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J) 217 if (needsChecking(M.Members[I], N.Members[J])) 218 return true; 219 return false; 220 } 221 222 /// Compare \p I and \p J and return the minimum. 223 /// Return nullptr in case we couldn't find an answer. 224 static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J, 225 ScalarEvolution *SE) { 226 const SCEV *Diff = SE->getMinusSCEV(J, I); 227 const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff); 228 229 if (!C) 230 return nullptr; 231 if (C->getValue()->isNegative()) 232 return J; 233 return I; 234 } 235 236 bool RuntimePointerChecking::CheckingPtrGroup::addPointer(unsigned Index) { 237 const SCEV *Start = RtCheck.Pointers[Index].Start; 238 const SCEV *End = RtCheck.Pointers[Index].End; 239 240 // Compare the starts and ends with the known minimum and maximum 241 // of this set. We need to know how we compare against the min/max 242 // of the set in order to be able to emit memchecks. 243 const SCEV *Min0 = getMinFromExprs(Start, Low, RtCheck.SE); 244 if (!Min0) 245 return false; 246 247 const SCEV *Min1 = getMinFromExprs(End, High, RtCheck.SE); 248 if (!Min1) 249 return false; 250 251 // Update the low bound expression if we've found a new min value. 252 if (Min0 == Start) 253 Low = Start; 254 255 // Update the high bound expression if we've found a new max value. 256 if (Min1 != End) 257 High = End; 258 259 Members.push_back(Index); 260 return true; 261 } 262 263 void RuntimePointerChecking::groupChecks( 264 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) { 265 // We build the groups from dependency candidates equivalence classes 266 // because: 267 // - We know that pointers in the same equivalence class share 268 // the same underlying object and therefore there is a chance 269 // that we can compare pointers 270 // - We wouldn't be able to merge two pointers for which we need 271 // to emit a memcheck. The classes in DepCands are already 272 // conveniently built such that no two pointers in the same 273 // class need checking against each other. 274 275 // We use the following (greedy) algorithm to construct the groups 276 // For every pointer in the equivalence class: 277 // For each existing group: 278 // - if the difference between this pointer and the min/max bounds 279 // of the group is a constant, then make the pointer part of the 280 // group and update the min/max bounds of that group as required. 281 282 CheckingGroups.clear(); 283 284 // If we need to check two pointers to the same underlying object 285 // with a non-constant difference, we shouldn't perform any pointer 286 // grouping with those pointers. This is because we can easily get 287 // into cases where the resulting check would return false, even when 288 // the accesses are safe. 289 // 290 // The following example shows this: 291 // for (i = 0; i < 1000; ++i) 292 // a[5000 + i * m] = a[i] + a[i + 9000] 293 // 294 // Here grouping gives a check of (5000, 5000 + 1000 * m) against 295 // (0, 10000) which is always false. However, if m is 1, there is no 296 // dependence. Not grouping the checks for a[i] and a[i + 9000] allows 297 // us to perform an accurate check in this case. 298 // 299 // The above case requires that we have an UnknownDependence between 300 // accesses to the same underlying object. This cannot happen unless 301 // ShouldRetryWithRuntimeCheck is set, and therefore UseDependencies 302 // is also false. In this case we will use the fallback path and create 303 // separate checking groups for all pointers. 304 305 // If we don't have the dependency partitions, construct a new 306 // checking pointer group for each pointer. This is also required 307 // for correctness, because in this case we can have checking between 308 // pointers to the same underlying object. 309 if (!UseDependencies) { 310 for (unsigned I = 0; I < Pointers.size(); ++I) 311 CheckingGroups.push_back(CheckingPtrGroup(I, *this)); 312 return; 313 } 314 315 unsigned TotalComparisons = 0; 316 317 DenseMap<Value *, unsigned> PositionMap; 318 for (unsigned Index = 0; Index < Pointers.size(); ++Index) 319 PositionMap[Pointers[Index].PointerValue] = Index; 320 321 // We need to keep track of what pointers we've already seen so we 322 // don't process them twice. 323 SmallSet<unsigned, 2> Seen; 324 325 // Go through all equivalence classes, get the "pointer check groups" 326 // and add them to the overall solution. We use the order in which accesses 327 // appear in 'Pointers' to enforce determinism. 328 for (unsigned I = 0; I < Pointers.size(); ++I) { 329 // We've seen this pointer before, and therefore already processed 330 // its equivalence class. 331 if (Seen.count(I)) 332 continue; 333 334 MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue, 335 Pointers[I].IsWritePtr); 336 337 SmallVector<CheckingPtrGroup, 2> Groups; 338 auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access)); 339 340 // Because DepCands is constructed by visiting accesses in the order in 341 // which they appear in alias sets (which is deterministic) and the 342 // iteration order within an equivalence class member is only dependent on 343 // the order in which unions and insertions are performed on the 344 // equivalence class, the iteration order is deterministic. 345 for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end(); 346 MI != ME; ++MI) { 347 unsigned Pointer = PositionMap[MI->getPointer()]; 348 bool Merged = false; 349 // Mark this pointer as seen. 350 Seen.insert(Pointer); 351 352 // Go through all the existing sets and see if we can find one 353 // which can include this pointer. 354 for (CheckingPtrGroup &Group : Groups) { 355 // Don't perform more than a certain amount of comparisons. 356 // This should limit the cost of grouping the pointers to something 357 // reasonable. If we do end up hitting this threshold, the algorithm 358 // will create separate groups for all remaining pointers. 359 if (TotalComparisons > MemoryCheckMergeThreshold) 360 break; 361 362 TotalComparisons++; 363 364 if (Group.addPointer(Pointer)) { 365 Merged = true; 366 break; 367 } 368 } 369 370 if (!Merged) 371 // We couldn't add this pointer to any existing set or the threshold 372 // for the number of comparisons has been reached. Create a new group 373 // to hold the current pointer. 374 Groups.push_back(CheckingPtrGroup(Pointer, *this)); 375 } 376 377 // We've computed the grouped checks for this partition. 378 // Save the results and continue with the next one. 379 std::copy(Groups.begin(), Groups.end(), std::back_inserter(CheckingGroups)); 380 } 381 } 382 383 bool RuntimePointerChecking::arePointersInSamePartition( 384 const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1, 385 unsigned PtrIdx2) { 386 return (PtrToPartition[PtrIdx1] != -1 && 387 PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]); 388 } 389 390 bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const { 391 const PointerInfo &PointerI = Pointers[I]; 392 const PointerInfo &PointerJ = Pointers[J]; 393 394 // No need to check if two readonly pointers intersect. 395 if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr) 396 return false; 397 398 // Only need to check pointers between two different dependency sets. 399 if (PointerI.DependencySetId == PointerJ.DependencySetId) 400 return false; 401 402 // Only need to check pointers in the same alias set. 403 if (PointerI.AliasSetId != PointerJ.AliasSetId) 404 return false; 405 406 return true; 407 } 408 409 void RuntimePointerChecking::printChecks( 410 raw_ostream &OS, const SmallVectorImpl<PointerCheck> &Checks, 411 unsigned Depth) const { 412 unsigned N = 0; 413 for (const auto &Check : Checks) { 414 const auto &First = Check.first->Members, &Second = Check.second->Members; 415 416 OS.indent(Depth) << "Check " << N++ << ":\n"; 417 418 OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n"; 419 for (unsigned K = 0; K < First.size(); ++K) 420 OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n"; 421 422 OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n"; 423 for (unsigned K = 0; K < Second.size(); ++K) 424 OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n"; 425 } 426 } 427 428 void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const { 429 430 OS.indent(Depth) << "Run-time memory checks:\n"; 431 printChecks(OS, Checks, Depth); 432 433 OS.indent(Depth) << "Grouped accesses:\n"; 434 for (unsigned I = 0; I < CheckingGroups.size(); ++I) { 435 const auto &CG = CheckingGroups[I]; 436 437 OS.indent(Depth + 2) << "Group " << &CG << ":\n"; 438 OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High 439 << ")\n"; 440 for (unsigned J = 0; J < CG.Members.size(); ++J) { 441 OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr 442 << "\n"; 443 } 444 } 445 } 446 447 namespace { 448 /// \brief Analyses memory accesses in a loop. 449 /// 450 /// Checks whether run time pointer checks are needed and builds sets for data 451 /// dependence checking. 452 class AccessAnalysis { 453 public: 454 /// \brief Read or write access location. 455 typedef PointerIntPair<Value *, 1, bool> MemAccessInfo; 456 typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet; 457 458 AccessAnalysis(const DataLayout &Dl, AliasAnalysis *AA, LoopInfo *LI, 459 MemoryDepChecker::DepCandidates &DA, 460 PredicatedScalarEvolution &PSE) 461 : DL(Dl), AST(*AA), LI(LI), DepCands(DA), IsRTCheckAnalysisNeeded(false), 462 PSE(PSE) {} 463 464 /// \brief Register a load and whether it is only read from. 465 void addLoad(MemoryLocation &Loc, bool IsReadOnly) { 466 Value *Ptr = const_cast<Value*>(Loc.Ptr); 467 AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags); 468 Accesses.insert(MemAccessInfo(Ptr, false)); 469 if (IsReadOnly) 470 ReadOnlyPtr.insert(Ptr); 471 } 472 473 /// \brief Register a store. 474 void addStore(MemoryLocation &Loc) { 475 Value *Ptr = const_cast<Value*>(Loc.Ptr); 476 AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags); 477 Accesses.insert(MemAccessInfo(Ptr, true)); 478 } 479 480 /// \brief Check whether we can check the pointers at runtime for 481 /// non-intersection. 482 /// 483 /// Returns true if we need no check or if we do and we can generate them 484 /// (i.e. the pointers have computable bounds). 485 bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE, 486 Loop *TheLoop, const ValueToValueMap &Strides, 487 bool ShouldCheckWrap = false); 488 489 /// \brief Goes over all memory accesses, checks whether a RT check is needed 490 /// and builds sets of dependent accesses. 491 void buildDependenceSets() { 492 processMemAccesses(); 493 } 494 495 /// \brief Initial processing of memory accesses determined that we need to 496 /// perform dependency checking. 497 /// 498 /// Note that this can later be cleared if we retry memcheck analysis without 499 /// dependency checking (i.e. ShouldRetryWithRuntimeCheck). 500 bool isDependencyCheckNeeded() { return !CheckDeps.empty(); } 501 502 /// We decided that no dependence analysis would be used. Reset the state. 503 void resetDepChecks(MemoryDepChecker &DepChecker) { 504 CheckDeps.clear(); 505 DepChecker.clearDependences(); 506 } 507 508 MemAccessInfoSet &getDependenciesToCheck() { return CheckDeps; } 509 510 private: 511 typedef SetVector<MemAccessInfo> PtrAccessSet; 512 513 /// \brief Go over all memory access and check whether runtime pointer checks 514 /// are needed and build sets of dependency check candidates. 515 void processMemAccesses(); 516 517 /// Set of all accesses. 518 PtrAccessSet Accesses; 519 520 const DataLayout &DL; 521 522 /// Set of accesses that need a further dependence check. 523 MemAccessInfoSet CheckDeps; 524 525 /// Set of pointers that are read only. 526 SmallPtrSet<Value*, 16> ReadOnlyPtr; 527 528 /// An alias set tracker to partition the access set by underlying object and 529 //intrinsic property (such as TBAA metadata). 530 AliasSetTracker AST; 531 532 LoopInfo *LI; 533 534 /// Sets of potentially dependent accesses - members of one set share an 535 /// underlying pointer. The set "CheckDeps" identfies which sets really need a 536 /// dependence check. 537 MemoryDepChecker::DepCandidates &DepCands; 538 539 /// \brief Initial processing of memory accesses determined that we may need 540 /// to add memchecks. Perform the analysis to determine the necessary checks. 541 /// 542 /// Note that, this is different from isDependencyCheckNeeded. When we retry 543 /// memcheck analysis without dependency checking 544 /// (i.e. ShouldRetryWithRuntimeCheck), isDependencyCheckNeeded is cleared 545 /// while this remains set if we have potentially dependent accesses. 546 bool IsRTCheckAnalysisNeeded; 547 548 /// The SCEV predicate containing all the SCEV-related assumptions. 549 PredicatedScalarEvolution &PSE; 550 }; 551 552 } // end anonymous namespace 553 554 /// \brief Check whether a pointer can participate in a runtime bounds check. 555 static bool hasComputableBounds(PredicatedScalarEvolution &PSE, 556 const ValueToValueMap &Strides, Value *Ptr, 557 Loop *L) { 558 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); 559 560 // The bounds for loop-invariant pointer is trivial. 561 if (PSE.getSE()->isLoopInvariant(PtrScev, L)) 562 return true; 563 564 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev); 565 if (!AR) 566 return false; 567 568 return AR->isAffine(); 569 } 570 571 /// \brief Check whether a pointer address cannot wrap. 572 static bool isNoWrap(PredicatedScalarEvolution &PSE, 573 const ValueToValueMap &Strides, Value *Ptr, Loop *L) { 574 const SCEV *PtrScev = PSE.getSCEV(Ptr); 575 if (PSE.getSE()->isLoopInvariant(PtrScev, L)) 576 return true; 577 578 int64_t Stride = getPtrStride(PSE, Ptr, L, Strides); 579 return Stride == 1; 580 } 581 582 bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck, 583 ScalarEvolution *SE, Loop *TheLoop, 584 const ValueToValueMap &StridesMap, 585 bool ShouldCheckWrap) { 586 // Find pointers with computable bounds. We are going to use this information 587 // to place a runtime bound check. 588 bool CanDoRT = true; 589 590 bool NeedRTCheck = false; 591 if (!IsRTCheckAnalysisNeeded) return true; 592 593 bool IsDepCheckNeeded = isDependencyCheckNeeded(); 594 595 // We assign a consecutive id to access from different alias sets. 596 // Accesses between different groups doesn't need to be checked. 597 unsigned ASId = 1; 598 for (auto &AS : AST) { 599 int NumReadPtrChecks = 0; 600 int NumWritePtrChecks = 0; 601 602 // We assign consecutive id to access from different dependence sets. 603 // Accesses within the same set don't need a runtime check. 604 unsigned RunningDepId = 1; 605 DenseMap<Value *, unsigned> DepSetId; 606 607 for (auto A : AS) { 608 Value *Ptr = A.getValue(); 609 bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true)); 610 MemAccessInfo Access(Ptr, IsWrite); 611 612 if (IsWrite) 613 ++NumWritePtrChecks; 614 else 615 ++NumReadPtrChecks; 616 617 if (hasComputableBounds(PSE, StridesMap, Ptr, TheLoop) && 618 // When we run after a failing dependency check we have to make sure 619 // we don't have wrapping pointers. 620 (!ShouldCheckWrap || isNoWrap(PSE, StridesMap, Ptr, TheLoop))) { 621 // The id of the dependence set. 622 unsigned DepId; 623 624 if (IsDepCheckNeeded) { 625 Value *Leader = DepCands.getLeaderValue(Access).getPointer(); 626 unsigned &LeaderId = DepSetId[Leader]; 627 if (!LeaderId) 628 LeaderId = RunningDepId++; 629 DepId = LeaderId; 630 } else 631 // Each access has its own dependence set. 632 DepId = RunningDepId++; 633 634 RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap, PSE); 635 636 DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n'); 637 } else { 638 DEBUG(dbgs() << "LAA: Can't find bounds for ptr:" << *Ptr << '\n'); 639 CanDoRT = false; 640 } 641 } 642 643 // If we have at least two writes or one write and a read then we need to 644 // check them. But there is no need to checks if there is only one 645 // dependence set for this alias set. 646 // 647 // Note that this function computes CanDoRT and NeedRTCheck independently. 648 // For example CanDoRT=false, NeedRTCheck=false means that we have a pointer 649 // for which we couldn't find the bounds but we don't actually need to emit 650 // any checks so it does not matter. 651 if (!(IsDepCheckNeeded && CanDoRT && RunningDepId == 2)) 652 NeedRTCheck |= (NumWritePtrChecks >= 2 || (NumReadPtrChecks >= 1 && 653 NumWritePtrChecks >= 1)); 654 655 ++ASId; 656 } 657 658 // If the pointers that we would use for the bounds comparison have different 659 // address spaces, assume the values aren't directly comparable, so we can't 660 // use them for the runtime check. We also have to assume they could 661 // overlap. In the future there should be metadata for whether address spaces 662 // are disjoint. 663 unsigned NumPointers = RtCheck.Pointers.size(); 664 for (unsigned i = 0; i < NumPointers; ++i) { 665 for (unsigned j = i + 1; j < NumPointers; ++j) { 666 // Only need to check pointers between two different dependency sets. 667 if (RtCheck.Pointers[i].DependencySetId == 668 RtCheck.Pointers[j].DependencySetId) 669 continue; 670 // Only need to check pointers in the same alias set. 671 if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId) 672 continue; 673 674 Value *PtrI = RtCheck.Pointers[i].PointerValue; 675 Value *PtrJ = RtCheck.Pointers[j].PointerValue; 676 677 unsigned ASi = PtrI->getType()->getPointerAddressSpace(); 678 unsigned ASj = PtrJ->getType()->getPointerAddressSpace(); 679 if (ASi != ASj) { 680 DEBUG(dbgs() << "LAA: Runtime check would require comparison between" 681 " different address spaces\n"); 682 return false; 683 } 684 } 685 } 686 687 if (NeedRTCheck && CanDoRT) 688 RtCheck.generateChecks(DepCands, IsDepCheckNeeded); 689 690 DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks() 691 << " pointer comparisons.\n"); 692 693 RtCheck.Need = NeedRTCheck; 694 695 bool CanDoRTIfNeeded = !NeedRTCheck || CanDoRT; 696 if (!CanDoRTIfNeeded) 697 RtCheck.reset(); 698 return CanDoRTIfNeeded; 699 } 700 701 void AccessAnalysis::processMemAccesses() { 702 // We process the set twice: first we process read-write pointers, last we 703 // process read-only pointers. This allows us to skip dependence tests for 704 // read-only pointers. 705 706 DEBUG(dbgs() << "LAA: Processing memory accesses...\n"); 707 DEBUG(dbgs() << " AST: "; AST.dump()); 708 DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n"); 709 DEBUG({ 710 for (auto A : Accesses) 711 dbgs() << "\t" << *A.getPointer() << " (" << 712 (A.getInt() ? "write" : (ReadOnlyPtr.count(A.getPointer()) ? 713 "read-only" : "read")) << ")\n"; 714 }); 715 716 // The AliasSetTracker has nicely partitioned our pointers by metadata 717 // compatibility and potential for underlying-object overlap. As a result, we 718 // only need to check for potential pointer dependencies within each alias 719 // set. 720 for (auto &AS : AST) { 721 // Note that both the alias-set tracker and the alias sets themselves used 722 // linked lists internally and so the iteration order here is deterministic 723 // (matching the original instruction order within each set). 724 725 bool SetHasWrite = false; 726 727 // Map of pointers to last access encountered. 728 typedef DenseMap<Value*, MemAccessInfo> UnderlyingObjToAccessMap; 729 UnderlyingObjToAccessMap ObjToLastAccess; 730 731 // Set of access to check after all writes have been processed. 732 PtrAccessSet DeferredAccesses; 733 734 // Iterate over each alias set twice, once to process read/write pointers, 735 // and then to process read-only pointers. 736 for (int SetIteration = 0; SetIteration < 2; ++SetIteration) { 737 bool UseDeferred = SetIteration > 0; 738 PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses; 739 740 for (auto AV : AS) { 741 Value *Ptr = AV.getValue(); 742 743 // For a single memory access in AliasSetTracker, Accesses may contain 744 // both read and write, and they both need to be handled for CheckDeps. 745 for (auto AC : S) { 746 if (AC.getPointer() != Ptr) 747 continue; 748 749 bool IsWrite = AC.getInt(); 750 751 // If we're using the deferred access set, then it contains only 752 // reads. 753 bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite; 754 if (UseDeferred && !IsReadOnlyPtr) 755 continue; 756 // Otherwise, the pointer must be in the PtrAccessSet, either as a 757 // read or a write. 758 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite || 759 S.count(MemAccessInfo(Ptr, false))) && 760 "Alias-set pointer not in the access set?"); 761 762 MemAccessInfo Access(Ptr, IsWrite); 763 DepCands.insert(Access); 764 765 // Memorize read-only pointers for later processing and skip them in 766 // the first round (they need to be checked after we have seen all 767 // write pointers). Note: we also mark pointer that are not 768 // consecutive as "read-only" pointers (so that we check 769 // "a[b[i]] +="). Hence, we need the second check for "!IsWrite". 770 if (!UseDeferred && IsReadOnlyPtr) { 771 DeferredAccesses.insert(Access); 772 continue; 773 } 774 775 // If this is a write - check other reads and writes for conflicts. If 776 // this is a read only check other writes for conflicts (but only if 777 // there is no other write to the ptr - this is an optimization to 778 // catch "a[i] = a[i] + " without having to do a dependence check). 779 if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) { 780 CheckDeps.insert(Access); 781 IsRTCheckAnalysisNeeded = true; 782 } 783 784 if (IsWrite) 785 SetHasWrite = true; 786 787 // Create sets of pointers connected by a shared alias set and 788 // underlying object. 789 typedef SmallVector<Value *, 16> ValueVector; 790 ValueVector TempObjects; 791 792 GetUnderlyingObjects(Ptr, TempObjects, DL, LI); 793 DEBUG(dbgs() << "Underlying objects for pointer " << *Ptr << "\n"); 794 for (Value *UnderlyingObj : TempObjects) { 795 // nullptr never alias, don't join sets for pointer that have "null" 796 // in their UnderlyingObjects list. 797 if (isa<ConstantPointerNull>(UnderlyingObj)) 798 continue; 799 800 UnderlyingObjToAccessMap::iterator Prev = 801 ObjToLastAccess.find(UnderlyingObj); 802 if (Prev != ObjToLastAccess.end()) 803 DepCands.unionSets(Access, Prev->second); 804 805 ObjToLastAccess[UnderlyingObj] = Access; 806 DEBUG(dbgs() << " " << *UnderlyingObj << "\n"); 807 } 808 } 809 } 810 } 811 } 812 } 813 814 static bool isInBoundsGep(Value *Ptr) { 815 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) 816 return GEP->isInBounds(); 817 return false; 818 } 819 820 /// \brief Return true if an AddRec pointer \p Ptr is unsigned non-wrapping, 821 /// i.e. monotonically increasing/decreasing. 822 static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR, 823 PredicatedScalarEvolution &PSE, const Loop *L) { 824 // FIXME: This should probably only return true for NUW. 825 if (AR->getNoWrapFlags(SCEV::NoWrapMask)) 826 return true; 827 828 // Scalar evolution does not propagate the non-wrapping flags to values that 829 // are derived from a non-wrapping induction variable because non-wrapping 830 // could be flow-sensitive. 831 // 832 // Look through the potentially overflowing instruction to try to prove 833 // non-wrapping for the *specific* value of Ptr. 834 835 // The arithmetic implied by an inbounds GEP can't overflow. 836 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr); 837 if (!GEP || !GEP->isInBounds()) 838 return false; 839 840 // Make sure there is only one non-const index and analyze that. 841 Value *NonConstIndex = nullptr; 842 for (Value *Index : make_range(GEP->idx_begin(), GEP->idx_end())) 843 if (!isa<ConstantInt>(Index)) { 844 if (NonConstIndex) 845 return false; 846 NonConstIndex = Index; 847 } 848 if (!NonConstIndex) 849 // The recurrence is on the pointer, ignore for now. 850 return false; 851 852 // The index in GEP is signed. It is non-wrapping if it's derived from a NSW 853 // AddRec using a NSW operation. 854 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex)) 855 if (OBO->hasNoSignedWrap() && 856 // Assume constant for other the operand so that the AddRec can be 857 // easily found. 858 isa<ConstantInt>(OBO->getOperand(1))) { 859 auto *OpScev = PSE.getSCEV(OBO->getOperand(0)); 860 861 if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev)) 862 return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW); 863 } 864 865 return false; 866 } 867 868 /// \brief Check whether the access through \p Ptr has a constant stride. 869 int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr, 870 const Loop *Lp, const ValueToValueMap &StridesMap, 871 bool Assume) { 872 Type *Ty = Ptr->getType(); 873 assert(Ty->isPointerTy() && "Unexpected non-ptr"); 874 875 // Make sure that the pointer does not point to aggregate types. 876 auto *PtrTy = cast<PointerType>(Ty); 877 if (PtrTy->getElementType()->isAggregateType()) { 878 DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type" << *Ptr 879 << "\n"); 880 return 0; 881 } 882 883 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr); 884 885 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev); 886 if (Assume && !AR) 887 AR = PSE.getAsAddRec(Ptr); 888 889 if (!AR) { 890 DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr 891 << " SCEV: " << *PtrScev << "\n"); 892 return 0; 893 } 894 895 // The accesss function must stride over the innermost loop. 896 if (Lp != AR->getLoop()) { 897 DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop " << 898 *Ptr << " SCEV: " << *AR << "\n"); 899 return 0; 900 } 901 902 // The address calculation must not wrap. Otherwise, a dependence could be 903 // inverted. 904 // An inbounds getelementptr that is a AddRec with a unit stride 905 // cannot wrap per definition. The unit stride requirement is checked later. 906 // An getelementptr without an inbounds attribute and unit stride would have 907 // to access the pointer value "0" which is undefined behavior in address 908 // space 0, therefore we can also vectorize this case. 909 bool IsInBoundsGEP = isInBoundsGep(Ptr); 910 bool IsNoWrapAddRec = 911 PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW) || 912 isNoWrapAddRec(Ptr, AR, PSE, Lp); 913 bool IsInAddressSpaceZero = PtrTy->getAddressSpace() == 0; 914 if (!IsNoWrapAddRec && !IsInBoundsGEP && !IsInAddressSpaceZero) { 915 if (Assume) { 916 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW); 917 IsNoWrapAddRec = true; 918 DEBUG(dbgs() << "LAA: Pointer may wrap in the address space:\n" 919 << "LAA: Pointer: " << *Ptr << "\n" 920 << "LAA: SCEV: " << *AR << "\n" 921 << "LAA: Added an overflow assumption\n"); 922 } else { 923 DEBUG(dbgs() << "LAA: Bad stride - Pointer may wrap in the address space " 924 << *Ptr << " SCEV: " << *AR << "\n"); 925 return 0; 926 } 927 } 928 929 // Check the step is constant. 930 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE()); 931 932 // Calculate the pointer stride and check if it is constant. 933 const SCEVConstant *C = dyn_cast<SCEVConstant>(Step); 934 if (!C) { 935 DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr << 936 " SCEV: " << *AR << "\n"); 937 return 0; 938 } 939 940 auto &DL = Lp->getHeader()->getModule()->getDataLayout(); 941 int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType()); 942 const APInt &APStepVal = C->getAPInt(); 943 944 // Huge step value - give up. 945 if (APStepVal.getBitWidth() > 64) 946 return 0; 947 948 int64_t StepVal = APStepVal.getSExtValue(); 949 950 // Strided access. 951 int64_t Stride = StepVal / Size; 952 int64_t Rem = StepVal % Size; 953 if (Rem) 954 return 0; 955 956 // If the SCEV could wrap but we have an inbounds gep with a unit stride we 957 // know we can't "wrap around the address space". In case of address space 958 // zero we know that this won't happen without triggering undefined behavior. 959 if (!IsNoWrapAddRec && (IsInBoundsGEP || IsInAddressSpaceZero) && 960 Stride != 1 && Stride != -1) { 961 if (Assume) { 962 // We can avoid this case by adding a run-time check. 963 DEBUG(dbgs() << "LAA: Non unit strided pointer which is not either " 964 << "inbouds or in address space 0 may wrap:\n" 965 << "LAA: Pointer: " << *Ptr << "\n" 966 << "LAA: SCEV: " << *AR << "\n" 967 << "LAA: Added an overflow assumption\n"); 968 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW); 969 } else 970 return 0; 971 } 972 973 return Stride; 974 } 975 976 /// Take the pointer operand from the Load/Store instruction. 977 /// Returns NULL if this is not a valid Load/Store instruction. 978 static Value *getPointerOperand(Value *I) { 979 if (auto *LI = dyn_cast<LoadInst>(I)) 980 return LI->getPointerOperand(); 981 if (auto *SI = dyn_cast<StoreInst>(I)) 982 return SI->getPointerOperand(); 983 return nullptr; 984 } 985 986 /// Take the address space operand from the Load/Store instruction. 987 /// Returns -1 if this is not a valid Load/Store instruction. 988 static unsigned getAddressSpaceOperand(Value *I) { 989 if (LoadInst *L = dyn_cast<LoadInst>(I)) 990 return L->getPointerAddressSpace(); 991 if (StoreInst *S = dyn_cast<StoreInst>(I)) 992 return S->getPointerAddressSpace(); 993 return -1; 994 } 995 996 /// Returns true if the memory operations \p A and \p B are consecutive. 997 bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, 998 ScalarEvolution &SE, bool CheckType) { 999 Value *PtrA = getPointerOperand(A); 1000 Value *PtrB = getPointerOperand(B); 1001 unsigned ASA = getAddressSpaceOperand(A); 1002 unsigned ASB = getAddressSpaceOperand(B); 1003 1004 // Check that the address spaces match and that the pointers are valid. 1005 if (!PtrA || !PtrB || (ASA != ASB)) 1006 return false; 1007 1008 // Make sure that A and B are different pointers. 1009 if (PtrA == PtrB) 1010 return false; 1011 1012 // Make sure that A and B have the same type if required. 1013 if(CheckType && PtrA->getType() != PtrB->getType()) 1014 return false; 1015 1016 unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA); 1017 Type *Ty = cast<PointerType>(PtrA->getType())->getElementType(); 1018 APInt Size(PtrBitWidth, DL.getTypeStoreSize(Ty)); 1019 1020 APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0); 1021 PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA); 1022 PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB); 1023 1024 // OffsetDelta = OffsetB - OffsetA; 1025 const SCEV *OffsetSCEVA = SE.getConstant(OffsetA); 1026 const SCEV *OffsetSCEVB = SE.getConstant(OffsetB); 1027 const SCEV *OffsetDeltaSCEV = SE.getMinusSCEV(OffsetSCEVB, OffsetSCEVA); 1028 const SCEVConstant *OffsetDeltaC = dyn_cast<SCEVConstant>(OffsetDeltaSCEV); 1029 const APInt &OffsetDelta = OffsetDeltaC->getAPInt(); 1030 // Check if they are based on the same pointer. That makes the offsets 1031 // sufficient. 1032 if (PtrA == PtrB) 1033 return OffsetDelta == Size; 1034 1035 // Compute the necessary base pointer delta to have the necessary final delta 1036 // equal to the size. 1037 // BaseDelta = Size - OffsetDelta; 1038 const SCEV *SizeSCEV = SE.getConstant(Size); 1039 const SCEV *BaseDelta = SE.getMinusSCEV(SizeSCEV, OffsetDeltaSCEV); 1040 1041 // Otherwise compute the distance with SCEV between the base pointers. 1042 const SCEV *PtrSCEVA = SE.getSCEV(PtrA); 1043 const SCEV *PtrSCEVB = SE.getSCEV(PtrB); 1044 const SCEV *X = SE.getAddExpr(PtrSCEVA, BaseDelta); 1045 return X == PtrSCEVB; 1046 } 1047 1048 bool MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) { 1049 switch (Type) { 1050 case NoDep: 1051 case Forward: 1052 case BackwardVectorizable: 1053 return true; 1054 1055 case Unknown: 1056 case ForwardButPreventsForwarding: 1057 case Backward: 1058 case BackwardVectorizableButPreventsForwarding: 1059 return false; 1060 } 1061 llvm_unreachable("unexpected DepType!"); 1062 } 1063 1064 bool MemoryDepChecker::Dependence::isBackward() const { 1065 switch (Type) { 1066 case NoDep: 1067 case Forward: 1068 case ForwardButPreventsForwarding: 1069 case Unknown: 1070 return false; 1071 1072 case BackwardVectorizable: 1073 case Backward: 1074 case BackwardVectorizableButPreventsForwarding: 1075 return true; 1076 } 1077 llvm_unreachable("unexpected DepType!"); 1078 } 1079 1080 bool MemoryDepChecker::Dependence::isPossiblyBackward() const { 1081 return isBackward() || Type == Unknown; 1082 } 1083 1084 bool MemoryDepChecker::Dependence::isForward() const { 1085 switch (Type) { 1086 case Forward: 1087 case ForwardButPreventsForwarding: 1088 return true; 1089 1090 case NoDep: 1091 case Unknown: 1092 case BackwardVectorizable: 1093 case Backward: 1094 case BackwardVectorizableButPreventsForwarding: 1095 return false; 1096 } 1097 llvm_unreachable("unexpected DepType!"); 1098 } 1099 1100 bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance, 1101 uint64_t TypeByteSize) { 1102 // If loads occur at a distance that is not a multiple of a feasible vector 1103 // factor store-load forwarding does not take place. 1104 // Positive dependences might cause troubles because vectorizing them might 1105 // prevent store-load forwarding making vectorized code run a lot slower. 1106 // a[i] = a[i-3] ^ a[i-8]; 1107 // The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and 1108 // hence on your typical architecture store-load forwarding does not take 1109 // place. Vectorizing in such cases does not make sense. 1110 // Store-load forwarding distance. 1111 1112 // After this many iterations store-to-load forwarding conflicts should not 1113 // cause any slowdowns. 1114 const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize; 1115 // Maximum vector factor. 1116 uint64_t MaxVFWithoutSLForwardIssues = std::min( 1117 VectorizerParams::MaxVectorWidth * TypeByteSize, MaxSafeDepDistBytes); 1118 1119 // Compute the smallest VF at which the store and load would be misaligned. 1120 for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues; 1121 VF *= 2) { 1122 // If the number of vector iteration between the store and the load are 1123 // small we could incur conflicts. 1124 if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) { 1125 MaxVFWithoutSLForwardIssues = (VF >>= 1); 1126 break; 1127 } 1128 } 1129 1130 if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) { 1131 DEBUG(dbgs() << "LAA: Distance " << Distance 1132 << " that could cause a store-load forwarding conflict\n"); 1133 return true; 1134 } 1135 1136 if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes && 1137 MaxVFWithoutSLForwardIssues != 1138 VectorizerParams::MaxVectorWidth * TypeByteSize) 1139 MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues; 1140 return false; 1141 } 1142 1143 /// \brief Check the dependence for two accesses with the same stride \p Stride. 1144 /// \p Distance is the positive distance and \p TypeByteSize is type size in 1145 /// bytes. 1146 /// 1147 /// \returns true if they are independent. 1148 static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride, 1149 uint64_t TypeByteSize) { 1150 assert(Stride > 1 && "The stride must be greater than 1"); 1151 assert(TypeByteSize > 0 && "The type size in byte must be non-zero"); 1152 assert(Distance > 0 && "The distance must be non-zero"); 1153 1154 // Skip if the distance is not multiple of type byte size. 1155 if (Distance % TypeByteSize) 1156 return false; 1157 1158 uint64_t ScaledDist = Distance / TypeByteSize; 1159 1160 // No dependence if the scaled distance is not multiple of the stride. 1161 // E.g. 1162 // for (i = 0; i < 1024 ; i += 4) 1163 // A[i+2] = A[i] + 1; 1164 // 1165 // Two accesses in memory (scaled distance is 2, stride is 4): 1166 // | A[0] | | | | A[4] | | | | 1167 // | | | A[2] | | | | A[6] | | 1168 // 1169 // E.g. 1170 // for (i = 0; i < 1024 ; i += 3) 1171 // A[i+4] = A[i] + 1; 1172 // 1173 // Two accesses in memory (scaled distance is 4, stride is 3): 1174 // | A[0] | | | A[3] | | | A[6] | | | 1175 // | | | | | A[4] | | | A[7] | | 1176 return ScaledDist % Stride; 1177 } 1178 1179 MemoryDepChecker::Dependence::DepType 1180 MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx, 1181 const MemAccessInfo &B, unsigned BIdx, 1182 const ValueToValueMap &Strides) { 1183 assert (AIdx < BIdx && "Must pass arguments in program order"); 1184 1185 Value *APtr = A.getPointer(); 1186 Value *BPtr = B.getPointer(); 1187 bool AIsWrite = A.getInt(); 1188 bool BIsWrite = B.getInt(); 1189 1190 // Two reads are independent. 1191 if (!AIsWrite && !BIsWrite) 1192 return Dependence::NoDep; 1193 1194 // We cannot check pointers in different address spaces. 1195 if (APtr->getType()->getPointerAddressSpace() != 1196 BPtr->getType()->getPointerAddressSpace()) 1197 return Dependence::Unknown; 1198 1199 int64_t StrideAPtr = getPtrStride(PSE, APtr, InnermostLoop, Strides, true); 1200 int64_t StrideBPtr = getPtrStride(PSE, BPtr, InnermostLoop, Strides, true); 1201 1202 const SCEV *Src = PSE.getSCEV(APtr); 1203 const SCEV *Sink = PSE.getSCEV(BPtr); 1204 1205 // If the induction step is negative we have to invert source and sink of the 1206 // dependence. 1207 if (StrideAPtr < 0) { 1208 std::swap(APtr, BPtr); 1209 std::swap(Src, Sink); 1210 std::swap(AIsWrite, BIsWrite); 1211 std::swap(AIdx, BIdx); 1212 std::swap(StrideAPtr, StrideBPtr); 1213 } 1214 1215 const SCEV *Dist = PSE.getSE()->getMinusSCEV(Sink, Src); 1216 1217 DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink 1218 << "(Induction step: " << StrideAPtr << ")\n"); 1219 DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to " 1220 << *InstMap[BIdx] << ": " << *Dist << "\n"); 1221 1222 // Need accesses with constant stride. We don't want to vectorize 1223 // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in 1224 // the address space. 1225 if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr){ 1226 DEBUG(dbgs() << "Pointer access with non-constant stride\n"); 1227 return Dependence::Unknown; 1228 } 1229 1230 const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist); 1231 if (!C) { 1232 DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n"); 1233 ShouldRetryWithRuntimeCheck = true; 1234 return Dependence::Unknown; 1235 } 1236 1237 Type *ATy = APtr->getType()->getPointerElementType(); 1238 Type *BTy = BPtr->getType()->getPointerElementType(); 1239 auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout(); 1240 uint64_t TypeByteSize = DL.getTypeAllocSize(ATy); 1241 1242 const APInt &Val = C->getAPInt(); 1243 int64_t Distance = Val.getSExtValue(); 1244 uint64_t Stride = std::abs(StrideAPtr); 1245 1246 // Attempt to prove strided accesses independent. 1247 if (std::abs(Distance) > 0 && Stride > 1 && ATy == BTy && 1248 areStridedAccessesIndependent(std::abs(Distance), Stride, TypeByteSize)) { 1249 DEBUG(dbgs() << "LAA: Strided accesses are independent\n"); 1250 return Dependence::NoDep; 1251 } 1252 1253 // Negative distances are not plausible dependencies. 1254 if (Val.isNegative()) { 1255 bool IsTrueDataDependence = (AIsWrite && !BIsWrite); 1256 if (IsTrueDataDependence && EnableForwardingConflictDetection && 1257 (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) || 1258 ATy != BTy)) { 1259 DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n"); 1260 return Dependence::ForwardButPreventsForwarding; 1261 } 1262 1263 DEBUG(dbgs() << "LAA: Dependence is negative\n"); 1264 return Dependence::Forward; 1265 } 1266 1267 // Write to the same location with the same size. 1268 // Could be improved to assert type sizes are the same (i32 == float, etc). 1269 if (Val == 0) { 1270 if (ATy == BTy) 1271 return Dependence::Forward; 1272 DEBUG(dbgs() << "LAA: Zero dependence difference but different types\n"); 1273 return Dependence::Unknown; 1274 } 1275 1276 assert(Val.isStrictlyPositive() && "Expect a positive value"); 1277 1278 if (ATy != BTy) { 1279 DEBUG(dbgs() << 1280 "LAA: ReadWrite-Write positive dependency with different types\n"); 1281 return Dependence::Unknown; 1282 } 1283 1284 // Bail out early if passed-in parameters make vectorization not feasible. 1285 unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ? 1286 VectorizerParams::VectorizationFactor : 1); 1287 unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ? 1288 VectorizerParams::VectorizationInterleave : 1); 1289 // The minimum number of iterations for a vectorized/unrolled version. 1290 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U); 1291 1292 // It's not vectorizable if the distance is smaller than the minimum distance 1293 // needed for a vectroized/unrolled version. Vectorizing one iteration in 1294 // front needs TypeByteSize * Stride. Vectorizing the last iteration needs 1295 // TypeByteSize (No need to plus the last gap distance). 1296 // 1297 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes. 1298 // foo(int *A) { 1299 // int *B = (int *)((char *)A + 14); 1300 // for (i = 0 ; i < 1024 ; i += 2) 1301 // B[i] = A[i] + 1; 1302 // } 1303 // 1304 // Two accesses in memory (stride is 2): 1305 // | A[0] | | A[2] | | A[4] | | A[6] | | 1306 // | B[0] | | B[2] | | B[4] | 1307 // 1308 // Distance needs for vectorizing iterations except the last iteration: 1309 // 4 * 2 * (MinNumIter - 1). Distance needs for the last iteration: 4. 1310 // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4. 1311 // 1312 // If MinNumIter is 2, it is vectorizable as the minimum distance needed is 1313 // 12, which is less than distance. 1314 // 1315 // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4), 1316 // the minimum distance needed is 28, which is greater than distance. It is 1317 // not safe to do vectorization. 1318 uint64_t MinDistanceNeeded = 1319 TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize; 1320 if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) { 1321 DEBUG(dbgs() << "LAA: Failure because of positive distance " << Distance 1322 << '\n'); 1323 return Dependence::Backward; 1324 } 1325 1326 // Unsafe if the minimum distance needed is greater than max safe distance. 1327 if (MinDistanceNeeded > MaxSafeDepDistBytes) { 1328 DEBUG(dbgs() << "LAA: Failure because it needs at least " 1329 << MinDistanceNeeded << " size in bytes"); 1330 return Dependence::Backward; 1331 } 1332 1333 // Positive distance bigger than max vectorization factor. 1334 // FIXME: Should use max factor instead of max distance in bytes, which could 1335 // not handle different types. 1336 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes. 1337 // void foo (int *A, char *B) { 1338 // for (unsigned i = 0; i < 1024; i++) { 1339 // A[i+2] = A[i] + 1; 1340 // B[i+2] = B[i] + 1; 1341 // } 1342 // } 1343 // 1344 // This case is currently unsafe according to the max safe distance. If we 1345 // analyze the two accesses on array B, the max safe dependence distance 1346 // is 2. Then we analyze the accesses on array A, the minimum distance needed 1347 // is 8, which is less than 2 and forbidden vectorization, But actually 1348 // both A and B could be vectorized by 2 iterations. 1349 MaxSafeDepDistBytes = 1350 std::min(static_cast<uint64_t>(Distance), MaxSafeDepDistBytes); 1351 1352 bool IsTrueDataDependence = (!AIsWrite && BIsWrite); 1353 if (IsTrueDataDependence && EnableForwardingConflictDetection && 1354 couldPreventStoreLoadForward(Distance, TypeByteSize)) 1355 return Dependence::BackwardVectorizableButPreventsForwarding; 1356 1357 DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue() 1358 << " with max VF = " 1359 << MaxSafeDepDistBytes / (TypeByteSize * Stride) << '\n'); 1360 1361 return Dependence::BackwardVectorizable; 1362 } 1363 1364 bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets, 1365 MemAccessInfoSet &CheckDeps, 1366 const ValueToValueMap &Strides) { 1367 1368 MaxSafeDepDistBytes = -1; 1369 while (!CheckDeps.empty()) { 1370 MemAccessInfo CurAccess = *CheckDeps.begin(); 1371 1372 // Get the relevant memory access set. 1373 EquivalenceClasses<MemAccessInfo>::iterator I = 1374 AccessSets.findValue(AccessSets.getLeaderValue(CurAccess)); 1375 1376 // Check accesses within this set. 1377 EquivalenceClasses<MemAccessInfo>::member_iterator AI = 1378 AccessSets.member_begin(I); 1379 EquivalenceClasses<MemAccessInfo>::member_iterator AE = 1380 AccessSets.member_end(); 1381 1382 // Check every access pair. 1383 while (AI != AE) { 1384 CheckDeps.erase(*AI); 1385 EquivalenceClasses<MemAccessInfo>::member_iterator OI = std::next(AI); 1386 while (OI != AE) { 1387 // Check every accessing instruction pair in program order. 1388 for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(), 1389 I1E = Accesses[*AI].end(); I1 != I1E; ++I1) 1390 for (std::vector<unsigned>::iterator I2 = Accesses[*OI].begin(), 1391 I2E = Accesses[*OI].end(); I2 != I2E; ++I2) { 1392 auto A = std::make_pair(&*AI, *I1); 1393 auto B = std::make_pair(&*OI, *I2); 1394 1395 assert(*I1 != *I2); 1396 if (*I1 > *I2) 1397 std::swap(A, B); 1398 1399 Dependence::DepType Type = 1400 isDependent(*A.first, A.second, *B.first, B.second, Strides); 1401 SafeForVectorization &= Dependence::isSafeForVectorization(Type); 1402 1403 // Gather dependences unless we accumulated MaxDependences 1404 // dependences. In that case return as soon as we find the first 1405 // unsafe dependence. This puts a limit on this quadratic 1406 // algorithm. 1407 if (RecordDependences) { 1408 if (Type != Dependence::NoDep) 1409 Dependences.push_back(Dependence(A.second, B.second, Type)); 1410 1411 if (Dependences.size() >= MaxDependences) { 1412 RecordDependences = false; 1413 Dependences.clear(); 1414 DEBUG(dbgs() << "Too many dependences, stopped recording\n"); 1415 } 1416 } 1417 if (!RecordDependences && !SafeForVectorization) 1418 return false; 1419 } 1420 ++OI; 1421 } 1422 AI++; 1423 } 1424 } 1425 1426 DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n"); 1427 return SafeForVectorization; 1428 } 1429 1430 SmallVector<Instruction *, 4> 1431 MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const { 1432 MemAccessInfo Access(Ptr, isWrite); 1433 auto &IndexVector = Accesses.find(Access)->second; 1434 1435 SmallVector<Instruction *, 4> Insts; 1436 std::transform(IndexVector.begin(), IndexVector.end(), 1437 std::back_inserter(Insts), 1438 [&](unsigned Idx) { return this->InstMap[Idx]; }); 1439 return Insts; 1440 } 1441 1442 const char *MemoryDepChecker::Dependence::DepName[] = { 1443 "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward", 1444 "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"}; 1445 1446 void MemoryDepChecker::Dependence::print( 1447 raw_ostream &OS, unsigned Depth, 1448 const SmallVectorImpl<Instruction *> &Instrs) const { 1449 OS.indent(Depth) << DepName[Type] << ":\n"; 1450 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n"; 1451 OS.indent(Depth + 2) << *Instrs[Destination] << "\n"; 1452 } 1453 1454 bool LoopAccessInfo::canAnalyzeLoop() { 1455 // We need to have a loop header. 1456 DEBUG(dbgs() << "LAA: Found a loop in " 1457 << TheLoop->getHeader()->getParent()->getName() << ": " 1458 << TheLoop->getHeader()->getName() << '\n'); 1459 1460 // We can only analyze innermost loops. 1461 if (!TheLoop->empty()) { 1462 DEBUG(dbgs() << "LAA: loop is not the innermost loop\n"); 1463 emitAnalysis(LoopAccessReport() << "loop is not the innermost loop"); 1464 return false; 1465 } 1466 1467 // We must have a single backedge. 1468 if (TheLoop->getNumBackEdges() != 1) { 1469 DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n"); 1470 emitAnalysis( 1471 LoopAccessReport() << 1472 "loop control flow is not understood by analyzer"); 1473 return false; 1474 } 1475 1476 // We must have a single exiting block. 1477 if (!TheLoop->getExitingBlock()) { 1478 DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n"); 1479 emitAnalysis( 1480 LoopAccessReport() << 1481 "loop control flow is not understood by analyzer"); 1482 return false; 1483 } 1484 1485 // We only handle bottom-tested loops, i.e. loop in which the condition is 1486 // checked at the end of each iteration. With that we can assume that all 1487 // instructions in the loop are executed the same number of times. 1488 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { 1489 DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n"); 1490 emitAnalysis( 1491 LoopAccessReport() << 1492 "loop control flow is not understood by analyzer"); 1493 return false; 1494 } 1495 1496 // ScalarEvolution needs to be able to find the exit count. 1497 const SCEV *ExitCount = PSE->getBackedgeTakenCount(); 1498 if (ExitCount == PSE->getSE()->getCouldNotCompute()) { 1499 emitAnalysis(LoopAccessReport() 1500 << "could not determine number of loop iterations"); 1501 DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n"); 1502 return false; 1503 } 1504 1505 return true; 1506 } 1507 1508 void LoopAccessInfo::analyzeLoop(AliasAnalysis *AA, LoopInfo *LI, 1509 const TargetLibraryInfo *TLI, 1510 DominatorTree *DT) { 1511 typedef SmallPtrSet<Value*, 16> ValueSet; 1512 1513 // Holds the Load and Store instructions. 1514 SmallVector<LoadInst *, 16> Loads; 1515 SmallVector<StoreInst *, 16> Stores; 1516 1517 // Holds all the different accesses in the loop. 1518 unsigned NumReads = 0; 1519 unsigned NumReadWrites = 0; 1520 1521 PtrRtChecking->Pointers.clear(); 1522 PtrRtChecking->Need = false; 1523 1524 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel(); 1525 1526 // For each block. 1527 for (BasicBlock *BB : TheLoop->blocks()) { 1528 // Scan the BB and collect legal loads and stores. 1529 for (Instruction &I : *BB) { 1530 // If this is a load, save it. If this instruction can read from memory 1531 // but is not a load, then we quit. Notice that we don't handle function 1532 // calls that read or write. 1533 if (I.mayReadFromMemory()) { 1534 // Many math library functions read the rounding mode. We will only 1535 // vectorize a loop if it contains known function calls that don't set 1536 // the flag. Therefore, it is safe to ignore this read from memory. 1537 auto *Call = dyn_cast<CallInst>(&I); 1538 if (Call && getVectorIntrinsicIDForCall(Call, TLI)) 1539 continue; 1540 1541 // If the function has an explicit vectorized counterpart, we can safely 1542 // assume that it can be vectorized. 1543 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() && 1544 TLI->isFunctionVectorizable(Call->getCalledFunction()->getName())) 1545 continue; 1546 1547 auto *Ld = dyn_cast<LoadInst>(&I); 1548 if (!Ld || (!Ld->isSimple() && !IsAnnotatedParallel)) { 1549 emitAnalysis(LoopAccessReport(Ld) 1550 << "read with atomic ordering or volatile read"); 1551 DEBUG(dbgs() << "LAA: Found a non-simple load.\n"); 1552 CanVecMem = false; 1553 return; 1554 } 1555 NumLoads++; 1556 Loads.push_back(Ld); 1557 DepChecker->addAccess(Ld); 1558 if (EnableMemAccessVersioning) 1559 collectStridedAccess(Ld); 1560 continue; 1561 } 1562 1563 // Save 'store' instructions. Abort if other instructions write to memory. 1564 if (I.mayWriteToMemory()) { 1565 auto *St = dyn_cast<StoreInst>(&I); 1566 if (!St) { 1567 emitAnalysis(LoopAccessReport(St) 1568 << "instruction cannot be vectorized"); 1569 CanVecMem = false; 1570 return; 1571 } 1572 if (!St->isSimple() && !IsAnnotatedParallel) { 1573 emitAnalysis(LoopAccessReport(St) 1574 << "write with atomic ordering or volatile write"); 1575 DEBUG(dbgs() << "LAA: Found a non-simple store.\n"); 1576 CanVecMem = false; 1577 return; 1578 } 1579 NumStores++; 1580 Stores.push_back(St); 1581 DepChecker->addAccess(St); 1582 if (EnableMemAccessVersioning) 1583 collectStridedAccess(St); 1584 } 1585 } // Next instr. 1586 } // Next block. 1587 1588 // Now we have two lists that hold the loads and the stores. 1589 // Next, we find the pointers that they use. 1590 1591 // Check if we see any stores. If there are no stores, then we don't 1592 // care if the pointers are *restrict*. 1593 if (!Stores.size()) { 1594 DEBUG(dbgs() << "LAA: Found a read-only loop!\n"); 1595 CanVecMem = true; 1596 return; 1597 } 1598 1599 MemoryDepChecker::DepCandidates DependentAccesses; 1600 AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(), 1601 AA, LI, DependentAccesses, *PSE); 1602 1603 // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects 1604 // multiple times on the same object. If the ptr is accessed twice, once 1605 // for read and once for write, it will only appear once (on the write 1606 // list). This is okay, since we are going to check for conflicts between 1607 // writes and between reads and writes, but not between reads and reads. 1608 ValueSet Seen; 1609 1610 for (StoreInst *ST : Stores) { 1611 Value *Ptr = ST->getPointerOperand(); 1612 // Check for store to loop invariant address. 1613 StoreToLoopInvariantAddress |= isUniform(Ptr); 1614 // If we did *not* see this pointer before, insert it to the read-write 1615 // list. At this phase it is only a 'write' list. 1616 if (Seen.insert(Ptr).second) { 1617 ++NumReadWrites; 1618 1619 MemoryLocation Loc = MemoryLocation::get(ST); 1620 // The TBAA metadata could have a control dependency on the predication 1621 // condition, so we cannot rely on it when determining whether or not we 1622 // need runtime pointer checks. 1623 if (blockNeedsPredication(ST->getParent(), TheLoop, DT)) 1624 Loc.AATags.TBAA = nullptr; 1625 1626 Accesses.addStore(Loc); 1627 } 1628 } 1629 1630 if (IsAnnotatedParallel) { 1631 DEBUG(dbgs() 1632 << "LAA: A loop annotated parallel, ignore memory dependency " 1633 << "checks.\n"); 1634 CanVecMem = true; 1635 return; 1636 } 1637 1638 for (LoadInst *LD : Loads) { 1639 Value *Ptr = LD->getPointerOperand(); 1640 // If we did *not* see this pointer before, insert it to the 1641 // read list. If we *did* see it before, then it is already in 1642 // the read-write list. This allows us to vectorize expressions 1643 // such as A[i] += x; Because the address of A[i] is a read-write 1644 // pointer. This only works if the index of A[i] is consecutive. 1645 // If the address of i is unknown (for example A[B[i]]) then we may 1646 // read a few words, modify, and write a few words, and some of the 1647 // words may be written to the same address. 1648 bool IsReadOnlyPtr = false; 1649 if (Seen.insert(Ptr).second || 1650 !getPtrStride(*PSE, Ptr, TheLoop, SymbolicStrides)) { 1651 ++NumReads; 1652 IsReadOnlyPtr = true; 1653 } 1654 1655 MemoryLocation Loc = MemoryLocation::get(LD); 1656 // The TBAA metadata could have a control dependency on the predication 1657 // condition, so we cannot rely on it when determining whether or not we 1658 // need runtime pointer checks. 1659 if (blockNeedsPredication(LD->getParent(), TheLoop, DT)) 1660 Loc.AATags.TBAA = nullptr; 1661 1662 Accesses.addLoad(Loc, IsReadOnlyPtr); 1663 } 1664 1665 // If we write (or read-write) to a single destination and there are no 1666 // other reads in this loop then is it safe to vectorize. 1667 if (NumReadWrites == 1 && NumReads == 0) { 1668 DEBUG(dbgs() << "LAA: Found a write-only loop!\n"); 1669 CanVecMem = true; 1670 return; 1671 } 1672 1673 // Build dependence sets and check whether we need a runtime pointer bounds 1674 // check. 1675 Accesses.buildDependenceSets(); 1676 1677 // Find pointers with computable bounds. We are going to use this information 1678 // to place a runtime bound check. 1679 bool CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), 1680 TheLoop, SymbolicStrides); 1681 if (!CanDoRTIfNeeded) { 1682 emitAnalysis(LoopAccessReport() << "cannot identify array bounds"); 1683 DEBUG(dbgs() << "LAA: We can't vectorize because we can't find " 1684 << "the array bounds.\n"); 1685 CanVecMem = false; 1686 return; 1687 } 1688 1689 DEBUG(dbgs() << "LAA: We can perform a memory runtime check if needed.\n"); 1690 1691 CanVecMem = true; 1692 if (Accesses.isDependencyCheckNeeded()) { 1693 DEBUG(dbgs() << "LAA: Checking memory dependencies\n"); 1694 CanVecMem = DepChecker->areDepsSafe( 1695 DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides); 1696 MaxSafeDepDistBytes = DepChecker->getMaxSafeDepDistBytes(); 1697 1698 if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) { 1699 DEBUG(dbgs() << "LAA: Retrying with memory checks\n"); 1700 1701 // Clear the dependency checks. We assume they are not needed. 1702 Accesses.resetDepChecks(*DepChecker); 1703 1704 PtrRtChecking->reset(); 1705 PtrRtChecking->Need = true; 1706 1707 auto *SE = PSE->getSE(); 1708 CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, SE, TheLoop, 1709 SymbolicStrides, true); 1710 1711 // Check that we found the bounds for the pointer. 1712 if (!CanDoRTIfNeeded) { 1713 emitAnalysis(LoopAccessReport() 1714 << "cannot check memory dependencies at runtime"); 1715 DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n"); 1716 CanVecMem = false; 1717 return; 1718 } 1719 1720 CanVecMem = true; 1721 } 1722 } 1723 1724 if (CanVecMem) 1725 DEBUG(dbgs() << "LAA: No unsafe dependent memory operations in loop. We" 1726 << (PtrRtChecking->Need ? "" : " don't") 1727 << " need runtime memory checks.\n"); 1728 else { 1729 emitAnalysis( 1730 LoopAccessReport() 1731 << "unsafe dependent memory operations in loop. Use " 1732 "#pragma loop distribute(enable) to allow loop distribution " 1733 "to attempt to isolate the offending operations into a separate " 1734 "loop"); 1735 DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n"); 1736 } 1737 } 1738 1739 bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, 1740 DominatorTree *DT) { 1741 assert(TheLoop->contains(BB) && "Unknown block used"); 1742 1743 // Blocks that do not dominate the latch need predication. 1744 BasicBlock* Latch = TheLoop->getLoopLatch(); 1745 return !DT->dominates(BB, Latch); 1746 } 1747 1748 void LoopAccessInfo::emitAnalysis(LoopAccessReport &Message) { 1749 assert(!Report && "Multiple reports generated"); 1750 Report = Message; 1751 } 1752 1753 bool LoopAccessInfo::isUniform(Value *V) const { 1754 return (PSE->getSE()->isLoopInvariant(PSE->getSE()->getSCEV(V), TheLoop)); 1755 } 1756 1757 // FIXME: this function is currently a duplicate of the one in 1758 // LoopVectorize.cpp. 1759 static Instruction *getFirstInst(Instruction *FirstInst, Value *V, 1760 Instruction *Loc) { 1761 if (FirstInst) 1762 return FirstInst; 1763 if (Instruction *I = dyn_cast<Instruction>(V)) 1764 return I->getParent() == Loc->getParent() ? I : nullptr; 1765 return nullptr; 1766 } 1767 1768 namespace { 1769 /// \brief IR Values for the lower and upper bounds of a pointer evolution. We 1770 /// need to use value-handles because SCEV expansion can invalidate previously 1771 /// expanded values. Thus expansion of a pointer can invalidate the bounds for 1772 /// a previous one. 1773 struct PointerBounds { 1774 TrackingVH<Value> Start; 1775 TrackingVH<Value> End; 1776 }; 1777 } // end anonymous namespace 1778 1779 /// \brief Expand code for the lower and upper bound of the pointer group \p CG 1780 /// in \p TheLoop. \return the values for the bounds. 1781 static PointerBounds 1782 expandBounds(const RuntimePointerChecking::CheckingPtrGroup *CG, Loop *TheLoop, 1783 Instruction *Loc, SCEVExpander &Exp, ScalarEvolution *SE, 1784 const RuntimePointerChecking &PtrRtChecking) { 1785 Value *Ptr = PtrRtChecking.Pointers[CG->Members[0]].PointerValue; 1786 const SCEV *Sc = SE->getSCEV(Ptr); 1787 1788 if (SE->isLoopInvariant(Sc, TheLoop)) { 1789 DEBUG(dbgs() << "LAA: Adding RT check for a loop invariant ptr:" << *Ptr 1790 << "\n"); 1791 return {Ptr, Ptr}; 1792 } else { 1793 unsigned AS = Ptr->getType()->getPointerAddressSpace(); 1794 LLVMContext &Ctx = Loc->getContext(); 1795 1796 // Use this type for pointer arithmetic. 1797 Type *PtrArithTy = Type::getInt8PtrTy(Ctx, AS); 1798 Value *Start = nullptr, *End = nullptr; 1799 1800 DEBUG(dbgs() << "LAA: Adding RT check for range:\n"); 1801 Start = Exp.expandCodeFor(CG->Low, PtrArithTy, Loc); 1802 End = Exp.expandCodeFor(CG->High, PtrArithTy, Loc); 1803 DEBUG(dbgs() << "Start: " << *CG->Low << " End: " << *CG->High << "\n"); 1804 return {Start, End}; 1805 } 1806 } 1807 1808 /// \brief Turns a collection of checks into a collection of expanded upper and 1809 /// lower bounds for both pointers in the check. 1810 static SmallVector<std::pair<PointerBounds, PointerBounds>, 4> expandBounds( 1811 const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks, 1812 Loop *L, Instruction *Loc, ScalarEvolution *SE, SCEVExpander &Exp, 1813 const RuntimePointerChecking &PtrRtChecking) { 1814 SmallVector<std::pair<PointerBounds, PointerBounds>, 4> ChecksWithBounds; 1815 1816 // Here we're relying on the SCEV Expander's cache to only emit code for the 1817 // same bounds once. 1818 std::transform( 1819 PointerChecks.begin(), PointerChecks.end(), 1820 std::back_inserter(ChecksWithBounds), 1821 [&](const RuntimePointerChecking::PointerCheck &Check) { 1822 PointerBounds 1823 First = expandBounds(Check.first, L, Loc, Exp, SE, PtrRtChecking), 1824 Second = expandBounds(Check.second, L, Loc, Exp, SE, PtrRtChecking); 1825 return std::make_pair(First, Second); 1826 }); 1827 1828 return ChecksWithBounds; 1829 } 1830 1831 std::pair<Instruction *, Instruction *> LoopAccessInfo::addRuntimeChecks( 1832 Instruction *Loc, 1833 const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks) 1834 const { 1835 const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout(); 1836 auto *SE = PSE->getSE(); 1837 SCEVExpander Exp(*SE, DL, "induction"); 1838 auto ExpandedChecks = 1839 expandBounds(PointerChecks, TheLoop, Loc, SE, Exp, *PtrRtChecking); 1840 1841 LLVMContext &Ctx = Loc->getContext(); 1842 Instruction *FirstInst = nullptr; 1843 IRBuilder<> ChkBuilder(Loc); 1844 // Our instructions might fold to a constant. 1845 Value *MemoryRuntimeCheck = nullptr; 1846 1847 for (const auto &Check : ExpandedChecks) { 1848 const PointerBounds &A = Check.first, &B = Check.second; 1849 // Check if two pointers (A and B) conflict where conflict is computed as: 1850 // start(A) <= end(B) && start(B) <= end(A) 1851 unsigned AS0 = A.Start->getType()->getPointerAddressSpace(); 1852 unsigned AS1 = B.Start->getType()->getPointerAddressSpace(); 1853 1854 assert((AS0 == B.End->getType()->getPointerAddressSpace()) && 1855 (AS1 == A.End->getType()->getPointerAddressSpace()) && 1856 "Trying to bounds check pointers with different address spaces"); 1857 1858 Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0); 1859 Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1); 1860 1861 Value *Start0 = ChkBuilder.CreateBitCast(A.Start, PtrArithTy0, "bc"); 1862 Value *Start1 = ChkBuilder.CreateBitCast(B.Start, PtrArithTy1, "bc"); 1863 Value *End0 = ChkBuilder.CreateBitCast(A.End, PtrArithTy1, "bc"); 1864 Value *End1 = ChkBuilder.CreateBitCast(B.End, PtrArithTy0, "bc"); 1865 1866 Value *Cmp0 = ChkBuilder.CreateICmpULE(Start0, End1, "bound0"); 1867 FirstInst = getFirstInst(FirstInst, Cmp0, Loc); 1868 Value *Cmp1 = ChkBuilder.CreateICmpULE(Start1, End0, "bound1"); 1869 FirstInst = getFirstInst(FirstInst, Cmp1, Loc); 1870 Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict"); 1871 FirstInst = getFirstInst(FirstInst, IsConflict, Loc); 1872 if (MemoryRuntimeCheck) { 1873 IsConflict = 1874 ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict, "conflict.rdx"); 1875 FirstInst = getFirstInst(FirstInst, IsConflict, Loc); 1876 } 1877 MemoryRuntimeCheck = IsConflict; 1878 } 1879 1880 if (!MemoryRuntimeCheck) 1881 return std::make_pair(nullptr, nullptr); 1882 1883 // We have to do this trickery because the IRBuilder might fold the check to a 1884 // constant expression in which case there is no Instruction anchored in a 1885 // the block. 1886 Instruction *Check = BinaryOperator::CreateAnd(MemoryRuntimeCheck, 1887 ConstantInt::getTrue(Ctx)); 1888 ChkBuilder.Insert(Check, "memcheck.conflict"); 1889 FirstInst = getFirstInst(FirstInst, Check, Loc); 1890 return std::make_pair(FirstInst, Check); 1891 } 1892 1893 std::pair<Instruction *, Instruction *> 1894 LoopAccessInfo::addRuntimeChecks(Instruction *Loc) const { 1895 if (!PtrRtChecking->Need) 1896 return std::make_pair(nullptr, nullptr); 1897 1898 return addRuntimeChecks(Loc, PtrRtChecking->getChecks()); 1899 } 1900 1901 void LoopAccessInfo::collectStridedAccess(Value *MemAccess) { 1902 Value *Ptr = nullptr; 1903 if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess)) 1904 Ptr = LI->getPointerOperand(); 1905 else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess)) 1906 Ptr = SI->getPointerOperand(); 1907 else 1908 return; 1909 1910 Value *Stride = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop); 1911 if (!Stride) 1912 return; 1913 1914 DEBUG(dbgs() << "LAA: Found a strided access that we can version"); 1915 DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *Stride << "\n"); 1916 SymbolicStrides[Ptr] = Stride; 1917 StrideSet.insert(Stride); 1918 } 1919 1920 LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE, 1921 const TargetLibraryInfo *TLI, AliasAnalysis *AA, 1922 DominatorTree *DT, LoopInfo *LI) 1923 : PSE(llvm::make_unique<PredicatedScalarEvolution>(*SE, *L)), 1924 PtrRtChecking(llvm::make_unique<RuntimePointerChecking>(SE)), 1925 DepChecker(llvm::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L), 1926 NumLoads(0), NumStores(0), MaxSafeDepDistBytes(-1), CanVecMem(false), 1927 StoreToLoopInvariantAddress(false) { 1928 if (canAnalyzeLoop()) 1929 analyzeLoop(AA, LI, TLI, DT); 1930 } 1931 1932 void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const { 1933 if (CanVecMem) { 1934 OS.indent(Depth) << "Memory dependences are safe"; 1935 if (MaxSafeDepDistBytes != -1ULL) 1936 OS << " with a maximum dependence distance of " << MaxSafeDepDistBytes 1937 << " bytes"; 1938 if (PtrRtChecking->Need) 1939 OS << " with run-time checks"; 1940 OS << "\n"; 1941 } 1942 1943 if (Report) 1944 OS.indent(Depth) << "Report: " << Report->str() << "\n"; 1945 1946 if (auto *Dependences = DepChecker->getDependences()) { 1947 OS.indent(Depth) << "Dependences:\n"; 1948 for (auto &Dep : *Dependences) { 1949 Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions()); 1950 OS << "\n"; 1951 } 1952 } else 1953 OS.indent(Depth) << "Too many dependences, not recorded\n"; 1954 1955 // List the pair of accesses need run-time checks to prove independence. 1956 PtrRtChecking->print(OS, Depth); 1957 OS << "\n"; 1958 1959 OS.indent(Depth) << "Store to invariant address was " 1960 << (StoreToLoopInvariantAddress ? "" : "not ") 1961 << "found in loop.\n"; 1962 1963 OS.indent(Depth) << "SCEV assumptions:\n"; 1964 PSE->getUnionPredicate().print(OS, Depth); 1965 1966 OS << "\n"; 1967 1968 OS.indent(Depth) << "Expressions re-written:\n"; 1969 PSE->print(OS, Depth); 1970 } 1971 1972 const LoopAccessInfo &LoopAccessLegacyAnalysis::getInfo(Loop *L) { 1973 auto &LAI = LoopAccessInfoMap[L]; 1974 1975 if (!LAI) 1976 LAI = llvm::make_unique<LoopAccessInfo>(L, SE, TLI, AA, DT, LI); 1977 1978 return *LAI.get(); 1979 } 1980 1981 void LoopAccessLegacyAnalysis::print(raw_ostream &OS, const Module *M) const { 1982 LoopAccessLegacyAnalysis &LAA = *const_cast<LoopAccessLegacyAnalysis *>(this); 1983 1984 for (Loop *TopLevelLoop : *LI) 1985 for (Loop *L : depth_first(TopLevelLoop)) { 1986 OS.indent(2) << L->getHeader()->getName() << ":\n"; 1987 auto &LAI = LAA.getInfo(L); 1988 LAI.print(OS, 4); 1989 } 1990 } 1991 1992 bool LoopAccessLegacyAnalysis::runOnFunction(Function &F) { 1993 SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1994 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1995 TLI = TLIP ? &TLIP->getTLI() : nullptr; 1996 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1997 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1998 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1999 2000 return false; 2001 } 2002 2003 void LoopAccessLegacyAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { 2004 AU.addRequired<ScalarEvolutionWrapperPass>(); 2005 AU.addRequired<AAResultsWrapperPass>(); 2006 AU.addRequired<DominatorTreeWrapperPass>(); 2007 AU.addRequired<LoopInfoWrapperPass>(); 2008 2009 AU.setPreservesAll(); 2010 } 2011 2012 char LoopAccessLegacyAnalysis::ID = 0; 2013 static const char laa_name[] = "Loop Access Analysis"; 2014 #define LAA_NAME "loop-accesses" 2015 2016 INITIALIZE_PASS_BEGIN(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true) 2017 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 2018 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 2019 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 2020 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 2021 INITIALIZE_PASS_END(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true) 2022 2023 char LoopAccessAnalysis::PassID; 2024 2025 LoopAccessInfo LoopAccessAnalysis::run(Loop &L, AnalysisManager<Loop> &AM) { 2026 const AnalysisManager<Function> &FAM = 2027 AM.getResult<FunctionAnalysisManagerLoopProxy>(L).getManager(); 2028 Function &F = *L.getHeader()->getParent(); 2029 auto *SE = FAM.getCachedResult<ScalarEvolutionAnalysis>(F); 2030 auto *TLI = FAM.getCachedResult<TargetLibraryAnalysis>(F); 2031 auto *AA = FAM.getCachedResult<AAManager>(F); 2032 auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(F); 2033 auto *LI = FAM.getCachedResult<LoopAnalysis>(F); 2034 if (!SE) 2035 report_fatal_error( 2036 "ScalarEvolution must have been cached at a higher level"); 2037 if (!AA) 2038 report_fatal_error("AliasAnalysis must have been cached at a higher level"); 2039 if (!DT) 2040 report_fatal_error("DominatorTree must have been cached at a higher level"); 2041 if (!LI) 2042 report_fatal_error("LoopInfo must have been cached at a higher level"); 2043 return LoopAccessInfo(&L, SE, TLI, AA, DT, LI); 2044 } 2045 2046 PreservedAnalyses LoopAccessInfoPrinterPass::run(Loop &L, 2047 AnalysisManager<Loop> &AM) { 2048 Function &F = *L.getHeader()->getParent(); 2049 auto &LAI = AM.getResult<LoopAccessAnalysis>(L); 2050 OS << "Loop access info in function '" << F.getName() << "':\n"; 2051 OS.indent(2) << L.getHeader()->getName() << ":\n"; 2052 LAI.print(OS, 4); 2053 return PreservedAnalyses::all(); 2054 } 2055 2056 namespace llvm { 2057 Pass *createLAAPass() { 2058 return new LoopAccessLegacyAnalysis(); 2059 } 2060 } 2061