//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Passes.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include <algorithm>
using namespace llvm;

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// isNonEscapingLocalObject - Return true if the pointer is to a function-local
/// object that never escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr())
      // Note even if the argument is marked nocapture we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  return false;
}

/// isEscapeSource - Return true if the pointer is one which would have
/// been considered an escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
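  // E.g. a loaded value could only equal the address of a local object if
  // that address had first been stored somewhere, and StoreCaptures=true
  // treats any such store as a capture.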
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// getObjectSize - Return the size of the object specified by V, or
/// UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &TD,
                              const TargetLibraryInfo &TLI,
                              bool RoundToAlign = false) {
  uint64_t Size;
  if (getUnderlyingObjectSize(V, Size, &TD, &TLI, RoundToAlign))
    return Size;
  return AliasAnalysis::UnknownSize;
}

/// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &TD,
                                const TargetLibraryInfo &TLI) {
  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, TD, TLI, /*RoundToAlign*/true);

  return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize < Size;
}

/// isObjectSize - Return true if we can prove that the object specified
/// by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size,
                         const DataLayout &TD, const TargetLibraryInfo &TLI) {
  uint64_t ObjectSize = getObjectSize(V, TD, TLI);
  return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {
  enum ExtensionKind {
    EK_NotExtended,
    EK_SignExt,
    EK_ZeroExt
  };

  struct VariableGEPIndex {
    const Value *V;
    ExtensionKind Extension;
    int64_t Scale;

    bool operator==(const VariableGEPIndex &Other) const {
      return V == Other.V && Extension == Other.Extension &&
             Scale == Other.Scale;
    }

    bool operator!=(const VariableGEPIndex &Other) const {
      return !operator==(Other);
    }
  };
}


/// GetLinearExpression - Analyze the specified value as a linear expression:
/// "A*V + B", where A and B are constant integers. Return the scale and offset
/// values as APInts and return V as a Value*, and return whether we looked
/// through any sign or zero extends. The incoming Value is known to have
/// IntegerType and it may already be sign or zero extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
                                  ExtensionKind &Extension,
                                  const DataLayout &TD, unsigned Depth) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      switch (BOp->getOpcode()) {
      default: break;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), &TD))
          break;
        // FALL THROUGH.
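        // E.g. when %x is known to have its low two bits clear, (%x | 3) is
        // decomposed exactly like (%x + 3) by falling into the Add case.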
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                TD, Depth+1);
        Offset += RHSC->getValue();
        return V;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                TD, Depth+1);
        Offset *= RHSC->getValue();
        Scale *= RHSC->getValue();
        return V;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                TD, Depth+1);
        Offset <<= RHSC->getValue().getLimitedValue();
        Scale <<= RHSC->getValue().getLimitedValue();
        return V;
      }
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if ((isa<SExtInst>(V) && Extension != EK_ZeroExt) ||
      (isa<ZExtInst>(V) && Extension != EK_SignExt)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned OldWidth = Scale.getBitWidth();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    Scale = Scale.trunc(SmallWidth);
    Offset = Offset.trunc(SmallWidth);
    Extension = isa<SExtInst>(V) ? EK_SignExt : EK_ZeroExt;

    Value *Result = GetLinearExpression(CastOp, Scale, Offset, Extension,
                                        TD, Depth+1);
    Scale = Scale.zext(OldWidth);
    Offset = Offset.zext(OldWidth);

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// DecomposeGEPExpression - If V is a symbolic pointer expression, decompose it
/// into a base pointer with a constant offset and a number of scaled symbolic
/// offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale in
/// the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As such,
/// the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. When not, it just looks
/// through pointer casts.
///
static const Value *
DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
                       SmallVectorImpl<VariableGEPIndex> &VarIndices,
                       const DataLayout *TD) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = 6;

  BaseOffs = 0;
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (Op == 0) {
      // The only non-operator case we can handle is a GlobalAlias.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->mayBeOverridden()) {
          V = GA->getAliasee();
          continue;
        }
      }
      return V;
    }

    if (Op->getOpcode() == Instruction::BitCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (GEPOp == 0) {
      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and use it here.
        if (const Value *Simplified =
              SimplifyInstruction(const_cast<Instruction *>(I), TD)) {
          V = Simplified;
          continue;
        }

      return V;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!cast<PointerType>(GEPOp->getOperand(0)->getType())
          ->getElementType()->isSized())
      return V;

    // If we are lacking DataLayout information, we can't compute the offsets
    // of elements computed by GEPs. However, we can handle bitcast equivalent
    // GEPs.
    if (TD == 0) {
      if (!GEPOp->hasAllZeroIndices())
        return V;
      V = GEPOp->getOperand(0);
      continue;
    }

    // Walk the indices of the GEP, accumulating them into BaseOffs/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    for (User::const_op_iterator I = GEPOp->op_begin()+1,
         E = GEPOp->op_end(); I != E; ++I) {
      Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0) continue;

        BaseOffs += TD->getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero()) continue;
        BaseOffs += TD->getTypeAllocSize(*GTI)*CIdx->getSExtValue();
        continue;
      }

      uint64_t Scale = TD->getTypeAllocSize(*GTI);
      ExtensionKind Extension = EK_NotExtended;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = cast<IntegerType>(Index->getType())->getBitWidth();
      if (TD->getPointerSizeInBits() > Width)
        Extension = EK_SignExt;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, Extension,
                                  *TD, 0);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      BaseOffs += IndexOffset.getSExtValue()*Scale;
      Scale *= IndexScale.getSExtValue();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = VarIndices.size(); i != e; ++i) {
        if (VarIndices[i].V == Index &&
            VarIndices[i].Extension == Extension) {
          Scale += VarIndices[i].Scale;
          VarIndices.erase(VarIndices.begin()+i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      if (unsigned ShiftBits = 64-TD->getPointerSizeInBits()) {
        Scale <<= ShiftBits;
        Scale = (int64_t)Scale >> ShiftBits;
      }

      if (Scale) {
        VariableGEPIndex Entry = {Index, Extension,
                                  static_cast<int64_t>(Scale)};
        VarIndices.push_back(Entry);
      }
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  return V;
}

/// GetIndexDifference - Dest and Src are the variable indices from two
/// decomposed GetElementPtr instructions GEP1 and GEP2 which have common base
/// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
/// difference between the two pointers.
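///
/// For example, if GEP1's indices decompose to {x, scale 20} and GEP2's to
/// {x, scale 20} and {y, scale 4}, the x terms cancel and the result is the
/// single entry {y, scale -4}.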
static void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
                               const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty()) return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    ExtensionKind Extension = Src[i].Extension;
    int64_t Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indexes.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (Dest[j].V != V || Dest[j].Extension != Extension) continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin()+j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (Scale) {
      VariableGEPIndex Entry = { V, Extension, -Scale };
      Dest.push_back(Entry);
    }
  }
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V))
    return inst->getParent()->getParent();

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return NULL;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

namespace {
  /// BasicAliasAnalysis - This is the primary alias analysis implementation.
  struct BasicAliasAnalysis : public ImmutablePass, public AliasAnalysis {
    static char ID; // Class identification, replacement for typeinfo
    BasicAliasAnalysis() : ImmutablePass(ID) {
      initializeBasicAliasAnalysisPass(*PassRegistry::getPassRegistry());
    }

    virtual void initializePass() {
      InitializeAliasAnalysis(this);
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetLibraryInfo>();
    }

    virtual AliasResult alias(const Location &LocA,
                              const Location &LocB) {
      assert(AliasCache.empty() && "AliasCache must be cleared after use!");
      assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
             "BasicAliasAnalysis doesn't support interprocedural queries.");
      AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.TBAATag,
                                     LocB.Ptr, LocB.Size, LocB.TBAATag);
      // AliasCache rarely has more than 1 or 2 elements, always use
      // shrink_and_clear so it quickly returns to the inline capacity of the
      // SmallDenseMap if it ever grows larger.
      // FIXME: This should really be shrink_to_inline_capacity_and_clear().
      AliasCache.shrink_and_clear();
      return Alias;
    }

    virtual ModRefResult getModRefInfo(ImmutableCallSite CS,
                                       const Location &Loc);

    virtual ModRefResult getModRefInfo(ImmutableCallSite CS1,
                                       ImmutableCallSite CS2) {
      // The AliasAnalysis base class has some smarts, let's use them.
      return AliasAnalysis::getModRefInfo(CS1, CS2);
    }

    /// pointsToConstantMemory - Chase pointers until we find a (constant
    /// global) or not.
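    ///
    /// For example, a pointer derived only from a 'constant' global variable
    /// points to constant memory; with OrLocal set, a pointer into an alloca
    /// qualifies as well.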
    virtual bool pointsToConstantMemory(const Location &Loc, bool OrLocal);

    /// getModRefBehavior - Return the behavior when calling the given
    /// call site.
    virtual ModRefBehavior getModRefBehavior(ImmutableCallSite CS);

    /// getModRefBehavior - Return the behavior when calling the given function.
    /// For use when the call site is not known.
    virtual ModRefBehavior getModRefBehavior(const Function *F);

    /// getAdjustedAnalysisPointer - This method is used when a pass implements
    /// an analysis interface through multiple inheritance. If needed, it
    /// should override this to adjust the this pointer as needed for the
    /// specified pass info.
    virtual void *getAdjustedAnalysisPointer(const void *ID) {
      if (ID == &AliasAnalysis::ID)
        return (AliasAnalysis*)this;
      return this;
    }

  private:
    // AliasCache - Track alias queries to guard against recursion.
    typedef std::pair<Location, Location> LocPair;
    typedef SmallDenseMap<LocPair, AliasResult, 8> AliasCacheTy;
    AliasCacheTy AliasCache;

    // Visited - Track instructions visited by pointsToConstantMemory.
    SmallPtrSet<const Value*, 16> Visited;

    // aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP
    // instruction against another.
    AliasResult aliasGEP(const GEPOperator *V1, uint64_t V1Size,
                         const MDNode *V1TBAAInfo,
                         const Value *V2, uint64_t V2Size,
                         const MDNode *V2TBAAInfo,
                         const Value *UnderlyingV1, const Value *UnderlyingV2);

    // aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI
    // instruction against another.
    AliasResult aliasPHI(const PHINode *PN, uint64_t PNSize,
                         const MDNode *PNTBAAInfo,
                         const Value *V2, uint64_t V2Size,
                         const MDNode *V2TBAAInfo);

    /// aliasSelect - Disambiguate a Select instruction against another value.
    AliasResult aliasSelect(const SelectInst *SI, uint64_t SISize,
                            const MDNode *SITBAAInfo,
                            const Value *V2, uint64_t V2Size,
                            const MDNode *V2TBAAInfo);

    AliasResult aliasCheck(const Value *V1, uint64_t V1Size,
                           const MDNode *V1TBAATag,
                           const Value *V2, uint64_t V2Size,
                           const MDNode *V2TBAATag);
  };
} // End of anonymous namespace

// Register this pass...
char BasicAliasAnalysis::ID = 0;
INITIALIZE_AG_PASS_BEGIN(BasicAliasAnalysis, AliasAnalysis, "basicaa",
                         "Basic Alias Analysis (stateless AA impl)",
                         false, true, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_PASS_END(BasicAliasAnalysis, AliasAnalysis, "basicaa",
                       "Basic Alias Analysis (stateless AA impl)",
                       false, true, false)

ImmutablePass *llvm::createBasicAliasAnalysisPass() {
  return new BasicAliasAnalysis();
}

/// pointsToConstantMemory - Returns whether the given pointer value
/// points to memory that is local to the function, with global constants being
/// considered local to all functions.
bool
BasicAliasAnalysis::pointsToConstantMemory(const Location &Loc, bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), TD);
    if (!Visited.insert(V)) {
      Visited.clear();
      return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
      }
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Worklist.push_back(PN->getIncomingValue(i));
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);

  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

/// getModRefBehavior - Return the behavior when calling the given call site.
AliasAnalysis::ModRefBehavior
BasicAliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return DoesNotAccessMemory;

  ModRefBehavior Min = UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (CS.onlyReadsMemory())
    Min = OnlyReadsMemory;

  // The AliasAnalysis base class has some smarts, let's use them.
  return ModRefBehavior(AliasAnalysis::getModRefBehavior(CS) & Min);
}

/// getModRefBehavior - Return the behavior when calling the given function.
/// For use when the call site is not known.
AliasAnalysis::ModRefBehavior
BasicAliasAnalysis::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return DoesNotAccessMemory;

  // For intrinsics, we can check the table.
  if (unsigned iid = F->getIntrinsicID()) {
#define GET_INTRINSIC_MODREF_BEHAVIOR
#include "llvm/IR/Intrinsics.gen"
#undef GET_INTRINSIC_MODREF_BEHAVIOR
  }

  ModRefBehavior Min = UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = OnlyReadsMemory;

  // Otherwise be conservative.
  return ModRefBehavior(AliasAnalysis::getModRefBehavior(F) & Min);
}

/// getModRefInfo - Check to see if the specified callsite can clobber the
/// specified memory object. Since we only look at local properties of this
/// function, we really can't say much about this query. We do, however, use
/// simple "address taken" analysis on local objects.
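///
/// For example, a call cannot mod/ref a non-escaping alloca unless the alloca
/// is passed to the call itself, because the callee has no other way to name
/// that memory.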
AliasAnalysis::ModRefResult
BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
                                  const Location &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, TD);

  // If this is a tail call and Loc.Ptr points to a stack location, we know
  // that the tail call cannot access or modify the local stack.
  // We cannot exclude byval arguments here; these belong to the caller of
  // the current function not to the current function, and a tail callee
  // may reference them.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall())
        return NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the
  // pointer as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {
    bool PassedAsArg = false;
    unsigned ArgNo = 0;
    for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
         CI != CE; ++CI, ++ArgNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking. If not, we have to
      // assume that the call could touch the pointer, even though it doesn't
      // escape.
      if (!isNoAlias(Location(*CI), Location(Object))) {
        PassedAsArg = true;
        break;
      }
    }

    if (!PassedAsArg)
      return NoModRef;
  }

  const TargetLibraryInfo &TLI = getAnalysis<TargetLibraryInfo>();
  ModRefResult Min = ModRef;

  // Finally, handle specific knowledge of intrinsics.
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  if (II != 0)
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::memcpy:
    case Intrinsic::memmove: {
      uint64_t Len = UnknownSize;
      if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
        Len = LenCI->getZExtValue();
      Value *Dest = II->getArgOperand(0);
      Value *Src = II->getArgOperand(1);
      // If it can't overlap the dest, it doesn't mod the loc; if it also can't
      // overlap the source, it doesn't ref the loc either.
      if (isNoAlias(Location(Dest, Len), Loc)) {
        if (isNoAlias(Location(Src, Len), Loc))
          return NoModRef;
        // If it can't overlap the dest, then worst case it reads the loc.
        Min = Ref;
      } else if (isNoAlias(Location(Src, Len), Loc)) {
        // If it can't overlap the source, then worst case it mutates the loc.
        Min = Mod;
      }
      break;
    }
    case Intrinsic::memset:
      // Since memset is 'accesses arguments' only, the AliasAnalysis base class
      // will handle it for the variable length case.
      if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
        uint64_t Len = LenCI->getZExtValue();
        Value *Dest = II->getArgOperand(0);
        if (isNoAlias(Location(Dest, Len), Loc))
          return NoModRef;
      }
      // We know that memset doesn't load anything.
      Min = Mod;
      break;
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start: {
      uint64_t PtrSize =
        cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
      if (isNoAlias(Location(II->getArgOperand(1),
                             PtrSize,
                             II->getMetadata(LLVMContext::MD_tbaa)),
                    Loc))
        return NoModRef;
      break;
    }
    case Intrinsic::invariant_end: {
      uint64_t PtrSize =
        cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
      if (isNoAlias(Location(II->getArgOperand(2),
                             PtrSize,
                             II->getMetadata(LLVMContext::MD_tbaa)),
                    Loc))
        return NoModRef;
      break;
    }
    case Intrinsic::arm_neon_vld1: {
      // LLVM's vld1 and vst1 intrinsics currently only support a single
      // vector register.
      uint64_t Size =
        TD ? TD->getTypeStoreSize(II->getType()) : UnknownSize;
      if (isNoAlias(Location(II->getArgOperand(0), Size,
                             II->getMetadata(LLVMContext::MD_tbaa)),
                    Loc))
        return NoModRef;
      break;
    }
    case Intrinsic::arm_neon_vst1: {
      uint64_t Size =
        TD ? TD->getTypeStoreSize(II->getArgOperand(1)->getType()) : UnknownSize;
      if (isNoAlias(Location(II->getArgOperand(0), Size,
                             II->getMetadata(LLVMContext::MD_tbaa)),
                    Loc))
        return NoModRef;
      break;
    }
    }

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  else if (TLI.has(LibFunc::memset_pattern16) &&
           CS.getCalledFunction() &&
           CS.getCalledFunction()->getName() == "memset_pattern16") {
    const Function *MS = CS.getCalledFunction();
    FunctionType *MemsetType = MS->getFunctionType();
    if (!MemsetType->isVarArg() && MemsetType->getNumParams() == 3 &&
        isa<PointerType>(MemsetType->getParamType(0)) &&
        isa<PointerType>(MemsetType->getParamType(1)) &&
        isa<IntegerType>(MemsetType->getParamType(2))) {
      uint64_t Len = UnknownSize;
      if (const ConstantInt *LenCI = dyn_cast<ConstantInt>(CS.getArgument(2)))
        Len = LenCI->getZExtValue();
      const Value *Dest = CS.getArgument(0);
      const Value *Src = CS.getArgument(1);
      // If it can't overlap the dest, then it doesn't mod the loc.
      if (isNoAlias(Location(Dest, Len), Loc)) {
        // Always reads 16 bytes of the source.
        if (isNoAlias(Location(Src, 16), Loc))
          return NoModRef;
        // If it can't overlap the dest, then worst case it reads the loc.
        Min = Ref;
        // Always reads 16 bytes of the source.
      } else if (isNoAlias(Location(Src, 16), Loc)) {
        // If it can't overlap the source, then worst case it mutates the loc.
        Min = Mod;
      }
    }
  }

  // The AliasAnalysis base class has some smarts, let's use them.
  return ModRefResult(AliasAnalysis::getModRefInfo(CS, Loc) & Min);
}

static bool areVarIndicesEqual(SmallVector<VariableGEPIndex, 4> &Indices1,
                               SmallVector<VariableGEPIndex, 4> &Indices2) {
  unsigned Size1 = Indices1.size();
  unsigned Size2 = Indices2.size();

  if (Size1 != Size2)
    return false;

  for (unsigned I = 0; I != Size1; ++I)
    if (Indices1[I] != Indices2[I])
      return false;

  return true;
}

/// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
/// against another pointer. We know that V1 is a GEP, but we don't know
/// anything about V2. UnderlyingV1 is GetUnderlyingObject(GEP1, TD),
/// UnderlyingV2 is the same for V2.
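///
/// For example, with DataLayout available, &A[1] and &A[3] decompose to the
/// same base with constant offsets two elements apart, which is enough to
/// prove NoAlias for accesses no larger than two elements.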
///
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
                             const MDNode *V1TBAAInfo,
                             const Value *V2, uint64_t V2Size,
                             const MDNode *V2TBAAInfo,
                             const Value *UnderlyingV1,
                             const Value *UnderlyingV2) {
  int64_t GEP1BaseOffset;
  SmallVector<VariableGEPIndex, 4> GEP1VariableIndices;

  // If we have two gep instructions with must-alias or not-alias'ing base
  // pointers, figure out if the indexes to the GEP tell us anything about the
  // derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Check for geps of non-aliasing underlying pointers where the offsets are
    // identical.
    if (V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
      AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size,
                                                V1TBAAInfo, UnderlyingV2,
                                                V2Size, V2TBAAInfo);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about the
        // relation of the resulting pointer.
        int64_t GEP2BaseOffset;
        SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
        const Value *GEP2BasePtr =
          DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
        const Value *GEP1BasePtr =
          DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
        // DecomposeGEPExpression and GetUnderlyingObject should return the
        // same result except when DecomposeGEPExpression has no DataLayout.
        if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
          assert(TD == 0 &&
                 "DecomposeGEPExpression and GetUnderlyingObject disagree!");
          return MayAlias;
        }
        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            areVarIndicesEqual(GEP1VariableIndices, GEP2VariableIndices))
          return NoAlias;
        GEP1VariableIndices.clear();
      }
    }

    // Do the base pointers alias?
    AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize, 0,
                                       UnderlyingV2, UnknownSize, 0);

    // If we get a No or May, then return it immediately, no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias) return BaseAlias;

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    const Value *GEP1BasePtr =
      DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);

    int64_t GEP2BaseOffset;
    SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
    const Value *GEP2BasePtr =
      DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);

    // DecomposeGEPExpression and GetUnderlyingObject should return the
    // same result except when DecomposeGEPExpression has no DataLayout.
    if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
      assert(TD == 0 &&
             "DecomposeGEPExpression and GetUnderlyingObject disagree!");
      return MayAlias;
    }

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(GEP1VariableIndices, GEP2VariableIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.
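    // For example, &A[1] cannot alias &A itself when the access through &A is
    // no larger than a single array element.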

    // If both accesses are unknown size, we can't do anything useful here.
    if (V1Size == UnknownSize && V2Size == UnknownSize)
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, UnknownSize, 0,
                               V2, V2Size, V2TBAAInfo);
    if (R != MustAlias)
      // If V2 may alias the GEP base pointer, conservatively return MayAlias.
      // If V2 is known not to alias the GEP base pointer, then the two values
      // cannot alias per GEP semantics: "A pointer value formed from a
      // getelementptr instruction is associated with the addresses associated
      // with the first operand of the getelementptr".
      return R;

    const Value *GEP1BasePtr =
      DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);

    // DecomposeGEPExpression and GetUnderlyingObject should return the
    // same result except when DecomposeGEPExpression has no DataLayout.
    if (GEP1BasePtr != UnderlyingV1) {
      assert(TD == 0 &&
             "DecomposeGEPExpression and GetUnderlyingObject disagree!");
      return MayAlias;
    }
  }

  // In the two-GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && GEP1VariableIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && GEP1VariableIndices.empty()) {
    if (GEP1BaseOffset >= 0) {
      if (V2Size != UnknownSize) {
        if ((uint64_t)GEP1BaseOffset < V2Size)
          return PartialAlias;
        return NoAlias;
      }
    } else {
      if (V1Size != UnknownSize) {
        if (-(uint64_t)GEP1BaseOffset < V1Size)
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  // Try to distinguish something like &A[i][1] against &A[42][0].
  // Grab the least significant bit set in any of the scales.
  if (!GEP1VariableIndices.empty()) {
    uint64_t Modulo = 0;
    for (unsigned i = 0, e = GEP1VariableIndices.size(); i != e; ++i)
      Modulo |= (uint64_t)GEP1VariableIndices[i].Scale;
    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
    if (V1Size != UnknownSize && V2Size != UnknownSize &&
        ModOffset >= V2Size && V1Size <= Modulo - ModOffset)
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  //
  // TODO: Returning PartialAlias instead of MayAlias is a mild hack; the
  // practical effect of this is protecting TBAA in the case of dynamic
  // indices into arrays of unions or malloc'd memory.
  return PartialAlias;
}

static AliasAnalysis::AliasResult
MergeAliasResults(AliasAnalysis::AliasResult A, AliasAnalysis::AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == AliasAnalysis::PartialAlias && B == AliasAnalysis::MustAlias) ||
      (B == AliasAnalysis::PartialAlias && A == AliasAnalysis::MustAlias))
    return AliasAnalysis::PartialAlias;
  // Otherwise, we don't know anything.
  return AliasAnalysis::MayAlias;
}

/// aliasSelect - Provide a bunch of ad-hoc rules to disambiguate a Select
/// instruction against another.
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasSelect(const SelectInst *SI, uint64_t SISize,
                                const MDNode *SITBAAInfo,
                                const Value *V2, uint64_t V2Size,
                                const MDNode *V2TBAAInfo) {
  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias =
        aliasCheck(SI->getTrueValue(), SISize, SITBAAInfo,
                   SI2->getTrueValue(), V2Size, V2TBAAInfo);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
        aliasCheck(SI->getFalseValue(), SISize, SITBAAInfo,
                   SI2->getFalseValue(), V2Size, V2TBAAInfo);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select NoAlias or MustAlias V2, return NoAlias or
  // MustAlias respectively. Otherwise, return MayAlias.
  AliasResult Alias =
    aliasCheck(V2, V2Size, V2TBAAInfo, SI->getTrueValue(), SISize, SITBAAInfo);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias =
    aliasCheck(V2, V2Size, V2TBAAInfo, SI->getFalseValue(), SISize, SITBAAInfo);
  return MergeAliasResults(ThisAlias, Alias);
}

// aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI instruction
// against another.
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize,
                             const MDNode *PNTBAAInfo,
                             const Value *V2, uint64_t V2Size,
                             const MDNode *V2TBAAInfo) {
  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      LocPair Locs(Location(PN, PNSize, PNTBAAInfo),
                   Location(V2, V2Size, V2TBAAInfo));
      if (PN > V2)
        std::swap(Locs.first, Locs.second);
      // Analyse the PHIs' inputs under the assumption that the PHIs are
      // NoAlias.
      // If the PHIs are May/MustAlias there must be (recursively) an input
      // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
      // there must be an operation on the PHIs within the PHIs' value cycle
      // that causes a MayAlias.
      // Pretend the phis do not alias.
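      // (If the speculation fails, the merge loop below yields a non-NoAlias
      // result and the original cache entry is restored before returning.)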
      AliasResult Alias = NoAlias;
      assert(AliasCache.count(Locs) &&
             "There must exist an entry for the phi node");
      AliasResult OrigAliasResult = AliasCache[Locs];
      AliasCache[Locs] = NoAlias;

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
          aliasCheck(PN->getIncomingValue(i), PNSize, PNTBAAInfo,
                     PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                     V2Size, V2TBAAInfo);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }

      // Reset if speculation failed.
      if (Alias != NoAlias)
        AliasCache[Locs] = OrigAliasResult;

      return Alias;
    }

  SmallPtrSet<Value*, 4> UniqueSrc;
  SmallVector<Value*, 4> V1Srcs;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *PV1 = PN->getIncomingValue(i);
    if (isa<PHINode>(PV1))
      // If any source itself is a PHI, return MayAlias conservatively to avoid
      // compile time explosion. The worst possible case is if both sides are
      // PHI nodes, in which case this is O(m x n) time where 'm' and 'n' are
      // the number of PHI sources.
      return MayAlias;
    if (UniqueSrc.insert(PV1))
      V1Srcs.push_back(PV1);
  }

  AliasResult Alias = aliasCheck(V2, V2Size, V2TBAAInfo,
                                 V1Srcs[0], PNSize, PNTBAAInfo);
  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then return
  // NoAlias or MustAlias respectively. Otherwise, return MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = aliasCheck(V2, V2Size, V2TBAAInfo,
                                       V, PNSize, PNTBAAInfo);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == MayAlias)
      break;
  }

  return Alias;
}

// aliasCheck - Provide a bunch of ad-hoc rules to disambiguate in common cases,
// such as array references.
//
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
                               const MDNode *V1TBAAInfo,
                               const Value *V2, uint64_t V2Size,
                               const MDNode *V2TBAAInfo) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size == 0 || V2Size == 0)
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCasts();
  V2 = V2->stripPointerCasts();

  // Are we checking for alias of the same value?
  if (V1 == V2) return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias;  // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = GetUnderlyingObject(V1, TD);
  const Value *O2 = GetUnderlyingObject(V2, TD);

  // Null values in the default address space don't point to any object, so they
  // don't alias any other pointer.
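  // (Other address spaces are allowed to place a valid object at address
  // zero, so this is restricted to address space 0.)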
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Arguments can't alias with local allocations or noalias calls
    // in the same function.
    if (((isa<Argument>(O1) && (isa<AllocaInst>(O2) || isNoAliasCall(O2))) ||
         (isa<Argument>(O2) && (isa<AllocaInst>(O1) || isNoAliasCall(O1)))))
      return NoAlias;

    // Most objects can't alias null.
    if ((isa<ConstantPointerNull>(O2) && isKnownNonNull(O1)) ||
        (isa<ConstantPointerNull>(O1) && isKnownNonNull(O2)))
      return NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
      return NoAlias;
    if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
      return NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  if (TD)
    if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD, *TLI)) ||
        (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD, *TLI)))
      return NoAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  LocPair Locs(Location(V1, V1Size, V1TBAAInfo),
               Location(V2, V2Size, V2TBAAInfo));
  if (V1 > V2)
    std::swap(Locs.first, Locs.second);
  std::pair<AliasCacheTy::iterator, bool> Pair =
    AliasCache.insert(std::make_pair(Locs, MayAlias));
  if (!Pair.second)
    return Pair.first->second;

  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
  // GEP can't simplify, we don't even look at the PHI cases.
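  // Canonicalize so that when exactly one of the operands is a GEP, PHI, or
  // Select, it ends up in V1; each routine below then only needs to handle
  // the interesting value on one side.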
  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(O1, O2);
    std::swap(V1TBAAInfo, V2TBAAInfo);
  }
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result = aliasGEP(GV1, V1Size, V1TBAAInfo,
                                  V2, V2Size, V2TBAAInfo, O1, O2);
    if (Result != MayAlias) return AliasCache[Locs] = Result;
  }

  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(V1TBAAInfo, V2TBAAInfo);
  }
  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V1TBAAInfo,
                                  V2, V2Size, V2TBAAInfo);
    if (Result != MayAlias) return AliasCache[Locs] = Result;
  }

  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(V1TBAAInfo, V2TBAAInfo);
  }
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result = aliasSelect(S1, V1Size, V1TBAAInfo,
                                     V2, V2Size, V2TBAAInfo);
    if (Result != MayAlias) return AliasCache[Locs] = Result;
  }

  // If both pointers are pointing into the same object and one of the accesses
  // is accessing the entire object, then the accesses must overlap in some
  // way.
  if (TD && O1 == O2)
    if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *TD, *TLI)) ||
        (V2Size != UnknownSize && isObjectSize(O2, V2Size, *TD, *TLI)))
      return AliasCache[Locs] = PartialAlias;

  AliasResult Result =
    AliasAnalysis::alias(Location(V1, V1Size, V1TBAAInfo),
                         Location(V2, V2Size, V2TBAAInfo));
  return AliasCache[Locs] = Result;
}