//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include <cstring>
using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

/// Enable an experimental feature to leverage information about dominating
/// conditions to compute known bits. The individual options below control how
/// hard we search. The defaults are chosen to be fairly aggressive. If you
/// run into compile time problems when testing, scale them back and report
/// your findings.
static cl::opt<bool> EnableDomConditions("value-tracking-dom-conditions",
                                         cl::Hidden, cl::init(false));

// This is expensive, so we only do it for the top level query value.
// (TODO: evaluate cost vs profit, consider higher thresholds)
static cl::opt<unsigned> DomConditionsMaxDepth("dom-conditions-max-depth",
                                               cl::Hidden, cl::init(1));

/// How many dominating blocks should be scanned looking for dominating
/// conditions?
static cl::opt<unsigned> DomConditionsMaxDomBlocks("dom-conditions-dom-blocks",
                                                   cl::Hidden,
                                                   cl::init(20));

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

// If true, consider only compares whose only use is a branch.
static cl::opt<bool> DomConditionsSingleCmpUse("dom-conditions-single-cmp-use",
                                               cl::Hidden, cl::init(false));

/// Returns the bitwidth of the given scalar or pointer type (if unknown,
/// returns 0). For vector types, returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}

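// For example, getBitWidth returns 32 for i32, 16 for <4 x i16> (the element
// width), and the DataLayout's pointer width (e.g. 64 on a 64-bit target) for
// pointer types, for which getScalarSizeInBits() yields 0.
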
// Many of these functions have internal versions that take an assumption
// exclusion set. This is because of the potential for mutual recursion to
// cause computeKnownBits to repeatedly visit the same assume intrinsic. The
// classic case of this is assume(x = y), which will attempt to determine
// bits in x from bits in y, which will attempt to determine bits in y from
// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
// isKnownNonZero, which calls computeKnownBits and ComputeSignBit and
// isKnownToBeAPowerOfTwo (all of which can call computeKnownBits), and so on.
typedef SmallPtrSet<const Value *, 8> ExclInvsSet;

namespace {
// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  ExclInvsSet ExclInvs;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  Query(AssumptionCache *AC = nullptr, const Instruction *CxtI = nullptr,
        const DominatorTree *DT = nullptr)
      : AC(AC), CxtI(CxtI), DT(DT) {}

  Query(const Query &Q, const Value *NewExcl)
      : ExclInvs(Q.ExclInvs), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT) {
    ExclInvs.insert(NewExcl);
  }
};
} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
                             const DataLayout &DL, unsigned Depth,
                             const Query &Q);

void llvm::computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT) {
  ::computeKnownBits(V, KnownZero, KnownOne, DL, Depth,
                     Query(AC, safeCxtI(V, CxtI), DT));
}

bool llvm::haveNoCommonBitsSet(Value *LHS, Value *RHS, const DataLayout &DL,
                               AssumptionCache *AC, const Instruction *CxtI,
                               const DominatorTree *DT) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  APInt LHSKnownZero(IT->getBitWidth(), 0), LHSKnownOne(IT->getBitWidth(), 0);
  APInt RHSKnownZero(IT->getBitWidth(), 0), RHSKnownOne(IT->getBitWidth(), 0);
  computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, 0, AC, CxtI, DT);
  computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, 0, AC, CxtI, DT);
  return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
}

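// For example, given i8 values X = (A & 0xF0) and Y = (B & 0x0F), every bit
// position is known zero in at least one of X and Y, so haveNoCommonBitsSet
// returns true and X + Y may be rewritten as X | Y.
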
static void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
                           const DataLayout &DL, unsigned Depth,
                           const Query &Q);

void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
                          const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  ::ComputeSignBit(V, KnownZero, KnownOne, DL, Depth,
                   Query(AC, safeCxtI(V, CxtI), DT));
}

static bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q, const DataLayout &DL);

bool llvm::isKnownToBeAPowerOfTwo(Value *V, const DataLayout &DL, bool OrZero,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
                                  Query(AC, safeCxtI(V, CxtI), DT), DL);
}

static bool isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
                           const Query &Q);

bool llvm::isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  return ::isKnownNonZero(V, DL, Depth, Query(AC, safeCxtI(V, CxtI), DT));
}

bool llvm::isKnownNonNegative(Value *V, const DataLayout &DL, unsigned Depth,
                              AssumptionCache *AC, const Instruction *CxtI,
                              const DominatorTree *DT) {
  bool NonNegative, Negative;
  ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
  return NonNegative;
}

static bool isKnownNonEqual(Value *V1, Value *V2, const DataLayout &DL,
                            const Query &Q);

bool llvm::isKnownNonEqual(Value *V1, Value *V2, const DataLayout &DL,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  return ::isKnownNonEqual(V1, V2, DL, Query(AC,
                                             safeCxtI(V1, safeCxtI(V2, CxtI)),
                                             DT));
}

static bool MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
                              unsigned Depth, const Query &Q);

bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
                             unsigned Depth, AssumptionCache *AC,
                             const Instruction *CxtI, const DominatorTree *DT) {
  return ::MaskedValueIsZero(V, Mask, DL, Depth,
                             Query(AC, safeCxtI(V, CxtI), DT));
}

static unsigned ComputeNumSignBits(Value *V, const DataLayout &DL,
                                   unsigned Depth, const Query &Q);

unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::ComputeNumSignBits(V, DL, Depth, Query(AC, safeCxtI(V, CxtI), DT));
}

static void computeKnownBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
                                   APInt &KnownZero, APInt &KnownOne,
                                   APInt &KnownZero2, APInt &KnownOne2,
                                   const DataLayout &DL, unsigned Depth,
                                   const Query &Q) {
  if (!Add) {
    if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
      // We know that the top bits of C-X are clear if X contains fewer bits
      // than C (i.e. no wrap-around can happen). For example, 20-X is
      // positive if we can prove that X is >= 0 and < 16.
      if (!CLHS->getValue().isNegative()) {
        unsigned BitWidth = KnownZero.getBitWidth();
        unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
        // NLZ can't be BitWidth with no sign bit
        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
        computeKnownBits(Op1, KnownZero2, KnownOne2, DL, Depth + 1, Q);

        // If all of the MaskV bits are known to be zero, then we know the
        // output top bits are zero, because we now know that the output is
        // from [0-C].
        if ((KnownZero2 & MaskV) == MaskV) {
          unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
          // Top bits known zero.
          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
        }
      }
    }
  }

  unsigned BitWidth = KnownZero.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
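  // The trick below evaluates the sum at both extremes: PossibleSumOne
  // assumes every unknown operand bit is zero (the minimum possible addends),
  // PossibleSumZero assumes every unknown bit is one (the maximum). A result
  // bit is known exactly where both operand bits and the incoming carry bit
  // are known, and at those positions the two extreme sums agree (checked by
  // the assert below). For example, adding a value known to be 0bxx00 to one
  // known to be 0bxx10 gives a result whose low two bits are known to be 10,
  // whatever the unknown top bits are.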
  APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
  computeKnownBits(Op0, LHSKnownZero, LHSKnownOne, DL, Depth + 1, Q);
  computeKnownBits(Op1, KnownZero2, KnownOne2, DL, Depth + 1, Q);

  // Carry in a 1 for a subtract, rather than a 0.
  APInt CarryIn(BitWidth, 0);
  if (!Add) {
    // Sum = LHS + ~RHS + 1
    std::swap(KnownZero2, KnownOne2);
    CarryIn.setBit(0);
  }

  APInt PossibleSumZero = ~LHSKnownZero + ~KnownZero2 + CarryIn;
  APInt PossibleSumOne = LHSKnownOne + KnownOne2 + CarryIn;

  // Compute known bits of the carry.
  APInt CarryKnownZero = ~(PossibleSumZero ^ LHSKnownZero ^ KnownZero2);
  APInt CarryKnownOne = PossibleSumOne ^ LHSKnownOne ^ KnownOne2;

  // Compute set of known bits (where all three relevant bits are known).
  APInt LHSKnown = LHSKnownZero | LHSKnownOne;
  APInt RHSKnown = KnownZero2 | KnownOne2;
  APInt CarryKnown = CarryKnownZero | CarryKnownOne;
  APInt Known = LHSKnown & RHSKnown & CarryKnown;

  assert((PossibleSumZero & Known) == (PossibleSumOne & Known) &&
         "known bits of sum differ");

  // Compute known bits of the result.
  KnownZero = ~PossibleSumOne & Known;
  KnownOne = PossibleSumOne & Known;

  // Are we still trying to solve for the sign bit?
  if (!Known.isNegative()) {
    if (NSW) {
      // Adding two non-negative numbers, or subtracting a negative number from
      // a non-negative one, can't wrap into negative.
      if (LHSKnownZero.isNegative() && KnownZero2.isNegative())
        KnownZero |= APInt::getSignBit(BitWidth);
      // Adding two negative numbers, or subtracting a non-negative number from
      // a negative one, can't wrap into non-negative.
      else if (LHSKnownOne.isNegative() && KnownOne2.isNegative())
        KnownOne |= APInt::getSignBit(BitWidth);
    }
  }
}

static void computeKnownBitsMul(Value *Op0, Value *Op1, bool NSW,
                                APInt &KnownZero, APInt &KnownOne,
                                APInt &KnownZero2, APInt &KnownOne2,
                                const DataLayout &DL, unsigned Depth,
                                const Query &Q) {
  unsigned BitWidth = KnownZero.getBitWidth();
  computeKnownBits(Op1, KnownZero, KnownOne, DL, Depth + 1, Q);
  computeKnownBits(Op0, KnownZero2, KnownOne2, DL, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = KnownZero.isNegative();
      bool isKnownNonNegativeOp0 = KnownZero2.isNegative();
      bool isKnownNegativeOp1 = KnownOne.isNegative();
      bool isKnownNegativeOp0 = KnownOne2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, DL, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, DL, Depth, Q));
    }
  }

  // If low bits are zero in either operand, output low known-0 bits.
  // Also compute a conservative estimate for high known-0 bits.
  // More trickiness is possible, but this is sufficient for the
  // interesting case of alignment computation.
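  // For example, if Op0 is a multiple of 4 (two trailing zero bits) and Op1
  // is a multiple of 8 (three trailing zero bits), the product is a multiple
  // of 32, so five low bits of the result are known to be zero.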
  KnownOne.clearAllBits();
  unsigned TrailZ = KnownZero.countTrailingOnes() +
                    KnownZero2.countTrailingOnes();
  unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
                            KnownZero2.countLeadingOnes(),
                            BitWidth) - BitWidth;

  TrailZ = std::min(TrailZ, BitWidth);
  LeadZ = std::min(LeadZ, BitWidth);
  KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
              APInt::getHighBitsSet(BitWidth, LeadZ);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !KnownOne.isNegative())
    KnownZero.setBit(BitWidth - 1);
  else if (isKnownNegative && !KnownZero.isNegative())
    KnownOne.setBit(BitWidth - 1);
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             APInt &KnownZero,
                                             APInt &KnownOne) {
  unsigned BitWidth = KnownZero.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  KnownZero.setAllBits();
  KnownOne.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();

    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    KnownOne &= Range.getUnsignedMax() & Mask;
    KnownZero &= ~Range.getUnsignedMax() & Mask;
  }
}

static bool isEphemeralValueOf(Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (std::find(I->op_begin(), I->op_end(), E) != I->op_end())
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (std::all_of(V->user_begin(), V->user_end(),
                    [&](const User *U) { return EphValues.count(U); })) {
      if (V == E)
        return true;

      EphValues.insert(V);
      if (const User *U = dyn_cast<User>(V))
        for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
             J != JE; ++J) {
          if (isSafeToSpeculativelyExecute(*J))
            WorkSet.push_back(*J);
        }
    }
  }

  return false;
}

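// As an illustration, in:
//   %cmp = icmp eq i32 %x, 42
//   call void @llvm.assume(i1 %cmp)
// %cmp exists only to feed the assume, so it is ephemeral to it; treating it
// as a regular use would let the assume "prove" its own condition.
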
// Is this an intrinsic that cannot be speculated but also cannot trap?
static bool isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}

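// A sketch of the restriction checked below: given
//   bb0:
//     call void @llvm.assume(i1 %cond)
//     br label %bb1
//   bb1:
//     %v = add i32 %x, 1   ; context instruction
// the assume dominates the context instruction, so facts implied by %cond
// may be used while analyzing %v; with the blocks reversed they may not.
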
static bool isValidAssumeForContext(Value *V, const Query &Q) {
  Instruction *Inv = cast<Instruction>(V);

  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (Q.DT) {
    if (Q.DT->dominates(Inv, Q.CxtI)) {
      return true;
    } else if (Inv->getParent() == Q.CxtI->getParent()) {
      // The context comes first, but they're both in the same block. Make sure
      // there is nothing in between that might interrupt the control flow.
      for (BasicBlock::const_iterator I =
             std::next(BasicBlock::const_iterator(Q.CxtI)),
                                      IE(Inv); I != IE; ++I)
        if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
          return false;

      return !isEphemeralValueOf(Inv, Q.CxtI);
    }

    return false;
  }

  // When we don't have a DT, we do a limited search...
  if (Inv->getParent() == Q.CxtI->getParent()->getSinglePredecessor()) {
    return true;
  } else if (Inv->getParent() == Q.CxtI->getParent()) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (BasicBlock::iterator I = std::next(BasicBlock::iterator(Inv)),
         IE = Inv->getParent()->end(); I != IE; ++I)
      if (&*I == Q.CxtI)
        return true;

    // The context must come first...
    for (BasicBlock::const_iterator I =
           std::next(BasicBlock::const_iterator(Q.CxtI)),
                                    IE(Inv); I != IE; ++I)
      if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
        return false;

    return !isEphemeralValueOf(Inv, Q.CxtI);
  }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *I,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  return ::isValidAssumeForContext(const_cast<Instruction *>(I),
                                   Query(nullptr, CxtI, DT));
}

template<typename LHS, typename RHS>
inline match_combine_or<CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>,
                        CmpClass_match<RHS, LHS, ICmpInst, ICmpInst::Predicate>>
m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
  return m_CombineOr(m_ICmp(Pred, L, R), m_ICmp(Pred, R, L));
}

template<typename LHS, typename RHS>
inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::And>,
                        BinaryOp_match<RHS, LHS, Instruction::And>>
m_c_And(const LHS &L, const RHS &R) {
  return m_CombineOr(m_And(L, R), m_And(R, L));
}

template<typename LHS, typename RHS>
inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::Or>,
                        BinaryOp_match<RHS, LHS, Instruction::Or>>
m_c_Or(const LHS &L, const RHS &R) {
  return m_CombineOr(m_Or(L, R), m_Or(R, L));
}

template<typename LHS, typename RHS>
inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::Xor>,
                        BinaryOp_match<RHS, LHS, Instruction::Xor>>
m_c_Xor(const LHS &L, const RHS &R) {
  return m_CombineOr(m_Xor(L, R), m_Xor(R, L));
}

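// The m_c_* matchers above try the pattern with the operands in both orders.
// For example, m_c_ICmp(Pred, m_Specific(V), m_Value(A)) matches both
// "icmp eq %v, %a" and "icmp eq %a, %v", binding A to whichever operand is
// not V. The predicate is bound as written rather than swapped, so this is
// only safe for symmetric predicates such as ICMP_EQ, which is what the
// assume-handling code below checks for.
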
/// Compute known bits in 'V' under the assumption that the condition 'Cmp' is
/// true (at the context instruction.) This is mostly a utility function for
/// the prototype dominating conditions reasoning below.
static void computeKnownBitsFromTrueCondition(Value *V, ICmpInst *Cmp,
                                              APInt &KnownZero,
                                              APInt &KnownOne,
                                              const DataLayout &DL,
                                              unsigned Depth, const Query &Q) {
  Value *LHS = Cmp->getOperand(0);
  Value *RHS = Cmp->getOperand(1);
  // TODO: We could potentially be more aggressive here. This would be worth
  // evaluating. If we can, explore commoning this code with the assume
  // handling logic.
  if (LHS != V && RHS != V)
    return;

  const unsigned BitWidth = KnownZero.getBitWidth();

  switch (Cmp->getPredicate()) {
  default:
    // We know nothing from this condition
    break;
  // TODO: implement unsigned bound from below (known one bits)
  // TODO: common condition check implementations with assumes
  // TODO: implement other patterns from assume (e.g. V & B == A)
  case ICmpInst::ICMP_SGT:
    if (LHS == V) {
      APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0);
      computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
      if (KnownOneTemp.isAllOnesValue() || KnownZeroTemp.isNegative()) {
        // We know that the sign bit is zero.
        KnownZero |= APInt::getSignBit(BitWidth);
      }
    }
    break;
  case ICmpInst::ICMP_EQ:
    {
      APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0);
      if (LHS == V)
        computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
      else if (RHS == V)
        computeKnownBits(LHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
      else
        llvm_unreachable("missing use?");
      KnownZero |= KnownZeroTemp;
      KnownOne |= KnownOneTemp;
    }
    break;
  case ICmpInst::ICMP_ULE:
    if (LHS == V) {
      APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0);
      computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
      // The known zero bits carry over
      unsigned SignBits = KnownZeroTemp.countLeadingOnes();
      KnownZero |= APInt::getHighBitsSet(BitWidth, SignBits);
    }
    break;
  case ICmpInst::ICMP_ULT:
    if (LHS == V) {
      APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0);
      computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
      // Whatever high bits in rhs are zero are known to be zero (if rhs is a
      // power of 2, then one more).
      unsigned SignBits = KnownZeroTemp.countLeadingOnes();
      if (isKnownToBeAPowerOfTwo(RHS, false, Depth + 1, Query(Q, Cmp), DL))
        SignBits++;
      KnownZero |= APInt::getHighBitsSet(BitWidth, SignBits);
    }
    break;
  };
}

/// Compute known bits in 'V' from conditions which are known to be true along
/// all paths leading to the context instruction. In particular, look for
/// cases where one branch of an interesting condition dominates the context
/// instruction. This does not do general dataflow.
/// NOTE: This code is EXPERIMENTAL and currently off by default.
static void computeKnownBitsFromDominatingCondition(Value *V, APInt &KnownZero,
                                                    APInt &KnownOne,
                                                    const DataLayout &DL,
                                                    unsigned Depth,
                                                    const Query &Q) {
  // Need both the dominator tree and the query location to do anything useful
  if (!Q.DT || !Q.CxtI)
    return;
  Instruction *Cxt = const_cast<Instruction *>(Q.CxtI);
  // The context instruction might be in a statically unreachable block. If
  // so, asking dominator queries may yield surprising results. (e.g. the
  // block may not have a dom tree node)
  if (!Q.DT->isReachableFromEntry(Cxt->getParent()))
    return;

  // Avoid useless work
  if (auto VI = dyn_cast<Instruction>(V))
    if (VI->getParent() == Cxt->getParent())
      return;

  // Note: We currently implement two options. It's not clear which of these
  // will survive long term, we need data for that.
  // Option 1 - Try walking the dominator tree looking for conditions which
  // might apply. This works well for local conditions (loop guards, etc..),
  // but not as well for things far from the context instruction (presuming a
  // low max blocks explored). If we can set a high enough limit, this would
  // be all we need.
  // Option 2 - We restrict our search to those conditions which are uses of
  // the value we're interested in. This is independent of dom structure,
  // but is slightly less powerful without looking through lots of use chains.
  // It does handle conditions far from the context instruction (e.g. early
  // function exits on entry) really well though.
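  // For example, in (pseudo-IR)
  //   %cmp = icmp ult i32 %x, 16
  //   br i1 %cmp, label %then, label %else
  // the true edge into %then dominates everything in %then, so within %then
  // the upper 28 bits of %x are known to be zero.
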
  // Option 1 - Search the dom tree
  unsigned NumBlocksExplored = 0;
  BasicBlock *Current = Cxt->getParent();
  while (true) {
    // Stop searching if we've gone too far up the chain
    if (NumBlocksExplored >= DomConditionsMaxDomBlocks)
      break;
    NumBlocksExplored++;

    if (!Q.DT->getNode(Current)->getIDom())
      break;
    Current = Q.DT->getNode(Current)->getIDom()->getBlock();
    if (!Current)
      // found function entry
      break;

    BranchInst *BI = dyn_cast<BranchInst>(Current->getTerminator());
    if (!BI || BI->isUnconditional())
      continue;
    ICmpInst *Cmp = dyn_cast<ICmpInst>(BI->getCondition());
    if (!Cmp)
      continue;

    // We're looking for conditions that are guaranteed to hold at the context
    // instruction. Finding a condition where one path dominates the context
    // isn't enough because both the true and false cases could merge before
    // the context instruction we're actually interested in. Instead, we need
    // to ensure that the taken *edge* dominates the context instruction. We
    // know that the edge must be reachable since we started from a reachable
    // block.
    BasicBlock *BB0 = BI->getSuccessor(0);
    BasicBlockEdge Edge(BI->getParent(), BB0);
    if (!Edge.isSingleEdge() || !Q.DT->dominates(Edge, Q.CxtI->getParent()))
      continue;

    computeKnownBitsFromTrueCondition(V, Cmp, KnownZero, KnownOne, DL, Depth,
                                      Q);
  }

  // Option 2 - Search the other uses of V
  unsigned NumUsesExplored = 0;
  for (auto U : V->users()) {
    // Avoid massive lists
    if (NumUsesExplored >= DomConditionsMaxUses)
      break;
    NumUsesExplored++;
    // Consider only compare instructions uniquely controlling a branch
    ICmpInst *Cmp = dyn_cast<ICmpInst>(U);
    if (!Cmp)
      continue;

    if (DomConditionsSingleCmpUse && !Cmp->hasOneUse())
      continue;

    for (auto *CmpU : Cmp->users()) {
      BranchInst *BI = dyn_cast<BranchInst>(CmpU);
      if (!BI || BI->isUnconditional())
        continue;
      // We're looking for conditions that are guaranteed to hold at the
      // context instruction. Finding a condition where one path dominates
      // the context isn't enough because both the true and false cases could
      // merge before the context instruction we're actually interested in.
      // Instead, we need to ensure that the taken *edge* dominates the context
      // instruction.
      BasicBlock *BB0 = BI->getSuccessor(0);
      BasicBlockEdge Edge(BI->getParent(), BB0);
      if (!Edge.isSingleEdge() || !Q.DT->dominates(Edge, Q.CxtI->getParent()))
        continue;

      computeKnownBitsFromTrueCondition(V, Cmp, KnownZero, KnownOne, DL, Depth,
                                        Q);
    }
  }
}

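// As a concrete illustration of what the routine below derives: after
//   %cmp = icmp eq i8 %x, 24
//   call void @llvm.assume(i1 %cmp)
// the known bits of %x, at any context for which the assume is valid, become
// exactly 0b00011000.
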
static void computeKnownBitsFromAssume(Value *V, APInt &KnownZero,
                                       APInt &KnownOne, const DataLayout &DL,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = KnownZero.getBitWidth();

  for (auto &AssumeVH : Q.AC->assumptions()) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.ExclInvs.count(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      KnownZero.clearAllBits();
      KnownOne.setAllBits();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V),
                           m_CombineOr(m_PtrToInt(m_Specific(V)),
                                       m_BitCast(m_Specific(V))));

    CmpInst::Predicate Pred;
    ConstantInt *C;
    // assume(v = a)
    if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
        Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      KnownZero |= RHSKnownZero;
      KnownOne |= RHSKnownOne;
    // assume(v & b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
      computeKnownBits(B, MaskKnownZero, MaskKnownOne, DL, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
      KnownZero |= RHSKnownZero & MaskKnownOne;
      KnownOne |= RHSKnownOne & MaskKnownOne;
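      // For example, given assume((v & 14) == 4), the mask 14 (0b1110) has
      // bits 1-3 known one, so bits 1-3 of v are known to be 0b010; bit 0
      // remains unknown.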
    // assume(~(v & b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
      computeKnownBits(B, MaskKnownZero, MaskKnownOne, DL, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      KnownZero |= RHSKnownOne & MaskKnownOne;
      KnownOne |= RHSKnownZero & MaskKnownOne;
    // assume(v | b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      KnownZero |= RHSKnownZero & BKnownZero;
      KnownOne |= RHSKnownOne & BKnownZero;
    // assume(~(v | b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      KnownZero |= RHSKnownOne & BKnownZero;
      KnownOne |= RHSKnownZero & BKnownZero;
    // assume(v ^ b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
      KnownZero |= RHSKnownZero & BKnownZero;
      KnownOne |= RHSKnownOne & BKnownZero;
      KnownZero |= RHSKnownOne & BKnownOne;
      KnownOne |= RHSKnownZero & BKnownOne;
    // assume(~(v ^ b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      KnownZero |= RHSKnownOne & BKnownZero;
      KnownOne |= RHSKnownZero & BKnownZero;
      KnownZero |= RHSKnownZero & BKnownOne;
      KnownOne |= RHSKnownOne & BKnownOne;
    // assume(v << c = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them, shifted
      // right by C, to known bits in V.
      KnownZero |= RHSKnownZero.lshr(C->getZExtValue());
      KnownOne |= RHSKnownOne.lshr(C->getZExtValue());
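      // For example, given assume((v << 2) == 20) on i8, a is 0b00010100, so
      // shifting its known bits right by 2 makes the low six bits of v known
      // to be 0b000101; the two bits shifted out of v remain unknown.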
    // assume(~(v << c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them, inverted
      // and shifted right by C, to known bits in V.
      KnownZero |= RHSKnownOne.lshr(C->getZExtValue());
      KnownOne |= RHSKnownZero.lshr(C->getZExtValue());
    // assume(v >> c = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)),
                                                m_AShr(m_V, m_ConstantInt(C))),
                              m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them, shifted
      // left by C, to known bits in V.
      KnownZero |= RHSKnownZero << C->getZExtValue();
      KnownOne |= RHSKnownOne << C->getZExtValue();
    // assume(~(v >> c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_CombineOr(
                                             m_LShr(m_V, m_ConstantInt(C)),
                                             m_AShr(m_V, m_ConstantInt(C)))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them, inverted
      // and shifted left by C, to known bits in V.
      KnownZero |= RHSKnownOne << C->getZExtValue();
      KnownOne |= RHSKnownZero << C->getZExtValue();
    // assume(v >=_s c) where c is non-negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGE && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));

      if (RHSKnownZero.isNegative()) {
        // We know that the sign bit is zero.
        KnownZero |= APInt::getSignBit(BitWidth);
      }
    // assume(v >_s c) where c is at least -1.
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGT && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));

      if (RHSKnownOne.isAllOnesValue() || RHSKnownZero.isNegative()) {
        // We know that the sign bit is zero.
        KnownZero |= APInt::getSignBit(BitWidth);
      }
    // assume(v <=_s c) where c is negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLE && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));

      if (RHSKnownOne.isNegative()) {
        // We know that the sign bit is one.
        KnownOne |= APInt::getSignBit(BitWidth);
      }
    // assume(v <_s c) where c is non-positive
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLT && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));

      if (RHSKnownZero.isAllOnesValue() || RHSKnownOne.isNegative()) {
        // We know that the sign bit is one.
        KnownOne |= APInt::getSignBit(BitWidth);
      }
    // assume(v <=_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULE && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero.
      KnownZero |=
        APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
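      // For example, given assume(v <=_u 15) on i8, c's four leading zeros
      // carry over, so the high four bits of v are known to be zero.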
    // assume(v <_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULT && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero (if c is a
      // power of 2, then one more).
      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I), DL))
        KnownZero |=
          APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes()+1);
      else
        KnownZero |=
          APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
    }
  }
}

// Compute known bits from a shift operator, including those with a
// non-constant shift amount. KnownZero and KnownOne are the outputs of this
// function. KnownZero2 and KnownOne2 are pre-allocated temporaries with the
// same bit width as KnownZero and KnownOne. KZF and KOF are operator-specific
// functors that, given the known-zero or known-one bits respectively, and a
// shift amount, compute the implied known-zero or known-one bits of the shift
// operator's result respectively for that shift amount. The results from
// calling KZF and KOF are conservatively combined for all permitted shift
// amounts.
template <typename KZFunctor, typename KOFunctor>
static void computeKnownBitsFromShiftOperator(Operator *I,
    APInt &KnownZero, APInt &KnownOne,
    APInt &KnownZero2, APInt &KnownOne2,
    const DataLayout &DL, unsigned Depth, const Query &Q,
    KZFunctor KZF, KOFunctor KOF) {
  unsigned BitWidth = KnownZero.getBitWidth();

  if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);

    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
    KnownZero = KZF(KnownZero, ShiftAmt);
    KnownOne = KOF(KnownOne, ShiftAmt);
    return;
  }

  computeKnownBits(I->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, Q);

  // Note: We cannot use KnownZero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = KnownZero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = KnownOne.zextOrTrunc(64).getZExtValue();

  // It would be more clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  KnownZero.clearAllBits(), KnownOne.clearAllBits();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

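  // For example, if bit 0 of the shift amount is known to be set (ShiftAmtKO
  // has bit 0 set), only odd shift amounts are combined in the loop below; if
  // it is known to be clear (ShiftAmtKZ has bit 0 set), only even ones are.
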
  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (BitWidth - 1)) && !(ShiftAmtKO & (BitWidth - 1))) {
    ShifterOperandIsNonZero =
        isKnownNonZero(I->getOperand(1), DL, Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q);

  KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), DL, Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    KnownZero &= KZF(KnownZero2, ShiftAmt);
    KnownOne &= KOF(KnownOne2, ShiftAmt);
  }

  // If there are no compatible shift amounts, then we've proven that the shift
  // amount must be >= the BitWidth, and the result is undefined. We could
  // return anything we'd like, but we need to make sure the sets of known bits
  // stay disjoint (it should be better for some other code to actually
  // propagate the undef than to pick a value here using known bits).
  if ((KnownZero & KnownOne) != 0)
    KnownZero.clearAllBits(), KnownOne.clearAllBits();
}

static void computeKnownBitsFromOperator(Operator *I, APInt &KnownZero,
                                         APInt &KnownOne, const DataLayout &DL,
                                         unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownZero.getBitWidth();

  APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form add(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
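    // For example, x & (x + 1) always has a clear low bit: whichever value
    // the low bit of x takes, the low bit of x + 1 is its complement, so
    // their AND is zero there.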
    Value *Y = nullptr;
    if (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
                                      m_Value(Y))) ||
        match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
                                      m_Value(Y)))) {
      APInt KnownZero3(BitWidth, 0), KnownOne3(BitWidth, 0);
      computeKnownBits(Y, KnownZero3, KnownOne3, DL, Depth + 1, Q);
      if (KnownOne3.countTrailingOnes() > 0)
        KnownZero |= APInt::getLowBitsSet(BitWidth, 1);
    }
    break;
  }
  case Instruction::Or: {
    computeKnownBits(I->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    break;
  }
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    break;
  }
  case Instruction::Mul: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, KnownZero,
                        KnownOne, KnownZero2, KnownOne2, DL, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q);
    unsigned LeadZ = KnownZero2.countLeadingOnes();

    KnownOne2.clearAllBits();
    KnownZero2.clearAllBits();
    computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, DL, Depth + 1, Q);
    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
    if (RHSUnknownLeadingOnes != BitWidth)
      LeadZ = std::min(BitWidth,
                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);

    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
    break;
  }
  case Instruction::Select:
    computeKnownBits(I->getOperand(2), KnownZero, KnownOne, DL, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, DL, Depth + 1, Q);

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::AddrSpaceCast: // Pointers could be different sizes.
    // FALL THROUGH and handle them the same as zext/trunc.
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
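    // For example, for zext i8 %x to i32 the operand's bits are computed at
    // width 8 and then extended, leaving the top 24 bits of the result known
    // zero.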
    SrcBitWidth = DL.getTypeSizeInBits(SrcTy->getScalarType());

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
    KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
    KnownZero = KnownZero.zextOrTrunc(BitWidth);
    KnownOne = KnownOne.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy() ||
         SrcTy->isFloatingPointTy()) &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    KnownZero = KnownZero.trunc(SrcBitWidth);
    KnownOne = KnownOne.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    if (KnownZero[SrcBitWidth-1])   // Input sign bit known zero
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    else if (KnownOne[SrcBitWidth-1]) // Input sign bit known set
      KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    break;
  }
  case Instruction::Shl: {
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
      return (KnownZero << ShiftAmt) |
             APInt::getLowBitsSet(BitWidth, ShiftAmt); // Low bits known 0.
    };

    auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne << ShiftAmt;
    };

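    // For example, with ShiftAmt == 3 these functors slide the operand's
    // known bits up by three positions and mark the three low bits of the
    // result as known zero.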
    computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
                                      KnownZero2, KnownOne2, DL, Depth, Q,
                                      KZF, KOF);
    break;
  }
  case Instruction::LShr: {
    // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
      return APIntOps::lshr(KnownZero, ShiftAmt) |
             // High bits known zero.
             APInt::getHighBitsSet(BitWidth, ShiftAmt);
    };

    auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
      return APIntOps::lshr(KnownOne, ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
                                      KnownZero2, KnownOne2, DL, Depth, Q,
                                      KZF, KOF);
    break;
  }
  case Instruction::AShr: {
    // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
      return APIntOps::ashr(KnownZero, ShiftAmt);
    };

    auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
      return APIntOps::ashr(KnownOne, ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
                                      KnownZero2, KnownOne2, DL, Depth, Q,
                                      KZF, KOF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           KnownZero, KnownOne, KnownZero2, KnownOne2, DL,
                           Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           KnownZero, KnownOne, KnownZero2, KnownOne2, DL,
                           Depth, Q);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL,
                         Depth + 1, Q);

        // The low bits of the first operand are unchanged by the srem.
        KnownZero = KnownZero2 & LowBits;
        KnownOne = KnownOne2 & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
          KnownZero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
          KnownOne |= ~LowBits;

        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    if (KnownZero.isNonNegative()) {
      APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
      computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, DL,
                       Depth + 1, Q);
      // If it's known zero, our sign bit is also zero.
      if (LHSKnownZero.isNegative())
        KnownZero.setBit(BitWidth - 1);
    }

    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1,
                         Q);
        KnownZero |= ~LowBits;
        KnownOne &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
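    // For example, if both urem operands are known to fit in eight bits at
    // width i32, the result does too, so its top 24 bits are known zero.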
    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, DL, Depth + 1, Q);

    unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
    KnownOne.clearAllBits();
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
    break;
  }

  case Instruction::Alloca: {
    AllocaInst *AI = cast<AllocaInst>(I);
    unsigned Align = AI->getAlignment();
    if (Align == 0)
      Align = DL.getABITypeAlignment(AI->getType()->getElementType());

    if (Align > 0)
      KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
    computeKnownBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, DL,
                     Depth + 1, Q);
    unsigned TrailZ = LocalKnownZero.countTrailingOnes();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        // Handle struct member offset arithmetic.

        // Handle case when index is vector zeroinitializer
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) {
          TrailZ = 0;
          break;
        }
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = DL.getTypeAllocSize(IndexedTy);
        LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
        computeKnownBits(Index, LocalKnownZero, LocalKnownOne, DL, Depth + 1,
                         Q);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnownZero.countTrailingOnes()));
      }
    }

    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ);
    break;
  }
  case Instruction::PHI: {
    PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
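          // For example, for the loop counter
          //   %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
          //   %i.next = add i32 %i, 4
          // both contributions have at least two trailing zero bits, so %i
          // is known to be a multiple of 4.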
1419 if (LL == I)
1420 L = LR;
1421 else if (LR == I)
1422 L = LL;
1423 else
1424 break;
1425 // Ok, we have a PHI of the form L op= R. Check for low
1426 // zero bits.
1427 computeKnownBits(R, KnownZero2, KnownOne2, DL, Depth + 1, Q);
1428
1429 // We need to take the minimum number of known low zero bits.
1430 APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
1431 computeKnownBits(L, KnownZero3, KnownOne3, DL, Depth + 1, Q);
1432
1433 KnownZero = APInt::getLowBitsSet(BitWidth,
1434 std::min(KnownZero2.countTrailingOnes(),
1435 KnownZero3.countTrailingOnes()));
1436 break;
1437 }
1438 }
1439 }
1440
1441 // Unreachable blocks may have zero-operand PHI nodes.
1442 if (P->getNumIncomingValues() == 0)
1443 break;
1444
1445 // Otherwise take the intersection of the known bit sets of the operands,
1446 // taking conservative care to avoid excessive recursion.
1447 if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
1448 // Skip if every incoming value refers to the PHI node itself.
1449 if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1450 break;
1451
1452 KnownZero = APInt::getAllOnesValue(BitWidth);
1453 KnownOne = APInt::getAllOnesValue(BitWidth);
1454 for (Value *IncValue : P->incoming_values()) {
1455 // Skip direct self references.
1456 if (IncValue == P) continue;
1457
1458 KnownZero2 = APInt(BitWidth, 0);
1459 KnownOne2 = APInt(BitWidth, 0);
1460 // Recurse, but cap the recursion to one level, because we don't
1461 // want to waste time spinning around in loops.
1462 computeKnownBits(IncValue, KnownZero2, KnownOne2, DL,
1463 MaxDepth - 1, Q);
1464 KnownZero &= KnownZero2;
1465 KnownOne &= KnownOne2;
1466 // If all bits have been ruled out, there's no need to check
1467 // more operands.
1468 if (!KnownZero && !KnownOne)
1469 break;
1470 }
1471 }
1472 break;
1473 }
1474 case Instruction::Call:
1475 case Instruction::Invoke:
1476 if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
1477 computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
1478 // If range metadata is attached to this IntrinsicInst, intersect the
1479 // explicit range specified by the metadata and the implicit range of
1480 // the intrinsic.
1481 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1482 switch (II->getIntrinsicID()) {
1483 default: break;
1484 case Intrinsic::bswap:
1485 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL,
1486 Depth + 1, Q);
1487 KnownZero |= KnownZero2.byteSwap();
1488 KnownOne |= KnownOne2.byteSwap();
1489 break;
1490 case Intrinsic::ctlz:
1491 case Intrinsic::cttz: {
1492 unsigned LowBits = Log2_32(BitWidth)+1;
1493 // If this call is undefined for 0, the result will be less than 2^n.
1494 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1495 LowBits -= 1;
1496 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
1497 break;
1498 }
1499 case Intrinsic::ctpop: {
1500 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL,
1501 Depth + 1, Q);
1502 // We can bound the space the count needs. Also, bits known to be zero
1503 // can't contribute to the population.
1504 unsigned BitsPossiblySet = BitWidth - KnownZero2.countPopulation();
1505 unsigned LeadingZeros =
1506 APInt(BitWidth, BitsPossiblySet).countLeadingZeros();
1507 assert(LeadingZeros <= BitWidth);
1508 KnownZero |= APInt::getHighBitsSet(BitWidth, LeadingZeros);
1509 KnownOne &= ~KnownZero;
1510 // TODO: we could bound KnownOne using the lower bound on the number
1511 // of bits which might be set provided by popcnt KnownOne2.
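// Worked example for illustration: for i32 with 24 bits of the operand
// known zero, at most 8 bits can be set, so the count is u<= 8 and fits
// in 4 bits; the top 28 bits of the ctpop result are known zero.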
1512 break;
1513 }
1514 case Intrinsic::fabs: {
1515 Type *Ty = II->getType();
1516 APInt SignBit = APInt::getSignBit(Ty->getScalarSizeInBits());
1517 KnownZero |= APInt::getSplat(Ty->getPrimitiveSizeInBits(), SignBit);
1518 break;
1519 }
1520 case Intrinsic::x86_sse42_crc32_64_64:
1521 KnownZero |= APInt::getHighBitsSet(64, 32);
1522 break;
1523 }
1524 }
1525 break;
1526 case Instruction::ExtractValue:
1527 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1528 ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1529 if (EVI->getNumIndices() != 1) break;
1530 if (EVI->getIndices()[0] == 0) {
1531 switch (II->getIntrinsicID()) {
1532 default: break;
1533 case Intrinsic::uadd_with_overflow:
1534 case Intrinsic::sadd_with_overflow:
1535 computeKnownBitsAddSub(true, II->getArgOperand(0),
1536 II->getArgOperand(1), false, KnownZero,
1537 KnownOne, KnownZero2, KnownOne2, DL, Depth, Q);
1538 break;
1539 case Intrinsic::usub_with_overflow:
1540 case Intrinsic::ssub_with_overflow:
1541 computeKnownBitsAddSub(false, II->getArgOperand(0),
1542 II->getArgOperand(1), false, KnownZero,
1543 KnownOne, KnownZero2, KnownOne2, DL, Depth, Q);
1544 break;
1545 case Intrinsic::umul_with_overflow:
1546 case Intrinsic::smul_with_overflow:
1547 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1548 KnownZero, KnownOne, KnownZero2, KnownOne2, DL,
1549 Depth, Q);
1550 break;
1551 }
1552 }
1553 }
1554 }
1555 }
1556
1557 static unsigned getAlignment(const Value *V, const DataLayout &DL) {
1558 unsigned Align = 0;
1559 if (auto *GO = dyn_cast<GlobalObject>(V)) {
1560 Align = GO->getAlignment();
1561 if (Align == 0) {
1562 if (auto *GVar = dyn_cast<GlobalVariable>(GO)) {
1563 Type *ObjectType = GVar->getType()->getElementType();
1564 if (ObjectType->isSized()) {
1565 // If the object is defined in the current Module, we'll be giving
1566 // it the preferred alignment. Otherwise, we have to assume that it
1567 // may only have the minimum ABI alignment.
1568 if (GVar->isStrongDefinitionForLinker())
1569 Align = DL.getPreferredAlignment(GVar);
1570 else
1571 Align = DL.getABITypeAlignment(ObjectType);
1572 }
1573 }
1574 }
1575 } else if (const Argument *A = dyn_cast<Argument>(V)) {
1576 Align = A->getType()->isPointerTy() ? A->getParamAlignment() : 0;
1577
1578 if (!Align && A->hasStructRetAttr()) {
1579 // An sret parameter has at least the ABI alignment of the return type.
1580 Type *EltTy = cast<PointerType>(A->getType())->getElementType();
1581 if (EltTy->isSized())
1582 Align = DL.getABITypeAlignment(EltTy);
1583 }
1584 } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
1585 Align = AI->getAlignment();
1586 else if (auto CS = ImmutableCallSite(V))
1587 Align = CS.getAttributes().getParamAlignment(AttributeSet::ReturnIndex);
1588 else if (const LoadInst *LI = dyn_cast<LoadInst>(V))
1589 if (MDNode *MD = LI->getMetadata(LLVMContext::MD_align)) {
1590 ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
1591 Align = CI->getLimitedValue();
1592 }
1593
1594 return Align;
1595 }
1596
1597 /// Determine which bits of V are known to be either zero or one and return
1598 /// them in the KnownZero/KnownOne bit sets.
1599 ///
1600 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1601 /// we cannot optimize based on the assumption that it is zero without changing
1602 /// it to be an explicit zero. If we don't change it to zero, other code could
1603 /// be optimized based on the contradictory assumption that it is non-zero.
1604 /// Because instcombine aggressively folds operations with undef args anyway,
1605 /// this won't lose us code quality.
1606 ///
1607 /// This function is defined on values with integer type, values with pointer
1608 /// type, and vectors of integers. In the case
1609 /// where V is a vector, the known zero and known one values are the
1610 /// same width as the vector element, and the bit is set only if it is true
1611 /// for all of the elements in the vector.
1612 void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
1613 const DataLayout &DL, unsigned Depth, const Query &Q) {
1614 assert(V && "No Value?");
1615 assert(Depth <= MaxDepth && "Limit Search Depth");
1616 unsigned BitWidth = KnownZero.getBitWidth();
1617
1618 assert((V->getType()->isIntOrIntVectorTy() ||
1619 V->getType()->isFPOrFPVectorTy() ||
1620 V->getType()->getScalarType()->isPointerTy()) &&
1621 "Not integer, floating point, or pointer type!");
1622 assert((DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
1623 (!V->getType()->isIntOrIntVectorTy() ||
1624 V->getType()->getScalarSizeInBits() == BitWidth) &&
1625 KnownZero.getBitWidth() == BitWidth &&
1626 KnownOne.getBitWidth() == BitWidth &&
1627 "V, KnownOne and KnownZero should have same BitWidth");
1628
1629 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
1630 // We know all of the bits for a constant!
1631 KnownOne = CI->getValue();
1632 KnownZero = ~KnownOne;
1633 return;
1634 }
1635 // Null and aggregate-zero are all-zeros.
1636 if (isa<ConstantPointerNull>(V) ||
1637 isa<ConstantAggregateZero>(V)) {
1638 KnownOne.clearAllBits();
1639 KnownZero = APInt::getAllOnesValue(BitWidth);
1640 return;
1641 }
1642 // Handle a constant vector by taking the intersection of the known bits of
1643 // each element. There is no real need to handle ConstantVector here, because
1644 // we don't handle undef in any particularly useful way.
1645 if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1646 // We know that CDS must be a vector of integers. Take the intersection of
1647 // each element.
1648 KnownZero.setAllBits(); KnownOne.setAllBits();
1649 APInt Elt(KnownZero.getBitWidth(), 0);
1650 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1651 Elt = CDS->getElementAsInteger(i);
1652 KnownZero &= ~Elt;
1653 KnownOne &= Elt;
1654 }
1655 return;
1656 }
1657
1658 // Start out not knowing anything.
1659 KnownZero.clearAllBits(); KnownOne.clearAllBits();
1660
1661 // Limit search depth.
1662 // All recursive calls that increase depth must come after this.
1663 if (Depth == MaxDepth)
1664 return;
1665
1666 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1667 // the bits of its aliasee.
1668 if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1669 if (!GA->mayBeOverridden())
1670 computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, DL, Depth + 1, Q);
1671 return;
1672 }
1673
1674 if (Operator *I = dyn_cast<Operator>(V))
1675 computeKnownBitsFromOperator(I, KnownZero, KnownOne, DL, Depth, Q);
1676
1677 // Aligned pointers have trailing zeros -- refine the KnownZero set.
1678 if (V->getType()->isPointerTy()) {
1679 unsigned Align = getAlignment(V, DL);
1680 if (Align)
1681 KnownZero |= APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
1682 }
1683
1684 // computeKnownBitsFromAssume and computeKnownBitsFromDominatingCondition
1685 // strictly refine KnownZero and KnownOne. Therefore, we run them after
1686 // computeKnownBitsFromOperator.
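// Sketch of the assume refinement, using hypothetical IR:
//   %c = icmp eq i8 %v, 24
//   call void @llvm.assume(i1 %c)
// A query on %v at a context dominated by the assume can then set
// KnownZero/KnownOne to the exact bit pattern of 24, on top of whatever
// the operator-based analysis proved.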
1687 1688 // Check whether a nearby assume intrinsic can determine some known bits. 1689 computeKnownBitsFromAssume(V, KnownZero, KnownOne, DL, Depth, Q); 1690 1691 // Check whether there's a dominating condition which implies something about 1692 // this value at the given context. 1693 if (EnableDomConditions && Depth <= DomConditionsMaxDepth) 1694 computeKnownBitsFromDominatingCondition(V, KnownZero, KnownOne, DL, Depth, 1695 Q); 1696 1697 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1698 } 1699 1700 /// Determine whether the sign bit is known to be zero or one. 1701 /// Convenience wrapper around computeKnownBits. 1702 void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne, 1703 const DataLayout &DL, unsigned Depth, const Query &Q) { 1704 unsigned BitWidth = getBitWidth(V->getType(), DL); 1705 if (!BitWidth) { 1706 KnownZero = false; 1707 KnownOne = false; 1708 return; 1709 } 1710 APInt ZeroBits(BitWidth, 0); 1711 APInt OneBits(BitWidth, 0); 1712 computeKnownBits(V, ZeroBits, OneBits, DL, Depth, Q); 1713 KnownOne = OneBits[BitWidth - 1]; 1714 KnownZero = ZeroBits[BitWidth - 1]; 1715 } 1716 1717 /// Return true if the given value is known to have exactly one 1718 /// bit set when defined. For vectors return true if every element is known to 1719 /// be a power of two when defined. Supports values with integer or pointer 1720 /// types and vectors of integers. 1721 bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth, 1722 const Query &Q, const DataLayout &DL) { 1723 if (Constant *C = dyn_cast<Constant>(V)) { 1724 if (C->isNullValue()) 1725 return OrZero; 1726 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) 1727 return CI->getValue().isPowerOf2(); 1728 // TODO: Handle vector constants. 1729 } 1730 1731 // 1 << X is clearly a power of two if the one is not shifted off the end. If 1732 // it is shifted off the end then the result is undefined. 1733 if (match(V, m_Shl(m_One(), m_Value()))) 1734 return true; 1735 1736 // (signbit) >>l X is clearly a power of two if the one is not shifted off the 1737 // bottom. If it is shifted off the bottom then the result is undefined. 1738 if (match(V, m_LShr(m_SignBit(), m_Value()))) 1739 return true; 1740 1741 // The remaining tests are all recursive, so bail out if we hit the limit. 1742 if (Depth++ == MaxDepth) 1743 return false; 1744 1745 Value *X = nullptr, *Y = nullptr; 1746 // A shift of a power of two is a power of two or zero. 1747 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) || 1748 match(V, m_Shr(m_Value(X), m_Value())))) 1749 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q, DL); 1750 1751 if (ZExtInst *ZI = dyn_cast<ZExtInst>(V)) 1752 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q, DL); 1753 1754 if (SelectInst *SI = dyn_cast<SelectInst>(V)) 1755 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q, DL) && 1756 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q, DL); 1757 1758 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) { 1759 // A power of two and'd with anything is a power of two or zero. 1760 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q, DL) || 1761 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q, DL)) 1762 return true; 1763 // X & (-X) is always a power of two or zero. 
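// Example: X = 0b00110100 gives -X = 0b11001100 in two's complement, so
// X & -X = 0b00000100 -- exactly the lowest set bit of X, a power of two;
// for X = 0 the result is 0.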
1764 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X)))) 1765 return true; 1766 return false; 1767 } 1768 1769 // Adding a power-of-two or zero to the same power-of-two or zero yields 1770 // either the original power-of-two, a larger power-of-two or zero. 1771 if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 1772 OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V); 1773 if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) { 1774 if (match(X, m_And(m_Specific(Y), m_Value())) || 1775 match(X, m_And(m_Value(), m_Specific(Y)))) 1776 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q, DL)) 1777 return true; 1778 if (match(Y, m_And(m_Specific(X), m_Value())) || 1779 match(Y, m_And(m_Value(), m_Specific(X)))) 1780 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q, DL)) 1781 return true; 1782 1783 unsigned BitWidth = V->getType()->getScalarSizeInBits(); 1784 APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0); 1785 computeKnownBits(X, LHSZeroBits, LHSOneBits, DL, Depth, Q); 1786 1787 APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0); 1788 computeKnownBits(Y, RHSZeroBits, RHSOneBits, DL, Depth, Q); 1789 // If i8 V is a power of two or zero: 1790 // ZeroBits: 1 1 1 0 1 1 1 1 1791 // ~ZeroBits: 0 0 0 1 0 0 0 0 1792 if ((~(LHSZeroBits & RHSZeroBits)).isPowerOf2()) 1793 // If OrZero isn't set, we cannot give back a zero result. 1794 // Make sure either the LHS or RHS has a bit set. 1795 if (OrZero || RHSOneBits.getBoolValue() || LHSOneBits.getBoolValue()) 1796 return true; 1797 } 1798 } 1799 1800 // An exact divide or right shift can only shift off zero bits, so the result 1801 // is a power of two only if the first operand is a power of two and not 1802 // copying a sign bit (sdiv int_min, 2). 1803 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) || 1804 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) { 1805 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero, 1806 Depth, Q, DL); 1807 } 1808 1809 return false; 1810 } 1811 1812 /// \brief Test whether a GEP's result is known to be non-null. 1813 /// 1814 /// Uses properties inherent in a GEP to try to determine whether it is known 1815 /// to be non-null. 1816 /// 1817 /// Currently this routine does not support vector GEPs. 1818 static bool isGEPKnownNonNull(GEPOperator *GEP, const DataLayout &DL, 1819 unsigned Depth, const Query &Q) { 1820 if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0) 1821 return false; 1822 1823 // FIXME: Support vector-GEPs. 1824 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP"); 1825 1826 // If the base pointer is non-null, we cannot walk to a null address with an 1827 // inbounds GEP in address space zero. 1828 if (isKnownNonZero(GEP->getPointerOperand(), DL, Depth, Q)) 1829 return true; 1830 1831 // Walk the GEP operands and see if any operand introduces a non-zero offset. 1832 // If so, then the GEP cannot produce a null pointer, as doing so would 1833 // inherently violate the inbounds contract within address space zero. 1834 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP); 1835 GTI != GTE; ++GTI) { 1836 // Struct types are easy -- they must always be indexed by a constant. 
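// For instance (hypothetical IR):
//   getelementptr inbounds { i32, i32 }, { i32, i32 }* %p, i64 0, i32 1
// addresses the field at byte offset 4; a non-zero constant offset like
// this is already enough to rule out a null result here.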
1837 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
1838 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1839 unsigned ElementIdx = OpC->getZExtValue();
1840 const StructLayout *SL = DL.getStructLayout(STy);
1841 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1842 if (ElementOffset > 0)
1843 return true;
1844 continue;
1845 }
1846
1847 // If we have a zero-sized type, the index doesn't matter. Keep looping.
1848 if (DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1849 continue;
1850
1851 // Fast path the constant operand case both for efficiency and so we don't
1852 // increment Depth when just zipping down an all-constant GEP.
1853 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1854 if (!OpC->isZero())
1855 return true;
1856 continue;
1857 }
1858
1859 // We post-increment Depth here because while isKnownNonZero increments it
1860 // as well, when we pop back up that increment won't persist. We don't want
1861 // to recurse 10k times just because we have 10k GEP operands. We don't
1862 // bail out completely because we want to handle constant GEPs regardless
1863 // of depth.
1864 if (Depth++ >= MaxDepth)
1865 continue;
1866
1867 if (isKnownNonZero(GTI.getOperand(), DL, Depth, Q))
1868 return true;
1869 }
1870
1871 return false;
1872 }
1873
1874 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
1875 /// ensure that the value it's attached to is never equal to Value? 'Value'
1876 /// is expected to have the same bit width as the range's bounds.
1877 static bool rangeMetadataExcludesValue(MDNode* Ranges,
1878 const APInt& Value) {
1879 const unsigned NumRanges = Ranges->getNumOperands() / 2;
1880 assert(NumRanges >= 1);
1881 for (unsigned i = 0; i < NumRanges; ++i) {
1882 ConstantInt *Lower =
1883 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1884 ConstantInt *Upper =
1885 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1886 ConstantRange Range(Lower->getValue(), Upper->getValue());
1887 if (Range.contains(Value))
1888 return false;
1889 }
1890 return true;
1891 }
1892
1893 /// Return true if the given value is known to be non-zero when defined.
1894 /// For vectors return true if every element is known to be non-zero when
1895 /// defined. Supports values with integer or pointer type and vectors of
1896 /// integers.
1897 bool isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
1898 const Query &Q) {
1899 if (Constant *C = dyn_cast<Constant>(V)) {
1900 if (C->isNullValue())
1901 return false;
1902 if (isa<ConstantInt>(C))
1903 // Must be non-zero due to null test above.
1904 return true;
1905 // TODO: Handle vectors
1906 return false;
1907 }
1908
1909 if (Instruction* I = dyn_cast<Instruction>(V)) {
1910 if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
1911 // If the possible ranges don't contain zero, then the value is
1912 // definitely non-zero.
1913 if (IntegerType* Ty = dyn_cast<IntegerType>(V->getType())) {
1914 const APInt ZeroValue(Ty->getBitWidth(), 0);
1915 if (rangeMetadataExcludesValue(Ranges, ZeroValue))
1916 return true;
1917 }
1918 }
1919 }
1920
1921 // The remaining tests are all recursive, so bail out if we hit the limit.
1922 if (Depth++ >= MaxDepth)
1923 return false;
1924
1925 // Check for pointer simplifications.
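// E.g. allocas and non-extern-weak globals are non-null in address space
// zero; isKnownNonNull below covers cases like those, and isGEPKnownNonNull
// above handles inbounds GEPs of non-null bases.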
1926 if (V->getType()->isPointerTy()) { 1927 if (isKnownNonNull(V)) 1928 return true; 1929 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) 1930 if (isGEPKnownNonNull(GEP, DL, Depth, Q)) 1931 return true; 1932 } 1933 1934 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), DL); 1935 1936 // X | Y != 0 if X != 0 or Y != 0. 1937 Value *X = nullptr, *Y = nullptr; 1938 if (match(V, m_Or(m_Value(X), m_Value(Y)))) 1939 return isKnownNonZero(X, DL, Depth, Q) || isKnownNonZero(Y, DL, Depth, Q); 1940 1941 // ext X != 0 if X != 0. 1942 if (isa<SExtInst>(V) || isa<ZExtInst>(V)) 1943 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), DL, Depth, Q); 1944 1945 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined 1946 // if the lowest bit is shifted off the end. 1947 if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) { 1948 // shl nuw can't remove any non-zero bits. 1949 OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); 1950 if (BO->hasNoUnsignedWrap()) 1951 return isKnownNonZero(X, DL, Depth, Q); 1952 1953 APInt KnownZero(BitWidth, 0); 1954 APInt KnownOne(BitWidth, 0); 1955 computeKnownBits(X, KnownZero, KnownOne, DL, Depth, Q); 1956 if (KnownOne[0]) 1957 return true; 1958 } 1959 // shr X, Y != 0 if X is negative. Note that the value of the shift is not 1960 // defined if the sign bit is shifted off the end. 1961 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) { 1962 // shr exact can only shift out zero bits. 1963 PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V); 1964 if (BO->isExact()) 1965 return isKnownNonZero(X, DL, Depth, Q); 1966 1967 bool XKnownNonNegative, XKnownNegative; 1968 ComputeSignBit(X, XKnownNonNegative, XKnownNegative, DL, Depth, Q); 1969 if (XKnownNegative) 1970 return true; 1971 1972 // If the shifter operand is a constant, and all of the bits shifted 1973 // out are known to be zero, and X is known non-zero then at least one 1974 // non-zero bit must remain. 1975 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) { 1976 APInt KnownZero(BitWidth, 0); 1977 APInt KnownOne(BitWidth, 0); 1978 computeKnownBits(X, KnownZero, KnownOne, DL, Depth, Q); 1979 1980 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1); 1981 // Is there a known one in the portion not shifted out? 1982 if (KnownOne.countLeadingZeros() < BitWidth - ShiftVal) 1983 return true; 1984 // Are all the bits to be shifted out known zero? 1985 if (KnownZero.countTrailingOnes() >= ShiftVal) 1986 return isKnownNonZero(X, DL, Depth, Q); 1987 } 1988 } 1989 // div exact can only produce a zero if the dividend is zero. 1990 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) { 1991 return isKnownNonZero(X, DL, Depth, Q); 1992 } 1993 // X + Y. 1994 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 1995 bool XKnownNonNegative, XKnownNegative; 1996 bool YKnownNonNegative, YKnownNegative; 1997 ComputeSignBit(X, XKnownNonNegative, XKnownNegative, DL, Depth, Q); 1998 ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, DL, Depth, Q); 1999 2000 // If X and Y are both non-negative (as signed values) then their sum is not 2001 // zero unless both X and Y are zero. 2002 if (XKnownNonNegative && YKnownNonNegative) 2003 if (isKnownNonZero(X, DL, Depth, Q) || isKnownNonZero(Y, DL, Depth, Q)) 2004 return true; 2005 2006 // If X and Y are both negative (as signed values) then their sum is not 2007 // zero unless both X and Y equal INT_MIN. 
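// Reasoning sketch (i8 for concreteness): each negative operand lies in
// [-128, -1], so the true sum lies in [-256, -2] and wraps to zero only
// for (-128) + (-128); proving either operand is not INT_MIN rules that
// out.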
2008 if (BitWidth && XKnownNegative && YKnownNegative) { 2009 APInt KnownZero(BitWidth, 0); 2010 APInt KnownOne(BitWidth, 0); 2011 APInt Mask = APInt::getSignedMaxValue(BitWidth); 2012 // The sign bit of X is set. If some other bit is set then X is not equal 2013 // to INT_MIN. 2014 computeKnownBits(X, KnownZero, KnownOne, DL, Depth, Q); 2015 if ((KnownOne & Mask) != 0) 2016 return true; 2017 // The sign bit of Y is set. If some other bit is set then Y is not equal 2018 // to INT_MIN. 2019 computeKnownBits(Y, KnownZero, KnownOne, DL, Depth, Q); 2020 if ((KnownOne & Mask) != 0) 2021 return true; 2022 } 2023 2024 // The sum of a non-negative number and a power of two is not zero. 2025 if (XKnownNonNegative && 2026 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q, DL)) 2027 return true; 2028 if (YKnownNonNegative && 2029 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q, DL)) 2030 return true; 2031 } 2032 // X * Y. 2033 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) { 2034 OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); 2035 // If X and Y are non-zero then so is X * Y as long as the multiplication 2036 // does not overflow. 2037 if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) && 2038 isKnownNonZero(X, DL, Depth, Q) && isKnownNonZero(Y, DL, Depth, Q)) 2039 return true; 2040 } 2041 // (C ? X : Y) != 0 if X != 0 and Y != 0. 2042 else if (SelectInst *SI = dyn_cast<SelectInst>(V)) { 2043 if (isKnownNonZero(SI->getTrueValue(), DL, Depth, Q) && 2044 isKnownNonZero(SI->getFalseValue(), DL, Depth, Q)) 2045 return true; 2046 } 2047 // PHI 2048 else if (PHINode *PN = dyn_cast<PHINode>(V)) { 2049 // Try and detect a recurrence that monotonically increases from a 2050 // starting value, as these are common as induction variables. 2051 if (PN->getNumIncomingValues() == 2) { 2052 Value *Start = PN->getIncomingValue(0); 2053 Value *Induction = PN->getIncomingValue(1); 2054 if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start)) 2055 std::swap(Start, Induction); 2056 if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) { 2057 if (!C->isZero() && !C->isNegative()) { 2058 ConstantInt *X; 2059 if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) || 2060 match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) && 2061 !X->isNegative()) 2062 return true; 2063 } 2064 } 2065 } 2066 } 2067 2068 if (!BitWidth) return false; 2069 APInt KnownZero(BitWidth, 0); 2070 APInt KnownOne(BitWidth, 0); 2071 computeKnownBits(V, KnownZero, KnownOne, DL, Depth, Q); 2072 return KnownOne != 0; 2073 } 2074 2075 /// Return true if V2 == V1 + X, where X is known non-zero. 2076 static bool isAddOfNonZero(Value *V1, Value *V2, const DataLayout &DL, 2077 const Query &Q) { 2078 BinaryOperator *BO = dyn_cast<BinaryOperator>(V1); 2079 if (!BO || BO->getOpcode() != Instruction::Add) 2080 return false; 2081 Value *Op = nullptr; 2082 if (V2 == BO->getOperand(0)) 2083 Op = BO->getOperand(1); 2084 else if (V2 == BO->getOperand(1)) 2085 Op = BO->getOperand(0); 2086 else 2087 return false; 2088 return isKnownNonZero(Op, DL, 0, Q); 2089 } 2090 2091 /// Return true if it is known that V1 != V2. 2092 static bool isKnownNonEqual(Value *V1, Value *V2, const DataLayout &DL, 2093 const Query &Q) { 2094 if (V1->getType()->isVectorTy() || V1 == V2) 2095 return false; 2096 if (V1->getType() != V2->getType()) 2097 // We can't look through casts yet. 
2098 return false; 2099 if (isAddOfNonZero(V1, V2, DL, Q) || isAddOfNonZero(V2, V1, DL, Q)) 2100 return true; 2101 2102 if (IntegerType *Ty = dyn_cast<IntegerType>(V1->getType())) { 2103 // Are any known bits in V1 contradictory to known bits in V2? If V1 2104 // has a known zero where V2 has a known one, they must not be equal. 2105 auto BitWidth = Ty->getBitWidth(); 2106 APInt KnownZero1(BitWidth, 0); 2107 APInt KnownOne1(BitWidth, 0); 2108 computeKnownBits(V1, KnownZero1, KnownOne1, DL, 0, Q); 2109 APInt KnownZero2(BitWidth, 0); 2110 APInt KnownOne2(BitWidth, 0); 2111 computeKnownBits(V2, KnownZero2, KnownOne2, DL, 0, Q); 2112 2113 auto OppositeBits = (KnownZero1 & KnownOne2) | (KnownZero2 & KnownOne1); 2114 if (OppositeBits.getBoolValue()) 2115 return true; 2116 } 2117 return false; 2118 } 2119 2120 /// Return true if 'V & Mask' is known to be zero. We use this predicate to 2121 /// simplify operations downstream. Mask is known to be zero for bits that V 2122 /// cannot have. 2123 /// 2124 /// This function is defined on values with integer type, values with pointer 2125 /// type, and vectors of integers. In the case 2126 /// where V is a vector, the mask, known zero, and known one values are the 2127 /// same width as the vector element, and the bit is set only if it is true 2128 /// for all of the elements in the vector. 2129 bool MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL, 2130 unsigned Depth, const Query &Q) { 2131 APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0); 2132 computeKnownBits(V, KnownZero, KnownOne, DL, Depth, Q); 2133 return (KnownZero & Mask) == Mask; 2134 } 2135 2136 2137 2138 /// Return the number of times the sign bit of the register is replicated into 2139 /// the other bits. We know that at least 1 bit is always equal to the sign bit 2140 /// (itself), but other cases can give us information. For example, immediately 2141 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each 2142 /// other, so we return 3. 2143 /// 2144 /// 'Op' must have a scalar integer type. 2145 /// 2146 unsigned ComputeNumSignBits(Value *V, const DataLayout &DL, unsigned Depth, 2147 const Query &Q) { 2148 unsigned TyBits = DL.getTypeSizeInBits(V->getType()->getScalarType()); 2149 unsigned Tmp, Tmp2; 2150 unsigned FirstAnswer = 1; 2151 2152 // Note that ConstantInt is handled by the general computeKnownBits case 2153 // below. 2154 2155 if (Depth == 6) 2156 return 1; // Limit search depth. 2157 2158 Operator *U = dyn_cast<Operator>(V); 2159 switch (Operator::getOpcode(V)) { 2160 default: break; 2161 case Instruction::SExt: 2162 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits(); 2163 return ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q) + Tmp; 2164 2165 case Instruction::SDiv: { 2166 const APInt *Denominator; 2167 // sdiv X, C -> adds log(C) sign bits. 2168 if (match(U->getOperand(1), m_APInt(Denominator))) { 2169 2170 // Ignore non-positive denominator. 2171 if (!Denominator->isStrictlyPositive()) 2172 break; 2173 2174 // Calculate the incoming numerator bits. 2175 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q); 2176 2177 // Add floor(log(C)) bits to the numerator bits. 2178 return std::min(TyBits, NumBits + Denominator->logBase2()); 2179 } 2180 break; 2181 } 2182 2183 case Instruction::SRem: { 2184 const APInt *Denominator; 2185 // srem X, C -> we know that the result is within [-C+1,C) when C is a 2186 // positive constant. 
This lets us put a lower bound on the number of sign
2187 // bits.
2188 if (match(U->getOperand(1), m_APInt(Denominator))) {
2189
2190 // Ignore non-positive denominator.
2191 if (!Denominator->isStrictlyPositive())
2192 break;
2193
2194 // Calculate the incoming numerator bits. SRem by a positive constant
2195 // can't lower the number of sign bits.
2196 unsigned NumrBits =
2197 ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
2198
2199 // Calculate the leading sign bit constraints by examining the
2200 // denominator. Given that the denominator is positive, there are two
2201 // cases:
2202 //
2203 // 1. The numerator is positive. The result range is [0,C), and every
2204 // value in [0,C) is u< (1 << ceilLogBase2(C)).
2205 //
2206 // 2. The numerator is negative. Then the result range is (-C,0], and
2207 // integers in (-C,0] are either 0 or u> (-1 << ceilLogBase2(C)).
2208 //
2209 // Thus a lower bound on the number of sign bits is `TyBits -
2210 // ceilLogBase2(C)`.
2211
2212 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2213 return std::max(NumrBits, ResBits);
2214 }
2215 break;
2216 }
2217
2218 case Instruction::AShr: {
2219 Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
2220 // ashr X, C -> adds C sign bits. Vectors too.
2221 const APInt *ShAmt;
2222 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2223 Tmp += ShAmt->getZExtValue();
2224 if (Tmp > TyBits) Tmp = TyBits;
2225 }
2226 return Tmp;
2227 }
2228 case Instruction::Shl: {
2229 const APInt *ShAmt;
2230 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2231 // shl destroys sign bits.
2232 Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
2233 Tmp2 = ShAmt->getZExtValue();
2234 if (Tmp2 >= TyBits || // Bad shift.
2235 Tmp2 >= Tmp) break; // Shifted all sign bits out.
2236 return Tmp - Tmp2;
2237 }
2238 break;
2239 }
2240 case Instruction::And:
2241 case Instruction::Or:
2242 case Instruction::Xor: // NOT is handled here.
2243 // Logical binary ops preserve the number of sign bits at worst.
2244 Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
2245 if (Tmp != 1) {
2246 Tmp2 = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q);
2247 FirstAnswer = std::min(Tmp, Tmp2);
2248 // We computed what we know about the sign bits as our first
2249 // answer. Now proceed to the generic code that uses
2250 // computeKnownBits, and pick whichever answer is better.
2251 }
2252 break;
2253
2254 case Instruction::Select:
2255 Tmp = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q);
2256 if (Tmp == 1) return 1; // Early out.
2257 Tmp2 = ComputeNumSignBits(U->getOperand(2), DL, Depth + 1, Q);
2258 return std::min(Tmp, Tmp2);
2259
2260 case Instruction::Add:
2261 // Add can have at most one carry bit. Thus we know that the output
2262 // is, at worst, one more bit than the inputs.
2263 Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
2264 if (Tmp == 1) return 1; // Early out.
2265
2266 // Special case decrementing a value (ADD X, -1):
2267 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2268 if (CRHS->isAllOnesValue()) {
2269 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2270 computeKnownBits(U->getOperand(0), KnownZero, KnownOne, DL, Depth + 1,
2271 Q);
2272
2273 // If the input is known to be 0 or 1, the output is 0/-1, which is all
2274 // sign bits set.
2275 if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
2276 return TyBits;
2277
2278 // If we are subtracting one from a non-negative number, there is no
2279 // carry out of the result.
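// Sketch: decrementing a non-negative X yields either -1, whose bits all
// equal the sign bit, or a smaller non-negative value, which has at least
// as many leading zero (i.e. sign) bits as X did.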
2280 if (KnownZero.isNegative()) 2281 return Tmp; 2282 } 2283 2284 Tmp2 = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q); 2285 if (Tmp2 == 1) return 1; 2286 return std::min(Tmp, Tmp2)-1; 2287 2288 case Instruction::Sub: 2289 Tmp2 = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q); 2290 if (Tmp2 == 1) return 1; 2291 2292 // Handle NEG. 2293 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0))) 2294 if (CLHS->isNullValue()) { 2295 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0); 2296 computeKnownBits(U->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, 2297 Q); 2298 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2299 // sign bits set. 2300 if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue()) 2301 return TyBits; 2302 2303 // If the input is known to be positive (the sign bit is known clear), 2304 // the output of the NEG has the same number of sign bits as the input. 2305 if (KnownZero.isNegative()) 2306 return Tmp2; 2307 2308 // Otherwise, we treat this like a SUB. 2309 } 2310 2311 // Sub can have at most one carry bit. Thus we know that the output 2312 // is, at worst, one more bit than the inputs. 2313 Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q); 2314 if (Tmp == 1) return 1; // Early out. 2315 return std::min(Tmp, Tmp2)-1; 2316 2317 case Instruction::PHI: { 2318 PHINode *PN = cast<PHINode>(U); 2319 unsigned NumIncomingValues = PN->getNumIncomingValues(); 2320 // Don't analyze large in-degree PHIs. 2321 if (NumIncomingValues > 4) break; 2322 // Unreachable blocks may have zero-operand PHI nodes. 2323 if (NumIncomingValues == 0) break; 2324 2325 // Take the minimum of all incoming values. This can't infinitely loop 2326 // because of our depth threshold. 2327 Tmp = ComputeNumSignBits(PN->getIncomingValue(0), DL, Depth + 1, Q); 2328 for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) { 2329 if (Tmp == 1) return Tmp; 2330 Tmp = std::min( 2331 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), DL, Depth + 1, Q)); 2332 } 2333 return Tmp; 2334 } 2335 2336 case Instruction::Trunc: 2337 // FIXME: it's tricky to do anything useful for this, but it is an important 2338 // case for targets like X86. 2339 break; 2340 } 2341 2342 // Finally, if we can prove that the top bits of the result are 0's or 1's, 2343 // use this information. 2344 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0); 2345 APInt Mask; 2346 computeKnownBits(V, KnownZero, KnownOne, DL, Depth, Q); 2347 2348 if (KnownZero.isNegative()) { // sign bit is 0 2349 Mask = KnownZero; 2350 } else if (KnownOne.isNegative()) { // sign bit is 1; 2351 Mask = KnownOne; 2352 } else { 2353 // Nothing known. 2354 return FirstAnswer; 2355 } 2356 2357 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine 2358 // the number of identical bits in the top of the input value. 2359 Mask = ~Mask; 2360 Mask <<= Mask.getBitWidth()-TyBits; 2361 // Return # leading zeros. We use 'min' here in case Val was zero before 2362 // shifting. We don't want to return '64' as for an i32 "0". 2363 return std::max(FirstAnswer, std::min(TyBits, Mask.countLeadingZeros())); 2364 } 2365 2366 /// This function computes the integer multiple of Base that equals V. 2367 /// If successful, it returns true and returns the multiple in 2368 /// Multiple. If unsuccessful, it returns false. It looks 2369 /// through SExt instructions only if LookThroughSExt is true. 
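/// For example (hypothetical IR): for V = mul i32 %x, 8 and Base == 8 this
/// returns true with Multiple == %x; a shl i32 %x, 3 is first rewritten as
/// the equivalent multiply by 8 and handled the same way.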
2370 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple, 2371 bool LookThroughSExt, unsigned Depth) { 2372 const unsigned MaxDepth = 6; 2373 2374 assert(V && "No Value?"); 2375 assert(Depth <= MaxDepth && "Limit Search Depth"); 2376 assert(V->getType()->isIntegerTy() && "Not integer or pointer type!"); 2377 2378 Type *T = V->getType(); 2379 2380 ConstantInt *CI = dyn_cast<ConstantInt>(V); 2381 2382 if (Base == 0) 2383 return false; 2384 2385 if (Base == 1) { 2386 Multiple = V; 2387 return true; 2388 } 2389 2390 ConstantExpr *CO = dyn_cast<ConstantExpr>(V); 2391 Constant *BaseVal = ConstantInt::get(T, Base); 2392 if (CO && CO == BaseVal) { 2393 // Multiple is 1. 2394 Multiple = ConstantInt::get(T, 1); 2395 return true; 2396 } 2397 2398 if (CI && CI->getZExtValue() % Base == 0) { 2399 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base); 2400 return true; 2401 } 2402 2403 if (Depth == MaxDepth) return false; // Limit search depth. 2404 2405 Operator *I = dyn_cast<Operator>(V); 2406 if (!I) return false; 2407 2408 switch (I->getOpcode()) { 2409 default: break; 2410 case Instruction::SExt: 2411 if (!LookThroughSExt) return false; 2412 // otherwise fall through to ZExt 2413 case Instruction::ZExt: 2414 return ComputeMultiple(I->getOperand(0), Base, Multiple, 2415 LookThroughSExt, Depth+1); 2416 case Instruction::Shl: 2417 case Instruction::Mul: { 2418 Value *Op0 = I->getOperand(0); 2419 Value *Op1 = I->getOperand(1); 2420 2421 if (I->getOpcode() == Instruction::Shl) { 2422 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1); 2423 if (!Op1CI) return false; 2424 // Turn Op0 << Op1 into Op0 * 2^Op1 2425 APInt Op1Int = Op1CI->getValue(); 2426 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1); 2427 APInt API(Op1Int.getBitWidth(), 0); 2428 API.setBit(BitToSet); 2429 Op1 = ConstantInt::get(V->getContext(), API); 2430 } 2431 2432 Value *Mul0 = nullptr; 2433 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) { 2434 if (Constant *Op1C = dyn_cast<Constant>(Op1)) 2435 if (Constant *MulC = dyn_cast<Constant>(Mul0)) { 2436 if (Op1C->getType()->getPrimitiveSizeInBits() < 2437 MulC->getType()->getPrimitiveSizeInBits()) 2438 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType()); 2439 if (Op1C->getType()->getPrimitiveSizeInBits() > 2440 MulC->getType()->getPrimitiveSizeInBits()) 2441 MulC = ConstantExpr::getZExt(MulC, Op1C->getType()); 2442 2443 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1) 2444 Multiple = ConstantExpr::getMul(MulC, Op1C); 2445 return true; 2446 } 2447 2448 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0)) 2449 if (Mul0CI->getValue() == 1) { 2450 // V == Base * Op1, so return Op1 2451 Multiple = Op1; 2452 return true; 2453 } 2454 } 2455 2456 Value *Mul1 = nullptr; 2457 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) { 2458 if (Constant *Op0C = dyn_cast<Constant>(Op0)) 2459 if (Constant *MulC = dyn_cast<Constant>(Mul1)) { 2460 if (Op0C->getType()->getPrimitiveSizeInBits() < 2461 MulC->getType()->getPrimitiveSizeInBits()) 2462 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType()); 2463 if (Op0C->getType()->getPrimitiveSizeInBits() > 2464 MulC->getType()->getPrimitiveSizeInBits()) 2465 MulC = ConstantExpr::getZExt(MulC, Op0C->getType()); 2466 2467 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0) 2468 Multiple = ConstantExpr::getMul(MulC, Op0C); 2469 return true; 2470 } 2471 2472 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1)) 2473 if (Mul1CI->getValue() == 1) { 2474 // V == Base * Op0, so return Op0 2475 Multiple = 
Op0; 2476 return true; 2477 } 2478 } 2479 } 2480 } 2481 2482 // We could not determine if V is a multiple of Base. 2483 return false; 2484 } 2485 2486 /// Return true if we can prove that the specified FP value is never equal to 2487 /// -0.0. 2488 /// 2489 /// NOTE: this function will need to be revisited when we support non-default 2490 /// rounding modes! 2491 /// 2492 bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) { 2493 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) 2494 return !CFP->getValueAPF().isNegZero(); 2495 2496 // FIXME: Magic number! At the least, this should be given a name because it's 2497 // used similarly in CannotBeOrderedLessThanZero(). A better fix may be to 2498 // expose it as a parameter, so it can be used for testing / experimenting. 2499 if (Depth == 6) 2500 return false; // Limit search depth. 2501 2502 const Operator *I = dyn_cast<Operator>(V); 2503 if (!I) return false; 2504 2505 // Check if the nsz fast-math flag is set 2506 if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I)) 2507 if (FPO->hasNoSignedZeros()) 2508 return true; 2509 2510 // (add x, 0.0) is guaranteed to return +0.0, not -0.0. 2511 if (I->getOpcode() == Instruction::FAdd) 2512 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1))) 2513 if (CFP->isNullValue()) 2514 return true; 2515 2516 // sitofp and uitofp turn into +0.0 for zero. 2517 if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I)) 2518 return true; 2519 2520 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) 2521 // sqrt(-0.0) = -0.0, no other negative results are possible. 2522 if (II->getIntrinsicID() == Intrinsic::sqrt) 2523 return CannotBeNegativeZero(II->getArgOperand(0), Depth+1); 2524 2525 if (const CallInst *CI = dyn_cast<CallInst>(I)) 2526 if (const Function *F = CI->getCalledFunction()) { 2527 if (F->isDeclaration()) { 2528 // abs(x) != -0.0 2529 if (F->getName() == "abs") return true; 2530 // fabs[lf](x) != -0.0 2531 if (F->getName() == "fabs") return true; 2532 if (F->getName() == "fabsf") return true; 2533 if (F->getName() == "fabsl") return true; 2534 if (F->getName() == "sqrt" || F->getName() == "sqrtf" || 2535 F->getName() == "sqrtl") 2536 return CannotBeNegativeZero(CI->getArgOperand(0), Depth+1); 2537 } 2538 } 2539 2540 return false; 2541 } 2542 2543 bool llvm::CannotBeOrderedLessThanZero(const Value *V, unsigned Depth) { 2544 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) 2545 return !CFP->getValueAPF().isNegative() || CFP->getValueAPF().isZero(); 2546 2547 // FIXME: Magic number! At the least, this should be given a name because it's 2548 // used similarly in CannotBeNegativeZero(). A better fix may be to 2549 // expose it as a parameter, so it can be used for testing / experimenting. 2550 if (Depth == 6) 2551 return false; // Limit search depth. 2552 2553 const Operator *I = dyn_cast<Operator>(V); 2554 if (!I) return false; 2555 2556 switch (I->getOpcode()) { 2557 default: break; 2558 case Instruction::FMul: 2559 // x*x is always non-negative or a NaN. 2560 if (I->getOperand(0) == I->getOperand(1)) 2561 return true; 2562 // Fall through 2563 case Instruction::FAdd: 2564 case Instruction::FDiv: 2565 case Instruction::FRem: 2566 return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1) && 2567 CannotBeOrderedLessThanZero(I->getOperand(1), Depth+1); 2568 case Instruction::FPExt: 2569 case Instruction::FPTrunc: 2570 // Widening/narrowing never change sign. 
2571 return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1);
2572 case Instruction::Call:
2573 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
2574 switch (II->getIntrinsicID()) {
2575 default: break;
2576 case Intrinsic::exp:
2577 case Intrinsic::exp2:
2578 case Intrinsic::fabs:
2579 case Intrinsic::sqrt:
2580 return true;
2581 case Intrinsic::powi:
2582 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
2583 // powi(x,n) is non-negative if n is even.
2584 if (CI->getBitWidth() <= 64 && CI->getSExtValue() % 2u == 0)
2585 return true;
2586 }
2587 return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1);
2588 case Intrinsic::fma:
2589 case Intrinsic::fmuladd:
2590 // x*x+y is non-negative if y is non-negative.
2591 return I->getOperand(0) == I->getOperand(1) &&
2592 CannotBeOrderedLessThanZero(I->getOperand(2), Depth+1);
2593 }
2594 break;
2595 }
2596 return false;
2597 }
2598
2599 /// If the specified value can be set by repeating the same byte in memory,
2600 /// return the i8 value that it is represented with. This is
2601 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
2602 /// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
2603 /// byte store (e.g. i16 0x1234), return null.
2604 Value *llvm::isBytewiseValue(Value *V) {
2605 // All byte-wide stores are splatable, even of arbitrary variables.
2606 if (V->getType()->isIntegerTy(8)) return V;
2607
2608 // Handle 'null' ConstantAggregateZero etc.
2609 if (Constant *C = dyn_cast<Constant>(V))
2610 if (C->isNullValue())
2611 return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
2612
2613 // Constant float and double values can be handled as integer values if the
2614 // corresponding integer value is "byteable". An important case is 0.0.
2615 if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2616 if (CFP->getType()->isFloatTy())
2617 V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
2618 if (CFP->getType()->isDoubleTy())
2619 V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
2620 // Don't handle long double formats, which have strange constraints.
2621 }
2622
2623 // We can handle constant integers whose width is a multiple of 8 bits.
2624 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
2625 if (CI->getBitWidth() % 8 == 0) {
2626 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
2627
2628 if (!CI->getValue().isSplat(8))
2629 return nullptr;
2630 return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
2631 }
2632 }
2633
2634 // A ConstantDataArray/Vector is splatable if all its members are equal and
2635 // also splatable.
2636 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
2637 Value *Elt = CA->getElementAsConstant(0);
2638 Value *Val = isBytewiseValue(Elt);
2639 if (!Val)
2640 return nullptr;
2641
2642 for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
2643 if (CA->getElementAsConstant(I) != Elt)
2644 return nullptr;
2645
2646 return Val;
2647 }
2648
2649 // Conceptually, we could handle things like:
2650 // %a = zext i8 %X to i16
2651 // %b = shl i16 %a, 8
2652 // %c = or i16 %a, %b
2653 // but until there is an example that actually needs this, it doesn't seem
2654 // worth worrying about.
2655 return nullptr;
2656 }
2657
2658
2659 // This is the recursive version of BuildSubAggregate. It takes a few different
2660 // arguments. Idxs is the index within the nested struct From that we are
2661 // looking at now (which is of type IndexedType).
IdxSkip is the number of
2662 // indices from Idxs that should be left out when inserting into the resulting
2663 // struct. To is the result struct built so far; new insertvalue instructions
2664 // build on that.
2665 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
2666 SmallVectorImpl<unsigned> &Idxs,
2667 unsigned IdxSkip,
2668 Instruction *InsertBefore) {
2669 llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType);
2670 if (STy) {
2671 // Save the original To argument so we can modify it
2672 Value *OrigTo = To;
2673 // General case, the type indexed by Idxs is a struct
2674 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2675 // Process each struct element recursively
2676 Idxs.push_back(i);
2677 Value *PrevTo = To;
2678 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
2679 InsertBefore);
2680 Idxs.pop_back();
2681 if (!To) {
2682 // Couldn't find any inserted value for this index? Clean up.
2683 while (PrevTo != OrigTo) {
2684 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
2685 PrevTo = Del->getAggregateOperand();
2686 Del->eraseFromParent();
2687 }
2688 // Stop processing elements
2689 break;
2690 }
2691 }
2692 // If we successfully found a value for each of our subaggregates
2693 if (To)
2694 return To;
2695 }
2696 // Base case, the type indexed by Idxs is not a struct, or not all of
2697 // the struct's elements had a value that was inserted directly. In the latter
2698 // case, perhaps we can't determine each of the subelements individually, but
2699 // we might be able to find the complete struct somewhere.
2700
2701 // Find the value that is at that particular spot
2702 Value *V = FindInsertedValue(From, Idxs);
2703
2704 if (!V)
2705 return nullptr;
2706
2707 // Insert the value into the new (sub) aggregate
2708 return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
2709 "tmp", InsertBefore);
2710 }
2711
2712 // This helper takes a nested struct and extracts a part of it (which is again a
2713 // struct) into a new value. For example, given the struct:
2714 // { a, { b, { c, d }, e } }
2715 // and the indices "1, 1" this returns
2716 // { c, d }.
2717 //
2718 // It does this by inserting an insertvalue for each element in the resulting
2719 // struct, as opposed to just inserting a single struct. This will only work if
2720 // each element of the substruct is known (i.e., inserted into From by an
2721 // insertvalue instruction somewhere).
2722 //
2723 // All inserted insertvalue instructions are inserted before InsertBefore.
2724 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
2725 Instruction *InsertBefore) {
2726 assert(InsertBefore && "Must have someplace to insert!");
2727 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
2728 idx_range);
2729 Value *To = UndefValue::get(IndexedType);
2730 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
2731 unsigned IdxSkip = Idxs.size();
2732
2733 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
2734 }
2735
2736 /// Given an aggregate and a sequence of indices, see if
2737 /// the scalar value indexed is already around as a register, for example if it
2738 /// were inserted directly into the aggregate.
2739 ///
2740 /// If InsertBefore is not null, this function will duplicate (modified)
2741 /// insertvalues when a part of a nested struct is extracted.
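/// For example (hypothetical IR), after
///   %agg = insertvalue { i32, i32 } undef, i32 7, 1
/// FindInsertedValue(%agg, 1) returns the i32 7 directly, with no new
/// instructions needed.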
2742 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
2743 Instruction *InsertBefore) {
2744 // Nothing to index? Just return V then (this is useful at the end of our
2745 // recursion).
2746 if (idx_range.empty())
2747 return V;
2748 // We have indices, so V should have an indexable type.
2749 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
2750 "Not looking at a struct or array?");
2751 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
2752 "Invalid indices for type?");
2753
2754 if (Constant *C = dyn_cast<Constant>(V)) {
2755 C = C->getAggregateElement(idx_range[0]);
2756 if (!C) return nullptr;
2757 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
2758 }
2759
2760 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
2761 // Walk the indices for the insertvalue instruction in parallel with the
2762 // requested indices.
2763 const unsigned *req_idx = idx_range.begin();
2764 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
2765 i != e; ++i, ++req_idx) {
2766 if (req_idx == idx_range.end()) {
2767 // We can't handle this without inserting insertvalues
2768 if (!InsertBefore)
2769 return nullptr;
2770
2771 // The requested index identifies a part of a nested aggregate. Handle
2772 // this specially. For example,
2773 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
2774 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
2775 // %C = extractvalue {i32, { i32, i32 } } %B, 1
2776 // This can be changed into
2777 // %A = insertvalue {i32, i32 } undef, i32 10, 0
2778 // %C = insertvalue {i32, i32 } %A, i32 11, 1
2779 // which allows the unused 0,0 element from the nested struct to be
2780 // removed.
2781 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
2782 InsertBefore);
2783 }
2784
2785 // This insertvalue inserts something other than what we are looking for.
2786 // See if the (aggregate) value it inserts into has the value we are
2787 // looking for instead.
2788 if (*req_idx != *i)
2789 return FindInsertedValue(I->getAggregateOperand(), idx_range,
2790 InsertBefore);
2791 }
2792 // If we end up here, the indices of the insertvalue match with those
2793 // requested (though possibly only partially). Now we recursively look at
2794 // the inserted value, passing any remaining indices.
2795 return FindInsertedValue(I->getInsertedValueOperand(),
2796 makeArrayRef(req_idx, idx_range.end()),
2797 InsertBefore);
2798 }
2799
2800 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
2801 // If we're extracting a value from an aggregate that was extracted from
2802 // something else, we can extract from that something else directly instead.
2803 // However, we will need to chain I's indices with the requested indices.
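// Example sketch: if I is extractvalue %outer, 1 and the caller asked for
// index 0, we look up the chained indices {1, 0} in %outer instead.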
2804
2805 // Calculate the number of indices required.
2806 unsigned size = I->getNumIndices() + idx_range.size();
2807 // Allocate some space to put the new indices in.
2808 SmallVector<unsigned, 5> Idxs;
2809 Idxs.reserve(size);
2810 // Add indices from the extract value instruction.
2811 Idxs.append(I->idx_begin(), I->idx_end());
2812
2813 // Add requested indices.
2814 Idxs.append(idx_range.begin(), idx_range.end());
2815
2816 assert(Idxs.size() == size
2817 && "Number of indices added not correct?");
2818
2819 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
2820 }
2821 // Otherwise, we don't know (such as extracting from a function return value
2822 // or a load instruction).
2823 return nullptr;
2824 }
2825
2826 /// Analyze the specified pointer to see if it can be expressed as a base
2827 /// pointer plus a constant offset. Return the base and offset to the caller.
2828 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
2829 const DataLayout &DL) {
2830 unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
2831 APInt ByteOffset(BitWidth, 0);
2832 while (1) {
2833 if (Ptr->getType()->isVectorTy())
2834 break;
2835
2836 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
2837 APInt GEPOffset(BitWidth, 0);
2838 if (!GEP->accumulateConstantOffset(DL, GEPOffset))
2839 break;
2840
2841 ByteOffset += GEPOffset;
2842
2843 Ptr = GEP->getPointerOperand();
2844 } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
2845 Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
2846 Ptr = cast<Operator>(Ptr)->getOperand(0);
2847 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
2848 if (GA->mayBeOverridden())
2849 break;
2850 Ptr = GA->getAliasee();
2851 } else {
2852 break;
2853 }
2854 }
2855 Offset = ByteOffset.getSExtValue();
2856 return Ptr;
2857 }
2858
2859
2860 /// This function extracts the bytes of the constant string pointed to by V
2861 /// into Str, starting at element Offset; if TrimAtNul is set, Str stops at
2862 /// the first nul. If successful, it returns true; otherwise, false.
2863 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
2864 uint64_t Offset, bool TrimAtNul) {
2865 assert(V);
2866
2867 // Look through bitcast instructions and geps.
2868 V = V->stripPointerCasts();
2869
2870 // If the value is a GEP instruction or constant expression, treat it as an
2871 // offset.
2872 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
2873 // Make sure the GEP has exactly three arguments.
2874 if (GEP->getNumOperands() != 3)
2875 return false;
2876
2877 // Make sure the indexed operand is a pointer to an array of i8.
2878 PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
2879 ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
2880 if (!AT || !AT->getElementType()->isIntegerTy(8))
2881 return false;
2882
2883 // Check to make sure that the first operand of the GEP is an integer and
2884 // has value 0 so that we are sure we're indexing into the initializer.
2885 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
2886 if (!FirstIdx || !FirstIdx->isZero())
2887 return false;
2888
2889 // If the second index isn't a ConstantInt, then this is a variable index
2890 // into the array. If this occurs, we can't say anything meaningful about
2891 // the string.
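// Illustration (hypothetical IR): for
//   getelementptr [6 x i8], [6 x i8]* @str, i64 0, i64 2
// we recurse on @str with Offset increased by 2, i.e. the string minus
// its first two bytes.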
2892 uint64_t StartIdx = 0; 2893 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2))) 2894 StartIdx = CI->getZExtValue(); 2895 else 2896 return false; 2897 return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx + Offset, 2898 TrimAtNul); 2899 } 2900 2901 // The GEP instruction, constant or instruction, must reference a global 2902 // variable that is a constant and is initialized. The referenced constant 2903 // initializer is the array that we'll use for optimization. 2904 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V); 2905 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer()) 2906 return false; 2907 2908 // Handle the all-zeros case 2909 if (GV->getInitializer()->isNullValue()) { 2910 // This is a degenerate case. The initializer is constant zero so the 2911 // length of the string must be zero. 2912 Str = ""; 2913 return true; 2914 } 2915 2916 // Must be a Constant Array 2917 const ConstantDataArray *Array = 2918 dyn_cast<ConstantDataArray>(GV->getInitializer()); 2919 if (!Array || !Array->isString()) 2920 return false; 2921 2922 // Get the number of elements in the array 2923 uint64_t NumElts = Array->getType()->getArrayNumElements(); 2924 2925 // Start out with the entire array in the StringRef. 2926 Str = Array->getAsString(); 2927 2928 if (Offset > NumElts) 2929 return false; 2930 2931 // Skip over 'offset' bytes. 2932 Str = Str.substr(Offset); 2933 2934 if (TrimAtNul) { 2935 // Trim off the \0 and anything after it. If the array is not nul 2936 // terminated, we just return the whole end of string. The client may know 2937 // some other way that the string is length-bound. 2938 Str = Str.substr(0, Str.find('\0')); 2939 } 2940 return true; 2941 } 2942 2943 // These next two are very similar to the above, but also look through PHI 2944 // nodes. 2945 // TODO: See if we can integrate these two together. 2946 2947 /// If we can compute the length of the string pointed to by 2948 /// the specified pointer, return 'len+1'. If we can't, return 0. 2949 static uint64_t GetStringLengthH(Value *V, SmallPtrSetImpl<PHINode*> &PHIs) { 2950 // Look through noop bitcast instructions. 2951 V = V->stripPointerCasts(); 2952 2953 // If this is a PHI node, there are two cases: either we have already seen it 2954 // or we haven't. 2955 if (PHINode *PN = dyn_cast<PHINode>(V)) { 2956 if (!PHIs.insert(PN).second) 2957 return ~0ULL; // already in the set. 2958 2959 // If it was new, see if all the input strings are the same length. 2960 uint64_t LenSoFar = ~0ULL; 2961 for (Value *IncValue : PN->incoming_values()) { 2962 uint64_t Len = GetStringLengthH(IncValue, PHIs); 2963 if (Len == 0) return 0; // Unknown length -> unknown. 2964 2965 if (Len == ~0ULL) continue; 2966 2967 if (Len != LenSoFar && LenSoFar != ~0ULL) 2968 return 0; // Disagree -> unknown. 2969 LenSoFar = Len; 2970 } 2971 2972 // Success, all agree. 2973 return LenSoFar; 2974 } 2975 2976 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y) 2977 if (SelectInst *SI = dyn_cast<SelectInst>(V)) { 2978 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs); 2979 if (Len1 == 0) return 0; 2980 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs); 2981 if (Len2 == 0) return 0; 2982 if (Len1 == ~0ULL) return Len2; 2983 if (Len2 == ~0ULL) return Len1; 2984 if (Len1 != Len2) return 0; 2985 return Len1; 2986 } 2987 2988 // Otherwise, see if we can read the string. 
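  // (getConstantStringInfo trims at the first nul, so for a constant like
  // c"hello\00" StrData.size() is 5 and we report 6, i.e. strlen + 1.)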
2989 StringRef StrData; 2990 if (!getConstantStringInfo(V, StrData)) 2991 return 0; 2992 2993 return StrData.size()+1; 2994 } 2995 2996 /// If we can compute the length of the string pointed to by 2997 /// the specified pointer, return 'len+1'. If we can't, return 0. 2998 uint64_t llvm::GetStringLength(Value *V) { 2999 if (!V->getType()->isPointerTy()) return 0; 3000 3001 SmallPtrSet<PHINode*, 32> PHIs; 3002 uint64_t Len = GetStringLengthH(V, PHIs); 3003 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return 3004 // an empty string as a length. 3005 return Len == ~0ULL ? 1 : Len; 3006 } 3007 3008 /// \brief \p PN defines a loop-variant pointer to an object. Check if the 3009 /// previous iteration of the loop was referring to the same object as \p PN. 3010 static bool isSameUnderlyingObjectInLoop(PHINode *PN, LoopInfo *LI) { 3011 // Find the loop-defined value. 3012 Loop *L = LI->getLoopFor(PN->getParent()); 3013 if (PN->getNumIncomingValues() != 2) 3014 return true; 3015 3016 // Find the value from previous iteration. 3017 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0)); 3018 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) 3019 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1)); 3020 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) 3021 return true; 3022 3023 // If a new pointer is loaded in the loop, the pointer references a different 3024 // object in every iteration. E.g.: 3025 // for (i) 3026 // int *p = a[i]; 3027 // ... 3028 if (auto *Load = dyn_cast<LoadInst>(PrevValue)) 3029 if (!L->isLoopInvariant(Load->getPointerOperand())) 3030 return false; 3031 return true; 3032 } 3033 3034 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL, 3035 unsigned MaxLookup) { 3036 if (!V->getType()->isPointerTy()) 3037 return V; 3038 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) { 3039 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { 3040 V = GEP->getPointerOperand(); 3041 } else if (Operator::getOpcode(V) == Instruction::BitCast || 3042 Operator::getOpcode(V) == Instruction::AddrSpaceCast) { 3043 V = cast<Operator>(V)->getOperand(0); 3044 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { 3045 if (GA->mayBeOverridden()) 3046 return V; 3047 V = GA->getAliasee(); 3048 } else { 3049 // See if InstructionSimplify knows any relevant tricks. 3050 if (Instruction *I = dyn_cast<Instruction>(V)) 3051 // TODO: Acquire a DominatorTree and AssumptionCache and use them. 3052 if (Value *Simplified = SimplifyInstruction(I, DL, nullptr)) { 3053 V = Simplified; 3054 continue; 3055 } 3056 3057 return V; 3058 } 3059 assert(V->getType()->isPointerTy() && "Unexpected operand type!"); 3060 } 3061 return V; 3062 } 3063 3064 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects, 3065 const DataLayout &DL, LoopInfo *LI, 3066 unsigned MaxLookup) { 3067 SmallPtrSet<Value *, 4> Visited; 3068 SmallVector<Value *, 4> Worklist; 3069 Worklist.push_back(V); 3070 do { 3071 Value *P = Worklist.pop_back_val(); 3072 P = GetUnderlyingObject(P, DL, MaxLookup); 3073 3074 if (!Visited.insert(P).second) 3075 continue; 3076 3077 if (SelectInst *SI = dyn_cast<SelectInst>(P)) { 3078 Worklist.push_back(SI->getTrueValue()); 3079 Worklist.push_back(SI->getFalseValue()); 3080 continue; 3081 } 3082 3083 if (PHINode *PN = dyn_cast<PHINode>(P)) { 3084 // If this PHI changes the underlying object in every iteration of the 3085 // loop, don't look through it. 
Consider: 3086 // int **A; 3087 // for (i) { 3088 // Prev = Curr; // Prev = PHI (Prev_0, Curr) 3089 // Curr = A[i]; 3090 // *Prev, *Curr; 3091 // 3092 // Prev is tracking Curr one iteration behind so they refer to different 3093 // underlying objects. 3094 if (!LI || !LI->isLoopHeader(PN->getParent()) || 3095 isSameUnderlyingObjectInLoop(PN, LI)) 3096 for (Value *IncValue : PN->incoming_values()) 3097 Worklist.push_back(IncValue); 3098 continue; 3099 } 3100 3101 Objects.push_back(P); 3102 } while (!Worklist.empty()); 3103 } 3104 3105 /// Return true if the only users of this pointer are lifetime markers. 3106 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) { 3107 for (const User *U : V->users()) { 3108 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U); 3109 if (!II) return false; 3110 3111 if (II->getIntrinsicID() != Intrinsic::lifetime_start && 3112 II->getIntrinsicID() != Intrinsic::lifetime_end) 3113 return false; 3114 } 3115 return true; 3116 } 3117 3118 static bool isDereferenceableFromAttribute(const Value *BV, APInt Offset, 3119 Type *Ty, const DataLayout &DL, 3120 const Instruction *CtxI, 3121 const DominatorTree *DT, 3122 const TargetLibraryInfo *TLI) { 3123 assert(Offset.isNonNegative() && "offset can't be negative"); 3124 assert(Ty->isSized() && "must be sized"); 3125 3126 APInt DerefBytes(Offset.getBitWidth(), 0); 3127 bool CheckForNonNull = false; 3128 if (const Argument *A = dyn_cast<Argument>(BV)) { 3129 DerefBytes = A->getDereferenceableBytes(); 3130 if (!DerefBytes.getBoolValue()) { 3131 DerefBytes = A->getDereferenceableOrNullBytes(); 3132 CheckForNonNull = true; 3133 } 3134 } else if (auto CS = ImmutableCallSite(BV)) { 3135 DerefBytes = CS.getDereferenceableBytes(0); 3136 if (!DerefBytes.getBoolValue()) { 3137 DerefBytes = CS.getDereferenceableOrNullBytes(0); 3138 CheckForNonNull = true; 3139 } 3140 } else if (const LoadInst *LI = dyn_cast<LoadInst>(BV)) { 3141 if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) { 3142 ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0)); 3143 DerefBytes = CI->getLimitedValue(); 3144 } 3145 if (!DerefBytes.getBoolValue()) { 3146 if (MDNode *MD = 3147 LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) { 3148 ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0)); 3149 DerefBytes = CI->getLimitedValue(); 3150 } 3151 CheckForNonNull = true; 3152 } 3153 } 3154 3155 if (DerefBytes.getBoolValue()) 3156 if (DerefBytes.uge(Offset + DL.getTypeStoreSize(Ty))) 3157 if (!CheckForNonNull || isKnownNonNullAt(BV, CtxI, DT, TLI)) 3158 return true; 3159 3160 return false; 3161 } 3162 3163 static bool isDereferenceableFromAttribute(const Value *V, const DataLayout &DL, 3164 const Instruction *CtxI, 3165 const DominatorTree *DT, 3166 const TargetLibraryInfo *TLI) { 3167 Type *VTy = V->getType(); 3168 Type *Ty = VTy->getPointerElementType(); 3169 if (!Ty->isSized()) 3170 return false; 3171 3172 APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0); 3173 return isDereferenceableFromAttribute(V, Offset, Ty, DL, CtxI, DT, TLI); 3174 } 3175 3176 static bool isAligned(const Value *Base, APInt Offset, unsigned Align, 3177 const DataLayout &DL) { 3178 APInt BaseAlign(Offset.getBitWidth(), getAlignment(Base, DL)); 3179 3180 if (!BaseAlign) { 3181 Type *Ty = Base->getType()->getPointerElementType(); 3182 if (!Ty->isSized()) 3183 return false; 3184 BaseAlign = DL.getABITypeAlignment(Ty); 3185 } 3186 3187 APInt Alignment(Offset.getBitWidth(), Align); 3188 3189 assert(Alignment.isPowerOf2() && "must be a power of 2!"); 
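  // (Base must be at least as aligned as requested, and Offset must be a
  // multiple of Alignment; because Alignment is a power of two, the
  // multiple-of check is the mask test below.)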
3190 return BaseAlign.uge(Alignment) && !(Offset & (Alignment-1)); 3191 } 3192 3193 static bool isAligned(const Value *Base, unsigned Align, const DataLayout &DL) { 3194 Type *Ty = Base->getType(); 3195 assert(Ty->isSized() && "must be sized"); 3196 APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0); 3197 return isAligned(Base, Offset, Align, DL); 3198 } 3199 3200 /// Test if V is always a pointer to allocated and suitably aligned memory for 3201 /// a simple load or store. 3202 static bool isDereferenceableAndAlignedPointer( 3203 const Value *V, unsigned Align, const DataLayout &DL, 3204 const Instruction *CtxI, const DominatorTree *DT, 3205 const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited) { 3206 // Note that it is not safe to speculate into a malloc'd region because 3207 // malloc may return null. 3208 3209 // These are obviously ok if aligned. 3210 if (isa<AllocaInst>(V)) 3211 return isAligned(V, Align, DL); 3212 3213 // It's not always safe to follow a bitcast, for example: 3214 // bitcast i8* (alloca i8) to i32* 3215 // would result in a 4-byte load from a 1-byte alloca. However, 3216 // if we're casting from a pointer from a type of larger size 3217 // to a type of smaller size (or the same size), and the alignment 3218 // is at least as large as for the resulting pointer type, then 3219 // we can look through the bitcast. 3220 if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) { 3221 Type *STy = BC->getSrcTy()->getPointerElementType(), 3222 *DTy = BC->getDestTy()->getPointerElementType(); 3223 if (STy->isSized() && DTy->isSized() && 3224 (DL.getTypeStoreSize(STy) >= DL.getTypeStoreSize(DTy)) && 3225 (DL.getABITypeAlignment(STy) >= DL.getABITypeAlignment(DTy))) 3226 return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, DL, 3227 CtxI, DT, TLI, Visited); 3228 } 3229 3230 // Global variables which can't collapse to null are ok. 3231 if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) 3232 if (!GV->hasExternalWeakLinkage()) 3233 return isAligned(V, Align, DL); 3234 3235 // byval arguments are okay. 3236 if (const Argument *A = dyn_cast<Argument>(V)) 3237 if (A->hasByValAttr()) 3238 return isAligned(V, Align, DL); 3239 3240 if (isDereferenceableFromAttribute(V, DL, CtxI, DT, TLI)) 3241 return isAligned(V, Align, DL); 3242 3243 // For GEPs, determine if the indexing lands within the allocated object. 3244 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { 3245 Type *VTy = GEP->getType(); 3246 Type *Ty = VTy->getPointerElementType(); 3247 const Value *Base = GEP->getPointerOperand(); 3248 3249 // Conservatively require that the base pointer be fully dereferenceable 3250 // and aligned. 3251 if (!Visited.insert(Base).second) 3252 return false; 3253 if (!isDereferenceableAndAlignedPointer(Base, Align, DL, CtxI, DT, TLI, 3254 Visited)) 3255 return false; 3256 3257 APInt Offset(DL.getPointerTypeSizeInBits(VTy), 0); 3258 if (!GEP->accumulateConstantOffset(DL, Offset)) 3259 return false; 3260 3261 // Check if the load is within the bounds of the underlying object 3262 // and offset is aligned. 
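    // (Worked example, illustrative: loading an i32 (4 bytes) at constant
    // offset 12 from a base of type [4 x i32] (16 bytes) passes, since
    // 12 + 4 <= 16; constant offset 16 would fail.)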
3263 uint64_t LoadSize = DL.getTypeStoreSize(Ty); 3264 Type *BaseType = Base->getType()->getPointerElementType(); 3265 assert(isPowerOf2_32(Align) && "must be a power of 2!"); 3266 return (Offset + LoadSize).ule(DL.getTypeAllocSize(BaseType)) && 3267 !(Offset & APInt(Offset.getBitWidth(), Align-1)); 3268 } 3269 3270 // For gc.relocate, look through relocations 3271 if (const IntrinsicInst *I = dyn_cast<IntrinsicInst>(V)) 3272 if (I->getIntrinsicID() == Intrinsic::experimental_gc_relocate) { 3273 GCRelocateOperands RelocateInst(I); 3274 return isDereferenceableAndAlignedPointer( 3275 RelocateInst.getDerivedPtr(), Align, DL, CtxI, DT, TLI, Visited); 3276 } 3277 3278 if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V)) 3279 return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, DL, 3280 CtxI, DT, TLI, Visited); 3281 3282 // If we don't know, assume the worst. 3283 return false; 3284 } 3285 3286 bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align, 3287 const DataLayout &DL, 3288 const Instruction *CtxI, 3289 const DominatorTree *DT, 3290 const TargetLibraryInfo *TLI) { 3291 // When dereferenceability information is provided by a dereferenceable 3292 // attribute, we know exactly how many bytes are dereferenceable. If we can 3293 // determine the exact offset to the attributed variable, we can use that 3294 // information here. 3295 Type *VTy = V->getType(); 3296 Type *Ty = VTy->getPointerElementType(); 3297 3298 // Require ABI alignment for loads without alignment specification 3299 if (Align == 0) 3300 Align = DL.getABITypeAlignment(Ty); 3301 3302 if (Ty->isSized()) { 3303 APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0); 3304 const Value *BV = V->stripAndAccumulateInBoundsConstantOffsets(DL, Offset); 3305 3306 if (Offset.isNonNegative()) 3307 if (isDereferenceableFromAttribute(BV, Offset, Ty, DL, CtxI, DT, TLI) && 3308 isAligned(BV, Offset, Align, DL)) 3309 return true; 3310 } 3311 3312 SmallPtrSet<const Value *, 32> Visited; 3313 return ::isDereferenceableAndAlignedPointer(V, Align, DL, CtxI, DT, TLI, 3314 Visited); 3315 } 3316 3317 bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL, 3318 const Instruction *CtxI, 3319 const DominatorTree *DT, 3320 const TargetLibraryInfo *TLI) { 3321 return isDereferenceableAndAlignedPointer(V, 1, DL, CtxI, DT, TLI); 3322 } 3323 3324 bool llvm::isSafeToSpeculativelyExecute(const Value *V, 3325 const Instruction *CtxI, 3326 const DominatorTree *DT, 3327 const TargetLibraryInfo *TLI) { 3328 const Operator *Inst = dyn_cast<Operator>(V); 3329 if (!Inst) 3330 return false; 3331 3332 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i) 3333 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i))) 3334 if (C->canTrap()) 3335 return false; 3336 3337 switch (Inst->getOpcode()) { 3338 default: 3339 return true; 3340 case Instruction::UDiv: 3341 case Instruction::URem: { 3342 // x / y is undefined if y == 0. 3343 const APInt *V; 3344 if (match(Inst->getOperand(1), m_APInt(V))) 3345 return *V != 0; 3346 return false; 3347 } 3348 case Instruction::SDiv: 3349 case Instruction::SRem: { 3350 // x / y is undefined if y == 0 or x == INT_MIN and y == -1 3351 const APInt *Numerator, *Denominator; 3352 if (!match(Inst->getOperand(1), m_APInt(Denominator))) 3353 return false; 3354 // We cannot hoist this division if the denominator is 0. 3355 if (*Denominator == 0) 3356 return false; 3357 // It's safe to hoist if the denominator is not 0 or -1. 
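    // (The one remaining hazard is INT_MIN / -1: the mathematical result
    // 2^(BitWidth-1) is not representable in two's complement, so the
    // division has undefined behavior.)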
3358 if (*Denominator != -1) 3359 return true; 3360 // At this point we know that the denominator is -1. It is safe to hoist as 3361 // long we know that the numerator is not INT_MIN. 3362 if (match(Inst->getOperand(0), m_APInt(Numerator))) 3363 return !Numerator->isMinSignedValue(); 3364 // The numerator *might* be MinSignedValue. 3365 return false; 3366 } 3367 case Instruction::Load: { 3368 const LoadInst *LI = cast<LoadInst>(Inst); 3369 if (!LI->isUnordered() || 3370 // Speculative load may create a race that did not exist in the source. 3371 LI->getParent()->getParent()->hasFnAttribute( 3372 Attribute::SanitizeThread) || 3373 // Speculative load may load data from dirty regions. 3374 LI->getParent()->getParent()->hasFnAttribute( 3375 Attribute::SanitizeAddress)) 3376 return false; 3377 const DataLayout &DL = LI->getModule()->getDataLayout(); 3378 return isDereferenceableAndAlignedPointer( 3379 LI->getPointerOperand(), LI->getAlignment(), DL, CtxI, DT, TLI); 3380 } 3381 case Instruction::Call: { 3382 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { 3383 switch (II->getIntrinsicID()) { 3384 // These synthetic intrinsics have no side-effects and just mark 3385 // information about their operands. 3386 // FIXME: There are other no-op synthetic instructions that potentially 3387 // should be considered at least *safe* to speculate... 3388 case Intrinsic::dbg_declare: 3389 case Intrinsic::dbg_value: 3390 return true; 3391 3392 case Intrinsic::bswap: 3393 case Intrinsic::ctlz: 3394 case Intrinsic::ctpop: 3395 case Intrinsic::cttz: 3396 case Intrinsic::objectsize: 3397 case Intrinsic::sadd_with_overflow: 3398 case Intrinsic::smul_with_overflow: 3399 case Intrinsic::ssub_with_overflow: 3400 case Intrinsic::uadd_with_overflow: 3401 case Intrinsic::umul_with_overflow: 3402 case Intrinsic::usub_with_overflow: 3403 return true; 3404 // Sqrt should be OK, since the llvm sqrt intrinsic isn't defined to set 3405 // errno like libm sqrt would. 3406 case Intrinsic::sqrt: 3407 case Intrinsic::fma: 3408 case Intrinsic::fmuladd: 3409 case Intrinsic::fabs: 3410 case Intrinsic::minnum: 3411 case Intrinsic::maxnum: 3412 return true; 3413 // TODO: some fp intrinsics are marked as having the same error handling 3414 // as libm. They're safe to speculate when they won't error. 3415 // TODO: are convert_{from,to}_fp16 safe? 3416 // TODO: can we list target-specific intrinsics here? 3417 default: break; 3418 } 3419 } 3420 return false; // The called function could have undefined behavior or 3421 // side-effects, even if marked readnone nounwind. 3422 } 3423 case Instruction::VAArg: 3424 case Instruction::Alloca: 3425 case Instruction::Invoke: 3426 case Instruction::PHI: 3427 case Instruction::Store: 3428 case Instruction::Ret: 3429 case Instruction::Br: 3430 case Instruction::IndirectBr: 3431 case Instruction::Switch: 3432 case Instruction::Unreachable: 3433 case Instruction::Fence: 3434 case Instruction::AtomicRMW: 3435 case Instruction::AtomicCmpXchg: 3436 case Instruction::LandingPad: 3437 case Instruction::Resume: 3438 case Instruction::CatchSwitch: 3439 case Instruction::CatchPad: 3440 case Instruction::CatchRet: 3441 case Instruction::CleanupPad: 3442 case Instruction::CleanupRet: 3443 return false; // Misc instructions which have effects 3444 } 3445 } 3446 3447 bool llvm::mayBeMemoryDependent(const Instruction &I) { 3448 return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I); 3449 } 3450 3451 /// Return true if we know that the specified value is never null. 
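/// For example (illustrative): an alloca, a byval or nonnull argument, or
/// a load tagged with !nonnull metadata all qualify; a malloc result does
/// not, since malloc may return null.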
3452 bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) { 3453 assert(V->getType()->isPointerTy() && "V must be pointer type"); 3454 3455 // Alloca never returns null, malloc might. 3456 if (isa<AllocaInst>(V)) return true; 3457 3458 // A byval, inalloca, or nonnull argument is never null. 3459 if (const Argument *A = dyn_cast<Argument>(V)) 3460 return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr(); 3461 3462 // A global variable in address space 0 is non null unless extern weak. 3463 // Other address spaces may have null as a valid address for a global, 3464 // so we can't assume anything. 3465 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) 3466 return !GV->hasExternalWeakLinkage() && 3467 GV->getType()->getAddressSpace() == 0; 3468 3469 // A Load tagged w/nonnull metadata is never null. 3470 if (const LoadInst *LI = dyn_cast<LoadInst>(V)) 3471 return LI->getMetadata(LLVMContext::MD_nonnull); 3472 3473 if (auto CS = ImmutableCallSite(V)) 3474 if (CS.isReturnNonNull()) 3475 return true; 3476 3477 // operator new never returns null. 3478 if (isOperatorNewLikeFn(V, TLI, /*LookThroughBitCast=*/true)) 3479 return true; 3480 3481 return false; 3482 } 3483 3484 static bool isKnownNonNullFromDominatingCondition(const Value *V, 3485 const Instruction *CtxI, 3486 const DominatorTree *DT) { 3487 assert(V->getType()->isPointerTy() && "V must be pointer type"); 3488 3489 unsigned NumUsesExplored = 0; 3490 for (auto U : V->users()) { 3491 // Avoid massive lists 3492 if (NumUsesExplored >= DomConditionsMaxUses) 3493 break; 3494 NumUsesExplored++; 3495 // Consider only compare instructions uniquely controlling a branch 3496 const ICmpInst *Cmp = dyn_cast<ICmpInst>(U); 3497 if (!Cmp) 3498 continue; 3499 3500 if (DomConditionsSingleCmpUse && !Cmp->hasOneUse()) 3501 continue; 3502 3503 for (auto *CmpU : Cmp->users()) { 3504 const BranchInst *BI = dyn_cast<BranchInst>(CmpU); 3505 if (!BI) 3506 continue; 3507 3508 assert(BI->isConditional() && "uses a comparison!"); 3509 3510 BasicBlock *NonNullSuccessor = nullptr; 3511 CmpInst::Predicate Pred; 3512 3513 if (match(const_cast<ICmpInst*>(Cmp), 3514 m_c_ICmp(Pred, m_Specific(V), m_Zero()))) { 3515 if (Pred == ICmpInst::ICMP_EQ) 3516 NonNullSuccessor = BI->getSuccessor(1); 3517 else if (Pred == ICmpInst::ICMP_NE) 3518 NonNullSuccessor = BI->getSuccessor(0); 3519 } 3520 3521 if (NonNullSuccessor) { 3522 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor); 3523 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent())) 3524 return true; 3525 } 3526 } 3527 } 3528 3529 return false; 3530 } 3531 3532 bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI, 3533 const DominatorTree *DT, const TargetLibraryInfo *TLI) { 3534 if (isKnownNonNull(V, TLI)) 3535 return true; 3536 3537 return CtxI ? ::isKnownNonNullFromDominatingCondition(V, CtxI, DT) : false; 3538 } 3539 3540 OverflowResult llvm::computeOverflowForUnsignedMul(Value *LHS, Value *RHS, 3541 const DataLayout &DL, 3542 AssumptionCache *AC, 3543 const Instruction *CxtI, 3544 const DominatorTree *DT) { 3545 // Multiplying n * m significant bits yields a result of n + m significant 3546 // bits. If the total number of significant bits does not exceed the 3547 // result bit width (minus 1), there is no overflow. 3548 // This means if we have enough leading zero bits in the operands 3549 // we can guarantee that the result does not overflow. 
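// Worked example: with a bit width of 8, if each operand has at least 4
// known leading zero bits then each is at most 15, so the product is at
// most 225 < 256 and cannot overflow.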
3550   // Ref: "Hacker's Delight" by Henry Warren
3551   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3552   APInt LHSKnownZero(BitWidth, 0);
3553   APInt LHSKnownOne(BitWidth, 0);
3554   APInt RHSKnownZero(BitWidth, 0);
3555   APInt RHSKnownOne(BitWidth, 0);
3556   computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
3557                    DT);
3558   computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
3559                    DT);
3560   // Note that underestimating the number of zero bits gives a more
3561   // conservative answer.
3562   unsigned ZeroBits = LHSKnownZero.countLeadingOnes() +
3563                       RHSKnownZero.countLeadingOnes();
3564   // First handle the easy case: if we have enough zero bits there's
3565   // definitely no overflow.
3566   if (ZeroBits >= BitWidth)
3567     return OverflowResult::NeverOverflows;
3568
3569   // Get the largest possible values for each operand.
3570   APInt LHSMax = ~LHSKnownZero;
3571   APInt RHSMax = ~RHSKnownZero;
3572
3573   // We know the multiply operation doesn't overflow if the maximum values for
3574   // each operand will not overflow after we multiply them together.
3575   bool MaxOverflow;
3576   LHSMax.umul_ov(RHSMax, MaxOverflow);
3577   if (!MaxOverflow)
3578     return OverflowResult::NeverOverflows;
3579
3580   // We know it always overflows if multiplying the smallest possible values for
3581   // the operands also results in overflow.
3582   bool MinOverflow;
3583   LHSKnownOne.umul_ov(RHSKnownOne, MinOverflow);
3584   if (MinOverflow)
3585     return OverflowResult::AlwaysOverflows;
3586
3587   return OverflowResult::MayOverflow;
3588 }
3589
3590 OverflowResult llvm::computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
3591                                                    const DataLayout &DL,
3592                                                    AssumptionCache *AC,
3593                                                    const Instruction *CxtI,
3594                                                    const DominatorTree *DT) {
3595   bool LHSKnownNonNegative, LHSKnownNegative;
3596   ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
3597                  AC, CxtI, DT);
3598   if (LHSKnownNonNegative || LHSKnownNegative) {
3599     bool RHSKnownNonNegative, RHSKnownNegative;
3600     ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
3601                    AC, CxtI, DT);
3602
3603     if (LHSKnownNegative && RHSKnownNegative) {
3604       // The sign bit is set in both cases: this MUST overflow.
3605       // (Each operand is at least 2^(BitWidth-1), so the sum wraps.)
3606       return OverflowResult::AlwaysOverflows;
3607     }
3608
3609     if (LHSKnownNonNegative && RHSKnownNonNegative) {
3610       // The sign bit is clear in both cases: this CANNOT overflow.
3611       // (Each operand is below 2^(BitWidth-1), so the sum fits in BitWidth.)
3612       return OverflowResult::NeverOverflows;
3613     }
3614   }
3615
3616   return OverflowResult::MayOverflow;
3617 }
3618
3619 static OverflowResult computeOverflowForSignedAdd(
3620     Value *LHS, Value *RHS, AddOperator *Add, const DataLayout &DL,
3621     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT) {
3622   if (Add && Add->hasNoSignedWrap()) {
3623     return OverflowResult::NeverOverflows;
3624   }
3625
3626   bool LHSKnownNonNegative, LHSKnownNegative;
3627   bool RHSKnownNonNegative, RHSKnownNegative;
3628   ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
3629                  AC, CxtI, DT);
3630   ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
3631                  AC, CxtI, DT);
3632
3633   if ((LHSKnownNonNegative && RHSKnownNegative) ||
3634       (LHSKnownNegative && RHSKnownNonNegative)) {
3635     // The sign bits are opposite: this CANNOT overflow.
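    // (A non-negative operand plus a negative operand yields a sum lying
    // between the two operands, so it stays within the signed range.)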
3636     return OverflowResult::NeverOverflows;
3637   }
3638
3639   // The remaining code needs Add to be available. Return early if it is not.
3640   if (!Add)
3641     return OverflowResult::MayOverflow;
3642
3643   // If the sign of Add is the same as at least one of the operands, this add
3644   // CANNOT overflow. This is particularly useful when the sum is
3645   // @llvm.assume'ed non-negative rather than proved so from analyzing its
3646   // operands.
3647   bool LHSOrRHSKnownNonNegative =
3648       (LHSKnownNonNegative || RHSKnownNonNegative);
3649   bool LHSOrRHSKnownNegative = (LHSKnownNegative || RHSKnownNegative);
3650   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
3651     bool AddKnownNonNegative, AddKnownNegative;
3652     ComputeSignBit(Add, AddKnownNonNegative, AddKnownNegative, DL,
3653                    /*Depth=*/0, AC, CxtI, DT);
3654     if ((AddKnownNonNegative && LHSOrRHSKnownNonNegative) ||
3655         (AddKnownNegative && LHSOrRHSKnownNegative)) {
3656       return OverflowResult::NeverOverflows;
3657     }
3658   }
3659
3660   return OverflowResult::MayOverflow;
3661 }
3662
3663 OverflowResult llvm::computeOverflowForSignedAdd(AddOperator *Add,
3664                                                  const DataLayout &DL,
3665                                                  AssumptionCache *AC,
3666                                                  const Instruction *CxtI,
3667                                                  const DominatorTree *DT) {
3668   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
3669                                        Add, DL, AC, CxtI, DT);
3670 }
3671
3672 OverflowResult llvm::computeOverflowForSignedAdd(Value *LHS, Value *RHS,
3673                                                  const DataLayout &DL,
3674                                                  AssumptionCache *AC,
3675                                                  const Instruction *CxtI,
3676                                                  const DominatorTree *DT) {
3677   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
3678 }
3679
3680 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
3681   // FIXME: This conservative implementation can be relaxed. E.g. most
3682   // atomic operations are guaranteed to terminate on most platforms
3683   // and most functions terminate.
3684
3685   return !I->isAtomic() &&       // atomics may never succeed on some platforms
3686          !isa<CallInst>(I) &&    // could throw and might not terminate
3687          !isa<InvokeInst>(I) &&  // might not terminate and could throw to
3688                                  //   non-successor (see bug 24185 for details).
3689          !isa<ResumeInst>(I) &&  // has no successors
3690          !isa<ReturnInst>(I);    // has no successors
3691 }
3692
3693 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
3694                                                   const Loop *L) {
3695   // The loop header is guaranteed to be executed for every iteration.
3696   //
3697   // FIXME: Relax this constraint to cover all basic blocks that are
3698   // guaranteed to be executed at every iteration.
3699   if (I->getParent() != L->getHeader()) return false;
3700
3701   for (const Instruction &LI : *L->getHeader()) {
3702     if (&LI == I) return true;
3703     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
3704   }
3705   llvm_unreachable("Instruction not contained in its own parent basic block.");
3706 }
3707
3708 bool llvm::propagatesFullPoison(const Instruction *I) {
3709   switch (I->getOpcode()) {
3710   case Instruction::Add:
3711   case Instruction::Sub:
3712   case Instruction::Xor:
3713   case Instruction::Trunc:
3714   case Instruction::BitCast:
3715   case Instruction::AddrSpaceCast:
3716     // These operations all propagate poison unconditionally. Note that poison
3717     // is not any particular value, so xor or subtraction of poison with
3718     // itself still yields poison, not zero.
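    // (E.g. "%y = xor i32 %x, %x" is poison when %x is poison, even though
    // the value-level identity x ^ x == 0 would suggest zero.)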
3719 return true; 3720 3721 case Instruction::AShr: 3722 case Instruction::SExt: 3723 // For these operations, one bit of the input is replicated across 3724 // multiple output bits. A replicated poison bit is still poison. 3725 return true; 3726 3727 case Instruction::Shl: { 3728 // Left shift *by* a poison value is poison. The number of 3729 // positions to shift is unsigned, so no negative values are 3730 // possible there. Left shift by zero places preserves poison. So 3731 // it only remains to consider left shift of poison by a positive 3732 // number of places. 3733 // 3734 // A left shift by a positive number of places leaves the lowest order bit 3735 // non-poisoned. However, if such a shift has a no-wrap flag, then we can 3736 // make the poison operand violate that flag, yielding a fresh full-poison 3737 // value. 3738 auto *OBO = cast<OverflowingBinaryOperator>(I); 3739 return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap(); 3740 } 3741 3742 case Instruction::Mul: { 3743 // A multiplication by zero yields a non-poison zero result, so we need to 3744 // rule out zero as an operand. Conservatively, multiplication by a 3745 // non-zero constant is not multiplication by zero. 3746 // 3747 // Multiplication by a non-zero constant can leave some bits 3748 // non-poisoned. For example, a multiplication by 2 leaves the lowest 3749 // order bit unpoisoned. So we need to consider that. 3750 // 3751 // Multiplication by 1 preserves poison. If the multiplication has a 3752 // no-wrap flag, then we can make the poison operand violate that flag 3753 // when multiplied by any integer other than 0 and 1. 3754 auto *OBO = cast<OverflowingBinaryOperator>(I); 3755 if (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) { 3756 for (Value *V : OBO->operands()) { 3757 if (auto *CI = dyn_cast<ConstantInt>(V)) { 3758 // A ConstantInt cannot yield poison, so we can assume that it is 3759 // the other operand that is poison. 3760 return !CI->isZero(); 3761 } 3762 } 3763 } 3764 return false; 3765 } 3766 3767 case Instruction::GetElementPtr: 3768 // A GEP implicitly represents a sequence of additions, subtractions, 3769 // truncations, sign extensions and multiplications. The multiplications 3770 // are by the non-zero sizes of some set of types, so we do not have to be 3771 // concerned with multiplication by zero. If the GEP is in-bounds, then 3772 // these operations are implicitly no-signed-wrap so poison is propagated 3773 // by the arguments above for Add, Sub, Trunc, SExt and Mul. 
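    // (Consequently an inbounds GEP with a poison index is itself poison,
    // while a GEP without the inbounds flag is not guaranteed to be.)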
3774 return cast<GEPOperator>(I)->isInBounds(); 3775 3776 default: 3777 return false; 3778 } 3779 } 3780 3781 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) { 3782 switch (I->getOpcode()) { 3783 case Instruction::Store: 3784 return cast<StoreInst>(I)->getPointerOperand(); 3785 3786 case Instruction::Load: 3787 return cast<LoadInst>(I)->getPointerOperand(); 3788 3789 case Instruction::AtomicCmpXchg: 3790 return cast<AtomicCmpXchgInst>(I)->getPointerOperand(); 3791 3792 case Instruction::AtomicRMW: 3793 return cast<AtomicRMWInst>(I)->getPointerOperand(); 3794 3795 case Instruction::UDiv: 3796 case Instruction::SDiv: 3797 case Instruction::URem: 3798 case Instruction::SRem: 3799 return I->getOperand(1); 3800 3801 default: 3802 return nullptr; 3803 } 3804 } 3805 3806 bool llvm::isKnownNotFullPoison(const Instruction *PoisonI) { 3807 // We currently only look for uses of poison values within the same basic 3808 // block, as that makes it easier to guarantee that the uses will be 3809 // executed given that PoisonI is executed. 3810 // 3811 // FIXME: Expand this to consider uses beyond the same basic block. To do 3812 // this, look out for the distinction between post-dominance and strong 3813 // post-dominance. 3814 const BasicBlock *BB = PoisonI->getParent(); 3815 3816 // Set of instructions that we have proved will yield poison if PoisonI 3817 // does. 3818 SmallSet<const Value *, 16> YieldsPoison; 3819 YieldsPoison.insert(PoisonI); 3820 3821 for (BasicBlock::const_iterator I = PoisonI->getIterator(), E = BB->end(); 3822 I != E; ++I) { 3823 if (&*I != PoisonI) { 3824 const Value *NotPoison = getGuaranteedNonFullPoisonOp(&*I); 3825 if (NotPoison != nullptr && YieldsPoison.count(NotPoison)) return true; 3826 if (!isGuaranteedToTransferExecutionToSuccessor(&*I)) 3827 return false; 3828 } 3829 3830 // Mark poison that propagates from I through uses of I. 3831 if (YieldsPoison.count(&*I)) { 3832 for (const User *User : I->users()) { 3833 const Instruction *UserI = cast<Instruction>(User); 3834 if (UserI->getParent() == BB && propagatesFullPoison(UserI)) 3835 YieldsPoison.insert(User); 3836 } 3837 } 3838 } 3839 return false; 3840 } 3841 3842 static bool isKnownNonNaN(Value *V, FastMathFlags FMF) { 3843 if (FMF.noNaNs()) 3844 return true; 3845 3846 if (auto *C = dyn_cast<ConstantFP>(V)) 3847 return !C->isNaN(); 3848 return false; 3849 } 3850 3851 static bool isKnownNonZero(Value *V) { 3852 if (auto *C = dyn_cast<ConstantFP>(V)) 3853 return !C->isZero(); 3854 return false; 3855 } 3856 3857 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred, 3858 FastMathFlags FMF, 3859 Value *CmpLHS, Value *CmpRHS, 3860 Value *TrueVal, Value *FalseVal, 3861 Value *&LHS, Value *&RHS) { 3862 LHS = CmpLHS; 3863 RHS = CmpRHS; 3864 3865 // If the predicate is an "or-equal" (FP) predicate, then signed zeroes may 3866 // return inconsistent results between implementations. 3867 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0 3868 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1) 3869 // Therefore we behave conservatively and only proceed if at least one of the 3870 // operands is known to not be zero, or if we don't care about signed zeroes. 
3871 switch (Pred) { 3872 default: break; 3873 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE: 3874 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE: 3875 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) && 3876 !isKnownNonZero(CmpRHS)) 3877 return {SPF_UNKNOWN, SPNB_NA, false}; 3878 } 3879 3880 SelectPatternNaNBehavior NaNBehavior = SPNB_NA; 3881 bool Ordered = false; 3882 3883 // When given one NaN and one non-NaN input: 3884 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input. 3885 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the 3886 // ordered comparison fails), which could be NaN or non-NaN. 3887 // so here we discover exactly what NaN behavior is required/accepted. 3888 if (CmpInst::isFPPredicate(Pred)) { 3889 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF); 3890 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF); 3891 3892 if (LHSSafe && RHSSafe) { 3893 // Both operands are known non-NaN. 3894 NaNBehavior = SPNB_RETURNS_ANY; 3895 } else if (CmpInst::isOrdered(Pred)) { 3896 // An ordered comparison will return false when given a NaN, so it 3897 // returns the RHS. 3898 Ordered = true; 3899 if (LHSSafe) 3900 // LHS is non-NaN, so if RHS is NaN then NaN will be returned. 3901 NaNBehavior = SPNB_RETURNS_NAN; 3902 else if (RHSSafe) 3903 NaNBehavior = SPNB_RETURNS_OTHER; 3904 else 3905 // Completely unsafe. 3906 return {SPF_UNKNOWN, SPNB_NA, false}; 3907 } else { 3908 Ordered = false; 3909 // An unordered comparison will return true when given a NaN, so it 3910 // returns the LHS. 3911 if (LHSSafe) 3912 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned. 3913 NaNBehavior = SPNB_RETURNS_OTHER; 3914 else if (RHSSafe) 3915 NaNBehavior = SPNB_RETURNS_NAN; 3916 else 3917 // Completely unsafe. 3918 return {SPF_UNKNOWN, SPNB_NA, false}; 3919 } 3920 } 3921 3922 if (TrueVal == CmpRHS && FalseVal == CmpLHS) { 3923 std::swap(CmpLHS, CmpRHS); 3924 Pred = CmpInst::getSwappedPredicate(Pred); 3925 if (NaNBehavior == SPNB_RETURNS_NAN) 3926 NaNBehavior = SPNB_RETURNS_OTHER; 3927 else if (NaNBehavior == SPNB_RETURNS_OTHER) 3928 NaNBehavior = SPNB_RETURNS_NAN; 3929 Ordered = !Ordered; 3930 } 3931 3932 // ([if]cmp X, Y) ? X : Y 3933 if (TrueVal == CmpLHS && FalseVal == CmpRHS) { 3934 switch (Pred) { 3935 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality. 3936 case ICmpInst::ICMP_UGT: 3937 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false}; 3938 case ICmpInst::ICMP_SGT: 3939 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false}; 3940 case ICmpInst::ICMP_ULT: 3941 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false}; 3942 case ICmpInst::ICMP_SLT: 3943 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false}; 3944 case FCmpInst::FCMP_UGT: 3945 case FCmpInst::FCMP_UGE: 3946 case FCmpInst::FCMP_OGT: 3947 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered}; 3948 case FCmpInst::FCMP_ULT: 3949 case FCmpInst::FCMP_ULE: 3950 case FCmpInst::FCMP_OLT: 3951 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered}; 3952 } 3953 } 3954 3955 if (ConstantInt *C1 = dyn_cast<ConstantInt>(CmpRHS)) { 3956 if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) || 3957 (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) { 3958 3959 // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X 3960 // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X 3961 if (Pred == ICmpInst::ICMP_SGT && (C1->isZero() || C1->isMinusOne())) { 3962 return {(CmpLHS == TrueVal) ? 
SPF_ABS : SPF_NABS, SPNB_NA, false}; 3963 } 3964 3965 // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X 3966 // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X 3967 if (Pred == ICmpInst::ICMP_SLT && (C1->isZero() || C1->isOne())) { 3968 return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false}; 3969 } 3970 } 3971 3972 // Y >s C ? ~Y : ~C == ~Y <s ~C ? ~Y : ~C = SMIN(~Y, ~C) 3973 if (const auto *C2 = dyn_cast<ConstantInt>(FalseVal)) { 3974 if (C1->getType() == C2->getType() && ~C1->getValue() == C2->getValue() && 3975 (match(TrueVal, m_Not(m_Specific(CmpLHS))) || 3976 match(CmpLHS, m_Not(m_Specific(TrueVal))))) { 3977 LHS = TrueVal; 3978 RHS = FalseVal; 3979 return {SPF_SMIN, SPNB_NA, false}; 3980 } 3981 } 3982 } 3983 3984 // TODO: (X > 4) ? X : 5 --> (X >= 5) ? X : 5 --> MAX(X, 5) 3985 3986 return {SPF_UNKNOWN, SPNB_NA, false}; 3987 } 3988 3989 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2, 3990 Instruction::CastOps *CastOp) { 3991 CastInst *CI = dyn_cast<CastInst>(V1); 3992 Constant *C = dyn_cast<Constant>(V2); 3993 CastInst *CI2 = dyn_cast<CastInst>(V2); 3994 if (!CI) 3995 return nullptr; 3996 *CastOp = CI->getOpcode(); 3997 3998 if (CI2) { 3999 // If V1 and V2 are both the same cast from the same type, we can look 4000 // through V1. 4001 if (CI2->getOpcode() == CI->getOpcode() && 4002 CI2->getSrcTy() == CI->getSrcTy()) 4003 return CI2->getOperand(0); 4004 return nullptr; 4005 } else if (!C) { 4006 return nullptr; 4007 } 4008 4009 if (isa<SExtInst>(CI) && CmpI->isSigned()) { 4010 Constant *T = ConstantExpr::getTrunc(C, CI->getSrcTy()); 4011 // This is only valid if the truncated value can be sign-extended 4012 // back to the original value. 4013 if (ConstantExpr::getSExt(T, C->getType()) == C) 4014 return T; 4015 return nullptr; 4016 } 4017 if (isa<ZExtInst>(CI) && CmpI->isUnsigned()) 4018 return ConstantExpr::getTrunc(C, CI->getSrcTy()); 4019 4020 if (isa<TruncInst>(CI)) 4021 return ConstantExpr::getIntegerCast(C, CI->getSrcTy(), CmpI->isSigned()); 4022 4023 if (isa<FPToUIInst>(CI)) 4024 return ConstantExpr::getUIToFP(C, CI->getSrcTy(), true); 4025 4026 if (isa<FPToSIInst>(CI)) 4027 return ConstantExpr::getSIToFP(C, CI->getSrcTy(), true); 4028 4029 if (isa<UIToFPInst>(CI)) 4030 return ConstantExpr::getFPToUI(C, CI->getSrcTy(), true); 4031 4032 if (isa<SIToFPInst>(CI)) 4033 return ConstantExpr::getFPToSI(C, CI->getSrcTy(), true); 4034 4035 if (isa<FPTruncInst>(CI)) 4036 return ConstantExpr::getFPExtend(C, CI->getSrcTy(), true); 4037 4038 if (isa<FPExtInst>(CI)) 4039 return ConstantExpr::getFPTrunc(C, CI->getSrcTy(), true); 4040 4041 return nullptr; 4042 } 4043 4044 SelectPatternResult llvm::matchSelectPattern(Value *V, 4045 Value *&LHS, Value *&RHS, 4046 Instruction::CastOps *CastOp) { 4047 SelectInst *SI = dyn_cast<SelectInst>(V); 4048 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false}; 4049 4050 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition()); 4051 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false}; 4052 4053 CmpInst::Predicate Pred = CmpI->getPredicate(); 4054 Value *CmpLHS = CmpI->getOperand(0); 4055 Value *CmpRHS = CmpI->getOperand(1); 4056 Value *TrueVal = SI->getTrueValue(); 4057 Value *FalseVal = SI->getFalseValue(); 4058 FastMathFlags FMF; 4059 if (isa<FPMathOperator>(CmpI)) 4060 FMF = CmpI->getFastMathFlags(); 4061 4062 // Bail out early. 4063 if (CmpI->isEquality()) 4064 return {SPF_UNKNOWN, SPNB_NA, false}; 4065 4066 // Deal with type mismatches. 
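  // (Illustrative, schematic case: in "select (icmp slt i32 %a, %b),
  //  i64 %a.ext, i64 C" where %a.ext = sext i32 %a to i64, lookThroughCast
  //  strips the sext and truncates C, provided the truncated constant
  //  sign-extends back to C.)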
4067 if (CastOp && CmpLHS->getType() != TrueVal->getType()) { 4068 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) 4069 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, 4070 cast<CastInst>(TrueVal)->getOperand(0), C, 4071 LHS, RHS); 4072 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) 4073 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, 4074 C, cast<CastInst>(FalseVal)->getOperand(0), 4075 LHS, RHS); 4076 } 4077 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal, 4078 LHS, RHS); 4079 } 4080 4081 ConstantRange llvm::getConstantRangeFromMetadata(MDNode &Ranges) { 4082 const unsigned NumRanges = Ranges.getNumOperands() / 2; 4083 assert(NumRanges >= 1 && "Must have at least one range!"); 4084 assert(Ranges.getNumOperands() % 2 == 0 && "Must be a sequence of pairs"); 4085 4086 auto *FirstLow = mdconst::extract<ConstantInt>(Ranges.getOperand(0)); 4087 auto *FirstHigh = mdconst::extract<ConstantInt>(Ranges.getOperand(1)); 4088 4089 ConstantRange CR(FirstLow->getValue(), FirstHigh->getValue()); 4090 4091 for (unsigned i = 1; i < NumRanges; ++i) { 4092 auto *Low = mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0)); 4093 auto *High = mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1)); 4094 4095 // Note: unionWith will potentially create a range that contains values not 4096 // contained in any of the original N ranges. 4097 CR = CR.unionWith(ConstantRange(Low->getValue(), High->getValue())); 4098 } 4099 4100 return CR; 4101 } 4102 4103 /// Return true if "icmp Pred LHS RHS" is always true. 4104 static bool isTruePredicate(CmpInst::Predicate Pred, Value *LHS, Value *RHS, 4105 const DataLayout &DL, unsigned Depth, 4106 AssumptionCache *AC, const Instruction *CxtI, 4107 const DominatorTree *DT) { 4108 assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!"); 4109 if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS) 4110 return true; 4111 4112 switch (Pred) { 4113 default: 4114 return false; 4115 4116 case CmpInst::ICMP_SLE: { 4117 const APInt *C; 4118 4119 // LHS s<= LHS +_{nsw} C if C >= 0 4120 if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C)))) 4121 return !C->isNegative(); 4122 return false; 4123 } 4124 4125 case CmpInst::ICMP_ULE: { 4126 const APInt *C; 4127 4128 // LHS u<= LHS +_{nuw} C for any C 4129 if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C)))) 4130 return true; 4131 4132 // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB) 4133 auto MatchNUWAddsToSameValue = [&](Value *A, Value *B, Value *&X, 4134 const APInt *&CA, const APInt *&CB) { 4135 if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) && 4136 match(B, m_NUWAdd(m_Specific(X), m_APInt(CB)))) 4137 return true; 4138 4139 // If X & C == 0 then (X | C) == X +_{nuw} C 4140 if (match(A, m_Or(m_Value(X), m_APInt(CA))) && 4141 match(B, m_Or(m_Specific(X), m_APInt(CB)))) { 4142 unsigned BitWidth = CA->getBitWidth(); 4143 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0); 4144 computeKnownBits(X, KnownZero, KnownOne, DL, Depth + 1, AC, CxtI, DT); 4145 4146 if ((KnownZero & *CA) == *CA && (KnownZero & *CB) == *CB) 4147 return true; 4148 } 4149 4150 return false; 4151 }; 4152 4153 Value *X; 4154 const APInt *CLHS, *CRHS; 4155 if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS)) 4156 return CLHS->ule(*CRHS); 4157 4158 return false; 4159 } 4160 } 4161 } 4162 4163 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred 4164 /// ALHS ARHS" is true. 
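/// For example (illustrative): "%x u< %y" implies "%x u< (%y +nuw 4)":
/// the unsigned cases below reduce this to "%x u<= %x" and
/// "%y u<= %y +nuw 4", both of which isTruePredicate proves.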
4165 static bool isImpliedCondOperands(CmpInst::Predicate Pred, Value *ALHS,
4166                                   Value *ARHS, Value *BLHS, Value *BRHS,
4167                                   const DataLayout &DL, unsigned Depth,
4168                                   AssumptionCache *AC, const Instruction *CxtI,
4169                                   const DominatorTree *DT) {
4170   switch (Pred) {
4171   default:
4172     return false;
4173
4174   case CmpInst::ICMP_SLT:
4175   case CmpInst::ICMP_SLE:
4176     return isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth, AC, CxtI,
4177                            DT) &&
4178            isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth, AC, CxtI,
4179                            DT);
4180
4181   case CmpInst::ICMP_ULT:
4182   case CmpInst::ICMP_ULE:
4183     return isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth, AC, CxtI,
4184                            DT) &&
4185            isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth, AC, CxtI,
4186                            DT);
4187   }
4188 }
4189
4190 bool llvm::isImpliedCondition(Value *LHS, Value *RHS, const DataLayout &DL,
4191                               unsigned Depth, AssumptionCache *AC,
4192                               const Instruction *CxtI,
4193                               const DominatorTree *DT) {
4194   assert(LHS->getType() == RHS->getType() && "mismatched type");
4195   Type *OpTy = LHS->getType();
4196   assert(OpTy->getScalarType()->isIntegerTy(1));
4197
4198   // LHS ==> RHS by definition
4199   if (LHS == RHS) return true;
4200
4201   if (OpTy->isVectorTy())
4202     // TODO: extend the code below to handle vectors.
4203     return false;
4204   assert(OpTy->isIntegerTy(1) && "implied by above");
4205
4206   ICmpInst::Predicate APred, BPred;
4207   Value *ALHS, *ARHS;
4208   Value *BLHS, *BRHS;
4209
4210   if (!match(LHS, m_ICmp(APred, m_Value(ALHS), m_Value(ARHS))) ||
4211       !match(RHS, m_ICmp(BPred, m_Value(BLHS), m_Value(BRHS))))
4212     return false;
4213
4214   if (APred == BPred)
4215     return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth, AC,
4216                                  CxtI, DT);
4217
4218   return false;
4219 }
4220
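// Example usage (illustrative):
//   %c1 = icmp ult i32 %x, %n
//   %m  = add nuw i32 %n, 4
//   %c2 = icmp ult i32 %x, %m
// isImpliedCondition(%c1, %c2, DL) returns true: both are unsigned
// less-than compares of %x, and %n u<= %n + 4 since the add cannot wrap.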