//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include <cstring>
using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

/// Enable an experimental feature to leverage information about dominating
/// conditions to compute known bits.  The individual options below control how
/// hard we search.  The defaults are chosen to be fairly aggressive.  If you
/// run into compile time problems when testing, scale them back and report
/// your findings.
static cl::opt<bool> EnableDomConditions("value-tracking-dom-conditions",
                                         cl::Hidden, cl::init(false));

// This is expensive, so we only do it for the top level query value.
// (TODO: evaluate cost vs profit, consider higher thresholds)
static cl::opt<unsigned> DomConditionsMaxDepth("dom-conditions-max-depth",
                                               cl::Hidden, cl::init(1));

/// How many dominating blocks should be scanned looking for dominating
/// conditions?
static cl::opt<unsigned> DomConditionsMaxDomBlocks("dom-conditions-dom-blocks",
                                                   cl::Hidden,
                                                   cl::init(20000));

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(2000));

// If true, consider only compares whose sole use is a branch.
static cl::opt<bool> DomConditionsSingleCmpUse("dom-conditions-single-cmp-use",
                                               cl::Hidden, cl::init(false));

/// Returns the bitwidth of the given scalar or pointer type (if unknown returns
/// 0). For vector types, returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}
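
// Illustrative note (not part of the original source): for i32 and <4 x i32>
// getScalarSizeInBits() returns 32 directly; for pointer types it returns 0,
// so we fall back to DataLayout's pointer size in bits.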

// Many of these functions have internal versions that take an assumption
// exclusion set. This is because of the potential for mutual recursion to
// cause computeKnownBits to repeatedly visit the same assume intrinsic. The
// classic case of this is assume(x = y), which will attempt to determine
// bits in x from bits in y, which will attempt to determine bits in y from
// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
// isKnownNonZero, which calls computeKnownBits and ComputeSignBit and
// isKnownToBeAPowerOfTwo (all of which can call computeKnownBits), and so on.
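//
// Illustrative IR example (hypothetical, not from this file): given
//   %c = icmp eq i64 %x, %y
//   call void @llvm.assume(i1 %c)
// a query on %x consults the assume and recurses into %y, whose query would
// consult the same assume about %x again; carrying the assume in the
// exclusion set breaks that cycle.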
typedef SmallPtrSet<const Value *, 8> ExclInvsSet;

namespace {
// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  ExclInvsSet ExclInvs;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  Query(AssumptionCache *AC = nullptr, const Instruction *CxtI = nullptr,
        const DominatorTree *DT = nullptr)
      : AC(AC), CxtI(CxtI), DT(DT) {}

  Query(const Query &Q, const Value *NewExcl)
      : ExclInvs(Q.ExclInvs), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT) {
    ExclInvs.insert(NewExcl);
  }
};
} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
                             const DataLayout &DL, unsigned Depth,
                             const Query &Q);

void llvm::computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT) {
  ::computeKnownBits(V, KnownZero, KnownOne, DL, Depth,
                     Query(AC, safeCxtI(V, CxtI), DT));
}

static void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
                           const DataLayout &DL, unsigned Depth,
                           const Query &Q);

void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
                          const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  ::ComputeSignBit(V, KnownZero, KnownOne, DL, Depth,
                   Query(AC, safeCxtI(V, CxtI), DT));
}

static bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q, const DataLayout &DL);

bool llvm::isKnownToBeAPowerOfTwo(Value *V, const DataLayout &DL, bool OrZero,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
                                  Query(AC, safeCxtI(V, CxtI), DT), DL);
}

static bool isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
                           const Query &Q);

bool llvm::isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  return ::isKnownNonZero(V, DL, Depth, Query(AC, safeCxtI(V, CxtI), DT));
}

static bool MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
                              unsigned Depth, const Query &Q);

bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
                             unsigned Depth, AssumptionCache *AC,
                             const Instruction *CxtI, const DominatorTree *DT) {
  return ::MaskedValueIsZero(V, Mask, DL, Depth,
                             Query(AC, safeCxtI(V, CxtI), DT));
}

static unsigned ComputeNumSignBits(Value *V, const DataLayout &DL,
                                   unsigned Depth, const Query &Q);

unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::ComputeNumSignBits(V, DL, Depth, Query(AC, safeCxtI(V, CxtI), DT));
}

static void computeKnownBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
                                   APInt &KnownZero, APInt &KnownOne,
                                   APInt &KnownZero2, APInt &KnownOne2,
                                   const DataLayout &DL, unsigned Depth,
                                   const Query &Q) {
  if (!Add) {
    if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
      // We know that the top bits of C-X are clear if X contains fewer bits
      // than C (i.e. no wrap-around can happen).  For example, 20-X is
      // positive if we can prove that X is >= 0 and < 16.
      if (!CLHS->getValue().isNegative()) {
        unsigned BitWidth = KnownZero.getBitWidth();
        unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
        // NLZ can't be BitWidth with no sign bit
        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
        computeKnownBits(Op1, KnownZero2, KnownOne2, DL, Depth + 1, Q);

        // If all of the MaskV bits are known to be zero, then we know the
        // output top bits are zero, because we now know that the output is
        // from [0-C].
        if ((KnownZero2 & MaskV) == MaskV) {
          unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
          // Top bits known zero.
          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
        }
      }
    }
  }

  unsigned BitWidth = KnownZero.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
  APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
  computeKnownBits(Op0, LHSKnownZero, LHSKnownOne, DL, Depth + 1, Q);
  computeKnownBits(Op1, KnownZero2, KnownOne2, DL, Depth + 1, Q);

  // Carry in a 1 for a subtract, rather than a 0.
  APInt CarryIn(BitWidth, 0);
  if (!Add) {
    // Sum = LHS + ~RHS + 1
    std::swap(KnownZero2, KnownOne2);
    CarryIn.setBit(0);
  }

  APInt PossibleSumZero = ~LHSKnownZero + ~KnownZero2 + CarryIn;
  APInt PossibleSumOne = LHSKnownOne + KnownOne2 + CarryIn;
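  // Worked example (illustrative only): with 4-bit operands LHS = 0b01?0 and
  // RHS = 0b0001 ('?' = unknown bit), PossibleSumOne (all '?' taken as 0) is
  // 0b0100 + 0b0001 = 0b0101, and PossibleSumZero (all '?' taken as 1) is
  // 0b0110 + 0b0001 = 0b0111.  The two sums disagree only in bit 1, so the
  // result below comes out as 0b01?1, matching the true sum (5 or 7).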

  // Compute known bits of the carry.
  APInt CarryKnownZero = ~(PossibleSumZero ^ LHSKnownZero ^ KnownZero2);
  APInt CarryKnownOne = PossibleSumOne ^ LHSKnownOne ^ KnownOne2;

  // Compute set of known bits (where all three relevant bits are known).
  APInt LHSKnown = LHSKnownZero | LHSKnownOne;
  APInt RHSKnown = KnownZero2 | KnownOne2;
  APInt CarryKnown = CarryKnownZero | CarryKnownOne;
  APInt Known = LHSKnown & RHSKnown & CarryKnown;

  assert((PossibleSumZero & Known) == (PossibleSumOne & Known) &&
         "known bits of sum differ");

  // Compute known bits of the result.
  KnownZero = ~PossibleSumOne & Known;
  KnownOne = PossibleSumOne & Known;

  // Are we still trying to solve for the sign bit?
  if (!Known.isNegative()) {
    if (NSW) {
      // Adding two non-negative numbers, or subtracting a negative number from
      // a non-negative one, can't wrap into negative.
      if (LHSKnownZero.isNegative() && KnownZero2.isNegative())
        KnownZero |= APInt::getSignBit(BitWidth);
      // Adding two negative numbers, or subtracting a non-negative number from
      // a negative one, can't wrap into non-negative.
      else if (LHSKnownOne.isNegative() && KnownOne2.isNegative())
        KnownOne |= APInt::getSignBit(BitWidth);
    }
  }
}

static void computeKnownBitsMul(Value *Op0, Value *Op1, bool NSW,
                                APInt &KnownZero, APInt &KnownOne,
                                APInt &KnownZero2, APInt &KnownOne2,
                                const DataLayout &DL, unsigned Depth,
                                const Query &Q) {
  unsigned BitWidth = KnownZero.getBitWidth();
  computeKnownBits(Op1, KnownZero, KnownOne, DL, Depth + 1, Q);
  computeKnownBits(Op0, KnownZero2, KnownOne2, DL, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = KnownZero.isNegative();
      bool isKnownNonNegativeOp0 = KnownZero2.isNegative();
      bool isKnownNegativeOp1 = KnownOne.isNegative();
      bool isKnownNegativeOp0 = KnownOne2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
        (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, DL, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, DL, Depth, Q));
    }
  }

  // If low bits are zero in either operand, output low known-0 bits.
  // Also compute a conservative estimate for high known-0 bits.
  // More trickiness is possible, but this is sufficient for the
  // interesting case of alignment computation.
  KnownOne.clearAllBits();
  unsigned TrailZ = KnownZero.countTrailingOnes() +
                    KnownZero2.countTrailingOnes();
  unsigned LeadZ =  std::max(KnownZero.countLeadingOnes() +
                             KnownZero2.countLeadingOnes(),
                             BitWidth) - BitWidth;

  TrailZ = std::min(TrailZ, BitWidth);
  LeadZ = std::min(LeadZ, BitWidth);
  KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
              APInt::getHighBitsSet(BitWidth, LeadZ);
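  // Illustrative example: if both operands are known 4-byte aligned (two
  // trailing zero bits each), TrailZ = 2 + 2 = 4, so the product is known
  // 16-byte aligned.  Likewise, multiplying two 32-bit values each known to
  // be below 2^8 leaves at least 16 leading zero bits in the product.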

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly.  This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !KnownOne.isNegative())
    KnownZero.setBit(BitWidth - 1);
  else if (isKnownNegative && !KnownZero.isNegative())
    KnownOne.setBit(BitWidth - 1);
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             APInt &KnownZero) {
  unsigned BitWidth = KnownZero.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  // Use the high end of the ranges to find leading zeros.
  unsigned MinLeadingZeros = BitWidth;
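  // Illustrative example: for !range !{i8 0, i8 10} there is one range
  // [0, 10), so Upper - 1 == 9 == 0b00001001, which has four leading zeros;
  // the loop below therefore marks the top four bits as known zero.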
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());
    if (Range.isWrappedSet())
      MinLeadingZeros = 0; // -1 has no zeros
    unsigned LeadingZeros = (Upper->getValue() - 1).countLeadingZeros();
    MinLeadingZeros = std::min(LeadingZeros, MinLeadingZeros);
  }

  KnownZero = APInt::getHighBitsSet(BitWidth, MinLeadingZeros);
}

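// Return true if E is used, directly or transitively, only in the computation
// of the condition of the assume-like instruction I; such "ephemeral" values
// must not be proven by the very assume they feed.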
static bool isEphemeralValueOf(Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    bool FoundNEUse = false;
    for (const User *I : V->users())
      if (!EphValues.count(I)) {
        FoundNEUse = true;
        break;
      }

    if (!FoundNEUse) {
      if (V == E)
        return true;

      EphValues.insert(V);
      if (const User *U = dyn_cast<User>(V))
        for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
             J != JE; ++J) {
          if (isSafeToSpeculativelyExecute(*J))
            WorkSet.push_back(*J);
        }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
static bool isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}

static bool isValidAssumeForContext(Value *V, const Query &Q) {
  Instruction *Inv = cast<Instruction>(V);

  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (Q.DT) {
    if (Q.DT->dominates(Inv, Q.CxtI)) {
      return true;
    } else if (Inv->getParent() == Q.CxtI->getParent()) {
      // The context comes first, but they're both in the same block. Make sure
      // there is nothing in between that might interrupt the control flow.
      for (BasicBlock::const_iterator I =
             std::next(BasicBlock::const_iterator(Q.CxtI)),
                                      IE(Inv); I != IE; ++I)
        if (!isSafeToSpeculativelyExecute(I) && !isAssumeLikeIntrinsic(I))
          return false;

      return !isEphemeralValueOf(Inv, Q.CxtI);
    }

    return false;
  }

  // When we don't have a DT, we do a limited search...
  if (Inv->getParent() == Q.CxtI->getParent()->getSinglePredecessor()) {
    return true;
  } else if (Inv->getParent() == Q.CxtI->getParent()) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (BasicBlock::iterator I = std::next(BasicBlock::iterator(Inv)),
         IE = Inv->getParent()->end(); I != IE; ++I)
      if (I == Q.CxtI)
        return true;

    // The context must come first...
    for (BasicBlock::const_iterator I =
           std::next(BasicBlock::const_iterator(Q.CxtI)),
                                    IE(Inv); I != IE; ++I)
      if (!isSafeToSpeculativelyExecute(I) && !isAssumeLikeIntrinsic(I))
        return false;

    return !isEphemeralValueOf(Inv, Q.CxtI);
  }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *I,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  return ::isValidAssumeForContext(const_cast<Instruction *>(I),
                                   Query(nullptr, CxtI, DT));
}

template<typename LHS, typename RHS>
inline match_combine_or<CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>,
                        CmpClass_match<RHS, LHS, ICmpInst, ICmpInst::Predicate>>
m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
  return m_CombineOr(m_ICmp(Pred, L, R), m_ICmp(Pred, R, L));
}

template<typename LHS, typename RHS>
inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::And>,
                        BinaryOp_match<RHS, LHS, Instruction::And>>
m_c_And(const LHS &L, const RHS &R) {
  return m_CombineOr(m_And(L, R), m_And(R, L));
}

template<typename LHS, typename RHS>
inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::Or>,
                        BinaryOp_match<RHS, LHS, Instruction::Or>>
m_c_Or(const LHS &L, const RHS &R) {
  return m_CombineOr(m_Or(L, R), m_Or(R, L));
}

template<typename LHS, typename RHS>
inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::Xor>,
                        BinaryOp_match<RHS, LHS, Instruction::Xor>>
m_c_Xor(const LHS &L, const RHS &R) {
  return m_CombineOr(m_Xor(L, R), m_Xor(R, L));
}
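
// Usage note (illustrative): these commutative matchers accept either operand
// order, so match(Arg, m_c_ICmp(Pred, m_Specific(V), m_Value(A))) matches both
// "icmp eq i32 %v, %a" and "icmp eq i32 %a, %v", binding A in either case.
// m_c_ICmp swaps the operands without swapping the predicate, which is why the
// callers below only act on ICMP_EQ matches through it.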

/// Compute known bits in 'V' under the assumption that the condition 'Cmp' is
/// true (at the context instruction).  This is mostly a utility function for
/// the prototype dominating conditions reasoning below.
static void computeKnownBitsFromTrueCondition(Value *V, ICmpInst *Cmp,
                                              APInt &KnownZero,
                                              APInt &KnownOne,
                                              const DataLayout &DL,
                                              unsigned Depth, const Query &Q) {
  Value *LHS = Cmp->getOperand(0);
  Value *RHS = Cmp->getOperand(1);
  // TODO: We could potentially be more aggressive here.  This would be worth
  // evaluating.  If we can, explore commoning this code with the assume
  // handling logic.
  if (LHS != V && RHS != V)
    return;

  const unsigned BitWidth = KnownZero.getBitWidth();

  switch (Cmp->getPredicate()) {
  default:
    // We know nothing from this condition
    break;
  // TODO: implement unsigned bound from below (known one bits)
  // TODO: common condition check implementations with assumes
  // TODO: implement other patterns from assume (e.g. V & B == A)
  case ICmpInst::ICMP_SGT:
    if (LHS == V) {
      APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0);
      computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
      if (KnownOneTemp.isAllOnesValue() || KnownZeroTemp.isNegative()) {
        // We know that the sign bit is zero.
        KnownZero |= APInt::getSignBit(BitWidth);
      }
    }
    break;
  case ICmpInst::ICMP_EQ:
    if (LHS == V)
      computeKnownBits(RHS, KnownZero, KnownOne, DL, Depth + 1, Q);
    else if (RHS == V)
      computeKnownBits(LHS, KnownZero, KnownOne, DL, Depth + 1, Q);
    else
      llvm_unreachable("missing use?");
    break;
  case ICmpInst::ICMP_ULE:
    if (LHS == V) {
      APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0);
      computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
      // The known zero bits carry over
      unsigned SignBits = KnownZeroTemp.countLeadingOnes();
      KnownZero |= APInt::getHighBitsSet(BitWidth, SignBits);
    }
    break;
  case ICmpInst::ICMP_ULT:
    if (LHS == V) {
      APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0);
      computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
      // Whatever high bits in rhs are zero are known to be zero (if rhs is a
      // power of 2, then one more).
      unsigned SignBits = KnownZeroTemp.countLeadingOnes();
      if (isKnownToBeAPowerOfTwo(RHS, false, Depth + 1, Query(Q, Cmp), DL))
        SignBits++;
      KnownZero |= APInt::getHighBitsSet(BitWidth, SignBits);
    }
    break;
  };
}

/// Compute known bits in 'V' from conditions which are known to be true along
/// all paths leading to the context instruction.  In particular, look for
/// cases where one branch of an interesting condition dominates the context
/// instruction.  This does not do general dataflow.
/// NOTE: This code is EXPERIMENTAL and currently off by default.
static void computeKnownBitsFromDominatingCondition(Value *V, APInt &KnownZero,
                                                    APInt &KnownOne,
                                                    const DataLayout &DL,
                                                    unsigned Depth,
                                                    const Query &Q) {
  // Need both the dominator tree and the query location to do anything useful
  if (!Q.DT || !Q.CxtI)
    return;
  Instruction *Cxt = const_cast<Instruction *>(Q.CxtI);

  // Avoid useless work
  if (auto VI = dyn_cast<Instruction>(V))
    if (VI->getParent() == Cxt->getParent())
      return;

  // Note: We currently implement two options.  It's not clear which of these
  // will survive long term; we need data for that.
  // Option 1 - Try walking the dominator tree looking for conditions which
  // might apply.  This works well for local conditions (loop guards, etc..),
  // but not as well for things far from the context instruction (presuming a
  // low max blocks explored).  If we can set a high enough limit, this would
  // be all we need.
  // Option 2 - We restrict our search to those conditions which are uses of
  // the value we're interested in.  This is independent of dom structure,
  // but is slightly less powerful without looking through lots of use chains.
  // It does handle conditions far from the context instruction (e.g. early
  // function exits on entry) really well though.
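  //
  // Illustrative example of the pattern both options look for:
  //   %cmp = icmp ult i32 %x, 16
  //   br i1 %cmp, label %taken, label %other
  // If the edge into %taken dominates the context instruction, then at the
  // context %x is known to be < 16, so its top 28 bits are known zero (27
  // from the constant, plus one more because 16 is a power of two).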

  // Option 1 - Search the dom tree
  unsigned NumBlocksExplored = 0;
  BasicBlock *Current = Cxt->getParent();
  while (true) {
    // Stop searching if we've gone too far up the chain
    if (NumBlocksExplored >= DomConditionsMaxDomBlocks)
      break;
    NumBlocksExplored++;

    if (!Q.DT->getNode(Current)->getIDom())
      break;
    Current = Q.DT->getNode(Current)->getIDom()->getBlock();
    if (!Current)
      // found function entry
      break;

    BranchInst *BI = dyn_cast<BranchInst>(Current->getTerminator());
    if (!BI || BI->isUnconditional())
      continue;
    ICmpInst *Cmp = dyn_cast<ICmpInst>(BI->getCondition());
    if (!Cmp)
      continue;

    // We're looking for conditions that are guaranteed to hold at the context
    // instruction.  Finding a condition where one path dominates the context
    // isn't enough because both the true and false cases could merge before
    // the context instruction we're actually interested in.  Instead, we need
    // to ensure that the taken *edge* dominates the context instruction.
    BasicBlock *BB0 = BI->getSuccessor(0);
    BasicBlockEdge Edge(BI->getParent(), BB0);
    if (!Edge.isSingleEdge() || !Q.DT->dominates(Edge, Q.CxtI->getParent()))
      continue;

    computeKnownBitsFromTrueCondition(V, Cmp, KnownZero, KnownOne, DL, Depth,
                                      Q);
  }

  // Option 2 - Search the other uses of V
  unsigned NumUsesExplored = 0;
  for (auto U : V->users()) {
    // Avoid massive lists
    if (NumUsesExplored >= DomConditionsMaxUses)
      break;
    NumUsesExplored++;
    // Consider only compare instructions uniquely controlling a branch
    ICmpInst *Cmp = dyn_cast<ICmpInst>(U);
    if (!Cmp)
      continue;

    if (DomConditionsSingleCmpUse && !Cmp->hasOneUse())
      continue;

    for (auto *CmpU : Cmp->users()) {
      BranchInst *BI = dyn_cast<BranchInst>(CmpU);
      if (!BI || BI->isUnconditional())
        continue;
      // We're looking for conditions that are guaranteed to hold at the
      // context instruction.  Finding a condition where one path dominates
      // the context isn't enough because both the true and false cases could
      // merge before the context instruction we're actually interested in.
      // Instead, we need to ensure that the taken *edge* dominates the context
      // instruction.
      BasicBlock *BB0 = BI->getSuccessor(0);
      BasicBlockEdge Edge(BI->getParent(), BB0);
      if (!Edge.isSingleEdge() || !Q.DT->dominates(Edge, Q.CxtI->getParent()))
        continue;

      computeKnownBitsFromTrueCondition(V, Cmp, KnownZero, KnownOne, DL, Depth,
                                        Q);
    }
  }
}

static void computeKnownBitsFromAssume(Value *V, APInt &KnownZero,
                                       APInt &KnownOne, const DataLayout &DL,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = KnownZero.getBitWidth();
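
  // Illustrative example of what the pattern matching below can conclude:
  // after
  //   %m = and i32 %v, 255
  //   %c = icmp eq i32 %m, 7
  //   call void @llvm.assume(i1 %c)
  // the "assume(v & b = a)" case propagates the known bits of 7 through the
  // known-one bits of the mask, making the low 8 bits of %v known 0b00000111.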

  for (auto &AssumeVH : Q.AC->assumptions()) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.ExclInvs.count(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      KnownZero.clearAllBits();
      KnownOne.setAllBits();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V),
                           m_CombineOr(m_PtrToInt(m_Specific(V)),
                           m_BitCast(m_Specific(V))));

    CmpInst::Predicate Pred;
    ConstantInt *C;
    // assume(v = a)
    if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
        Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      KnownZero |= RHSKnownZero;
      KnownOne  |= RHSKnownOne;
    // assume(v & b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
      computeKnownBits(B, MaskKnownZero, MaskKnownOne, DL, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
      KnownZero |= RHSKnownZero & MaskKnownOne;
      KnownOne  |= RHSKnownOne  & MaskKnownOne;
    // assume(~(v & b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
      computeKnownBits(B, MaskKnownZero, MaskKnownOne, DL, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      KnownZero |= RHSKnownOne  & MaskKnownOne;
      KnownOne  |= RHSKnownZero & MaskKnownOne;
    // assume(v | b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      KnownZero |= RHSKnownZero & BKnownZero;
      KnownOne  |= RHSKnownOne  & BKnownZero;
    // assume(~(v | b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      KnownZero |= RHSKnownOne  & BKnownZero;
      KnownOne  |= RHSKnownZero & BKnownZero;
    // assume(v ^ b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
      KnownZero |= RHSKnownZero & BKnownZero;
      KnownOne  |= RHSKnownOne  & BKnownZero;
      KnownZero |= RHSKnownOne  & BKnownOne;
      KnownOne  |= RHSKnownZero & BKnownOne;
    // assume(~(v ^ b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      KnownZero |= RHSKnownOne  & BKnownZero;
      KnownOne  |= RHSKnownZero & BKnownZero;
      KnownZero |= RHSKnownZero & BKnownOne;
      KnownOne  |= RHSKnownOne  & BKnownOne;
    // assume(v << c = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      KnownZero |= RHSKnownZero.lshr(C->getZExtValue());
      KnownOne  |= RHSKnownOne.lshr(C->getZExtValue());
    // assume(~(v << c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      KnownZero |= RHSKnownOne.lshr(C->getZExtValue());
      KnownOne  |= RHSKnownZero.lshr(C->getZExtValue());
    // assume(v >> c = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)),
                                                m_AShr(m_V, m_ConstantInt(C))),
                              m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the left by C.
      KnownZero |= RHSKnownZero << C->getZExtValue();
      KnownOne  |= RHSKnownOne  << C->getZExtValue();
    // assume(~(v >> c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_CombineOr(
                                             m_LShr(m_V, m_ConstantInt(C)),
                                             m_AShr(m_V, m_ConstantInt(C)))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the left by C.
      KnownZero |= RHSKnownOne  << C->getZExtValue();
      KnownOne  |= RHSKnownZero << C->getZExtValue();
    // assume(v >=_s c) where c is non-negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGE && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));

      if (RHSKnownZero.isNegative()) {
        // We know that the sign bit is zero.
        KnownZero |= APInt::getSignBit(BitWidth);
      }
    // assume(v >_s c) where c is at least -1.
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGT && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));

      if (RHSKnownOne.isAllOnesValue() || RHSKnownZero.isNegative()) {
        // We know that the sign bit is zero.
        KnownZero |= APInt::getSignBit(BitWidth);
      }
    // assume(v <=_s c) where c is negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLE && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));

      if (RHSKnownOne.isNegative()) {
        // We know that the sign bit is one.
        KnownOne |= APInt::getSignBit(BitWidth);
      }
    // assume(v <_s c) where c is non-positive
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLT && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));

      if (RHSKnownZero.isAllOnesValue() || RHSKnownOne.isNegative()) {
        // We know that the sign bit is one.
        KnownOne |= APInt::getSignBit(BitWidth);
      }
    // assume(v <=_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULE && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero.
      KnownZero |=
        APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
    // assume(v <_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULT && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero (if c is a power
      // of 2, then one more).
      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I), DL))
        KnownZero |=
          APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes()+1);
      else
        KnownZero |=
          APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
    }
  }
}

/// Determine which bits of V are known to be either zero or one and return
/// them in the KnownZero/KnownOne bit sets.
///
/// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero.  If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers.  In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and a
/// bit is set only if it is true for all of the elements in the vector.
void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
                      const DataLayout &DL, unsigned Depth, const Query &Q) {
  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  unsigned BitWidth = KnownZero.getBitWidth();

  assert((V->getType()->isIntOrIntVectorTy() ||
          V->getType()->getScalarType()->isPointerTy()) &&
         "Not integer or pointer type!");
  assert((DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
         (!V->getType()->isIntOrIntVectorTy() ||
          V->getType()->getScalarSizeInBits() == BitWidth) &&
         KnownZero.getBitWidth() == BitWidth &&
         KnownOne.getBitWidth() == BitWidth &&
         "V, KnownOne and KnownZero should have same BitWidth");

  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    // We know all of the bits for a constant!
    KnownOne = CI->getValue();
    KnownZero = ~KnownOne;
    return;
  }
  // Null and aggregate-zero are all-zeros.
  if (isa<ConstantPointerNull>(V) ||
      isa<ConstantAggregateZero>(V)) {
    KnownOne.clearAllBits();
    KnownZero = APInt::getAllOnesValue(BitWidth);
    return;
  }
  // Handle a constant vector by taking the intersection of the known bits of
  // each element.  There is no real need to handle ConstantVector here, because
  // we don't handle undef in any particularly useful way.
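  // Illustrative example: for <2 x i8> <i8 3, i8 5> (0b011 and 0b101), bit 0
  // is one in both elements and the top five bits are zero in both, so the
  // intersection below yields KnownOne = 0x01 and KnownZero = 0xF8, with bits
  // 1 and 2 left unknown.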
  if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
    // We know that CDS must be a vector of integers. Take the intersection of
    // each element.
    KnownZero.setAllBits(); KnownOne.setAllBits();
    APInt Elt(KnownZero.getBitWidth(), 0);
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      Elt = CDS->getElementAsInteger(i);
      KnownZero &= ~Elt;
      KnownOne &= Elt;
    }
    return;
  }

  // The address of an aligned GlobalValue has trailing zeros.
  if (auto *GO = dyn_cast<GlobalObject>(V)) {
    unsigned Align = GO->getAlignment();
    if (Align == 0) {
      if (auto *GVar = dyn_cast<GlobalVariable>(GO)) {
        Type *ObjectType = GVar->getType()->getElementType();
        if (ObjectType->isSized()) {
          // If the object is defined in the current Module, we'll be giving
          // it the preferred alignment. Otherwise, we have to assume that it
          // may only have the minimum ABI alignment.
          if (!GVar->isDeclaration() && !GVar->isWeakForLinker())
            Align = DL.getPreferredAlignment(GVar);
          else
            Align = DL.getABITypeAlignment(ObjectType);
        }
      }
    }
    if (Align > 0)
      KnownZero = APInt::getLowBitsSet(BitWidth,
                                       countTrailingZeros(Align));
    else
      KnownZero.clearAllBits();
    KnownOne.clearAllBits();
    return;
  }

  if (Argument *A = dyn_cast<Argument>(V)) {
    unsigned Align = A->getType()->isPointerTy() ? A->getParamAlignment() : 0;

    if (!Align && A->hasStructRetAttr()) {
      // An sret parameter has at least the ABI alignment of the return type.
      Type *EltTy = cast<PointerType>(A->getType())->getElementType();
      if (EltTy->isSized())
        Align = DL.getABITypeAlignment(EltTy);
    }

    if (Align)
      KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
    else
      KnownZero.clearAllBits();
    KnownOne.clearAllBits();

    // Don't give up yet... there might be an assumption that provides more
    // information...
    computeKnownBitsFromAssume(V, KnownZero, KnownOne, DL, Depth, Q);

    // Or a dominating condition for that matter
    if (EnableDomConditions && Depth <= DomConditionsMaxDepth)
      computeKnownBitsFromDominatingCondition(V, KnownZero, KnownOne, DL,
                                              Depth, Q);
    return;
  }

  // Start out not knowing anything.
  KnownZero.clearAllBits(); KnownOne.clearAllBits();

  // Limit search depth.
  // All recursive calls that increase depth must come after this.
  if (Depth == MaxDepth)
    return;

  // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
  // the bits of its aliasee.
  if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
    if (!GA->mayBeOverridden())
      computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, DL, Depth + 1, Q);
    return;
  }

  // Check whether a nearby assume intrinsic can determine some known bits.
  computeKnownBitsFromAssume(V, KnownZero, KnownOne, DL, Depth, Q);

  // Check whether there's a dominating condition which implies something about
  // this value at the given context.
  if (EnableDomConditions && Depth <= DomConditionsMaxDepth)
    computeKnownBitsFromDominatingCondition(V, KnownZero, KnownOne, DL, Depth,
                                            Q);

  Operator *I = dyn_cast<Operator>(V);
  if (!I) return;

  APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, KnownZero);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    break;
  }
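  // Illustrative example for the bitwise cases: if the operands have known
  // bits ??01 and 0?1? ('?' = unknown), And yields 0?0? (a known zero in
  // either operand forces a zero), Or yields ??11, and Xor knows a bit only
  // where both operand bits are known.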
   1081   case Instruction::Or: {
   1082     computeKnownBits(I->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, Q);
   1083     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q);
   1084 
   1085     // Output known-0 bits are only known if clear in both the LHS & RHS.
   1086     KnownZero &= KnownZero2;
   1087     // Output known-1 are known to be set if set in either the LHS | RHS.
   1088     KnownOne |= KnownOne2;
   1089     break;
   1090   }
   1091   case Instruction::Xor: {
   1092     computeKnownBits(I->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, Q);
   1093     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q);
   1094 
   1095     // Output known-0 bits are known if clear or set in both the LHS & RHS.
   1096     APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
   1097     // Output known-1 are known to be set if set in only one of the LHS, RHS.
   1098     KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
   1099     KnownZero = KnownZeroOut;
   1100     break;
   1101   }
   1102   case Instruction::Mul: {
   1103     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
   1104     computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, KnownZero,
   1105                         KnownOne, KnownZero2, KnownOne2, DL, Depth, Q);
   1106     break;
   1107   }
   1108   case Instruction::UDiv: {
   1109     // For the purposes of computing leading zeros we can conservatively
   1110     // treat a udiv as a logical right shift by the power of 2 known to
   1111     // be less than the denominator.
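            // For example, an i8 numerator with four leading zeros (a value
            // below 16) divided by a denominator with bit 2 known set (a
            // value of at least 4) yields a quotient below 4, i.e. at least
            // six leading zeros.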
   1112     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q);
   1113     unsigned LeadZ = KnownZero2.countLeadingOnes();
   1114 
   1115     KnownOne2.clearAllBits();
   1116     KnownZero2.clearAllBits();
   1117     computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, DL, Depth + 1, Q);
   1118     unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
   1119     if (RHSUnknownLeadingOnes != BitWidth)
   1120       LeadZ = std::min(BitWidth,
   1121                        LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
   1122 
   1123     KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
   1124     break;
   1125   }
   1126   case Instruction::Select:
   1127     computeKnownBits(I->getOperand(2), KnownZero, KnownOne, DL, Depth + 1, Q);
   1128     computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, DL, Depth + 1, Q);
   1129 
   1130     // Only known if known in both the LHS and RHS.
   1131     KnownOne &= KnownOne2;
   1132     KnownZero &= KnownZero2;
   1133     break;
   1134   case Instruction::FPTrunc:
   1135   case Instruction::FPExt:
   1136   case Instruction::FPToUI:
   1137   case Instruction::FPToSI:
   1138   case Instruction::SIToFP:
   1139   case Instruction::UIToFP:
   1140     break; // Can't work with floating point.
   1141   case Instruction::PtrToInt:
   1142   case Instruction::IntToPtr:
   1143   case Instruction::AddrSpaceCast: // Pointers could be different sizes.
   1144     // FALL THROUGH and handle them the same as zext/trunc.
   1145   case Instruction::ZExt:
   1146   case Instruction::Trunc: {
   1147     Type *SrcTy = I->getOperand(0)->getType();
   1148 
   1149     unsigned SrcBitWidth;
   1150     // Note that we handle pointer operands here because of inttoptr/ptrtoint
   1151     // which fall through here.
   1152     SrcBitWidth = DL.getTypeSizeInBits(SrcTy->getScalarType());
   1153 
   1154     assert(SrcBitWidth && "SrcBitWidth can't be zero");
   1155     KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
   1156     KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
   1157     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
   1158     KnownZero = KnownZero.zextOrTrunc(BitWidth);
   1159     KnownOne = KnownOne.zextOrTrunc(BitWidth);
   1160     // Any top bits are known to be zero.
   1161     if (BitWidth > SrcBitWidth)
   1162       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
   1163     break;
   1164   }
   1165   case Instruction::BitCast: {
   1166     Type *SrcTy = I->getOperand(0)->getType();
   1167     if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
   1168         // TODO: For now, not handling conversions like:
   1169         // (bitcast i64 %x to <2 x i32>)
   1170         !I->getType()->isVectorTy()) {
   1171       computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
   1172       break;
   1173     }
   1174     break;
   1175   }
   1176   case Instruction::SExt: {
   1177     // Compute the bits in the result that are not present in the input.
   1178     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
   1179 
   1180     KnownZero = KnownZero.trunc(SrcBitWidth);
   1181     KnownOne = KnownOne.trunc(SrcBitWidth);
   1182     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
   1183     KnownZero = KnownZero.zext(BitWidth);
   1184     KnownOne = KnownOne.zext(BitWidth);
   1185 
   1186     // If the sign bit of the input is known set or clear, then we know the
   1187     // top bits of the result.
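            // For example, sign-extending an i8 whose sign bit is known zero
            // to i32 makes the 24 newly created high bits known zero as well.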
   1188     if (KnownZero[SrcBitWidth-1])             // Input sign bit known zero
   1189       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
   1190     else if (KnownOne[SrcBitWidth-1])           // Input sign bit known set
   1191       KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
   1192     break;
   1193   }
   1194   case Instruction::Shl:
   1195     // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
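            // For example, (x << 3) always has its low three bits known zero.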
   1196     if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
   1197       uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
   1198       computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
   1199       KnownZero <<= ShiftAmt;
   1200       KnownOne  <<= ShiftAmt;
   1201       KnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt); // low bits known 0
   1202     }
   1203     break;
   1204   case Instruction::LShr:
   1205     // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
   1206     if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
   1207       // Compute the new bits that are at the top now.
   1208       uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
   1209 
   1210       // Unsigned shift right.
   1211       computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
   1212       KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
   1213       KnownOne  = APIntOps::lshr(KnownOne, ShiftAmt);
   1214       // high bits known zero.
   1215       KnownZero |= APInt::getHighBitsSet(BitWidth, ShiftAmt);
   1216     }
   1217     break;
   1218   case Instruction::AShr:
   1219     // For a constant shift amount, the bits shifted in at the top all
            // equal the sign bit of X, so they are known whenever X's sign bit is.
   1220     if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
   1221       // Compute the new bits that are at the top now.
   1222       uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
   1223 
   1224       // Signed shift right.
   1225       computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
   1226       KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
   1227       KnownOne  = APIntOps::lshr(KnownOne, ShiftAmt);
   1228 
   1229       APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
   1230       if (KnownZero[BitWidth-ShiftAmt-1])    // New bits are known zero.
   1231         KnownZero |= HighBits;
   1232       else if (KnownOne[BitWidth-ShiftAmt-1])  // New bits are known one.
   1233         KnownOne |= HighBits;
   1234     }
   1235     break;
   1236   case Instruction::Sub: {
   1237     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
   1238     computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
   1239                            KnownZero, KnownOne, KnownZero2, KnownOne2, DL,
   1240                            Depth, Q);
   1241     break;
   1242   }
   1243   case Instruction::Add: {
   1244     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
   1245     computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
   1246                            KnownZero, KnownOne, KnownZero2, KnownOne2, DL,
   1247                            Depth, Q);
   1248     break;
   1249   }
   1250   case Instruction::SRem:
   1251     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
   1252       APInt RA = Rem->getValue().abs();
   1253       if (RA.isPowerOf2()) {
   1254         APInt LowBits = RA - 1;
   1255         computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1,
   1256                          Q);
   1257 
   1258         // The low bits of the first operand are unchanged by the srem.
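                // For example, (x srem 8) and x agree in their low three bits.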
   1259         KnownZero = KnownZero2 & LowBits;
   1260         KnownOne = KnownOne2 & LowBits;
   1261 
   1262         // If the first operand is non-negative or has all low bits zero, then
   1263         // the upper bits are all zero.
   1264         if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
   1265           KnownZero |= ~LowBits;
   1266 
   1267         // If the first operand is negative and not all low bits are zero, then
   1268         // the upper bits are all one.
   1269         if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
   1270           KnownOne |= ~LowBits;
   1271 
   1272         assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
   1273       }
   1274     }
   1275 
   1276     // The sign bit is the LHS's sign bit, except when the result of the
   1277     // remainder is zero.
   1278     if (KnownZero.isNonNegative()) {
   1279       APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
   1280       computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, DL,
   1281                        Depth + 1, Q);
   1282       // If it's known zero, our sign bit is also zero.
   1283       if (LHSKnownZero.isNegative())
   1284         KnownZero.setBit(BitWidth - 1);
   1285     }
   1286 
   1287     break;
   1288   case Instruction::URem: {
   1289     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
   1290       APInt RA = Rem->getValue();
   1291       if (RA.isPowerOf2()) {
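                // x urem 2^N is x & (2^N - 1); e.g. (x urem 8) == (x & 7), so
                // all bits above the low three are known zero.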
   1292         APInt LowBits = (RA - 1);
   1293         computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1,
   1294                          Q);
   1295         KnownZero |= ~LowBits;
   1296         KnownOne &= LowBits;
   1297         break;
   1298       }
   1299     }
   1300 
   1301     // Since the result is less than or equal to either operand, any leading
   1302     // zero bits in either operand must also exist in the result.
   1303     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
   1304     computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, DL, Depth + 1, Q);
   1305 
   1306     unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
   1307                                 KnownZero2.countLeadingOnes());
   1308     KnownOne.clearAllBits();
   1309     KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
   1310     break;
   1311   }
   1312 
   1313   case Instruction::Alloca: {
   1314     AllocaInst *AI = cast<AllocaInst>(V);
   1315     unsigned Align = AI->getAlignment();
   1316     if (Align == 0)
   1317       Align = DL.getABITypeAlignment(AI->getType()->getElementType());
   1318 
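            // An allocation aligned to 2^N bytes yields a pointer whose low N
            // bits are known zero, e.g. four low zero bits for a 16-byte
            // aligned alloca.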
   1319     if (Align > 0)
   1320       KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
   1321     break;
   1322   }
   1323   case Instruction::GetElementPtr: {
   1324     // Analyze all of the subscripts of this getelementptr instruction
   1325     // to determine if we can prove known low zero bits.
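            // For example, indexing an array of i32 with an index whose low
            // bit is known zero contributes a byte offset that is a multiple
            // of 8, i.e. at least three trailing zero bits.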
   1326     APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
   1327     computeKnownBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, DL,
   1328                      Depth + 1, Q);
   1329     unsigned TrailZ = LocalKnownZero.countTrailingOnes();
   1330 
   1331     gep_type_iterator GTI = gep_type_begin(I);
   1332     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
   1333       Value *Index = I->getOperand(i);
   1334       if (StructType *STy = dyn_cast<StructType>(*GTI)) {
   1335         // Handle struct member offset arithmetic.
   1336 
   1337         // Handle the case where the index is a vector zeroinitializer.
   1338         Constant *CIndex = cast<Constant>(Index);
   1339         if (CIndex->isZeroValue())
   1340           continue;
   1341 
   1342         if (CIndex->getType()->isVectorTy())
   1343           Index = CIndex->getSplatValue();
   1344 
   1345         unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
   1346         const StructLayout *SL = DL.getStructLayout(STy);
   1347         uint64_t Offset = SL->getElementOffset(Idx);
   1348         TrailZ = std::min<unsigned>(TrailZ,
   1349                                     countTrailingZeros(Offset));
   1350       } else {
   1351         // Handle array index arithmetic.
   1352         Type *IndexedTy = GTI.getIndexedType();
   1353         if (!IndexedTy->isSized()) {
   1354           TrailZ = 0;
   1355           break;
   1356         }
   1357         unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
   1358         uint64_t TypeSize = DL.getTypeAllocSize(IndexedTy);
   1359         LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
   1360         computeKnownBits(Index, LocalKnownZero, LocalKnownOne, DL, Depth + 1,
   1361                          Q);
   1362         TrailZ = std::min(TrailZ,
   1363                           unsigned(countTrailingZeros(TypeSize) +
   1364                                    LocalKnownZero.countTrailingOnes()));
   1365       }
   1366     }
   1367 
   1368     KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ);
   1369     break;
   1370   }
   1371   case Instruction::PHI: {
   1372     PHINode *P = cast<PHINode>(I);
   1373     // Handle the case of a simple two-predecessor recurrence PHI.
   1374     // There's a lot more that could theoretically be done here, but
   1375     // this is sufficient to catch some interesting cases.
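            // For example, for 'i = phi [0, %entry], [i + 4, %loop]' both the
            // start value and the step have two trailing zero bits, so i is
            // known to be a multiple of 4.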
   1376     if (P->getNumIncomingValues() == 2) {
   1377       for (unsigned i = 0; i != 2; ++i) {
   1378         Value *L = P->getIncomingValue(i);
   1379         Value *R = P->getIncomingValue(!i);
   1380         Operator *LU = dyn_cast<Operator>(L);
   1381         if (!LU)
   1382           continue;
   1383         unsigned Opcode = LU->getOpcode();
   1384         // Check for operations that have the property that if
   1385         // both their operands have low zero bits, the result
   1386         // will have low zero bits.
   1387         if (Opcode == Instruction::Add ||
   1388             Opcode == Instruction::Sub ||
   1389             Opcode == Instruction::And ||
   1390             Opcode == Instruction::Or ||
   1391             Opcode == Instruction::Mul) {
   1392           Value *LL = LU->getOperand(0);
   1393           Value *LR = LU->getOperand(1);
   1394           // Find a recurrence.
   1395           if (LL == I)
   1396             L = LR;
   1397           else if (LR == I)
   1398             L = LL;
   1399           else
   1400             break;
   1401           // Ok, we have a PHI of the form L op= R. Check for low
   1402           // zero bits.
   1403           computeKnownBits(R, KnownZero2, KnownOne2, DL, Depth + 1, Q);
   1404 
   1405           // We need to take the minimum number of known low zero bits.
   1406           APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
   1407           computeKnownBits(L, KnownZero3, KnownOne3, DL, Depth + 1, Q);
   1408 
   1409           KnownZero = APInt::getLowBitsSet(BitWidth,
   1410                                            std::min(KnownZero2.countTrailingOnes(),
   1411                                                     KnownZero3.countTrailingOnes()));
   1412           break;
   1413         }
   1414       }
   1415     }
   1416 
   1417     // Unreachable blocks may have zero-operand PHI nodes.
   1418     if (P->getNumIncomingValues() == 0)
   1419       break;
   1420 
   1421     // Otherwise take the intersection of the known bit sets of the
   1422     // operands, taking conservative care to avoid excessive recursion.
   1423     if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
   1424       // Skip if every incoming value refers back to the PHI itself.
   1425       if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
   1426         break;
   1427 
   1428       KnownZero = APInt::getAllOnesValue(BitWidth);
   1429       KnownOne = APInt::getAllOnesValue(BitWidth);
   1430       for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i) {
   1431         // Skip direct self references.
   1432         if (P->getIncomingValue(i) == P) continue;
   1433 
   1434         KnownZero2 = APInt(BitWidth, 0);
   1435         KnownOne2 = APInt(BitWidth, 0);
   1436         // Recurse, but cap the recursion to one level, because we don't
   1437         // want to waste time spinning around in loops.
   1438         computeKnownBits(P->getIncomingValue(i), KnownZero2, KnownOne2, DL,
   1439                          MaxDepth - 1, Q);
   1440         KnownZero &= KnownZero2;
   1441         KnownOne &= KnownOne2;
   1442         // If all bits have been ruled out, there's no need to check
   1443         // more operands.
   1444         if (!KnownZero && !KnownOne)
   1445           break;
   1446       }
   1447     }
   1448     break;
   1449   }
   1450   case Instruction::Call:
   1451   case Instruction::Invoke:
   1452     if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
   1453       computeKnownBitsFromRangeMetadata(*MD, KnownZero);
   1454     // If range metadata is attached to this IntrinsicInst, intersect the
   1455     // explicit range specified by the metadata with the implicit range of
   1456     // the intrinsic.
   1457     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
   1458       switch (II->getIntrinsicID()) {
   1459       default: break;
   1460       case Intrinsic::ctlz:
   1461       case Intrinsic::cttz: {
   1462         unsigned LowBits = Log2_32(BitWidth)+1;
   1463         // If this call is undefined for 0, the result will be less than 2^n.
   1464         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
   1465           LowBits -= 1;
   1466         KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
   1467         break;
   1468       }
   1469       case Intrinsic::ctpop: {
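                // The population count of an iN value is at most N, so it fits
                // in Log2_32(N)+1 bits; e.g. ctpop of an i32 is at most 32,
                // leaving the upper 26 bits known zero.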
   1470         unsigned LowBits = Log2_32(BitWidth)+1;
   1471         KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
   1472         break;
   1473       }
   1474       case Intrinsic::x86_sse42_crc32_64_64:
   1475         KnownZero |= APInt::getHighBitsSet(64, 32);
   1476         break;
   1477       }
   1478     }
   1479     break;
   1480   case Instruction::ExtractValue:
   1481     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
   1482       ExtractValueInst *EVI = cast<ExtractValueInst>(I);
   1483       if (EVI->getNumIndices() != 1) break;
   1484       if (EVI->getIndices()[0] == 0) {
   1485         switch (II->getIntrinsicID()) {
   1486         default: break;
   1487         case Intrinsic::uadd_with_overflow:
   1488         case Intrinsic::sadd_with_overflow:
   1489           computeKnownBitsAddSub(true, II->getArgOperand(0),
   1490                                  II->getArgOperand(1), false, KnownZero,
   1491                                  KnownOne, KnownZero2, KnownOne2, DL, Depth, Q);
   1492           break;
   1493         case Intrinsic::usub_with_overflow:
   1494         case Intrinsic::ssub_with_overflow:
   1495           computeKnownBitsAddSub(false, II->getArgOperand(0),
   1496                                  II->getArgOperand(1), false, KnownZero,
   1497                                  KnownOne, KnownZero2, KnownOne2, DL, Depth, Q);
   1498           break;
   1499         case Intrinsic::umul_with_overflow:
   1500         case Intrinsic::smul_with_overflow:
   1501           computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
   1502                               KnownZero, KnownOne, KnownZero2, KnownOne2, DL,
   1503                               Depth, Q);
   1504           break;
   1505         }
   1506       }
   1507     }
   1508   }
   1509 
   1510   assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
   1511 }
   1512 
   1513 /// Determine whether the sign bit is known to be zero or one.
   1514 /// Convenience wrapper around computeKnownBits.
   1515 void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
   1516                     const DataLayout &DL, unsigned Depth, const Query &Q) {
   1517   unsigned BitWidth = getBitWidth(V->getType(), DL);
   1518   if (!BitWidth) {
   1519     KnownZero = false;
   1520     KnownOne = false;
   1521     return;
   1522   }
   1523   APInt ZeroBits(BitWidth, 0);
   1524   APInt OneBits(BitWidth, 0);
   1525   computeKnownBits(V, ZeroBits, OneBits, DL, Depth, Q);
   1526   KnownOne = OneBits[BitWidth - 1];
   1527   KnownZero = ZeroBits[BitWidth - 1];
   1528 }
   1529 
   1530 /// Return true if the given value is known to have exactly one
   1531 /// bit set when defined. For vectors return true if every element is known to
   1532 /// be a power of two when defined. Supports values with integer or pointer
   1533 /// types and vectors of integers.
   1534 bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
   1535                             const Query &Q, const DataLayout &DL) {
   1536   if (Constant *C = dyn_cast<Constant>(V)) {
   1537     if (C->isNullValue())
   1538       return OrZero;
   1539     if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
   1540       return CI->getValue().isPowerOf2();
   1541     // TODO: Handle vector constants.
   1542   }
   1543 
   1544   // 1 << X is clearly a power of two if the one is not shifted off the end.  If
   1545   // it is shifted off the end then the result is undefined.
   1546   if (match(V, m_Shl(m_One(), m_Value())))
   1547     return true;
   1548 
   1549   // (signbit) >>l X is clearly a power of two if the one is not shifted off the
   1550   // bottom.  If it is shifted off the bottom then the result is undefined.
   1551   if (match(V, m_LShr(m_SignBit(), m_Value())))
   1552     return true;
   1553 
   1554   // The remaining tests are all recursive, so bail out if we hit the limit.
   1555   if (Depth++ == MaxDepth)
   1556     return false;
   1557 
   1558   Value *X = nullptr, *Y = nullptr;
   1559   // A shift of a power of two is a power of two or zero.
   1560   if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
   1561                  match(V, m_Shr(m_Value(X), m_Value()))))
   1562     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q, DL);
   1563 
   1564   if (ZExtInst *ZI = dyn_cast<ZExtInst>(V))
   1565     return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q, DL);
   1566 
   1567   if (SelectInst *SI = dyn_cast<SelectInst>(V))
   1568     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q, DL) &&
   1569            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q, DL);
   1570 
   1571   if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
   1572     // A power of two and'd with anything is a power of two or zero.
   1573     if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q, DL) ||
   1574         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q, DL))
   1575       return true;
   1576     // X & (-X) is always a power of two or zero.
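            // (It isolates the lowest set bit of X, e.g. 12 & -12 == 4.)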
   1577     if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
   1578       return true;
   1579     return false;
   1580   }
   1581 
   1582   // Adding a power-of-two or zero to the same power-of-two or zero yields
   1583   // either the original power-of-two, a larger power-of-two or zero.
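          // If Y is a power of two or zero and X == Y & Z, then X is either Y
          // or zero, so X + Y is zero, Y, or 2*Y; e.g. 8 + 8 == 16 and
          // 8 + 0 == 8.  The m_And matches below establish that relationship.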
   1584   if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
   1585     OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
   1586     if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
   1587       if (match(X, m_And(m_Specific(Y), m_Value())) ||
   1588           match(X, m_And(m_Value(), m_Specific(Y))))
   1589         if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q, DL))
   1590           return true;
   1591       if (match(Y, m_And(m_Specific(X), m_Value())) ||
   1592           match(Y, m_And(m_Value(), m_Specific(X))))
   1593         if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q, DL))
   1594           return true;
   1595 
   1596       unsigned BitWidth = V->getType()->getScalarSizeInBits();
   1597       APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0);
   1598       computeKnownBits(X, LHSZeroBits, LHSOneBits, DL, Depth, Q);
   1599 
   1600       APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0);
   1601       computeKnownBits(Y, RHSZeroBits, RHSOneBits, DL, Depth, Q);
   1602       // If i8 V is a power of two or zero:
   1603       //  ZeroBits: 1 1 1 0 1 1 1 1
   1604       // ~ZeroBits: 0 0 0 1 0 0 0 0
   1605       if ((~(LHSZeroBits & RHSZeroBits)).isPowerOf2())
   1606         // If OrZero isn't set, we cannot give back a zero result.
   1607         // Make sure either the LHS or RHS has a bit set.
   1608         if (OrZero || RHSOneBits.getBoolValue() || LHSOneBits.getBoolValue())
   1609           return true;
   1610     }
   1611   }
   1612 
   1613   // An exact divide or right shift can only shift off zero bits, so the result
   1614   // is a power of two only if the first operand is a power of two and not
   1615   // copying a sign bit (sdiv int_min, 2).
   1616   if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
   1617       match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
   1618     return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
   1619                                   Depth, Q, DL);
   1620   }
   1621 
   1622   return false;
   1623 }
   1624 
   1625 /// \brief Test whether a GEP's result is known to be non-null.
   1626 ///
   1627 /// Uses properties inherent in a GEP to try to determine whether it is known
   1628 /// to be non-null.
   1629 ///
   1630 /// Currently this routine does not support vector GEPs.
   1631 static bool isGEPKnownNonNull(GEPOperator *GEP, const DataLayout &DL,
   1632                               unsigned Depth, const Query &Q) {
   1633   if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
   1634     return false;
   1635 
   1636   // FIXME: Support vector-GEPs.
   1637   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
   1638 
   1639   // If the base pointer is non-null, we cannot walk to a null address with an
   1640   // inbounds GEP in address space zero.
   1641   if (isKnownNonZero(GEP->getPointerOperand(), DL, Depth, Q))
   1642     return true;
   1643 
   1644   // Walk the GEP operands and see if any operand introduces a non-zero offset.
   1645   // If so, then the GEP cannot produce a null pointer, as doing so would
   1646   // inherently violate the inbounds contract within address space zero.
   1647   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
   1648        GTI != GTE; ++GTI) {
   1649     // Struct types are easy -- they must always be indexed by a constant.
   1650     if (StructType *STy = dyn_cast<StructType>(*GTI)) {
   1651       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
   1652       unsigned ElementIdx = OpC->getZExtValue();
   1653       const StructLayout *SL = DL.getStructLayout(STy);
   1654       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
   1655       if (ElementOffset > 0)
   1656         return true;
   1657       continue;
   1658     }
   1659 
   1660     // If we have a zero-sized type, the index doesn't matter. Keep looping.
   1661     if (DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
   1662       continue;
   1663 
   1664     // Fast path the constant operand case both for efficiency and so we don't
   1665     // increment Depth when just zipping down an all-constant GEP.
   1666     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
   1667       if (!OpC->isZero())
   1668         return true;
   1669       continue;
   1670     }
   1671 
   1672     // We post-increment Depth here because while isKnownNonZero increments it
   1673     // as well, when we pop back up that increment won't persist. We don't want
   1674     // to recurse 10k times just because we have 10k GEP operands. We don't
   1675     // bail completely out because we want to handle constant GEPs regardless
   1676     // of depth.
   1677     if (Depth++ >= MaxDepth)
   1678       continue;
   1679 
   1680     if (isKnownNonZero(GTI.getOperand(), DL, Depth, Q))
   1681       return true;
   1682   }
   1683 
   1684   return false;
   1685 }
   1686 
   1687 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
   1688 /// ensure that the value it's attached to is never equal to 'Value'?
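        /// For example, metadata encoding the single half-open range [1, 10)
        /// excludes 0, while metadata encoding [0, 8) does not.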
   1690 static bool rangeMetadataExcludesValue(MDNode* Ranges,
   1691                                        const APInt& Value) {
   1692   const unsigned NumRanges = Ranges->getNumOperands() / 2;
   1693   assert(NumRanges >= 1);
   1694   for (unsigned i = 0; i < NumRanges; ++i) {
   1695     ConstantInt *Lower =
   1696         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
   1697     ConstantInt *Upper =
   1698         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
   1699     ConstantRange Range(Lower->getValue(), Upper->getValue());
   1700     if (Range.contains(Value))
   1701       return false;
   1702   }
   1703   return true;
   1704 }
   1705 
   1706 /// Return true if the given value is known to be non-zero when defined.
   1707 /// For vectors return true if every element is known to be non-zero when
   1708 /// defined. Supports values with integer or pointer type and vectors of
   1709 /// integers.
   1710 bool isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
   1711                     const Query &Q) {
   1712   if (Constant *C = dyn_cast<Constant>(V)) {
   1713     if (C->isNullValue())
   1714       return false;
   1715     if (isa<ConstantInt>(C))
   1716       // Must be non-zero due to null test above.
   1717       return true;
   1718     // TODO: Handle vectors
   1719     return false;
   1720   }
   1721 
   1722   if (Instruction* I = dyn_cast<Instruction>(V)) {
   1723     if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
   1724       // If the possible ranges don't contain zero, then the value is
   1725       // definitely non-zero.
   1726       if (IntegerType* Ty = dyn_cast<IntegerType>(V->getType())) {
   1727         const APInt ZeroValue(Ty->getBitWidth(), 0);
   1728         if (rangeMetadataExcludesValue(Ranges, ZeroValue))
   1729           return true;
   1730       }
   1731     }
   1732   }
   1733 
   1734   // The remaining tests are all recursive, so bail out if we hit the limit.
   1735   if (Depth++ >= MaxDepth)
   1736     return false;
   1737 
   1738   // Check for pointer simplifications.
   1739   if (V->getType()->isPointerTy()) {
   1740     if (isKnownNonNull(V))
   1741       return true;
   1742     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
   1743       if (isGEPKnownNonNull(GEP, DL, Depth, Q))
   1744         return true;
   1745   }
   1746 
   1747   unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), DL);
   1748 
   1749   // X | Y != 0 if X != 0 or Y != 0.
   1750   Value *X = nullptr, *Y = nullptr;
   1751   if (match(V, m_Or(m_Value(X), m_Value(Y))))
   1752     return isKnownNonZero(X, DL, Depth, Q) || isKnownNonZero(Y, DL, Depth, Q);
   1753 
   1754   // ext X != 0 if X != 0.
   1755   if (isa<SExtInst>(V) || isa<ZExtInst>(V))
   1756     return isKnownNonZero(cast<Instruction>(V)->getOperand(0), DL, Depth, Q);
   1757 
   1758   // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
   1759   // if the lowest bit is shifted off the end.
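          // An odd X has its low bit set, and the shift just moves that set
          // bit up to position Y, so some bit of the result remains set.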
   1760   if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
   1761     // shl nuw can't remove any non-zero bits.
   1762     OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
   1763     if (BO->hasNoUnsignedWrap())
   1764       return isKnownNonZero(X, DL, Depth, Q);
   1765 
   1766     APInt KnownZero(BitWidth, 0);
   1767     APInt KnownOne(BitWidth, 0);
   1768     computeKnownBits(X, KnownZero, KnownOne, DL, Depth, Q);
   1769     if (KnownOne[0])
   1770       return true;
   1771   }
   1772   // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
   1773   // defined if the sign bit is shifted off the end.
   1774   else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
   1775     // shr exact can only shift out zero bits.
   1776     PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
   1777     if (BO->isExact())
   1778       return isKnownNonZero(X, DL, Depth, Q);
   1779 
   1780     bool XKnownNonNegative, XKnownNegative;
   1781     ComputeSignBit(X, XKnownNonNegative, XKnownNegative, DL, Depth, Q);
   1782     if (XKnownNegative)
   1783       return true;
   1784   }
   1785   // div exact can only produce a zero if the dividend is zero.
   1786   else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
   1787     return isKnownNonZero(X, DL, Depth, Q);
   1788   }
   1789   // X + Y.
   1790   else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
   1791     bool XKnownNonNegative, XKnownNegative;
   1792     bool YKnownNonNegative, YKnownNegative;
   1793     ComputeSignBit(X, XKnownNonNegative, XKnownNegative, DL, Depth, Q);
   1794     ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, DL, Depth, Q);
   1795 
   1796     // If X and Y are both non-negative (as signed values) then their sum is not
   1797     // zero unless both X and Y are zero.
   1798     if (XKnownNonNegative && YKnownNonNegative)
   1799       if (isKnownNonZero(X, DL, Depth, Q) || isKnownNonZero(Y, DL, Depth, Q))
   1800         return true;
   1801 
   1802     // If X and Y are both negative (as signed values) then their sum is not
   1803     // zero unless both X and Y equal INT_MIN.
   1804     if (BitWidth && XKnownNegative && YKnownNegative) {
   1805       APInt KnownZero(BitWidth, 0);
   1806       APInt KnownOne(BitWidth, 0);
   1807       APInt Mask = APInt::getSignedMaxValue(BitWidth);
   1808       // The sign bit of X is set.  If some other bit is set then X is not equal
   1809       // to INT_MIN.
   1810       computeKnownBits(X, KnownZero, KnownOne, DL, Depth, Q);
   1811       if ((KnownOne & Mask) != 0)
   1812         return true;
   1813       // The sign bit of Y is set.  If some other bit is set then Y is not equal
   1814       // to INT_MIN.
   1815       computeKnownBits(Y, KnownZero, KnownOne, DL, Depth, Q);
   1816       if ((KnownOne & Mask) != 0)
   1817         return true;
   1818     }
   1819 
   1820     // The sum of a non-negative number and a power of two is not zero.
   1821     if (XKnownNonNegative &&
   1822         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q, DL))
   1823       return true;
   1824     if (YKnownNonNegative &&
   1825         isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q, DL))
   1826       return true;
   1827   }
   1828   // X * Y.
   1829   else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
   1830     OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
   1831     // If X and Y are non-zero then so is X * Y as long as the multiplication
   1832     // does not overflow.
   1833     if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
   1834         isKnownNonZero(X, DL, Depth, Q) && isKnownNonZero(Y, DL, Depth, Q))
   1835       return true;
   1836   }
   1837   // (C ? X : Y) != 0 if X != 0 and Y != 0.
   1838   else if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
   1839     if (isKnownNonZero(SI->getTrueValue(), DL, Depth, Q) &&
   1840         isKnownNonZero(SI->getFalseValue(), DL, Depth, Q))
   1841       return true;
   1842   }
   1843 
   1844   if (!BitWidth) return false;
   1845   APInt KnownZero(BitWidth, 0);
   1846   APInt KnownOne(BitWidth, 0);
   1847   computeKnownBits(V, KnownZero, KnownOne, DL, Depth, Q);
   1848   return KnownOne != 0;
   1849 }
   1850 
   1851 /// Return true if 'V & Mask' is known to be zero.  We use this predicate to
   1852 /// simplify operations downstream.  The predicate holds only when every bit
   1853 /// set in Mask is known to be zero in V.
   1854 ///
   1855 /// This function is defined on values with integer type, values with pointer
   1856 /// type, and vectors of integers.  In the case
   1857 /// where V is a vector, the mask, known zero, and known one values are the
   1858 /// same width as the vector element, and the bit is set only if it is true
   1859 /// for all of the elements in the vector.
   1860 bool MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
   1861                        unsigned Depth, const Query &Q) {
   1862   APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
   1863   computeKnownBits(V, KnownZero, KnownOne, DL, Depth, Q);
   1864   return (KnownZero & Mask) == Mask;
   1865 }
   1866 
   1867 
   1868 
   1869 /// Return the number of times the sign bit of the register is replicated into
   1870 /// the other bits. We know that at least 1 bit is always equal to the sign bit
   1871 /// (itself), but other cases can give us information. For example, immediately
   1872 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
   1873 /// other, so we return 3.
   1874 ///
   1875 /// 'V' must have a scalar integer type.
   1876 ///
   1877 unsigned ComputeNumSignBits(Value *V, const DataLayout &DL, unsigned Depth,
   1878                             const Query &Q) {
   1879   unsigned TyBits = DL.getTypeSizeInBits(V->getType()->getScalarType());
   1880   unsigned Tmp, Tmp2;
   1881   unsigned FirstAnswer = 1;
   1882 
   1883   // Note that ConstantInt is handled by the general computeKnownBits case
   1884   // below.
   1885 
   1886   if (Depth == MaxDepth)
   1887     return 1;  // Limit search depth.
   1888 
   1889   Operator *U = dyn_cast<Operator>(V);
   1890   switch (Operator::getOpcode(V)) {
   1891   default: break;
   1892   case Instruction::SExt:
   1893     Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
   1894     return ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q) + Tmp;
   1895 
   1896   case Instruction::SDiv: {
   1897     const APInt *Denominator;
   1898     // sdiv X, C -> adds log(C) sign bits.
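            // For example, an sdiv by 4 shrinks the numerator's magnitude by
            // a factor of four, adding floor(log2(4)) == 2 sign bits.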
   1899     if (match(U->getOperand(1), m_APInt(Denominator))) {
   1900 
   1901       // Ignore non-positive denominator.
   1902       if (!Denominator->isStrictlyPositive())
   1903         break;
   1904 
   1905       // Calculate the incoming numerator bits.
   1906       unsigned NumBits = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
   1907 
   1908       // Add floor(log(C)) bits to the numerator bits.
   1909       return std::min(TyBits, NumBits + Denominator->logBase2());
   1910     }
   1911     break;
   1912   }
   1913 
   1914   case Instruction::SRem: {
   1915     const APInt *Denominator;
   1916     // srem X, C -> we know that the result is within [-C+1,C) when C is a
   1917     // positive constant.  This lets us put a lower bound on the number of sign
   1918     // bits.
   1919     if (match(U->getOperand(1), m_APInt(Denominator))) {
   1920 
   1921       // Ignore non-positive denominator.
   1922       if (!Denominator->isStrictlyPositive())
   1923         break;
   1924 
   1925       // Calculate the incoming numerator bits. SRem by a positive constant
   1926       // can't lower the number of sign bits.
   1927       unsigned NumrBits =
   1928           ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
   1929 
   1930       // Calculate the leading sign bit constraints by examining the
   1931       // denominator.  Given that the denominator is positive, there are two
   1932       // cases:
   1933       //
   1934       //  1. the numerator is positive.  The result range is [0,C) and [0,C) u<
   1935       //     (1 << ceilLogBase2(C)).
   1936       //
   1937       //  2. the numerator is negative.  Then the result range is (-C,0] and
   1938       //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
   1939       //
   1940       // Thus a lower bound on the number of sign bits is `TyBits -
   1941       // ceilLogBase2(C)`.
   1942 
   1943       unsigned ResBits = TyBits - Denominator->ceilLogBase2();
   1944       return std::max(NumrBits, ResBits);
   1945     }
   1946     break;
   1947   }
   1948 
   1949   case Instruction::AShr: {
   1950     Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
   1951     // ashr X, C   -> adds C sign bits.  Vectors too.
   1952     const APInt *ShAmt;
   1953     if (match(U->getOperand(1), m_APInt(ShAmt))) {
   1954       Tmp += ShAmt->getZExtValue();
   1955       if (Tmp > TyBits) Tmp = TyBits;
   1956     }
   1957     return Tmp;
   1958   }
   1959   case Instruction::Shl: {
   1960     const APInt *ShAmt;
   1961     if (match(U->getOperand(1), m_APInt(ShAmt))) {
   1962       // shl destroys sign bits.
   1963       Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
   1964       Tmp2 = ShAmt->getZExtValue();
   1965       if (Tmp2 >= TyBits ||      // Bad shift.
   1966           Tmp2 >= Tmp) break;    // Shifted all sign bits out.
   1967       return Tmp - Tmp2;
   1968     }
   1969     break;
   1970   }
   1971   case Instruction::And:
   1972   case Instruction::Or:
   1973   case Instruction::Xor:    // NOT is handled here.
   1974     // Logical binary ops preserve the number of sign bits at the worst.
   1975     Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
   1976     if (Tmp != 1) {
   1977       Tmp2 = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q);
   1978       FirstAnswer = std::min(Tmp, Tmp2);
   1979       // We computed what we know about the sign bits as our first
   1980       // answer. Now proceed to the generic code that uses
   1981       // computeKnownBits, and pick whichever answer is better.
   1982     }
   1983     break;
   1984 
   1985   case Instruction::Select:
   1986     Tmp = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q);
   1987     if (Tmp == 1) return 1;  // Early out.
   1988     Tmp2 = ComputeNumSignBits(U->getOperand(2), DL, Depth + 1, Q);
   1989     return std::min(Tmp, Tmp2);
   1990 
   1991   case Instruction::Add:
   1992     // Add can have at most one carry bit.  Thus we know that the output
   1993     // is, at worst, one more bit than the inputs.
   1994     Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
   1995     if (Tmp == 1) return 1;  // Early out.
   1996 
   1997     // Special case decrementing a value (ADD X, -1):
   1998     if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
   1999       if (CRHS->isAllOnesValue()) {
   2000         APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
   2001         computeKnownBits(U->getOperand(0), KnownZero, KnownOne, DL, Depth + 1,
   2002                          Q);
   2003 
   2004         // If the input is known to be 0 or 1, the output is 0/-1, which is all
   2005         // sign bits set.
   2006         if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
   2007           return TyBits;
   2008 
   2009         // If we are subtracting one from a positive number, there is no carry
   2010         // out of the result.
   2011         if (KnownZero.isNegative())
   2012           return Tmp;
   2013       }
   2014 
   2015     Tmp2 = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q);
   2016     if (Tmp2 == 1) return 1;
   2017     return std::min(Tmp, Tmp2)-1;
   2018 
   2019   case Instruction::Sub:
   2020     Tmp2 = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q);
   2021     if (Tmp2 == 1) return 1;
   2022 
   2023     // Handle NEG.
   2024     if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
   2025       if (CLHS->isNullValue()) {
   2026         APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
   2027         computeKnownBits(U->getOperand(1), KnownZero, KnownOne, DL, Depth + 1,
   2028                          Q);
   2029         // If the input is known to be 0 or 1, the output is 0/-1, which is all
   2030         // sign bits set.
   2031         if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
   2032           return TyBits;
   2033 
   2034         // If the input is known to be positive (the sign bit is known clear),
   2035         // the output of the NEG has the same number of sign bits as the input.
   2036         if (KnownZero.isNegative())
   2037           return Tmp2;
   2038 
   2039         // Otherwise, we treat this like a SUB.
   2040       }
   2041 
   2042     // Sub can have at most one carry bit.  Thus we know that the output
   2043     // is, at worst, one more bit than the inputs.
   2044     Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
   2045     if (Tmp == 1) return 1;  // Early out.
   2046     return std::min(Tmp, Tmp2)-1;
   2047 
   2048   case Instruction::PHI: {
   2049     PHINode *PN = cast<PHINode>(U);
   2050     unsigned NumIncomingValues = PN->getNumIncomingValues();
   2051     // Don't analyze large in-degree PHIs.
   2052     if (NumIncomingValues > 4) break;
   2053     // Unreachable blocks may have zero-operand PHI nodes.
   2054     if (NumIncomingValues == 0) break;
   2055 
   2056     // Take the minimum of all incoming values.  This can't infinitely loop
   2057     // because of our depth threshold.
   2058     Tmp = ComputeNumSignBits(PN->getIncomingValue(0), DL, Depth + 1, Q);
   2059     for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
   2060       if (Tmp == 1) return Tmp;
   2061       Tmp = std::min(
   2062           Tmp, ComputeNumSignBits(PN->getIncomingValue(i), DL, Depth + 1, Q));
   2063     }
   2064     return Tmp;
   2065   }
   2066 
   2067   case Instruction::Trunc:
   2068     // FIXME: it's tricky to do anything useful for this, but it is an important
   2069     // case for targets like X86.
   2070     break;
   2071   }
   2072 
   2073   // Finally, if we can prove that the top bits of the result are 0's or 1's,
   2074   // use this information.
   2075   APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
   2076   APInt Mask;
   2077   computeKnownBits(V, KnownZero, KnownOne, DL, Depth, Q);
   2078 
   2079   if (KnownZero.isNegative()) {        // sign bit is 0
   2080     Mask = KnownZero;
   2081   } else if (KnownOne.isNegative()) {  // sign bit is 1;
   2082     Mask = KnownOne;
   2083   } else {
   2084     // Nothing known.
   2085     return FirstAnswer;
   2086   }
   2087 
   2088   // Okay, we know that the sign bit in Mask is set.  Use CLZ to determine
   2089   // the number of identical bits in the top of the input value.
   2090   Mask = ~Mask;
   2091   Mask <<= Mask.getBitWidth()-TyBits;
   2092   // Return # leading zeros.  We use 'min' here in case Val was zero before
   2093   // shifting; we don't want to return '64' for an i32 "0".
   2094   return std::max(FirstAnswer, std::min(TyBits, Mask.countLeadingZeros()));
   2095 }
   2096 
   2097 /// This function computes the integer multiple of Base that equals V.
   2098 /// If successful, it returns true and stores the multiple in
   2099 /// Multiple. If unsuccessful, it returns false. It looks
   2100 /// through SExt instructions only if LookThroughSExt is true.
   2101 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
   2102                            bool LookThroughSExt, unsigned Depth) {
   2103   const unsigned MaxDepth = 6;
   2104 
   2105   assert(V && "No Value?");
   2106   assert(Depth <= MaxDepth && "Limit Search Depth");
   2107   assert(V->getType()->isIntegerTy() && "Not integer type!");
   2108 
   2109   Type *T = V->getType();
   2110 
   2111   ConstantInt *CI = dyn_cast<ConstantInt>(V);
   2112 
   2113   if (Base == 0)
   2114     return false;
   2115 
   2116   if (Base == 1) {
   2117     Multiple = V;
   2118     return true;
   2119   }
   2120 
   2121   ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
   2122   Constant *BaseVal = ConstantInt::get(T, Base);
   2123   if (CO && CO == BaseVal) {
   2124     // Multiple is 1.
   2125     Multiple = ConstantInt::get(T, 1);
   2126     return true;
   2127   }
   2128 
   2129   if (CI && CI->getZExtValue() % Base == 0) {
   2130     Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
   2131     return true;
   2132   }
   2133 
   2134   if (Depth == MaxDepth) return false;  // Limit search depth.
   2135 
   2136   Operator *I = dyn_cast<Operator>(V);
   2137   if (!I) return false;
   2138 
   2139   switch (I->getOpcode()) {
   2140   default: break;
   2141   case Instruction::SExt:
   2142     if (!LookThroughSExt) return false;
   2143     // otherwise fall through to ZExt
   2144   case Instruction::ZExt:
   2145     return ComputeMultiple(I->getOperand(0), Base, Multiple,
   2146                            LookThroughSExt, Depth+1);
   2147   case Instruction::Shl:
   2148   case Instruction::Mul: {
   2149     Value *Op0 = I->getOperand(0);
   2150     Value *Op1 = I->getOperand(1);
   2151 
   2152     if (I->getOpcode() == Instruction::Shl) {
   2153       ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
   2154       if (!Op1CI) return false;
   2155       // Turn Op0 << Op1 into Op0 * 2^Op1
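              // e.g. X << 3 is handled as X * 8 before searching for multiples.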
   2156       APInt Op1Int = Op1CI->getValue();
   2157       uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
   2158       APInt API(Op1Int.getBitWidth(), 0);
   2159       API.setBit(BitToSet);
   2160       Op1 = ConstantInt::get(V->getContext(), API);
   2161     }
   2162 
   2163     Value *Mul0 = nullptr;
   2164     if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
   2165       if (Constant *Op1C = dyn_cast<Constant>(Op1))
   2166         if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
   2167           if (Op1C->getType()->getPrimitiveSizeInBits() <
   2168               MulC->getType()->getPrimitiveSizeInBits())
   2169             Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
   2170           if (Op1C->getType()->getPrimitiveSizeInBits() >
   2171               MulC->getType()->getPrimitiveSizeInBits())
   2172             MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
   2173 
   2174           // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
   2175           Multiple = ConstantExpr::getMul(MulC, Op1C);
   2176           return true;
   2177         }
   2178 
   2179       if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
   2180         if (Mul0CI->getValue() == 1) {
   2181           // V == Base * Op1, so return Op1
   2182           Multiple = Op1;
   2183           return true;
   2184         }
   2185     }
   2186 
   2187     Value *Mul1 = nullptr;
   2188     if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
   2189       if (Constant *Op0C = dyn_cast<Constant>(Op0))
   2190         if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
   2191           if (Op0C->getType()->getPrimitiveSizeInBits() <
   2192               MulC->getType()->getPrimitiveSizeInBits())
   2193             Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
   2194           if (Op0C->getType()->getPrimitiveSizeInBits() >
   2195               MulC->getType()->getPrimitiveSizeInBits())
   2196             MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
   2197 
   2198           // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
   2199           Multiple = ConstantExpr::getMul(MulC, Op0C);
   2200           return true;
   2201         }
   2202 
   2203       if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
   2204         if (Mul1CI->getValue() == 1) {
   2205           // V == Base * Op0, so return Op0
   2206           Multiple = Op0;
   2207           return true;
   2208         }
   2209     }
   2210   }
   2211   }
   2212 
   2213   // We could not determine if V is a multiple of Base.
   2214   return false;
   2215 }
   2216 
   2217 /// Return true if we can prove that the specified FP value is never equal to
   2218 /// -0.0.
   2219 ///
   2220 /// NOTE: this function will need to be revisited when we support non-default
   2221 /// rounding modes!
   2222 ///
   2223 bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
   2224   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
   2225     return !CFP->getValueAPF().isNegZero();
   2226 
   2227   // FIXME: Magic number! At the least, this should be given a name because it's
   2228   // used similarly in CannotBeOrderedLessThanZero(). A better fix may be to
   2229   // expose it as a parameter, so it can be used for testing / experimenting.
   2230   if (Depth == 6)
   2231     return false;  // Limit search depth.
   2232 
   2233   const Operator *I = dyn_cast<Operator>(V);
   2234   if (!I) return false;
   2235 
   2236   // Check if the nsz fast-math flag is set
   2237   if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
   2238     if (FPO->hasNoSignedZeros())
   2239       return true;
   2240 
   2241   // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
   2242   if (I->getOpcode() == Instruction::FAdd)
   2243     if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1)))
   2244       if (CFP->isNullValue())
   2245         return true;
   2246 
   2247   // sitofp and uitofp turn into +0.0 for zero.
   2248   if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
   2249     return true;
   2250 
   2251   if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
   2252     // sqrt(-0.0) = -0.0, no other negative results are possible.
   2253     if (II->getIntrinsicID() == Intrinsic::sqrt)
   2254       return CannotBeNegativeZero(II->getArgOperand(0), Depth+1);
   2255 
   2256   if (const CallInst *CI = dyn_cast<CallInst>(I))
   2257     if (const Function *F = CI->getCalledFunction()) {
   2258       if (F->isDeclaration()) {
   2259         // abs(x) != -0.0
   2260         if (F->getName() == "abs") return true;
   2261         // fabs[lf](x) != -0.0
   2262         if (F->getName() == "fabs") return true;
   2263         if (F->getName() == "fabsf") return true;
   2264         if (F->getName() == "fabsl") return true;
   2265         if (F->getName() == "sqrt" || F->getName() == "sqrtf" ||
   2266             F->getName() == "sqrtl")
   2267           return CannotBeNegativeZero(CI->getArgOperand(0), Depth+1);
   2268       }
   2269     }
   2270 
   2271   return false;
   2272 }
   2273 
   2274 bool llvm::CannotBeOrderedLessThanZero(const Value *V, unsigned Depth) {
   2275   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
   2276     return !CFP->getValueAPF().isNegative() || CFP->getValueAPF().isZero();
   2277 
   2278   // FIXME: Magic number! At the least, this should be given a name because it's
   2279   // used similarly in CannotBeNegativeZero(). A better fix may be to
   2280   // expose it as a parameter, so it can be used for testing / experimenting.
   2281   if (Depth == 6)
   2282     return false;  // Limit search depth.
   2283 
   2284   const Operator *I = dyn_cast<Operator>(V);
   2285   if (!I) return false;
   2286 
   2287   switch (I->getOpcode()) {
   2288   default: break;
   2289   case Instruction::FMul:
   2290     // x*x is always non-negative or a NaN.
   2291     if (I->getOperand(0) == I->getOperand(1))
   2292       return true;
   2293     // Fall through
   2294   case Instruction::FAdd:
   2295   case Instruction::FDiv:
   2296   case Instruction::FRem:
   2297     return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1) &&
   2298            CannotBeOrderedLessThanZero(I->getOperand(1), Depth+1);
   2299   case Instruction::FPExt:
   2300   case Instruction::FPTrunc:
   2301     // Widening/narrowing never change sign.
   2302     return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1);
   2303   case Instruction::Call:
   2304     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
   2305       switch (II->getIntrinsicID()) {
   2306       default: break;
   2307       case Intrinsic::exp:
   2308       case Intrinsic::exp2:
   2309       case Intrinsic::fabs:
   2310       case Intrinsic::sqrt:
   2311         return true;
   2312       case Intrinsic::powi:
   2313         if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
   2314           // powi(x,n) is non-negative if n is even.
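                  // e.g. powi(x, 2) == x*x, which is non-negative or NaN.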
   2315           if (CI->getBitWidth() <= 64 && CI->getSExtValue() % 2u == 0)
   2316             return true;
   2317         }
   2318         return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1);
   2319       case Intrinsic::fma:
   2320       case Intrinsic::fmuladd:
   2321         // x*x+y is non-negative if y is non-negative.
   2322         return I->getOperand(0) == I->getOperand(1) &&
   2323                CannotBeOrderedLessThanZero(I->getOperand(2), Depth+1);
   2324       }
   2325     break;
   2326   }
   2327   return false;
   2328 }
   2329 
   2330 /// If the specified value can be set by repeating the same byte in memory,
   2331 /// return the i8 value that it is represented with.  This is
   2332 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
   2333 /// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
   2334 /// byte store (e.g. i16 0x1234), return null.
   2335 Value *llvm::isBytewiseValue(Value *V) {
   2336   // All byte-wide stores are splatable, even of arbitrary variables.
   2337   if (V->getType()->isIntegerTy(8)) return V;
   2338 
  // Handle 'null' constants, e.g. ConstantAggregateZero.
   2340   if (Constant *C = dyn_cast<Constant>(V))
   2341     if (C->isNullValue())
   2342       return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
   2343 
   2344   // Constant float and double values can be handled as integer values if the
   2345   // corresponding integer value is "byteable".  An important case is 0.0.
   2346   if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
   2347     if (CFP->getType()->isFloatTy())
   2348       V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
   2349     if (CFP->getType()->isDoubleTy())
   2350       V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
   2351     // Don't handle long double formats, which have strange constraints.
   2352   }
   2353 
  // We can handle constant integers that are a multiple of 8 bits wide.
   2355   if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
   2356     if (CI->getBitWidth() % 8 == 0) {
   2357       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
   2358 
   2359       if (!CI->getValue().isSplat(8))
   2360         return nullptr;
   2361       return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
   2362     }
   2363   }
   2364 
   2365   // A ConstantDataArray/Vector is splatable if all its members are equal and
   2366   // also splatable.
   2367   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
   2368     Value *Elt = CA->getElementAsConstant(0);
   2369     Value *Val = isBytewiseValue(Elt);
   2370     if (!Val)
   2371       return nullptr;
   2372 
   2373     for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
   2374       if (CA->getElementAsConstant(I) != Elt)
   2375         return nullptr;
   2376 
   2377     return Val;
   2378   }
   2379 
   2380   // Conceptually, we could handle things like:
   2381   //   %a = zext i8 %X to i16
   2382   //   %b = shl i16 %a, 8
   2383   //   %c = or i16 %a, %b
   2384   // but until there is an example that actually needs this, it doesn't seem
   2385   // worth worrying about.
   2386   return nullptr;
   2387 }
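
// Some illustrative inputs and results for isBytewiseValue, to make the doc
// comment above concrete:
//   i8 %x       --> %x          (any byte splats trivially)
//   i32 -1      --> i8 -1       (0xFFFFFFFF is 0xFF repeated)
//   i16 0xF0F0  --> i8 0xF0
//   i16 0x1234  --> null        (bytes 0x12 and 0x34 differ)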
   2388 
   2389 
   2390 // This is the recursive version of BuildSubAggregate. It takes a few different
   2391 // arguments. Idxs is the index within the nested struct From that we are
   2392 // looking at now (which is of type IndexedType). IdxSkip is the number of
   2393 // indices from Idxs that should be left out when inserting into the resulting
// struct. To is the result struct built so far; new insertvalue instructions
// build on it.
   2396 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
   2397                                 SmallVectorImpl<unsigned> &Idxs,
   2398                                 unsigned IdxSkip,
   2399                                 Instruction *InsertBefore) {
   2400   llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType);
   2401   if (STy) {
   2402     // Save the original To argument so we can modify it
   2403     Value *OrigTo = To;
   2404     // General case, the type indexed by Idxs is a struct
   2405     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
   2406       // Process each struct element recursively
   2407       Idxs.push_back(i);
   2408       Value *PrevTo = To;
   2409       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
   2410                              InsertBefore);
   2411       Idxs.pop_back();
   2412       if (!To) {
   2413         // Couldn't find any inserted value for this index? Cleanup
   2414         while (PrevTo != OrigTo) {
   2415           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
   2416           PrevTo = Del->getAggregateOperand();
   2417           Del->eraseFromParent();
   2418         }
   2419         // Stop processing elements
   2420         break;
   2421       }
   2422     }
   2423     // If we successfully found a value for each of our subaggregates
   2424     if (To)
   2425       return To;
   2426   }
  // Base case: the type indexed by Idxs is not a struct, or not all of the
  // struct's elements had a value that was inserted directly. In the latter
  // case, perhaps we can't determine each of the subelements individually, but
  // we might be able to find the complete struct somewhere.
   2431 
   2432   // Find the value that is at that particular spot
   2433   Value *V = FindInsertedValue(From, Idxs);
   2434 
   2435   if (!V)
   2436     return nullptr;
   2437 
  // Insert the value into the new (sub)aggregate.
   2439   return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
   2440                                        "tmp", InsertBefore);
   2441 }
   2442 
   2443 // This helper takes a nested struct and extracts a part of it (which is again a
   2444 // struct) into a new value. For example, given the struct:
   2445 // { a, { b, { c, d }, e } }
   2446 // and the indices "1, 1" this returns
   2447 // { c, d }.
   2448 //
// It does this by inserting an insertvalue for each element in the resulting
// struct, as opposed to just inserting a single struct. This will only work if
// each element of the substruct is known (i.e., inserted into From by an
// insertvalue instruction somewhere).
//
// All inserted insertvalue instructions are inserted before InsertBefore.
   2455 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
   2456                                 Instruction *InsertBefore) {
   2457   assert(InsertBefore && "Must have someplace to insert!");
  Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
                                                       idx_range);
   2460   Value *To = UndefValue::get(IndexedType);
   2461   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
   2462   unsigned IdxSkip = Idxs.size();
   2463 
   2464   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
   2465 }
   2466 
/// Given an aggregate and a sequence of indices, see if the scalar value
/// indexed is already around as a register, for example if it were inserted
/// directly into the aggregate.
   2470 ///
   2471 /// If InsertBefore is not null, this function will duplicate (modified)
   2472 /// insertvalues when a part of a nested struct is extracted.
   2473 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
   2474                                Instruction *InsertBefore) {
   2475   // Nothing to index? Just return V then (this is useful at the end of our
   2476   // recursion).
   2477   if (idx_range.empty())
   2478     return V;
   2479   // We have indices, so V should have an indexable type.
   2480   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
   2481          "Not looking at a struct or array?");
   2482   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
   2483          "Invalid indices for type?");
   2484 
   2485   if (Constant *C = dyn_cast<Constant>(V)) {
   2486     C = C->getAggregateElement(idx_range[0]);
   2487     if (!C) return nullptr;
   2488     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
   2489   }
   2490 
   2491   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
   2492     // Loop the indices for the insertvalue instruction in parallel with the
   2493     // requested indices
   2494     const unsigned *req_idx = idx_range.begin();
   2495     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
   2496          i != e; ++i, ++req_idx) {
   2497       if (req_idx == idx_range.end()) {
   2498         // We can't handle this without inserting insertvalues
   2499         if (!InsertBefore)
   2500           return nullptr;
   2501 
   2502         // The requested index identifies a part of a nested aggregate. Handle
   2503         // this specially. For example,
   2504         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
   2505         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
   2506         // %C = extractvalue {i32, { i32, i32 } } %B, 1
   2507         // This can be changed into
   2508         // %A = insertvalue {i32, i32 } undef, i32 10, 0
   2509         // %C = insertvalue {i32, i32 } %A, i32 11, 1
   2510         // which allows the unused 0,0 element from the nested struct to be
   2511         // removed.
   2512         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
   2513                                  InsertBefore);
   2514       }
   2515 
      // This insertvalue inserts something other than what we are looking for.
      // See if the (aggregate) value being inserted into has the value we are
      // looking for instead.
   2519       if (*req_idx != *i)
   2520         return FindInsertedValue(I->getAggregateOperand(), idx_range,
   2521                                  InsertBefore);
   2522     }
   2523     // If we end up here, the indices of the insertvalue match with those
   2524     // requested (though possibly only partially). Now we recursively look at
   2525     // the inserted value, passing any remaining indices.
   2526     return FindInsertedValue(I->getInsertedValueOperand(),
   2527                              makeArrayRef(req_idx, idx_range.end()),
   2528                              InsertBefore);
   2529   }
   2530 
   2531   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
    // If we're extracting a value from an aggregate that was extracted from
   2533     // something else, we can extract from that something else directly instead.
   2534     // However, we will need to chain I's indices with the requested indices.
   2535 
   2536     // Calculate the number of indices required
   2537     unsigned size = I->getNumIndices() + idx_range.size();
   2538     // Allocate some space to put the new indices in
   2539     SmallVector<unsigned, 5> Idxs;
   2540     Idxs.reserve(size);
   2541     // Add indices from the extract value instruction
   2542     Idxs.append(I->idx_begin(), I->idx_end());
   2543 
   2544     // Add requested indices
   2545     Idxs.append(idx_range.begin(), idx_range.end());
   2546 
    assert(Idxs.size() == size && "Number of indices added not correct?");
   2549 
   2550     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
   2551   }
  // Otherwise, we don't know (e.g., we may be extracting from a function
  // return value or a load instruction).
   2554   return nullptr;
   2555 }
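
// An illustrative example (hedged; the names are made up): given
//   %A = insertvalue {i32, i32} undef, i32 42, 0
// FindInsertedValue(%A, {0}) returns the i32 42 that was inserted, while
// FindInsertedValue(%A, {1}) returns null because element 1 was never written
// by an insertvalue.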
   2556 
   2557 /// Analyze the specified pointer to see if it can be expressed as a base
   2558 /// pointer plus a constant offset. Return the base and offset to the caller.
   2559 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
   2560                                               const DataLayout &DL) {
   2561   unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
   2562   APInt ByteOffset(BitWidth, 0);
  while (true) {
   2564     if (Ptr->getType()->isVectorTy())
   2565       break;
   2566 
   2567     if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
   2568       APInt GEPOffset(BitWidth, 0);
   2569       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
   2570         break;
   2571 
   2572       ByteOffset += GEPOffset;
   2573 
   2574       Ptr = GEP->getPointerOperand();
   2575     } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
   2576                Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
   2577       Ptr = cast<Operator>(Ptr)->getOperand(0);
   2578     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
   2579       if (GA->mayBeOverridden())
   2580         break;
   2581       Ptr = GA->getAliasee();
   2582     } else {
   2583       break;
   2584     }
   2585   }
   2586   Offset = ByteOffset.getSExtValue();
   2587   return Ptr;
   2588 }
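
// For example (an illustrative sketch assuming a target with 4-byte i32):
// given
//   %p = getelementptr inbounds i32, i32* %base, i64 3
// GetPointerBaseWithConstantOffset(%p, Offset, DL) returns %base and sets
// Offset to 12.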
   2589 
   2590 
/// This function extracts the bytes of the constant, null-terminated C string
/// pointed to by V, starting at byte Offset. If successful, it returns true
/// and stores the string in Str; if unsuccessful, it returns false.
   2594 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
   2595                                  uint64_t Offset, bool TrimAtNul) {
   2596   assert(V);
   2597 
   2598   // Look through bitcast instructions and geps.
   2599   V = V->stripPointerCasts();
   2600 
   2601   // If the value is a GEP instruction or constant expression, treat it as an
   2602   // offset.
   2603   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    // Make sure the GEP has exactly three operands.
   2605     if (GEP->getNumOperands() != 3)
   2606       return false;
   2607 
    // Make sure the GEP's pointer operand points to an array of i8.
   2609     PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
   2610     ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
   2611     if (!AT || !AT->getElementType()->isIntegerTy(8))
   2612       return false;
   2613 
    // Check to make sure that the first index of the GEP is an integer with
    // value 0 so that we are sure we're indexing into the initializer.
   2616     const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
   2617     if (!FirstIdx || !FirstIdx->isZero())
   2618       return false;
   2619 
   2620     // If the second index isn't a ConstantInt, then this is a variable index
   2621     // into the array.  If this occurs, we can't say anything meaningful about
   2622     // the string.
   2623     uint64_t StartIdx = 0;
   2624     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
   2625       StartIdx = CI->getZExtValue();
   2626     else
   2627       return false;
   2628     return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx + Offset,
   2629                                  TrimAtNul);
   2630   }
   2631 
   2632   // The GEP instruction, constant or instruction, must reference a global
   2633   // variable that is a constant and is initialized. The referenced constant
   2634   // initializer is the array that we'll use for optimization.
   2635   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
   2636   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
   2637     return false;
   2638 
   2639   // Handle the all-zeros case
   2640   if (GV->getInitializer()->isNullValue()) {
   2641     // This is a degenerate case. The initializer is constant zero so the
   2642     // length of the string must be zero.
   2643     Str = "";
   2644     return true;
   2645   }
   2646 
  // Must be a ConstantDataArray.
   2648   const ConstantDataArray *Array =
   2649     dyn_cast<ConstantDataArray>(GV->getInitializer());
   2650   if (!Array || !Array->isString())
   2651     return false;
   2652 
   2653   // Get the number of elements in the array
   2654   uint64_t NumElts = Array->getType()->getArrayNumElements();
   2655 
   2656   // Start out with the entire array in the StringRef.
   2657   Str = Array->getAsString();
   2658 
   2659   if (Offset > NumElts)
   2660     return false;
   2661 
   2662   // Skip over 'offset' bytes.
   2663   Str = Str.substr(Offset);
   2664 
   2665   if (TrimAtNul) {
    // Trim off the \0 and anything after it.  If the array is not nul
    // terminated, we just return the whole remainder of the string.  The
    // client may know some other way that the string is length-bound.
   2669     Str = Str.substr(0, Str.find('\0'));
   2670   }
   2671   return true;
   2672 }
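
// A hedged usage sketch: for
//   @str = private constant [6 x i8] c"hello\00"
// getConstantStringInfo(@str, Str) succeeds with Str == "hello"; passing
// TrimAtNul == false would instead yield all six bytes, including the nul.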
   2673 
   2674 // These next two are very similar to the above, but also look through PHI
   2675 // nodes.
   2676 // TODO: See if we can integrate these two together.
   2677 
   2678 /// If we can compute the length of the string pointed to by
   2679 /// the specified pointer, return 'len+1'.  If we can't, return 0.
   2680 static uint64_t GetStringLengthH(Value *V, SmallPtrSetImpl<PHINode*> &PHIs) {
   2681   // Look through noop bitcast instructions.
   2682   V = V->stripPointerCasts();
   2683 
   2684   // If this is a PHI node, there are two cases: either we have already seen it
   2685   // or we haven't.
   2686   if (PHINode *PN = dyn_cast<PHINode>(V)) {
   2687     if (!PHIs.insert(PN).second)
   2688       return ~0ULL;  // already in the set.
   2689 
   2690     // If it was new, see if all the input strings are the same length.
   2691     uint64_t LenSoFar = ~0ULL;
   2692     for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
   2693       uint64_t Len = GetStringLengthH(PN->getIncomingValue(i), PHIs);
   2694       if (Len == 0) return 0; // Unknown length -> unknown.
   2695 
   2696       if (Len == ~0ULL) continue;
   2697 
   2698       if (Len != LenSoFar && LenSoFar != ~0ULL)
   2699         return 0;    // Disagree -> unknown.
   2700       LenSoFar = Len;
   2701     }
   2702 
   2703     // Success, all agree.
   2704     return LenSoFar;
   2705   }
   2706 
  // strlen(select(c, x, y)) is known only when strlen(x) and strlen(y) agree
  // (with ~0ULL meaning "don't care", deferring to the other arm).
   2708   if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
   2709     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs);
   2710     if (Len1 == 0) return 0;
   2711     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs);
   2712     if (Len2 == 0) return 0;
   2713     if (Len1 == ~0ULL) return Len2;
   2714     if (Len2 == ~0ULL) return Len1;
   2715     if (Len1 != Len2) return 0;
   2716     return Len1;
   2717   }
   2718 
   2719   // Otherwise, see if we can read the string.
   2720   StringRef StrData;
   2721   if (!getConstantStringInfo(V, StrData))
   2722     return 0;
   2723 
   2724   return StrData.size()+1;
   2725 }
   2726 
   2727 /// If we can compute the length of the string pointed to by
   2728 /// the specified pointer, return 'len+1'.  If we can't, return 0.
   2729 uint64_t llvm::GetStringLength(Value *V) {
   2730   if (!V->getType()->isPointerTy()) return 0;
   2731 
   2732   SmallPtrSet<PHINode*, 32> PHIs;
   2733   uint64_t Len = GetStringLengthH(V, PHIs);
   2734   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
   2735   // an empty string as a length.
   2736   return Len == ~0ULL ? 1 : Len;
   2737 }
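
// A hedged continuation of the @str sketch above: GetStringLength on a
// pointer to c"hello\00" returns 6 (strlen + 1); a return of 0 means the
// length is unknown.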
   2738 
   2739 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
   2740                                  unsigned MaxLookup) {
   2741   if (!V->getType()->isPointerTy())
   2742     return V;
   2743   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
   2744     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
   2745       V = GEP->getPointerOperand();
   2746     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
   2747                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
   2748       V = cast<Operator>(V)->getOperand(0);
   2749     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
   2750       if (GA->mayBeOverridden())
   2751         return V;
   2752       V = GA->getAliasee();
   2753     } else {
   2754       // See if InstructionSimplify knows any relevant tricks.
   2755       if (Instruction *I = dyn_cast<Instruction>(V))
   2756         // TODO: Acquire a DominatorTree and AssumptionCache and use them.
   2757         if (Value *Simplified = SimplifyInstruction(I, DL, nullptr)) {
   2758           V = Simplified;
   2759           continue;
   2760         }
   2761 
   2762       return V;
   2763     }
   2764     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
   2765   }
   2766   return V;
   2767 }
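
// An illustrative walk (hedged, with invented names):
//   %a = alloca [16 x i8]
//   %p = getelementptr inbounds [16 x i8], [16 x i8]* %a, i64 0, i64 4
//   %q = bitcast i8* %p to i32*
// GetUnderlyingObject(%q, DL) strips the bitcast and the GEP and returns %a.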
   2768 
   2769 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
   2770                                 const DataLayout &DL, unsigned MaxLookup) {
   2771   SmallPtrSet<Value *, 4> Visited;
   2772   SmallVector<Value *, 4> Worklist;
   2773   Worklist.push_back(V);
   2774   do {
   2775     Value *P = Worklist.pop_back_val();
   2776     P = GetUnderlyingObject(P, DL, MaxLookup);
   2777 
   2778     if (!Visited.insert(P).second)
   2779       continue;
   2780 
   2781     if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
   2782       Worklist.push_back(SI->getTrueValue());
   2783       Worklist.push_back(SI->getFalseValue());
   2784       continue;
   2785     }
   2786 
   2787     if (PHINode *PN = dyn_cast<PHINode>(P)) {
   2788       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
   2789         Worklist.push_back(PN->getIncomingValue(i));
   2790       continue;
   2791     }
   2792 
   2793     Objects.push_back(P);
   2794   } while (!Worklist.empty());
   2795 }
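
// Unlike the single-result version above, this one fans out: for
//   %p = select i1 %c, i8* %x, i8* %y
// GetUnderlyingObjects(%p, Objects, DL) collects the underlying objects of
// both %x and %y.  (Illustrative sketch; %x and %y are made-up names.)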
   2796 
   2797 /// Return true if the only users of this pointer are lifetime markers.
   2798 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
   2799   for (const User *U : V->users()) {
   2800     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
   2801     if (!II) return false;
   2802 
   2803     if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
   2804         II->getIntrinsicID() != Intrinsic::lifetime_end)
   2805       return false;
   2806   }
   2807   return true;
   2808 }
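
// For instance (a hedged sketch), a pointer whose only users look like
//   call void @llvm.lifetime.start(i64 16, i8* %p)
//   call void @llvm.lifetime.end(i64 16, i8* %p)
// reports true here; any other user (a load, a store, another call) makes
// this return false.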
   2809 
   2810 bool llvm::isSafeToSpeculativelyExecute(const Value *V) {
   2811   const Operator *Inst = dyn_cast<Operator>(V);
   2812   if (!Inst)
   2813     return false;
   2814 
   2815   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
   2816     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
   2817       if (C->canTrap())
   2818         return false;
   2819 
   2820   switch (Inst->getOpcode()) {
   2821   default:
   2822     return true;
   2823   case Instruction::UDiv:
   2824   case Instruction::URem: {
   2825     // x / y is undefined if y == 0.
   2826     const APInt *V;
   2827     if (match(Inst->getOperand(1), m_APInt(V)))
   2828       return *V != 0;
   2829     return false;
   2830   }
   2831   case Instruction::SDiv:
   2832   case Instruction::SRem: {
    // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
   2834     const APInt *Numerator, *Denominator;
   2835     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
   2836       return false;
   2837     // We cannot hoist this division if the denominator is 0.
   2838     if (*Denominator == 0)
   2839       return false;
   2840     // It's safe to hoist if the denominator is not 0 or -1.
   2841     if (*Denominator != -1)
   2842       return true;
   2843     // At this point we know that the denominator is -1.  It is safe to hoist as
    // long as we know that the numerator is not INT_MIN.
   2845     if (match(Inst->getOperand(0), m_APInt(Numerator)))
   2846       return !Numerator->isMinSignedValue();
   2847     // The numerator *might* be MinSignedValue.
   2848     return false;
   2849   }
   2850   case Instruction::Load: {
   2851     const LoadInst *LI = cast<LoadInst>(Inst);
   2852     if (!LI->isUnordered() ||
   2853         // Speculative load may create a race that did not exist in the source.
   2854         LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
   2855       return false;
   2856     const DataLayout &DL = LI->getModule()->getDataLayout();
   2857     return LI->getPointerOperand()->isDereferenceablePointer(DL);
   2858   }
   2859   case Instruction::Call: {
   2860     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
   2861       switch (II->getIntrinsicID()) {
   2862       // These synthetic intrinsics have no side-effects and just mark
   2863       // information about their operands.
   2864       // FIXME: There are other no-op synthetic instructions that potentially
   2865       // should be considered at least *safe* to speculate...
   2866       case Intrinsic::dbg_declare:
   2867       case Intrinsic::dbg_value:
   2868         return true;
   2869 
   2870       case Intrinsic::bswap:
   2871       case Intrinsic::ctlz:
   2872       case Intrinsic::ctpop:
   2873       case Intrinsic::cttz:
   2874       case Intrinsic::objectsize:
   2875       case Intrinsic::sadd_with_overflow:
   2876       case Intrinsic::smul_with_overflow:
   2877       case Intrinsic::ssub_with_overflow:
   2878       case Intrinsic::uadd_with_overflow:
   2879       case Intrinsic::umul_with_overflow:
   2880       case Intrinsic::usub_with_overflow:
   2881         return true;
   2882       // Sqrt should be OK, since the llvm sqrt intrinsic isn't defined to set
   2883       // errno like libm sqrt would.
   2884       case Intrinsic::sqrt:
   2885       case Intrinsic::fma:
   2886       case Intrinsic::fmuladd:
   2887       case Intrinsic::fabs:
   2888       case Intrinsic::minnum:
   2889       case Intrinsic::maxnum:
   2890         return true;
   2891       // TODO: some fp intrinsics are marked as having the same error handling
   2892       // as libm. They're safe to speculate when they won't error.
   2893       // TODO: are convert_{from,to}_fp16 safe?
   2894       // TODO: can we list target-specific intrinsics here?
   2895       default: break;
   2896       }
   2897     }
   2898     return false; // The called function could have undefined behavior or
   2899                   // side-effects, even if marked readnone nounwind.
   2900   }
   2901   case Instruction::VAArg:
   2902   case Instruction::Alloca:
   2903   case Instruction::Invoke:
   2904   case Instruction::PHI:
   2905   case Instruction::Store:
   2906   case Instruction::Ret:
   2907   case Instruction::Br:
   2908   case Instruction::IndirectBr:
   2909   case Instruction::Switch:
   2910   case Instruction::Unreachable:
   2911   case Instruction::Fence:
   2912   case Instruction::LandingPad:
   2913   case Instruction::AtomicRMW:
   2914   case Instruction::AtomicCmpXchg:
   2915   case Instruction::Resume:
   2916     return false; // Misc instructions which have effects
   2917   }
   2918 }
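
// Two hedged examples of the division logic above:
//   %r = sdiv i32 %x, 7      ; safe: denominator is a non-zero, non -1
//                            ; constant
//   %r = sdiv i32 %x, %y     ; unsafe: %y may be 0, or (%x, %y) may be
//                            ; (INT_MIN, -1)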
   2919 
   2920 /// Return true if we know that the specified value is never null.
   2921 bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) {
  // Alloca never returns null; malloc might.
   2923   if (isa<AllocaInst>(V)) return true;
   2924 
   2925   // A byval, inalloca, or nonnull argument is never null.
   2926   if (const Argument *A = dyn_cast<Argument>(V))
   2927     return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr();
   2928 
   2929   // Global values are not null unless extern weak.
   2930   if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
   2931     return !GV->hasExternalWeakLinkage();
   2932 
   2933   // A Load tagged w/nonnull metadata is never null.
   2934   if (const LoadInst *LI = dyn_cast<LoadInst>(V))
   2935     return LI->getMetadata(LLVMContext::MD_nonnull);
   2936 
   2937   if (auto CS = ImmutableCallSite(V))
   2938     if (CS.isReturnNonNull())
   2939       return true;
   2940 
   2941   // operator new never returns null.
   2942   if (isOperatorNewLikeFn(V, TLI, /*LookThroughBitCast=*/true))
   2943     return true;
   2944 
   2945   return false;
   2946 }
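
// For example (an illustrative signature, not from the source): in
//   define i8* @f(i8* nonnull %p) { ... }
// isKnownNonNull(%p) is true via the argument attribute, as it is for an
// alloca or for a global without extern_weak linkage.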
   2947 
   2948 OverflowResult llvm::computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
   2949                                                    const DataLayout &DL,
   2950                                                    AssumptionCache *AC,
   2951                                                    const Instruction *CxtI,
   2952                                                    const DominatorTree *DT) {
  // Multiplying an n-bit value by an m-bit value yields a result of at most
  // n + m significant bits. If the total number of significant bits does not
  // exceed the result bit width (minus 1), there is no overflow.
   2956   // This means if we have enough leading zero bits in the operands
   2957   // we can guarantee that the result does not overflow.
   2958   // Ref: "Hacker's Delight" by Henry Warren
   2959   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
   2960   APInt LHSKnownZero(BitWidth, 0);
   2961   APInt LHSKnownOne(BitWidth, 0);
   2962   APInt RHSKnownZero(BitWidth, 0);
   2963   APInt RHSKnownOne(BitWidth, 0);
   2964   computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
   2965                    DT);
   2966   computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
   2967                    DT);
   2968   // Note that underestimating the number of zero bits gives a more
   2969   // conservative answer.
   2970   unsigned ZeroBits = LHSKnownZero.countLeadingOnes() +
   2971                       RHSKnownZero.countLeadingOnes();
   2972   // First handle the easy case: if we have enough zero bits there's
   2973   // definitely no overflow.
   2974   if (ZeroBits >= BitWidth)
   2975     return OverflowResult::NeverOverflows;
   2976 
   2977   // Get the largest possible values for each operand.
   2978   APInt LHSMax = ~LHSKnownZero;
   2979   APInt RHSMax = ~RHSKnownZero;
   2980 
   2981   // We know the multiply operation doesn't overflow if the maximum values for
   2982   // each operand will not overflow after we multiply them together.
   2983   bool MaxOverflow;
   2984   LHSMax.umul_ov(RHSMax, MaxOverflow);
   2985   if (!MaxOverflow)
   2986     return OverflowResult::NeverOverflows;
   2987 
   2988   // We know it always overflows if multiplying the smallest possible values for
   2989   // the operands also results in overflow.
   2990   bool MinOverflow;
   2991   LHSKnownOne.umul_ov(RHSKnownOne, MinOverflow);
   2992   if (MinOverflow)
   2993     return OverflowResult::AlwaysOverflows;
   2994 
   2995   return OverflowResult::MayOverflow;
   2996 }
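
// A worked example of the zero-bit argument (hedged): for an i8 multiply
// where each operand is known to have its top four bits zero (so each value
// is at most 15), ZeroBits == 4 + 4 == 8 >= BitWidth, and indeed
// 15 * 15 == 225 fits in 8 bits, so the result is NeverOverflows.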
   2997 
   2998 OverflowResult llvm::computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
   2999                                                    const DataLayout &DL,
   3000                                                    AssumptionCache *AC,
   3001                                                    const Instruction *CxtI,
   3002                                                    const DominatorTree *DT) {
   3003   bool LHSKnownNonNegative, LHSKnownNegative;
   3004   ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
   3005                  AC, CxtI, DT);
   3006   if (LHSKnownNonNegative || LHSKnownNegative) {
   3007     bool RHSKnownNonNegative, RHSKnownNegative;
   3008     ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
   3009                    AC, CxtI, DT);
   3010 
   3011     if (LHSKnownNegative && RHSKnownNegative) {
      // The sign bit is set in both cases: this MUST overflow.
   3014       return OverflowResult::AlwaysOverflows;
   3015     }
   3016 
   3017     if (LHSKnownNonNegative && RHSKnownNonNegative) {
      // The sign bit is clear in both cases: this CANNOT overflow.
   3020       return OverflowResult::NeverOverflows;
   3021     }
   3022   }
   3023 
   3024   return OverflowResult::MayOverflow;
   3025 }
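
// A worked illustration (not from the source): for an i32 unsigned add, if
// both operands have a known-clear sign bit (each < 2^31), the sum is < 2^32
// and cannot wrap: NeverOverflows.  If both have a known-set sign bit (each
// >= 2^31), the sum is >= 2^32 and must wrap: AlwaysOverflows.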
   3026