//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
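// For example, two separately constructed SCEVs for the expression "x + y"
// are the same object, so comparing SCEV pointers is a valid equality test.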
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node; otherwise we
// represent it as a SCEVUnknown node.
//
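// For example, for the canonical induction variable of
//   for (i = 0; i != n; ++i) ...
// the PHI for i is represented as the polynomial recurrence {0,+,1}<%loop>,
// meaning "starts at 0 and increases by 1 on each iteration of %loop".
//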
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant-"
                                 "derived loop"),
                        cl::init(100));

// FIXME: Enable this with XDEBUG when the test suite is clean.
static cl::opt<bool>
VerifySCEV("verify-scev",
           cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));

INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
                "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution",
                "Scalar Evolution Analysis", false, true)
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->getNoWrapFlags(FlagNUW))
      OS << "nuw><";
    if (AR->getNoWrapFlags(FlagNSW))
      OS << "nsw><";
    if (AR->getNoWrapFlags(FlagNW) &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->getNoWrapFlags(FlagNUW))
        OS << "<nuw>";
      if (NAry->getNoWrapFlags(FlagNSW))
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getValue()->getValue().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

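// The recognizers below pattern-match the target-independent constant
// expressions conventionally used to denote sizeof, alignof, and offsetof:
// sizeof(T) as ptrtoint(gep(T* null, 1)), alignof(T) as the offset of the
// second field of {i1, T}, and offsetof(T, F) as ptrtoint(gep(T* null, 0, F)).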
bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                                 ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS.  This comparator is used to canonicalize
  /// expressions.
  class SCEVComplexityCompare {
    const LoopInfo *const LI;
  public:
    explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}

    // Return true if LHS is less complex than RHS; false otherwise.
    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      return compare(LHS, RHS) < 0;
    }

    // Return negative, zero, or positive, if LHS is less than, equal to, or
    // greater than RHS, respectively. A three-way result allows recursive
    // comparisons to be more efficient.
    int compare(const SCEV *LHS, const SCEV *RHS) const {
      // Fast-path: SCEVs are uniqued so we can do a quick equality check.
      if (LHS == RHS)
        return 0;

      // Primarily, sort the SCEVs by their getSCEVType().
      unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
      if (LType != RType)
        return (int)LType - (int)RType;

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.
      switch (static_cast<SCEVTypes>(LType)) {
      case scUnknown: {
        const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Sort SCEVUnknown values with some loose heuristics. TODO: This is
        // not as complete as it could be.
        const Value *LV = LU->getValue(), *RV = RU->getValue();

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        bool LIsPointer = LV->getType()->isPointerTy(),
             RIsPointer = RV->getType()->isPointerTy();
        if (LIsPointer != RIsPointer)
          return (int)LIsPointer - (int)RIsPointer;

        // Compare getValueID values.
        unsigned LID = LV->getValueID(),
                 RID = RV->getValueID();
        if (LID != RID)
          return (int)LID - (int)RID;

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LV)) {
          const Argument *RA = cast<Argument>(RV);
          unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
          return (int)LArgNo - (int)RArgNo;
        }

        // For instructions, compare their loop depth, and their operand
        // count.  This is pretty loose.
        if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
          const Instruction *RInst = cast<Instruction>(RV);

          // Compare loop depths.
          const BasicBlock *LParent = LInst->getParent(),
                           *RParent = RInst->getParent();
          if (LParent != RParent) {
            unsigned LDepth = LI->getLoopDepth(LParent),
                     RDepth = LI->getLoopDepth(RParent);
            if (LDepth != RDepth)
              return (int)LDepth - (int)RDepth;
          }

          // Compare the number of operands.
          unsigned LNumOps = LInst->getNumOperands(),
                   RNumOps = RInst->getNumOperands();
          return (int)LNumOps - (int)RNumOps;
        }

        return 0;
      }

      case scConstant: {
        const SCEVConstant *LC = cast<SCEVConstant>(LHS);
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);

        // Compare constant values.
        const APInt &LA = LC->getValue()->getValue();
        const APInt &RA = RC->getValue()->getValue();
        unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
        if (LBitWidth != RBitWidth)
          return (int)LBitWidth - (int)RBitWidth;
        return LA.ult(RA) ? -1 : 1;
      }

      case scAddRecExpr: {
        const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

        // Compare addrec loop depths.
        const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
        if (LLoop != RLoop) {
          unsigned LDepth = LLoop->getLoopDepth(),
                   RDepth = RLoop->getLoopDepth();
          if (LDepth != RDepth)
            return (int)LDepth - (int)RDepth;
        }

        // Addrec complexity grows with operand count.
        unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
        if (LNumOps != RNumOps)
          return (int)LNumOps - (int)RNumOps;

        // Lexicographically compare.
        for (unsigned i = 0; i != LNumOps; ++i) {
          long X = compare(LA->getOperand(i), RA->getOperand(i));
          if (X != 0)
            return X;
        }

        return 0;
      }

      case scAddExpr:
      case scMulExpr:
      case scSMaxExpr:
      case scUMaxExpr: {
        const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

        // Lexicographically compare n-ary expressions.
        unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
        if (LNumOps != RNumOps)
          return (int)LNumOps - (int)RNumOps;

        for (unsigned i = 0; i != LNumOps; ++i) {
          long X = compare(LC->getOperand(i), RC->getOperand(i));
          if (X != 0)
            return X;
        }
        return 0;
      }

      case scUDivExpr: {
        const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

        // Lexicographically compare udiv expressions.
        long X = compare(LC->getLHS(), RC->getLHS());
        if (X != 0)
          return X;
        return compare(LC->getRHS(), RC->getRHS());
      }

      case scTruncate:
      case scZeroExtend:
      case scSignExtend: {
        const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

        // Compare cast expressions by operand.
        return compare(LC->getOperand(), RC->getOperand());
      }

      case scCouldNotCompute:
        llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
      }
      llvm_unreachable("Unknown SCEV kind!");
    }
  };
}

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector are
/// consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
///
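/// For example, the operand list (%x, 5, %x) is reordered to (5, %x, %x):
/// the constant sorts first, and the two occurrences of %x become adjacent,
/// so callers can combine duplicates with a single linear scan.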
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (SCEVComplexityCompare(LI)(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}



//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assumes K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must ensure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
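  //
  // As a concrete instance: for K = 3, BC(It, 3) = It*(It-1)*(It-2) / 6, and
  // 3! = 6 = 2^1 * 3, so T = 1 and K!/2^T = 3. The product is computed at
  // W+1 bits, divided by 2^T with the udiv below, truncated to W bits, and
  // multiplied by the W-bit multiplicative inverse of 3 to perform the exact
  // division by the odd factor.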

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.  We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
///
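/// For example, the affine recurrence {A,+,B} evaluates at iteration It to
/// A*BC(It, 0) + B*BC(It, 1) == A + B*It.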
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
  // eliminate all the truncates.
  if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
      hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getAddExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
  // eliminate all the truncates.
  if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
      hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getMulExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
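    // For example, (zext i16 (trunc i32 %x to i8)) simplifies to
    // (trunc i32 %x to i16) when the unsigned range of %x fits in 8 bits.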
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->getNoWrapFlags(SCEV::FlagNUW))
        return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                             getZeroExtendExpr(Step, Ty),
                             L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
          const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul), WideTy);
          const SCEV *WideStart = getZeroExtendExpr(Start, WideTy);
          const SCEV *WideMaxBECount =
            getZeroExtendExpr(CastedMaxBECount, WideTy);
          const SCEV *OperandExtendedAdd =
            getAddExpr(WideStart,
                       getMulExpr(WideMaxBECount,
                                  getZeroExtendExpr(Step, WideTy)));
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L, AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          OperandExtendedAdd =
            getAddExpr(WideStart,
                       getMulExpr(WideMaxBECount,
                                  getSignExtendExpr(Step, WideTy)));
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L, AR->getNoWrapFlags());
          }
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRange(Step).getUnsignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
                                           AR->getPostIncExpr(*this), N))) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L, AR->getNoWrapFlags());
          }
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
                                           AR->getPostIncExpr(*this), N))) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L, AR->getNoWrapFlags());
          }
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
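// For example, for an 8-bit recurrence whose step is known to be exactly 1,
// this returns (ICMP_SLT, SINT_MIN - 1) == (ICMP_SLT, 127) in modular
// arithmetic: the increment cannot overflow while the recurrence is known to
// stay below 127 within the loop.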
static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                           ICmpInst::Predicate *Pred,
                                           ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRange(Step).getSignedMax());
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRange(Step).getSignedMin());
  }
  return nullptr;
}

// The recurrence AR has been shown to have no signed wrap. Typically, if we can
// prove NSW for AR, then we can just as easily prove NSW for its preincrement
// or postincrement sibling. This allows normalizing a sign-extended AddRec as
// such: {sext(Step + Start),+,Step} => {(Step + sext(Start)),+,Step}. As a
// result, the expression "Step + sext(PreIncAR)" is congruent with
// "sext(PostIncAR)".
static const SCEV *getPreStartForSignExtend(const SCEVAddRecExpr *AR,
                                            Type *Ty,
                                            ScalarEvolution *SE) {
  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // This is a postinc AR. Check for overflow on the preinc recurrence using the
  // same three conditions that getSignExtendExpr checks.
   1110 
   1111   // 1. NSW flags on the step increment.
   1112   const SCEV *PreStart = SE->getAddExpr(DiffOps, SA->getNoWrapFlags());
   1113   const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
   1114     SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));
   1115 
   1116   if (PreAR && PreAR->getNoWrapFlags(SCEV::FlagNSW))
   1117     return PreStart;
   1118 
   1119   // 2. Direct overflow check on the step operation's expression.
   1120   unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
   1121   Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
   1122   const SCEV *OperandExtendedStart =
   1123     SE->getAddExpr(SE->getSignExtendExpr(PreStart, WideTy),
   1124                    SE->getSignExtendExpr(Step, WideTy));
   1125   if (SE->getSignExtendExpr(Start, WideTy) == OperandExtendedStart) {
   1126     // Cache knowledge of PreAR NSW.
   1127     if (PreAR)
   1128       const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(SCEV::FlagNSW);
   1129     // FIXME: this optimization needs a unit test
   1130     DEBUG(dbgs() << "SCEV: untested prestart overflow check\n");
   1131     return PreStart;
   1132   }
   1133 
   1134   // 3. Loop precondition.
   1135   ICmpInst::Predicate Pred;
   1136   const SCEV *OverflowLimit = getOverflowLimitForStep(Step, &Pred, SE);
   1137 
   1138   if (OverflowLimit &&
   1139       SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit)) {
   1140     return PreStart;
   1141   }
   1142   return nullptr;
   1143 }
   1144 
   1145 // Get the normalized sign-extended expression for this AddRec's Start.
   1146 static const SCEV *getSignExtendAddRecStart(const SCEVAddRecExpr *AR,
   1147                                             Type *Ty,
   1148                                             ScalarEvolution *SE) {
   1149   const SCEV *PreStart = getPreStartForSignExtend(AR, Ty, SE);
   1150   if (!PreStart)
   1151     return SE->getSignExtendExpr(AR->getStart(), Ty);
   1152 
   1153   return SE->getAddExpr(SE->getSignExtendExpr(AR->getStepRecurrence(*SE), Ty),
   1154                         SE->getSignExtendExpr(PreStart, Ty));
   1155 }
   1156 
   1157 const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
   1158                                                Type *Ty) {
   1159   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
   1160          "This is not an extending conversion!");
   1161   assert(isSCEVable(Ty) &&
   1162          "This is not a conversion to a SCEVable type!");
   1163   Ty = getEffectiveSCEVType(Ty);
   1164 
   1165   // Fold if the operand is constant.
   1166   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
   1167     return getConstant(
   1168       cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));
   1169 
   1170   // sext(sext(x)) --> sext(x)
   1171   if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
   1172     return getSignExtendExpr(SS->getOperand(), Ty);
   1173 
   1174   // sext(zext(x)) --> zext(x)
   1175   if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
   1176     return getZeroExtendExpr(SZ->getOperand(), Ty);
   1177 
   1178   // Before doing any expensive analysis, check to see if we've already
   1179   // computed a SCEV for this Op and Ty.
   1180   FoldingSetNodeID ID;
   1181   ID.AddInteger(scSignExtend);
   1182   ID.AddPointer(Op);
   1183   ID.AddPointer(Ty);
   1184   void *IP = nullptr;
   1185   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
   1186 
   1187   // If the input value is provably non-negative, build a zext instead.
   1188   if (isKnownNonNegative(Op))
   1189     return getZeroExtendExpr(Op, Ty);
   1190 
   1191   // sext(trunc(x)) --> sext(x) or x or trunc(x)
   1192   if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
   1193     // It's possible the bits taken off by the truncate were all sign bits. If
   1194     // so, we should be able to simplify this further.
   1195     const SCEV *X = ST->getOperand();
   1196     ConstantRange CR = getSignedRange(X);
   1197     unsigned TruncBits = getTypeSizeInBits(ST->getType());
   1198     unsigned NewBits = getTypeSizeInBits(Ty);
   1199     if (CR.truncate(TruncBits).signExtend(NewBits).contains(
   1200             CR.sextOrTrunc(NewBits)))
   1201       return getTruncateOrSignExtend(X, Ty);
   1202   }
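          // Worked example (illustrative): for sext i8 (trunc i32 %x to i8)
          // to i32 where %x has signed range [-100, 100], no sign bits are
          // lost, so this returns %x directly.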
   1203 
   1204   // sext(C1 + (C2 * x)) --> C1 + sext(C2 * x) if 0 < C1 < C2 and C2 is a power of 2
   1205   if (auto SA = dyn_cast<SCEVAddExpr>(Op)) {
   1206     if (SA->getNumOperands() == 2) {
   1207       auto SC1 = dyn_cast<SCEVConstant>(SA->getOperand(0));
   1208       auto SMul = dyn_cast<SCEVMulExpr>(SA->getOperand(1));
   1209       if (SMul && SC1) {
   1210         if (auto SC2 = dyn_cast<SCEVConstant>(SMul->getOperand(0))) {
   1211           const APInt &C1 = SC1->getValue()->getValue();
   1212           const APInt &C2 = SC2->getValue()->getValue();
   1213           if (C1.isStrictlyPositive() && C2.isStrictlyPositive() &&
   1214               C2.ugt(C1) && C2.isPowerOf2())
   1215             return getAddExpr(getSignExtendExpr(SC1, Ty),
   1216                               getSignExtendExpr(SMul, Ty));
   1217         }
   1218       }
   1219     }
   1220   }
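          // Illustrative instance: sext(1 + 4*x) --> 1 + sext(4*x), since
          // 0 < 1 < 4 and 4 is a power of two.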
   1221   // If the input value is a chrec scev, and we can prove that the value
   1222   // did not overflow in the old, smaller type, we can sign extend all of the
   1223   // operands (often constants).  This allows analysis of something like
   1224   // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
   1225   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
   1226     if (AR->isAffine()) {
   1227       const SCEV *Start = AR->getStart();
   1228       const SCEV *Step = AR->getStepRecurrence(*this);
   1229       unsigned BitWidth = getTypeSizeInBits(AR->getType());
   1230       const Loop *L = AR->getLoop();
   1231 
   1232       // If we have special knowledge that this addrec won't overflow,
   1233       // we don't need to do any further analysis.
   1234       if (AR->getNoWrapFlags(SCEV::FlagNSW))
   1235         return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
   1236                              getSignExtendExpr(Step, Ty),
   1237                              L, SCEV::FlagNSW);
   1238 
   1239       // Check whether the backedge-taken count is SCEVCouldNotCompute.
   1240       // Note that this serves two purposes: It filters out loops that are
   1241       // simply not analyzable, and it covers the case where this code is
   1242       // being called from within backedge-taken count analysis, such that
   1243       // attempting to ask for the backedge-taken count would likely result
   1244       // in infinite recursion. In the latter case, the analysis code will
   1245       // cope with a conservative value, and it will take care to purge
   1246       // that value once it has finished.
   1247       const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
   1248       if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
   1249         // Manually compute the final value for AR, checking for
   1250         // overflow.
   1251 
   1252         // Check whether the backedge-taken count can be losslessly cast to
   1253         // the addrec's type. The count is always unsigned.
   1254         const SCEV *CastedMaxBECount =
   1255           getTruncateOrZeroExtend(MaxBECount, Start->getType());
   1256         const SCEV *RecastedMaxBECount =
   1257           getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
   1258         if (MaxBECount == RecastedMaxBECount) {
   1259           Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
   1260           // Check whether Start+Step*MaxBECount has no signed overflow.
   1261           const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
   1262           const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul), WideTy);
   1263           const SCEV *WideStart = getSignExtendExpr(Start, WideTy);
   1264           const SCEV *WideMaxBECount =
   1265             getZeroExtendExpr(CastedMaxBECount, WideTy);
   1266           const SCEV *OperandExtendedAdd =
   1267             getAddExpr(WideStart,
   1268                        getMulExpr(WideMaxBECount,
   1269                                   getSignExtendExpr(Step, WideTy)));
   1270           if (SAdd == OperandExtendedAdd) {
   1271             // Cache knowledge of AR NSW, which is propagated to this AddRec.
   1272             const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
   1273             // Return the expression with the addrec on the outside.
   1274             return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
   1275                                  getSignExtendExpr(Step, Ty),
   1276                                  L, AR->getNoWrapFlags());
   1277           }
   1278           // Similar to above, only this time treat the step value as unsigned.
   1279           // This covers loops that count up with an unsigned step.
   1280           OperandExtendedAdd =
   1281             getAddExpr(WideStart,
   1282                        getMulExpr(WideMaxBECount,
   1283                                   getZeroExtendExpr(Step, WideTy)));
   1284           if (SAdd == OperandExtendedAdd) {
   1285             // Cache knowledge of AR NSW, which is propagated to this AddRec.
   1286             const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
   1287             // Return the expression with the addrec on the outside.
   1288             return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
   1289                                  getZeroExtendExpr(Step, Ty),
   1290                                  L, AR->getNoWrapFlags());
   1291           }
   1292         }
   1293 
   1294         // If the backedge is guarded by a comparison with the pre-inc value,
   1295         // the addrec is safe. Also, if the entry is guarded by a comparison
   1296         // with the start value and the backedge is guarded by a comparison
   1297         // with the post-inc value, the addrec is safe.
   1298         ICmpInst::Predicate Pred;
   1299         const SCEV *OverflowLimit = getOverflowLimitForStep(Step, &Pred, this);
   1300         if (OverflowLimit &&
   1301             (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
   1302              (isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) &&
   1303               isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this),
   1304                                           OverflowLimit)))) {
   1305           // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
   1306           const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
   1307           return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
   1308                                getSignExtendExpr(Step, Ty),
   1309                                L, AR->getNoWrapFlags());
   1310         }
   1311       }
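              // Worked example (illustrative): for the i8 recurrence {0,+,1}
              // from "for (signed char X = 0; X < 100; ++X)", MaxBECount is 99
              // and Start + Step*MaxBECount = 99 fits in i8, so the two
              // extended sums above agree and NSW is proven.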
   1312       // If Start and Step are constants, check if we can apply this
   1313       // transformation:
   1314       // sext{C1,+,C2} --> C1 + sext{0,+,C2} if 0 < C1 < C2 and C2 is a power of 2
   1315       auto SC1 = dyn_cast<SCEVConstant>(Start);
   1316       auto SC2 = dyn_cast<SCEVConstant>(Step);
   1317       if (SC1 && SC2) {
   1318         const APInt &C1 = SC1->getValue()->getValue();
   1319         const APInt &C2 = SC2->getValue()->getValue();
   1320         if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) &&
   1321             C2.isPowerOf2()) {
   1322           Start = getSignExtendExpr(Start, Ty);
   1323           const SCEV *NewAR = getAddRecExpr(getConstant(AR->getType(), 0), Step,
   1324                                             L, AR->getNoWrapFlags());
   1325           return getAddExpr(Start, getSignExtendExpr(NewAR, Ty));
   1326         }
   1327       }
   1328     }
   1329 
   1330   // The cast wasn't folded; create an explicit cast node.
   1331   // Recompute the insert position, as it may have been invalidated.
   1332   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
   1333   SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
   1334                                                    Op, Ty);
   1335   UniqueSCEVs.InsertNode(S, IP);
   1336   return S;
   1337 }
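        // Client-side sketch (illustrative only; SE, L, Int8Ty, and Int32Ty
        // are assumed to exist in the caller, not defined in this file):
        //
        //   const SCEV *AR   = SE.getAddRecExpr(SE.getConstant(Int8Ty, 0),
        //                                       SE.getConstant(Int8Ty, 1),
        //                                       L, SCEV::FlagNSW);
        //   const SCEV *Wide = SE.getSignExtendExpr(AR, Int32Ty);
        //   // With NSW known, Wide folds to {0,+,1}<nsw><L> over i32 instead
        //   // of staying an opaque SCEVSignExtendExpr.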
   1338 
   1339 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
   1340 /// unspecified bits out to the given type.
   1341 ///
   1342 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
   1343                                               Type *Ty) {
   1344   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
   1345          "This is not an extending conversion!");
   1346   assert(isSCEVable(Ty) &&
   1347          "This is not a conversion to a SCEVable type!");
   1348   Ty = getEffectiveSCEVType(Ty);
   1349 
   1350   // Sign-extend negative constants.
   1351   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
   1352     if (SC->getValue()->getValue().isNegative())
   1353       return getSignExtendExpr(Op, Ty);
   1354 
   1355   // Peel off a truncate cast.
   1356   if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
   1357     const SCEV *NewOp = T->getOperand();
   1358     if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
   1359       return getAnyExtendExpr(NewOp, Ty);
   1360     return getTruncateOrNoop(NewOp, Ty);
   1361   }
   1362 
   1363   // Next try a zext cast. If the cast is folded, use it.
   1364   const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
   1365   if (!isa<SCEVZeroExtendExpr>(ZExt))
   1366     return ZExt;
   1367 
   1368   // Next try a sext cast. If the cast is folded, use it.
   1369   const SCEV *SExt = getSignExtendExpr(Op, Ty);
   1370   if (!isa<SCEVSignExtendExpr>(SExt))
   1371     return SExt;
   1372 
   1373   // Force the cast to be folded into the operands of an addrec.
   1374   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
   1375     SmallVector<const SCEV *, 4> Ops;
   1376     for (const SCEV *Op : AR->operands())
   1377       Ops.push_back(getAnyExtendExpr(Op, Ty));
   1378     return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
   1379   }
   1380 
   1381   // If the expression is obviously signed, use the sext cast value.
   1382   if (isa<SCEVSMaxExpr>(Op))
   1383     return SExt;
   1384 
   1385   // Absent any other information, use the zext cast value.
   1386   return ZExt;
   1387 }
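        // Illustrative behavior: anyext of the i8 constant -5 becomes a sext
        // (negative constants keep their sign bits), while anyext of an
        // addrec is pushed into its operands with only FlagNW retained.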
   1388 
   1389 /// CollectAddOperandsWithScales - Process the given Ops list, which is
   1390 /// a list of operands to be added under the given scale, and update the
   1391 /// given map. This is a helper function for getAddExpr. As an example of
   1392 /// what it does, given a sequence of operands that would form an add
   1393 /// expression like this:
   1394 ///
   1395 ///    m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
   1396 ///
   1397 /// where A and B are constants, update the map with these values:
   1398 ///
   1399 ///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
   1400 ///
   1401 /// and add 13 + A*B*29 to AccumulatedConstant.
   1402 /// This will allow getAddExpr to produce this:
   1403 ///
   1404 ///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
   1405 ///
   1406 /// This form often exposes folding opportunities that are hidden in
   1407 /// the original operand list.
   1408 ///
   1409 /// Return true iff it appears that any interesting folding opportunities
   1410 /// may be exposed. This helps getAddExpr short-circuit extra work in
   1411 /// the common case where no interesting opportunities are present, and
   1412 /// is also used as a check to avoid infinite recursion.
   1413 ///
   1414 static bool
   1415 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
   1416                              SmallVectorImpl<const SCEV *> &NewOps,
   1417                              APInt &AccumulatedConstant,
   1418                              const SCEV *const *Ops, size_t NumOperands,
   1419                              const APInt &Scale,
   1420                              ScalarEvolution &SE) {
   1421   bool Interesting = false;
   1422 
   1423   // Iterate over the add operands. They are sorted, with constants first.
   1424   unsigned i = 0;
   1425   while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
   1426     ++i;
   1427     // Pull a buried constant out to the outside.
   1428     if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
   1429       Interesting = true;
   1430     AccumulatedConstant += Scale * C->getValue()->getValue();
   1431   }
   1432 
   1433   // Next comes everything else. We're especially interested in multiplies
   1434   // here, but they're in the middle, so just visit the rest with one loop.
   1435   for (; i != NumOperands; ++i) {
   1436     const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
   1437     if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
   1438       APInt NewScale =
   1439         Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
   1440       if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
   1441         // A multiplication of a constant with another add; recurse.
   1442         const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
   1443         Interesting |=
   1444           CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
   1445                                        Add->op_begin(), Add->getNumOperands(),
   1446                                        NewScale, SE);
   1447       } else {
   1448         // A multiplication of a constant with some other value. Update
   1449         // the map.
   1450         SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
   1451         const SCEV *Key = SE.getMulExpr(MulOps);
   1452         std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
   1453           M.insert(std::make_pair(Key, NewScale));
   1454         if (Pair.second) {
   1455           NewOps.push_back(Pair.first->first);
   1456         } else {
   1457           Pair.first->second += NewScale;
   1458           // The map already had an entry for this value, which may indicate
   1459           // a folding opportunity.
   1460           Interesting = true;
   1461         }
   1462       }
   1463     } else {
   1464       // An ordinary operand. Update the map.
   1465       std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
   1466         M.insert(std::make_pair(Ops[i], Scale));
   1467       if (Pair.second) {
   1468         NewOps.push_back(Pair.first->first);
   1469       } else {
   1470         Pair.first->second += Scale;
   1471         // The map already had an entry for this value, which may indicate
   1472         // a folding opportunity.
   1473         Interesting = true;
   1474       }
   1475     }
   1476   }
   1477 
   1478   return Interesting;
   1479 }
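        // Worked instance of the example above (illustrative), with A=2, B=3:
        //   m + n + 13 + (2 * (o + p + (3 * (q + m + 29)))) + r + (-1 * r)
        // produces the map (m,7), (n,1), (o,2), (p,2), (q,6), (r,0) and adds
        // 13 + 2*3*29 = 187 to AccumulatedConstant.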
   1480 
   1481 namespace {
   1482   struct APIntCompare {
   1483     bool operator()(const APInt &LHS, const APInt &RHS) const {
   1484       return LHS.ult(RHS);
   1485     }
   1486   };
   1487 }
   1488 
   1489 /// getAddExpr - Get a canonical add expression, or something simpler if
   1490 /// possible.
   1491 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
   1492                                         SCEV::NoWrapFlags Flags) {
   1493   assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
   1494          "only nuw or nsw allowed");
   1495   assert(!Ops.empty() && "Cannot get empty add!");
   1496   if (Ops.size() == 1) return Ops[0];
   1497 #ifndef NDEBUG
   1498   Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
   1499   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
   1500     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
   1501            "SCEVAddExpr operand types don't match!");
   1502 #endif
   1503 
   1504   // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
   1505   // And vice-versa.
   1506   int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
   1507   SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
   1508   if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
   1509     bool All = true;
   1510     for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
   1511          E = Ops.end(); I != E; ++I)
   1512       if (!isKnownNonNegative(*I)) {
   1513         All = false;
   1514         break;
   1515       }
   1516     if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
   1517   }
   1518 
   1519   // Sort by complexity, this groups all similar expression types together.
   1520   GroupByComplexity(Ops, LI);
   1521 
   1522   // If there are any constants, fold them together.
   1523   unsigned Idx = 0;
   1524   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
   1525     ++Idx;
   1526     assert(Idx < Ops.size());
   1527     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
   1528       // We found two constants, fold them together!
   1529       Ops[0] = getConstant(LHSC->getValue()->getValue() +
   1530                            RHSC->getValue()->getValue());
   1531       if (Ops.size() == 2) return Ops[0];
   1532       Ops.erase(Ops.begin()+1);  // Erase the folded element
   1533       LHSC = cast<SCEVConstant>(Ops[0]);
   1534     }
   1535 
   1536     // If we are left with a constant zero being added, strip it off.
   1537     if (LHSC->getValue()->isZero()) {
   1538       Ops.erase(Ops.begin());
   1539       --Idx;
   1540     }
   1541 
   1542     if (Ops.size() == 1) return Ops[0];
   1543   }
   1544 
   1545   // Okay, check to see if the same value occurs in the operand list more than
   1546   // once.  If so, merge them together into a multiply expression.  Since we
   1547   // sorted the list, these values are required to be adjacent.
   1548   Type *Ty = Ops[0]->getType();
   1549   bool FoundMatch = false;
   1550   for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
   1551     if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
   1552       // Scan ahead to count how many equal operands there are.
   1553       unsigned Count = 2;
   1554       while (i+Count != e && Ops[i+Count] == Ops[i])
   1555         ++Count;
   1556       // Merge the values into a multiply.
   1557       const SCEV *Scale = getConstant(Ty, Count);
   1558       const SCEV *Mul = getMulExpr(Scale, Ops[i]);
   1559       if (Ops.size() == Count)
   1560         return Mul;
   1561       Ops[i] = Mul;
   1562       Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
   1563       --i; e -= Count - 1;
   1564       FoundMatch = true;
   1565     }
   1566   if (FoundMatch)
   1567     return getAddExpr(Ops, Flags);
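          // Illustrative: x + y + y + y collapses here to x + 3*y before any
          // further folding is attempted.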
   1568 
   1569   // Check for truncates. If all the operands are truncated from the same
   1570   // type, see if factoring out the truncate would permit the result to be
   1571   // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
   1572   // if the contents of the resulting outer trunc fold to something simple.
   1573   for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
   1574     const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
   1575     Type *DstType = Trunc->getType();
   1576     Type *SrcType = Trunc->getOperand()->getType();
   1577     SmallVector<const SCEV *, 8> LargeOps;
   1578     bool Ok = true;
   1579     // Check all the operands to see if they can be represented in the
   1580     // source type of the truncate.
   1581     for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
   1582       if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
   1583         if (T->getOperand()->getType() != SrcType) {
   1584           Ok = false;
   1585           break;
   1586         }
   1587         LargeOps.push_back(T->getOperand());
   1588       } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
   1589         LargeOps.push_back(getAnyExtendExpr(C, SrcType));
   1590       } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
   1591         SmallVector<const SCEV *, 8> LargeMulOps;
   1592         for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
   1593           if (const SCEVTruncateExpr *T =
   1594                 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
   1595             if (T->getOperand()->getType() != SrcType) {
   1596               Ok = false;
   1597               break;
   1598             }
   1599             LargeMulOps.push_back(T->getOperand());
   1600           } else if (const SCEVConstant *C =
   1601                        dyn_cast<SCEVConstant>(M->getOperand(j))) {
   1602             LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
   1603           } else {
   1604             Ok = false;
   1605             break;
   1606           }
   1607         }
   1608         if (Ok)
   1609           LargeOps.push_back(getMulExpr(LargeMulOps));
   1610       } else {
   1611         Ok = false;
   1612         break;
   1613       }
   1614     }
   1615     if (Ok) {
   1616       // Evaluate the expression in the larger type.
   1617       const SCEV *Fold = getAddExpr(LargeOps, Flags);
   1618       // If it folds to something simple, use it. Otherwise, don't.
   1619       if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
   1620         return getTruncateExpr(Fold, DstType);
   1621     }
   1622   }
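          // Illustrative: if every operand is a trunc from i64 to i32 and the
          // i64 sum folds to a constant C, the whole add becomes trunc(C).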
   1623 
   1624   // Skip past any other cast SCEVs.
   1625   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
   1626     ++Idx;
   1627 
   1628   // If there are add operands they would be next.
   1629   if (Idx < Ops.size()) {
   1630     bool DeletedAdd = false;
   1631     while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
   1632       // If we have an add, expand the add operands onto the end of the operands
   1633       // list.
   1634       Ops.erase(Ops.begin()+Idx);
   1635       Ops.append(Add->op_begin(), Add->op_end());
   1636       DeletedAdd = true;
   1637     }
   1638 
   1639     // If we deleted at least one add, we added operands to the end of the list,
   1640     // and they are not necessarily sorted.  Recurse to resort and resimplify
   1641     // any operands we just acquired.
   1642     if (DeletedAdd)
   1643       return getAddExpr(Ops);
   1644   }
   1645 
   1646   // Skip over the add expression until we get to a multiply.
   1647   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
   1648     ++Idx;
   1649 
   1650   // Check to see if there are any folding opportunities present with
   1651   // operands multiplied by constant values.
   1652   if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
   1653     uint64_t BitWidth = getTypeSizeInBits(Ty);
   1654     DenseMap<const SCEV *, APInt> M;
   1655     SmallVector<const SCEV *, 8> NewOps;
   1656     APInt AccumulatedConstant(BitWidth, 0);
   1657     if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
   1658                                      Ops.data(), Ops.size(),
   1659                                      APInt(BitWidth, 1), *this)) {
   1660       // Some interesting folding opportunity is present, so it's worthwhile to
   1661       // re-generate the operands list. Group the operands by constant scale,
   1662       // to avoid multiplying by the same constant scale multiple times.
   1663       std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
   1664       for (SmallVectorImpl<const SCEV *>::const_iterator I = NewOps.begin(),
   1665            E = NewOps.end(); I != E; ++I)
   1666         MulOpLists[M.find(*I)->second].push_back(*I);
   1667       // Re-generate the operands list.
   1668       Ops.clear();
   1669       if (AccumulatedConstant != 0)
   1670         Ops.push_back(getConstant(AccumulatedConstant));
   1671       for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
   1672            I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
   1673         if (I->first != 0)
   1674           Ops.push_back(getMulExpr(getConstant(I->first),
   1675                                    getAddExpr(I->second)));
   1676       if (Ops.empty())
   1677         return getConstant(Ty, 0);
   1678       if (Ops.size() == 1)
   1679         return Ops[0];
   1680       return getAddExpr(Ops);
   1681     }
   1682   }
   1683 
   1684   // If we are adding something to a multiply expression, make sure the
   1685   // something is not already an operand of the multiply.  If so, merge it into
   1686   // the multiply.
   1687   for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
   1688     const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
   1689     for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
   1690       const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
   1691       if (isa<SCEVConstant>(MulOpSCEV))
   1692         continue;
   1693       for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
   1694         if (MulOpSCEV == Ops[AddOp]) {
   1695           // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
   1696           const SCEV *InnerMul = Mul->getOperand(MulOp == 0); // the non-MulOp operand
   1697           if (Mul->getNumOperands() != 2) {
   1698             // If the multiply has more than two operands, we must get the
   1699             // Y*Z term.
   1700             SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
   1701                                                 Mul->op_begin()+MulOp);
   1702             MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
   1703             InnerMul = getMulExpr(MulOps);
   1704           }
   1705           const SCEV *One = getConstant(Ty, 1);
   1706           const SCEV *AddOne = getAddExpr(One, InnerMul);
   1707           const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
   1708           if (Ops.size() == 2) return OuterMul;
   1709           if (AddOp < Idx) {
   1710             Ops.erase(Ops.begin()+AddOp);
   1711             Ops.erase(Ops.begin()+Idx-1);
   1712           } else {
   1713             Ops.erase(Ops.begin()+Idx);
   1714             Ops.erase(Ops.begin()+AddOp-1);
   1715           }
   1716           Ops.push_back(OuterMul);
   1717           return getAddExpr(Ops);
   1718         }
   1719 
   1720       // Check this multiply against other multiplies being added together.
   1721       for (unsigned OtherMulIdx = Idx+1;
   1722            OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
   1723            ++OtherMulIdx) {
   1724         const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
   1725         // If MulOp occurs in OtherMul, we can fold the two multiplies
   1726         // together.
   1727         for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
   1728              OMulOp != e; ++OMulOp)
   1729           if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
   1730             // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
   1731             const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
   1732             if (Mul->getNumOperands() != 2) {
   1733               SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
   1734                                                   Mul->op_begin()+MulOp);
   1735               MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
   1736               InnerMul1 = getMulExpr(MulOps);
   1737             }
   1738             const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
   1739             if (OtherMul->getNumOperands() != 2) {
   1740               SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
   1741                                                   OtherMul->op_begin()+OMulOp);
   1742               MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
   1743               InnerMul2 = getMulExpr(MulOps);
   1744             }
   1745             const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
   1746             const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
   1747             if (Ops.size() == 2) return OuterMul;
   1748             Ops.erase(Ops.begin()+Idx);
   1749             Ops.erase(Ops.begin()+OtherMulIdx-1);
   1750             Ops.push_back(OuterMul);
   1751             return getAddExpr(Ops);
   1752           }
   1753       }
   1754     }
   1755   }
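          // Illustrative instances of the folds above: w + x + (x * y)
          // becomes w + (x * (y + 1)), and x + (a*b) + (a*c) becomes
          // x + (a * (b + c)).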
   1756 
   1757   // If there are any add recurrences in the operands list, see if any other
   1758   // added values are loop invariant.  If so, we can fold them into the
   1759   // recurrence.
   1760   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
   1761     ++Idx;
   1762 
   1763   // Scan over all recurrences, trying to fold loop invariants into them.
   1764   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
   1765     // Scan all of the other operands to this add and add them to the vector if
   1766     // they are loop invariant w.r.t. the recurrence.
   1767     SmallVector<const SCEV *, 8> LIOps;
   1768     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
   1769     const Loop *AddRecLoop = AddRec->getLoop();
   1770     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
   1771       if (isLoopInvariant(Ops[i], AddRecLoop)) {
   1772         LIOps.push_back(Ops[i]);
   1773         Ops.erase(Ops.begin()+i);
   1774         --i; --e;
   1775       }
   1776 
   1777     // If we found some loop invariants, fold them into the recurrence.
   1778     if (!LIOps.empty()) {
   1779       //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
   1780       LIOps.push_back(AddRec->getStart());
   1781 
   1782       SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
   1783                                              AddRec->op_end());
   1784       AddRecOps[0] = getAddExpr(LIOps);
   1785 
   1786       // Build the new addrec. Propagate the NUW and NSW flags if both the
   1787       // outer add and the inner addrec are guaranteed to have no overflow.
   1788       // Always propagate NW.
   1789       Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
   1790       const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
   1791 
   1792       // If all of the other operands were loop invariant, we are done.
   1793       if (Ops.size() == 1) return NewRec;
   1794 
   1795       // Otherwise, add the folded AddRec by the non-invariant parts.
   1796       for (unsigned i = 0;; ++i)
   1797         if (Ops[i] == AddRec) {
   1798           Ops[i] = NewRec;
   1799           break;
   1800         }
   1801       return getAddExpr(Ops);
   1802     }
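            // Illustrative: for loop-invariant c, the sum c + {a,+,s}<L> is
            // folded here into {a+c,+,s}<L>.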
   1803 
   1804     // Okay, if there weren't any loop invariants to be folded, check to see if
   1805     // there are multiple AddRec's with the same loop induction variable being
   1806     // added together.  If so, we can fold them.
   1807     for (unsigned OtherIdx = Idx+1;
   1808          OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
   1809          ++OtherIdx)
   1810       if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
   1811         // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
   1812         SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
   1813                                                AddRec->op_end());
   1814         for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
   1815              ++OtherIdx)
   1816           if (const SCEVAddRecExpr *OtherAddRec =
   1817                 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
   1818             if (OtherAddRec->getLoop() == AddRecLoop) {
   1819               for (unsigned i = 0, e = OtherAddRec->getNumOperands();
   1820                    i != e; ++i) {
   1821                 if (i >= AddRecOps.size()) {
   1822                   AddRecOps.append(OtherAddRec->op_begin()+i,
   1823                                    OtherAddRec->op_end());
   1824                   break;
   1825                 }
   1826                 AddRecOps[i] = getAddExpr(AddRecOps[i],
   1827                                           OtherAddRec->getOperand(i));
   1828               }
   1829               Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
   1830             }
   1831         // Step size has changed, so we cannot guarantee no self-wraparound.
   1832         Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
   1833         return getAddExpr(Ops);
   1834       }
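              // Illustrative: {1,+,2}<L> + {3,+,4}<L> folds here to
              // {4,+,6}<L>, with the wrap flags conservatively dropped.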
   1835 
   1836     // Otherwise couldn't fold anything into this recurrence.  Move onto the
   1837     // next one.
   1838   }
   1839 
   1840   // Okay, it looks like we really DO need an add expr.  Check to see if we
   1841   // already have one, otherwise create a new one.
   1842   FoldingSetNodeID ID;
   1843   ID.AddInteger(scAddExpr);
   1844   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
   1845     ID.AddPointer(Ops[i]);
   1846   void *IP = nullptr;
   1847   SCEVAddExpr *S =
   1848     static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
   1849   if (!S) {
   1850     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
   1851     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
   1852     S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
   1853                                         O, Ops.size());
   1854     UniqueSCEVs.InsertNode(S, IP);
   1855   }
   1856   S->setNoWrapFlags(Flags);
   1857   return S;
   1858 }
   1859 
   1860 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
   1861   uint64_t k = i*j;
   1862   if (j > 1 && k / j != i) Overflow = true;
   1863   return k;
   1864 }
   1865 
   1866 /// Compute the result of "n choose k", the binomial coefficient.  If an
   1867 /// intermediate computation overflows, Overflow will be set and the return will
   1868 /// be garbage. Overflow is not cleared on absence of overflow.
   1869 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
   1870   // We use the multiplicative formula:
   1871   //     n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
   1872   // At each iteration i, we multiply by the i-th term of the numerator,
   1873   // n-(i-1), and divide by i.  This division will always produce an
   1874   // integral result, and helps reduce the chance of overflow in the
   1875   // intermediate computations. However, we can still overflow even when the
   1876   // final result would fit.
   1877 
   1878   if (n == 0 || n == k) return 1;
   1879   if (k > n) return 0;
   1880 
   1881   if (k > n/2)
   1882     k = n-k;
   1883 
   1884   uint64_t r = 1;
   1885   for (uint64_t i = 1; i <= k; ++i) {
   1886     r = umul_ov(r, n-(i-1), Overflow);
   1887     r /= i;
   1888   }
   1889   return r;
   1890 }
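        // Worked trace (illustrative): Choose(6, 2, Ov) computes r = 1*6/1 = 6,
        // then r = 6*5/2 = 15, matching C(6,2) = 15 with no overflow.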
   1891 
   1892 /// getMulExpr - Get a canonical multiply expression, or something simpler if
   1893 /// possible.
   1894 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
   1895                                         SCEV::NoWrapFlags Flags) {
   1896   assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
   1897          "only nuw or nsw allowed");
   1898   assert(!Ops.empty() && "Cannot get empty mul!");
   1899   if (Ops.size() == 1) return Ops[0];
   1900 #ifndef NDEBUG
   1901   Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
   1902   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
   1903     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
   1904            "SCEVMulExpr operand types don't match!");
   1905 #endif
   1906 
   1907   // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
   1908   // And vice-versa.
   1909   int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
   1910   SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
   1911   if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
   1912     bool All = true;
   1913     for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
   1914          E = Ops.end(); I != E; ++I)
   1915       if (!isKnownNonNegative(*I)) {
   1916         All = false;
   1917         break;
   1918       }
   1919     if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
   1920   }
   1921 
   1922   // Sort by complexity, this groups all similar expression types together.
   1923   GroupByComplexity(Ops, LI);
   1924 
   1925   // If there are any constants, fold them together.
   1926   unsigned Idx = 0;
   1927   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
   1928 
   1929     // C1*(C2+V) -> C1*C2 + C1*V
   1930     if (Ops.size() == 2)
   1931       if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
   1932         if (Add->getNumOperands() == 2 &&
   1933             isa<SCEVConstant>(Add->getOperand(0)))
   1934           return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
   1935                             getMulExpr(LHSC, Add->getOperand(1)));
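            // Illustrative: 3 * (4 + %v) is rewritten here as 12 + 3*%v.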
   1936 
   1937     ++Idx;
   1938     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
   1939       // We found two constants, fold them together!
   1940       ConstantInt *Fold = ConstantInt::get(getContext(),
   1941                                            LHSC->getValue()->getValue() *
   1942                                            RHSC->getValue()->getValue());
   1943       Ops[0] = getConstant(Fold);
   1944       Ops.erase(Ops.begin()+1);  // Erase the folded element
   1945       if (Ops.size() == 1) return Ops[0];
   1946       LHSC = cast<SCEVConstant>(Ops[0]);
   1947     }
   1948 
   1949     // If we are left with a constant one being multiplied, strip it off.
   1950     if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
   1951       Ops.erase(Ops.begin());
   1952       --Idx;
   1953     } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
   1954       // If we have a multiply of zero, it will always be zero.
   1955       return Ops[0];
   1956     } else if (Ops[0]->isAllOnesValue()) {
   1957       // If we have a mul by -1 of an add, try distributing the -1 among the
   1958       // add operands.
   1959       if (Ops.size() == 2) {
   1960         if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
   1961           SmallVector<const SCEV *, 4> NewOps;
   1962           bool AnyFolded = false;
   1963           for (SCEVAddRecExpr::op_iterator I = Add->op_begin(),
   1964                  E = Add->op_end(); I != E; ++I) {
   1965             const SCEV *Mul = getMulExpr(Ops[0], *I);
   1966             if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
   1967             NewOps.push_back(Mul);
   1968           }
   1969           if (AnyFolded)
   1970             return getAddExpr(NewOps);
   1971         }
   1972         else if (const SCEVAddRecExpr *
   1973                  AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
   1974           // Negation preserves a recurrence's no self-wrap property.
   1975           SmallVector<const SCEV *, 4> Operands;
   1976           for (SCEVAddRecExpr::op_iterator I = AddRec->op_begin(),
   1977                  E = AddRec->op_end(); I != E; ++I) {
   1978             Operands.push_back(getMulExpr(Ops[0], *I));
   1979           }
   1980           return getAddRecExpr(Operands, AddRec->getLoop(),
   1981                                AddRec->getNoWrapFlags(SCEV::FlagNW));
   1982         }
   1983       }
   1984     }
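            // Illustrative: -1 * (x + y) distributes to (-1*x) + (-1*y) when
            // at least one term folds, and -1 * {a,+,s}<nw><L> becomes
            // {-a,+,-s}<nw><L>, preserving no-self-wrap.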
   1985 
   1986     if (Ops.size() == 1)
   1987       return Ops[0];
   1988   }
   1989 
   1990   // Skip over the add expression until we get to a multiply.
   1991   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
   1992     ++Idx;
   1993 
   1994   // If there are mul operands inline them all into this expression.
   1995   if (Idx < Ops.size()) {
   1996     bool DeletedMul = false;
   1997     while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
   1998       // If we have a mul, expand the mul operands onto the end of the operands
   1999       // list.
   2000       Ops.erase(Ops.begin()+Idx);
   2001       Ops.append(Mul->op_begin(), Mul->op_end());
   2002       DeletedMul = true;
   2003     }
   2004 
   2005     // If we deleted at least one mul, we added operands to the end of the list,
   2006     // and they are not necessarily sorted.  Recurse to resort and resimplify
   2007     // any operands we just acquired.
   2008     if (DeletedMul)
   2009       return getMulExpr(Ops);
   2010   }
   2011 
   2012   // If there are any add recurrences in the operands list, see if any other
   2013   // added values are loop invariant.  If so, we can fold them into the
   2014   // recurrence.
   2015   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
   2016     ++Idx;
   2017 
   2018   // Scan over all recurrences, trying to fold loop invariants into them.
   2019   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
   2020     // Scan all of the other operands to this mul and add them to the vector if
   2021     // they are loop invariant w.r.t. the recurrence.
   2022     SmallVector<const SCEV *, 8> LIOps;
   2023     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
   2024     const Loop *AddRecLoop = AddRec->getLoop();
   2025     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
   2026       if (isLoopInvariant(Ops[i], AddRecLoop)) {
   2027         LIOps.push_back(Ops[i]);
   2028         Ops.erase(Ops.begin()+i);
   2029         --i; --e;
   2030       }
   2031 
   2032     // If we found some loop invariants, fold them into the recurrence.
   2033     if (!LIOps.empty()) {
   2034       //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
   2035       SmallVector<const SCEV *, 4> NewOps;
   2036       NewOps.reserve(AddRec->getNumOperands());
   2037       const SCEV *Scale = getMulExpr(LIOps);
   2038       for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
   2039         NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
   2040 
   2041       // Build the new addrec. Propagate the NUW and NSW flags if both the
   2042       // outer mul and the inner addrec are guaranteed to have no overflow.
   2043       //
   2044       // The no-self-wrap flag (NW) cannot be guaranteed after changing the
   2045       // step size, but it will be inferred if either NUW or NSW is true.
   2046       Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
   2047       const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);
   2048 
   2049       // If all of the other operands were loop invariant, we are done.
   2050       if (Ops.size() == 1) return NewRec;
   2051 
   2052       // Otherwise, multiply the folded AddRec by the non-invariant parts.
   2053       for (unsigned i = 0;; ++i)
   2054         if (Ops[i] == AddRec) {
   2055           Ops[i] = NewRec;
   2056           break;
   2057         }
   2058       return getMulExpr(Ops);
   2059     }
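            // Illustrative: for loop-invariant 3, the product 3 * {2,+,5}<L>
            // is folded here into {6,+,15}<L>.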
   2060 
   2061     // Okay, if there weren't any loop invariants to be folded, check to see if
   2062     // there are multiple AddRec's with the same loop induction variable being
   2063     // multiplied together.  If so, we can fold them.
   2064     for (unsigned OtherIdx = Idx+1;
   2065          OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
   2066          ++OtherIdx) {
   2067       if (AddRecLoop != cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop())
   2068         continue;
   2069 
   2070       // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
   2071       // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
   2072       //       choose(x, 2x-y)*choose(2x-y, x-z)*A_{y-z}*B_z
   2073       //   ]]],+,...up to x=2n}.
   2074       // Note that the arguments to choose() are always integers with values
   2075       // known at compile time, never SCEV objects.
   2076       //
   2077       // The implementation avoids pointless extra computations when the two
   2078       // addrec's are of different length (mathematically, it's equivalent to
   2079       // an infinite stream of zeros on the right).
   2080       bool OpsModified = false;
   2081       for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
   2082            ++OtherIdx) {
   2083         const SCEVAddRecExpr *OtherAddRec =
   2084           dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
   2085         if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
   2086           continue;
   2087 
   2088         bool Overflow = false;
   2089         Type *Ty = AddRec->getType();
   2090         bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
   2091         SmallVector<const SCEV*, 7> AddRecOps;
   2092         for (int x = 0, xe = AddRec->getNumOperands() +
   2093                OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
   2094           const SCEV *Term = getConstant(Ty, 0);
   2095           for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
   2096             uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
   2097             for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
   2098                    ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
   2099                  z < ze && !Overflow; ++z) {
   2100               uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
   2101               uint64_t Coeff;
   2102               if (LargerThan64Bits)
   2103                 Coeff = umul_ov(Coeff1, Coeff2, Overflow);
   2104               else
   2105                 Coeff = Coeff1*Coeff2;
   2106               const SCEV *CoeffTerm = getConstant(Ty, Coeff);
   2107               const SCEV *Term1 = AddRec->getOperand(y-z);
   2108               const SCEV *Term2 = OtherAddRec->getOperand(z);
   2109               Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1,Term2));
   2110             }
   2111           }
   2112           AddRecOps.push_back(Term);
   2113         }
   2114         if (!Overflow) {
   2115           const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
   2116                                                 SCEV::FlagAnyWrap);
   2117           if (Ops.size() == 2) return NewAddRec;
   2118           Ops[Idx] = NewAddRec;
   2119           Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
   2120           OpsModified = true;
   2121           AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
   2122           if (!AddRec)
   2123             break;
   2124         }
   2125       }
   2126       if (OpsModified)
   2127         return getMulExpr(Ops);
   2128     }
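            // Illustrative: {1,+,1}<L> * {1,+,1}<L>, i.e. (1+n)^2, folds here
            // to {1,+,3,+,2}<L>: 1 + 3n + 2*C(n,2) = 1 + 2n + n^2.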
   2129 
   2130     // Otherwise couldn't fold anything into this recurrence.  Move onto the
   2131     // next one.
   2132   }
   2133 
   2134   // Okay, it looks like we really DO need an mul expr.  Check to see if we
   2135   // already have one, otherwise create a new one.
   2136   FoldingSetNodeID ID;
   2137   ID.AddInteger(scMulExpr);
   2138   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
   2139     ID.AddPointer(Ops[i]);
   2140   void *IP = nullptr;
   2141   SCEVMulExpr *S =
   2142     static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
   2143   if (!S) {
   2144     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
   2145     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
   2146     S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
   2147                                         O, Ops.size());
   2148     UniqueSCEVs.InsertNode(S, IP);
   2149   }
   2150   S->setNoWrapFlags(Flags);
   2151   return S;
   2152 }
   2153 
   2154 /// getUDivExpr - Get a canonical unsigned division expression, or something
   2155 /// simpler if possible.
   2156 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
   2157                                          const SCEV *RHS) {
   2158   assert(getEffectiveSCEVType(LHS->getType()) ==
   2159          getEffectiveSCEVType(RHS->getType()) &&
   2160          "SCEVUDivExpr operand types don't match!");
   2161 
   2162   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
   2163     if (RHSC->getValue()->equalsInt(1))
   2164       return LHS;                               // X udiv 1 --> X
   2165     // If the denominator is zero, the result of the udiv is undefined. Don't
   2166     // try to analyze it, because the resolution chosen here may differ from
   2167     // the resolution chosen in other parts of the compiler.
   2168     if (!RHSC->getValue()->isZero()) {
   2169       // Determine if the division can be folded into the operands of
   2170       // the dividend.
   2171       // TODO: Generalize this to non-constants by using known-bits information.
   2172       Type *Ty = LHS->getType();
   2173       unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
   2174       unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
   2175       // For non-power-of-two values, effectively round the value up to the
   2176       // nearest power of two.
   2177       if (!RHSC->getValue()->getValue().isPowerOf2())
   2178         ++MaxShiftAmt;
   2179       IntegerType *ExtTy =
   2180         IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
   2181       if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
   2182         if (const SCEVConstant *Step =
   2183             dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
   2184           // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
   2185           const APInt &StepInt = Step->getValue()->getValue();
   2186           const APInt &DivInt = RHSC->getValue()->getValue();
   2187           if (!StepInt.urem(DivInt) &&
   2188               getZeroExtendExpr(AR, ExtTy) ==
   2189               getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
   2190                             getZeroExtendExpr(Step, ExtTy),
   2191                             AR->getLoop(), SCEV::FlagAnyWrap)) {
   2192             SmallVector<const SCEV *, 4> Operands;
   2193             for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
   2194               Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
   2195             return getAddRecExpr(Operands, AR->getLoop(),
   2196                                  SCEV::FlagNW);
   2197           }
   2198           // Get a canonical UDivExpr for a recurrence:
   2199           //   {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
   2200           // We can currently only fold X%N if X is constant.
   2201           const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
   2202           if (StartC && !DivInt.urem(StepInt) &&
   2203               getZeroExtendExpr(AR, ExtTy) ==
   2204               getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
   2205                             getZeroExtendExpr(Step, ExtTy),
   2206                             AR->getLoop(), SCEV::FlagAnyWrap)) {
   2207             const APInt &StartInt = StartC->getValue()->getValue();
   2208             const APInt &StartRem = StartInt.urem(StepInt);
   2209             if (StartRem != 0)
   2210               LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
   2211                                   AR->getLoop(), SCEV::FlagNW);
   2212           }
   2213         }
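              // Illustrative: {0,+,4}<L> /u 2 becomes {0,+,2}<L> once the
              // zero-extension check shows the recurrence cannot wrap.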
   2214       // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
   2215       if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
   2216         SmallVector<const SCEV *, 4> Operands;
   2217         for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
   2218           Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
   2219         if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
   2220           // Find an operand that's safely divisible.
   2221           for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
   2222             const SCEV *Op = M->getOperand(i);
   2223             const SCEV *Div = getUDivExpr(Op, RHSC);
   2224             if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
   2225               Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
   2226                                                       M->op_end());
   2227               Operands[i] = Div;
   2228               return getMulExpr(Operands);
   2229             }
   2230           }
   2231       }
   2232       // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
   2233       if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
   2234         SmallVector<const SCEV *, 4> Operands;
   2235         for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
   2236           Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
   2237         if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
   2238           Operands.clear();
   2239           for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
   2240             const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
   2241             if (isa<SCEVUDivExpr>(Op) ||
   2242                 getMulExpr(Op, RHS) != A->getOperand(i))
   2243               break;
   2244             Operands.push_back(Op);
   2245           }
   2246           if (Operands.size() == A->getNumOperands())
   2247             return getAddExpr(Operands);
   2248         }
   2249       }
   2250 
   2251       // Fold if both operands are constant.
   2252       if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
   2253         Constant *LHSCV = LHSC->getValue();
   2254         Constant *RHSCV = RHSC->getValue();
   2255         return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
   2256                                                                    RHSCV)));
   2257       }
   2258     }
   2259   }
   2260 
   2261   FoldingSetNodeID ID;
   2262   ID.AddInteger(scUDivExpr);
   2263   ID.AddPointer(LHS);
   2264   ID.AddPointer(RHS);
   2265   void *IP = nullptr;
   2266   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
   2267   SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
   2268                                              LHS, RHS);
   2269   UniqueSCEVs.InsertNode(S, IP);
   2270   return S;
   2271 }
   2272 
   2273 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
   2274   APInt A = C1->getValue()->getValue().abs();
   2275   APInt B = C2->getValue()->getValue().abs();
   2276   uint32_t ABW = A.getBitWidth();
   2277   uint32_t BBW = B.getBitWidth();
   2278 
   2279   if (ABW > BBW)
   2280     B = B.zext(ABW);
   2281   else if (ABW < BBW)
   2282     A = A.zext(BBW);
   2283 
   2284   return APIntOps::GreatestCommonDivisor(A, B);
   2285 }
   2286 
   2287 /// getUDivExactExpr - Get a canonical unsigned division expression, or
   2288 /// something simpler if possible. There is no representation for an exact udiv
   2289 /// in SCEV IR, but we can attempt to remove factors from the LHS and RHS.
   2290 /// We can't do this when it's not exact because the udiv may be clearing bits.
   2291 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
   2292                                               const SCEV *RHS) {
   2293   // TODO: we could try to find factors in all sorts of things, but for now we
   2294   // just deal with u/exact (multiply, constant). See SCEVDivision towards the
   2295   // end of this file for inspiration.
   2296 
   2297   const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
   2298   if (!Mul)
   2299     return getUDivExpr(LHS, RHS);
   2300 
   2301   if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
   2302     // If the mulexpr multiplies by a constant, then that constant must be the
   2303     // first element of the mulexpr.
   2304     if (const SCEVConstant *LHSCst =
   2305             dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
   2306       if (LHSCst == RHSCst) {
   2307         SmallVector<const SCEV *, 2> Operands;
   2308         Operands.append(Mul->op_begin() + 1, Mul->op_end());
   2309         return getMulExpr(Operands);
   2310       }
   2311 
   2312       // We can't just assume that LHSCst divides RHSCst cleanly; it could be
   2313       // that there's a factor provided by one of the other terms. We need to
   2314       // check.
   2315       APInt Factor = gcd(LHSCst, RHSCst);
   2316       if (!Factor.isIntN(1)) {
   2317         LHSCst = cast<SCEVConstant>(
   2318             getConstant(LHSCst->getValue()->getValue().udiv(Factor)));
   2319         RHSCst = cast<SCEVConstant>(
   2320             getConstant(RHSCst->getValue()->getValue().udiv(Factor)));
   2321         SmallVector<const SCEV *, 2> Operands;
   2322         Operands.push_back(LHSCst);
   2323         Operands.append(Mul->op_begin() + 1, Mul->op_end());
   2324         LHS = getMulExpr(Operands);
   2325         RHS = RHSCst;
   2326         Mul = dyn_cast<SCEVMulExpr>(LHS);
   2327         if (!Mul)
   2328           return getUDivExactExpr(LHS, RHS);
   2329       }
   2330     }
   2331   }
   2332 
   2333   for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
   2334     if (Mul->getOperand(i) == RHS) {
   2335       SmallVector<const SCEV *, 2> Operands;
   2336       Operands.append(Mul->op_begin(), Mul->op_begin() + i);
   2337       Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
   2338       return getMulExpr(Operands);
   2339     }
   2340   }
   2341 
   2342   return getUDivExpr(LHS, RHS);
   2343 }
   2344 
   2345 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
   2346 /// Simplify the expression as much as possible.
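        /// For example, if Step is itself the recurrence {1,+,1}<L>, then
        /// {Start,+,{1,+,1}<L>}<L> folds into the single recurrence
        /// {Start,+,1,+,1}<L>.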
   2347 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
   2348                                            const Loop *L,
   2349                                            SCEV::NoWrapFlags Flags) {
   2350   SmallVector<const SCEV *, 4> Operands;
   2351   Operands.push_back(Start);
   2352   if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
   2353     if (StepChrec->getLoop() == L) {
   2354       Operands.append(StepChrec->op_begin(), StepChrec->op_end());
   2355       return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
   2356     }
   2357 
   2358   Operands.push_back(Step);
   2359   return getAddRecExpr(Operands, L, Flags);
   2360 }
   2361 
   2362 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
   2363 /// Simplify the expression as much as possible.
   2364 const SCEV *
   2365 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
   2366                                const Loop *L, SCEV::NoWrapFlags Flags) {
   2367   if (Operands.size() == 1) return Operands[0];
   2368 #ifndef NDEBUG
   2369   Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
   2370   for (unsigned i = 1, e = Operands.size(); i != e; ++i)
   2371     assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
   2372            "SCEVAddRecExpr operand types don't match!");
   2373   for (unsigned i = 0, e = Operands.size(); i != e; ++i)
   2374     assert(isLoopInvariant(Operands[i], L) &&
   2375            "SCEVAddRecExpr operand is not loop-invariant!");
   2376 #endif
   2377 
   2378   if (Operands.back()->isZero()) {
   2379     Operands.pop_back();
   2380     return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0}  -->  X
   2381   }
   2382 
   2383   // It's tempting to call getMaxBackedgeTakenCount here and
   2384   // use that information to infer NUW and NSW flags. However, computing a
   2385   // BE count requires calling getAddRecExpr, so we may not yet have a
   2386   // meaningful BE count at this point (and if we don't, we'd be stuck
   2387   // with a SCEVCouldNotCompute as the cached BE count).
   2388 
   2389   // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
   2390   // And vice-versa.
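          // For instance, a recurrence like {0,+,4} that carries FlagNSW and
          // has only non-negative operands stays non-negative, so it cannot
          // wrap around the unsigned boundary either and gains FlagNUW.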
   2391   int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
   2392   SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
   2393   if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
   2394     bool All = true;
   2395     for (SmallVectorImpl<const SCEV *>::const_iterator I = Operands.begin(),
   2396          E = Operands.end(); I != E; ++I)
   2397       if (!isKnownNonNegative(*I)) {
   2398         All = false;
   2399         break;
   2400       }
   2401     if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
   2402   }
   2403 
   2404   // Canonicalize nested AddRecs by nesting them in order of loop depth.
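          // For example, {{a,+,b}<Inner>,+,c}<Outer> (with Outer containing
          // Inner) becomes {{a,+,c}<Outer>,+,b}<Inner>, assuming the
          // loop-invariance checks below succeed.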
   2405   if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
   2406     const Loop *NestedLoop = NestedAR->getLoop();
   2407     if (L->contains(NestedLoop) ?
   2408         (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
   2409         (!NestedLoop->contains(L) &&
   2410          DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
   2411       SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
   2412                                                   NestedAR->op_end());
   2413       Operands[0] = NestedAR->getStart();
   2414       // AddRecs require their operands be loop-invariant with respect to their
   2415       // loops. Don't perform this transformation if it would break this
   2416       // requirement.
   2417       bool AllInvariant = true;
   2418       for (unsigned i = 0, e = Operands.size(); i != e; ++i)
   2419         if (!isLoopInvariant(Operands[i], L)) {
   2420           AllInvariant = false;
   2421           break;
   2422         }
   2423       if (AllInvariant) {
   2424         // Create a recurrence for the outer loop with the same step size.
   2425         //
   2426         // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
   2427         // inner recurrence has the same property.
   2428         SCEV::NoWrapFlags OuterFlags =
   2429           maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
   2430 
   2431         NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
   2432         AllInvariant = true;
   2433         for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
   2434           if (!isLoopInvariant(NestedOperands[i], NestedLoop)) {
   2435             AllInvariant = false;
   2436             break;
   2437           }
   2438         if (AllInvariant) {
   2439           // Ok, both add recurrences are valid after the transformation.
   2440           //
   2441           // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
   2442           // the outer recurrence has the same property.
   2443           SCEV::NoWrapFlags InnerFlags =
   2444             maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
   2445           return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
   2446         }
   2447       }
   2448       // Reset Operands to its original state.
   2449       Operands[0] = NestedAR;
   2450     }
   2451   }
   2452 
   2453   // Okay, it looks like we really DO need an addrec expr.  Check to see if we
   2454   // already have one, otherwise create a new one.
   2455   FoldingSetNodeID ID;
   2456   ID.AddInteger(scAddRecExpr);
   2457   for (unsigned i = 0, e = Operands.size(); i != e; ++i)
   2458     ID.AddPointer(Operands[i]);
   2459   ID.AddPointer(L);
   2460   void *IP = nullptr;
   2461   SCEVAddRecExpr *S =
   2462     static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
   2463   if (!S) {
   2464     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
   2465     std::uninitialized_copy(Operands.begin(), Operands.end(), O);
   2466     S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
   2467                                            O, Operands.size(), L);
   2468     UniqueSCEVs.InsertNode(S, IP);
   2469   }
   2470   S->setNoWrapFlags(Flags);
   2471   return S;
   2472 }
   2473 
   2474 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
   2475                                          const SCEV *RHS) {
   2476   SmallVector<const SCEV *, 2> Ops;
   2477   Ops.push_back(LHS);
   2478   Ops.push_back(RHS);
   2479   return getSMaxExpr(Ops);
   2480 }
   2481 
   2482 const SCEV *
   2483 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
   2484   assert(!Ops.empty() && "Cannot get empty smax!");
   2485   if (Ops.size() == 1) return Ops[0];
   2486 #ifndef NDEBUG
   2487   Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
   2488   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
   2489     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
   2490            "SCEVSMaxExpr operand types don't match!");
   2491 #endif
   2492 
   2493   // Sort by complexity; this groups all similar expression types together.
   2494   GroupByComplexity(Ops, LI);
   2495 
   2496   // If there are any constants, fold them together.
   2497   unsigned Idx = 0;
   2498   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
   2499     ++Idx;
   2500     assert(Idx < Ops.size());
   2501     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
   2502       // We found two constants, fold them together!
   2503       ConstantInt *Fold = ConstantInt::get(getContext(),
   2504                               APIntOps::smax(LHSC->getValue()->getValue(),
   2505                                              RHSC->getValue()->getValue()));
   2506       Ops[0] = getConstant(Fold);
   2507       Ops.erase(Ops.begin()+1);  // Erase the folded element
   2508       if (Ops.size() == 1) return Ops[0];
   2509       LHSC = cast<SCEVConstant>(Ops[0]);
   2510     }
   2511 
   2512     // If we are left with a constant minimum-int, strip it off.
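            // (smax(INT_MIN, X) == X for any X, so the constant contributes
            // nothing to the maximum.)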
   2513     if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
   2514       Ops.erase(Ops.begin());
   2515       --Idx;
   2516     } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
   2517       // If we have an smax with a constant maximum-int, it will always be
   2518       // maximum-int.
   2519       return Ops[0];
   2520     }
   2521 
   2522     if (Ops.size() == 1) return Ops[0];
   2523   }
   2524 
   2525   // Find the first SMax
   2526   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
   2527     ++Idx;
   2528 
   2529   // Check to see if one of the operands is an SMax. If so, expand its operands
   2530   // onto our operand list, and recurse to simplify.
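          // For example, smax(smax(a, b), c) flattens to smax(a, b, c).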
   2531   if (Idx < Ops.size()) {
   2532     bool DeletedSMax = false;
   2533     while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
   2534       Ops.erase(Ops.begin()+Idx);
   2535       Ops.append(SMax->op_begin(), SMax->op_end());
   2536       DeletedSMax = true;
   2537     }
   2538 
   2539     if (DeletedSMax)
   2540       return getSMaxExpr(Ops);
   2541   }
   2542 
   2543   // Okay, check to see if the same value occurs in the operand list twice.  If
   2544   // so, delete one.  Since we sorted the list, these values are required to
   2545   // be adjacent.
   2546   for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
   2547     //  X smax Y smax Y  -->  X smax Y
   2548     //  X smax Y         -->  X, if X is always greater than or equal to Y
   2549     if (Ops[i] == Ops[i+1] ||
   2550         isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
   2551       Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
   2552       --i; --e;
   2553     } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
   2554       Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
   2555       --i; --e;
   2556     }
   2557 
   2558   if (Ops.size() == 1) return Ops[0];
   2559 
   2560   assert(!Ops.empty() && "Reduced smax down to nothing!");
   2561 
   2562   // Okay, it looks like we really DO need an smax expr.  Check to see if we
   2563   // already have one, otherwise create a new one.
   2564   FoldingSetNodeID ID;
   2565   ID.AddInteger(scSMaxExpr);
   2566   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
   2567     ID.AddPointer(Ops[i]);
   2568   void *IP = nullptr;
   2569   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
   2570   const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
   2571   std::uninitialized_copy(Ops.begin(), Ops.end(), O);
   2572   SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
   2573                                              O, Ops.size());
   2574   UniqueSCEVs.InsertNode(S, IP);
   2575   return S;
   2576 }
   2577 
   2578 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
   2579                                          const SCEV *RHS) {
   2580   SmallVector<const SCEV *, 2> Ops;
   2581   Ops.push_back(LHS);
   2582   Ops.push_back(RHS);
   2583   return getUMaxExpr(Ops);
   2584 }
   2585 
   2586 const SCEV *
   2587 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
   2588   assert(!Ops.empty() && "Cannot get empty umax!");
   2589   if (Ops.size() == 1) return Ops[0];
   2590 #ifndef NDEBUG
   2591   Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
   2592   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
   2593     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
   2594            "SCEVUMaxExpr operand types don't match!");
   2595 #endif
   2596 
   2597   // Sort by complexity; this groups all similar expression types together.
   2598   GroupByComplexity(Ops, LI);
   2599 
   2600   // If there are any constants, fold them together.
   2601   unsigned Idx = 0;
   2602   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
   2603     ++Idx;
   2604     assert(Idx < Ops.size());
   2605     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
   2606       // We found two constants, fold them together!
   2607       ConstantInt *Fold = ConstantInt::get(getContext(),
   2608                               APIntOps::umax(LHSC->getValue()->getValue(),
   2609                                              RHSC->getValue()->getValue()));
   2610       Ops[0] = getConstant(Fold);
   2611       Ops.erase(Ops.begin()+1);  // Erase the folded element
   2612       if (Ops.size() == 1) return Ops[0];
   2613       LHSC = cast<SCEVConstant>(Ops[0]);
   2614     }
   2615 
   2616     // If we are left with a constant minimum-int, strip it off.
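            // (umax(0, X) == X for any X, since 0 is the unsigned minimum.)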
   2617     if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
   2618       Ops.erase(Ops.begin());
   2619       --Idx;
   2620     } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
   2621       // If we have an umax with a constant maximum-int, it will always be
   2622       // maximum-int.
   2623       return Ops[0];
   2624     }
   2625 
   2626     if (Ops.size() == 1) return Ops[0];
   2627   }
   2628 
   2629   // Find the first UMax
   2630   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
   2631     ++Idx;
   2632 
   2633   // Check to see if one of the operands is a UMax. If so, expand its operands
   2634   // onto our operand list, and recurse to simplify.
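          // For example, umax(umax(a, b), c) flattens to umax(a, b, c).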
   2635   if (Idx < Ops.size()) {
   2636     bool DeletedUMax = false;
   2637     while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
   2638       Ops.erase(Ops.begin()+Idx);
   2639       Ops.append(UMax->op_begin(), UMax->op_end());
   2640       DeletedUMax = true;
   2641     }
   2642 
   2643     if (DeletedUMax)
   2644       return getUMaxExpr(Ops);
   2645   }
   2646 
   2647   // Okay, check to see if the same value occurs in the operand list twice.  If
   2648   // so, delete one.  Since we sorted the list, these values are required to
   2649   // be adjacent.
   2650   for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
   2651     //  X umax Y umax Y  -->  X umax Y
   2652     //  X umax Y         -->  X, if X is always greater than or equal to Y
   2653     if (Ops[i] == Ops[i+1] ||
   2654         isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
   2655       Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
   2656       --i; --e;
   2657     } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
   2658       Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
   2659       --i; --e;
   2660     }
   2661 
   2662   if (Ops.size() == 1) return Ops[0];
   2663 
   2664   assert(!Ops.empty() && "Reduced umax down to nothing!");
   2665 
   2666   // Okay, it looks like we really DO need a umax expr.  Check to see if we
   2667   // already have one, otherwise create a new one.
   2668   FoldingSetNodeID ID;
   2669   ID.AddInteger(scUMaxExpr);
   2670   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
   2671     ID.AddPointer(Ops[i]);
   2672   void *IP = nullptr;
   2673   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
   2674   const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
   2675   std::uninitialized_copy(Ops.begin(), Ops.end(), O);
   2676   SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
   2677                                              O, Ops.size());
   2678   UniqueSCEVs.InsertNode(S, IP);
   2679   return S;
   2680 }
   2681 
   2682 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
   2683                                          const SCEV *RHS) {
   2684   // ~smax(~x, ~y) == smin(x, y).
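          // (Bitwise-not is order-reversing for signed values: x <s y iff
          // ~x >s ~y, so complementing, taking smax, and complementing again
          // yields the signed minimum.)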
   2685   return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
   2686 }
   2687 
   2688 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
   2689                                          const SCEV *RHS) {
   2690   // ~umax(~x, ~y) == umin(x, y)
   2691   return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
   2692 }
   2693 
   2694 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
   2695   // If we have DataLayout, we can bypass creating a target-independent
   2696   // constant expression and then folding it back into a ConstantInt.
   2697   // This is just a compile-time optimization.
   2698   if (DL)
   2699     return getConstant(IntTy, DL->getTypeAllocSize(AllocTy));
   2700 
   2701   Constant *C = ConstantExpr::getSizeOf(AllocTy);
   2702   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
   2703     if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
   2704       C = Folded;
   2705   Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
   2706   assert(Ty == IntTy && "Effective SCEV type doesn't match");
   2707   return getTruncateOrZeroExtend(getSCEV(C), Ty);
   2708 }
   2709 
   2710 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
   2711                                              StructType *STy,
   2712                                              unsigned FieldNo) {
   2713   // If we have DataLayout, we can bypass creating a target-independent
   2714   // constant expression and then folding it back into a ConstantInt.
   2715   // This is just a compile-time optimization.
   2716   if (DL) {
   2717     return getConstant(IntTy,
   2718                        DL->getStructLayout(STy)->getElementOffset(FieldNo));
   2719   }
   2720 
   2721   Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
   2722   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
   2723     if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
   2724       C = Folded;
   2725 
   2726   Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
   2727   return getTruncateOrZeroExtend(getSCEV(C), Ty);
   2728 }
   2729 
   2730 const SCEV *ScalarEvolution::getUnknown(Value *V) {
   2731   // Don't attempt to do anything other than create a SCEVUnknown object
   2732   // here.  createSCEV only calls getUnknown after checking for all other
   2733   // interesting possibilities, and any other code that calls getUnknown
   2734   // is doing so in order to hide a value from SCEV canonicalization.
   2735 
   2736   FoldingSetNodeID ID;
   2737   ID.AddInteger(scUnknown);
   2738   ID.AddPointer(V);
   2739   void *IP = nullptr;
   2740   if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
   2741     assert(cast<SCEVUnknown>(S)->getValue() == V &&
   2742            "Stale SCEVUnknown in uniquing map!");
   2743     return S;
   2744   }
   2745   SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
   2746                                             FirstUnknown);
   2747   FirstUnknown = cast<SCEVUnknown>(S);
   2748   UniqueSCEVs.InsertNode(S, IP);
   2749   return S;
   2750 }
   2751 
   2752 //===----------------------------------------------------------------------===//
   2753 //            Basic SCEV Analysis and PHI Idiom Recognition Code
   2754 //
   2755 
   2756 /// isSCEVable - Test if values of the given type are analyzable within
   2757 /// the SCEV framework. This primarily includes integer types, and it
   2758 /// can optionally include pointer types if the ScalarEvolution class
   2759 /// has access to target-specific information.
   2760 bool ScalarEvolution::isSCEVable(Type *Ty) const {
   2761   // Integers and pointers are always SCEVable.
   2762   return Ty->isIntegerTy() || Ty->isPointerTy();
   2763 }
   2764 
   2765 /// getTypeSizeInBits - Return the size in bits of the specified type,
   2766 /// for which isSCEVable must return true.
   2767 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
   2768   assert(isSCEVable(Ty) && "Type is not SCEVable!");
   2769 
   2770   // If we have a DataLayout, use it!
   2771   if (DL)
   2772     return DL->getTypeSizeInBits(Ty);
   2773 
   2774   // Integer types have fixed sizes.
   2775   if (Ty->isIntegerTy())
   2776     return Ty->getPrimitiveSizeInBits();
   2777 
   2778   // The only other supported type is pointer. Without DataLayout, conservatively
   2779   // assume pointers are 64-bit.
   2780   assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
   2781   return 64;
   2782 }
   2783 
   2784 /// getEffectiveSCEVType - Return a type with the same bitwidth as
   2785 /// the given type and which represents how SCEV will treat the given
   2786 /// type, for which isSCEVable must return true. For pointer types,
   2787 /// this is the pointer-sized integer type.
   2788 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
   2789   assert(isSCEVable(Ty) && "Type is not SCEVable!");
   2790 
   2791   if (Ty->isIntegerTy()) {
   2792     return Ty;
   2793   }
   2794 
   2795   // The only other supported type is pointer.
   2796   assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
   2797 
   2798   if (DL)
   2799     return DL->getIntPtrType(Ty);
   2800 
   2801   // Without DataLayout, conservatively assume pointers are 64-bit.
   2802   return Type::getInt64Ty(getContext());
   2803 }
   2804 
   2805 const SCEV *ScalarEvolution::getCouldNotCompute() {
   2806   return &CouldNotCompute;
   2807 }
   2808 
   2809 namespace {
   2810   // Helper class working with SCEVTraversal to figure out if a SCEV contains
   2811   // a SCEVUnknown with null value-pointer. FindInvalidSCEVUnknown::FindOne
   2812   // is set iff we find such a SCEVUnknown.
   2813   //
   2814   struct FindInvalidSCEVUnknown {
   2815     bool FindOne;
   2816     FindInvalidSCEVUnknown() { FindOne = false; }
   2817     bool follow(const SCEV *S) {
   2818       switch (static_cast<SCEVTypes>(S->getSCEVType())) {
   2819       case scConstant:
   2820         return false;
   2821       case scUnknown:
   2822         if (!cast<SCEVUnknown>(S)->getValue())
   2823           FindOne = true;
   2824         return false;
   2825       default:
   2826         return true;
   2827       }
   2828     }
   2829     bool isDone() const { return FindOne; }
   2830   };
   2831 }
   2832 
   2833 bool ScalarEvolution::checkValidity(const SCEV *S) const {
   2834   FindInvalidSCEVUnknown F;
   2835   SCEVTraversal<FindInvalidSCEVUnknown> ST(F);
   2836   ST.visitAll(S);
   2837 
   2838   return !F.FindOne;
   2839 }
   2840 
   2841 /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
   2842 /// expression and create a new one.
   2843 const SCEV *ScalarEvolution::getSCEV(Value *V) {
   2844   assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
   2845 
   2846   ValueExprMapType::iterator I = ValueExprMap.find_as(V);
   2847   if (I != ValueExprMap.end()) {
   2848     const SCEV *S = I->second;
   2849     if (checkValidity(S))
   2850       return S;
   2851     else
   2852       ValueExprMap.erase(I);
   2853   }
   2854   const SCEV *S = createSCEV(V);
   2855 
   2856   // The process of creating a SCEV for V may have caused other SCEVs
   2857   // to have been created, so it's necessary to insert the new entry
   2858   // from scratch, rather than trying to remember the insert position
   2859   // above.
   2860   ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
   2861   return S;
   2862 }
   2863 
   2864 /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
   2865 ///
   2866 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
   2867   if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
   2868     return getConstant(
   2869                cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
   2870 
   2871   Type *Ty = V->getType();
   2872   Ty = getEffectiveSCEVType(Ty);
   2873   return getMulExpr(V,
   2874                   getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
   2875 }
   2876 
   2877 /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
   2878 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
   2879   if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
   2880     return getConstant(
   2881                 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
   2882 
   2883   Type *Ty = V->getType();
   2884   Ty = getEffectiveSCEVType(Ty);
   2885   const SCEV *AllOnes =
   2886                    getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
   2887   return getMinusSCEV(AllOnes, V);
   2888 }
   2889 
   2890 /// getMinusSCEV - Return LHS-RHS.  Minus is represented in SCEV as A+B*-1.
   2891 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
   2892                                           SCEV::NoWrapFlags Flags) {
   2893   assert(!maskFlags(Flags, SCEV::FlagNUW) && "subtraction does not have NUW");
   2894 
   2895   // Fast path: X - X --> 0.
   2896   if (LHS == RHS)
   2897     return getConstant(LHS->getType(), 0);
   2898 
   2899   // X - Y --> X + -Y
   2900   return getAddExpr(LHS, getNegativeSCEV(RHS), Flags);
   2901 }
   2902 
   2903 /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
   2904 /// input value to the specified type.  If the type must be extended, it is zero
   2905 /// extended.
   2906 const SCEV *
   2907 ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
   2908   Type *SrcTy = V->getType();
   2909   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
   2910          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
   2911          "Cannot truncate or zero extend with non-integer arguments!");
   2912   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
   2913     return V;  // No conversion
   2914   if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
   2915     return getTruncateExpr(V, Ty);
   2916   return getZeroExtendExpr(V, Ty);
   2917 }
   2918 
   2919 /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
   2920 /// input value to the specified type.  If the type must be extended, it is sign
   2921 /// extended.
   2922 const SCEV *
   2923 ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
   2924                                          Type *Ty) {
   2925   Type *SrcTy = V->getType();
   2926   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
   2927          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
   2928          "Cannot truncate or sign extend with non-integer arguments!");
   2929   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
   2930     return V;  // No conversion
   2931   if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
   2932     return getTruncateExpr(V, Ty);
   2933   return getSignExtendExpr(V, Ty);
   2934 }
   2935 
   2936 /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
   2937 /// input value to the specified type.  If the type must be extended, it is zero
   2938 /// extended.  The conversion must not be narrowing.
   2939 const SCEV *
   2940 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
   2941   Type *SrcTy = V->getType();
   2942   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
   2943          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
   2944          "Cannot noop or zero extend with non-integer arguments!");
   2945   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
   2946          "getNoopOrZeroExtend cannot truncate!");
   2947   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
   2948     return V;  // No conversion
   2949   return getZeroExtendExpr(V, Ty);
   2950 }
   2951 
   2952 /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
   2953 /// input value to the specified type.  If the type must be extended, it is sign
   2954 /// extended.  The conversion must not be narrowing.
   2955 const SCEV *
   2956 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
   2957   Type *SrcTy = V->getType();
   2958   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
   2959          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
   2960          "Cannot noop or sign extend with non-integer arguments!");
   2961   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
   2962          "getNoopOrSignExtend cannot truncate!");
   2963   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
   2964     return V;  // No conversion
   2965   return getSignExtendExpr(V, Ty);
   2966 }
   2967 
   2968 /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
   2969 /// the input value to the specified type. If the type must be extended,
   2970 /// it is extended with unspecified bits. The conversion must not be
   2971 /// narrowing.
   2972 const SCEV *
   2973 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
   2974   Type *SrcTy = V->getType();
   2975   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
   2976          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
   2977          "Cannot noop or any extend with non-integer arguments!");
   2978   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
   2979          "getNoopOrAnyExtend cannot truncate!");
   2980   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
   2981     return V;  // No conversion
   2982   return getAnyExtendExpr(V, Ty);
   2983 }
   2984 
   2985 /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
   2986 /// input value to the specified type.  The conversion must not be widening.
   2987 const SCEV *
   2988 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
   2989   Type *SrcTy = V->getType();
   2990   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
   2991          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
   2992          "Cannot truncate or noop with non-integer arguments!");
   2993   assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
   2994          "getTruncateOrNoop cannot extend!");
   2995   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
   2996     return V;  // No conversion
   2997   return getTruncateExpr(V, Ty);
   2998 }
   2999 
   3000 /// getUMaxFromMismatchedTypes - Promote the operands to the wider of
   3001 /// the types using zero-extension, and then perform a umax operation
   3002 /// with them.
   3003 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
   3004                                                         const SCEV *RHS) {
   3005   const SCEV *PromotedLHS = LHS;
   3006   const SCEV *PromotedRHS = RHS;
   3007 
   3008   if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
   3009     PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
   3010   else
   3011     PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
   3012 
   3013   return getUMaxExpr(PromotedLHS, PromotedRHS);
   3014 }
   3015 
   3016 /// getUMinFromMismatchedTypes - Promote the operands to the wider of
   3017 /// the types using zero-extension, and then perform a umin operation
   3018 /// with them.
   3019 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
   3020                                                         const SCEV *RHS) {
   3021   const SCEV *PromotedLHS = LHS;
   3022   const SCEV *PromotedRHS = RHS;
   3023 
   3024   if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
   3025     PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
   3026   else
   3027     PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
   3028 
   3029   return getUMinExpr(PromotedLHS, PromotedRHS);
   3030 }
   3031 
   3032 /// getPointerBase - Transitively follow the chain of pointer-type operands
   3033 /// until reaching a SCEV that does not have a single pointer operand. This
   3034 /// returns a SCEVUnknown pointer for well-formed pointer-type expressions,
   3035 /// but corner cases do exist.
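        /// For example, for ((4 * %i) + %base) the sole pointer-typed operand
        /// %base is followed and returned as the base.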
   3036 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
   3037   // A pointer operand may evaluate to a nonpointer expression, such as null.
   3038   if (!V->getType()->isPointerTy())
   3039     return V;
   3040 
   3041   if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
   3042     return getPointerBase(Cast->getOperand());
   3043   }
   3044   else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
   3045     const SCEV *PtrOp = nullptr;
   3046     for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
   3047          I != E; ++I) {
   3048       if ((*I)->getType()->isPointerTy()) {
   3049         // Cannot find the base of an expression with multiple pointer operands.
   3050         if (PtrOp)
   3051           return V;
   3052         PtrOp = *I;
   3053       }
   3054     }
   3055     if (!PtrOp)
   3056       return V;
   3057     return getPointerBase(PtrOp);
   3058   }
   3059   return V;
   3060 }
   3061 
   3062 /// PushDefUseChildren - Push users of the given Instruction
   3063 /// onto the given Worklist.
   3064 static void
   3065 PushDefUseChildren(Instruction *I,
   3066                    SmallVectorImpl<Instruction *> &Worklist) {
   3067   // Push the def-use children onto the Worklist stack.
   3068   for (User *U : I->users())
   3069     Worklist.push_back(cast<Instruction>(U));
   3070 }
   3071 
   3072 /// ForgetSymbolicName - This looks up computed SCEV values for all
   3073 /// instructions that depend on the given instruction and removes them from
   3074 /// the ValueExprMap if they reference SymName. This is used during PHI
   3075 /// resolution.
   3076 void
   3077 ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
   3078   SmallVector<Instruction *, 16> Worklist;
   3079   PushDefUseChildren(PN, Worklist);
   3080 
   3081   SmallPtrSet<Instruction *, 8> Visited;
   3082   Visited.insert(PN);
   3083   while (!Worklist.empty()) {
   3084     Instruction *I = Worklist.pop_back_val();
   3085     if (!Visited.insert(I)) continue;
   3086 
   3087     ValueExprMapType::iterator It =
   3088       ValueExprMap.find_as(static_cast<Value *>(I));
   3089     if (It != ValueExprMap.end()) {
   3090       const SCEV *Old = It->second;
   3091 
   3092       // Short-circuit the def-use traversal if the symbolic name
   3093       // ceases to appear in expressions.
   3094       if (Old != SymName && !hasOperand(Old, SymName))
   3095         continue;
   3096 
   3097       // SCEVUnknown for a PHI either means that it has an unrecognized
   3098       // structure, it's a PHI that's in the process of being computed
   3099       // by createNodeForPHI, or it's a single-value PHI. In the first case,
   3100       // additional loop trip count information isn't going to change anything.
   3101       // In the second case, createNodeForPHI will perform the necessary
   3102       // updates on its own when it gets to that point. In the third, we do
   3103       // want to forget the SCEVUnknown.
   3104       if (!isa<PHINode>(I) ||
   3105           !isa<SCEVUnknown>(Old) ||
   3106           (I != PN && Old == SymName)) {
   3107         forgetMemoizedResults(Old);
   3108         ValueExprMap.erase(It);
   3109       }
   3110     }
   3111 
   3112     PushDefUseChildren(I, Worklist);
   3113   }
   3114 }
   3115 
   3116 /// createNodeForPHI - PHI nodes have two cases.  Either the PHI node exists in
   3117 /// a loop header, making it a potential recurrence, or it doesn't.
   3118 ///
   3119 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
   3120   if (const Loop *L = LI->getLoopFor(PN->getParent()))
   3121     if (L->getHeader() == PN->getParent()) {
   3122       // The loop may have multiple entrances or multiple exits; we can analyze
   3123       // this phi as an addrec if it has a unique entry value and a unique
   3124       // backedge value.
   3125       Value *BEValueV = nullptr, *StartValueV = nullptr;
   3126       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
   3127         Value *V = PN->getIncomingValue(i);
   3128         if (L->contains(PN->getIncomingBlock(i))) {
   3129           if (!BEValueV) {
   3130             BEValueV = V;
   3131           } else if (BEValueV != V) {
   3132             BEValueV = nullptr;
   3133             break;
   3134           }
   3135         } else if (!StartValueV) {
   3136           StartValueV = V;
   3137         } else if (StartValueV != V) {
   3138           StartValueV = nullptr;
   3139           break;
   3140         }
   3141       }
   3142       if (BEValueV && StartValueV) {
   3143         // While we are analyzing this PHI node, handle its value symbolically.
   3144         const SCEV *SymbolicName = getUnknown(PN);
   3145         assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
   3146                "PHI node already processed?");
   3147         ValueExprMap.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
   3148 
   3149         // Using this symbolic name for the PHI, analyze the value coming around
   3150         // the back-edge.
   3151         const SCEV *BEValue = getSCEV(BEValueV);
   3152 
   3153         // NOTE: If BEValue is loop invariant, we know that the PHI node just
   3154         // has a special value for the first iteration of the loop.
   3155 
   3156         // If the value coming around the backedge is an add with the symbolic
   3157         // value we just inserted, then we found a simple induction variable!
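                // For example, for "%i = phi i32 [ 0, %entry ], [ %i.next, %latch ]"
                // with "%i.next = add i32 %i, 1", BEValue is (%i + 1), and the
                // PHI becomes the recurrence {0,+,1} for this loop.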
   3158         if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
   3159           // If there is a single occurrence of the symbolic value, replace it
   3160           // with a recurrence.
   3161           unsigned FoundIndex = Add->getNumOperands();
   3162           for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
   3163             if (Add->getOperand(i) == SymbolicName)
   3164               if (FoundIndex == e) {
   3165                 FoundIndex = i;
   3166                 break;
   3167               }
   3168 
   3169           if (FoundIndex != Add->getNumOperands()) {
   3170             // Create an add with everything but the specified operand.
   3171             SmallVector<const SCEV *, 8> Ops;
   3172             for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
   3173               if (i != FoundIndex)
   3174                 Ops.push_back(Add->getOperand(i));
   3175             const SCEV *Accum = getAddExpr(Ops);
   3176 
   3177             // This is not a valid addrec if the step amount varies each loop
   3178             // iteration but is not itself an addrec in this loop.
   3179             if (isLoopInvariant(Accum, L) ||
   3180                 (isa<SCEVAddRecExpr>(Accum) &&
   3181                  cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
   3182               SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
   3183 
   3184               // If the increment doesn't overflow, then neither the addrec nor
   3185               // the post-increment will overflow.
   3186               if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
   3187                 if (OBO->hasNoUnsignedWrap())
   3188                   Flags = setFlags(Flags, SCEV::FlagNUW);
   3189                 if (OBO->hasNoSignedWrap())
   3190                   Flags = setFlags(Flags, SCEV::FlagNSW);
   3191               } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
   3192                 // If the increment is an inbounds GEP, then we know the address
   3193                 // space cannot be wrapped around. We cannot make any guarantee
   3194                 // about signed or unsigned overflow because pointers are
   3195                 // unsigned but we may have a negative index from the base
   3196                 // pointer. We can guarantee that no unsigned wrap occurs if the
   3197                 // indices form a positive value.
   3198                 if (GEP->isInBounds()) {
   3199                   Flags = setFlags(Flags, SCEV::FlagNW);
   3200 
   3201                   const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
   3202                   if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
   3203                     Flags = setFlags(Flags, SCEV::FlagNUW);
   3204                 }
   3205               } else if (const SubOperator *OBO =
   3206                            dyn_cast<SubOperator>(BEValueV)) {
   3207                 if (OBO->hasNoUnsignedWrap())
   3208                   Flags = setFlags(Flags, SCEV::FlagNUW);
   3209                 if (OBO->hasNoSignedWrap())
   3210                   Flags = setFlags(Flags, SCEV::FlagNSW);
   3211               }
   3212 
   3213               const SCEV *StartVal = getSCEV(StartValueV);
   3214               const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
   3215 
   3216               // Since the no-wrap flags are on the increment, they apply to the
   3217               // post-incremented value as well.
   3218               if (isLoopInvariant(Accum, L))
   3219                 (void)getAddRecExpr(getAddExpr(StartVal, Accum),
   3220                                     Accum, L, Flags);
   3221 
   3222               // Okay, for the entire analysis of this edge we assumed the PHI
   3223               // to be symbolic.  We now need to go back and purge all of the
   3224               // entries for the scalars that use the symbolic expression.
   3225               ForgetSymbolicName(PN, SymbolicName);
   3226               ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
   3227               return PHISCEV;
   3228             }
   3229           }
   3230         } else if (const SCEVAddRecExpr *AddRec =
   3231                      dyn_cast<SCEVAddRecExpr>(BEValue)) {
   3232           // Otherwise, this could be a loop like this:
   3233           //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
   3234           // In this case, j = {1,+,1}  and BEValue is j.
   3235           // Because the other in-value of i (0) fits the evolution of BEValue,
   3236           // i really is an addrec evolution.
   3237           if (AddRec->getLoop() == L && AddRec->isAffine()) {
   3238             const SCEV *StartVal = getSCEV(StartValueV);
   3239 
   3240             // If StartVal = j.start - j.stride, we can use StartVal as the
   3241             // initial value of the addrec evolution.
   3242             if (StartVal == getMinusSCEV(AddRec->getOperand(0),
   3243                                          AddRec->getOperand(1))) {
   3244               // FIXME: For constant StartVal, we should be able to infer
   3245               // no-wrap flags.
   3246               const SCEV *PHISCEV =
   3247                 getAddRecExpr(StartVal, AddRec->getOperand(1), L,
   3248                               SCEV::FlagAnyWrap);
   3249 
   3250               // Okay, for the entire analysis of this edge we assumed the PHI
   3251               // to be symbolic.  We now need to go back and purge all of the
   3252               // entries for the scalars that use the symbolic expression.
   3253               ForgetSymbolicName(PN, SymbolicName);
   3254               ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
   3255               return PHISCEV;
   3256             }
   3257           }
   3258         }
   3259       }
   3260     }
   3261 
   3262   // If the PHI has a single incoming value, follow that value, unless the
   3263   // PHI's incoming blocks are in a different loop, in which case doing so
   3264   // risks breaking LCSSA form. Instcombine would normally zap these, but
   3265   // it doesn't have DominatorTree information, so it may miss cases.
   3266   if (Value *V = SimplifyInstruction(PN, DL, TLI, DT))
   3267     if (LI->replacementPreservesLCSSAForm(PN, V))
   3268       return getSCEV(V);
   3269 
   3270   // If it's not a loop phi, we can't handle it yet.
   3271   return getUnknown(PN);
   3272 }
   3273 
   3274 /// createNodeForGEP - Expand GEP instructions into add and multiply
   3275 /// operations. This allows them to be analyzed by regular SCEV code.
   3276 ///
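        /// For example, "getelementptr inbounds [10 x i32]* %A, i64 0, i64 %i"
        /// becomes the SCEV (%A + (4 * %i)) when i64 matches the pointer
        /// width, assuming 4-byte i32 elements; narrower indices are
        /// sign-extended first.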
   3277 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
   3278   Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
   3279   Value *Base = GEP->getOperand(0);
   3280   // Don't attempt to analyze GEPs over unsized objects.
   3281   if (!Base->getType()->getPointerElementType()->isSized())
   3282     return getUnknown(GEP);
   3283 
   3284   // Don't blindly transfer the inbounds flag from the GEP instruction to the
   3285   // Add expression, because the Instruction may be guarded by control flow
   3286   // and the no-overflow bits may not be valid for the expression in any
   3287   // context.
   3288   SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
   3289 
   3290   const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
   3291   gep_type_iterator GTI = gep_type_begin(GEP);
   3292   for (GetElementPtrInst::op_iterator I = std::next(GEP->op_begin()),
   3293                                       E = GEP->op_end();
   3294        I != E; ++I) {
   3295     Value *Index = *I;
   3296     // Compute the (potentially symbolic) offset in bytes for this index.
   3297     if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
   3298       // For a struct, add the member offset.
   3299       unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
   3300       const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);
   3301 
   3302       // Add the field offset to the running total offset.
   3303       TotalOffset = getAddExpr(TotalOffset, FieldOffset);
   3304     } else {
   3305       // For an array, add the element offset, explicitly scaled.
   3306       const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, *GTI);
   3307       const SCEV *IndexS = getSCEV(Index);
   3308       // Getelementptr indices are signed.
   3309       IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
   3310 
   3311       // Multiply the index by the element size to compute the element offset.
   3312       const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize, Wrap);
   3313 
   3314       // Add the element offset to the running total offset.
   3315       TotalOffset = getAddExpr(TotalOffset, LocalOffset);
   3316     }
   3317   }
   3318 
   3319   // Get the SCEV for the GEP base.
   3320   const SCEV *BaseS = getSCEV(Base);
   3321 
   3322   // Add the total offset from all the GEP indices to the base.
   3323   return getAddExpr(BaseS, TotalOffset, Wrap);
   3324 }
   3325 
   3326 /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
   3327 /// guaranteed to end in (at every loop iteration).  It is, at the same time,
   3328 /// the minimum number of times S is divisible by 2.  For example, given {4,+,8}
   3329 /// it returns 2.  If S is guaranteed to be 0, it returns the bitwidth of S.
   3330 uint32_t
   3331 ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
   3332   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
   3333     return C->getValue()->getValue().countTrailingZeros();
   3334 
   3335   if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
   3336     return std::min(GetMinTrailingZeros(T->getOperand()),
   3337                     (uint32_t)getTypeSizeInBits(T->getType()));
   3338 
   3339   if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
   3340     uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
   3341     return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
   3342              getTypeSizeInBits(E->getType()) : OpRes;
   3343   }
   3344 
   3345   if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
   3346     uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
   3347     return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
   3348              getTypeSizeInBits(E->getType()) : OpRes;
   3349   }
   3350 
   3351   if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
   3352     // The result is the min of all operands' results.
   3353     uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
   3354     for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
   3355       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
   3356     return MinOpRes;
   3357   }
   3358 
   3359   if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
   3360     // The result is the sum of all operands' results.
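            // For example, (4 * 6) == 24 == 0b11000 has 2 + 1 == 3 trailing
            // zero bits, capped at the bit width.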
   3361     uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
   3362     uint32_t BitWidth = getTypeSizeInBits(M->getType());
   3363     for (unsigned i = 1, e = M->getNumOperands();
   3364          SumOpRes != BitWidth && i != e; ++i)
   3365       SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
   3366                           BitWidth);
   3367     return SumOpRes;
   3368   }
   3369 
   3370   if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
   3371     // The result is the min of all operands' results.
   3372     uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
   3373     for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
   3374       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
   3375     return MinOpRes;
   3376   }
   3377 
   3378   if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
   3379     // The result is the min of all operands' results.
   3380     uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
   3381     for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
   3382       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
   3383     return MinOpRes;
   3384   }
   3385 
   3386   if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
   3387     // The result is the min of all operands' results.
   3388     uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
   3389     for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
   3390       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
   3391     return MinOpRes;
   3392   }
   3393 
   3394   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
   3395     // For a SCEVUnknown, ask ValueTracking.
   3396     unsigned BitWidth = getTypeSizeInBits(U->getType());
   3397     APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
   3398     computeKnownBits(U->getValue(), Zeros, Ones);
   3399     return Zeros.countTrailingOnes();
   3400   }
   3401 
   3402   // SCEVUDivExpr
   3403   return 0;
   3404 }
   3405 
   3406 /// getUnsignedRange - Determine the unsigned range for a particular SCEV.
   3407 ///
   3408 ConstantRange
   3409 ScalarEvolution::getUnsignedRange(const SCEV *S) {
   3410   // See if we've computed this range already.
   3411   DenseMap<const SCEV *, ConstantRange>::iterator I = UnsignedRanges.find(S);
   3412   if (I != UnsignedRanges.end())
   3413     return I->second;
   3414 
   3415   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
   3416     return setUnsignedRange(C, ConstantRange(C->getValue()->getValue()));
   3417 
   3418   unsigned BitWidth = getTypeSizeInBits(S->getType());
   3419   ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
   3420 
   3421   // If the value has known zeros, the maximum unsigned value will have those
   3422   // known zeros as well.
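          // For example, with TZ == 3 in an 8-bit type the range becomes
          // [0, 0b11111000 + 1), i.e. the maximum is rounded down to a
          // multiple of 8.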
   3423   uint32_t TZ = GetMinTrailingZeros(S);
   3424   if (TZ != 0)
   3425     ConservativeResult =
   3426       ConstantRange(APInt::getMinValue(BitWidth),
   3427                     APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
   3428 
   3429   if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
   3430     ConstantRange X = getUnsignedRange(Add->getOperand(0));
   3431     for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
   3432       X = X.add(getUnsignedRange(Add->getOperand(i)));
   3433     return setUnsignedRange(Add, ConservativeResult.intersectWith(X));
   3434   }
   3435 
   3436   if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
   3437     ConstantRange X = getUnsignedRange(Mul->getOperand(0));
   3438     for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
   3439       X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
   3440     return setUnsignedRange(Mul, ConservativeResult.intersectWith(X));
   3441   }
   3442 
   3443   if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
   3444     ConstantRange X = getUnsignedRange(SMax->getOperand(0));
   3445     for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
   3446       X = X.smax(getUnsignedRange(SMax->getOperand(i)));
   3447     return setUnsignedRange(SMax, ConservativeResult.intersectWith(X));
   3448   }
   3449 
   3450   if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
   3451     ConstantRange X = getUnsignedRange(UMax->getOperand(0));
   3452     for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
   3453       X = X.umax(getUnsignedRange(UMax->getOperand(i)));
   3454     return setUnsignedRange(UMax, ConservativeResult.intersectWith(X));
   3455   }
   3456 
   3457   if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
   3458     ConstantRange X = getUnsignedRange(UDiv->getLHS());
   3459     ConstantRange Y = getUnsignedRange(UDiv->getRHS());
   3460     return setUnsignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y)));
   3461   }
   3462 
   3463   if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
   3464     ConstantRange X = getUnsignedRange(ZExt->getOperand());
   3465     return setUnsignedRange(ZExt,
   3466       ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
   3467   }
   3468 
   3469   if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
   3470     ConstantRange X = getUnsignedRange(SExt->getOperand());
   3471     return setUnsignedRange(SExt,
   3472       ConservativeResult.intersectWith(X.signExtend(BitWidth)));
   3473   }
   3474 
   3475   if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
   3476     ConstantRange X = getUnsignedRange(Trunc->getOperand());
   3477     return setUnsignedRange(Trunc,
   3478       ConservativeResult.intersectWith(X.truncate(BitWidth)));
   3479   }
   3480 
   3481   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
   3482     // If there's no unsigned wrap, the value will never be less than its
   3483     // initial value.
   3484     if (AddRec->getNoWrapFlags(SCEV::FlagNUW))
   3485       if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
   3486         if (!C->getValue()->isZero())
   3487           ConservativeResult =
   3488             ConservativeResult.intersectWith(
   3489               ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0)));
   3490 
   3491     // TODO: non-affine addrec
   3492     if (AddRec->isAffine()) {
   3493       Type *Ty = AddRec->getType();
   3494       const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
   3495       if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
   3496           getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
   3497         MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
   3498 
   3499         const SCEV *Start = AddRec->getStart();
   3500         const SCEV *Step = AddRec->getStepRecurrence(*this);
   3501 
   3502         ConstantRange StartRange = getUnsignedRange(Start);
   3503         ConstantRange StepRange = getSignedRange(Step);
   3504         ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
   3505         ConstantRange EndRange =
   3506           StartRange.add(MaxBECountRange.multiply(StepRange));
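                 // For example, for {0,+,2} with a constant max backedge-taken
                 // count of 7, StartRange is [0,1), StepRange is [2,3), and
                 // MaxBECountRange is [7,8), so EndRange is the singleton
                 // [14,15) and the final unsigned range is [0,15).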
   3507 
   3508         // Check for overflow. This must be done with ConstantRange arithmetic
   3509         // because we could be called from within the ScalarEvolution overflow
   3510         // checking code.
   3511         ConstantRange ExtStartRange = StartRange.zextOrTrunc(BitWidth*2+1);
   3512         ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
   3513         ConstantRange ExtMaxBECountRange =
   3514           MaxBECountRange.zextOrTrunc(BitWidth*2+1);
   3515         ConstantRange ExtEndRange = EndRange.zextOrTrunc(BitWidth*2+1);
   3516         if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
   3517             ExtEndRange)
   3518           return setUnsignedRange(AddRec, ConservativeResult);
   3519 
   3520         APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
   3521                                    EndRange.getUnsignedMin());
   3522         APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
   3523                                    EndRange.getUnsignedMax());
   3524         if (Min.isMinValue() && Max.isMaxValue())
   3525           return setUnsignedRange(AddRec, ConservativeResult);
   3526         return setUnsignedRange(AddRec,
   3527           ConservativeResult.intersectWith(ConstantRange(Min, Max+1)));
   3528       }
   3529     }
   3530 
   3531     return setUnsignedRange(AddRec, ConservativeResult);
   3532   }
   3533 
   3534   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
   3535     // For a SCEVUnknown, ask ValueTracking.
   3536     APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
   3537     computeKnownBits(U->getValue(), Zeros, Ones, DL);
   3538     if (Ones == ~Zeros + 1)
   3539       return setUnsignedRange(U, ConservativeResult);
   3540     return setUnsignedRange(U,
   3541       ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1)));
   3542   }
   3543 
   3544   return setUnsignedRange(S, ConservativeResult);
   3545 }
   3546 
   3547 /// getSignedRange - Determine the signed range for a particular SCEV.
   3548 ///
   3549 ConstantRange
   3550 ScalarEvolution::getSignedRange(const SCEV *S) {
   3551   // See if we've computed this range already.
   3552   DenseMap<const SCEV *, ConstantRange>::iterator I = SignedRanges.find(S);
   3553   if (I != SignedRanges.end())
   3554     return I->second;
   3555 
   3556   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
   3557     return setSignedRange(C, ConstantRange(C->getValue()->getValue()));
   3558 
   3559   unsigned BitWidth = getTypeSizeInBits(S->getType());
   3560   ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
   3561 
   3562   // If the value has known zeros, the maximum signed value will have those
   3563   // known zeros as well.
   3564   uint32_t TZ = GetMinTrailingZeros(S);
   3565   if (TZ != 0)
   3566     ConservativeResult =
   3567       ConstantRange(APInt::getSignedMinValue(BitWidth),
   3568                     APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
   3569 
   3570   if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
   3571     ConstantRange X = getSignedRange(Add->getOperand(0));
   3572     for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
   3573       X = X.add(getSignedRange(Add->getOperand(i)));
   3574     return setSignedRange(Add, ConservativeResult.intersectWith(X));
   3575   }
   3576 
   3577   if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
   3578     ConstantRange X = getSignedRange(Mul->getOperand(0));
   3579     for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
   3580       X = X.multiply(getSignedRange(Mul->getOperand(i)));
   3581     return setSignedRange(Mul, ConservativeResult.intersectWith(X));
   3582   }
   3583 
   3584   if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
   3585     ConstantRange X = getSignedRange(SMax->getOperand(0));
   3586     for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
   3587       X = X.smax(getSignedRange(SMax->getOperand(i)));
   3588     return setSignedRange(SMax, ConservativeResult.intersectWith(X));
   3589   }
   3590 
   3591   if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
   3592     ConstantRange X = getSignedRange(UMax->getOperand(0));
   3593     for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
   3594       X = X.umax(getSignedRange(UMax->getOperand(i)));
   3595     return setSignedRange(UMax, ConservativeResult.intersectWith(X));
   3596   }
   3597 
   3598   if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
   3599     ConstantRange X = getSignedRange(UDiv->getLHS());
   3600     ConstantRange Y = getSignedRange(UDiv->getRHS());
   3601     return setSignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y)));
   3602   }
   3603 
   3604   if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
   3605     ConstantRange X = getSignedRange(ZExt->getOperand());
   3606     return setSignedRange(ZExt,
   3607       ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
   3608   }
   3609 
   3610   if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
   3611     ConstantRange X = getSignedRange(SExt->getOperand());
   3612     return setSignedRange(SExt,
   3613       ConservativeResult.intersectWith(X.signExtend(BitWidth)));
   3614   }
   3615 
   3616   if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
   3617     ConstantRange X = getSignedRange(Trunc->getOperand());
   3618     return setSignedRange(Trunc,
   3619       ConservativeResult.intersectWith(X.truncate(BitWidth)));
   3620   }
   3621 
   3622   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
   3623     // If there's no signed wrap, and all the operands have the same sign or
   3624     // zero, the value won't ever change sign.
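             // For example, {1,+,1}<nsw> has only non-negative operands, so
             // its range can be intersected with the non-negative values
             // [0, SignedMin).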
   3625     if (AddRec->getNoWrapFlags(SCEV::FlagNSW)) {
   3626       bool AllNonNeg = true;
   3627       bool AllNonPos = true;
   3628       for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
   3629         if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
   3630         if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
   3631       }
   3632       if (AllNonNeg)
   3633         ConservativeResult = ConservativeResult.intersectWith(
   3634           ConstantRange(APInt(BitWidth, 0),
   3635                         APInt::getSignedMinValue(BitWidth)));
   3636       else if (AllNonPos)
   3637         ConservativeResult = ConservativeResult.intersectWith(
   3638           ConstantRange(APInt::getSignedMinValue(BitWidth),
   3639                         APInt(BitWidth, 1)));
   3640     }
   3641 
   3642     // TODO: non-affine addrec
   3643     if (AddRec->isAffine()) {
   3644       Type *Ty = AddRec->getType();
   3645       const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
   3646       if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
   3647           getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
   3648         MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
   3649 
   3650         const SCEV *Start = AddRec->getStart();
   3651         const SCEV *Step = AddRec->getStepRecurrence(*this);
   3652 
   3653         ConstantRange StartRange = getSignedRange(Start);
   3654         ConstantRange StepRange = getSignedRange(Step);
   3655         ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
   3656         ConstantRange EndRange =
   3657           StartRange.add(MaxBECountRange.multiply(StepRange));
   3658 
   3659         // Check for overflow. This must be done with ConstantRange arithmetic
   3660         // because we could be called from within the ScalarEvolution overflow
   3661         // checking code.
   3662         ConstantRange ExtStartRange = StartRange.sextOrTrunc(BitWidth*2+1);
   3663         ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
   3664         ConstantRange ExtMaxBECountRange =
   3665           MaxBECountRange.zextOrTrunc(BitWidth*2+1);
   3666         ConstantRange ExtEndRange = EndRange.sextOrTrunc(BitWidth*2+1);
   3667         if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
   3668             ExtEndRange)
   3669           return setSignedRange(AddRec, ConservativeResult);
   3670 
   3671         APInt Min = APIntOps::smin(StartRange.getSignedMin(),
   3672                                    EndRange.getSignedMin());
   3673         APInt Max = APIntOps::smax(StartRange.getSignedMax(),
   3674                                    EndRange.getSignedMax());
   3675         if (Min.isMinSignedValue() && Max.isMaxSignedValue())
   3676           return setSignedRange(AddRec, ConservativeResult);
   3677         return setSignedRange(AddRec,
   3678           ConservativeResult.intersectWith(ConstantRange(Min, Max+1)));
   3679       }
   3680     }
   3681 
   3682     return setSignedRange(AddRec, ConservativeResult);
   3683   }
   3684 
   3685   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
   3686     // For a SCEVUnknown, ask ValueTracking.
   3687     if (!U->getValue()->getType()->isIntegerTy() && !DL)
   3688       return setSignedRange(U, ConservativeResult);
   3689     unsigned NS = ComputeNumSignBits(U->getValue(), DL);
   3690     if (NS <= 1)
   3691       return setSignedRange(U, ConservativeResult);
   3692     return setSignedRange(U, ConservativeResult.intersectWith(
   3693       ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
   3694                     APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1)));
   3695   }
   3696 
   3697   return setSignedRange(S, ConservativeResult);
   3698 }
   3699 
   3700 /// createSCEV - We know that there is no SCEV for the specified value.
   3701 /// Analyze the expression.
   3702 ///
   3703 const SCEV *ScalarEvolution::createSCEV(Value *V) {
   3704   if (!isSCEVable(V->getType()))
   3705     return getUnknown(V);
   3706 
   3707   unsigned Opcode = Instruction::UserOp1;
   3708   if (Instruction *I = dyn_cast<Instruction>(V)) {
   3709     Opcode = I->getOpcode();
   3710 
   3711     // Don't attempt to analyze instructions in blocks that aren't
   3712     // reachable. Such instructions don't matter, and they aren't required
    3713     // to obey basic rules for definitions dominating uses, which this
   3714     // analysis depends on.
   3715     if (!DT->isReachableFromEntry(I->getParent()))
   3716       return getUnknown(V);
   3717   } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
   3718     Opcode = CE->getOpcode();
   3719   else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
   3720     return getConstant(CI);
   3721   else if (isa<ConstantPointerNull>(V))
   3722     return getConstant(V->getType(), 0);
   3723   else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
   3724     return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee());
   3725   else
   3726     return getUnknown(V);
   3727 
   3728   Operator *U = cast<Operator>(V);
   3729   switch (Opcode) {
   3730   case Instruction::Add: {
   3731     // The simple thing to do would be to just call getSCEV on both operands
   3732     // and call getAddExpr with the result. However if we're looking at a
   3733     // bunch of things all added together, this can be quite inefficient,
   3734     // because it leads to N-1 getAddExpr calls for N ultimate operands.
   3735     // Instead, gather up all the operands and make a single getAddExpr call.
   3736     // LLVM IR canonical form means we need only traverse the left operands.
   3737     //
   3738     // Don't apply this instruction's NSW or NUW flags to the new
   3739     // expression. The instruction may be guarded by control flow that the
   3740     // no-wrap behavior depends on. Non-control-equivalent instructions can be
   3741     // mapped to the same SCEV expression, and it would be incorrect to transfer
   3742     // NSW/NUW semantics to those operations.
   3743     SmallVector<const SCEV *, 4> AddOps;
   3744     AddOps.push_back(getSCEV(U->getOperand(1)));
   3745     for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) {
   3746       unsigned Opcode = Op->getValueID() - Value::InstructionVal;
   3747       if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
   3748         break;
   3749       U = cast<Operator>(Op);
   3750       const SCEV *Op1 = getSCEV(U->getOperand(1));
   3751       if (Opcode == Instruction::Sub)
   3752         AddOps.push_back(getNegativeSCEV(Op1));
   3753       else
   3754         AddOps.push_back(Op1);
   3755     }
   3756     AddOps.push_back(getSCEV(U->getOperand(0)));
   3757     return getAddExpr(AddOps);
   3758   }
   3759   case Instruction::Mul: {
   3760     // Don't transfer NSW/NUW for the same reason as AddExpr.
   3761     SmallVector<const SCEV *, 4> MulOps;
   3762     MulOps.push_back(getSCEV(U->getOperand(1)));
   3763     for (Value *Op = U->getOperand(0);
   3764          Op->getValueID() == Instruction::Mul + Value::InstructionVal;
   3765          Op = U->getOperand(0)) {
   3766       U = cast<Operator>(Op);
   3767       MulOps.push_back(getSCEV(U->getOperand(1)));
   3768     }
   3769     MulOps.push_back(getSCEV(U->getOperand(0)));
   3770     return getMulExpr(MulOps);
   3771   }
   3772   case Instruction::UDiv:
   3773     return getUDivExpr(getSCEV(U->getOperand(0)),
   3774                        getSCEV(U->getOperand(1)));
   3775   case Instruction::Sub:
   3776     return getMinusSCEV(getSCEV(U->getOperand(0)),
   3777                         getSCEV(U->getOperand(1)));
   3778   case Instruction::And:
   3779     // For an expression like x&255 that merely masks off the high bits,
   3780     // use zext(trunc(x)) as the SCEV expression.
   3781     if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
   3782       if (CI->isNullValue())
   3783         return getSCEV(U->getOperand(1));
   3784       if (CI->isAllOnesValue())
   3785         return getSCEV(U->getOperand(0));
   3786       const APInt &A = CI->getValue();
   3787 
   3788       // Instcombine's ShrinkDemandedConstant may strip bits out of
   3789       // constants, obscuring what would otherwise be a low-bits mask.
   3790       // Use computeKnownBits to compute what ShrinkDemandedConstant
   3791       // knew about to reconstruct a low-bits mask value.
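               // For example, (x & 0xF0) in i8 has LZ of 0 and TZ of 4, and is
               // modeled as (zext(trunc(x /u 16) to i4) to i8) * 16.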
   3792       unsigned LZ = A.countLeadingZeros();
   3793       unsigned TZ = A.countTrailingZeros();
   3794       unsigned BitWidth = A.getBitWidth();
   3795       APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
   3796       computeKnownBits(U->getOperand(0), KnownZero, KnownOne, DL);
   3797 
   3798       APInt EffectiveMask =
   3799           APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
   3800       if ((LZ != 0 || TZ != 0) && !((~A & ~KnownZero) & EffectiveMask)) {
   3801         const SCEV *MulCount = getConstant(
   3802             ConstantInt::get(getContext(), APInt::getOneBitSet(BitWidth, TZ)));
   3803         return getMulExpr(
   3804             getZeroExtendExpr(
   3805                 getTruncateExpr(
   3806                     getUDivExactExpr(getSCEV(U->getOperand(0)), MulCount),
   3807                     IntegerType::get(getContext(), BitWidth - LZ - TZ)),
   3808                 U->getType()),
   3809             MulCount);
   3810       }
   3811     }
   3812     break;
   3813 
   3814   case Instruction::Or:
   3815     // If the RHS of the Or is a constant, we may have something like:
   3816     // X*4+1 which got turned into X*4|1.  Handle this as an Add so loop
   3817     // optimizations will transparently handle this case.
   3818     //
   3819     // In order for this transformation to be safe, the LHS must be of the
   3820     // form X*(2^n) and the Or constant must be less than 2^n.
   3821     if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
   3822       const SCEV *LHS = getSCEV(U->getOperand(0));
   3823       const APInt &CIVal = CI->getValue();
   3824       if (GetMinTrailingZeros(LHS) >=
   3825           (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
   3826         // Build a plain add SCEV.
   3827         const SCEV *S = getAddExpr(LHS, getSCEV(CI));
   3828         // If the LHS of the add was an addrec and it has no-wrap flags,
   3829         // transfer the no-wrap flags, since an or won't introduce a wrap.
   3830         if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
   3831           const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
   3832           const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags(
   3833             OldAR->getNoWrapFlags());
   3834         }
   3835         return S;
   3836       }
   3837     }
   3838     break;
   3839   case Instruction::Xor:
   3840     if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
   3841       // If the RHS of the xor is a signbit, then this is just an add.
   3842       // Instcombine turns add of signbit into xor as a strength reduction step.
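               // For example, in i8 the expression (x ^ 0x80) computes the
               // same value as (x + 0x80).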
   3843       if (CI->getValue().isSignBit())
   3844         return getAddExpr(getSCEV(U->getOperand(0)),
   3845                           getSCEV(U->getOperand(1)));
   3846 
   3847       // If the RHS of xor is -1, then this is a not operation.
   3848       if (CI->isAllOnesValue())
   3849         return getNotSCEV(getSCEV(U->getOperand(0)));
   3850 
   3851       // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
   3852       // This is a variant of the check for xor with -1, and it handles
   3853       // the case where instcombine has trimmed non-demanded bits out
   3854       // of an xor with -1.
   3855       if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
   3856         if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
   3857           if (BO->getOpcode() == Instruction::And &&
   3858               LCI->getValue() == CI->getValue())
   3859             if (const SCEVZeroExtendExpr *Z =
   3860                   dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
   3861               Type *UTy = U->getType();
   3862               const SCEV *Z0 = Z->getOperand();
   3863               Type *Z0Ty = Z0->getType();
   3864               unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
   3865 
   3866               // If C is a low-bits mask, the zero extend is serving to
   3867               // mask off the high bits. Complement the operand and
   3868               // re-apply the zext.
   3869               if (APIntOps::isMask(Z0TySize, CI->getValue()))
   3870                 return getZeroExtendExpr(getNotSCEV(Z0), UTy);
   3871 
   3872               // If C is a single bit, it may be in the sign-bit position
   3873               // before the zero-extend. In this case, represent the xor
   3874               // using an add, which is equivalent, and re-apply the zext.
   3875               APInt Trunc = CI->getValue().trunc(Z0TySize);
   3876               if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
   3877                   Trunc.isSignBit())
   3878                 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
   3879                                          UTy);
   3880             }
   3881     }
   3882     break;
   3883 
   3884   case Instruction::Shl:
   3885     // Turn shift left of a constant amount into a multiply.
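             // For example, (x << 3) becomes x * 8.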
   3886     if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
   3887       uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
   3888 
   3889       // If the shift count is not less than the bitwidth, the result of
   3890       // the shift is undefined. Don't try to analyze it, because the
   3891       // resolution chosen here may differ from the resolution chosen in
   3892       // other parts of the compiler.
   3893       if (SA->getValue().uge(BitWidth))
   3894         break;
   3895 
   3896       Constant *X = ConstantInt::get(getContext(),
   3897         APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
   3898       return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
   3899     }
   3900     break;
   3901 
   3902   case Instruction::LShr:
    3903     // Turn logical shift right of a constant into an unsigned divide.
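             // For example, (x >>u 3) becomes x /u 8.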
   3904     if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
   3905       uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
   3906 
   3907       // If the shift count is not less than the bitwidth, the result of
   3908       // the shift is undefined. Don't try to analyze it, because the
   3909       // resolution chosen here may differ from the resolution chosen in
   3910       // other parts of the compiler.
   3911       if (SA->getValue().uge(BitWidth))
   3912         break;
   3913 
   3914       Constant *X = ConstantInt::get(getContext(),
   3915         APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
   3916       return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
   3917     }
   3918     break;
   3919 
   3920   case Instruction::AShr:
   3921     // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
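             // For example, in i32 the pair (x << 24) >>s 24 becomes
             // sext(trunc(x to i8) to i32).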
   3922     if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
   3923       if (Operator *L = dyn_cast<Operator>(U->getOperand(0)))
   3924         if (L->getOpcode() == Instruction::Shl &&
   3925             L->getOperand(1) == U->getOperand(1)) {
   3926           uint64_t BitWidth = getTypeSizeInBits(U->getType());
   3927 
   3928           // If the shift count is not less than the bitwidth, the result of
   3929           // the shift is undefined. Don't try to analyze it, because the
   3930           // resolution chosen here may differ from the resolution chosen in
   3931           // other parts of the compiler.
   3932           if (CI->getValue().uge(BitWidth))
   3933             break;
   3934 
   3935           uint64_t Amt = BitWidth - CI->getZExtValue();
   3936           if (Amt == BitWidth)
   3937             return getSCEV(L->getOperand(0));       // shift by zero --> noop
   3938           return
   3939             getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
   3940                                               IntegerType::get(getContext(),
   3941                                                                Amt)),
   3942                               U->getType());
   3943         }
   3944     break;
   3945 
   3946   case Instruction::Trunc:
   3947     return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
   3948 
   3949   case Instruction::ZExt:
   3950     return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
   3951 
   3952   case Instruction::SExt:
   3953     return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
   3954 
   3955   case Instruction::BitCast:
   3956     // BitCasts are no-op casts so we just eliminate the cast.
   3957     if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
   3958       return getSCEV(U->getOperand(0));
   3959     break;
   3960 
    3961   // It's tempting to handle inttoptr and ptrtoint as no-ops; however, this can
   3962   // lead to pointer expressions which cannot safely be expanded to GEPs,
   3963   // because ScalarEvolution doesn't respect the GEP aliasing rules when
   3964   // simplifying integer expressions.
   3965 
   3966   case Instruction::GetElementPtr:
   3967     return createNodeForGEP(cast<GEPOperator>(U));
   3968 
   3969   case Instruction::PHI:
   3970     return createNodeForPHI(cast<PHINode>(U));
   3971 
   3972   case Instruction::Select:
   3973     // This could be a smax or umax that was lowered earlier.
   3974     // Try to recover it.
   3975     if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
   3976       Value *LHS = ICI->getOperand(0);
   3977       Value *RHS = ICI->getOperand(1);
   3978       switch (ICI->getPredicate()) {
   3979       case ICmpInst::ICMP_SLT:
   3980       case ICmpInst::ICMP_SLE:
   3981         std::swap(LHS, RHS);
   3982         // fall through
   3983       case ICmpInst::ICMP_SGT:
   3984       case ICmpInst::ICMP_SGE:
   3985         // a >s b ? a+x : b+x  ->  smax(a, b)+x
   3986         // a >s b ? b+x : a+x  ->  smin(a, b)+x
   3987         if (LHS->getType() == U->getType()) {
   3988           const SCEV *LS = getSCEV(LHS);
   3989           const SCEV *RS = getSCEV(RHS);
   3990           const SCEV *LA = getSCEV(U->getOperand(1));
   3991           const SCEV *RA = getSCEV(U->getOperand(2));
   3992           const SCEV *LDiff = getMinusSCEV(LA, LS);
   3993           const SCEV *RDiff = getMinusSCEV(RA, RS);
   3994           if (LDiff == RDiff)
   3995             return getAddExpr(getSMaxExpr(LS, RS), LDiff);
   3996           LDiff = getMinusSCEV(LA, RS);
   3997           RDiff = getMinusSCEV(RA, LS);
   3998           if (LDiff == RDiff)
   3999             return getAddExpr(getSMinExpr(LS, RS), LDiff);
   4000         }
   4001         break;
   4002       case ICmpInst::ICMP_ULT:
   4003       case ICmpInst::ICMP_ULE:
   4004         std::swap(LHS, RHS);
   4005         // fall through
   4006       case ICmpInst::ICMP_UGT:
   4007       case ICmpInst::ICMP_UGE:
   4008         // a >u b ? a+x : b+x  ->  umax(a, b)+x
   4009         // a >u b ? b+x : a+x  ->  umin(a, b)+x
   4010         if (LHS->getType() == U->getType()) {
   4011           const SCEV *LS = getSCEV(LHS);
   4012           const SCEV *RS = getSCEV(RHS);
   4013           const SCEV *LA = getSCEV(U->getOperand(1));
   4014           const SCEV *RA = getSCEV(U->getOperand(2));
   4015           const SCEV *LDiff = getMinusSCEV(LA, LS);
   4016           const SCEV *RDiff = getMinusSCEV(RA, RS);
   4017           if (LDiff == RDiff)
   4018             return getAddExpr(getUMaxExpr(LS, RS), LDiff);
   4019           LDiff = getMinusSCEV(LA, RS);
   4020           RDiff = getMinusSCEV(RA, LS);
   4021           if (LDiff == RDiff)
   4022             return getAddExpr(getUMinExpr(LS, RS), LDiff);
   4023         }
   4024         break;
   4025       case ICmpInst::ICMP_NE:
   4026         // n != 0 ? n+x : 1+x  ->  umax(n, 1)+x
   4027         if (LHS->getType() == U->getType() &&
   4028             isa<ConstantInt>(RHS) &&
   4029             cast<ConstantInt>(RHS)->isZero()) {
   4030           const SCEV *One = getConstant(LHS->getType(), 1);
   4031           const SCEV *LS = getSCEV(LHS);
   4032           const SCEV *LA = getSCEV(U->getOperand(1));
   4033           const SCEV *RA = getSCEV(U->getOperand(2));
   4034           const SCEV *LDiff = getMinusSCEV(LA, LS);
   4035           const SCEV *RDiff = getMinusSCEV(RA, One);
   4036           if (LDiff == RDiff)
   4037             return getAddExpr(getUMaxExpr(One, LS), LDiff);
   4038         }
   4039         break;
   4040       case ICmpInst::ICMP_EQ:
   4041         // n == 0 ? 1+x : n+x  ->  umax(n, 1)+x
   4042         if (LHS->getType() == U->getType() &&
   4043             isa<ConstantInt>(RHS) &&
   4044             cast<ConstantInt>(RHS)->isZero()) {
   4045           const SCEV *One = getConstant(LHS->getType(), 1);
   4046           const SCEV *LS = getSCEV(LHS);
   4047           const SCEV *LA = getSCEV(U->getOperand(1));
   4048           const SCEV *RA = getSCEV(U->getOperand(2));
   4049           const SCEV *LDiff = getMinusSCEV(LA, One);
   4050           const SCEV *RDiff = getMinusSCEV(RA, LS);
   4051           if (LDiff == RDiff)
   4052             return getAddExpr(getUMaxExpr(One, LS), LDiff);
   4053         }
   4054         break;
   4055       default:
   4056         break;
   4057       }
   4058     }
   4059 
   4060   default: // We cannot analyze this expression.
   4061     break;
   4062   }
   4063 
   4064   return getUnknown(V);
   4065 }
   4066 
   4067 
   4068 
   4069 //===----------------------------------------------------------------------===//
   4070 //                   Iteration Count Computation Code
   4071 //
   4072 
   4073 /// getSmallConstantTripCount - Returns the maximum trip count of this loop as a
   4074 /// normal unsigned value. Returns 0 if the trip count is unknown or not
   4075 /// constant. Will also return 0 if the maximum trip count is very large (>=
   4076 /// 2^32).
   4077 ///
   4078 /// This "trip count" assumes that control exits via ExitingBlock. More
   4079 /// precisely, it is the number of times that control may reach ExitingBlock
   4080 /// before taking the branch. For loops with multiple exits, it may not be the
   4081 /// number times that the loop header executes because the loop may exit
   4082 /// prematurely via another branch.
   4083 ///
   4084 /// FIXME: We conservatively call getBackedgeTakenCount(L) instead of
   4085 /// getExitCount(L, ExitingBlock) to compute a safe trip count considering all
   4086 /// loop exits. getExitCount() may return an exact count for this branch
   4087 /// assuming no-signed-wrap. The number of well-defined iterations may actually
   4088 /// be higher than this trip count if this exit test is skipped and the loop
   4089 /// exits via a different branch. Ideally, getExitCount() would know whether it
   4090 /// depends on a NSW assumption, and we would only fall back to a conservative
   4091 /// trip count in that case.
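         /// For example, a loop whose backedge-taken count is the constant 9
         /// has a small constant trip count of 10.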
   4092 unsigned ScalarEvolution::
   4093 getSmallConstantTripCount(Loop *L, BasicBlock * /*ExitingBlock*/) {
   4094   const SCEVConstant *ExitCount =
   4095     dyn_cast<SCEVConstant>(getBackedgeTakenCount(L));
   4096   if (!ExitCount)
   4097     return 0;
   4098 
   4099   ConstantInt *ExitConst = ExitCount->getValue();
   4100 
   4101   // Guard against huge trip counts.
   4102   if (ExitConst->getValue().getActiveBits() > 32)
   4103     return 0;
   4104 
   4105   // In case of integer overflow, this returns 0, which is correct.
   4106   return ((unsigned)ExitConst->getZExtValue()) + 1;
   4107 }
   4108 
   4109 /// getSmallConstantTripMultiple - Returns the largest constant divisor of the
   4110 /// trip count of this loop as a normal unsigned value, if possible. This
   4111 /// means that the actual trip count is always a multiple of the returned
   4112 /// value (don't forget the trip count could very well be zero as well!).
   4113 ///
    4114 /// Returns 1 if the trip count is unknown or not guaranteed to be a
    4115 /// multiple of a constant (which is also the case if the trip count is
    4116 /// simply constant; use getSmallConstantTripCount for that case). It will
    4117 /// also return 1 if the trip count is very large (>= 2^32).
   4118 ///
   4119 /// As explained in the comments for getSmallConstantTripCount, this assumes
   4120 /// that control exits the loop via ExitingBlock.
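         /// For example, a backedge-taken count of 4*n - 1 gives a trip count
         /// of 4*n, so the returned multiple would be 4.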
   4121 unsigned ScalarEvolution::
   4122 getSmallConstantTripMultiple(Loop *L, BasicBlock * /*ExitingBlock*/) {
   4123   const SCEV *ExitCount = getBackedgeTakenCount(L);
   4124   if (ExitCount == getCouldNotCompute())
   4125     return 1;
   4126 
   4127   // Get the trip count from the BE count by adding 1.
   4128   const SCEV *TCMul = getAddExpr(ExitCount,
   4129                                  getConstant(ExitCount->getType(), 1));
   4130   // FIXME: SCEV distributes multiplication as V1*C1 + V2*C1. We could attempt
   4131   // to factor simple cases.
   4132   if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(TCMul))
   4133     TCMul = Mul->getOperand(0);
   4134 
   4135   const SCEVConstant *MulC = dyn_cast<SCEVConstant>(TCMul);
   4136   if (!MulC)
   4137     return 1;
   4138 
   4139   ConstantInt *Result = MulC->getValue();
   4140 
   4141   // Guard against huge trip counts (this requires checking
   4142   // for zero to handle the case where the trip count == -1 and the
   4143   // addition wraps).
   4144   if (!Result || Result->getValue().getActiveBits() > 32 ||
   4145       Result->getValue().getActiveBits() == 0)
   4146     return 1;
   4147 
   4148   return (unsigned)Result->getZExtValue();
   4149 }
   4150 
    4151 /// getExitCount - Get the expression for the number of loop iterations for
    4152 /// which this loop is guaranteed not to exit via ExitingBlock. Otherwise
    4153 /// return SCEVCouldNotCompute.
   4154 const SCEV *ScalarEvolution::getExitCount(Loop *L, BasicBlock *ExitingBlock) {
   4155   return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
   4156 }
   4157 
   4158 /// getBackedgeTakenCount - If the specified loop has a predictable
   4159 /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
   4160 /// object. The backedge-taken count is the number of times the loop header
   4161 /// will be branched to from within the loop. This is one less than the
   4162 /// trip count of the loop, since it doesn't count the first iteration,
   4163 /// when the header is branched to from outside the loop.
   4164 ///
   4165 /// Note that it is not valid to call this method on a loop without a
   4166 /// loop-invariant backedge-taken count (see
   4167 /// hasLoopInvariantBackedgeTakenCount).
   4168 ///
   4169 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
   4170   return getBackedgeTakenInfo(L).getExact(this);
   4171 }
   4172 
   4173 /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
   4174 /// return the least SCEV value that is known never to be less than the
   4175 /// actual backedge taken count.
   4176 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
   4177   return getBackedgeTakenInfo(L).getMax(this);
   4178 }
   4179 
   4180 /// PushLoopPHIs - Push PHI nodes in the header of the given loop
   4181 /// onto the given Worklist.
   4182 static void
   4183 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
   4184   BasicBlock *Header = L->getHeader();
   4185 
   4186   // Push all Loop-header PHIs onto the Worklist stack.
   4187   for (BasicBlock::iterator I = Header->begin();
   4188        PHINode *PN = dyn_cast<PHINode>(I); ++I)
   4189     Worklist.push_back(PN);
   4190 }
   4191 
   4192 const ScalarEvolution::BackedgeTakenInfo &
   4193 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
   4194   // Initially insert an invalid entry for this loop. If the insertion
   4195   // succeeds, proceed to actually compute a backedge-taken count and
   4196   // update the value. The temporary CouldNotCompute value tells SCEV
   4197   // code elsewhere that it shouldn't attempt to request a new
   4198   // backedge-taken count, which could result in infinite recursion.
   4199   std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
   4200     BackedgeTakenCounts.insert(std::make_pair(L, BackedgeTakenInfo()));
   4201   if (!Pair.second)
   4202     return Pair.first->second;
   4203 
   4204   // ComputeBackedgeTakenCount may allocate memory for its result. Inserting it
   4205   // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
   4206   // must be cleared in this scope.
   4207   BackedgeTakenInfo Result = ComputeBackedgeTakenCount(L);
   4208 
   4209   if (Result.getExact(this) != getCouldNotCompute()) {
   4210     assert(isLoopInvariant(Result.getExact(this), L) &&
   4211            isLoopInvariant(Result.getMax(this), L) &&
   4212            "Computed backedge-taken count isn't loop invariant for loop!");
   4213     ++NumTripCountsComputed;
   4214   }
   4215   else if (Result.getMax(this) == getCouldNotCompute() &&
   4216            isa<PHINode>(L->getHeader()->begin())) {
   4217     // Only count loops that have phi nodes as not being computable.
   4218     ++NumTripCountsNotComputed;
   4219   }
   4220 
   4221   // Now that we know more about the trip count for this loop, forget any
   4222   // existing SCEV values for PHI nodes in this loop since they are only
   4223   // conservative estimates made without the benefit of trip count
   4224   // information. This is similar to the code in forgetLoop, except that
   4225   // it handles SCEVUnknown PHI nodes specially.
   4226   if (Result.hasAnyInfo()) {
   4227     SmallVector<Instruction *, 16> Worklist;
   4228     PushLoopPHIs(L, Worklist);
   4229 
   4230     SmallPtrSet<Instruction *, 8> Visited;
   4231     while (!Worklist.empty()) {
   4232       Instruction *I = Worklist.pop_back_val();
   4233       if (!Visited.insert(I)) continue;
   4234 
   4235       ValueExprMapType::iterator It =
   4236         ValueExprMap.find_as(static_cast<Value *>(I));
   4237       if (It != ValueExprMap.end()) {
   4238         const SCEV *Old = It->second;
   4239 
   4240         // SCEVUnknown for a PHI either means that it has an unrecognized
    4241         // structure, or it's a PHI that's in the process of being computed
    4242         // by createNodeForPHI.  In the former case, additional loop trip
    4243         // count information isn't going to change anything. In the latter
   4244         // case, createNodeForPHI will perform the necessary updates on its
   4245         // own when it gets to that point.
   4246         if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
   4247           forgetMemoizedResults(Old);
   4248           ValueExprMap.erase(It);
   4249         }
   4250         if (PHINode *PN = dyn_cast<PHINode>(I))
   4251           ConstantEvolutionLoopExitValue.erase(PN);
   4252       }
   4253 
   4254       PushDefUseChildren(I, Worklist);
   4255     }
   4256   }
   4257 
   4258   // Re-lookup the insert position, since the call to
   4259   // ComputeBackedgeTakenCount above could result in a
    4260   // recursive call to getBackedgeTakenInfo (on a different
   4261   // loop), which would invalidate the iterator computed
   4262   // earlier.
   4263   return BackedgeTakenCounts.find(L)->second = Result;
   4264 }
   4265 
   4266 /// forgetLoop - This method should be called by the client when it has
    4267 /// changed a loop in a way that may affect ScalarEvolution's ability to
   4268 /// compute a trip count, or if the loop is deleted.
   4269 void ScalarEvolution::forgetLoop(const Loop *L) {
   4270   // Drop any stored trip count value.
   4271   DenseMap<const Loop*, BackedgeTakenInfo>::iterator BTCPos =
   4272     BackedgeTakenCounts.find(L);
   4273   if (BTCPos != BackedgeTakenCounts.end()) {
   4274     BTCPos->second.clear();
   4275     BackedgeTakenCounts.erase(BTCPos);
   4276   }
   4277 
   4278   // Drop information about expressions based on loop-header PHIs.
   4279   SmallVector<Instruction *, 16> Worklist;
   4280   PushLoopPHIs(L, Worklist);
   4281 
   4282   SmallPtrSet<Instruction *, 8> Visited;
   4283   while (!Worklist.empty()) {
   4284     Instruction *I = Worklist.pop_back_val();
   4285     if (!Visited.insert(I)) continue;
   4286 
   4287     ValueExprMapType::iterator It =
   4288       ValueExprMap.find_as(static_cast<Value *>(I));
   4289     if (It != ValueExprMap.end()) {
   4290       forgetMemoizedResults(It->second);
   4291       ValueExprMap.erase(It);
   4292       if (PHINode *PN = dyn_cast<PHINode>(I))
   4293         ConstantEvolutionLoopExitValue.erase(PN);
   4294     }
   4295 
   4296     PushDefUseChildren(I, Worklist);
   4297   }
   4298 
   4299   // Forget all contained loops too, to avoid dangling entries in the
   4300   // ValuesAtScopes map.
   4301   for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
   4302     forgetLoop(*I);
   4303 }
   4304 
   4305 /// forgetValue - This method should be called by the client when it has
    4306 /// changed a value in a way that may affect its value, or which may
   4307 /// disconnect it from a def-use chain linking it to a loop.
   4308 void ScalarEvolution::forgetValue(Value *V) {
   4309   Instruction *I = dyn_cast<Instruction>(V);
   4310   if (!I) return;
   4311 
   4312   // Drop information about expressions based on loop-header PHIs.
   4313   SmallVector<Instruction *, 16> Worklist;
   4314   Worklist.push_back(I);
   4315 
   4316   SmallPtrSet<Instruction *, 8> Visited;
   4317   while (!Worklist.empty()) {
   4318     I = Worklist.pop_back_val();
   4319     if (!Visited.insert(I)) continue;
   4320 
   4321     ValueExprMapType::iterator It =
   4322       ValueExprMap.find_as(static_cast<Value *>(I));
   4323     if (It != ValueExprMap.end()) {
   4324       forgetMemoizedResults(It->second);
   4325       ValueExprMap.erase(It);
   4326       if (PHINode *PN = dyn_cast<PHINode>(I))
   4327         ConstantEvolutionLoopExitValue.erase(PN);
   4328     }
   4329 
   4330     PushDefUseChildren(I, Worklist);
   4331   }
   4332 }
   4333 
   4334 /// getExact - Get the exact loop backedge taken count considering all loop
    4335 /// exits. A computable result can only be returned for loops with a single exit.
   4336 /// Returning the minimum taken count among all exits is incorrect because one
    4337 /// of the loop's exit limits may have been skipped. HowFarToZero assumes that
   4338 /// the limit of each loop test is never skipped. This is a valid assumption as
   4339 /// long as the loop exits via that test. For precise results, it is the
   4340 /// caller's responsibility to specify the relevant loop exit using
   4341 /// getExact(ExitingBlock, SE).
   4342 const SCEV *
   4343 ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE) const {
   4344   // If any exits were not computable, the loop is not computable.
   4345   if (!ExitNotTaken.isCompleteList()) return SE->getCouldNotCompute();
   4346 
   4347   // We need exactly one computable exit.
   4348   if (!ExitNotTaken.ExitingBlock) return SE->getCouldNotCompute();
   4349   assert(ExitNotTaken.ExactNotTaken && "uninitialized not-taken info");
   4350 
   4351   const SCEV *BECount = nullptr;
   4352   for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
   4353        ENT != nullptr; ENT = ENT->getNextExit()) {
   4354 
   4355     assert(ENT->ExactNotTaken != SE->getCouldNotCompute() && "bad exit SCEV");
   4356 
   4357     if (!BECount)
   4358       BECount = ENT->ExactNotTaken;
   4359     else if (BECount != ENT->ExactNotTaken)
   4360       return SE->getCouldNotCompute();
   4361   }
   4362   assert(BECount && "Invalid not taken count for loop exit");
   4363   return BECount;
   4364 }
   4365 
   4366 /// getExact - Get the exact not taken count for this loop exit.
   4367 const SCEV *
   4368 ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock,
   4369                                              ScalarEvolution *SE) const {
   4370   for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
   4371        ENT != nullptr; ENT = ENT->getNextExit()) {
   4372 
   4373     if (ENT->ExitingBlock == ExitingBlock)
   4374       return ENT->ExactNotTaken;
   4375   }
   4376   return SE->getCouldNotCompute();
   4377 }
   4378 
   4379 /// getMax - Get the max backedge taken count for the loop.
   4380 const SCEV *
   4381 ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const {
   4382   return Max ? Max : SE->getCouldNotCompute();
   4383 }
   4384 
   4385 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S,
   4386                                                     ScalarEvolution *SE) const {
   4387   if (Max && Max != SE->getCouldNotCompute() && SE->hasOperand(Max, S))
   4388     return true;
   4389 
   4390   if (!ExitNotTaken.ExitingBlock)
   4391     return false;
   4392 
   4393   for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
   4394        ENT != nullptr; ENT = ENT->getNextExit()) {
   4395 
   4396     if (ENT->ExactNotTaken != SE->getCouldNotCompute()
   4397         && SE->hasOperand(ENT->ExactNotTaken, S)) {
   4398       return true;
   4399     }
   4400   }
   4401   return false;
   4402 }
   4403 
   4404 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
   4405 /// computable exit into a persistent ExitNotTakenInfo array.
   4406 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
   4407   SmallVectorImpl< std::pair<BasicBlock *, const SCEV *> > &ExitCounts,
   4408   bool Complete, const SCEV *MaxCount) : Max(MaxCount) {
   4409 
   4410   if (!Complete)
   4411     ExitNotTaken.setIncomplete();
   4412 
   4413   unsigned NumExits = ExitCounts.size();
   4414   if (NumExits == 0) return;
   4415 
   4416   ExitNotTaken.ExitingBlock = ExitCounts[0].first;
   4417   ExitNotTaken.ExactNotTaken = ExitCounts[0].second;
   4418   if (NumExits == 1) return;
   4419 
   4420   // Handle the rare case of multiple computable exits.
   4421   ExitNotTakenInfo *ENT = new ExitNotTakenInfo[NumExits-1];
   4422 
   4423   ExitNotTakenInfo *PrevENT = &ExitNotTaken;
   4424   for (unsigned i = 1; i < NumExits; ++i, PrevENT = ENT, ++ENT) {
   4425     PrevENT->setNextExit(ENT);
   4426     ENT->ExitingBlock = ExitCounts[i].first;
   4427     ENT->ExactNotTaken = ExitCounts[i].second;
   4428   }
   4429 }
   4430 
   4431 /// clear - Invalidate this result and free the ExitNotTakenInfo array.
   4432 void ScalarEvolution::BackedgeTakenInfo::clear() {
   4433   ExitNotTaken.ExitingBlock = nullptr;
   4434   ExitNotTaken.ExactNotTaken = nullptr;
   4435   delete[] ExitNotTaken.getNextExit();
   4436 }
   4437 
   4438 /// ComputeBackedgeTakenCount - Compute the number of times the backedge
   4439 /// of the specified loop will execute.
   4440 ScalarEvolution::BackedgeTakenInfo
   4441 ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
   4442   SmallVector<BasicBlock *, 8> ExitingBlocks;
   4443   L->getExitingBlocks(ExitingBlocks);
   4444 
   4445   SmallVector<std::pair<BasicBlock *, const SCEV *>, 4> ExitCounts;
   4446   bool CouldComputeBECount = true;
   4447   BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
   4448   const SCEV *MustExitMaxBECount = nullptr;
   4449   const SCEV *MayExitMaxBECount = nullptr;
   4450 
   4451   // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
   4452   // and compute maxBECount.
   4453   for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
   4454     BasicBlock *ExitBB = ExitingBlocks[i];
   4455     ExitLimit EL = ComputeExitLimit(L, ExitBB);
   4456 
   4457     // 1. For each exit that can be computed, add an entry to ExitCounts.
   4458     // CouldComputeBECount is true only if all exits can be computed.
   4459     if (EL.Exact == getCouldNotCompute())
   4460       // We couldn't compute an exact value for this exit, so
   4461       // we won't be able to compute an exact value for the loop.
   4462       CouldComputeBECount = false;
   4463     else
   4464       ExitCounts.push_back(std::make_pair(ExitBB, EL.Exact));
   4465 
   4466     // 2. Derive the loop's MaxBECount from each exit's max number of
   4467     // non-exiting iterations. Partition the loop exits into two kinds:
   4468     // LoopMustExits and LoopMayExits.
   4469     //
   4470     // A LoopMustExit meets two requirements:
   4471     //
   4472     // (a) Its ExitLimit.MustExit flag must be set which indicates that the exit
   4473     // test condition cannot be skipped (the tested variable has unit stride or
   4474     // the test is less-than or greater-than, rather than a strict inequality).
   4475     //
   4476     // (b) It must dominate the loop latch, hence must be tested on every loop
   4477     // iteration.
   4478     //
   4479     // If any computable LoopMustExit is found, then MaxBECount is the minimum
   4480     // EL.Max of computable LoopMustExits. Otherwise, MaxBECount is
   4481     // conservatively the maximum EL.Max, where CouldNotCompute is considered
   4482     // greater than any computable EL.Max.
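             // For example, given one computable LoopMustExit with an EL.Max
             // of 10 and one LoopMayExit with an EL.Max of 100, MaxBECount is
             // 10.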
   4483     if (EL.MustExit && EL.Max != getCouldNotCompute() && Latch &&
   4484         DT->dominates(ExitBB, Latch)) {
   4485       if (!MustExitMaxBECount)
   4486         MustExitMaxBECount = EL.Max;
   4487       else {
   4488         MustExitMaxBECount =
   4489           getUMinFromMismatchedTypes(MustExitMaxBECount, EL.Max);
   4490       }
   4491     } else if (MayExitMaxBECount != getCouldNotCompute()) {
   4492       if (!MayExitMaxBECount || EL.Max == getCouldNotCompute())
   4493         MayExitMaxBECount = EL.Max;
   4494       else {
   4495         MayExitMaxBECount =
   4496           getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.Max);
   4497       }
   4498     }
   4499   }
   4500   const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
   4501     (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
   4502   return BackedgeTakenInfo(ExitCounts, CouldComputeBECount, MaxBECount);
   4503 }
   4504 
   4505 /// ComputeExitLimit - Compute the number of times the backedge of the specified
   4506 /// loop will execute if it exits via the specified block.
   4507 ScalarEvolution::ExitLimit
   4508 ScalarEvolution::ComputeExitLimit(const Loop *L, BasicBlock *ExitingBlock) {
   4509 
   4510   // Okay, we've chosen an exiting block.  See what condition causes us to
   4511   // exit at this block and remember the exit block and whether all other targets
   4512   // lead to the loop header.
   4513   bool MustExecuteLoopHeader = true;
   4514   BasicBlock *Exit = nullptr;
   4515   for (succ_iterator SI = succ_begin(ExitingBlock), SE = succ_end(ExitingBlock);
   4516        SI != SE; ++SI)
   4517     if (!L->contains(*SI)) {
   4518       if (Exit) // Multiple exit successors.
   4519         return getCouldNotCompute();
   4520       Exit = *SI;
   4521     } else if (*SI != L->getHeader()) {
   4522       MustExecuteLoopHeader = false;
   4523     }
   4524 
   4525   // At this point, we know we have a conditional branch that determines whether
   4526   // the loop is exited.  However, we don't know if the branch is executed each
   4527   // time through the loop.  If not, then the execution count of the branch will
   4528   // not be equal to the trip count of the loop.
   4529   //
    4530   // Currently we check for this by testing whether the Exit branch goes to
   4531   // the loop header.  If so, we know it will always execute the same number of
   4532   // times as the loop.  We also handle the case where the exit block *is* the
   4533   // loop header.  This is common for un-rotated loops.
   4534   //
   4535   // If both of those tests fail, walk up the unique predecessor chain to the
   4536   // header, stopping if there is an edge that doesn't exit the loop. If the
   4537   // header is reached, the execution count of the branch will be equal to the
   4538   // trip count of the loop.
   4539   //
   4540   //  More extensive analysis could be done to handle more cases here.
   4541   //
   4542   if (!MustExecuteLoopHeader && ExitingBlock != L->getHeader()) {
   4543     // The simple checks failed, try climbing the unique predecessor chain
   4544     // up to the header.
   4545     bool Ok = false;
   4546     for (BasicBlock *BB = ExitingBlock; BB; ) {
   4547       BasicBlock *Pred = BB->getUniquePredecessor();
   4548       if (!Pred)
   4549         return getCouldNotCompute();
   4550       TerminatorInst *PredTerm = Pred->getTerminator();
   4551       for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
   4552         BasicBlock *PredSucc = PredTerm->getSuccessor(i);
   4553         if (PredSucc == BB)
   4554           continue;
   4555         // If the predecessor has a successor that isn't BB and isn't
   4556         // outside the loop, assume the worst.
   4557         if (L->contains(PredSucc))
   4558           return getCouldNotCompute();
   4559       }
   4560       if (Pred == L->getHeader()) {
   4561         Ok = true;
   4562         break;
   4563       }
   4564       BB = Pred;
   4565     }
   4566     if (!Ok)
   4567       return getCouldNotCompute();
   4568   }
   4569 
   4570   TerminatorInst *Term = ExitingBlock->getTerminator();
   4571   if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
   4572     assert(BI->isConditional() && "If unconditional, it can't be in loop!");
   4573     // Proceed to the next level to examine the exit condition expression.
   4574     return ComputeExitLimitFromCond(L, BI->getCondition(), BI->getSuccessor(0),
   4575                                     BI->getSuccessor(1),
   4576                                     /*IsSubExpr=*/false);
   4577   }
   4578 
   4579   if (SwitchInst *SI = dyn_cast<SwitchInst>(Term))
   4580     return ComputeExitLimitFromSingleExitSwitch(L, SI, Exit,
   4581                                                 /*IsSubExpr=*/false);
   4582 
   4583   return getCouldNotCompute();
   4584 }
   4585 
   4586 /// ComputeExitLimitFromCond - Compute the number of times the
   4587 /// backedge of the specified loop will execute if its exit condition
   4588 /// were a conditional branch of ExitCond, TBB, and FBB.
   4589 ///
   4590 /// @param IsSubExpr is true if ExitCond does not directly control the exit
   4591 /// branch. In this case, we cannot assume that the loop only exits when the
   4592 /// condition is true and cannot infer that failing to meet the condition prior
   4593 /// to integer wraparound results in undefined behavior.
   4594 ScalarEvolution::ExitLimit
   4595 ScalarEvolution::ComputeExitLimitFromCond(const Loop *L,
   4596                                           Value *ExitCond,
   4597                                           BasicBlock *TBB,
   4598                                           BasicBlock *FBB,
   4599                                           bool IsSubExpr) {
   4600   // Check if the controlling expression for this loop is an And or Or.
   4601   if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
   4602     if (BO->getOpcode() == Instruction::And) {
   4603       // Recurse on the operands of the and.
   4604       bool EitherMayExit = L->contains(TBB);
   4605       ExitLimit EL0 = ComputeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB,
   4606                                                IsSubExpr || EitherMayExit);
   4607       ExitLimit EL1 = ComputeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB,
   4608                                                IsSubExpr || EitherMayExit);
   4609       const SCEV *BECount = getCouldNotCompute();
   4610       const SCEV *MaxBECount = getCouldNotCompute();
   4611       bool MustExit = false;
   4612       if (EitherMayExit) {
   4613         // Both conditions must be true for the loop to continue executing.
   4614         // Choose the less conservative count.
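                 // For example, a latch guarded by (i != a && i != b) exits as
                 // soon as either subcondition fails, so the backedge-taken
                 // count is the umin of the two subcondition counts.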
        if (EL0.Exact == getCouldNotCompute() ||
            EL1.Exact == getCouldNotCompute())
          BECount = getCouldNotCompute();
        else
          BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact);
        if (EL0.Max == getCouldNotCompute())
          MaxBECount = EL1.Max;
        else if (EL1.Max == getCouldNotCompute())
          MaxBECount = EL0.Max;
        else
          MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max);
        MustExit = EL0.MustExit || EL1.MustExit;
      } else {
        // Both conditions must be true at the same time for the loop to exit.
        // For now, be conservative.
        assert(L->contains(FBB) && "Loop block has no successor in loop!");
        if (EL0.Max == EL1.Max)
          MaxBECount = EL0.Max;
        if (EL0.Exact == EL1.Exact)
          BECount = EL0.Exact;
        MustExit = EL0.MustExit && EL1.MustExit;
      }

      return ExitLimit(BECount, MaxBECount, MustExit);
    }
    if (BO->getOpcode() == Instruction::Or) {
      // Recurse on the operands of the or.
      bool EitherMayExit = L->contains(FBB);
      ExitLimit EL0 = ComputeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB,
                                               IsSubExpr || EitherMayExit);
      ExitLimit EL1 = ComputeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB,
                                               IsSubExpr || EitherMayExit);
      const SCEV *BECount = getCouldNotCompute();
      const SCEV *MaxBECount = getCouldNotCompute();
      bool MustExit = false;
      if (EitherMayExit) {
        // Both conditions must be false for the loop to continue executing.
        // Choose the less conservative count.
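        // Illustrative example: with a latch like
        //   br i1 (%c0 | %c1), label %exit, label %body
        // the loop exits as soon as either condition becomes true, so the
        // unsigned min of the two operand counts applies here as well.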
        if (EL0.Exact == getCouldNotCompute() ||
            EL1.Exact == getCouldNotCompute())
          BECount = getCouldNotCompute();
        else
          BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact);
        if (EL0.Max == getCouldNotCompute())
          MaxBECount = EL1.Max;
        else if (EL1.Max == getCouldNotCompute())
          MaxBECount = EL0.Max;
        else
          MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max);
        MustExit = EL0.MustExit || EL1.MustExit;
      } else {
        // Both conditions must be false at the same time for the loop to exit.
        // For now, be conservative.
        assert(L->contains(TBB) && "Loop block has no successor in loop!");
        if (EL0.Max == EL1.Max)
          MaxBECount = EL0.Max;
        if (EL0.Exact == EL1.Exact)
          BECount = EL0.Exact;
        MustExit = EL0.MustExit && EL1.MustExit;
      }

      return ExitLimit(BECount, MaxBECount, MustExit);
    }
  }

  // With an icmp, it may be feasible to compute an exact backedge-taken count.
  // Proceed to the next level to examine the icmp.
  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
    return ComputeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, IsSubExpr);

  // Check for a constant condition. These are normally stripped out by
  // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
  // preserve the CFG and is temporarily leaving constant conditions
  // in place.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
    if (L->contains(FBB) == !CI->getZExtValue())
      // The backedge is always taken.
      return getCouldNotCompute();
    else
      // The backedge is never taken.
      return getConstant(CI->getType(), 0);
  }

  // If it's not an integer or pointer comparison then compute it the hard way.
  return ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB));
}

/// ComputeExitLimitFromICmp - Compute the number of times the
/// backedge of the specified loop will execute if its exit condition
/// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
ScalarEvolution::ExitLimit
ScalarEvolution::ComputeExitLimitFromICmp(const Loop *L,
                                          ICmpInst *ExitCond,
                                          BasicBlock *TBB,
                                          BasicBlock *FBB,
                                          bool IsSubExpr) {

  // If the condition was exit on true, convert the condition to exit on false.
  ICmpInst::Predicate Cond;
  if (!L->contains(FBB))
    Cond = ExitCond->getPredicate();
  else
    Cond = ExitCond->getInversePredicate();
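  // From here on, Cond is the predicate under which the loop keeps running;
  // the loop exits the first time Cond evaluates to false.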

  // Handle common loops like: for (X = "string"; *X; ++X)
  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
    if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
      ExitLimit ItCnt =
        ComputeLoadConstantCompareExitLimit(LI, RHS, L, Cond);
      if (ItCnt.hasAnyInfo())
        return ItCnt;
    }

  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));

  // Try to evaluate any dependencies out of the loop.
  LHS = getSCEVAtScope(LHS, L);
  RHS = getSCEVAtScope(RHS, L);

  // At this point, we would like to compute for how many iterations of the
  // loop the predicate will evaluate to true for these inputs.
  if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
    // If there is a loop-invariant, force it into the RHS.
    std::swap(LHS, RHS);
    Cond = ICmpInst::getSwappedPredicate(Cond);
  }

  // Simplify the operands before analyzing them.
  (void)SimplifyICmpOperands(Cond, LHS, RHS);

  // If we have a comparison of a chrec against a constant, try to use value
  // ranges to answer this query.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
      if (AddRec->getLoop() == L) {
        // Form the constant range.
        ConstantRange CompRange(
            ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));

        const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
        if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
      }

  switch (Cond) {
  case ICmpInst::ICMP_NE: {                     // while (X != Y)
    // Convert to: while (X-Y != 0)
    ExitLimit EL = HowFarToZero(getMinusSCEV(LHS, RHS), L, IsSubExpr);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_EQ: {                     // while (X == Y)
    // Convert to: while (X-Y == 0)
    ExitLimit EL = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_ULT: {                    // while (X < Y)
    bool IsSigned = Cond == ICmpInst::ICMP_SLT;
    ExitLimit EL = HowManyLessThans(LHS, RHS, L, IsSigned, IsSubExpr);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_UGT: {                    // while (X > Y)
    bool IsSigned = Cond == ICmpInst::ICMP_SGT;
    ExitLimit EL = HowManyGreaterThans(LHS, RHS, L, IsSigned, IsSubExpr);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  default:
#if 0
    dbgs() << "ComputeBackedgeTakenCount ";
    if (ExitCond->getOperand(0)->getType()->isUnsigned())
      dbgs() << "[unsigned] ";
    dbgs() << *LHS << "   "
         << Instruction::getOpcodeName(Instruction::ICmp)
         << "   " << *RHS << "\n";
#endif
    break;
  }
  return ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB));
}

ScalarEvolution::ExitLimit
ScalarEvolution::ComputeExitLimitFromSingleExitSwitch(const Loop *L,
                                                      SwitchInst *Switch,
                                                      BasicBlock *ExitingBlock,
                                                      bool IsSubExpr) {
  assert(!L->contains(ExitingBlock) &&
         "Exit block must not be contained in the loop!");

  // Give up if the exit is the default dest of a switch.
  if (Switch->getDefaultDest() == ExitingBlock)
    return getCouldNotCompute();

  assert(L->contains(Switch->getDefaultDest()) &&
         "Default case must not exit the loop!");
  const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
  const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));

  // while (X != Y) --> while (X-Y != 0)
  ExitLimit EL = HowFarToZero(getMinusSCEV(LHS, RHS), L, IsSubExpr);
  if (EL.hasAnyInfo())
    return EL;

  return getCouldNotCompute();
}

static ConstantInt *
EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
                                ScalarEvolution &SE) {
  const SCEV *InVal = SE.getConstant(C);
  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
  assert(isa<SCEVConstant>(Val) &&
         "Evaluation of SCEV at constant didn't fold correctly?");
  return cast<SCEVConstant>(Val)->getValue();
}

/// ComputeLoadConstantCompareExitLimit - Given an exit condition of
/// 'icmp op load X, cst', try to see if we can compute the backedge
/// execution count.
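///
/// For example, in "for (i = 0; Table[i] != 0; ++i)" where Table is a
/// constant global array, each iteration's load folds to a constant, so the
/// first index holding a zero entry yields the exact backedge-taken count.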
ScalarEvolution::ExitLimit
ScalarEvolution::ComputeLoadConstantCompareExitLimit(
  LoadInst *LI,
  Constant *RHS,
  const Loop *L,
  ICmpInst::Predicate predicate) {

  if (LI->isVolatile()) return getCouldNotCompute();

  // Check to see if the loaded pointer is a getelementptr of a global.
  // TODO: Use SCEV instead of manually grubbing with GEPs.
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
  if (!GEP) return getCouldNotCompute();

  // Make sure that it is really a constant global we are gepping, with an
  // initializer, and make sure the first IDX is really 0.
  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
      !cast<Constant>(GEP->getOperand(1))->isNullValue())
    return getCouldNotCompute();

  // Okay, we allow one non-constant index into the GEP instruction.
  Value *VarIdx = nullptr;
  std::vector<Constant*> Indexes;
  unsigned VarIdxNum = 0;
  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      Indexes.push_back(CI);
    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
      if (VarIdx) return getCouldNotCompute();  // Multiple non-constant idx's.
      VarIdx = GEP->getOperand(i);
      VarIdxNum = i-2;
      Indexes.push_back(nullptr);
    }

  // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
  if (!VarIdx)
    return getCouldNotCompute();

  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
  // Check to see if X is a loop variant variable value now.
  const SCEV *Idx = getSCEV(VarIdx);
  Idx = getSCEVAtScope(Idx, L);

  // We can only recognize very limited forms of loop index expressions, in
  // particular, only affine AddRec's like {C1,+,C2}.
  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
  if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
    return getCouldNotCompute();

  unsigned MaxSteps = MaxBruteForceIterations;
  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
    ConstantInt *ItCst = ConstantInt::get(
                           cast<IntegerType>(IdxExpr->getType()), IterationNum);
    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);

    // Form the GEP offset.
    Indexes[VarIdxNum] = Val;

    Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
                                                         Indexes);
    if (!Result) break;  // Cannot compute!

    // Evaluate the condition for this iteration.
    Result = ConstantExpr::getICmp(predicate, Result, RHS);
    if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
#if 0
      dbgs() << "\n***\n*** Computed loop count " << *ItCst
             << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
             << "***\n";
#endif
      ++NumArrayLenItCounts;
      return getConstant(ItCst);   // Found terminating iteration!
    }
  }
  return getCouldNotCompute();
}


/// CanConstantFold - Return true if we can constant fold an instruction of the
/// specified type, assuming that all operands were constants.
static bool CanConstantFold(const Instruction *I) {
  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
      isa<LoadInst>(I))
    return true;

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (const Function *F = CI->getCalledFunction())
      return canConstantFoldCallTo(F);
  return false;
}

/// Determine whether this instruction can constant evolve within this loop
/// assuming its operands can all constant evolve.
static bool canConstantEvolve(Instruction *I, const Loop *L) {
  // An instruction outside of the loop can't be derived from a loop PHI.
  if (!L->contains(I)) return false;

  if (isa<PHINode>(I)) {
    if (L->getHeader() == I->getParent())
      return true;
    else
      // We don't currently keep track of the control flow needed to evaluate
      // PHIs, so we cannot handle PHIs inside of loops.
      return false;
  }

  // If we won't be able to constant fold this expression even if the operands
  // are constants, bail early.
  return CanConstantFold(I);
}

/// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
/// recursing through each instruction operand until reaching a loop header phi.
static PHINode *
getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
                               DenseMap<Instruction *, PHINode *> &PHIMap) {

  // We can evaluate this instruction if all of its operands are constant or
  // derived from a PHI node themselves.
  PHINode *PHI = nullptr;
  for (Instruction::op_iterator OpI = UseInst->op_begin(),
         OpE = UseInst->op_end(); OpI != OpE; ++OpI) {

    if (isa<Constant>(*OpI)) continue;

    Instruction *OpInst = dyn_cast<Instruction>(*OpI);
    if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;

    PHINode *P = dyn_cast<PHINode>(OpInst);
    if (!P)
      // If this operand is already visited, reuse the prior result.
      // We may have P != PHI if this is the deepest point at which the
      // inconsistent paths meet.
      P = PHIMap.lookup(OpInst);
    if (!P) {
      // Recurse and memoize the results, whether a phi is found or not.
      // This recursive call invalidates pointers into PHIMap.
      P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap);
      PHIMap[OpInst] = P;
    }
    if (!P)
      return nullptr;  // Not evolving from PHI
    if (PHI && PHI != P)
      return nullptr;  // Evolving from multiple different PHIs.
    PHI = P;
  }
  // This is an expression evolving from a constant PHI!
  return PHI;
}

/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
/// in the loop that V is derived from.  We allow arbitrary operations along the
/// way, but the operands of an operation must either be constants or a value
/// derived from a constant PHI.  If this expression does not fit with these
/// constraints, return null.
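///
/// For example, if %add = add i32 %iv, 1 and %iv.next = mul i32 %add, 2,
/// where %iv is a header phi, both %add and %iv.next evolve from the single
/// phi %iv, so %iv is returned for either of them.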
static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !canConstantEvolve(I, L)) return nullptr;

  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    return PN;
  }

  // Record non-constant instructions contained by the loop.
  DenseMap<Instruction *, PHINode *> PHIMap;
  return getConstantEvolvingPHIOperands(I, L, PHIMap);
}

/// EvaluateExpression - Given an expression that passes the
/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
/// in the loop has the value PHIVal.  If we can't fold this expression for some
/// reason, return null.
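///
/// For example, given "%i.next = add i32 %i, 1" and a Vals map holding
/// {%i -> 3}, the expression folds to the constant 4.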
static Constant *EvaluateExpression(Value *V, const Loop *L,
                                    DenseMap<Instruction *, Constant *> &Vals,
                                    const DataLayout *DL,
                                    const TargetLibraryInfo *TLI) {
  // Convenient constant check, but redundant for recursive calls.
  if (Constant *C = dyn_cast<Constant>(V)) return C;
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr;

  if (Constant *C = Vals.lookup(I)) return C;

  // An instruction inside the loop depends on a value outside the loop that we
  // weren't given a mapping for, or a value such as a call inside the loop.
  if (!canConstantEvolve(I, L)) return nullptr;

  // An unmapped PHI can be due to a branch or another loop inside this loop,
  // or due to this not being the initial iteration through a loop where we
  // couldn't compute the evolution of this particular PHI last time.
  if (isa<PHINode>(I)) return nullptr;

  std::vector<Constant*> Operands(I->getNumOperands());

  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
    if (!Operand) {
      Operands[i] = dyn_cast<Constant>(I->getOperand(i));
      if (!Operands[i]) return nullptr;
      continue;
    }
    Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
    Vals[Operand] = C;
    if (!C) return nullptr;
    Operands[i] = C;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                           Operands[1], DL, TLI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isVolatile())
      return ConstantFoldLoadFromConstPtr(Operands[0], DL);
  }
  return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, DL,
                                  TLI);
}

/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
/// in the header of its containing loop, that the loop executes a constant
/// number of times, and that the PHI node is just a recurrence involving
/// constants, fold it.
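///
/// For example, a header phi starting at 0 and stepped by 2 on each backedge,
/// in a loop whose backedge-taken count is the constant 3, folds to 6.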
Constant *
ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
                                                   const APInt &BEs,
                                                   const Loop *L) {
  DenseMap<PHINode*, Constant*>::const_iterator I =
    ConstantEvolutionLoopExitValue.find(PN);
  if (I != ConstantEvolutionLoopExitValue.end())
    return I->second;

  if (BEs.ugt(MaxBruteForceIterations))
    // Not going to evaluate it.
    return ConstantEvolutionLoopExitValue[PN] = nullptr;

  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  // Since the loop is canonicalized, the PHI node must have two entries.  One
  // entry must be a constant (coming in from outside of the loop), and the
  // second must be derived from the same PHI.
  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
  PHINode *PHI = nullptr;
  for (BasicBlock::iterator I = Header->begin();
       (PHI = dyn_cast<PHINode>(I)); ++I) {
    Constant *StartCST =
      dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
    if (!StartCST) continue;
    CurrentIterVals[PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return RetVal = nullptr;

  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);

  // Execute the loop symbolically to determine the exit value.
  if (BEs.getActiveBits() >= 32)
    return RetVal = nullptr; // More than 2^32-1 iterations?? Not doing it!

  unsigned NumIterations = BEs.getZExtValue(); // must be in range
  unsigned IterationNum = 0;
  for (; ; ++IterationNum) {
    if (IterationNum == NumIterations)
      return RetVal = CurrentIterVals[PN];  // Got exit value!

    // Compute the value of the PHIs for the next iteration.
    // EvaluateExpression adds non-phi values to the CurrentIterVals map.
    DenseMap<Instruction *, Constant *> NextIterVals;
    Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL,
                                           TLI);
    if (!NextPHI)
      return nullptr;        // Couldn't evaluate!
    NextIterVals[PN] = NextPHI;

    bool StoppedEvolving = NextPHI == CurrentIterVals[PN];

    // Also evaluate the other PHI nodes.  However, we don't get to stop if we
    // cease to be able to evaluate one of them or if they stop evolving,
    // because that doesn't necessarily prevent us from computing PN.
    SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
    for (DenseMap<Instruction *, Constant *>::const_iterator
           I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I){
      PHINode *PHI = dyn_cast<PHINode>(I->first);
      if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
      PHIsToCompute.push_back(std::make_pair(PHI, I->second));
    }
    // We use two distinct loops because EvaluateExpression may invalidate any
    // iterators into CurrentIterVals.
    for (SmallVectorImpl<std::pair<PHINode *, Constant*> >::const_iterator
             I = PHIsToCompute.begin(), E = PHIsToCompute.end(); I != E; ++I) {
      PHINode *PHI = I->first;
      Constant *&NextPHI = NextIterVals[PHI];
      if (!NextPHI) {   // Not already computed.
        Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
      }
      if (NextPHI != I->second)
        StoppedEvolving = false;
    }

    // If all entries in CurrentIterVals == NextIterVals then we can stop
    // iterating, the loop can't continue to change.
    if (StoppedEvolving)
      return RetVal = CurrentIterVals[PN];

    CurrentIterVals.swap(NextIterVals);
  }
}

/// ComputeExitCountExhaustively - If the loop is known to execute a
/// constant number of times (the condition evolves only from constants),
/// try to evaluate a few iterations of the loop until the exit condition
/// gets a value of ExitWhen (true or false).  If we cannot evaluate the
/// trip count of the loop, return getCouldNotCompute().
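///
/// For example, "for (i = 3; i*i != 49; ++i)" is beyond the closed-form
/// solvers above, but evaluating the first few iterations symbolically shows
/// the condition becoming false on iteration 4, giving a count of 4.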
const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
                                                          Value *Cond,
                                                          bool ExitWhen) {
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (!PN) return getCouldNotCompute();

  // If the loop is canonicalized, the PHI will have exactly two entries.
  // That's the only form we support here.
  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  // One entry must be a constant (coming in from outside of the loop), and the
  // second must be derived from the same PHI.
  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
  PHINode *PHI = nullptr;
  for (BasicBlock::iterator I = Header->begin();
       (PHI = dyn_cast<PHINode>(I)); ++I) {
    Constant *StartCST =
      dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
    if (!StartCST) continue;
    CurrentIterVals[PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return getCouldNotCompute();

  // Okay, we found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a
  // value of "ExitWhen".

  unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
  for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
    ConstantInt *CondVal =
      dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, L, CurrentIterVals,
                                                       DL, TLI));

    // Couldn't symbolically evaluate.
    if (!CondVal) return getCouldNotCompute();

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
    }

    // Update all the PHI nodes for the next iteration.
    DenseMap<Instruction *, Constant *> NextIterVals;

    // Create a list of which PHIs we need to compute. We want to do this before
    // calling EvaluateExpression on them because that may invalidate iterators
    // into CurrentIterVals.
    SmallVector<PHINode *, 8> PHIsToCompute;
    for (DenseMap<Instruction *, Constant *>::const_iterator
           I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I){
      PHINode *PHI = dyn_cast<PHINode>(I->first);
      if (!PHI || PHI->getParent() != Header) continue;
      PHIsToCompute.push_back(PHI);
    }
    for (SmallVectorImpl<PHINode *>::const_iterator I = PHIsToCompute.begin(),
             E = PHIsToCompute.end(); I != E; ++I) {
      PHINode *PHI = *I;
      Constant *&NextPHI = NextIterVals[PHI];
      if (NextPHI) continue;    // Already computed!

      Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
      NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
    }
    CurrentIterVals.swap(NextIterVals);
  }

  // Too many iterations were needed to evaluate.
  return getCouldNotCompute();
}

/// getSCEVAtScope - Return a SCEV expression for the specified value
/// at the specified scope in the program.  The L value specifies the loop
/// nest at which to evaluate the expression: null represents the top level,
/// and a specified loop represents the scope immediately inside that loop.
///
/// This method can be used to compute the exit value for a variable defined
/// in a loop by querying what the value will hold in the parent loop.
///
/// In the case that a relevant loop exit value cannot be computed, the
/// original value V is returned.
const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
  // Check to see if we've folded this expression at this loop before.
  SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values =
      ValuesAtScopes[V];
  for (unsigned u = 0; u < Values.size(); u++) {
    if (Values[u].first == L)
      return Values[u].second ? Values[u].second : V;
  }
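  // Push a null placeholder before computing: if computeSCEVAtScope re-enters
  // this query for the same (V, L) pair, the lookup above then returns V
  // rather than recursing indefinitely.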
  Values.push_back(std::make_pair(L, static_cast<const SCEV *>(nullptr)));
  // Otherwise compute it.
  const SCEV *C = computeSCEVAtScope(V, L);
  SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values2 =
      ValuesAtScopes[V];
  for (unsigned u = Values2.size(); u > 0; u--) {
    if (Values2[u - 1].first == L) {
      Values2[u - 1].second = C;
      break;
    }
  }
  return C;
}

/// This builds up a Constant using the ConstantExpr interface.  That way, we
/// will return Constants for objects which aren't represented by a
/// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
/// Returns NULL if the SCEV isn't representable as a Constant.
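///
/// For example, an scAddExpr of a global's address and the constant 8 is
/// rebuilt below as a getelementptr on an i8* bitcast of the global, since
/// SCEV pointer offsets are already expressed in bytes.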
static Constant *BuildConstantFromSCEV(const SCEV *V) {
  switch (static_cast<SCEVTypes>(V->getSCEVType())) {
    case scCouldNotCompute:
    case scAddRecExpr:
      break;
    case scConstant:
      return cast<SCEVConstant>(V)->getValue();
    case scUnknown:
      return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
    case scSignExtend: {
      const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
        return ConstantExpr::getSExt(CastOp, SS->getType());
      break;
    }
    case scZeroExtend: {
      const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
        return ConstantExpr::getZExt(CastOp, SZ->getType());
      break;
    }
    case scTruncate: {
      const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
        return ConstantExpr::getTrunc(CastOp, ST->getType());
      break;
    }
    case scAddExpr: {
      const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
      if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
        if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
          unsigned AS = PTy->getAddressSpace();
          Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
          C = ConstantExpr::getBitCast(C, DestPtrTy);
        }
        for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
          Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
          if (!C2) return nullptr;

          // First pointer!
          if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
            unsigned AS = C2->getType()->getPointerAddressSpace();
            std::swap(C, C2);
            Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
            // The offsets have been converted to bytes.  We can add bytes to an
            // i8* by GEP with the byte count in the first index.
            C = ConstantExpr::getBitCast(C, DestPtrTy);
          }

          // Don't bother trying to sum two pointers. We probably can't
          // statically compute a load that results from it anyway.
          if (C2->getType()->isPointerTy())
            return nullptr;

          if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
            if (PTy->getElementType()->isStructTy())
              C2 = ConstantExpr::getIntegerCast(
                  C2, Type::getInt32Ty(C->getContext()), true);
            C = ConstantExpr::getGetElementPtr(C, C2);
          } else
            C = ConstantExpr::getAdd(C, C2);
        }
        return C;
      }
      break;
    }
    case scMulExpr: {
      const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
      if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
        // Don't bother with pointers at all.
        if (C->getType()->isPointerTy()) return nullptr;
        for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
          Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
          if (!C2 || C2->getType()->isPointerTy()) return nullptr;
          C = ConstantExpr::getMul(C, C2);
        }
        return C;
      }
      break;
    }
    case scUDivExpr: {
      const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
      if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
        if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
          if (LHS->getType() == RHS->getType())
            return ConstantExpr::getUDiv(LHS, RHS);
      break;
    }
    case scSMaxExpr:
    case scUMaxExpr:
      break; // TODO: smax, umax.
  }
  return nullptr;
}

const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  if (isa<SCEVConstant>(V)) return V;

  // If this instruction is evolved from a constant-evolving PHI, compute the
  // exit value from the loop without using SCEVs.
  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
      const Loop *LI = (*this->LI)[I->getParent()];
      if (LI && LI->getParentLoop() == L)  // Looking for loop exit value.
        if (PHINode *PN = dyn_cast<PHINode>(I))
          if (PN->getParent() == LI->getHeader()) {
            // Okay, there is no closed form solution for the PHI node.  Check
            // to see if the loop that contains it has a known backedge-taken
            // count.  If so, we may be able to force computation of the exit
            // value.
            const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
            if (const SCEVConstant *BTCC =
                  dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
              // Okay, we know how many times the containing loop executes.  If
              // this is a constant evolving PHI node, get the final value at
              // the specified iteration number.
              Constant *RV = getConstantEvolutionLoopExitValue(PN,
                                                   BTCC->getValue()->getValue(),
                                                               LI);
              if (RV) return getSCEV(RV);
            }
          }

      // Okay, this is an expression that we cannot symbolically evaluate
      // into a SCEV.  Check to see if it's possible to symbolically evaluate
      // the arguments into constants, and if so, try to constant propagate the
      // result.  This is particularly useful for computing loop exit values.
      if (CanConstantFold(I)) {
        SmallVector<Constant *, 4> Operands;
        bool MadeImprovement = false;
        for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
          Value *Op = I->getOperand(i);
          if (Constant *C = dyn_cast<Constant>(Op)) {
            Operands.push_back(C);
            continue;
          }

          // If any of the operands is non-constant and if they are
          // non-integer and non-pointer, don't even try to analyze them
          // with scev techniques.
          if (!isSCEVable(Op->getType()))
            return V;

          const SCEV *OrigV = getSCEV(Op);
          const SCEV *OpV = getSCEVAtScope(OrigV, L);
          MadeImprovement |= OrigV != OpV;

          Constant *C = BuildConstantFromSCEV(OpV);
          if (!C) return V;
          if (C->getType() != Op->getType())
            C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                              Op->getType(),
                                                              false),
                                      C, Op->getType());
          Operands.push_back(C);
        }

        // Check to see if getSCEVAtScope actually made an improvement.
        if (MadeImprovement) {
          Constant *C = nullptr;
          if (const CmpInst *CI = dyn_cast<CmpInst>(I))
            C = ConstantFoldCompareInstOperands(CI->getPredicate(),
                                                Operands[0], Operands[1], DL,
                                                TLI);
          else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
            if (!LI->isVolatile())
              C = ConstantFoldLoadFromConstPtr(Operands[0], DL);
          } else
            C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
                                         Operands, DL, TLI);
          if (!C) return V;
          return getSCEV(C);
        }
      }
    }

    // This is some other type of SCEVUnknown, just return it.
    return V;
  }

  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
      if (OpAtScope != Comm->getOperand(i)) {
        // Okay, at least one of these operands is loop variant but might be
        // foldable.  Build a new instance of the folded commutative expression.
        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
                                            Comm->op_begin()+i);
        NewOps.push_back(OpAtScope);

        for (++i; i != e; ++i) {
          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
          NewOps.push_back(OpAtScope);
        }
        if (isa<SCEVAddExpr>(Comm))
          return getAddExpr(NewOps);
        if (isa<SCEVMulExpr>(Comm))
          return getMulExpr(NewOps);
        if (isa<SCEVSMaxExpr>(Comm))
          return getSMaxExpr(NewOps);
        if (isa<SCEVUMaxExpr>(Comm))
          return getUMaxExpr(NewOps);
        llvm_unreachable("Unknown commutative SCEV type!");
      }
    }
    // If we got here, all operands are loop invariant.
    return Comm;
  }

  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
    if (LHS == Div->getLHS() && RHS == Div->getRHS())
      return Div;   // must be loop invariant
    return getUDivExpr(LHS, RHS);
  }

  // If this is a loop recurrence for a loop that does not contain L, then we
  // are dealing with the final value computed by the loop.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
    // First, attempt to evaluate each operand.
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
      if (OpAtScope == AddRec->getOperand(i))
        continue;

      // Okay, at least one of these operands is loop variant but might be
      // foldable.  Build a new instance of the folded addrec expression.
      SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
                                          AddRec->op_begin()+i);
      NewOps.push_back(OpAtScope);
      for (++i; i != e; ++i)
        NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));

      const SCEV *FoldedRec =
        getAddRecExpr(NewOps, AddRec->getLoop(),
                      AddRec->getNoWrapFlags(SCEV::FlagNW));
      AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
      // The addrec may be folded to a nonrecurrence, for example, if the
      // induction variable is multiplied by zero after constant folding. Go
      // ahead and return the folded value.
      if (!AddRec)
        return FoldedRec;
      break;
    }

    // If the scope is outside the addrec's loop, evaluate it by using the
    // loop exit value of the addrec.
    if (!AddRec->getLoop()->contains(L)) {
      // To evaluate this recurrence, we need to know how many times the AddRec
      // loop iterates.  Compute this now.
      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;

      // Then, evaluate the AddRec.
      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
    }

    return AddRec;
  }

  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getZeroExtendExpr(Op, Cast->getType());
  }

  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getSignExtendExpr(Op, Cast->getType());
  }

  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getTruncateExpr(Op, Cast->getType());
  }

  llvm_unreachable("Unknown SCEV type!");
}

/// getSCEVAtScope - This is a convenience function which does
/// getSCEVAtScope(getSCEV(V), L).
const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
  return getSCEVAtScope(getSCEV(V), L);
}

/// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
/// following equation:
///
///     A * X = B (mod N)
///
/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
/// A and B isn't important.
///
/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
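///
/// Worked example with BW = 8, A = 4, B = 12: D = gcd(4, 256) = 4, so
/// Mult2 = 2; B also has two trailing zeros, so a solution exists.  Then
/// A/D = 1, N/D = 64, I = 1, and the minimum unsigned root is
/// X = (I * (B/D)) mod (N/D) = 3.  Check: 4 * 3 = 12 (mod 256).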
static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
                                               ScalarEvolution &SE) {
  uint32_t BW = A.getBitWidth();
  assert(BW == B.getBitWidth() && "Bit widths must be the same.");
  assert(A != 0 && "A must be non-zero.");

  // 1. D = gcd(A, N)
  //
  // The gcd of A and N may have only one prime factor: 2. The number of
  // trailing zeros in A is its multiplicity.
  uint32_t Mult2 = A.countTrailingZeros();
  // D = 2^Mult2

  // 2. Check if B is divisible by D.
  //
  // B is divisible by D if and only if the multiplicity of prime factor 2 for B
  // is not less than multiplicity of this prime factor for D.
  if (B.countTrailingZeros() < Mult2)
    return SE.getCouldNotCompute();

  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  // modulo (N / D).
  //
  // (N / D) may need BW+1 bits in its representation.  Hence, we'll use this
  // bit width during computations.
  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
  APInt Mod(BW + 1, 0);
  Mod.setBit(BW - Mult2);  // Mod = N / D
  APInt I = AD.multiplicativeInverse(Mod);

  // 4. Compute the minimum unsigned root of the equation:
  // I * (B / D) mod (N / D)
  APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);

  // The result is guaranteed to be less than 2^BW so we may truncate it to BW
  // bits.
  return SE.getConstant(Result.trunc(BW));
}

/// SolveQuadraticEquation - Find the roots of the quadratic equation for the
/// given quadratic chrec {L,+,M,+,N}.  This returns either the two roots (which
/// might be the same) or two SCEVCouldNotCompute objects.
///
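/// At iteration X the chrec {L,+,M,+,N} evaluates to L + M*X + N*X*(X-1)/2,
/// so equating it to zero yields the polynomial (N/2)*X^2 + (M - N/2)*X + L;
/// these are the A, B, and C coefficients formed below.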
static std::pair<const SCEV *,const SCEV *>
SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));

  // We currently can only solve this if the coefficients are constants.
  if (!LC || !MC || !NC) {
    const SCEV *CNC = SE.getCouldNotCompute();
    return std::make_pair(CNC, CNC);
  }

  uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
  const APInt &L = LC->getValue()->getValue();
  const APInt &M = MC->getValue()->getValue();
  const APInt &N = NC->getValue()->getValue();
  APInt Two(BitWidth, 2);
  APInt Four(BitWidth, 4);

  {
    using namespace APIntOps;
    const APInt& C = L;
    // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
    // The B coefficient is M-N/2
    APInt B(M);
    B -= sdiv(N,Two);

    // The A coefficient is N/2
    APInt A(N.sdiv(Two));

    // Compute the B^2-4ac term.
    APInt SqrtTerm(B);
    SqrtTerm *= B;
    SqrtTerm -= Four * (A * C);

    if (SqrtTerm.isNegative()) {
      // The loop is provably infinite.
      const SCEV *CNC = SE.getCouldNotCompute();
      return std::make_pair(CNC, CNC);
    }

    // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
    // integer value or else APInt::sqrt() will assert.
    APInt SqrtVal(SqrtTerm.sqrt());

    // Compute the two solutions for the quadratic formula.
    // The divisions must be performed as signed divisions.
    APInt NegB(-B);
    APInt TwoA(A << 1);
    if (TwoA.isMinValue()) {
      const SCEV *CNC = SE.getCouldNotCompute();
      return std::make_pair(CNC, CNC);
    }

    LLVMContext &Context = SE.getContext();

    ConstantInt *Solution1 =
      ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
    ConstantInt *Solution2 =
      ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));

    return std::make_pair(SE.getConstant(Solution1),
                          SE.getConstant(Solution2));
  } // end APIntOps namespace
}

/// HowFarToZero - Return the number of times a backedge comparing the specified
/// value to zero will execute.  If not computable, return CouldNotCompute.
///
/// This is only used for loops with a "x != y" exit test. The exit condition is
/// now expressed as a single expression, V = x-y. So the exit test is
/// effectively V != 0.  We know and take advantage of the fact that this
/// expression is only used in a comparison-with-zero context.
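///
/// For example, "for (i = n; i != 0; i -= 2)" produces the addrec {n,+,-2};
/// when the recurrence is known not to wrap, the exact count computed below
/// is simply n /u 2.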
ScalarEvolution::ExitLimit
ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L, bool IsSubExpr) {
  // If the value is a constant
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    // If the value is already zero, the branch will execute zero times.
    if (C->getValue()->isZero()) return C;
    return getCouldNotCompute();  // Otherwise it will loop infinitely.
  }

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
  // the quadratic equation to solve it.
  if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
    std::pair<const SCEV *,const SCEV *> Roots =
      SolveQuadraticEquation(AddRec, *this);
    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
    if (R1 && R2) {
#if 0
      dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1
             << "  sol#2: " << *R2 << "\n";
#endif
      // Pick the smallest positive root value.
      if (ConstantInt *CB =
          dyn_cast<ConstantInt>(ConstantExpr::getICmp(CmpInst::ICMP_ULT,
                                                      R1->getValue(),
                                                      R2->getValue()))) {
        if (!CB->getZExtValue())
          std::swap(R1, R2);   // R1 is the minimum root now.

        // We can only use this value if the chrec ends up with an exact zero
        // value at this index.  When solving for "X*X != 5", for example, we
        // should not accept a root of 2.
        const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
        if (Val->isZero())
          return R1;  // We found a quadratic root!
      }
    }
    return getCouldNotCompute();
  }

  // Otherwise we can only handle this if it is affine.
  if (!AddRec->isAffine())
    return getCouldNotCompute();

  // If this is an affine expression, the execution count of this branch is
  // the minimum unsigned root of the following equation:
  //
  //     Start + Step*N = 0 (mod 2^BW)
  //
  // equivalent to:
  //
  //             Step*N = -Start (mod 2^BW)
  //
  // where BW is the common bit width of Start and Step.

  // Get the initial value for the loop.
  const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
  const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());

  // For now we handle only constant steps.
  //
  // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
  // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
  // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
  // We have not yet seen any such cases.
  const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
  if (!StepC || StepC->getValue()->equalsInt(0))
    return getCouldNotCompute();

  // For positive steps (counting up until unsigned overflow):
  //   N = -Start/Step (as unsigned)
  // For negative steps (counting down to zero):
  //   N = Start/-Step
  // First compute the unsigned distance from zero in the direction of Step.
  bool CountDown = StepC->getValue()->getValue().isNegative();
  const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);

  // Handle unitary steps, which cannot wrap around.
  // 1*N = -Start; -1*N = Start (mod 2^BW), so:
  //   N = Distance (as unsigned)
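  // Note on the bound below: when counting up, a start of 0 exits
  // immediately, so the worst case for -Start comes from Start == 1, which
  // gives a maximum backedge-taken count of 2^BW - 1.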
  if (StepC->getValue()->equalsInt(1) || StepC->getValue()->isAllOnesValue()) {
    ConstantRange CR = getUnsignedRange(Start);
    const SCEV *MaxBECount;
    if (!CountDown && CR.getUnsignedMin().isMinValue())
      // When counting up, the worst starting value is 1, not 0.
      MaxBECount = CR.getUnsignedMax().isMinValue()
        ? getConstant(APInt::getMinValue(CR.getBitWidth()))
        : getConstant(APInt::getMaxValue(CR.getBitWidth()));
    else
      MaxBECount = getConstant(CountDown ? CR.getUnsignedMax()
                                         : -CR.getUnsignedMin());
    return ExitLimit(Distance, MaxBECount, /*MustExit=*/true);
  }

  // If the recurrence is known not to wrap around, unsigned division computes
  // the backedge count. (Ideally we would have an "isexact" bit for udiv.) We
  // know that the value will either become zero (and thus the loop
  // terminates), that the loop will terminate through some other exit
  // condition first, or that the loop has undefined behavior.  This means we
  // can't "miss" the exit value, even with nonunit stride, and exit later via
  // the same branch. Note that we can skip this exit if the loop later exits
  // via a different branch. Hence MustExit=false.
   5795   //
   5796   // This is only valid for expressions that directly compute the loop exit. It
   5797   // is invalid for subexpressions in which the loop may exit through this
   5798   // branch even if this subexpression is false. In that case, the trip count
   5799   // computed by this udiv could be smaller than the number of well-defined
   5800   // iterations.
   5801   if (!IsSubExpr && AddRec->getNoWrapFlags(SCEV::FlagNW)) {
   5802     const SCEV *Exact =
   5803       getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
   5804     return ExitLimit(Exact, Exact, /*MustExit=*/false);
   5805   }
   5806 
  // If Step is a power of two that evenly divides Start, we know that the loop
  // will always terminate.  Start may not be a constant, so we only have its
  // number of known trailing zero bits available.  This is safe even in the
  // presence of overflow, as the recurrence will overflow to exactly 0.
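  //
  // For example (illustrative), with Step = 4 and Start = -12: -Start = 12
  // has two trailing zero bits, so the IV walks -12, -8, -4, 0 and the exact
  // count is 12 / 4 = 3.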
   5811   const APInt &StepV = StepC->getValue()->getValue();
   5812   if (StepV.isPowerOf2() &&
   5813       GetMinTrailingZeros(getNegativeSCEV(Start)) >= StepV.countTrailingZeros())
   5814     return getUDivExactExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
   5815 
   5816   // Then, try to solve the above equation provided that Start is constant.
   5817   if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
   5818     return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
   5819                                         -StartC->getValue()->getValue(),
   5820                                         *this);
   5821   return getCouldNotCompute();
   5822 }
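
// The sketch below illustrates the modular arithmetic underlying
// SolveLinEquationWithOverflow, reduced to plain uint64_t and an odd (hence
// invertible) step. It is an illustration under those assumptions only, not
// the actual implementation, and is kept out of the build.
#if 0
// Solve A*N == B (mod 2^64) for the unique N, assuming A is odd.
static uint64_t solveLinearMod64(uint64_t A, uint64_t B) {
  // Newton's iteration for the inverse of A mod 2^64: each step doubles the
  // number of correct low bits. A is its own inverse to 3 bits (A*A == 1
  // (mod 8) for odd A), so five steps reach 96 >= 64 correct bits.
  uint64_t Inv = A;
  for (int i = 0; i < 5; ++i)
    Inv *= 2 - A * Inv;
  return B * Inv; // N = A^-1 * B (mod 2^64)
}
#endif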
   5823 
   5824 /// HowFarToNonZero - Return the number of times a backedge checking the
   5825 /// specified value for nonzero will execute.  If not computable, return
/// CouldNotCompute.
   5827 ScalarEvolution::ExitLimit
   5828 ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
   5829   // Loops that look like: while (X == 0) are very strange indeed.  We don't
   5830   // handle them yet except for the trivial case.  This could be expanded in the
   5831   // future as needed.
   5832 
   5833   // If the value is a constant, check to see if it is known to be non-zero
   5834   // already.  If so, the backedge will execute zero times.
   5835   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
   5836     if (!C->getValue()->isNullValue())
   5837       return getConstant(C->getType(), 0);
   5838     return getCouldNotCompute();  // Otherwise it will loop infinitely.
   5839   }
   5840 
   5841   // We could implement others, but I really doubt anyone writes loops like
   5842   // this, and if they did, they would already be constant folded.
   5843   return getCouldNotCompute();
   5844 }
   5845 
   5846 /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
   5847 /// (which may not be an immediate predecessor) which has exactly one
   5848 /// successor from which BB is reachable, or null if no such block is
   5849 /// found.
   5850 ///
   5851 std::pair<BasicBlock *, BasicBlock *>
   5852 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
   5853   // If the block has a unique predecessor, then there is no path from the
   5854   // predecessor to the block that does not go through the direct edge
   5855   // from the predecessor to the block.
   5856   if (BasicBlock *Pred = BB->getSinglePredecessor())
   5857     return std::make_pair(Pred, BB);
   5858 
   5859   // A loop's header is defined to be a block that dominates the loop.
   5860   // If the header has a unique predecessor outside the loop, it must be
   5861   // a block that has exactly one successor that can reach the loop.
   5862   if (Loop *L = LI->getLoopFor(BB))
   5863     return std::make_pair(L->getLoopPredecessor(), L->getHeader());
   5864 
   5865   return std::pair<BasicBlock *, BasicBlock *>();
   5866 }
   5867 
   5868 /// HasSameValue - SCEV structural equivalence is usually sufficient for
   5869 /// testing whether two expressions are equal, however for the purposes of
   5870 /// looking for a condition guarding a loop, it can be useful to be a little
   5871 /// more general, since a front-end may have replicated the controlling
   5872 /// expression.
   5873 ///
   5874 static bool HasSameValue(const SCEV *A, const SCEV *B) {
   5875   // Quick check to see if they are the same SCEV.
   5876   if (A == B) return true;
   5877 
   5878   // Otherwise, if they're both SCEVUnknown, it's possible that they hold
   5879   // two different instructions with the same value. Check for this case.
   5880   if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
   5881     if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
   5882       if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
   5883         if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
   5884           if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory())
   5885             return true;
   5886 
   5887   // Otherwise assume they may have a different value.
   5888   return false;
   5889 }
   5890 
   5891 /// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with
   5892 /// predicate Pred. Return true iff any changes were made.
   5893 ///
   5894 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
   5895                                            const SCEV *&LHS, const SCEV *&RHS,
   5896                                            unsigned Depth) {
   5897   bool Changed = false;
   5898 
   5899   // If we hit the max recursion limit bail out.
   5900   if (Depth >= 3)
   5901     return false;
   5902 
   5903   // Canonicalize a constant to the right side.
   5904   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
   5905     // Check for both operands constant.
   5906     if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
   5907       if (ConstantExpr::getICmp(Pred,
   5908                                 LHSC->getValue(),
   5909                                 RHSC->getValue())->isNullValue())
   5910         goto trivially_false;
   5911       else
   5912         goto trivially_true;
   5913     }
   5914     // Otherwise swap the operands to put the constant on the right.
   5915     std::swap(LHS, RHS);
   5916     Pred = ICmpInst::getSwappedPredicate(Pred);
   5917     Changed = true;
   5918   }
   5919 
   5920   // If we're comparing an addrec with a value which is loop-invariant in the
   5921   // addrec's loop, put the addrec on the left. Also make a dominance check,
   5922   // as both operands could be addrecs loop-invariant in each other's loop.
   5923   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
   5924     const Loop *L = AR->getLoop();
   5925     if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
   5926       std::swap(LHS, RHS);
   5927       Pred = ICmpInst::getSwappedPredicate(Pred);
   5928       Changed = true;
   5929     }
   5930   }
   5931 
   5932   // If there's a constant operand, canonicalize comparisons with boundary
   5933   // cases, and canonicalize *-or-equal comparisons to regular comparisons.
   5934   if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
   5935     const APInt &RA = RC->getValue()->getValue();
   5936     switch (Pred) {
   5937     default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
   5938     case ICmpInst::ICMP_EQ:
   5939     case ICmpInst::ICMP_NE:
   5940       // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
   5941       if (!RA)
   5942         if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
   5943           if (const SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
   5944             if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
   5945                 ME->getOperand(0)->isAllOnesValue()) {
   5946               RHS = AE->getOperand(1);
   5947               LHS = ME->getOperand(1);
   5948               Changed = true;
   5949             }
   5950       break;
   5951     case ICmpInst::ICMP_UGE:
   5952       if ((RA - 1).isMinValue()) {
   5953         Pred = ICmpInst::ICMP_NE;
   5954         RHS = getConstant(RA - 1);
   5955         Changed = true;
   5956         break;
   5957       }
   5958       if (RA.isMaxValue()) {
   5959         Pred = ICmpInst::ICMP_EQ;
   5960         Changed = true;
   5961         break;
   5962       }
   5963       if (RA.isMinValue()) goto trivially_true;
   5964 
   5965       Pred = ICmpInst::ICMP_UGT;
   5966       RHS = getConstant(RA - 1);
   5967       Changed = true;
   5968       break;
   5969     case ICmpInst::ICMP_ULE:
   5970       if ((RA + 1).isMaxValue()) {
   5971         Pred = ICmpInst::ICMP_NE;
   5972         RHS = getConstant(RA + 1);
   5973         Changed = true;
   5974         break;
   5975       }
   5976       if (RA.isMinValue()) {
   5977         Pred = ICmpInst::ICMP_EQ;
   5978         Changed = true;
   5979         break;
   5980       }
   5981       if (RA.isMaxValue()) goto trivially_true;
   5982 
   5983       Pred = ICmpInst::ICMP_ULT;
   5984       RHS = getConstant(RA + 1);
   5985       Changed = true;
   5986       break;
   5987     case ICmpInst::ICMP_SGE:
   5988       if ((RA - 1).isMinSignedValue()) {
   5989         Pred = ICmpInst::ICMP_NE;
   5990         RHS = getConstant(RA - 1);
   5991         Changed = true;
   5992         break;
   5993       }
   5994       if (RA.isMaxSignedValue()) {
   5995         Pred = ICmpInst::ICMP_EQ;
   5996         Changed = true;
   5997         break;
   5998       }
   5999       if (RA.isMinSignedValue()) goto trivially_true;
   6000 
   6001       Pred = ICmpInst::ICMP_SGT;
   6002       RHS = getConstant(RA - 1);
   6003       Changed = true;
   6004       break;
   6005     case ICmpInst::ICMP_SLE:
   6006       if ((RA + 1).isMaxSignedValue()) {
   6007         Pred = ICmpInst::ICMP_NE;
   6008         RHS = getConstant(RA + 1);
   6009         Changed = true;
   6010         break;
   6011       }
   6012       if (RA.isMinSignedValue()) {
   6013         Pred = ICmpInst::ICMP_EQ;
   6014         Changed = true;
   6015         break;
   6016       }
   6017       if (RA.isMaxSignedValue()) goto trivially_true;
   6018 
   6019       Pred = ICmpInst::ICMP_SLT;
   6020       RHS = getConstant(RA + 1);
   6021       Changed = true;
   6022       break;
   6023     case ICmpInst::ICMP_UGT:
   6024       if (RA.isMinValue()) {
   6025         Pred = ICmpInst::ICMP_NE;
   6026         Changed = true;
   6027         break;
   6028       }
   6029       if ((RA + 1).isMaxValue()) {
   6030         Pred = ICmpInst::ICMP_EQ;
   6031         RHS = getConstant(RA + 1);
   6032         Changed = true;
   6033         break;
   6034       }
   6035       if (RA.isMaxValue()) goto trivially_false;
   6036       break;
   6037     case ICmpInst::ICMP_ULT:
   6038       if (RA.isMaxValue()) {
   6039         Pred = ICmpInst::ICMP_NE;
   6040         Changed = true;
   6041         break;
   6042       }
   6043       if ((RA - 1).isMinValue()) {
   6044         Pred = ICmpInst::ICMP_EQ;
   6045         RHS = getConstant(RA - 1);
   6046         Changed = true;
   6047         break;
   6048       }
   6049       if (RA.isMinValue()) goto trivially_false;
   6050       break;
   6051     case ICmpInst::ICMP_SGT:
   6052       if (RA.isMinSignedValue()) {
   6053         Pred = ICmpInst::ICMP_NE;
   6054         Changed = true;
   6055         break;
   6056       }
   6057       if ((RA + 1).isMaxSignedValue()) {
   6058         Pred = ICmpInst::ICMP_EQ;
   6059         RHS = getConstant(RA + 1);
   6060         Changed = true;
   6061         break;
   6062       }
   6063       if (RA.isMaxSignedValue()) goto trivially_false;
   6064       break;
   6065     case ICmpInst::ICMP_SLT:
   6066       if (RA.isMaxSignedValue()) {
   6067         Pred = ICmpInst::ICMP_NE;
   6068         Changed = true;
   6069         break;
   6070       }
   6071       if ((RA - 1).isMinSignedValue()) {
        Pred = ICmpInst::ICMP_EQ;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
   6076       }
   6077       if (RA.isMinSignedValue()) goto trivially_false;
   6078       break;
   6079     }
   6080   }
   6081 
   6082   // Check for obvious equality.
   6083   if (HasSameValue(LHS, RHS)) {
   6084     if (ICmpInst::isTrueWhenEqual(Pred))
   6085       goto trivially_true;
   6086     if (ICmpInst::isFalseWhenEqual(Pred))
   6087       goto trivially_false;
   6088   }
   6089 
   6090   // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
   6091   // adding or subtracting 1 from one of the operands.
   6092   switch (Pred) {
   6093   case ICmpInst::ICMP_SLE:
   6094     if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) {
   6095       RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
   6096                        SCEV::FlagNSW);
   6097       Pred = ICmpInst::ICMP_SLT;
   6098       Changed = true;
   6099     } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) {
   6100       LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
   6101                        SCEV::FlagNSW);
   6102       Pred = ICmpInst::ICMP_SLT;
   6103       Changed = true;
   6104     }
   6105     break;
   6106   case ICmpInst::ICMP_SGE:
   6107     if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) {
   6108       RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
   6109                        SCEV::FlagNSW);
   6110       Pred = ICmpInst::ICMP_SGT;
   6111       Changed = true;
   6112     } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) {
   6113       LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
   6114                        SCEV::FlagNSW);
   6115       Pred = ICmpInst::ICMP_SGT;
   6116       Changed = true;
   6117     }
   6118     break;
   6119   case ICmpInst::ICMP_ULE:
   6120     if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) {
   6121       RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
   6122                        SCEV::FlagNUW);
   6123       Pred = ICmpInst::ICMP_ULT;
   6124       Changed = true;
   6125     } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) {
   6126       LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
   6127                        SCEV::FlagNUW);
   6128       Pred = ICmpInst::ICMP_ULT;
   6129       Changed = true;
   6130     }
   6131     break;
   6132   case ICmpInst::ICMP_UGE:
   6133     if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) {
   6134       RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
   6135                        SCEV::FlagNUW);
   6136       Pred = ICmpInst::ICMP_UGT;
   6137       Changed = true;
   6138     } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) {
   6139       LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
   6140                        SCEV::FlagNUW);
   6141       Pred = ICmpInst::ICMP_UGT;
   6142       Changed = true;
   6143     }
   6144     break;
   6145   default:
   6146     break;
   6147   }
   6148 
   6149   // TODO: More simplifications are possible here.
   6150 
   6151   // Recursively simplify until we either hit a recursion limit or nothing
   6152   // changes.
   6153   if (Changed)
   6154     return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);
   6155 
   6156   return Changed;
   6157 
   6158 trivially_true:
   6159   // Return 0 == 0.
   6160   LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
   6161   Pred = ICmpInst::ICMP_EQ;
   6162   return true;
   6163 
   6164 trivially_false:
   6165   // Return 0 != 0.
   6166   LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
   6167   Pred = ICmpInst::ICMP_NE;
   6168   return true;
   6169 }
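
// For example (illustrative), SimplifyICmpOperands rewrites (x u>= 1) into
// (x != 0) via the boundary cases above, and (x s<= 5) into (x s< 6) via the
// *-or-equal canonicalization.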
   6170 
   6171 bool ScalarEvolution::isKnownNegative(const SCEV *S) {
   6172   return getSignedRange(S).getSignedMax().isNegative();
   6173 }
   6174 
   6175 bool ScalarEvolution::isKnownPositive(const SCEV *S) {
   6176   return getSignedRange(S).getSignedMin().isStrictlyPositive();
   6177 }
   6178 
   6179 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
   6180   return !getSignedRange(S).getSignedMin().isNegative();
   6181 }
   6182 
   6183 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
   6184   return !getSignedRange(S).getSignedMax().isStrictlyPositive();
   6185 }
   6186 
   6187 bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
   6188   return isKnownNegative(S) || isKnownPositive(S);
   6189 }
   6190 
   6191 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
   6192                                        const SCEV *LHS, const SCEV *RHS) {
   6193   // Canonicalize the inputs first.
   6194   (void)SimplifyICmpOperands(Pred, LHS, RHS);
   6195 
   6196   // If LHS or RHS is an addrec, check to see if the condition is true in
   6197   // every iteration of the loop.
   6198   // If LHS and RHS are both addrec, both conditions must be true in
   6199   // every iteration of the loop.
   6200   const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
   6201   const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
   6202   bool LeftGuarded = false;
   6203   bool RightGuarded = false;
   6204   if (LAR) {
   6205     const Loop *L = LAR->getLoop();
   6206     if (isLoopEntryGuardedByCond(L, Pred, LAR->getStart(), RHS) &&
   6207         isLoopBackedgeGuardedByCond(L, Pred, LAR->getPostIncExpr(*this), RHS)) {
   6208       if (!RAR) return true;
   6209       LeftGuarded = true;
   6210     }
   6211   }
   6212   if (RAR) {
   6213     const Loop *L = RAR->getLoop();
   6214     if (isLoopEntryGuardedByCond(L, Pred, LHS, RAR->getStart()) &&
   6215         isLoopBackedgeGuardedByCond(L, Pred, LHS, RAR->getPostIncExpr(*this))) {
   6216       if (!LAR) return true;
   6217       RightGuarded = true;
   6218     }
   6219   }
   6220   if (LeftGuarded && RightGuarded)
   6221     return true;
   6222 
   6223   // Otherwise see what can be done with known constant ranges.
   6224   return isKnownPredicateWithRanges(Pred, LHS, RHS);
   6225 }
   6226 
   6227 bool
   6228 ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
   6229                                             const SCEV *LHS, const SCEV *RHS) {
   6230   if (HasSameValue(LHS, RHS))
   6231     return ICmpInst::isTrueWhenEqual(Pred);
   6232 
   6233   // This code is split out from isKnownPredicate because it is called from
   6234   // within isLoopEntryGuardedByCond.
   6235   switch (Pred) {
   6236   default:
   6237     llvm_unreachable("Unexpected ICmpInst::Predicate value!");
   6238   case ICmpInst::ICMP_SGT:
    std::swap(LHS, RHS);
    // fall through
   6240   case ICmpInst::ICMP_SLT: {
   6241     ConstantRange LHSRange = getSignedRange(LHS);
   6242     ConstantRange RHSRange = getSignedRange(RHS);
   6243     if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
   6244       return true;
   6245     if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
   6246       return false;
   6247     break;
   6248   }
   6249   case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    // fall through
   6251   case ICmpInst::ICMP_SLE: {
   6252     ConstantRange LHSRange = getSignedRange(LHS);
   6253     ConstantRange RHSRange = getSignedRange(RHS);
   6254     if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
   6255       return true;
   6256     if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
   6257       return false;
   6258     break;
   6259   }
   6260   case ICmpInst::ICMP_UGT:
    std::swap(LHS, RHS);
    // fall through
   6262   case ICmpInst::ICMP_ULT: {
   6263     ConstantRange LHSRange = getUnsignedRange(LHS);
   6264     ConstantRange RHSRange = getUnsignedRange(RHS);
   6265     if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
   6266       return true;
   6267     if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
   6268       return false;
   6269     break;
   6270   }
   6271   case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    // fall through
   6273   case ICmpInst::ICMP_ULE: {
   6274     ConstantRange LHSRange = getUnsignedRange(LHS);
   6275     ConstantRange RHSRange = getUnsignedRange(RHS);
   6276     if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
   6277       return true;
   6278     if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
   6279       return false;
   6280     break;
   6281   }
   6282   case ICmpInst::ICMP_NE: {
   6283     if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
   6284       return true;
   6285     if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
   6286       return true;
   6287 
   6288     const SCEV *Diff = getMinusSCEV(LHS, RHS);
   6289     if (isKnownNonZero(Diff))
   6290       return true;
   6291     break;
   6292   }
   6293   case ICmpInst::ICMP_EQ:
   6294     // The check at the top of the function catches the case where
   6295     // the values are known to be equal.
   6296     break;
   6297   }
   6298   return false;
   6299 }
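
// For example (illustrative), if getSignedRange(LHS) = [0, 10) and
// getSignedRange(RHS) = [10, 20), the signed max of LHS (9) is slt the signed
// min of RHS (10), so LHS s< RHS is known to hold.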
   6300 
   6301 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS.  This is used to
/// eliminate casts.
   6304 bool
   6305 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
   6306                                              ICmpInst::Predicate Pred,
   6307                                              const SCEV *LHS, const SCEV *RHS) {
   6308   // Interpret a null as meaning no loop, where there is obviously no guard
   6309   // (interprocedural conditions notwithstanding).
   6310   if (!L) return true;
   6311 
   6312   BasicBlock *Latch = L->getLoopLatch();
   6313   if (!Latch)
   6314     return false;
   6315 
   6316   BranchInst *LoopContinuePredicate =
   6317     dyn_cast<BranchInst>(Latch->getTerminator());
   6318   if (!LoopContinuePredicate ||
   6319       LoopContinuePredicate->isUnconditional())
   6320     return false;
   6321 
   6322   return isImpliedCond(Pred, LHS, RHS,
   6323                        LoopContinuePredicate->getCondition(),
   6324                        LoopContinuePredicate->getSuccessor(0) != L->getHeader());
   6325 }
   6326 
   6327 /// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
   6328 /// by a conditional between LHS and RHS.  This is used to help avoid max
   6329 /// expressions in loop trip counts, and to eliminate casts.
   6330 bool
   6331 ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
   6332                                           ICmpInst::Predicate Pred,
   6333                                           const SCEV *LHS, const SCEV *RHS) {
   6334   // Interpret a null as meaning no loop, where there is obviously no guard
   6335   // (interprocedural conditions notwithstanding).
   6336   if (!L) return false;
   6337 
   6338   // Starting at the loop predecessor, climb up the predecessor chain, as long
   6339   // as there are predecessors that can be found that have unique successors
   6340   // leading to the original header.
   6341   for (std::pair<BasicBlock *, BasicBlock *>
   6342          Pair(L->getLoopPredecessor(), L->getHeader());
   6343        Pair.first;
   6344        Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
   6345 
   6346     BranchInst *LoopEntryPredicate =
   6347       dyn_cast<BranchInst>(Pair.first->getTerminator());
   6348     if (!LoopEntryPredicate ||
   6349         LoopEntryPredicate->isUnconditional())
   6350       continue;
   6351 
   6352     if (isImpliedCond(Pred, LHS, RHS,
   6353                       LoopEntryPredicate->getCondition(),
   6354                       LoopEntryPredicate->getSuccessor(0) != Pair.second))
   6355       return true;
   6356   }
   6357 
   6358   return false;
   6359 }
   6360 
   6361 /// RAII wrapper to prevent recursive application of isImpliedCond.
   6362 /// ScalarEvolution's PendingLoopPredicates set must be empty unless we are
   6363 /// currently evaluating isImpliedCond.
   6364 struct MarkPendingLoopPredicate {
   6365   Value *Cond;
   6366   DenseSet<Value*> &LoopPreds;
   6367   bool Pending;
   6368 
   6369   MarkPendingLoopPredicate(Value *C, DenseSet<Value*> &LP)
   6370     : Cond(C), LoopPreds(LP) {
   6371     Pending = !LoopPreds.insert(Cond).second;
   6372   }
   6373   ~MarkPendingLoopPredicate() {
   6374     if (!Pending)
   6375       LoopPreds.erase(Cond);
   6376   }
   6377 };
   6378 
   6379 /// isImpliedCond - Test whether the condition described by Pred, LHS,
   6380 /// and RHS is true whenever the given Cond value evaluates to true.
   6381 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
   6382                                     const SCEV *LHS, const SCEV *RHS,
   6383                                     Value *FoundCondValue,
   6384                                     bool Inverse) {
   6385   MarkPendingLoopPredicate Mark(FoundCondValue, PendingLoopPredicates);
   6386   if (Mark.Pending)
   6387     return false;
   6388 
   6389   // Recursively handle And and Or conditions.
   6390   if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
   6391     if (BO->getOpcode() == Instruction::And) {
   6392       if (!Inverse)
   6393         return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
   6394                isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
   6395     } else if (BO->getOpcode() == Instruction::Or) {
   6396       if (Inverse)
   6397         return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
   6398                isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
   6399     }
   6400   }
   6401 
   6402   ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
   6403   if (!ICI) return false;
   6404 
   6405   // Bail if the ICmp's operands' types are wider than the needed type
   6406   // before attempting to call getSCEV on them. This avoids infinite
   6407   // recursion, since the analysis of widening casts can require loop
   6408   // exit condition information for overflow checking, which would
   6409   // lead back here.
   6410   if (getTypeSizeInBits(LHS->getType()) <
   6411       getTypeSizeInBits(ICI->getOperand(0)->getType()))
   6412     return false;
   6413 
  // Now that we have found a conditional branch that dominates the loop or
  // controls the loop latch, check to see if it is the comparison we are
  // looking for.
   6416   ICmpInst::Predicate FoundPred;
   6417   if (Inverse)
   6418     FoundPred = ICI->getInversePredicate();
   6419   else
   6420     FoundPred = ICI->getPredicate();
   6421 
   6422   const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
   6423   const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
   6424 
   6425   // Balance the types. The case where FoundLHS' type is wider than
   6426   // LHS' type is checked for above.
   6427   if (getTypeSizeInBits(LHS->getType()) >
   6428       getTypeSizeInBits(FoundLHS->getType())) {
   6429     if (CmpInst::isSigned(FoundPred)) {
   6430       FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
   6431       FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
   6432     } else {
   6433       FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
   6434       FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
   6435     }
   6436   }
   6437 
   6438   // Canonicalize the query to match the way instcombine will have
   6439   // canonicalized the comparison.
   6440   if (SimplifyICmpOperands(Pred, LHS, RHS))
   6441     if (LHS == RHS)
   6442       return CmpInst::isTrueWhenEqual(Pred);
   6443   if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
   6444     if (FoundLHS == FoundRHS)
   6445       return CmpInst::isFalseWhenEqual(FoundPred);
   6446 
   6447   // Check to see if we can make the LHS or RHS match.
   6448   if (LHS == FoundRHS || RHS == FoundLHS) {
   6449     if (isa<SCEVConstant>(RHS)) {
   6450       std::swap(FoundLHS, FoundRHS);
   6451       FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
   6452     } else {
   6453       std::swap(LHS, RHS);
   6454       Pred = ICmpInst::getSwappedPredicate(Pred);
   6455     }
   6456   }
   6457 
   6458   // Check whether the found predicate is the same as the desired predicate.
   6459   if (FoundPred == Pred)
   6460     return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);
   6461 
   6462   // Check whether swapping the found predicate makes it the same as the
   6463   // desired predicate.
   6464   if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
   6465     if (isa<SCEVConstant>(RHS))
   6466       return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
   6467     else
   6468       return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
   6469                                    RHS, LHS, FoundLHS, FoundRHS);
   6470   }
   6471 
  // Check whether the actual condition is stronger than we need.
   6473   if (FoundPred == ICmpInst::ICMP_EQ)
   6474     if (ICmpInst::isTrueWhenEqual(Pred))
   6475       if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
   6476         return true;
   6477   if (Pred == ICmpInst::ICMP_NE)
   6478     if (!ICmpInst::isTrueWhenEqual(FoundPred))
   6479       if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
   6480         return true;
   6481 
   6482   // Otherwise assume the worst.
   6483   return false;
   6484 }
   6485 
   6486 /// isImpliedCondOperands - Test whether the condition described by Pred,
   6487 /// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
   6488 /// and FoundRHS is true.
   6489 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
   6490                                             const SCEV *LHS, const SCEV *RHS,
   6491                                             const SCEV *FoundLHS,
   6492                                             const SCEV *FoundRHS) {
   6493   return isImpliedCondOperandsHelper(Pred, LHS, RHS,
   6494                                      FoundLHS, FoundRHS) ||
   6495          // ~x < ~y --> x > y
   6496          isImpliedCondOperandsHelper(Pred, LHS, RHS,
   6497                                      getNotSCEV(FoundRHS),
   6498                                      getNotSCEV(FoundLHS));
   6499 }
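
// A standalone check of the ~x < ~y --> x > y identity used above, where
// ~x == -1 - x on two's-complement integers. This is an assumed demo, not
// part of the analysis, and is kept out of the build.
#if 0
static void checkNotIdentity(int X, int Y) {
  // ~X < ~Y  <=>  -1 - X < -1 - Y  <=>  X > Y, with no overflow since
  // bitwise-not is exact on every int value.
  assert((~X < ~Y) == (X > Y) && "bitwise not should reverse the order");
}
#endif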
   6500 
   6501 /// isImpliedCondOperandsHelper - Test whether the condition described by
   6502 /// Pred, LHS, and RHS is true whenever the condition described by Pred,
   6503 /// FoundLHS, and FoundRHS is true.
   6504 bool
   6505 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
   6506                                              const SCEV *LHS, const SCEV *RHS,
   6507                                              const SCEV *FoundLHS,
   6508                                              const SCEV *FoundRHS) {
   6509   switch (Pred) {
   6510   default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
   6511   case ICmpInst::ICMP_EQ:
   6512   case ICmpInst::ICMP_NE:
   6513     if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
   6514       return true;
   6515     break;
   6516   case ICmpInst::ICMP_SLT:
   6517   case ICmpInst::ICMP_SLE:
   6518     if (isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
   6519         isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, RHS, FoundRHS))
   6520       return true;
   6521     break;
   6522   case ICmpInst::ICMP_SGT:
   6523   case ICmpInst::ICMP_SGE:
   6524     if (isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
   6525         isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, RHS, FoundRHS))
   6526       return true;
   6527     break;
   6528   case ICmpInst::ICMP_ULT:
   6529   case ICmpInst::ICMP_ULE:
   6530     if (isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
   6531         isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, RHS, FoundRHS))
   6532       return true;
   6533     break;
   6534   case ICmpInst::ICMP_UGT:
   6535   case ICmpInst::ICMP_UGE:
   6536     if (isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
   6537         isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, RHS, FoundRHS))
   6538       return true;
   6539     break;
   6540   }
   6541 
   6542   return false;
   6543 }
   6544 
// Verify whether a linear IV with a positive stride can overflow in a
// less-than comparison, given the invariant term of the comparison, the
// stride, and any NSW/NUW flags on the recurrence.
   6548 bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
   6549                                          bool IsSigned, bool NoWrap) {
   6550   if (NoWrap) return false;
   6551 
   6552   unsigned BitWidth = getTypeSizeInBits(RHS->getType());
   6553   const SCEV *One = getConstant(Stride->getType(), 1);
   6554 
   6555   if (IsSigned) {
   6556     APInt MaxRHS = getSignedRange(RHS).getSignedMax();
   6557     APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
   6558     APInt MaxStrideMinusOne = getSignedRange(getMinusSCEV(Stride, One))
   6559                                 .getSignedMax();
   6560 
   6561     // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
   6562     return (MaxValue - MaxStrideMinusOne).slt(MaxRHS);
   6563   }
   6564 
   6565   APInt MaxRHS = getUnsignedRange(RHS).getUnsignedMax();
   6566   APInt MaxValue = APInt::getMaxValue(BitWidth);
   6567   APInt MaxStrideMinusOne = getUnsignedRange(getMinusSCEV(Stride, One))
   6568                               .getUnsignedMax();
   6569 
   6570   // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
   6571   return (MaxValue - MaxStrideMinusOne).ult(MaxRHS);
   6572 }
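
// For example (illustrative), with BitWidth = 8, unsigned MaxRHS = 250 and
// Stride = 10: MaxStrideMinusOne = 9 and (255 - 9).ult(250) holds, so the IV
// may step from 249 past 255 and wrap; we must report possible overflow.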
   6573 
// Verify whether a linear IV with a negative stride can overflow in a
// greater-than comparison, given the invariant term of the comparison, the
// stride, and any NSW/NUW flags on the recurrence.
   6577 bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
   6578                                          bool IsSigned, bool NoWrap) {
   6579   if (NoWrap) return false;
   6580 
   6581   unsigned BitWidth = getTypeSizeInBits(RHS->getType());
   6582   const SCEV *One = getConstant(Stride->getType(), 1);
   6583 
   6584   if (IsSigned) {
   6585     APInt MinRHS = getSignedRange(RHS).getSignedMin();
   6586     APInt MinValue = APInt::getSignedMinValue(BitWidth);
   6587     APInt MaxStrideMinusOne = getSignedRange(getMinusSCEV(Stride, One))
   6588                                .getSignedMax();
   6589 
   6590     // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
   6591     return (MinValue + MaxStrideMinusOne).sgt(MinRHS);
   6592   }
   6593 
   6594   APInt MinRHS = getUnsignedRange(RHS).getUnsignedMin();
   6595   APInt MinValue = APInt::getMinValue(BitWidth);
   6596   APInt MaxStrideMinusOne = getUnsignedRange(getMinusSCEV(Stride, One))
   6597                             .getUnsignedMax();
   6598 
   6599   // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
   6600   return (MinValue + MaxStrideMinusOne).ugt(MinRHS);
   6601 }
   6602 
// Compute the backedge taken count, given the interval difference, the
// stride, and the presence of equality in the comparison.
   6605 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step,
   6606                                             bool Equality) {
   6607   const SCEV *One = getConstant(Step->getType(), 1);
   6608   Delta = Equality ? getAddExpr(Delta, Step)
   6609                    : getAddExpr(Delta, getMinusSCEV(Step, One));
   6610   return getUDivExpr(Delta, Step);
   6611 }
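
// For example (illustrative), Delta = 9 and Step = 3 give (9 + 2) / 3 = 3,
// the ceiling of 9/3, without equality; with equality the count is
// (9 + 3) / 3 = 4, one extra iteration for the value that compares equal.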
   6612 
   6613 /// HowManyLessThans - Return the number of times a backedge containing the
   6614 /// specified less-than comparison will execute.  If not computable, return
   6615 /// CouldNotCompute.
   6616 ///
   6617 /// @param IsSubExpr is true when the LHS < RHS condition does not directly
   6618 /// control the branch. In this case, we can only compute an iteration count for
   6619 /// a subexpression that cannot overflow before evaluating true.
   6620 ScalarEvolution::ExitLimit
   6621 ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
   6622                                   const Loop *L, bool IsSigned,
   6623                                   bool IsSubExpr) {
   6624   // We handle only IV < Invariant
   6625   if (!isLoopInvariant(RHS, L))
   6626     return getCouldNotCompute();
   6627 
   6628   const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
   6629 
  // Avoid weird loops: LHS must be an affine AddRec for this loop.
   6631   if (!IV || IV->getLoop() != L || !IV->isAffine())
   6632     return getCouldNotCompute();
   6633 
   6634   bool NoWrap = !IsSubExpr &&
   6635                 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
   6636 
   6637   const SCEV *Stride = IV->getStepRecurrence(*this);
   6638 
   6639   // Avoid negative or zero stride values
   6640   if (!isKnownPositive(Stride))
   6641     return getCouldNotCompute();
   6642 
  // Avoid proven overflow cases: this will ensure that the backedge taken
  // count will not generate any unsigned overflow. Relaxed no-overflow
  // conditions exploit NoWrapFlags, allowing optimization in the presence of
  // undefined behavior, as in the C language.
   6647   if (!Stride->isOne() && doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
   6648     return getCouldNotCompute();
   6649 
   6650   ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
   6651                                       : ICmpInst::ICMP_ULT;
   6652   const SCEV *Start = IV->getStart();
   6653   const SCEV *End = RHS;
   6654   if (!isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
   6655     End = IsSigned ? getSMaxExpr(RHS, Start)
   6656                    : getUMaxExpr(RHS, Start);
   6657 
   6658   const SCEV *BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
   6659 
   6660   APInt MinStart = IsSigned ? getSignedRange(Start).getSignedMin()
   6661                             : getUnsignedRange(Start).getUnsignedMin();
   6662 
   6663   APInt MinStride = IsSigned ? getSignedRange(Stride).getSignedMin()
   6664                              : getUnsignedRange(Stride).getUnsignedMin();
   6665 
   6666   unsigned BitWidth = getTypeSizeInBits(LHS->getType());
   6667   APInt Limit = IsSigned ? APInt::getSignedMaxValue(BitWidth) - (MinStride - 1)
   6668                          : APInt::getMaxValue(BitWidth) - (MinStride - 1);
   6669 
  // Although End can be a MAX expression, we estimate MaxEnd considering only
  // the case End = RHS. This is safe because in the other case (End - Start)
  // is zero, leading to a zero maximum backedge taken count.
   6673   APInt MaxEnd =
   6674     IsSigned ? APIntOps::smin(getSignedRange(RHS).getSignedMax(), Limit)
   6675              : APIntOps::umin(getUnsignedRange(RHS).getUnsignedMax(), Limit);
   6676 
   6677   const SCEV *MaxBECount;
   6678   if (isa<SCEVConstant>(BECount))
   6679     MaxBECount = BECount;
   6680   else
   6681     MaxBECount = computeBECount(getConstant(MaxEnd - MinStart),
   6682                                 getConstant(MinStride), false);
   6683 
   6684   if (isa<SCEVCouldNotCompute>(MaxBECount))
   6685     MaxBECount = BECount;
   6686 
   6687   return ExitLimit(BECount, MaxBECount, /*MustExit=*/true);
   6688 }
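
// A minimal standalone sanity check of the count computed above, assuming
// Start < End, a positive Stride, and no unsigned wrap of the IV. This is an
// assumed demo, not SE code, and is kept out of the build.
#if 0
static void checkLessThanCount(unsigned Start, unsigned End, unsigned Stride) {
  // Count the iterations the loop would actually execute.
  unsigned Brute = 0;
  for (unsigned IV = Start; IV < End; IV += Stride)
    ++Brute;
  // computeBECount's non-equality case: ceil((End - Start) / Stride).
  unsigned Formula = (End - Start + (Stride - 1)) / Stride;
  assert(Brute == Formula && "backedge count formula mismatch");
}
#endif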
   6689 
   6690 ScalarEvolution::ExitLimit
   6691 ScalarEvolution::HowManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
   6692                                      const Loop *L, bool IsSigned,
   6693                                      bool IsSubExpr) {
   6694   // We handle only IV > Invariant
   6695   if (!isLoopInvariant(RHS, L))
   6696     return getCouldNotCompute();
   6697 
   6698   const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
   6699 
  // Avoid weird loops: LHS must be an affine AddRec for this loop.
   6701   if (!IV || IV->getLoop() != L || !IV->isAffine())
   6702     return getCouldNotCompute();
   6703 
   6704   bool NoWrap = !IsSubExpr &&
   6705                 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
   6706 
   6707   const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));
   6708 
   6709   // Avoid negative or zero stride values
   6710   if (!isKnownPositive(Stride))
   6711     return getCouldNotCompute();
   6712 
  // Avoid proven overflow cases: this will ensure that the backedge taken
  // count will not generate any unsigned overflow. Relaxed no-overflow
  // conditions exploit NoWrapFlags, allowing optimization in the presence of
  // undefined behavior, as in the C language.
   6717   if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
   6718     return getCouldNotCompute();
   6719 
   6720   ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
   6721                                       : ICmpInst::ICMP_UGT;
   6722 
   6723   const SCEV *Start = IV->getStart();
   6724   const SCEV *End = RHS;
   6725   if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS))
   6726     End = IsSigned ? getSMinExpr(RHS, Start)
   6727                    : getUMinExpr(RHS, Start);
   6728 
   6729   const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);
   6730 
   6731   APInt MaxStart = IsSigned ? getSignedRange(Start).getSignedMax()
   6732                             : getUnsignedRange(Start).getUnsignedMax();
   6733 
   6734   APInt MinStride = IsSigned ? getSignedRange(Stride).getSignedMin()
   6735                              : getUnsignedRange(Stride).getUnsignedMin();
   6736 
   6737   unsigned BitWidth = getTypeSizeInBits(LHS->getType());
   6738   APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
   6739                          : APInt::getMinValue(BitWidth) + (MinStride - 1);
   6740 
  // Although End can be a MIN expression, we estimate MinEnd considering only
  // the case End = RHS. This is safe because in the other case (Start - End)
  // is zero, leading to a zero maximum backedge taken count.
   6744   APInt MinEnd =
   6745     IsSigned ? APIntOps::smax(getSignedRange(RHS).getSignedMin(), Limit)
   6746              : APIntOps::umax(getUnsignedRange(RHS).getUnsignedMin(), Limit);
   6747 
   6749   const SCEV *MaxBECount = getCouldNotCompute();
   6750   if (isa<SCEVConstant>(BECount))
   6751     MaxBECount = BECount;
   6752   else
   6753     MaxBECount = computeBECount(getConstant(MaxStart - MinEnd),
   6754                                 getConstant(MinStride), false);
   6755 
   6756   if (isa<SCEVCouldNotCompute>(MaxBECount))
   6757     MaxBECount = BECount;
   6758 
   6759   return ExitLimit(BECount, MaxBECount, /*MustExit=*/true);
   6760 }
   6761 
   6762 /// getNumIterationsInRange - Return the number of iterations of this loop that
   6763 /// produce values in the specified constant range.  Another way of looking at
/// this is that it returns the first iteration number where the value is not
/// in the range, thus computing the exit count. If the iteration count can't
   6766 /// be computed, an instance of SCEVCouldNotCompute is returned.
   6767 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
   6768                                                     ScalarEvolution &SE) const {
   6769   if (Range.isFullSet())  // Infinite loop.
   6770     return SE.getCouldNotCompute();
   6771 
   6772   // If the start is a non-zero constant, shift the range to simplify things.
   6773   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
   6774     if (!SC->getValue()->isZero()) {
   6775       SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
   6776       Operands[0] = SE.getConstant(SC->getType(), 0);
   6777       const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
   6778                                              getNoWrapFlags(FlagNW));
   6779       if (const SCEVAddRecExpr *ShiftedAddRec =
   6780             dyn_cast<SCEVAddRecExpr>(Shifted))
   6781         return ShiftedAddRec->getNumIterationsInRange(
   6782                            Range.subtract(SC->getValue()->getValue()), SE);
   6783       // This is strange and shouldn't happen.
   6784       return SE.getCouldNotCompute();
   6785     }
   6786 
   6787   // The only time we can solve this is when we have all constant indices.
   6788   // Otherwise, we cannot determine the overflow conditions.
   6789   for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
   6790     if (!isa<SCEVConstant>(getOperand(i)))
   6791       return SE.getCouldNotCompute();
   6792 
   6794   // Okay at this point we know that all elements of the chrec are constants and
   6795   // that the start element is zero.
   6796 
   6797   // First check to see if the range contains zero.  If not, the first
   6798   // iteration exits.
   6799   unsigned BitWidth = SE.getTypeSizeInBits(getType());
   6800   if (!Range.contains(APInt(BitWidth, 0)))
   6801     return SE.getConstant(getType(), 0);
   6802 
   6803   if (isAffine()) {
   6804     // If this is an affine expression then we have this situation:
   6805     //   Solve {0,+,A} in Range  ===  Ax in Range
   6806 
   6807     // We know that zero is in the range.  If A is positive then we know that
   6808     // the upper value of the range must be the first possible exit value.
   6809     // If A is negative then the lower of the range is the last possible loop
   6810     // value.  Also note that we already checked for a full range.
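    //
    // For example (illustrative), solving {0,+,3} in Range = [0,10): A = 3,
    // End = 9, and ExitVal = (9 + 3) / 3 = 4; indeed 3*3 = 9 is still in the
    // range while 3*4 = 12 is not.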
    APInt One(BitWidth, 1);
    APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
    APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
   6814 
   6815     // The exit value should be (End+A)/A.
   6816     APInt ExitVal = (End + A).udiv(A);
   6817     ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);
   6818 
   6819     // Evaluate at the exit value.  If we really did fall out of the valid
   6820     // range, then we computed our trip count, otherwise wrap around or other
   6821     // things must have happened.
   6822     ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
   6823     if (Range.contains(Val->getValue()))
   6824       return SE.getCouldNotCompute();  // Something strange happened
   6825 
   6826     // Ensure that the previous value is in the range.  This is a sanity check.
   6827     assert(Range.contains(
   6828            EvaluateConstantChrecAtConstant(this,
   6829            ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
   6830            "Linear scev computation is off in a bad way!");
   6831     return SE.getConstant(ExitValue);
   6832   } else if (isQuadratic()) {
   6833     // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
   6834     // quadratic equation to solve it.  To do this, we must frame our problem in
   6835     // terms of figuring out when zero is crossed, instead of when
   6836     // Range.getUpper() is crossed.
   6837     SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
   6838     NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
   6839     const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(),
   6840                                              // getNoWrapFlags(FlagNW)
   6841                                              FlagAnyWrap);
   6842 
   6843     // Next, solve the constructed addrec
   6844     std::pair<const SCEV *,const SCEV *> Roots =
   6845       SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
   6846     const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
   6847     const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
   6848     if (R1) {
   6849       // Pick the smallest positive root value.
   6850       if (ConstantInt *CB =
   6851           dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
   6852                          R1->getValue(), R2->getValue()))) {
        if (!CB->getZExtValue())
   6854           std::swap(R1, R2);   // R1 is the minimum root now.
   6855 
   6856         // Make sure the root is not off by one.  The returned iteration should
   6857         // not be in the range, but the previous one should be.  When solving
   6858         // for "X*X < 5", for example, we should not return a root of 2.
   6859         ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
   6860                                                              R1->getValue(),
   6861                                                              SE);
   6862         if (Range.contains(R1Val->getValue())) {
   6863           // The next iteration must be out of the range...
   6864           ConstantInt *NextVal =
   6865                 ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1);
   6866 
   6867           R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
   6868           if (!Range.contains(R1Val->getValue()))
   6869             return SE.getConstant(NextVal);
   6870           return SE.getCouldNotCompute();  // Something strange happened
   6871         }
   6872 
   6873         // If R1 was not in the range, then it is a good return value.  Make
   6874         // sure that R1-1 WAS in the range though, just in case.
   6875         ConstantInt *NextVal =
   6876                ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1);
   6877         R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
   6878         if (Range.contains(R1Val->getValue()))
   6879           return R1;
   6880         return SE.getCouldNotCompute();  // Something strange happened
   6881       }
   6882     }
   6883   }
   6884 
   6885   return SE.getCouldNotCompute();
   6886 }
   6887 
   6888 namespace {
   6889 struct FindUndefs {
   6890   bool Found;
   6891   FindUndefs() : Found(false) {}
   6892 
   6893   bool follow(const SCEV *S) {
   6894     if (const SCEVUnknown *C = dyn_cast<SCEVUnknown>(S)) {
   6895       if (isa<UndefValue>(C->getValue()))
   6896         Found = true;
   6897     } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
   6898       if (isa<UndefValue>(C->getValue()))
   6899         Found = true;
   6900     }
   6901 
   6902     // Keep looking if we haven't found it yet.
   6903     return !Found;
   6904   }
   6905   bool isDone() const {
   6906     // Stop recursion if we have found an undef.
   6907     return Found;
   6908   }
   6909 };
   6910 }
   6911 
// Return true when S contains at least one undef value.
   6913 static inline bool
   6914 containsUndefs(const SCEV *S) {
   6915   FindUndefs F;
   6916   SCEVTraversal<FindUndefs> ST(F);
   6917   ST.visitAll(S);
   6918 
   6919   return F.Found;
   6920 }
   6921 
   6922 namespace {
   6923 // Collect all steps of SCEV expressions.
   6924 struct SCEVCollectStrides {
   6925   ScalarEvolution &SE;
   6926   SmallVectorImpl<const SCEV *> &Strides;
   6927 
   6928   SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
   6929       : SE(SE), Strides(S) {}
   6930 
   6931   bool follow(const SCEV *S) {
   6932     if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
   6933       Strides.push_back(AR->getStepRecurrence(SE));
   6934     return true;
   6935   }
   6936   bool isDone() const { return false; }
   6937 };
   6938 
   6939 // Collect all SCEVUnknown and SCEVMulExpr expressions.
   6940 struct SCEVCollectTerms {
   6941   SmallVectorImpl<const SCEV *> &Terms;
   6942 
   6943   SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T)
   6944       : Terms(T) {}
   6945 
   6946   bool follow(const SCEV *S) {
   6947     if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S)) {
   6948       if (!containsUndefs(S))
   6949         Terms.push_back(S);
   6950 
   6951       // Stop recursion: once we collected a term, do not walk its operands.
   6952       return false;
   6953     }
   6954 
   6955     // Keep looking.
   6956     return true;
   6957   }
   6958   bool isDone() const { return false; }
   6959 };
   6960 }
   6961 
   6962 /// Find parametric terms in this SCEVAddRecExpr.
   6963 void SCEVAddRecExpr::collectParametricTerms(
   6964     ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &Terms) const {
   6965   SmallVector<const SCEV *, 4> Strides;
   6966   SCEVCollectStrides StrideCollector(SE, Strides);
   6967   visitAll(this, StrideCollector);
   6968 
   6969   DEBUG({
   6970       dbgs() << "Strides:\n";
   6971       for (const SCEV *S : Strides)
   6972         dbgs() << *S << "\n";
   6973     });
   6974 
   6975   for (const SCEV *S : Strides) {
   6976     SCEVCollectTerms TermCollector(Terms);
   6977     visitAll(S, TermCollector);
   6978   }
   6979 
   6980   DEBUG({
   6981       dbgs() << "Terms:\n";
   6982       for (const SCEV *T : Terms)
   6983         dbgs() << *T << "\n";
   6984     });
   6985 }
   6986 
   6987 static const APInt srem(const SCEVConstant *C1, const SCEVConstant *C2) {
   6988   APInt A = C1->getValue()->getValue();
   6989   APInt B = C2->getValue()->getValue();
   6990   uint32_t ABW = A.getBitWidth();
   6991   uint32_t BBW = B.getBitWidth();
   6992 
   6993   if (ABW > BBW)
   6994     B = B.sext(ABW);
   6995   else if (ABW < BBW)
   6996     A = A.sext(BBW);
   6997 
   6998   return APIntOps::srem(A, B);
   6999 }
   7000 
   7001 static const APInt sdiv(const SCEVConstant *C1, const SCEVConstant *C2) {
   7002   APInt A = C1->getValue()->getValue();
   7003   APInt B = C2->getValue()->getValue();
   7004   uint32_t ABW = A.getBitWidth();
   7005   uint32_t BBW = B.getBitWidth();
   7006 
   7007   if (ABW > BBW)
   7008     B = B.sext(ABW);
   7009   else if (ABW < BBW)
   7010     A = A.sext(BBW);
   7011 
   7012   return APIntOps::sdiv(A, B);
   7013 }
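
// For example (illustrative), dividing an i8 constant -7 by an i16 constant
// 3: -7 is sign-extended to i16, and the C-style results are sdiv = -2 and
// srem = -1 (truncation toward zero).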
   7014 
   7015 namespace {
   7016 struct FindSCEVSize {
   7017   int Size;
   7018   FindSCEVSize() : Size(0) {}
   7019 
   7020   bool follow(const SCEV *S) {
   7021     ++Size;
   7022     // Keep looking at all operands of S.
   7023     return true;
   7024   }
   7025   bool isDone() const {
   7026     return false;
   7027   }
   7028 };
   7029 }
   7030 
   7031 // Returns the size of the SCEV S.
   7032 static inline int sizeOfSCEV(const SCEV *S) {
   7033   FindSCEVSize F;
   7034   SCEVTraversal<FindSCEVSize> ST(F);
   7035   ST.visitAll(S);
   7036   return F.Size;
   7037 }
   7038 
   7039 namespace {
   7040 
   7041 struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
   7043   // Computes the Quotient and Remainder of the division of Numerator by
   7044   // Denominator.
   7045   static void divide(ScalarEvolution &SE, const SCEV *Numerator,
   7046                      const SCEV *Denominator, const SCEV **Quotient,
   7047                      const SCEV **Remainder) {
   7048     assert(Numerator && Denominator && "Uninitialized SCEV");
   7049 
   7050     SCEVDivision D(SE, Numerator, Denominator);
   7051 
   7052     // Check for the trivial case here to avoid having to check for it in the
   7053     // rest of the code.
   7054     if (Numerator == Denominator) {
   7055       *Quotient = D.One;
   7056       *Remainder = D.Zero;
   7057       return;
   7058     }
   7059 
   7060     if (Numerator->isZero()) {
   7061       *Quotient = D.Zero;
   7062       *Remainder = D.Zero;
   7063       return;
   7064     }
   7065 
   7066     // Split the Denominator when it is a product.
    if (const SCEVMulExpr *T = dyn_cast<SCEVMulExpr>(Denominator)) {
   7068       const SCEV *Q, *R;
   7069       *Quotient = Numerator;
   7070       for (const SCEV *Op : T->operands()) {
   7071         divide(SE, *Quotient, Op, &Q, &R);
   7072         *Quotient = Q;
   7073 
   7074         // Bail out when the Numerator is not divisible by one of the terms of
   7075         // the Denominator.
   7076         if (!R->isZero()) {
   7077           *Quotient = D.Zero;
   7078           *Remainder = Numerator;
   7079           return;
   7080         }
   7081       }
   7082       *Remainder = D.Zero;
   7083       return;
   7084     }
   7085 
   7086     D.visit(Numerator);
   7087     *Quotient = D.Quotient;
   7088     *Remainder = D.Remainder;
   7089   }
   7090 
   7091   SCEVDivision(ScalarEvolution &S, const SCEV *Numerator, const SCEV *Denominator)
   7092       : SE(S), Denominator(Denominator) {
   7093     Zero = SE.getConstant(Denominator->getType(), 0);
   7094     One = SE.getConstant(Denominator->getType(), 1);
   7095 
   7096     // By default, we don't know how to divide Expr by Denominator.
   7097     // Providing the default here simplifies the rest of the code.
   7098     Quotient = Zero;
   7099     Remainder = Numerator;
   7100   }
   7101 
   7102   // Except in the trivial case described above, we do not know how to divide
   7103   // Expr by Denominator for these nodes; their empty visitors keep the defaults.
   7104   void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
   7105   void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
   7106   void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
   7107   void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
   7108   void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
   7109   void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
   7110   void visitUnknown(const SCEVUnknown *Numerator) {}
   7111   void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}
   7112 
   7113   void visitConstant(const SCEVConstant *Numerator) {
   7114     if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
   7115       Quotient = SE.getConstant(sdiv(Numerator, D));
   7116       Remainder = SE.getConstant(srem(Numerator, D));
   7117       return;
   7118     }
   7119   }
   7120 
   7121   void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
   7122     const SCEV *StartQ, *StartR, *StepQ, *StepR;
   7123     assert(Numerator->isAffine() && "Numerator should be affine");
   7124     divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
   7125     divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
   7126     Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
   7127                                 Numerator->getNoWrapFlags());
   7128     Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
   7129                                  Numerator->getNoWrapFlags());
   7130   }
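
          // A hedged illustration: dividing the affine {%a,+,(8 * %o)}<%L> by %o
          // divides start and step separately, giving Quotient = {0,+,8}<%L> and,
          // because an AddRec with a zero step folds back to its start value,
          // Remainder = %a.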
   7131 
   7132   void visitAddExpr(const SCEVAddExpr *Numerator) {
   7133     SmallVector<const SCEV *, 2> Qs, Rs;
   7134     Type *Ty = Denominator->getType();
   7135 
   7136     for (const SCEV *Op : Numerator->operands()) {
   7137       const SCEV *Q, *R;
   7138       divide(SE, Op, Denominator, &Q, &R);
   7139 
   7140       // Bail out if types do not match.
   7141       if (Ty != Q->getType() || Ty != R->getType()) {
   7142         Quotient = Zero;
   7143         Remainder = Numerator;
   7144         return;
   7145       }
   7146 
   7147       Qs.push_back(Q);
   7148       Rs.push_back(R);
   7149     }
   7150 
   7151     if (Qs.size() == 1) {
   7152       Quotient = Qs[0];
   7153       Remainder = Rs[0];
   7154       return;
   7155     }
   7156 
   7157     Quotient = SE.getAddExpr(Qs);
   7158     Remainder = SE.getAddExpr(Rs);
   7159   }
   7160 
   7161   void visitMulExpr(const SCEVMulExpr *Numerator) {
   7162     SmallVector<const SCEV *, 2> Qs;
   7163     Type *Ty = Denominator->getType();
   7164 
   7165     bool FoundDenominatorTerm = false;
   7166     for (const SCEV *Op : Numerator->operands()) {
   7167       // Bail out if types do not match.
   7168       if (Ty != Op->getType()) {
   7169         Quotient = Zero;
   7170         Remainder = Numerator;
   7171         return;
   7172       }
   7173 
   7174       if (FoundDenominatorTerm) {
   7175         Qs.push_back(Op);
   7176         continue;
   7177       }
   7178 
   7179       // Check whether Denominator divides one of the product operands.
   7180       const SCEV *Q, *R;
   7181       divide(SE, Op, Denominator, &Q, &R);
   7182       if (!R->isZero()) {
   7183         Qs.push_back(Op);
   7184         continue;
   7185       }
   7186 
   7187       // Bail out if types do not match.
   7188       if (Ty != Q->getType()) {
   7189         Quotient = Zero;
   7190         Remainder = Numerator;
   7191         return;
   7192       }
   7193 
   7194       FoundDenominatorTerm = true;
   7195       Qs.push_back(Q);
   7196     }
   7197 
   7198     if (FoundDenominatorTerm) {
   7199       Remainder = Zero;
   7200       if (Qs.size() == 1)
   7201         Quotient = Qs[0];
   7202       else
   7203         Quotient = SE.getMulExpr(Qs);
   7204       return;
   7205     }
   7206 
   7207     if (!isa<SCEVUnknown>(Denominator)) {
   7208       Quotient = Zero;
   7209       Remainder = Numerator;
   7210       return;
   7211     }
   7212 
   7213     // The Remainder is obtained by replacing Denominator by 0 in Numerator.
   7214     ValueToValueMap RewriteMap;
   7215     RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
   7216         cast<SCEVConstant>(Zero)->getValue();
   7217     Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
   7218 
   7219     if (Remainder->isZero()) {
   7220       // The Quotient is obtained by replacing Denominator by 1 in Numerator.
   7221       RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
   7222           cast<SCEVConstant>(One)->getValue();
   7223       Quotient =
   7224           SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
   7225       return;
   7226     }
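
            // A hedged sketch of the substitution above with hypothetical values:
            // for Numerator = (%n * %m) and Denominator = %n, rewriting %n := 0
            // gives Remainder = 0, and rewriting %n := 1 gives Quotient = %m.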
   7227 
   7228     // Quotient is (Numerator - Remainder) divided by Denominator.
   7229     const SCEV *Q, *R;
   7230     const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
   7231     if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator)) {
   7232       // This SCEV does not seem to simplify: fail the division here.
   7233       Quotient = Zero;
   7234       Remainder = Numerator;
   7235       return;
   7236     }
   7237     divide(SE, Diff, Denominator, &Q, &R);
   7238     assert(R == Zero &&
   7239            "Denominator should evenly divide (Numerator - Remainder)");
   7240     Quotient = Q;
   7241   }
   7242 
   7243 private:
   7244   ScalarEvolution &SE;
   7245   const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
   7246 };
   7247 }
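
        // A minimal usage sketch for SCEVDivision (assuming SE, Numerator and
        // Denominator are in scope in this file):
        //
        //   const SCEV *Q, *R;
        //   SCEVDivision::divide(SE, Numerator, Denominator, &Q, &R);
        //
        // When the division is exact, Numerator == Q * Denominator and R is
        // zero; when it cannot be represented, Q is zero and R is the unchanged
        // Numerator.  For example, dividing {0,+,(8 * %m)}<%L> by 8 would yield
        // the quotient {0,+,%m}<%L> and a zero remainder.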
   7248 
   7249 static bool findArrayDimensionsRec(ScalarEvolution &SE,
   7250                                    SmallVectorImpl<const SCEV *> &Terms,
   7251                                    SmallVectorImpl<const SCEV *> &Sizes) {
   7252   int Last = Terms.size() - 1;
   7253   const SCEV *Step = Terms[Last];
   7254 
   7255   // End of recursion.
   7256   if (Last == 0) {
   7257     if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) {
   7258       SmallVector<const SCEV *, 2> Qs;
   7259       for (const SCEV *Op : M->operands())
   7260         if (!isa<SCEVConstant>(Op))
   7261           Qs.push_back(Op);
   7262 
   7263       Step = SE.getMulExpr(Qs);
   7264     }
   7265 
   7266     Sizes.push_back(Step);
   7267     return true;
   7268   }
   7269 
   7270   for (const SCEV *&Term : Terms) {
   7271     // Normalize the terms before the next call to findArrayDimensionsRec.
   7272     const SCEV *Q, *R;
   7273     SCEVDivision::divide(SE, Term, Step, &Q, &R);
   7274 
   7275     // Bail out when GCD does not evenly divide one of the terms.
   7276     if (!R->isZero())
   7277       return false;
   7278 
   7279     Term = Q;
   7280   }
   7281 
   7282   // Remove all SCEVConstants.
   7283   Terms.erase(std::remove_if(Terms.begin(), Terms.end(), [](const SCEV *E) {
   7284                 return isa<SCEVConstant>(E);
   7285               }),
   7286               Terms.end());
   7287 
   7288   if (!Terms.empty())
   7289     if (!findArrayDimensionsRec(SE, Terms, Sizes))
   7290       return false;
   7291 
   7292   Sizes.push_back(Step);
   7293   return true;
   7294 }
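
        // A worked (hypothetical) run of the recursion above: starting from the
        // normalized terms {(%m * %o), %o}, the last term %o becomes the step;
        // dividing every term by it leaves {%m, 1}; dropping the constant leaves
        // {%m}, which the recursive call records; unwinding then pushes %o, so
        // Sizes = {%m, %o}.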
   7295 
   7296 namespace {
   7297 struct FindParameter {
   7298   bool FoundParameter;
   7299   FindParameter() : FoundParameter(false) {}
   7300 
   7301   bool follow(const SCEV *S) {
   7302     if (isa<SCEVUnknown>(S)) {
   7303       FoundParameter = true;
   7304       // Stop recursion: we found a parameter.
   7305       return false;
   7306     }
   7307     // Keep looking.
   7308     return true;
   7309   }
   7310   bool isDone() const {
   7311     // Stop recursion if we have found a parameter.
   7312     return FoundParameter;
   7313   }
   7314 };
   7315 }
   7316 
   7317 // Returns true when S contains at least one SCEVUnknown parameter.
   7318 static inline bool
   7319 containsParameters(const SCEV *S) {
   7320   FindParameter F;
   7321   SCEVTraversal<FindParameter> ST(F);
   7322   ST.visitAll(S);
   7323 
   7324   return F.FoundParameter;
   7325 }
   7326 
   7327 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter.
   7328 static inline bool
   7329 containsParameters(SmallVectorImpl<const SCEV *> &Terms) {
   7330   for (const SCEV *T : Terms)
   7331     if (containsParameters(T))
   7332       return true;
   7333   return false;
   7334 }
   7335 
   7336 // Return the number of product terms in S.
   7337 static inline int numberOfTerms(const SCEV *S) {
   7338   if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S))
   7339     return Expr->getNumOperands();
   7340   return 1;
   7341 }
   7342 
   7343 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) {
   7344   if (isa<SCEVConstant>(T))
   7345     return nullptr;
   7346 
   7347   if (isa<SCEVUnknown>(T))
   7348     return T;
   7349 
   7350   if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) {
   7351     SmallVector<const SCEV *, 2> Factors;
   7352     for (const SCEV *Op : M->operands())
   7353       if (!isa<SCEVConstant>(Op))
   7354         Factors.push_back(Op);
   7355 
   7356     return SE.getMulExpr(Factors);
   7357   }
   7358 
   7359   return T;
   7360 }
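
        // For instance, removeConstantFactors turns (8 * %m * %o) into
        // (%m * %o), and maps a bare constant such as 8 to nullptr so that the
        // caller drops it.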
   7361 
   7362 /// Return the size of an element read or written by Inst.
   7363 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
   7364   Type *Ty;
   7365   if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
   7366     Ty = Store->getValueOperand()->getType();
   7367   else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
   7368     Ty = Load->getType();
   7369   else
   7370     return nullptr;
   7371 
   7372   Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
   7373   return getSizeOfExpr(ETy, Ty);
   7374 }
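
        // A hedged illustration: for a hypothetical "store double %v, double* %p",
        // Ty is double and the result is the SCEV constant 8 (the size of a
        // double) in the effective pointer-width integer type.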
   7375 
   7376 /// Second step of delinearization: compute the array dimensions Sizes from the
   7377 /// set of Terms extracted from the memory access function of a SCEVAddRecExpr.
   7378 void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
   7379                                           SmallVectorImpl<const SCEV *> &Sizes,
   7380                                           const SCEV *ElementSize) const {
   7381 
   7382   if (Terms.empty() || !ElementSize)
   7383     return;
   7384 
   7385   // Early return when Terms do not contain parameters: we do not delinearize
   7386   // non-parametric SCEVs.
   7387   if (!containsParameters(Terms))
   7388     return;
   7389 
   7390   DEBUG({
   7391       dbgs() << "Terms:\n";
   7392       for (const SCEV *T : Terms)
   7393         dbgs() << *T << "\n";
   7394     });
   7395 
   7396   // Remove duplicates.
   7397   std::sort(Terms.begin(), Terms.end());
   7398   Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());
   7399 
   7400   // Put larger terms first.
   7401   std::sort(Terms.begin(), Terms.end(), [](const SCEV *LHS, const SCEV *RHS) {
   7402     return numberOfTerms(LHS) > numberOfTerms(RHS);
   7403   });
   7404 
   7405   ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
   7406 
   7407   // Divide all terms by the element size.
   7408   for (const SCEV *&Term : Terms) {
   7409     const SCEV *Q, *R;
   7410     SCEVDivision::divide(SE, Term, ElementSize, &Q, &R);
   7411     Term = Q;
   7412   }
   7413 
   7414   SmallVector<const SCEV *, 4> NewTerms;
   7415 
   7416   // Remove constant factors.
   7417   for (const SCEV *T : Terms)
   7418     if (const SCEV *NewT = removeConstantFactors(SE, T))
   7419       NewTerms.push_back(NewT);
   7420 
   7421   DEBUG({
   7422       dbgs() << "Terms after sorting:\n";
   7423       for (const SCEV *T : NewTerms)
   7424         dbgs() << *T << "\n";
   7425     });
   7426 
   7427   if (NewTerms.empty() ||
   7428       !findArrayDimensionsRec(SE, NewTerms, Sizes)) {
   7429     Sizes.clear();
   7430     return;
   7431   }
   7432 
   7433   // The last element to be pushed into Sizes is the size of an element.
   7434   Sizes.push_back(ElementSize);
   7435 
   7436   DEBUG({
   7437       dbgs() << "Sizes:\n";
   7438       for (const SCEV *S : Sizes)
   7439         dbgs() << *S << "\n";
   7440     });
   7441 }
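
        // Tying the steps above together on the A[i][j][k] example documented
        // below: starting from Terms = {(8 * %m * %o), (8 * %o), 8} with
        // ElementSize = 8, dividing by the element size gives {(%m * %o), %o, 1},
        // removing constant factors gives {(%m * %o), %o}, and
        // findArrayDimensionsRec plus the final push of ElementSize produces
        // Sizes = {%m, %o, 8}.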
   7442 
   7443 /// Third step of delinearization: compute the access functions for the
   7444 /// Subscripts based on the dimensions in Sizes.
   7445 void SCEVAddRecExpr::computeAccessFunctions(
   7446     ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &Subscripts,
   7447     SmallVectorImpl<const SCEV *> &Sizes) const {
   7448 
   7449   // Early exit in case this SCEV is not an affine multivariate function.
   7450   if (Sizes.empty() || !this->isAffine())
   7451     return;
   7452 
   7453   const SCEV *Res = this;
   7454   int Last = Sizes.size() - 1;
   7455   for (int i = Last; i >= 0; i--) {
   7456     const SCEV *Q, *R;
   7457     SCEVDivision::divide(SE, Res, Sizes[i], &Q, &R);
   7458 
   7459     DEBUG({
   7460         dbgs() << "Res: " << *Res << "\n";
   7461         dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
   7462         dbgs() << "Res divided by Sizes[i]:\n";
   7463         dbgs() << "Quotient: " << *Q << "\n";
   7464         dbgs() << "Remainder: " << *R << "\n";
   7465       });
   7466 
   7467     Res = Q;
   7468 
   7469     // Do not record the last subscript: this division is by the element size,
   7470     // not by an array dimension.
   7471     if (i == Last) {
   7472 
   7473       // Bail out if the remainder is too complex.
   7474       if (isa<SCEVAddRecExpr>(R)) {
   7475         Subscripts.clear();
   7476         Sizes.clear();
   7477         return;
   7478       }
   7479 
   7480       continue;
   7481     }
   7482 
   7483     // Record the access function for the current subscript.
   7484     Subscripts.push_back(R);
   7485   }
   7486 
   7487   // Also push the quotient of the last division: after the reversal below, it
   7488   // is the first subscript, i.e. the access function of the outermost dimension.
   7489   Subscripts.push_back(Res);
   7490 
   7491   std::reverse(Subscripts.begin(), Subscripts.end());
   7492 
   7493   DEBUG({
   7494       dbgs() << "Subscripts:\n";
   7495       for (const SCEV *S : Subscripts)
   7496         dbgs() << *S << "\n";
   7497     });
   7498 }
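
        // Continuing the same (hypothetical) example with Sizes = {%m, %o, 8}:
        // the division by 8 strips the element size, and its remainder %A (the
        // base) is the skipped last subscript; the division by %o leaves
        // R = {0,+,1}<%for.k>, the innermost subscript; the division by %m
        // leaves R = {0,+,1}<%for.j> and the final quotient {0,+,1}<%for.i>,
        // which the reversal puts first as the outermost subscript.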
   7499 
   7500 /// Splits the SCEV into two vectors of SCEVs representing the subscripts and
   7501 /// sizes of an array access.  Delinearization proceeds in three steps: it
   7502 /// collects the parametric terms of the access function, computes the array
   7503 /// dimensions from those terms, and then computes one access function per
   7504 /// dimension.  The algorithm pattern-matches subexpressions in the stride and
   7505 /// base of the SCEV, which corresponds to computing a GCD (greatest common
   7506 /// divisor) of base and stride.  When it fails, Subscripts and Sizes are empty.
   7507 ///
   7508 /// For example: when analyzing the memory access A[i][j][k] in this loop nest
   7509 ///
   7510 ///  void foo(long n, long m, long o, double A[n][m][o]) {
   7511 ///
   7512 ///    for (long i = 0; i < n; i++)
   7513 ///      for (long j = 0; j < m; j++)
   7514 ///        for (long k = 0; k < o; k++)
   7515 ///          A[i][j][k] = 1.0;
   7516 ///  }
   7517 ///
   7518 /// the delinearization input is the following AddRec SCEV:
   7519 ///
   7520 ///  AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
   7521 ///
   7522 /// From this SCEV, we are able to say that the base offset of the access is %A
   7523 /// because it appears as an offset that does not divide any of the strides in
   7524 /// the loops:
   7525 ///
   7526 ///  CHECK: Base offset: %A
   7527 ///
   7528 /// and then SCEV->delinearize determines the size of some of the dimensions of
   7529 /// the array as these are the multiples by which the strides are happening:
   7530 ///
   7531 ///  CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
   7532 ///
   7533 /// Note that the outermost dimension remains of UnknownSize because there are
   7534 /// no strides that would help identify the size of that dimension: when the
   7535 /// array has been statically allocated, one could compute its size by dividing
   7536 /// the overall size of the array by the size of the known dimensions:
   7537 /// %m * %o * 8.
   7538 ///
   7539 /// Finally delinearize provides the access functions for the array reference
   7540 /// that corresponds to A[i][j][k] in the above C testcase:
   7541 ///
   7542 ///  CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
   7543 ///
   7544 /// The testcases are checking the output of a function pass:
   7545 /// DelinearizationPass that walks through all loads and stores of a function
   7546 /// asking for the SCEV of the memory access with respect to all enclosing
   7547 /// loops, calling SCEV->delinearize on that and printing the results.
   7549 void SCEVAddRecExpr::delinearize(ScalarEvolution &SE,
   7550                                  SmallVectorImpl<const SCEV *> &Subscripts,
   7551                                  SmallVectorImpl<const SCEV *> &Sizes,
   7552                                  const SCEV *ElementSize) const {
   7553   // First step: collect parametric terms.
   7554   SmallVector<const SCEV *, 4> Terms;
   7555   collectParametricTerms(SE, Terms);
   7556 
   7557   if (Terms.empty())
   7558     return;
   7559 
   7560   // Second step: find subscript sizes.
   7561   SE.findArrayDimensions(Terms, Sizes, ElementSize);
   7562 
   7563   if (Sizes.empty())
   7564     return;
   7565 
   7566   // Third step: compute the access functions for each subscript.
   7567   computeAccessFunctions(SE, Subscripts, Sizes);
   7568 
   7569   if (Subscripts.empty())
   7570     return;
   7571 
   7572   DEBUG({
   7573       dbgs() << "successfully delinearized " << *this << "\n";
   7574       dbgs() << "ArrayDecl[UnknownSize]";
   7575       for (const SCEV *S : Sizes)
   7576         dbgs() << "[" << *S << "]";
   7577 
   7578       dbgs() << "\nArrayRef";
   7579       for (const SCEV *S : Subscripts)
   7580         dbgs() << "[" << *S << "]";
   7581       dbgs() << "\n";
   7582     });
   7583 }
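
        // A minimal caller-side sketch (hypothetical names) in the spirit of the
        // DelinearizationPass described above:
        //
        //   if (const SCEVAddRecExpr *AR =
        //           dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PtrOperand))) {
        //     SmallVector<const SCEV *, 3> Subscripts, Sizes;
        //     AR->delinearize(SE, Subscripts, Sizes, SE.getElementSize(Inst));
        //     // On failure, both vectors are left empty.
        //   }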
   7584 
   7585 //===----------------------------------------------------------------------===//
   7586 //                   SCEVCallbackVH Class Implementation
   7587 //===----------------------------------------------------------------------===//
   7588 
   7589 void ScalarEvolution::SCEVCallbackVH::deleted() {
   7590   assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
   7591   if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
   7592     SE->ConstantEvolutionLoopExitValue.erase(PN);
   7593   SE->ValueExprMap.erase(getValPtr());
   7594   // this now dangles!
   7595 }
   7596 
   7597 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
   7598   assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
   7599 
   7600   // Forget all the expressions associated with users of the old value,
   7601   // so that future queries will recompute the expressions using the new
   7602   // value.
   7603   Value *Old = getValPtr();
   7604   SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end());
   7605   SmallPtrSet<User *, 8> Visited;
   7606   while (!Worklist.empty()) {
   7607     User *U = Worklist.pop_back_val();
   7608     // Deleting the Old value will cause this to dangle. Postpone
   7609     // that until everything else is done.
   7610     if (U == Old)
   7611       continue;
   7612     if (!Visited.insert(U))
   7613       continue;
   7614     if (PHINode *PN = dyn_cast<PHINode>(U))
   7615       SE->ConstantEvolutionLoopExitValue.erase(PN);
   7616     SE->ValueExprMap.erase(U);
   7617     Worklist.insert(Worklist.end(), U->user_begin(), U->user_end());
   7618   }
   7619   // Delete the Old value.
   7620   if (PHINode *PN = dyn_cast<PHINode>(Old))
   7621     SE->ConstantEvolutionLoopExitValue.erase(PN);
   7622   SE->ValueExprMap.erase(Old);
   7623   // this now dangles!
   7624 }
   7625 
   7626 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
   7627   : CallbackVH(V), SE(se) {}
   7628 
   7629 //===----------------------------------------------------------------------===//
   7630 //                   ScalarEvolution Class Implementation
   7631 //===----------------------------------------------------------------------===//
   7632 
   7633 ScalarEvolution::ScalarEvolution()
   7634   : FunctionPass(ID), ValuesAtScopes(64), LoopDispositions(64),
   7635     BlockDispositions(64), FirstUnknown(nullptr) {
   7636   initializeScalarEvolutionPass(*PassRegistry::getPassRegistry());
   7637 }
   7638 
   7639 bool ScalarEvolution::runOnFunction(Function &F) {
   7640   this->F = &F;
   7641   LI = &getAnalysis<LoopInfo>();
   7642   DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
   7643   DL = DLP ? &DLP->getDataLayout() : nullptr;
   7644   TLI = &getAnalysis<TargetLibraryInfo>();
   7645   DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
   7646   return false;
   7647 }
   7648 
   7649 void ScalarEvolution::releaseMemory() {
   7650   // Iterate through all the SCEVUnknown instances and call their
   7651   // destructors, so that they release their references to their values.
   7652   for (SCEVUnknown *U = FirstUnknown; U; U = U->Next)
   7653     U->~SCEVUnknown();
   7654   FirstUnknown = nullptr;
   7655 
   7656   ValueExprMap.clear();
   7657 
   7658   // Free any extra memory created for ExitNotTakenInfo in the unlikely event
   7659   // that a loop had multiple computable exits.
   7660   for (DenseMap<const Loop*, BackedgeTakenInfo>::iterator I =
   7661          BackedgeTakenCounts.begin(), E = BackedgeTakenCounts.end();
   7662        I != E; ++I) {
   7663     I->second.clear();
   7664   }
   7665 
   7666   assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
   7667 
   7668   BackedgeTakenCounts.clear();
   7669   ConstantEvolutionLoopExitValue.clear();
   7670   ValuesAtScopes.clear();
   7671   LoopDispositions.clear();
   7672   BlockDispositions.clear();
   7673   UnsignedRanges.clear();
   7674   SignedRanges.clear();
   7675   UniqueSCEVs.clear();
   7676   SCEVAllocator.Reset();
   7677 }
   7678 
   7679 void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
   7680   AU.setPreservesAll();
   7681   AU.addRequiredTransitive<LoopInfo>();
   7682   AU.addRequiredTransitive<DominatorTreeWrapperPass>();
   7683   AU.addRequired<TargetLibraryInfo>();
   7684 }
   7685 
   7686 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
   7687   return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
   7688 }
   7689 
   7690 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
   7691                           const Loop *L) {
   7692   // Print all inner loops first
   7693   for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
   7694     PrintLoopInfo(OS, SE, *I);
   7695 
   7696   OS << "Loop ";
   7697   L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
   7698   OS << ": ";
   7699 
   7700   SmallVector<BasicBlock *, 8> ExitBlocks;
   7701   L->getExitBlocks(ExitBlocks);
   7702   if (ExitBlocks.size() != 1)
   7703     OS << "<multiple exits> ";
   7704 
   7705   if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
   7706     OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
   7707   } else {
   7708     OS << "Unpredictable backedge-taken count. ";
   7709   }
   7710 
   7711   OS << "\n"
   7712         "Loop ";
   7713   L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
   7714   OS << ": ";
   7715 
   7716   if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
   7717     OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
   7718   } else {
   7719     OS << "Unpredictable max backedge-taken count. ";
   7720   }
   7721 
   7722   OS << "\n";
   7723 }
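
        // The output produced above looks like the following (loop name and
        // counts are hypothetical):
        //
        //   Loop %for.body: backedge-taken count is (-1 + %n)
        //   Loop %for.body: max backedge-taken count is -1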
   7724 
   7725 void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
   7726   // ScalarEvolution's implementation of the print method is to print
   7727   // out SCEV values of all instructions that are interesting. Doing
   7728   // this potentially causes it to create new SCEV objects though,
   7729   // which technically conflicts with the const qualifier. This isn't
   7730   // observable from outside the class though, so casting away the
   7731   // const isn't dangerous.
   7732   ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
   7733 
   7734   OS << "Classifying expressions for: ";
   7735   F->printAsOperand(OS, /*PrintType=*/false);
   7736   OS << "\n";
   7737   for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
   7738     if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) {
   7739       OS << *I << '\n';
   7740       OS << "  -->  ";
   7741       const SCEV *SV = SE.getSCEV(&*I);
   7742       SV->print(OS);
   7743 
   7744       const Loop *L = LI->getLoopFor((*I).getParent());
   7745 
   7746       const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
   7747       if (AtUse != SV) {
   7748         OS << "  -->  ";
   7749         AtUse->print(OS);
   7750       }
   7751 
   7752       if (L) {
   7753         OS << "\t\t" "Exits: ";
   7754         const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
   7755         if (!SE.isLoopInvariant(ExitValue, L)) {
   7756           OS << "<<Unknown>>";
   7757         } else {
   7758           OS << *ExitValue;
   7759         }
   7760       }
   7761 
   7762       OS << "\n";
   7763     }
   7764 
   7765   OS << "Determining loop execution counts for: ";
   7766   F->printAsOperand(OS, /*PrintType=*/false);
   7767   OS << "\n";
   7768   for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
   7769     PrintLoopInfo(OS, &SE, *I);
   7770 }
   7771 
   7772 ScalarEvolution::LoopDisposition
   7773 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
   7774   SmallVector<std::pair<const Loop *, LoopDisposition>, 2> &Values = LoopDispositions[S];
   7775   for (unsigned u = 0; u < Values.size(); u++) {
   7776     if (Values[u].first == L)
   7777       return Values[u].second;
   7778   }
   7779   Values.push_back(std::make_pair(L, LoopVariant));
   7780   LoopDisposition D = computeLoopDisposition(S, L);
   7781   SmallVector<std::pair<const Loop *, LoopDisposition>, 2> &Values2 = LoopDispositions[S];
   7782   for (unsigned u = Values2.size(); u > 0; u--) {
   7783     if (Values2[u - 1].first == L) {
   7784       Values2[u - 1].second = D;
   7785       break;
   7786     }
   7787   }
   7788   return D;
   7789 }
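
        // Note on the memoization above: the conservative LoopVariant placeholder
        // is inserted before computeLoopDisposition runs, so a query that
        // re-enters getLoopDisposition for the same (S, L) pair terminates with
        // the safe answer instead of recursing forever; the placeholder is then
        // patched with the computed disposition.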
   7790 
   7791 ScalarEvolution::LoopDisposition
   7792 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
   7793   switch (static_cast<SCEVTypes>(S->getSCEVType())) {
   7794   case scConstant:
   7795     return LoopInvariant;
   7796   case scTruncate:
   7797   case scZeroExtend:
   7798   case scSignExtend:
   7799     return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
   7800   case scAddRecExpr: {
   7801     const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
   7802 
   7803     // If L is the addrec's loop, it's computable.
   7804     if (AR->getLoop() == L)
   7805       return LoopComputable;
   7806 
   7807     // Add recurrences are never invariant in the function-body (null loop).
   7808     if (!L)
   7809       return LoopVariant;
   7810 
   7811     // This recurrence is variant w.r.t. L if L contains AR's loop.
   7812     if (L->contains(AR->getLoop()))
   7813       return LoopVariant;
   7814 
   7815     // This recurrence is invariant w.r.t. L if AR's loop contains L.
   7816     if (AR->getLoop()->contains(L))
   7817       return LoopInvariant;
   7818 
   7819     // This recurrence is variant w.r.t. L if any of its operands
   7820     // are variant.
   7821     for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
   7822          I != E; ++I)
   7823       if (!isLoopInvariant(*I, L))
   7824         return LoopVariant;
   7825 
   7826     // Otherwise it's loop-invariant.
   7827     return LoopInvariant;
   7828   }
   7829   case scAddExpr:
   7830   case scMulExpr:
   7831   case scUMaxExpr:
   7832   case scSMaxExpr: {
   7833     const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
   7834     bool HasVarying = false;
   7835     for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
   7836          I != E; ++I) {
   7837       LoopDisposition D = getLoopDisposition(*I, L);
   7838       if (D == LoopVariant)
   7839         return LoopVariant;
   7840       if (D == LoopComputable)
   7841         HasVarying = true;
   7842     }
   7843     return HasVarying ? LoopComputable : LoopInvariant;
   7844   }
   7845   case scUDivExpr: {
   7846     const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
   7847     LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
   7848     if (LD == LoopVariant)
   7849       return LoopVariant;
   7850     LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
   7851     if (RD == LoopVariant)
   7852       return LoopVariant;
   7853     return (LD == LoopInvariant && RD == LoopInvariant) ?
   7854            LoopInvariant : LoopComputable;
   7855   }
   7856   case scUnknown:
   7857     // All non-instruction values are loop invariant.  All instructions are loop
   7858     // invariant if they are not contained in the specified loop.
   7859     // Instructions are never considered invariant in the function body
   7860     // (null loop) because they are defined within the "loop".
   7861     if (Instruction *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
   7862       return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
   7863     return LoopInvariant;
   7864   case scCouldNotCompute:
   7865     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
   7866   }
   7867   llvm_unreachable("Unknown SCEV kind!");
   7868 }
   7869 
   7870 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
   7871   return getLoopDisposition(S, L) == LoopInvariant;
   7872 }
   7873 
   7874 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
   7875   return getLoopDisposition(S, L) == LoopComputable;
   7876 }
   7877 
   7878 ScalarEvolution::BlockDisposition
   7879 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
   7880   SmallVector<std::pair<const BasicBlock *, BlockDisposition>, 2> &Values = BlockDispositions[S];
   7881   for (unsigned u = 0; u < Values.size(); u++) {
   7882     if (Values[u].first == BB)
   7883       return Values[u].second;
   7884   }
   7885   Values.push_back(std::make_pair(BB, DoesNotDominateBlock));
   7886   BlockDisposition D = computeBlockDisposition(S, BB);
   7887   SmallVector<std::pair<const BasicBlock *, BlockDisposition>, 2> &Values2 = BlockDispositions[S];
   7888   for (unsigned u = Values2.size(); u > 0; u--) {
   7889     if (Values2[u - 1].first == BB) {
   7890       Values2[u - 1].second = D;
   7891       break;
   7892     }
   7893   }
   7894   return D;
   7895 }
   7896 
   7897 ScalarEvolution::BlockDisposition
   7898 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
   7899   switch (static_cast<SCEVTypes>(S->getSCEVType())) {
   7900   case scConstant:
   7901     return ProperlyDominatesBlock;
   7902   case scTruncate:
   7903   case scZeroExtend:
   7904   case scSignExtend:
   7905     return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
   7906   case scAddRecExpr: {
   7907     // This uses a "dominates" query instead of "properly dominates" query
   7908     // to test for proper dominance too, because the instruction which
   7909     // produces the addrec's value is a PHI, and a PHI effectively properly
   7910     // dominates its entire containing block.
   7911     const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
   7912     if (!DT->dominates(AR->getLoop()->getHeader(), BB))
   7913       return DoesNotDominateBlock;
   7914   }
   7915   // FALL THROUGH into SCEVNAryExpr handling.
   7916   case scAddExpr:
   7917   case scMulExpr:
   7918   case scUMaxExpr:
   7919   case scSMaxExpr: {
   7920     const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
   7921     bool Proper = true;
   7922     for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
   7923          I != E; ++I) {
   7924       BlockDisposition D = getBlockDisposition(*I, BB);
   7925       if (D == DoesNotDominateBlock)
   7926         return DoesNotDominateBlock;
   7927       if (D == DominatesBlock)
   7928         Proper = false;
   7929     }
   7930     return Proper ? ProperlyDominatesBlock : DominatesBlock;
   7931   }
   7932   case scUDivExpr: {
   7933     const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
   7934     const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
   7935     BlockDisposition LD = getBlockDisposition(LHS, BB);
   7936     if (LD == DoesNotDominateBlock)
   7937       return DoesNotDominateBlock;
   7938     BlockDisposition RD = getBlockDisposition(RHS, BB);
   7939     if (RD == DoesNotDominateBlock)
   7940       return DoesNotDominateBlock;
   7941     return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
   7942       ProperlyDominatesBlock : DominatesBlock;
   7943   }
   7944   case scUnknown:
   7945     if (Instruction *I =
   7946           dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
   7947       if (I->getParent() == BB)
   7948         return DominatesBlock;
   7949       if (DT->properlyDominates(I->getParent(), BB))
   7950         return ProperlyDominatesBlock;
   7951       return DoesNotDominateBlock;
   7952     }
   7953     return ProperlyDominatesBlock;
   7954   case scCouldNotCompute:
   7955     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
   7956   }
   7957   llvm_unreachable("Unknown SCEV kind!");
   7958 }
   7959 
   7960 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
   7961   return getBlockDisposition(S, BB) >= DominatesBlock;
   7962 }
   7963 
   7964 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
   7965   return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
   7966 }
   7967 
   7968 namespace {
   7969 // Search for a SCEV expression node within an expression tree.
   7970 // Implements SCEVTraversal::Visitor.
   7971 struct SCEVSearch {
   7972   const SCEV *Node;
   7973   bool IsFound;
   7974 
   7975   SCEVSearch(const SCEV *N): Node(N), IsFound(false) {}
   7976 
   7977   bool follow(const SCEV *S) {
   7978     IsFound |= (S == Node);
   7979     return !IsFound;
   7980   }
   7981   bool isDone() const { return IsFound; }
   7982 };
   7983 }
   7984 
   7985 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
   7986   SCEVSearch Search(Op);
   7987   visitAll(S, Search);
   7988   return Search.IsFound;
   7989 }
   7990 
   7991 void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
   7992   ValuesAtScopes.erase(S);
   7993   LoopDispositions.erase(S);
   7994   BlockDispositions.erase(S);
   7995   UnsignedRanges.erase(S);
   7996   SignedRanges.erase(S);
   7997 
   7998   for (DenseMap<const Loop*, BackedgeTakenInfo>::iterator I =
   7999          BackedgeTakenCounts.begin(), E = BackedgeTakenCounts.end(); I != E; ) {
   8000     BackedgeTakenInfo &BEInfo = I->second;
   8001     if (BEInfo.hasOperand(S, this)) {
   8002       BEInfo.clear();
   8003       BackedgeTakenCounts.erase(I++);
   8004     }
   8005     else
   8006       ++I;
   8007   }
   8008 }
   8009 
   8010 typedef DenseMap<const Loop *, std::string> VerifyMap;
   8011 
   8012 /// replaceSubString - Replaces all occurrences of From in Str with To.
   8013 static void replaceSubString(std::string &Str, StringRef From, StringRef To) {
   8014   size_t Pos = 0;
   8015   while ((Pos = Str.find(From, Pos)) != std::string::npos) {
   8016     Str.replace(Pos, From.size(), To.data(), To.size());
   8017     Pos += To.size();
   8018   }
   8019 }
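
        // For example (hypothetical input), replaceSubString(S, "<nsw>", "")
        // turns "{0,+,4}<nsw><%L>" into "{0,+,4}<%L>", which is how the wrap
        // flags are scrubbed below.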
   8020 
   8021 /// getLoopBackedgeTakenCounts - Helper method for verifyAnalysis.
   8022 static void
   8023 getLoopBackedgeTakenCounts(Loop *L, VerifyMap &Map, ScalarEvolution &SE) {
   8024   for (Loop::reverse_iterator I = L->rbegin(), E = L->rend(); I != E; ++I)
   8025     getLoopBackedgeTakenCounts(*I, Map, SE); // recurse.
   8026 
   8027   // Record L's own count too: innermost loops have no subloops to visit.
   8028   std::string &S = Map[L];
   8029   if (S.empty()) {
   8030     raw_string_ostream OS(S);
   8031     SE.getBackedgeTakenCount(L)->print(OS);
   8032 
   8033     // false and 0 are semantically equivalent. This can happen in dead loops.
   8034     replaceSubString(OS.str(), "false", "0");
   8035     // Remove wrap flags, their use in SCEV is highly fragile.
   8036     // FIXME: Remove this when SCEV gets smarter about them.
   8037     replaceSubString(OS.str(), "<nw>", "");
   8038     replaceSubString(OS.str(), "<nsw>", "");
   8039     replaceSubString(OS.str(), "<nuw>", "");
   8040   }
   8041 }
   8042 
   8043 void ScalarEvolution::verifyAnalysis() const {
   8044   if (!VerifySCEV)
   8045     return;
   8046 
   8047   ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
   8048 
   8049   // Gather stringified backedge taken counts for all loops using SCEV's caches.
   8050   // FIXME: It would be much better to store actual values instead of strings,
   8051   //        but SCEV pointers will change if we drop the caches.
   8052   VerifyMap BackedgeDumpsOld, BackedgeDumpsNew;
   8053   for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
   8054     getLoopBackedgeTakenCounts(*I, BackedgeDumpsOld, SE);
   8055 
   8056   // Gather stringified backedge taken counts for all loops without using
   8057   // SCEV's caches.
   8058   SE.releaseMemory();
   8059   for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
   8060     getLoopBackedgeTakenCounts(*I, BackedgeDumpsNew, SE);
   8061 
   8062   // Now compare whether they're the same with and without caches. This allows
   8063   // verifying that no pass changed the cache.
   8064   assert(BackedgeDumpsOld.size() == BackedgeDumpsNew.size() &&
   8065          "New loops suddenly appeared!");
   8066 
   8067   for (VerifyMap::iterator OldI = BackedgeDumpsOld.begin(),
   8068                            OldE = BackedgeDumpsOld.end(),
   8069                            NewI = BackedgeDumpsNew.begin();
   8070        OldI != OldE; ++OldI, ++NewI) {
   8071     assert(OldI->first == NewI->first && "Loop order changed!");
   8072 
   8073     // Compare the stringified SCEVs. We don't care if an undef backedge-taken
   8074     // count changes.
   8075     // FIXME: We currently ignore SCEV changes from/to CouldNotCompute. This
   8076     // means that a pass is buggy or SCEV has to learn a new pattern but is
   8077     // usually not harmful.
   8078     if (OldI->second != NewI->second &&
   8079         OldI->second.find("undef") == std::string::npos &&
   8080         NewI->second.find("undef") == std::string::npos &&
   8081         OldI->second != "***COULDNOTCOMPUTE***" &&
   8082         NewI->second != "***COULDNOTCOMPUTE***") {
   8083       dbgs() << "SCEVValidator: SCEV for loop '"
   8084              << OldI->first->getHeader()->getName()
   8085              << "' changed from '" << OldI->second
   8086              << "' to '" << NewI->second << "'!\n";
   8087       std::abort();
   8088     }
   8089   }
   8090 
   8091   // TODO: Verify more things.
   8092 }
   8093