      1 //===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This file contains the implementation of the scalar evolution analysis
     11 // engine, which is used primarily to analyze expressions involving induction
     12 // variables in loops.
     13 //
     14 // There are several aspects to this library.  First is the representation of
     15 // scalar expressions, which are represented as subclasses of the SCEV class.
     16 // These classes are used to represent certain types of subexpressions that we
     17 // can handle. We only create one SCEV of a particular shape, so
     18 // pointer-comparisons for equality are legal.
     19 //
     20 // One important aspect of the SCEV objects is that they are never cyclic, even
     21 // if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
     22 // the PHI node is one of the idioms that we can represent (e.g., a polynomial
     23 // recurrence) then we represent it directly as a recurrence node, otherwise we
     24 // represent it as a SCEVUnknown node.
     25 //
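        // As an illustrative sketch (the block name %loop below is only a
        // placeholder), the induction variable of a loop such as
        //
        //   for (int i = 0; i != n; ++i)
        //     A[3*i + 5] = 0;
        //
        // is represented by the affine recurrence {0,+,1}<%loop>, and the index
        // expression 3*i + 5 folds to the recurrence {5,+,3}<%loop>.
        //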
     26 // In addition to being able to represent expressions of various types, we also
     27 // have folders that are used to build the *canonical* representation for a
     28 // particular expression.  These folders are capable of using a variety of
     29 // rewrite rules to simplify the expressions.
     30 //
     31 // Once the folders are defined, we can implement the more interesting
     32 // higher-level code, such as the code that recognizes PHI nodes of various
     33 // types, computes the execution count of a loop, etc.
     34 //
     35 // TODO: We should use these routines and value representations to implement
     36 // dependence analysis!
     37 //
     38 //===----------------------------------------------------------------------===//
     39 //
     40 // There are several good references for the techniques used in this analysis.
     41 //
     42 //  Chains of recurrences -- a method to expedite the evaluation
     43 //  of closed-form functions
     44 //  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
     45 //
     46 //  On computational properties of chains of recurrences
     47 //  Eugene V. Zima
     48 //
     49 //  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
     50 //  Robert A. van Engelen
     51 //
     52 //  Efficient Symbolic Analysis for Optimizing Compilers
     53 //  Robert A. van Engelen
     54 //
     55 //  Using the chains of recurrences algebra for data dependence testing and
     56 //  induction variable substitution
     57 //  MS Thesis, Johnie Birch
     58 //
     59 //===----------------------------------------------------------------------===//
     60 
     61 #define DEBUG_TYPE "scalar-evolution"
     62 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
     63 #include "llvm/Constants.h"
     64 #include "llvm/DerivedTypes.h"
     65 #include "llvm/GlobalVariable.h"
     66 #include "llvm/GlobalAlias.h"
     67 #include "llvm/Instructions.h"
     68 #include "llvm/LLVMContext.h"
     69 #include "llvm/Operator.h"
     70 #include "llvm/Analysis/ConstantFolding.h"
     71 #include "llvm/Analysis/Dominators.h"
     72 #include "llvm/Analysis/InstructionSimplify.h"
     73 #include "llvm/Analysis/LoopInfo.h"
     74 #include "llvm/Analysis/ValueTracking.h"
     75 #include "llvm/Assembly/Writer.h"
     76 #include "llvm/Target/TargetData.h"
     77 #include "llvm/Support/CommandLine.h"
     78 #include "llvm/Support/ConstantRange.h"
     79 #include "llvm/Support/Debug.h"
     80 #include "llvm/Support/ErrorHandling.h"
     81 #include "llvm/Support/GetElementPtrTypeIterator.h"
     82 #include "llvm/Support/InstIterator.h"
     83 #include "llvm/Support/MathExtras.h"
     84 #include "llvm/Support/raw_ostream.h"
     85 #include "llvm/ADT/Statistic.h"
     86 #include "llvm/ADT/STLExtras.h"
     87 #include "llvm/ADT/SmallPtrSet.h"
     88 #include <algorithm>
     89 using namespace llvm;
     90 
     91 STATISTIC(NumArrayLenItCounts,
     92           "Number of trip counts computed with array length");
     93 STATISTIC(NumTripCountsComputed,
     94           "Number of loops with predictable loop counts");
     95 STATISTIC(NumTripCountsNotComputed,
     96           "Number of loops without predictable loop counts");
     97 STATISTIC(NumBruteForceTripCountsComputed,
     98           "Number of loops with trip counts computed by force");
     99 
    100 static cl::opt<unsigned>
    101 MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
    102                         cl::desc("Maximum number of iterations SCEV will "
    103                                  "symbolically execute a constant "
    104                                  "derived loop"),
    105                         cl::init(100));
    106 
    107 INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
    108                 "Scalar Evolution Analysis", false, true)
    109 INITIALIZE_PASS_DEPENDENCY(LoopInfo)
    110 INITIALIZE_PASS_DEPENDENCY(DominatorTree)
    111 INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution",
    112                 "Scalar Evolution Analysis", false, true)
    113 char ScalarEvolution::ID = 0;
    114 
    115 //===----------------------------------------------------------------------===//
    116 //                           SCEV class definitions
    117 //===----------------------------------------------------------------------===//
    118 
    119 //===----------------------------------------------------------------------===//
    120 // Implementation of the SCEV class.
    121 //
    122 
    123 void SCEV::dump() const {
    124   print(dbgs());
    125   dbgs() << '\n';
    126 }
    127 
    128 void SCEV::print(raw_ostream &OS) const {
    129   switch (getSCEVType()) {
    130   case scConstant:
    131     WriteAsOperand(OS, cast<SCEVConstant>(this)->getValue(), false);
    132     return;
    133   case scTruncate: {
    134     const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    135     const SCEV *Op = Trunc->getOperand();
    136     OS << "(trunc " << *Op->getType() << " " << *Op << " to "
    137        << *Trunc->getType() << ")";
    138     return;
    139   }
    140   case scZeroExtend: {
    141     const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    142     const SCEV *Op = ZExt->getOperand();
    143     OS << "(zext " << *Op->getType() << " " << *Op << " to "
    144        << *ZExt->getType() << ")";
    145     return;
    146   }
    147   case scSignExtend: {
    148     const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    149     const SCEV *Op = SExt->getOperand();
    150     OS << "(sext " << *Op->getType() << " " << *Op << " to "
    151        << *SExt->getType() << ")";
    152     return;
    153   }
    154   case scAddRecExpr: {
    155     const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    156     OS << "{" << *AR->getOperand(0);
    157     for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
    158       OS << ",+," << *AR->getOperand(i);
    159     OS << "}<";
    160     if (AR->getNoWrapFlags(FlagNUW))
    161       OS << "nuw><";
    162     if (AR->getNoWrapFlags(FlagNSW))
    163       OS << "nsw><";
    164     if (AR->getNoWrapFlags(FlagNW) &&
    165         !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
    166       OS << "nw><";
    167     WriteAsOperand(OS, AR->getLoop()->getHeader(), /*PrintType=*/false);
    168     OS << ">";
    169     return;
    170   }
    171   case scAddExpr:
    172   case scMulExpr:
    173   case scUMaxExpr:
    174   case scSMaxExpr: {
    175     const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    176     const char *OpStr = 0;
    177     switch (NAry->getSCEVType()) {
    178     case scAddExpr: OpStr = " + "; break;
    179     case scMulExpr: OpStr = " * "; break;
    180     case scUMaxExpr: OpStr = " umax "; break;
    181     case scSMaxExpr: OpStr = " smax "; break;
    182     }
    183     OS << "(";
    184     for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
    185          I != E; ++I) {
    186       OS << **I;
    187       if (llvm::next(I) != E)
    188         OS << OpStr;
    189     }
    190     OS << ")";
    191     return;
    192   }
    193   case scUDivExpr: {
    194     const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    195     OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    196     return;
    197   }
    198   case scUnknown: {
    199     const SCEVUnknown *U = cast<SCEVUnknown>(this);
    200     Type *AllocTy;
    201     if (U->isSizeOf(AllocTy)) {
    202       OS << "sizeof(" << *AllocTy << ")";
    203       return;
    204     }
    205     if (U->isAlignOf(AllocTy)) {
    206       OS << "alignof(" << *AllocTy << ")";
    207       return;
    208     }
    209 
    210     Type *CTy;
    211     Constant *FieldNo;
    212     if (U->isOffsetOf(CTy, FieldNo)) {
    213       OS << "offsetof(" << *CTy << ", ";
    214       WriteAsOperand(OS, FieldNo, false);
    215       OS << ")";
    216       return;
    217     }
    218 
    219     // Otherwise just print it normally.
    220     WriteAsOperand(OS, U->getValue(), false);
    221     return;
    222   }
    223   case scCouldNotCompute:
    224     OS << "***COULDNOTCOMPUTE***";
    225     return;
    226   default: break;
    227   }
    228   llvm_unreachable("Unknown SCEV kind!");
    229 }
    230 
    231 Type *SCEV::getType() const {
    232   switch (getSCEVType()) {
    233   case scConstant:
    234     return cast<SCEVConstant>(this)->getType();
    235   case scTruncate:
    236   case scZeroExtend:
    237   case scSignExtend:
    238     return cast<SCEVCastExpr>(this)->getType();
    239   case scAddRecExpr:
    240   case scMulExpr:
    241   case scUMaxExpr:
    242   case scSMaxExpr:
    243     return cast<SCEVNAryExpr>(this)->getType();
    244   case scAddExpr:
    245     return cast<SCEVAddExpr>(this)->getType();
    246   case scUDivExpr:
    247     return cast<SCEVUDivExpr>(this)->getType();
    248   case scUnknown:
    249     return cast<SCEVUnknown>(this)->getType();
    250   case scCouldNotCompute:
    251     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
    252     return 0;
    253   default: break;
    254   }
    255   llvm_unreachable("Unknown SCEV kind!");
    256   return 0;
    257 }
    258 
    259 bool SCEV::isZero() const {
    260   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    261     return SC->getValue()->isZero();
    262   return false;
    263 }
    264 
    265 bool SCEV::isOne() const {
    266   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    267     return SC->getValue()->isOne();
    268   return false;
    269 }
    270 
    271 bool SCEV::isAllOnesValue() const {
    272   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    273     return SC->getValue()->isAllOnesValue();
    274   return false;
    275 }
    276 
    277 SCEVCouldNotCompute::SCEVCouldNotCompute() :
    278   SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}
    279 
    280 bool SCEVCouldNotCompute::classof(const SCEV *S) {
    281   return S->getSCEVType() == scCouldNotCompute;
    282 }
    283 
    284 const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
    285   FoldingSetNodeID ID;
    286   ID.AddInteger(scConstant);
    287   ID.AddPointer(V);
    288   void *IP = 0;
    289   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
    290   SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
    291   UniqueSCEVs.InsertNode(S, IP);
    292   return S;
    293 }
    294 
    295 const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
    296   return getConstant(ConstantInt::get(getContext(), Val));
    297 }
    298 
    299 const SCEV *
    300 ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
    301   IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
    302   return getConstant(ConstantInt::get(ITy, V, isSigned));
    303 }
    304 
    305 SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
    306                            unsigned SCEVTy, const SCEV *op, Type *ty)
    307   : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}
    308 
    309 SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
    310                                    const SCEV *op, Type *ty)
    311   : SCEVCastExpr(ID, scTruncate, op, ty) {
    312   assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
    313          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
    314          "Cannot truncate non-integer value!");
    315 }
    316 
    317 SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
    318                                        const SCEV *op, Type *ty)
    319   : SCEVCastExpr(ID, scZeroExtend, op, ty) {
    320   assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
    321          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
    322          "Cannot zero extend non-integer value!");
    323 }
    324 
    325 SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
    326                                        const SCEV *op, Type *ty)
    327   : SCEVCastExpr(ID, scSignExtend, op, ty) {
    328   assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
    329          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
    330          "Cannot sign extend non-integer value!");
    331 }
    332 
    333 void SCEVUnknown::deleted() {
    334   // Clear this SCEVUnknown from various maps.
    335   SE->forgetMemoizedResults(this);
    336 
    337   // Remove this SCEVUnknown from the uniquing map.
    338   SE->UniqueSCEVs.RemoveNode(this);
    339 
    340   // Release the value.
    341   setValPtr(0);
    342 }
    343 
    344 void SCEVUnknown::allUsesReplacedWith(Value *New) {
    345   // Clear this SCEVUnknown from various maps.
    346   SE->forgetMemoizedResults(this);
    347 
    348   // Remove this SCEVUnknown from the uniquing map.
    349   SE->UniqueSCEVs.RemoveNode(this);
    350 
    351   // Update this SCEVUnknown to point to the new value. This is needed
    352   // because there may still be outstanding SCEVs which still point to
    353   // this SCEVUnknown.
    354   setValPtr(New);
    355 }
    356 
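        // The following predicates recognize the canonical constant expressions
        // that ScalarEvolution uses to model sizeof, alignof and offsetof.  As a
        // sketch of the pattern (not an exhaustive description), sizeof(T) is
        // matched as
        //
        //   ptrtoint (T* getelementptr (T* null, i32 1) to iN)
        //
        // i.e. a GEP to one element past a null pointer, cast to an integer.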
    357 bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
    358   if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    359     if (VCE->getOpcode() == Instruction::PtrToInt)
    360       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
    361         if (CE->getOpcode() == Instruction::GetElementPtr &&
    362             CE->getOperand(0)->isNullValue() &&
    363             CE->getNumOperands() == 2)
    364           if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
    365             if (CI->isOne()) {
    366               AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
    367                                  ->getElementType();
    368               return true;
    369             }
    370 
    371   return false;
    372 }
    373 
    374 bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
    375   if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    376     if (VCE->getOpcode() == Instruction::PtrToInt)
    377       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
    378         if (CE->getOpcode() == Instruction::GetElementPtr &&
    379             CE->getOperand(0)->isNullValue()) {
    380           Type *Ty =
    381             cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
    382           if (StructType *STy = dyn_cast<StructType>(Ty))
    383             if (!STy->isPacked() &&
    384                 CE->getNumOperands() == 3 &&
    385                 CE->getOperand(1)->isNullValue()) {
    386               if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
    387                 if (CI->isOne() &&
    388                     STy->getNumElements() == 2 &&
    389                     STy->getElementType(0)->isIntegerTy(1)) {
    390                   AllocTy = STy->getElementType(1);
    391                   return true;
    392                 }
    393             }
    394         }
    395 
    396   return false;
    397 }
    398 
    399 bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
    400   if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    401     if (VCE->getOpcode() == Instruction::PtrToInt)
    402       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
    403         if (CE->getOpcode() == Instruction::GetElementPtr &&
    404             CE->getNumOperands() == 3 &&
    405             CE->getOperand(0)->isNullValue() &&
    406             CE->getOperand(1)->isNullValue()) {
    407           Type *Ty =
    408             cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
    409           // Ignore vector types here so that ScalarEvolutionExpander doesn't
    410           // emit getelementptrs that index into vectors.
    411           if (Ty->isStructTy() || Ty->isArrayTy()) {
    412             CTy = Ty;
    413             FieldNo = CE->getOperand(2);
    414             return true;
    415           }
    416         }
    417 
    418   return false;
    419 }
    420 
    421 //===----------------------------------------------------------------------===//
    422 //                               SCEV Utilities
    423 //===----------------------------------------------------------------------===//
    424 
    425 namespace {
    426   /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
    427   /// than the complexity of the RHS.  This comparator is used to canonicalize
    428   /// expressions.
    429   class SCEVComplexityCompare {
    430     const LoopInfo *const LI;
    431   public:
    432     explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}
    433 
    434     // Return true if LHS is less complex than RHS; false otherwise.
    435     bool operator()(const SCEV *LHS, const SCEV *RHS) const {
    436       return compare(LHS, RHS) < 0;
    437     }
    438 
    439     // Return negative, zero, or positive, if LHS is less than, equal to, or
    440     // greater than RHS, respectively. A three-way result allows recursive
    441     // comparisons to be more efficient.
    442     int compare(const SCEV *LHS, const SCEV *RHS) const {
    443       // Fast-path: SCEVs are uniqued so we can do a quick equality check.
    444       if (LHS == RHS)
    445         return 0;
    446 
    447       // Primarily, sort the SCEVs by their getSCEVType().
    448       unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
    449       if (LType != RType)
    450         return (int)LType - (int)RType;
    451 
    452       // Aside from the getSCEVType() ordering, the particular ordering
    453       // isn't very important except that it's beneficial to be consistent,
    454       // so that (a + b) and (b + a) don't end up as different expressions.
    455       switch (LType) {
    456       case scUnknown: {
    457         const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    458         const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);
    459 
    460         // Sort SCEVUnknown values with some loose heuristics. TODO: This is
    461         // not as complete as it could be.
    462         const Value *LV = LU->getValue(), *RV = RU->getValue();
    463 
    464         // Order pointer values after integer values. This helps SCEVExpander
    465         // form GEPs.
    466         bool LIsPointer = LV->getType()->isPointerTy(),
    467              RIsPointer = RV->getType()->isPointerTy();
    468         if (LIsPointer != RIsPointer)
    469           return (int)LIsPointer - (int)RIsPointer;
    470 
    471         // Compare getValueID values.
    472         unsigned LID = LV->getValueID(),
    473                  RID = RV->getValueID();
    474         if (LID != RID)
    475           return (int)LID - (int)RID;
    476 
    477         // Sort arguments by their position.
    478         if (const Argument *LA = dyn_cast<Argument>(LV)) {
    479           const Argument *RA = cast<Argument>(RV);
    480           unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    481           return (int)LArgNo - (int)RArgNo;
    482         }
    483 
    484         // For instructions, compare their loop depth, and their operand
    485         // count.  This is pretty loose.
    486         if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
    487           const Instruction *RInst = cast<Instruction>(RV);
    488 
    489           // Compare loop depths.
    490           const BasicBlock *LParent = LInst->getParent(),
    491                            *RParent = RInst->getParent();
    492           if (LParent != RParent) {
    493             unsigned LDepth = LI->getLoopDepth(LParent),
    494                      RDepth = LI->getLoopDepth(RParent);
    495             if (LDepth != RDepth)
    496               return (int)LDepth - (int)RDepth;
    497           }
    498 
    499           // Compare the number of operands.
    500           unsigned LNumOps = LInst->getNumOperands(),
    501                    RNumOps = RInst->getNumOperands();
    502           return (int)LNumOps - (int)RNumOps;
    503         }
    504 
    505         return 0;
    506       }
    507 
    508       case scConstant: {
    509         const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    510         const SCEVConstant *RC = cast<SCEVConstant>(RHS);
    511 
    512         // Compare constant values.
    513         const APInt &LA = LC->getValue()->getValue();
    514         const APInt &RA = RC->getValue()->getValue();
    515         unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    516         if (LBitWidth != RBitWidth)
    517           return (int)LBitWidth - (int)RBitWidth;
    518         return LA.ult(RA) ? -1 : 1;
    519       }
    520 
    521       case scAddRecExpr: {
    522         const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    523         const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
    524 
    525         // Compare addrec loop depths.
    526         const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    527         if (LLoop != RLoop) {
    528           unsigned LDepth = LLoop->getLoopDepth(),
    529                    RDepth = RLoop->getLoopDepth();
    530           if (LDepth != RDepth)
    531             return (int)LDepth - (int)RDepth;
    532         }
    533 
    534         // Addrec complexity grows with operand count.
    535         unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    536         if (LNumOps != RNumOps)
    537           return (int)LNumOps - (int)RNumOps;
    538 
    539         // Lexicographically compare.
    540         for (unsigned i = 0; i != LNumOps; ++i) {
    541           long X = compare(LA->getOperand(i), RA->getOperand(i));
    542           if (X != 0)
    543             return X;
    544         }
    545 
    546         return 0;
    547       }
    548 
    549       case scAddExpr:
    550       case scMulExpr:
    551       case scSMaxExpr:
    552       case scUMaxExpr: {
    553         const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    554         const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
    555 
    556         // Lexicographically compare n-ary expressions.
    557         unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    558         for (unsigned i = 0; i != LNumOps; ++i) {
    559           if (i >= RNumOps)
    560             return 1;
    561           long X = compare(LC->getOperand(i), RC->getOperand(i));
    562           if (X != 0)
    563             return X;
    564         }
    565         return (int)LNumOps - (int)RNumOps;
    566       }
    567 
    568       case scUDivExpr: {
    569         const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    570         const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
    571 
    572         // Lexicographically compare udiv expressions.
    573         long X = compare(LC->getLHS(), RC->getLHS());
    574         if (X != 0)
    575           return X;
    576         return compare(LC->getRHS(), RC->getRHS());
    577       }
    578 
    579       case scTruncate:
    580       case scZeroExtend:
    581       case scSignExtend: {
    582         const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    583         const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
    584 
    585         // Compare cast expressions by operand.
    586         return compare(LC->getOperand(), RC->getOperand());
    587       }
    588 
    589       default:
    590         break;
    591       }
    592 
    593       llvm_unreachable("Unknown SCEV kind!");
    594       return 0;
    595     }
    596   };
    597 }
    598 
    599 /// GroupByComplexity - Given a list of SCEV objects, order them by their
    600 /// complexity, and group objects of the same complexity together by value.
    601 /// When this routine is finished, we know that any duplicates in the vector are
    602 /// consecutive and that complexity is monotonically increasing.
    603 ///
    604 /// Note that we take special precautions to ensure that we get deterministic
    605 /// results from this routine.  In other words, we don't want the results of
    606 /// this to depend on where the addresses of various SCEV objects happened to
    607 /// land in memory.
    608 ///
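        /// As a rough example (the exact order within a tie is illustrative only),
        /// grouping the operands of an add such as (%a + 2 + %b + 2) yields
        /// (2 + 2 + %a + %b): the constants, which have the smallest SCEVType,
        /// sort to the front, and the duplicated constant operands become adjacent.
        ///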
    609 static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
    610                               LoopInfo *LI) {
    611   if (Ops.size() < 2) return;  // Noop
    612   if (Ops.size() == 2) {
    613     // This is the common case, which also happens to be trivially simple.
    614     // Special case it.
    615     const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    616     if (SCEVComplexityCompare(LI)(RHS, LHS))
    617       std::swap(LHS, RHS);
    618     return;
    619   }
    620 
    621   // Do the rough sort by complexity.
    622   std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));
    623 
    624   // Now that we are sorted by complexity, group elements of the same
    625   // complexity.  Note that this is, at worst, N^2, but the vector is likely to
    626   // be extremely short in practice.  Note that we take this approach because we
    627   // do not want to depend on the addresses of the objects we are grouping.
    628   for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    629     const SCEV *S = Ops[i];
    630     unsigned Complexity = S->getSCEVType();
    631 
    632     // If there are any objects of the same complexity and same value as this
    633     // one, group them.
    634     for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
    635       if (Ops[j] == S) { // Found a duplicate.
    636         // Move it to immediately after i'th element.
    637         std::swap(Ops[i+1], Ops[j]);
    638         ++i;   // no need to rescan it.
    639         if (i == e-2) return;  // Done!
    640       }
    641     }
    642   }
    643 }
    644 
    645 
    646 
    647 //===----------------------------------------------------------------------===//
    648 //                      Simple SCEV method implementations
    649 //===----------------------------------------------------------------------===//
    650 
    651 /// BinomialCoefficient - Compute BC(It, K).  The result has width W.
    652 /// Assumes K > 0.
    653 static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
    654                                        ScalarEvolution &SE,
    655                                        Type* ResultTy) {
    656   // Handle the simplest case efficiently.
    657   if (K == 1)
    658     return SE.getTruncateOrZeroExtend(It, ResultTy);
    659 
    660   // We are using the following formula for BC(It, K):
    661   //
    662   //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
    663   //
    664   // Suppose, W is the bitwidth of the return value.  We must be prepared for
    665   // overflow.  Hence, we must assure that the result of our computation is
    666   // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
    667   // safe in modular arithmetic.
    668   //
    669   // However, this code doesn't use exactly that formula; the formula it uses
    670   // is something like the following, where T is the number of factors of 2 in
    671   // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
    672   // exponentiation:
    673   //
    674   //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
    675   //
    676   // This formula is trivially equivalent to the previous formula.  However,
    677   // this formula can be implemented much more efficiently.  The trick is that
    678   // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
    679   // arithmetic.  To do exact division in modular arithmetic, all we have
    680   // to do is multiply by the inverse.  Therefore, this step can be done at
    681   // width W.
    682   //
    683   // The next issue is how to safely do the division by 2^T.  The way this
    684   // is done is by doing the multiplication step at a width of at least W + T
    685   // bits.  This way, the bottom W+T bits of the product are accurate. Then,
    686   // when we perform the division by 2^T (which is equivalent to a right shift
    687   // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
    688   // truncated out after the division by 2^T.
    689   //
    690   // In comparison to just directly using the first formula, this technique
    691   // is much more efficient; using the first formula requires W * K bits,
    692   // but this formula requires less than W + K bits. Also, the first formula requires
    693   // a division step, whereas this formula only requires multiplies and shifts.
    694   //
    695   // It doesn't matter whether the subtraction step is done in the calculation
    696   // width or the input iteration count's width; if the subtraction overflows,
    697   // the result must be zero anyway.  We prefer here to do it in the width of
    698   // the induction variable because it helps a lot for certain cases; CodeGen
    699   // isn't smart enough to ignore the overflow, which leads to much less
    700   // efficient code if the width of the subtraction is wider than the native
    701   // register width.
    702   //
    703   // (It's possible to not widen at all by pulling out factors of 2 before
    704   // the multiplication; for example, K=2 can be calculated as
    705   // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
    706   // extra arithmetic, so it's not an obvious win, and it gets
    707   // much more complicated for K > 3.)
    708 
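          // As a small worked illustration (not tied to any particular caller), take
          // K = 3 and W = 8.  Then K! = 6 = 2^1 * 3, so T = 1 and the odd part of K!
          // is 3.  The multiplicative inverse of 3 modulo 2^8 is 171, since
          // 3 * 171 = 513 = 2*256 + 1.  For It = 5, the product 5*4*3 = 60 is formed
          // at width W+T = 9, the division by 2^T yields 30, truncation to 8 bits
          // keeps 30, and 30 * 171 = 5130 = 10 (mod 256), which is exactly
          // BC(5, 3) = 10.
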
    709   // Protection from insane SCEVs; this bound is conservative,
    710   // but it probably doesn't matter.
    711   if (K > 1000)
    712     return SE.getCouldNotCompute();
    713 
    714   unsigned W = SE.getTypeSizeInBits(ResultTy);
    715 
    716   // Calculate K! / 2^T and T; we divide out the factors of two before
    717   // multiplying for calculating K! / 2^T to avoid overflow.
    718   // Other overflow doesn't matter because we only care about the bottom
    719   // W bits of the result.
    720   APInt OddFactorial(W, 1);
    721   unsigned T = 1;
    722   for (unsigned i = 3; i <= K; ++i) {
    723     APInt Mult(W, i);
    724     unsigned TwoFactors = Mult.countTrailingZeros();
    725     T += TwoFactors;
    726     Mult = Mult.lshr(TwoFactors);
    727     OddFactorial *= Mult;
    728   }
    729 
    730   // We need at least W + T bits for the multiplication step
    731   unsigned CalculationBits = W + T;
    732 
    733   // Calculate 2^T, at width T+W.
    734   APInt DivFactor = APInt(CalculationBits, 1).shl(T);
    735 
    736   // Calculate the multiplicative inverse of K! / 2^T;
    737   // this multiplication factor will perform the exact division by
    738   // K! / 2^T.
    739   APInt Mod = APInt::getSignedMinValue(W+1);
    740   APInt MultiplyFactor = OddFactorial.zext(W+1);
    741   MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
    742   MultiplyFactor = MultiplyFactor.trunc(W);
    743 
    744   // Calculate the product, at width T+W
    745   IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
    746                                                       CalculationBits);
    747   const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
    748   for (unsigned i = 1; i != K; ++i) {
    749     const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    750     Dividend = SE.getMulExpr(Dividend,
    751                              SE.getTruncateOrZeroExtend(S, CalculationTy));
    752   }
    753 
    754   // Divide by 2^T
    755   const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));
    756 
    757   // Truncate the result, and divide by K! / 2^T.
    758 
    759   return SE.getMulExpr(SE.getConstant(MultiplyFactor),
    760                        SE.getTruncateOrZeroExtend(DivResult, ResultTy));
    761 }
    762 
    763 /// evaluateAtIteration - Return the value of this chain of recurrences at
    764 /// the specified iteration number.  We can evaluate this recurrence by
    765 /// multiplying each element in the chain by the binomial coefficient
    766 /// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
    767 ///
    768 ///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
    769 ///
    770 /// where BC(It, k) stands for binomial coefficient.
    771 ///
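        /// For example (pure arithmetic, independent of any particular IR),
        /// {5,+,3} evaluates to 5 + 3*BC(It,1) = 5 + 3*It, and {0,+,1,+,2}
        /// evaluates to 0 + 1*BC(It,1) + 2*BC(It,2) = It + It*(It-1) = It^2.
        ///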
    772 const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
    773                                                 ScalarEvolution &SE) const {
    774   const SCEV *Result = getStart();
    775   for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    776     // The computation is correct in the face of overflow provided that the
    777     // multiplication is performed _after_ the evaluation of the binomial
    778     // coefficient.
    779     const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    780     if (isa<SCEVCouldNotCompute>(Coeff))
    781       return Coeff;
    782 
    783     Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
    784   }
    785   return Result;
    786 }
    787 
    788 //===----------------------------------------------------------------------===//
    789 //                    SCEV Expression folder implementations
    790 //===----------------------------------------------------------------------===//
    791 
    792 const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
    793                                              Type *Ty) {
    794   assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
    795          "This is not a truncating conversion!");
    796   assert(isSCEVable(Ty) &&
    797          "This is not a conversion to a SCEVable type!");
    798   Ty = getEffectiveSCEVType(Ty);
    799 
    800   FoldingSetNodeID ID;
    801   ID.AddInteger(scTruncate);
    802   ID.AddPointer(Op);
    803   ID.AddPointer(Ty);
    804   void *IP = 0;
    805   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
    806 
    807   // Fold if the operand is constant.
    808   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    809     return getConstant(
    810       cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(),
    811                                                getEffectiveSCEVType(Ty))));
    812 
    813   // trunc(trunc(x)) --> trunc(x)
    814   if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    815     return getTruncateExpr(ST->getOperand(), Ty);
    816 
    817   // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
    818   if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    819     return getTruncateOrSignExtend(SS->getOperand(), Ty);
    820 
    821   // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
    822   if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    823     return getTruncateOrZeroExtend(SZ->getOperand(), Ty);
    824 
    825   // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
    826   // eliminate all the truncates.
    827   if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
    828     SmallVector<const SCEV *, 4> Operands;
    829     bool hasTrunc = false;
    830     for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
    831       const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
    832       hasTrunc = isa<SCEVTruncateExpr>(S);
    833       Operands.push_back(S);
    834     }
    835     if (!hasTrunc)
    836       return getAddExpr(Operands);
    837     UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
    838   }
    839 
    840   // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
    841   // eliminate all the truncates.
    842   if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
    843     SmallVector<const SCEV *, 4> Operands;
    844     bool hasTrunc = false;
    845     for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
    846       const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
    847       hasTrunc = isa<SCEVTruncateExpr>(S);
    848       Operands.push_back(S);
    849     }
    850     if (!hasTrunc)
    851       return getMulExpr(Operands);
    852     UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
    853   }
    854 
    855   // If the input value is a chrec scev, truncate the chrec's operands.
    856   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    857     SmallVector<const SCEV *, 4> Operands;
    858     for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
    859       Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    860     return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
    861   }
    862 
    863   // As a special case, fold trunc(undef) to undef. We don't want to
    864   // know too much about SCEVUnknowns, but this special case is handy
    865   // and harmless.
    866   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
    867     if (isa<UndefValue>(U->getValue()))
    868       return getSCEV(UndefValue::get(Ty));
    869 
    870   // The cast wasn't folded; create an explicit cast node. We can reuse
    871   // the existing insert position since if we get here, we won't have
    872   // made any changes which would invalidate it.
    873   SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
    874                                                  Op, Ty);
    875   UniqueSCEVs.InsertNode(S, IP);
    876   return S;
    877 }
    878 
    879 const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
    880                                                Type *Ty) {
    881   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
    882          "This is not an extending conversion!");
    883   assert(isSCEVable(Ty) &&
    884          "This is not a conversion to a SCEVable type!");
    885   Ty = getEffectiveSCEVType(Ty);
    886 
    887   // Fold if the operand is constant.
    888   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    889     return getConstant(
    890       cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(),
    891                                               getEffectiveSCEVType(Ty))));
    892 
    893   // zext(zext(x)) --> zext(x)
    894   if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    895     return getZeroExtendExpr(SZ->getOperand(), Ty);
    896 
    897   // Before doing any expensive analysis, check to see if we've already
    898   // computed a SCEV for this Op and Ty.
    899   FoldingSetNodeID ID;
    900   ID.AddInteger(scZeroExtend);
    901   ID.AddPointer(Op);
    902   ID.AddPointer(Ty);
    903   void *IP = 0;
    904   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
    905 
    906   // zext(trunc(x)) --> zext(x) or x or trunc(x)
    907   if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    908     // It's possible the bits taken off by the truncate were all zero bits. If
    909     // so, we should be able to simplify this further.
    910     const SCEV *X = ST->getOperand();
    911     ConstantRange CR = getUnsignedRange(X);
    912     unsigned TruncBits = getTypeSizeInBits(ST->getType());
    913     unsigned NewBits = getTypeSizeInBits(Ty);
    914     if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
    915             CR.zextOrTrunc(NewBits)))
    916       return getTruncateOrZeroExtend(X, Ty);
    917   }
    918 
    919   // If the input value is a chrec scev, and we can prove that the value
    920   // did not overflow the old, smaller, value, we can zero extend all of the
    921   // operands (often constants).  This allows analysis of something like
    922   // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
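          // In that example the i8 recurrence {0,+,1} stays below 100, so the
          // analysis below concludes that its zero extension to i32 can be folded
          // into the i32 recurrence {0,+,1} (extension applied to the operands).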
    923   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    924     if (AR->isAffine()) {
    925       const SCEV *Start = AR->getStart();
    926       const SCEV *Step = AR->getStepRecurrence(*this);
    927       unsigned BitWidth = getTypeSizeInBits(AR->getType());
    928       const Loop *L = AR->getLoop();
    929 
    930       // If we have special knowledge that this addrec won't overflow,
    931       // we don't need to do any further analysis.
    932       if (AR->getNoWrapFlags(SCEV::FlagNUW))
    933         return getAddRecExpr(getZeroExtendExpr(Start, Ty),
    934                              getZeroExtendExpr(Step, Ty),
    935                              L, AR->getNoWrapFlags());
    936 
    937       // Check whether the backedge-taken count is SCEVCouldNotCompute.
    938       // Note that this serves two purposes: It filters out loops that are
    939       // simply not analyzable, and it covers the case where this code is
    940       // being called from within backedge-taken count analysis, such that
    941       // attempting to ask for the backedge-taken count would likely result
    942       // in infinite recursion. In the latter case, the analysis code will
    943       // cope with a conservative value, and it will take care to purge
    944       // that value once it has finished.
    945       const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
    946       if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
    947         // Manually compute the final value for AR, checking for
    948         // overflow.
    949 
    950         // Check whether the backedge-taken count can be losslessly casted to
    951         // the addrec's type. The count is always unsigned.
    952         const SCEV *CastedMaxBECount =
    953           getTruncateOrZeroExtend(MaxBECount, Start->getType());
    954         const SCEV *RecastedMaxBECount =
    955           getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
    956         if (MaxBECount == RecastedMaxBECount) {
    957           Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
    958           // Check whether Start+Step*MaxBECount has no unsigned overflow.
    959           const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
    960           const SCEV *Add = getAddExpr(Start, ZMul);
    961           const SCEV *OperandExtendedAdd =
    962             getAddExpr(getZeroExtendExpr(Start, WideTy),
    963                        getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
    964                                   getZeroExtendExpr(Step, WideTy)));
    965           if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd) {
    966             // Cache knowledge of AR NUW, which is propagated to this AddRec.
    967             const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
    968             // Return the expression with the addrec on the outside.
    969             return getAddRecExpr(getZeroExtendExpr(Start, Ty),
    970                                  getZeroExtendExpr(Step, Ty),
    971                                  L, AR->getNoWrapFlags());
    972           }
    973           // Similar to above, only this time treat the step value as signed.
    974           // This covers loops that count down.
    975           const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
    976           Add = getAddExpr(Start, SMul);
    977           OperandExtendedAdd =
    978             getAddExpr(getZeroExtendExpr(Start, WideTy),
    979                        getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
    980                                   getSignExtendExpr(Step, WideTy)));
    981           if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd) {
    982             // Cache knowledge of AR NW, which is propagated to this AddRec.
    983             // Negative step causes unsigned wrap, but it still can't self-wrap.
    984             const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
    985             // Return the expression with the addrec on the outside.
    986             return getAddRecExpr(getZeroExtendExpr(Start, Ty),
    987                                  getSignExtendExpr(Step, Ty),
    988                                  L, AR->getNoWrapFlags());
    989           }
    990         }
    991 
    992         // If the backedge is guarded by a comparison with the pre-inc value
    993         // the addrec is safe. Also, if the entry is guarded by a comparison
    994         // with the start value and the backedge is guarded by a comparison
    995         // with the post-inc value, the addrec is safe.
    996         if (isKnownPositive(Step)) {
    997           const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
    998                                       getUnsignedRange(Step).getUnsignedMax());
    999           if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
   1000               (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
   1001                isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
   1002                                            AR->getPostIncExpr(*this), N))) {
   1003             // Cache knowledge of AR NUW, which is propagated to this AddRec.
   1004             const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
   1005             // Return the expression with the addrec on the outside.
   1006             return getAddRecExpr(getZeroExtendExpr(Start, Ty),
   1007                                  getZeroExtendExpr(Step, Ty),
   1008                                  L, AR->getNoWrapFlags());
   1009           }
   1010         } else if (isKnownNegative(Step)) {
   1011           const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
   1012                                       getSignedRange(Step).getSignedMin());
   1013           if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
   1014               (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
   1015                isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
   1016                                            AR->getPostIncExpr(*this), N))) {
   1017             // Cache knowledge of AR NW, which is propagated to this AddRec.
   1018             // Negative step causes unsigned wrap, but it still can't self-wrap.
   1019             const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
   1020             // Return the expression with the addrec on the outside.
   1021             return getAddRecExpr(getZeroExtendExpr(Start, Ty),
   1022                                  getSignExtendExpr(Step, Ty),
   1023                                  L, AR->getNoWrapFlags());
   1024           }
   1025         }
   1026       }
   1027     }
   1028 
   1029   // The cast wasn't folded; create an explicit cast node.
   1030   // Recompute the insert position, as it may have been invalidated.
   1031   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
   1032   SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
   1033                                                    Op, Ty);
   1034   UniqueSCEVs.InsertNode(S, IP);
   1035   return S;
   1036 }
   1037 
   1038 // Get the limit of a recurrence such that incrementing by Step cannot cause
   1039 // signed overflow as long as the value of the recurrence within the loop does
   1040 // not exceed this limit before incrementing.
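        //
        // For instance (an illustrative case only), for an i8 recurrence whose step
        // is known to be exactly 1, the limit is SINT_MIN - 1 = 127 (mod 2^8) and
        // the predicate is SLT: whenever the current value is signed-less-than 127,
        // adding the step cannot exceed the signed maximum.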
   1041 static const SCEV *getOverflowLimitForStep(const SCEV *Step,
   1042                                            ICmpInst::Predicate *Pred,
   1043                                            ScalarEvolution *SE) {
   1044   unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
   1045   if (SE->isKnownPositive(Step)) {
   1046     *Pred = ICmpInst::ICMP_SLT;
   1047     return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
   1048                            SE->getSignedRange(Step).getSignedMax());
   1049   }
   1050   if (SE->isKnownNegative(Step)) {
   1051     *Pred = ICmpInst::ICMP_SGT;
   1052     return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
   1053                        SE->getSignedRange(Step).getSignedMin());
   1054   }
   1055   return 0;
   1056 }
   1057 
   1058 // The recurrence AR has been shown to have no signed wrap. Typically, if we can
   1059 // prove NSW for AR, then we can just as easily prove NSW for its preincrement
   1060 // or postincrement sibling. This allows normalizing a sign extended AddRec as
   1061 // follows: {sext(Step + Start),+,Step} => {Step + sext(Start),+,Step}.  As a
   1062 // result, the expression "Step + sext(PreIncAR)" is congruent with
   1063 // "sext(PostIncAR)".
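        //
        // For example (names are illustrative), when the post-increment recurrence
        // {(1 + %n),+,1}<%loop> is sign extended and the pre-increment recurrence
        // {%n,+,1}<%loop> is known not to wrap signed, the start of the extended
        // recurrence becomes (1 + sext(%n)) rather than sext(1 + %n).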
   1064 static const SCEV *getPreStartForSignExtend(const SCEVAddRecExpr *AR,
   1065                                             Type *Ty,
   1066                                             ScalarEvolution *SE) {
   1067   const Loop *L = AR->getLoop();
   1068   const SCEV *Start = AR->getStart();
   1069   const SCEV *Step = AR->getStepRecurrence(*SE);
   1070 
   1071   // Check for a simple looking step prior to loop entry.
   1072   const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
   1073   if (!SA || SA->getNumOperands() != 2 || SA->getOperand(0) != Step)
   1074     return 0;
   1075 
   1076   // This is a postinc AR. Check for overflow on the preinc recurrence using the
   1077   // same three conditions that getSignExtendExpr checks.
   1078 
   1079   // 1. NSW flags on the step increment.
   1080   const SCEV *PreStart = SA->getOperand(1);
   1081   const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
   1082     SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));
   1083 
   1084   if (PreAR && PreAR->getNoWrapFlags(SCEV::FlagNSW))
   1085     return PreStart;
   1086 
   1087   // 2. Direct overflow check on the step operation's expression.
   1088   unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
   1089   Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
   1090   const SCEV *OperandExtendedStart =
   1091     SE->getAddExpr(SE->getSignExtendExpr(PreStart, WideTy),
   1092                    SE->getSignExtendExpr(Step, WideTy));
   1093   if (SE->getSignExtendExpr(Start, WideTy) == OperandExtendedStart) {
   1094     // Cache knowledge of PreAR NSW.
   1095     if (PreAR)
   1096       const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(SCEV::FlagNSW);
   1097     // FIXME: this optimization needs a unit test
   1098     DEBUG(dbgs() << "SCEV: untested prestart overflow check\n");
   1099     return PreStart;
   1100   }
   1101 
   1102   // 3. Loop precondition.
   1103   ICmpInst::Predicate Pred;
   1104   const SCEV *OverflowLimit = getOverflowLimitForStep(Step, &Pred, SE);
   1105 
   1106   if (OverflowLimit &&
   1107       SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit)) {
   1108     return PreStart;
   1109   }
   1110   return 0;
   1111 }
   1112 
   1113 // Get the normalized sign-extended expression for this AddRec's Start.
   1114 static const SCEV *getSignExtendAddRecStart(const SCEVAddRecExpr *AR,
   1115                                             Type *Ty,
   1116                                             ScalarEvolution *SE) {
   1117   const SCEV *PreStart = getPreStartForSignExtend(AR, Ty, SE);
   1118   if (!PreStart)
   1119     return SE->getSignExtendExpr(AR->getStart(), Ty);
   1120 
   1121   return SE->getAddExpr(SE->getSignExtendExpr(AR->getStepRecurrence(*SE), Ty),
   1122                         SE->getSignExtendExpr(PreStart, Ty));
   1123 }
   1124 
   1125 const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
   1126                                                Type *Ty) {
   1127   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
   1128          "This is not an extending conversion!");
   1129   assert(isSCEVable(Ty) &&
   1130          "This is not a conversion to a SCEVable type!");
   1131   Ty = getEffectiveSCEVType(Ty);
   1132 
   1133   // Fold if the operand is constant.
   1134   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
   1135     return getConstant(
   1136       cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(),
   1137                                               getEffectiveSCEVType(Ty))));
   1138 
   1139   // sext(sext(x)) --> sext(x)
   1140   if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
   1141     return getSignExtendExpr(SS->getOperand(), Ty);
   1142 
   1143   // sext(zext(x)) --> zext(x)
   1144   if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
   1145     return getZeroExtendExpr(SZ->getOperand(), Ty);
   1146 
   1147   // Before doing any expensive analysis, check to see if we've already
   1148   // computed a SCEV for this Op and Ty.
   1149   FoldingSetNodeID ID;
   1150   ID.AddInteger(scSignExtend);
   1151   ID.AddPointer(Op);
   1152   ID.AddPointer(Ty);
   1153   void *IP = 0;
   1154   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
   1155 
   1156   // If the input value is provably positive, build a zext instead.
   1157   if (isKnownNonNegative(Op))
   1158     return getZeroExtendExpr(Op, Ty);
   1159 
   1160   // sext(trunc(x)) --> sext(x) or x or trunc(x)
   1161   if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
   1162     // It's possible the bits taken off by the truncate were all sign bits. If
   1163     // so, we should be able to simplify this further.
   1164     const SCEV *X = ST->getOperand();
   1165     ConstantRange CR = getSignedRange(X);
   1166     unsigned TruncBits = getTypeSizeInBits(ST->getType());
   1167     unsigned NewBits = getTypeSizeInBits(Ty);
   1168     if (CR.truncate(TruncBits).signExtend(NewBits).contains(
   1169             CR.sextOrTrunc(NewBits)))
   1170       return getTruncateOrSignExtend(X, Ty);
   1171   }
   1172 
   1173   // If the input value is a chrec scev, and we can prove that the value
   1174   // did not overflow the old, smaller, value, we can sign extend all of the
   1175   // operands (often constants).  This allows analysis of something like
   1176   // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
   1177   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
   1178     if (AR->isAffine()) {
   1179       const SCEV *Start = AR->getStart();
   1180       const SCEV *Step = AR->getStepRecurrence(*this);
   1181       unsigned BitWidth = getTypeSizeInBits(AR->getType());
   1182       const Loop *L = AR->getLoop();
   1183 
   1184       // If we have special knowledge that this addrec won't overflow,
   1185       // we don't need to do any further analysis.
   1186       if (AR->getNoWrapFlags(SCEV::FlagNSW))
   1187         return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
   1188                              getSignExtendExpr(Step, Ty),
   1189                              L, SCEV::FlagNSW);
   1190 
   1191       // Check whether the backedge-taken count is SCEVCouldNotCompute.
   1192       // Note that this serves two purposes: It filters out loops that are
   1193       // simply not analyzable, and it covers the case where this code is
   1194       // being called from within backedge-taken count analysis, such that
   1195       // attempting to ask for the backedge-taken count would likely result
    1196       // in infinite recursion. In the latter case, the analysis code will
   1197       // cope with a conservative value, and it will take care to purge
   1198       // that value once it has finished.
   1199       const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
   1200       if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
   1201         // Manually compute the final value for AR, checking for
   1202         // overflow.
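                 // Strategy (sketch): evaluate Start + Step*MaxBECount once in the
                 // original width and once with the operands first extended to twice
                 // the width (the count zero-extended, Start and Step sign-extended);
                 // if sign-extending the narrow sum gives the same wide value, the
                 // narrow computation cannot have signed-overflowed.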
   1203 
    1204         // Check whether the backedge-taken count can be losslessly cast to
   1205         // the addrec's type. The count is always unsigned.
   1206         const SCEV *CastedMaxBECount =
   1207           getTruncateOrZeroExtend(MaxBECount, Start->getType());
   1208         const SCEV *RecastedMaxBECount =
   1209           getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
   1210         if (MaxBECount == RecastedMaxBECount) {
   1211           Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
   1212           // Check whether Start+Step*MaxBECount has no signed overflow.
   1213           const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
   1214           const SCEV *Add = getAddExpr(Start, SMul);
   1215           const SCEV *OperandExtendedAdd =
   1216             getAddExpr(getSignExtendExpr(Start, WideTy),
   1217                        getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
   1218                                   getSignExtendExpr(Step, WideTy)));
   1219           if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd) {
   1220             // Cache knowledge of AR NSW, which is propagated to this AddRec.
   1221             const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
   1222             // Return the expression with the addrec on the outside.
   1223             return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
   1224                                  getSignExtendExpr(Step, Ty),
   1225                                  L, AR->getNoWrapFlags());
   1226           }
   1227           // Similar to above, only this time treat the step value as unsigned.
   1228           // This covers loops that count up with an unsigned step.
   1229           const SCEV *UMul = getMulExpr(CastedMaxBECount, Step);
   1230           Add = getAddExpr(Start, UMul);
   1231           OperandExtendedAdd =
   1232             getAddExpr(getSignExtendExpr(Start, WideTy),
   1233                        getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
   1234                                   getZeroExtendExpr(Step, WideTy)));
   1235           if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd) {
   1236             // Cache knowledge of AR NSW, which is propagated to this AddRec.
   1237             const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
   1238             // Return the expression with the addrec on the outside.
   1239             return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
   1240                                  getZeroExtendExpr(Step, Ty),
   1241                                  L, AR->getNoWrapFlags());
   1242           }
   1243         }
   1244 
    1245         // If the backedge is guarded by a comparison with the pre-inc value,
   1246         // the addrec is safe. Also, if the entry is guarded by a comparison
   1247         // with the start value and the backedge is guarded by a comparison
   1248         // with the post-inc value, the addrec is safe.
   1249         ICmpInst::Predicate Pred;
   1250         const SCEV *OverflowLimit = getOverflowLimitForStep(Step, &Pred, this);
   1251         if (OverflowLimit &&
   1252             (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
   1253              (isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) &&
   1254               isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this),
   1255                                           OverflowLimit)))) {
   1256           // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
   1257           const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
   1258           return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
   1259                                getSignExtendExpr(Step, Ty),
   1260                                L, AR->getNoWrapFlags());
   1261         }
   1262       }
   1263     }
   1264 
   1265   // The cast wasn't folded; create an explicit cast node.
   1266   // Recompute the insert position, as it may have been invalidated.
   1267   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
   1268   SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
   1269                                                    Op, Ty);
   1270   UniqueSCEVs.InsertNode(S, IP);
   1271   return S;
   1272 }
   1273 
   1274 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
   1275 /// unspecified bits out to the given type.
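         /// Callers use this when they do not care whether the new high bits are
         /// sign- or zero-filled; the implementation picks whichever form folds best.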
   1276 ///
   1277 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
   1278                                               Type *Ty) {
   1279   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
   1280          "This is not an extending conversion!");
   1281   assert(isSCEVable(Ty) &&
   1282          "This is not a conversion to a SCEVable type!");
   1283   Ty = getEffectiveSCEVType(Ty);
   1284 
   1285   // Sign-extend negative constants.
   1286   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
   1287     if (SC->getValue()->getValue().isNegative())
   1288       return getSignExtendExpr(Op, Ty);
   1289 
   1290   // Peel off a truncate cast.
   1291   if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
   1292     const SCEV *NewOp = T->getOperand();
   1293     if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
   1294       return getAnyExtendExpr(NewOp, Ty);
   1295     return getTruncateOrNoop(NewOp, Ty);
   1296   }
   1297 
   1298   // Next try a zext cast. If the cast is folded, use it.
   1299   const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
   1300   if (!isa<SCEVZeroExtendExpr>(ZExt))
   1301     return ZExt;
   1302 
   1303   // Next try a sext cast. If the cast is folded, use it.
   1304   const SCEV *SExt = getSignExtendExpr(Op, Ty);
   1305   if (!isa<SCEVSignExtendExpr>(SExt))
   1306     return SExt;
   1307 
   1308   // Force the cast to be folded into the operands of an addrec.
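           // e.g., anyext({A,+,B}<L>) becomes {anyext(A),+,anyext(B)}<L>; only the
           // no-self-wrap flag is propagated to the rebuilt recurrence.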
   1309   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
   1310     SmallVector<const SCEV *, 4> Ops;
   1311     for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
   1312          I != E; ++I)
   1313       Ops.push_back(getAnyExtendExpr(*I, Ty));
   1314     return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
   1315   }
   1316 
   1317   // As a special case, fold anyext(undef) to undef. We don't want to
   1318   // know too much about SCEVUnknowns, but this special case is handy
   1319   // and harmless.
   1320   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
   1321     if (isa<UndefValue>(U->getValue()))
   1322       return getSCEV(UndefValue::get(Ty));
   1323 
   1324   // If the expression is obviously signed, use the sext cast value.
   1325   if (isa<SCEVSMaxExpr>(Op))
   1326     return SExt;
   1327 
   1328   // Absent any other information, use the zext cast value.
   1329   return ZExt;
   1330 }
   1331 
   1332 /// CollectAddOperandsWithScales - Process the given Ops list, which is
   1333 /// a list of operands to be added under the given scale, update the given
   1334 /// map. This is a helper function for getAddRecExpr. As an example of
   1335 /// what it does, given a sequence of operands that would form an add
   1336 /// expression like this:
   1337 ///
   1338 ///    m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
   1339 ///
   1340 /// where A and B are constants, update the map with these values:
   1341 ///
   1342 ///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
   1343 ///
   1344 /// and add 13 + A*B*29 to AccumulatedConstant.
   1345 /// This will allow getAddRecExpr to produce this:
   1346 ///
   1347 ///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
   1348 ///
   1349 /// This form often exposes folding opportunities that are hidden in
   1350 /// the original operand list.
   1351 ///
   1352 /// Return true iff it appears that any interesting folding opportunities
   1353 /// may be exposed. This helps getAddRecExpr short-circuit extra work in
   1354 /// the common case where no interesting opportunities are present, and
   1355 /// is also used as a check to avoid infinite recursion.
   1356 ///
   1357 static bool
   1358 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
   1359                              SmallVector<const SCEV *, 8> &NewOps,
   1360                              APInt &AccumulatedConstant,
   1361                              const SCEV *const *Ops, size_t NumOperands,
   1362                              const APInt &Scale,
   1363                              ScalarEvolution &SE) {
   1364   bool Interesting = false;
   1365 
   1366   // Iterate over the add operands. They are sorted, with constants first.
   1367   unsigned i = 0;
   1368   while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
   1369     ++i;
   1370     // Pull a buried constant out to the outside.
   1371     if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
   1372       Interesting = true;
   1373     AccumulatedConstant += Scale * C->getValue()->getValue();
   1374   }
   1375 
   1376   // Next comes everything else. We're especially interested in multiplies
   1377   // here, but they're in the middle, so just visit the rest with one loop.
   1378   for (; i != NumOperands; ++i) {
   1379     const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
   1380     if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
   1381       APInt NewScale =
   1382         Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
   1383       if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
   1384         // A multiplication of a constant with another add; recurse.
   1385         const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
   1386         Interesting |=
   1387           CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
   1388                                        Add->op_begin(), Add->getNumOperands(),
   1389                                        NewScale, SE);
   1390       } else {
   1391         // A multiplication of a constant with some other value. Update
   1392         // the map.
   1393         SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
   1394         const SCEV *Key = SE.getMulExpr(MulOps);
   1395         std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
   1396           M.insert(std::make_pair(Key, NewScale));
   1397         if (Pair.second) {
   1398           NewOps.push_back(Pair.first->first);
   1399         } else {
   1400           Pair.first->second += NewScale;
   1401           // The map already had an entry for this value, which may indicate
   1402           // a folding opportunity.
   1403           Interesting = true;
   1404         }
   1405       }
   1406     } else {
   1407       // An ordinary operand. Update the map.
   1408       std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
   1409         M.insert(std::make_pair(Ops[i], Scale));
   1410       if (Pair.second) {
   1411         NewOps.push_back(Pair.first->first);
   1412       } else {
   1413         Pair.first->second += Scale;
   1414         // The map already had an entry for this value, which may indicate
   1415         // a folding opportunity.
   1416         Interesting = true;
   1417       }
   1418     }
   1419   }
   1420 
   1421   return Interesting;
   1422 }
   1423 
   1424 namespace {
   1425   struct APIntCompare {
   1426     bool operator()(const APInt &LHS, const APInt &RHS) const {
   1427       return LHS.ult(RHS);
   1428     }
   1429   };
   1430 }
   1431 
   1432 /// getAddExpr - Get a canonical add expression, or something simpler if
   1433 /// possible.
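         /// A typical (illustrative) call is simply SE->getAddExpr(X, Y); the result
         /// may be a simpler SCEV (a constant, a multiply, a recurrence, ...) rather
         /// than a SCEVAddExpr node.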
   1434 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
   1435                                         SCEV::NoWrapFlags Flags) {
   1436   assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
   1437          "only nuw or nsw allowed");
   1438   assert(!Ops.empty() && "Cannot get empty add!");
   1439   if (Ops.size() == 1) return Ops[0];
   1440 #ifndef NDEBUG
   1441   Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
   1442   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
   1443     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
   1444            "SCEVAddExpr operand types don't match!");
   1445 #endif
   1446 
   1447   // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
   1448   // And vice-versa.
   1449   int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
   1450   SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
   1451   if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
   1452     bool All = true;
   1453     for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
   1454          E = Ops.end(); I != E; ++I)
   1455       if (!isKnownNonNegative(*I)) {
   1456         All = false;
   1457         break;
   1458       }
   1459     if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
   1460   }
   1461 
    1462   // Sort by complexity; this groups all similar expression types together.
   1463   GroupByComplexity(Ops, LI);
   1464 
   1465   // If there are any constants, fold them together.
   1466   unsigned Idx = 0;
   1467   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
   1468     ++Idx;
   1469     assert(Idx < Ops.size());
   1470     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
   1471       // We found two constants, fold them together!
   1472       Ops[0] = getConstant(LHSC->getValue()->getValue() +
   1473                            RHSC->getValue()->getValue());
   1474       if (Ops.size() == 2) return Ops[0];
   1475       Ops.erase(Ops.begin()+1);  // Erase the folded element
   1476       LHSC = cast<SCEVConstant>(Ops[0]);
   1477     }
   1478 
   1479     // If we are left with a constant zero being added, strip it off.
   1480     if (LHSC->getValue()->isZero()) {
   1481       Ops.erase(Ops.begin());
   1482       --Idx;
   1483     }
   1484 
   1485     if (Ops.size() == 1) return Ops[0];
   1486   }
   1487 
   1488   // Okay, check to see if the same value occurs in the operand list more than
    1489   // once.  If so, merge them together into a multiply expression.  Since we
   1490   // sorted the list, these values are required to be adjacent.
   1491   Type *Ty = Ops[0]->getType();
   1492   bool FoundMatch = false;
   1493   for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
   1494     if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
   1495       // Scan ahead to count how many equal operands there are.
   1496       unsigned Count = 2;
   1497       while (i+Count != e && Ops[i+Count] == Ops[i])
   1498         ++Count;
   1499       // Merge the values into a multiply.
   1500       const SCEV *Scale = getConstant(Ty, Count);
   1501       const SCEV *Mul = getMulExpr(Scale, Ops[i]);
   1502       if (Ops.size() == Count)
   1503         return Mul;
   1504       Ops[i] = Mul;
   1505       Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
   1506       --i; e -= Count - 1;
   1507       FoundMatch = true;
   1508     }
   1509   if (FoundMatch)
   1510     return getAddExpr(Ops, Flags);
   1511 
   1512   // Check for truncates. If all the operands are truncated from the same
   1513   // type, see if factoring out the truncate would permit the result to be
    1514   // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + anyext(m)*n)
   1515   // if the contents of the resulting outer trunc fold to something simple.
   1516   for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
   1517     const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
   1518     Type *DstType = Trunc->getType();
   1519     Type *SrcType = Trunc->getOperand()->getType();
   1520     SmallVector<const SCEV *, 8> LargeOps;
   1521     bool Ok = true;
   1522     // Check all the operands to see if they can be represented in the
   1523     // source type of the truncate.
   1524     for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
   1525       if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
   1526         if (T->getOperand()->getType() != SrcType) {
   1527           Ok = false;
   1528           break;
   1529         }
   1530         LargeOps.push_back(T->getOperand());
   1531       } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
   1532         LargeOps.push_back(getAnyExtendExpr(C, SrcType));
   1533       } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
   1534         SmallVector<const SCEV *, 8> LargeMulOps;
   1535         for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
   1536           if (const SCEVTruncateExpr *T =
   1537                 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
   1538             if (T->getOperand()->getType() != SrcType) {
   1539               Ok = false;
   1540               break;
   1541             }
   1542             LargeMulOps.push_back(T->getOperand());
   1543           } else if (const SCEVConstant *C =
   1544                        dyn_cast<SCEVConstant>(M->getOperand(j))) {
   1545             LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
   1546           } else {
   1547             Ok = false;
   1548             break;
   1549           }
   1550         }
   1551         if (Ok)
   1552           LargeOps.push_back(getMulExpr(LargeMulOps));
   1553       } else {
   1554         Ok = false;
   1555         break;
   1556       }
   1557     }
   1558     if (Ok) {
   1559       // Evaluate the expression in the larger type.
   1560       const SCEV *Fold = getAddExpr(LargeOps, Flags);
   1561       // If it folds to something simple, use it. Otherwise, don't.
   1562       if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
   1563         return getTruncateExpr(Fold, DstType);
   1564     }
   1565   }
   1566 
   1567   // Skip past any other cast SCEVs.
   1568   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
   1569     ++Idx;
   1570 
   1571   // If there are add operands they would be next.
   1572   if (Idx < Ops.size()) {
   1573     bool DeletedAdd = false;
   1574     while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
   1575       // If we have an add, expand the add operands onto the end of the operands
   1576       // list.
   1577       Ops.erase(Ops.begin()+Idx);
   1578       Ops.append(Add->op_begin(), Add->op_end());
   1579       DeletedAdd = true;
   1580     }
   1581 
   1582     // If we deleted at least one add, we added operands to the end of the list,
   1583     // and they are not necessarily sorted.  Recurse to resort and resimplify
   1584     // any operands we just acquired.
   1585     if (DeletedAdd)
   1586       return getAddExpr(Ops);
   1587   }
   1588 
   1589   // Skip over the add expression until we get to a multiply.
   1590   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
   1591     ++Idx;
   1592 
   1593   // Check to see if there are any folding opportunities present with
   1594   // operands multiplied by constant values.
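           // e.g., (2*x) + (3*x) is regrouped here into 5*x.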
   1595   if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
   1596     uint64_t BitWidth = getTypeSizeInBits(Ty);
   1597     DenseMap<const SCEV *, APInt> M;
   1598     SmallVector<const SCEV *, 8> NewOps;
   1599     APInt AccumulatedConstant(BitWidth, 0);
   1600     if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
   1601                                      Ops.data(), Ops.size(),
   1602                                      APInt(BitWidth, 1), *this)) {
    1603       // Some interesting folding opportunity is present, so it's worthwhile to
   1604       // re-generate the operands list. Group the operands by constant scale,
   1605       // to avoid multiplying by the same constant scale multiple times.
   1606       std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
   1607       for (SmallVector<const SCEV *, 8>::const_iterator I = NewOps.begin(),
   1608            E = NewOps.end(); I != E; ++I)
   1609         MulOpLists[M.find(*I)->second].push_back(*I);
   1610       // Re-generate the operands list.
   1611       Ops.clear();
   1612       if (AccumulatedConstant != 0)
   1613         Ops.push_back(getConstant(AccumulatedConstant));
   1614       for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
   1615            I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
   1616         if (I->first != 0)
   1617           Ops.push_back(getMulExpr(getConstant(I->first),
   1618                                    getAddExpr(I->second)));
   1619       if (Ops.empty())
   1620         return getConstant(Ty, 0);
   1621       if (Ops.size() == 1)
   1622         return Ops[0];
   1623       return getAddExpr(Ops);
   1624     }
   1625   }
   1626 
   1627   // If we are adding something to a multiply expression, make sure the
   1628   // something is not already an operand of the multiply.  If so, merge it into
   1629   // the multiply.
   1630   for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
   1631     const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
   1632     for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
   1633       const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
   1634       if (isa<SCEVConstant>(MulOpSCEV))
   1635         continue;
   1636       for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
   1637         if (MulOpSCEV == Ops[AddOp]) {
   1638           // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
   1639           const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
   1640           if (Mul->getNumOperands() != 2) {
   1641             // If the multiply has more than two operands, we must get the
   1642             // Y*Z term.
   1643             SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
   1644                                                 Mul->op_begin()+MulOp);
   1645             MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
   1646             InnerMul = getMulExpr(MulOps);
   1647           }
   1648           const SCEV *One = getConstant(Ty, 1);
   1649           const SCEV *AddOne = getAddExpr(One, InnerMul);
   1650           const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
   1651           if (Ops.size() == 2) return OuterMul;
   1652           if (AddOp < Idx) {
   1653             Ops.erase(Ops.begin()+AddOp);
   1654             Ops.erase(Ops.begin()+Idx-1);
   1655           } else {
   1656             Ops.erase(Ops.begin()+Idx);
   1657             Ops.erase(Ops.begin()+AddOp-1);
   1658           }
   1659           Ops.push_back(OuterMul);
   1660           return getAddExpr(Ops);
   1661         }
   1662 
   1663       // Check this multiply against other multiplies being added together.
   1664       for (unsigned OtherMulIdx = Idx+1;
   1665            OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
   1666            ++OtherMulIdx) {
   1667         const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
   1668         // If MulOp occurs in OtherMul, we can fold the two multiplies
   1669         // together.
   1670         for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
   1671              OMulOp != e; ++OMulOp)
   1672           if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
   1673             // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
   1674             const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
   1675             if (Mul->getNumOperands() != 2) {
   1676               SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
   1677                                                   Mul->op_begin()+MulOp);
   1678               MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
   1679               InnerMul1 = getMulExpr(MulOps);
   1680             }
   1681             const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
   1682             if (OtherMul->getNumOperands() != 2) {
   1683               SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
   1684                                                   OtherMul->op_begin()+OMulOp);
   1685               MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
   1686               InnerMul2 = getMulExpr(MulOps);
   1687             }
   1688             const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
   1689             const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
   1690             if (Ops.size() == 2) return OuterMul;
   1691             Ops.erase(Ops.begin()+Idx);
   1692             Ops.erase(Ops.begin()+OtherMulIdx-1);
   1693             Ops.push_back(OuterMul);
   1694             return getAddExpr(Ops);
   1695           }
   1696       }
   1697     }
   1698   }
   1699 
   1700   // If there are any add recurrences in the operands list, see if any other
   1701   // added values are loop invariant.  If so, we can fold them into the
   1702   // recurrence.
   1703   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
   1704     ++Idx;
   1705 
   1706   // Scan over all recurrences, trying to fold loop invariants into them.
   1707   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
   1708     // Scan all of the other operands to this add and add them to the vector if
   1709     // they are loop invariant w.r.t. the recurrence.
   1710     SmallVector<const SCEV *, 8> LIOps;
   1711     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
   1712     const Loop *AddRecLoop = AddRec->getLoop();
   1713     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
   1714       if (isLoopInvariant(Ops[i], AddRecLoop)) {
   1715         LIOps.push_back(Ops[i]);
   1716         Ops.erase(Ops.begin()+i);
   1717         --i; --e;
   1718       }
   1719 
   1720     // If we found some loop invariants, fold them into the recurrence.
   1721     if (!LIOps.empty()) {
   1722       //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
   1723       LIOps.push_back(AddRec->getStart());
   1724 
   1725       SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
   1726                                              AddRec->op_end());
   1727       AddRecOps[0] = getAddExpr(LIOps);
   1728 
   1729       // Build the new addrec. Propagate the NUW and NSW flags if both the
   1730       // outer add and the inner addrec are guaranteed to have no overflow.
   1731       // Always propagate NW.
   1732       Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
   1733       const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
   1734 
   1735       // If all of the other operands were loop invariant, we are done.
   1736       if (Ops.size() == 1) return NewRec;
   1737 
    1738       // Otherwise, add the folded AddRec to the non-loop-invariant parts.
   1739       for (unsigned i = 0;; ++i)
   1740         if (Ops[i] == AddRec) {
   1741           Ops[i] = NewRec;
   1742           break;
   1743         }
   1744       return getAddExpr(Ops);
   1745     }
   1746 
   1747     // Okay, if there weren't any loop invariants to be folded, check to see if
   1748     // there are multiple AddRec's with the same loop induction variable being
   1749     // added together.  If so, we can fold them.
   1750     for (unsigned OtherIdx = Idx+1;
   1751          OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
   1752          ++OtherIdx)
   1753       if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
   1754         // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
   1755         SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
   1756                                                AddRec->op_end());
   1757         for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
   1758              ++OtherIdx)
   1759           if (const SCEVAddRecExpr *OtherAddRec =
   1760                 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
   1761             if (OtherAddRec->getLoop() == AddRecLoop) {
   1762               for (unsigned i = 0, e = OtherAddRec->getNumOperands();
   1763                    i != e; ++i) {
   1764                 if (i >= AddRecOps.size()) {
   1765                   AddRecOps.append(OtherAddRec->op_begin()+i,
   1766                                    OtherAddRec->op_end());
   1767                   break;
   1768                 }
   1769                 AddRecOps[i] = getAddExpr(AddRecOps[i],
   1770                                           OtherAddRec->getOperand(i));
   1771               }
   1772               Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
   1773             }
   1774         // Step size has changed, so we cannot guarantee no self-wraparound.
   1775         Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
   1776         return getAddExpr(Ops);
   1777       }
   1778 
   1779     // Otherwise couldn't fold anything into this recurrence.  Move onto the
   1780     // next one.
   1781   }
   1782 
   1783   // Okay, it looks like we really DO need an add expr.  Check to see if we
   1784   // already have one, otherwise create a new one.
   1785   FoldingSetNodeID ID;
   1786   ID.AddInteger(scAddExpr);
   1787   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
   1788     ID.AddPointer(Ops[i]);
   1789   void *IP = 0;
   1790   SCEVAddExpr *S =
   1791     static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
   1792   if (!S) {
   1793     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
   1794     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
   1795     S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
   1796                                         O, Ops.size());
   1797     UniqueSCEVs.InsertNode(S, IP);
   1798   }
   1799   S->setNoWrapFlags(Flags);
   1800   return S;
   1801 }
   1802 
   1803 /// getMulExpr - Get a canonical multiply expression, or something simpler if
   1804 /// possible.
   1805 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
   1806                                         SCEV::NoWrapFlags Flags) {
   1807   assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
   1808          "only nuw or nsw allowed");
   1809   assert(!Ops.empty() && "Cannot get empty mul!");
   1810   if (Ops.size() == 1) return Ops[0];
   1811 #ifndef NDEBUG
   1812   Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
   1813   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
   1814     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
   1815            "SCEVMulExpr operand types don't match!");
   1816 #endif
   1817 
   1818   // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
   1819   // And vice-versa.
   1820   int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
   1821   SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
   1822   if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
   1823     bool All = true;
   1824     for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
   1825          E = Ops.end(); I != E; ++I)
   1826       if (!isKnownNonNegative(*I)) {
   1827         All = false;
   1828         break;
   1829       }
   1830     if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
   1831   }
   1832 
    1833   // Sort by complexity; this groups all similar expression types together.
   1834   GroupByComplexity(Ops, LI);
   1835 
   1836   // If there are any constants, fold them together.
   1837   unsigned Idx = 0;
   1838   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
   1839 
   1840     // C1*(C2+V) -> C1*C2 + C1*V
   1841     if (Ops.size() == 2)
   1842       if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
   1843         if (Add->getNumOperands() == 2 &&
   1844             isa<SCEVConstant>(Add->getOperand(0)))
   1845           return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
   1846                             getMulExpr(LHSC, Add->getOperand(1)));
   1847 
   1848     ++Idx;
   1849     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
   1850       // We found two constants, fold them together!
   1851       ConstantInt *Fold = ConstantInt::get(getContext(),
   1852                                            LHSC->getValue()->getValue() *
   1853                                            RHSC->getValue()->getValue());
   1854       Ops[0] = getConstant(Fold);
   1855       Ops.erase(Ops.begin()+1);  // Erase the folded element
   1856       if (Ops.size() == 1) return Ops[0];
   1857       LHSC = cast<SCEVConstant>(Ops[0]);
   1858     }
   1859 
   1860     // If we are left with a constant one being multiplied, strip it off.
   1861     if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
   1862       Ops.erase(Ops.begin());
   1863       --Idx;
   1864     } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
   1865       // If we have a multiply of zero, it will always be zero.
   1866       return Ops[0];
   1867     } else if (Ops[0]->isAllOnesValue()) {
   1868       // If we have a mul by -1 of an add, try distributing the -1 among the
   1869       // add operands.
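               // e.g., -1 * (x + 3) becomes (-1 * x) + -3; the folded constant term
               // makes the distribution profitable.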
   1870       if (Ops.size() == 2) {
   1871         if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
   1872           SmallVector<const SCEV *, 4> NewOps;
   1873           bool AnyFolded = false;
   1874           for (SCEVAddRecExpr::op_iterator I = Add->op_begin(),
   1875                  E = Add->op_end(); I != E; ++I) {
   1876             const SCEV *Mul = getMulExpr(Ops[0], *I);
   1877             if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
   1878             NewOps.push_back(Mul);
   1879           }
   1880           if (AnyFolded)
   1881             return getAddExpr(NewOps);
   1882         }
   1883         else if (const SCEVAddRecExpr *
   1884                  AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
   1885           // Negation preserves a recurrence's no self-wrap property.
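                   // e.g., -1 * {S,+,T}<nw><L> becomes {-1*S,+,-1*T}<nw><L>.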
   1886           SmallVector<const SCEV *, 4> Operands;
   1887           for (SCEVAddRecExpr::op_iterator I = AddRec->op_begin(),
   1888                  E = AddRec->op_end(); I != E; ++I) {
   1889             Operands.push_back(getMulExpr(Ops[0], *I));
   1890           }
   1891           return getAddRecExpr(Operands, AddRec->getLoop(),
   1892                                AddRec->getNoWrapFlags(SCEV::FlagNW));
   1893         }
   1894       }
   1895     }
   1896 
   1897     if (Ops.size() == 1)
   1898       return Ops[0];
   1899   }
   1900 
   1901   // Skip over the add expression until we get to a multiply.
   1902   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
   1903     ++Idx;
   1904 
   1905   // If there are mul operands inline them all into this expression.
   1906   if (Idx < Ops.size()) {
   1907     bool DeletedMul = false;
   1908     while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
    1909       // If we have a mul, expand the mul operands onto the end of the operands
   1910       // list.
   1911       Ops.erase(Ops.begin()+Idx);
   1912       Ops.append(Mul->op_begin(), Mul->op_end());
   1913       DeletedMul = true;
   1914     }
   1915 
   1916     // If we deleted at least one mul, we added operands to the end of the list,
   1917     // and they are not necessarily sorted.  Recurse to resort and resimplify
   1918     // any operands we just acquired.
   1919     if (DeletedMul)
   1920       return getMulExpr(Ops);
   1921   }
   1922 
   1923   // If there are any add recurrences in the operands list, see if any other
   1924   // added values are loop invariant.  If so, we can fold them into the
   1925   // recurrence.
   1926   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
   1927     ++Idx;
   1928 
   1929   // Scan over all recurrences, trying to fold loop invariants into them.
   1930   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
   1931     // Scan all of the other operands to this mul and add them to the vector if
   1932     // they are loop invariant w.r.t. the recurrence.
   1933     SmallVector<const SCEV *, 8> LIOps;
   1934     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
   1935     const Loop *AddRecLoop = AddRec->getLoop();
   1936     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
   1937       if (isLoopInvariant(Ops[i], AddRecLoop)) {
   1938         LIOps.push_back(Ops[i]);
   1939         Ops.erase(Ops.begin()+i);
   1940         --i; --e;
   1941       }
   1942 
   1943     // If we found some loop invariants, fold them into the recurrence.
   1944     if (!LIOps.empty()) {
   1945       //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
   1946       SmallVector<const SCEV *, 4> NewOps;
   1947       NewOps.reserve(AddRec->getNumOperands());
   1948       const SCEV *Scale = getMulExpr(LIOps);
   1949       for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
   1950         NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
   1951 
   1952       // Build the new addrec. Propagate the NUW and NSW flags if both the
   1953       // outer mul and the inner addrec are guaranteed to have no overflow.
   1954       //
    1955       // The no-self-wrap flag cannot be guaranteed after changing the step
    1956       // size, but it will be inferred if either NUW or NSW is true.
   1957       Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
   1958       const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);
   1959 
   1960       // If all of the other operands were loop invariant, we are done.
   1961       if (Ops.size() == 1) return NewRec;
   1962 
    1963       // Otherwise, multiply the folded AddRec by the non-loop-invariant parts.
   1964       for (unsigned i = 0;; ++i)
   1965         if (Ops[i] == AddRec) {
   1966           Ops[i] = NewRec;
   1967           break;
   1968         }
   1969       return getMulExpr(Ops);
   1970     }
   1971 
   1972     // Okay, if there weren't any loop invariants to be folded, check to see if
   1973     // there are multiple AddRec's with the same loop induction variable being
   1974     // multiplied together.  If so, we can fold them.
   1975     for (unsigned OtherIdx = Idx+1;
   1976          OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
   1977          ++OtherIdx)
   1978       if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
   1979         // F * G, where F = {A,+,B}<L> and G = {C,+,D}<L>  -->
   1980         // {A*C,+,F*D + G*B + B*D}<L>
   1981         for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
   1982              ++OtherIdx)
   1983           if (const SCEVAddRecExpr *OtherAddRec =
   1984                 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
   1985             if (OtherAddRec->getLoop() == AddRecLoop) {
   1986               const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
   1987               const SCEV *NewStart = getMulExpr(F->getStart(), G->getStart());
   1988               const SCEV *B = F->getStepRecurrence(*this);
   1989               const SCEV *D = G->getStepRecurrence(*this);
   1990               const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
   1991                                                getMulExpr(G, B),
   1992                                                getMulExpr(B, D));
   1993               const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
   1994                                                     F->getLoop(),
   1995                                                     SCEV::FlagAnyWrap);
   1996               if (Ops.size() == 2) return NewAddRec;
   1997               Ops[Idx] = AddRec = cast<SCEVAddRecExpr>(NewAddRec);
   1998               Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
   1999             }
   2000         return getMulExpr(Ops);
   2001       }
   2002 
   2003     // Otherwise couldn't fold anything into this recurrence.  Move onto the
   2004     // next one.
   2005   }
   2006 
    2007   // Okay, it looks like we really DO need a mul expr.  Check to see if we
   2008   // already have one, otherwise create a new one.
   2009   FoldingSetNodeID ID;
   2010   ID.AddInteger(scMulExpr);
   2011   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
   2012     ID.AddPointer(Ops[i]);
   2013   void *IP = 0;
   2014   SCEVMulExpr *S =
   2015     static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
   2016   if (!S) {
   2017     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
   2018     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
   2019     S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
   2020                                         O, Ops.size());
   2021     UniqueSCEVs.InsertNode(S, IP);
   2022   }
   2023   S->setNoWrapFlags(Flags);
   2024   return S;
   2025 }
   2026 
   2027 /// getUDivExpr - Get a canonical unsigned division expression, or something
   2028 /// simpler if possible.
   2029 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
   2030                                          const SCEV *RHS) {
   2031   assert(getEffectiveSCEVType(LHS->getType()) ==
   2032          getEffectiveSCEVType(RHS->getType()) &&
   2033          "SCEVUDivExpr operand types don't match!");
   2034 
   2035   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
   2036     if (RHSC->getValue()->equalsInt(1))
   2037       return LHS;                               // X udiv 1 --> x
   2038     // If the denominator is zero, the result of the udiv is undefined. Don't
   2039     // try to analyze it, because the resolution chosen here may differ from
   2040     // the resolution chosen in other parts of the compiler.
   2041     if (!RHSC->getValue()->isZero()) {
   2042       // Determine if the division can be folded into the operands of
    2043       // the dividend.
   2044       // TODO: Generalize this to non-constants by using known-bits information.
   2045       Type *Ty = LHS->getType();
   2046       unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
   2047       unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
   2048       // For non-power-of-two values, effectively round the value up to the
   2049       // nearest power of two.
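               // e.g., an i32 divisor of 6 yields MaxShiftAmt == 2 above, bumped to 3
               // below, as if the divisor were 8.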
   2050       if (!RHSC->getValue()->getValue().isPowerOf2())
   2051         ++MaxShiftAmt;
   2052       IntegerType *ExtTy =
   2053         IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
   2054       // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
   2055       if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
   2056         if (const SCEVConstant *Step =
   2057               dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
   2058           if (!Step->getValue()->getValue()
   2059                 .urem(RHSC->getValue()->getValue()) &&
   2060               getZeroExtendExpr(AR, ExtTy) ==
   2061               getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
   2062                             getZeroExtendExpr(Step, ExtTy),
   2063                             AR->getLoop(), SCEV::FlagAnyWrap)) {
   2064             SmallVector<const SCEV *, 4> Operands;
   2065             for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
   2066               Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
   2067             return getAddRecExpr(Operands, AR->getLoop(),
   2068                                  SCEV::FlagNW);
   2069           }
   2070       // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
   2071       if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
   2072         SmallVector<const SCEV *, 4> Operands;
   2073         for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
   2074           Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
   2075         if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
   2076           // Find an operand that's safely divisible.
   2077           for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
   2078             const SCEV *Op = M->getOperand(i);
   2079             const SCEV *Div = getUDivExpr(Op, RHSC);
   2080             if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
   2081               Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
   2082                                                       M->op_end());
   2083               Operands[i] = Div;
   2084               return getMulExpr(Operands);
   2085             }
   2086           }
   2087       }
   2088       // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
   2089       if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
   2090         SmallVector<const SCEV *, 4> Operands;
   2091         for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
   2092           Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
   2093         if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
   2094           Operands.clear();
   2095           for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
   2096             const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
   2097             if (isa<SCEVUDivExpr>(Op) ||
   2098                 getMulExpr(Op, RHS) != A->getOperand(i))
   2099               break;
   2100             Operands.push_back(Op);
   2101           }
   2102           if (Operands.size() == A->getNumOperands())
   2103             return getAddExpr(Operands);
   2104         }
   2105       }
   2106 
   2107       // Fold if both operands are constant.
   2108       if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
   2109         Constant *LHSCV = LHSC->getValue();
   2110         Constant *RHSCV = RHSC->getValue();
   2111         return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
   2112                                                                    RHSCV)));
   2113       }
   2114     }
   2115   }
   2116 
   2117   FoldingSetNodeID ID;
   2118   ID.AddInteger(scUDivExpr);
   2119   ID.AddPointer(LHS);
   2120   ID.AddPointer(RHS);
   2121   void *IP = 0;
   2122   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
   2123   SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
   2124                                              LHS, RHS);
   2125   UniqueSCEVs.InsertNode(S, IP);
   2126   return S;
   2127 }
   2128 
   2129 
   2130 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
   2131 /// Simplify the expression as much as possible.
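         /// For example, if Step is itself an add recurrence {A,+,B} on the same
         /// loop, the result is the flattened recurrence {Start,+,A,+,B} rather
         /// than a nested one.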
   2132 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
   2133                                            const Loop *L,
   2134                                            SCEV::NoWrapFlags Flags) {
   2135   SmallVector<const SCEV *, 4> Operands;
   2136   Operands.push_back(Start);
   2137   if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
   2138     if (StepChrec->getLoop() == L) {
   2139       Operands.append(StepChrec->op_begin(), StepChrec->op_end());
   2140       return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
   2141     }
   2142 
   2143   Operands.push_back(Step);
   2144   return getAddRecExpr(Operands, L, Flags);
   2145 }
   2146 
   2147 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
   2148 /// Simplify the expression as much as possible.
   2149 const SCEV *
   2150 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
   2151                                const Loop *L, SCEV::NoWrapFlags Flags) {
   2152   if (Operands.size() == 1) return Operands[0];
   2153 #ifndef NDEBUG
   2154   Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
   2155   for (unsigned i = 1, e = Operands.size(); i != e; ++i)
   2156     assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
   2157            "SCEVAddRecExpr operand types don't match!");
   2158   for (unsigned i = 0, e = Operands.size(); i != e; ++i)
   2159     assert(isLoopInvariant(Operands[i], L) &&
   2160            "SCEVAddRecExpr operand is not loop-invariant!");
   2161 #endif
   2162 
   2163   if (Operands.back()->isZero()) {
   2164     Operands.pop_back();
   2165     return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0}  -->  X
   2166   }
   2167 
    2168   // It's tempting to call getMaxBackedgeTakenCount here and
   2169   // use that information to infer NUW and NSW flags. However, computing a
   2170   // BE count requires calling getAddRecExpr, so we may not yet have a
   2171   // meaningful BE count at this point (and if we don't, we'd be stuck
   2172   // with a SCEVCouldNotCompute as the cached BE count).
   2173 
   2174   // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
   2175   // And vice-versa.
   2176   int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
   2177   SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
   2178   if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
   2179     bool All = true;
   2180     for (SmallVectorImpl<const SCEV *>::const_iterator I = Operands.begin(),
   2181          E = Operands.end(); I != E; ++I)
   2182       if (!isKnownNonNegative(*I)) {
   2183         All = false;
   2184         break;
   2185       }
   2186     if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
   2187   }
   2188 
    2189   // Canonicalize nested AddRecs by nesting them in order of loop depth.
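           // e.g., {{A,+,B}<Inner>,+,C}<Outer> is rebuilt as {{A,+,C}<Outer>,+,B}<Inner>
           // when Outer is the shallower (containing) loop.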
   2190   if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
   2191     const Loop *NestedLoop = NestedAR->getLoop();
   2192     if (L->contains(NestedLoop) ?
   2193         (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
   2194         (!NestedLoop->contains(L) &&
   2195          DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
   2196       SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
   2197                                                   NestedAR->op_end());
   2198       Operands[0] = NestedAR->getStart();
   2199       // AddRecs require their operands be loop-invariant with respect to their
   2200       // loops. Don't perform this transformation if it would break this
   2201       // requirement.
   2202       bool AllInvariant = true;
   2203       for (unsigned i = 0, e = Operands.size(); i != e; ++i)
   2204         if (!isLoopInvariant(Operands[i], L)) {
   2205           AllInvariant = false;
   2206           break;
   2207         }
   2208       if (AllInvariant) {
   2209         // Create a recurrence for the outer loop with the same step size.
   2210         //
   2211         // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
   2212         // inner recurrence has the same property.
   2213         SCEV::NoWrapFlags OuterFlags =
   2214           maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
   2215 
   2216         NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
   2217         AllInvariant = true;
   2218         for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
   2219           if (!isLoopInvariant(NestedOperands[i], NestedLoop)) {
   2220             AllInvariant = false;
   2221             break;
   2222           }
   2223         if (AllInvariant) {
   2224           // Ok, both add recurrences are valid after the transformation.
   2225           //
   2226           // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
   2227           // the outer recurrence has the same property.
   2228           SCEV::NoWrapFlags InnerFlags =
   2229             maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
   2230           return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
   2231         }
   2232       }
   2233       // Reset Operands to its original state.
   2234       Operands[0] = NestedAR;
   2235     }
   2236   }
   2237 
   2238   // Okay, it looks like we really DO need an addrec expr.  Check to see if we
   2239   // already have one, otherwise create a new one.
   2240   FoldingSetNodeID ID;
   2241   ID.AddInteger(scAddRecExpr);
   2242   for (unsigned i = 0, e = Operands.size(); i != e; ++i)
   2243     ID.AddPointer(Operands[i]);
   2244   ID.AddPointer(L);
   2245   void *IP = 0;
   2246   SCEVAddRecExpr *S =
   2247     static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
   2248   if (!S) {
   2249     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
   2250     std::uninitialized_copy(Operands.begin(), Operands.end(), O);
   2251     S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
   2252                                            O, Operands.size(), L);
   2253     UniqueSCEVs.InsertNode(S, IP);
   2254   }
   2255   S->setNoWrapFlags(Flags);
   2256   return S;
   2257 }
   2258 
   2259 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
   2260                                          const SCEV *RHS) {
   2261   SmallVector<const SCEV *, 2> Ops;
   2262   Ops.push_back(LHS);
   2263   Ops.push_back(RHS);
   2264   return getSMaxExpr(Ops);
   2265 }
   2266 
   2267 const SCEV *
   2268 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
   2269   assert(!Ops.empty() && "Cannot get empty smax!");
   2270   if (Ops.size() == 1) return Ops[0];
   2271 #ifndef NDEBUG
   2272   Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
   2273   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
   2274     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
   2275            "SCEVSMaxExpr operand types don't match!");
   2276 #endif
   2277 
    2278   // Sort by complexity; this groups all similar expression types together.
   2279   GroupByComplexity(Ops, LI);
   2280 
   2281   // If there are any constants, fold them together.
   2282   unsigned Idx = 0;
   2283   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
   2284     ++Idx;
   2285     assert(Idx < Ops.size());
   2286     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
   2287       // We found two constants, fold them together!
   2288       ConstantInt *Fold = ConstantInt::get(getContext(),
   2289                               APIntOps::smax(LHSC->getValue()->getValue(),
   2290                                              RHSC->getValue()->getValue()));
   2291       Ops[0] = getConstant(Fold);
   2292       Ops.erase(Ops.begin()+1);  // Erase the folded element
   2293       if (Ops.size() == 1) return Ops[0];
   2294       LHSC = cast<SCEVConstant>(Ops[0]);
   2295     }
   2296 
   2297     // If we are left with a constant minimum-int, strip it off.
   2298     if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
   2299       Ops.erase(Ops.begin());
   2300       --Idx;
   2301     } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
   2302       // If we have an smax with a constant maximum-int, it will always be
   2303       // maximum-int.
   2304       return Ops[0];
   2305     }
   2306 
   2307     if (Ops.size() == 1) return Ops[0];
   2308   }
   2309 
   2310   // Find the first SMax
   2311   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
   2312     ++Idx;
   2313 
   2314   // Check to see if one of the operands is an SMax. If so, expand its operands
   2315   // onto our operand list, and recurse to simplify.
   2316   if (Idx < Ops.size()) {
   2317     bool DeletedSMax = false;
   2318     while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
   2319       Ops.erase(Ops.begin()+Idx);
   2320       Ops.append(SMax->op_begin(), SMax->op_end());
   2321       DeletedSMax = true;
   2322     }
   2323 
   2324     if (DeletedSMax)
   2325       return getSMaxExpr(Ops);
   2326   }
   2327 
   2328   // Okay, check to see if the same value occurs in the operand list twice.  If
   2329   // so, delete one.  Since we sorted the list, these values are required to
   2330   // be adjacent.
   2331   for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
   2332     //  X smax Y smax Y  -->  X smax Y
    2333     //  X smax Y         -->  X, if X is always greater than or equal to Y
   2334     if (Ops[i] == Ops[i+1] ||
   2335         isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
   2336       Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
   2337       --i; --e;
   2338     } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
   2339       Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
   2340       --i; --e;
   2341     }
   2342 
   2343   if (Ops.size() == 1) return Ops[0];
   2344 
   2345   assert(!Ops.empty() && "Reduced smax down to nothing!");
   2346 
   2347   // Okay, it looks like we really DO need an smax expr.  Check to see if we
   2348   // already have one, otherwise create a new one.
   2349   FoldingSetNodeID ID;
   2350   ID.AddInteger(scSMaxExpr);
   2351   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
   2352     ID.AddPointer(Ops[i]);
   2353   void *IP = 0;
   2354   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
   2355   const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
   2356   std::uninitialized_copy(Ops.begin(), Ops.end(), O);
   2357   SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
   2358                                              O, Ops.size());
   2359   UniqueSCEVs.InsertNode(S, IP);
   2360   return S;
   2361 }
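
// A minimal sketch of the smax folds above (assuming an in-scope
// ScalarEvolution &SE and an i32 SCEV X; the names are hypothetical):
//
//   const SCEV *C3 = SE.getConstant(APInt(32, 3));
//   const SCEV *C7 = SE.getConstant(APInt(32, 7));
//   SE.getSMaxExpr(C3, C7);                     // constants fold to 7
//   SE.getSMaxExpr(SE.getSMaxExpr(X, C3), C3);  // flattens back to smax(X, 3)
//   SE.getSMaxExpr(X, SE.getConstant(APInt::getSignedMinValue(32)));
//                                               // min-int operand stripped: X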
   2362 
   2363 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
   2364                                          const SCEV *RHS) {
   2365   SmallVector<const SCEV *, 2> Ops;
   2366   Ops.push_back(LHS);
   2367   Ops.push_back(RHS);
   2368   return getUMaxExpr(Ops);
   2369 }
   2370 
   2371 const SCEV *
   2372 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
   2373   assert(!Ops.empty() && "Cannot get empty umax!");
   2374   if (Ops.size() == 1) return Ops[0];
   2375 #ifndef NDEBUG
   2376   Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
   2377   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
   2378     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
   2379            "SCEVUMaxExpr operand types don't match!");
   2380 #endif
   2381 
    2382   // Sort by complexity; this groups all similar expression types together.
   2383   GroupByComplexity(Ops, LI);
   2384 
   2385   // If there are any constants, fold them together.
   2386   unsigned Idx = 0;
   2387   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
   2388     ++Idx;
   2389     assert(Idx < Ops.size());
   2390     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
   2391       // We found two constants, fold them together!
   2392       ConstantInt *Fold = ConstantInt::get(getContext(),
   2393                               APIntOps::umax(LHSC->getValue()->getValue(),
   2394                                              RHSC->getValue()->getValue()));
   2395       Ops[0] = getConstant(Fold);
   2396       Ops.erase(Ops.begin()+1);  // Erase the folded element
   2397       if (Ops.size() == 1) return Ops[0];
   2398       LHSC = cast<SCEVConstant>(Ops[0]);
   2399     }
   2400 
   2401     // If we are left with a constant minimum-int, strip it off.
   2402     if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
   2403       Ops.erase(Ops.begin());
   2404       --Idx;
   2405     } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
   2406       // If we have an umax with a constant maximum-int, it will always be
   2407       // maximum-int.
   2408       return Ops[0];
   2409     }
   2410 
   2411     if (Ops.size() == 1) return Ops[0];
   2412   }
   2413 
   2414   // Find the first UMax
   2415   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
   2416     ++Idx;
   2417 
   2418   // Check to see if one of the operands is a UMax. If so, expand its operands
   2419   // onto our operand list, and recurse to simplify.
   2420   if (Idx < Ops.size()) {
   2421     bool DeletedUMax = false;
   2422     while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
   2423       Ops.erase(Ops.begin()+Idx);
   2424       Ops.append(UMax->op_begin(), UMax->op_end());
   2425       DeletedUMax = true;
   2426     }
   2427 
   2428     if (DeletedUMax)
   2429       return getUMaxExpr(Ops);
   2430   }
   2431 
   2432   // Okay, check to see if the same value occurs in the operand list twice.  If
   2433   // so, delete one.  Since we sorted the list, these values are required to
   2434   // be adjacent.
   2435   for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
   2436     //  X umax Y umax Y  -->  X umax Y
    2437     //  X umax Y         -->  X, if X is always greater than or equal to Y
   2438     if (Ops[i] == Ops[i+1] ||
   2439         isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
   2440       Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
   2441       --i; --e;
   2442     } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
   2443       Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
   2444       --i; --e;
   2445     }
   2446 
   2447   if (Ops.size() == 1) return Ops[0];
   2448 
   2449   assert(!Ops.empty() && "Reduced umax down to nothing!");
   2450 
   2451   // Okay, it looks like we really DO need a umax expr.  Check to see if we
   2452   // already have one, otherwise create a new one.
   2453   FoldingSetNodeID ID;
   2454   ID.AddInteger(scUMaxExpr);
   2455   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
   2456     ID.AddPointer(Ops[i]);
   2457   void *IP = 0;
   2458   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
   2459   const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
   2460   std::uninitialized_copy(Ops.begin(), Ops.end(), O);
   2461   SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
   2462                                              O, Ops.size());
   2463   UniqueSCEVs.InsertNode(S, IP);
   2464   return S;
   2465 }
   2466 
   2467 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
   2468                                          const SCEV *RHS) {
   2469   // ~smax(~x, ~y) == smin(x, y).
   2470   return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
   2471 }
   2472 
   2473 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
   2474                                          const SCEV *RHS) {
   2475   // ~umax(~x, ~y) == umin(x, y)
   2476   return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
   2477 }
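
// Min expressions are derived from max expressions via bitwise-not
// (~x == -1 - x), so no separate SCEVSMinExpr/SCEVUMinExpr node kinds are
// needed. A minimal sketch (SE, X and Y are assumed in-scope i32 SCEVs; the
// names are hypothetical):
//
//   const SCEV *Min = SE.getSMinExpr(X, Y);
//   // Built as getNotSCEV(getSMaxExpr(getNotSCEV(X), getNotSCEV(Y))).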
   2478 
   2479 const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) {
   2480   // If we have TargetData, we can bypass creating a target-independent
   2481   // constant expression and then folding it back into a ConstantInt.
   2482   // This is just a compile-time optimization.
   2483   if (TD)
   2484     return getConstant(TD->getIntPtrType(getContext()),
   2485                        TD->getTypeAllocSize(AllocTy));
   2486 
   2487   Constant *C = ConstantExpr::getSizeOf(AllocTy);
   2488   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
   2489     if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
   2490       C = Folded;
   2491   Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
   2492   return getTruncateOrZeroExtend(getSCEV(C), Ty);
   2493 }
   2494 
   2495 const SCEV *ScalarEvolution::getAlignOfExpr(Type *AllocTy) {
   2496   Constant *C = ConstantExpr::getAlignOf(AllocTy);
   2497   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
   2498     if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
   2499       C = Folded;
   2500   Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
   2501   return getTruncateOrZeroExtend(getSCEV(C), Ty);
   2502 }
   2503 
   2504 const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy,
   2505                                              unsigned FieldNo) {
   2506   // If we have TargetData, we can bypass creating a target-independent
   2507   // constant expression and then folding it back into a ConstantInt.
   2508   // This is just a compile-time optimization.
   2509   if (TD)
   2510     return getConstant(TD->getIntPtrType(getContext()),
   2511                        TD->getStructLayout(STy)->getElementOffset(FieldNo));
   2512 
   2513   Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
   2514   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
   2515     if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
   2516       C = Folded;
   2517   Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
   2518   return getTruncateOrZeroExtend(getSCEV(C), Ty);
   2519 }
   2520 
   2521 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *CTy,
   2522                                              Constant *FieldNo) {
   2523   Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo);
   2524   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
   2525     if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
   2526       C = Folded;
   2527   Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
   2528   return getTruncateOrZeroExtend(getSCEV(C), Ty);
   2529 }
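
// A minimal sketch of how the sizeof/offsetof helpers above are used
// (assuming an in-scope ScalarEvolution &SE and a StructType *STy whose
// fields are { i8, i32 }; the names are hypothetical):
//
//   const SCEV *Size   = SE.getSizeOfExpr(STy);      // allocation size, bytes
//   const SCEV *Offset = SE.getOffsetOfExpr(STy, 1); // byte offset of field 1
//   // With TargetData these are ordinary constants; without it they wrap a
//   // folded sizeof/offsetof constant expression.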
   2530 
   2531 const SCEV *ScalarEvolution::getUnknown(Value *V) {
   2532   // Don't attempt to do anything other than create a SCEVUnknown object
   2533   // here.  createSCEV only calls getUnknown after checking for all other
   2534   // interesting possibilities, and any other code that calls getUnknown
   2535   // is doing so in order to hide a value from SCEV canonicalization.
   2536 
   2537   FoldingSetNodeID ID;
   2538   ID.AddInteger(scUnknown);
   2539   ID.AddPointer(V);
   2540   void *IP = 0;
   2541   if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
   2542     assert(cast<SCEVUnknown>(S)->getValue() == V &&
   2543            "Stale SCEVUnknown in uniquing map!");
   2544     return S;
   2545   }
   2546   SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
   2547                                             FirstUnknown);
   2548   FirstUnknown = cast<SCEVUnknown>(S);
   2549   UniqueSCEVs.InsertNode(S, IP);
   2550   return S;
   2551 }
   2552 
   2553 //===----------------------------------------------------------------------===//
   2554 //            Basic SCEV Analysis and PHI Idiom Recognition Code
   2555 //
   2556 
   2557 /// isSCEVable - Test if values of the given type are analyzable within
   2558 /// the SCEV framework. This primarily includes integer types, and it
   2559 /// can optionally include pointer types if the ScalarEvolution class
   2560 /// has access to target-specific information.
   2561 bool ScalarEvolution::isSCEVable(Type *Ty) const {
   2562   // Integers and pointers are always SCEVable.
   2563   return Ty->isIntegerTy() || Ty->isPointerTy();
   2564 }
   2565 
   2566 /// getTypeSizeInBits - Return the size in bits of the specified type,
   2567 /// for which isSCEVable must return true.
   2568 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
   2569   assert(isSCEVable(Ty) && "Type is not SCEVable!");
   2570 
   2571   // If we have a TargetData, use it!
   2572   if (TD)
   2573     return TD->getTypeSizeInBits(Ty);
   2574 
   2575   // Integer types have fixed sizes.
   2576   if (Ty->isIntegerTy())
   2577     return Ty->getPrimitiveSizeInBits();
   2578 
    2579   // The only other supported type is pointer. Without TargetData, conservatively
   2580   // assume pointers are 64-bit.
   2581   assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
   2582   return 64;
   2583 }
   2584 
   2585 /// getEffectiveSCEVType - Return a type with the same bitwidth as
   2586 /// the given type and which represents how SCEV will treat the given
   2587 /// type, for which isSCEVable must return true. For pointer types,
   2588 /// this is the pointer-sized integer type.
   2589 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
   2590   assert(isSCEVable(Ty) && "Type is not SCEVable!");
   2591 
   2592   if (Ty->isIntegerTy())
   2593     return Ty;
   2594 
    2595   // The only other supported type is pointer.
   2596   assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
   2597   if (TD) return TD->getIntPtrType(getContext());
   2598 
   2599   // Without TargetData, conservatively assume pointers are 64-bit.
   2600   return Type::getInt64Ty(getContext());
   2601 }
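
// A minimal sketch of the type queries above (assuming an in-scope
// ScalarEvolution &SE, an LLVMContext &Ctx, and a pointer type PtrTy; the
// names are hypothetical):
//
//   SE.isSCEVable(Type::getInt32Ty(Ctx));        // true: integer
//   SE.isSCEVable(Type::getFloatTy(Ctx));        // false: not integer/pointer
//   SE.getTypeSizeInBits(Type::getInt32Ty(Ctx)); // 32
//   SE.getEffectiveSCEVType(PtrTy);              // intptr type, or i64 if no TD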
   2602 
   2603 const SCEV *ScalarEvolution::getCouldNotCompute() {
   2604   return &CouldNotCompute;
   2605 }
   2606 
   2607 /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
   2608 /// expression and create a new one.
   2609 const SCEV *ScalarEvolution::getSCEV(Value *V) {
   2610   assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
   2611 
   2612   ValueExprMapType::const_iterator I = ValueExprMap.find(V);
   2613   if (I != ValueExprMap.end()) return I->second;
   2614   const SCEV *S = createSCEV(V);
   2615 
   2616   // The process of creating a SCEV for V may have caused other SCEVs
   2617   // to have been created, so it's necessary to insert the new entry
   2618   // from scratch, rather than trying to remember the insert position
   2619   // above.
   2620   ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
   2621   return S;
   2622 }
   2623 
   2624 /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
   2625 ///
   2626 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
   2627   if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
   2628     return getConstant(
   2629                cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
   2630 
   2631   Type *Ty = V->getType();
   2632   Ty = getEffectiveSCEVType(Ty);
   2633   return getMulExpr(V,
   2634                   getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
   2635 }
   2636 
   2637 /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
   2638 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
   2639   if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
   2640     return getConstant(
   2641                 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
   2642 
   2643   Type *Ty = V->getType();
   2644   Ty = getEffectiveSCEVType(Ty);
   2645   const SCEV *AllOnes =
   2646                    getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
   2647   return getMinusSCEV(AllOnes, V);
   2648 }
   2649 
   2650 /// getMinusSCEV - Return LHS-RHS.  Minus is represented in SCEV as A+B*-1.
   2651 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
   2652                                           SCEV::NoWrapFlags Flags) {
   2653   assert(!maskFlags(Flags, SCEV::FlagNUW) && "subtraction does not have NUW");
   2654 
   2655   // Fast path: X - X --> 0.
   2656   if (LHS == RHS)
   2657     return getConstant(LHS->getType(), 0);
   2658 
   2659   // X - Y --> X + -Y
   2660   return getAddExpr(LHS, getNegativeSCEV(RHS), Flags);
   2661 }
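
// The three helpers above are algebraically related: -V is (-1 * V), ~V is
// (-1 - V), and LHS - RHS is LHS + (-RHS). A minimal sketch (SE, X and Y are
// assumed in-scope i32 SCEVs; the names are hypothetical):
//
//   SE.getNegativeSCEV(X);   // (-1 * X)
//   SE.getNotSCEV(X);        // (-1 + (-1 * X)), i.e. -1 - X
//   SE.getMinusSCEV(X, Y);   // (X + (-1 * Y))
//   SE.getMinusSCEV(X, X);   // fast path: the constant 0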
   2662 
   2663 /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
   2664 /// input value to the specified type.  If the type must be extended, it is zero
   2665 /// extended.
   2666 const SCEV *
   2667 ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
   2668   Type *SrcTy = V->getType();
   2669   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
   2670          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
   2671          "Cannot truncate or zero extend with non-integer arguments!");
   2672   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
   2673     return V;  // No conversion
   2674   if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
   2675     return getTruncateExpr(V, Ty);
   2676   return getZeroExtendExpr(V, Ty);
   2677 }
   2678 
   2679 /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
   2680 /// input value to the specified type.  If the type must be extended, it is sign
   2681 /// extended.
   2682 const SCEV *
   2683 ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
   2684                                          Type *Ty) {
   2685   Type *SrcTy = V->getType();
   2686   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
   2687          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
    2688          "Cannot truncate or sign extend with non-integer arguments!");
   2689   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
   2690     return V;  // No conversion
   2691   if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
   2692     return getTruncateExpr(V, Ty);
   2693   return getSignExtendExpr(V, Ty);
   2694 }
   2695 
   2696 /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
   2697 /// input value to the specified type.  If the type must be extended, it is zero
   2698 /// extended.  The conversion must not be narrowing.
   2699 const SCEV *
   2700 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
   2701   Type *SrcTy = V->getType();
   2702   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
   2703          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
   2704          "Cannot noop or zero extend with non-integer arguments!");
   2705   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
   2706          "getNoopOrZeroExtend cannot truncate!");
   2707   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
   2708     return V;  // No conversion
   2709   return getZeroExtendExpr(V, Ty);
   2710 }
   2711 
   2712 /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
   2713 /// input value to the specified type.  If the type must be extended, it is sign
   2714 /// extended.  The conversion must not be narrowing.
   2715 const SCEV *
   2716 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
   2717   Type *SrcTy = V->getType();
   2718   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
   2719          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
   2720          "Cannot noop or sign extend with non-integer arguments!");
   2721   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
   2722          "getNoopOrSignExtend cannot truncate!");
   2723   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
   2724     return V;  // No conversion
   2725   return getSignExtendExpr(V, Ty);
   2726 }
   2727 
   2728 /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
   2729 /// the input value to the specified type. If the type must be extended,
   2730 /// it is extended with unspecified bits. The conversion must not be
   2731 /// narrowing.
   2732 const SCEV *
   2733 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
   2734   Type *SrcTy = V->getType();
   2735   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
   2736          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
   2737          "Cannot noop or any extend with non-integer arguments!");
   2738   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
   2739          "getNoopOrAnyExtend cannot truncate!");
   2740   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
   2741     return V;  // No conversion
   2742   return getAnyExtendExpr(V, Ty);
   2743 }
   2744 
   2745 /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
   2746 /// input value to the specified type.  The conversion must not be widening.
   2747 const SCEV *
   2748 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
   2749   Type *SrcTy = V->getType();
   2750   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
   2751          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
   2752          "Cannot truncate or noop with non-integer arguments!");
   2753   assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
   2754          "getTruncateOrNoop cannot extend!");
   2755   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
   2756     return V;  // No conversion
   2757   return getTruncateExpr(V, Ty);
   2758 }
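
// The conversion helpers above differ only in which direction they permit and
// how they extend. A minimal sketch (assuming an in-scope ScalarEvolution &SE,
// an i8 SCEV V8, and I32 = Type::getInt32Ty(Ctx); the names are hypothetical):
//
//   SE.getTruncateOrZeroExtend(V8, I32); // zero extends i8 -> i32
//   SE.getTruncateOrSignExtend(V8, I32); // sign extends i8 -> i32
//   SE.getNoopOrZeroExtend(V8, I32);     // zero extends; asserts on narrowing
//   SE.getTruncateOrNoop(V8, V8->getType()); // same width, returned unchanged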
   2759 
   2760 /// getUMaxFromMismatchedTypes - Promote the operands to the wider of
   2761 /// the types using zero-extension, and then perform a umax operation
   2762 /// with them.
   2763 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
   2764                                                         const SCEV *RHS) {
   2765   const SCEV *PromotedLHS = LHS;
   2766   const SCEV *PromotedRHS = RHS;
   2767 
   2768   if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
   2769     PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
   2770   else
   2771     PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
   2772 
   2773   return getUMaxExpr(PromotedLHS, PromotedRHS);
   2774 }
   2775 
   2776 /// getUMinFromMismatchedTypes - Promote the operands to the wider of
   2777 /// the types using zero-extension, and then perform a umin operation
   2778 /// with them.
   2779 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
   2780                                                         const SCEV *RHS) {
   2781   const SCEV *PromotedLHS = LHS;
   2782   const SCEV *PromotedRHS = RHS;
   2783 
   2784   if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
   2785     PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
   2786   else
   2787     PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
   2788 
   2789   return getUMinExpr(PromotedLHS, PromotedRHS);
   2790 }
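
// A minimal sketch (assuming an in-scope ScalarEvolution &SE, an i8 SCEV A,
// and an i32 SCEV B; the names are hypothetical):
//
//   SE.getUMaxFromMismatchedTypes(A, B); // zero extends A to i32, then umax
//   SE.getUMinFromMismatchedTypes(A, B); // zero extends A to i32, then umin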
   2791 
   2792 /// getPointerBase - Transitively follow the chain of pointer-type operands
   2793 /// until reaching a SCEV that does not have a single pointer operand. This
   2794 /// returns a SCEVUnknown pointer for well-formed pointer-type expressions,
   2795 /// but corner cases do exist.
   2796 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
   2797   // A pointer operand may evaluate to a nonpointer expression, such as null.
   2798   if (!V->getType()->isPointerTy())
   2799     return V;
   2800 
   2801   if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
   2802     return getPointerBase(Cast->getOperand());
   2803   }
   2804   else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
   2805     const SCEV *PtrOp = 0;
   2806     for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
   2807          I != E; ++I) {
   2808       if ((*I)->getType()->isPointerTy()) {
   2809         // Cannot find the base of an expression with multiple pointer operands.
   2810         if (PtrOp)
   2811           return V;
   2812         PtrOp = *I;
   2813       }
   2814     }
   2815     if (!PtrOp)
   2816       return V;
   2817     return getPointerBase(PtrOp);
   2818   }
   2819   return V;
   2820 }
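
// A minimal sketch of what getPointerBase peels through: for a SCEV such as
// (%base + 4 + (8 * %i)) built from a GEP, the only pointer-typed operand is
// the SCEVUnknown for %base, so getPointerBase returns %base. With zero or
// more than one pointer-typed operand it gives up and returns the expression
// unchanged.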
   2821 
   2822 /// PushDefUseChildren - Push users of the given Instruction
   2823 /// onto the given Worklist.
   2824 static void
   2825 PushDefUseChildren(Instruction *I,
   2826                    SmallVectorImpl<Instruction *> &Worklist) {
   2827   // Push the def-use children onto the Worklist stack.
   2828   for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
   2829        UI != UE; ++UI)
   2830     Worklist.push_back(cast<Instruction>(*UI));
   2831 }
   2832 
    2833 /// ForgetSymbolicName - This looks up computed SCEV values for all
    2834 /// instructions that depend on the given instruction and removes them from
    2835 /// ValueExprMap if they reference SymName. This is used during PHI
   2836 /// resolution.
   2837 void
   2838 ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
   2839   SmallVector<Instruction *, 16> Worklist;
   2840   PushDefUseChildren(PN, Worklist);
   2841 
   2842   SmallPtrSet<Instruction *, 8> Visited;
   2843   Visited.insert(PN);
   2844   while (!Worklist.empty()) {
   2845     Instruction *I = Worklist.pop_back_val();
   2846     if (!Visited.insert(I)) continue;
   2847 
   2848     ValueExprMapType::iterator It =
   2849       ValueExprMap.find(static_cast<Value *>(I));
   2850     if (It != ValueExprMap.end()) {
   2851       const SCEV *Old = It->second;
   2852 
   2853       // Short-circuit the def-use traversal if the symbolic name
   2854       // ceases to appear in expressions.
   2855       if (Old != SymName && !hasOperand(Old, SymName))
   2856         continue;
   2857 
   2858       // SCEVUnknown for a PHI either means that it has an unrecognized
    2859       // structure, it's a PHI that's in the process of being computed
   2860       // by createNodeForPHI, or it's a single-value PHI. In the first case,
   2861       // additional loop trip count information isn't going to change anything.
   2862       // In the second case, createNodeForPHI will perform the necessary
   2863       // updates on its own when it gets to that point. In the third, we do
   2864       // want to forget the SCEVUnknown.
   2865       if (!isa<PHINode>(I) ||
   2866           !isa<SCEVUnknown>(Old) ||
   2867           (I != PN && Old == SymName)) {
   2868         forgetMemoizedResults(Old);
   2869         ValueExprMap.erase(It);
   2870       }
   2871     }
   2872 
   2873     PushDefUseChildren(I, Worklist);
   2874   }
   2875 }
   2876 
   2877 /// createNodeForPHI - PHI nodes have two cases.  Either the PHI node exists in
   2878 /// a loop header, making it a potential recurrence, or it doesn't.
   2879 ///
   2880 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
   2881   if (const Loop *L = LI->getLoopFor(PN->getParent()))
   2882     if (L->getHeader() == PN->getParent()) {
   2883       // The loop may have multiple entrances or multiple exits; we can analyze
   2884       // this phi as an addrec if it has a unique entry value and a unique
   2885       // backedge value.
   2886       Value *BEValueV = 0, *StartValueV = 0;
   2887       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
   2888         Value *V = PN->getIncomingValue(i);
   2889         if (L->contains(PN->getIncomingBlock(i))) {
   2890           if (!BEValueV) {
   2891             BEValueV = V;
   2892           } else if (BEValueV != V) {
   2893             BEValueV = 0;
   2894             break;
   2895           }
   2896         } else if (!StartValueV) {
   2897           StartValueV = V;
   2898         } else if (StartValueV != V) {
   2899           StartValueV = 0;
   2900           break;
   2901         }
   2902       }
   2903       if (BEValueV && StartValueV) {
   2904         // While we are analyzing this PHI node, handle its value symbolically.
   2905         const SCEV *SymbolicName = getUnknown(PN);
   2906         assert(ValueExprMap.find(PN) == ValueExprMap.end() &&
   2907                "PHI node already processed?");
   2908         ValueExprMap.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
   2909 
   2910         // Using this symbolic name for the PHI, analyze the value coming around
   2911         // the back-edge.
   2912         const SCEV *BEValue = getSCEV(BEValueV);
   2913 
   2914         // NOTE: If BEValue is loop invariant, we know that the PHI node just
   2915         // has a special value for the first iteration of the loop.
   2916 
   2917         // If the value coming around the backedge is an add with the symbolic
   2918         // value we just inserted, then we found a simple induction variable!
   2919         if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
   2920           // If there is a single occurrence of the symbolic value, replace it
   2921           // with a recurrence.
   2922           unsigned FoundIndex = Add->getNumOperands();
   2923           for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
   2924             if (Add->getOperand(i) == SymbolicName)
   2925               if (FoundIndex == e) {
   2926                 FoundIndex = i;
   2927                 break;
   2928               }
   2929 
   2930           if (FoundIndex != Add->getNumOperands()) {
   2931             // Create an add with everything but the specified operand.
   2932             SmallVector<const SCEV *, 8> Ops;
   2933             for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
   2934               if (i != FoundIndex)
   2935                 Ops.push_back(Add->getOperand(i));
   2936             const SCEV *Accum = getAddExpr(Ops);
   2937 
   2938             // This is not a valid addrec if the step amount is varying each
   2939             // loop iteration, but is not itself an addrec in this loop.
   2940             if (isLoopInvariant(Accum, L) ||
   2941                 (isa<SCEVAddRecExpr>(Accum) &&
   2942                  cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
   2943               SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
   2944 
   2945               // If the increment doesn't overflow, then neither the addrec nor
   2946               // the post-increment will overflow.
   2947               if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
   2948                 if (OBO->hasNoUnsignedWrap())
   2949                   Flags = setFlags(Flags, SCEV::FlagNUW);
   2950                 if (OBO->hasNoSignedWrap())
   2951                   Flags = setFlags(Flags, SCEV::FlagNSW);
   2952               } else if (const GEPOperator *GEP =
   2953                          dyn_cast<GEPOperator>(BEValueV)) {
   2954                 // If the increment is an inbounds GEP, then we know the address
   2955                 // space cannot be wrapped around. We cannot make any guarantee
   2956                 // about signed or unsigned overflow because pointers are
   2957                 // unsigned but we may have a negative index from the base
   2958                 // pointer.
   2959                 if (GEP->isInBounds())
   2960                   Flags = setFlags(Flags, SCEV::FlagNW);
   2961               }
   2962 
   2963               const SCEV *StartVal = getSCEV(StartValueV);
   2964               const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
   2965 
   2966               // Since the no-wrap flags are on the increment, they apply to the
   2967               // post-incremented value as well.
   2968               if (isLoopInvariant(Accum, L))
   2969                 (void)getAddRecExpr(getAddExpr(StartVal, Accum),
   2970                                     Accum, L, Flags);
   2971 
   2972               // Okay, for the entire analysis of this edge we assumed the PHI
   2973               // to be symbolic.  We now need to go back and purge all of the
   2974               // entries for the scalars that use the symbolic expression.
   2975               ForgetSymbolicName(PN, SymbolicName);
   2976               ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
   2977               return PHISCEV;
   2978             }
   2979           }
   2980         } else if (const SCEVAddRecExpr *AddRec =
   2981                      dyn_cast<SCEVAddRecExpr>(BEValue)) {
   2982           // Otherwise, this could be a loop like this:
   2983           //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
   2984           // In this case, j = {1,+,1}  and BEValue is j.
    2985           // Because the other incoming value of i (0) fits the evolution of
    2986           // BEValue, i really is an addrec evolution.
   2987           if (AddRec->getLoop() == L && AddRec->isAffine()) {
   2988             const SCEV *StartVal = getSCEV(StartValueV);
   2989 
    2990             // If StartVal = j.start - j.stride, we can use StartVal as the
    2991             // start of the addrec evolution.
   2992             if (StartVal == getMinusSCEV(AddRec->getOperand(0),
   2993                                          AddRec->getOperand(1))) {
   2994               // FIXME: For constant StartVal, we should be able to infer
   2995               // no-wrap flags.
   2996               const SCEV *PHISCEV =
   2997                 getAddRecExpr(StartVal, AddRec->getOperand(1), L,
   2998                               SCEV::FlagAnyWrap);
   2999 
   3000               // Okay, for the entire analysis of this edge we assumed the PHI
   3001               // to be symbolic.  We now need to go back and purge all of the
   3002               // entries for the scalars that use the symbolic expression.
   3003               ForgetSymbolicName(PN, SymbolicName);
   3004               ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
   3005               return PHISCEV;
   3006             }
   3007           }
   3008         }
   3009       }
   3010     }
   3011 
   3012   // If the PHI has a single incoming value, follow that value, unless the
   3013   // PHI's incoming blocks are in a different loop, in which case doing so
   3014   // risks breaking LCSSA form. Instcombine would normally zap these, but
   3015   // it doesn't have DominatorTree information, so it may miss cases.
   3016   if (Value *V = SimplifyInstruction(PN, TD, DT))
   3017     if (LI->replacementPreservesLCSSAForm(PN, V))
   3018       return getSCEV(V);
   3019 
   3020   // If it's not a loop phi, we can't handle it yet.
   3021   return getUnknown(PN);
   3022 }
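
// A minimal sketch of the simple-induction case recognized above. For a loop
// such as
//
//   for (i = Start; ...; i += Step) { ... }
//
// the header PHI for i has entry value Start and backedge value (i + Step).
// Analyzing the backedge value while the PHI is held symbolic produces an add
// whose single symbolic operand is the PHI itself, so the PHI is rewritten as
// the recurrence {Start,+,Step}<L>, with no-wrap flags taken from the
// increment instruction when it has them.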
   3023 
   3024 /// createNodeForGEP - Expand GEP instructions into add and multiply
   3025 /// operations. This allows them to be analyzed by regular SCEV code.
   3026 ///
   3027 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
   3028 
   3029   // Don't blindly transfer the inbounds flag from the GEP instruction to the
   3030   // Add expression, because the Instruction may be guarded by control flow
   3031   // and the no-overflow bits may not be valid for the expression in any
   3032   // context.
   3033   bool isInBounds = GEP->isInBounds();
   3034 
   3035   Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
   3036   Value *Base = GEP->getOperand(0);
   3037   // Don't attempt to analyze GEPs over unsized objects.
   3038   if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
   3039     return getUnknown(GEP);
   3040   const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
   3041   gep_type_iterator GTI = gep_type_begin(GEP);
   3042   for (GetElementPtrInst::op_iterator I = llvm::next(GEP->op_begin()),
   3043                                       E = GEP->op_end();
   3044        I != E; ++I) {
   3045     Value *Index = *I;
   3046     // Compute the (potentially symbolic) offset in bytes for this index.
   3047     if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
   3048       // For a struct, add the member offset.
   3049       unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
   3050       const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo);
   3051 
   3052       // Add the field offset to the running total offset.
   3053       TotalOffset = getAddExpr(TotalOffset, FieldOffset);
   3054     } else {
   3055       // For an array, add the element offset, explicitly scaled.
   3056       const SCEV *ElementSize = getSizeOfExpr(*GTI);
   3057       const SCEV *IndexS = getSCEV(Index);
   3058       // Getelementptr indices are signed.
   3059       IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
   3060 
   3061       // Multiply the index by the element size to compute the element offset.
   3062       const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize,
   3063                                            isInBounds ? SCEV::FlagNSW :
   3064                                            SCEV::FlagAnyWrap);
   3065 
   3066       // Add the element offset to the running total offset.
   3067       TotalOffset = getAddExpr(TotalOffset, LocalOffset);
   3068     }
   3069   }
   3070 
   3071   // Get the SCEV for the GEP base.
   3072   const SCEV *BaseS = getSCEV(Base);
   3073 
   3074   // Add the total offset from all the GEP indices to the base.
   3075   return getAddExpr(BaseS, TotalOffset,
   3076                     isInBounds ? SCEV::FlagNSW : SCEV::FlagAnyWrap);
   3077 }
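
// A minimal sketch of the expansion above. For a GEP such as
//
//   %p = getelementptr inbounds %struct.S* %base, i64 %i, i32 1
//
// where %struct.S = { i32, [4 x i16] }, the resulting SCEV is roughly
//
//   %base + (sizeof(%struct.S) * %i) + offsetof(%struct.S, field 1)
//
// with the array index sign-extended to the pointer width, and FlagNSW on the
// multiply and final add only because the GEP is inbounds.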
   3078 
   3079 /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
   3080 /// guaranteed to end in (at every loop iteration).  It is, at the same time,
   3081 /// the minimum number of times S is divisible by 2.  For example, given {4,+,8}
   3082 /// it returns 2.  If S is guaranteed to be 0, it returns the bitwidth of S.
   3083 uint32_t
   3084 ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
   3085   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
   3086     return C->getValue()->getValue().countTrailingZeros();
   3087 
   3088   if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
   3089     return std::min(GetMinTrailingZeros(T->getOperand()),
   3090                     (uint32_t)getTypeSizeInBits(T->getType()));
   3091 
   3092   if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
   3093     uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
   3094     return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
   3095              getTypeSizeInBits(E->getType()) : OpRes;
   3096   }
   3097 
   3098   if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
   3099     uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
   3100     return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
   3101              getTypeSizeInBits(E->getType()) : OpRes;
   3102   }
   3103 
   3104   if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
    3105     // The result is the min of all the operands' results.
   3106     uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
   3107     for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
   3108       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
   3109     return MinOpRes;
   3110   }
   3111 
   3112   if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    3113     // The result is the sum of all the operands' results.
   3114     uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
   3115     uint32_t BitWidth = getTypeSizeInBits(M->getType());
   3116     for (unsigned i = 1, e = M->getNumOperands();
   3117          SumOpRes != BitWidth && i != e; ++i)
   3118       SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
   3119                           BitWidth);
   3120     return SumOpRes;
   3121   }
   3122 
   3123   if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    3124     // The result is the min of all the operands' results.
   3125     uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
   3126     for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
   3127       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
   3128     return MinOpRes;
   3129   }
   3130 
   3131   if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
    3132     // The result is the min of all the operands' results.
   3133     uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
   3134     for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
   3135       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
   3136     return MinOpRes;
   3137   }
   3138 
   3139   if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
    3140     // The result is the min of all the operands' results.
   3141     uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
   3142     for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
   3143       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
   3144     return MinOpRes;
   3145   }
   3146 
   3147   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
   3148     // For a SCEVUnknown, ask ValueTracking.
   3149     unsigned BitWidth = getTypeSizeInBits(U->getType());
   3150     APInt Mask = APInt::getAllOnesValue(BitWidth);
   3151     APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
   3152     ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones);
   3153     return Zeros.countTrailingOnes();
   3154   }
   3155 
   3156   // SCEVUDivExpr
   3157   return 0;
   3158 }
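
// A few worked examples of the rules above (operand values are illustrative):
//
//   GetMinTrailingZeros({4,+,8}) == 2  // addrec: min(tz(4) = 2, tz(8) = 3)
//   GetMinTrailingZeros(4 * x)   >= 2  // mul: sum of the operands' counts
//   GetMinTrailingZeros(2 + 4*x) == 1  // add: min of the operands' counts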
   3159 
   3160 /// getUnsignedRange - Determine the unsigned range for a particular SCEV.
   3161 ///
   3162 ConstantRange
   3163 ScalarEvolution::getUnsignedRange(const SCEV *S) {
   3164   // See if we've computed this range already.
   3165   DenseMap<const SCEV *, ConstantRange>::iterator I = UnsignedRanges.find(S);
   3166   if (I != UnsignedRanges.end())
   3167     return I->second;
   3168 
   3169   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
   3170     return setUnsignedRange(C, ConstantRange(C->getValue()->getValue()));
   3171 
   3172   unsigned BitWidth = getTypeSizeInBits(S->getType());
   3173   ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
   3174 
   3175   // If the value has known zeros, the maximum unsigned value will have those
   3176   // known zeros as well.
   3177   uint32_t TZ = GetMinTrailingZeros(S);
   3178   if (TZ != 0)
   3179     ConservativeResult =
   3180       ConstantRange(APInt::getMinValue(BitWidth),
   3181                     APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
   3182 
   3183   if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
   3184     ConstantRange X = getUnsignedRange(Add->getOperand(0));
   3185     for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
   3186       X = X.add(getUnsignedRange(Add->getOperand(i)));
   3187     return setUnsignedRange(Add, ConservativeResult.intersectWith(X));
   3188   }
   3189 
   3190   if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
   3191     ConstantRange X = getUnsignedRange(Mul->getOperand(0));
   3192     for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
   3193       X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
   3194     return setUnsignedRange(Mul, ConservativeResult.intersectWith(X));
   3195   }
   3196 
   3197   if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
   3198     ConstantRange X = getUnsignedRange(SMax->getOperand(0));
   3199     for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
   3200       X = X.smax(getUnsignedRange(SMax->getOperand(i)));
   3201     return setUnsignedRange(SMax, ConservativeResult.intersectWith(X));
   3202   }
   3203 
   3204   if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
   3205     ConstantRange X = getUnsignedRange(UMax->getOperand(0));
   3206     for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
   3207       X = X.umax(getUnsignedRange(UMax->getOperand(i)));
   3208     return setUnsignedRange(UMax, ConservativeResult.intersectWith(X));
   3209   }
   3210 
   3211   if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
   3212     ConstantRange X = getUnsignedRange(UDiv->getLHS());
   3213     ConstantRange Y = getUnsignedRange(UDiv->getRHS());
   3214     return setUnsignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y)));
   3215   }
   3216 
   3217   if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
   3218     ConstantRange X = getUnsignedRange(ZExt->getOperand());
   3219     return setUnsignedRange(ZExt,
   3220       ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
   3221   }
   3222 
   3223   if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
   3224     ConstantRange X = getUnsignedRange(SExt->getOperand());
   3225     return setUnsignedRange(SExt,
   3226       ConservativeResult.intersectWith(X.signExtend(BitWidth)));
   3227   }
   3228 
   3229   if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
   3230     ConstantRange X = getUnsignedRange(Trunc->getOperand());
   3231     return setUnsignedRange(Trunc,
   3232       ConservativeResult.intersectWith(X.truncate(BitWidth)));
   3233   }
   3234 
   3235   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
   3236     // If there's no unsigned wrap, the value will never be less than its
   3237     // initial value.
   3238     if (AddRec->getNoWrapFlags(SCEV::FlagNUW))
   3239       if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
   3240         if (!C->getValue()->isZero())
   3241           ConservativeResult =
   3242             ConservativeResult.intersectWith(
   3243               ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0)));
   3244 
   3245     // TODO: non-affine addrec
   3246     if (AddRec->isAffine()) {
   3247       Type *Ty = AddRec->getType();
   3248       const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
   3249       if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
   3250           getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
   3251         MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
   3252 
   3253         const SCEV *Start = AddRec->getStart();
   3254         const SCEV *Step = AddRec->getStepRecurrence(*this);
   3255 
   3256         ConstantRange StartRange = getUnsignedRange(Start);
   3257         ConstantRange StepRange = getSignedRange(Step);
   3258         ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
   3259         ConstantRange EndRange =
   3260           StartRange.add(MaxBECountRange.multiply(StepRange));
   3261 
   3262         // Check for overflow. This must be done with ConstantRange arithmetic
   3263         // because we could be called from within the ScalarEvolution overflow
   3264         // checking code.
   3265         ConstantRange ExtStartRange = StartRange.zextOrTrunc(BitWidth*2+1);
   3266         ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
   3267         ConstantRange ExtMaxBECountRange =
   3268           MaxBECountRange.zextOrTrunc(BitWidth*2+1);
   3269         ConstantRange ExtEndRange = EndRange.zextOrTrunc(BitWidth*2+1);
   3270         if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
   3271             ExtEndRange)
   3272           return setUnsignedRange(AddRec, ConservativeResult);
   3273 
   3274         APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
   3275                                    EndRange.getUnsignedMin());
   3276         APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
   3277                                    EndRange.getUnsignedMax());
   3278         if (Min.isMinValue() && Max.isMaxValue())
   3279           return setUnsignedRange(AddRec, ConservativeResult);
   3280         return setUnsignedRange(AddRec,
   3281           ConservativeResult.intersectWith(ConstantRange(Min, Max+1)));
   3282       }
   3283     }
   3284 
   3285     return setUnsignedRange(AddRec, ConservativeResult);
   3286   }
   3287 
   3288   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
   3289     // For a SCEVUnknown, ask ValueTracking.
   3290     APInt Mask = APInt::getAllOnesValue(BitWidth);
   3291     APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
   3292     ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
   3293     if (Ones == ~Zeros + 1)
   3294       return setUnsignedRange(U, ConservativeResult);
   3295     return setUnsignedRange(U,
   3296       ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1)));
   3297   }
   3298 
   3299   return setUnsignedRange(S, ConservativeResult);
   3300 }
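
// A minimal worked example of the affine addrec case above: for an i32 addrec
// {0,+,1}<%L> in a loop whose max backedge-taken count is known to be 9, the
// start range is [0,1), the end range Start + MaxBECount * Step is [9,10),
// the extended-precision overflow check passes, and the result is the hull
// [0,10), i.e. the value is always in 0..9.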
   3301 
   3302 /// getSignedRange - Determine the signed range for a particular SCEV.
   3303 ///
   3304 ConstantRange
   3305 ScalarEvolution::getSignedRange(const SCEV *S) {
   3306   // See if we've computed this range already.
   3307   DenseMap<const SCEV *, ConstantRange>::iterator I = SignedRanges.find(S);
   3308   if (I != SignedRanges.end())
   3309     return I->second;
   3310 
   3311   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
   3312     return setSignedRange(C, ConstantRange(C->getValue()->getValue()));
   3313 
   3314   unsigned BitWidth = getTypeSizeInBits(S->getType());
   3315   ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
   3316 
   3317   // If the value has known zeros, the maximum signed value will have those
   3318   // known zeros as well.
   3319   uint32_t TZ = GetMinTrailingZeros(S);
   3320   if (TZ != 0)
   3321     ConservativeResult =
   3322       ConstantRange(APInt::getSignedMinValue(BitWidth),
   3323                     APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
   3324 
   3325   if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
   3326     ConstantRange X = getSignedRange(Add->getOperand(0));
   3327     for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
   3328       X = X.add(getSignedRange(Add->getOperand(i)));
   3329     return setSignedRange(Add, ConservativeResult.intersectWith(X));
   3330   }
   3331 
   3332   if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
   3333     ConstantRange X = getSignedRange(Mul->getOperand(0));
   3334     for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
   3335       X = X.multiply(getSignedRange(Mul->getOperand(i)));
   3336     return setSignedRange(Mul, ConservativeResult.intersectWith(X));
   3337   }
   3338 
   3339   if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
   3340     ConstantRange X = getSignedRange(SMax->getOperand(0));
   3341     for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
   3342       X = X.smax(getSignedRange(SMax->getOperand(i)));
   3343     return setSignedRange(SMax, ConservativeResult.intersectWith(X));
   3344   }
   3345 
   3346   if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
   3347     ConstantRange X = getSignedRange(UMax->getOperand(0));
   3348     for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
   3349       X = X.umax(getSignedRange(UMax->getOperand(i)));
   3350     return setSignedRange(UMax, ConservativeResult.intersectWith(X));
   3351   }
   3352 
   3353   if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
   3354     ConstantRange X = getSignedRange(UDiv->getLHS());
   3355     ConstantRange Y = getSignedRange(UDiv->getRHS());
   3356     return setSignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y)));
   3357   }
   3358 
   3359   if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
   3360     ConstantRange X = getSignedRange(ZExt->getOperand());
   3361     return setSignedRange(ZExt,
   3362       ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
   3363   }
   3364 
   3365   if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
   3366     ConstantRange X = getSignedRange(SExt->getOperand());
   3367     return setSignedRange(SExt,
   3368       ConservativeResult.intersectWith(X.signExtend(BitWidth)));
   3369   }
   3370 
   3371   if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
   3372     ConstantRange X = getSignedRange(Trunc->getOperand());
   3373     return setSignedRange(Trunc,
   3374       ConservativeResult.intersectWith(X.truncate(BitWidth)));
   3375   }
   3376 
   3377   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
   3378     // If there's no signed wrap, and all the operands have the same sign or
   3379     // zero, the value won't ever change sign.
   3380     if (AddRec->getNoWrapFlags(SCEV::FlagNSW)) {
   3381       bool AllNonNeg = true;
   3382       bool AllNonPos = true;
   3383       for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
   3384         if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
   3385         if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
   3386       }
   3387       if (AllNonNeg)
   3388         ConservativeResult = ConservativeResult.intersectWith(
   3389           ConstantRange(APInt(BitWidth, 0),
   3390                         APInt::getSignedMinValue(BitWidth)));
   3391       else if (AllNonPos)
   3392         ConservativeResult = ConservativeResult.intersectWith(
   3393           ConstantRange(APInt::getSignedMinValue(BitWidth),
   3394                         APInt(BitWidth, 1)));
   3395     }
   3396 
   3397     // TODO: non-affine addrec
   3398     if (AddRec->isAffine()) {
   3399       Type *Ty = AddRec->getType();
   3400       const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
   3401       if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
   3402           getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
   3403         MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
   3404 
   3405         const SCEV *Start = AddRec->getStart();
   3406         const SCEV *Step = AddRec->getStepRecurrence(*this);
   3407 
   3408         ConstantRange StartRange = getSignedRange(Start);
   3409         ConstantRange StepRange = getSignedRange(Step);
   3410         ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
   3411         ConstantRange EndRange =
   3412           StartRange.add(MaxBECountRange.multiply(StepRange));
   3413 
   3414         // Check for overflow. This must be done with ConstantRange arithmetic
   3415         // because we could be called from within the ScalarEvolution overflow
   3416         // checking code.
   3417         ConstantRange ExtStartRange = StartRange.sextOrTrunc(BitWidth*2+1);
   3418         ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
   3419         ConstantRange ExtMaxBECountRange =
   3420           MaxBECountRange.zextOrTrunc(BitWidth*2+1);
   3421         ConstantRange ExtEndRange = EndRange.sextOrTrunc(BitWidth*2+1);
   3422         if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
   3423             ExtEndRange)
   3424           return setSignedRange(AddRec, ConservativeResult);
   3425 
   3426         APInt Min = APIntOps::smin(StartRange.getSignedMin(),
   3427                                    EndRange.getSignedMin());
   3428         APInt Max = APIntOps::smax(StartRange.getSignedMax(),
   3429                                    EndRange.getSignedMax());
   3430         if (Min.isMinSignedValue() && Max.isMaxSignedValue())
   3431           return setSignedRange(AddRec, ConservativeResult);
   3432         return setSignedRange(AddRec,
   3433           ConservativeResult.intersectWith(ConstantRange(Min, Max+1)));
   3434       }
   3435     }
   3436 
   3437     return setSignedRange(AddRec, ConservativeResult);
   3438   }
   3439 
   3440   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
   3441     // For a SCEVUnknown, ask ValueTracking.
   3442     if (!U->getValue()->getType()->isIntegerTy() && !TD)
   3443       return setSignedRange(U, ConservativeResult);
   3444     unsigned NS = ComputeNumSignBits(U->getValue(), TD);
   3445     if (NS == 1)
   3446       return setSignedRange(U, ConservativeResult);
   3447     return setSignedRange(U, ConservativeResult.intersectWith(
   3448       ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
   3449                     APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1)));
   3450   }
   3451 
   3452   return setSignedRange(S, ConservativeResult);
   3453 }
   3454 
   3455 /// createSCEV - We know that there is no SCEV for the specified value.
   3456 /// Analyze the expression.
   3457 ///
   3458 const SCEV *ScalarEvolution::createSCEV(Value *V) {
   3459   if (!isSCEVable(V->getType()))
   3460     return getUnknown(V);
   3461 
   3462   unsigned Opcode = Instruction::UserOp1;
   3463   if (Instruction *I = dyn_cast<Instruction>(V)) {
   3464     Opcode = I->getOpcode();
   3465 
   3466     // Don't attempt to analyze instructions in blocks that aren't
   3467     // reachable. Such instructions don't matter, and they aren't required
   3468     // to obey basic rules for definitions dominating uses which this
   3469     // analysis depends on.
   3470     if (!DT->isReachableFromEntry(I->getParent()))
   3471       return getUnknown(V);
   3472   } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
   3473     Opcode = CE->getOpcode();
   3474   else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
   3475     return getConstant(CI);
   3476   else if (isa<ConstantPointerNull>(V))
   3477     return getConstant(V->getType(), 0);
   3478   else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
   3479     return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee());
   3480   else
   3481     return getUnknown(V);
   3482 
   3483   Operator *U = cast<Operator>(V);
   3484   switch (Opcode) {
   3485   case Instruction::Add: {
   3486     // The simple thing to do would be to just call getSCEV on both operands
   3487     // and call getAddExpr with the result. However if we're looking at a
   3488     // bunch of things all added together, this can be quite inefficient,
   3489     // because it leads to N-1 getAddExpr calls for N ultimate operands.
   3490     // Instead, gather up all the operands and make a single getAddExpr call.
   3491     // LLVM IR canonical form means we need only traverse the left operands.
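            // For example, (((a + b) - c) + d) is gathered into the single call
            // getAddExpr({d, -c, b, a}).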
   3492     SmallVector<const SCEV *, 4> AddOps;
   3493     AddOps.push_back(getSCEV(U->getOperand(1)));
   3494     for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) {
   3495       unsigned Opcode = Op->getValueID() - Value::InstructionVal;
   3496       if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
   3497         break;
   3498       U = cast<Operator>(Op);
   3499       const SCEV *Op1 = getSCEV(U->getOperand(1));
   3500       if (Opcode == Instruction::Sub)
   3501         AddOps.push_back(getNegativeSCEV(Op1));
   3502       else
   3503         AddOps.push_back(Op1);
   3504     }
   3505     AddOps.push_back(getSCEV(U->getOperand(0)));
   3506     return getAddExpr(AddOps);
   3507   }
   3508   case Instruction::Mul: {
   3509     // See the Add code above.
   3510     SmallVector<const SCEV *, 4> MulOps;
   3511     MulOps.push_back(getSCEV(U->getOperand(1)));
   3512     for (Value *Op = U->getOperand(0);
   3513          Op->getValueID() == Instruction::Mul + Value::InstructionVal;
   3514          Op = U->getOperand(0)) {
   3515       U = cast<Operator>(Op);
   3516       MulOps.push_back(getSCEV(U->getOperand(1)));
   3517     }
   3518     MulOps.push_back(getSCEV(U->getOperand(0)));
   3519     return getMulExpr(MulOps);
   3520   }
   3521   case Instruction::UDiv:
   3522     return getUDivExpr(getSCEV(U->getOperand(0)),
   3523                        getSCEV(U->getOperand(1)));
   3524   case Instruction::Sub:
   3525     return getMinusSCEV(getSCEV(U->getOperand(0)),
   3526                         getSCEV(U->getOperand(1)));
   3527   case Instruction::And:
   3528     // For an expression like x&255 that merely masks off the high bits,
   3529     // use zext(trunc(x)) as the SCEV expression.
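            // For example, for an i32 value %x, "%x & 255" is modeled as the
            // SCEV expression zext(trunc(%x to i8) to i32).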
   3530     if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
   3531       if (CI->isNullValue())
   3532         return getSCEV(U->getOperand(1));
   3533       if (CI->isAllOnesValue())
   3534         return getSCEV(U->getOperand(0));
   3535       const APInt &A = CI->getValue();
   3536 
   3537       // Instcombine's ShrinkDemandedConstant may strip bits out of
   3538       // constants, obscuring what would otherwise be a low-bits mask.
   3539       // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
   3540       // knew about to reconstruct a low-bits mask value.
   3541       unsigned LZ = A.countLeadingZeros();
   3542       unsigned BitWidth = A.getBitWidth();
   3543       APInt AllOnes = APInt::getAllOnesValue(BitWidth);
   3544       APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
   3545       ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);
   3546 
   3547       APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);
   3548 
   3549       if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
   3550         return
   3551           getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
   3552                                 IntegerType::get(getContext(), BitWidth - LZ)),
   3553                             U->getType());
   3554     }
   3555     break;
   3556 
   3557   case Instruction::Or:
   3558     // If the RHS of the Or is a constant, we may have something like:
   3559     // X*4+1 which got turned into X*4|1.  Handle this as an Add so loop
   3560     // optimizations will transparently handle this case.
   3561     //
   3562     // In order for this transformation to be safe, the LHS must be of the
   3563     // form X*(2^n) and the Or constant must be less than 2^n.
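            // For example, {0,+,4} has at least two trailing zero bits, so
            // ({0,+,4} | 1) can be rebuilt as ({0,+,4} + 1), i.e. {1,+,4}.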
   3564     if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
   3565       const SCEV *LHS = getSCEV(U->getOperand(0));
   3566       const APInt &CIVal = CI->getValue();
   3567       if (GetMinTrailingZeros(LHS) >=
   3568           (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
   3569         // Build a plain add SCEV.
   3570         const SCEV *S = getAddExpr(LHS, getSCEV(CI));
   3571         // If the LHS of the add was an addrec and it has no-wrap flags,
   3572         // transfer the no-wrap flags, since an or won't introduce a wrap.
   3573         if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
   3574           const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
   3575           const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags(
   3576             OldAR->getNoWrapFlags());
   3577         }
   3578         return S;
   3579       }
   3580     }
   3581     break;
   3582   case Instruction::Xor:
   3583     if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
   3584       // If the RHS of the xor is a signbit, then this is just an add.
   3585       // Instcombine turns add of signbit into xor as a strength reduction step.
   3586       if (CI->getValue().isSignBit())
   3587         return getAddExpr(getSCEV(U->getOperand(0)),
   3588                           getSCEV(U->getOperand(1)));
   3589 
   3590       // If the RHS of xor is -1, then this is a not operation.
   3591       if (CI->isAllOnesValue())
   3592         return getNotSCEV(getSCEV(U->getOperand(0)));
   3593 
   3594       // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
   3595       // This is a variant of the check for xor with -1, and it handles
   3596       // the case where instcombine has trimmed non-demanded bits out
   3597       // of an xor with -1.
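              // For example, with C == 255 on an i32 value %x,
              // xor(and(%x, 255), 255) is zext(not(trunc(%x to i8)) to i32).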
   3598       if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
   3599         if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
   3600           if (BO->getOpcode() == Instruction::And &&
   3601               LCI->getValue() == CI->getValue())
   3602             if (const SCEVZeroExtendExpr *Z =
   3603                   dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
   3604               Type *UTy = U->getType();
   3605               const SCEV *Z0 = Z->getOperand();
   3606               Type *Z0Ty = Z0->getType();
   3607               unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
   3608 
   3609               // If C is a low-bits mask, the zero extend is serving to
   3610               // mask off the high bits. Complement the operand and
   3611               // re-apply the zext.
   3612               if (APIntOps::isMask(Z0TySize, CI->getValue()))
   3613                 return getZeroExtendExpr(getNotSCEV(Z0), UTy);
   3614 
   3615               // If C is a single bit, it may be in the sign-bit position
   3616               // before the zero-extend. In this case, represent the xor
   3617               // using an add, which is equivalent, and re-apply the zext.
   3618               APInt Trunc = CI->getValue().trunc(Z0TySize);
   3619               if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
   3620                   Trunc.isSignBit())
   3621                 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
   3622                                          UTy);
   3623             }
   3624     }
   3625     break;
   3626 
   3627   case Instruction::Shl:
   3628     // Turn shift left of a constant amount into a multiply.
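            // For example, "%x shl 3" becomes %x * 8.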
   3629     if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
   3630       uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
   3631 
   3632       // If the shift count is not less than the bitwidth, the result of
   3633       // the shift is undefined. Don't try to analyze it, because the
   3634       // resolution chosen here may differ from the resolution chosen in
   3635       // other parts of the compiler.
   3636       if (SA->getValue().uge(BitWidth))
   3637         break;
   3638 
   3639       Constant *X = ConstantInt::get(getContext(),
   3640         APInt(BitWidth, 1).shl(SA->getZExtValue()));
   3641       return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
   3642     }
   3643     break;
   3644 
   3645   case Instruction::LShr:
   3646     // Turn logical shift right of a constant into an unsigned divide.
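            // For example, "%x lshr 3" becomes %x /u 8.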
   3647     if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
   3648       uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
   3649 
   3650       // If the shift count is not less than the bitwidth, the result of
   3651       // the shift is undefined. Don't try to analyze it, because the
   3652       // resolution chosen here may differ from the resolution chosen in
   3653       // other parts of the compiler.
   3654       if (SA->getValue().uge(BitWidth))
   3655         break;
   3656 
   3657       Constant *X = ConstantInt::get(getContext(),
   3658         APInt(BitWidth, 1).shl(SA->getZExtValue()));
   3659       return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
   3660     }
   3661     break;
   3662 
   3663   case Instruction::AShr:
   3664     // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
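            // For example, on i32, "(%x shl 24) ashr 24" becomes
            // sext(trunc(%x to i8) to i32).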
   3665     if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
   3666       if (Operator *L = dyn_cast<Operator>(U->getOperand(0)))
   3667         if (L->getOpcode() == Instruction::Shl &&
   3668             L->getOperand(1) == U->getOperand(1)) {
   3669           uint64_t BitWidth = getTypeSizeInBits(U->getType());
   3670 
   3671           // If the shift count is not less than the bitwidth, the result of
   3672           // the shift is undefined. Don't try to analyze it, because the
   3673           // resolution chosen here may differ from the resolution chosen in
   3674           // other parts of the compiler.
   3675           if (CI->getValue().uge(BitWidth))
   3676             break;
   3677 
   3678           uint64_t Amt = BitWidth - CI->getZExtValue();
   3679           if (Amt == BitWidth)
   3680             return getSCEV(L->getOperand(0));       // shift by zero --> noop
   3681           return
   3682             getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
   3683                                               IntegerType::get(getContext(),
   3684                                                                Amt)),
   3685                               U->getType());
   3686         }
   3687     break;
   3688 
   3689   case Instruction::Trunc:
   3690     return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
   3691 
   3692   case Instruction::ZExt:
   3693     return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
   3694 
   3695   case Instruction::SExt:
   3696     return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
   3697 
   3698   case Instruction::BitCast:
   3699     // BitCasts are no-op casts so we just eliminate the cast.
   3700     if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
   3701       return getSCEV(U->getOperand(0));
   3702     break;
   3703 
   3704   // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
   3705   // lead to pointer expressions which cannot safely be expanded to GEPs,
   3706   // because ScalarEvolution doesn't respect the GEP aliasing rules when
   3707   // simplifying integer expressions.
   3708 
   3709   case Instruction::GetElementPtr:
   3710     return createNodeForGEP(cast<GEPOperator>(U));
   3711 
   3712   case Instruction::PHI:
   3713     return createNodeForPHI(cast<PHINode>(U));
   3714 
   3715   case Instruction::Select:
   3716     // This could be a smax or umax that was lowered earlier.
   3717     // Try to recover it.
   3718     if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
   3719       Value *LHS = ICI->getOperand(0);
   3720       Value *RHS = ICI->getOperand(1);
   3721       switch (ICI->getPredicate()) {
   3722       case ICmpInst::ICMP_SLT:
   3723       case ICmpInst::ICMP_SLE:
   3724         std::swap(LHS, RHS);
   3725         // fall through
   3726       case ICmpInst::ICMP_SGT:
   3727       case ICmpInst::ICMP_SGE:
   3728         // a >s b ? a+x : b+x  ->  smax(a, b)+x
   3729         // a >s b ? b+x : a+x  ->  smin(a, b)+x
   3730         if (LHS->getType() == U->getType()) {
   3731           const SCEV *LS = getSCEV(LHS);
   3732           const SCEV *RS = getSCEV(RHS);
   3733           const SCEV *LA = getSCEV(U->getOperand(1));
   3734           const SCEV *RA = getSCEV(U->getOperand(2));
   3735           const SCEV *LDiff = getMinusSCEV(LA, LS);
   3736           const SCEV *RDiff = getMinusSCEV(RA, RS);
   3737           if (LDiff == RDiff)
   3738             return getAddExpr(getSMaxExpr(LS, RS), LDiff);
   3739           LDiff = getMinusSCEV(LA, RS);
   3740           RDiff = getMinusSCEV(RA, LS);
   3741           if (LDiff == RDiff)
   3742             return getAddExpr(getSMinExpr(LS, RS), LDiff);
   3743         }
   3744         break;
   3745       case ICmpInst::ICMP_ULT:
   3746       case ICmpInst::ICMP_ULE:
   3747         std::swap(LHS, RHS);
   3748         // fall through
   3749       case ICmpInst::ICMP_UGT:
   3750       case ICmpInst::ICMP_UGE:
   3751         // a >u b ? a+x : b+x  ->  umax(a, b)+x
   3752         // a >u b ? b+x : a+x  ->  umin(a, b)+x
   3753         if (LHS->getType() == U->getType()) {
   3754           const SCEV *LS = getSCEV(LHS);
   3755           const SCEV *RS = getSCEV(RHS);
   3756           const SCEV *LA = getSCEV(U->getOperand(1));
   3757           const SCEV *RA = getSCEV(U->getOperand(2));
   3758           const SCEV *LDiff = getMinusSCEV(LA, LS);
   3759           const SCEV *RDiff = getMinusSCEV(RA, RS);
   3760           if (LDiff == RDiff)
   3761             return getAddExpr(getUMaxExpr(LS, RS), LDiff);
   3762           LDiff = getMinusSCEV(LA, RS);
   3763           RDiff = getMinusSCEV(RA, LS);
   3764           if (LDiff == RDiff)
   3765             return getAddExpr(getUMinExpr(LS, RS), LDiff);
   3766         }
   3767         break;
   3768       case ICmpInst::ICMP_NE:
   3769         // n != 0 ? n+x : 1+x  ->  umax(n, 1)+x
   3770         if (LHS->getType() == U->getType() &&
   3771             isa<ConstantInt>(RHS) &&
   3772             cast<ConstantInt>(RHS)->isZero()) {
   3773           const SCEV *One = getConstant(LHS->getType(), 1);
   3774           const SCEV *LS = getSCEV(LHS);
   3775           const SCEV *LA = getSCEV(U->getOperand(1));
   3776           const SCEV *RA = getSCEV(U->getOperand(2));
   3777           const SCEV *LDiff = getMinusSCEV(LA, LS);
   3778           const SCEV *RDiff = getMinusSCEV(RA, One);
   3779           if (LDiff == RDiff)
   3780             return getAddExpr(getUMaxExpr(One, LS), LDiff);
   3781         }
   3782         break;
   3783       case ICmpInst::ICMP_EQ:
   3784         // n == 0 ? 1+x : n+x  ->  umax(n, 1)+x
   3785         if (LHS->getType() == U->getType() &&
   3786             isa<ConstantInt>(RHS) &&
   3787             cast<ConstantInt>(RHS)->isZero()) {
   3788           const SCEV *One = getConstant(LHS->getType(), 1);
   3789           const SCEV *LS = getSCEV(LHS);
   3790           const SCEV *LA = getSCEV(U->getOperand(1));
   3791           const SCEV *RA = getSCEV(U->getOperand(2));
   3792           const SCEV *LDiff = getMinusSCEV(LA, One);
   3793           const SCEV *RDiff = getMinusSCEV(RA, LS);
   3794           if (LDiff == RDiff)
   3795             return getAddExpr(getUMaxExpr(One, LS), LDiff);
   3796         }
   3797         break;
   3798       default:
   3799         break;
   3800       }
   3801     }
   3802 
   3803   default: // We cannot analyze this expression.
   3804     break;
   3805   }
   3806 
   3807   return getUnknown(V);
   3808 }
   3809 
   3810 
   3811 
   3812 //===----------------------------------------------------------------------===//
   3813 //                   Iteration Count Computation Code
   3814 //
   3815 
   3816 /// getBackedgeTakenCount - If the specified loop has a predictable
   3817 /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
   3818 /// object. The backedge-taken count is the number of times the loop header
   3819 /// will be branched to from within the loop. This is one less than the
   3820 /// trip count of the loop, since it doesn't count the first iteration,
   3821 /// when the header is branched to from outside the loop.
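        /// For example, if the header is reached once from outside the loop and
        /// nine times from the backedge, the backedge-taken count is 9 and the
        /// trip count is 10.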
   3822 ///
   3823 /// Note that it is not valid to call this method on a loop without a
   3824 /// loop-invariant backedge-taken count (see
   3825 /// hasLoopInvariantBackedgeTakenCount).
   3826 ///
   3827 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
   3828   return getBackedgeTakenInfo(L).Exact;
   3829 }
   3830 
   3831 /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
   3832 /// return the least SCEV value that is known never to be less than the
   3833 /// actual backedge taken count.
   3834 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
   3835   return getBackedgeTakenInfo(L).Max;
   3836 }
   3837 
   3838 /// PushLoopPHIs - Push PHI nodes in the header of the given loop
   3839 /// onto the given Worklist.
   3840 static void
   3841 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
   3842   BasicBlock *Header = L->getHeader();
   3843 
   3844   // Push all Loop-header PHIs onto the Worklist stack.
   3845   for (BasicBlock::iterator I = Header->begin();
   3846        PHINode *PN = dyn_cast<PHINode>(I); ++I)
   3847     Worklist.push_back(PN);
   3848 }
   3849 
   3850 const ScalarEvolution::BackedgeTakenInfo &
   3851 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
   3852   // Initially insert a CouldNotCompute for this loop. If the insertion
   3853   // succeeds, proceed to actually compute a backedge-taken count and
   3854   // update the value. The temporary CouldNotCompute value tells SCEV
   3855   // code elsewhere that it shouldn't attempt to request a new
   3856   // backedge-taken count, which could result in infinite recursion.
   3857   std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
   3858     BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
   3859   if (!Pair.second)
   3860     return Pair.first->second;
   3861 
   3862   BackedgeTakenInfo Result = getCouldNotCompute();
   3863   BackedgeTakenInfo Computed = ComputeBackedgeTakenCount(L);
   3864   if (Computed.Exact != getCouldNotCompute()) {
   3865     assert(isLoopInvariant(Computed.Exact, L) &&
   3866            isLoopInvariant(Computed.Max, L) &&
   3867            "Computed backedge-taken count isn't loop invariant for loop!");
   3868     ++NumTripCountsComputed;
   3869 
   3870     // Update the value in the map.
   3871     Result = Computed;
   3872   } else {
   3873     if (Computed.Max != getCouldNotCompute())
   3874       // Update the value in the map.
   3875       Result = Computed;
   3876     if (isa<PHINode>(L->getHeader()->begin()))
   3877       // Only count loops that have phi nodes as not being computable.
   3878       ++NumTripCountsNotComputed;
   3879   }
   3880 
   3881   // Now that we know more about the trip count for this loop, forget any
   3882   // existing SCEV values for PHI nodes in this loop since they are only
   3883   // conservative estimates made without the benefit of trip count
   3884   // information. This is similar to the code in forgetLoop, except that
   3885   // it handles SCEVUnknown PHI nodes specially.
   3886   if (Computed.hasAnyInfo()) {
   3887     SmallVector<Instruction *, 16> Worklist;
   3888     PushLoopPHIs(L, Worklist);
   3889 
   3890     SmallPtrSet<Instruction *, 8> Visited;
   3891     while (!Worklist.empty()) {
   3892       Instruction *I = Worklist.pop_back_val();
   3893       if (!Visited.insert(I)) continue;
   3894 
   3895       ValueExprMapType::iterator It =
   3896         ValueExprMap.find(static_cast<Value *>(I));
   3897       if (It != ValueExprMap.end()) {
   3898         const SCEV *Old = It->second;
   3899 
   3900         // SCEVUnknown for a PHI either means that it has an unrecognized
   3901         // structure, or it's a PHI that's in the process of being computed
   3902         // by createNodeForPHI.  In the former case, additional loop trip
   3903         // count information isn't going to change anything. In the latter
   3904         // case, createNodeForPHI will perform the necessary updates on its
   3905         // own when it gets to that point.
   3906         if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
   3907           forgetMemoizedResults(Old);
   3908           ValueExprMap.erase(It);
   3909         }
   3910         if (PHINode *PN = dyn_cast<PHINode>(I))
   3911           ConstantEvolutionLoopExitValue.erase(PN);
   3912       }
   3913 
   3914       PushDefUseChildren(I, Worklist);
   3915     }
   3916   }
   3917 
   3918   // Re-lookup the insert position, since the call to
   3919   // ComputeBackedgeTakenCount above could result in a
   3920   // recursive call to getBackedgeTakenInfo (on a different
   3921   // loop), which would invalidate the iterator computed
   3922   // earlier.
   3923   return BackedgeTakenCounts.find(L)->second = Result;
   3924 }
   3925 
   3926 /// forgetLoop - This method should be called by the client when it has
   3927 /// changed a loop in a way that may affect ScalarEvolution's ability to
   3928 /// compute a trip count, or if the loop is deleted.
   3929 void ScalarEvolution::forgetLoop(const Loop *L) {
   3930   // Drop any stored trip count value.
   3931   BackedgeTakenCounts.erase(L);
   3932 
   3933   // Drop information about expressions based on loop-header PHIs.
   3934   SmallVector<Instruction *, 16> Worklist;
   3935   PushLoopPHIs(L, Worklist);
   3936 
   3937   SmallPtrSet<Instruction *, 8> Visited;
   3938   while (!Worklist.empty()) {
   3939     Instruction *I = Worklist.pop_back_val();
   3940     if (!Visited.insert(I)) continue;
   3941 
   3942     ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
   3943     if (It != ValueExprMap.end()) {
   3944       forgetMemoizedResults(It->second);
   3945       ValueExprMap.erase(It);
   3946       if (PHINode *PN = dyn_cast<PHINode>(I))
   3947         ConstantEvolutionLoopExitValue.erase(PN);
   3948     }
   3949 
   3950     PushDefUseChildren(I, Worklist);
   3951   }
   3952 
   3953   // Forget all contained loops too, to avoid dangling entries in the
   3954   // ValuesAtScopes map.
   3955   for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
   3956     forgetLoop(*I);
   3957 }
   3958 
   3959 /// forgetValue - This method should be called by the client when it has
   3960 /// changed a value in a way that may affect its value, or which may
   3961 /// disconnect it from a def-use chain linking it to a loop.
   3962 void ScalarEvolution::forgetValue(Value *V) {
   3963   Instruction *I = dyn_cast<Instruction>(V);
   3964   if (!I) return;
   3965 
   3966   // Drop information about expressions based on loop-header PHIs.
   3967   SmallVector<Instruction *, 16> Worklist;
   3968   Worklist.push_back(I);
   3969 
   3970   SmallPtrSet<Instruction *, 8> Visited;
   3971   while (!Worklist.empty()) {
   3972     I = Worklist.pop_back_val();
   3973     if (!Visited.insert(I)) continue;
   3974 
   3975     ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
   3976     if (It != ValueExprMap.end()) {
   3977       forgetMemoizedResults(It->second);
   3978       ValueExprMap.erase(It);
   3979       if (PHINode *PN = dyn_cast<PHINode>(I))
   3980         ConstantEvolutionLoopExitValue.erase(PN);
   3981     }
   3982 
   3983     PushDefUseChildren(I, Worklist);
   3984   }
   3985 }
   3986 
   3987 /// ComputeBackedgeTakenCount - Compute the number of times the backedge
   3988 /// of the specified loop will execute.
   3989 ScalarEvolution::BackedgeTakenInfo
   3990 ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
   3991   SmallVector<BasicBlock *, 8> ExitingBlocks;
   3992   L->getExitingBlocks(ExitingBlocks);
   3993 
   3994   // Examine all exits and pick the most conservative values.
   3995   const SCEV *BECount = getCouldNotCompute();
   3996   const SCEV *MaxBECount = getCouldNotCompute();
   3997   bool CouldNotComputeBECount = false;
   3998   for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
   3999     BackedgeTakenInfo NewBTI =
   4000       ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]);
   4001 
   4002     if (NewBTI.Exact == getCouldNotCompute()) {
   4003       // We couldn't compute an exact value for this exit, so
   4004       // we won't be able to compute an exact value for the loop.
   4005       CouldNotComputeBECount = true;
   4006       BECount = getCouldNotCompute();
   4007     } else if (!CouldNotComputeBECount) {
   4008       if (BECount == getCouldNotCompute())
   4009         BECount = NewBTI.Exact;
   4010       else
   4011         BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact);
   4012     }
   4013     if (MaxBECount == getCouldNotCompute())
   4014       MaxBECount = NewBTI.Max;
   4015     else if (NewBTI.Max != getCouldNotCompute())
   4016       MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max);
   4017   }
   4018 
   4019   return BackedgeTakenInfo(BECount, MaxBECount);
   4020 }
   4021 
   4022 /// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge
   4023 /// of the specified loop will execute if it exits via the specified block.
   4024 ScalarEvolution::BackedgeTakenInfo
   4025 ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L,
   4026                                                    BasicBlock *ExitingBlock) {
   4027 
   4028   // Okay, we've chosen an exiting block.  See what condition causes us to
   4029   // exit at this block.
   4030   //
   4031   // FIXME: we should be able to handle switch instructions (with a single exit)
   4032   BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
   4033   if (ExitBr == 0) return getCouldNotCompute();
   4034   assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
   4035 
   4036   // At this point, we know we have a conditional branch that determines whether
   4037   // the loop is exited.  However, we don't know if the branch is executed each
   4038   // time through the loop.  If not, then the execution count of the branch will
   4039   // not be equal to the trip count of the loop.
   4040   //
   4041   // Currently we check for this by seeing if the Exit branch goes to
   4042   // the loop header.  If so, we know it will always execute the same number of
   4043   // times as the loop.  We also handle the case where the exit block *is* the
   4044   // loop header.  This is common for un-rotated loops.
   4045   //
   4046   // If both of those tests fail, walk up the unique predecessor chain to the
   4047   // header, stopping if there is an edge that doesn't exit the loop. If the
   4048   // header is reached, the execution count of the branch will be equal to the
   4049   // trip count of the loop.
   4050   //
   4051   //  More extensive analysis could be done to handle more cases here.
   4052   //
   4053   if (ExitBr->getSuccessor(0) != L->getHeader() &&
   4054       ExitBr->getSuccessor(1) != L->getHeader() &&
   4055       ExitBr->getParent() != L->getHeader()) {
   4056     // The simple checks failed, try climbing the unique predecessor chain
   4057     // up to the header.
   4058     bool Ok = false;
   4059     for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
   4060       BasicBlock *Pred = BB->getUniquePredecessor();
   4061       if (!Pred)
   4062         return getCouldNotCompute();
   4063       TerminatorInst *PredTerm = Pred->getTerminator();
   4064       for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
   4065         BasicBlock *PredSucc = PredTerm->getSuccessor(i);
   4066         if (PredSucc == BB)
   4067           continue;
   4068         // If the predecessor has a successor that isn't BB and isn't
   4069         // outside the loop, assume the worst.
   4070         if (L->contains(PredSucc))
   4071           return getCouldNotCompute();
   4072       }
   4073       if (Pred == L->getHeader()) {
   4074         Ok = true;
   4075         break;
   4076       }
   4077       BB = Pred;
   4078     }
   4079     if (!Ok)
   4080       return getCouldNotCompute();
   4081   }
   4082 
   4083   // Proceed to the next level to examine the exit condition expression.
   4084   return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
   4085                                                ExitBr->getSuccessor(0),
   4086                                                ExitBr->getSuccessor(1));
   4087 }
   4088 
   4089 /// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the
   4090 /// backedge of the specified loop will execute if its exit condition
   4091 /// were a conditional branch of ExitCond, TBB, and FBB.
   4092 ScalarEvolution::BackedgeTakenInfo
   4093 ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
   4094                                                        Value *ExitCond,
   4095                                                        BasicBlock *TBB,
   4096                                                        BasicBlock *FBB) {
   4097   // Check if the controlling expression for this loop is an And or Or.
   4098   if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
   4099     if (BO->getOpcode() == Instruction::And) {
   4100       // Recurse on the operands of the and.
   4101       BackedgeTakenInfo BTI0 =
   4102         ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
   4103       BackedgeTakenInfo BTI1 =
   4104         ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
   4105       const SCEV *BECount = getCouldNotCompute();
   4106       const SCEV *MaxBECount = getCouldNotCompute();
   4107       if (L->contains(TBB)) {
   4108         // Both conditions must be true for the loop to continue executing.
   4109         // Choose the less conservative count.
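                // For example, for "while (A && B)" the loop exits as soon as
                // either A or B fails, so the exact count is the umin of the
                // counts computed for A and B individually.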
   4110         if (BTI0.Exact == getCouldNotCompute() ||
   4111             BTI1.Exact == getCouldNotCompute())
   4112           BECount = getCouldNotCompute();
   4113         else
   4114           BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
   4115         if (BTI0.Max == getCouldNotCompute())
   4116           MaxBECount = BTI1.Max;
   4117         else if (BTI1.Max == getCouldNotCompute())
   4118           MaxBECount = BTI0.Max;
   4119         else
   4120           MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
   4121       } else {
   4122         // Both conditions must be true at the same time for the loop to exit.
   4123         // For now, be conservative.
   4124         assert(L->contains(FBB) && "Loop block has no successor in loop!");
   4125         if (BTI0.Max == BTI1.Max)
   4126           MaxBECount = BTI0.Max;
   4127         if (BTI0.Exact == BTI1.Exact)
   4128           BECount = BTI0.Exact;
   4129       }
   4130 
   4131       return BackedgeTakenInfo(BECount, MaxBECount);
   4132     }
   4133     if (BO->getOpcode() == Instruction::Or) {
   4134       // Recurse on the operands of the or.
   4135       BackedgeTakenInfo BTI0 =
   4136         ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
   4137       BackedgeTakenInfo BTI1 =
   4138         ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
   4139       const SCEV *BECount = getCouldNotCompute();
   4140       const SCEV *MaxBECount = getCouldNotCompute();
   4141       if (L->contains(FBB)) {
   4142         // Both conditions must be false for the loop to continue executing.
   4143         // Choose the less conservative count.
   4144         if (BTI0.Exact == getCouldNotCompute() ||
   4145             BTI1.Exact == getCouldNotCompute())
   4146           BECount = getCouldNotCompute();
   4147         else
   4148           BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
   4149         if (BTI0.Max == getCouldNotCompute())
   4150           MaxBECount = BTI1.Max;
   4151         else if (BTI1.Max == getCouldNotCompute())
   4152           MaxBECount = BTI0.Max;
   4153         else
   4154           MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
   4155       } else {
   4156         // Both conditions must be false at the same time for the loop to exit.
   4157         // For now, be conservative.
   4158         assert(L->contains(TBB) && "Loop block has no successor in loop!");
   4159         if (BTI0.Max == BTI1.Max)
   4160           MaxBECount = BTI0.Max;
   4161         if (BTI0.Exact == BTI1.Exact)
   4162           BECount = BTI0.Exact;
   4163       }
   4164 
   4165       return BackedgeTakenInfo(BECount, MaxBECount);
   4166     }
   4167   }
   4168 
   4169   // With an icmp, it may be feasible to compute an exact backedge-taken count.
   4170   // Proceed to the next level to examine the icmp.
   4171   if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
   4172     return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);
   4173 
   4174   // Check for a constant condition. These are normally stripped out by
   4175   // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
   4176   // preserve the CFG and is temporarily leaving constant conditions
   4177   // in place.
   4178   if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
   4179     if (L->contains(FBB) == !CI->getZExtValue())
   4180       // The backedge is always taken.
   4181       return getCouldNotCompute();
   4182     else
   4183       // The backedge is never taken.
   4184       return getConstant(CI->getType(), 0);
   4185   }
   4186 
   4187   // If it's not an integer or pointer comparison then compute it the hard way.
   4188   return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
   4189 }
   4190 
   4191 /// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
   4192 /// backedge of the specified loop will execute if its exit condition
   4193 /// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
   4194 ScalarEvolution::BackedgeTakenInfo
   4195 ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
   4196                                                            ICmpInst *ExitCond,
   4197                                                            BasicBlock *TBB,
   4198                                                            BasicBlock *FBB) {
   4199 
   4200   // If the condition was exit on true, convert the condition to exit on false
   4201   ICmpInst::Predicate Cond;
   4202   if (!L->contains(FBB))
   4203     Cond = ExitCond->getPredicate();
   4204   else
   4205     Cond = ExitCond->getInversePredicate();
   4206 
   4207   // Handle common loops like: for (X = "string"; *X; ++X)
   4208   if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
   4209     if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
   4210       BackedgeTakenInfo ItCnt =
   4211         ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond);
   4212       if (ItCnt.hasAnyInfo())
   4213         return ItCnt;
   4214     }
   4215 
   4216   const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
   4217   const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
   4218 
   4219   // Try to evaluate any dependencies out of the loop.
   4220   LHS = getSCEVAtScope(LHS, L);
   4221   RHS = getSCEVAtScope(RHS, L);
   4222 
   4223   // At this point, we would like to compute how many iterations of the
   4224   // loop the predicate will return true for these inputs.
   4225   if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
   4226     // If there is a loop-invariant, force it into the RHS.
   4227     std::swap(LHS, RHS);
   4228     Cond = ICmpInst::getSwappedPredicate(Cond);
   4229   }
   4230 
   4231   // Simplify the operands before analyzing them.
   4232   (void)SimplifyICmpOperands(Cond, LHS, RHS);
   4233 
   4234   // If we have a comparison of a chrec against a constant, try to use value
   4235   // ranges to answer this query.
   4236   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
   4237     if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
   4238       if (AddRec->getLoop() == L) {
   4239         // Form the constant range.
   4240         ConstantRange CompRange(
   4241             ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
   4242 
   4243         const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
   4244         if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
   4245       }
   4246 
   4247   switch (Cond) {
   4248   case ICmpInst::ICMP_NE: {                     // while (X != Y)
   4249     // Convert to: while (X-Y != 0)
   4250     BackedgeTakenInfo BTI = HowFarToZero(getMinusSCEV(LHS, RHS), L);
   4251     if (BTI.hasAnyInfo()) return BTI;
   4252     break;
   4253   }
   4254   case ICmpInst::ICMP_EQ: {                     // while (X == Y)
   4255     // Convert to: while (X-Y == 0)
   4256     BackedgeTakenInfo BTI = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
   4257     if (BTI.hasAnyInfo()) return BTI;
   4258     break;
   4259   }
   4260   case ICmpInst::ICMP_SLT: {
   4261     BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true);
   4262     if (BTI.hasAnyInfo()) return BTI;
   4263     break;
   4264   }
   4265   case ICmpInst::ICMP_SGT: {
   4266     BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
   4267                                              getNotSCEV(RHS), L, true);
   4268     if (BTI.hasAnyInfo()) return BTI;
   4269     break;
   4270   }
   4271   case ICmpInst::ICMP_ULT: {
   4272     BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false);
   4273     if (BTI.hasAnyInfo()) return BTI;
   4274     break;
   4275   }
   4276   case ICmpInst::ICMP_UGT: {
   4277     BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
   4278                                              getNotSCEV(RHS), L, false);
   4279     if (BTI.hasAnyInfo()) return BTI;
   4280     break;
   4281   }
   4282   default:
   4283 #if 0
   4284     dbgs() << "ComputeBackedgeTakenCount ";
   4285     if (ExitCond->getOperand(0)->getType()->isUnsigned())
   4286       dbgs() << "[unsigned] ";
   4287     dbgs() << *LHS << "   "
   4288          << Instruction::getOpcodeName(Instruction::ICmp)
   4289          << "   " << *RHS << "\n";
   4290 #endif
   4291     break;
   4292   }
   4293   return
   4294     ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
   4295 }
   4296 
   4297 static ConstantInt *
   4298 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
   4299                                 ScalarEvolution &SE) {
   4300   const SCEV *InVal = SE.getConstant(C);
   4301   const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
   4302   assert(isa<SCEVConstant>(Val) &&
   4303          "Evaluation of SCEV at constant didn't fold correctly?");
   4304   return cast<SCEVConstant>(Val)->getValue();
   4305 }
   4306 
   4307 /// GetAddressedElementFromGlobal - Given a global variable with an initializer
   4308 /// and a GEP expression (missing the pointer index) indexing into it, return
   4309 /// the addressed element of the initializer or null if the index expression is
   4310 /// invalid.
   4311 static Constant *
   4312 GetAddressedElementFromGlobal(GlobalVariable *GV,
   4313                               const std::vector<ConstantInt*> &Indices) {
   4314   Constant *Init = GV->getInitializer();
   4315   for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
   4316     uint64_t Idx = Indices[i]->getZExtValue();
   4317     if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
   4318       assert(Idx < CS->getNumOperands() && "Bad struct index!");
   4319       Init = cast<Constant>(CS->getOperand(Idx));
   4320     } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
   4321       if (Idx >= CA->getNumOperands()) return 0;  // Bogus program
   4322       Init = cast<Constant>(CA->getOperand(Idx));
   4323     } else if (isa<ConstantAggregateZero>(Init)) {
   4324       if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
   4325         assert(Idx < STy->getNumElements() && "Bad struct index!");
   4326         Init = Constant::getNullValue(STy->getElementType(Idx));
   4327       } else if (ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
   4328         if (Idx >= ATy->getNumElements()) return 0;  // Bogus program
   4329         Init = Constant::getNullValue(ATy->getElementType());
   4330       } else {
   4331         llvm_unreachable("Unknown constant aggregate type!");
   4332       }
   4333       return 0;
   4334     } else {
   4335       return 0; // Unknown initializer type
   4336     }
   4337   }
   4338   return Init;
   4339 }
   4340 
   4341 /// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of
   4342 /// 'icmp op load X, cst', try to see if we can compute the backedge
   4343 /// execution count.
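        /// A typical case is "for (i = 0; Table[i] != 0; ++i)" where Table is a
        /// constant global array; the count is found by evaluating the compare
        /// for successive constant index values.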
   4344 ScalarEvolution::BackedgeTakenInfo
   4345 ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount(
   4346                                                 LoadInst *LI,
   4347                                                 Constant *RHS,
   4348                                                 const Loop *L,
   4349                                                 ICmpInst::Predicate predicate) {
   4350   if (LI->isVolatile()) return getCouldNotCompute();
   4351 
   4352   // Check to see if the loaded pointer is a getelementptr of a global.
   4353   // TODO: Use SCEV instead of manually grubbing with GEPs.
   4354   GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
   4355   if (!GEP) return getCouldNotCompute();
   4356 
   4357   // Make sure that it is really a constant global we are gepping, with an
   4358   // initializer, and make sure the first IDX is really 0.
   4359   GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
   4360   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
   4361       GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
   4362       !cast<Constant>(GEP->getOperand(1))->isNullValue())
   4363     return getCouldNotCompute();
   4364 
   4365   // Okay, we allow one non-constant index into the GEP instruction.
   4366   Value *VarIdx = 0;
   4367   std::vector<ConstantInt*> Indexes;
   4368   unsigned VarIdxNum = 0;
   4369   for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
   4370     if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
   4371       Indexes.push_back(CI);
   4372     } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
   4373       if (VarIdx) return getCouldNotCompute();  // Multiple non-constant idx's.
   4374       VarIdx = GEP->getOperand(i);
   4375       VarIdxNum = i-2;
   4376       Indexes.push_back(0);
   4377     }
   4378 
   4379   // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
   4380   // Check to see if X is a loop variant variable value now.
   4381   const SCEV *Idx = getSCEV(VarIdx);
   4382   Idx = getSCEVAtScope(Idx, L);
   4383 
   4384   // We can only recognize very limited forms of loop index expressions, in
   4385   // particular, only affine AddRec's like {C1,+,C2}.
   4386   const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
   4387   if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
   4388       !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
   4389       !isa<SCEVConstant>(IdxExpr->getOperand(1)))
   4390     return getCouldNotCompute();
   4391 
   4392   unsigned MaxSteps = MaxBruteForceIterations;
   4393   for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
   4394     ConstantInt *ItCst = ConstantInt::get(
   4395                            cast<IntegerType>(IdxExpr->getType()), IterationNum);
   4396     ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
   4397 
   4398     // Form the GEP offset.
   4399     Indexes[VarIdxNum] = Val;
   4400 
   4401     Constant *Result = GetAddressedElementFromGlobal(GV, Indexes);
   4402     if (Result == 0) break;  // Cannot compute!
   4403 
   4404     // Evaluate the condition for this iteration.
   4405     Result = ConstantExpr::getICmp(predicate, Result, RHS);
   4406     if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
   4407     if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
   4408 #if 0
   4409       dbgs() << "\n***\n*** Computed loop count " << *ItCst
   4410              << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
   4411              << "***\n";
   4412 #endif
   4413       ++NumArrayLenItCounts;
   4414       return getConstant(ItCst);   // Found terminating iteration!
   4415     }
   4416   }
   4417   return getCouldNotCompute();
   4418 }
   4419 
   4420 
   4421 /// CanConstantFold - Return true if we can constant fold an instruction of the
   4422 /// specified type, assuming that all operands were constants.
   4423 static bool CanConstantFold(const Instruction *I) {
   4424   if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
   4425       isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
   4426     return true;
   4427 
   4428   if (const CallInst *CI = dyn_cast<CallInst>(I))
   4429     if (const Function *F = CI->getCalledFunction())
   4430       return canConstantFoldCallTo(F);
   4431   return false;
   4432 }
   4433 
   4434 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
   4435 /// in the loop that V is derived from.  We allow arbitrary operations along the
   4436 /// way, but the operands of an operation must either be constants or a value
   4437 /// derived from a constant PHI.  If this expression does not fit with these
   4438 /// constraints, return null.
   4439 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
   4440   // If this is not an instruction, or if this is an instruction outside of the
   4441   // loop, it can't be derived from a loop PHI.
   4442   Instruction *I = dyn_cast<Instruction>(V);
   4443   if (I == 0 || !L->contains(I)) return 0;
   4444 
   4445   if (PHINode *PN = dyn_cast<PHINode>(I)) {
   4446     if (L->getHeader() == I->getParent())
   4447       return PN;
   4448     else
   4449       // We don't currently keep track of the control flow needed to evaluate
   4450       // PHIs, so we cannot handle PHIs inside of loops.
   4451       return 0;
   4452   }
   4453 
   4454   // If we won't be able to constant fold this expression even if the operands
   4455   // are constants, return early.
   4456   if (!CanConstantFold(I)) return 0;
   4457 
   4458   // Otherwise, we can evaluate this instruction if all of its operands are
   4459   // constant or derived from a PHI node themselves.
   4460   PHINode *PHI = 0;
   4461   for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
   4462     if (!isa<Constant>(I->getOperand(Op))) {
   4463       PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
   4464       if (P == 0) return 0;  // Not evolving from PHI
   4465       if (PHI == 0)
   4466         PHI = P;
   4467       else if (PHI != P)
   4468         return 0;  // Evolving from multiple different PHIs.
   4469     }
   4470 
   4471   // This is an expression evolving from a constant PHI!
   4472   return PHI;
   4473 }
   4474 
   4475 /// EvaluateExpression - Given an expression that passes the
   4476 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
   4477 /// in the loop has the value PHIVal.  If we can't fold this expression for some
   4478 /// reason, return null.
   4479 static Constant *EvaluateExpression(Value *V, Constant *PHIVal,
   4480                                     const TargetData *TD) {
   4481   if (isa<PHINode>(V)) return PHIVal;
   4482   if (Constant *C = dyn_cast<Constant>(V)) return C;
   4483   Instruction *I = cast<Instruction>(V);
   4484 
   4485   std::vector<Constant*> Operands(I->getNumOperands());
   4486 
   4487   for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
   4488     Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal, TD);
   4489     if (Operands[i] == 0) return 0;
   4490   }
   4491 
   4492   if (const CmpInst *CI = dyn_cast<CmpInst>(I))
   4493     return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
   4494                                            Operands[1], TD);
   4495   return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, TD);
   4496 }
   4497 
   4498 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
   4499 /// in the header of its containing loop, that the loop executes a
   4500 /// constant number of times, and that the PHI node is just a recurrence
   4501 /// involving constants, fold it.
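        /// For example, a PHI that starts at 3 and is doubled on each iteration
        /// of a loop whose backedge is taken 10 times evaluates to
        /// 3 * 2^10 = 3072.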
   4502 Constant *
   4503 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
   4504                                                    const APInt &BEs,
   4505                                                    const Loop *L) {
   4506   DenseMap<PHINode*, Constant*>::const_iterator I =
   4507     ConstantEvolutionLoopExitValue.find(PN);
   4508   if (I != ConstantEvolutionLoopExitValue.end())
   4509     return I->second;
   4510 
   4511   if (BEs.ugt(MaxBruteForceIterations))
   4512     return ConstantEvolutionLoopExitValue[PN] = 0;  // Not going to evaluate it.
   4513 
   4514   Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
   4515 
   4516   // Since the loop is canonicalized, the PHI node must have two entries.  One
   4517   // entry must be a constant (coming in from outside of the loop), and the
   4518   // second must be derived from the same PHI.
   4519   bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
   4520   Constant *StartCST =
   4521     dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
   4522   if (StartCST == 0)
   4523     return RetVal = 0;  // Must be a constant.
   4524 
   4525   Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
   4526   if (getConstantEvolvingPHI(BEValue, L) != PN &&
   4527       !isa<Constant>(BEValue))
   4528     return RetVal = 0;  // Not derived from same PHI.
   4529 
   4530   // Execute the loop symbolically to determine the exit value.
   4531   if (BEs.getActiveBits() >= 32)
   4532     return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
   4533 
   4534   unsigned NumIterations = BEs.getZExtValue(); // must be in range
   4535   unsigned IterationNum = 0;
   4536   for (Constant *PHIVal = StartCST; ; ++IterationNum) {
   4537     if (IterationNum == NumIterations)
   4538       return RetVal = PHIVal;  // Got exit value!
   4539 
   4540     // Compute the value of the PHI node for the next iteration.
   4541     Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD);
   4542     if (NextPHI == PHIVal)
   4543       return RetVal = NextPHI;  // Stopped evolving!
   4544     if (NextPHI == 0)
   4545       return 0;        // Couldn't evaluate!
   4546     PHIVal = NextPHI;
   4547   }
   4548 }
   4549 
   4550 /// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a
   4551 /// constant number of times (the condition evolves only from constants),
   4552 /// try to evaluate a few iterations of the loop until the exit condition
   4553 /// gets a value of ExitWhen (true or false).  If we cannot
   4554 /// evaluate the trip count of the loop, return getCouldNotCompute().
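        /// For example, for "x = 1; while (x != 16) x *= 2;" the condition is
        /// evaluated for x = 1, 2, 4, 8, 16, giving a backedge-taken count of 4.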
   4555 const SCEV *
   4556 ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
   4557                                                        Value *Cond,
   4558                                                        bool ExitWhen) {
   4559   PHINode *PN = getConstantEvolvingPHI(Cond, L);
   4560   if (PN == 0) return getCouldNotCompute();
   4561 
   4562   // If the loop is canonicalized, the PHI will have exactly two entries.
   4563   // That's the only form we support here.
   4564   if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
   4565 
   4566   // One entry must be a constant (coming in from outside of the loop), and the
   4567   // second must be derived from the same PHI.
   4568   bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
   4569   Constant *StartCST =
   4570     dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
   4571   if (StartCST == 0) return getCouldNotCompute();  // Must be a constant.
   4572 
   4573   Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
   4574   if (getConstantEvolvingPHI(BEValue, L) != PN &&
   4575       !isa<Constant>(BEValue))
   4576     return getCouldNotCompute();  // Not derived from same PHI.
   4577 
   4578   // Okay, we found a PHI node that defines the trip count of this loop.  Execute
   4579   // the loop symbolically to determine when the condition gets a value of
   4580   // "ExitWhen".
   4581   unsigned IterationNum = 0;
   4582   unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
   4583   for (Constant *PHIVal = StartCST;
   4584        IterationNum != MaxIterations; ++IterationNum) {
   4585     ConstantInt *CondVal =
   4586       dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal, TD));
   4587 
   4588     // Couldn't symbolically evaluate.
   4589     if (!CondVal) return getCouldNotCompute();
   4590 
   4591     if (CondVal->getValue() == uint64_t(ExitWhen)) {
   4592       ++NumBruteForceTripCountsComputed;
   4593       return getConstant(Type::getInt32Ty(getContext()), IterationNum);
   4594     }
   4595 
   4596     // Compute the value of the PHI node for the next iteration.
   4597     Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD);
   4598     if (NextPHI == 0 || NextPHI == PHIVal)
   4599       return getCouldNotCompute();// Couldn't evaluate or not making progress...
   4600     PHIVal = NextPHI;
   4601   }
   4602 
   4603   // Too many iterations were needed to evaluate.
   4604   return getCouldNotCompute();
   4605 }
   4606 
   4607 /// getSCEVAtScope - Return a SCEV expression for the specified value
    4608 /// at the specified scope in the program.  The L value specifies the loop in
    4609 /// whose scope to evaluate the expression; null means the top-level scope, and
    4610 /// expressions varying in a loop immediately inside L fold to their exit values.
   4611 ///
   4612 /// This method can be used to compute the exit value for a variable defined
   4613 /// in a loop by querying what the value will hold in the parent loop.
   4614 ///
   4615 /// In the case that a relevant loop exit value cannot be computed, the
   4616 /// original value V is returned.
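         /// For example, given the recurrence {0,+,1} for an induction variable of
         /// a loop whose backedge-taken count is n, evaluating it at a scope outside
         /// that loop yields the loop-invariant value n (assuming the backedge-taken
         /// count is computable).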
   4617 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
   4618   // Check to see if we've folded this expression at this loop before.
   4619   std::map<const Loop *, const SCEV *> &Values = ValuesAtScopes[V];
   4620   std::pair<std::map<const Loop *, const SCEV *>::iterator, bool> Pair =
   4621     Values.insert(std::make_pair(L, static_cast<const SCEV *>(0)));
   4622   if (!Pair.second)
   4623     return Pair.first->second ? Pair.first->second : V;
   4624 
   4625   // Otherwise compute it.
   4626   const SCEV *C = computeSCEVAtScope(V, L);
   4627   ValuesAtScopes[V][L] = C;
   4628   return C;
   4629 }
   4630 
   4631 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
   4632   if (isa<SCEVConstant>(V)) return V;
   4633 
   4634   // If this instruction is evolved from a constant-evolving PHI, compute the
   4635   // exit value from the loop without using SCEVs.
   4636   if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
   4637     if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
   4638       const Loop *LI = (*this->LI)[I->getParent()];
   4639       if (LI && LI->getParentLoop() == L)  // Looking for loop exit value.
   4640         if (PHINode *PN = dyn_cast<PHINode>(I))
   4641           if (PN->getParent() == LI->getHeader()) {
   4642             // Okay, there is no closed form solution for the PHI node.  Check
   4643             // to see if the loop that contains it has a known backedge-taken
   4644             // count.  If so, we may be able to force computation of the exit
   4645             // value.
   4646             const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
   4647             if (const SCEVConstant *BTCC =
   4648                   dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
   4649               // Okay, we know how many times the containing loop executes.  If
   4650               // this is a constant evolving PHI node, get the final value at
   4651               // the specified iteration number.
   4652               Constant *RV = getConstantEvolutionLoopExitValue(PN,
   4653                                                    BTCC->getValue()->getValue(),
   4654                                                                LI);
   4655               if (RV) return getSCEV(RV);
   4656             }
   4657           }
   4658 
   4659       // Okay, this is an expression that we cannot symbolically evaluate
   4660       // into a SCEV.  Check to see if it's possible to symbolically evaluate
   4661       // the arguments into constants, and if so, try to constant propagate the
   4662       // result.  This is particularly useful for computing loop exit values.
   4663       if (CanConstantFold(I)) {
   4664         SmallVector<Constant *, 4> Operands;
   4665         bool MadeImprovement = false;
   4666         for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
   4667           Value *Op = I->getOperand(i);
   4668           if (Constant *C = dyn_cast<Constant>(Op)) {
   4669             Operands.push_back(C);
   4670             continue;
   4671           }
   4672 
    4673           // If an operand is non-constant, and its type is not SCEVable
    4674           // (neither integer nor pointer), don't even try to analyze it
    4675           // with SCEV techniques.
   4676           if (!isSCEVable(Op->getType()))
   4677             return V;
   4678 
   4679           const SCEV *OrigV = getSCEV(Op);
   4680           const SCEV *OpV = getSCEVAtScope(OrigV, L);
   4681           MadeImprovement |= OrigV != OpV;
   4682 
   4683           Constant *C = 0;
   4684           if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV))
   4685             C = SC->getValue();
   4686           if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV))
   4687             C = dyn_cast<Constant>(SU->getValue());
   4688           if (!C) return V;
   4689           if (C->getType() != Op->getType())
   4690             C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
   4691                                                               Op->getType(),
   4692                                                               false),
   4693                                       C, Op->getType());
   4694           Operands.push_back(C);
   4695         }
   4696 
   4697         // Check to see if getSCEVAtScope actually made an improvement.
   4698         if (MadeImprovement) {
   4699           Constant *C = 0;
   4700           if (const CmpInst *CI = dyn_cast<CmpInst>(I))
   4701             C = ConstantFoldCompareInstOperands(CI->getPredicate(),
   4702                                                 Operands[0], Operands[1], TD);
   4703           else
   4704             C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
   4705                                          Operands, TD);
   4706           if (!C) return V;
   4707           return getSCEV(C);
   4708         }
   4709       }
   4710     }
   4711 
   4712     // This is some other type of SCEVUnknown, just return it.
   4713     return V;
   4714   }
   4715 
   4716   if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
   4717     // Avoid performing the look-up in the common case where the specified
   4718     // expression has no loop-variant portions.
   4719     for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
   4720       const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
   4721       if (OpAtScope != Comm->getOperand(i)) {
   4722         // Okay, at least one of these operands is loop variant but might be
   4723         // foldable.  Build a new instance of the folded commutative expression.
   4724         SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
   4725                                             Comm->op_begin()+i);
   4726         NewOps.push_back(OpAtScope);
   4727 
   4728         for (++i; i != e; ++i) {
   4729           OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
   4730           NewOps.push_back(OpAtScope);
   4731         }
   4732         if (isa<SCEVAddExpr>(Comm))
   4733           return getAddExpr(NewOps);
   4734         if (isa<SCEVMulExpr>(Comm))
   4735           return getMulExpr(NewOps);
   4736         if (isa<SCEVSMaxExpr>(Comm))
   4737           return getSMaxExpr(NewOps);
   4738         if (isa<SCEVUMaxExpr>(Comm))
   4739           return getUMaxExpr(NewOps);
   4740         llvm_unreachable("Unknown commutative SCEV type!");
   4741       }
   4742     }
   4743     // If we got here, all operands are loop invariant.
   4744     return Comm;
   4745   }
   4746 
   4747   if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
   4748     const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
   4749     const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
   4750     if (LHS == Div->getLHS() && RHS == Div->getRHS())
   4751       return Div;   // must be loop invariant
   4752     return getUDivExpr(LHS, RHS);
   4753   }
   4754 
   4755   // If this is a loop recurrence for a loop that does not contain L, then we
   4756   // are dealing with the final value computed by the loop.
   4757   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
   4758     // First, attempt to evaluate each operand.
   4759     // Avoid performing the look-up in the common case where the specified
   4760     // expression has no loop-variant portions.
   4761     for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
   4762       const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
   4763       if (OpAtScope == AddRec->getOperand(i))
   4764         continue;
   4765 
   4766       // Okay, at least one of these operands is loop variant but might be
    4767       // foldable.  Build a new instance of the folded add recurrence.
   4768       SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
   4769                                           AddRec->op_begin()+i);
   4770       NewOps.push_back(OpAtScope);
   4771       for (++i; i != e; ++i)
   4772         NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
   4773 
   4774       const SCEV *FoldedRec =
   4775         getAddRecExpr(NewOps, AddRec->getLoop(),
   4776                       AddRec->getNoWrapFlags(SCEV::FlagNW));
   4777       AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
   4778       // The addrec may be folded to a nonrecurrence, for example, if the
   4779       // induction variable is multiplied by zero after constant folding. Go
   4780       // ahead and return the folded value.
   4781       if (!AddRec)
   4782         return FoldedRec;
   4783       break;
   4784     }
   4785 
   4786     // If the scope is outside the addrec's loop, evaluate it by using the
   4787     // loop exit value of the addrec.
   4788     if (!AddRec->getLoop()->contains(L)) {
   4789       // To evaluate this recurrence, we need to know how many times the AddRec
   4790       // loop iterates.  Compute this now.
   4791       const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
   4792       if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
   4793 
   4794       // Then, evaluate the AddRec.
   4795       return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
   4796     }
   4797 
   4798     return AddRec;
   4799   }
   4800 
   4801   if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
   4802     const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
   4803     if (Op == Cast->getOperand())
   4804       return Cast;  // must be loop invariant
   4805     return getZeroExtendExpr(Op, Cast->getType());
   4806   }
   4807 
   4808   if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
   4809     const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
   4810     if (Op == Cast->getOperand())
   4811       return Cast;  // must be loop invariant
   4812     return getSignExtendExpr(Op, Cast->getType());
   4813   }
   4814 
   4815   if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
   4816     const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
   4817     if (Op == Cast->getOperand())
   4818       return Cast;  // must be loop invariant
   4819     return getTruncateExpr(Op, Cast->getType());
   4820   }
   4821 
   4822   llvm_unreachable("Unknown SCEV type!");
   4823   return 0;
   4824 }
   4825 
   4826 /// getSCEVAtScope - This is a convenience function which does
   4827 /// getSCEVAtScope(getSCEV(V), L).
   4828 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
   4829   return getSCEVAtScope(getSCEV(V), L);
   4830 }
   4831 
   4832 /// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
   4833 /// following equation:
   4834 ///
   4835 ///     A * X = B (mod N)
   4836 ///
   4837 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of
   4838 /// A and B isn't important.
   4839 ///
   4840 /// If the equation does not have a solution, SCEVCouldNotCompute is returned.
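         /// For example, with BW = 8, A = 4, B = 8: D = gcd(4, 2^8) = 4, B is
         /// divisible by D, and the minimum unsigned root is X = 2, since
         /// 4 * 2 == 8 (mod 256); the remaining roots 66, 130, and 194 are larger.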
   4841 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
   4842                                                ScalarEvolution &SE) {
   4843   uint32_t BW = A.getBitWidth();
   4844   assert(BW == B.getBitWidth() && "Bit widths must be the same.");
   4845   assert(A != 0 && "A must be non-zero.");
   4846 
   4847   // 1. D = gcd(A, N)
   4848   //
   4849   // The gcd of A and N may have only one prime factor: 2. The number of
   4850   // trailing zeros in A is its multiplicity
   4851   uint32_t Mult2 = A.countTrailingZeros();
   4852   // D = 2^Mult2
   4853 
   4854   // 2. Check if B is divisible by D.
   4855   //
   4856   // B is divisible by D if and only if the multiplicity of prime factor 2 for B
   4857   // is not less than multiplicity of this prime factor for D.
   4858   if (B.countTrailingZeros() < Mult2)
   4859     return SE.getCouldNotCompute();
   4860 
   4861   // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
   4862   // modulo (N / D).
   4863   //
   4864   // (N / D) may need BW+1 bits in its representation.  Hence, we'll use this
   4865   // bit width during computations.
   4866   APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
   4867   APInt Mod(BW + 1, 0);
   4868   Mod.setBit(BW - Mult2);  // Mod = N / D
   4869   APInt I = AD.multiplicativeInverse(Mod);
   4870 
   4871   // 4. Compute the minimum unsigned root of the equation:
   4872   // I * (B / D) mod (N / D)
   4873   APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
   4874 
   4875   // The result is guaranteed to be less than 2^BW so we may truncate it to BW
   4876   // bits.
   4877   return SE.getConstant(Result.trunc(BW));
   4878 }
   4879 
   4880 /// SolveQuadraticEquation - Find the roots of the quadratic equation for the
   4881 /// given quadratic chrec {L,+,M,+,N}.  This returns either the two roots (which
   4882 /// might be the same) or two SCEVCouldNotCompute objects.
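         /// The value of the chrec at iteration X is L + M*X + N*X*(X-1)/2, i.e. the
         /// polynomial (N/2)*X^2 + (M - N/2)*X + L.  For example, the chrec
         /// {4,+,-4,+,2} corresponds to X^2 - 5*X + 4, whose roots are X = 1 and X = 4.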
   4883 ///
   4884 static std::pair<const SCEV *,const SCEV *>
   4885 SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
   4886   assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
   4887   const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
   4888   const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
   4889   const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
   4890 
   4891   // We currently can only solve this if the coefficients are constants.
   4892   if (!LC || !MC || !NC) {
   4893     const SCEV *CNC = SE.getCouldNotCompute();
   4894     return std::make_pair(CNC, CNC);
   4895   }
   4896 
   4897   uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
   4898   const APInt &L = LC->getValue()->getValue();
   4899   const APInt &M = MC->getValue()->getValue();
   4900   const APInt &N = NC->getValue()->getValue();
   4901   APInt Two(BitWidth, 2);
   4902   APInt Four(BitWidth, 4);
   4903 
   4904   {
   4905     using namespace APIntOps;
   4906     const APInt& C = L;
   4907     // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
   4908     // The B coefficient is M-N/2
   4909     APInt B(M);
   4910     B -= sdiv(N,Two);
   4911 
   4912     // The A coefficient is N/2
   4913     APInt A(N.sdiv(Two));
   4914 
   4915     // Compute the B^2-4ac term.
   4916     APInt SqrtTerm(B);
   4917     SqrtTerm *= B;
   4918     SqrtTerm -= Four * (A * C);
   4919 
   4920     // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
   4921     // integer value or else APInt::sqrt() will assert.
   4922     APInt SqrtVal(SqrtTerm.sqrt());
   4923 
   4924     // Compute the two solutions for the quadratic formula.
   4925     // The divisions must be performed as signed divisions.
   4926     APInt NegB(-B);
   4927     APInt TwoA( A << 1 );
   4928     if (TwoA.isMinValue()) {
   4929       const SCEV *CNC = SE.getCouldNotCompute();
   4930       return std::make_pair(CNC, CNC);
   4931     }
   4932 
   4933     LLVMContext &Context = SE.getContext();
   4934 
   4935     ConstantInt *Solution1 =
   4936       ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
   4937     ConstantInt *Solution2 =
   4938       ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));
   4939 
   4940     return std::make_pair(SE.getConstant(Solution1),
   4941                           SE.getConstant(Solution2));
    4942   } // end of block using APIntOps
   4943 }
   4944 
   4945 /// HowFarToZero - Return the number of times a backedge comparing the specified
   4946 /// value to zero will execute.  If not computable, return CouldNotCompute.
   4947 ///
    4948 /// This is only used for loops with an "x != y" exit test. The exit condition
    4949 /// is now expressed as a single expression, V = x-y. So the exit test is
    4950 /// effectively V != 0.  We know, and take advantage of, the fact that this
    4951 /// expression is only used in a compare-against-zero context.
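         /// For example, for V = {10,+,-1} (a value that starts at 10 and steps by
         /// -1 each iteration), the value first reaches zero after 10 backedge
         /// executions, so 10 is returned.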
   4952 ScalarEvolution::BackedgeTakenInfo
   4953 ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
   4954   // If the value is a constant
   4955   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
   4956     // If the value is already zero, the branch will execute zero times.
   4957     if (C->getValue()->isZero()) return C;
   4958     return getCouldNotCompute();  // Otherwise it will loop infinitely.
   4959   }
   4960 
   4961   const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
   4962   if (!AddRec || AddRec->getLoop() != L)
   4963     return getCouldNotCompute();
   4964 
   4965   // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
   4966   // the quadratic equation to solve it.
   4967   if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
   4968     std::pair<const SCEV *,const SCEV *> Roots =
   4969       SolveQuadraticEquation(AddRec, *this);
   4970     const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
   4971     const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
   4972     if (R1 && R2) {
   4973 #if 0
   4974       dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1
   4975              << "  sol#2: " << *R2 << "\n";
   4976 #endif
   4977       // Pick the smallest positive root value.
   4978       if (ConstantInt *CB =
   4979           dyn_cast<ConstantInt>(ConstantExpr::getICmp(CmpInst::ICMP_ULT,
   4980                                                       R1->getValue(),
   4981                                                       R2->getValue()))) {
   4982         if (CB->getZExtValue() == false)
   4983           std::swap(R1, R2);   // R1 is the minimum root now.
   4984 
   4985         // We can only use this value if the chrec ends up with an exact zero
   4986         // value at this index.  When solving for "X*X != 5", for example, we
   4987         // should not accept a root of 2.
   4988         const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
   4989         if (Val->isZero())
   4990           return R1;  // We found a quadratic root!
   4991       }
   4992     }
   4993     return getCouldNotCompute();
   4994   }
   4995 
   4996   // Otherwise we can only handle this if it is affine.
   4997   if (!AddRec->isAffine())
   4998     return getCouldNotCompute();
   4999 
   5000   // If this is an affine expression, the execution count of this branch is
   5001   // the minimum unsigned root of the following equation:
   5002   //
   5003   //     Start + Step*N = 0 (mod 2^BW)
   5004   //
   5005   // equivalent to:
   5006   //
   5007   //             Step*N = -Start (mod 2^BW)
   5008   //
   5009   // where BW is the common bit width of Start and Step.
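           // For example, for {6,+,-2} the equation is -2 * N = -6 (mod 2^BW),
           // whose minimum unsigned solution is N = 3.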
   5010 
   5011   // Get the initial value for the loop.
   5012   const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
   5013   const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
   5014 
   5015   // For now we handle only constant steps.
   5016   //
   5017   // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
   5018   // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
   5019   // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
   5020   // We have not yet seen any such cases.
   5021   const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
   5022   if (StepC == 0)
   5023     return getCouldNotCompute();
   5024 
   5025   // For positive steps (counting up until unsigned overflow):
   5026   //   N = -Start/Step (as unsigned)
   5027   // For negative steps (counting down to zero):
   5028   //   N = Start/-Step
   5029   // First compute the unsigned distance from zero in the direction of Step.
   5030   bool CountDown = StepC->getValue()->getValue().isNegative();
   5031   const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
   5032 
    5033   // Handle unitary steps, which cannot wrap around.
   5034   // 1*N = -Start; -1*N = Start (mod 2^BW), so:
   5035   //   N = Distance (as unsigned)
   5036   if (StepC->getValue()->equalsInt(1) || StepC->getValue()->isAllOnesValue())
   5037     return Distance;
   5038 
    5039   // If the recurrence is known not to wrap around, unsigned divide computes the
   5040   // back edge count. We know that the value will either become zero (and thus
   5041   // the loop terminates), that the loop will terminate through some other exit
   5042   // condition first, or that the loop has undefined behavior.  This means
   5043   // we can't "miss" the exit value, even with nonunit stride.
   5044   //
    5045   // FIXME: Prove that loops always exhibit *acceptable* undefined
   5046   // behavior. Loops must exhibit defined behavior until a wrapped value is
   5047   // actually used. So the trip count computed by udiv could be smaller than the
   5048   // number of well-defined iterations.
   5049   if (AddRec->getNoWrapFlags(SCEV::FlagNW))
   5050     // FIXME: We really want an "isexact" bit for udiv.
   5051     return getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
   5052 
   5053   // Then, try to solve the above equation provided that Start is constant.
   5054   if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
   5055     return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
   5056                                         -StartC->getValue()->getValue(),
   5057                                         *this);
   5058   return getCouldNotCompute();
   5059 }
   5060 
   5061 /// HowFarToNonZero - Return the number of times a backedge checking the
   5062 /// specified value for nonzero will execute.  If not computable, return
    5063 /// CouldNotCompute.
   5064 ScalarEvolution::BackedgeTakenInfo
   5065 ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
   5066   // Loops that look like: while (X == 0) are very strange indeed.  We don't
   5067   // handle them yet except for the trivial case.  This could be expanded in the
   5068   // future as needed.
   5069 
   5070   // If the value is a constant, check to see if it is known to be non-zero
   5071   // already.  If so, the backedge will execute zero times.
   5072   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
   5073     if (!C->getValue()->isNullValue())
   5074       return getConstant(C->getType(), 0);
   5075     return getCouldNotCompute();  // Otherwise it will loop infinitely.
   5076   }
   5077 
   5078   // We could implement others, but I really doubt anyone writes loops like
   5079   // this, and if they did, they would already be constant folded.
   5080   return getCouldNotCompute();
   5081 }
   5082 
   5083 /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
   5084 /// (which may not be an immediate predecessor) which has exactly one
   5085 /// successor from which BB is reachable, or null if no such block is
   5086 /// found.
   5087 ///
   5088 std::pair<BasicBlock *, BasicBlock *>
   5089 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
   5090   // If the block has a unique predecessor, then there is no path from the
   5091   // predecessor to the block that does not go through the direct edge
   5092   // from the predecessor to the block.
   5093   if (BasicBlock *Pred = BB->getSinglePredecessor())
   5094     return std::make_pair(Pred, BB);
   5095 
   5096   // A loop's header is defined to be a block that dominates the loop.
   5097   // If the header has a unique predecessor outside the loop, it must be
   5098   // a block that has exactly one successor that can reach the loop.
   5099   if (Loop *L = LI->getLoopFor(BB))
   5100     return std::make_pair(L->getLoopPredecessor(), L->getHeader());
   5101 
   5102   return std::pair<BasicBlock *, BasicBlock *>();
   5103 }
   5104 
   5105 /// HasSameValue - SCEV structural equivalence is usually sufficient for
    5106 /// testing whether two expressions are equal; however, for the purposes of
   5107 /// looking for a condition guarding a loop, it can be useful to be a little
   5108 /// more general, since a front-end may have replicated the controlling
   5109 /// expression.
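         /// For example, two identical instructions that SCEV cannot otherwise model
         /// (and that do not read memory) map to distinct SCEVUnknowns, yet they
         /// always compute the same value.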
   5110 ///
   5111 static bool HasSameValue(const SCEV *A, const SCEV *B) {
   5112   // Quick check to see if they are the same SCEV.
   5113   if (A == B) return true;
   5114 
   5115   // Otherwise, if they're both SCEVUnknown, it's possible that they hold
   5116   // two different instructions with the same value. Check for this case.
   5117   if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
   5118     if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
   5119       if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
   5120         if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
   5121           if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory())
   5122             return true;
   5123 
   5124   // Otherwise assume they may have a different value.
   5125   return false;
   5126 }
   5127 
   5128 /// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with
   5129 /// predicate Pred. Return true iff any changes were made.
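         /// For example, a constant left-hand side is moved to the right, so
         /// (1 u<= x) first becomes (x u>= 1) and is then tightened to (x != 0).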
   5130 ///
   5131 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
   5132                                            const SCEV *&LHS, const SCEV *&RHS) {
   5133   bool Changed = false;
   5134 
   5135   // Canonicalize a constant to the right side.
   5136   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
   5137     // Check for both operands constant.
   5138     if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
   5139       if (ConstantExpr::getICmp(Pred,
   5140                                 LHSC->getValue(),
   5141                                 RHSC->getValue())->isNullValue())
   5142         goto trivially_false;
   5143       else
   5144         goto trivially_true;
   5145     }
   5146     // Otherwise swap the operands to put the constant on the right.
   5147     std::swap(LHS, RHS);
   5148     Pred = ICmpInst::getSwappedPredicate(Pred);
   5149     Changed = true;
   5150   }
   5151 
   5152   // If we're comparing an addrec with a value which is loop-invariant in the
   5153   // addrec's loop, put the addrec on the left. Also make a dominance check,
   5154   // as both operands could be addrecs loop-invariant in each other's loop.
   5155   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
   5156     const Loop *L = AR->getLoop();
   5157     if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
   5158       std::swap(LHS, RHS);
   5159       Pred = ICmpInst::getSwappedPredicate(Pred);
   5160       Changed = true;
   5161     }
   5162   }
   5163 
   5164   // If there's a constant operand, canonicalize comparisons with boundary
   5165   // cases, and canonicalize *-or-equal comparisons to regular comparisons.
   5166   if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
   5167     const APInt &RA = RC->getValue()->getValue();
   5168     switch (Pred) {
   5169     default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
   5170     case ICmpInst::ICMP_EQ:
   5171     case ICmpInst::ICMP_NE:
   5172       break;
   5173     case ICmpInst::ICMP_UGE:
   5174       if ((RA - 1).isMinValue()) {
   5175         Pred = ICmpInst::ICMP_NE;
   5176         RHS = getConstant(RA - 1);
   5177         Changed = true;
   5178         break;
   5179       }
   5180       if (RA.isMaxValue()) {
   5181         Pred = ICmpInst::ICMP_EQ;
   5182         Changed = true;
   5183         break;
   5184       }
   5185       if (RA.isMinValue()) goto trivially_true;
   5186 
   5187       Pred = ICmpInst::ICMP_UGT;
   5188       RHS = getConstant(RA - 1);
   5189       Changed = true;
   5190       break;
   5191     case ICmpInst::ICMP_ULE:
   5192       if ((RA + 1).isMaxValue()) {
   5193         Pred = ICmpInst::ICMP_NE;
   5194         RHS = getConstant(RA + 1);
   5195         Changed = true;
   5196         break;
   5197       }
   5198       if (RA.isMinValue()) {
   5199         Pred = ICmpInst::ICMP_EQ;
   5200         Changed = true;
   5201         break;
   5202       }
   5203       if (RA.isMaxValue()) goto trivially_true;
   5204 
   5205       Pred = ICmpInst::ICMP_ULT;
   5206       RHS = getConstant(RA + 1);
   5207       Changed = true;
   5208       break;
   5209     case ICmpInst::ICMP_SGE:
   5210       if ((RA - 1).isMinSignedValue()) {
   5211         Pred = ICmpInst::ICMP_NE;
   5212         RHS = getConstant(RA - 1);
   5213         Changed = true;
   5214         break;
   5215       }
   5216       if (RA.isMaxSignedValue()) {
   5217         Pred = ICmpInst::ICMP_EQ;
   5218         Changed = true;
   5219         break;
   5220       }
   5221       if (RA.isMinSignedValue()) goto trivially_true;
   5222 
   5223       Pred = ICmpInst::ICMP_SGT;
   5224       RHS = getConstant(RA - 1);
   5225       Changed = true;
   5226       break;
   5227     case ICmpInst::ICMP_SLE:
   5228       if ((RA + 1).isMaxSignedValue()) {
   5229         Pred = ICmpInst::ICMP_NE;
   5230         RHS = getConstant(RA + 1);
   5231         Changed = true;
   5232         break;
   5233       }
   5234       if (RA.isMinSignedValue()) {
   5235         Pred = ICmpInst::ICMP_EQ;
   5236         Changed = true;
   5237         break;
   5238       }
   5239       if (RA.isMaxSignedValue()) goto trivially_true;
   5240 
   5241       Pred = ICmpInst::ICMP_SLT;
   5242       RHS = getConstant(RA + 1);
   5243       Changed = true;
   5244       break;
   5245     case ICmpInst::ICMP_UGT:
   5246       if (RA.isMinValue()) {
   5247         Pred = ICmpInst::ICMP_NE;
   5248         Changed = true;
   5249         break;
   5250       }
   5251       if ((RA + 1).isMaxValue()) {
   5252         Pred = ICmpInst::ICMP_EQ;
   5253         RHS = getConstant(RA + 1);
   5254         Changed = true;
   5255         break;
   5256       }
   5257       if (RA.isMaxValue()) goto trivially_false;
   5258       break;
   5259     case ICmpInst::ICMP_ULT:
   5260       if (RA.isMaxValue()) {
   5261         Pred = ICmpInst::ICMP_NE;
   5262         Changed = true;
   5263         break;
   5264       }
   5265       if ((RA - 1).isMinValue()) {
   5266         Pred = ICmpInst::ICMP_EQ;
   5267         RHS = getConstant(RA - 1);
   5268         Changed = true;
   5269         break;
   5270       }
   5271       if (RA.isMinValue()) goto trivially_false;
   5272       break;
   5273     case ICmpInst::ICMP_SGT:
   5274       if (RA.isMinSignedValue()) {
   5275         Pred = ICmpInst::ICMP_NE;
   5276         Changed = true;
   5277         break;
   5278       }
   5279       if ((RA + 1).isMaxSignedValue()) {
   5280         Pred = ICmpInst::ICMP_EQ;
   5281         RHS = getConstant(RA + 1);
   5282         Changed = true;
   5283         break;
   5284       }
   5285       if (RA.isMaxSignedValue()) goto trivially_false;
   5286       break;
   5287     case ICmpInst::ICMP_SLT:
   5288       if (RA.isMaxSignedValue()) {
   5289         Pred = ICmpInst::ICMP_NE;
   5290         Changed = true;
   5291         break;
   5292       }
   5293       if ((RA - 1).isMinSignedValue()) {
    5294         Pred = ICmpInst::ICMP_EQ;
    5295         RHS = getConstant(RA - 1);
    5296         Changed = true;
    5297         break;
   5298       }
   5299       if (RA.isMinSignedValue()) goto trivially_false;
   5300       break;
   5301     }
   5302   }
   5303 
   5304   // Check for obvious equality.
   5305   if (HasSameValue(LHS, RHS)) {
   5306     if (ICmpInst::isTrueWhenEqual(Pred))
   5307       goto trivially_true;
   5308     if (ICmpInst::isFalseWhenEqual(Pred))
   5309       goto trivially_false;
   5310   }
   5311 
   5312   // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
   5313   // adding or subtracting 1 from one of the operands.
   5314   switch (Pred) {
   5315   case ICmpInst::ICMP_SLE:
   5316     if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) {
   5317       RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
   5318                        SCEV::FlagNSW);
   5319       Pred = ICmpInst::ICMP_SLT;
   5320       Changed = true;
   5321     } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) {
   5322       LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
   5323                        SCEV::FlagNSW);
   5324       Pred = ICmpInst::ICMP_SLT;
   5325       Changed = true;
   5326     }
   5327     break;
   5328   case ICmpInst::ICMP_SGE:
   5329     if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) {
   5330       RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
   5331                        SCEV::FlagNSW);
   5332       Pred = ICmpInst::ICMP_SGT;
   5333       Changed = true;
   5334     } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) {
   5335       LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
   5336                        SCEV::FlagNSW);
   5337       Pred = ICmpInst::ICMP_SGT;
   5338       Changed = true;
   5339     }
   5340     break;
   5341   case ICmpInst::ICMP_ULE:
   5342     if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) {
   5343       RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
   5344                        SCEV::FlagNUW);
   5345       Pred = ICmpInst::ICMP_ULT;
   5346       Changed = true;
   5347     } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) {
   5348       LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
   5349                        SCEV::FlagNUW);
   5350       Pred = ICmpInst::ICMP_ULT;
   5351       Changed = true;
   5352     }
   5353     break;
   5354   case ICmpInst::ICMP_UGE:
   5355     if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) {
   5356       RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
   5357                        SCEV::FlagNUW);
   5358       Pred = ICmpInst::ICMP_UGT;
   5359       Changed = true;
   5360     } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) {
   5361       LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
   5362                        SCEV::FlagNUW);
   5363       Pred = ICmpInst::ICMP_UGT;
   5364       Changed = true;
   5365     }
   5366     break;
   5367   default:
   5368     break;
   5369   }
   5370 
   5371   // TODO: More simplifications are possible here.
   5372 
   5373   return Changed;
   5374 
   5375 trivially_true:
   5376   // Return 0 == 0.
   5377   LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
   5378   Pred = ICmpInst::ICMP_EQ;
   5379   return true;
   5380 
   5381 trivially_false:
   5382   // Return 0 != 0.
   5383   LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
   5384   Pred = ICmpInst::ICMP_NE;
   5385   return true;
   5386 }
   5387 
   5388 bool ScalarEvolution::isKnownNegative(const SCEV *S) {
   5389   return getSignedRange(S).getSignedMax().isNegative();
   5390 }
   5391 
   5392 bool ScalarEvolution::isKnownPositive(const SCEV *S) {
   5393   return getSignedRange(S).getSignedMin().isStrictlyPositive();
   5394 }
   5395 
   5396 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
   5397   return !getSignedRange(S).getSignedMin().isNegative();
   5398 }
   5399 
   5400 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
   5401   return !getSignedRange(S).getSignedMax().isStrictlyPositive();
   5402 }
   5403 
   5404 bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
   5405   return isKnownNegative(S) || isKnownPositive(S);
   5406 }
   5407 
   5408 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
   5409                                        const SCEV *LHS, const SCEV *RHS) {
   5410   // Canonicalize the inputs first.
   5411   (void)SimplifyICmpOperands(Pred, LHS, RHS);
   5412 
   5413   // If LHS or RHS is an addrec, check to see if the condition is true in
   5414   // every iteration of the loop.
   5415   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
   5416     if (isLoopEntryGuardedByCond(
   5417           AR->getLoop(), Pred, AR->getStart(), RHS) &&
   5418         isLoopBackedgeGuardedByCond(
   5419           AR->getLoop(), Pred, AR->getPostIncExpr(*this), RHS))
   5420       return true;
   5421   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS))
   5422     if (isLoopEntryGuardedByCond(
   5423           AR->getLoop(), Pred, LHS, AR->getStart()) &&
   5424         isLoopBackedgeGuardedByCond(
   5425           AR->getLoop(), Pred, LHS, AR->getPostIncExpr(*this)))
   5426       return true;
   5427 
   5428   // Otherwise see what can be done with known constant ranges.
   5429   return isKnownPredicateWithRanges(Pred, LHS, RHS);
   5430 }
   5431 
   5432 bool
   5433 ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
   5434                                             const SCEV *LHS, const SCEV *RHS) {
   5435   if (HasSameValue(LHS, RHS))
   5436     return ICmpInst::isTrueWhenEqual(Pred);
   5437 
   5438   // This code is split out from isKnownPredicate because it is called from
   5439   // within isLoopEntryGuardedByCond.
   5440   switch (Pred) {
   5441   default:
   5442     llvm_unreachable("Unexpected ICmpInst::Predicate value!");
   5443     break;
   5444   case ICmpInst::ICMP_SGT:
   5445     Pred = ICmpInst::ICMP_SLT;
   5446     std::swap(LHS, RHS);
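             // FALL THROUGH into the ICMP_SLT case.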
   5447   case ICmpInst::ICMP_SLT: {
   5448     ConstantRange LHSRange = getSignedRange(LHS);
   5449     ConstantRange RHSRange = getSignedRange(RHS);
   5450     if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
   5451       return true;
   5452     if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
   5453       return false;
   5454     break;
   5455   }
   5456   case ICmpInst::ICMP_SGE:
   5457     Pred = ICmpInst::ICMP_SLE;
   5458     std::swap(LHS, RHS);
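             // FALL THROUGH into the ICMP_SLE case.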
   5459   case ICmpInst::ICMP_SLE: {
   5460     ConstantRange LHSRange = getSignedRange(LHS);
   5461     ConstantRange RHSRange = getSignedRange(RHS);
   5462     if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
   5463       return true;
   5464     if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
   5465       return false;
   5466     break;
   5467   }
   5468   case ICmpInst::ICMP_UGT:
   5469     Pred = ICmpInst::ICMP_ULT;
   5470     std::swap(LHS, RHS);
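             // FALL THROUGH into the ICMP_ULT case.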
   5471   case ICmpInst::ICMP_ULT: {
   5472     ConstantRange LHSRange = getUnsignedRange(LHS);
   5473     ConstantRange RHSRange = getUnsignedRange(RHS);
   5474     if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
   5475       return true;
   5476     if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
   5477       return false;
   5478     break;
   5479   }
   5480   case ICmpInst::ICMP_UGE:
   5481     Pred = ICmpInst::ICMP_ULE;
   5482     std::swap(LHS, RHS);
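             // FALL THROUGH into the ICMP_ULE case.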
   5483   case ICmpInst::ICMP_ULE: {
   5484     ConstantRange LHSRange = getUnsignedRange(LHS);
   5485     ConstantRange RHSRange = getUnsignedRange(RHS);
   5486     if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
   5487       return true;
   5488     if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
   5489       return false;
   5490     break;
   5491   }
   5492   case ICmpInst::ICMP_NE: {
   5493     if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
   5494       return true;
   5495     if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
   5496       return true;
   5497 
   5498     const SCEV *Diff = getMinusSCEV(LHS, RHS);
   5499     if (isKnownNonZero(Diff))
   5500       return true;
   5501     break;
   5502   }
   5503   case ICmpInst::ICMP_EQ:
   5504     // The check at the top of the function catches the case where
   5505     // the values are known to be equal.
   5506     break;
   5507   }
   5508   return false;
   5509 }
   5510 
   5511 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
   5512 /// protected by a conditional between LHS and RHS.  This is used to
    5513 /// eliminate casts.
   5514 bool
   5515 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
   5516                                              ICmpInst::Predicate Pred,
   5517                                              const SCEV *LHS, const SCEV *RHS) {
   5518   // Interpret a null as meaning no loop, where there is obviously no guard
   5519   // (interprocedural conditions notwithstanding).
   5520   if (!L) return true;
   5521 
   5522   BasicBlock *Latch = L->getLoopLatch();
   5523   if (!Latch)
   5524     return false;
   5525 
   5526   BranchInst *LoopContinuePredicate =
   5527     dyn_cast<BranchInst>(Latch->getTerminator());
   5528   if (!LoopContinuePredicate ||
   5529       LoopContinuePredicate->isUnconditional())
   5530     return false;
   5531 
   5532   return isImpliedCond(Pred, LHS, RHS,
   5533                        LoopContinuePredicate->getCondition(),
   5534                        LoopContinuePredicate->getSuccessor(0) != L->getHeader());
   5535 }
   5536 
   5537 /// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
   5538 /// by a conditional between LHS and RHS.  This is used to help avoid max
   5539 /// expressions in loop trip counts, and to eliminate casts.
   5540 bool
   5541 ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
   5542                                           ICmpInst::Predicate Pred,
   5543                                           const SCEV *LHS, const SCEV *RHS) {
   5544   // Interpret a null as meaning no loop, where there is obviously no guard
   5545   // (interprocedural conditions notwithstanding).
   5546   if (!L) return false;
   5547 
   5548   // Starting at the loop predecessor, climb up the predecessor chain, as long
   5549   // as there are predecessors that can be found that have unique successors
   5550   // leading to the original header.
   5551   for (std::pair<BasicBlock *, BasicBlock *>
   5552          Pair(L->getLoopPredecessor(), L->getHeader());
   5553        Pair.first;
   5554        Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
   5555 
   5556     BranchInst *LoopEntryPredicate =
   5557       dyn_cast<BranchInst>(Pair.first->getTerminator());
   5558     if (!LoopEntryPredicate ||
   5559         LoopEntryPredicate->isUnconditional())
   5560       continue;
   5561 
   5562     if (isImpliedCond(Pred, LHS, RHS,
   5563                       LoopEntryPredicate->getCondition(),
   5564                       LoopEntryPredicate->getSuccessor(0) != Pair.second))
   5565       return true;
   5566   }
   5567 
   5568   return false;
   5569 }
   5570 
   5571 /// isImpliedCond - Test whether the condition described by Pred, LHS,
   5572 /// and RHS is true whenever the given Cond value evaluates to true.
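         /// For example, the condition (i < n) is implied by the compound condition
         /// ((i < n) && (j < m)) evaluating to true.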
   5573 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
   5574                                     const SCEV *LHS, const SCEV *RHS,
   5575                                     Value *FoundCondValue,
   5576                                     bool Inverse) {
   5577   // Recursively handle And and Or conditions.
   5578   if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
   5579     if (BO->getOpcode() == Instruction::And) {
   5580       if (!Inverse)
   5581         return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
   5582                isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
   5583     } else if (BO->getOpcode() == Instruction::Or) {
   5584       if (Inverse)
   5585         return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
   5586                isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
   5587     }
   5588   }
   5589 
   5590   ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
   5591   if (!ICI) return false;
   5592 
   5593   // Bail if the ICmp's operands' types are wider than the needed type
   5594   // before attempting to call getSCEV on them. This avoids infinite
   5595   // recursion, since the analysis of widening casts can require loop
   5596   // exit condition information for overflow checking, which would
   5597   // lead back here.
   5598   if (getTypeSizeInBits(LHS->getType()) <
   5599       getTypeSizeInBits(ICI->getOperand(0)->getType()))
   5600     return false;
   5601 
   5602   // Now that we found a conditional branch that dominates the loop, check to
   5603   // see if it is the comparison we are looking for.
   5604   ICmpInst::Predicate FoundPred;
   5605   if (Inverse)
   5606     FoundPred = ICI->getInversePredicate();
   5607   else
   5608     FoundPred = ICI->getPredicate();
   5609 
   5610   const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
   5611   const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
   5612 
   5613   // Balance the types. The case where FoundLHS' type is wider than
   5614   // LHS' type is checked for above.
   5615   if (getTypeSizeInBits(LHS->getType()) >
   5616       getTypeSizeInBits(FoundLHS->getType())) {
   5617     if (CmpInst::isSigned(Pred)) {
   5618       FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
   5619       FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
   5620     } else {
   5621       FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
   5622       FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
   5623     }
   5624   }
   5625 
   5626   // Canonicalize the query to match the way instcombine will have
   5627   // canonicalized the comparison.
   5628   if (SimplifyICmpOperands(Pred, LHS, RHS))
   5629     if (LHS == RHS)
   5630       return CmpInst::isTrueWhenEqual(Pred);
   5631   if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
   5632     if (FoundLHS == FoundRHS)
   5633       return CmpInst::isFalseWhenEqual(Pred);
   5634 
   5635   // Check to see if we can make the LHS or RHS match.
   5636   if (LHS == FoundRHS || RHS == FoundLHS) {
   5637     if (isa<SCEVConstant>(RHS)) {
   5638       std::swap(FoundLHS, FoundRHS);
   5639       FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
   5640     } else {
   5641       std::swap(LHS, RHS);
   5642       Pred = ICmpInst::getSwappedPredicate(Pred);
   5643     }
   5644   }
   5645 
   5646   // Check whether the found predicate is the same as the desired predicate.
   5647   if (FoundPred == Pred)
   5648     return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);
   5649 
   5650   // Check whether swapping the found predicate makes it the same as the
   5651   // desired predicate.
   5652   if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
   5653     if (isa<SCEVConstant>(RHS))
   5654       return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
   5655     else
   5656       return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
   5657                                    RHS, LHS, FoundLHS, FoundRHS);
   5658   }
   5659 
   5660   // Check whether the actual condition is beyond sufficient.
   5661   if (FoundPred == ICmpInst::ICMP_EQ)
   5662     if (ICmpInst::isTrueWhenEqual(Pred))
   5663       if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
   5664         return true;
   5665   if (Pred == ICmpInst::ICMP_NE)
   5666     if (!ICmpInst::isTrueWhenEqual(FoundPred))
   5667       if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
   5668         return true;
   5669 
   5670   // Otherwise assume the worst.
   5671   return false;
   5672 }
   5673 
   5674 /// isImpliedCondOperands - Test whether the condition described by Pred,
   5675 /// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
   5676 /// and FoundRHS is true.
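         /// In addition to the direct check, the implication is also tried with both
         /// found operands bitwise-negated and swapped, since ~x == -1 - x reverses
         /// the order of a comparison (x < y iff ~y < ~x).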
   5677 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
   5678                                             const SCEV *LHS, const SCEV *RHS,
   5679                                             const SCEV *FoundLHS,
   5680                                             const SCEV *FoundRHS) {
   5681   return isImpliedCondOperandsHelper(Pred, LHS, RHS,
   5682                                      FoundLHS, FoundRHS) ||
   5683          // ~x < ~y --> x > y
   5684          isImpliedCondOperandsHelper(Pred, LHS, RHS,
   5685                                      getNotSCEV(FoundRHS),
   5686                                      getNotSCEV(FoundLHS));
   5687 }
   5688 
   5689 /// isImpliedCondOperandsHelper - Test whether the condition described by
   5690 /// Pred, LHS, and RHS is true whenever the condition described by Pred,
   5691 /// FoundLHS, and FoundRHS is true.
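         /// For example, to show LHS s< RHS from FoundLHS s< FoundRHS, it suffices
         /// that LHS s<= FoundLHS and RHS s>= FoundRHS are both known.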
   5692 bool
   5693 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
   5694                                              const SCEV *LHS, const SCEV *RHS,
   5695                                              const SCEV *FoundLHS,
   5696                                              const SCEV *FoundRHS) {
   5697   switch (Pred) {
   5698   default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
   5699   case ICmpInst::ICMP_EQ:
   5700   case ICmpInst::ICMP_NE:
   5701     if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
   5702       return true;
   5703     break;
   5704   case ICmpInst::ICMP_SLT:
   5705   case ICmpInst::ICMP_SLE:
   5706     if (isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
   5707         isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, RHS, FoundRHS))
   5708       return true;
   5709     break;
   5710   case ICmpInst::ICMP_SGT:
   5711   case ICmpInst::ICMP_SGE:
   5712     if (isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
   5713         isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, RHS, FoundRHS))
   5714       return true;
   5715     break;
   5716   case ICmpInst::ICMP_ULT:
   5717   case ICmpInst::ICMP_ULE:
   5718     if (isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
   5719         isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, RHS, FoundRHS))
   5720       return true;
   5721     break;
   5722   case ICmpInst::ICMP_UGT:
   5723   case ICmpInst::ICMP_UGE:
   5724     if (isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
   5725         isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, RHS, FoundRHS))
   5726       return true;
   5727     break;
   5728   }
   5729 
   5730   return false;
   5731 }
   5732 
   5733 /// getBECount - Subtract the end and start values and divide by the step,
   5734 /// rounding up, to get the number of times the backedge is executed. Return
   5735 /// CouldNotCompute if an intermediate computation overflows.
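         /// For example, with Start = 0, End = 10, and Step = 3, the count is
         /// (10 - 0 + (3 - 1)) /u 3 = 4.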
   5736 const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
   5737                                         const SCEV *End,
   5738                                         const SCEV *Step,
   5739                                         bool NoWrap) {
   5740   assert(!isKnownNegative(Step) &&
   5741          "This code doesn't handle negative strides yet!");
   5742 
   5743   Type *Ty = Start->getType();
   5744 
   5745   // When Start == End, we have an exact BECount == 0. Short-circuit this case
   5746   // here because SCEV may not be able to determine that the unsigned division
   5747   // after rounding is zero.
   5748   if (Start == End)
   5749     return getConstant(Ty, 0);
   5750 
   5751   const SCEV *NegOne = getConstant(Ty, (uint64_t)-1);
   5752   const SCEV *Diff = getMinusSCEV(End, Start);
   5753   const SCEV *RoundUp = getAddExpr(Step, NegOne);
   5754 
   5755   // Add an adjustment to the difference between End and Start so that
   5756   // the division will effectively round up.
   5757   const SCEV *Add = getAddExpr(Diff, RoundUp);
   5758 
   5759   if (!NoWrap) {
   5760     // Check Add for unsigned overflow.
   5761     // TODO: More sophisticated things could be done here.
   5762     Type *WideTy = IntegerType::get(getContext(),
   5763                                           getTypeSizeInBits(Ty) + 1);
   5764     const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy);
   5765     const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy);
   5766     const SCEV *OperandExtendedAdd = getAddExpr(EDiff, ERoundUp);
   5767     if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
   5768       return getCouldNotCompute();
   5769   }
   5770 
   5771   return getUDivExpr(Add, Step);
   5772 }
   5773 
   5774 /// HowManyLessThans - Return the number of times a backedge containing the
   5775 /// specified less-than comparison will execute.  If not computable, return
   5776 /// CouldNotCompute.
   5777 ScalarEvolution::BackedgeTakenInfo
   5778 ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
   5779                                   const Loop *L, bool isSigned) {
   5780   // Only handle:  "ADDREC < LoopInvariant".
   5781   if (!isLoopInvariant(RHS, L)) return getCouldNotCompute();
   5782 
   5783   const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
   5784   if (!AddRec || AddRec->getLoop() != L)
   5785     return getCouldNotCompute();
   5786 
   5787   // Check to see if we have a flag which makes analysis easy.
   5788   bool NoWrap = isSigned ? AddRec->getNoWrapFlags(SCEV::FlagNSW) :
   5789                            AddRec->getNoWrapFlags(SCEV::FlagNUW);
   5790 
   5791   if (AddRec->isAffine()) {
   5792     unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
   5793     const SCEV *Step = AddRec->getStepRecurrence(*this);
   5794 
   5795     if (Step->isZero())
   5796       return getCouldNotCompute();
   5797     if (Step->isOne()) {
   5798       // With unit stride, the iteration never steps past the limit value.
   5799     } else if (isKnownPositive(Step)) {
   5800       // Test whether a positive iteration can step past the limit
   5801       // value and past the maximum value for its type in a single step.
   5802       // Note that it's not sufficient to check NoWrap here, because even
   5803       // though the value after a wrap is undefined, it's not undefined
   5804       // behavior, so if wrap does occur, the loop could either terminate or
   5805       // loop infinitely, but in either case, the loop is guaranteed to
   5806       // iterate at least until the iteration where the wrapping occurs.
   5807       const SCEV *One = getConstant(Step->getType(), 1);
   5808       if (isSigned) {
   5809         APInt Max = APInt::getSignedMaxValue(BitWidth);
   5810         if ((Max - getSignedRange(getMinusSCEV(Step, One)).getSignedMax())
   5811               .slt(getSignedRange(RHS).getSignedMax()))
   5812           return getCouldNotCompute();
   5813       } else {
   5814         APInt Max = APInt::getMaxValue(BitWidth);
   5815         if ((Max - getUnsignedRange(getMinusSCEV(Step, One)).getUnsignedMax())
   5816               .ult(getUnsignedRange(RHS).getUnsignedMax()))
   5817           return getCouldNotCompute();
   5818       }
   5819     } else
   5820       // TODO: Handle negative strides here and below.
   5821       return getCouldNotCompute();
   5822 
   5823     // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant
   5824     // m.  So, we count the number of iterations in which {n,+,s} < m is true.
   5825     // Note that we cannot simply return max(m-n,0)/s because it's not safe to
   5826     // treat m-n as signed nor unsigned due to overflow possibility.
   5827 
   5828     // First, we get the value of the LHS in the first iteration: n
   5829     const SCEV *Start = AddRec->getOperand(0);
   5830 
   5831     // Determine the minimum constant start value.
   5832     const SCEV *MinStart = getConstant(isSigned ?
   5833       getSignedRange(Start).getSignedMin() :
   5834       getUnsignedRange(Start).getUnsignedMin());
   5835 
   5836     // If we know that the condition is true in order to enter the loop,
   5837     // then we know that it will run exactly (m-n)/s times. Otherwise, we
   5838     // only know that it will execute (max(m,n)-n)/s times. In both cases,
   5839     // the division must round up.
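             // For example, with hypothetical values n == 0, m == 7 and s == 3, the
             // comparison holds for the values 0, 3 and 6, i.e. three times, which
             // is the rounded-up quotient (7+2)/3 rather than the truncated 7/3.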
   5840     const SCEV *End = RHS;
   5841     if (!isLoopEntryGuardedByCond(L,
   5842                                   isSigned ? ICmpInst::ICMP_SLT :
   5843                                              ICmpInst::ICMP_ULT,
   5844                                   getMinusSCEV(Start, Step), RHS))
   5845       End = isSigned ? getSMaxExpr(RHS, Start)
   5846                      : getUMaxExpr(RHS, Start);
   5847 
   5848     // Determine the maximum constant end value.
   5849     const SCEV *MaxEnd = getConstant(isSigned ?
   5850       getSignedRange(End).getSignedMax() :
   5851       getUnsignedRange(End).getUnsignedMax());
   5852 
   5853     // If MaxEnd is within a step of the maximum integer value in its type,
   5854     // adjust it down to the minimum value which would produce the same effect.
   5855     // This allows the subsequent ceiling division of (N+(step-1))/step to
   5856     // compute the correct value.
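             // For instance, with hypothetical unsigned i8 values MinStart == 0,
             // Step == 10 and MaxEnd == 250, the sum (End - Start) + (Step - 1)
             // would be 259 and no longer fit in i8; clamping MaxEnd to
             // 255 - 9 == 246 keeps the sum at 255 and still yields the same
             // rounded-up count of 25.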
   5857     const SCEV *StepMinusOne = getMinusSCEV(Step,
   5858                                             getConstant(Step->getType(), 1));
   5859     MaxEnd = isSigned ?
   5860       getSMinExpr(MaxEnd,
   5861                   getMinusSCEV(getConstant(APInt::getSignedMaxValue(BitWidth)),
   5862                                StepMinusOne)) :
   5863       getUMinExpr(MaxEnd,
   5864                   getMinusSCEV(getConstant(APInt::getMaxValue(BitWidth)),
   5865                                StepMinusOne));
   5866 
   5867     // Finally, we subtract these two values and divide, rounding up, to get
   5868     // the number of times the backedge is executed.
   5869     const SCEV *BECount = getBECount(Start, End, Step, NoWrap);
   5870 
   5871     // The maximum backedge count is similar, except using the minimum start
   5872     // value and the maximum end value.
   5873     // If we already have an exact constant BECount, use it instead.
   5874     const SCEV *MaxBECount = isa<SCEVConstant>(BECount) ? BECount
   5875       : getBECount(MinStart, MaxEnd, Step, NoWrap);
   5876 
    5877     // If the stride is non-constant and NoWrap is set, then
    5878     // getBECount(MinStart, MaxEnd) may return CouldNotCompute even though
    5879     // the exact BECount above succeeded. Fall back to the exact count so we
    5880     // don't report an invalid MaxBECount alongside a valid BECount.
   5881     if (isa<SCEVCouldNotCompute>(MaxBECount))
   5882       MaxBECount = BECount;
   5883 
   5884     return BackedgeTakenInfo(BECount, MaxBECount);
   5885   }
   5886 
   5887   return getCouldNotCompute();
   5888 }
   5889 
   5890 /// getNumIterationsInRange - Return the number of iterations of this loop that
   5891 /// produce values in the specified constant range.  Another way of looking at
   5892 /// this is that it returns the first iteration number where the value is not in
    5893 /// the range, thus computing the exit count. If the iteration count can't
   5894 /// be computed, an instance of SCEVCouldNotCompute is returned.
   5895 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
   5896                                                     ScalarEvolution &SE) const {
   5897   if (Range.isFullSet())  // Infinite loop.
   5898     return SE.getCouldNotCompute();
   5899 
   5900   // If the start is a non-zero constant, shift the range to simplify things.
   5901   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
   5902     if (!SC->getValue()->isZero()) {
   5903       SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
   5904       Operands[0] = SE.getConstant(SC->getType(), 0);
   5905       const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
   5906                                              getNoWrapFlags(FlagNW));
   5907       if (const SCEVAddRecExpr *ShiftedAddRec =
   5908             dyn_cast<SCEVAddRecExpr>(Shifted))
   5909         return ShiftedAddRec->getNumIterationsInRange(
   5910                            Range.subtract(SC->getValue()->getValue()), SE);
   5911       // This is strange and shouldn't happen.
   5912       return SE.getCouldNotCompute();
   5913     }
   5914 
   5915   // The only time we can solve this is when we have all constant indices.
   5916   // Otherwise, we cannot determine the overflow conditions.
   5917   for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
   5918     if (!isa<SCEVConstant>(getOperand(i)))
   5919       return SE.getCouldNotCompute();
   5920 
   5921 
    5922   // Okay, at this point we know that all elements of the chrec are constants
    5923   // and that the start element is zero.
   5924 
   5925   // First check to see if the range contains zero.  If not, the first
   5926   // iteration exits.
   5927   unsigned BitWidth = SE.getTypeSizeInBits(getType());
   5928   if (!Range.contains(APInt(BitWidth, 0)))
   5929     return SE.getConstant(getType(), 0);
   5930 
   5931   if (isAffine()) {
   5932     // If this is an affine expression then we have this situation:
   5933     //   Solve {0,+,A} in Range  ===  Ax in Range
   5934 
   5935     // We know that zero is in the range.  If A is positive then we know that
   5936     // the upper value of the range must be the first possible exit value.
   5937     // If A is negative then the lower of the range is the last possible loop
   5938     // value.  Also note that we already checked for a full range.
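             // A small worked instance with hypothetical values: for {0,+,3} and
             // Range == [0,10), End is 10-1 == 9, so ExitVal below is (9+3)/3 == 4;
             // iteration 4 produces 12, which is outside the range, while iteration
             // 3 still produces the in-range value 9.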
   5939     APInt One(BitWidth,1);
   5940     APInt A     = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
   5941     APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
   5942 
   5943     // The exit value should be (End+A)/A.
   5944     APInt ExitVal = (End + A).udiv(A);
   5945     ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);
   5946 
   5947     // Evaluate at the exit value.  If we really did fall out of the valid
   5948     // range, then we computed our trip count, otherwise wrap around or other
   5949     // things must have happened.
   5950     ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
   5951     if (Range.contains(Val->getValue()))
   5952       return SE.getCouldNotCompute();  // Something strange happened
   5953 
   5954     // Ensure that the previous value is in the range.  This is a sanity check.
   5955     assert(Range.contains(
   5956            EvaluateConstantChrecAtConstant(this,
   5957            ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
   5958            "Linear scev computation is off in a bad way!");
   5959     return SE.getConstant(ExitValue);
   5960   } else if (isQuadratic()) {
   5961     // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
   5962     // quadratic equation to solve it.  To do this, we must frame our problem in
   5963     // terms of figuring out when zero is crossed, instead of when
   5964     // Range.getUpper() is crossed.
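             // In other words, the question of when {0,+,M,+,N} reaches
             // Range.getUpper() is rewritten as asking when the shifted chrec
             // {-Upper,+,M,+,N} equals zero, which SolveQuadraticEquation below
             // can answer directly.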
   5965     SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
   5966     NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
   5967     const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(),
   5968                                              // getNoWrapFlags(FlagNW)
   5969                                              FlagAnyWrap);
   5970 
   5971     // Next, solve the constructed addrec
   5972     std::pair<const SCEV *,const SCEV *> Roots =
   5973       SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
   5974     const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
   5975     const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
   5976     if (R1) {
   5977       // Pick the smallest positive root value.
   5978       if (ConstantInt *CB =
   5979           dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
   5980                          R1->getValue(), R2->getValue()))) {
    5981         if (!CB->getZExtValue())
   5982           std::swap(R1, R2);   // R1 is the minimum root now.
   5983 
   5984         // Make sure the root is not off by one.  The returned iteration should
   5985         // not be in the range, but the previous one should be.  When solving
   5986         // for "X*X < 5", for example, we should not return a root of 2.
   5987         ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
   5988                                                              R1->getValue(),
   5989                                                              SE);
   5990         if (Range.contains(R1Val->getValue())) {
   5991           // The next iteration must be out of the range...
   5992           ConstantInt *NextVal =
   5993                 ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1);
   5994 
   5995           R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
   5996           if (!Range.contains(R1Val->getValue()))
   5997             return SE.getConstant(NextVal);
   5998           return SE.getCouldNotCompute();  // Something strange happened
   5999         }
   6000 
   6001         // If R1 was not in the range, then it is a good return value.  Make
   6002         // sure that R1-1 WAS in the range though, just in case.
   6003         ConstantInt *NextVal =
   6004                ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1);
   6005         R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
   6006         if (Range.contains(R1Val->getValue()))
   6007           return R1;
   6008         return SE.getCouldNotCompute();  // Something strange happened
   6009       }
   6010     }
   6011   }
   6012 
   6013   return SE.getCouldNotCompute();
   6014 }
   6015 
   6016 
   6017 
   6018 //===----------------------------------------------------------------------===//
   6019 //                   SCEVCallbackVH Class Implementation
   6020 //===----------------------------------------------------------------------===//
   6021 
   6022 void ScalarEvolution::SCEVCallbackVH::deleted() {
   6023   assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
   6024   if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
   6025     SE->ConstantEvolutionLoopExitValue.erase(PN);
   6026   SE->ValueExprMap.erase(getValPtr());
   6027   // this now dangles!
   6028 }
   6029 
   6030 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
   6031   assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
   6032 
   6033   // Forget all the expressions associated with users of the old value,
   6034   // so that future queries will recompute the expressions using the new
   6035   // value.
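           // For example, if the old value %x (hypothetical) had a user
           // %y = add i32 %x, 1 with a cached SCEV, that cached expression still
           // refers to %x and must be dropped so it is rebuilt in terms of the
           // replacement value on the next query.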
   6036   Value *Old = getValPtr();
   6037   SmallVector<User *, 16> Worklist;
   6038   SmallPtrSet<User *, 8> Visited;
   6039   for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
   6040        UI != UE; ++UI)
   6041     Worklist.push_back(*UI);
   6042   while (!Worklist.empty()) {
   6043     User *U = Worklist.pop_back_val();
   6044     // Deleting the Old value will cause this to dangle. Postpone
   6045     // that until everything else is done.
   6046     if (U == Old)
   6047       continue;
   6048     if (!Visited.insert(U))
   6049       continue;
   6050     if (PHINode *PN = dyn_cast<PHINode>(U))
   6051       SE->ConstantEvolutionLoopExitValue.erase(PN);
   6052     SE->ValueExprMap.erase(U);
   6053     for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
   6054          UI != UE; ++UI)
   6055       Worklist.push_back(*UI);
   6056   }
   6057   // Delete the Old value.
   6058   if (PHINode *PN = dyn_cast<PHINode>(Old))
   6059     SE->ConstantEvolutionLoopExitValue.erase(PN);
   6060   SE->ValueExprMap.erase(Old);
   6061   // this now dangles!
   6062 }
   6063 
   6064 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
   6065   : CallbackVH(V), SE(se) {}
   6066 
   6067 //===----------------------------------------------------------------------===//
   6068 //                   ScalarEvolution Class Implementation
   6069 //===----------------------------------------------------------------------===//
   6070 
   6071 ScalarEvolution::ScalarEvolution()
   6072   : FunctionPass(ID), FirstUnknown(0) {
   6073   initializeScalarEvolutionPass(*PassRegistry::getPassRegistry());
   6074 }
   6075 
   6076 bool ScalarEvolution::runOnFunction(Function &F) {
   6077   this->F = &F;
   6078   LI = &getAnalysis<LoopInfo>();
   6079   TD = getAnalysisIfAvailable<TargetData>();
   6080   DT = &getAnalysis<DominatorTree>();
   6081   return false;
   6082 }
   6083 
   6084 void ScalarEvolution::releaseMemory() {
   6085   // Iterate through all the SCEVUnknown instances and call their
   6086   // destructors, so that they release their references to their values.
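           // (The SCEV nodes themselves are bump-allocated from SCEVAllocator and
           // are never destroyed individually, so the value-handle references held
           // by SCEVUnknown must be torn down explicitly here.)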
   6087   for (SCEVUnknown *U = FirstUnknown; U; U = U->Next)
   6088     U->~SCEVUnknown();
   6089   FirstUnknown = 0;
   6090 
   6091   ValueExprMap.clear();
   6092   BackedgeTakenCounts.clear();
   6093   ConstantEvolutionLoopExitValue.clear();
   6094   ValuesAtScopes.clear();
   6095   LoopDispositions.clear();
   6096   BlockDispositions.clear();
   6097   UnsignedRanges.clear();
   6098   SignedRanges.clear();
   6099   UniqueSCEVs.clear();
   6100   SCEVAllocator.Reset();
   6101 }
   6102 
   6103 void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
   6104   AU.setPreservesAll();
   6105   AU.addRequiredTransitive<LoopInfo>();
   6106   AU.addRequiredTransitive<DominatorTree>();
   6107 }
   6108 
   6109 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
   6110   return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
   6111 }
   6112 
   6113 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
   6114                           const Loop *L) {
   6115   // Print all inner loops first
   6116   for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
   6117     PrintLoopInfo(OS, SE, *I);
   6118 
   6119   OS << "Loop ";
   6120   WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
   6121   OS << ": ";
   6122 
   6123   SmallVector<BasicBlock *, 8> ExitBlocks;
   6124   L->getExitBlocks(ExitBlocks);
   6125   if (ExitBlocks.size() != 1)
   6126     OS << "<multiple exits> ";
   6127 
   6128   if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
   6129     OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
   6130   } else {
   6131     OS << "Unpredictable backedge-taken count. ";
   6132   }
   6133 
   6134   OS << "\n"
   6135         "Loop ";
   6136   WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
   6137   OS << ": ";
   6138 
   6139   if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
   6140     OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
   6141   } else {
   6142     OS << "Unpredictable max backedge-taken count. ";
   6143   }
   6144 
   6145   OS << "\n";
   6146 }
   6147 
   6148 void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
   6149   // ScalarEvolution's implementation of the print method is to print
   6150   // out SCEV values of all instructions that are interesting. Doing
   6151   // this potentially causes it to create new SCEV objects though,
   6152   // which technically conflicts with the const qualifier. This isn't
   6153   // observable from outside the class though, so casting away the
   6154   // const isn't dangerous.
   6155   ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
   6156 
   6157   OS << "Classifying expressions for: ";
   6158   WriteAsOperand(OS, F, /*PrintType=*/false);
   6159   OS << "\n";
   6160   for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
   6161     if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) {
   6162       OS << *I << '\n';
   6163       OS << "  -->  ";
   6164       const SCEV *SV = SE.getSCEV(&*I);
   6165       SV->print(OS);
   6166 
   6167       const Loop *L = LI->getLoopFor((*I).getParent());
   6168 
   6169       const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
   6170       if (AtUse != SV) {
   6171         OS << "  -->  ";
   6172         AtUse->print(OS);
   6173       }
   6174 
   6175       if (L) {
   6176         OS << "\t\t" "Exits: ";
   6177         const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
   6178         if (!SE.isLoopInvariant(ExitValue, L)) {
   6179           OS << "<<Unknown>>";
   6180         } else {
   6181           OS << *ExitValue;
   6182         }
   6183       }
   6184 
   6185       OS << "\n";
   6186     }
   6187 
   6188   OS << "Determining loop execution counts for: ";
   6189   WriteAsOperand(OS, F, /*PrintType=*/false);
   6190   OS << "\n";
   6191   for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
   6192     PrintLoopInfo(OS, &SE, *I);
   6193 }
   6194 
   6195 ScalarEvolution::LoopDisposition
   6196 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
   6197   std::map<const Loop *, LoopDisposition> &Values = LoopDispositions[S];
   6198   std::pair<std::map<const Loop *, LoopDisposition>::iterator, bool> Pair =
   6199     Values.insert(std::make_pair(L, LoopVariant));
   6200   if (!Pair.second)
   6201     return Pair.first->second;
   6202 
   6203   LoopDisposition D = computeLoopDisposition(S, L);
   6204   return LoopDispositions[S][L] = D;
   6205 }
   6206 
   6207 ScalarEvolution::LoopDisposition
   6208 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
   6209   switch (S->getSCEVType()) {
   6210   case scConstant:
   6211     return LoopInvariant;
   6212   case scTruncate:
   6213   case scZeroExtend:
   6214   case scSignExtend:
   6215     return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
   6216   case scAddRecExpr: {
   6217     const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
   6218 
   6219     // If L is the addrec's loop, it's computable.
   6220     if (AR->getLoop() == L)
   6221       return LoopComputable;
   6222 
   6223     // Add recurrences are never invariant in the function-body (null loop).
   6224     if (!L)
   6225       return LoopVariant;
   6226 
   6227     // This recurrence is variant w.r.t. L if L contains AR's loop.
   6228     if (L->contains(AR->getLoop()))
   6229       return LoopVariant;
   6230 
   6231     // This recurrence is invariant w.r.t. L if AR's loop contains L.
   6232     if (AR->getLoop()->contains(L))
   6233       return LoopInvariant;
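             // For instance, for a hypothetical nest where outer loop L1 contains
             // inner loop L2: an addrec over L2 queried against L1 is variant
             // (its value changes across L1's iterations), while an addrec over L1
             // queried against L2 is invariant for any single trip through L2.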
   6234 
   6235     // This recurrence is variant w.r.t. L if any of its operands
   6236     // are variant.
   6237     for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
   6238          I != E; ++I)
   6239       if (!isLoopInvariant(*I, L))
   6240         return LoopVariant;
   6241 
   6242     // Otherwise it's loop-invariant.
   6243     return LoopInvariant;
   6244   }
   6245   case scAddExpr:
   6246   case scMulExpr:
   6247   case scUMaxExpr:
   6248   case scSMaxExpr: {
   6249     const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
   6250     bool HasVarying = false;
   6251     for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
   6252          I != E; ++I) {
   6253       LoopDisposition D = getLoopDisposition(*I, L);
   6254       if (D == LoopVariant)
   6255         return LoopVariant;
   6256       if (D == LoopComputable)
   6257         HasVarying = true;
   6258     }
   6259     return HasVarying ? LoopComputable : LoopInvariant;
   6260   }
   6261   case scUDivExpr: {
   6262     const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
   6263     LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
   6264     if (LD == LoopVariant)
   6265       return LoopVariant;
   6266     LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
   6267     if (RD == LoopVariant)
   6268       return LoopVariant;
   6269     return (LD == LoopInvariant && RD == LoopInvariant) ?
   6270            LoopInvariant : LoopComputable;
   6271   }
   6272   case scUnknown:
   6273     // All non-instruction values are loop invariant.  All instructions are loop
   6274     // invariant if they are not contained in the specified loop.
   6275     // Instructions are never considered invariant in the function body
   6276     // (null loop) because they are defined within the "loop".
   6277     if (Instruction *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
   6278       return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
   6279     return LoopInvariant;
   6280   case scCouldNotCompute:
   6281     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
   6282     return LoopVariant;
   6283   default: break;
   6284   }
   6285   llvm_unreachable("Unknown SCEV kind!");
   6286   return LoopVariant;
   6287 }
   6288 
   6289 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
   6290   return getLoopDisposition(S, L) == LoopInvariant;
   6291 }
   6292 
   6293 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
   6294   return getLoopDisposition(S, L) == LoopComputable;
   6295 }
   6296 
   6297 ScalarEvolution::BlockDisposition
   6298 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
   6299   std::map<const BasicBlock *, BlockDisposition> &Values = BlockDispositions[S];
   6300   std::pair<std::map<const BasicBlock *, BlockDisposition>::iterator, bool>
   6301     Pair = Values.insert(std::make_pair(BB, DoesNotDominateBlock));
   6302   if (!Pair.second)
   6303     return Pair.first->second;
   6304 
   6305   BlockDisposition D = computeBlockDisposition(S, BB);
   6306   return BlockDispositions[S][BB] = D;
   6307 }
   6308 
   6309 ScalarEvolution::BlockDisposition
   6310 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
   6311   switch (S->getSCEVType()) {
   6312   case scConstant:
   6313     return ProperlyDominatesBlock;
   6314   case scTruncate:
   6315   case scZeroExtend:
   6316   case scSignExtend:
   6317     return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
   6318   case scAddRecExpr: {
   6319     // This uses a "dominates" query instead of "properly dominates" query
   6320     // to test for proper dominance too, because the instruction which
   6321     // produces the addrec's value is a PHI, and a PHI effectively properly
   6322     // dominates its entire containing block.
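             // In particular, when BB is the loop header itself, the header only
             // dominates (not properly dominates) BB, yet the PHI defining the
             // addrec's value is still available to every non-PHI instruction in
             // BB, so the weaker query is the right one.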
   6323     const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
   6324     if (!DT->dominates(AR->getLoop()->getHeader(), BB))
   6325       return DoesNotDominateBlock;
   6326   }
   6327   // FALL THROUGH into SCEVNAryExpr handling.
   6328   case scAddExpr:
   6329   case scMulExpr:
   6330   case scUMaxExpr:
   6331   case scSMaxExpr: {
   6332     const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
   6333     bool Proper = true;
   6334     for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
   6335          I != E; ++I) {
   6336       BlockDisposition D = getBlockDisposition(*I, BB);
   6337       if (D == DoesNotDominateBlock)
   6338         return DoesNotDominateBlock;
   6339       if (D == DominatesBlock)
   6340         Proper = false;
   6341     }
   6342     return Proper ? ProperlyDominatesBlock : DominatesBlock;
   6343   }
   6344   case scUDivExpr: {
   6345     const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
   6346     const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
   6347     BlockDisposition LD = getBlockDisposition(LHS, BB);
   6348     if (LD == DoesNotDominateBlock)
   6349       return DoesNotDominateBlock;
   6350     BlockDisposition RD = getBlockDisposition(RHS, BB);
   6351     if (RD == DoesNotDominateBlock)
   6352       return DoesNotDominateBlock;
   6353     return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
   6354       ProperlyDominatesBlock : DominatesBlock;
   6355   }
   6356   case scUnknown:
   6357     if (Instruction *I =
   6358           dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
   6359       if (I->getParent() == BB)
   6360         return DominatesBlock;
   6361       if (DT->properlyDominates(I->getParent(), BB))
   6362         return ProperlyDominatesBlock;
   6363       return DoesNotDominateBlock;
   6364     }
   6365     return ProperlyDominatesBlock;
   6366   case scCouldNotCompute:
   6367     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
   6368     return DoesNotDominateBlock;
   6369   default: break;
   6370   }
   6371   llvm_unreachable("Unknown SCEV kind!");
   6372   return DoesNotDominateBlock;
   6373 }
   6374 
   6375 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
   6376   return getBlockDisposition(S, BB) >= DominatesBlock;
   6377 }
   6378 
   6379 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
   6380   return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
   6381 }
   6382 
   6383 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
   6384   switch (S->getSCEVType()) {
   6385   case scConstant:
   6386     return false;
   6387   case scTruncate:
   6388   case scZeroExtend:
   6389   case scSignExtend: {
   6390     const SCEVCastExpr *Cast = cast<SCEVCastExpr>(S);
   6391     const SCEV *CastOp = Cast->getOperand();
   6392     return Op == CastOp || hasOperand(CastOp, Op);
   6393   }
   6394   case scAddRecExpr:
   6395   case scAddExpr:
   6396   case scMulExpr:
   6397   case scUMaxExpr:
   6398   case scSMaxExpr: {
   6399     const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
   6400     for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
   6401          I != E; ++I) {
   6402       const SCEV *NAryOp = *I;
   6403       if (NAryOp == Op || hasOperand(NAryOp, Op))
   6404         return true;
   6405     }
   6406     return false;
   6407   }
   6408   case scUDivExpr: {
   6409     const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
   6410     const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
   6411     return LHS == Op || hasOperand(LHS, Op) ||
   6412            RHS == Op || hasOperand(RHS, Op);
   6413   }
   6414   case scUnknown:
   6415     return false;
   6416   case scCouldNotCompute:
   6417     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
   6418     return false;
   6419   default: break;
   6420   }
   6421   llvm_unreachable("Unknown SCEV kind!");
   6422   return false;
   6423 }
   6424 
   6425 void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
   6426   ValuesAtScopes.erase(S);
   6427   LoopDispositions.erase(S);
   6428   BlockDispositions.erase(S);
   6429   UnsignedRanges.erase(S);
   6430   SignedRanges.erase(S);
   6431 }
   6432