//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace PatternMatch;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Instruction *Ret = nullptr;

  // Check to see if there is already a cast!
  for (User *U : V->users())
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, create a new cast at IP.
          // Likewise, do not reuse a cast at BIP because it must dominate
          // instructions that might be inserted before BIP.
          if (BasicBlock::iterator(CI) != IP || BIP == IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Ret = CastInst::Create(Op, V, Ty, "", &*IP);
            Ret->takeName(CI);
            CI->replaceAllUsesWith(Ret);
            CI->setOperand(0, UndefValue::get(V->getType()));
            break;
          }
          Ret = CI;
          break;
        }

  // Create a new cast.
  if (!Ret)
    Ret = CastInst::Create(Op, V, Ty, V->getName(), &*IP);

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(SE.DT.dominates(Ret, &*BIP));

  rememberInstruction(Ret);
  return Ret;
}

static BasicBlock::iterator findInsertPointAfter(Instruction *I,
                                                 BasicBlock *MustDominate) {
  BasicBlock::iterator IP = ++I->getIterator();
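  // An invoke's result is only usable in its normal destination, so code
  // placed "after" an invoke must go at the start of that block instead.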
  if (auto *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();

  while (isa<PHINode>(IP))
    ++IP;

  if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
    ++IP;
  } else if (isa<CatchSwitchInst>(IP)) {
    IP = MustDominate->getFirstInsertionPt();
  } else {
    assert(!IP->isEHPad() && "unexpected eh pad!");
  }

  return IP;
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
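/// Here a noop cast is one that preserves the underlying bits: a bitcast, or
/// a ptrtoint/inttoptr between types of the same size.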
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = findInsertPointAfter(I, Builder.GetInsertBlock());
  return ReuseOrCreateCast(I, Ty, Op, IP);
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby.  If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return &*IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
  SCEVInsertPointGuard Guard(Builder, this);

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(Loc);
  rememberInstruction(BO);

  return BO;
}

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
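///
/// For example, with Factor == 4, the constant 9 factors to 2 with a
/// remainder of 1, and the addrec {8,+,12} factors exactly to {2,+,3}.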
static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder,
                              const SCEV *Factor, ScalarEvolution &SE,
                              const DataLayout &DL) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
          ConstantInt::get(SE.getContext(), C->getAPInt().sdiv(FC->getAPInt()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder = SE.getAddExpr(
            Remainder, SE.getConstant(C->getAPInt().srem(FC->getAPInt())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // Size is known, check if there is a constant operand which is a multiple
    // of the given factor. If so, we can factor it.
    const SCEVConstant *FC = cast<SCEVConstant>(Factor);
    if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
      if (!C->getAPInt().srem(FC->getAPInt())) {
        SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
        NewMulOps[0] = SE.getConstant(C->getAPInt().sdiv(FC->getAPInt()));
        S = SE.getMulExpr(NewMulOps);
        return true;
      }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
      return false;
    S = SE.getAddRecExpr(Start, Step, A->getLoop(),
                         A->getNoWrapFlags(SCEV::FlagNW));
    return true;
  }

  return false;
}

/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         A->getNoWrapFlags(SCEV::FlagNW)));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
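/// For example, a byte offset of 4*i from an i32* base can be emitted as
/// "getelementptr i32, i32* %base, i64 %i" once the element size has been
/// factored out of the index expression by FactorOutConstant.
///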
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *OriginalElTy = PTy->getElementType();
  Type *ElTy = OriginalElTy;
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  Type *IntPtrTy = DL.getIntPtrType(PTy);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (const SCEV *Op : Ops) {
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, DL)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Op);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      // Field offsets are known. See if a constant offset falls within any of
      // the struct fields.
      if (Ops.empty())
        break;
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
        if (SE.getTypeSizeInBits(C->getType()) <= 64) {
          const StructLayout &SL = *DL.getStructLayout(STy);
          uint64_t FullOffset = C->getValue()->getZExtValue();
          if (FullOffset < SL.getSizeInBytes()) {
            unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
            GepIndices.push_back(
                ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
            ElTy = STy->getTypeAtIndex(ElIdx);
            Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
            AnyNonZeroIndices = true;
            FoundFieldNo = true;
          }
        }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
       Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ty->getContext()),
                                              CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby.  If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return &*IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
    rememberInstruction(GEP);

    return GEP;
  }

  {
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V)) break;

      bool AnyIndexNotLoopInvariant =
          std::any_of(GepIndices.begin(), GepIndices.end(),
                      [L](Value *Op) { return !L->isLoopInvariant(Op); });

      if (AnyIndexNotLoopInvariant)
        break;

      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
    // because ScalarEvolution may have changed the address arithmetic to
    // compute a value which is beyond the end of the allocated object.
    Value *Casted = V;
    if (V->getType() != PTy)
      Casted = InsertNoopCastOfTo(Casted, PTy);
    Value *GEP = Builder.CreateGEP(OriginalElTy, Casted, GepIndices, "scevgep");
    Ops.push_back(SE.getUnknown(GEP));
    rememberInstruction(GEP);
  }

  return expand(SE.getAddExpr(Ops));
}

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  auto Pair = RelevantLoops.insert(std::make_pair(S, nullptr));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return nullptr;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI.getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return nullptr;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = nullptr;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (const SCEV *Op : N->operands())
      L = PickMostRelevantLoop(L, getRelevantLoop(Op), SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result = PickMostRelevantLoop(
        getRelevantLoop(D->getLHS()), getRelevantLoop(D->getRHS()), SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = nullptr;
  for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is a SCEVUnknown wrapping a non-instruction value,
        // peek through it so that more of it can be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = nullptr;
  for (const auto &I : OpsAndLoops) {
    const SCEV *Op = I.second;
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = expand(Op);
    } else if (Op->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
    } else {
      // A simple mul.
      Value *W = expandCodeFor(Op, Ty);
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      const APInt *RHS;
      if (match(W, m_Power2(RHS))) {
        // Canonicalize Prod*(1<<C) to Prod<<C.
        assert(!Ty->isVectorTy() && "vector types are not SCEVable");
        Prod = InsertBinop(Instruction::Shl, Prod,
                           ConstantInt::get(Ty, RHS->logBase2()));
      } else {
        Prod = InsertBinop(Instruction::Mul, Prod, W);
      }
    }
  }

  return Prod;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getAPInt();
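    // A udiv by a constant power of two is emitted as a cheaper logical
    // shift right, e.g. %x /u 8 becomes %x >>u 3.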
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
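///
/// For example, Base == {p + n,+,s} becomes Base == p (taken from the last
/// operand of the add, where a pointer operand is canonically placed) while
/// Rest accumulates n + {0,+,s}.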
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          A->getNoWrapFlags(SCEV::FlagNW)));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
           OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT.dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV != PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}

/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP. If the pattern isn't recognized, return NULL.
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return nullptr;

  switch (IncV->getOpcode()) {
  default:
    return nullptr;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT.dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return nullptr;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (auto I = IncV->op_begin() + 1, E = IncV->op_end(); I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
        if (!SE.DT.dominates(OInst, InsertPos))
          return nullptr;
      }
      if (allowScale) {
        // allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is already
      // handled, or some number of address-size elements (ugly). Ugly geps
      // have 2 operands. i1* is used by the expander to represent an
      // address-size element.
      if (IncV->getNumOperands() != 2)
        return nullptr;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return nullptr;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}

/// If the insert point of the current builder or any of the builders on the
/// stack of saved builders has 'I' as its insert point, update it to point to
/// the instruction after 'I'.  This is intended to be used when the instruction
/// 'I' is being moved.  If this fixup is not done and 'I' is moved to a
/// different block, the inconsistent insert point (with a mismatched
/// Instruction and Block) can lead to an instruction being inserted in a block
/// other than its parent.
void SCEVExpander::fixupInsertPoints(Instruction *I) {
  BasicBlock::iterator It(*I);
  BasicBlock::iterator NewInsertPt = std::next(It);
  if (Builder.GetInsertPoint() == It)
    Builder.SetInsertPoint(&*NewInsertPt);
  for (auto *InsertPtGuard : InsertPointGuards)
    if (InsertPtGuard->GetInsertPoint() == It)
      InsertPtGuard->SetInsertPoint(NewInsertPt);
}
/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT.dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos) ||
      !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for (;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT.dominates(IncV, InsertPos))
      break;
  }
  for (auto I = IVIncs.rbegin(), E = IVIncs.rend(); I != E; ++I) {
    fixupInsertPoints(*I);
    (*I)->moveBefore(InsertPos);
  }
  return true;
}

/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for (Instruction *IVOper = IncV;
       (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                 /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
    IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType()) {
      IncV = Builder.CreateBitCast(IncV, PN->getType());
      rememberInstruction(IncV);
    }
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
    rememberInstruction(IncV);
  }
  return IncV;
}

/// \brief Hoist the addrec instruction chain rooted in the loop phi above the
/// position. This routine assumes that this is possible (has been checked).
void SCEVExpander::hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
                                  Instruction *Pos, PHINode *LoopPhi) {
  do {
    if (DT->dominates(InstToHoist, Pos))
      break;
    // Make sure the increment is where we want it. But don't move it
    // down past a potential existing post-inc user.
    fixupInsertPoints(InstToHoist);
    InstToHoist->moveBefore(Pos);
    Pos = InstToHoist;
    InstToHoist = cast<Instruction>(InstToHoist->getOperand(0));
  } while (InstToHoist != LoopPhi);
}

/// \brief Check whether we can cheaply express the requested SCEV in terms of
/// the available PHI SCEV by truncation and/or inversion of the step.
static bool canBeCheaplyTransformed(ScalarEvolution &SE,
                                    const SCEVAddRecExpr *Phi,
                                    const SCEVAddRecExpr *Requested,
                                    bool &InvertStep) {
  Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
  Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());

  if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
    return false;

  // Try truncating it if necessary.
  Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
  if (!Phi)
    return false;

  // Check whether truncation will help.
  if (Phi == Requested) {
    InvertStep = false;
    return true;
  }

  // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
  if (SE.getAddExpr(Requested->getStart(),
                    SE.getNegativeSCEV(Requested)) == Phi) {
    InvertStep = true;
    return true;
  }

  return false;
}

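// The increment of an addrec provably cannot wrap in the signed sense when
// performing the add in a type twice as wide commutes with sign extension,
// i.e. sext(AR + Step) == sext(AR) + sext(Step).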
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
                                            SE.getSignExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

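// Same check as above, but for no-unsigned-wrap, using zero extension.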
static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
                                            SE.getZeroExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy,
                                        Type *&TruncTy,
                                        bool &InvertStep) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    PHINode *AddRecPhiMatch = nullptr;
    Instruction *IncV = nullptr;
    TruncTy = nullptr;
    InvertStep = false;

    // Only try partially matching scevs that need truncation and/or
    // step-inversion if we know this loop is outside the current loop.
    bool TryNonMatchingSCEV =
        IVIncInsertLoop &&
        SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());

    for (auto &I : *L->getHeader()) {
      auto *PN = dyn_cast<PHINode>(&I);
      if (!PN || !SE.isSCEVable(PN->getType()))
        continue;

      const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PN));
      if (!PhiSCEV)
        continue;

      bool IsMatchingSCEV = PhiSCEV == Normalized;
      // We only handle truncation and inversion of phi recurrences for the
      // expanded expression if the expanded expression's loop dominates the
      // loop we insert to. Check now, so we can bail out early.
      if (!IsMatchingSCEV && !TryNonMatchingSCEV)
        continue;

      Instruction *TempIncV =
          cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));

      // Check whether we can reuse this PHI node.
      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(PN, TempIncV, L))
          continue;
        if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(PN, TempIncV, L))
          continue;
      }

      // Stop if we have found an exact match SCEV.
      if (IsMatchingSCEV) {
        IncV = TempIncV;
        TruncTy = nullptr;
        InvertStep = false;
        AddRecPhiMatch = PN;
        break;
      }

      // Try whether the phi can be translated into the requested form
      // (truncated and/or offset by a constant).
      if ((!TruncTy || InvertStep) &&
          canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
        // Record the phi node. But don't stop; we might still find an exact
        // match later.
        AddRecPhiMatch = PN;
        IncV = TempIncV;
        TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
      }
    }

    if (AddRecPhiMatch) {
      // Potentially, move the increment. We have made sure in
      // isExpandedAddRecExprPHI or hoistIVInc that this is possible.
      if (L == IVIncInsertLoop)
        hoistBeforePos(&SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);

      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(AddRecPhiMatch);
      // Remember the increment.
      rememberInstruction(IncV);
      return AddRecPhiMatch;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  SCEVInsertPointGuard Guard(Builder, this);

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header).  Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value.
  Value *StartV =
      expandCodeFor(Normalized->getStart(), ExpandTy, &L->getHeader()->front());

  // StartV must be hoisted into L's preheader to dominate the new phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
                                 L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that PHI
  // reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the increment
  // (unless it's a constant, because subtracts of constants are canonicalized
  // to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());

  // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
  // we actually do emit an addition.  It does not apply if we emit a
  // subtraction.
  bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
  bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
    // instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);

    if (isa<OverflowingBinaryOperator>(IncV)) {
      if (IncrementIsNUW)
        cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
      if (IncrementIsNSW)
        cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
    }
    PN->addIncoming(IncV, Pred);
  }
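
  // For example, given an i64 addrec {%start,+,%step}<%L>, the loop above
  // typically populates IR of the following shape (names are illustrative):
  //
  //   header:
  //     %x.iv = phi i64 [ %start, %preheader ], [ %x.iv.next, %latch ]
  //     ...
  //   latch:
  //     %x.iv.next = add i64 %x.iv, %step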

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized = cast<SCEVAddRecExpr>(TransformForPostIncUse(
        Normalize, S, nullptr, nullptr, Loops, SE, SE.DT));
  }
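
  // Normalization steps the start back by one increment; e.g. a post-inc use
  // of {1,+,2}<%L> is expanded from the pre-inc recurrence {-1,+,2}<%L>,
  // whose post-incremented (latch) value is {1,+,2}<%L> again.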

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = nullptr;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                       Normalized->getLoop(),
                       Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }
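
  // E.g. if the start value is not available above the loop, {Start,+,Step}
  // is expanded as {0,+,Step} here and Start is re-added to the result as
  // PostLoopOffset at the end of this function.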

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = nullptr;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    if (!Start->isZero()) {
      // The normalization below assumes that Start is constant zero, so if it
      // isn't, re-associate Start to PostLoopOffset.
      assert(!PostLoopOffset && "Start non-zero but PostLoopOffset set?");
      PostLoopOffset = Start;
      Start = SE.getConstant(Normalized->getType(), 0);
    }
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(
                             Start, Step, Normalized->getLoop(),
                             Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }
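
  // E.g. {0,+,%s}<%L> with a step %s defined inside the loop is expanded as
  // {0,+,1}<%L> here and multiplied by %s (PostLoopScale) afterwards.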

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  // In some cases, we decide to reuse an existing phi node but need to truncate
  // it and/or invert the step.
  Type *TruncTy = nullptr;
  bool InvertStep = false;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy,
                                          TruncTy, InvertStep);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result) &&
        !SE.DT.dominates(cast<Instruction>(Result),
                         &*Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside the loop whose operand is replaced
      // during expansion with the value of the postinc user. Without
      // fundamentally changing the way postinc users are tracked, the only
      // remedy is inserting an extra IV increment. StepV might fold into
      // PostLoopOffset, but hopefully expandCodeFor handles that.
      bool useSubtract =
        !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      Value *StepV;
      {
        // Expand the step somewhere that dominates the loop header.
        SCEVInsertPointGuard Guard(Builder, this);
        StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());
      }
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // We have decided to reuse an induction variable of a dominating loop. Apply
  // truncation and/or inversion of the step.
  if (TruncTy) {
    Type *ResTy = Result->getType();
    // Normalize the result type.
    if (ResTy != SE.getEffectiveSCEVType(ResTy))
      Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
    // Truncate the result.
    if (TruncTy != Result->getType()) {
      Result = Builder.CreateTrunc(Result, TruncTy);
      rememberInstruction(Result);
    }
    // Invert the result.
    if (InvertStep) {
      Result = Builder.CreateSub(expandCodeFor(Normalized->getStart(), TruncTy),
                                 Result);
      rememberInstruction(Result);
    }
  }
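
  // E.g. when {X,+,-1}<%L> reuses an existing {0,+,1}<%L> phi, InvertStep is
  // set and the result is materialized as X - phi (after any truncation).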

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      const SCEV *const OffsetArray[1] = { PostLoopOffset };
      Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}

Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  if (!CanonicalMode) return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = nullptr;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable if the
  // AddRec's type is narrower than that of the canonical IV.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       S->getNoWrapFlags(SCEV::FlagNW)));
    BasicBlock::iterator NewInsertPt =
        findInsertPointAfter(cast<Instruction>(V), Builder.GetInsertBlock());
    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
                      &*NewInsertPt);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
    NewOps[0] = SE.getConstant(Ty, 0);
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
                                        S->getNoWrapFlags(SCEV::FlagNW));
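
    // E.g. {%p,+,8}<%L> is split into base %p plus {0,+,8}<%L>; when %p is a
    // pointer, the sum is emitted as a GEP by the code below.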

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    const SCEV *Base = S->getStart();
    const SCEV *RestArray[1] = { Rest };
    // Dig into the expression to find the pointer base for a GEP.
    ExposePointerBase(Base, RestArray[0], SE);
    // If we found a pointer, expand the AddRec with a GEP.
    if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
      // Make sure the Base isn't something exotic, such as a multiplied
      // or divided pointer value. In those cases, the result type isn't
      // actually a pointer type.
      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
        Value *StartV = expand(Base);
        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
        return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
    //
    // The LHS and RHS values are factored out of the expand call to make the
    // output independent of the argument evaluation order.
    const SCEV *AddExprLHS = SE.getUnknown(expand(S->getStart()));
    const SCEV *AddExprRHS = SE.getUnknown(expand(Rest));
    return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
  }

  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
                                  &Header->front());
    rememberInstruction(CanonicalIV);

    SmallSet<BasicBlock *, 4> PredSeen;
    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (!PredSeen.insert(HP).second) {
        // There must be an incoming value for each predecessor, even the
        // duplicates!
        CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
        continue;
      }

      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }
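
  // The canonical IV created above has this shape (illustrative block names):
  //
  //   header:
  //     %indvar = phi iN [ 0, %preheader ], [ %indvar.next, %latch ]
  //     ...
  //   latch:
  //     %indvar.next = add iN %indvar, 1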

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())    // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));
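
  // E.g. {0,+,4}<%L> is emitted as the canonical IV multiplied by 4,
  // truncated back to Ty if the canonical IV is wider.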

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form.  This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n     to: " << *V << "\n";
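
  // E.g. the quadratic {0,+,1,+,2}<%L> evaluates at iteration %indvar to
  // %indvar + 2 * (%indvar * (%indvar - 1) / 2), which the SCEV folders then
  // simplify before it is expanded below.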

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}

Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateTrunc(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateZExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateSExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
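
  // An N-operand smax is lowered as a chain of N-1 icmp/select pairs, folding
  // operands right to left; e.g. smax(%a, %b) becomes (names illustrative):
  //   %cmp  = icmp sgt iN %b, %a
  //   %smax = select i1 %cmp, iN %b, iN %a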
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
                                   Instruction *IP) {
  assert(IP);
  Builder.SetInsertPoint(IP);
  return expandCodeFor(SH, Ty);
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}

Value *SCEVExpander::FindValueInExprValueMap(const SCEV *S,
                                             const Instruction *InsertPt) {
  SetVector<Value *> *Set = SE.getSCEVValues(S);
  // If the expansion is not in CanonicalMode and the SCEV contains any
  // scAddRecExpr subexpression, the SCEV must be expanded literally.
  if (CanonicalMode || !SE.containsAddRecurrence(S)) {
    // If S is scConstant, it may be worse to reuse an existing Value.
    if (S->getSCEVType() != scConstant && Set) {
      // Choose a Value from the set which dominates the insertPt.
      // insertPt should be inside the Value's parent loop so as not to break
      // LCSSA form.
      for (Value *Ent : *Set) {
        auto *EntInst = dyn_cast_or_null<Instruction>(Ent);
        if (EntInst && S->getType() == EntInst->getType() &&
            EntInst->getFunction() == InsertPt->getFunction() &&
            SE.DT.dominates(EntInst, InsertPt) &&
            (SE.LI.getLoopFor(EntInst->getParent()) == nullptr ||
             SE.LI.getLoopFor(EntInst->getParent())->contains(InsertPt)))
          return Ent;
      }
    }
  }
  return nullptr;
}

// The expansion of SCEV will either reuse a previous Value in ExprValueMap,
// or expand the SCEV literally. Specifically, if the expansion is in LSRMode
// and the SCEV contains any scAddRecExpr subexpression, it will be expanded
// literally, to prevent LSR's transformed SCEV from being reverted. Otherwise,
// the expansion will try to reuse a Value from ExprValueMap, and only when
// that fails, expand the SCEV literally.
Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = &*Builder.GetInsertPoint();
  for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
       L = L->getParentLoop())
    if (SE.isLoopInvariant(S, L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
      else {
        // LSR sets the insertion point for AddRec start/step values to the
        // block start to simplify value reuse, even though it's an invalid
        // position. SCEVExpander must correct for this in all cases.
        InsertPt = &*L->getHeader()->getFirstInsertionPt();
      }
    } else {
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
        InsertPt = &*L->getHeader()->getFirstInsertionPt();
      while (InsertPt->getIterator() != Builder.GetInsertPoint() &&
             (isInsertedInstruction(InsertPt) ||
              isa<DbgInfoIntrinsic>(InsertPt))) {
        InsertPt = &*std::next(InsertPt->getIterator());
      }
      break;
    }

  // Check to see if we already expanded this here.
  auto I = InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  SCEVInsertPointGuard Guard(Builder, this);
  Builder.SetInsertPoint(InsertPt);

  // Expand the expression into instructions.
  Value *V = FindValueInExprValueMap(S, InsertPt);

  if (!V)
    V = visit(S);

  // Remember the expanded value for this SCEV at this location.
  //
  // This is independent of PostIncLoops. The mapped value simply materializes
  // the expression at this insertion point. If the mapped value happened to be
  // a postinc expansion, it could be reused by a non-postinc user, but only if
  // its insertion point was already at the head of the loop.
  InsertedExpressions[std::make_pair(S, InsertPt)] = V;
  return V;
}

void SCEVExpander::rememberInstruction(Value *I) {
  if (!PostIncLoops.empty())
    InsertedPostIncValues.insert(I);
  else
    InsertedValues.insert(I);
}

/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none).  A canonical induction variable
/// starts at zero and steps by one on each iteration.
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    Type *Ty) {
  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");

  // Build a SCEV for {0,+,1}<L>.
  // Conservatively use FlagAnyWrap for now.
  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);

  // Emit code for it.
  SCEVInsertPointGuard Guard(Builder, this);
  PHINode *V =
      cast<PHINode>(expandCodeFor(H, nullptr, &L->getHeader()->front()));

  return V;
}

/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                           SmallVectorImpl<WeakVH> &DeadInsts,
                                           const TargetTransformInfo *TTI) {
  // Collect the header phis; with TTI available they are sorted below so that
  // wider integer phis are processed first and pointer phis come last.
  SmallVector<PHINode*, 8> Phis;
  for (auto &I : *L->getHeader()) {
    if (auto *PN = dyn_cast<PHINode>(&I))
      Phis.push_back(PN);
    else
      break;
  }

  if (TTI)
    std::sort(Phis.begin(), Phis.end(), [](Value *LHS, Value *RHS) {
      // Put pointers at the back and make sure pointer < pointer = false.
      if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
        return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
      return RHS->getType()->getPrimitiveSizeInBits() <
             LHS->getType()->getPrimitiveSizeInBits();
    });
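
  // E.g. phis of types i64, i32, and i8* are visited in that order: integers
  // from widest to narrowest, pointers last.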

  unsigned NumElim = 0;
  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
  // Process phis from wide to narrow. Map wide phis to their truncation
  // so narrow phis can reuse them.
  for (PHINode *Phi : Phis) {
    auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
      if (Value *V = SimplifyInstruction(PN, DL, &SE.TLI, &SE.DT, &SE.AC))
        return V;
      if (!SE.isSCEVable(PN->getType()))
        return nullptr;
      auto *Const = dyn_cast<SCEVConstant>(SE.getSCEV(PN));
      if (!Const)
        return nullptr;
      return Const->getValue();
    };

    // Fold constant phis. They may be congruent to other constant phis and
    // would confuse the logic below that expects proper IVs.
    if (Value *V = SimplifyPHINode(Phi)) {
      if (V->getType() != Phi->getType())
        continue;
      Phi->replaceAllUsesWith(V);
      DeadInsts.emplace_back(Phi);
      ++NumElim;
      DEBUG_WITH_TYPE(DebugType, dbgs()
                      << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
      continue;
    }

    if (!SE.isSCEVable(Phi->getType()))
      continue;

    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
    if (!OrigPhiRef) {
      OrigPhiRef = Phi;
      if (Phi->getType()->isIntegerTy() && TTI &&
          TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
        // This phi can be freely truncated to the narrowest phi type. Map the
        // truncated expression to it so it will be reused for narrow types.
        const SCEV *TruncExpr =
          SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
        ExprToIVMap[TruncExpr] = Phi;
      }
      continue;
    }

    // Replacing a pointer phi with an integer phi or vice-versa doesn't make
    // sense.
    if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
      continue;

    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
      Instruction *OrigInc = dyn_cast<Instruction>(
          OrigPhiRef->getIncomingValueForBlock(LatchBlock));
      Instruction *IsomorphicInc =
          dyn_cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));

      if (OrigInc && IsomorphicInc) {
        // If this phi has the same width but is more canonical, replace the
        // original with it. As part of the "more canonical" determination,
        // respect a prior decision to use an IV chain.
        if (OrigPhiRef->getType() == Phi->getType() &&
            !(ChainedPhis.count(Phi) ||
              isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)) &&
            (ChainedPhis.count(Phi) ||
             isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
          std::swap(OrigPhiRef, Phi);
          std::swap(OrigInc, IsomorphicInc);
        }
        // Replacing the congruent phi is sufficient because acyclic
        // redundancy elimination, CSE/GVN, should handle the
        // rest. However, once SCEV proves that a phi is congruent,
        // it's often the head of an IV user cycle that is isomorphic
        // with the original phi. It's worth eagerly cleaning up the
        // common case of a single IV increment so that DeleteDeadPHIs
        // can remove cycles that had postinc uses.
        const SCEV *TruncExpr =
            SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->getType());
        if (OrigInc != IsomorphicInc &&
            TruncExpr == SE.getSCEV(IsomorphicInc) &&
            SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc) &&
            hoistIVInc(OrigInc, IsomorphicInc)) {
          DEBUG_WITH_TYPE(DebugType,
                          dbgs() << "INDVARS: Eliminated congruent iv.inc: "
                                 << *IsomorphicInc << '\n');
          Value *NewInc = OrigInc;
          if (OrigInc->getType() != IsomorphicInc->getType()) {
            Instruction *IP = nullptr;
            if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
              IP = &*PN->getParent()->getFirstInsertionPt();
            else
              IP = OrigInc->getNextNode();

            IRBuilder<> Builder(IP);
            Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
            NewInc = Builder.CreateTruncOrBitCast(
                OrigInc, IsomorphicInc->getType(), IVName);
          }
          IsomorphicInc->replaceAllUsesWith(NewInc);
          DeadInsts.emplace_back(IsomorphicInc);
        }
      }
    }
    DEBUG_WITH_TYPE(DebugType, dbgs() << "INDVARS: Eliminated congruent iv: "
                                      << *Phi << '\n');
    ++NumElim;
    Value *NewIV = OrigPhiRef;
    if (OrigPhiRef->getType() != Phi->getType()) {
      IRBuilder<> Builder(&*L->getHeader()->getFirstInsertionPt());
      Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
      NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
    }
    Phi->replaceAllUsesWith(NewIV);
    DeadInsts.emplace_back(Phi);
  }
  return NumElim;
}

Value *SCEVExpander::findExistingExpansion(const SCEV *S,
                                           const Instruction *At, Loop *L) {
  using namespace llvm::PatternMatch;

  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // Look for a suitable value in simple conditions at the loop exits.
  for (BasicBlock *BB : ExitingBlocks) {
    ICmpInst::Predicate Pred;
    Instruction *LHS, *RHS;
    BasicBlock *TrueBB, *FalseBB;

    if (!match(BB->getTerminator(),
               m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)),
                    TrueBB, FalseBB)))
      continue;

    if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
      return LHS;

    if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
      return RHS;
  }
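
  // E.g. if an exit is guarded by 'br (icmp ult %iv.next, %n), ...' and %n
  // is an instruction whose SCEV matches S, %n itself is reused (provided it
  // dominates At).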

  // Fall back to expand's logic for reusing a previous Value from
  // ExprValueMap.
  if (Value *Val = FindValueInExprValueMap(S, At))
    return Val;

  // There is potential to make this significantly smarter, but this simple
  // heuristic already gets some interesting cases.

  // Cannot find a suitable value.
  return nullptr;
}

bool SCEVExpander::isHighCostExpansionHelper(
    const SCEV *S, Loop *L, const Instruction *At,
    SmallPtrSetImpl<const SCEV *> &Processed) {

  // If we can find an existing value for this SCEV available at the point
  // "At", then consider the expression cheap.
  if (At && findExistingExpansion(S, At, L) != nullptr)
    return false;

  // Zero/One operand expressions
  switch (S->getSCEVType()) {
  case scUnknown:
  case scConstant:
    return false;
  case scTruncate:
    return isHighCostExpansionHelper(cast<SCEVTruncateExpr>(S)->getOperand(),
                                     L, At, Processed);
  case scZeroExtend:
    return isHighCostExpansionHelper(cast<SCEVZeroExtendExpr>(S)->getOperand(),
                                     L, At, Processed);
  case scSignExtend:
    return isHighCostExpansionHelper(cast<SCEVSignExtendExpr>(S)->getOperand(),
                                     L, At, Processed);
  }

  if (!Processed.insert(S).second)
    return false;

  if (auto *UDivExpr = dyn_cast<SCEVUDivExpr>(S)) {
    // If the divisor is a power of two and the SCEV type fits in a native
    // integer, consider the division cheap irrespective of whether it occurs in
    // the user code since it can be lowered into a right shift.
    if (auto *SC = dyn_cast<SCEVConstant>(UDivExpr->getRHS()))
      if (SC->getAPInt().isPowerOf2()) {
        const DataLayout &DL =
            L->getHeader()->getParent()->getParent()->getDataLayout();
        unsigned Width = cast<IntegerType>(UDivExpr->getType())->getBitWidth();
        return DL.isIllegalInteger(Width);
      }
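
    // E.g. an i32 udiv by 4 lowers to a single shift ('lshr i32 %x, 2') and
    // is considered cheap on any target where i32 is a legal integer type.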

    // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
    // HowManyLessThans produced to compute a precise expression, rather than a
    // UDiv from the user's code. If we can't find a UDiv in the code with some
    // simple searching, assume the former and consider UDivExpr expensive to
    // compute.
    BasicBlock *ExitingBB = L->getExitingBlock();
    if (!ExitingBB)
      return true;

    // At the beginning of this function we already tried to find an existing
    // value for plain 'S'. Now try to look up 'S + 1', since it is a common
    // pattern involving division. This is just a simple search heuristic.
    if (!At)
      At = &ExitingBB->back();
    if (!findExistingExpansion(
            SE.getAddExpr(S, SE.getConstant(S->getType(), 1)), At, L))
      return true;
  }

  // HowManyLessThans uses a Max expression whenever the loop is not guarded by
  // the exit condition.
  if (isa<SCEVSMaxExpr>(S) || isa<SCEVUMaxExpr>(S))
    return true;

  // Recurse past nary expressions, which commonly occur in the
  // BackedgeTakenCount. They may already exist in program code, and if not,
  // they are not too expensive to rematerialize.
  if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(S)) {
    for (auto *Op : NAry->operands())
      if (isHighCostExpansionHelper(Op, L, At, Processed))
        return true;
  }

  // If we haven't recognized an expensive SCEV pattern, assume it's an
  // expression produced by program code.
  return false;
}

Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
                                            Instruction *IP) {
  assert(IP);
  switch (Pred->getKind()) {
  case SCEVPredicate::P_Union:
    return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
  case SCEVPredicate::P_Equal:
    return expandEqualPredicate(cast<SCEVEqualPredicate>(Pred), IP);
  case SCEVPredicate::P_Wrap: {
    auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
    return expandWrapPredicate(AddRecPred, IP);
  }
  }
  llvm_unreachable("Unknown SCEV predicate type");
}

Value *SCEVExpander::expandEqualPredicate(const SCEVEqualPredicate *Pred,
                                          Instruction *IP) {
  Value *Expr0 = expandCodeFor(Pred->getLHS(), Pred->getLHS()->getType(), IP);
  Value *Expr1 = expandCodeFor(Pred->getRHS(), Pred->getRHS()->getType(), IP);

  Builder.SetInsertPoint(IP);
  auto *I = Builder.CreateICmpNE(Expr0, Expr1, "ident.check");
  return I;
}

Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
                                           Instruction *Loc, bool Signed) {
  assert(AR->isAffine() && "Cannot generate RT check for "
                           "non-affine expression");

  SCEVUnionPredicate Pred;
  const SCEV *ExitCount =
      SE.getPredicatedBackedgeTakenCount(AR->getLoop(), Pred);

  assert(ExitCount != SE.getCouldNotCompute() && "Invalid loop count");

  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *Start = AR->getStart();

  unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->getType());
  unsigned DstBits = SE.getTypeSizeInBits(AR->getType());

  // The expression {Start,+,Step} has nusw/nssw if
  //   Step < 0, Start - |Step| * Backedge <= Start
  //   Step >= 0, Start + |Step| * Backedge > Start
  // and |Step| * Backedge doesn't unsigned overflow.
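  //
  // In pseudo-IR, the check emitted below is roughly (illustrative):
  //   %mul  = call { iN, i1 } @llvm.umul.with.overflow.iN(|Step|, Backedge)
  //   %wrap = Step < 0 ? (Start - %mul.result > Start)
  //                    : (Start + %mul.result < Start)
  //   check = %wrap | %mul.overflow [ | backedge-truncation check ]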

  IntegerType *CountTy = IntegerType::get(Loc->getContext(), SrcBits);
  Builder.SetInsertPoint(Loc);
  Value *TripCountVal = expandCodeFor(ExitCount, CountTy, Loc);

  IntegerType *Ty =
      IntegerType::get(Loc->getContext(), SE.getTypeSizeInBits(AR->getType()));

  Value *StepValue = expandCodeFor(Step, Ty, Loc);
  Value *NegStepValue = expandCodeFor(SE.getNegativeSCEV(Step), Ty, Loc);
  Value *StartValue = expandCodeFor(Start, Ty, Loc);

  ConstantInt *Zero =
      ConstantInt::get(Loc->getContext(), APInt::getNullValue(DstBits));

  Builder.SetInsertPoint(Loc);
  // Compute |Step|
  Value *StepCompare = Builder.CreateICmp(ICmpInst::ICMP_SLT, StepValue, Zero);
  Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);

  // Get the backedge-taken count and truncate or extend it to the AR type.
  Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
  auto *MulF = Intrinsic::getDeclaration(Loc->getModule(),
                                         Intrinsic::umul_with_overflow, Ty);

  // Compute |Step| * Backedge
  CallInst *Mul = Builder.CreateCall(MulF, {AbsStep, TruncTripCount}, "mul");
  Value *MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
  Value *OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow");

  // Compute:
  //   Start + |Step| * Backedge < Start
  //   Start - |Step| * Backedge > Start
  Value *Add = Builder.CreateAdd(StartValue, MulV);
  Value *Sub = Builder.CreateSub(StartValue, MulV);

  Value *EndCompareGT = Builder.CreateICmp(
      Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT, Sub, StartValue);

  Value *EndCompareLT = Builder.CreateICmp(
      Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, Add, StartValue);

  // Select the answer based on the sign of Step.
  Value *EndCheck =
      Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);

  // If the backedge-taken count type is larger than the AR type, check that
  // we don't drop any bits by truncating it. If we are dropping bits, then we
  // have overflow (unless the step is zero).
  if (SE.getTypeSizeInBits(CountTy) > SE.getTypeSizeInBits(Ty)) {
    auto MaxVal = APInt::getMaxValue(DstBits).zext(SrcBits);
    auto *BackedgeCheck =
        Builder.CreateICmp(ICmpInst::ICMP_UGT, TripCountVal,
                           ConstantInt::get(Loc->getContext(), MaxVal));
    BackedgeCheck = Builder.CreateAnd(
        BackedgeCheck, Builder.CreateICmp(ICmpInst::ICMP_NE, StepValue, Zero));

    EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
  }

  EndCheck = Builder.CreateOr(EndCheck, OfMul);
  return EndCheck;
}

Value *SCEVExpander::expandWrapPredicate(const SCEVWrapPredicate *Pred,
                                         Instruction *IP) {
  const auto *A = cast<SCEVAddRecExpr>(Pred->getExpr());
  Value *NSSWCheck = nullptr, *NUSWCheck = nullptr;

  // Add a check for NUSW
  if (Pred->getFlags() & SCEVWrapPredicate::IncrementNUSW)
    NUSWCheck = generateOverflowCheck(A, IP, false);

  // Add a check for NSSW
  if (Pred->getFlags() & SCEVWrapPredicate::IncrementNSSW)
    NSSWCheck = generateOverflowCheck(A, IP, true);

  if (NUSWCheck && NSSWCheck)
    return Builder.CreateOr(NUSWCheck, NSSWCheck);

  if (NUSWCheck)
    return NUSWCheck;

  if (NSSWCheck)
    return NSSWCheck;

  return ConstantInt::getFalse(IP->getContext());
}

Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union,
                                          Instruction *IP) {
  auto *BoolType = IntegerType::get(IP->getContext(), 1);
  Value *Check = ConstantInt::getNullValue(BoolType);
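
  // The union check is the OR of its members' checks; e.g. for {P1, P2} this
  // emits 'or(check(P1), check(P2))', which is true iff some predicate is
  // violated at run time.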

  // Loop over all checks in this set.
  for (auto Pred : Union->getPredicates()) {
    auto *NextCheck = expandCodeForPredicate(Pred, IP);
    Builder.SetInsertPoint(IP);
    Check = Builder.CreateOr(Check, NextCheck);
  }

  return Check;
}

namespace {
// Search for a SCEV subexpression that is not safe to expand.  Any expression
// that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
// UDiv expressions. We don't know if the UDiv is derived from an IR divide
// instruction, but the important thing is that we prove the denominator is
// nonzero before expansion.
//
// IVUsers already checks that IV-derived expressions are safe. So this check is
// only needed when the expression includes some subexpression that is not IV
// derived.
//
// Currently, we only allow division by a nonzero constant here. If this is
// inadequate, we could easily allow division by SCEVUnknown by using
// ValueTracking to check isKnownNonZero().
//
// We cannot generally expand recurrences unless the step dominates the loop
// header. The expander handles the special case of affine recurrences by
// scaling the recurrence outside the loop, but this technique isn't generally
// applicable. Expanding a nested recurrence outside a loop requires computing
// binomial coefficients. This could be done, but the recurrence has to be in a
// perfectly reduced form, which can't be guaranteed.
struct SCEVFindUnsafe {
  ScalarEvolution &SE;
  bool IsUnsafe;

  SCEVFindUnsafe(ScalarEvolution &se): SE(se), IsUnsafe(false) {}

  bool follow(const SCEV *S) {
    if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
      const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
      if (!SC || SC->getValue()->isZero()) {
        IsUnsafe = true;
        return false;
      }
    }
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
      const SCEV *Step = AR->getStepRecurrence(SE);
      if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
        IsUnsafe = true;
        return false;
      }
    }
    return true;
  }
  bool isDone() const { return IsUnsafe; }
};
} // end anonymous namespace

namespace llvm {
bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) {
  SCEVFindUnsafe Search(SE);
  visitAll(S, Search);
  return !Search.IsUnsafe;
}
} // end namespace llvm