//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace PatternMatch;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Instruction *Ret = nullptr;

  // Check to see if there is already a cast!
  for (User *U : V->users())
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, create a new cast at IP.
          // Likewise, do not reuse a cast at BIP because it must dominate
          // instructions that might be inserted before BIP.
          if (BasicBlock::iterator(CI) != IP || BIP == IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Ret = CastInst::Create(Op, V, Ty, "", &*IP);
            Ret->takeName(CI);
            CI->replaceAllUsesWith(Ret);
            CI->setOperand(0, UndefValue::get(V->getType()));
            break;
          }
          Ret = CI;
          break;
        }

  // Create a new cast.
  if (!Ret)
    Ret = CastInst::Create(Op, V, Ty, V->getName(), &*IP);

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(SE.DT.dominates(Ret, &*BIP));

  rememberInstruction(Ret);
  return Ret;
}

static BasicBlock::iterator findInsertPointAfter(Instruction *I,
                                                 BasicBlock *MustDominate) {
  BasicBlock::iterator IP = ++I->getIterator();
  if (auto *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();

  while (isa<PHINode>(IP))
    ++IP;

  while (IP->isEHPad()) {
    if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
      ++IP;
    } else if (isa<CatchSwitchInst>(IP)) {
      IP = MustDominate->getFirstInsertionPt();
    } else {
      llvm_unreachable("unexpected eh pad!");
    }
  }

  return IP;
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
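/// For example, on a target with 64-bit pointers, i8* <-> i64 conversions
/// qualify: a single ptrtoint or inttoptr leaves the bit pattern unchanged.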
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = findInsertPointAfter(I, Builder.GetInsertBlock());
  return ReuseOrCreateCast(I, Ty, Op, IP);
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
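/// For example, if an identical binop such as "add i64 %a, %b" already
/// appears among the handful of instructions just before the insertion
/// point, it is reused instead of emitting a duplicate.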
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby.  If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return &*IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
  BuilderType::InsertPointGuard Guard(Builder);

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(Loc);
  rememberInstruction(BO);

  return BO;
}

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
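/// For example, factoring 8 out of the addrec {8,+,24} yields {1,+,3} with
/// a zero remainder, while factoring 8 out of the constant 20 yields 2 with
/// a remainder of 4.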
static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder,
                              const SCEV *Factor, ScalarEvolution &SE,
                              const DataLayout &DL) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
          ConstantInt::get(SE.getContext(), C->getAPInt().sdiv(FC->getAPInt()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder = SE.getAddExpr(
            Remainder, SE.getConstant(C->getAPInt().srem(FC->getAPInt())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // Size is known, check if there is a constant operand which is a multiple
    // of the given factor. If so, we can factor it.
    const SCEVConstant *FC = cast<SCEVConstant>(Factor);
    if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
      if (!C->getAPInt().srem(FC->getAPInt())) {
        SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
        NewMulOps[0] = SE.getConstant(C->getAPInt().sdiv(FC->getAPInt()));
        S = SE.getMulExpr(NewMulOps);
        return true;
      }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
      return false;
    S = SE.getAddRecExpr(Start, Step, A->getLoop(),
                         A->getNoWrapFlags(SCEV::FlagNW));
    return true;
  }

  return false;
}

/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
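/// For example, given the operand list [x, 5, {0,+,1}], the non-addrec part
/// (x + 5) is handed to ScalarEvolution for simplification, and the addrec
/// {0,+,1} is re-appended at the end.
///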
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         A->getNoWrapFlags(SCEV::FlagNW)));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
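/// As an illustrative example, expanding the sum (%base + 4 * %i), where
/// %base has type i32*, would ideally produce
///   %scevgep = getelementptr i32, i32* %base, i64 %i
/// rather than a ptrtoint/add/inttoptr sequence, which would hide the
/// pointer arithmetic from alias analysis.
///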
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *OriginalElTy = PTy->getElementType();
  Type *ElTy = OriginalElTy;
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  Type *IntPtrTy = DL.getIntPtrType(PTy);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (const SCEV *Op : Ops) {
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, DL)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Op);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      // Field offsets are known. See if a constant offset falls within any of
      // the struct fields.
      if (Ops.empty())
        break;
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
        if (SE.getTypeSizeInBits(C->getType()) <= 64) {
          const StructLayout &SL = *DL.getStructLayout(STy);
          uint64_t FullOffset = C->getValue()->getZExtValue();
          if (FullOffset < SL.getSizeInBytes()) {
            unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
            GepIndices.push_back(
                ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
            ElTy = STy->getTypeAtIndex(ElIdx);
            Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
            AnyNonZeroIndices = true;
            FoundFieldNo = true;
          }
        }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
       Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ty->getContext()),
                                              CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby.  If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return &*IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    BuilderType::InsertPointGuard Guard(Builder);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
    rememberInstruction(GEP);

    return GEP;
  }

  // Save the original insertion point so we can restore it when we're done.
  BuilderType::InsertPoint SaveInsertPt = Builder.saveIP();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(V)) break;

    bool AnyIndexNotLoopInvariant =
        std::any_of(GepIndices.begin(), GepIndices.end(),
                    [L](Value *Op) { return !L->isLoopInvariant(Op); });

    if (AnyIndexNotLoopInvariant)
      break;

    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader->getTerminator());
  }

  // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
  // because ScalarEvolution may have changed the address arithmetic to
  // compute a value which is beyond the end of the allocated object.
  Value *Casted = V;
  if (V->getType() != PTy)
    Casted = InsertNoopCastOfTo(Casted, PTy);
  Value *GEP = Builder.CreateGEP(OriginalElTy, Casted, GepIndices, "scevgep");
  Ops.push_back(SE.getUnknown(GEP));
  rememberInstruction(GEP);

  // Restore the original insert point.
  Builder.restoreIP(SaveInsertPt);

  return expand(SE.getAddExpr(Ops));
}

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  auto Pair = RelevantLoops.insert(std::make_pair(S, nullptr));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return nullptr;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI.getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return nullptr;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = nullptr;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (const SCEV *Op : N->operands())
      L = PickMostRelevantLoop(L, getRelevantLoop(Op), SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result = PickMostRelevantLoop(
        getRelevantLoop(D->getLHS()), getRelevantLoop(D->getRHS()), SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = nullptr;
  for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is a SCEVUnknown and not an instruction, peek
        // through it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = nullptr;
  for (const auto &I : OpsAndLoops) {
    const SCEV *Op = I.second;
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = expand(Op);
    } else if (Op->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
    } else {
      // A simple mul.
      Value *W = expandCodeFor(Op, Ty);
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      const APInt *RHS;
      if (match(W, m_Power2(RHS))) {
        // Canonicalize Prod*(1<<C) to Prod<<C.
        assert(!Ty->isVectorTy() && "vector types are not SCEVable");
        Prod = InsertBinop(Instruction::Shl, Prod,
                           ConstantInt::get(Ty, RHS->logBase2()));
      } else {
        Prod = InsertBinop(Instruction::Mul, Prod, W);
      }
    }
  }

  return Prod;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getAPInt();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
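/// For example, a Base of {p,+,4} becomes p, with {0,+,4} folded into Rest;
/// if what remains is an add expression, it is peeled down to its last
/// operand (typically the pointer) in the same way.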
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          A->getNoWrapFlags(SCEV::FlagNW)));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
           OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT.dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV != PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}

/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP. If the pattern isn't recognized, return nullptr.
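///
/// For example, for the increment
///   %iv.next = add i64 %iv, %step
/// this returns %iv, unless %step is an instruction that does not dominate
/// InsertPos, in which case it returns nullptr.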
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return nullptr;

  switch (IncV->getOpcode()) {
  default:
    return nullptr;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT.dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return nullptr;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (auto I = IncV->op_begin() + 1, E = IncV->op_end(); I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
        if (!SE.DT.dominates(OInst, InsertPos))
          return nullptr;
      }
      if (allowScale) {
        // Allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is already
      // handled, or some number of address-size elements (ugly). Ugly geps
      // have 2 operands. i1* is used by the expander to represent an
      // address-size element.
      if (IncV->getNumOperands() != 2)
        return nullptr;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return nullptr;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}

/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT.dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos) ||
      !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for(;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT.dominates(IncV, InsertPos))
      break;
  }
  for (auto I = IVIncs.rbegin(), E = IVIncs.rend(); I != E; ++I) {
    (*I)->moveBefore(InsertPos);
  }
  return true;
}

/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for(Instruction *IVOper = IncV;
      (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
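///
/// For a pointer-typed PHI the increment is emitted as a GEP; for an integer
/// PHI it is a plain add, or a sub when useSubtract is set (i.e. the caller
/// has already negated the step).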
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
    IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType()) {
      IncV = Builder.CreateBitCast(IncV, PN->getType());
      rememberInstruction(IncV);
    }
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
    rememberInstruction(IncV);
  }
  return IncV;
}

/// \brief Hoist the addrec instruction chain rooted in the loop phi above the
/// position. This routine assumes that this is possible (has been checked).
static void hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
                           Instruction *Pos, PHINode *LoopPhi) {
  do {
    if (DT->dominates(InstToHoist, Pos))
      break;
    // Make sure the increment is where we want it. But don't move it
    // down past a potential existing post-inc user.
    InstToHoist->moveBefore(Pos);
    Pos = InstToHoist;
    InstToHoist = cast<Instruction>(InstToHoist->getOperand(0));
  } while (InstToHoist != LoopPhi);
}

/// \brief Check whether we can cheaply express the requested SCEV in terms of
/// the available PHI SCEV by truncation and/or inversion of the step.
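///
/// For example, a requested i32 {0,+,1} can be obtained from an existing i64
/// {0,+,1} phi by truncation, and a requested {R,+,-1} can be obtained from a
/// {0,+,1} phi as R - {0,+,1} by inverting the step.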
static bool canBeCheaplyTransformed(ScalarEvolution &SE,
                                    const SCEVAddRecExpr *Phi,
                                    const SCEVAddRecExpr *Requested,
                                    bool &InvertStep) {
  Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
  Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());

  if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
    return false;

  // Try truncating it if necessary.
  Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
  if (!Phi)
    return false;

  // Check whether truncation will help.
  if (Phi == Requested) {
    InvertStep = false;
    return true;
  }

  // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
  if (SE.getAddExpr(Requested->getStart(),
                    SE.getNegativeSCEV(Requested)) == Phi) {
    InvertStep = true;
    return true;
  }

  return false;
}

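/// Check whether the increment of this addrec is known not to wrap in the
/// signed sense. The test widens to twice the bit width and asks
/// ScalarEvolution whether sext(AR + Step) equals sext(AR) + sext(Step);
/// equality in the wide type shows the narrow add cannot overflow.
/// IsIncrementNUW below is the analogous unsigned check using zext.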
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
                                            SE.getSignExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
                                            SE.getZeroExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
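///
/// The PHI produced (or reused) has, roughly, the shape
///   %iv = phi [ %start, %preheader ], [ %iv.next, %latch ]
///   %iv.next = add %iv, %step
/// with the increment emitted as a GEP instead when ExpandTy is a pointer
/// type.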
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy,
                                        Type *&TruncTy,
                                        bool &InvertStep) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    PHINode *AddRecPhiMatch = nullptr;
    Instruction *IncV = nullptr;
    TruncTy = nullptr;
    InvertStep = false;

    // Only try partially matching scevs that need truncation and/or
    // step-inversion if we know this loop is outside the current loop.
    bool TryNonMatchingSCEV =
        IVIncInsertLoop &&
        SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());

    for (auto &I : *L->getHeader()) {
      auto *PN = dyn_cast<PHINode>(&I);
      if (!PN || !SE.isSCEVable(PN->getType()))
        continue;

      const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PN));
      if (!PhiSCEV)
        continue;

      bool IsMatchingSCEV = PhiSCEV == Normalized;
      // We only handle truncation and inversion of phi recurrences for the
      // expanded expression if the expanded expression's loop dominates the
      // loop we insert to. Check now, so we can bail out early.
      if (!IsMatchingSCEV && !TryNonMatchingSCEV)
        continue;

      Instruction *TempIncV =
          cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));

      // Check whether we can reuse this PHI node.
      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(PN, TempIncV, L))
          continue;
        if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(PN, TempIncV, L))
          continue;
      }

      // Stop if we have found an exact match SCEV.
      if (IsMatchingSCEV) {
        IncV = TempIncV;
        TruncTy = nullptr;
        InvertStep = false;
        AddRecPhiMatch = PN;
        break;
      }

      // Try whether the phi can be translated into the requested form
      // (truncated and/or offset by a constant).
      if ((!TruncTy || InvertStep) &&
          canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
        // Record the phi node. But don't stop; we might find an exact match
        // later.
        AddRecPhiMatch = PN;
        IncV = TempIncV;
        TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
      }
    }

    if (AddRecPhiMatch) {
      // Potentially, move the increment. We have made sure in
      // isExpandedAddRecExprPHI or hoistIVInc that this is possible.
      if (L == IVIncInsertLoop)
        hoistBeforePos(&SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);

      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(AddRecPhiMatch);
      // Remember the increment.
      rememberInstruction(IncV);
      return AddRecPhiMatch;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BuilderType::InsertPointGuard Guard(Builder);

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header).  Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value.
  Value *StartV =
      expandCodeFor(Normalized->getStart(), ExpandTy, &L->getHeader()->front());

  // StartV must be hoisted into L's preheader to dominate the new phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
                                 L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that PHI
  // reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the increment
  // (unless it's a constant, because subtracts of constants are canonicalized
  // to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());

  // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
  // we actually do emit an addition.  It does not apply if we emit a
  // subtraction.
  bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
  bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
    // instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);

    if (isa<OverflowingBinaryOperator>(IncV)) {
      if (IncrementIsNUW)
        cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
      if (IncrementIsNSW)
        cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
    }
    PN->addIncoming(IncV, Pred);
  }

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized = cast<SCEVAddRecExpr>(TransformForPostIncUse(
        Normalize, S, nullptr, nullptr, Loops, SE, SE.DT));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = nullptr;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
   1266                        Normalized->getLoop(),
   1267                        Normalized->getNoWrapFlags(SCEV::FlagNW)));
   1268   }
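           // (Illustratively: a start %off that is not available above the loop turns
           // {%off,+,1}<L> into {0,+,1}<L> here, with %off re-added as PostLoopOffset
           // after the recurrence itself has been expanded.)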
   1269 
   1270   // Strip off any non-loop-dominating component from the addrec step.
   1271   const SCEV *Step = Normalized->getStepRecurrence(SE);
   1272   const SCEV *PostLoopScale = nullptr;
   1273   if (!SE.dominates(Step, L->getHeader())) {
   1274     PostLoopScale = Step;
   1275     Step = SE.getConstant(Normalized->getType(), 1);
   1276     Normalized =
   1277       cast<SCEVAddRecExpr>(SE.getAddRecExpr(
   1278                              Start, Step, Normalized->getLoop(),
   1279                              Normalized->getNoWrapFlags(SCEV::FlagNW)));
   1280   }
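           // (Similarly, {0,+,%s}<L> with a step %s that is not available above the
           // loop is expanded as {0,+,1}<L> and later multiplied by the PostLoopScale
           // %s.)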
   1281 
   1282   // Expand the core addrec. If we need post-loop scaling, force it to
   1283   // expand to an integer type to avoid the need for additional casting.
   1284   Type *ExpandTy = PostLoopScale ? IntTy : STy;
   1285   // In some cases, we decide to reuse an existing phi node but need to truncate
   1286   // it and/or invert the step.
   1287   Type *TruncTy = nullptr;
   1288   bool InvertStep = false;
   1289   PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy,
   1290                                           TruncTy, InvertStep);
   1291 
   1292   // Accommodate post-inc mode, if necessary.
   1293   Value *Result;
   1294   if (!PostIncLoops.count(L))
   1295     Result = PN;
   1296   else {
   1297     // In PostInc mode, use the post-incremented value.
   1298     BasicBlock *LatchBlock = L->getLoopLatch();
   1299     assert(LatchBlock && "PostInc mode requires a unique loop latch!");
   1300     Result = PN->getIncomingValueForBlock(LatchBlock);
   1301 
   1302     // For an expansion to use the postinc form, the client must call
   1303     // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
   1304     // or dominated by IVIncInsertPos.
   1305     if (isa<Instruction>(Result) &&
   1306         !SE.DT.dominates(cast<Instruction>(Result),
   1307                          &*Builder.GetInsertPoint())) {
   1308       // The induction variable's postinc expansion does not dominate this use.
   1309       // IVUsers tries to prevent this case, so it is rare. However, it can
   1310       // happen when an IVUser outside the loop is not dominated by the latch
   1311       // block. Adjusting IVIncInsertPos before expansion begins cannot handle
    1312       // all cases. Consider a phi outside the loop whose operand is replaced
    1313       // during expansion with the value of the postinc user. Without
    1314       // fundamentally changing the way postinc users are tracked, the only
    1315       // remedy is inserting an extra IV increment. StepV might fold into
    1316       // PostLoopOffset, but hopefully expandCodeFor handles that.
   1317       bool useSubtract =
   1318         !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
   1319       if (useSubtract)
   1320         Step = SE.getNegativeSCEV(Step);
   1321       Value *StepV;
   1322       {
   1323         // Expand the step somewhere that dominates the loop header.
   1324         BuilderType::InsertPointGuard Guard(Builder);
   1325         StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());
   1326       }
   1327       Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
   1328     }
   1329   }
   1330 
   1331   // We have decided to reuse an induction variable of a dominating loop. Apply
    1332   // truncation and/or inversion of the step.
   1333   if (TruncTy) {
   1334     Type *ResTy = Result->getType();
   1335     // Normalize the result type.
   1336     if (ResTy != SE.getEffectiveSCEVType(ResTy))
   1337       Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
   1338     // Truncate the result.
   1339     if (TruncTy != Result->getType()) {
   1340       Result = Builder.CreateTrunc(Result, TruncTy);
   1341       rememberInstruction(Result);
   1342     }
   1343     // Invert the result.
   1344     if (InvertStep) {
   1345       Result = Builder.CreateSub(expandCodeFor(Normalized->getStart(), TruncTy),
   1346                                  Result);
   1347       rememberInstruction(Result);
   1348     }
   1349   }
   1350 
   1351   // Re-apply any non-loop-dominating scale.
   1352   if (PostLoopScale) {
   1353     assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
   1354     Result = InsertNoopCastOfTo(Result, IntTy);
   1355     Result = Builder.CreateMul(Result,
   1356                                expandCodeFor(PostLoopScale, IntTy));
   1357     rememberInstruction(Result);
   1358   }
   1359 
   1360   // Re-apply any non-loop-dominating offset.
   1361   if (PostLoopOffset) {
   1362     if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
   1363       const SCEV *const OffsetArray[1] = { PostLoopOffset };
   1364       Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
   1365     } else {
   1366       Result = InsertNoopCastOfTo(Result, IntTy);
   1367       Result = Builder.CreateAdd(Result,
   1368                                  expandCodeFor(PostLoopOffset, IntTy));
   1369       rememberInstruction(Result);
   1370     }
   1371   }
   1372 
   1373   return Result;
   1374 }
   1375 
   1376 Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
   1377   if (!CanonicalMode) return expandAddRecExprLiterally(S);
   1378 
   1379   Type *Ty = SE.getEffectiveSCEVType(S->getType());
   1380   const Loop *L = S->getLoop();
   1381 
   1382   // First check for an existing canonical IV in a suitable type.
   1383   PHINode *CanonicalIV = nullptr;
   1384   if (PHINode *PN = L->getCanonicalInductionVariable())
   1385     if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
   1386       CanonicalIV = PN;
   1387 
   1388   // Rewrite an AddRec in terms of the canonical induction variable, if
    1389   // its type is narrower.
   1390   if (CanonicalIV &&
   1391       SE.getTypeSizeInBits(CanonicalIV->getType()) >
   1392       SE.getTypeSizeInBits(Ty)) {
   1393     SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
   1394     for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
   1395       NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
   1396     Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
   1397                                        S->getNoWrapFlags(SCEV::FlagNW)));
   1398     BasicBlock::iterator NewInsertPt =
   1399         findInsertPointAfter(cast<Instruction>(V), Builder.GetInsertBlock());
   1400     V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
   1401                       &*NewInsertPt);
   1402     return V;
   1403   }
   1404 
   1405   // {X,+,F} --> X + {0,+,F}
   1406   if (!S->getStart()->isZero()) {
   1407     SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
   1408     NewOps[0] = SE.getConstant(Ty, 0);
   1409     const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
   1410                                         S->getNoWrapFlags(SCEV::FlagNW));
   1411 
   1412     // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
   1413     // comments on expandAddToGEP for details.
   1414     const SCEV *Base = S->getStart();
   1415     const SCEV *RestArray[1] = { Rest };
   1416     // Dig into the expression to find the pointer base for a GEP.
   1417     ExposePointerBase(Base, RestArray[0], SE);
   1418     // If we found a pointer, expand the AddRec with a GEP.
   1419     if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
   1420       // Make sure the Base isn't something exotic, such as a multiplied
   1421       // or divided pointer value. In those cases, the result type isn't
   1422       // actually a pointer type.
   1423       if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
   1424         Value *StartV = expand(Base);
   1425         assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
   1426         return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
   1427       }
   1428     }
   1429 
   1430     // Just do a normal add. Pre-expand the operands to suppress folding.
   1431     return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
   1432                                 SE.getUnknown(expand(Rest))));
   1433   }
   1434 
   1435   // If we don't yet have a canonical IV, create one.
   1436   if (!CanonicalIV) {
   1437     // Create and insert the PHI node for the induction variable in the
   1438     // specified loop.
   1439     BasicBlock *Header = L->getHeader();
   1440     pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
   1441     CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
   1442                                   &Header->front());
   1443     rememberInstruction(CanonicalIV);
   1444 
   1445     SmallSet<BasicBlock *, 4> PredSeen;
   1446     Constant *One = ConstantInt::get(Ty, 1);
   1447     for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
   1448       BasicBlock *HP = *HPI;
   1449       if (!PredSeen.insert(HP).second) {
   1450         // There must be an incoming value for each predecessor, even the
   1451         // duplicates!
   1452         CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
   1453         continue;
   1454       }
   1455 
   1456       if (L->contains(HP)) {
   1457         // Insert a unit add instruction right before the terminator
   1458         // corresponding to the back-edge.
   1459         Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
   1460                                                      "indvar.next",
   1461                                                      HP->getTerminator());
   1462         Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
   1463         rememberInstruction(Add);
   1464         CanonicalIV->addIncoming(Add, HP);
   1465       } else {
   1466         CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
   1467       }
   1468     }
   1469   }
   1470 
   1471   // {0,+,1} --> Insert a canonical induction variable into the loop!
   1472   if (S->isAffine() && S->getOperand(1)->isOne()) {
   1473     assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
   1474            "IVs with types different from the canonical IV should "
   1475            "already have been handled!");
   1476     return CanonicalIV;
   1477   }
   1478 
   1479   // {0,+,F} --> {0,+,1} * F
   1480 
   1481   // If this is a simple linear addrec, emit it now as a special case.
   1482   if (S->isAffine())    // {0,+,F} --> i*F
   1483     return
   1484       expand(SE.getTruncateOrNoop(
   1485         SE.getMulExpr(SE.getUnknown(CanonicalIV),
   1486                       SE.getNoopOrAnyExtend(S->getOperand(1),
   1487                                             CanonicalIV->getType())),
   1488         Ty));
   1489 
   1490   // If this is a chain of recurrences, turn it into a closed form, using the
   1491   // folders, then expandCodeFor the closed form.  This allows the folders to
   1492   // simplify the expression without having to build a bunch of special code
   1493   // into this folder.
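           // (For instance, {0,+,1,+,1}<L> evaluated at the symbolic iteration i folds
           // to i + i*(i-1)/2 via the binomial expansion of the recurrence.)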
   1494   const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.
   1495 
   1496   // Promote S up to the canonical IV type, if the cast is foldable.
   1497   const SCEV *NewS = S;
   1498   const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
   1499   if (isa<SCEVAddRecExpr>(Ext))
   1500     NewS = Ext;
   1501 
   1502   const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
   1504 
   1505   // Truncate the result down to the original type, if needed.
   1506   const SCEV *T = SE.getTruncateOrNoop(V, Ty);
   1507   return expand(T);
   1508 }
   1509 
   1510 Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
   1511   Type *Ty = SE.getEffectiveSCEVType(S->getType());
   1512   Value *V = expandCodeFor(S->getOperand(),
   1513                            SE.getEffectiveSCEVType(S->getOperand()->getType()));
   1514   Value *I = Builder.CreateTrunc(V, Ty);
   1515   rememberInstruction(I);
   1516   return I;
   1517 }
   1518 
   1519 Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
   1520   Type *Ty = SE.getEffectiveSCEVType(S->getType());
   1521   Value *V = expandCodeFor(S->getOperand(),
   1522                            SE.getEffectiveSCEVType(S->getOperand()->getType()));
   1523   Value *I = Builder.CreateZExt(V, Ty);
   1524   rememberInstruction(I);
   1525   return I;
   1526 }
   1527 
   1528 Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
   1529   Type *Ty = SE.getEffectiveSCEVType(S->getType());
   1530   Value *V = expandCodeFor(S->getOperand(),
   1531                            SE.getEffectiveSCEVType(S->getOperand()->getType()));
   1532   Value *I = Builder.CreateSExt(V, Ty);
   1533   rememberInstruction(I);
   1534   return I;
   1535 }
   1536 
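           /// Expand an smax as a chain of icmp-sgt + select pairs. For example,
           /// smax(a, b, c) becomes select(t >s a, t, a) with t = select(c >s b, c, b),
           /// where the chain is built starting from the last operand.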
   1537 Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
   1538   Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
   1539   Type *Ty = LHS->getType();
   1540   for (int i = S->getNumOperands()-2; i >= 0; --i) {
   1541     // In the case of mixed integer and pointer types, do the
   1542     // rest of the comparisons as integer.
   1543     if (S->getOperand(i)->getType() != Ty) {
   1544       Ty = SE.getEffectiveSCEVType(Ty);
   1545       LHS = InsertNoopCastOfTo(LHS, Ty);
   1546     }
   1547     Value *RHS = expandCodeFor(S->getOperand(i), Ty);
   1548     Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
   1549     rememberInstruction(ICmp);
   1550     Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
   1551     rememberInstruction(Sel);
   1552     LHS = Sel;
   1553   }
   1554   // In the case of mixed integer and pointer types, cast the
   1555   // final result back to the pointer type.
   1556   if (LHS->getType() != S->getType())
   1557     LHS = InsertNoopCastOfTo(LHS, S->getType());
   1558   return LHS;
   1559 }
   1560 
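           /// Expand a umax the same way as smax above, but with unsigned
           /// comparisons (icmp ugt).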
   1561 Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
   1562   Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
   1563   Type *Ty = LHS->getType();
   1564   for (int i = S->getNumOperands()-2; i >= 0; --i) {
   1565     // In the case of mixed integer and pointer types, do the
   1566     // rest of the comparisons as integer.
   1567     if (S->getOperand(i)->getType() != Ty) {
   1568       Ty = SE.getEffectiveSCEVType(Ty);
   1569       LHS = InsertNoopCastOfTo(LHS, Ty);
   1570     }
   1571     Value *RHS = expandCodeFor(S->getOperand(i), Ty);
   1572     Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
   1573     rememberInstruction(ICmp);
   1574     Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
   1575     rememberInstruction(Sel);
   1576     LHS = Sel;
   1577   }
   1578   // In the case of mixed integer and pointer types, cast the
   1579   // final result back to the pointer type.
   1580   if (LHS->getType() != S->getType())
   1581     LHS = InsertNoopCastOfTo(LHS, S->getType());
   1582   return LHS;
   1583 }
   1584 
   1585 Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
   1586                                    Instruction *IP) {
   1587   assert(IP);
   1588   Builder.SetInsertPoint(IP);
   1589   return expandCodeFor(SH, Ty);
   1590 }
   1591 
   1592 Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
   1593   // Expand the code for this SCEV.
   1594   Value *V = expand(SH);
   1595   if (Ty) {
   1596     assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
   1597            "non-trivial casts should be done with the SCEVs directly!");
   1598     V = InsertNoopCastOfTo(V, Ty);
   1599   }
   1600   return V;
   1601 }
   1602 
   1603 Value *SCEVExpander::expand(const SCEV *S) {
   1604   // Compute an insertion point for this SCEV object. Hoist the instructions
   1605   // as far out in the loop nest as possible.
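           // (E.g. an expression invariant in a whole loop nest is emitted once in the
           // preheader of the outermost loop in which it is still invariant, so every
           // enclosed loop can reuse it.)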
   1606   Instruction *InsertPt = &*Builder.GetInsertPoint();
   1607   for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
   1608        L = L->getParentLoop())
   1609     if (SE.isLoopInvariant(S, L)) {
   1610       if (!L) break;
   1611       if (BasicBlock *Preheader = L->getLoopPreheader())
   1612         InsertPt = Preheader->getTerminator();
   1613       else {
   1614         // LSR sets the insertion point for AddRec start/step values to the
   1615         // block start to simplify value reuse, even though it's an invalid
   1616         // position. SCEVExpander must correct for this in all cases.
   1617         InsertPt = &*L->getHeader()->getFirstInsertionPt();
   1618       }
   1619     } else {
   1620       // If the SCEV is computable at this level, insert it into the header
   1621       // after the PHIs (and after any other instructions that we've inserted
   1622       // there) so that it is guaranteed to dominate any user inside the loop.
   1623       if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
   1624         InsertPt = &*L->getHeader()->getFirstInsertionPt();
   1625       while (InsertPt != Builder.GetInsertPoint()
   1626              && (isInsertedInstruction(InsertPt)
   1627                  || isa<DbgInfoIntrinsic>(InsertPt))) {
   1628         InsertPt = &*std::next(InsertPt->getIterator());
   1629       }
   1630       break;
   1631     }
   1632 
   1633   // Check to see if we already expanded this here.
   1634   auto I = InsertedExpressions.find(std::make_pair(S, InsertPt));
   1635   if (I != InsertedExpressions.end())
   1636     return I->second;
   1637 
   1638   BuilderType::InsertPointGuard Guard(Builder);
   1639   Builder.SetInsertPoint(InsertPt);
   1640 
   1641   // Expand the expression into instructions.
   1642   Value *V = visit(S);
   1643 
   1644   // Remember the expanded value for this SCEV at this location.
   1645   //
   1646   // This is independent of PostIncLoops. The mapped value simply materializes
   1647   // the expression at this insertion point. If the mapped value happened to be
   1648   // a postinc expansion, it could be reused by a non-postinc user, but only if
   1649   // its insertion point was already at the head of the loop.
   1650   InsertedExpressions[std::make_pair(S, InsertPt)] = V;
   1651   return V;
   1652 }
   1653 
   1654 void SCEVExpander::rememberInstruction(Value *I) {
   1655   if (!PostIncLoops.empty())
   1656     InsertedPostIncValues.insert(I);
   1657   else
   1658     InsertedValues.insert(I);
   1659 }
   1660 
   1661 /// getOrInsertCanonicalInductionVariable - This method returns the
   1662 /// canonical induction variable of the specified type for the specified
   1663 /// loop (inserting one if there is none).  A canonical induction variable
   1664 /// starts at zero and steps by one on each iteration.
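           ///
           /// For example, for an i64 request this typically materializes
           ///   %indvar = phi i64 [ 0, %preheader ], [ %indvar.next, %latch ]
           ///   %indvar.next = add i64 %indvar, 1
           /// (block names here are illustrative; the "indvar" names follow the
           /// convention used by visitAddRecExpr above).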
   1665 PHINode *
   1666 SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
   1667                                                     Type *Ty) {
   1668   assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");
   1669 
   1670   // Build a SCEV for {0,+,1}<L>.
   1671   // Conservatively use FlagAnyWrap for now.
   1672   const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
   1673                                    SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);
   1674 
   1675   // Emit code for it.
   1676   BuilderType::InsertPointGuard Guard(Builder);
   1677   PHINode *V =
   1678       cast<PHINode>(expandCodeFor(H, nullptr, &L->getHeader()->front()));
   1679 
   1680   return V;
   1681 }
   1682 
   1683 /// replaceCongruentIVs - Check for congruent phis in this loop header and
   1684 /// replace them with their most canonical representative. Return the number of
   1685 /// phis eliminated.
   1686 ///
   1687 /// This does not depend on any SCEVExpander state but should be used in
   1688 /// the same context that SCEVExpander is used.
   1689 unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
   1690                                            SmallVectorImpl<WeakVH> &DeadInsts,
   1691                                            const TargetTransformInfo *TTI) {
   1692   // Find integer phis in order of increasing width.
   1693   SmallVector<PHINode*, 8> Phis;
   1694   for (auto &I : *L->getHeader()) {
   1695     if (auto *PN = dyn_cast<PHINode>(&I))
   1696       Phis.push_back(PN);
   1697     else
   1698       break;
   1699   }
   1700 
   1701   if (TTI)
   1702     std::sort(Phis.begin(), Phis.end(), [](Value *LHS, Value *RHS) {
   1703       // Put pointers at the back and make sure pointer < pointer = false.
   1704       if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
   1705         return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
   1706       return RHS->getType()->getPrimitiveSizeInBits() <
   1707              LHS->getType()->getPrimitiveSizeInBits();
   1708     });
   1709 
   1710   unsigned NumElim = 0;
   1711   DenseMap<const SCEV *, PHINode *> ExprToIVMap;
   1712   // Process phis from wide to narrow. Map wide phis to their truncation
   1713   // so narrow phis can reuse them.
   1714   for (PHINode *Phi : Phis) {
   1715     auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
   1716       if (Value *V = SimplifyInstruction(PN, DL, &SE.TLI, &SE.DT, &SE.AC))
   1717         return V;
   1718       if (!SE.isSCEVable(PN->getType()))
   1719         return nullptr;
   1720       auto *Const = dyn_cast<SCEVConstant>(SE.getSCEV(PN));
   1721       if (!Const)
   1722         return nullptr;
   1723       return Const->getValue();
   1724     };
   1725 
   1726     // Fold constant phis. They may be congruent to other constant phis and
   1727     // would confuse the logic below that expects proper IVs.
   1728     if (Value *V = SimplifyPHINode(Phi)) {
   1729       if (V->getType() != Phi->getType())
   1730         continue;
   1731       Phi->replaceAllUsesWith(V);
   1732       DeadInsts.emplace_back(Phi);
   1733       ++NumElim;
   1734       DEBUG_WITH_TYPE(DebugType, dbgs()
   1735                       << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
   1736       continue;
   1737     }
   1738 
   1739     if (!SE.isSCEVable(Phi->getType()))
   1740       continue;
   1741 
   1742     PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
   1743     if (!OrigPhiRef) {
   1744       OrigPhiRef = Phi;
   1745       if (Phi->getType()->isIntegerTy() && TTI
   1746           && TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
   1747         // This phi can be freely truncated to the narrowest phi type. Map the
   1748         // truncated expression to it so it will be reused for narrow types.
   1749         const SCEV *TruncExpr =
   1750           SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
   1751         ExprToIVMap[TruncExpr] = Phi;
   1752       }
   1753       continue;
   1754     }
   1755 
   1756     // Replacing a pointer phi with an integer phi or vice-versa doesn't make
   1757     // sense.
   1758     if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
   1759       continue;
   1760 
   1761     if (BasicBlock *LatchBlock = L->getLoopLatch()) {
   1762       Instruction *OrigInc =
   1763         cast<Instruction>(OrigPhiRef->getIncomingValueForBlock(LatchBlock));
   1764       Instruction *IsomorphicInc =
   1765         cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
   1766 
   1767       // If this phi has the same width but is more canonical, replace the
   1768       // original with it. As part of the "more canonical" determination,
   1769       // respect a prior decision to use an IV chain.
   1770       if (OrigPhiRef->getType() == Phi->getType()
   1771           && !(ChainedPhis.count(Phi)
   1772                || isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L))
   1773           && (ChainedPhis.count(Phi)
   1774               || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
   1775         std::swap(OrigPhiRef, Phi);
   1776         std::swap(OrigInc, IsomorphicInc);
   1777       }
   1778       // Replacing the congruent phi is sufficient because acyclic redundancy
   1779       // elimination, CSE/GVN, should handle the rest. However, once SCEV proves
   1780       // that a phi is congruent, it's often the head of an IV user cycle that
   1781       // is isomorphic with the original phi. It's worth eagerly cleaning up the
   1782       // common case of a single IV increment so that DeleteDeadPHIs can remove
   1783       // cycles that had postinc uses.
   1784       const SCEV *TruncExpr = SE.getTruncateOrNoop(SE.getSCEV(OrigInc),
   1785                                                    IsomorphicInc->getType());
   1786       if (OrigInc != IsomorphicInc
   1787           && TruncExpr == SE.getSCEV(IsomorphicInc)
   1788           && ((isa<PHINode>(OrigInc) && isa<PHINode>(IsomorphicInc))
   1789               || hoistIVInc(OrigInc, IsomorphicInc))) {
   1790         DEBUG_WITH_TYPE(DebugType, dbgs()
   1791                         << "INDVARS: Eliminated congruent iv.inc: "
   1792                         << *IsomorphicInc << '\n');
   1793         Value *NewInc = OrigInc;
   1794         if (OrigInc->getType() != IsomorphicInc->getType()) {
   1795           Instruction *IP = nullptr;
   1796           if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
   1797             IP = &*PN->getParent()->getFirstInsertionPt();
   1798           else
   1799             IP = OrigInc->getNextNode();
   1800 
   1801           IRBuilder<> Builder(IP);
   1802           Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
   1803           NewInc = Builder.
   1804             CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName);
   1805         }
   1806         IsomorphicInc->replaceAllUsesWith(NewInc);
   1807         DeadInsts.emplace_back(IsomorphicInc);
   1808       }
   1809     }
   1810     DEBUG_WITH_TYPE(DebugType, dbgs()
   1811                     << "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
   1812     ++NumElim;
   1813     Value *NewIV = OrigPhiRef;
   1814     if (OrigPhiRef->getType() != Phi->getType()) {
   1815       IRBuilder<> Builder(&*L->getHeader()->getFirstInsertionPt());
   1816       Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
   1817       NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
   1818     }
   1819     Phi->replaceAllUsesWith(NewIV);
   1820     DeadInsts.emplace_back(Phi);
   1821   }
   1822   return NumElim;
   1823 }
   1824 
   1825 Value *SCEVExpander::findExistingExpansion(const SCEV *S,
   1826                                            const Instruction *At, Loop *L) {
   1827   using namespace llvm::PatternMatch;
   1828 
   1829   SmallVector<BasicBlock *, 4> ExitingBlocks;
   1830   L->getExitingBlocks(ExitingBlocks);
   1831 
    1832   // Look for a suitable value in simple conditions at the loop exits.
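           // (E.g. an exit like "br i1 (icmp ult i64 %iv.next, %len), %loop, %exit"
           // lets us reuse %iv.next or %len, provided the operand is an instruction
           // whose SCEV equals S and which dominates At.)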
   1833   for (BasicBlock *BB : ExitingBlocks) {
   1834     ICmpInst::Predicate Pred;
   1835     Instruction *LHS, *RHS;
   1836     BasicBlock *TrueBB, *FalseBB;
   1837 
   1838     if (!match(BB->getTerminator(),
   1839                m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)),
   1840                     TrueBB, FalseBB)))
   1841       continue;
   1842 
   1843     if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
   1844       return LHS;
   1845 
   1846     if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
   1847       return RHS;
   1848   }
   1849 
   1850   // There is potential to make this significantly smarter, but this simple
   1851   // heuristic already gets some interesting cases.
   1852 
    1853   // Cannot find a suitable value.
   1854   return nullptr;
   1855 }
   1856 
   1857 bool SCEVExpander::isHighCostExpansionHelper(
   1858     const SCEV *S, Loop *L, const Instruction *At,
   1859     SmallPtrSetImpl<const SCEV *> &Processed) {
   1860 
    1861   // If we can find an existing value for this SCEV available at the point "At",
   1862   // then consider the expression cheap.
   1863   if (At && findExistingExpansion(S, At, L) != nullptr)
   1864     return false;
   1865 
   1866   // Zero/One operand expressions
   1867   switch (S->getSCEVType()) {
   1868   case scUnknown:
   1869   case scConstant:
   1870     return false;
   1871   case scTruncate:
   1872     return isHighCostExpansionHelper(cast<SCEVTruncateExpr>(S)->getOperand(),
   1873                                      L, At, Processed);
   1874   case scZeroExtend:
   1875     return isHighCostExpansionHelper(cast<SCEVZeroExtendExpr>(S)->getOperand(),
   1876                                      L, At, Processed);
   1877   case scSignExtend:
   1878     return isHighCostExpansionHelper(cast<SCEVSignExtendExpr>(S)->getOperand(),
   1879                                      L, At, Processed);
   1880   }
   1881 
   1882   if (!Processed.insert(S).second)
   1883     return false;
   1884 
   1885   if (auto *UDivExpr = dyn_cast<SCEVUDivExpr>(S)) {
   1886     // If the divisor is a power of two and the SCEV type fits in a native
   1887     // integer, consider the division cheap irrespective of whether it occurs in
   1888     // the user code since it can be lowered into a right shift.
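             // (For example, (%n /u 8) lowers to (lshr %n, 3); we only report the
             // division as expensive when its type is not a legal integer type for
             // the target.)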
   1889     if (auto *SC = dyn_cast<SCEVConstant>(UDivExpr->getRHS()))
   1890       if (SC->getAPInt().isPowerOf2()) {
   1891         const DataLayout &DL =
   1892             L->getHeader()->getParent()->getParent()->getDataLayout();
   1893         unsigned Width = cast<IntegerType>(UDivExpr->getType())->getBitWidth();
   1894         return DL.isIllegalInteger(Width);
   1895       }
   1896 
   1897     // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
   1898     // HowManyLessThans produced to compute a precise expression, rather than a
   1899     // UDiv from the user's code. If we can't find a UDiv in the code with some
    1900     // simple searching, assume the former and consider UDivExpr expensive to
   1901     // compute.
   1902     BasicBlock *ExitingBB = L->getExitingBlock();
   1903     if (!ExitingBB)
   1904       return true;
   1905 
    1906     // At the beginning of this function we already tried to find an existing
    1907     // value for plain 'S'. Now try to look up 'S + 1', since it is a common
    1908     // pattern involving division. This is just a simple search heuristic.
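             // (A typical hit: with a backedge-taken count of (%n /u 4), the IR often
             // already contains the trip count ((%n /u 4) + 1) feeding an exit compare.)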
   1909     if (!At)
   1910       At = &ExitingBB->back();
   1911     if (!findExistingExpansion(
   1912             SE.getAddExpr(S, SE.getConstant(S->getType(), 1)), At, L))
   1913       return true;
   1914   }
   1915 
   1916   // HowManyLessThans uses a Max expression whenever the loop is not guarded by
   1917   // the exit condition.
   1918   if (isa<SCEVSMaxExpr>(S) || isa<SCEVUMaxExpr>(S))
   1919     return true;
   1920 
    1921   // Recurse past n-ary expressions, which commonly occur in the
    1922   // BackedgeTakenCount. They may already exist in program code, and if not,
    1923   // they are not too expensive to rematerialize.
   1924   if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(S)) {
   1925     for (auto *Op : NAry->operands())
   1926       if (isHighCostExpansionHelper(Op, L, At, Processed))
   1927         return true;
   1928   }
   1929 
   1930   // If we haven't recognized an expensive SCEV pattern, assume it's an
   1931   // expression produced by program code.
   1932   return false;
   1933 }
   1934 
   1935 Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
   1936                                             Instruction *IP) {
   1937   assert(IP);
   1938   switch (Pred->getKind()) {
   1939   case SCEVPredicate::P_Union:
   1940     return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
   1941   case SCEVPredicate::P_Equal:
   1942     return expandEqualPredicate(cast<SCEVEqualPredicate>(Pred), IP);
   1943   }
   1944   llvm_unreachable("Unknown SCEV predicate type");
   1945 }
   1946 
   1947 Value *SCEVExpander::expandEqualPredicate(const SCEVEqualPredicate *Pred,
   1948                                           Instruction *IP) {
   1949   Value *Expr0 = expandCodeFor(Pred->getLHS(), Pred->getLHS()->getType(), IP);
   1950   Value *Expr1 = expandCodeFor(Pred->getRHS(), Pred->getRHS()->getType(), IP);
   1951 
   1952   Builder.SetInsertPoint(IP);
   1953   auto *I = Builder.CreateICmpNE(Expr0, Expr1, "ident.check");
   1954   return I;
   1955 }
   1956 
   1957 Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union,
   1958                                           Instruction *IP) {
   1959   auto *BoolType = IntegerType::get(IP->getContext(), 1);
   1960   Value *Check = ConstantInt::getNullValue(BoolType);
   1961 
   1962   // Loop over all checks in this set.
   1963   for (auto Pred : Union->getPredicates()) {
   1964     auto *NextCheck = expandCodeForPredicate(Pred, IP);
   1965     Builder.SetInsertPoint(IP);
   1966     Check = Builder.CreateOr(Check, NextCheck);
   1967   }
   1968 
   1969   return Check;
   1970 }
   1971 
   1972 namespace {
   1973 // Search for a SCEV subexpression that is not safe to expand.  Any expression
   1974 // that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
   1975 // UDiv expressions. We don't know if the UDiv is derived from an IR divide
   1976 // instruction, but the important thing is that we prove the denominator is
   1977 // nonzero before expansion.
   1978 //
   1979 // IVUsers already checks that IV-derived expressions are safe. So this check is
   1980 // only needed when the expression includes some subexpression that is not IV
   1981 // derived.
   1982 //
   1983 // Currently, we only allow division by a nonzero constant here. If this is
   1984 // inadequate, we could easily allow division by SCEVUnknown by using
   1985 // ValueTracking to check isKnownNonZero().
   1986 //
   1987 // We cannot generally expand recurrences unless the step dominates the loop
   1988 // header. The expander handles the special case of affine recurrences by
   1989 // scaling the recurrence outside the loop, but this technique isn't generally
   1990 // applicable. Expanding a nested recurrence outside a loop requires computing
   1991 // binomial coefficients. This could be done, but the recurrence has to be in a
   1992 // perfectly reduced form, which can't be guaranteed.
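           // (Concretely, per follow() below: (%a /u %b) is flagged unsafe unless %b
           // is a nonzero constant, and a non-affine recurrence is flagged whenever
           // its step is not available at its loop's header.)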
   1993 struct SCEVFindUnsafe {
   1994   ScalarEvolution &SE;
   1995   bool IsUnsafe;
   1996 
   1997   SCEVFindUnsafe(ScalarEvolution &se): SE(se), IsUnsafe(false) {}
   1998 
   1999   bool follow(const SCEV *S) {
   2000     if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
   2001       const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
   2002       if (!SC || SC->getValue()->isZero()) {
   2003         IsUnsafe = true;
   2004         return false;
   2005       }
   2006     }
   2007     if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
   2008       const SCEV *Step = AR->getStepRecurrence(SE);
   2009       if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
   2010         IsUnsafe = true;
   2011         return false;
   2012       }
   2013     }
   2014     return true;
   2015   }
   2016   bool isDone() const { return IsUnsafe; }
   2017 };
   2018 }
   2019 
   2020 namespace llvm {
   2021 bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) {
   2022   SCEVFindUnsafe Search(SE);
   2023   visitAll(S, Search);
   2024   return !Search.IsUnsafe;
   2025 }
   2026 }
   2027