      1 //===-- LoopIdiomRecognize.cpp - Loop idiom recognition -------------------===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This pass implements an idiom recognizer that transforms simple loops into a
      11 // non-loop form.  In cases where this kicks in, it can be a significant
     12 // performance win.
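         //
         // For example (an illustrative sketch, not code taken from this file),
         // loops of the form
         //   for (i = 0; i != n; ++i) A[i] = 0;
         //   for (i = 0; i != n; ++i) A[i] = B[i];
         // are rewritten into single library calls:
         //   memset(A, 0, n * sizeof(*A));
         //   memcpy(A, B, n * sizeof(*A));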
     13 //
     14 //===----------------------------------------------------------------------===//
     15 //
     16 // TODO List:
     17 //
     18 // Future loop memory idioms to recognize:
     19 //   memcmp, memmove, strlen, etc.
     20 // Future floating point idioms to recognize in -ffast-math mode:
     21 //   fpowi
     22 // Future integer operation idioms to recognize:
     23 //   ctpop, ctlz, cttz
     24 //
     25 // Beware that isel's default lowering for ctpop is highly inefficient for
     26 // i64 and larger types when i64 is legal and the value has few bits set.  It
     27 // would be good to enhance isel to emit a loop for ctpop in this case.
     28 //
     29 // We should enhance the memset/memcpy recognition to handle multiple stores in
     30 // the loop.  This would handle things like:
     31 //   void foo(_Complex float *P)
     32 //     for (i) { __real__(*P) = 0;  __imag__(*P) = 0; }
     33 //
     34 // This could recognize common matrix multiplies and dot product idioms and
     35 // replace them with calls to BLAS (if linked in??).
     36 //
     37 //===----------------------------------------------------------------------===//
     38 
     39 #include "llvm/Transforms/Scalar.h"
     40 #include "llvm/ADT/Statistic.h"
     41 #include "llvm/Analysis/AliasAnalysis.h"
     42 #include "llvm/Analysis/BasicAliasAnalysis.h"
     43 #include "llvm/Analysis/GlobalsModRef.h"
     44 #include "llvm/Analysis/LoopPass.h"
     45 #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
     46 #include "llvm/Analysis/ScalarEvolutionExpander.h"
     47 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
     48 #include "llvm/Analysis/TargetLibraryInfo.h"
     49 #include "llvm/Analysis/TargetTransformInfo.h"
     50 #include "llvm/Analysis/ValueTracking.h"
     51 #include "llvm/IR/DataLayout.h"
     52 #include "llvm/IR/Dominators.h"
     53 #include "llvm/IR/IRBuilder.h"
     54 #include "llvm/IR/IntrinsicInst.h"
     55 #include "llvm/IR/Module.h"
     56 #include "llvm/Support/Debug.h"
     57 #include "llvm/Support/raw_ostream.h"
     58 #include "llvm/Transforms/Utils/Local.h"
     59 using namespace llvm;
     60 
     61 #define DEBUG_TYPE "loop-idiom"
     62 
     63 STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
     64 STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");
     65 
     66 namespace {
     67 
     68 class LoopIdiomRecognize : public LoopPass {
     69   Loop *CurLoop;
     70   AliasAnalysis *AA;
     71   DominatorTree *DT;
     72   LoopInfo *LI;
     73   ScalarEvolution *SE;
     74   TargetLibraryInfo *TLI;
     75   const TargetTransformInfo *TTI;
     76   const DataLayout *DL;
     77 
     78 public:
     79   static char ID;
     80   explicit LoopIdiomRecognize() : LoopPass(ID) {
     81     initializeLoopIdiomRecognizePass(*PassRegistry::getPassRegistry());
     82   }
     83 
     84   bool runOnLoop(Loop *L, LPPassManager &LPM) override;
     85 
     86   /// This transformation requires natural loop information & requires that
     87   /// loop preheaders be inserted into the CFG.
     88   ///
     89   void getAnalysisUsage(AnalysisUsage &AU) const override {
     90     AU.addRequired<LoopInfoWrapperPass>();
     91     AU.addPreserved<LoopInfoWrapperPass>();
     92     AU.addRequiredID(LoopSimplifyID);
     93     AU.addPreservedID(LoopSimplifyID);
     94     AU.addRequiredID(LCSSAID);
     95     AU.addPreservedID(LCSSAID);
     96     AU.addRequired<AAResultsWrapperPass>();
     97     AU.addPreserved<AAResultsWrapperPass>();
     98     AU.addRequired<ScalarEvolutionWrapperPass>();
     99     AU.addPreserved<ScalarEvolutionWrapperPass>();
    100     AU.addPreserved<SCEVAAWrapperPass>();
    101     AU.addRequired<DominatorTreeWrapperPass>();
    102     AU.addPreserved<DominatorTreeWrapperPass>();
    103     AU.addRequired<TargetLibraryInfoWrapperPass>();
    104     AU.addRequired<TargetTransformInfoWrapperPass>();
    105     AU.addPreserved<BasicAAWrapperPass>();
    106     AU.addPreserved<GlobalsAAWrapperPass>();
    107   }
    108 
    109 private:
    110   typedef SmallVector<StoreInst *, 8> StoreList;
    111   StoreList StoreRefs;
    112 
    113   /// \name Countable Loop Idiom Handling
    114   /// @{
    115 
    116   bool runOnCountableLoop();
    117   bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
    118                       SmallVectorImpl<BasicBlock *> &ExitBlocks);
    119 
    120   void collectStores(BasicBlock *BB);
    121   bool isLegalStore(StoreInst *SI);
    122   bool processLoopStore(StoreInst *SI, const SCEV *BECount);
    123   bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);
    124 
    125   bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
    126                                unsigned StoreAlignment, Value *SplatValue,
    127                                Instruction *TheStore, const SCEVAddRecExpr *Ev,
    128                                const SCEV *BECount, bool NegStride);
    129   bool processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
    130                                   const SCEVAddRecExpr *StoreEv,
    131                                   const SCEV *BECount, bool NegStride);
    132 
    133   /// @}
    134   /// \name Noncountable Loop Idiom Handling
    135   /// @{
    136 
    137   bool runOnNoncountableLoop();
    138 
    139   bool recognizePopcount();
    140   void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst,
    141                                PHINode *CntPhi, Value *Var);
    142 
    143   /// @}
    144 };
    145 
    146 } // End anonymous namespace.
    147 
    148 char LoopIdiomRecognize::ID = 0;
    149 INITIALIZE_PASS_BEGIN(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms",
    150                       false, false)
    151 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
    152 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
    153 INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
    154 INITIALIZE_PASS_DEPENDENCY(LCSSA)
    155 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
    156 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
    157 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
    158 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
    159 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
    160 INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
    161 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
    162 INITIALIZE_PASS_END(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms",
    163                     false, false)
    164 
    165 Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognize(); }
    166 
     167 /// deleteDeadInstruction - Delete this instruction.  Before we do, remember
     168 /// its operands.  If any of them become trivially dead once the instruction
     169 /// is erased, delete them and the computation trees that feed them.
    170 ///
    171 static void deleteDeadInstruction(Instruction *I,
    172                                   const TargetLibraryInfo *TLI) {
    173   SmallVector<Value *, 16> Operands(I->value_op_begin(), I->value_op_end());
    174   I->replaceAllUsesWith(UndefValue::get(I->getType()));
    175   I->eraseFromParent();
    176   for (Value *Op : Operands)
    177     RecursivelyDeleteTriviallyDeadInstructions(Op, TLI);
    178 }
    179 
    180 //===----------------------------------------------------------------------===//
    181 //
    182 //          Implementation of LoopIdiomRecognize
    183 //
    184 //===----------------------------------------------------------------------===//
    185 
    186 bool LoopIdiomRecognize::runOnLoop(Loop *L, LPPassManager &LPM) {
    187   if (skipOptnoneFunction(L))
    188     return false;
    189 
    190   CurLoop = L;
    191   // If the loop could not be converted to canonical form, it must have an
     192   // indirectbr in it; just give up.
    193   if (!L->getLoopPreheader())
    194     return false;
    195 
    196   // Disable loop idiom recognition if the function's name is a common idiom.
    197   StringRef Name = L->getHeader()->getParent()->getName();
    198   if (Name == "memset" || Name == "memcpy")
    199     return false;
    200 
    201   AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    202   DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    203   LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    204   SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    205   TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    206   TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
    207       *CurLoop->getHeader()->getParent());
    208   DL = &CurLoop->getHeader()->getModule()->getDataLayout();
    209 
    210   if (SE->hasLoopInvariantBackedgeTakenCount(L))
    211     return runOnCountableLoop();
    212 
    213   return runOnNoncountableLoop();
    214 }
    215 
    216 bool LoopIdiomRecognize::runOnCountableLoop() {
    217   const SCEV *BECount = SE->getBackedgeTakenCount(CurLoop);
    218   assert(!isa<SCEVCouldNotCompute>(BECount) &&
     219          "runOnCountableLoop() called on a loop without a predictable "
    220          "backedge-taken count");
    221 
    222   // If this loop executes exactly one time, then it should be peeled, not
    223   // optimized by this pass.
    224   if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    225     if (BECst->getAPInt() == 0)
    226       return false;
    227 
    228   SmallVector<BasicBlock *, 8> ExitBlocks;
    229   CurLoop->getUniqueExitBlocks(ExitBlocks);
    230 
    231   DEBUG(dbgs() << "loop-idiom Scanning: F["
    232                << CurLoop->getHeader()->getParent()->getName() << "] Loop %"
    233                << CurLoop->getHeader()->getName() << "\n");
    234 
    235   bool MadeChange = false;
    236   // Scan all the blocks in the loop that are not in subloops.
    237   for (auto *BB : CurLoop->getBlocks()) {
    238     // Ignore blocks in subloops.
    239     if (LI->getLoopFor(BB) != CurLoop)
    240       continue;
    241 
    242     MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks);
    243   }
    244   return MadeChange;
    245 }
    246 
    247 static unsigned getStoreSizeInBytes(StoreInst *SI, const DataLayout *DL) {
    248   uint64_t SizeInBits = DL->getTypeSizeInBits(SI->getValueOperand()->getType());
     249   assert(((SizeInBits & 7) | (SizeInBits >> 32)) == 0 &&
    250          "Don't overflow unsigned.");
    251   return (unsigned)SizeInBits >> 3;
    252 }
    253 
    254 static unsigned getStoreStride(const SCEVAddRecExpr *StoreEv) {
    255   const SCEVConstant *ConstStride = cast<SCEVConstant>(StoreEv->getOperand(1));
    256   return ConstStride->getAPInt().getZExtValue();
    257 }
    258 
    259 /// getMemSetPatternValue - If a strided store of the specified value is safe to
    260 /// turn into a memset_pattern16, return a ConstantArray of 16 bytes that should
    261 /// be passed in.  Otherwise, return null.
    262 ///
    263 /// Note that we don't ever attempt to use memset_pattern8 or 4, because these
    264 /// just replicate their input array and then pass on to memset_pattern16.
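         ///
         /// For example (illustrative only), a strided store of the i32 constant
         /// 0x01020304 yields a [4 x i32] array holding four copies of that constant,
         /// i.e. 16 bytes of pattern data for memset_pattern16.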
    265 static Constant *getMemSetPatternValue(Value *V, const DataLayout *DL) {
    266   // If the value isn't a constant, we can't promote it to being in a constant
    267   // array.  We could theoretically do a store to an alloca or something, but
    268   // that doesn't seem worthwhile.
    269   Constant *C = dyn_cast<Constant>(V);
    270   if (!C)
    271     return nullptr;
    272 
    273   // Only handle simple values that are a power of two bytes in size.
    274   uint64_t Size = DL->getTypeSizeInBits(V->getType());
    275   if (Size == 0 || (Size & 7) || (Size & (Size - 1)))
    276     return nullptr;
    277 
    278   // Don't care enough about darwin/ppc to implement this.
    279   if (DL->isBigEndian())
    280     return nullptr;
    281 
    282   // Convert to size in bytes.
    283   Size /= 8;
    284 
     285   // TODO: If C is larger than 16 bytes, we can try slicing it in half to see
    286   // if the top and bottom are the same (e.g. for vectors and large integers).
    287   if (Size > 16)
    288     return nullptr;
    289 
    290   // If the constant is exactly 16 bytes, just use it.
    291   if (Size == 16)
    292     return C;
    293 
    294   // Otherwise, we'll use an array of the constants.
    295   unsigned ArraySize = 16 / Size;
    296   ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
    297   return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C));
    298 }
    299 
    300 bool LoopIdiomRecognize::isLegalStore(StoreInst *SI) {
    301   // Don't touch volatile stores.
    302   if (!SI->isSimple())
    303     return false;
    304 
    305   Value *StoredVal = SI->getValueOperand();
    306   Value *StorePtr = SI->getPointerOperand();
    307 
    308   // Reject stores that are so large that they overflow an unsigned.
    309   uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
    310   if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
    311     return false;
    312 
    313   // See if the pointer expression is an AddRec like {base,+,1} on the current
    314   // loop, which indicates a strided store.  If we have something else, it's a
    315   // random store we can't handle.
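           // For example (illustrative only), a loop storing an i32 to A[i] on each
           // iteration typically has the pointer SCEV {%A,+,4}<%loop>: an affine
           // AddRec whose step is the constant 4 (bytes).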
    316   const SCEVAddRecExpr *StoreEv =
    317       dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
    318   if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
    319     return false;
    320 
    321   // Check to see if we have a constant stride.
    322   if (!isa<SCEVConstant>(StoreEv->getOperand(1)))
    323     return false;
    324 
    325   return true;
    326 }
    327 
    328 void LoopIdiomRecognize::collectStores(BasicBlock *BB) {
    329   StoreRefs.clear();
    330   for (Instruction &I : *BB) {
    331     StoreInst *SI = dyn_cast<StoreInst>(&I);
    332     if (!SI)
    333       continue;
    334 
    335     // Make sure this is a strided store with a constant stride.
    336     if (!isLegalStore(SI))
    337       continue;
    338 
    339     // Save the store locations.
    340     StoreRefs.push_back(SI);
    341   }
    342 }
    343 
    344 /// runOnLoopBlock - Process the specified block, which lives in a counted loop
    345 /// with the specified backedge count.  This block is known to be in the current
    346 /// loop and not in any subloops.
    347 bool LoopIdiomRecognize::runOnLoopBlock(
    348     BasicBlock *BB, const SCEV *BECount,
    349     SmallVectorImpl<BasicBlock *> &ExitBlocks) {
    350   // We can only promote stores in this block if they are unconditionally
    351   // executed in the loop.  For a block to be unconditionally executed, it has
    352   // to dominate all the exit blocks of the loop.  Verify this now.
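           // (Illustrative example: a block guarded by an "if" inside the loop body
           // does not dominate the exit blocks, so its stores are not promoted.)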
    353   for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
    354     if (!DT->dominates(BB, ExitBlocks[i]))
    355       return false;
    356 
    357   bool MadeChange = false;
    358   // Look for store instructions, which may be optimized to memset/memcpy.
    359   collectStores(BB);
    360   for (auto &SI : StoreRefs)
    361     MadeChange |= processLoopStore(SI, BECount);
    362 
    363   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    364     Instruction *Inst = &*I++;
    365     // Look for memset instructions, which may be optimized to a larger memset.
    366     if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
    367       WeakVH InstPtr(&*I);
    368       if (!processLoopMemSet(MSI, BECount))
    369         continue;
    370       MadeChange = true;
    371 
    372       // If processing the memset invalidated our iterator, start over from the
    373       // top of the block.
    374       if (!InstPtr)
    375         I = BB->begin();
    376       continue;
    377     }
    378   }
    379 
    380   return MadeChange;
    381 }
    382 
    383 /// processLoopStore - See if this store can be promoted to a memset or memcpy.
    384 bool LoopIdiomRecognize::processLoopStore(StoreInst *SI, const SCEV *BECount) {
    385   assert(SI->isSimple() && "Expected only non-volatile stores.");
    386 
    387   Value *StoredVal = SI->getValueOperand();
    388   Value *StorePtr = SI->getPointerOperand();
    389 
    390   // Check to see if the stride matches the size of the store.  If so, then we
    391   // know that every byte is touched in the loop.
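           // For example (illustrative only), an i32 store with a stride of +4 or -4
           // touches every byte, while a stride of +8 would leave 4-byte gaps and is
           // rejected.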
    392   const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
    393   unsigned Stride = getStoreStride(StoreEv);
    394   unsigned StoreSize = getStoreSizeInBytes(SI, DL);
    395   if (StoreSize != Stride && StoreSize != -Stride)
    396     return false;
    397 
    398   bool NegStride = StoreSize == -Stride;
    399 
    400   // See if we can optimize just this store in isolation.
    401   if (processLoopStridedStore(StorePtr, StoreSize, SI->getAlignment(),
    402                               StoredVal, SI, StoreEv, BECount, NegStride))
    403     return true;
    404 
     405   // Optimize the store into a memcpy, if it feeds a similarly strided load.
    406   return processLoopStoreOfLoopLoad(SI, StoreSize, StoreEv, BECount, NegStride);
    407 }
    408 
    409 /// processLoopMemSet - See if this memset can be promoted to a large memset.
    410 bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
    411                                            const SCEV *BECount) {
    412   // We can only handle non-volatile memsets with a constant size.
    413   if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
    414     return false;
    415 
    416   // If we're not allowed to hack on memset, we fail.
    417   if (!TLI->has(LibFunc::memset))
    418     return false;
    419 
    420   Value *Pointer = MSI->getDest();
    421 
    422   // See if the pointer expression is an AddRec like {base,+,1} on the current
    423   // loop, which indicates a strided store.  If we have something else, it's a
    424   // random store we can't handle.
    425   const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
    426   if (!Ev || Ev->getLoop() != CurLoop || !Ev->isAffine())
    427     return false;
    428 
    429   // Reject memsets that are so large that they overflow an unsigned.
    430   uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    431   if ((SizeInBytes >> 32) != 0)
    432     return false;
    433 
    434   // Check to see if the stride matches the size of the memset.  If so, then we
    435   // know that every byte is touched in the loop.
    436   const SCEVConstant *Stride = dyn_cast<SCEVConstant>(Ev->getOperand(1));
    437 
     438   // TODO: Could also handle negative stride here someday, though that will
     439   // require the validity check in mayLoopAccessLocation to be updated.
    440   if (!Stride || MSI->getLength() != Stride->getValue())
    441     return false;
    442 
    443   return processLoopStridedStore(Pointer, (unsigned)SizeInBytes,
    444                                  MSI->getAlignment(), MSI->getValue(), MSI, Ev,
    445                                  BECount, /*NegStride=*/false);
    446 }
    447 
    448 /// mayLoopAccessLocation - Return true if the specified loop might access the
    449 /// specified pointer location, which is a loop-strided access.  The 'Access'
    450 /// argument specifies what the verboten forms of access are (read or write).
    451 static bool mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
    452                                   const SCEV *BECount, unsigned StoreSize,
    453                                   AliasAnalysis &AA,
    454                                   Instruction *IgnoredStore) {
    455   // Get the location that may be stored across the loop.  Since the access is
    456   // strided positively through memory, we say that the modified location starts
    457   // at the pointer and has infinite size.
    458   uint64_t AccessSize = MemoryLocation::UnknownSize;
    459 
    460   // If the loop iterates a fixed number of times, we can refine the access size
    461   // to be exactly the size of the memset, which is (BECount+1)*StoreSize
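           // (Illustratively, a backedge-taken count of 99 with a 4-byte store gives
           // an access size of (99 + 1) * 4 = 400 bytes.)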
    462   if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    463     AccessSize = (BECst->getValue()->getZExtValue() + 1) * StoreSize;
    464 
    465   // TODO: For this to be really effective, we have to dive into the pointer
     466   // operand in the store.  A store to &A[i] over 100 iterations will always
     467   // return may-alias with a store to &A[100]; we need StoreLoc to be "A" with
     468   // a size of 100, which will then no-alias a store to &A[100].
    469   MemoryLocation StoreLoc(Ptr, AccessSize);
    470 
    471   for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
    472        ++BI)
    473     for (BasicBlock::iterator I = (*BI)->begin(), E = (*BI)->end(); I != E; ++I)
    474       if (&*I != IgnoredStore && (AA.getModRefInfo(&*I, StoreLoc) & Access))
    475         return true;
    476 
    477   return false;
    478 }
    479 
    480 // If we have a negative stride, Start refers to the end of the memory location
    481 // we're trying to memset.  Therefore, we need to recompute the base pointer,
    482 // which is just Start - BECount*Size.
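         // For example (an illustrative sketch): a loop storing i32s downward with a
         // backedge-taken count of 9 and Start == &A[9] gets the base pointer
         // &A[9] - 9 * 4 bytes, i.e. &A[0].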
    483 static const SCEV *getStartForNegStride(const SCEV *Start, const SCEV *BECount,
    484                                         Type *IntPtr, unsigned StoreSize,
    485                                         ScalarEvolution *SE) {
    486   const SCEV *Index = SE->getTruncateOrZeroExtend(BECount, IntPtr);
    487   if (StoreSize != 1)
    488     Index = SE->getMulExpr(Index, SE->getConstant(IntPtr, StoreSize),
    489                            SCEV::FlagNUW);
    490   return SE->getMinusSCEV(Start, Index);
    491 }
    492 
    493 /// processLoopStridedStore - We see a strided store of some value.  If we can
    494 /// transform this into a memset or memset_pattern in the loop preheader, do so.
    495 bool LoopIdiomRecognize::processLoopStridedStore(
    496     Value *DestPtr, unsigned StoreSize, unsigned StoreAlignment,
    497     Value *StoredVal, Instruction *TheStore, const SCEVAddRecExpr *Ev,
    498     const SCEV *BECount, bool NegStride) {
    499 
    500   // If the stored value is a byte-wise value (like i32 -1), then it may be
    501   // turned into a memset of i8 -1, assuming that all the consecutive bytes
    502   // are stored.  A store of i32 0x01020304 can never be turned into a memset,
    503   // but it can be turned into memset_pattern if the target supports it.
    504   Value *SplatValue = isBytewiseValue(StoredVal);
    505   Constant *PatternValue = nullptr;
    506   unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
    507 
    508   // If we're allowed to form a memset, and the stored value would be acceptable
    509   // for memset, use it.
    510   if (SplatValue && TLI->has(LibFunc::memset) &&
    511       // Verify that the stored value is loop invariant.  If not, we can't
    512       // promote the memset.
    513       CurLoop->isLoopInvariant(SplatValue)) {
    514     // Keep and use SplatValue.
    515     PatternValue = nullptr;
    516   } else if (DestAS == 0 && TLI->has(LibFunc::memset_pattern16) &&
    517              (PatternValue = getMemSetPatternValue(StoredVal, DL))) {
    518     // Don't create memset_pattern16s with address spaces.
    519     // It looks like we can use PatternValue!
    520     SplatValue = nullptr;
    521   } else {
    522     // Otherwise, this isn't an idiom we can transform.  For example, we can't
    523     // do anything with a 3-byte store.
    524     return false;
    525   }
    526 
     527   // The trip count of the loop and the base pointer of the addrec SCEV are
     528   // guaranteed to be loop invariant, which means that they should dominate the
    529   // header.  This allows us to insert code for it in the preheader.
    530   BasicBlock *Preheader = CurLoop->getLoopPreheader();
    531   IRBuilder<> Builder(Preheader->getTerminator());
    532   SCEVExpander Expander(*SE, *DL, "loop-idiom");
    533 
    534   Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
    535   Type *IntPtr = Builder.getIntPtrTy(*DL, DestAS);
    536 
    537   const SCEV *Start = Ev->getStart();
    538   // Handle negative strided loops.
    539   if (NegStride)
    540     Start = getStartForNegStride(Start, BECount, IntPtr, StoreSize, SE);
    541 
    542   // Okay, we have a strided store "p[i]" of a splattable value.  We can turn
    543   // this into a memset in the loop preheader now if we want.  However, this
    544   // would be unsafe to do if there is anything else in the loop that may read
    545   // or write to the aliased location.  Check for any overlap by generating the
    546   // base pointer and checking the region.
    547   Value *BasePtr =
    548       Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator());
    549   if (mayLoopAccessLocation(BasePtr, MRI_ModRef, CurLoop, BECount, StoreSize,
    550                             *AA, TheStore)) {
    551     Expander.clear();
    552     // If we generated new code for the base pointer, clean up.
    553     RecursivelyDeleteTriviallyDeadInstructions(BasePtr, TLI);
    554     return false;
    555   }
    556 
    557   // Okay, everything looks good, insert the memset.
    558 
    559   // The # stored bytes is (BECount+1)*Size.  Expand the trip count out to
    560   // pointer size if it isn't already.
    561   BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);
    562 
    563   const SCEV *NumBytesS =
    564       SE->getAddExpr(BECount, SE->getOne(IntPtr), SCEV::FlagNUW);
    565   if (StoreSize != 1) {
    566     NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
    567                                SCEV::FlagNUW);
    568   }
    569 
    570   Value *NumBytes =
    571       Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());
    572 
    573   CallInst *NewCall;
    574   if (SplatValue) {
    575     NewCall =
    576         Builder.CreateMemSet(BasePtr, SplatValue, NumBytes, StoreAlignment);
    577   } else {
     578     // Everything is emitted in the default address space.
    579     Type *Int8PtrTy = DestInt8PtrTy;
    580 
    581     Module *M = TheStore->getModule();
    582     Value *MSP =
    583         M->getOrInsertFunction("memset_pattern16", Builder.getVoidTy(),
    584                                Int8PtrTy, Int8PtrTy, IntPtr, (void *)nullptr);
    585 
    586     // Otherwise we should form a memset_pattern16.  PatternValue is known to be
     587     // a constant array of 16 bytes.  Plop the value into a mergeable global.
    588     GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
    589                                             GlobalValue::PrivateLinkage,
    590                                             PatternValue, ".memset_pattern");
    591     GV->setUnnamedAddr(true); // Ok to merge these.
    592     GV->setAlignment(16);
    593     Value *PatternPtr = ConstantExpr::getBitCast(GV, Int8PtrTy);
    594     NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes});
    595   }
    596 
    597   DEBUG(dbgs() << "  Formed memset: " << *NewCall << "\n"
    598                << "    from store to: " << *Ev << " at: " << *TheStore << "\n");
    599   NewCall->setDebugLoc(TheStore->getDebugLoc());
    600 
    601   // Okay, the memset has been formed.  Zap the original store and anything that
    602   // feeds into it.
    603   deleteDeadInstruction(TheStore, TLI);
    604   ++NumMemSet;
    605   return true;
    606 }
    607 
    608 /// If the stored value is a strided load in the same loop with the same stride
    609 /// this may be transformable into a memcpy.  This kicks in for stuff like
    610 ///   for (i) A[i] = B[i];
    611 bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
    612     StoreInst *SI, unsigned StoreSize, const SCEVAddRecExpr *StoreEv,
    613     const SCEV *BECount, bool NegStride) {
    614   // If we're not allowed to form memcpy, we fail.
    615   if (!TLI->has(LibFunc::memcpy))
    616     return false;
    617 
     618   // The store must be fed by a non-volatile load.
    619   LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());
    620   if (!LI || !LI->isSimple())
    621     return false;
    622 
    623   // See if the pointer expression is an AddRec like {base,+,1} on the current
    624   // loop, which indicates a strided load.  If we have something else, it's a
    625   // random load we can't handle.
    626   const SCEVAddRecExpr *LoadEv =
    627       dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
    628   if (!LoadEv || LoadEv->getLoop() != CurLoop || !LoadEv->isAffine())
    629     return false;
    630 
    631   // The store and load must share the same stride.
    632   if (StoreEv->getOperand(1) != LoadEv->getOperand(1))
    633     return false;
    634 
     635   // The trip count of the loop and the base pointer of the addrec SCEV are
     636   // guaranteed to be loop invariant, which means that they should dominate the
    637   // header.  This allows us to insert code for it in the preheader.
    638   BasicBlock *Preheader = CurLoop->getLoopPreheader();
    639   IRBuilder<> Builder(Preheader->getTerminator());
    640   SCEVExpander Expander(*SE, *DL, "loop-idiom");
    641 
    642   const SCEV *StrStart = StoreEv->getStart();
    643   unsigned StrAS = SI->getPointerAddressSpace();
    644   Type *IntPtrTy = Builder.getIntPtrTy(*DL, StrAS);
    645 
    646   // Handle negative strided loops.
    647   if (NegStride)
    648     StrStart = getStartForNegStride(StrStart, BECount, IntPtrTy, StoreSize, SE);
    649 
    650   // Okay, we have a strided store "p[i]" of a loaded value.  We can turn
    651   // this into a memcpy in the loop preheader now if we want.  However, this
    652   // would be unsafe to do if there is anything else in the loop that may read
    653   // or write the memory region we're storing to.  This includes the load that
    654   // feeds the stores.  Check for an alias by generating the base address and
    655   // checking everything.
    656   Value *StoreBasePtr = Expander.expandCodeFor(
    657       StrStart, Builder.getInt8PtrTy(StrAS), Preheader->getTerminator());
    658 
    659   if (mayLoopAccessLocation(StoreBasePtr, MRI_ModRef, CurLoop, BECount,
    660                             StoreSize, *AA, SI)) {
    661     Expander.clear();
    662     // If we generated new code for the base pointer, clean up.
    663     RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
    664     return false;
    665   }
    666 
    667   const SCEV *LdStart = LoadEv->getStart();
    668   unsigned LdAS = LI->getPointerAddressSpace();
    669 
    670   // Handle negative strided loops.
    671   if (NegStride)
    672     LdStart = getStartForNegStride(LdStart, BECount, IntPtrTy, StoreSize, SE);
    673 
    674   // For a memcpy, we have to make sure that the input array is not being
    675   // mutated by the loop.
    676   Value *LoadBasePtr = Expander.expandCodeFor(
    677       LdStart, Builder.getInt8PtrTy(LdAS), Preheader->getTerminator());
    678 
    679   if (mayLoopAccessLocation(LoadBasePtr, MRI_Mod, CurLoop, BECount, StoreSize,
    680                             *AA, SI)) {
    681     Expander.clear();
    682     // If we generated new code for the base pointer, clean up.
    683     RecursivelyDeleteTriviallyDeadInstructions(LoadBasePtr, TLI);
    684     RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
    685     return false;
    686   }
    687 
    688   // Okay, everything is safe, we can transform this!
    689 
    690   // The # stored bytes is (BECount+1)*Size.  Expand the trip count out to
    691   // pointer size if it isn't already.
    692   BECount = SE->getTruncateOrZeroExtend(BECount, IntPtrTy);
    693 
    694   const SCEV *NumBytesS =
    695       SE->getAddExpr(BECount, SE->getOne(IntPtrTy), SCEV::FlagNUW);
    696   if (StoreSize != 1)
    697     NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtrTy, StoreSize),
    698                                SCEV::FlagNUW);
    699 
    700   Value *NumBytes =
    701       Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());
    702 
    703   CallInst *NewCall =
    704       Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes,
    705                            std::min(SI->getAlignment(), LI->getAlignment()));
    706   NewCall->setDebugLoc(SI->getDebugLoc());
    707 
    708   DEBUG(dbgs() << "  Formed memcpy: " << *NewCall << "\n"
    709                << "    from load ptr=" << *LoadEv << " at: " << *LI << "\n"
    710                << "    from store ptr=" << *StoreEv << " at: " << *SI << "\n");
    711 
    712   // Okay, the memcpy has been formed.  Zap the original store and anything that
    713   // feeds into it.
    714   deleteDeadInstruction(SI, TLI);
    715   ++NumMemCpy;
    716   return true;
    717 }
    718 
    719 bool LoopIdiomRecognize::runOnNoncountableLoop() {
    720   return recognizePopcount();
    721 }
    722 
    723 /// Check if the given conditional branch is based on the comparison between
    724 /// a variable and zero, and if the variable is non-zero, the control yields to
    725 /// the loop entry. If the branch matches the behavior, the variable involved
     726 /// in the comparison is returned. This function will be called to see if the
    727 /// precondition and postcondition of the loop are in desirable form.
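         /// For example (an illustrative IR sketch), this matches a terminator such as
         ///   %cmp = icmp ne i32 %x, 0
         ///   br i1 %cmp, label %loop.entry, label %exit
         /// (with LoopEntry == %loop.entry) and returns %x.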
    728 static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry) {
    729   if (!BI || !BI->isConditional())
    730     return nullptr;
    731 
    732   ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition());
    733   if (!Cond)
    734     return nullptr;
    735 
    736   ConstantInt *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1));
    737   if (!CmpZero || !CmpZero->isZero())
    738     return nullptr;
    739 
    740   ICmpInst::Predicate Pred = Cond->getPredicate();
    741   if ((Pred == ICmpInst::ICMP_NE && BI->getSuccessor(0) == LoopEntry) ||
    742       (Pred == ICmpInst::ICMP_EQ && BI->getSuccessor(1) == LoopEntry))
    743     return Cond->getOperand(0);
    744 
    745   return nullptr;
    746 }
    747 
    748 /// Return true iff the idiom is detected in the loop.
    749 ///
    750 /// Additionally:
    751 /// 1) \p CntInst is set to the instruction counting the population bit.
    752 /// 2) \p CntPhi is set to the corresponding phi node.
    753 /// 3) \p Var is set to the value whose population bits are being counted.
    754 ///
    755 /// The core idiom we are trying to detect is:
    756 /// \code
     757 ///    if (x0 == 0)
     758 ///      goto loop-exit // the precondition of the loop
    759 ///    cnt0 = init-val;
    760 ///    do {
    761 ///       x1 = phi (x0, x2);
    762 ///       cnt1 = phi(cnt0, cnt2);
    763 ///
    764 ///       cnt2 = cnt1 + 1;
    765 ///        ...
    766 ///       x2 = x1 & (x1 - 1);
    767 ///        ...
     768 ///    } while(x2 != 0);
    769 ///
    770 /// loop-exit:
    771 /// \endcode
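         ///
         /// In C terms (an illustrative sketch only), this is the classic
         /// "clear the lowest set bit" counting loop:
         /// \code
         ///   int cnt = 0;
         ///   while (x) { x &= x - 1; ++cnt; }
         /// \endcode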
    772 static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB,
    773                                 Instruction *&CntInst, PHINode *&CntPhi,
    774                                 Value *&Var) {
     775   // step 1: Check to see if the loop-back branch matches this pattern:
    776   //    "if (a!=0) goto loop-entry".
    777   BasicBlock *LoopEntry;
    778   Instruction *DefX2, *CountInst;
    779   Value *VarX1, *VarX0;
    780   PHINode *PhiX, *CountPhi;
    781 
    782   DefX2 = CountInst = nullptr;
    783   VarX1 = VarX0 = nullptr;
    784   PhiX = CountPhi = nullptr;
    785   LoopEntry = *(CurLoop->block_begin());
    786 
    787   // step 1: Check if the loop-back branch is in desirable form.
    788   {
    789     if (Value *T = matchCondition(
    790             dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
    791       DefX2 = dyn_cast<Instruction>(T);
    792     else
    793       return false;
    794   }
    795 
    796   // step 2: detect instructions corresponding to "x2 = x1 & (x1 - 1)"
    797   {
    798     if (!DefX2 || DefX2->getOpcode() != Instruction::And)
    799       return false;
    800 
    801     BinaryOperator *SubOneOp;
    802 
    803     if ((SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(0))))
    804       VarX1 = DefX2->getOperand(1);
    805     else {
    806       VarX1 = DefX2->getOperand(0);
    807       SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(1));
    808     }
    809     if (!SubOneOp)
    810       return false;
    811 
    812     Instruction *SubInst = cast<Instruction>(SubOneOp);
    813     ConstantInt *Dec = dyn_cast<ConstantInt>(SubInst->getOperand(1));
    814     if (!Dec ||
    815         !((SubInst->getOpcode() == Instruction::Sub && Dec->isOne()) ||
    816           (SubInst->getOpcode() == Instruction::Add &&
    817            Dec->isAllOnesValue()))) {
    818       return false;
    819     }
    820   }
    821 
    822   // step 3: Check the recurrence of variable X
    823   {
    824     PhiX = dyn_cast<PHINode>(VarX1);
    825     if (!PhiX ||
    826         (PhiX->getOperand(0) != DefX2 && PhiX->getOperand(1) != DefX2)) {
    827       return false;
    828     }
    829   }
    830 
     831   // step 4: Find the instruction which counts the population: cnt2 = cnt1 + 1
    832   {
    833     CountInst = nullptr;
    834     for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
    835                               IterE = LoopEntry->end();
    836          Iter != IterE; Iter++) {
    837       Instruction *Inst = &*Iter;
    838       if (Inst->getOpcode() != Instruction::Add)
    839         continue;
    840 
    841       ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
    842       if (!Inc || !Inc->isOne())
    843         continue;
    844 
    845       PHINode *Phi = dyn_cast<PHINode>(Inst->getOperand(0));
    846       if (!Phi || Phi->getParent() != LoopEntry)
    847         continue;
    848 
     849       // Check if the result of the instruction is live out of the loop.
    850       bool LiveOutLoop = false;
    851       for (User *U : Inst->users()) {
    852         if ((cast<Instruction>(U))->getParent() != LoopEntry) {
    853           LiveOutLoop = true;
    854           break;
    855         }
    856       }
    857 
    858       if (LiveOutLoop) {
    859         CountInst = Inst;
    860         CountPhi = Phi;
    861         break;
    862       }
    863     }
    864 
    865     if (!CountInst)
    866       return false;
    867   }
    868 
    869   // step 5: check if the precondition is in this form:
    870   //   "if (x != 0) goto loop-head ; else goto somewhere-we-don't-care;"
    871   {
    872     auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
    873     Value *T = matchCondition(PreCondBr, CurLoop->getLoopPreheader());
    874     if (T != PhiX->getOperand(0) && T != PhiX->getOperand(1))
    875       return false;
    876 
    877     CntInst = CountInst;
    878     CntPhi = CountPhi;
    879     Var = T;
    880   }
    881 
    882   return true;
    883 }
    884 
    885 /// Recognizes a population count idiom in a non-countable loop.
    886 ///
    887 /// If detected, transforms the relevant code to issue the popcount intrinsic
    888 /// function call, and returns true; otherwise, returns false.
    889 bool LoopIdiomRecognize::recognizePopcount() {
    890   if (TTI->getPopcntSupport(32) != TargetTransformInfo::PSK_FastHardware)
    891     return false;
    892 
     893   // Counting the population is usually done with a few arithmetic
     894   // instructions. Such instructions can be easily "absorbed" by vacant slots
     895   // in a non-compact loop. Therefore, recognizing the popcount idiom only
     896   // makes sense in a compact loop.
    897 
    898   // Give up if the loop has multiple blocks or multiple backedges.
    899   if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
    900     return false;
    901 
    902   BasicBlock *LoopBody = *(CurLoop->block_begin());
    903   if (LoopBody->size() >= 20) {
    904     // The loop is too big, bail out.
    905     return false;
    906   }
    907 
    908   // It should have a preheader containing nothing but an unconditional branch.
    909   BasicBlock *PH = CurLoop->getLoopPreheader();
    910   if (!PH)
    911     return false;
    912   if (&PH->front() != PH->getTerminator())
    913     return false;
    914   auto *EntryBI = dyn_cast<BranchInst>(PH->getTerminator());
    915   if (!EntryBI || EntryBI->isConditional())
    916     return false;
    917 
     918   // It should have a precondition block where the generated popcount intrinsic
    919   // function can be inserted.
    920   auto *PreCondBB = PH->getSinglePredecessor();
    921   if (!PreCondBB)
    922     return false;
    923   auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
    924   if (!PreCondBI || PreCondBI->isUnconditional())
    925     return false;
    926 
    927   Instruction *CntInst;
    928   PHINode *CntPhi;
    929   Value *Val;
    930   if (!detectPopcountIdiom(CurLoop, PreCondBB, CntInst, CntPhi, Val))
    931     return false;
    932 
    933   transformLoopToPopcount(PreCondBB, CntInst, CntPhi, Val);
    934   return true;
    935 }
    936 
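         /// A minimal sketch of the IR this helper emits (assuming an i32 operand):
         /// \code
         ///   %ctpop = call i32 @llvm.ctpop.i32(i32 %val)
         /// \endcode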
    937 static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
    938                                        DebugLoc DL) {
    939   Value *Ops[] = {Val};
    940   Type *Tys[] = {Val->getType()};
    941 
    942   Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
    943   Value *Func = Intrinsic::getDeclaration(M, Intrinsic::ctpop, Tys);
    944   CallInst *CI = IRBuilder.CreateCall(Func, Ops);
    945   CI->setDebugLoc(DL);
    946 
    947   return CI;
    948 }
    949 
    950 void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
    951                                                  Instruction *CntInst,
    952                                                  PHINode *CntPhi, Value *Var) {
    953   BasicBlock *PreHead = CurLoop->getLoopPreheader();
    954   auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
    955   const DebugLoc DL = CntInst->getDebugLoc();
    956 
     957   // Assume that before the transformation, the loop looks like the following:
    958   //  if (x) // the precondition
    959   //     do { cnt++; x &= x - 1; } while(x);
    960 
    961   // Step 1: Insert the ctpop instruction at the end of the precondition block
    962   IRBuilder<> Builder(PreCondBr);
    963   Value *PopCnt, *PopCntZext, *NewCount, *TripCnt;
    964   {
    965     PopCnt = createPopcntIntrinsic(Builder, Var, DL);
    966     NewCount = PopCntZext =
    967         Builder.CreateZExtOrTrunc(PopCnt, cast<IntegerType>(CntPhi->getType()));
    968 
    969     if (NewCount != PopCnt)
    970       (cast<Instruction>(NewCount))->setDebugLoc(DL);
    971 
    972     // TripCnt is exactly the number of iterations the loop has
    973     TripCnt = NewCount;
    974 
    975     // If the population counter's initial value is not zero, insert Add Inst.
    976     Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
    977     ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
    978     if (!InitConst || !InitConst->isZero()) {
    979       NewCount = Builder.CreateAdd(NewCount, CntInitVal);
    980       (cast<Instruction>(NewCount))->setDebugLoc(DL);
    981     }
    982   }
    983 
     984   // Step 2: Change the precondition from "if (x == 0) goto loop-exit" to
     985   //   "if (NewCount == 0) goto loop-exit". Without this change, the intrinsic
     986   //   function would be partially dead code, and downstream passes will drag
    987   //   it back from the precondition block to the preheader.
    988   {
    989     ICmpInst *PreCond = cast<ICmpInst>(PreCondBr->getCondition());
    990 
    991     Value *Opnd0 = PopCntZext;
    992     Value *Opnd1 = ConstantInt::get(PopCntZext->getType(), 0);
    993     if (PreCond->getOperand(0) != Var)
    994       std::swap(Opnd0, Opnd1);
    995 
    996     ICmpInst *NewPreCond = cast<ICmpInst>(
    997         Builder.CreateICmp(PreCond->getPredicate(), Opnd0, Opnd1));
    998     PreCondBr->setCondition(NewPreCond);
    999 
   1000     RecursivelyDeleteTriviallyDeadInstructions(PreCond, TLI);
   1001   }
   1002 
   1003   // Step 3: Note that the population count is exactly the trip count of the
    1004   // loop in question, which enables us to convert the loop from a noncountable
   1005   // loop into a countable one. The benefit is twofold:
   1006   //
   1007   //  - If the loop only counts population, the entire loop becomes dead after
   1008   //    the transformation. It is a lot easier to prove a countable loop dead
   1009   //    than to prove a noncountable one. (In some C dialects, an infinite loop
   1010   //    isn't dead even if it computes nothing useful. In general, DCE needs
    1011   // to prove a noncountable loop finite before safely deleting it.)
   1012   //
   1013   //  - If the loop also performs something else, it remains alive.
   1014   //    Since it is transformed to countable form, it can be aggressively
   1015   //    optimized by some optimizations which are in general not applicable
   1016   //    to a noncountable loop.
   1017   //
    1018   // After this step, this loop (conceptually) would look like the following:
   1019   //   newcnt = __builtin_ctpop(x);
   1020   //   t = newcnt;
   1021   //   if (x)
    1022   //     do { cnt++; x &= x-1; t--; } while (t > 0);
   1023   BasicBlock *Body = *(CurLoop->block_begin());
   1024   {
   1025     auto *LbBr = dyn_cast<BranchInst>(Body->getTerminator());
   1026     ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
   1027     Type *Ty = TripCnt->getType();
   1028 
   1029     PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());
   1030 
   1031     Builder.SetInsertPoint(LbCond);
   1032     Instruction *TcDec = cast<Instruction>(
   1033         Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
   1034                           "tcdec", false, true));
   1035 
   1036     TcPhi->addIncoming(TripCnt, PreHead);
   1037     TcPhi->addIncoming(TcDec, Body);
   1038 
   1039     CmpInst::Predicate Pred =
   1040         (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_UGT : CmpInst::ICMP_SLE;
   1041     LbCond->setPredicate(Pred);
   1042     LbCond->setOperand(0, TcDec);
   1043     LbCond->setOperand(1, ConstantInt::get(Ty, 0));
   1044   }
   1045 
   1046   // Step 4: All the references to the original population counter outside
   1047   //  the loop are replaced with the NewCount -- the value returned from
   1048   //  __builtin_ctpop().
   1049   CntInst->replaceUsesOutsideBlock(NewCount, Body);
   1050 
    1051   // Step 5: Forget the "non-computable" trip-count SCEV associated with the
   1052   //   loop. The loop would otherwise not be deleted even if it becomes empty.
   1053   SE->forgetLoop(CurLoop);
   1054 }
   1055