//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "codegenprepare"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/ValueMap.h"
#include "llvm/Analysis/DominatorInternals.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ProfileInfo.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim,   "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim,   "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumExtsMoved,  "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses,    "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumRetsDup,    "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");

static cl::opt<bool> DisableBranchOpts(
  "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
  cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableSelectToBranch(
  "disable-cgp-select2branch", cl::Hidden, cl::init(false),
  cl::desc("Disable select to branch conversion."));

namespace {
  class CodeGenPrepare : public FunctionPass {
    /// TLI - Keep a pointer to a TargetLowering to consult for determining
    /// transformation profitability.
    const TargetMachine *TM;
    const TargetLowering *TLI;
    const TargetLibraryInfo *TLInfo;
    DominatorTree *DT;
    ProfileInfo *PFI;

    /// CurInstIterator - As we scan instructions optimizing them, this is the
    /// next instruction to optimize.  Xforms that can invalidate this should
    /// update it.
    BasicBlock::iterator CurInstIterator;

    /// Keeps track of non-local addresses that have been sunk into a block.
    /// This allows us to avoid inserting duplicate code for blocks with
    /// multiple load/stores of the same address.
    ValueMap<Value*, Value*> SunkAddrs;

    /// ModifiedDT - If the CFG is modified in any way, the dominator tree may
    /// need to be updated.
    bool ModifiedDT;

    /// OptSize - True if optimizing for size.
    bool OptSize;

  public:
    static char ID; // Pass identification, replacement for typeid
    explicit CodeGenPrepare(const TargetMachine *TM = 0)
      : FunctionPass(ID), TM(TM), TLI(0) {
        initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
      }
    bool runOnFunction(Function &F);

    const char *getPassName() const { return "CodeGen Prepare"; }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addPreserved<DominatorTree>();
      AU.addPreserved<ProfileInfo>();
      AU.addRequired<TargetLibraryInfo>();
    }

  private:
    bool EliminateFallThrough(Function &F);
    bool EliminateMostlyEmptyBlocks(Function &F);
    bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
    void EliminateMostlyEmptyBlock(BasicBlock *BB);
    bool OptimizeBlock(BasicBlock &BB);
    bool OptimizeInst(Instruction *I);
    bool OptimizeMemoryInst(Instruction *I, Value *Addr, Type *AccessTy);
    bool OptimizeInlineAsmInst(CallInst *CS);
    bool OptimizeCallInst(CallInst *CI);
    bool MoveExtToFormExtLoad(Instruction *I);
    bool OptimizeExtUses(Instruction *I);
    bool OptimizeSelectInst(SelectInst *SI);
    bool DupRetToEnableTailCallOpts(BasicBlock *BB);
    bool PlaceDbgValues(Function &F);
  };
}

char CodeGenPrepare::ID = 0;
INITIALIZE_PASS_BEGIN(CodeGenPrepare, "codegenprepare",
                "Optimize for code generation", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(CodeGenPrepare, "codegenprepare",
                "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPreparePass(const TargetMachine *TM) {
  return new CodeGenPrepare(TM);
}

bool CodeGenPrepare::runOnFunction(Function &F) {
  bool EverMadeChange = false;

  ModifiedDT = false;
  if (TM) TLI = TM->getTargetLowering();
  TLInfo = &getAnalysis<TargetLibraryInfo>();
  DT = getAnalysisIfAvailable<DominatorTree>();
  PFI = getAnalysisIfAvailable<ProfileInfo>();
  OptSize = F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                           Attribute::OptimizeForSize);

  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
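  /// For example (an illustrative sketch, not target-specific), a udiv of
  /// i64 operands that dynamically fit in 32 bits may be rewritten to test
  /// the high bits of both operands and branch to a cheaper 32-bit divide.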
  if (!OptSize && TLI && TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
       TLI->getBypassSlowDivWidths();
    for (Function::iterator I = F.begin(); I != F.end(); I++)
      EverMadeChange |= bypassSlowDivision(F, I, BypassWidths);
  }

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= EliminateMostlyEmptyBlocks(F);

  // If llvm.dbg.value is far away from the value, then ISel may not be able
  // to handle it properly. ISel will drop llvm.dbg.value if it cannot find a
  // node corresponding to the value.
  EverMadeChange |= PlaceDbgValues(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    for (Function::iterator I = F.begin(); I != F.end(); ) {
      BasicBlock *BB = I++;
      MadeChange |= OptimizeBlock(*BB);
    }
    EverMadeChange |= MadeChange;
  }

  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    SmallPtrSet<BasicBlock*, 8> WorkList;
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
      SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));
      MadeChange |= ConstantFoldTerminator(BB, true);
      if (!MadeChange) continue;

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = *WorkList.begin();
      WorkList.erase(BB);
      SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));

      DeleteDeadBlock(BB);

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= EliminateFallThrough(F);

    if (MadeChange)
      ModifiedDT = true;
    EverMadeChange |= MadeChange;
  }

  if (ModifiedDT && DT)
    DT->DT->recalculate(F);

  return EverMadeChange;
}

/// EliminateFallThrough - Merge basic blocks which are connected
/// by a single edge, where one of the basic blocks has a single successor
/// pointing to the other basic block, which has a single predecessor.
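///
/// For example (illustrative):
/// @code
/// bb0:
///   ...
///   br label %bb1
/// bb1:                                      ; preds = %bb0
///   ...
/// @endcode
/// Here bb1 has bb0 as its only predecessor and bb0 ends in an unconditional
/// branch, so bb1 is merged into bb0.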
bool CodeGenPrepare::EliminateFallThrough(Function &F) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  for (Function::iterator I = ++F.begin(), E = F.end(); I != E; ) {
    BasicBlock *BB = I++;
    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      DEBUG(dbgs() << "To merge:\n" << *SinglePred << "\n\n\n");
      // Remember if SinglePred was the entry block of the function.
      // If so, we will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(BB, this);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      // We have erased a block. Update the iterator.
      I = BB;
    }
  }
  return Changed;
}

/// EliminateMostlyEmptyBlocks - eliminate blocks that contain only PHI nodes,
/// debug info directives, and an unconditional branch.  Passes before isel
/// (e.g. LSR/loopsimplify) often split edges in ways that are non-optimal for
/// isel.  Start by eliminating these blocks so we can split them the way we
/// want them.
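///
/// A "mostly empty" block that this eliminates looks like (illustrative):
/// @code
/// bb:
///   %p = phi i32 [ %a, %pred1 ], [ %b, %pred2 ]
///   br label %dest
/// @endcode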
bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
  bool MadeChange = false;
  // Note that this intentionally skips the entry block.
  for (Function::iterator I = ++F.begin(), E = F.end(); I != E; ) {
    BasicBlock *BB = I++;

    // If this block doesn't end with an uncond branch, ignore it.
    BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || !BI->isUnconditional())
      continue;

    // If the instruction before the branch (skipping debug info) isn't a phi
    // node, then other stuff is happening here.
    BasicBlock::iterator BBI = BI;
    if (BBI != BB->begin()) {
      --BBI;
      while (isa<DbgInfoIntrinsic>(BBI)) {
        if (BBI == BB->begin())
          break;
        --BBI;
      }
      if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
        continue;
    }

    // Do not break infinite loops.
    BasicBlock *DestBB = BI->getSuccessor(0);
    if (DestBB == BB)
      continue;

    if (!CanMergeBlocks(BB, DestBB))
      continue;

    EliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

/// CanMergeBlocks - Return true if we can merge BB into DestBB if there is a
/// single uncond branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor.  If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  BasicBlock::const_iterator BBI = BB->begin();
  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
    for (Value::const_use_iterator UI = PN->use_begin(), E = PN->use_end();
         UI != E; ++UI) {
      const Instruction *User = cast<Instruction>(*UI);
      if (User->getParent() != DestBB || !isa<PHINode>(User))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (User->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(User))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block.  If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      BBI = DestBB->begin();
      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
        const Value *V1 = PN->getIncomingValueForBlock(Pred);
        const Value *V2 = PN->getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}


/// EliminateMostlyEmptyBlock - Eliminate a basic block that has only phi nodes
/// and an unconditional branch in it.
void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      // Remember if SinglePred was the entry block of the function.  If so, we
      // will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(DestBB, this);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB.  Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  PHINode *PN;
  for (BasicBlock::iterator BBI = DestBB->begin();
       (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN->removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN->addIncoming(InValPhi->getIncomingValue(i),
                        InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN->addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  if (DT && !ModifiedDT) {
    BasicBlock *BBIDom  = DT->getNode(BB)->getIDom()->getBlock();
    BasicBlock *DestBBIDom = DT->getNode(DestBB)->getIDom()->getBlock();
    BasicBlock *NewIDom = DT->findNearestCommonDominator(BBIDom, DestBBIDom);
    DT->changeImmediateDominator(DestBB, NewIDom);
    DT->eraseNode(BB);
  }
  if (PFI) {
    PFI->replaceAllUses(BB, DestBB);
    PFI->removeEdge(ProfileInfo::getEdge(BB, DestBB));
  }
  BB->eraseFromParent();
  ++NumBlocksElim;

  DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

/// OptimizeNoopCopyExpression - If the specified cast instruction is a noop
/// copy (e.g. it's casting from one pointer type to another, i32->i8 on PPC),
/// sink it into user blocks to reduce the number of virtual
/// registers that must be created and coalesced.
///
/// Return true if any changes are made.
///
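/// An illustrative sketch (assuming the bitcast below is a noop copy on the
/// target):
/// @code
/// entry:
///   %c = bitcast i32* %p to i8*
///   br label %use
/// use:
///   store i8 0, i8* %c
/// @endcode
/// becomes
/// @code
/// use:
///   %c1 = bitcast i32* %p to i8*
///   store i8 0, i8* %c1
/// @endcode
/// so the value no longer needs to be live across the edge.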
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
  // Check whether this is a noop copy.
  EVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(CI->getType());

  // If this is an fp<->int conversion, it is not a noop copy.
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT)) return false;

  // If these values will be promoted, find out what they will be promoted
  // to.  This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
      TargetLowering::TypePromoteInteger)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
      TargetLowering::TypePromoteInteger)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in.  For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(UI);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedCast =
        CastInst::Create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
                         InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
    ++NumCastUses;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

/// OptimizeCmpExpression - sink the given CmpInst into user blocks to reduce
/// the number of virtual registers that must be created and coalesced.  This is
/// a clear win except on targets with multiple condition code registers
/// (PowerPC), where it might lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
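///
/// For example (illustrative), a cmp whose only use is a branch in another
/// block:
/// @code
/// entry:
///   %cmp = icmp ult i32 %a, %b
///   br label %other
/// other:
///   br i1 %cmp, label %t, label %f
/// @endcode
/// is recreated in %other, so the i1 result need not be kept live across the
/// edge.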
static bool OptimizeCmpExpression(CmpInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCmp - Only insert a cmp in each block once.
  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedCmp =
        CmpInst::Create(CI->getOpcode(),
                        CI->getPredicate(), CI->getOperand(0),
                        CI->getOperand(1), "", InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
    ++NumCmpUses;
  }

  // If we removed all uses, nuke the cmp.
  if (CI->use_empty())
    CI->eraseFromParent();

  return MadeChange;
}

namespace {
class CodeGenPrepareFortifiedLibCalls : public SimplifyFortifiedLibCalls {
protected:
  void replaceCall(Value *With) {
    CI->replaceAllUsesWith(With);
    CI->eraseFromParent();
  }
  bool isFoldable(unsigned SizeCIOp, unsigned, bool) const {
    if (ConstantInt *SizeCI =
            dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp)))
      return SizeCI->isAllOnesValue();
    return false;
  }
};
} // end anonymous namespace

bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
  BasicBlock *BB = CI->getParent();

  // Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
  // lower it to normal LLVM code, do so now.
  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
    if (TLI->ExpandInlineAsm(CI)) {
      // Avoid invalidating the iterator.
      CurInstIterator = BB->begin();
      // Avoid processing instructions out of order, which could cause
      // reuse before a value is defined.
      SunkAddrs.clear();
      return true;
    }
    // Sink address computing for memory operands into the block.
    if (OptimizeInlineAsmInst(CI))
      return true;
  }

  // Lower all uses of llvm.objectsize.*
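  // For example, @llvm.objectsize.i64(i8* %p, i1 false) with an unknown
  // object size folds to i64 -1, and the i1 true ("minimum") form to i64 0.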
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
    Type *ReturnTy = CI->getType();
    Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);

    // Substituting this can cause recursive simplifications, which can
    // invalidate our iterator.  Use a WeakVH to hold onto it in case this
    // happens.
    WeakVH IterHandle(CurInstIterator);

    replaceAndRecursivelySimplify(CI, RetVal, TLI ? TLI->getDataLayout() : 0,
                                  TLInfo, ModifiedDT ? 0 : DT);

    // If the iterator instruction was recursively deleted, start over at the
    // start of the block.
    if (IterHandle != CurInstIterator) {
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
    return true;
  }

  if (II && TLI) {
    SmallVector<Value*, 2> PtrOps;
    Type *AccessTy;
    if (TLI->GetAddrModeArguments(II, PtrOps, AccessTy))
      while (!PtrOps.empty())
        if (OptimizeMemoryInst(II, PtrOps.pop_back_val(), AccessTy))
          return true;
  }

  // From here on out we're working with named functions.
  if (CI->getCalledFunction() == 0) return false;

  // We'll need DataLayout from here on out.
  const DataLayout *TD = TLI ? TLI->getDataLayout() : 0;
  if (!TD) return false;

  // Lower all default uses of _chk calls.  This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // that have the default "don't know" as the objectsize.  Anything else
  // should be left alone.
  CodeGenPrepareFortifiedLibCalls Simplifier;
  return Simplifier.fold(CI, TD, TLInfo);
}

/// DupRetToEnableTailCallOpts - Look for opportunities to duplicate return
/// instructions to the predecessor to enable tail call optimizations. The
/// case it is currently looking for is:
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   br label %return
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   br label %return
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   br label %return
/// return:
///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
///   ret i32 %retval
/// @endcode
///
/// =>
///
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   ret i32 %tmp0
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   ret i32 %tmp1
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   ret i32 %tmp2
/// @endcode
bool CodeGenPrepare::DupRetToEnableTailCallOpts(BasicBlock *BB) {
  if (!TLI)
    return false;

  ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator());
  if (!RI)
    return false;

  PHINode *PN = 0;
  BitCastInst *BCI = 0;
  Value *V = RI->getReturnValue();
  if (V) {
    BCI = dyn_cast<BitCastInst>(V);
    if (BCI)
      V = BCI->getOperand(0);

    PN = dyn_cast<PHINode>(V);
    if (!PN)
      return false;
  }

  if (PN && PN->getParent() != BB)
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  // See llvm::isInTailCallPosition().
  const Function *F = BB->getParent();
  AttributeSet CallerAttrs = F->getAttributes();
  if (CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt) ||
      CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
    return false;

  // Make sure there are no instructions between the PHI and return, or that the
  // return is the first instruction in the block.
  if (PN) {
    BasicBlock::iterator BI = BB->begin();
    do { ++BI; } while (isa<DbgInfoIntrinsic>(BI));
    if (&*BI == BCI)
      // Also skip over the bitcast.
      ++BI;
    if (&*BI != RI)
      return false;
  } else {
    BasicBlock::iterator BI = BB->begin();
    while (isa<DbgInfoIntrinsic>(BI)) ++BI;
    if (&*BI != RI)
      return false;
  }

  /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
  /// call.
  SmallVector<CallInst*, 4> TailCalls;
  if (PN) {
    for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
      CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I));
      // Make sure the phi value is indeed produced by the tail call.
      if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) &&
          TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  } else {
    SmallPtrSet<BasicBlock*, 4> VisitedBBs;
    for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
      if (!VisitedBBs.insert(*PI))
        continue;

      BasicBlock::InstListType &InstList = (*PI)->getInstList();
      BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin();
      BasicBlock::InstListType::reverse_iterator RE = InstList.rend();
      do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI));
      if (RI == RE)
        continue;

      CallInst *CI = dyn_cast<CallInst>(&*RI);
      if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  }

  bool Changed = false;
  for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) {
    CallInst *CI = TailCalls[i];
    CallSite CS(CI);

    // Conservatively require the attributes of the call to match those of the
    // return. Ignore noalias because it doesn't affect the call sequence.
    AttributeSet CalleeAttrs = CS.getAttributes();
    if (AttrBuilder(CalleeAttrs, AttributeSet::ReturnIndex).
          removeAttribute(Attribute::NoAlias) !=
        AttrBuilder(CallerAttrs, AttributeSet::ReturnIndex).
          removeAttribute(Attribute::NoAlias))
      continue;

    // Make sure the call instruction is followed by an unconditional branch to
    // the return block.
    BasicBlock *CallBB = CI->getParent();
    BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator());
    if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
      continue;

    // Duplicate the return into CallBB.
    (void)FoldReturnIntoUncondBranch(RI, BB, CallBB);
    ModifiedDT = Changed = true;
    ++NumRetsDup;
  }

  // If we eliminated all predecessors of the block, delete the block now.
  if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
    BB->eraseFromParent();

  return Changed;
}

//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//

namespace {

/// ExtAddrMode - This is an extended version of TargetLowering::AddrMode
/// which holds actual Value*'s for register values.
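///
/// When printed via print()/dump() below, a mode appears as, e.g.
/// (hypothetical values):
///   [GV:@g + 4 + Base:%base + 2*%idx]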
struct ExtAddrMode : public TargetLowering::AddrMode {
  Value *BaseReg;
  Value *ScaledReg;
  ExtAddrMode() : BaseReg(0), ScaledReg(0) {}
  void print(raw_ostream &OS) const;
  void dump() const;

  bool operator==(const ExtAddrMode& O) const {
    return (BaseReg == O.BaseReg) && (ScaledReg == O.ScaledReg) &&
           (BaseGV == O.BaseGV) && (BaseOffs == O.BaseOffs) &&
           (HasBaseReg == O.HasBaseReg) && (Scale == O.Scale);
  }
};

static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
  AM.print(OS);
  return OS;
}

void ExtAddrMode::print(raw_ostream &OS) const {
  bool NeedPlus = false;
  OS << "[";
  if (BaseGV) {
    OS << (NeedPlus ? " + " : "")
       << "GV:";
    WriteAsOperand(OS, BaseGV, /*PrintType=*/false);
    NeedPlus = true;
  }

  if (BaseOffs)
    OS << (NeedPlus ? " + " : "") << BaseOffs, NeedPlus = true;

  if (BaseReg) {
    OS << (NeedPlus ? " + " : "")
       << "Base:";
    WriteAsOperand(OS, BaseReg, /*PrintType=*/false);
    NeedPlus = true;
  }
  if (Scale) {
    OS << (NeedPlus ? " + " : "")
       << Scale << "*";
    WriteAsOperand(OS, ScaledReg, /*PrintType=*/false);
  }

  OS << ']';
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ExtAddrMode::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif


/// \brief A helper class for matching addressing modes.
///
/// This encapsulates the logic for matching the target-legal addressing modes.
class AddressingModeMatcher {
  SmallVectorImpl<Instruction*> &AddrModeInsts;
  const TargetLowering &TLI;

  /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
  /// the memory instruction that we're computing this address for.
  Type *AccessTy;
  Instruction *MemoryInst;

  /// AddrMode - This is the addressing mode that we're building up.  This is
  /// part of the return value of this addressing mode matching stuff.
  ExtAddrMode &AddrMode;

  /// IgnoreProfitability - This is set to true when we should not do
  /// profitability checks.  When true, IsProfitableToFoldIntoAddressingMode
  /// always returns true.
  bool IgnoreProfitability;

  AddressingModeMatcher(SmallVectorImpl<Instruction*> &AMI,
                        const TargetLowering &T, Type *AT,
                        Instruction *MI, ExtAddrMode &AM)
    : AddrModeInsts(AMI), TLI(T), AccessTy(AT), MemoryInst(MI), AddrMode(AM) {
    IgnoreProfitability = false;
  }
public:

  /// Match - Find the maximal addressing mode that a load/store of V can fold,
  /// given an access type of AccessTy.  This returns a list of involved
  /// instructions in AddrModeInsts.
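  ///
  /// A typical use (sketch; LI is a hypothetical LoadInst*):
  /// @code
  ///   SmallVector<Instruction*, 8> AddrModeInsts;
  ///   ExtAddrMode AM = AddressingModeMatcher::Match(
  ///       LI->getPointerOperand(), LI->getType(), LI, AddrModeInsts, TLI);
  /// @endcode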
  static ExtAddrMode Match(Value *V, Type *AccessTy,
                           Instruction *MemoryInst,
                           SmallVectorImpl<Instruction*> &AddrModeInsts,
                           const TargetLowering &TLI) {
    ExtAddrMode Result;

    bool Success =
      AddressingModeMatcher(AddrModeInsts, TLI, AccessTy,
                            MemoryInst, Result).MatchAddr(V, 0);
    (void)Success; assert(Success && "Couldn't select *anything*?");
    return Result;
  }
private:
  bool MatchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
  bool MatchAddr(Value *V, unsigned Depth);
  bool MatchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth);
  bool IsProfitableToFoldIntoAddressingMode(Instruction *I,
                                            ExtAddrMode &AMBefore,
                                            ExtAddrMode &AMAfter);
  bool ValueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
};

/// MatchScaledValue - Try adding ScaleReg*Scale to the current addressing mode.
/// Return true and update AddrMode if this addr mode is legal for the target,
/// false if not.
bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale,
                                             unsigned Depth) {
  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
  // mode.  Just process that directly.
  if (Scale == 1)
    return MatchAddr(ScaleReg, Depth);

  // If the scale is 0, it takes nothing to add this.
  if (Scale == 0)
    return true;

  // If we already have a scale of this value, we can add to it, otherwise, we
  // need an available scale field.
  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
    return false;

  ExtAddrMode TestAddrMode = AddrMode;

  // Add scale to turn X*4+X*3 -> X*7.  This could also do things like
  // [A+B + A*7] -> [B+A*8].
  TestAddrMode.Scale += Scale;
  TestAddrMode.ScaledReg = ScaleReg;

  // If the new address isn't legal, bail out.
  if (!TLI.isLegalAddressingMode(TestAddrMode, AccessTy))
    return false;

  // It was legal, so commit it.
  AddrMode = TestAddrMode;

  // Okay, we decided that we can add ScaleReg+Scale to AddrMode.  Check now
  // to see if ScaleReg is actually X+C.  If so, we can turn this into adding
  // X*Scale + C*Scale to addr mode.
  ConstantInt *CI = 0; Value *AddLHS = 0;
  if (isa<Instruction>(ScaleReg) &&  // not a constant expr.
      match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
    TestAddrMode.ScaledReg = AddLHS;
    TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;

    // If this addressing mode is legal, commit it and remember that we folded
    // this instruction.
    if (TLI.isLegalAddressingMode(TestAddrMode, AccessTy)) {
      AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
      AddrMode = TestAddrMode;
      return true;
    }
  }

  // Otherwise, not (x+c)*scale, just return what we have.
  return true;
}

/// MightBeFoldableInst - This is a little filter, which returns true if an
/// addressing computation involving I might be folded into a load/store
/// accessing it.  This doesn't need to be perfect, but needs to accept at least
/// the set of instructions that MatchOperationAddr can.
static bool MightBeFoldableInst(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::BitCast:
    // Don't touch identity bitcasts.
    if (I->getType() == I->getOperand(0)->getType())
      return false;
    return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return true;
  case Instruction::IntToPtr:
    // We know the input is intptr_t, so this is foldable.
    return true;
  case Instruction::Add:
    return true;
  case Instruction::Mul:
  case Instruction::Shl:
    // Can only handle X*C and X << C.
    return isa<ConstantInt>(I->getOperand(1));
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}

/// MatchOperationAddr - Given an instruction or constant expr, see if we can
/// fold the operation into the addressing mode.  If so, update the addressing
/// mode and return true, otherwise return false without modifying AddrMode.
bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
                                               unsigned Depth) {
  // Avoid exponential behavior on extremely deep expression trees.
  if (Depth >= 5) return false;

  switch (Opcode) {
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return MatchAddr(AddrInst->getOperand(0), Depth);
  case Instruction::IntToPtr:
    // This inttoptr is a no-op if the integer type is pointer sized.
    if (TLI.getValueType(AddrInst->getOperand(0)->getType()) ==
        TLI.getPointerTy())
      return MatchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::BitCast:
    // BitCast is always a noop, and we can handle it as long as it is
    // int->int or pointer->pointer (we don't want int<->fp or something).
    if ((AddrInst->getOperand(0)->getType()->isPointerTy() ||
         AddrInst->getOperand(0)->getType()->isIntegerTy()) &&
        // Don't touch identity bitcasts.  These were probably put here by LSR,
        // and we don't want to mess around with them.  Assume it knows what it
        // is doing.
        AddrInst->getOperand(0)->getType() != AddrInst->getType())
      return MatchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::Add: {
    // Check to see if we can merge in the RHS then the LHS.  If so, we win.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();
    if (MatchAddr(AddrInst->getOperand(1), Depth+1) &&
        MatchAddr(AddrInst->getOperand(0), Depth+1))
      return true;

    // Restore the old addr mode info.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);

    // Otherwise this was over-aggressive.  Try merging in the LHS then the RHS.
    if (MatchAddr(AddrInst->getOperand(0), Depth+1) &&
        MatchAddr(AddrInst->getOperand(1), Depth+1))
      return true;

    // Otherwise we definitely can't merge the ADD in.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    break;
  }
  //case Instruction::Or:
  // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
  //break;
  case Instruction::Mul:
  case Instruction::Shl: {
    // Can only handle X*C and X << C.
    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
    if (!RHS) return false;
    int64_t Scale = RHS->getSExtValue();
    if (Opcode == Instruction::Shl)
      Scale = 1LL << Scale;

    return MatchScaledValue(AddrInst->getOperand(0), Scale, Depth);
  }
  case Instruction::GetElementPtr: {
    // Scan the GEP.  We can handle it if it contains constant offsets and at
    // most one variable offset.
    int VariableOperand = -1;
    unsigned VariableScale = 0;

    int64_t ConstantOffset = 0;
    const DataLayout *TD = TLI.getDataLayout();
    gep_type_iterator GTI = gep_type_begin(AddrInst);
    for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD->getStructLayout(STy);
        unsigned Idx =
          cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
        ConstantOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t TypeSize = TD->getTypeAllocSize(GTI.getIndexedType());
        if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
          ConstantOffset += CI->getSExtValue()*TypeSize;
        } else if (TypeSize) {  // Scales of zero don't do anything.
          // We only allow one variable index at the moment.
          if (VariableOperand != -1)
            return false;

          // Remember the variable index.
          VariableOperand = i;
          VariableScale = TypeSize;
        }
      }
    }

    // A common case is for the GEP to only do a constant offset.  In this case,
    // just add it to the disp field and check validity.
    if (VariableOperand == -1) {
      AddrMode.BaseOffs += ConstantOffset;
      if (ConstantOffset == 0 || TLI.isLegalAddressingMode(AddrMode, AccessTy)){
        // Check to see if we can fold the base pointer in too.
        if (MatchAddr(AddrInst->getOperand(0), Depth+1))
          return true;
      }
      AddrMode.BaseOffs -= ConstantOffset;
      return false;
    }

    // Save the valid addressing mode in case we can't match.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // See if the scale and offset amount is valid for this target.
    AddrMode.BaseOffs += ConstantOffset;

    // Match the base operand of the GEP.
    if (!MatchAddr(AddrInst->getOperand(0), Depth+1)) {
      // If it couldn't be matched, just stuff the value in a register.
      if (AddrMode.HasBaseReg) {
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
    }

    // Match the remaining variable portion of the GEP.
    if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
                          Depth)) {
      // If it couldn't be matched, try stuffing the base into a register
      // instead of matching it, and retrying the match of the scale.
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      if (AddrMode.HasBaseReg)
        return false;
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
      AddrMode.BaseOffs += ConstantOffset;
      if (!MatchScaledValue(AddrInst->getOperand(VariableOperand),
                            VariableScale, Depth)) {
        // If even that didn't work, bail.
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
    }

    return true;
  }
  }
  return false;
}

/// MatchAddr - If we can, try to add the value of 'Addr' into the current
/// addressing mode.  If Addr can't be added to AddrMode this returns false and
/// leaves AddrMode unmodified.  This assumes that Addr is either a pointer type
/// or intptr_t for the target.
///
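/// For example (illustrative), on a target supporting [base + scale*reg]
/// addressing, the computation:
/// @code
///   %idx4 = shl i64 %idx, 2
///   %sum = add i64 %base, %idx4
/// @endcode
/// can be matched as BaseReg = %base, ScaledReg = %idx, Scale = 4.
///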
bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
    // Fold in immediates if legal for the target.
    AddrMode.BaseOffs += CI->getSExtValue();
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.BaseOffs -= CI->getSExtValue();
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
    // If this is a global variable, try to fold it into the addressing mode.
    if (AddrMode.BaseGV == 0) {
      AddrMode.BaseGV = GV;
      if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
        return true;
      AddrMode.BaseGV = 0;
    }
  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // Check to see if it is possible to fold this operation.
    if (MatchOperationAddr(I, I->getOpcode(), Depth)) {
      // Okay, it's possible to fold this.  Check to see if it is actually
      // *profitable* to do so.  We use a simple cost model to avoid increasing
      // register pressure too much.
      if (I->hasOneUse() ||
          IsProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
        AddrModeInsts.push_back(I);
        return true;
      }

      // It isn't profitable to do this, roll back.
      //cerr << "NOT FOLDING: " << *I;
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
    }
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    if (MatchOperationAddr(CE, CE->getOpcode(), Depth))
      return true;
  } else if (isa<ConstantPointerNull>(Addr)) {
    // Null pointer gets folded without affecting the addressing mode.
    return true;
  }

  // Worst case, the target should support [reg] addressing modes. :)
  if (!AddrMode.HasBaseReg) {
    AddrMode.HasBaseReg = true;
    AddrMode.BaseReg = Addr;
    // Still check for legality in case the target supports [imm] but not [i+r].
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.HasBaseReg = false;
    AddrMode.BaseReg = 0;
  }

  // If the base register is already taken, see if we can do [r+r].
  if (AddrMode.Scale == 0) {
    AddrMode.Scale = 1;
    AddrMode.ScaledReg = Addr;
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.Scale = 0;
    AddrMode.ScaledReg = 0;
  }
  // Couldn't match.
  return false;
}

/// IsOperandAMemoryOperand - Check to see if all uses of OpVal by the specified
/// inline asm call are due to memory operands.  If so, return true, otherwise
/// return false.
static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
                                    const TargetLowering &TLI) {
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI.ParseConstraints(ImmutableCallSite(CI));
  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, SDValue());

    // If this asm operand is our Value*, and if it isn't an indirect memory
    // operand, we can't fold it!
    if (OpInfo.CallOperandVal == OpVal &&
        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
         !OpInfo.isIndirect))
      return false;
  }

  return true;
}

/// FindAllMemoryUses - Recursively walk all the uses of I until we find a
/// memory use.  If we find an obviously non-foldable instruction, return true.
/// Add the ultimately found memory instructions to MemoryUses.
static bool FindAllMemoryUses(Instruction *I,
                SmallVectorImpl<std::pair<Instruction*,unsigned> > &MemoryUses,
                              SmallPtrSet<Instruction*, 16> &ConsideredInsts,
                              const TargetLowering &TLI) {
  // If we already considered this instruction, we're done.
  if (!ConsideredInsts.insert(I))
    return false;

  // If this is an obviously unfoldable instruction, bail out.
  if (!MightBeFoldableInst(I))
    return true;

  // Loop over all the uses, recursively processing them.
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI) {
    User *U = *UI;

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      MemoryUses.push_back(std::make_pair(LI, UI.getOperandNo()));
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      unsigned opNo = UI.getOperandNo();
      if (opNo == 0) return true; // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(SI, opNo));
      continue;
    }

    if (CallInst *CI = dyn_cast<CallInst>(U)) {
      InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
      if (!IA) return true;

      // If this is a memory operand, we're cool, otherwise bail out.
      if (!IsOperandAMemoryOperand(CI, IA, I, TLI))
        return true;
      continue;
    }

    if (FindAllMemoryUses(cast<Instruction>(U), MemoryUses, ConsideredInsts,
                          TLI))
      return true;
  }

  return false;
}

    1323 /// ValueAlreadyLiveAtInst - Return true if Val is already known to be live at
   1324 /// the use site that we're folding it into.  If so, there is no cost to
   1325 /// include it in the addressing mode.  KnownLive1 and KnownLive2 are two values
   1326 /// that we know are live at the instruction already.
   1327 bool AddressingModeMatcher::ValueAlreadyLiveAtInst(Value *Val,Value *KnownLive1,
   1328                                                    Value *KnownLive2) {
   1329   // If Val is either of the known-live values, we know it is live!
   1330   if (Val == 0 || Val == KnownLive1 || Val == KnownLive2)
   1331     return true;
   1332 
   1333   // All values other than instructions and arguments (e.g. constants) are live.
   1334   if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;
   1335 
    1336   // If Val is a constant-sized alloca in the entry block, it is live; this
    1337   // is because it is just a reference to the stack/frame pointer, which is
    1338   // live for the whole function.
   1339   if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
   1340     if (AI->isStaticAlloca())
   1341       return true;
   1342 
   1343   // Check to see if this value is already used in the memory instruction's
   1344   // block.  If so, it's already live into the block at the very least, so we
   1345   // can reasonably fold it.
   1346   return Val->isUsedInBasicBlock(MemoryInst->getParent());
   1347 }
   1348 
   1349 /// IsProfitableToFoldIntoAddressingMode - It is possible for the addressing
   1350 /// mode of the machine to fold the specified instruction into a load or store
   1351 /// that ultimately uses it.  However, the specified instruction has multiple
   1352 /// uses.  Given this, it may actually increase register pressure to fold it
   1353 /// into the load.  For example, consider this code:
   1354 ///
   1355 ///     X = ...
   1356 ///     Y = X+1
    1357 ///     use(Y)   -> non-load/store use
   1358 ///     Z = Y+1
   1359 ///     load Z
   1360 ///
   1361 /// In this case, Y has multiple uses, and can be folded into the load of Z
   1362 /// (yielding load [X+2]).  However, doing this will cause both "X" and "X+1" to
   1363 /// be live at the use(Y) line.  If we don't fold Y into load Z, we use one
   1364 /// fewer register.  Since Y can't be folded into "use(Y)" we don't increase the
   1365 /// number of computations either.
   1366 ///
   1367 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic.  If
   1368 /// X was live across 'load Z' for other reasons, we actually *would* want to
   1369 /// fold the addressing mode in the Z case.  This would make Y die earlier.
   1370 bool AddressingModeMatcher::
   1371 IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
   1372                                      ExtAddrMode &AMAfter) {
   1373   if (IgnoreProfitability) return true;
   1374 
   1375   // AMBefore is the addressing mode before this instruction was folded into it,
   1376   // and AMAfter is the addressing mode after the instruction was folded.  Get
   1377   // the set of registers referenced by AMAfter and subtract out those
   1378   // referenced by AMBefore: this is the set of values which folding in this
   1379   // address extends the lifetime of.
   1380   //
   1381   // Note that there are only two potential values being referenced here,
   1382   // BaseReg and ScaleReg (global addresses are always available, as are any
   1383   // folded immediates).
   1384   Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
   1385 
    1386   // If the BaseReg or ScaledReg was referenced by the previous addrmode, its
   1387   // lifetime wasn't extended by adding this instruction.
   1388   if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
   1389     BaseReg = 0;
   1390   if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
   1391     ScaledReg = 0;
   1392 
    1393   // If folding this instruction (and its subexprs) didn't extend any live
   1394   // ranges, we're ok with it.
   1395   if (BaseReg == 0 && ScaledReg == 0)
   1396     return true;
   1397 
   1398   // If all uses of this instruction are ultimately load/store/inlineasm's,
   1399   // check to see if their addressing modes will include this instruction.  If
   1400   // so, we can fold it into all uses, so it doesn't matter if it has multiple
   1401   // uses.
   1402   SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
   1403   SmallPtrSet<Instruction*, 16> ConsideredInsts;
   1404   if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI))
   1405     return false;  // Has a non-memory, non-foldable use!
   1406 
   1407   // Now that we know that all uses of this instruction are part of a chain of
   1408   // computation involving only operations that could theoretically be folded
   1409   // into a memory use, loop over each of these uses and see if they could
   1410   // *actually* fold the instruction.
   1411   SmallVector<Instruction*, 32> MatchedAddrModeInsts;
   1412   for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
   1413     Instruction *User = MemoryUses[i].first;
   1414     unsigned OpNo = MemoryUses[i].second;
   1415 
   1416     // Get the access type of this use.  If the use isn't a pointer, we don't
   1417     // know what it accesses.
   1418     Value *Address = User->getOperand(OpNo);
   1419     if (!Address->getType()->isPointerTy())
   1420       return false;
   1421     Type *AddressAccessTy =
   1422       cast<PointerType>(Address->getType())->getElementType();
   1423 
   1424     // Do a match against the root of this address, ignoring profitability. This
   1425     // will tell us if the addressing mode for the memory operation will
   1426     // *actually* cover the shared instruction.
   1427     ExtAddrMode Result;
   1428     AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, AddressAccessTy,
   1429                                   MemoryInst, Result);
   1430     Matcher.IgnoreProfitability = true;
   1431     bool Success = Matcher.MatchAddr(Address, 0);
   1432     (void)Success; assert(Success && "Couldn't select *anything*?");
   1433 
   1434     // If the match didn't cover I, then it won't be shared by it.
   1435     if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(),
   1436                   I) == MatchedAddrModeInsts.end())
   1437       return false;
   1438 
   1439     MatchedAddrModeInsts.clear();
   1440   }
   1441 
   1442   return true;
   1443 }
   1444 
   1445 } // end anonymous namespace
   1446 
    1447 /// IsNonLocalValue - Return true if the specified value is defined in a
   1448 /// different basic block than BB.
   1449 static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
   1450   if (Instruction *I = dyn_cast<Instruction>(V))
   1451     return I->getParent() != BB;
   1452   return false;
   1453 }
   1454 
   1455 /// OptimizeMemoryInst - Load and Store Instructions often have
   1456 /// addressing modes that can do significant amounts of computation.  As such,
   1457 /// instruction selection will try to get the load or store to do as much
   1458 /// computation as possible for the program.  The problem is that isel can only
   1459 /// see within a single block.  As such, we sink as much legal addressing mode
   1460 /// stuff into the block as possible.
   1461 ///
   1462 /// This method is used to optimize both load/store and inline asms with memory
   1463 /// operands.
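         /// A rough sketch (illustrative only; the exact result depends on the
         /// target's legal addressing modes): given
         ///
         ///     entry:
         ///       %addr = getelementptr i32* %base, i64 %idx
         ///       br label %use
         ///     use:
         ///       %val = load i32* %addr
         ///
         /// the address computation may be rematerialized next to the load:
         ///
         ///     use:
         ///       %sunkaddr = ...             ; ptrtoint/mul/add/inttoptr sequence
         ///       %val = load i32* %sunkaddr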
   1464 bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
   1465                                         Type *AccessTy) {
   1466   Value *Repl = Addr;
   1467 
   1468   // Try to collapse single-value PHI nodes.  This is necessary to undo
   1469   // unprofitable PRE transformations.
   1470   SmallVector<Value*, 8> worklist;
   1471   SmallPtrSet<Value*, 16> Visited;
   1472   worklist.push_back(Addr);
   1473 
   1474   // Use a worklist to iteratively look through PHI nodes, and ensure that
    1475   // the addressing modes obtained from the non-PHI roots of the graph
    1476   // are all equivalent.
   1477   Value *Consensus = 0;
   1478   unsigned NumUsesConsensus = 0;
   1479   bool IsNumUsesConsensusValid = false;
   1480   SmallVector<Instruction*, 16> AddrModeInsts;
   1481   ExtAddrMode AddrMode;
   1482   while (!worklist.empty()) {
   1483     Value *V = worklist.back();
   1484     worklist.pop_back();
   1485 
   1486     // Break use-def graph loops.
   1487     if (!Visited.insert(V)) {
   1488       Consensus = 0;
   1489       break;
   1490     }
   1491 
   1492     // For a PHI node, push all of its incoming values.
   1493     if (PHINode *P = dyn_cast<PHINode>(V)) {
   1494       for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i)
   1495         worklist.push_back(P->getIncomingValue(i));
   1496       continue;
   1497     }
   1498 
   1499     // For non-PHIs, determine the addressing mode being computed.
   1500     SmallVector<Instruction*, 16> NewAddrModeInsts;
   1501     ExtAddrMode NewAddrMode =
   1502       AddressingModeMatcher::Match(V, AccessTy, MemoryInst,
   1503                                    NewAddrModeInsts, *TLI);
   1504 
   1505     // This check is broken into two cases with very similar code to avoid using
   1506     // getNumUses() as much as possible. Some values have a lot of uses, so
   1507     // calling getNumUses() unconditionally caused a significant compile-time
   1508     // regression.
   1509     if (!Consensus) {
   1510       Consensus = V;
   1511       AddrMode = NewAddrMode;
   1512       AddrModeInsts = NewAddrModeInsts;
   1513       continue;
   1514     } else if (NewAddrMode == AddrMode) {
   1515       if (!IsNumUsesConsensusValid) {
   1516         NumUsesConsensus = Consensus->getNumUses();
   1517         IsNumUsesConsensusValid = true;
   1518       }
   1519 
   1520       // Ensure that the obtained addressing mode is equivalent to that obtained
   1521       // for all other roots of the PHI traversal.  Also, when choosing one
   1522       // such root as representative, select the one with the most uses in order
   1523       // to keep the cost modeling heuristics in AddressingModeMatcher
   1524       // applicable.
   1525       unsigned NumUses = V->getNumUses();
   1526       if (NumUses > NumUsesConsensus) {
   1527         Consensus = V;
   1528         NumUsesConsensus = NumUses;
   1529         AddrModeInsts = NewAddrModeInsts;
   1530       }
   1531       continue;
   1532     }
   1533 
   1534     Consensus = 0;
   1535     break;
   1536   }
   1537 
   1538   // If the addressing mode couldn't be determined, or if multiple different
   1539   // ones were determined, bail out now.
   1540   if (!Consensus) return false;
   1541 
    1542   // Check to see if any of the instructions subsumed by this addr mode are
   1543   // non-local to I's BB.
   1544   bool AnyNonLocal = false;
   1545   for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) {
   1546     if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) {
   1547       AnyNonLocal = true;
   1548       break;
   1549     }
   1550   }
   1551 
   1552   // If all the instructions matched are already in this BB, don't do anything.
   1553   if (!AnyNonLocal) {
   1554     DEBUG(dbgs() << "CGP: Found      local addrmode: " << AddrMode << "\n");
   1555     return false;
   1556   }
   1557 
   1558   // Insert this computation right after this user.  Since our caller is
    1559   // scanning from the top of the BB to the bottom, any reuse of the expr is
   1560   // guaranteed to happen later.
   1561   IRBuilder<> Builder(MemoryInst);
   1562 
    1563   // Now that we've determined the addressing expression we want to use and
    1564   // know that we have to sink it into this block, check to see if we have already
   1565   // done this for some other load/store instr in this block.  If so, reuse the
   1566   // computation.
   1567   Value *&SunkAddr = SunkAddrs[Addr];
   1568   if (SunkAddr) {
   1569     DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
   1570                  << *MemoryInst);
   1571     if (SunkAddr->getType() != Addr->getType())
   1572       SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType());
   1573   } else {
   1574     DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
   1575                  << *MemoryInst);
   1576     Type *IntPtrTy =
   1577           TLI->getDataLayout()->getIntPtrType(AccessTy->getContext());
   1578 
   1579     Value *Result = 0;
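             // The sunken address is built below as an integer expression and then
             // cast back to a pointer; schematically (illustrative):
             //   inttoptr(BaseReg + ScaledReg*Scale + BaseGV + BaseOffs)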
   1580 
   1581     // Start with the base register. Do this first so that subsequent address
   1582     // matching finds it last, which will prevent it from trying to match it
   1583     // as the scaled value in case it happens to be a mul. That would be
   1584     // problematic if we've sunk a different mul for the scale, because then
   1585     // we'd end up sinking both muls.
   1586     if (AddrMode.BaseReg) {
   1587       Value *V = AddrMode.BaseReg;
   1588       if (V->getType()->isPointerTy())
   1589         V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
   1590       if (V->getType() != IntPtrTy)
   1591         V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
   1592       Result = V;
   1593     }
   1594 
   1595     // Add the scale value.
   1596     if (AddrMode.Scale) {
   1597       Value *V = AddrMode.ScaledReg;
   1598       if (V->getType() == IntPtrTy) {
   1599         // done.
   1600       } else if (V->getType()->isPointerTy()) {
   1601         V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
   1602       } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
   1603                  cast<IntegerType>(V->getType())->getBitWidth()) {
   1604         V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
   1605       } else {
   1606         V = Builder.CreateSExt(V, IntPtrTy, "sunkaddr");
   1607       }
   1608       if (AddrMode.Scale != 1)
   1609         V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
   1610                               "sunkaddr");
   1611       if (Result)
   1612         Result = Builder.CreateAdd(Result, V, "sunkaddr");
   1613       else
   1614         Result = V;
   1615     }
   1616 
   1617     // Add in the BaseGV if present.
   1618     if (AddrMode.BaseGV) {
   1619       Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr");
   1620       if (Result)
   1621         Result = Builder.CreateAdd(Result, V, "sunkaddr");
   1622       else
   1623         Result = V;
   1624     }
   1625 
   1626     // Add in the Base Offset if present.
   1627     if (AddrMode.BaseOffs) {
   1628       Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
   1629       if (Result)
   1630         Result = Builder.CreateAdd(Result, V, "sunkaddr");
   1631       else
   1632         Result = V;
   1633     }
   1634 
   1635     if (Result == 0)
   1636       SunkAddr = Constant::getNullValue(Addr->getType());
   1637     else
   1638       SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
   1639   }
   1640 
   1641   MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
   1642 
   1643   // If we have no uses, recursively delete the value and all dead instructions
   1644   // using it.
   1645   if (Repl->use_empty()) {
   1646     // This can cause recursive deletion, which can invalidate our iterator.
   1647     // Use a WeakVH to hold onto it in case this happens.
   1648     WeakVH IterHandle(CurInstIterator);
   1649     BasicBlock *BB = CurInstIterator->getParent();
   1650 
   1651     RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo);
   1652 
   1653     if (IterHandle != CurInstIterator) {
   1654       // If the iterator instruction was recursively deleted, start over at the
   1655       // start of the block.
   1656       CurInstIterator = BB->begin();
   1657       SunkAddrs.clear();
   1658     }
   1659   }
   1660   ++NumMemoryInsts;
   1661   return true;
   1662 }
   1663 
   1664 /// OptimizeInlineAsmInst - If there are any memory operands, use
    1665 /// OptimizeMemoryInst to sink their address computation into the block when
   1666 /// possible / profitable.
   1667 bool CodeGenPrepare::OptimizeInlineAsmInst(CallInst *CS) {
   1668   bool MadeChange = false;
   1669 
   1670   TargetLowering::AsmOperandInfoVector
   1671     TargetConstraints = TLI->ParseConstraints(CS);
   1672   unsigned ArgNo = 0;
   1673   for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
   1674     TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
   1675 
   1676     // Compute the constraint code and ConstraintType to use.
   1677     TLI->ComputeConstraintToUse(OpInfo, SDValue());
   1678 
   1679     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
   1680         OpInfo.isIndirect) {
   1681       Value *OpVal = CS->getArgOperand(ArgNo++);
   1682       MadeChange |= OptimizeMemoryInst(CS, OpVal, OpVal->getType());
   1683     } else if (OpInfo.Type == InlineAsm::isInput)
   1684       ArgNo++;
   1685   }
   1686 
   1687   return MadeChange;
   1688 }
   1689 
   1690 /// MoveExtToFormExtLoad - Move a zext or sext fed by a load into the same
   1691 /// basic block as the load, unless conditions are unfavorable. This allows
   1692 /// SelectionDAG to fold the extend into the load.
   1693 ///
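         /// For example (illustrative; subject to the legality checks below):
         ///
         ///     bb0:
         ///       %x = load i16* %p
         ///       br label %bb1
         ///     bb1:
         ///       %y = sext i16 %x to i32
         ///
         /// becomes:
         ///
         ///     bb0:
         ///       %x = load i16* %p
         ///       %y = sext i16 %x to i32
         ///       br label %bb1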
   1694 bool CodeGenPrepare::MoveExtToFormExtLoad(Instruction *I) {
   1695   // Look for a load being extended.
   1696   LoadInst *LI = dyn_cast<LoadInst>(I->getOperand(0));
   1697   if (!LI) return false;
   1698 
   1699   // If they're already in the same block, there's nothing to do.
   1700   if (LI->getParent() == I->getParent())
   1701     return false;
   1702 
   1703   // If the load has other users and the truncate is not free, this probably
   1704   // isn't worthwhile.
   1705   if (!LI->hasOneUse() &&
   1706       TLI && (TLI->isTypeLegal(TLI->getValueType(LI->getType())) ||
   1707               !TLI->isTypeLegal(TLI->getValueType(I->getType()))) &&
   1708       !TLI->isTruncateFree(I->getType(), LI->getType()))
   1709     return false;
   1710 
   1711   // Check whether the target supports casts folded into loads.
   1712   unsigned LType;
   1713   if (isa<ZExtInst>(I))
   1714     LType = ISD::ZEXTLOAD;
   1715   else {
   1716     assert(isa<SExtInst>(I) && "Unexpected ext type!");
   1717     LType = ISD::SEXTLOAD;
   1718   }
   1719   if (TLI && !TLI->isLoadExtLegal(LType, TLI->getValueType(LI->getType())))
   1720     return false;
   1721 
   1722   // Move the extend into the same block as the load, so that SelectionDAG
   1723   // can fold it.
   1724   I->removeFromParent();
   1725   I->insertAfter(LI);
   1726   ++NumExtsMoved;
   1727   return true;
   1728 }
   1729 
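         /// OptimizeExtUses - If both a {s|z}ext and its source value are live out of
         /// the defining block, rewrite other-block uses of the source as truncates
         /// of the extended value, so the source need not be kept live as well.  A
         /// sketch of the idea (illustrative only):
         ///
         ///     bb0:
         ///       %x = ...
         ///       %e = zext i32 %x to i64
         ///     bb1:
         ///       use(%x)     ; becomes:  %t = trunc i64 %e to i32 ... use(%t)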
   1730 bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
   1731   BasicBlock *DefBB = I->getParent();
   1732 
   1733   // If the result of a {s|z}ext and its source are both live out, rewrite all
    1734   // other uses of the source with the result of the extension.
   1735   Value *Src = I->getOperand(0);
   1736   if (Src->hasOneUse())
   1737     return false;
   1738 
   1739   // Only do this xform if truncating is free.
   1740   if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
   1741     return false;
   1742 
   1743   // Only safe to perform the optimization if the source is also defined in
   1744   // this block.
   1745   if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
   1746     return false;
   1747 
   1748   bool DefIsLiveOut = false;
   1749   for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
   1750        UI != E; ++UI) {
   1751     Instruction *User = cast<Instruction>(*UI);
   1752 
   1753     // Figure out which BB this ext is used in.
   1754     BasicBlock *UserBB = User->getParent();
   1755     if (UserBB == DefBB) continue;
   1756     DefIsLiveOut = true;
   1757     break;
   1758   }
   1759   if (!DefIsLiveOut)
   1760     return false;
   1761 
   1762   // Make sure none of the uses are PHI nodes.
   1763   for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
   1764        UI != E; ++UI) {
   1765     Instruction *User = cast<Instruction>(*UI);
   1766     BasicBlock *UserBB = User->getParent();
   1767     if (UserBB == DefBB) continue;
   1768     // Be conservative. We don't want this xform to end up introducing
   1769     // reloads just before load / store instructions.
   1770     if (isa<PHINode>(User) || isa<LoadInst>(User) || isa<StoreInst>(User))
   1771       return false;
   1772   }
   1773 
    1774   // InsertedTruncs - Insert at most one trunc in each block.
   1775   DenseMap<BasicBlock*, Instruction*> InsertedTruncs;
   1776 
   1777   bool MadeChange = false;
   1778   for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
   1779        UI != E; ++UI) {
   1780     Use &TheUse = UI.getUse();
   1781     Instruction *User = cast<Instruction>(*UI);
   1782 
   1783     // Figure out which BB this ext is used in.
   1784     BasicBlock *UserBB = User->getParent();
   1785     if (UserBB == DefBB) continue;
   1786 
   1787     // Both src and def are live in this block. Rewrite the use.
   1788     Instruction *&InsertedTrunc = InsertedTruncs[UserBB];
   1789 
   1790     if (!InsertedTrunc) {
   1791       BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
   1792       InsertedTrunc = new TruncInst(I, Src->getType(), "", InsertPt);
   1793     }
   1794 
   1795     // Replace a use of the {s|z}ext source with a use of the result.
   1796     TheUse = InsertedTrunc;
   1797     ++NumExtUses;
   1798     MadeChange = true;
   1799   }
   1800 
   1801   return MadeChange;
   1802 }
   1803 
   1804 /// isFormingBranchFromSelectProfitable - Returns true if a SelectInst should be
   1805 /// turned into an explicit branch.
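         /// For instance (an illustrative sketch), a select whose condition compares
         /// a freshly loaded value is a candidate, since a well-predicted branch
         /// avoids stalling on the load:
         ///
         ///     %v = load i32* %p
         ///     %c = icmp eq i32 %v, 0
         ///     %r = select i1 %c, i32 %a, i32 %b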
   1806 static bool isFormingBranchFromSelectProfitable(SelectInst *SI) {
   1807   // FIXME: This should use the same heuristics as IfConversion to determine
   1808   // whether a select is better represented as a branch.  This requires that
   1809   // branch probability metadata is preserved for the select, which is not the
   1810   // case currently.
   1811 
   1812   CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
   1813 
   1814   // If the branch is predicted right, an out of order CPU can avoid blocking on
    1815   // the compare.  For compares with a memory operand, emit a branch rather
    1816   // than a cmov, to avoid stalls on the load from memory.  If the compare has
    1817   // more than one use there's probably another cmov or setcc around, so it's
    1818   // not worth emitting a branch.
   1819   if (!Cmp)
   1820     return false;
   1821 
   1822   Value *CmpOp0 = Cmp->getOperand(0);
   1823   Value *CmpOp1 = Cmp->getOperand(1);
   1824 
   1825   // We check that the memory operand has one use to avoid uses of the loaded
    1826   // value directly after the compare, which would make a branch unprofitable.
   1827   return Cmp->hasOneUse() &&
   1828          ((isa<LoadInst>(CmpOp0) && CmpOp0->hasOneUse()) ||
   1829           (isa<LoadInst>(CmpOp1) && CmpOp1->hasOneUse()));
   1830 }
   1831 
   1832 
   1833 /// If we have a SelectInst that will likely profit from branch prediction,
   1834 /// turn it into a branch.
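         /// A sketch of the rewrite (illustrative only; block names are made up):
         ///
         ///     %r = select i1 %c, i32 %t, i32 %f
         ///
         /// becomes:
         ///
         ///     start:
         ///       br i1 %c, label %select.end, label %select.mid
         ///     select.mid:
         ///       br label %select.end
         ///     select.end:
         ///       %r = phi i32 [ %t, %start ], [ %f, %select.mid ]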
   1835 bool CodeGenPrepare::OptimizeSelectInst(SelectInst *SI) {
   1836   bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);
   1837 
    1838   // Can we convert the 'select' to control flow?
   1839   if (DisableSelectToBranch || OptSize || !TLI || VectorCond)
   1840     return false;
   1841 
   1842   TargetLowering::SelectSupportKind SelectKind;
   1843   if (VectorCond)
   1844     SelectKind = TargetLowering::VectorMaskSelect;
   1845   else if (SI->getType()->isVectorTy())
   1846     SelectKind = TargetLowering::ScalarCondVectorVal;
   1847   else
   1848     SelectKind = TargetLowering::ScalarValSelect;
   1849 
    1850   // Do we have efficient codegen support for this kind of 'select'?
   1851   if (TLI->isSelectSupported(SelectKind)) {
   1852     // We have efficient codegen support for the select instruction.
   1853     // Check if it is profitable to keep this 'select'.
   1854     if (!TLI->isPredictableSelectExpensive() ||
   1855         !isFormingBranchFromSelectProfitable(SI))
   1856       return false;
   1857   }
   1858 
   1859   ModifiedDT = true;
   1860 
   1861   // First, we split the block containing the select into 2 blocks.
   1862   BasicBlock *StartBlock = SI->getParent();
   1863   BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(SI));
   1864   BasicBlock *NextBlock = StartBlock->splitBasicBlock(SplitPt, "select.end");
   1865 
   1866   // Create a new block serving as the landing pad for the branch.
   1867   BasicBlock *SmallBlock = BasicBlock::Create(SI->getContext(), "select.mid",
   1868                                              NextBlock->getParent(), NextBlock);
   1869 
    1870   // Move the unconditional branch from the block containing the select into
    1871   // our landing pad block.
   1872   StartBlock->getTerminator()->eraseFromParent();
   1873   BranchInst::Create(NextBlock, SmallBlock);
   1874 
   1875   // Insert the real conditional branch based on the original condition.
   1876   BranchInst::Create(NextBlock, SmallBlock, SI->getCondition(), SI);
   1877 
   1878   // The select itself is replaced with a PHI Node.
   1879   PHINode *PN = PHINode::Create(SI->getType(), 2, "", NextBlock->begin());
   1880   PN->takeName(SI);
   1881   PN->addIncoming(SI->getTrueValue(), StartBlock);
   1882   PN->addIncoming(SI->getFalseValue(), SmallBlock);
   1883   SI->replaceAllUsesWith(PN);
   1884   SI->eraseFromParent();
   1885 
   1886   // Instruct OptimizeBlock to skip to the next block.
   1887   CurInstIterator = StartBlock->end();
   1888   ++NumSelectsExpanded;
   1889   return true;
   1890 }
   1891 
   1892 bool CodeGenPrepare::OptimizeInst(Instruction *I) {
   1893   if (PHINode *P = dyn_cast<PHINode>(I)) {
   1894     // It is possible for very late stage optimizations (such as SimplifyCFG)
   1895     // to introduce PHI nodes too late to be cleaned up.  If we detect such a
   1896     // trivial PHI, go ahead and zap it here.
   1897     if (Value *V = SimplifyInstruction(P)) {
   1898       P->replaceAllUsesWith(V);
   1899       P->eraseFromParent();
   1900       ++NumPHIsElim;
   1901       return true;
   1902     }
   1903     return false;
   1904   }
   1905 
   1906   if (CastInst *CI = dyn_cast<CastInst>(I)) {
   1907     // If the source of the cast is a constant, then this should have
   1908     // already been constant folded.  The only reason NOT to constant fold
   1909     // it is if something (e.g. LSR) was careful to place the constant
    1910     // evaluation in a block other than the one that uses it (e.g. to hoist
   1911     // the address of globals out of a loop).  If this is the case, we don't
   1912     // want to forward-subst the cast.
   1913     if (isa<Constant>(CI->getOperand(0)))
   1914       return false;
   1915 
   1916     if (TLI && OptimizeNoopCopyExpression(CI, *TLI))
   1917       return true;
   1918 
   1919     if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
   1920       bool MadeChange = MoveExtToFormExtLoad(I);
    1921       return MadeChange | OptimizeExtUses(I); // '|': always run OptimizeExtUses
   1922     }
   1923     return false;
   1924   }
   1925 
   1926   if (CmpInst *CI = dyn_cast<CmpInst>(I))
   1927     return OptimizeCmpExpression(CI);
   1928 
   1929   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
   1930     if (TLI)
   1931       return OptimizeMemoryInst(I, I->getOperand(0), LI->getType());
   1932     return false;
   1933   }
   1934 
   1935   if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
   1936     if (TLI)
   1937       return OptimizeMemoryInst(I, SI->getOperand(1),
   1938                                 SI->getOperand(0)->getType());
   1939     return false;
   1940   }
   1941 
   1942   if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
   1943     if (GEPI->hasAllZeroIndices()) {
    1944       // The GEP operand must be a pointer, and so must its result -> BitCast.
   1945       Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
   1946                                         GEPI->getName(), GEPI);
   1947       GEPI->replaceAllUsesWith(NC);
   1948       GEPI->eraseFromParent();
   1949       ++NumGEPsElim;
   1950       OptimizeInst(NC);
   1951       return true;
   1952     }
   1953     return false;
   1954   }
   1955 
   1956   if (CallInst *CI = dyn_cast<CallInst>(I))
   1957     return OptimizeCallInst(CI);
   1958 
   1959   if (SelectInst *SI = dyn_cast<SelectInst>(I))
   1960     return OptimizeSelectInst(SI);
   1961 
   1962   return false;
   1963 }
   1964 
   1965 // In this pass we look for GEP and cast instructions that are used
   1966 // across basic blocks and rewrite them to improve basic-block-at-a-time
   1967 // selection.
   1968 bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
   1969   SunkAddrs.clear();
   1970   bool MadeChange = false;
   1971 
   1972   CurInstIterator = BB.begin();
   1973   while (CurInstIterator != BB.end())
   1974     MadeChange |= OptimizeInst(CurInstIterator++);
   1975 
   1976   MadeChange |= DupRetToEnableTailCallOpts(&BB);
   1977 
   1978   return MadeChange;
   1979 }
   1980 
    1981 // If llvm.dbg.value is far away from the value, then isel may not be able to
    1982 // handle it properly. isel will drop the llvm.dbg.value if it cannot
    1983 // find a node corresponding to the value.
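         // For example (illustrative; the exact metadata form varies by LLVM version):
         //
         //     %a = add i32 %x, %y
         //     ...                    ; many unrelated instructions
         //     call void @llvm.dbg.value(metadata !{i32 %a}, i64 0, metadata !10)
         //
         // is rewritten so that the llvm.dbg.value call immediately follows %a.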
   1984 bool CodeGenPrepare::PlaceDbgValues(Function &F) {
   1985   bool MadeChange = false;
   1986   for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
   1987     Instruction *PrevNonDbgInst = NULL;
   1988     for (BasicBlock::iterator BI = I->begin(), BE = I->end(); BI != BE;) {
   1989       Instruction *Insn = BI; ++BI;
   1990       DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
   1991       if (!DVI) {
   1992         PrevNonDbgInst = Insn;
   1993         continue;
   1994       }
   1995 
   1996       Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
   1997       if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
   1998         DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI);
   1999         DVI->removeFromParent();
   2000         if (isa<PHINode>(VI))
   2001           DVI->insertBefore(VI->getParent()->getFirstInsertionPt());
   2002         else
   2003           DVI->insertAfter(VI);
   2004         MadeChange = true;
   2005         ++NumDbgValueMoved;
   2006       }
   2007     }
   2008   }
   2009   return MadeChange;
   2010 }
   2011