      1 //===-- Local.cpp - Functions to perform local transformations ------------===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
      10 // This family of functions performs various local transformations to the
     11 // program.
     12 //
     13 //===----------------------------------------------------------------------===//
     14 
     15 #include "llvm/Transforms/Utils/Local.h"
     16 #include "llvm/ADT/DenseMap.h"
     17 #include "llvm/ADT/DenseSet.h"
     18 #include "llvm/ADT/Hashing.h"
     19 #include "llvm/ADT/STLExtras.h"
     20 #include "llvm/ADT/SetVector.h"
     21 #include "llvm/ADT/SmallPtrSet.h"
     22 #include "llvm/ADT/Statistic.h"
     23 #include "llvm/Analysis/EHPersonalities.h"
     24 #include "llvm/Analysis/InstructionSimplify.h"
     25 #include "llvm/Analysis/MemoryBuiltins.h"
     26 #include "llvm/Analysis/ValueTracking.h"
     27 #include "llvm/IR/CFG.h"
     28 #include "llvm/IR/Constants.h"
     29 #include "llvm/IR/DIBuilder.h"
     30 #include "llvm/IR/DataLayout.h"
     31 #include "llvm/IR/DebugInfo.h"
     32 #include "llvm/IR/DerivedTypes.h"
     33 #include "llvm/IR/Dominators.h"
     34 #include "llvm/IR/GetElementPtrTypeIterator.h"
     35 #include "llvm/IR/GlobalAlias.h"
     36 #include "llvm/IR/GlobalVariable.h"
     37 #include "llvm/IR/IRBuilder.h"
     38 #include "llvm/IR/Instructions.h"
     39 #include "llvm/IR/IntrinsicInst.h"
     40 #include "llvm/IR/Intrinsics.h"
     41 #include "llvm/IR/MDBuilder.h"
     42 #include "llvm/IR/Metadata.h"
     43 #include "llvm/IR/Operator.h"
     44 #include "llvm/IR/ValueHandle.h"
     45 #include "llvm/Support/Debug.h"
     46 #include "llvm/Support/MathExtras.h"
     47 #include "llvm/Support/raw_ostream.h"
     48 using namespace llvm;
     49 
     50 #define DEBUG_TYPE "local"
     51 
     52 STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
     53 
     54 //===----------------------------------------------------------------------===//
     55 //  Local constant propagation.
     56 //
     57 
     58 /// ConstantFoldTerminator - If a terminator instruction is predicated on a
     59 /// constant value, convert it into an unconditional branch to the constant
     60 /// destination.  This is a nontrivial operation because the successors of this
     61 /// basic block must have their PHI nodes updated.
     62 /// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
     63 /// conditions and indirectbr addresses this might make dead if
     64 /// DeleteDeadConditions is true.
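         /// For example, "br i1 true, label %T, label %F" becomes "br label %T",
         /// and %F is informed that it lost this block as a predecessor.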
     65 bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
     66                                   const TargetLibraryInfo *TLI) {
     67   TerminatorInst *T = BB->getTerminator();
     68   IRBuilder<> Builder(T);
     69 
      70   // Branch - See if we are conditionally jumping on a constant.
     71   if (BranchInst *BI = dyn_cast<BranchInst>(T)) {
     72     if (BI->isUnconditional()) return false;  // Can't optimize uncond branch
     73     BasicBlock *Dest1 = BI->getSuccessor(0);
     74     BasicBlock *Dest2 = BI->getSuccessor(1);
     75 
     76     if (ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
     77       // Are we branching on constant?
     78       // YES.  Change to unconditional branch...
     79       BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
     80       BasicBlock *OldDest     = Cond->getZExtValue() ? Dest2 : Dest1;
     81 
     82       //cerr << "Function: " << T->getParent()->getParent()
     83       //     << "\nRemoving branch from " << T->getParent()
     84       //     << "\n\nTo: " << OldDest << endl;
     85 
     86       // Let the basic block know that we are letting go of it.  Based on this,
      87       // it will adjust its PHI nodes.
     88       OldDest->removePredecessor(BB);
     89 
     90       // Replace the conditional branch with an unconditional one.
     91       Builder.CreateBr(Destination);
     92       BI->eraseFromParent();
     93       return true;
     94     }
     95 
     96     if (Dest2 == Dest1) {       // Conditional branch to same location?
     97       // This branch matches something like this:
     98       //     br bool %cond, label %Dest, label %Dest
     99       // and changes it into:  br label %Dest
    100 
    101       // Let the basic block know that we are letting go of one copy of it.
    102       assert(BI->getParent() && "Terminator not inserted in block!");
    103       Dest1->removePredecessor(BI->getParent());
    104 
    105       // Replace the conditional branch with an unconditional one.
    106       Builder.CreateBr(Dest1);
    107       Value *Cond = BI->getCondition();
    108       BI->eraseFromParent();
    109       if (DeleteDeadConditions)
    110         RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
    111       return true;
    112     }
    113     return false;
    114   }
    115 
    116   if (SwitchInst *SI = dyn_cast<SwitchInst>(T)) {
    117     // If we are switching on a constant, we can convert the switch to an
    118     // unconditional branch.
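             // For example, "switch i32 4, label %dflt [ i32 4, label %C ]" becomes
             // "br label %C".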
    119     ConstantInt *CI = dyn_cast<ConstantInt>(SI->getCondition());
    120     BasicBlock *DefaultDest = SI->getDefaultDest();
    121     BasicBlock *TheOnlyDest = DefaultDest;
    122 
    123     // If the default is unreachable, ignore it when searching for TheOnlyDest.
    124     if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
    125         SI->getNumCases() > 0) {
    126       TheOnlyDest = SI->case_begin().getCaseSuccessor();
    127     }
    128 
    129     // Figure out which case it goes to.
    130     for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
    131          i != e; ++i) {
    132       // Found case matching a constant operand?
    133       if (i.getCaseValue() == CI) {
    134         TheOnlyDest = i.getCaseSuccessor();
    135         break;
    136       }
    137 
    138       // Check to see if this branch is going to the same place as the default
    139       // dest.  If so, eliminate it as an explicit compare.
    140       if (i.getCaseSuccessor() == DefaultDest) {
    141         MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
    142         unsigned NCases = SI->getNumCases();
    143         // Fold the case metadata into the default if there will be any branches
    144         // left, unless the metadata doesn't match the switch.
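                 // For example, if the weights are [default 10, case0 5, case1 7] and
                 // case0 jumps to the default, the new weights are [15, 7].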
    145         if (NCases > 1 && MD && MD->getNumOperands() == 2 + NCases) {
    146           // Collect branch weights into a vector.
    147           SmallVector<uint32_t, 8> Weights;
    148           for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
    149                ++MD_i) {
    150             ConstantInt *CI =
    151                 mdconst::dyn_extract<ConstantInt>(MD->getOperand(MD_i));
    152             assert(CI);
    153             Weights.push_back(CI->getValue().getZExtValue());
    154           }
     155           // Merge the weight of this case into the default weight.
    156           unsigned idx = i.getCaseIndex();
    157           Weights[0] += Weights[idx+1];
    158           // Remove weight for this case.
    159           std::swap(Weights[idx+1], Weights.back());
    160           Weights.pop_back();
    161           SI->setMetadata(LLVMContext::MD_prof,
    162                           MDBuilder(BB->getContext()).
    163                           createBranchWeights(Weights));
    164         }
    165         // Remove this entry.
    166         DefaultDest->removePredecessor(SI->getParent());
    167         SI->removeCase(i);
    168         --i; --e;
    169         continue;
    170       }
    171 
    172       // Otherwise, check to see if the switch only branches to one destination.
     173       // We do this by resetting "TheOnlyDest" to null when we find two non-equal
    174       // destinations.
    175       if (i.getCaseSuccessor() != TheOnlyDest) TheOnlyDest = nullptr;
    176     }
    177 
    178     if (CI && !TheOnlyDest) {
     179       // We are branching on a constant that matches none of the cases, so go to
     180       // the default successor.
    181       TheOnlyDest = SI->getDefaultDest();
    182     }
    183 
    184     // If we found a single destination that we can fold the switch into, do so
    185     // now.
    186     if (TheOnlyDest) {
    187       // Insert the new branch.
    188       Builder.CreateBr(TheOnlyDest);
    189       BasicBlock *BB = SI->getParent();
    190 
    191       // Remove entries from PHI nodes which we no longer branch to...
    192       for (BasicBlock *Succ : SI->successors()) {
     193         // Is this the destination we are keeping?
    194         if (Succ == TheOnlyDest)
    195           TheOnlyDest = nullptr; // Don't modify the first branch to TheOnlyDest
    196         else
    197           Succ->removePredecessor(BB);
    198       }
    199 
    200       // Delete the old switch.
    201       Value *Cond = SI->getCondition();
    202       SI->eraseFromParent();
    203       if (DeleteDeadConditions)
    204         RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
    205       return true;
    206     }
    207 
    208     if (SI->getNumCases() == 1) {
    209       // Otherwise, we can fold this switch into a conditional branch
    210       // instruction if it has only one non-default destination.
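               // For example, "switch i32 %x, label %D [ i32 1, label %C ]" becomes
               // "%cond = icmp eq i32 %x, 1" followed by "br i1 %cond, label %C, label %D".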
    211       SwitchInst::CaseIt FirstCase = SI->case_begin();
    212       Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
    213           FirstCase.getCaseValue(), "cond");
    214 
    215       // Insert the new branch.
    216       BranchInst *NewBr = Builder.CreateCondBr(Cond,
    217                                                FirstCase.getCaseSuccessor(),
    218                                                SI->getDefaultDest());
    219       MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
    220       if (MD && MD->getNumOperands() == 3) {
    221         ConstantInt *SICase =
    222             mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
    223         ConstantInt *SIDef =
    224             mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
    225         assert(SICase && SIDef);
    226         // The TrueWeight should be the weight for the single case of SI.
    227         NewBr->setMetadata(LLVMContext::MD_prof,
    228                         MDBuilder(BB->getContext()).
    229                         createBranchWeights(SICase->getValue().getZExtValue(),
    230                                             SIDef->getValue().getZExtValue()));
    231       }
    232 
    233       // Update make.implicit metadata to the newly-created conditional branch.
    234       MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
    235       if (MakeImplicitMD)
    236         NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);
    237 
    238       // Delete the old switch.
    239       SI->eraseFromParent();
    240       return true;
    241     }
    242     return false;
    243   }
    244 
    245   if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(T)) {
    246     // indirectbr blockaddress(@F, @BB) -> br label @BB
    247     if (BlockAddress *BA =
    248           dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
    249       BasicBlock *TheOnlyDest = BA->getBasicBlock();
    250       // Insert the new branch.
    251       Builder.CreateBr(TheOnlyDest);
    252 
    253       for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
    254         if (IBI->getDestination(i) == TheOnlyDest)
    255           TheOnlyDest = nullptr;
    256         else
    257           IBI->getDestination(i)->removePredecessor(IBI->getParent());
    258       }
    259       Value *Address = IBI->getAddress();
    260       IBI->eraseFromParent();
    261       if (DeleteDeadConditions)
    262         RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);
    263 
    264       // If we didn't find our destination in the IBI successor list, then we
    265       // have undefined behavior.  Replace the unconditional branch with an
    266       // 'unreachable' instruction.
    267       if (TheOnlyDest) {
    268         BB->getTerminator()->eraseFromParent();
    269         new UnreachableInst(BB->getContext(), BB);
    270       }
    271 
    272       return true;
    273     }
    274   }
    275 
    276   return false;
    277 }
    278 
    279 
    280 //===----------------------------------------------------------------------===//
    281 //  Local dead code elimination.
    282 //
    283 
    284 /// isInstructionTriviallyDead - Return true if the result produced by the
    285 /// instruction is not used, and the instruction has no side effects.
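         /// For example, an 'add' whose result is unused is trivially dead, while a
         /// store never is.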
    286 ///
    287 bool llvm::isInstructionTriviallyDead(Instruction *I,
    288                                       const TargetLibraryInfo *TLI) {
    289   if (!I->use_empty() || isa<TerminatorInst>(I)) return false;
    290 
    291   // We don't want the landingpad-like instructions removed by anything this
    292   // general.
    293   if (I->isEHPad())
    294     return false;
    295 
    296   // We don't want debug info removed by anything this general, unless
    297   // debug info is empty.
    298   if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
    299     if (DDI->getAddress())
    300       return false;
    301     return true;
    302   }
    303   if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
    304     if (DVI->getValue())
    305       return false;
    306     return true;
    307   }
    308 
    309   if (!I->mayHaveSideEffects()) return true;
    310 
    311   // Special case intrinsics that "may have side effects" but can be deleted
    312   // when dead.
    313   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    314     // Safe to delete llvm.stacksave if dead.
    315     if (II->getIntrinsicID() == Intrinsic::stacksave)
    316       return true;
    317 
     318     // Lifetime intrinsics are dead when their pointer argument is undef.
    319     if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
    320         II->getIntrinsicID() == Intrinsic::lifetime_end)
    321       return isa<UndefValue>(II->getArgOperand(1));
    322 
    323     // Assumptions are dead if their condition is trivially true.
    324     if (II->getIntrinsicID() == Intrinsic::assume) {
    325       if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
    326         return !Cond->isZero();
    327 
    328       return false;
    329     }
    330   }
    331 
    332   if (isAllocLikeFn(I, TLI)) return true;
    333 
    334   if (CallInst *CI = isFreeCall(I, TLI))
    335     if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
    336       return C->isNullValue() || isa<UndefValue>(C);
    337 
    338   return false;
    339 }
    340 
    341 /// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
    342 /// trivially dead instruction, delete it.  If that makes any of its operands
    343 /// trivially dead, delete them too, recursively.  Return true if any
    344 /// instructions were deleted.
    345 bool
    346 llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V,
    347                                                  const TargetLibraryInfo *TLI) {
    348   Instruction *I = dyn_cast<Instruction>(V);
    349   if (!I || !I->use_empty() || !isInstructionTriviallyDead(I, TLI))
    350     return false;
    351 
    352   SmallVector<Instruction*, 16> DeadInsts;
    353   DeadInsts.push_back(I);
    354 
    355   do {
    356     I = DeadInsts.pop_back_val();
    357 
    358     // Null out all of the instruction's operands to see if any operand becomes
    359     // dead as we go.
    360     for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    361       Value *OpV = I->getOperand(i);
    362       I->setOperand(i, nullptr);
    363 
    364       if (!OpV->use_empty()) continue;
    365 
    366       // If the operand is an instruction that became dead as we nulled out the
    367       // operand, and if it is 'trivially' dead, delete it in a future loop
    368       // iteration.
    369       if (Instruction *OpI = dyn_cast<Instruction>(OpV))
    370         if (isInstructionTriviallyDead(OpI, TLI))
    371           DeadInsts.push_back(OpI);
    372     }
    373 
    374     I->eraseFromParent();
    375   } while (!DeadInsts.empty());
    376 
    377   return true;
    378 }
    379 
    380 /// areAllUsesEqual - Check whether the uses of a value are all the same.
    381 /// This is similar to Instruction::hasOneUse() except this will also return
     382 /// true when there are no uses, or when there are multiple uses that all
     383 /// belong to the same user.
    384 static bool areAllUsesEqual(Instruction *I) {
    385   Value::user_iterator UI = I->user_begin();
    386   Value::user_iterator UE = I->user_end();
    387   if (UI == UE)
    388     return true;
    389 
    390   User *TheUse = *UI;
    391   for (++UI; UI != UE; ++UI) {
    392     if (*UI != TheUse)
    393       return false;
    394   }
    395   return true;
    396 }
    397 
    398 /// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
    399 /// dead PHI node, due to being a def-use chain of single-use nodes that
    400 /// either forms a cycle or is terminated by a trivially dead instruction,
    401 /// delete it.  If that makes any of its operands trivially dead, delete them
    402 /// too, recursively.  Return true if a change was made.
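         /// For example, two PHIs whose only uses are each other form such a cycle
         /// and can be deleted.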
    403 bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
    404                                         const TargetLibraryInfo *TLI) {
    405   SmallPtrSet<Instruction*, 4> Visited;
    406   for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
    407        I = cast<Instruction>(*I->user_begin())) {
    408     if (I->use_empty())
    409       return RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
    410 
    411     // If we find an instruction more than once, we're on a cycle that
    412     // won't prove fruitful.
    413     if (!Visited.insert(I).second) {
    414       // Break the cycle and delete the instruction and its operands.
    415       I->replaceAllUsesWith(UndefValue::get(I->getType()));
    416       (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
    417       return true;
    418     }
    419   }
    420   return false;
    421 }
    422 
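         /// Try to fold away \p I: if it is trivially dead, erase it and queue any
         /// operands that become dead on \p WorkList; otherwise, if SimplifyInstruction
         /// finds a simpler form, replace all uses of \p I with that value and erase
         /// \p I.  Returns true if the instruction was removed either way.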
    423 static bool
    424 simplifyAndDCEInstruction(Instruction *I,
    425                           SmallSetVector<Instruction *, 16> &WorkList,
    426                           const DataLayout &DL,
    427                           const TargetLibraryInfo *TLI) {
    428   if (isInstructionTriviallyDead(I, TLI)) {
    429     // Null out all of the instruction's operands to see if any operand becomes
    430     // dead as we go.
    431     for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    432       Value *OpV = I->getOperand(i);
    433       I->setOperand(i, nullptr);
    434 
    435       if (!OpV->use_empty() || I == OpV)
    436         continue;
    437 
    438       // If the operand is an instruction that became dead as we nulled out the
    439       // operand, and if it is 'trivially' dead, delete it in a future loop
    440       // iteration.
    441       if (Instruction *OpI = dyn_cast<Instruction>(OpV))
    442         if (isInstructionTriviallyDead(OpI, TLI))
    443           WorkList.insert(OpI);
    444     }
    445 
    446     I->eraseFromParent();
    447 
    448     return true;
    449   }
    450 
    451   if (Value *SimpleV = SimplifyInstruction(I, DL)) {
    452     // Add the users to the worklist. CAREFUL: an instruction can use itself,
    453     // in the case of a phi node.
    454     for (User *U : I->users())
    455       if (U != I)
    456         WorkList.insert(cast<Instruction>(U));
    457 
    458     // Replace the instruction with its simplified value.
    459     I->replaceAllUsesWith(SimpleV);
    460     I->eraseFromParent();
    461     return true;
    462   }
    463   return false;
    464 }
    465 
    466 /// SimplifyInstructionsInBlock - Scan the specified basic block and try to
    467 /// simplify any instructions in it and recursively delete dead instructions.
    468 ///
     469 /// This returns true if it changed the code.  Note that it can delete
     470 /// instructions in other blocks as well as in this block.
    471 bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
    472                                        const TargetLibraryInfo *TLI) {
    473   bool MadeChange = false;
    474   const DataLayout &DL = BB->getModule()->getDataLayout();
    475 
    476 #ifndef NDEBUG
    477   // In debug builds, ensure that the terminator of the block is never replaced
    478   // or deleted by these simplifications. The idea of simplification is that it
    479   // cannot introduce new instructions, and there is no way to replace the
    480   // terminator of a block without introducing a new instruction.
    481   AssertingVH<Instruction> TerminatorVH(&BB->back());
    482 #endif
    483 
    484   SmallSetVector<Instruction *, 16> WorkList;
     485   // Iterate over the instructions originally in the block, only adding insts
     486   // to the worklist if they actually need to be revisited. This avoids having
     487   // to pre-init the worklist with the entire block's worth of instructions.
    488   for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end()); BI != E;) {
    489     assert(!BI->isTerminator());
    490     Instruction *I = &*BI;
    491     ++BI;
    492 
    493     // We're visiting this instruction now, so make sure it's not in the
    494     // worklist from an earlier visit.
    495     if (!WorkList.count(I))
    496       MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
    497   }
    498 
    499   while (!WorkList.empty()) {
    500     Instruction *I = WorkList.pop_back_val();
    501     MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
    502   }
    503   return MadeChange;
    504 }
    505 
    506 //===----------------------------------------------------------------------===//
    507 //  Control Flow Graph Restructuring.
    508 //
    509 
    510 
    511 /// RemovePredecessorAndSimplify - Like BasicBlock::removePredecessor, this
    512 /// method is called when we're about to delete Pred as a predecessor of BB.  If
    513 /// BB contains any PHI nodes, this drops the entries in the PHI nodes for Pred.
    514 ///
    515 /// Unlike the removePredecessor method, this attempts to simplify uses of PHI
    516 /// nodes that collapse into identity values.  For example, if we have:
    517 ///   x = phi(1, 0, 0, 0)
    518 ///   y = and x, z
    519 ///
    520 /// .. and delete the predecessor corresponding to the '1', this will attempt to
    521 /// recursively fold the and to 0.
    522 void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred) {
    523   // This only adjusts blocks with PHI nodes.
    524   if (!isa<PHINode>(BB->begin()))
    525     return;
    526 
    527   // Remove the entries for Pred from the PHI nodes in BB, but do not simplify
    528   // them down.  This will leave us with single entry phi nodes and other phis
    529   // that can be removed.
    530   BB->removePredecessor(Pred, true);
    531 
    532   WeakVH PhiIt = &BB->front();
    533   while (PHINode *PN = dyn_cast<PHINode>(PhiIt)) {
    534     PhiIt = &*++BasicBlock::iterator(cast<Instruction>(PhiIt));
    535     Value *OldPhiIt = PhiIt;
    536 
    537     if (!recursivelySimplifyInstruction(PN))
    538       continue;
    539 
    540     // If recursive simplification ended up deleting the next PHI node we would
     541     // iterate to, then our iterator is invalid; restart scanning from the top
    542     // of the block.
    543     if (PhiIt != OldPhiIt) PhiIt = &BB->front();
    544   }
    545 }
    546 
    547 
    548 /// MergeBasicBlockIntoOnlyPred - DestBB is a block with one predecessor and its
    549 /// predecessor is known to have one successor (DestBB!).  Eliminate the edge
    550 /// between them, moving the instructions in the predecessor into DestBB and
    551 /// deleting the predecessor block.
    552 ///
    553 void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB, DominatorTree *DT) {
     554   // If DestBB has single-entry PHI nodes, fold them.
    555   while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
    556     Value *NewVal = PN->getIncomingValue(0);
     557     // Replace a self-referencing PHI with undef; it must be dead.
    558     if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
    559     PN->replaceAllUsesWith(NewVal);
    560     PN->eraseFromParent();
    561   }
    562 
    563   BasicBlock *PredBB = DestBB->getSinglePredecessor();
    564   assert(PredBB && "Block doesn't have a single predecessor!");
    565 
    566   // Zap anything that took the address of DestBB.  Not doing this will give the
    567   // address an invalid value.
    568   if (DestBB->hasAddressTaken()) {
    569     BlockAddress *BA = BlockAddress::get(DestBB);
    570     Constant *Replacement =
    571       ConstantInt::get(llvm::Type::getInt32Ty(BA->getContext()), 1);
    572     BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
    573                                                      BA->getType()));
    574     BA->destroyConstant();
    575   }
    576 
    577   // Anything that branched to PredBB now branches to DestBB.
    578   PredBB->replaceAllUsesWith(DestBB);
    579 
    580   // Splice all the instructions from PredBB to DestBB.
    581   PredBB->getTerminator()->eraseFromParent();
    582   DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());
    583 
    584   // If the PredBB is the entry block of the function, move DestBB up to
    585   // become the entry block after we erase PredBB.
    586   if (PredBB == &DestBB->getParent()->getEntryBlock())
    587     DestBB->moveAfter(PredBB);
    588 
    589   if (DT) {
    590     BasicBlock *PredBBIDom = DT->getNode(PredBB)->getIDom()->getBlock();
    591     DT->changeImmediateDominator(DestBB, PredBBIDom);
    592     DT->eraseNode(PredBB);
    593   }
     594   // Nuke PredBB.
    595   PredBB->eraseFromParent();
    596 }
    597 
    598 /// CanMergeValues - Return true if we can choose one of these values to use
    599 /// in place of the other. Note that we will always choose the non-undef
    600 /// value to keep.
    601 static bool CanMergeValues(Value *First, Value *Second) {
    602   return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
    603 }
    604 
    605 /// CanPropagatePredecessorsForPHIs - Return true if we can fold BB, an
    606 /// almost-empty BB ending in an unconditional branch to Succ, into Succ.
    607 ///
    608 /// Assumption: Succ is the single successor for BB.
    609 ///
    610 static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
    611   assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");
    612 
    613   DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
    614         << Succ->getName() << "\n");
     615   // Shortcut: if there is only a single predecessor, it must be BB and merging
     616   // is always safe.
    617   if (Succ->getSinglePredecessor()) return true;
    618 
    619   // Make a list of the predecessors of BB
    620   SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));
    621 
    622   // Look at all the phi nodes in Succ, to see if they present a conflict when
    623   // merging these blocks
    624   for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
    625     PHINode *PN = cast<PHINode>(I);
    626 
     627     // If the incoming value from BB is itself a PHINode defined in BB that has
     628     // the same incoming value for each common predecessor as PN does, we can
     629     // merge the phi nodes and the blocks can then still be merged.
    630     PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
    631     if (BBPN && BBPN->getParent() == BB) {
    632       for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
    633         BasicBlock *IBB = PN->getIncomingBlock(PI);
    634         if (BBPreds.count(IBB) &&
    635             !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
    636                             PN->getIncomingValue(PI))) {
    637           DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
    638                 << Succ->getName() << " is conflicting with "
    639                 << BBPN->getName() << " with regard to common predecessor "
    640                 << IBB->getName() << "\n");
    641           return false;
    642         }
    643       }
    644     } else {
    645       Value* Val = PN->getIncomingValueForBlock(BB);
    646       for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
    647         // See if the incoming value for the common predecessor is equal to the
    648         // one for BB, in which case this phi node will not prevent the merging
    649         // of the block.
    650         BasicBlock *IBB = PN->getIncomingBlock(PI);
    651         if (BBPreds.count(IBB) &&
    652             !CanMergeValues(Val, PN->getIncomingValue(PI))) {
    653           DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
    654                 << Succ->getName() << " is conflicting with regard to common "
    655                 << "predecessor " << IBB->getName() << "\n");
    656           return false;
    657         }
    658       }
    659     }
    660   }
    661 
    662   return true;
    663 }
    664 
    665 typedef SmallVector<BasicBlock *, 16> PredBlockVector;
    666 typedef DenseMap<BasicBlock *, Value *> IncomingValueMap;
    667 
    668 /// \brief Determines the value to use as the phi node input for a block.
    669 ///
     670 /// Select between \p OldVal and any value that we know flows from \p BB
    671 /// to a particular phi on the basis of which one (if either) is not
    672 /// undef. Update IncomingValues based on the selected value.
    673 ///
    674 /// \param OldVal The value we are considering selecting.
    675 /// \param BB The block that the value flows in from.
    676 /// \param IncomingValues A map from block-to-value for other phi inputs
    677 /// that we have examined.
    678 ///
    679 /// \returns the selected value.
    680 static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
    681                                           IncomingValueMap &IncomingValues) {
    682   if (!isa<UndefValue>(OldVal)) {
    683     assert((!IncomingValues.count(BB) ||
    684             IncomingValues.find(BB)->second == OldVal) &&
    685            "Expected OldVal to match incoming value from BB!");
    686 
    687     IncomingValues.insert(std::make_pair(BB, OldVal));
    688     return OldVal;
    689   }
    690 
    691   IncomingValueMap::const_iterator It = IncomingValues.find(BB);
    692   if (It != IncomingValues.end()) return It->second;
    693 
    694   return OldVal;
    695 }
    696 
    697 /// \brief Create a map from block to value for the operands of a
    698 /// given phi.
    699 ///
    700 /// Create a map from block to value for each non-undef value flowing
    701 /// into \p PN.
    702 ///
    703 /// \param PN The phi we are collecting the map for.
    704 /// \param IncomingValues [out] The map from block to value for this phi.
    705 static void gatherIncomingValuesToPhi(PHINode *PN,
    706                                       IncomingValueMap &IncomingValues) {
    707   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    708     BasicBlock *BB = PN->getIncomingBlock(i);
    709     Value *V = PN->getIncomingValue(i);
    710 
    711     if (!isa<UndefValue>(V))
    712       IncomingValues.insert(std::make_pair(BB, V));
    713   }
    714 }
    715 
    716 /// \brief Replace the incoming undef values to a phi with the values
    717 /// from a block-to-value map.
    718 ///
    719 /// \param PN The phi we are replacing the undefs in.
    720 /// \param IncomingValues A map from block to value.
    721 static void replaceUndefValuesInPhi(PHINode *PN,
    722                                     const IncomingValueMap &IncomingValues) {
    723   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    724     Value *V = PN->getIncomingValue(i);
    725 
    726     if (!isa<UndefValue>(V)) continue;
    727 
    728     BasicBlock *BB = PN->getIncomingBlock(i);
    729     IncomingValueMap::const_iterator It = IncomingValues.find(BB);
    730     if (It == IncomingValues.end()) continue;
    731 
    732     PN->setIncomingValue(i, It->second);
    733   }
    734 }
    735 
    736 /// \brief Replace a value flowing from a block to a phi with
    737 /// potentially multiple instances of that value flowing from the
    738 /// block's predecessors to the phi.
    739 ///
    740 /// \param BB The block with the value flowing into the phi.
    741 /// \param BBPreds The predecessors of BB.
    742 /// \param PN The phi that we are updating.
    743 static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
    744                                                 const PredBlockVector &BBPreds,
    745                                                 PHINode *PN) {
    746   Value *OldVal = PN->removeIncomingValue(BB, false);
    747   assert(OldVal && "No entry in PHI for Pred BB!");
    748 
    749   IncomingValueMap IncomingValues;
    750 
    751   // We are merging two blocks - BB, and the block containing PN - and
    752   // as a result we need to redirect edges from the predecessors of BB
    753   // to go to the block containing PN, and update PN
    754   // accordingly. Since we allow merging blocks in the case where the
    755   // predecessor and successor blocks both share some predecessors,
    756   // and where some of those common predecessors might have undef
    757   // values flowing into PN, we want to rewrite those values to be
    758   // consistent with the non-undef values.
    759 
    760   gatherIncomingValuesToPhi(PN, IncomingValues);
    761 
    762   // If this incoming value is one of the PHI nodes in BB, the new entries
    763   // in the PHI node are the entries from the old PHI.
    764   if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
    765     PHINode *OldValPN = cast<PHINode>(OldVal);
    766     for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
    767       // Note that, since we are merging phi nodes and BB and Succ might
    768       // have common predecessors, we could end up with a phi node with
    769       // identical incoming branches. This will be cleaned up later (and
    770       // will trigger asserts if we try to clean it up now, without also
    771       // simplifying the corresponding conditional branch).
    772       BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
    773       Value *PredVal = OldValPN->getIncomingValue(i);
    774       Value *Selected = selectIncomingValueForBlock(PredVal, PredBB,
    775                                                     IncomingValues);
    776 
    777       // And add a new incoming value for this predecessor for the
    778       // newly retargeted branch.
    779       PN->addIncoming(Selected, PredBB);
    780     }
    781   } else {
    782     for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
    783       // Update existing incoming values in PN for this
    784       // predecessor of BB.
    785       BasicBlock *PredBB = BBPreds[i];
    786       Value *Selected = selectIncomingValueForBlock(OldVal, PredBB,
    787                                                     IncomingValues);
    788 
    789       // And add a new incoming value for this predecessor for the
    790       // newly retargeted branch.
    791       PN->addIncoming(Selected, PredBB);
    792     }
    793   }
    794 
    795   replaceUndefValuesInPhi(PN, IncomingValues);
    796 }
    797 
    798 /// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
    799 /// unconditional branch, and contains no instructions other than PHI nodes,
    800 /// potential side-effect free intrinsics and the branch.  If possible,
    801 /// eliminate BB by rewriting all the predecessors to branch to the successor
    802 /// block and return true.  If we can't transform, return false.
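         /// For example, a block whose body is just "br label %Succ" is deleted and
         /// its predecessors are rewritten to branch directly to %Succ.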
    803 bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB) {
    804   assert(BB != &BB->getParent()->getEntryBlock() &&
    805          "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");
    806 
    807   // We can't eliminate infinite loops.
    808   BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
    809   if (BB == Succ) return false;
    810 
    811   // Check to see if merging these blocks would cause conflicts for any of the
    812   // phi nodes in BB or Succ. If not, we can safely merge.
    813   if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;
    814 
    815   // Check for cases where Succ has multiple predecessors and a PHI node in BB
    816   // has uses which will not disappear when the PHI nodes are merged.  It is
    817   // possible to handle such cases, but difficult: it requires checking whether
    818   // BB dominates Succ, which is non-trivial to calculate in the case where
    819   // Succ has multiple predecessors.  Also, it requires checking whether
    820   // constructing the necessary self-referential PHI node doesn't introduce any
    821   // conflicts; this isn't too difficult, but the previous code for doing this
    822   // was incorrect.
    823   //
    824   // Note that if this check finds a live use, BB dominates Succ, so BB is
    825   // something like a loop pre-header (or rarely, a part of an irreducible CFG);
    826   // folding the branch isn't profitable in that case anyway.
    827   if (!Succ->getSinglePredecessor()) {
    828     BasicBlock::iterator BBI = BB->begin();
    829     while (isa<PHINode>(*BBI)) {
    830       for (Use &U : BBI->uses()) {
    831         if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
    832           if (PN->getIncomingBlock(U) != BB)
    833             return false;
    834         } else {
    835           return false;
    836         }
    837       }
    838       ++BBI;
    839     }
    840   }
    841 
    842   DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
    843 
    844   if (isa<PHINode>(Succ->begin())) {
    845     // If there is more than one pred of succ, and there are PHI nodes in
    846     // the successor, then we need to add incoming edges for the PHI nodes
    847     //
    848     const PredBlockVector BBPreds(pred_begin(BB), pred_end(BB));
    849 
    850     // Loop over all of the PHI nodes in the successor of BB.
    851     for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
    852       PHINode *PN = cast<PHINode>(I);
    853 
    854       redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN);
    855     }
    856   }
    857 
    858   if (Succ->getSinglePredecessor()) {
    859     // BB is the only predecessor of Succ, so Succ will end up with exactly
    860     // the same predecessors BB had.
    861 
    862     // Copy over any phi, debug or lifetime instruction.
    863     BB->getTerminator()->eraseFromParent();
    864     Succ->getInstList().splice(Succ->getFirstNonPHI()->getIterator(),
    865                                BB->getInstList());
    866   } else {
    867     while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
    868       // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
    869       assert(PN->use_empty() && "There shouldn't be any uses here!");
    870       PN->eraseFromParent();
    871     }
    872   }
    873 
    874   // Everything that jumped to BB now goes to Succ.
    875   BB->replaceAllUsesWith(Succ);
    876   if (!Succ->hasName()) Succ->takeName(BB);
    877   BB->eraseFromParent();              // Delete the old basic block.
    878   return true;
    879 }
    880 
    881 /// EliminateDuplicatePHINodes - Check for and eliminate duplicate PHI
    882 /// nodes in this block. This doesn't try to be clever about PHI nodes
    883 /// which differ only in the order of the incoming values, but instcombine
    884 /// orders them so it usually won't matter.
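         /// For example, given "%a = phi i32 [ 0, %x ], [ 1, %y ]" followed by an
         /// identical %b in the same block, all uses of %b are rewritten to use %a
         /// and %b is erased.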
    885 ///
    886 bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
    887   // This implementation doesn't currently consider undef operands
    888   // specially. Theoretically, two phis which are identical except for
    889   // one having an undef where the other doesn't could be collapsed.
    890 
    891   struct PHIDenseMapInfo {
    892     static PHINode *getEmptyKey() {
    893       return DenseMapInfo<PHINode *>::getEmptyKey();
    894     }
    895     static PHINode *getTombstoneKey() {
    896       return DenseMapInfo<PHINode *>::getTombstoneKey();
    897     }
    898     static unsigned getHashValue(PHINode *PN) {
    899       // Compute a hash value on the operands. Instcombine will likely have
    900       // sorted them, which helps expose duplicates, but we have to check all
    901       // the operands to be safe in case instcombine hasn't run.
    902       return static_cast<unsigned>(hash_combine(
    903           hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
    904           hash_combine_range(PN->block_begin(), PN->block_end())));
    905     }
    906     static bool isEqual(PHINode *LHS, PHINode *RHS) {
    907       if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
    908           RHS == getEmptyKey() || RHS == getTombstoneKey())
    909         return LHS == RHS;
    910       return LHS->isIdenticalTo(RHS);
    911     }
    912   };
    913 
    914   // Set of unique PHINodes.
    915   DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
    916 
    917   // Examine each PHI.
    918   bool Changed = false;
    919   for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
    920     auto Inserted = PHISet.insert(PN);
    921     if (!Inserted.second) {
    922       // A duplicate. Replace this PHI with its duplicate.
    923       PN->replaceAllUsesWith(*Inserted.first);
    924       PN->eraseFromParent();
    925       Changed = true;
    926 
    927       // The RAUW can change PHIs that we already visited. Start over from the
    928       // beginning.
    929       PHISet.clear();
    930       I = BB->begin();
    931     }
    932   }
    933 
    934   return Changed;
    935 }
    936 
    937 /// enforceKnownAlignment - If the specified pointer points to an object that
    938 /// we control, modify the object's alignment to PrefAlign. This isn't
    939 /// often possible though. If alignment is important, a more reliable approach
    940 /// is to simply align all global variables and allocation instructions to
    941 /// their preferred alignment from the beginning.
    942 ///
    943 static unsigned enforceKnownAlignment(Value *V, unsigned Align,
    944                                       unsigned PrefAlign,
    945                                       const DataLayout &DL) {
    946   V = V->stripPointerCasts();
    947 
    948   if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    949     // If the preferred alignment is greater than the natural stack alignment
    950     // then don't round up. This avoids dynamic stack realignment.
    951     if (DL.exceedsNaturalStackAlignment(PrefAlign))
    952       return Align;
     953     // If the alloca is already sufficiently aligned, keep it; otherwise round up.
    954     if (AI->getAlignment() >= PrefAlign)
    955       return AI->getAlignment();
    956     AI->setAlignment(PrefAlign);
    957     return PrefAlign;
    958   }
    959 
    960   if (auto *GO = dyn_cast<GlobalObject>(V)) {
    961     // If there is a large requested alignment and we can, bump up the alignment
    962     // of the global.  If the memory we set aside for the global may not be the
    963     // memory used by the final program then it is impossible for us to reliably
    964     // enforce the preferred alignment.
    965     if (!GO->isStrongDefinitionForLinker())
    966       return Align;
    967 
    968     if (GO->getAlignment() >= PrefAlign)
    969       return GO->getAlignment();
    970     // We can only increase the alignment of the global if it has no alignment
    971     // specified or if it is not assigned a section.  If it is assigned a
    972     // section, the global could be densely packed with other objects in the
     973     // section, and increasing the alignment could cause padding issues.
    974     if (!GO->hasSection() || GO->getAlignment() == 0)
    975       GO->setAlignment(PrefAlign);
    976     return GO->getAlignment();
    977   }
    978 
    979   return Align;
    980 }
    981 
    982 /// getOrEnforceKnownAlignment - If the specified pointer has an alignment that
    983 /// we can determine, return it, otherwise return 0.  If PrefAlign is specified,
    984 /// and it is more than the alignment of the ultimate object, see if we can
    985 /// increase the alignment of the ultimate object, making this check succeed.
    986 unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
    987                                           const DataLayout &DL,
    988                                           const Instruction *CxtI,
    989                                           AssumptionCache *AC,
    990                                           const DominatorTree *DT) {
    991   assert(V->getType()->isPointerTy() &&
    992          "getOrEnforceKnownAlignment expects a pointer!");
    993   unsigned BitWidth = DL.getPointerTypeSizeInBits(V->getType());
    994 
    995   APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
    996   computeKnownBits(V, KnownZero, KnownOne, DL, 0, AC, CxtI, DT);
    997   unsigned TrailZ = KnownZero.countTrailingOnes();
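           // Each known-zero low bit doubles the provable alignment: if the low N bits
           // are known to be zero, the pointer is at least (1 << N)-byte aligned.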
    998 
    999   // Avoid trouble with ridiculously large TrailZ values, such as
   1000   // those computed from a null pointer.
   1001   TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT - 1));
   1002 
   1003   unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);
   1004 
   1005   // LLVM doesn't support alignments larger than this currently.
   1006   Align = std::min(Align, +Value::MaximumAlignment);
   1007 
   1008   if (PrefAlign > Align)
   1009     Align = enforceKnownAlignment(V, Align, PrefAlign, DL);
   1010 
   1011   // We don't need to make any adjustment.
   1012   return Align;
   1013 }
   1014 
    1015 //===----------------------------------------------------------------------===//
    1016 //  Dbg Intrinsic utilities
    1017 //
   1018 
   1019 /// See if there is a dbg.value intrinsic for DIVar before I.
   1020 static bool LdStHasDebugValue(const DILocalVariable *DIVar, Instruction *I) {
    1021   // Since we can't guarantee that the original dbg.declare intrinsic
   1022   // is removed by LowerDbgDeclare(), we need to make sure that we are
   1023   // not inserting the same dbg.value intrinsic over and over.
   1024   llvm::BasicBlock::InstListType::iterator PrevI(I);
   1025   if (PrevI != I->getParent()->getInstList().begin()) {
   1026     --PrevI;
   1027     if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(PrevI))
   1028       if (DVI->getValue() == I->getOperand(0) &&
   1029           DVI->getOffset() == 0 &&
   1030           DVI->getVariable() == DIVar)
   1031         return true;
   1032   }
   1033   return false;
   1034 }
   1035 
    1036 /// Inserts an llvm.dbg.value intrinsic before a store to an alloca'd value
    1037 /// that has an associated llvm.dbg.declare intrinsic.
   1038 bool llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
   1039                                            StoreInst *SI, DIBuilder &Builder) {
   1040   auto *DIVar = DDI->getVariable();
   1041   auto *DIExpr = DDI->getExpression();
   1042   assert(DIVar && "Missing variable");
   1043 
   1044   if (LdStHasDebugValue(DIVar, SI))
   1045     return true;
   1046 
    1047   // If an argument is zero- or sign-extended then use the argument directly.
    1048   // The extension may be zapped by an optimization pass in the future.
   1049   Argument *ExtendedArg = nullptr;
   1050   if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
   1051     ExtendedArg = dyn_cast<Argument>(ZExt->getOperand(0));
   1052   if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
   1053     ExtendedArg = dyn_cast<Argument>(SExt->getOperand(0));
   1054   if (ExtendedArg)
   1055     Builder.insertDbgValueIntrinsic(ExtendedArg, 0, DIVar, DIExpr,
   1056                                     DDI->getDebugLoc(), SI);
   1057   else
   1058     Builder.insertDbgValueIntrinsic(SI->getOperand(0), 0, DIVar, DIExpr,
   1059                                     DDI->getDebugLoc(), SI);
   1060   return true;
   1061 }
   1062 
    1063 /// Inserts an llvm.dbg.value intrinsic before a load of an alloca'd value
    1064 /// that has an associated llvm.dbg.declare intrinsic.
   1065 bool llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
   1066                                            LoadInst *LI, DIBuilder &Builder) {
   1067   auto *DIVar = DDI->getVariable();
   1068   auto *DIExpr = DDI->getExpression();
   1069   assert(DIVar && "Missing variable");
   1070 
   1071   if (LdStHasDebugValue(DIVar, LI))
   1072     return true;
   1073 
   1074   // We are now tracking the loaded value instead of the address. In the
   1075   // future if multi-location support is added to the IR, it might be
   1076   // preferable to keep tracking both the loaded value and the original
   1077   // address in case the alloca can not be elided.
   1078   Instruction *DbgValue = Builder.insertDbgValueIntrinsic(
   1079       LI, 0, DIVar, DIExpr, DDI->getDebugLoc(), (Instruction *)nullptr);
   1080   DbgValue->insertAfter(LI);
   1081   return true;
   1082 }
   1083 
   1084 /// Determine whether this alloca is either a VLA or an array.
   1085 static bool isArray(AllocaInst *AI) {
   1086   return AI->isArrayAllocation() ||
   1087     AI->getType()->getElementType()->isArrayTy();
   1088 }
   1089 
    1090 /// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into an appropriate
    1091 /// set of llvm.dbg.value intrinsics.
   1092 bool llvm::LowerDbgDeclare(Function &F) {
   1093   DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
   1094   SmallVector<DbgDeclareInst *, 4> Dbgs;
   1095   for (auto &FI : F)
   1096     for (Instruction &BI : FI)
   1097       if (auto DDI = dyn_cast<DbgDeclareInst>(&BI))
   1098         Dbgs.push_back(DDI);
   1099 
   1100   if (Dbgs.empty())
   1101     return false;
   1102 
   1103   for (auto &I : Dbgs) {
   1104     DbgDeclareInst *DDI = I;
   1105     AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress());
   1106     // If this is an alloca for a scalar variable, insert a dbg.value
   1107     // at each load and store to the alloca and erase the dbg.declare.
   1108     // The dbg.values allow tracking a variable even if it is not
   1109     // stored on the stack, while the dbg.declare can only describe
   1110     // the stack slot (and at a lexical-scope granularity). Later
   1111     // passes will attempt to elide the stack slot.
   1112     if (AI && !isArray(AI)) {
   1113       for (User *U : AI->users())
   1114         if (StoreInst *SI = dyn_cast<StoreInst>(U))
   1115           ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
   1116         else if (LoadInst *LI = dyn_cast<LoadInst>(U))
   1117           ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
   1118         else if (CallInst *CI = dyn_cast<CallInst>(U)) {
    1119           // This is a by-value call or some other instruction that
   1120           // takes a pointer to the variable. Insert a *value*
   1121           // intrinsic that describes the alloca.
   1122           SmallVector<uint64_t, 1> NewDIExpr;
   1123           auto *DIExpr = DDI->getExpression();
   1124           NewDIExpr.push_back(dwarf::DW_OP_deref);
   1125           NewDIExpr.append(DIExpr->elements_begin(), DIExpr->elements_end());
   1126           DIB.insertDbgValueIntrinsic(AI, 0, DDI->getVariable(),
   1127                                       DIB.createExpression(NewDIExpr),
   1128                                       DDI->getDebugLoc(), CI);
   1129         }
   1130       DDI->eraseFromParent();
   1131     }
   1132   }
   1133   return true;
   1134 }
   1135 
   1136 /// FindAllocaDbgDeclare - Finds the llvm.dbg.declare intrinsic describing the
   1137 /// alloca 'V', if any.
   1138 DbgDeclareInst *llvm::FindAllocaDbgDeclare(Value *V) {
   1139   if (auto *L = LocalAsMetadata::getIfExists(V))
   1140     if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L))
   1141       for (User *U : MDV->users())
   1142         if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
   1143           return DDI;
   1144 
   1145   return nullptr;
   1146 }
   1147 
   1148 bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
   1149                              Instruction *InsertBefore, DIBuilder &Builder,
   1150                              bool Deref, int Offset) {
   1151   DbgDeclareInst *DDI = FindAllocaDbgDeclare(Address);
   1152   if (!DDI)
   1153     return false;
   1154   DebugLoc Loc = DDI->getDebugLoc();
   1155   auto *DIVar = DDI->getVariable();
   1156   auto *DIExpr = DDI->getExpression();
   1157   assert(DIVar && "Missing variable");
   1158 
   1159   if (Deref || Offset) {
    1160     // Create a copy of the original DIDescriptor for the user variable,
    1161     // prepending a "deref" operation to the list of address elements, as the
    1162     // new llvm.dbg.declare will take a value storing the address of the memory
    1163     // for the variable, not the alloca itself.
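             // For example, Deref == true and Offset == 8 produce the prefix
             // (DW_OP_deref, DW_OP_plus, 8) ahead of any existing expression elements.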
   1164     SmallVector<uint64_t, 4> NewDIExpr;
   1165     if (Deref)
   1166       NewDIExpr.push_back(dwarf::DW_OP_deref);
   1167     if (Offset > 0) {
   1168       NewDIExpr.push_back(dwarf::DW_OP_plus);
   1169       NewDIExpr.push_back(Offset);
   1170     } else if (Offset < 0) {
   1171       NewDIExpr.push_back(dwarf::DW_OP_minus);
   1172       NewDIExpr.push_back(-Offset);
   1173     }
   1174     if (DIExpr)
   1175       NewDIExpr.append(DIExpr->elements_begin(), DIExpr->elements_end());
   1176     DIExpr = Builder.createExpression(NewDIExpr);
   1177   }
   1178 
   1179   // Insert llvm.dbg.declare immediately after the original alloca, and remove
   1180   // old llvm.dbg.declare.
   1181   Builder.insertDeclare(NewAddress, DIVar, DIExpr, Loc, InsertBefore);
   1182   DDI->eraseFromParent();
   1183   return true;
   1184 }
   1185 
   1186 bool llvm::replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
   1187                                       DIBuilder &Builder, bool Deref, int Offset) {
   1188   return replaceDbgDeclare(AI, NewAllocaAddress, AI->getNextNode(), Builder,
   1189                            Deref, Offset);
   1190 }
   1191 
   1192 void llvm::changeToUnreachable(Instruction *I, bool UseLLVMTrap) {
   1193   BasicBlock *BB = I->getParent();
   1194   // Loop over all of the successors, removing BB's entry from any PHI
   1195   // nodes.
   1196   for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
   1197     (*SI)->removePredecessor(BB);
   1198 
   1199   // Insert a call to llvm.trap right before this.  This turns the undefined
   1200   // behavior into a hard fail instead of falling through into random code.
   1201   if (UseLLVMTrap) {
   1202     Function *TrapFn =
   1203       Intrinsic::getDeclaration(BB->getParent()->getParent(), Intrinsic::trap);
   1204     CallInst *CallTrap = CallInst::Create(TrapFn, "", I);
   1205     CallTrap->setDebugLoc(I->getDebugLoc());
   1206   }
   1207   new UnreachableInst(I->getContext(), I);
   1208 
   1209   // All instructions after this are dead.
   1210   BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
   1211   while (BBI != BBE) {
   1212     if (!BBI->use_empty())
   1213       BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
   1214     BB->getInstList().erase(BBI++);
   1215   }
   1216 }
   1217 
   1218 /// changeToCall - Convert the specified invoke into a normal call.
   1219 static void changeToCall(InvokeInst *II) {
   1220   SmallVector<Value*, 8> Args(II->arg_begin(), II->arg_end());
   1221   SmallVector<OperandBundleDef, 1> OpBundles;
   1222   II->getOperandBundlesAsDefs(OpBundles);
   1223   CallInst *NewCall = CallInst::Create(II->getCalledValue(), Args, OpBundles,
   1224                                        "", II);
   1225   NewCall->takeName(II);
   1226   NewCall->setCallingConv(II->getCallingConv());
   1227   NewCall->setAttributes(II->getAttributes());
   1228   NewCall->setDebugLoc(II->getDebugLoc());
   1229   II->replaceAllUsesWith(NewCall);
   1230 
   1231   // Follow the call by a branch to the normal destination.
   1232   BranchInst::Create(II->getNormalDest(), II);
   1233 
   1234   // Update PHI nodes in the unwind destination
   1235   II->getUnwindDest()->removePredecessor(II->getParent());
   1236   II->eraseFromParent();
   1237 }
   1238 
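/// markAliveBlocks - Walk the CFG from the entry block of 'F', recording
/// every reachable block in 'Reachable'.  While scanning, obviously dead code
/// is simplified: known-false assumptions, code following no-return calls,
/// and stores to null/undef become 'unreachable'; invokes of nounwind callees
/// become plain calls; and constant terminators are folded.  Returns true if
/// the function was changed.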
   1239 static bool markAliveBlocks(Function &F,
   1240                             SmallPtrSetImpl<BasicBlock*> &Reachable) {
   1241 
   1242   SmallVector<BasicBlock*, 128> Worklist;
   1243   BasicBlock *BB = &F.front();
   1244   Worklist.push_back(BB);
   1245   Reachable.insert(BB);
   1246   bool Changed = false;
   1247   do {
   1248     BB = Worklist.pop_back_val();
   1249 
   1250     // Do a quick scan of the basic block, turning any obviously unreachable
   1251     // instructions into LLVM unreachable insts.  The instruction combining pass
   1252     // canonicalizes unreachable insts into stores to null or undef.
   1253     for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;++BBI){
   1254       // Assumptions that are known to be false are equivalent to unreachable.
   1255       // Also, if the condition is undefined, then we make the choice most
   1256       // beneficial to the optimizer, and choose that to also be unreachable.
   1257       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BBI))
   1258         if (II->getIntrinsicID() == Intrinsic::assume) {
   1259           bool MakeUnreachable = false;
   1260           if (isa<UndefValue>(II->getArgOperand(0)))
   1261             MakeUnreachable = true;
   1262           else if (ConstantInt *Cond =
   1263                    dyn_cast<ConstantInt>(II->getArgOperand(0)))
   1264             MakeUnreachable = Cond->isZero();
   1265 
   1266           if (MakeUnreachable) {
   1267             // Don't insert a call to llvm.trap right before the unreachable.
   1268             changeToUnreachable(&*BBI, false);
   1269             Changed = true;
   1270             break;
   1271           }
   1272         }
   1273 
   1274       if (CallInst *CI = dyn_cast<CallInst>(BBI)) {
   1275         if (CI->doesNotReturn()) {
   1276           // If we found a call to a no-return function, insert an unreachable
   1277           // instruction after it.  Make sure there isn't *already* one there
   1278           // though.
   1279           ++BBI;
   1280           if (!isa<UnreachableInst>(BBI)) {
   1281             // Don't insert a call to llvm.trap right before the unreachable.
   1282             changeToUnreachable(&*BBI, false);
   1283             Changed = true;
   1284           }
   1285           break;
   1286         }
   1287       }
   1288 
   1289       // A store to undef or to null is undefined behavior; passes that cannot
   1290       // modify the CFG produce such stores to signal that the code should be
   1291       // changed to unreachable here.
   1292       if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
   1293         // Don't touch volatile stores.
   1294         if (SI->isVolatile()) continue;
   1295 
   1296         Value *Ptr = SI->getOperand(1);
   1297 
   1298         if (isa<UndefValue>(Ptr) ||
   1299             (isa<ConstantPointerNull>(Ptr) &&
   1300              SI->getPointerAddressSpace() == 0)) {
   1301           changeToUnreachable(SI, true);
   1302           Changed = true;
   1303           break;
   1304         }
   1305       }
   1306     }
   1307 
   1308     // Turn invokes that call 'nounwind' functions into ordinary calls.
   1309     if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
   1310       Value *Callee = II->getCalledValue();
   1311       if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
   1312         changeToUnreachable(II, true);
   1313         Changed = true;
   1314       } else if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
   1315         if (II->use_empty() && II->onlyReadsMemory()) {
   1316           // The invoke is trivially dead; just branch to the normal destination.
   1317           BranchInst::Create(II->getNormalDest(), II);
   1318           II->getUnwindDest()->removePredecessor(II->getParent());
   1319           II->eraseFromParent();
   1320         } else
   1321           changeToCall(II);
   1322         Changed = true;
   1323       }
   1324     }
   1325 
   1326     Changed |= ConstantFoldTerminator(BB, true);
   1327     for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
   1328       if (Reachable.insert(*SI).second)
   1329         Worklist.push_back(*SI);
   1330   } while (!Worklist.empty());
   1331   return Changed;
   1332 }
   1333 
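/// removeUnwindEdge - Remove the unwind successor of BB's terminator.  An
/// invoke is converted into a plain call; a cleanupret or catchswitch is
/// recreated with no unwind destination (i.e. it unwinds to the caller).
/// PHI nodes in the old unwind destination are updated to drop BB as a
/// predecessor.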
   1334 void llvm::removeUnwindEdge(BasicBlock *BB) {
   1335   TerminatorInst *TI = BB->getTerminator();
   1336 
   1337   if (auto *II = dyn_cast<InvokeInst>(TI)) {
   1338     changeToCall(II);
   1339     return;
   1340   }
   1341 
   1342   TerminatorInst *NewTI;
   1343   BasicBlock *UnwindDest;
   1344 
   1345   if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
   1346     NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI);
   1347     UnwindDest = CRI->getUnwindDest();
   1348   } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
   1349     auto *NewCatchSwitch = CatchSwitchInst::Create(
   1350         CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
   1351         CatchSwitch->getName(), CatchSwitch);
   1352     for (BasicBlock *PadBB : CatchSwitch->handlers())
   1353       NewCatchSwitch->addHandler(PadBB);
   1354 
   1355     NewTI = NewCatchSwitch;
   1356     UnwindDest = CatchSwitch->getUnwindDest();
   1357   } else {
   1358     llvm_unreachable("Could not find unwind successor");
   1359   }
   1360 
   1361   NewTI->takeName(TI);
   1362   NewTI->setDebugLoc(TI->getDebugLoc());
   1363   UnwindDest->removePredecessor(BB);
   1364   TI->replaceAllUsesWith(NewTI);
   1365   TI->eraseFromParent();
   1366 }
   1367 
   1368 /// removeUnreachableBlocks - Remove blocks that are not reachable, even if
   1369 /// they are in a dead cycle.  Return true if a change was made, false
   1370 /// otherwise.
   1371 bool llvm::removeUnreachableBlocks(Function &F) {
   1372   SmallPtrSet<BasicBlock*, 128> Reachable;
   1373   bool Changed = markAliveBlocks(F, Reachable);
   1374 
   1375   // If every block is reachable, there are no unreachable blocks to remove.
   1376   if (Reachable.size() == F.size())
   1377     return Changed;
   1378 
   1379   assert(Reachable.size() < F.size());
   1380   NumRemoved += F.size()-Reachable.size();
   1381 
   1382   // Loop over all of the basic blocks that are not reachable, dropping all of
   1383   // their internal references...
   1384   for (Function::iterator BB = ++F.begin(), E = F.end(); BB != E; ++BB) {
   1385     if (Reachable.count(&*BB))
   1386       continue;
   1387 
   1388     for (succ_iterator SI = succ_begin(&*BB), SE = succ_end(&*BB); SI != SE;
   1389          ++SI)
   1390       if (Reachable.count(*SI))
   1391         (*SI)->removePredecessor(&*BB);
   1392     BB->dropAllReferences();
   1393   }
   1394 
   1395   for (Function::iterator I = ++F.begin(); I != F.end();)
   1396     if (!Reachable.count(&*I))
   1397       I = F.getBasicBlockList().erase(I);
   1398     else
   1399       ++I;
   1400 
   1401   return true;
   1402 }
   1403 
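/// combineMetadata - Given that instruction K now also stands in for
/// instruction J, merge J's metadata into K so the result is conservatively
/// correct for both.  Kinds not listed in 'KnownIDs' are dropped from K; each
/// remaining kind is combined with J's operand using a per-kind rule
/// (most-generic TBAA/range/fpmath/alignment, intersected noalias scopes,
/// kept only if present in both instructions for !invariant.load and
/// !nonnull, and so on).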
   1404 void llvm::combineMetadata(Instruction *K, const Instruction *J,
   1405                            ArrayRef<unsigned> KnownIDs) {
   1406   SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
   1407   K->dropUnknownNonDebugMetadata(KnownIDs);
   1408   K->getAllMetadataOtherThanDebugLoc(Metadata);
   1409   for (unsigned i = 0, n = Metadata.size(); i < n; ++i) {
   1410     unsigned Kind = Metadata[i].first;
   1411     MDNode *JMD = J->getMetadata(Kind);
   1412     MDNode *KMD = Metadata[i].second;
   1413 
   1414     switch (Kind) {
   1415       default:
   1416         K->setMetadata(Kind, nullptr); // Remove unknown metadata
   1417         break;
   1418       case LLVMContext::MD_dbg:
   1419         llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
   1420       case LLVMContext::MD_tbaa:
   1421         K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
   1422         break;
   1423       case LLVMContext::MD_alias_scope:
   1424         K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
   1425         break;
   1426       case LLVMContext::MD_noalias:
   1427         K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
   1428         break;
   1429       case LLVMContext::MD_range:
   1430         K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
   1431         break;
   1432       case LLVMContext::MD_fpmath:
   1433         K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
   1434         break;
   1435       case LLVMContext::MD_invariant_load:
   1436         // Only set the !invariant.load if it is present in both instructions.
   1437         K->setMetadata(Kind, JMD);
   1438         break;
   1439       case LLVMContext::MD_nonnull:
   1440         // Only set the !nonnull if it is present in both instructions.
   1441         K->setMetadata(Kind, JMD);
   1442         break;
   1443       case LLVMContext::MD_invariant_group:
   1444         // Preserve !invariant.group in K.
   1445         break;
   1446       case LLVMContext::MD_align:
   1447         K->setMetadata(Kind,
   1448           MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
   1449         break;
   1450       case LLVMContext::MD_dereferenceable:
   1451       case LLVMContext::MD_dereferenceable_or_null:
   1452         K->setMetadata(Kind,
   1453           MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
   1454         break;
   1455     }
   1456   }
   1457   // Set !invariant.group from J if J has it. If both instructions have it, we
   1458   // simply take J's, even when the two differ. Also make sure that K is a load
   1459   // or store: combining, e.g., a bitcast with a load could otherwise produce a
   1460   // bitcast carrying invariant.group metadata, which is invalid.
   1461   // FIXME: try to preserve both invariant.group MDs when they differ; right now
   1462   // an instruction can only carry one invariant.group attachment.
   1463   if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
   1464     if (isa<LoadInst>(K) || isa<StoreInst>(K))
   1465       K->setMetadata(LLVMContext::MD_invariant_group, JMD);
   1466 }
   1467 
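/// replaceDominatedUsesWith - Replace every use of 'From' that is dominated
/// by the CFG edge 'Root' with 'To', and return the number of uses that were
/// rewritten.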
   1468 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
   1469                                         DominatorTree &DT,
   1470                                         const BasicBlockEdge &Root) {
   1471   assert(From->getType() == To->getType());
   1472 
   1473   unsigned Count = 0;
   1474   for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
   1475        UI != UE; ) {
   1476     Use &U = *UI++;
   1477     if (DT.dominates(Root, U)) {
   1478       U.set(To);
   1479       DEBUG(dbgs() << "Replace dominated use of '"
   1480             << From->getName() << "' as "
   1481             << *To << " in " << *U << "\n");
   1482       ++Count;
   1483     }
   1484   }
   1485   return Count;
   1486 }
   1487 
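/// replaceDominatedUsesWith - Overload that rewrites the uses of 'From' whose
/// user instruction lies in a block dominated by 'BB'.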
   1488 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
   1489                                         DominatorTree &DT,
   1490                                         const BasicBlock *BB) {
   1491   assert(From->getType() == To->getType());
   1492 
   1493   unsigned Count = 0;
   1494   for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
   1495        UI != UE;) {
   1496     Use &U = *UI++;
   1497     auto *I = cast<Instruction>(U.getUser());
   1498     if (DT.dominates(BB, I->getParent())) {
   1499       U.set(To);
   1500       DEBUG(dbgs() << "Replace dominated use of '" << From->getName() << "' as "
   1501                    << *To << " in " << *U << "\n");
   1502       ++Count;
   1503     }
   1504   }
   1505   return Count;
   1506 }
   1507 
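/// callsGCLeafFunction - Return true if this call site is known not to
/// require a GC safepoint: calls to LLVM intrinsics are treated as leaves, and
/// so are calls to functions carrying the "gc-leaf-function" attribute.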
   1508 bool llvm::callsGCLeafFunction(ImmutableCallSite CS) {
   1509   if (isa<IntrinsicInst>(CS.getInstruction()))
   1510     // Most LLVM intrinsics can never take a safepoint, so we don't need the
   1511     // stack to be parsable at the callsite.  This is a highly useful
   1512     // optimization since intrinsic calls are fairly prevalent, particularly in
   1513     // debug builds.
   1514     return true;
   1515 
   1516   // Check if the function is specifically marked as a gc leaf function.
   1517   //
   1518   // TODO: we should be checking the attributes on the call site as well.
   1519   if (const Function *F = CS.getCalledFunction())
   1520     return F->hasFnAttribute("gc-leaf-function");
   1521 
   1522   return false;
   1523 }
   1524