//===- InstCombinePHI.cpp -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitPHINode function.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/DataLayout.h"
using namespace llvm;

#define DEBUG_TYPE "instcombine"

/// FoldPHIArgBinOpIntoPHI - If we have something like phi [add (a,b), add(a,c)]
/// and if a/b/c and the adds all have a single use, turn this into a phi
/// and a single binop.
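///
/// Illustrative sketch (hypothetical IR; value and block names are made up):
///   %a = add nsw i32 %x, %y
///   %b = add nsw i32 %x, %z
///   %p = phi i32 [ %a, %pred1 ], [ %b, %pred2 ]
/// becomes
///   %r = phi i32 [ %y, %pred1 ], [ %z, %pred2 ]
///   %p = add nsw i32 %x, %r
/// Wrap/exact flags are kept only if every incoming instruction has them.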
Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
  assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
  unsigned Opc = FirstInst->getOpcode();
  Value *LHSVal = FirstInst->getOperand(0);
  Value *RHSVal = FirstInst->getOperand(1);

  Type *LHSType = LHSVal->getType();
  Type *RHSType = RHSVal->getType();

  bool isNUW = false, isNSW = false, isExact = false;
  if (OverflowingBinaryOperator *BO =
        dyn_cast<OverflowingBinaryOperator>(FirstInst)) {
    isNUW = BO->hasNoUnsignedWrap();
    isNSW = BO->hasNoSignedWrap();
  } else if (PossiblyExactOperator *PEO =
               dyn_cast<PossiblyExactOperator>(FirstInst))
    isExact = PEO->isExact();

  // Scan to see if all operands are the same opcode, and all have one use.
  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
    Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
    if (!I || I->getOpcode() != Opc || !I->hasOneUse() ||
        // Verify type of the LHS matches so we don't fold cmp's of different
        // types.
        I->getOperand(0)->getType() != LHSType ||
        I->getOperand(1)->getType() != RHSType)
      return nullptr;

    // If they are CmpInst instructions, check their predicates
    if (CmpInst *CI = dyn_cast<CmpInst>(I))
      if (CI->getPredicate() != cast<CmpInst>(FirstInst)->getPredicate())
        return nullptr;

    if (isNUW)
      isNUW = cast<OverflowingBinaryOperator>(I)->hasNoUnsignedWrap();
    if (isNSW)
      isNSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    if (isExact)
      isExact = cast<PossiblyExactOperator>(I)->isExact();

    // Keep track of which operand needs a phi node.
    if (I->getOperand(0) != LHSVal) LHSVal = nullptr;
    if (I->getOperand(1) != RHSVal) RHSVal = nullptr;
  }

  // If both LHS and RHS would need a PHI, don't do this transformation,
  // because it would increase the number of PHIs entering the block,
  // which leads to higher register pressure. This is especially
  // bad when the PHIs are in the header of a loop.
  if (!LHSVal && !RHSVal)
    return nullptr;

  // Otherwise, this is safe to transform!

  Value *InLHS = FirstInst->getOperand(0);
  Value *InRHS = FirstInst->getOperand(1);
  PHINode *NewLHS = nullptr, *NewRHS = nullptr;
  if (!LHSVal) {
    NewLHS = PHINode::Create(LHSType, PN.getNumIncomingValues(),
                             FirstInst->getOperand(0)->getName() + ".pn");
    NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewLHS, PN);
    LHSVal = NewLHS;
  }

  if (!RHSVal) {
    NewRHS = PHINode::Create(RHSType, PN.getNumIncomingValues(),
                             FirstInst->getOperand(1)->getName() + ".pn");
    NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewRHS, PN);
    RHSVal = NewRHS;
  }

  // Add all operands to the new PHIs.
  if (NewLHS || NewRHS) {
    for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
      Instruction *InInst = cast<Instruction>(PN.getIncomingValue(i));
      if (NewLHS) {
        Value *NewInLHS = InInst->getOperand(0);
        NewLHS->addIncoming(NewInLHS, PN.getIncomingBlock(i));
      }
      if (NewRHS) {
        Value *NewInRHS = InInst->getOperand(1);
        NewRHS->addIncoming(NewInRHS, PN.getIncomingBlock(i));
      }
    }
  }

  if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst)) {
    CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
                                     LHSVal, RHSVal);
    NewCI->setDebugLoc(FirstInst->getDebugLoc());
    return NewCI;
  }

  BinaryOperator *BinOp = cast<BinaryOperator>(FirstInst);
  BinaryOperator *NewBinOp =
    BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);
  if (isNUW) NewBinOp->setHasNoUnsignedWrap();
  if (isNSW) NewBinOp->setHasNoSignedWrap();
  if (isExact) NewBinOp->setIsExact();
  NewBinOp->setDebugLoc(FirstInst->getDebugLoc());
  return NewBinOp;
}

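/// FoldPHIArgGEPIntoPHI - Illustrative sketch of the transformation
/// (hypothetical IR; names are made up): a phi of single-use GEPs that differ
/// in at most one non-constant operand, e.g.
///   %g1 = getelementptr i32* %base, i64 %i
///   %g2 = getelementptr i32* %base, i64 %j
///   %p  = phi i32* [ %g1, %pred1 ], [ %g2, %pred2 ]
/// is rewritten to phi the varying operand and do the GEP once:
///   %idx = phi i64 [ %i, %pred1 ], [ %j, %pred2 ]
///   %p   = getelementptr i32* %base, i64 %idx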
Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
  GetElementPtrInst *FirstInst =cast<GetElementPtrInst>(PN.getIncomingValue(0));

  SmallVector<Value*, 16> FixedOperands(FirstInst->op_begin(),
                                        FirstInst->op_end());
  // This is true if all GEP bases are allocas and if all indices into them are
  // constants.
  bool AllBasePointersAreAllocas = true;

  // We don't want to replace this phi if the replacement would require
  // more than one phi, which leads to higher register pressure. This is
  // especially bad when the PHIs are in the header of a loop.
  bool NeededPhi = false;

  bool AllInBounds = true;

  // Scan to see if all operands are the same opcode, and all have one use.
  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
    GetElementPtrInst *GEP= dyn_cast<GetElementPtrInst>(PN.getIncomingValue(i));
    if (!GEP || !GEP->hasOneUse() || GEP->getType() != FirstInst->getType() ||
      GEP->getNumOperands() != FirstInst->getNumOperands())
      return nullptr;

    AllInBounds &= GEP->isInBounds();

    // Keep track of whether or not all GEPs are of alloca pointers.
    if (AllBasePointersAreAllocas &&
        (!isa<AllocaInst>(GEP->getOperand(0)) ||
         !GEP->hasAllConstantIndices()))
      AllBasePointersAreAllocas = false;

    // Compare the operand lists.
    for (unsigned op = 0, e = FirstInst->getNumOperands(); op != e; ++op) {
      if (FirstInst->getOperand(op) == GEP->getOperand(op))
        continue;

      // Don't merge two GEPs when two operands differ (introducing phi nodes)
      // if one of the PHIs has a constant for the index.  The index may be
      // substantially cheaper to compute for the constants, so making it a
      // variable index could pessimize the path.  This also handles the case
      // for struct indices, which must always be constant.
      if (isa<ConstantInt>(FirstInst->getOperand(op)) ||
          isa<ConstantInt>(GEP->getOperand(op)))
        return nullptr;

      if (FirstInst->getOperand(op)->getType() !=GEP->getOperand(op)->getType())
        return nullptr;

      // If we already needed a PHI for an earlier operand, and another operand
      // also requires a PHI, we'd be introducing more PHIs than we're
      // eliminating, which increases register pressure on entry to the PHI's
      // block.
      if (NeededPhi)
        return nullptr;

      FixedOperands[op] = nullptr;  // Needs a PHI.
      NeededPhi = true;
    }
  }

  // If all of the base pointers of the PHI'd GEPs are from allocas, don't
  // bother doing this transformation.  At best, this will just save a bit of
  // offset calculation, but all the predecessors will have to materialize the
  // stack address into a register anyway.  We'd actually rather *clone* the
  // load up into the predecessors so that we have a load of a gep of an alloca,
  // which can usually all be folded into the load.
  if (AllBasePointersAreAllocas)
    return nullptr;

  // Otherwise, this is safe to transform.  Insert PHI nodes for each operand
  // that is variable.
  SmallVector<PHINode*, 16> OperandPhis(FixedOperands.size());

  bool HasAnyPHIs = false;
  for (unsigned i = 0, e = FixedOperands.size(); i != e; ++i) {
    if (FixedOperands[i]) continue;  // operand doesn't need a phi.
    Value *FirstOp = FirstInst->getOperand(i);
    PHINode *NewPN = PHINode::Create(FirstOp->getType(), e,
                                     FirstOp->getName()+".pn");
    InsertNewInstBefore(NewPN, PN);

    NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
    OperandPhis[i] = NewPN;
    FixedOperands[i] = NewPN;
    HasAnyPHIs = true;
  }


  // Add all operands to the new PHIs.
  if (HasAnyPHIs) {
    for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
      GetElementPtrInst *InGEP =cast<GetElementPtrInst>(PN.getIncomingValue(i));
      BasicBlock *InBB = PN.getIncomingBlock(i);

      for (unsigned op = 0, e = OperandPhis.size(); op != e; ++op)
        if (PHINode *OpPhi = OperandPhis[op])
          OpPhi->addIncoming(InGEP->getOperand(op), InBB);
    }
  }

  Value *Base = FixedOperands[0];
  GetElementPtrInst *NewGEP =
    GetElementPtrInst::Create(Base, makeArrayRef(FixedOperands).slice(1));
  if (AllInBounds) NewGEP->setIsInBounds();
  NewGEP->setDebugLoc(FirstInst->getDebugLoc());
  return NewGEP;
}


/// isSafeAndProfitableToSinkLoad - Return true if we know that it is safe to
/// sink the load out of the block that defines it.  This means that it must be
/// obvious the value of the load is not changed from the point of the load to
/// the end of the block it is in.
///
/// Finally, it is safe, but not profitable, to sink a load targeting a
/// non-address-taken alloca.  Doing so will cause us to not promote the alloca
/// to a register.
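///
/// For example (hypothetical IR): in
///   %v = load i32* %p
///   store i32 0, i32* %q
///   br label %merge
/// the load cannot be sunk out of its block, because the store that follows it
/// may write to the loaded location; any later instruction that may write to
/// memory makes sinking unsafe.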
static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
  BasicBlock::iterator BBI = L, E = L->getParent()->end();

  for (++BBI; BBI != E; ++BBI)
    if (BBI->mayWriteToMemory())
      return false;

  // Check for non-address taken alloca.  If not address-taken already, it isn't
  // profitable to do this xform.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
    bool isAddressTaken = false;
    for (User *U : AI->users()) {
      if (isa<LoadInst>(U)) continue;
      if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
        // If storing TO the alloca, then the address isn't taken.
        if (SI->getOperand(1) == AI) continue;
      }
      isAddressTaken = true;
      break;
    }

    if (!isAddressTaken && AI->isStaticAlloca())
      return false;
  }

  // If this load is a load from a GEP with a constant offset from an alloca,
  // then we don't want to sink it.  In its present form, it will be
  // load [constant stack offset].  Sinking it will cause us to have to
  // materialize the stack addresses in each predecessor in a register only to
  // do a shared load from register in the successor.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
    if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
      if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
        return false;

  return true;
}

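/// FoldPHIArgLoadIntoPHI - Illustrative sketch (hypothetical IR; names are
/// made up): a phi of single-use loads, one in each predecessor,
///   %v1 = load i32* %p1          ; in %pred1
///   %v2 = load i32* %p2          ; in %pred2
///   %r  = phi i32 [ %v1, %pred1 ], [ %v2, %pred2 ]
/// is turned into a phi of the addresses followed by a single load:
///   %addr = phi i32* [ %p1, %pred1 ], [ %p2, %pred2 ]
///   %r    = load i32* %addr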
Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
  LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));

  // FIXME: This is overconservative; this transform is allowed in some cases
  // for atomic operations.
  if (FirstLI->isAtomic())
    return nullptr;

  // When processing loads, we need to propagate two bits of information to the
  // sunk load: whether it is volatile, and what its alignment is.  We currently
  // don't sink loads when some have their alignment specified and some don't.
  // visitLoadInst will propagate an alignment onto the load when DL is
  // available, and if it isn't, we can't handle the mixed case.
  bool isVolatile = FirstLI->isVolatile();
  unsigned LoadAlignment = FirstLI->getAlignment();
  unsigned LoadAddrSpace = FirstLI->getPointerAddressSpace();

  // We can't sink the load if the loaded value could be modified between the
  // load and the PHI.
  if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
      !isSafeAndProfitableToSinkLoad(FirstLI))
    return nullptr;

  // If the PHI is of volatile loads and the load block has multiple
  // successors, sinking it would remove a load of the volatile value from
  // the path through the other successor.
  if (isVolatile &&
      FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
    return nullptr;

  // Check to see if all arguments are the same operation.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    LoadInst *LI = dyn_cast<LoadInst>(PN.getIncomingValue(i));
    if (!LI || !LI->hasOneUse())
      return nullptr;

    // We can't sink the load if the loaded value could be modified between
    // the load and the PHI.
    if (LI->isVolatile() != isVolatile ||
        LI->getParent() != PN.getIncomingBlock(i) ||
        LI->getPointerAddressSpace() != LoadAddrSpace ||
        !isSafeAndProfitableToSinkLoad(LI))
      return nullptr;

    // If some of the loads have an alignment specified but not all of them,
    // we can't do the transformation.
    if ((LoadAlignment != 0) != (LI->getAlignment() != 0))
      return nullptr;

    LoadAlignment = std::min(LoadAlignment, LI->getAlignment());

    // If the PHI is of volatile loads and the load block has multiple
    // successors, sinking it would remove a load of the volatile value from
    // the path through the other successor.
    if (isVolatile &&
        LI->getParent()->getTerminator()->getNumSuccessors() != 1)
      return nullptr;
  }

  // Okay, they are all the same operation.  Create a new PHI node of the
  // correct type, and PHI together all of the LHS's of the instructions.
  PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(),
                                   PN.getNumIncomingValues(),
                                   PN.getName()+".in");

  Value *InVal = FirstLI->getOperand(0);
  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));

  // Add all operands to the new PHI.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    Value *NewInVal = cast<LoadInst>(PN.getIncomingValue(i))->getOperand(0);
    if (NewInVal != InVal)
      InVal = nullptr;
    NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
  }

  Value *PhiVal;
  if (InVal) {
    // The new PHI unions all of the same values together.  This is really
    // common, so we handle it intelligently here for compile-time speed.
    PhiVal = InVal;
    delete NewPN;
  } else {
    InsertNewInstBefore(NewPN, PN);
    PhiVal = NewPN;
  }

  // If this was a volatile load that we are merging, make sure to loop through
  // and mark all the input loads as non-volatile.  If we don't do this, we will
  // insert a new volatile load and the old ones will not be deletable.
  if (isVolatile)
    for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
      cast<LoadInst>(PN.getIncomingValue(i))->setVolatile(false);

  LoadInst *NewLI = new LoadInst(PhiVal, "", isVolatile, LoadAlignment);
  NewLI->setDebugLoc(FirstLI->getDebugLoc());
  return NewLI;
}



/// FoldPHIArgOpIntoPHI - If all operands to a PHI node are the same "unary"
/// operator and they all are only used by the PHI, PHI together their
/// inputs, and apply the operation once to the result of the PHI.
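///
/// Illustrative sketch (hypothetical IR; names are made up): for casts from a
/// common source type,
///   %e1 = zext i16 %a to i32
///   %e2 = zext i16 %b to i32
///   %r  = phi i32 [ %e1, %pred1 ], [ %e2, %pred2 ]
/// becomes a narrow phi followed by one cast:
///   %in = phi i16 [ %a, %pred1 ], [ %b, %pred2 ]
///   %r  = zext i16 %in to i32
/// Binary operators and compares with a common constant RHS are handled the
/// same way; binops/compares whose RHS is not a constant are dispatched to
/// FoldPHIArgBinOpIntoPHI instead.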
Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));

  if (isa<GetElementPtrInst>(FirstInst))
    return FoldPHIArgGEPIntoPHI(PN);
  if (isa<LoadInst>(FirstInst))
    return FoldPHIArgLoadIntoPHI(PN);

  // Scan the instruction, looking for input operations that can be folded away.
  // If all input operands to the phi are the same instruction (e.g. a cast from
  // the same type or "+42") we can pull the operation through the PHI, reducing
  // code size and simplifying code.
  Constant *ConstantOp = nullptr;
  Type *CastSrcTy = nullptr;
  bool isNUW = false, isNSW = false, isExact = false;

  if (isa<CastInst>(FirstInst)) {
    CastSrcTy = FirstInst->getOperand(0)->getType();

    // Be careful about transforming integer PHIs.  We don't want to pessimize
    // the code by turning an i32 into an i1293.
    if (PN.getType()->isIntegerTy() && CastSrcTy->isIntegerTy()) {
      if (!ShouldChangeType(PN.getType(), CastSrcTy))
        return nullptr;
    }
  } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
    // Can fold binop, compare or shift here if the RHS is a constant,
    // otherwise call FoldPHIArgBinOpIntoPHI.
    ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
    if (!ConstantOp)
      return FoldPHIArgBinOpIntoPHI(PN);

    if (OverflowingBinaryOperator *BO =
        dyn_cast<OverflowingBinaryOperator>(FirstInst)) {
      isNUW = BO->hasNoUnsignedWrap();
      isNSW = BO->hasNoSignedWrap();
    } else if (PossiblyExactOperator *PEO =
               dyn_cast<PossiblyExactOperator>(FirstInst))
      isExact = PEO->isExact();
  } else {
    return nullptr;  // Cannot fold this operation.
  }

  // Check to see if all arguments are the same operation.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
    if (!I || !I->hasOneUse() || !I->isSameOperationAs(FirstInst))
      return nullptr;
    if (CastSrcTy) {
      if (I->getOperand(0)->getType() != CastSrcTy)
        return nullptr;  // Cast operation must match.
    } else if (I->getOperand(1) != ConstantOp) {
      return nullptr;
    }

    if (isNUW)
      isNUW = cast<OverflowingBinaryOperator>(I)->hasNoUnsignedWrap();
    if (isNSW)
      isNSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    if (isExact)
      isExact = cast<PossiblyExactOperator>(I)->isExact();
  }

  // Okay, they are all the same operation.  Create a new PHI node of the
  // correct type, and PHI together all of the LHS's of the instructions.
  PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
                                   PN.getNumIncomingValues(),
                                   PN.getName()+".in");

  Value *InVal = FirstInst->getOperand(0);
  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));

  // Add all operands to the new PHI.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    Value *NewInVal = cast<Instruction>(PN.getIncomingValue(i))->getOperand(0);
    if (NewInVal != InVal)
      InVal = nullptr;
    NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
  }

  Value *PhiVal;
  if (InVal) {
    // The new PHI unions all of the same values together.  This is really
    // common, so we handle it intelligently here for compile-time speed.
    PhiVal = InVal;
    delete NewPN;
  } else {
    InsertNewInstBefore(NewPN, PN);
    PhiVal = NewPN;
  }

  // Insert and return the new operation.
  if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst)) {
    CastInst *NewCI = CastInst::Create(FirstCI->getOpcode(), PhiVal,
                                       PN.getType());
    NewCI->setDebugLoc(FirstInst->getDebugLoc());
    return NewCI;
  }

  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst)) {
    BinOp = BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
    if (isNUW) BinOp->setHasNoUnsignedWrap();
    if (isNSW) BinOp->setHasNoSignedWrap();
    if (isExact) BinOp->setIsExact();
    BinOp->setDebugLoc(FirstInst->getDebugLoc());
    return BinOp;
  }

  CmpInst *CIOp = cast<CmpInst>(FirstInst);
  CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
                                   PhiVal, ConstantOp);
  NewCI->setDebugLoc(FirstInst->getDebugLoc());
  return NewCI;
}

/// DeadPHICycle - Return true if this PHI node is only used by a PHI node cycle
/// that is dead.
static bool DeadPHICycle(PHINode *PN,
                         SmallPtrSet<PHINode*, 16> &PotentiallyDeadPHIs) {
  if (PN->use_empty()) return true;
  if (!PN->hasOneUse()) return false;

  // Remember this node, and if we find the cycle, return.
  if (!PotentiallyDeadPHIs.insert(PN))
    return true;

  // Don't scan crazily complex things.
  if (PotentiallyDeadPHIs.size() == 16)
    return false;

  if (PHINode *PU = dyn_cast<PHINode>(PN->user_back()))
    return DeadPHICycle(PU, PotentiallyDeadPHIs);

  return false;
}

/// PHIsEqualValue - Return true if this phi node is always equal to
/// NonPhiInVal.  This happens with mutually cyclic phi nodes like:
///   z = some value; x = phi (y, z); y = phi (x, z)
static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
                           SmallPtrSet<PHINode*, 16> &ValueEqualPHIs) {
  // See if we already saw this PHI node.
  if (!ValueEqualPHIs.insert(PN))
    return true;

  // Don't scan crazily complex things.
  if (ValueEqualPHIs.size() == 16)
    return false;

  // Scan the operands to see if they are either phi nodes or are equal to
  // the value.
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *Op = PN->getIncomingValue(i);
    if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
      if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs))
        return false;
    } else if (Op != NonPhiInVal)
      return false;
  }

  return true;
}


namespace {
struct PHIUsageRecord {
  unsigned PHIId;     // The ID # of the PHI (something deterministic to sort on)
  unsigned Shift;     // The amount shifted.
  Instruction *Inst;  // The trunc instruction.

  PHIUsageRecord(unsigned pn, unsigned Sh, Instruction *User)
    : PHIId(pn), Shift(Sh), Inst(User) {}

  bool operator<(const PHIUsageRecord &RHS) const {
    if (PHIId < RHS.PHIId) return true;
    if (PHIId > RHS.PHIId) return false;
    if (Shift < RHS.Shift) return true;
    if (Shift > RHS.Shift) return false;
    return Inst->getType()->getPrimitiveSizeInBits() <
           RHS.Inst->getType()->getPrimitiveSizeInBits();
  }
};

struct LoweredPHIRecord {
  PHINode *PN;        // The PHI that was lowered.
  unsigned Shift;     // The amount shifted.
  unsigned Width;     // The width extracted.

  LoweredPHIRecord(PHINode *pn, unsigned Sh, Type *Ty)
    : PN(pn), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}

  // Ctor form used by DenseMap.
  LoweredPHIRecord(PHINode *pn, unsigned Sh)
    : PN(pn), Shift(Sh), Width(0) {}
};
}

namespace llvm {
  template<>
  struct DenseMapInfo<LoweredPHIRecord> {
    static inline LoweredPHIRecord getEmptyKey() {
      return LoweredPHIRecord(nullptr, 0);
    }
    static inline LoweredPHIRecord getTombstoneKey() {
      return LoweredPHIRecord(nullptr, 1);
    }
    static unsigned getHashValue(const LoweredPHIRecord &Val) {
      return DenseMapInfo<PHINode*>::getHashValue(Val.PN) ^ (Val.Shift>>3) ^
             (Val.Width>>3);
    }
    static bool isEqual(const LoweredPHIRecord &LHS,
                        const LoweredPHIRecord &RHS) {
      return LHS.PN == RHS.PN && LHS.Shift == RHS.Shift &&
             LHS.Width == RHS.Width;
    }
  };
}


/// SliceUpIllegalIntegerPHI - This is an integer PHI and we know that it has an
/// illegal type: see if it is only used by trunc or trunc(lshr) operations.  If
/// so, we split the PHI into the various pieces being extracted.  This sort of
/// thing is introduced when SROA promotes an aggregate to large integer values.
///
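/// Illustrative sketch (hypothetical IR; assumes i128 is not a legal integer
/// type for the target): given
///   %p  = phi i128 [ %a, %pred1 ], [ %b, %pred2 ]
///   %lo = trunc i128 %p to i64
///   %s  = lshr i128 %p, 64
///   %hi = trunc i128 %s to i64
/// the i128 phi is replaced by one i64 phi per extracted piece, with the
/// lshr/trunc work moved into the predecessors:
///   %p.off0  = phi i64 [ %a.t, %pred1 ], [ %b.t, %pred2 ]
///   %p.off64 = phi i64 [ %a.h, %pred1 ], [ %b.h, %pred2 ]
/// where %a.t, %a.h, etc. are extracts inserted at the end of each predecessor.
///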
/// TODO: The user of the trunc may be a bitcast to float/double/vector or an
/// inttoptr.  We should produce new PHIs in the right type.
///
Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
  // PHIUsers - Keep track of all of the truncated values extracted from a set
  // of PHIs, along with their offset.  These are the things we want to rewrite.
  SmallVector<PHIUsageRecord, 16> PHIUsers;

  // PHIs are often mutually cyclic, so we keep track of a whole set of PHI
  // nodes which are extracted from. PHIsInspected is a set we use to avoid
  // revisiting PHIs; PHIsToSlice is an ordered list of PHIs whose uses we need
  // to check (to ensure they are all extracts).
  SmallVector<PHINode*, 8> PHIsToSlice;
  SmallPtrSet<PHINode*, 8> PHIsInspected;

  PHIsToSlice.push_back(&FirstPhi);
  PHIsInspected.insert(&FirstPhi);

  for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) {
    PHINode *PN = PHIsToSlice[PHIId];

    // Scan the input list of the PHI.  If any input is an invoke, and if the
    // input is defined in the predecessor, then we won't be able to split the
    // critical edge which is required to insert a truncate.  Because of this,
    // we have to bail out.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      InvokeInst *II = dyn_cast<InvokeInst>(PN->getIncomingValue(i));
      if (!II) continue;
      if (II->getParent() != PN->getIncomingBlock(i))
        continue;

      // If we have an invoke directly in the predecessor, then we have a
      // critical edge where we need to put the truncate.  Since we can't
      // split the edge in instcombine, we have to bail out.
      return nullptr;
    }

    for (User *U : PN->users()) {
      Instruction *UserI = cast<Instruction>(U);

      // If the user is a PHI, inspect its uses recursively.
      if (PHINode *UserPN = dyn_cast<PHINode>(UserI)) {
        if (PHIsInspected.insert(UserPN))
          PHIsToSlice.push_back(UserPN);
        continue;
      }

      // Truncates are always ok.
      if (isa<TruncInst>(UserI)) {
        PHIUsers.push_back(PHIUsageRecord(PHIId, 0, UserI));
        continue;
      }

      // Otherwise it must be a lshr which can only be used by one trunc.
      if (UserI->getOpcode() != Instruction::LShr ||
          !UserI->hasOneUse() || !isa<TruncInst>(UserI->user_back()) ||
          !isa<ConstantInt>(UserI->getOperand(1)))
        return nullptr;

      unsigned Shift = cast<ConstantInt>(UserI->getOperand(1))->getZExtValue();
      PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, UserI->user_back()));
    }
  }

  // If we have no users, they must all be self uses; just nuke the PHI.
  if (PHIUsers.empty())
    return ReplaceInstUsesWith(FirstPhi, UndefValue::get(FirstPhi.getType()));

  // If this phi node is transformable, create new PHIs for all the pieces
  // extracted out of it.  First, sort the users by their offset and size.
  array_pod_sort(PHIUsers.begin(), PHIUsers.end());

  DEBUG(dbgs() << "SLICING UP PHI: " << FirstPhi << '\n';
        for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
          dbgs() << "AND USER PHI #" << i << ": " << *PHIsToSlice[i] << '\n';
    );

  // PredValues - This is a temporary used when rewriting PHI nodes.  It is
  // hoisted out here to avoid construction/destruction thrashing.
  DenseMap<BasicBlock*, Value*> PredValues;

  // ExtractedVals - Each new PHI we introduce is saved here so we don't
  // introduce redundant PHIs.
  DenseMap<LoweredPHIRecord, PHINode*> ExtractedVals;

  for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) {
    unsigned PHIId = PHIUsers[UserI].PHIId;
    PHINode *PN = PHIsToSlice[PHIId];
    unsigned Offset = PHIUsers[UserI].Shift;
    Type *Ty = PHIUsers[UserI].Inst->getType();

    PHINode *EltPHI;

    // If we've already lowered a user like this, reuse the previously lowered
    // value.
    if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == nullptr) {

      // Otherwise, create the new PHI node for this user.
      EltPHI = PHINode::Create(Ty, PN->getNumIncomingValues(),
                               PN->getName()+".off"+Twine(Offset), PN);
      assert(EltPHI->getType() != PN->getType() &&
             "Truncate didn't shrink phi?");

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        BasicBlock *Pred = PN->getIncomingBlock(i);
        Value *&PredVal = PredValues[Pred];

        // If we already have a value for this predecessor, reuse it.
        if (PredVal) {
          EltPHI->addIncoming(PredVal, Pred);
          continue;
        }

        // Handle the PHI self-reuse case.
        Value *InVal = PN->getIncomingValue(i);
        if (InVal == PN) {
          PredVal = EltPHI;
          EltPHI->addIncoming(PredVal, Pred);
          continue;
        }

        if (PHINode *InPHI = dyn_cast<PHINode>(InVal)) {
          // If the incoming value was a PHI, and if it was one of the PHIs we
          // already rewrote, just use the lowered value.
          if (Value *Res = ExtractedVals[LoweredPHIRecord(InPHI, Offset, Ty)]) {
            PredVal = Res;
            EltPHI->addIncoming(PredVal, Pred);
            continue;
          }
        }

        // Otherwise, do an extract in the predecessor.
        Builder->SetInsertPoint(Pred, Pred->getTerminator());
        Value *Res = InVal;
        if (Offset)
          Res = Builder->CreateLShr(Res, ConstantInt::get(InVal->getType(),
                                                          Offset), "extract");
        Res = Builder->CreateTrunc(Res, Ty, "extract.t");
        PredVal = Res;
        EltPHI->addIncoming(Res, Pred);

        // If the incoming value was a PHI, and if it was one of the PHIs we are
        // rewriting, we will ultimately delete the code we inserted.  This
        // means we need to revisit that PHI to make sure we extract out the
        // needed piece.
        if (PHINode *OldInVal = dyn_cast<PHINode>(PN->getIncomingValue(i)))
          if (PHIsInspected.count(OldInVal)) {
            unsigned RefPHIId = std::find(PHIsToSlice.begin(),PHIsToSlice.end(),
                                          OldInVal)-PHIsToSlice.begin();
            PHIUsers.push_back(PHIUsageRecord(RefPHIId, Offset,
                                              cast<Instruction>(Res)));
            ++UserE;
          }
      }
      PredValues.clear();

      DEBUG(dbgs() << "  Made element PHI for offset " << Offset << ": "
                   << *EltPHI << '\n');
      ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
    }

    // Replace the use of this piece with the PHI node.
    ReplaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
  }

  // Replace all the remaining uses of the PHI nodes (self uses and the lshrs)
  // with undefs.
  Value *Undef = UndefValue::get(FirstPhi.getType());
  for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
    ReplaceInstUsesWith(*PHIsToSlice[i], Undef);
  return ReplaceInstUsesWith(FirstPhi, Undef);
}

// PHINode simplification
//
Instruction *InstCombiner::visitPHINode(PHINode &PN) {
  if (Value *V = SimplifyInstruction(&PN, DL, TLI))
    return ReplaceInstUsesWith(PN, V);

  // If all PHI operands are the same operation, pull them through the PHI,
  // reducing code size.
  if (isa<Instruction>(PN.getIncomingValue(0)) &&
      isa<Instruction>(PN.getIncomingValue(1)) &&
      cast<Instruction>(PN.getIncomingValue(0))->getOpcode() ==
      cast<Instruction>(PN.getIncomingValue(1))->getOpcode() &&
      // FIXME: The hasOneUse check will fail for PHIs that use the value more
      // than once, even when the PHI itself is the only user.
      PN.getIncomingValue(0)->hasOneUse())
    if (Instruction *Result = FoldPHIArgOpIntoPHI(PN))
      return Result;

  // If this is a trivial cycle in the PHI node graph, remove it.  Basically, if
  // this PHI only has a single use (a PHI), and if that PHI only has one use (a
  // PHI)... break the cycle.
  if (PN.hasOneUse()) {
    Instruction *PHIUser = cast<Instruction>(PN.user_back());
    if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
      SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
      PotentiallyDeadPHIs.insert(&PN);
      if (DeadPHICycle(PU, PotentiallyDeadPHIs))
        return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
    }

    // If this phi has a single use, and if that use just computes a value for
    // the next iteration of a loop, delete the phi.  This occurs with unused
    // induction variables, e.g. "for (int j = 0; ; ++j);".  Detecting this
    // common case here is good because the only other things that catch this
    // are induction variable analysis (sometimes) and ADCE, which is only run
    // late.
    if (PHIUser->hasOneUse() &&
        (isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
        PHIUser->user_back() == &PN) {
      return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
    }
  }

  // We sometimes end up with phi cycles that non-obviously end up being the
  // same value, for example:
  //   z = some value; x = phi (y, z); y = phi (x, z)
  // where the phi nodes don't necessarily need to be in the same block.  Do a
  // quick check to see if the PHI node only contains a single non-phi value;
  // if so, scan to see if the phi cycle is actually equal to that value.
  {
    unsigned InValNo = 0, NumIncomingVals = PN.getNumIncomingValues();
    // Scan for the first non-phi operand.
    while (InValNo != NumIncomingVals &&
           isa<PHINode>(PN.getIncomingValue(InValNo)))
      ++InValNo;

    if (InValNo != NumIncomingVals) {
      Value *NonPhiInVal = PN.getIncomingValue(InValNo);

      // Scan the rest of the operands to see if there are any conflicts; if
      // so, there is no need to recursively scan other phis.
      for (++InValNo; InValNo != NumIncomingVals; ++InValNo) {
        Value *OpVal = PN.getIncomingValue(InValNo);
        if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
          break;
      }

      // If we scanned over all operands, then we have one unique value plus
      // phi values.  Scan PHI nodes to see if they all merge in each other or
      // the value.
      if (InValNo == NumIncomingVals) {
        SmallPtrSet<PHINode*, 16> ValueEqualPHIs;
        if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
          return ReplaceInstUsesWith(PN, NonPhiInVal);
      }
    }
  }

  // If there are multiple PHIs, sort their operands so that they all list
  // the blocks in the same order. This will help identical PHIs be eliminated
  // by other passes. Other passes shouldn't depend on this for correctness
  // however.
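  //
  // For example (hypothetical IR): if the first PHI in the block is
  //   %p = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
  // and this PHI is
  //   %q = phi i32 [ %d, %bb2 ], [ %c, %bb1 ]
  // then %q's incoming entries are swapped so both PHIs list %bb1 first.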
  PHINode *FirstPN = cast<PHINode>(PN.getParent()->begin());
  if (&PN != FirstPN)
    for (unsigned i = 0, e = FirstPN->getNumIncomingValues(); i != e; ++i) {
      BasicBlock *BBA = PN.getIncomingBlock(i);
      BasicBlock *BBB = FirstPN->getIncomingBlock(i);
      if (BBA != BBB) {
        Value *VA = PN.getIncomingValue(i);
        unsigned j = PN.getBasicBlockIndex(BBB);
        Value *VB = PN.getIncomingValue(j);
        PN.setIncomingBlock(i, BBB);
        PN.setIncomingValue(i, VB);
        PN.setIncomingBlock(j, BBA);
        PN.setIncomingValue(j, VA);
        // NOTE: Instcombine normally would want us to "return &PN" if we
        // modified any of the operands of an instruction.  However, since we
        // aren't adding or removing uses (just rearranging them) we don't do
        // this in this case.
      }
    }

  // If this is an integer PHI and we know that it has an illegal type, see if
  // it is only used by trunc or trunc(lshr) operations.  If so, we split the
  // PHI into the various pieces being extracted.  This sort of thing is
  // introduced when SROA promotes an aggregate to a single large integer type.
  if (PN.getType()->isIntegerTy() && DL &&
      !DL->isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
    if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
      return Res;

  return nullptr;
}
    901