//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumSimplified, "Number of library calls simplified");

/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
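/// For example, i1, i8, and i16 all promote to i32; i32 and wider integer
/// types (and non-integer types) are returned unchanged.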
static Type *getPromotedType(Type *Ty) {
  if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

/// reduceToSingleValueType - Given an aggregate type which ultimately holds a
/// single scalar element, like {{{type}}} or [1 x type], return type.
static Type *reduceToSingleValueType(Type *T) {
  while (!T->isSingleValueType()) {
    if (StructType *STy = dyn_cast<StructType>(T)) {
      if (STy->getNumElements() == 1)
        T = STy->getElementType(0);
      else
        break;
    } else if (ArrayType *ATy = dyn_cast<ArrayType>(T)) {
      if (ATy->getNumElements() == 1)
        T = ATy->getElementType();
      else
        break;
    } else
      break;
  }

  return T;
}

Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL, MI, AC, DT);
  unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL, MI, AC, DT);
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
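  // For example (a sketch; exact IR depends on the module), an 8-byte copy
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i32 8,
  //                                        i1 false)
  // becomes an i64 load from %s followed by an i64 store to %d.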
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (!MemOpLength) return nullptr;

  // Source and destination pointer types are always "i8*" for the intrinsic.
  // See if the size is something we can handle with a single primitive
  // load/store.  A single load+store correctly handles overlapping memory in
  // the memmove case.
  uint64_t Size = MemOpLength->getLimitedValue();
  assert(Size && "0-sized memory transfers should already be removed.");

  if (Size > 8 || (Size&(Size-1)))
    return nullptr;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // Memcpy forces the use of i8* for the source and destination.  That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*.  We'd much rather use a double load+store than an
  // i64 load+store here, because that improves the odds that the source or
  // dest address will be promotable.  See if we can find a better type than
  // the integer datatype.
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  MDNode *CopyMD = nullptr;
  if (StrippedDest != MI->getArgOperand(0)) {
    Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                                    ->getElementType();
    if (SrcETy->isSized() && DL.getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double].  Rip
      // down through these levels if so.
      SrcETy = reduceToSingleValueType(SrcETy);

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);

        // If the memcpy has metadata describing the members, see if we can
        // get the TBAA tag describing our copy.
        if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
          if (M->getNumOperands() == 3 && M->getOperand(0) &&
              mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
              mdconst::extract<ConstantInt>(M->getOperand(0))->isNullValue() &&
              M->getOperand(1) &&
              mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
              mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
                  Size &&
              M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
            CopyMD = cast<MDNode>(M->getOperand(2));
        }
      }
    }
  }

  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile());
  L->setAlignment(SrcAlign);
  if (CopyMD)
    L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  StoreInst *S = Builder->CreateStore(L, Dest, MI->isVolatile());
  S->setAlignment(DstAlign);
  if (CopyMD)
    S->setMetadata(LLVMContext::MD_tbaa, CopyMD);

  // Set the size of the copy to 0; it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = getKnownAlignment(MI->getDest(), DL, MI, AC, DT);
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length, alignment, and fill value if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return nullptr;
  uint64_t Len = LenC->getLimitedValue();
  Alignment = MI->getAlignment();
  assert(Len && "0-sized memory sets should already be removed.");

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
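  // For example (a sketch): a 4-byte memset with fill byte 0xAB becomes
  //   store i32 0xABABABAB, i32* %dest
  // after the fill byte is replicated by multiplying with 0x0101010101010101.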
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder->CreateBitCast(Dest, NewDstPtrTy);

    // Alignment 0 is identity for alignment 1 for memset, but not for store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    StoreInst *S = Builder->CreateStore(ConstantInt::get(ITy, Fill), Dest,
                                        MI->isVolatile());
    S->setAlignment(Alignment);

    // Set the length of the memset to 0; it will be deleted on the next
    // iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return nullptr;
}

static Value *SimplifyX86insertps(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {
  if (auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2))) {
    VectorType *VecTy = cast<VectorType>(II.getType());
    ConstantAggregateZero *ZeroVector = ConstantAggregateZero::get(VecTy);

    // The immediate permute control byte looks like this:
    //    [3:0] - zero mask for each 32-bit lane
    //    [5:4] - select one 32-bit destination lane
    //    [7:6] - select one 32-bit source lane
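    // For example (a sketch): Imm = 0x40 gives SourceLane = 1, DestLane = 0,
    // and ZMask = 0, so the result is shufflevector(Op0, Op1, <5, 1, 2, 3>):
    // lane 0 of the result comes from lane 1 of the second operand.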

    uint8_t Imm = CInt->getZExtValue();
    uint8_t ZMask = Imm & 0xf;
    uint8_t DestLane = (Imm >> 4) & 0x3;
    uint8_t SourceLane = (Imm >> 6) & 0x3;

    // If all zero mask bits are set, this was just a weird way to
    // generate a zero vector.
    if (ZMask == 0xf)
      return ZeroVector;

    // TODO: Model this case as two shuffles or a 'logical and' plus shuffle?
    if (ZMask)
      return nullptr;

    assert(VecTy->getNumElements() == 4 && "insertps with wrong vector type");

    // If we're not zeroing anything, this is a single shuffle.
    // Replace the selected destination lane with the selected source lane.
    // For all other lanes, pass the first source bits through.
    int ShuffleMask[4] = { 0, 1, 2, 3 };
    ShuffleMask[DestLane] = SourceLane + 4;

    return Builder.CreateShuffleVector(II.getArgOperand(0), II.getArgOperand(1),
                                       ShuffleMask);
  }
  return nullptr;
}

/// The shuffle mask for a perm2*128 selects any two halves of two 256-bit
/// source vectors, unless a zero bit is set. If a zero bit is set,
/// then ignore that half of the mask and clear that half of the vector.
static Value *SimplifyX86vperm2(const IntrinsicInst &II,
                                InstCombiner::BuilderTy &Builder) {
  if (auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2))) {
    VectorType *VecTy = cast<VectorType>(II.getType());
    ConstantAggregateZero *ZeroVector = ConstantAggregateZero::get(VecTy);

    // The immediate permute control byte looks like this:
    //    [1:0] - select 128 bits from sources for low half of destination
    //    [2]   - ignore
    //    [3]   - zero low half of destination
    //    [5:4] - select 128 bits from sources for high half of destination
    //    [6]   - ignore
    //    [7]   - zero high half of destination
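    // For example (a sketch): with <8 x float> operands, Imm = 0x20 selects
    // the low 128 bits of each source, producing
    // shufflevector(Op0, Op1, <0, 1, 2, 3, 8, 9, 10, 11>).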

    uint8_t Imm = CInt->getZExtValue();

    bool LowHalfZero = Imm & 0x08;
    bool HighHalfZero = Imm & 0x80;

    // If both zero mask bits are set, this was just a weird way to
    // generate a zero vector.
    if (LowHalfZero && HighHalfZero)
      return ZeroVector;

    // If 0 or 1 zero mask bits are set, this is a simple shuffle.
    unsigned NumElts = VecTy->getNumElements();
    unsigned HalfSize = NumElts / 2;
    SmallVector<int, 8> ShuffleMask(NumElts);

    // The high bit of the selection field chooses the 1st or 2nd operand.
    bool LowInputSelect = Imm & 0x02;
    bool HighInputSelect = Imm & 0x20;

    // The low bit of the selection field chooses the low or high half
    // of the selected operand.
    bool LowHalfSelect = Imm & 0x01;
    bool HighHalfSelect = Imm & 0x10;

    // Determine which operand(s) are actually in use for this instruction.
    Value *V0 = LowInputSelect ? II.getArgOperand(1) : II.getArgOperand(0);
    Value *V1 = HighInputSelect ? II.getArgOperand(1) : II.getArgOperand(0);

    // If needed, replace operands based on zero mask.
    V0 = LowHalfZero ? ZeroVector : V0;
    V1 = HighHalfZero ? ZeroVector : V1;

    // Permute low half of result.
    unsigned StartIndex = LowHalfSelect ? HalfSize : 0;
    for (unsigned i = 0; i < HalfSize; ++i)
      ShuffleMask[i] = StartIndex + i;

    // Permute high half of result.
    StartIndex = HighHalfSelect ? HalfSize : 0;
    StartIndex += NumElts;
    for (unsigned i = 0; i < HalfSize; ++i)
      ShuffleMask[i + HalfSize] = StartIndex + i;

    return Builder.CreateShuffleVector(V0, V1, ShuffleMask);
  }
  return nullptr;
}

/// visitCallInst - CallInst simplification.  This mostly only handles folding
/// of intrinsic instructions.  For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI, TLI))
    return visitFree(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations.  We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // No other transformations apply to volatile transfers.
    if (MI->isVolatile())
      return nullptr;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                           CI.getArgOperand(1)->getType(),
                           CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    uint64_t Size;
    if (getObjectSize(II->getArgOperand(0), Size, DL, TLI))
      return ReplaceInstUsesWith(CI, ConstantInt::get(CI.getType(), Size));
    return nullptr;
  }
  case Intrinsic::bswap: {
    Value *IIOperand = II->getArgOperand(0);
    Value *X = nullptr;

    // bswap(bswap(x)) -> x
    if (match(IIOperand, m_BSwap(m_Value(X))))
      return ReplaceInstUsesWith(CI, X);

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
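    // For example, with an i32 x truncated to i16, the two bswaps move the
    // high half of x into the low half, so the result is trunc(lshr(x, 16)).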
    if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
      unsigned C = X->getType()->getPrimitiveSizeInBits() -
        IIOperand->getType()->getPrimitiveSizeInBits();
      Value *CV = ConstantInt::get(X->getType(), C);
      Value *V = Builder->CreateLShr(X, CV);
      return new TruncInst(V, IIOperand->getType());
    }
    break;
  }

  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
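    // For example, if the operand is known to have bit 4 set and bits 0-3
    // clear, cttz must be 4 regardless of the remaining bits.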
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT) break;
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    computeKnownBits(II->getArgOperand(0), KnownZero, KnownOne, 0, II);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));
    break;
  }
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT) break;
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    computeKnownBits(II->getArgOperand(0), KnownZero, KnownOne, 0, II);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));
    break;
  }

  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      // Canonicalize constants into the RHS.
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }
    // fall through

  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow: {
    OverflowCheckFlavor OCF =
        IntrinsicIDToOverflowCheckFlavor(II->getIntrinsicID());
    assert(OCF != OCF_INVALID && "unexpected!");

    Value *OperationResult = nullptr;
    Constant *OverflowResult = nullptr;
    if (OptimizeOverflowCheck(OCF, II->getArgOperand(0), II->getArgOperand(1),
                              *II, OperationResult, OverflowResult))
      return CreateOverflowTuple(II, OperationResult, OverflowResult);

    break;
  }

  case Intrinsic::minnum:
  case Intrinsic::maxnum: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // fmin(x, x) -> x
    if (Arg0 == Arg1)
      return ReplaceInstUsesWith(CI, Arg0);

    const ConstantFP *C0 = dyn_cast<ConstantFP>(Arg0);
    const ConstantFP *C1 = dyn_cast<ConstantFP>(Arg1);

    // Canonicalize constants into the RHS.
    if (C0 && !C1) {
      II->setArgOperand(0, Arg1);
      II->setArgOperand(1, Arg0);
      return II;
    }

    // fmin(x, nan) -> x
    if (C1 && C1->isNaN())
      return ReplaceInstUsesWith(CI, Arg0);

    // Returning the non-undef operand is safe: if the undef operand were NaN,
    // we would return the other value anyway, and we may only return NaN when
    // both operands are NaN.
    //
    // fmin(undef, x) -> x
    if (isa<UndefValue>(Arg0))
      return ReplaceInstUsesWith(CI, Arg1);

    // fmin(x, undef) -> x
    if (isa<UndefValue>(Arg1))
      return ReplaceInstUsesWith(CI, Arg0);

    Value *X = nullptr;
    Value *Y = nullptr;
    if (II->getIntrinsicID() == Intrinsic::minnum) {
      // fmin(x, fmin(x, y)) -> fmin(x, y)
      // fmin(y, fmin(x, y)) -> fmin(x, y)
      if (match(Arg1, m_FMin(m_Value(X), m_Value(Y)))) {
        if (Arg0 == X || Arg0 == Y)
          return ReplaceInstUsesWith(CI, Arg1);
      }

      // fmin(fmin(x, y), x) -> fmin(x, y)
      // fmin(fmin(x, y), y) -> fmin(x, y)
      if (match(Arg0, m_FMin(m_Value(X), m_Value(Y)))) {
        if (Arg1 == X || Arg1 == Y)
          return ReplaceInstUsesWith(CI, Arg0);
      }

      // TODO: fmin(nnan x, inf) -> x
      // TODO: fmin(nnan ninf x, flt_max) -> x
      if (C1 && C1->isInfinity()) {
        // fmin(x, -inf) -> -inf
        if (C1->isNegative())
          return ReplaceInstUsesWith(CI, Arg1);
      }
    } else {
      assert(II->getIntrinsicID() == Intrinsic::maxnum);
      // fmax(x, fmax(x, y)) -> fmax(x, y)
      // fmax(y, fmax(x, y)) -> fmax(x, y)
      if (match(Arg1, m_FMax(m_Value(X), m_Value(Y)))) {
        if (Arg0 == X || Arg0 == Y)
          return ReplaceInstUsesWith(CI, Arg1);
      }

      // fmax(fmax(x, y), x) -> fmax(x, y)
      // fmax(fmax(x, y), y) -> fmax(x, y)
      if (match(Arg0, m_FMax(m_Value(X), m_Value(Y)))) {
        if (Arg1 == X || Arg1 == Y)
          return ReplaceInstUsesWith(CI, Arg0);
      }

      // TODO: fmax(nnan x, -inf) -> x
      // TODO: fmax(nnan ninf x, -flt_max) -> x
      if (C1 && C1->isInfinity()) {
        // fmax(x, inf) -> inf
        if (!C1->isNegative())
          return ReplaceInstUsesWith(CI, Arg1);
      }
    }
    break;
  }
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
    // Turn PPC lvx -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
        16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                         PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x: {
    // Turn PPC VSX loads into normal loads.
    Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                        PointerType::getUnqual(II->getType()));
    return new LoadInst(Ptr, Twine(""), false, 1);
  }
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, AC, DT) >=
        16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x: {
    // Turn PPC VSX stores into normal stores.
    Type *OpPtrTy = PointerType::getUnqual(II->getArgOperand(0)->getType());
    Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
    return new StoreInst(II->getArgOperand(0), Ptr, false, 1);
  }
  case Intrinsic::ppc_qpx_qvlfs:
    // Turn PPC QPX qvlfs -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
        16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                         PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_qpx_qvlfd:
    // Turn PPC QPX qvlfd -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, AC, DT) >=
        32) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                         PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_qpx_qvstfs:
    // Turn PPC QPX qvstfs -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, AC, DT) >=
        16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::ppc_qpx_qvstfd:
    // Turn PPC QPX qvstfd -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II, AC, DT) >=
        32) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
        16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
      return new StoreInst(II->getArgOperand(1), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64: {
    // These intrinsics only demand the 0th element of their input vectors. If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                              DemandedElts, UndefElts)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }

  // Constant fold <A x Bi> << Ci.
  // FIXME: We don't handle _dq because it's a shift of an i128, but is
  // represented in the IR as <2 x i64>. A per element shift is wrong.
  case Intrinsic::x86_sse2_psll_d:
  case Intrinsic::x86_sse2_psll_q:
  case Intrinsic::x86_sse2_psll_w:
  case Intrinsic::x86_sse2_pslli_d:
  case Intrinsic::x86_sse2_pslli_q:
  case Intrinsic::x86_sse2_pslli_w:
  case Intrinsic::x86_avx2_psll_d:
  case Intrinsic::x86_avx2_psll_q:
  case Intrinsic::x86_avx2_psll_w:
  case Intrinsic::x86_avx2_pslli_d:
  case Intrinsic::x86_avx2_pslli_q:
  case Intrinsic::x86_avx2_pslli_w:
  case Intrinsic::x86_sse2_psrl_d:
  case Intrinsic::x86_sse2_psrl_q:
  case Intrinsic::x86_sse2_psrl_w:
  case Intrinsic::x86_sse2_psrli_d:
  case Intrinsic::x86_sse2_psrli_q:
  case Intrinsic::x86_sse2_psrli_w:
  case Intrinsic::x86_avx2_psrl_d:
  case Intrinsic::x86_avx2_psrl_q:
  case Intrinsic::x86_avx2_psrl_w:
  case Intrinsic::x86_avx2_psrli_d:
  case Intrinsic::x86_avx2_psrli_q:
  case Intrinsic::x86_avx2_psrli_w: {
    // If the shift count is constant, simplify: to a zero vector if the count
    // is >= the element bit width, otherwise to shl/lshr.
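    // For example (a sketch): psrli.d(%v, 3) on <4 x i32> becomes
    //   lshr <4 x i32> %v, <i32 3, i32 3, i32 3, i32 3>
    // and any count of 32 or more folds to the zero vector.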
    auto CDV = dyn_cast<ConstantDataVector>(II->getArgOperand(1));
    auto CInt = dyn_cast<ConstantInt>(II->getArgOperand(1));
    if (!CDV && !CInt)
      break;
    ConstantInt *Count;
    if (CDV)
      Count = cast<ConstantInt>(CDV->getElementAsConstant(0));
    else
      Count = CInt;

    auto Vec = II->getArgOperand(0);
    auto VT = cast<VectorType>(Vec->getType());
    if (Count->getZExtValue() >
        VT->getElementType()->getPrimitiveSizeInBits() - 1)
      return ReplaceInstUsesWith(
          CI, ConstantAggregateZero::get(Vec->getType()));

    bool isPackedShiftLeft = true;
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::x86_sse2_psrl_d:
    case Intrinsic::x86_sse2_psrl_q:
    case Intrinsic::x86_sse2_psrl_w:
    case Intrinsic::x86_sse2_psrli_d:
    case Intrinsic::x86_sse2_psrli_q:
    case Intrinsic::x86_sse2_psrli_w:
    case Intrinsic::x86_avx2_psrl_d:
    case Intrinsic::x86_avx2_psrl_q:
    case Intrinsic::x86_avx2_psrl_w:
    case Intrinsic::x86_avx2_psrli_d:
    case Intrinsic::x86_avx2_psrli_q:
    case Intrinsic::x86_avx2_psrli_w: isPackedShiftLeft = false; break;
    }

    unsigned VWidth = VT->getNumElements();
    // Get a constant vector of the same type as the first operand.
    auto VTCI = ConstantInt::get(VT->getElementType(), Count->getZExtValue());
    if (isPackedShiftLeft)
      return BinaryOperator::CreateShl(Vec,
          Builder->CreateVectorSplat(VWidth, VTCI));

    return BinaryOperator::CreateLShr(Vec,
        Builder->CreateVectorSplat(VWidth, VTCI));
  }

  case Intrinsic::x86_sse41_pmovsxbw:
  case Intrinsic::x86_sse41_pmovsxwd:
  case Intrinsic::x86_sse41_pmovsxdq:
  case Intrinsic::x86_sse41_pmovzxbw:
  case Intrinsic::x86_sse41_pmovzxwd:
  case Intrinsic::x86_sse41_pmovzxdq: {
    // pmov{s|z}x ignores the upper half of its input vector.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    unsigned LowHalfElts = VWidth / 2;
    APInt InputDemandedElts(APInt::getBitsSet(VWidth, 0, LowHalfElts));
    APInt UndefElts(VWidth, 0);
    if (Value *TmpV = SimplifyDemandedVectorElts(
            II->getArgOperand(0), InputDemandedElts, UndefElts)) {
      II->setArgOperand(0, TmpV);
      return II;
    }
    break;
  }
  case Intrinsic::x86_sse41_insertps:
    if (Value *V = SimplifyX86insertps(*II, *Builder))
      return ReplaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_sse4a_insertqi: {
    // insertqi x, y, 64, 0 can just copy y's lower bits and leave the top
    // ones undef.
    // TODO: Eventually we should lower this intrinsic to IR.
    if (auto CIWidth = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
      if (auto CIStart = dyn_cast<ConstantInt>(II->getArgOperand(3))) {
        unsigned Index = CIStart->getZExtValue();
        // From AMD documentation: "a value of zero in the field length is
        // defined as length of 64".
        unsigned Length = CIWidth->equalsInt(0) ? 64 : CIWidth->getZExtValue();

        // From AMD documentation: "If the sum of the bit index + length field
        // is greater than 64, the results are undefined".

        // Note that both field index and field length are 8-bit quantities.
        // Since variables 'Index' and 'Length' are unsigned values
        // obtained from zero-extending field index and field length
        // respectively, their sum should never wrap around.
        if ((Index + Length) > 64)
          return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

        if (Length == 64 && Index == 0) {
          Value *Vec = II->getArgOperand(1);
          Value *Undef = UndefValue::get(Vec->getType());
          const uint32_t Mask[] = { 0, 2 };
          return ReplaceInstUsesWith(
              CI,
              Builder->CreateShuffleVector(
                  Vec, Undef, ConstantDataVector::get(
                                  II->getContext(), makeArrayRef(Mask))));

        } else if (auto Source =
                       dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
          if (Source->hasOneUse() &&
              Source->getArgOperand(1) == II->getArgOperand(1)) {
            // If the source of the insert has only one use and it's another
            // insert (and they're both inserting from the same vector), try to
            // bundle both together.
            auto CISourceWidth =
                dyn_cast<ConstantInt>(Source->getArgOperand(2));
            auto CISourceStart =
                dyn_cast<ConstantInt>(Source->getArgOperand(3));
            if (CISourceStart && CISourceWidth) {
              unsigned Start = CIStart->getZExtValue();
              unsigned Width = CIWidth->getZExtValue();
              unsigned End = Start + Width;
              unsigned SourceStart = CISourceStart->getZExtValue();
              unsigned SourceWidth = CISourceWidth->getZExtValue();
              unsigned SourceEnd = SourceStart + SourceWidth;
              unsigned NewStart, NewWidth;
              bool ShouldReplace = false;
              if (Start <= SourceStart && SourceStart <= End) {
                NewStart = Start;
                NewWidth = std::max(End, SourceEnd) - NewStart;
                ShouldReplace = true;
              } else if (SourceStart <= Start && Start <= SourceEnd) {
                NewStart = SourceStart;
                NewWidth = std::max(SourceEnd, End) - NewStart;
                ShouldReplace = true;
              }

              if (ShouldReplace) {
                Constant *ConstantWidth = ConstantInt::get(
                    II->getArgOperand(2)->getType(), NewWidth, false);
                Constant *ConstantStart = ConstantInt::get(
                    II->getArgOperand(3)->getType(), NewStart, false);
                Value *Args[4] = { Source->getArgOperand(0),
                                   II->getArgOperand(1), ConstantWidth,
                                   ConstantStart };
                Module *M = CI.getParent()->getParent()->getParent();
                Value *F =
                    Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_insertqi);
                return ReplaceInstUsesWith(CI, Builder->CreateCall(F, Args));
              }
            }
          }
        }
      }
    }
    break;
  }

  case Intrinsic::x86_sse41_pblendvb:
  case Intrinsic::x86_sse41_blendvps:
  case Intrinsic::x86_sse41_blendvpd:
  case Intrinsic::x86_avx_blendv_ps_256:
  case Intrinsic::x86_avx_blendv_pd_256:
  case Intrinsic::x86_avx2_pblendvb: {
    // Convert blendv* to vector selects if the mask is constant.
    // This optimization is convoluted because the intrinsic is defined to
    // take a vector of floats or doubles for the ps and pd versions.
    // FIXME: That should be changed.
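    // For example (a sketch): a blendvps whose mask elements have sign bits
    // <1, 0, 1, 0> becomes
    //   select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, %op1, %op0
    // since each result lane is chosen by the top bit of its mask element.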
    Value *Mask = II->getArgOperand(2);
    if (auto C = dyn_cast<ConstantDataVector>(Mask)) {
      auto Tyi1 = Builder->getInt1Ty();
      auto SelectorType = cast<VectorType>(Mask->getType());
      auto EltTy = SelectorType->getElementType();
      unsigned Size = SelectorType->getNumElements();
      unsigned BitWidth =
          EltTy->isFloatTy()
              ? 32
              : (EltTy->isDoubleTy() ? 64 : EltTy->getIntegerBitWidth());
      assert((BitWidth == 64 || BitWidth == 32 || BitWidth == 8) &&
             "Wrong arguments for variable blend intrinsic");
      SmallVector<Constant *, 32> Selectors;
      for (unsigned I = 0; I < Size; ++I) {
        // The intrinsics only read the top bit.
        uint64_t Selector;
        if (BitWidth == 8)
          Selector = C->getElementAsInteger(I);
        else
          Selector = C->getElementAsAPFloat(I).bitcastToAPInt().getZExtValue();
        Selectors.push_back(ConstantInt::get(Tyi1, Selector >> (BitWidth - 1)));
      }
      auto NewSelector = ConstantVector::get(Selectors);
      return SelectInst::Create(NewSelector, II->getArgOperand(1),
                                II->getArgOperand(0), "blendv");
    } else {
      break;
    }
  }

  case Intrinsic::x86_avx_vpermilvar_ps:
  case Intrinsic::x86_avx_vpermilvar_ps_256:
  case Intrinsic::x86_avx_vpermilvar_pd:
  case Intrinsic::x86_avx_vpermilvar_pd_256: {
    // Convert vpermil* to shufflevector if the mask is constant.
    Value *V = II->getArgOperand(1);
    unsigned Size = cast<VectorType>(V->getType())->getNumElements();
    assert(Size == 8 || Size == 4 || Size == 2);
    uint32_t Indexes[8];
    if (auto C = dyn_cast<ConstantDataVector>(V)) {
      // The intrinsics read only one or two bits; clear the rest.
      for (unsigned I = 0; I < Size; ++I) {
        uint32_t Index = C->getElementAsInteger(I) & 0x3;
        if (II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd ||
            II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd_256)
          Index >>= 1;
        Indexes[I] = Index;
      }
    } else if (isa<ConstantAggregateZero>(V)) {
      for (unsigned I = 0; I < Size; ++I)
        Indexes[I] = 0;
    } else {
      break;
    }
    // The _256 variants are a bit trickier since the mask bits always index
    // into the corresponding 128-bit half. In order to convert to a generic
    // shuffle, we have to make that explicit.
    if (II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_ps_256 ||
        II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd_256) {
      for (unsigned I = Size / 2; I < Size; ++I)
        Indexes[I] += Size / 2;
    }
    auto NewC =
        ConstantDataVector::get(V->getContext(), makeArrayRef(Indexes, Size));
    auto V1 = II->getArgOperand(0);
    auto V2 = UndefValue::get(V1->getType());
    auto Shuffle = Builder->CreateShuffleVector(V1, V2, NewC);
    return ReplaceInstUsesWith(CI, Shuffle);
  }

  case Intrinsic::x86_avx_vperm2f128_pd_256:
  case Intrinsic::x86_avx_vperm2f128_ps_256:
  case Intrinsic::x86_avx_vperm2f128_si_256:
  case Intrinsic::x86_avx2_vperm2i128:
    if (Value *V = SimplifyX86vperm2(*II, *Builder))
      return ReplaceInstUsesWith(*II, V);
    break;

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    // Note that ppc_altivec_vperm has a big-endian bias, so when creating
    // a vector shuffle for little endian, we must undo the transformation
    // performed on vec_perm in altivec.h.  That is, we must complement
    // the permutation mask with respect to 31 and reverse the order of
    // V1 and V2.
    if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
      assert(Mask->getType()->getVectorNumElements() == 16 &&
             "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        Constant *Elt = Mask->getAggregateElement(i);
        if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getAggregateElement(i)))
            continue;
          unsigned Idx =
            cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.
          if (DL.isLittleEndian())
            Idx = 31 - Idx;

          if (!ExtractedElts[Idx]) {
            Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
            Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
                                            Builder->getInt32(Idx&15));
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                                                Builder->getInt32(i));
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;

  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), DL, II, AC, DT);
    unsigned AlignArg = II->getNumArgOperands() - 1;
    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
      II->setArgOperand(AlignArg,
                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                         MemAlign, false));
      return II;
    }
    break;
  }

  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu:
  case Intrinsic::aarch64_neon_smull:
  case Intrinsic::aarch64_neon_umull: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // Handle mul by zero first:
    if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
      return ReplaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
    }

    // Check for constant LHS & RHS - in this case we just simplify.
    bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu ||
                 II->getIntrinsicID() == Intrinsic::aarch64_neon_umull);
    VectorType *NewVT = cast<VectorType>(II->getType());
    if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
      if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
        CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
        CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);

        return ReplaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
      }

      // Couldn't simplify - canonicalize constant to the RHS.
      std::swap(Arg0, Arg1);
    }

    // Handle mul by one:
    if (Constant *CV1 = dyn_cast<Constant>(Arg1))
      if (ConstantInt *Splat =
              dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
        if (Splat->isOne())
          return CastInst::CreateIntegerCast(Arg0, II->getType(),
                                             /*isSigned=*/!Zext);

    break;
  }

  case Intrinsic::AMDGPU_rcp: {
    if (const ConstantFP *C = dyn_cast<ConstantFP>(II->getArgOperand(0))) {
      const APFloat &ArgVal = C->getValueAPF();
      APFloat Val(ArgVal.getSemantics(), 1.0);
      APFloat::opStatus Status = Val.divide(ArgVal,
                                            APFloat::rmNearestTiesToEven);
      // Only do this if it was exact and therefore not dependent on the
      // rounding mode.
      if (Status == APFloat::opOK)
        return ReplaceInstUsesWith(CI, ConstantFP::get(II->getContext(), Val));
    }

    break;
  }
  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore.  This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return, resume, or unwind block and if there
    // are no allocas or calls between the restore and the return, nuke the
    // restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  case Intrinsic::assume: {
    // Canonicalize assume(a && b) -> assume(a); assume(b);
    // Note: New assumption intrinsics created here are registered by
    // the InstCombineIRInserter object.
    Value *IIOperand = II->getArgOperand(0), *A, *B,
          *AssumeIntrinsic = II->getCalledValue();
    if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
      Builder->CreateCall(AssumeIntrinsic, A, II->getName());
      Builder->CreateCall(AssumeIntrinsic, B, II->getName());
      return EraseInstFromFunction(*II);
    }
    // assume(!(a || b)) -> assume(!a); assume(!b);
    if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
      Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(A),
                          II->getName());
      Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(B),
                          II->getName());
      return EraseInstFromFunction(*II);
    }

    // assume( (load addr) != null ) -> add 'nonnull' metadata to load
    // (if assume is valid at the load)
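    // For example (a sketch, using the typed-pointer IR of this era):
    //   %p = load i32*, i32** %addr
    //   %cmp = icmp ne i32* %p, null
    //   call void @llvm.assume(i1 %cmp)
    // lets us tag the load of %p with !nonnull metadata.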
    if (ICmpInst* ICmp = dyn_cast<ICmpInst>(IIOperand)) {
      Value *LHS = ICmp->getOperand(0);
      Value *RHS = ICmp->getOperand(1);
      if (ICmpInst::ICMP_NE == ICmp->getPredicate() &&
          isa<LoadInst>(LHS) &&
          isa<Constant>(RHS) &&
          RHS->getType()->isPointerTy() &&
          cast<Constant>(RHS)->isNullValue()) {
        LoadInst* LI = cast<LoadInst>(LHS);
        if (isValidAssumeForContext(II, LI, DT)) {
          MDNode *MD = MDNode::get(II->getContext(), None);
          LI->setMetadata(LLVMContext::MD_nonnull, MD);
          return EraseInstFromFunction(*II);
        }
      }
      // TODO: apply nonnull return attributes to calls and invokes
      // TODO: apply range metadata for range check patterns?
    }
    // If there is a dominating assume with the same condition as this one,
    // then this one is redundant and should be removed.
    APInt KnownZero(1, 0), KnownOne(1, 0);
    computeKnownBits(IIOperand, KnownZero, KnownOne, 0, II);
    if (KnownOne.isAllOnesValue())
      return EraseInstFromFunction(*II);

    break;
  }
  case Intrinsic::experimental_gc_relocate: {
    // Translate facts known about a pointer before relocating into
    // facts about the relocate value, while being careful to
    // preserve relocation semantics.
    GCRelocateOperands Operands(II);
    Value *DerivedPtr = Operands.derivedPtr();

    // Remove the relocation if unused; note that this check is required
    // to prevent the cases below from looping forever.
    if (II->use_empty())
      return EraseInstFromFunction(*II);

    // Undef is undef, even after relocation.
    // TODO: provide a hook for this in GCStrategy.  This is clearly legal for
    // most practical collectors, but there was discussion in the review thread
    // about whether it was legal for all possible collectors.
    if (isa<UndefValue>(DerivedPtr))
      return ReplaceInstUsesWith(*II, DerivedPtr);

    // The relocation of null will be null for almost any collector.
    // TODO: provide a hook for this in GCStrategy.  There might be some weird
    // collector for which this property does not hold.
    if (isa<ConstantPointerNull>(DerivedPtr))
      return ReplaceInstUsesWith(*II, DerivedPtr);

    // isKnownNonNull -> nonnull attribute
    if (isKnownNonNull(DerivedPtr))
      II->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);

    // isDereferenceablePointer -> deref attribute
    if (DerivedPtr->isDereferenceablePointer(DL)) {
      if (Argument *A = dyn_cast<Argument>(DerivedPtr)) {
        uint64_t Bytes = A->getDereferenceableBytes();
        II->addDereferenceableAttr(AttributeSet::ReturnIndex, Bytes);
      }
    }

    // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
    // Canonicalize on the type from the uses to the defs

    // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}
   1230 /// isSafeToEliminateVarargsCast - If this cast does not affect the value
   1231 /// passed through the varargs area, we can eliminate the use of the cast.
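        /// For example (a sketch; @f, %fmt and %p are hypothetical):
        ///   %q = bitcast i32* %p to i8*
        ///   call void (i8*, ...)* @f(i8* %fmt, i8* %q)
        /// can pass %p directly, since a lossless pointer cast does not change
        /// the bits that reach the va_arg area.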
   1232 static bool isSafeToEliminateVarargsCast(const CallSite CS,
   1233                                          const DataLayout &DL,
   1234                                          const CastInst *const CI,
   1235                                          const int ix) {
   1236   if (!CI->isLosslessCast())
   1237     return false;
   1238 
   1239   // If this is a GC intrinsic, avoid munging types.  We need types for
   1240   // statepoint reconstruction in SelectionDAG.
   1241   // TODO: This is probably something which should be expanded to all
   1242   // intrinsics since the entire point of intrinsics is that
   1243   // they are understandable by the optimizer.
   1244   if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS))
   1245     return false;
   1246 
   1247   // The size of ByVal or InAlloca arguments is derived from the type, so we
   1248   // can't change to a type with a different size.  If the size were
   1249   // passed explicitly we could avoid this check.
   1250   if (!CS.isByValOrInAllocaArgument(ix))
   1251     return true;
   1252 
   1253   Type* SrcTy =
   1254             cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
   1255   Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
   1256   if (!SrcTy->isSized() || !DstTy->isSized())
   1257     return false;
   1258   if (DL.getTypeAllocSize(SrcTy) != DL.getTypeAllocSize(DstTy))
   1259     return false;
   1260   return true;
   1261 }
   1262 
   1263 // Try to fold some different types of calls here.
   1264 // Currently we're only working with the checking functions, memcpy_chk,
   1265 // mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
   1266 // strcat_chk and strncat_chk.
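        // For example (a sketch; whether this fires is decided by the
        // simplifier, not here):
        //   %r = call i8* @__memcpy_chk(i8* %dst, i8* %src, i64 4, i64 8)
        // may become a plain 4-byte memcpy, since the copied length is known
        // not to exceed the object size.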
   1267 Instruction *InstCombiner::tryOptimizeCall(CallInst *CI) {
   1268   if (!CI->getCalledFunction()) return nullptr;
   1269 
   1270   auto InstCombineRAUW = [this](Instruction *From, Value *With) {
   1271     ReplaceInstUsesWith(*From, With);
   1272   };
   1273   LibCallSimplifier Simplifier(DL, TLI, InstCombineRAUW);
   1274   if (Value *With = Simplifier.optimizeCall(CI)) {
   1275     ++NumSimplified;
   1276     return CI->use_empty() ? CI : ReplaceInstUsesWith(*CI, With);
   1277   }
   1278 
   1279   return nullptr;
   1280 }
   1281 
   1282 static IntrinsicInst *FindInitTrampolineFromAlloca(Value *TrampMem) {
   1283   // Strip off at most one level of pointer casts, looking for an alloca.  This
   1284   // is good enough in practice and simpler than handling any number of casts.
   1285   Value *Underlying = TrampMem->stripPointerCasts();
   1286   if (Underlying != TrampMem &&
   1287       (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
   1288     return nullptr;
   1289   if (!isa<AllocaInst>(Underlying))
   1290     return nullptr;
   1291 
   1292   IntrinsicInst *InitTrampoline = nullptr;
   1293   for (User *U : TrampMem->users()) {
   1294     IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
   1295     if (!II)
   1296       return nullptr;
   1297     if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
   1298       if (InitTrampoline)
   1299         // More than one init.trampoline writes to this value.  Give up.
   1300         return nullptr;
   1301       InitTrampoline = II;
   1302       continue;
   1303     }
   1304     if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
   1305       // Allow any number of calls to adjust.trampoline.
   1306       continue;
   1307     return nullptr;
   1308   }
   1309 
   1310   // No call to init.trampoline found.
   1311   if (!InitTrampoline)
   1312     return nullptr;
   1313 
   1314   // Check that the alloca is being used in the expected way.
   1315   if (InitTrampoline->getOperand(0) != TrampMem)
   1316     return nullptr;
   1317 
   1318   return InitTrampoline;
   1319 }
   1320 
   1321 static IntrinsicInst *FindInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
   1322                                                Value *TrampMem) {
   1323   // Visit all the previous instructions in the basic block, and try to find a
   1324   // init.trampoline which has a direct path to the adjust.trampoline.
   1325   for (BasicBlock::iterator I = AdjustTramp,
   1326        E = AdjustTramp->getParent()->begin(); I != E; ) {
   1327     Instruction *Inst = --I;
   1328     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
   1329       if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
   1330           II->getOperand(0) == TrampMem)
   1331         return II;
   1332     if (Inst->mayWriteToMemory())
   1333       return nullptr;
   1334   }
   1335   return nullptr;
   1336 }
   1337 
   1338 // Given a call to llvm.adjust.trampoline, find and return the corresponding
   1339 // call to llvm.init.trampoline if the call to the trampoline can be optimized
   1340 // to a direct call to a function.  Otherwise return nullptr.
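        // For example (a sketch; @f and %nest are hypothetical and the syntax
        // is era-approximate; %f is the target function @f cast to i8*):
        //   %tramp = alloca [16 x i8]
        //   %t = getelementptr [16 x i8]* %tramp, i32 0, i32 0
        //   call void @llvm.init.trampoline(i8* %t, i8* %f, i8* %nest)
        //   %fp = call i8* @llvm.adjust.trampoline(i8* %t)
        // Given %fp (possibly behind pointer casts), this returns the
        // init.trampoline call.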
   1341 //
   1342 static IntrinsicInst *FindInitTrampoline(Value *Callee) {
   1343   Callee = Callee->stripPointerCasts();
   1344   IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
   1345   if (!AdjustTramp ||
   1346       AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
   1347     return nullptr;
   1348 
   1349   Value *TrampMem = AdjustTramp->getOperand(0);
   1350 
   1351   if (IntrinsicInst *IT = FindInitTrampolineFromAlloca(TrampMem))
   1352     return IT;
   1353   if (IntrinsicInst *IT = FindInitTrampolineFromBB(AdjustTramp, TrampMem))
   1354     return IT;
   1355   return nullptr;
   1356 }
   1357 
   1358 // visitCallSite - Improvements for call and invoke instructions.
   1359 //
   1360 Instruction *InstCombiner::visitCallSite(CallSite CS) {
   1361   if (isAllocLikeFn(CS.getInstruction(), TLI))
   1362     return visitAllocSite(*CS.getInstruction());
   1363 
   1364   bool Changed = false;
   1365 
   1366   // If the callee is a pointer to a function, attempt to move any casts to the
   1367   // arguments of the call/invoke.
   1368   Value *Callee = CS.getCalledValue();
   1369   if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
   1370     return nullptr;
   1371 
   1372   if (Function *CalleeF = dyn_cast<Function>(Callee))
   1373     // If the call and callee calling conventions don't match, this call must
   1374     // be unreachable, as the call is undefined.
   1375     if (CalleeF->getCallingConv() != CS.getCallingConv() &&
   1376         // Only do this for calls to a function with a body.  A prototype may
   1377         // not actually end up matching the implementation's calling conv for a
   1378         // variety of reasons (e.g. it may be written in assembly).
   1379         !CalleeF->isDeclaration()) {
   1380       Instruction *OldCall = CS.getInstruction();
   1381       new StoreInst(ConstantInt::getTrue(Callee->getContext()),
   1382                 UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
   1383                                   OldCall);
   1384       // If OldCall does not return void then replaceAllUsesWith undef.
   1385       // This allows ValueHandlers and custom metadata to adjust themselves.
   1386       if (!OldCall->getType()->isVoidTy())
   1387         ReplaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
   1388       if (isa<CallInst>(OldCall))
   1389         return EraseInstFromFunction(*OldCall);
   1390 
   1391       // We cannot remove an invoke, because that would change the CFG; just
   1392       // change the callee to a null pointer.
   1393       cast<InvokeInst>(OldCall)->setCalledFunction(
   1394                                     Constant::getNullValue(CalleeF->getType()));
   1395       return nullptr;
   1396     }
   1397 
   1398   if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
   1399     // If CS does not return void then replaceAllUsesWith undef.
   1400     // This allows ValueHandlers and custom metadata to adjust themselves.
   1401     if (!CS.getInstruction()->getType()->isVoidTy())
   1402       ReplaceInstUsesWith(*CS.getInstruction(),
   1403                           UndefValue::get(CS.getInstruction()->getType()));
   1404 
   1405     if (isa<InvokeInst>(CS.getInstruction())) {
   1406       // Can't remove an invoke because we cannot change the CFG.
   1407       return nullptr;
   1408     }
   1409 
   1410     // This instruction is not reachable; just remove it.  We insert a store to
   1411     // undef so that we know that this code is not reachable, despite the fact
   1412     // that we can't modify the CFG here.
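            // (The "store i1 true, i1* undef" emitted below is instcombine's
            // usual marker for code it has proven unreachable.)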
   1413     new StoreInst(ConstantInt::getTrue(Callee->getContext()),
   1414                   UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
   1415                   CS.getInstruction());
   1416 
   1417     return EraseInstFromFunction(*CS.getInstruction());
   1418   }
   1419 
   1420   if (IntrinsicInst *II = FindInitTrampoline(Callee))
   1421     return transformCallThroughTrampoline(CS, II);
   1422 
   1423   PointerType *PTy = cast<PointerType>(Callee->getType());
   1424   FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
   1425   if (FTy->isVarArg()) {
   1426     int ix = FTy->getNumParams();
   1427     // See if we can optimize any arguments passed through the varargs area of
   1428     // the call.
   1429     for (CallSite::arg_iterator I = CS.arg_begin() + FTy->getNumParams(),
   1430            E = CS.arg_end(); I != E; ++I, ++ix) {
   1431       CastInst *CI = dyn_cast<CastInst>(*I);
   1432       if (CI && isSafeToEliminateVarargsCast(CS, DL, CI, ix)) {
   1433         *I = CI->getOperand(0);
   1434         Changed = true;
   1435       }
   1436     }
   1437   }
   1438 
   1439   if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
   1440     // Inline asm calls cannot throw - mark them 'nounwind'.
   1441     CS.setDoesNotThrow();
   1442     Changed = true;
   1443   }
   1444 
   1445   // Try to optimize the call if possible; we require DataLayout for most of
   1446   // this.  None of these calls is seen as possibly dead, so go ahead and
   1447   // delete the instruction now.
   1448   if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
   1449     Instruction *I = tryOptimizeCall(CI);
   1450     // If we changed something, return the result; otherwise fall through
   1451     // to the check below.
   1452     if (I) return EraseInstFromFunction(*I);
   1453   }
   1454 
   1455   return Changed ? CS.getInstruction() : nullptr;
   1456 }
   1457 
   1458 // transformConstExprCastCall - If the callee is a constexpr cast of a function,
   1459 // attempt to move the cast to the arguments of the call/invoke.
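        // For example (a sketch; @f and %p are hypothetical):
        //   %r = call i8* bitcast (i32* (i32*)* @f to i8* (i8*)*)(i8* %p)
        // can become:
        //   %q = bitcast i8* %p to i32*
        //   %s = call i32* @f(i32* %q)
        //   %r = bitcast i32* %s to i8*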
   1460 //
   1461 bool InstCombiner::transformConstExprCastCall(CallSite CS) {
   1462   Function *Callee =
   1463     dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
   1464   if (!Callee)
   1465     return false;
   1466   // The prototypes of thunks are a lie; don't try to directly call such
   1467   // functions.
   1468   if (Callee->hasFnAttribute("thunk"))
   1469     return false;
   1470   Instruction *Caller = CS.getInstruction();
   1471   const AttributeSet &CallerPAL = CS.getAttributes();
   1472 
   1473   // Okay, this is a cast from a function to a different type.  Unless doing so
   1474   // would cause a type conversion of one of our arguments, change this call to
   1475   // be a direct call with arguments cast to the appropriate types.
   1476   //
   1477   FunctionType *FT = Callee->getFunctionType();
   1478   Type *OldRetTy = Caller->getType();
   1479   Type *NewRetTy = FT->getReturnType();
   1480 
   1481   // Check to see if we are changing the return type...
   1482   if (OldRetTy != NewRetTy) {
   1483 
   1484     if (NewRetTy->isStructTy())
   1485       return false; // TODO: Handle multiple return values.
   1486 
   1487     if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
   1488       if (Callee->isDeclaration())
   1489         return false;   // Cannot transform this return value.
   1490 
   1491       if (!Caller->use_empty() &&
   1492           // void -> non-void is handled specially
   1493           !NewRetTy->isVoidTy())
   1494         return false;   // Cannot transform this return value.
   1495     }
   1496 
   1497     if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
   1498       AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);
   1499       if (RAttrs.hasAttributes(
   1500               AttributeFuncs::typeIncompatible(NewRetTy,
   1501                                                AttributeSet::ReturnIndex),
   1502               AttributeSet::ReturnIndex))
   1503         return false;   // Attribute not compatible with transformed value.
   1504     }
   1505 
   1506     // If the callsite is an invoke instruction, and the return value is used by
   1507     // a PHI node in a successor, we cannot change the return type of the call
   1508     // because there is no place to put the cast instruction (without breaking
   1509     // the critical edge).  Bail out in this case.
   1510     if (!Caller->use_empty())
   1511       if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
   1512         for (User *U : II->users())
   1513           if (PHINode *PN = dyn_cast<PHINode>(U))
   1514             if (PN->getParent() == II->getNormalDest() ||
   1515                 PN->getParent() == II->getUnwindDest())
   1516               return false;
   1517   }
   1518 
   1519   unsigned NumActualArgs = CS.arg_size();
   1520   unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
   1521 
   1522   // Prevent us turning:
   1523   // declare void @takes_i32_inalloca(i32* inalloca)
   1524   //  call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
   1525   //
   1526   // into:
   1527   //  call void @takes_i32_inalloca(i32* null)
   1528   //
   1529   //  Similarly, avoid folding away bitcasts of byval calls.
   1530   if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
   1531       Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal))
   1532     return false;
   1533 
   1534   CallSite::arg_iterator AI = CS.arg_begin();
   1535   for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
   1536     Type *ParamTy = FT->getParamType(i);
   1537     Type *ActTy = (*AI)->getType();
   1538 
   1539     if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
   1540       return false;   // Cannot transform this parameter value.
   1541 
   1542     if (AttrBuilder(CallerPAL.getParamAttributes(i + 1), i + 1)
   1543             .hasAttributes(AttributeFuncs::typeIncompatible(ParamTy, i + 1),
   1544                            i + 1))
   1545       return false;   // Attribute not compatible with transformed value.
   1546 
   1547     if (CS.isInAllocaArgument(i))
   1548       return false;   // Cannot transform to and from inalloca.
   1549 
   1550     // If the parameter is passed as a byval argument, then we have to have a
   1551     // sized type and the sized type has to have the same size as the old type.
   1552     if (ParamTy != ActTy &&
   1553         CallerPAL.getParamAttributes(i + 1).hasAttribute(i + 1,
   1554                                                          Attribute::ByVal)) {
   1555       PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
   1556       if (!ParamPTy || !ParamPTy->getElementType()->isSized())
   1557         return false;
   1558 
   1559       Type *CurElTy = ActTy->getPointerElementType();
   1560       if (DL.getTypeAllocSize(CurElTy) !=
   1561           DL.getTypeAllocSize(ParamPTy->getElementType()))
   1562         return false;
   1563     }
   1564   }
   1565 
   1566   if (Callee->isDeclaration()) {
   1567     // Do not delete arguments unless we have a function body.
   1568     if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
   1569       return false;
   1570 
   1571     // If the callee is just a declaration, don't change the varargsness of the
   1572     // call.  We don't want to introduce a varargs call where one doesn't
   1573     // already exist.
   1574     PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
   1575     if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
   1576       return false;
   1577 
   1578     // If both the callee and the cast type are varargs, we still have to make
   1579     // sure the number of fixed parameters is the same, or we have the same
   1580     // ABI issues as if we introduce a varargs call.
   1581     if (FT->isVarArg() &&
   1582         cast<FunctionType>(APTy->getElementType())->isVarArg() &&
   1583         FT->getNumParams() !=
   1584         cast<FunctionType>(APTy->getElementType())->getNumParams())
   1585       return false;
   1586   }
   1587 
   1588   if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
   1589       !CallerPAL.isEmpty())
   1590     // In this case we have more arguments than the new function type accepts,
   1591     // but we won't be dropping them.  Check that these extra arguments have
   1592     // attributes that are compatible with being a vararg call argument.
   1593     for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
   1594       unsigned Index = CallerPAL.getSlotIndex(i - 1);
   1595       if (Index <= FT->getNumParams())
   1596         break;
   1597 
   1598       // Check if it has an attribute that's incompatible with varargs.
   1599       AttributeSet PAttrs = CallerPAL.getSlotAttributes(i - 1);
   1600       if (PAttrs.hasAttribute(Index, Attribute::StructRet))
   1601         return false;
   1602     }
   1603 
   1604 
   1605   // Okay, we decided that this is a safe thing to do: go ahead and start
   1606   // inserting cast instructions as necessary.
   1607   std::vector<Value*> Args;
   1608   Args.reserve(NumActualArgs);
   1609   SmallVector<AttributeSet, 8> attrVec;
   1610   attrVec.reserve(NumCommonArgs);
   1611 
   1612   // Get any return attributes.
   1613   AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);
   1614 
   1615   // If the return value is not being used, the type may not be compatible
   1616   // with the existing attributes.  Wipe out any problematic attributes.
   1617   RAttrs.removeAttributes(
   1618       AttributeFuncs::typeIncompatible(NewRetTy,
   1619                                        AttributeSet::ReturnIndex),
   1620       AttributeSet::ReturnIndex);
   1621 
   1622   // Add the new return attributes.
   1623   if (RAttrs.hasAttributes())
   1624     attrVec.push_back(AttributeSet::get(Caller->getContext(),
   1625                                         AttributeSet::ReturnIndex, RAttrs));
   1626 
   1627   AI = CS.arg_begin();
   1628   for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
   1629     Type *ParamTy = FT->getParamType(i);
   1630 
   1631     if ((*AI)->getType() == ParamTy) {
   1632       Args.push_back(*AI);
   1633     } else {
   1634       Args.push_back(Builder->CreateBitOrPointerCast(*AI, ParamTy));
   1635     }
   1636 
   1637     // Add any parameter attributes.
   1638     AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
   1639     if (PAttrs.hasAttributes())
   1640       attrVec.push_back(AttributeSet::get(Caller->getContext(), i + 1,
   1641                                           PAttrs));
   1642   }
   1643 
   1644   // If the function takes more arguments than the call was taking, add them
   1645   // now.
   1646   for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
   1647     Args.push_back(Constant::getNullValue(FT->getParamType(i)));
   1648 
   1649   // Handle any extra call arguments beyond what the function type accepts.
   1650   if (FT->getNumParams() < NumActualArgs) {
   1651     // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
   1652     if (FT->isVarArg()) {
   1653       // Add all of the arguments in their promoted form to the arg list.
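              // (getPromotedType widens integer arguments narrower than 32
              // bits to i32, mirroring C's default argument promotions for
              // variadic calls.)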
   1654       for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
   1655         Type *PTy = getPromotedType((*AI)->getType());
   1656         if (PTy != (*AI)->getType()) {
   1657           // Must promote to pass through va_arg area!
   1658           Instruction::CastOps opcode =
   1659             CastInst::getCastOpcode(*AI, false, PTy, false);
   1660           Args.push_back(Builder->CreateCast(opcode, *AI, PTy));
   1661         } else {
   1662           Args.push_back(*AI);
   1663         }
   1664 
   1665         // Add any parameter attributes.
   1666         AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
   1667         if (PAttrs.hasAttributes())
   1668           attrVec.push_back(AttributeSet::get(FT->getContext(), i + 1,
   1669                                               PAttrs));
   1670       }
   1671     }
   1672   }
   1673 
   1674   AttributeSet FnAttrs = CallerPAL.getFnAttributes();
   1675   if (CallerPAL.hasAttributes(AttributeSet::FunctionIndex))
   1676     attrVec.push_back(AttributeSet::get(Callee->getContext(), FnAttrs));
   1677 
   1678   if (NewRetTy->isVoidTy())
   1679     Caller->setName("");   // Void type should not have a name.
   1680 
   1681   const AttributeSet &NewCallerPAL = AttributeSet::get(Callee->getContext(),
   1682                                                        attrVec);
   1683 
   1684   Instruction *NC;
   1685   if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
   1686     NC = Builder->CreateInvoke(Callee, II->getNormalDest(),
   1687                                II->getUnwindDest(), Args);
   1688     NC->takeName(II);
   1689     cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
   1690     cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
   1691   } else {
   1692     CallInst *CI = cast<CallInst>(Caller);
   1693     NC = Builder->CreateCall(Callee, Args);
   1694     NC->takeName(CI);
   1695     if (CI->isTailCall())
   1696       cast<CallInst>(NC)->setTailCall();
   1697     cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
   1698     cast<CallInst>(NC)->setAttributes(NewCallerPAL);
   1699   }
   1700 
   1701   // Insert a cast of the return type as necessary.
   1702   Value *NV = NC;
   1703   if (OldRetTy != NV->getType() && !Caller->use_empty()) {
   1704     if (!NV->getType()->isVoidTy()) {
   1705       NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
   1706       NC->setDebugLoc(Caller->getDebugLoc());
   1707 
   1708       // If this is an invoke instruction, we should insert it after the first
   1709       // non-phi instruction in the normal successor block.
   1710       if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
   1711         BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
   1712         InsertNewInstBefore(NC, *I);
   1713       } else {
   1714         // Otherwise, it's a call, just insert cast right after the call.
   1715         InsertNewInstBefore(NC, *Caller);
   1716       }
   1717       Worklist.AddUsersToWorkList(*Caller);
   1718     } else {
   1719       NV = UndefValue::get(Caller->getType());
   1720     }
   1721   }
   1722 
   1723   if (!Caller->use_empty())
   1724     ReplaceInstUsesWith(*Caller, NV);
   1725   else if (Caller->hasValueHandle()) {
   1726     if (OldRetTy == NV->getType())
   1727       ValueHandleBase::ValueIsRAUWd(Caller, NV);
   1728     else
   1729       // We cannot call ValueIsRAUWd with a different type; since the
   1730       // tracked value is about to disappear anyway, signal its deletion.
   1731       ValueHandleBase::ValueIsDeleted(Caller);
   1732   }
   1733 
   1734   EraseInstFromFunction(*Caller);
   1735   return true;
   1736 }
   1737 
   1738 // transformCallThroughTrampoline - Turn a call to a function created by the
   1739 // init.trampoline / adjust.trampoline intrinsic pair into a direct call to
   1740 // the underlying function.
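        // For example (a sketch; @f and %nest are hypothetical):
        //   %fp = call i8* @llvm.adjust.trampoline(i8* %t)
        //   %f = bitcast i8* %fp to i32 (i32)*
        //   %r = call i32 %f(i32 %x)
        // becomes a direct call with the chain argument spliced in:
        //   %r = call i32 @f(i8* nest %nest, i32 %x)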
   1741 //
   1742 Instruction *
   1743 InstCombiner::transformCallThroughTrampoline(CallSite CS,
   1744                                              IntrinsicInst *Tramp) {
   1745   Value *Callee = CS.getCalledValue();
   1746   PointerType *PTy = cast<PointerType>(Callee->getType());
   1747   FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
   1748   const AttributeSet &Attrs = CS.getAttributes();
   1749 
   1750   // If the call already has the 'nest' attribute somewhere then give up;
   1751   // otherwise 'nest' would occur twice after splicing in the chain.
   1752   if (Attrs.hasAttrSomewhere(Attribute::Nest))
   1753     return nullptr;
   1754 
   1755   assert(Tramp &&
   1756          "transformCallThroughTrampoline called with incorrect CallSite.");
   1757 
   1758   Function *NestF =cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
   1759   PointerType *NestFPTy = cast<PointerType>(NestF->getType());
   1760   FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());
   1761 
   1762   const AttributeSet &NestAttrs = NestF->getAttributes();
   1763   if (!NestAttrs.isEmpty()) {
   1764     unsigned NestIdx = 1;
   1765     Type *NestTy = nullptr;
   1766     AttributeSet NestAttr;
   1767 
   1768     // Look for a parameter marked with the 'nest' attribute.
   1769     for (FunctionType::param_iterator I = NestFTy->param_begin(),
   1770          E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
   1771       if (NestAttrs.hasAttribute(NestIdx, Attribute::Nest)) {
   1772         // Record the parameter type and any other attributes.
   1773         NestTy = *I;
   1774         NestAttr = NestAttrs.getParamAttributes(NestIdx);
   1775         break;
   1776       }
   1777 
   1778     if (NestTy) {
   1779       Instruction *Caller = CS.getInstruction();
   1780       std::vector<Value*> NewArgs;
   1781       NewArgs.reserve(CS.arg_size() + 1);
   1782 
   1783       SmallVector<AttributeSet, 8> NewAttrs;
   1784       NewAttrs.reserve(Attrs.getNumSlots() + 1);
   1785 
   1786       // Insert the nest argument into the call argument list, which may
   1787       // mean appending it.  Likewise for attributes.
   1788 
   1789       // Add any result attributes.
   1790       if (Attrs.hasAttributes(AttributeSet::ReturnIndex))
   1791         NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
   1792                                              Attrs.getRetAttributes()));
   1793 
   1794       {
   1795         unsigned Idx = 1;
   1796         CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
   1797         do {
   1798           if (Idx == NestIdx) {
   1799             // Add the chain argument and attributes.
   1800             Value *NestVal = Tramp->getArgOperand(2);
   1801             if (NestVal->getType() != NestTy)
   1802               NestVal = Builder->CreateBitCast(NestVal, NestTy, "nest");
   1803             NewArgs.push_back(NestVal);
   1804             NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
   1805                                                  NestAttr));
   1806           }
   1807 
   1808           if (I == E)
   1809             break;
   1810 
   1811           // Add the original argument and attributes.
   1812           NewArgs.push_back(*I);
   1813           AttributeSet Attr = Attrs.getParamAttributes(Idx);
   1814           if (Attr.hasAttributes(Idx)) {
   1815             AttrBuilder B(Attr, Idx);
   1816             NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
   1817                                                  Idx + (Idx >= NestIdx), B));
   1818           }
   1819 
   1820           ++Idx; ++I;
   1821         } while (1);
   1822       }
   1823 
   1824       // Add any function attributes.
   1825       if (Attrs.hasAttributes(AttributeSet::FunctionIndex))
   1826         NewAttrs.push_back(AttributeSet::get(FTy->getContext(),
   1827                                              Attrs.getFnAttributes()));
   1828 
   1829       // The trampoline may have been bitcast to a bogus type (FTy).
   1830       // Handle this by synthesizing a new function type, equal to FTy
   1831       // with the chain parameter inserted.
   1832 
   1833       std::vector<Type*> NewTypes;
   1834       NewTypes.reserve(FTy->getNumParams()+1);
   1835 
   1836       // Insert the chain's type into the list of parameter types, which may
   1837       // mean appending it.
   1838       {
   1839         unsigned Idx = 1;
   1840         FunctionType::param_iterator I = FTy->param_begin(),
   1841           E = FTy->param_end();
   1842 
   1843         do {
   1844           if (Idx == NestIdx)
   1845             // Add the chain's type.
   1846             NewTypes.push_back(NestTy);
   1847 
   1848           if (I == E)
   1849             break;
   1850 
   1851           // Add the original type.
   1852           NewTypes.push_back(*I);
   1853 
   1854           ++Idx; ++I;
   1855         } while (1);
   1856       }
   1857 
   1858       // Replace the trampoline call with a direct call.  Let the generic
   1859       // code sort out any function type mismatches.
   1860       FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
   1861                                                 FTy->isVarArg());
   1862       Constant *NewCallee =
   1863         NestF->getType() == PointerType::getUnqual(NewFTy) ?
   1864         NestF : ConstantExpr::getBitCast(NestF,
   1865                                          PointerType::getUnqual(NewFTy));
   1866       const AttributeSet &NewPAL =
   1867           AttributeSet::get(FTy->getContext(), NewAttrs);
   1868 
   1869       Instruction *NewCaller;
   1870       if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
   1871         NewCaller = InvokeInst::Create(NewCallee,
   1872                                        II->getNormalDest(), II->getUnwindDest(),
   1873                                        NewArgs);
   1874         cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
   1875         cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
   1876       } else {
   1877         NewCaller = CallInst::Create(NewCallee, NewArgs);
   1878         if (cast<CallInst>(Caller)->isTailCall())
   1879           cast<CallInst>(NewCaller)->setTailCall();
   1880         cast<CallInst>(NewCaller)->
   1881           setCallingConv(cast<CallInst>(Caller)->getCallingConv());
   1882         cast<CallInst>(NewCaller)->setAttributes(NewPAL);
   1883       }
   1884 
   1885       return NewCaller;
   1886     }
   1887   }
   1888 
   1889   // Replace the trampoline call with a direct call.  Since there is no 'nest'
   1890   // parameter, there is no need to adjust the argument list.  Let the generic
   1891   // code sort out any function type mismatches.
   1892   Constant *NewCallee =
   1893     NestF->getType() == PTy ? NestF :
   1894                               ConstantExpr::getBitCast(NestF, PTy);
   1895   CS.setCalledFunction(NewCallee);
   1896   return CS.getInstruction();
   1897 }
   1898