//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
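/// For example, i8 and i16 promote to i32; i32, i64 and all non-integer types
/// are returned unchanged.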
static Type *getPromotedType(Type *Ty) {
  if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}


Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), TD);
  unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), TD);
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If the length of the memcpy/memmove is 1/2/4/8 bytes, replace it with a
  // load/store pair.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for these
  // intrinsics.  See if the size is something we can handle with a single
  // primitive load/store.  A single load+store correctly handles overlapping
  // memory in the memmove case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.
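
  // For example, a four-byte copy such as (illustrative IR)
  //   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %d, i8* %s, i32 4,
  //                                        i32 4, i1 false)
  // becomes, conceptually, a "load i32" from %s and a "store i32" to %d
  // once the pointers are cast to i32*.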

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // Memcpy forces the use of i8* for the source and destination.  That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*.  We'd much rather use a double load+store than an
  // i64 load+store here, because this improves the odds that the source or
  // dest address will be promotable.  See if we can find a better type than
  // the integer datatype.
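  // For instance (illustrative), an 8-byte memcpy whose destination pointer
  // was cast from {{double}}* gets lowered to a "load double" plus
  // "store double" instead of an i64 load/store pair.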
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  if (StrippedDest != MI->getArgOperand(0)) {
    Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                                    ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double].  Rip
      // down through these levels if so.
      while (!SrcETy->isSingleValueType()) {
        if (StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
      }
    }
  }


  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile());
  L->setAlignment(SrcAlign);
  StoreInst *S = Builder->CreateStore(L, Dest, MI->isVolatile());
  S->setAlignment(DstAlign);

  // Set the length of the copy to 0; the transfer will be deleted on the
  // next iteration of the worklist.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = getKnownAlignment(MI->getDest(), TD);
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length, fill value and alignment if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op.
  if (Len == 0) return MI; // memset(d,c,0,a) -> noop

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder->CreateBitCast(Dest, NewDstPtrTy);

    // For memset, alignment 0 is equivalent to alignment 1, but that is not
    // true for store, so make the alignment explicit.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
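    // Multiplying the zero-extended fill byte by 0x0101010101010101 splats it
    // across all eight bytes; e.g. a fill byte of 0x41 with Len == 4 yields
    // the i32 value 0x41414141.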
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    StoreInst *S = Builder->CreateStore(ConstantInt::get(ITy, Fill), Dest,
                                        MI->isVolatile());
    S->setAlignment(Alignment);

    // Set the length of the memset to 0; it will be deleted on the next
    // iteration of the worklist.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}

/// visitCallInst - CallInst simplification.  This mostly only handles folding
/// of intrinsic instructions.  For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI))
    return visitFree(CI);
  if (isMalloc(&CI))
    return visitMalloc(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations.  We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // No other transformations apply to volatile transfers.
    if (MI->isVolatile())
      return 0;

    // If we have a memmove and the source operand is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                           CI.getArgOperand(1)->getType(),
                           CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    // We need target data for just about everything here, so give up if we
    // don't have it.
    if (!TD) break;

    Type *ReturnTy = CI.getType();
    uint64_t DontKnow = II->getArgOperand(1) == Builder->getTrue() ? 0 : -1ULL;

    // Get to the real allocated thing and offset as fast as possible.
    Value *Op1 = II->getArgOperand(0)->stripPointerCasts();

    uint64_t Offset = 0;
    uint64_t Size = -1ULL;

    // Try to look through constant GEPs.
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1)) {
      if (!GEP->hasAllConstantIndices()) break;

      // Get the current byte offset into the thing. Use the original
      // operand in case we're looking through a bitcast.
      SmallVector<Value*, 8> Ops(GEP->idx_begin(), GEP->idx_end());
      Offset = TD->getIndexedOffset(GEP->getPointerOperandType(), Ops);

      Op1 = GEP->getPointerOperand()->stripPointerCasts();

      // Make sure we're not a constant offset from an external
      // global.
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1))
        if (!GV->hasDefinitiveInitializer()) break;
    }
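
    // For example (illustrative), with @g = global [10 x i8] and an argument
    // of getelementptr ([10 x i8]* @g, i32 0, i32 3), we get Offset == 3 and
    // Size == 10 below, so the intrinsic folds to 7.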

    // If we've stripped down to a single global variable whose size we can
    // determine, just return that size.
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
      if (GV->hasDefinitiveInitializer()) {
        Constant *C = GV->getInitializer();
        Size = TD->getTypeAllocSize(C->getType());
      } else {
        // Can't determine size of the GV.
        Constant *RetVal = ConstantInt::get(ReturnTy, DontKnow);
        return ReplaceInstUsesWith(CI, RetVal);
      }
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
      // Get alloca size.
      if (AI->getAllocatedType()->isSized()) {
        Size = TD->getTypeAllocSize(AI->getAllocatedType());
        if (AI->isArrayAllocation()) {
          const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
          if (!C) break;
          Size *= C->getZExtValue();
        }
      }
    } else if (CallInst *MI = extractMallocCall(Op1)) {
      // Get allocation size.
      Type* MallocType = getMallocAllocatedType(MI);
      if (MallocType && MallocType->isSized())
        if (Value *NElems = getMallocArraySize(MI, TD, true))
          if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
            Size = NElements->getZExtValue() * TD->getTypeAllocSize(MallocType);
    }

    // Do not return "I don't know" here. Later optimization passes could
    // make it possible to evaluate objectsize to a constant.
    if (Size == -1ULL)
      break;

    if (Size < Offset) {
      // Out-of-bounds reference? Negative index normalized to a large
      // index? Just return "I don't know".
      return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, DontKnow));
    }
    return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, Size-Offset));
  }
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getArgOperand(0)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getArgOperand(0));

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getArgOperand(0))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getArgOperand(0), CV);
          return new TruncInst(V, TI->getType());
        }
    }
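    // For example, with x : i32 truncated to i16, the shift amount C is
    // 32 - 16 = 16: bswap.i16(trunc(bswap.i32(x))) == trunc(x >> 16), and
    // both select the two high bytes of x in their original order.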

    break;
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT) break;
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
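    // If the lowest known-one bit is at position k and bits 0..k-1 are all
    // known zero, cttz is exactly k; e.g. a value known to match 0b???1000
    // has cttz == 3.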
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));
    break;
  }
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT) break;
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));
    break;
  }
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt Mask = APInt::getSignBit(BitWidth);
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
        // Create a simple add instruction, and insert it into the struct.
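        // (Both operands are at least 2^(BitWidth-1), so their sum is at
        // least 2^BitWidth and cannot fit in BitWidth bits.)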
        Value *Add = Builder->CreateAdd(LHS, RHS);
        Add->takeName(&CI);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getTrue(II->getContext())
        };
        StructType *ST = cast<StructType>(II->getType());
        Constant *Struct = ConstantStruct::get(ST, V);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Value *Add = Builder->CreateNUWAdd(LHS, RHS);
        Add->takeName(&CI);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        StructType *ST = cast<StructType>(II->getType());
        Constant *Struct = ConstantStruct::get(ST, V);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  }
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getArgOperand(0)) ||
        isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::umul_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    unsigned BitWidth = cast<IntegerType>(LHS->getType())->getBitWidth();
    APInt Mask = APInt::getAllOnesValue(BitWidth);

    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
    APInt RHSKnownZero(BitWidth, 0);
    APInt RHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);

    // Get the largest possible values for each operand.
    APInt LHSMax = ~LHSKnownZero;
    APInt RHSMax = ~RHSKnownZero;

    // If multiplying the maximum values does not overflow then we can turn
    // this into a plain NUW mul.
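    // E.g. if each i32 operand has its top 16 bits known zero, LHSMax and
    // RHSMax are both < 2^16, so the product is < 2^32 and can never wrap.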
    bool Overflow;
    LHSMax.umul_ov(RHSMax, Overflow);
    if (!Overflow) {
      Value *Mul = Builder->CreateNUWMul(LHS, RHS, "umul_with_overflow");
      Constant *V[] = {
        UndefValue::get(LHS->getType()),
        Builder->getFalse()
      };
      Constant *Struct = ConstantStruct::get(cast<StructType>(II->getType()),V);
      return InsertValueInst::Create(Struct, Mul, 0);
    }
  } // FALL THROUGH
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X*0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
    // Turn PPC lvx -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                         PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, TD) >= 16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
      return new StoreInst(II->getArgOperand(1), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64: {
    // These intrinsics only demand the 0th element of their input vectors. If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                              DemandedElts, UndefElts)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }


  case Intrinsic::x86_sse41_pmovsxbw:
  case Intrinsic::x86_sse41_pmovsxwd:
  case Intrinsic::x86_sse41_pmovsxdq:
  case Intrinsic::x86_sse41_pmovzxbw:
  case Intrinsic::x86_sse41_pmovzxwd:
  case Intrinsic::x86_sse41_pmovzxdq: {
    // pmov{s|z}x instructions ignore the upper half of their input vectors.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    unsigned LowHalfElts = VWidth / 2;
    APInt InputDemandedElts(APInt::getBitsSet(VWidth, 0, LowHalfElts));
    APInt UndefElts(VWidth, 0);
    if (Value *TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                                 InputDemandedElts,
                                                 UndefElts)) {
      II->setArgOperand(0, TmpV);
      return II;
    }
    break;
  }

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
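    // Each byte of the constant mask selects one of the 32 input bytes (16
    // from each source vector), so we can rebuild the result with plain
    // extractelement/insertelement operations.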
    if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getArgOperand(2))) {
      assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        if (!isa<ConstantInt>(Mask->getOperand(i)) &&
            !isa<UndefValue>(Mask->getOperand(i))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getOperand(i)))
            continue;
          unsigned Idx=cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.

          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                                            Builder->getInt32(Idx&15));
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                                                Builder->getInt32(i));
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;

  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), TD);
    unsigned AlignArg = II->getNumArgOperands() - 1;
    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
      II->setArgOperand(AlignArg,
                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                         MemAlign, false));
      return II;
    }
    break;
  }

  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore.  This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return, resume, or unwind block and if there
    // are no allocas or calls between the restore and the return, nuke the
    // restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI) ||
                          isa<UnwindInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size.  If the size were
  // passed explicitly we could avoid this check.
  if (!CS.paramHasAttr(ix, Attribute::ByVal))
    return true;

  Type* SrcTy =
            cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}

namespace {
class InstCombineFortifiedLibCalls : public SimplifyFortifiedLibCalls {
  InstCombiner *IC;
protected:
  void replaceCall(Value *With) {
    NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
  }
  bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
    if (CI->getArgOperand(SizeCIOp) == CI->getArgOperand(SizeArgOp))
      return true;
    if (ConstantInt *SizeCI =
                           dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp))) {
      if (SizeCI->isAllOnesValue())
        return true;
      if (isString) {
        uint64_t Len = GetStringLength(CI->getArgOperand(SizeArgOp));
        // If the length is 0 we don't know how long it is and so we can't
        // remove the check.
        if (Len == 0) return false;
        return SizeCI->getZExtValue() >= Len;
      }
      if (ConstantInt *Arg = dyn_cast<ConstantInt>(
                                                  CI->getArgOperand(SizeArgOp)))
        return SizeCI->getZExtValue() >= Arg->getZExtValue();
    }
    return false;
  }
public:
  InstCombineFortifiedLibCalls(InstCombiner *IC) : IC(IC), NewInstruction(0) { }
  Instruction *NewInstruction;
};
} // end anonymous namespace

// Try to fold some different types of calls here.
// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
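// For example (illustrative): __memcpy_chk(d, s, 8, 16) copies 8 bytes into
// an object known to hold 16, so the check can never fail and the call can
// be folded to a plain memcpy(d, s, 8).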
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
  if (CI->getCalledFunction() == 0) return 0;

  InstCombineFortifiedLibCalls Simplifier(this);
  Simplifier.fold(CI, TD);
  return Simplifier.NewInstruction;
}

static IntrinsicInst *FindInitTrampolineFromAlloca(Value *TrampMem) {
  // Strip off at most one level of pointer casts, looking for an alloca.  This
  // is good enough in practice and simpler than handling any number of casts.
  Value *Underlying = TrampMem->stripPointerCasts();
  if (Underlying != TrampMem &&
      (!Underlying->hasOneUse() || *Underlying->use_begin() != TrampMem))
    return 0;
  if (!isa<AllocaInst>(Underlying))
    return 0;

  IntrinsicInst *InitTrampoline = 0;
  for (Value::use_iterator I = TrampMem->use_begin(), E = TrampMem->use_end();
       I != E; ++I) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(*I);
    if (!II)
      return 0;
    if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
      if (InitTrampoline)
        // More than one init_trampoline writes to this value.  Give up.
        return 0;
      InitTrampoline = II;
      continue;
    }
    if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
      // Allow any number of calls to adjust.trampoline.
      continue;
    return 0;
  }

  // No call to init.trampoline found.
  if (!InitTrampoline)
    return 0;

  // Check that the alloca is being used in the expected way.
  if (InitTrampoline->getOperand(0) != TrampMem)
    return 0;

  return InitTrampoline;
}

static IntrinsicInst *FindInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
                                               Value *TrampMem) {
  // Visit all the previous instructions in the basic block, and try to find an
  // init.trampoline which has a direct path to the adjust.trampoline.
  for (BasicBlock::iterator I = AdjustTramp,
       E = AdjustTramp->getParent()->begin(); I != E; ) {
    Instruction *Inst = --I;
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
          II->getOperand(0) == TrampMem)
        return II;
    if (Inst->mayWriteToMemory())
      return 0;
  }
  return 0;
}

// Given a call to llvm.adjust.trampoline, find and return the corresponding
// call to llvm.init.trampoline if the call to the trampoline can be optimized
// to a direct call to a function.  Otherwise return NULL.
//
static IntrinsicInst *FindInitTrampoline(Value *Callee) {
  Callee = Callee->stripPointerCasts();
  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
  if (!AdjustTramp ||
      AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
    return 0;

  Value *TrampMem = AdjustTramp->getOperand(0);

  if (IntrinsicInst *IT = FindInitTrampolineFromAlloca(TrampMem))
    return IT;
  if (IntrinsicInst *IT = FindInitTrampolineFromBB(AdjustTramp, TrampMem))
    return IT;
  return 0;
}

// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a pointer to a function, attempt to move any casts to the
  // arguments of the call/invoke.
  Value *Callee = CS.getCalledValue();
  if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
    return 0;

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body.  A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        ReplaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because that would change the CFG; just
      // change the callee to a null pointer instead.
      cast<InvokeInst>(OldCall)->setCalledFunction(
                                    Constant::getNullValue(CalleeF->getType()));
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable; just remove it.  We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                  UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      ReplaceInstUsesWith(*CS.getInstruction(),
                          UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG; insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (IntrinsicInst *II = FindInitTrampoline(Callee))
    return transformCallThroughTrampoline(CS, II);

  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible; we require TargetData for most of
  // this.  None of these calls are seen as possibly dead, so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, TD);
    // If we changed something, return the result; otherwise let the
    // fallthrough checks run.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : 0;
}

// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
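// For example (illustrative), a call through a constant bitcast such as
//   %r = call i32 bitcast (i32 (i8*)* @f to i32 (i32*)*)(i32* %p)
// can become a direct call with the cast moved onto the argument:
//   %0 = bitcast i32* %p to i8*
//   %r = call i32 @f(i8* %0)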
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  Function *Callee =
    dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
  if (Callee == 0)
    return false;
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type.  Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments cast to the appropriate types.
  //
  FunctionType *FT = Callee->getFunctionType();
  Type *OldRetTy = Caller->getType();
  Type *NewRetTy = FT->getReturnType();

  if (NewRetTy->isStructTy())
    return false; // TODO: Handle multiple return values.

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or from
        // a pointer to an integer of the same size.
        !((OldRetTy->isPointerTy() || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (NewRetTy->isPointerTy() || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;   // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;   // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used by
    // a PHI node in a successor, we cannot change the return type of the call
    // because there is no place to put the cast instruction (without breaking
    // the critical edge).  Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    unsigned Attrs = CallerPAL.getParamAttributes(i + 1);
    if (Attrs & Attribute::typeIncompatible(ParamTy))
      return false;   // Attribute not compatible with transformed value.

    // If the parameter is passed as a byval argument, then we have to have a
    // sized type and the sized type has to have the same size as the old type.
    if (ParamTy != ActTy && (Attrs & Attribute::ByVal)) {
      PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
      if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || TD == 0)
        return false;

      Type *CurElTy = cast<PointerType>(ActTy)->getElementType();
      if (TD->getTypeAllocSize(CurElTy) !=
          TD->getTypeAllocSize(ParamPTy->getElementType()))
        return false;
    }

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((ParamTy->isPointerTy() ||
      ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (ActTy->isPointerTy() ||
              ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }

  if (Callee->isDeclaration()) {
    // Do not delete arguments unless we have a function body.
    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
      return false;

    // If the callee is just a declaration, don't change the varargsness of the
    // call.  We don't want to introduce a varargs call where one doesn't
    // already exist.
    PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
    if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
      return false;
  }

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them.  Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }


  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary.
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes.  Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }
   1181 
   1182   if (Attributes FnAttrs =  CallerPAL.getFnAttributes())
   1183     attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));
   1184 
   1185   if (NewRetTy->isVoidTy())
   1186     Caller->setName("");   // Void type should not have a name.
   1187 
   1188   const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
   1189                                                      attrVec.end());
   1190 
  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = Builder->CreateInvoke(Callee, II->getNormalDest(),
                               II->getUnwindDest(), Args);
    NC->takeName(II);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    CallInst *CI = cast<CallInst>(Caller);
    NC = Builder->CreateCall(Callee, Args);
    NC->takeName(CI);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode =
        CastInst::getCastOpcode(NC, false, OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy);
      NC->setDebugLoc(Caller->getDebugLoc());

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call; just insert the cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
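      // The new callee returns void; any remaining uses of the old result
      // are replaced with undef.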
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    ReplaceInstUsesWith(*Caller, NV);

  EraseInstFromFunction(*Caller);
  return true;
}

// transformCallThroughTrampoline - Turn a call to a function created by an
// init_trampoline / adjust_trampoline intrinsic pair into a direct call to
// the underlying function.
//
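// For illustration only (the IR names are made up; the operand positions
// match how the code below reads them: operand 1 of init_trampoline is the
// underlying function, operand 2 is the nest value):
//   call void @llvm.init.trampoline(i8* %tramp, i8* %fptr, i8* %nval)
//   %p = call i8* @llvm.adjust.trampoline(i8* %tramp)
//   %fp = bitcast i8* %p to void (i32)*
//   call void %fp(i32 %x)
// becomes, roughly:
//   call void @f(i8* nest %nval, i32 %x)
//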
Instruction *
InstCombiner::transformCallThroughTrampoline(CallSite CS,
                                             IntrinsicInst *Tramp) {
  Value *Callee = CS.getCalledValue();
  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  assert(Tramp &&
         "transformCallThroughTrampoline called with incorrect CallSite.");

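  // The function underlying the trampoline is operand 1 of the
  // init_trampoline call; strip pointer casts to reach it.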
  Function *NestF =
    cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it.  Likewise for attributes.
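      // For example, with NestIdx == 2 and original arguments (a, b), the
      // new list is (a, nest, b); the attribute index of b shifts from 2
      // to 3 via the "Idx + (Idx >= NestIdx)" adjustment below.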

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = Builder->CreateBitCast(NestVal, NestTy, "nest");
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back(
              AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.
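      // For example, FTy == i32 (i32, i32) with NestIdx == 1 and
      // NestTy == i8* yields NewFTy == i32 (i8*, i32, i32).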

      std::vector<Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call.  Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }

      return NewCaller;
    }
  }

  // Replace the trampoline call with a direct call.  Since there is no 'nest'
  // parameter, there is no need to adjust the argument list.  Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}