//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static Type *getPromotedType(Type *Ty) {
  if (IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}


Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), TD);
  unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), TD);
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for intrinsic.  See
  // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  IntegerType *IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // Memcpy forces the use of i8* for the source and destination.  That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*.  We'd much rather use a double load+store here than
  // an i64 load+store, because this improves the odds that the source or dest
  // address will be promotable.  See if we can find a better type than the
  // integer datatype.
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  if (StrippedDest != MI->getArgOperand(0)) {
    Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                                    ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double].  Rip
      // down through these levels if so.
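      // For example, peel {{{double}}} -> {{double}} -> {double} -> double,
      // or [1 x double] -> double, so the load+store below can use the
      // scalar type directly.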
      while (!SrcETy->isSingleValueType()) {
        if (StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
      }
    }
  }


  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile());
  L->setAlignment(SrcAlign);
  StoreInst *S = Builder->CreateStore(L, Dest, MI->isVolatile());
  S->setAlignment(DstAlign);

  // Set the size of the copy to 0; it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = getKnownAlignment(MI->getDest(), TD);
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length, alignment, and fill value if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op.
  if (Len == 0) return MI;  // memset(d,c,0,a) -> noop

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder->CreateBitCast(Dest, NewDstPtrTy);

    // memset treats alignment 0 as alignment 1, but store does not.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store it.  Multiplying the i8 fill byte by
    // 0x0101010101010101 splats it across every byte of the wider integer.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    StoreInst *S = Builder->CreateStore(ConstantInt::get(ITy, Fill), Dest,
                                        MI->isVolatile());
    S->setAlignment(Alignment);

    // Set the length of the memset to 0; it will be deleted on the next
    // iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}

/// visitCallInst - CallInst simplification.  This mostly only handles folding
/// of intrinsic instructions.  For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI))
    return visitFree(CI);
  if (isMalloc(&CI))
    return visitMalloc(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
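  // (If an exception did propagate through a nounwind caller, the behavior
  // would already be undefined, so copying the attribute down is safe.)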
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations.  We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // No other transformations apply to volatile transfers.
    if (MI->isVolatile())
      return 0;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                           CI.getArgOperand(1)->getType(),
                           CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    // We need target data for just about everything so depend on it.
    if (!TD) break;

    Type *ReturnTy = CI.getType();
    uint64_t DontKnow = II->getArgOperand(1) == Builder->getTrue() ? 0 : -1ULL;

    // Get to the real allocated thing and offset as fast as possible.
    Value *Op1 = II->getArgOperand(0)->stripPointerCasts();

    uint64_t Offset = 0;
    uint64_t Size = -1ULL;

    // Try to look through constant GEPs.
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1)) {
      if (!GEP->hasAllConstantIndices()) break;

      // Get the current byte offset into the thing.  Use the original
      // operand in case we're looking through a bitcast.
      SmallVector<Value*, 8> Ops(GEP->idx_begin(), GEP->idx_end());
      Offset = TD->getIndexedOffset(GEP->getPointerOperandType(), Ops);

      Op1 = GEP->getPointerOperand()->stripPointerCasts();

      // Make sure we're not a constant offset from an external
      // global.
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1))
        if (!GV->hasDefinitiveInitializer()) break;
    }

    // If we've stripped down to a single global variable whose size we
    // can determine, just return that.
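    // e.g. for "@g = global [10 x i8]" reached through a constant GEP with
    // byte offset 3, objectsize folds to 10 - 3 = 7 below.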
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
      if (GV->hasDefinitiveInitializer()) {
        Constant *C = GV->getInitializer();
        Size = TD->getTypeAllocSize(C->getType());
      } else {
        // Can't determine size of the GV.
        Constant *RetVal = ConstantInt::get(ReturnTy, DontKnow);
        return ReplaceInstUsesWith(CI, RetVal);
      }
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
      // Get alloca size.
      if (AI->getAllocatedType()->isSized()) {
        Size = TD->getTypeAllocSize(AI->getAllocatedType());
        if (AI->isArrayAllocation()) {
          const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
          if (!C) break;
          Size *= C->getZExtValue();
        }
      }
    } else if (CallInst *MI = extractMallocCall(Op1)) {
      // Get allocation size.
      Type *MallocType = getMallocAllocatedType(MI);
      if (MallocType && MallocType->isSized())
        if (Value *NElems = getMallocArraySize(MI, TD, true))
          if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
            Size = NElements->getZExtValue() * TD->getTypeAllocSize(MallocType);
    }

    // Do not return "I don't know" here.  Later optimization passes could
    // make it possible to evaluate objectsize to a constant.
    if (Size == -1ULL)
      break;

    if (Size < Offset) {
      // Out-of-bounds reference?  Negative index normalized to a large
      // index?  Just return "I don't know".
      return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, DontKnow));
    }
    return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, Size-Offset));
  }
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getArgOperand(0)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getArgOperand(0));

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getArgOperand(0))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getArgOperand(0), CV);
          return new TruncInst(V, TI->getType());
        }
    }

    break;
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
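    // e.g. if the operand is known to look like ...1000 (bit 3 known one,
    // bits 0-2 known zero), cttz folds to the constant 3.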
    if (!IT) break;
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));

    }
    break;
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT) break;
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));

    }
    break;
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt Mask = APInt::getSignBit(BitWidth);
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
        // Create a simple add instruction, and insert it into the struct.
        Value *Add = Builder->CreateAdd(LHS, RHS);
        Add->takeName(&CI);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getTrue(II->getContext())
        };
        StructType *ST = cast<StructType>(II->getType());
        Constant *Struct = ConstantStruct::get(ST, V);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Value *Add = Builder->CreateNUWAdd(LHS, RHS);
        Add->takeName(&CI);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        StructType *ST = cast<StructType>(II->getType());
        Constant *Struct = ConstantStruct::get(ST, V);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  }
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
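    // e.g. sadd.with.overflow(C, %x) -> sadd.with.overflow(%x, C), so the
    // folds below only have to look for a constant on the RHS.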
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getArgOperand(0)) ||
        isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::umul_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    unsigned BitWidth = cast<IntegerType>(LHS->getType())->getBitWidth();
    APInt Mask = APInt::getAllOnesValue(BitWidth);

    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
    APInt RHSKnownZero(BitWidth, 0);
    APInt RHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);

    // Get the largest possible values for each operand.
    APInt LHSMax = ~LHSKnownZero;
    APInt RHSMax = ~RHSKnownZero;

    // If multiplying the maximum values does not overflow then we can turn
    // this into a plain NUW mul.
    bool Overflow;
    LHSMax.umul_ov(RHSMax, Overflow);
    if (!Overflow) {
      Value *Mul = Builder->CreateNUWMul(LHS, RHS, "umul_with_overflow");
      Constant *V[] = {
        UndefValue::get(LHS->getType()),
        Builder->getFalse()
      };
      Constant *Struct = ConstantStruct::get(cast<StructType>(II->getType()), V);
      return InsertValueInst::Create(Struct, Mul, 0);
    }
  } // FALL THROUGH
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X*0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
    // Turn PPC lvx -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, TD) >= 16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
      return new StoreInst(II->getArgOperand(1), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64: {
    // These intrinsics only demand the 0th element of their input vectors. If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                              DemandedElts, UndefElts)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }


  case Intrinsic::x86_sse41_pmovsxbw:
  case Intrinsic::x86_sse41_pmovsxwd:
  case Intrinsic::x86_sse41_pmovsxdq:
  case Intrinsic::x86_sse41_pmovzxbw:
  case Intrinsic::x86_sse41_pmovzxwd:
  case Intrinsic::x86_sse41_pmovzxdq: {
    // The pmov{s|z}x intrinsics ignore the upper half of their input vectors.
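    // e.g. pmovsxbw only reads the low 8 of its 16 input bytes, so demanding
    // just the low half may let the operand computation be simplified away.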
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    unsigned LowHalfElts = VWidth / 2;
    APInt InputDemandedElts(APInt::getBitsSet(VWidth, 0, LowHalfElts));
    APInt UndefElts(VWidth, 0);
    if (Value *TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                                 InputDemandedElts,
                                                 UndefElts)) {
      II->setArgOperand(0, TmpV);
      return II;
    }
    break;
  }

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getArgOperand(2))) {
      assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        if (!isa<ConstantInt>(Mask->getOperand(i)) &&
            !isa<UndefValue>(Mask->getOperand(i))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getOperand(i)))
            continue;
          unsigned Idx = cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.

          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                  ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                   Idx&15, false), "tmp");
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                         ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                          i, false), "tmp");
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;

  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), TD);
    unsigned AlignArg = II->getNumArgOperands() - 1;
    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
      II->setArgOperand(AlignArg,
                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                         MemAlign, false));
      return II;
    }
    break;
  }

  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore.  This can
    // happen when variable allocas are DCE'd.
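    //   %sp = call i8* @llvm.stacksave()
    //   call void @llvm.stackrestore(i8* %sp)  ; restore adjacent to its save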
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return/unwind block and if there are no
    // allocas or calls between the restore and the return, nuke the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size.  If the size were
  // passed explicitly we could avoid this check.
  if (!CS.paramHasAttr(ix, Attribute::ByVal))
    return true;

  Type *SrcTy =
    cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  Type *DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}

namespace {
class InstCombineFortifiedLibCalls : public SimplifyFortifiedLibCalls {
  InstCombiner *IC;
protected:
  void replaceCall(Value *With) {
    NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
  }
  bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
    if (CI->getArgOperand(SizeCIOp) == CI->getArgOperand(SizeArgOp))
      return true;
    if (ConstantInt *SizeCI =
                           dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp))) {
      if (SizeCI->isAllOnesValue())
        return true;
      if (isString) {
        uint64_t Len = GetStringLength(CI->getArgOperand(SizeArgOp));
        // GetStringLength returns 0 to mean "unknown length", in which case
        // we can't remove the check.
        if (Len == 0) return false;
        return SizeCI->getZExtValue() >= Len;
      }
      if (ConstantInt *Arg = dyn_cast<ConstantInt>(
                                                  CI->getArgOperand(SizeArgOp)))
        return SizeCI->getZExtValue() >= Arg->getZExtValue();
    }
    return false;
  }
public:
  InstCombineFortifiedLibCalls(InstCombiner *IC) : IC(IC), NewInstruction(0) { }
  Instruction *NewInstruction;
};
} // end anonymous namespace

// Try to fold some different types of calls here.
// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
  if (CI->getCalledFunction() == 0) return 0;

  InstCombineFortifiedLibCalls Simplifier(this);
  Simplifier.fold(CI, TD);
  return Simplifier.NewInstruction;
}

// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a pointer to a function, attempt to move any casts to the
  // arguments of the call/invoke.
  Value *Callee = CS.getCalledValue();
  if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
    return 0;

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body.  A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                                  OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        ReplaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG; just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setCalledFunction(
                                    Constant::getNullValue(CalleeF->getType()));
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it.  We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
               UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      ReplaceInstUsesWith(*CS.getInstruction(),
                          UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
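      // The constant-true condition always takes the normal edge; the unwind
      // edge stays in place so the shape of the CFG is unchanged.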
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
    if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
      if (In->getIntrinsicID() == Intrinsic::init_trampoline)
        return transformCallThroughTrampoline(CS);

  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible; we require TargetData for most of
  // this.  None of these calls are seen as possibly dead, so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, TD);
    // If we changed something, return the result.  Otherwise fall through
    // to the generic handling below.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : 0;
}

// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  Function *Callee =
    dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
  if (Callee == 0)
    return false;
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type.  Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments cast to the appropriate types.
  //
  FunctionType *FT = Callee->getFunctionType();
  Type *OldRetTy = Caller->getType();
  Type *NewRetTy = FT->getReturnType();

  if (NewRetTy->isStructTy())
    return false; // TODO: Handle multiple return values.

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or
        // from a pointer to an integer of the same size.
        !((OldRetTy->isPointerTy() || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (NewRetTy->isPointerTy() || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;   // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;   // Cannot transform this return value.
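
    // typeIncompatible is the set of attributes that cannot apply to NewRetTy
    // (e.g. zext/sext on a non-integer, noalias on a non-pointer).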
    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used by
    // a PHI node in a successor, we cannot change the return type of the call
    // because there is no place to put the cast instruction (without breaking
    // the critical edge).  Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    unsigned Attrs = CallerPAL.getParamAttributes(i + 1);
    if (Attrs & Attribute::typeIncompatible(ParamTy))
      return false;   // Attribute not compatible with transformed value.

    // If the parameter is passed as a byval argument, then we have to have a
    // sized type and the sized type has to have the same size as the old type.
    if (ParamTy != ActTy && (Attrs & Attribute::ByVal)) {
      PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
      if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || TD == 0)
        return false;

      Type *CurElTy = cast<PointerType>(ActTy)->getElementType();
      if (TD->getTypeAllocSize(CurElTy) !=
          TD->getTypeAllocSize(ParamPTy->getElementType()))
        return false;
    }

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((ParamTy->isPointerTy() ||
               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (ActTy->isPointerTy() ||
               ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }

  if (Callee->isDeclaration()) {
    // Do not delete arguments unless we have a function body.
    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
      return false;

    // If the callee is just a declaration, don't change the varargsness of the
    // call.  We don't want to introduce a varargs call where one doesn't
    // already exist.
    PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
    if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
      return false;
  }

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them.  Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
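    // Attribute slots are kept sorted by index, so scan from the last slot
    // down and stop once we reach the fixed (non-vararg) parameters.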
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }


  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary.
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes.  Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }

  if (Attributes FnAttrs = CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = Builder->CreateInvoke(Callee, II->getNormalDest(),
                               II->getUnwindDest(), Args);
    NC->takeName(II);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    CallInst *CI = cast<CallInst>(Caller);
    NC = Builder->CreateCall(Callee, Args);
    NC->takeName(CI);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode =
        CastInst::getCastOpcode(NC, false, OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");
      NC->setDebugLoc(Caller->getDebugLoc());

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call; just insert the cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    ReplaceInstUsesWith(*Caller, NV);

  EraseInstFromFunction(*Caller);
  return true;
}

// transformCallThroughTrampoline - Turn a call to a function created by the
// init_trampoline intrinsic into a direct call to the underlying function.
//
Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
  Value *Callee = CS.getCalledValue();
  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  IntrinsicInst *Tramp =
    cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));

  Function *NestF = cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it.  Likewise for attributes.

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = Builder->CreateBitCast(NestVal, NestTy, "nest");
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back
              (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call.  Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }

      return NewCaller;
    }
  }

  // Replace the trampoline call with a direct call.
  // Since there is no 'nest' parameter, there is no need to adjust the
  // argument list.  Let the generic code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}