1 //===-- ConstantFolding.cpp - Fold instructions into constants ------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file defines routines for folding instructions into constants. 11 // 12 // Also, to supplement the basic IR ConstantExpr simplifications, 13 // this file defines some additional folding routines that can make use of 14 // DataLayout information. These functions cannot go in IR due to library 15 // dependency issues. 16 // 17 //===----------------------------------------------------------------------===// 18 19 #include "llvm/Analysis/ConstantFolding.h" 20 #include "llvm/ADT/SmallPtrSet.h" 21 #include "llvm/ADT/SmallVector.h" 22 #include "llvm/ADT/StringMap.h" 23 #include "llvm/Analysis/ValueTracking.h" 24 #include "llvm/Config/config.h" 25 #include "llvm/IR/Constants.h" 26 #include "llvm/IR/DataLayout.h" 27 #include "llvm/IR/DerivedTypes.h" 28 #include "llvm/IR/Function.h" 29 #include "llvm/IR/GetElementPtrTypeIterator.h" 30 #include "llvm/IR/GlobalVariable.h" 31 #include "llvm/IR/Instructions.h" 32 #include "llvm/IR/Intrinsics.h" 33 #include "llvm/IR/Operator.h" 34 #include "llvm/Support/ErrorHandling.h" 35 #include "llvm/Support/MathExtras.h" 36 #include "llvm/Target/TargetLibraryInfo.h" 37 #include <cerrno> 38 #include <cmath> 39 40 #ifdef HAVE_FENV_H 41 #include <fenv.h> 42 #endif 43 44 using namespace llvm; 45 46 //===----------------------------------------------------------------------===// 47 // Constant Folding internal helper functions 48 //===----------------------------------------------------------------------===// 49 50 /// FoldBitCast - Constant fold bitcast, symbolically evaluating it with 51 /// DataLayout. This always returns a non-null constant, but it may be a 52 /// ConstantExpr if unfoldable. 53 static Constant *FoldBitCast(Constant *C, Type *DestTy, 54 const DataLayout &TD) { 55 // Catch the obvious splat cases. 56 if (C->isNullValue() && !DestTy->isX86_MMXTy()) 57 return Constant::getNullValue(DestTy); 58 if (C->isAllOnesValue() && !DestTy->isX86_MMXTy()) 59 return Constant::getAllOnesValue(DestTy); 60 61 // Handle a vector->integer cast. 62 if (IntegerType *IT = dyn_cast<IntegerType>(DestTy)) { 63 VectorType *VTy = dyn_cast<VectorType>(C->getType()); 64 if (!VTy) 65 return ConstantExpr::getBitCast(C, DestTy); 66 67 unsigned NumSrcElts = VTy->getNumElements(); 68 Type *SrcEltTy = VTy->getElementType(); 69 70 // If the vector is a vector of floating point, convert it to vector of int 71 // to simplify things. 72 if (SrcEltTy->isFloatingPointTy()) { 73 unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits(); 74 Type *SrcIVTy = 75 VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElts); 76 // Ask IR to do the conversion now that #elts line up. 77 C = ConstantExpr::getBitCast(C, SrcIVTy); 78 } 79 80 ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(C); 81 if (!CDV) 82 return ConstantExpr::getBitCast(C, DestTy); 83 84 // Now that we know that the input value is a vector of integers, just shift 85 // and insert them into our result. 86 unsigned BitShift = TD.getTypeAllocSizeInBits(SrcEltTy); 87 APInt Result(IT->getBitWidth(), 0); 88 for (unsigned i = 0; i != NumSrcElts; ++i) { 89 Result <<= BitShift; 90 if (TD.isLittleEndian()) 91 Result |= CDV->getElementAsInteger(NumSrcElts-i-1); 92 else 93 Result |= CDV->getElementAsInteger(i); 94 } 95 96 return ConstantInt::get(IT, Result); 97 } 98 99 // The code below only handles casts to vectors currently. 100 VectorType *DestVTy = dyn_cast<VectorType>(DestTy); 101 if (!DestVTy) 102 return ConstantExpr::getBitCast(C, DestTy); 103 104 // If this is a scalar -> vector cast, convert the input into a <1 x scalar> 105 // vector so the code below can handle it uniformly. 106 if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) { 107 Constant *Ops = C; // don't take the address of C! 108 return FoldBitCast(ConstantVector::get(Ops), DestTy, TD); 109 } 110 111 // If this is a bitcast from constant vector -> vector, fold it. 112 if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C)) 113 return ConstantExpr::getBitCast(C, DestTy); 114 115 // If the element types match, IR can fold it. 116 unsigned NumDstElt = DestVTy->getNumElements(); 117 unsigned NumSrcElt = C->getType()->getVectorNumElements(); 118 if (NumDstElt == NumSrcElt) 119 return ConstantExpr::getBitCast(C, DestTy); 120 121 Type *SrcEltTy = C->getType()->getVectorElementType(); 122 Type *DstEltTy = DestVTy->getElementType(); 123 124 // Otherwise, we're changing the number of elements in a vector, which 125 // requires endianness information to do the right thing. For example, 126 // bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>) 127 // folds to (little endian): 128 // <4 x i32> <i32 0, i32 0, i32 1, i32 0> 129 // and to (big endian): 130 // <4 x i32> <i32 0, i32 0, i32 0, i32 1> 131 132 // First thing is first. We only want to think about integer here, so if 133 // we have something in FP form, recast it as integer. 134 if (DstEltTy->isFloatingPointTy()) { 135 // Fold to an vector of integers with same size as our FP type. 136 unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits(); 137 Type *DestIVTy = 138 VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt); 139 // Recursively handle this integer conversion, if possible. 140 C = FoldBitCast(C, DestIVTy, TD); 141 142 // Finally, IR can handle this now that #elts line up. 143 return ConstantExpr::getBitCast(C, DestTy); 144 } 145 146 // Okay, we know the destination is integer, if the input is FP, convert 147 // it to integer first. 148 if (SrcEltTy->isFloatingPointTy()) { 149 unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits(); 150 Type *SrcIVTy = 151 VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt); 152 // Ask IR to do the conversion now that #elts line up. 153 C = ConstantExpr::getBitCast(C, SrcIVTy); 154 // If IR wasn't able to fold it, bail out. 155 if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector. 156 !isa<ConstantDataVector>(C)) 157 return C; 158 } 159 160 // Now we know that the input and output vectors are both integer vectors 161 // of the same size, and that their #elements is not the same. Do the 162 // conversion here, which depends on whether the input or output has 163 // more elements. 164 bool isLittleEndian = TD.isLittleEndian(); 165 166 SmallVector<Constant*, 32> Result; 167 if (NumDstElt < NumSrcElt) { 168 // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>) 169 Constant *Zero = Constant::getNullValue(DstEltTy); 170 unsigned Ratio = NumSrcElt/NumDstElt; 171 unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits(); 172 unsigned SrcElt = 0; 173 for (unsigned i = 0; i != NumDstElt; ++i) { 174 // Build each element of the result. 175 Constant *Elt = Zero; 176 unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1); 177 for (unsigned j = 0; j != Ratio; ++j) { 178 Constant *Src =dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++)); 179 if (!Src) // Reject constantexpr elements. 180 return ConstantExpr::getBitCast(C, DestTy); 181 182 // Zero extend the element to the right size. 183 Src = ConstantExpr::getZExt(Src, Elt->getType()); 184 185 // Shift it to the right place, depending on endianness. 186 Src = ConstantExpr::getShl(Src, 187 ConstantInt::get(Src->getType(), ShiftAmt)); 188 ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize; 189 190 // Mix it in. 191 Elt = ConstantExpr::getOr(Elt, Src); 192 } 193 Result.push_back(Elt); 194 } 195 return ConstantVector::get(Result); 196 } 197 198 // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>) 199 unsigned Ratio = NumDstElt/NumSrcElt; 200 unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits(); 201 202 // Loop over each source value, expanding into multiple results. 203 for (unsigned i = 0; i != NumSrcElt; ++i) { 204 Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i)); 205 if (!Src) // Reject constantexpr elements. 206 return ConstantExpr::getBitCast(C, DestTy); 207 208 unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1); 209 for (unsigned j = 0; j != Ratio; ++j) { 210 // Shift the piece of the value into the right place, depending on 211 // endianness. 212 Constant *Elt = ConstantExpr::getLShr(Src, 213 ConstantInt::get(Src->getType(), ShiftAmt)); 214 ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize; 215 216 // Truncate and remember this piece. 217 Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy)); 218 } 219 } 220 221 return ConstantVector::get(Result); 222 } 223 224 225 /// IsConstantOffsetFromGlobal - If this constant is actually a constant offset 226 /// from a global, return the global and the constant. Because of 227 /// constantexprs, this function is recursive. 228 static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, 229 APInt &Offset, const DataLayout &TD) { 230 // Trivial case, constant is the global. 231 if ((GV = dyn_cast<GlobalValue>(C))) { 232 unsigned BitWidth = TD.getPointerTypeSizeInBits(GV->getType()); 233 Offset = APInt(BitWidth, 0); 234 return true; 235 } 236 237 // Otherwise, if this isn't a constant expr, bail out. 238 ConstantExpr *CE = dyn_cast<ConstantExpr>(C); 239 if (!CE) return false; 240 241 // Look through ptr->int and ptr->ptr casts. 242 if (CE->getOpcode() == Instruction::PtrToInt || 243 CE->getOpcode() == Instruction::BitCast) 244 return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD); 245 246 // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5) 247 GEPOperator *GEP = dyn_cast<GEPOperator>(CE); 248 if (!GEP) 249 return false; 250 251 unsigned BitWidth = TD.getPointerTypeSizeInBits(GEP->getType()); 252 APInt TmpOffset(BitWidth, 0); 253 254 // If the base isn't a global+constant, we aren't either. 255 if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, TD)) 256 return false; 257 258 // Otherwise, add any offset that our operands provide. 259 if (!GEP->accumulateConstantOffset(TD, TmpOffset)) 260 return false; 261 262 Offset = TmpOffset; 263 return true; 264 } 265 266 /// ReadDataFromGlobal - Recursive helper to read bits out of global. C is the 267 /// constant being copied out of. ByteOffset is an offset into C. CurPtr is the 268 /// pointer to copy results into and BytesLeft is the number of bytes left in 269 /// the CurPtr buffer. TD is the target data. 270 static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, 271 unsigned char *CurPtr, unsigned BytesLeft, 272 const DataLayout &TD) { 273 assert(ByteOffset <= TD.getTypeAllocSize(C->getType()) && 274 "Out of range access"); 275 276 // If this element is zero or undefined, we can just return since *CurPtr is 277 // zero initialized. 278 if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) 279 return true; 280 281 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) { 282 if (CI->getBitWidth() > 64 || 283 (CI->getBitWidth() & 7) != 0) 284 return false; 285 286 uint64_t Val = CI->getZExtValue(); 287 unsigned IntBytes = unsigned(CI->getBitWidth()/8); 288 289 for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) { 290 int n = ByteOffset; 291 if (!TD.isLittleEndian()) 292 n = IntBytes - n - 1; 293 CurPtr[i] = (unsigned char)(Val >> (n * 8)); 294 ++ByteOffset; 295 } 296 return true; 297 } 298 299 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) { 300 if (CFP->getType()->isDoubleTy()) { 301 C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), TD); 302 return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD); 303 } 304 if (CFP->getType()->isFloatTy()){ 305 C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), TD); 306 return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD); 307 } 308 if (CFP->getType()->isHalfTy()){ 309 C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), TD); 310 return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD); 311 } 312 return false; 313 } 314 315 if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) { 316 const StructLayout *SL = TD.getStructLayout(CS->getType()); 317 unsigned Index = SL->getElementContainingOffset(ByteOffset); 318 uint64_t CurEltOffset = SL->getElementOffset(Index); 319 ByteOffset -= CurEltOffset; 320 321 while (1) { 322 // If the element access is to the element itself and not to tail padding, 323 // read the bytes from the element. 324 uint64_t EltSize = TD.getTypeAllocSize(CS->getOperand(Index)->getType()); 325 326 if (ByteOffset < EltSize && 327 !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr, 328 BytesLeft, TD)) 329 return false; 330 331 ++Index; 332 333 // Check to see if we read from the last struct element, if so we're done. 334 if (Index == CS->getType()->getNumElements()) 335 return true; 336 337 // If we read all of the bytes we needed from this element we're done. 338 uint64_t NextEltOffset = SL->getElementOffset(Index); 339 340 if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset) 341 return true; 342 343 // Move to the next element of the struct. 344 CurPtr += NextEltOffset - CurEltOffset - ByteOffset; 345 BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset; 346 ByteOffset = 0; 347 CurEltOffset = NextEltOffset; 348 } 349 // not reached. 350 } 351 352 if (isa<ConstantArray>(C) || isa<ConstantVector>(C) || 353 isa<ConstantDataSequential>(C)) { 354 Type *EltTy = C->getType()->getSequentialElementType(); 355 uint64_t EltSize = TD.getTypeAllocSize(EltTy); 356 uint64_t Index = ByteOffset / EltSize; 357 uint64_t Offset = ByteOffset - Index * EltSize; 358 uint64_t NumElts; 359 if (ArrayType *AT = dyn_cast<ArrayType>(C->getType())) 360 NumElts = AT->getNumElements(); 361 else 362 NumElts = C->getType()->getVectorNumElements(); 363 364 for (; Index != NumElts; ++Index) { 365 if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr, 366 BytesLeft, TD)) 367 return false; 368 369 uint64_t BytesWritten = EltSize - Offset; 370 assert(BytesWritten <= EltSize && "Not indexing into this element?"); 371 if (BytesWritten >= BytesLeft) 372 return true; 373 374 Offset = 0; 375 BytesLeft -= BytesWritten; 376 CurPtr += BytesWritten; 377 } 378 return true; 379 } 380 381 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) { 382 if (CE->getOpcode() == Instruction::IntToPtr && 383 CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getType())) { 384 return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr, 385 BytesLeft, TD); 386 } 387 } 388 389 // Otherwise, unknown initializer type. 390 return false; 391 } 392 393 static Constant *FoldReinterpretLoadFromConstPtr(Constant *C, 394 const DataLayout &TD) { 395 PointerType *PTy = cast<PointerType>(C->getType()); 396 Type *LoadTy = PTy->getElementType(); 397 IntegerType *IntType = dyn_cast<IntegerType>(LoadTy); 398 399 // If this isn't an integer load we can't fold it directly. 400 if (!IntType) { 401 unsigned AS = PTy->getAddressSpace(); 402 403 // If this is a float/double load, we can try folding it as an int32/64 load 404 // and then bitcast the result. This can be useful for union cases. Note 405 // that address spaces don't matter here since we're not going to result in 406 // an actual new load. 407 Type *MapTy; 408 if (LoadTy->isHalfTy()) 409 MapTy = Type::getInt16PtrTy(C->getContext(), AS); 410 else if (LoadTy->isFloatTy()) 411 MapTy = Type::getInt32PtrTy(C->getContext(), AS); 412 else if (LoadTy->isDoubleTy()) 413 MapTy = Type::getInt64PtrTy(C->getContext(), AS); 414 else if (LoadTy->isVectorTy()) { 415 MapTy = PointerType::getIntNPtrTy(C->getContext(), 416 TD.getTypeAllocSizeInBits(LoadTy), 417 AS); 418 } else 419 return nullptr; 420 421 C = FoldBitCast(C, MapTy, TD); 422 if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, TD)) 423 return FoldBitCast(Res, LoadTy, TD); 424 return nullptr; 425 } 426 427 unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8; 428 if (BytesLoaded > 32 || BytesLoaded == 0) 429 return nullptr; 430 431 GlobalValue *GVal; 432 APInt Offset; 433 if (!IsConstantOffsetFromGlobal(C, GVal, Offset, TD)) 434 return nullptr; 435 436 GlobalVariable *GV = dyn_cast<GlobalVariable>(GVal); 437 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() || 438 !GV->getInitializer()->getType()->isSized()) 439 return nullptr; 440 441 // If we're loading off the beginning of the global, some bytes may be valid, 442 // but we don't try to handle this. 443 if (Offset.isNegative()) 444 return nullptr; 445 446 // If we're not accessing anything in this constant, the result is undefined. 447 if (Offset.getZExtValue() >= 448 TD.getTypeAllocSize(GV->getInitializer()->getType())) 449 return UndefValue::get(IntType); 450 451 unsigned char RawBytes[32] = {0}; 452 if (!ReadDataFromGlobal(GV->getInitializer(), Offset.getZExtValue(), RawBytes, 453 BytesLoaded, TD)) 454 return nullptr; 455 456 APInt ResultVal = APInt(IntType->getBitWidth(), 0); 457 if (TD.isLittleEndian()) { 458 ResultVal = RawBytes[BytesLoaded - 1]; 459 for (unsigned i = 1; i != BytesLoaded; ++i) { 460 ResultVal <<= 8; 461 ResultVal |= RawBytes[BytesLoaded - 1 - i]; 462 } 463 } else { 464 ResultVal = RawBytes[0]; 465 for (unsigned i = 1; i != BytesLoaded; ++i) { 466 ResultVal <<= 8; 467 ResultVal |= RawBytes[i]; 468 } 469 } 470 471 return ConstantInt::get(IntType->getContext(), ResultVal); 472 } 473 474 static Constant *ConstantFoldLoadThroughBitcast(ConstantExpr *CE, 475 const DataLayout *DL) { 476 if (!DL) 477 return nullptr; 478 auto *DestPtrTy = dyn_cast<PointerType>(CE->getType()); 479 if (!DestPtrTy) 480 return nullptr; 481 Type *DestTy = DestPtrTy->getElementType(); 482 483 Constant *C = ConstantFoldLoadFromConstPtr(CE->getOperand(0), DL); 484 if (!C) 485 return nullptr; 486 487 do { 488 Type *SrcTy = C->getType(); 489 490 // If the type sizes are the same and a cast is legal, just directly 491 // cast the constant. 492 if (DL->getTypeSizeInBits(DestTy) == DL->getTypeSizeInBits(SrcTy)) { 493 Instruction::CastOps Cast = Instruction::BitCast; 494 // If we are going from a pointer to int or vice versa, we spell the cast 495 // differently. 496 if (SrcTy->isIntegerTy() && DestTy->isPointerTy()) 497 Cast = Instruction::IntToPtr; 498 else if (SrcTy->isPointerTy() && DestTy->isIntegerTy()) 499 Cast = Instruction::PtrToInt; 500 501 if (CastInst::castIsValid(Cast, C, DestTy)) 502 return ConstantExpr::getCast(Cast, C, DestTy); 503 } 504 505 // If this isn't an aggregate type, there is nothing we can do to drill down 506 // and find a bitcastable constant. 507 if (!SrcTy->isAggregateType()) 508 return nullptr; 509 510 // We're simulating a load through a pointer that was bitcast to point to 511 // a different type, so we can try to walk down through the initial 512 // elements of an aggregate to see if some part of th e aggregate is 513 // castable to implement the "load" semantic model. 514 C = C->getAggregateElement(0u); 515 } while (C); 516 517 return nullptr; 518 } 519 520 /// ConstantFoldLoadFromConstPtr - Return the value that a load from C would 521 /// produce if it is constant and determinable. If this is not determinable, 522 /// return null. 523 Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, 524 const DataLayout *TD) { 525 // First, try the easy cases: 526 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) 527 if (GV->isConstant() && GV->hasDefinitiveInitializer()) 528 return GV->getInitializer(); 529 530 // If the loaded value isn't a constant expr, we can't handle it. 531 ConstantExpr *CE = dyn_cast<ConstantExpr>(C); 532 if (!CE) 533 return nullptr; 534 535 if (CE->getOpcode() == Instruction::GetElementPtr) { 536 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) { 537 if (GV->isConstant() && GV->hasDefinitiveInitializer()) { 538 if (Constant *V = 539 ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE)) 540 return V; 541 } 542 } 543 } 544 545 if (CE->getOpcode() == Instruction::BitCast) 546 if (Constant *LoadedC = ConstantFoldLoadThroughBitcast(CE, TD)) 547 return LoadedC; 548 549 // Instead of loading constant c string, use corresponding integer value 550 // directly if string length is small enough. 551 StringRef Str; 552 if (TD && getConstantStringInfo(CE, Str) && !Str.empty()) { 553 unsigned StrLen = Str.size(); 554 Type *Ty = cast<PointerType>(CE->getType())->getElementType(); 555 unsigned NumBits = Ty->getPrimitiveSizeInBits(); 556 // Replace load with immediate integer if the result is an integer or fp 557 // value. 558 if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 && 559 (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) { 560 APInt StrVal(NumBits, 0); 561 APInt SingleChar(NumBits, 0); 562 if (TD->isLittleEndian()) { 563 for (signed i = StrLen-1; i >= 0; i--) { 564 SingleChar = (uint64_t) Str[i] & UCHAR_MAX; 565 StrVal = (StrVal << 8) | SingleChar; 566 } 567 } else { 568 for (unsigned i = 0; i < StrLen; i++) { 569 SingleChar = (uint64_t) Str[i] & UCHAR_MAX; 570 StrVal = (StrVal << 8) | SingleChar; 571 } 572 // Append NULL at the end. 573 SingleChar = 0; 574 StrVal = (StrVal << 8) | SingleChar; 575 } 576 577 Constant *Res = ConstantInt::get(CE->getContext(), StrVal); 578 if (Ty->isFloatingPointTy()) 579 Res = ConstantExpr::getBitCast(Res, Ty); 580 return Res; 581 } 582 } 583 584 // If this load comes from anywhere in a constant global, and if the global 585 // is all undef or zero, we know what it loads. 586 if (GlobalVariable *GV = 587 dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, TD))) { 588 if (GV->isConstant() && GV->hasDefinitiveInitializer()) { 589 Type *ResTy = cast<PointerType>(C->getType())->getElementType(); 590 if (GV->getInitializer()->isNullValue()) 591 return Constant::getNullValue(ResTy); 592 if (isa<UndefValue>(GV->getInitializer())) 593 return UndefValue::get(ResTy); 594 } 595 } 596 597 // Try hard to fold loads from bitcasted strange and non-type-safe things. 598 if (TD) 599 return FoldReinterpretLoadFromConstPtr(CE, *TD); 600 return nullptr; 601 } 602 603 static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){ 604 if (LI->isVolatile()) return nullptr; 605 606 if (Constant *C = dyn_cast<Constant>(LI->getOperand(0))) 607 return ConstantFoldLoadFromConstPtr(C, TD); 608 609 return nullptr; 610 } 611 612 /// SymbolicallyEvaluateBinop - One of Op0/Op1 is a constant expression. 613 /// Attempt to symbolically evaluate the result of a binary operator merging 614 /// these together. If target data info is available, it is provided as DL, 615 /// otherwise DL is null. 616 static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, 617 Constant *Op1, const DataLayout *DL){ 618 // SROA 619 620 // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl. 621 // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute 622 // bits. 623 624 625 if (Opc == Instruction::And && DL) { 626 unsigned BitWidth = DL->getTypeSizeInBits(Op0->getType()->getScalarType()); 627 APInt KnownZero0(BitWidth, 0), KnownOne0(BitWidth, 0); 628 APInt KnownZero1(BitWidth, 0), KnownOne1(BitWidth, 0); 629 computeKnownBits(Op0, KnownZero0, KnownOne0, DL); 630 computeKnownBits(Op1, KnownZero1, KnownOne1, DL); 631 if ((KnownOne1 | KnownZero0).isAllOnesValue()) { 632 // All the bits of Op0 that the 'and' could be masking are already zero. 633 return Op0; 634 } 635 if ((KnownOne0 | KnownZero1).isAllOnesValue()) { 636 // All the bits of Op1 that the 'and' could be masking are already zero. 637 return Op1; 638 } 639 640 APInt KnownZero = KnownZero0 | KnownZero1; 641 APInt KnownOne = KnownOne0 & KnownOne1; 642 if ((KnownZero | KnownOne).isAllOnesValue()) { 643 return ConstantInt::get(Op0->getType(), KnownOne); 644 } 645 } 646 647 // If the constant expr is something like &A[123] - &A[4].f, fold this into a 648 // constant. This happens frequently when iterating over a global array. 649 if (Opc == Instruction::Sub && DL) { 650 GlobalValue *GV1, *GV2; 651 APInt Offs1, Offs2; 652 653 if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, *DL)) 654 if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *DL) && 655 GV1 == GV2) { 656 unsigned OpSize = DL->getTypeSizeInBits(Op0->getType()); 657 658 // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow. 659 // PtrToInt may change the bitwidth so we have convert to the right size 660 // first. 661 return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) - 662 Offs2.zextOrTrunc(OpSize)); 663 } 664 } 665 666 return nullptr; 667 } 668 669 /// CastGEPIndices - If array indices are not pointer-sized integers, 670 /// explicitly cast them so that they aren't implicitly casted by the 671 /// getelementptr. 672 static Constant *CastGEPIndices(ArrayRef<Constant *> Ops, 673 Type *ResultTy, const DataLayout *TD, 674 const TargetLibraryInfo *TLI) { 675 if (!TD) 676 return nullptr; 677 678 Type *IntPtrTy = TD->getIntPtrType(ResultTy); 679 680 bool Any = false; 681 SmallVector<Constant*, 32> NewIdxs; 682 for (unsigned i = 1, e = Ops.size(); i != e; ++i) { 683 if ((i == 1 || 684 !isa<StructType>(GetElementPtrInst::getIndexedType( 685 Ops[0]->getType(), 686 Ops.slice(1, i - 1)))) && 687 Ops[i]->getType() != IntPtrTy) { 688 Any = true; 689 NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i], 690 true, 691 IntPtrTy, 692 true), 693 Ops[i], IntPtrTy)); 694 } else 695 NewIdxs.push_back(Ops[i]); 696 } 697 698 if (!Any) 699 return nullptr; 700 701 Constant *C = ConstantExpr::getGetElementPtr(Ops[0], NewIdxs); 702 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) { 703 if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI)) 704 C = Folded; 705 } 706 707 return C; 708 } 709 710 /// Strip the pointer casts, but preserve the address space information. 711 static Constant* StripPtrCastKeepAS(Constant* Ptr) { 712 assert(Ptr->getType()->isPointerTy() && "Not a pointer type"); 713 PointerType *OldPtrTy = cast<PointerType>(Ptr->getType()); 714 Ptr = Ptr->stripPointerCasts(); 715 PointerType *NewPtrTy = cast<PointerType>(Ptr->getType()); 716 717 // Preserve the address space number of the pointer. 718 if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) { 719 NewPtrTy = NewPtrTy->getElementType()->getPointerTo( 720 OldPtrTy->getAddressSpace()); 721 Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy); 722 } 723 return Ptr; 724 } 725 726 /// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP 727 /// constant expression, do so. 728 static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops, 729 Type *ResultTy, const DataLayout *TD, 730 const TargetLibraryInfo *TLI) { 731 Constant *Ptr = Ops[0]; 732 if (!TD || !Ptr->getType()->getPointerElementType()->isSized() || 733 !Ptr->getType()->isPointerTy()) 734 return nullptr; 735 736 Type *IntPtrTy = TD->getIntPtrType(Ptr->getType()); 737 Type *ResultElementTy = ResultTy->getPointerElementType(); 738 739 // If this is a constant expr gep that is effectively computing an 740 // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12' 741 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 742 if (!isa<ConstantInt>(Ops[i])) { 743 744 // If this is "gep i8* Ptr, (sub 0, V)", fold this as: 745 // "inttoptr (sub (ptrtoint Ptr), V)" 746 if (Ops.size() == 2 && ResultElementTy->isIntegerTy(8)) { 747 ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[1]); 748 assert((!CE || CE->getType() == IntPtrTy) && 749 "CastGEPIndices didn't canonicalize index types!"); 750 if (CE && CE->getOpcode() == Instruction::Sub && 751 CE->getOperand(0)->isNullValue()) { 752 Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType()); 753 Res = ConstantExpr::getSub(Res, CE->getOperand(1)); 754 Res = ConstantExpr::getIntToPtr(Res, ResultTy); 755 if (ConstantExpr *ResCE = dyn_cast<ConstantExpr>(Res)) 756 Res = ConstantFoldConstantExpression(ResCE, TD, TLI); 757 return Res; 758 } 759 } 760 return nullptr; 761 } 762 763 unsigned BitWidth = TD->getTypeSizeInBits(IntPtrTy); 764 APInt Offset = 765 APInt(BitWidth, TD->getIndexedOffset(Ptr->getType(), 766 makeArrayRef((Value *const*) 767 Ops.data() + 1, 768 Ops.size() - 1))); 769 Ptr = StripPtrCastKeepAS(Ptr); 770 771 // If this is a GEP of a GEP, fold it all into a single GEP. 772 while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) { 773 SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end()); 774 775 // Do not try the incorporate the sub-GEP if some index is not a number. 776 bool AllConstantInt = true; 777 for (unsigned i = 0, e = NestedOps.size(); i != e; ++i) 778 if (!isa<ConstantInt>(NestedOps[i])) { 779 AllConstantInt = false; 780 break; 781 } 782 if (!AllConstantInt) 783 break; 784 785 Ptr = cast<Constant>(GEP->getOperand(0)); 786 Offset += APInt(BitWidth, 787 TD->getIndexedOffset(Ptr->getType(), NestedOps)); 788 Ptr = StripPtrCastKeepAS(Ptr); 789 } 790 791 // If the base value for this address is a literal integer value, fold the 792 // getelementptr to the resulting integer value casted to the pointer type. 793 APInt BasePtr(BitWidth, 0); 794 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) { 795 if (CE->getOpcode() == Instruction::IntToPtr) { 796 if (ConstantInt *Base = dyn_cast<ConstantInt>(CE->getOperand(0))) 797 BasePtr = Base->getValue().zextOrTrunc(BitWidth); 798 } 799 } 800 801 if (Ptr->isNullValue() || BasePtr != 0) { 802 Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr); 803 return ConstantExpr::getIntToPtr(C, ResultTy); 804 } 805 806 // Otherwise form a regular getelementptr. Recompute the indices so that 807 // we eliminate over-indexing of the notional static type array bounds. 808 // This makes it easy to determine if the getelementptr is "inbounds". 809 // Also, this helps GlobalOpt do SROA on GlobalVariables. 810 Type *Ty = Ptr->getType(); 811 assert(Ty->isPointerTy() && "Forming regular GEP of non-pointer type"); 812 SmallVector<Constant *, 32> NewIdxs; 813 814 do { 815 if (SequentialType *ATy = dyn_cast<SequentialType>(Ty)) { 816 if (ATy->isPointerTy()) { 817 // The only pointer indexing we'll do is on the first index of the GEP. 818 if (!NewIdxs.empty()) 819 break; 820 821 // Only handle pointers to sized types, not pointers to functions. 822 if (!ATy->getElementType()->isSized()) 823 return nullptr; 824 } 825 826 // Determine which element of the array the offset points into. 827 APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType())); 828 if (ElemSize == 0) 829 // The element size is 0. This may be [0 x Ty]*, so just use a zero 830 // index for this level and proceed to the next level to see if it can 831 // accommodate the offset. 832 NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0)); 833 else { 834 // The element size is non-zero divide the offset by the element 835 // size (rounding down), to compute the index at this level. 836 APInt NewIdx = Offset.udiv(ElemSize); 837 Offset -= NewIdx * ElemSize; 838 NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx)); 839 } 840 Ty = ATy->getElementType(); 841 } else if (StructType *STy = dyn_cast<StructType>(Ty)) { 842 // If we end up with an offset that isn't valid for this struct type, we 843 // can't re-form this GEP in a regular form, so bail out. The pointer 844 // operand likely went through casts that are necessary to make the GEP 845 // sensible. 846 const StructLayout &SL = *TD->getStructLayout(STy); 847 if (Offset.uge(SL.getSizeInBytes())) 848 break; 849 850 // Determine which field of the struct the offset points into. The 851 // getZExtValue is fine as we've already ensured that the offset is 852 // within the range representable by the StructLayout API. 853 unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue()); 854 NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 855 ElIdx)); 856 Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx)); 857 Ty = STy->getTypeAtIndex(ElIdx); 858 } else { 859 // We've reached some non-indexable type. 860 break; 861 } 862 } while (Ty != ResultElementTy); 863 864 // If we haven't used up the entire offset by descending the static 865 // type, then the offset is pointing into the middle of an indivisible 866 // member, so we can't simplify it. 867 if (Offset != 0) 868 return nullptr; 869 870 // Create a GEP. 871 Constant *C = ConstantExpr::getGetElementPtr(Ptr, NewIdxs); 872 assert(C->getType()->getPointerElementType() == Ty && 873 "Computed GetElementPtr has unexpected type!"); 874 875 // If we ended up indexing a member with a type that doesn't match 876 // the type of what the original indices indexed, add a cast. 877 if (Ty != ResultElementTy) 878 C = FoldBitCast(C, ResultTy, *TD); 879 880 return C; 881 } 882 883 884 885 //===----------------------------------------------------------------------===// 886 // Constant Folding public APIs 887 //===----------------------------------------------------------------------===// 888 889 /// ConstantFoldInstruction - Try to constant fold the specified instruction. 890 /// If successful, the constant result is returned, if not, null is returned. 891 /// Note that this fails if not all of the operands are constant. Otherwise, 892 /// this function can only fail when attempting to fold instructions like loads 893 /// and stores, which have no constant expression form. 894 Constant *llvm::ConstantFoldInstruction(Instruction *I, 895 const DataLayout *TD, 896 const TargetLibraryInfo *TLI) { 897 // Handle PHI nodes quickly here... 898 if (PHINode *PN = dyn_cast<PHINode>(I)) { 899 Constant *CommonValue = nullptr; 900 901 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 902 Value *Incoming = PN->getIncomingValue(i); 903 // If the incoming value is undef then skip it. Note that while we could 904 // skip the value if it is equal to the phi node itself we choose not to 905 // because that would break the rule that constant folding only applies if 906 // all operands are constants. 907 if (isa<UndefValue>(Incoming)) 908 continue; 909 // If the incoming value is not a constant, then give up. 910 Constant *C = dyn_cast<Constant>(Incoming); 911 if (!C) 912 return nullptr; 913 // Fold the PHI's operands. 914 if (ConstantExpr *NewC = dyn_cast<ConstantExpr>(C)) 915 C = ConstantFoldConstantExpression(NewC, TD, TLI); 916 // If the incoming value is a different constant to 917 // the one we saw previously, then give up. 918 if (CommonValue && C != CommonValue) 919 return nullptr; 920 CommonValue = C; 921 } 922 923 924 // If we reach here, all incoming values are the same constant or undef. 925 return CommonValue ? CommonValue : UndefValue::get(PN->getType()); 926 } 927 928 // Scan the operand list, checking to see if they are all constants, if so, 929 // hand off to ConstantFoldInstOperands. 930 SmallVector<Constant*, 8> Ops; 931 for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i) { 932 Constant *Op = dyn_cast<Constant>(*i); 933 if (!Op) 934 return nullptr; // All operands not constant! 935 936 // Fold the Instruction's operands. 937 if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(Op)) 938 Op = ConstantFoldConstantExpression(NewCE, TD, TLI); 939 940 Ops.push_back(Op); 941 } 942 943 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 944 return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1], 945 TD, TLI); 946 947 if (const LoadInst *LI = dyn_cast<LoadInst>(I)) 948 return ConstantFoldLoadInst(LI, TD); 949 950 if (InsertValueInst *IVI = dyn_cast<InsertValueInst>(I)) { 951 return ConstantExpr::getInsertValue( 952 cast<Constant>(IVI->getAggregateOperand()), 953 cast<Constant>(IVI->getInsertedValueOperand()), 954 IVI->getIndices()); 955 } 956 957 if (ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I)) { 958 return ConstantExpr::getExtractValue( 959 cast<Constant>(EVI->getAggregateOperand()), 960 EVI->getIndices()); 961 } 962 963 return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Ops, TD, TLI); 964 } 965 966 static Constant * 967 ConstantFoldConstantExpressionImpl(const ConstantExpr *CE, const DataLayout *TD, 968 const TargetLibraryInfo *TLI, 969 SmallPtrSet<ConstantExpr *, 4> &FoldedOps) { 970 SmallVector<Constant *, 8> Ops; 971 for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end(); i != e; 972 ++i) { 973 Constant *NewC = cast<Constant>(*i); 974 // Recursively fold the ConstantExpr's operands. If we have already folded 975 // a ConstantExpr, we don't have to process it again. 976 if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(NewC)) { 977 if (FoldedOps.insert(NewCE)) 978 NewC = ConstantFoldConstantExpressionImpl(NewCE, TD, TLI, FoldedOps); 979 } 980 Ops.push_back(NewC); 981 } 982 983 if (CE->isCompare()) 984 return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1], 985 TD, TLI); 986 return ConstantFoldInstOperands(CE->getOpcode(), CE->getType(), Ops, TD, TLI); 987 } 988 989 /// ConstantFoldConstantExpression - Attempt to fold the constant expression 990 /// using the specified DataLayout. If successful, the constant result is 991 /// result is returned, if not, null is returned. 992 Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE, 993 const DataLayout *TD, 994 const TargetLibraryInfo *TLI) { 995 SmallPtrSet<ConstantExpr *, 4> FoldedOps; 996 return ConstantFoldConstantExpressionImpl(CE, TD, TLI, FoldedOps); 997 } 998 999 /// ConstantFoldInstOperands - Attempt to constant fold an instruction with the 1000 /// specified opcode and operands. If successful, the constant result is 1001 /// returned, if not, null is returned. Note that this function can fail when 1002 /// attempting to fold instructions like loads and stores, which have no 1003 /// constant expression form. 1004 /// 1005 /// TODO: This function neither utilizes nor preserves nsw/nuw/inbounds/etc 1006 /// information, due to only being passed an opcode and operands. Constant 1007 /// folding using this function strips this information. 1008 /// 1009 Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy, 1010 ArrayRef<Constant *> Ops, 1011 const DataLayout *TD, 1012 const TargetLibraryInfo *TLI) { 1013 // Handle easy binops first. 1014 if (Instruction::isBinaryOp(Opcode)) { 1015 if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1])) { 1016 if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD)) 1017 return C; 1018 } 1019 1020 return ConstantExpr::get(Opcode, Ops[0], Ops[1]); 1021 } 1022 1023 switch (Opcode) { 1024 default: return nullptr; 1025 case Instruction::ICmp: 1026 case Instruction::FCmp: llvm_unreachable("Invalid for compares"); 1027 case Instruction::Call: 1028 if (Function *F = dyn_cast<Function>(Ops.back())) 1029 if (canConstantFoldCallTo(F)) 1030 return ConstantFoldCall(F, Ops.slice(0, Ops.size() - 1), TLI); 1031 return nullptr; 1032 case Instruction::PtrToInt: 1033 // If the input is a inttoptr, eliminate the pair. This requires knowing 1034 // the width of a pointer, so it can't be done in ConstantExpr::getCast. 1035 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) { 1036 if (TD && CE->getOpcode() == Instruction::IntToPtr) { 1037 Constant *Input = CE->getOperand(0); 1038 unsigned InWidth = Input->getType()->getScalarSizeInBits(); 1039 unsigned PtrWidth = TD->getPointerTypeSizeInBits(CE->getType()); 1040 if (PtrWidth < InWidth) { 1041 Constant *Mask = 1042 ConstantInt::get(CE->getContext(), 1043 APInt::getLowBitsSet(InWidth, PtrWidth)); 1044 Input = ConstantExpr::getAnd(Input, Mask); 1045 } 1046 // Do a zext or trunc to get to the dest size. 1047 return ConstantExpr::getIntegerCast(Input, DestTy, false); 1048 } 1049 } 1050 return ConstantExpr::getCast(Opcode, Ops[0], DestTy); 1051 case Instruction::IntToPtr: 1052 // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if 1053 // the int size is >= the ptr size and the address spaces are the same. 1054 // This requires knowing the width of a pointer, so it can't be done in 1055 // ConstantExpr::getCast. 1056 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) { 1057 if (TD && CE->getOpcode() == Instruction::PtrToInt) { 1058 Constant *SrcPtr = CE->getOperand(0); 1059 unsigned SrcPtrSize = TD->getPointerTypeSizeInBits(SrcPtr->getType()); 1060 unsigned MidIntSize = CE->getType()->getScalarSizeInBits(); 1061 1062 if (MidIntSize >= SrcPtrSize) { 1063 unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace(); 1064 if (SrcAS == DestTy->getPointerAddressSpace()) 1065 return FoldBitCast(CE->getOperand(0), DestTy, *TD); 1066 } 1067 } 1068 } 1069 1070 return ConstantExpr::getCast(Opcode, Ops[0], DestTy); 1071 case Instruction::Trunc: 1072 case Instruction::ZExt: 1073 case Instruction::SExt: 1074 case Instruction::FPTrunc: 1075 case Instruction::FPExt: 1076 case Instruction::UIToFP: 1077 case Instruction::SIToFP: 1078 case Instruction::FPToUI: 1079 case Instruction::FPToSI: 1080 case Instruction::AddrSpaceCast: 1081 return ConstantExpr::getCast(Opcode, Ops[0], DestTy); 1082 case Instruction::BitCast: 1083 if (TD) 1084 return FoldBitCast(Ops[0], DestTy, *TD); 1085 return ConstantExpr::getBitCast(Ops[0], DestTy); 1086 case Instruction::Select: 1087 return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]); 1088 case Instruction::ExtractElement: 1089 return ConstantExpr::getExtractElement(Ops[0], Ops[1]); 1090 case Instruction::InsertElement: 1091 return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]); 1092 case Instruction::ShuffleVector: 1093 return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]); 1094 case Instruction::GetElementPtr: 1095 if (Constant *C = CastGEPIndices(Ops, DestTy, TD, TLI)) 1096 return C; 1097 if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, TD, TLI)) 1098 return C; 1099 1100 return ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1)); 1101 } 1102 } 1103 1104 /// ConstantFoldCompareInstOperands - Attempt to constant fold a compare 1105 /// instruction (icmp/fcmp) with the specified operands. If it fails, it 1106 /// returns a constant expression of the specified operands. 1107 /// 1108 Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate, 1109 Constant *Ops0, Constant *Ops1, 1110 const DataLayout *TD, 1111 const TargetLibraryInfo *TLI) { 1112 // fold: icmp (inttoptr x), null -> icmp x, 0 1113 // fold: icmp (ptrtoint x), 0 -> icmp x, null 1114 // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y 1115 // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y 1116 // 1117 // ConstantExpr::getCompare cannot do this, because it doesn't have TD 1118 // around to know if bit truncation is happening. 1119 if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops0)) { 1120 if (TD && Ops1->isNullValue()) { 1121 if (CE0->getOpcode() == Instruction::IntToPtr) { 1122 Type *IntPtrTy = TD->getIntPtrType(CE0->getType()); 1123 // Convert the integer value to the right size to ensure we get the 1124 // proper extension or truncation. 1125 Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0), 1126 IntPtrTy, false); 1127 Constant *Null = Constant::getNullValue(C->getType()); 1128 return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI); 1129 } 1130 1131 // Only do this transformation if the int is intptrty in size, otherwise 1132 // there is a truncation or extension that we aren't modeling. 1133 if (CE0->getOpcode() == Instruction::PtrToInt) { 1134 Type *IntPtrTy = TD->getIntPtrType(CE0->getOperand(0)->getType()); 1135 if (CE0->getType() == IntPtrTy) { 1136 Constant *C = CE0->getOperand(0); 1137 Constant *Null = Constant::getNullValue(C->getType()); 1138 return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI); 1139 } 1140 } 1141 } 1142 1143 if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) { 1144 if (TD && CE0->getOpcode() == CE1->getOpcode()) { 1145 if (CE0->getOpcode() == Instruction::IntToPtr) { 1146 Type *IntPtrTy = TD->getIntPtrType(CE0->getType()); 1147 1148 // Convert the integer value to the right size to ensure we get the 1149 // proper extension or truncation. 1150 Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0), 1151 IntPtrTy, false); 1152 Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0), 1153 IntPtrTy, false); 1154 return ConstantFoldCompareInstOperands(Predicate, C0, C1, TD, TLI); 1155 } 1156 1157 // Only do this transformation if the int is intptrty in size, otherwise 1158 // there is a truncation or extension that we aren't modeling. 1159 if (CE0->getOpcode() == Instruction::PtrToInt) { 1160 Type *IntPtrTy = TD->getIntPtrType(CE0->getOperand(0)->getType()); 1161 if (CE0->getType() == IntPtrTy && 1162 CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) { 1163 return ConstantFoldCompareInstOperands(Predicate, 1164 CE0->getOperand(0), 1165 CE1->getOperand(0), 1166 TD, 1167 TLI); 1168 } 1169 } 1170 } 1171 } 1172 1173 // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0) 1174 // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0) 1175 if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) && 1176 CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) { 1177 Constant *LHS = 1178 ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), Ops1, 1179 TD, TLI); 1180 Constant *RHS = 1181 ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(1), Ops1, 1182 TD, TLI); 1183 unsigned OpC = 1184 Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or; 1185 Constant *Ops[] = { LHS, RHS }; 1186 return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, TD, TLI); 1187 } 1188 } 1189 1190 return ConstantExpr::getCompare(Predicate, Ops0, Ops1); 1191 } 1192 1193 1194 /// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a 1195 /// getelementptr constantexpr, return the constant value being addressed by the 1196 /// constant expression, or null if something is funny and we can't decide. 1197 Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C, 1198 ConstantExpr *CE) { 1199 if (!CE->getOperand(1)->isNullValue()) 1200 return nullptr; // Do not allow stepping over the value! 1201 1202 // Loop over all of the operands, tracking down which value we are 1203 // addressing. 1204 for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) { 1205 C = C->getAggregateElement(CE->getOperand(i)); 1206 if (!C) 1207 return nullptr; 1208 } 1209 return C; 1210 } 1211 1212 /// ConstantFoldLoadThroughGEPIndices - Given a constant and getelementptr 1213 /// indices (with an *implied* zero pointer index that is not in the list), 1214 /// return the constant value being addressed by a virtual load, or null if 1215 /// something is funny and we can't decide. 1216 Constant *llvm::ConstantFoldLoadThroughGEPIndices(Constant *C, 1217 ArrayRef<Constant*> Indices) { 1218 // Loop over all of the operands, tracking down which value we are 1219 // addressing. 1220 for (unsigned i = 0, e = Indices.size(); i != e; ++i) { 1221 C = C->getAggregateElement(Indices[i]); 1222 if (!C) 1223 return nullptr; 1224 } 1225 return C; 1226 } 1227 1228 1229 //===----------------------------------------------------------------------===// 1230 // Constant Folding for Calls 1231 // 1232 1233 /// canConstantFoldCallTo - Return true if its even possible to fold a call to 1234 /// the specified function. 1235 bool llvm::canConstantFoldCallTo(const Function *F) { 1236 switch (F->getIntrinsicID()) { 1237 case Intrinsic::fabs: 1238 case Intrinsic::log: 1239 case Intrinsic::log2: 1240 case Intrinsic::log10: 1241 case Intrinsic::exp: 1242 case Intrinsic::exp2: 1243 case Intrinsic::floor: 1244 case Intrinsic::ceil: 1245 case Intrinsic::sqrt: 1246 case Intrinsic::pow: 1247 case Intrinsic::powi: 1248 case Intrinsic::bswap: 1249 case Intrinsic::ctpop: 1250 case Intrinsic::ctlz: 1251 case Intrinsic::cttz: 1252 case Intrinsic::fma: 1253 case Intrinsic::fmuladd: 1254 case Intrinsic::copysign: 1255 case Intrinsic::round: 1256 case Intrinsic::sadd_with_overflow: 1257 case Intrinsic::uadd_with_overflow: 1258 case Intrinsic::ssub_with_overflow: 1259 case Intrinsic::usub_with_overflow: 1260 case Intrinsic::smul_with_overflow: 1261 case Intrinsic::umul_with_overflow: 1262 case Intrinsic::convert_from_fp16: 1263 case Intrinsic::convert_to_fp16: 1264 case Intrinsic::x86_sse_cvtss2si: 1265 case Intrinsic::x86_sse_cvtss2si64: 1266 case Intrinsic::x86_sse_cvttss2si: 1267 case Intrinsic::x86_sse_cvttss2si64: 1268 case Intrinsic::x86_sse2_cvtsd2si: 1269 case Intrinsic::x86_sse2_cvtsd2si64: 1270 case Intrinsic::x86_sse2_cvttsd2si: 1271 case Intrinsic::x86_sse2_cvttsd2si64: 1272 return true; 1273 default: 1274 return false; 1275 case 0: break; 1276 } 1277 1278 if (!F->hasName()) 1279 return false; 1280 StringRef Name = F->getName(); 1281 1282 // In these cases, the check of the length is required. We don't want to 1283 // return true for a name like "cos\0blah" which strcmp would return equal to 1284 // "cos", but has length 8. 1285 switch (Name[0]) { 1286 default: return false; 1287 case 'a': 1288 return Name == "acos" || Name == "asin" || Name == "atan" || Name =="atan2"; 1289 case 'c': 1290 return Name == "cos" || Name == "ceil" || Name == "cosf" || Name == "cosh"; 1291 case 'e': 1292 return Name == "exp" || Name == "exp2"; 1293 case 'f': 1294 return Name == "fabs" || Name == "fmod" || Name == "floor"; 1295 case 'l': 1296 return Name == "log" || Name == "log10"; 1297 case 'p': 1298 return Name == "pow"; 1299 case 's': 1300 return Name == "sin" || Name == "sinh" || Name == "sqrt" || 1301 Name == "sinf" || Name == "sqrtf"; 1302 case 't': 1303 return Name == "tan" || Name == "tanh"; 1304 } 1305 } 1306 1307 static Constant *GetConstantFoldFPValue(double V, Type *Ty) { 1308 if (Ty->isHalfTy()) { 1309 APFloat APF(V); 1310 bool unused; 1311 APF.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &unused); 1312 return ConstantFP::get(Ty->getContext(), APF); 1313 } 1314 if (Ty->isFloatTy()) 1315 return ConstantFP::get(Ty->getContext(), APFloat((float)V)); 1316 if (Ty->isDoubleTy()) 1317 return ConstantFP::get(Ty->getContext(), APFloat(V)); 1318 llvm_unreachable("Can only constant fold half/float/double"); 1319 1320 } 1321 1322 namespace { 1323 /// llvm_fenv_clearexcept - Clear the floating-point exception state. 1324 static inline void llvm_fenv_clearexcept() { 1325 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT 1326 feclearexcept(FE_ALL_EXCEPT); 1327 #endif 1328 errno = 0; 1329 } 1330 1331 /// llvm_fenv_testexcept - Test if a floating-point exception was raised. 1332 static inline bool llvm_fenv_testexcept() { 1333 int errno_val = errno; 1334 if (errno_val == ERANGE || errno_val == EDOM) 1335 return true; 1336 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT 1337 if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT)) 1338 return true; 1339 #endif 1340 return false; 1341 } 1342 } // End namespace 1343 1344 static Constant *ConstantFoldFP(double (*NativeFP)(double), double V, 1345 Type *Ty) { 1346 llvm_fenv_clearexcept(); 1347 V = NativeFP(V); 1348 if (llvm_fenv_testexcept()) { 1349 llvm_fenv_clearexcept(); 1350 return nullptr; 1351 } 1352 1353 return GetConstantFoldFPValue(V, Ty); 1354 } 1355 1356 static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), 1357 double V, double W, Type *Ty) { 1358 llvm_fenv_clearexcept(); 1359 V = NativeFP(V, W); 1360 if (llvm_fenv_testexcept()) { 1361 llvm_fenv_clearexcept(); 1362 return nullptr; 1363 } 1364 1365 return GetConstantFoldFPValue(V, Ty); 1366 } 1367 1368 /// ConstantFoldConvertToInt - Attempt to an SSE floating point to integer 1369 /// conversion of a constant floating point. If roundTowardZero is false, the 1370 /// default IEEE rounding is used (toward nearest, ties to even). This matches 1371 /// the behavior of the non-truncating SSE instructions in the default rounding 1372 /// mode. The desired integer type Ty is used to select how many bits are 1373 /// available for the result. Returns null if the conversion cannot be 1374 /// performed, otherwise returns the Constant value resulting from the 1375 /// conversion. 1376 static Constant *ConstantFoldConvertToInt(const APFloat &Val, 1377 bool roundTowardZero, Type *Ty) { 1378 // All of these conversion intrinsics form an integer of at most 64bits. 1379 unsigned ResultWidth = Ty->getIntegerBitWidth(); 1380 assert(ResultWidth <= 64 && 1381 "Can only constant fold conversions to 64 and 32 bit ints"); 1382 1383 uint64_t UIntVal; 1384 bool isExact = false; 1385 APFloat::roundingMode mode = roundTowardZero? APFloat::rmTowardZero 1386 : APFloat::rmNearestTiesToEven; 1387 APFloat::opStatus status = Val.convertToInteger(&UIntVal, ResultWidth, 1388 /*isSigned=*/true, mode, 1389 &isExact); 1390 if (status != APFloat::opOK && status != APFloat::opInexact) 1391 return nullptr; 1392 return ConstantInt::get(Ty, UIntVal, /*isSigned=*/true); 1393 } 1394 1395 static double getValueAsDouble(ConstantFP *Op) { 1396 Type *Ty = Op->getType(); 1397 1398 if (Ty->isFloatTy()) 1399 return Op->getValueAPF().convertToFloat(); 1400 1401 if (Ty->isDoubleTy()) 1402 return Op->getValueAPF().convertToDouble(); 1403 1404 bool unused; 1405 APFloat APF = Op->getValueAPF(); 1406 APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &unused); 1407 return APF.convertToDouble(); 1408 } 1409 1410 static Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, 1411 Type *Ty, ArrayRef<Constant *> Operands, 1412 const TargetLibraryInfo *TLI) { 1413 if (Operands.size() == 1) { 1414 if (ConstantFP *Op = dyn_cast<ConstantFP>(Operands[0])) { 1415 if (IntrinsicID == Intrinsic::convert_to_fp16) { 1416 APFloat Val(Op->getValueAPF()); 1417 1418 bool lost = false; 1419 Val.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &lost); 1420 1421 return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt()); 1422 } 1423 1424 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy()) 1425 return nullptr; 1426 1427 if (IntrinsicID == Intrinsic::round) { 1428 APFloat V = Op->getValueAPF(); 1429 V.roundToIntegral(APFloat::rmNearestTiesToAway); 1430 return ConstantFP::get(Ty->getContext(), V); 1431 } 1432 1433 /// We only fold functions with finite arguments. Folding NaN and inf is 1434 /// likely to be aborted with an exception anyway, and some host libms 1435 /// have known errors raising exceptions. 1436 if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity()) 1437 return nullptr; 1438 1439 /// Currently APFloat versions of these functions do not exist, so we use 1440 /// the host native double versions. Float versions are not called 1441 /// directly but for all these it is true (float)(f((double)arg)) == 1442 /// f(arg). Long double not supported yet. 1443 double V = getValueAsDouble(Op); 1444 1445 switch (IntrinsicID) { 1446 default: break; 1447 case Intrinsic::fabs: 1448 return ConstantFoldFP(fabs, V, Ty); 1449 #if HAVE_LOG2 1450 case Intrinsic::log2: 1451 return ConstantFoldFP(log2, V, Ty); 1452 #endif 1453 #if HAVE_LOG 1454 case Intrinsic::log: 1455 return ConstantFoldFP(log, V, Ty); 1456 #endif 1457 #if HAVE_LOG10 1458 case Intrinsic::log10: 1459 return ConstantFoldFP(log10, V, Ty); 1460 #endif 1461 #if HAVE_EXP 1462 case Intrinsic::exp: 1463 return ConstantFoldFP(exp, V, Ty); 1464 #endif 1465 #if HAVE_EXP2 1466 case Intrinsic::exp2: 1467 return ConstantFoldFP(exp2, V, Ty); 1468 #endif 1469 case Intrinsic::floor: 1470 return ConstantFoldFP(floor, V, Ty); 1471 case Intrinsic::ceil: 1472 return ConstantFoldFP(ceil, V, Ty); 1473 } 1474 1475 if (!TLI) 1476 return nullptr; 1477 1478 switch (Name[0]) { 1479 case 'a': 1480 if (Name == "acos" && TLI->has(LibFunc::acos)) 1481 return ConstantFoldFP(acos, V, Ty); 1482 else if (Name == "asin" && TLI->has(LibFunc::asin)) 1483 return ConstantFoldFP(asin, V, Ty); 1484 else if (Name == "atan" && TLI->has(LibFunc::atan)) 1485 return ConstantFoldFP(atan, V, Ty); 1486 break; 1487 case 'c': 1488 if (Name == "ceil" && TLI->has(LibFunc::ceil)) 1489 return ConstantFoldFP(ceil, V, Ty); 1490 else if (Name == "cos" && TLI->has(LibFunc::cos)) 1491 return ConstantFoldFP(cos, V, Ty); 1492 else if (Name == "cosh" && TLI->has(LibFunc::cosh)) 1493 return ConstantFoldFP(cosh, V, Ty); 1494 else if (Name == "cosf" && TLI->has(LibFunc::cosf)) 1495 return ConstantFoldFP(cos, V, Ty); 1496 break; 1497 case 'e': 1498 if (Name == "exp" && TLI->has(LibFunc::exp)) 1499 return ConstantFoldFP(exp, V, Ty); 1500 1501 if (Name == "exp2" && TLI->has(LibFunc::exp2)) { 1502 // Constant fold exp2(x) as pow(2,x) in case the host doesn't have a 1503 // C99 library. 1504 return ConstantFoldBinaryFP(pow, 2.0, V, Ty); 1505 } 1506 break; 1507 case 'f': 1508 if (Name == "fabs" && TLI->has(LibFunc::fabs)) 1509 return ConstantFoldFP(fabs, V, Ty); 1510 else if (Name == "floor" && TLI->has(LibFunc::floor)) 1511 return ConstantFoldFP(floor, V, Ty); 1512 break; 1513 case 'l': 1514 if (Name == "log" && V > 0 && TLI->has(LibFunc::log)) 1515 return ConstantFoldFP(log, V, Ty); 1516 else if (Name == "log10" && V > 0 && TLI->has(LibFunc::log10)) 1517 return ConstantFoldFP(log10, V, Ty); 1518 else if (IntrinsicID == Intrinsic::sqrt && 1519 (Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())) { 1520 if (V >= -0.0) 1521 return ConstantFoldFP(sqrt, V, Ty); 1522 else // Undefined 1523 return Constant::getNullValue(Ty); 1524 } 1525 break; 1526 case 's': 1527 if (Name == "sin" && TLI->has(LibFunc::sin)) 1528 return ConstantFoldFP(sin, V, Ty); 1529 else if (Name == "sinh" && TLI->has(LibFunc::sinh)) 1530 return ConstantFoldFP(sinh, V, Ty); 1531 else if (Name == "sqrt" && V >= 0 && TLI->has(LibFunc::sqrt)) 1532 return ConstantFoldFP(sqrt, V, Ty); 1533 else if (Name == "sqrtf" && V >= 0 && TLI->has(LibFunc::sqrtf)) 1534 return ConstantFoldFP(sqrt, V, Ty); 1535 else if (Name == "sinf" && TLI->has(LibFunc::sinf)) 1536 return ConstantFoldFP(sin, V, Ty); 1537 break; 1538 case 't': 1539 if (Name == "tan" && TLI->has(LibFunc::tan)) 1540 return ConstantFoldFP(tan, V, Ty); 1541 else if (Name == "tanh" && TLI->has(LibFunc::tanh)) 1542 return ConstantFoldFP(tanh, V, Ty); 1543 break; 1544 default: 1545 break; 1546 } 1547 return nullptr; 1548 } 1549 1550 if (ConstantInt *Op = dyn_cast<ConstantInt>(Operands[0])) { 1551 switch (IntrinsicID) { 1552 case Intrinsic::bswap: 1553 return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap()); 1554 case Intrinsic::ctpop: 1555 return ConstantInt::get(Ty, Op->getValue().countPopulation()); 1556 case Intrinsic::convert_from_fp16: { 1557 APFloat Val(APFloat::IEEEhalf, Op->getValue()); 1558 1559 bool lost = false; 1560 APFloat::opStatus status = 1561 Val.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &lost); 1562 1563 // Conversion is always precise. 1564 (void)status; 1565 assert(status == APFloat::opOK && !lost && 1566 "Precision lost during fp16 constfolding"); 1567 1568 return ConstantFP::get(Ty->getContext(), Val); 1569 } 1570 default: 1571 return nullptr; 1572 } 1573 } 1574 1575 // Support ConstantVector in case we have an Undef in the top. 1576 if (isa<ConstantVector>(Operands[0]) || 1577 isa<ConstantDataVector>(Operands[0])) { 1578 Constant *Op = cast<Constant>(Operands[0]); 1579 switch (IntrinsicID) { 1580 default: break; 1581 case Intrinsic::x86_sse_cvtss2si: 1582 case Intrinsic::x86_sse_cvtss2si64: 1583 case Intrinsic::x86_sse2_cvtsd2si: 1584 case Intrinsic::x86_sse2_cvtsd2si64: 1585 if (ConstantFP *FPOp = 1586 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U))) 1587 return ConstantFoldConvertToInt(FPOp->getValueAPF(), 1588 /*roundTowardZero=*/false, Ty); 1589 case Intrinsic::x86_sse_cvttss2si: 1590 case Intrinsic::x86_sse_cvttss2si64: 1591 case Intrinsic::x86_sse2_cvttsd2si: 1592 case Intrinsic::x86_sse2_cvttsd2si64: 1593 if (ConstantFP *FPOp = 1594 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U))) 1595 return ConstantFoldConvertToInt(FPOp->getValueAPF(), 1596 /*roundTowardZero=*/true, Ty); 1597 } 1598 } 1599 1600 if (isa<UndefValue>(Operands[0])) { 1601 if (IntrinsicID == Intrinsic::bswap) 1602 return Operands[0]; 1603 return nullptr; 1604 } 1605 1606 return nullptr; 1607 } 1608 1609 if (Operands.size() == 2) { 1610 if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) { 1611 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy()) 1612 return nullptr; 1613 double Op1V = getValueAsDouble(Op1); 1614 1615 if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) { 1616 if (Op2->getType() != Op1->getType()) 1617 return nullptr; 1618 1619 double Op2V = getValueAsDouble(Op2); 1620 if (IntrinsicID == Intrinsic::pow) { 1621 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty); 1622 } 1623 if (IntrinsicID == Intrinsic::copysign) { 1624 APFloat V1 = Op1->getValueAPF(); 1625 APFloat V2 = Op2->getValueAPF(); 1626 V1.copySign(V2); 1627 return ConstantFP::get(Ty->getContext(), V1); 1628 } 1629 if (!TLI) 1630 return nullptr; 1631 if (Name == "pow" && TLI->has(LibFunc::pow)) 1632 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty); 1633 if (Name == "fmod" && TLI->has(LibFunc::fmod)) 1634 return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty); 1635 if (Name == "atan2" && TLI->has(LibFunc::atan2)) 1636 return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty); 1637 } else if (ConstantInt *Op2C = dyn_cast<ConstantInt>(Operands[1])) { 1638 if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy()) 1639 return ConstantFP::get(Ty->getContext(), 1640 APFloat((float)std::pow((float)Op1V, 1641 (int)Op2C->getZExtValue()))); 1642 if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy()) 1643 return ConstantFP::get(Ty->getContext(), 1644 APFloat((float)std::pow((float)Op1V, 1645 (int)Op2C->getZExtValue()))); 1646 if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy()) 1647 return ConstantFP::get(Ty->getContext(), 1648 APFloat((double)std::pow((double)Op1V, 1649 (int)Op2C->getZExtValue()))); 1650 } 1651 return nullptr; 1652 } 1653 1654 if (ConstantInt *Op1 = dyn_cast<ConstantInt>(Operands[0])) { 1655 if (ConstantInt *Op2 = dyn_cast<ConstantInt>(Operands[1])) { 1656 switch (IntrinsicID) { 1657 default: break; 1658 case Intrinsic::sadd_with_overflow: 1659 case Intrinsic::uadd_with_overflow: 1660 case Intrinsic::ssub_with_overflow: 1661 case Intrinsic::usub_with_overflow: 1662 case Intrinsic::smul_with_overflow: 1663 case Intrinsic::umul_with_overflow: { 1664 APInt Res; 1665 bool Overflow; 1666 switch (IntrinsicID) { 1667 default: llvm_unreachable("Invalid case"); 1668 case Intrinsic::sadd_with_overflow: 1669 Res = Op1->getValue().sadd_ov(Op2->getValue(), Overflow); 1670 break; 1671 case Intrinsic::uadd_with_overflow: 1672 Res = Op1->getValue().uadd_ov(Op2->getValue(), Overflow); 1673 break; 1674 case Intrinsic::ssub_with_overflow: 1675 Res = Op1->getValue().ssub_ov(Op2->getValue(), Overflow); 1676 break; 1677 case Intrinsic::usub_with_overflow: 1678 Res = Op1->getValue().usub_ov(Op2->getValue(), Overflow); 1679 break; 1680 case Intrinsic::smul_with_overflow: 1681 Res = Op1->getValue().smul_ov(Op2->getValue(), Overflow); 1682 break; 1683 case Intrinsic::umul_with_overflow: 1684 Res = Op1->getValue().umul_ov(Op2->getValue(), Overflow); 1685 break; 1686 } 1687 Constant *Ops[] = { 1688 ConstantInt::get(Ty->getContext(), Res), 1689 ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow) 1690 }; 1691 return ConstantStruct::get(cast<StructType>(Ty), Ops); 1692 } 1693 case Intrinsic::cttz: 1694 if (Op2->isOne() && Op1->isZero()) // cttz(0, 1) is undef. 1695 return UndefValue::get(Ty); 1696 return ConstantInt::get(Ty, Op1->getValue().countTrailingZeros()); 1697 case Intrinsic::ctlz: 1698 if (Op2->isOne() && Op1->isZero()) // ctlz(0, 1) is undef. 1699 return UndefValue::get(Ty); 1700 return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros()); 1701 } 1702 } 1703 1704 return nullptr; 1705 } 1706 return nullptr; 1707 } 1708 1709 if (Operands.size() != 3) 1710 return nullptr; 1711 1712 if (const ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) { 1713 if (const ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) { 1714 if (const ConstantFP *Op3 = dyn_cast<ConstantFP>(Operands[2])) { 1715 switch (IntrinsicID) { 1716 default: break; 1717 case Intrinsic::fma: 1718 case Intrinsic::fmuladd: { 1719 APFloat V = Op1->getValueAPF(); 1720 APFloat::opStatus s = V.fusedMultiplyAdd(Op2->getValueAPF(), 1721 Op3->getValueAPF(), 1722 APFloat::rmNearestTiesToEven); 1723 if (s != APFloat::opInvalidOp) 1724 return ConstantFP::get(Ty->getContext(), V); 1725 1726 return nullptr; 1727 } 1728 } 1729 } 1730 } 1731 } 1732 1733 return nullptr; 1734 } 1735 1736 static Constant *ConstantFoldVectorCall(StringRef Name, unsigned IntrinsicID, 1737 VectorType *VTy, 1738 ArrayRef<Constant *> Operands, 1739 const TargetLibraryInfo *TLI) { 1740 SmallVector<Constant *, 4> Result(VTy->getNumElements()); 1741 SmallVector<Constant *, 4> Lane(Operands.size()); 1742 Type *Ty = VTy->getElementType(); 1743 1744 for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) { 1745 // Gather a column of constants. 1746 for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) { 1747 Constant *Agg = Operands[J]->getAggregateElement(I); 1748 if (!Agg) 1749 return nullptr; 1750 1751 Lane[J] = Agg; 1752 } 1753 1754 // Use the regular scalar folding to simplify this column. 1755 Constant *Folded = ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI); 1756 if (!Folded) 1757 return nullptr; 1758 Result[I] = Folded; 1759 } 1760 1761 return ConstantVector::get(Result); 1762 } 1763 1764 /// ConstantFoldCall - Attempt to constant fold a call to the specified function 1765 /// with the specified arguments, returning null if unsuccessful. 1766 Constant * 1767 llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands, 1768 const TargetLibraryInfo *TLI) { 1769 if (!F->hasName()) 1770 return nullptr; 1771 StringRef Name = F->getName(); 1772 1773 Type *Ty = F->getReturnType(); 1774 1775 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) 1776 return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands, TLI); 1777 1778 return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI); 1779 } 1780