1 //===-- ConstantFolding.cpp - Fold instructions into constants ------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file defines routines for folding instructions into constants. 11 // 12 // Also, to supplement the basic IR ConstantExpr simplifications, 13 // this file defines some additional folding routines that can make use of 14 // DataLayout information. These functions cannot go in IR due to library 15 // dependency issues. 16 // 17 //===----------------------------------------------------------------------===// 18 19 #include "llvm/Analysis/ConstantFolding.h" 20 #include "llvm/ADT/SmallPtrSet.h" 21 #include "llvm/ADT/SmallVector.h" 22 #include "llvm/ADT/StringMap.h" 23 #include "llvm/Analysis/TargetLibraryInfo.h" 24 #include "llvm/Analysis/ValueTracking.h" 25 #include "llvm/Config/config.h" 26 #include "llvm/IR/Constants.h" 27 #include "llvm/IR/DataLayout.h" 28 #include "llvm/IR/DerivedTypes.h" 29 #include "llvm/IR/Function.h" 30 #include "llvm/IR/GetElementPtrTypeIterator.h" 31 #include "llvm/IR/GlobalVariable.h" 32 #include "llvm/IR/Instructions.h" 33 #include "llvm/IR/Intrinsics.h" 34 #include "llvm/IR/Operator.h" 35 #include "llvm/Support/ErrorHandling.h" 36 #include "llvm/Support/MathExtras.h" 37 #include <cerrno> 38 #include <cmath> 39 40 #ifdef HAVE_FENV_H 41 #include <fenv.h> 42 #endif 43 44 using namespace llvm; 45 46 //===----------------------------------------------------------------------===// 47 // Constant Folding internal helper functions 48 //===----------------------------------------------------------------------===// 49 50 /// Constant fold bitcast, symbolically evaluating it with DataLayout. 51 /// This always returns a non-null constant, but it may be a 52 /// ConstantExpr if unfoldable. 53 static Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) { 54 // Catch the obvious splat cases. 55 if (C->isNullValue() && !DestTy->isX86_MMXTy()) 56 return Constant::getNullValue(DestTy); 57 if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() && 58 !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types! 59 return Constant::getAllOnesValue(DestTy); 60 61 // Handle a vector->integer cast. 62 if (IntegerType *IT = dyn_cast<IntegerType>(DestTy)) { 63 VectorType *VTy = dyn_cast<VectorType>(C->getType()); 64 if (!VTy) 65 return ConstantExpr::getBitCast(C, DestTy); 66 67 unsigned NumSrcElts = VTy->getNumElements(); 68 Type *SrcEltTy = VTy->getElementType(); 69 70 // If the vector is a vector of floating point, convert it to vector of int 71 // to simplify things. 72 if (SrcEltTy->isFloatingPointTy()) { 73 unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits(); 74 Type *SrcIVTy = 75 VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElts); 76 // Ask IR to do the conversion now that #elts line up. 77 C = ConstantExpr::getBitCast(C, SrcIVTy); 78 } 79 80 ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(C); 81 if (!CDV) 82 return ConstantExpr::getBitCast(C, DestTy); 83 84 // Now that we know that the input value is a vector of integers, just shift 85 // and insert them into our result. 86 unsigned BitShift = DL.getTypeAllocSizeInBits(SrcEltTy); 87 APInt Result(IT->getBitWidth(), 0); 88 for (unsigned i = 0; i != NumSrcElts; ++i) { 89 Result <<= BitShift; 90 if (DL.isLittleEndian()) 91 Result |= CDV->getElementAsInteger(NumSrcElts-i-1); 92 else 93 Result |= CDV->getElementAsInteger(i); 94 } 95 96 return ConstantInt::get(IT, Result); 97 } 98 99 // The code below only handles casts to vectors currently. 100 VectorType *DestVTy = dyn_cast<VectorType>(DestTy); 101 if (!DestVTy) 102 return ConstantExpr::getBitCast(C, DestTy); 103 104 // If this is a scalar -> vector cast, convert the input into a <1 x scalar> 105 // vector so the code below can handle it uniformly. 106 if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) { 107 Constant *Ops = C; // don't take the address of C! 108 return FoldBitCast(ConstantVector::get(Ops), DestTy, DL); 109 } 110 111 // If this is a bitcast from constant vector -> vector, fold it. 112 if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C)) 113 return ConstantExpr::getBitCast(C, DestTy); 114 115 // If the element types match, IR can fold it. 116 unsigned NumDstElt = DestVTy->getNumElements(); 117 unsigned NumSrcElt = C->getType()->getVectorNumElements(); 118 if (NumDstElt == NumSrcElt) 119 return ConstantExpr::getBitCast(C, DestTy); 120 121 Type *SrcEltTy = C->getType()->getVectorElementType(); 122 Type *DstEltTy = DestVTy->getElementType(); 123 124 // Otherwise, we're changing the number of elements in a vector, which 125 // requires endianness information to do the right thing. For example, 126 // bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>) 127 // folds to (little endian): 128 // <4 x i32> <i32 0, i32 0, i32 1, i32 0> 129 // and to (big endian): 130 // <4 x i32> <i32 0, i32 0, i32 0, i32 1> 131 132 // First thing is first. We only want to think about integer here, so if 133 // we have something in FP form, recast it as integer. 134 if (DstEltTy->isFloatingPointTy()) { 135 // Fold to an vector of integers with same size as our FP type. 136 unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits(); 137 Type *DestIVTy = 138 VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt); 139 // Recursively handle this integer conversion, if possible. 140 C = FoldBitCast(C, DestIVTy, DL); 141 142 // Finally, IR can handle this now that #elts line up. 143 return ConstantExpr::getBitCast(C, DestTy); 144 } 145 146 // Okay, we know the destination is integer, if the input is FP, convert 147 // it to integer first. 148 if (SrcEltTy->isFloatingPointTy()) { 149 unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits(); 150 Type *SrcIVTy = 151 VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt); 152 // Ask IR to do the conversion now that #elts line up. 153 C = ConstantExpr::getBitCast(C, SrcIVTy); 154 // If IR wasn't able to fold it, bail out. 155 if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector. 156 !isa<ConstantDataVector>(C)) 157 return C; 158 } 159 160 // Now we know that the input and output vectors are both integer vectors 161 // of the same size, and that their #elements is not the same. Do the 162 // conversion here, which depends on whether the input or output has 163 // more elements. 164 bool isLittleEndian = DL.isLittleEndian(); 165 166 SmallVector<Constant*, 32> Result; 167 if (NumDstElt < NumSrcElt) { 168 // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>) 169 Constant *Zero = Constant::getNullValue(DstEltTy); 170 unsigned Ratio = NumSrcElt/NumDstElt; 171 unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits(); 172 unsigned SrcElt = 0; 173 for (unsigned i = 0; i != NumDstElt; ++i) { 174 // Build each element of the result. 175 Constant *Elt = Zero; 176 unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1); 177 for (unsigned j = 0; j != Ratio; ++j) { 178 Constant *Src =dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++)); 179 if (!Src) // Reject constantexpr elements. 180 return ConstantExpr::getBitCast(C, DestTy); 181 182 // Zero extend the element to the right size. 183 Src = ConstantExpr::getZExt(Src, Elt->getType()); 184 185 // Shift it to the right place, depending on endianness. 186 Src = ConstantExpr::getShl(Src, 187 ConstantInt::get(Src->getType(), ShiftAmt)); 188 ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize; 189 190 // Mix it in. 191 Elt = ConstantExpr::getOr(Elt, Src); 192 } 193 Result.push_back(Elt); 194 } 195 return ConstantVector::get(Result); 196 } 197 198 // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>) 199 unsigned Ratio = NumDstElt/NumSrcElt; 200 unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy); 201 202 // Loop over each source value, expanding into multiple results. 203 for (unsigned i = 0; i != NumSrcElt; ++i) { 204 Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i)); 205 if (!Src) // Reject constantexpr elements. 206 return ConstantExpr::getBitCast(C, DestTy); 207 208 unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1); 209 for (unsigned j = 0; j != Ratio; ++j) { 210 // Shift the piece of the value into the right place, depending on 211 // endianness. 212 Constant *Elt = ConstantExpr::getLShr(Src, 213 ConstantInt::get(Src->getType(), ShiftAmt)); 214 ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize; 215 216 // Truncate the element to an integer with the same pointer size and 217 // convert the element back to a pointer using a inttoptr. 218 if (DstEltTy->isPointerTy()) { 219 IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize); 220 Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy); 221 Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy)); 222 continue; 223 } 224 225 // Truncate and remember this piece. 226 Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy)); 227 } 228 } 229 230 return ConstantVector::get(Result); 231 } 232 233 234 /// If this constant is a constant offset from a global, return the global and 235 /// the constant. Because of constantexprs, this function is recursive. 236 static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, 237 APInt &Offset, const DataLayout &DL) { 238 // Trivial case, constant is the global. 239 if ((GV = dyn_cast<GlobalValue>(C))) { 240 unsigned BitWidth = DL.getPointerTypeSizeInBits(GV->getType()); 241 Offset = APInt(BitWidth, 0); 242 return true; 243 } 244 245 // Otherwise, if this isn't a constant expr, bail out. 246 ConstantExpr *CE = dyn_cast<ConstantExpr>(C); 247 if (!CE) return false; 248 249 // Look through ptr->int and ptr->ptr casts. 250 if (CE->getOpcode() == Instruction::PtrToInt || 251 CE->getOpcode() == Instruction::BitCast) 252 return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL); 253 254 // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5) 255 GEPOperator *GEP = dyn_cast<GEPOperator>(CE); 256 if (!GEP) 257 return false; 258 259 unsigned BitWidth = DL.getPointerTypeSizeInBits(GEP->getType()); 260 APInt TmpOffset(BitWidth, 0); 261 262 // If the base isn't a global+constant, we aren't either. 263 if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL)) 264 return false; 265 266 // Otherwise, add any offset that our operands provide. 267 if (!GEP->accumulateConstantOffset(DL, TmpOffset)) 268 return false; 269 270 Offset = TmpOffset; 271 return true; 272 } 273 274 /// Recursive helper to read bits out of global. C is the constant being copied 275 /// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy 276 /// results into and BytesLeft is the number of bytes left in 277 /// the CurPtr buffer. DL is the DataLayout. 278 static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, 279 unsigned char *CurPtr, unsigned BytesLeft, 280 const DataLayout &DL) { 281 assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) && 282 "Out of range access"); 283 284 // If this element is zero or undefined, we can just return since *CurPtr is 285 // zero initialized. 286 if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) 287 return true; 288 289 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) { 290 if (CI->getBitWidth() > 64 || 291 (CI->getBitWidth() & 7) != 0) 292 return false; 293 294 uint64_t Val = CI->getZExtValue(); 295 unsigned IntBytes = unsigned(CI->getBitWidth()/8); 296 297 for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) { 298 int n = ByteOffset; 299 if (!DL.isLittleEndian()) 300 n = IntBytes - n - 1; 301 CurPtr[i] = (unsigned char)(Val >> (n * 8)); 302 ++ByteOffset; 303 } 304 return true; 305 } 306 307 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) { 308 if (CFP->getType()->isDoubleTy()) { 309 C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL); 310 return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL); 311 } 312 if (CFP->getType()->isFloatTy()){ 313 C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL); 314 return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL); 315 } 316 if (CFP->getType()->isHalfTy()){ 317 C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL); 318 return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL); 319 } 320 return false; 321 } 322 323 if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) { 324 const StructLayout *SL = DL.getStructLayout(CS->getType()); 325 unsigned Index = SL->getElementContainingOffset(ByteOffset); 326 uint64_t CurEltOffset = SL->getElementOffset(Index); 327 ByteOffset -= CurEltOffset; 328 329 while (1) { 330 // If the element access is to the element itself and not to tail padding, 331 // read the bytes from the element. 332 uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType()); 333 334 if (ByteOffset < EltSize && 335 !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr, 336 BytesLeft, DL)) 337 return false; 338 339 ++Index; 340 341 // Check to see if we read from the last struct element, if so we're done. 342 if (Index == CS->getType()->getNumElements()) 343 return true; 344 345 // If we read all of the bytes we needed from this element we're done. 346 uint64_t NextEltOffset = SL->getElementOffset(Index); 347 348 if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset) 349 return true; 350 351 // Move to the next element of the struct. 352 CurPtr += NextEltOffset - CurEltOffset - ByteOffset; 353 BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset; 354 ByteOffset = 0; 355 CurEltOffset = NextEltOffset; 356 } 357 // not reached. 358 } 359 360 if (isa<ConstantArray>(C) || isa<ConstantVector>(C) || 361 isa<ConstantDataSequential>(C)) { 362 Type *EltTy = C->getType()->getSequentialElementType(); 363 uint64_t EltSize = DL.getTypeAllocSize(EltTy); 364 uint64_t Index = ByteOffset / EltSize; 365 uint64_t Offset = ByteOffset - Index * EltSize; 366 uint64_t NumElts; 367 if (ArrayType *AT = dyn_cast<ArrayType>(C->getType())) 368 NumElts = AT->getNumElements(); 369 else 370 NumElts = C->getType()->getVectorNumElements(); 371 372 for (; Index != NumElts; ++Index) { 373 if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr, 374 BytesLeft, DL)) 375 return false; 376 377 uint64_t BytesWritten = EltSize - Offset; 378 assert(BytesWritten <= EltSize && "Not indexing into this element?"); 379 if (BytesWritten >= BytesLeft) 380 return true; 381 382 Offset = 0; 383 BytesLeft -= BytesWritten; 384 CurPtr += BytesWritten; 385 } 386 return true; 387 } 388 389 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) { 390 if (CE->getOpcode() == Instruction::IntToPtr && 391 CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) { 392 return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr, 393 BytesLeft, DL); 394 } 395 } 396 397 // Otherwise, unknown initializer type. 398 return false; 399 } 400 401 static Constant *FoldReinterpretLoadFromConstPtr(Constant *C, 402 const DataLayout &DL) { 403 PointerType *PTy = cast<PointerType>(C->getType()); 404 Type *LoadTy = PTy->getElementType(); 405 IntegerType *IntType = dyn_cast<IntegerType>(LoadTy); 406 407 // If this isn't an integer load we can't fold it directly. 408 if (!IntType) { 409 unsigned AS = PTy->getAddressSpace(); 410 411 // If this is a float/double load, we can try folding it as an int32/64 load 412 // and then bitcast the result. This can be useful for union cases. Note 413 // that address spaces don't matter here since we're not going to result in 414 // an actual new load. 415 Type *MapTy; 416 if (LoadTy->isHalfTy()) 417 MapTy = Type::getInt16PtrTy(C->getContext(), AS); 418 else if (LoadTy->isFloatTy()) 419 MapTy = Type::getInt32PtrTy(C->getContext(), AS); 420 else if (LoadTy->isDoubleTy()) 421 MapTy = Type::getInt64PtrTy(C->getContext(), AS); 422 else if (LoadTy->isVectorTy()) { 423 MapTy = PointerType::getIntNPtrTy(C->getContext(), 424 DL.getTypeAllocSizeInBits(LoadTy), AS); 425 } else 426 return nullptr; 427 428 C = FoldBitCast(C, MapTy, DL); 429 if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, DL)) 430 return FoldBitCast(Res, LoadTy, DL); 431 return nullptr; 432 } 433 434 unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8; 435 if (BytesLoaded > 32 || BytesLoaded == 0) 436 return nullptr; 437 438 GlobalValue *GVal; 439 APInt Offset; 440 if (!IsConstantOffsetFromGlobal(C, GVal, Offset, DL)) 441 return nullptr; 442 443 GlobalVariable *GV = dyn_cast<GlobalVariable>(GVal); 444 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() || 445 !GV->getInitializer()->getType()->isSized()) 446 return nullptr; 447 448 // If we're loading off the beginning of the global, some bytes may be valid, 449 // but we don't try to handle this. 450 if (Offset.isNegative()) 451 return nullptr; 452 453 // If we're not accessing anything in this constant, the result is undefined. 454 if (Offset.getZExtValue() >= 455 DL.getTypeAllocSize(GV->getInitializer()->getType())) 456 return UndefValue::get(IntType); 457 458 unsigned char RawBytes[32] = {0}; 459 if (!ReadDataFromGlobal(GV->getInitializer(), Offset.getZExtValue(), RawBytes, 460 BytesLoaded, DL)) 461 return nullptr; 462 463 APInt ResultVal = APInt(IntType->getBitWidth(), 0); 464 if (DL.isLittleEndian()) { 465 ResultVal = RawBytes[BytesLoaded - 1]; 466 for (unsigned i = 1; i != BytesLoaded; ++i) { 467 ResultVal <<= 8; 468 ResultVal |= RawBytes[BytesLoaded - 1 - i]; 469 } 470 } else { 471 ResultVal = RawBytes[0]; 472 for (unsigned i = 1; i != BytesLoaded; ++i) { 473 ResultVal <<= 8; 474 ResultVal |= RawBytes[i]; 475 } 476 } 477 478 return ConstantInt::get(IntType->getContext(), ResultVal); 479 } 480 481 static Constant *ConstantFoldLoadThroughBitcast(ConstantExpr *CE, 482 const DataLayout &DL) { 483 auto *DestPtrTy = dyn_cast<PointerType>(CE->getType()); 484 if (!DestPtrTy) 485 return nullptr; 486 Type *DestTy = DestPtrTy->getElementType(); 487 488 Constant *C = ConstantFoldLoadFromConstPtr(CE->getOperand(0), DL); 489 if (!C) 490 return nullptr; 491 492 do { 493 Type *SrcTy = C->getType(); 494 495 // If the type sizes are the same and a cast is legal, just directly 496 // cast the constant. 497 if (DL.getTypeSizeInBits(DestTy) == DL.getTypeSizeInBits(SrcTy)) { 498 Instruction::CastOps Cast = Instruction::BitCast; 499 // If we are going from a pointer to int or vice versa, we spell the cast 500 // differently. 501 if (SrcTy->isIntegerTy() && DestTy->isPointerTy()) 502 Cast = Instruction::IntToPtr; 503 else if (SrcTy->isPointerTy() && DestTy->isIntegerTy()) 504 Cast = Instruction::PtrToInt; 505 506 if (CastInst::castIsValid(Cast, C, DestTy)) 507 return ConstantExpr::getCast(Cast, C, DestTy); 508 } 509 510 // If this isn't an aggregate type, there is nothing we can do to drill down 511 // and find a bitcastable constant. 512 if (!SrcTy->isAggregateType()) 513 return nullptr; 514 515 // We're simulating a load through a pointer that was bitcast to point to 516 // a different type, so we can try to walk down through the initial 517 // elements of an aggregate to see if some part of th e aggregate is 518 // castable to implement the "load" semantic model. 519 C = C->getAggregateElement(0u); 520 } while (C); 521 522 return nullptr; 523 } 524 525 /// Return the value that a load from C would produce if it is constant and 526 /// determinable. If this is not determinable, return null. 527 Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, 528 const DataLayout &DL) { 529 // First, try the easy cases: 530 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) 531 if (GV->isConstant() && GV->hasDefinitiveInitializer()) 532 return GV->getInitializer(); 533 534 if (auto *GA = dyn_cast<GlobalAlias>(C)) 535 if (GA->getAliasee() && !GA->mayBeOverridden()) 536 return ConstantFoldLoadFromConstPtr(GA->getAliasee(), DL); 537 538 // If the loaded value isn't a constant expr, we can't handle it. 539 ConstantExpr *CE = dyn_cast<ConstantExpr>(C); 540 if (!CE) 541 return nullptr; 542 543 if (CE->getOpcode() == Instruction::GetElementPtr) { 544 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) { 545 if (GV->isConstant() && GV->hasDefinitiveInitializer()) { 546 if (Constant *V = 547 ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE)) 548 return V; 549 } 550 } 551 } 552 553 if (CE->getOpcode() == Instruction::BitCast) 554 if (Constant *LoadedC = ConstantFoldLoadThroughBitcast(CE, DL)) 555 return LoadedC; 556 557 // Instead of loading constant c string, use corresponding integer value 558 // directly if string length is small enough. 559 StringRef Str; 560 if (getConstantStringInfo(CE, Str) && !Str.empty()) { 561 unsigned StrLen = Str.size(); 562 Type *Ty = cast<PointerType>(CE->getType())->getElementType(); 563 unsigned NumBits = Ty->getPrimitiveSizeInBits(); 564 // Replace load with immediate integer if the result is an integer or fp 565 // value. 566 if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 && 567 (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) { 568 APInt StrVal(NumBits, 0); 569 APInt SingleChar(NumBits, 0); 570 if (DL.isLittleEndian()) { 571 for (signed i = StrLen-1; i >= 0; i--) { 572 SingleChar = (uint64_t) Str[i] & UCHAR_MAX; 573 StrVal = (StrVal << 8) | SingleChar; 574 } 575 } else { 576 for (unsigned i = 0; i < StrLen; i++) { 577 SingleChar = (uint64_t) Str[i] & UCHAR_MAX; 578 StrVal = (StrVal << 8) | SingleChar; 579 } 580 // Append NULL at the end. 581 SingleChar = 0; 582 StrVal = (StrVal << 8) | SingleChar; 583 } 584 585 Constant *Res = ConstantInt::get(CE->getContext(), StrVal); 586 if (Ty->isFloatingPointTy()) 587 Res = ConstantExpr::getBitCast(Res, Ty); 588 return Res; 589 } 590 } 591 592 // If this load comes from anywhere in a constant global, and if the global 593 // is all undef or zero, we know what it loads. 594 if (GlobalVariable *GV = 595 dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, DL))) { 596 if (GV->isConstant() && GV->hasDefinitiveInitializer()) { 597 Type *ResTy = cast<PointerType>(C->getType())->getElementType(); 598 if (GV->getInitializer()->isNullValue()) 599 return Constant::getNullValue(ResTy); 600 if (isa<UndefValue>(GV->getInitializer())) 601 return UndefValue::get(ResTy); 602 } 603 } 604 605 // Try hard to fold loads from bitcasted strange and non-type-safe things. 606 return FoldReinterpretLoadFromConstPtr(CE, DL); 607 } 608 609 static Constant *ConstantFoldLoadInst(const LoadInst *LI, 610 const DataLayout &DL) { 611 if (LI->isVolatile()) return nullptr; 612 613 if (Constant *C = dyn_cast<Constant>(LI->getOperand(0))) 614 return ConstantFoldLoadFromConstPtr(C, DL); 615 616 return nullptr; 617 } 618 619 /// One of Op0/Op1 is a constant expression. 620 /// Attempt to symbolically evaluate the result of a binary operator merging 621 /// these together. If target data info is available, it is provided as DL, 622 /// otherwise DL is null. 623 static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, 624 Constant *Op1, 625 const DataLayout &DL) { 626 // SROA 627 628 // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl. 629 // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute 630 // bits. 631 632 if (Opc == Instruction::And) { 633 unsigned BitWidth = DL.getTypeSizeInBits(Op0->getType()->getScalarType()); 634 APInt KnownZero0(BitWidth, 0), KnownOne0(BitWidth, 0); 635 APInt KnownZero1(BitWidth, 0), KnownOne1(BitWidth, 0); 636 computeKnownBits(Op0, KnownZero0, KnownOne0, DL); 637 computeKnownBits(Op1, KnownZero1, KnownOne1, DL); 638 if ((KnownOne1 | KnownZero0).isAllOnesValue()) { 639 // All the bits of Op0 that the 'and' could be masking are already zero. 640 return Op0; 641 } 642 if ((KnownOne0 | KnownZero1).isAllOnesValue()) { 643 // All the bits of Op1 that the 'and' could be masking are already zero. 644 return Op1; 645 } 646 647 APInt KnownZero = KnownZero0 | KnownZero1; 648 APInt KnownOne = KnownOne0 & KnownOne1; 649 if ((KnownZero | KnownOne).isAllOnesValue()) { 650 return ConstantInt::get(Op0->getType(), KnownOne); 651 } 652 } 653 654 // If the constant expr is something like &A[123] - &A[4].f, fold this into a 655 // constant. This happens frequently when iterating over a global array. 656 if (Opc == Instruction::Sub) { 657 GlobalValue *GV1, *GV2; 658 APInt Offs1, Offs2; 659 660 if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL)) 661 if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) { 662 unsigned OpSize = DL.getTypeSizeInBits(Op0->getType()); 663 664 // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow. 665 // PtrToInt may change the bitwidth so we have convert to the right size 666 // first. 667 return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) - 668 Offs2.zextOrTrunc(OpSize)); 669 } 670 } 671 672 return nullptr; 673 } 674 675 /// If array indices are not pointer-sized integers, explicitly cast them so 676 /// that they aren't implicitly casted by the getelementptr. 677 static Constant *CastGEPIndices(Type *SrcTy, ArrayRef<Constant *> Ops, 678 Type *ResultTy, const DataLayout &DL, 679 const TargetLibraryInfo *TLI) { 680 Type *IntPtrTy = DL.getIntPtrType(ResultTy); 681 682 bool Any = false; 683 SmallVector<Constant*, 32> NewIdxs; 684 for (unsigned i = 1, e = Ops.size(); i != e; ++i) { 685 if ((i == 1 || 686 !isa<StructType>(GetElementPtrInst::getIndexedType( 687 cast<PointerType>(Ops[0]->getType()->getScalarType()) 688 ->getElementType(), 689 Ops.slice(1, i - 1)))) && 690 Ops[i]->getType() != IntPtrTy) { 691 Any = true; 692 NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i], 693 true, 694 IntPtrTy, 695 true), 696 Ops[i], IntPtrTy)); 697 } else 698 NewIdxs.push_back(Ops[i]); 699 } 700 701 if (!Any) 702 return nullptr; 703 704 Constant *C = ConstantExpr::getGetElementPtr(SrcTy, Ops[0], NewIdxs); 705 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) { 706 if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI)) 707 C = Folded; 708 } 709 710 return C; 711 } 712 713 /// Strip the pointer casts, but preserve the address space information. 714 static Constant* StripPtrCastKeepAS(Constant* Ptr) { 715 assert(Ptr->getType()->isPointerTy() && "Not a pointer type"); 716 PointerType *OldPtrTy = cast<PointerType>(Ptr->getType()); 717 Ptr = Ptr->stripPointerCasts(); 718 PointerType *NewPtrTy = cast<PointerType>(Ptr->getType()); 719 720 // Preserve the address space number of the pointer. 721 if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) { 722 NewPtrTy = NewPtrTy->getElementType()->getPointerTo( 723 OldPtrTy->getAddressSpace()); 724 Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy); 725 } 726 return Ptr; 727 } 728 729 /// If we can symbolically evaluate the GEP constant expression, do so. 730 static Constant *SymbolicallyEvaluateGEP(Type *SrcTy, ArrayRef<Constant *> Ops, 731 Type *ResultTy, const DataLayout &DL, 732 const TargetLibraryInfo *TLI) { 733 Constant *Ptr = Ops[0]; 734 if (!Ptr->getType()->getPointerElementType()->isSized() || 735 !Ptr->getType()->isPointerTy()) 736 return nullptr; 737 738 Type *IntPtrTy = DL.getIntPtrType(Ptr->getType()); 739 Type *ResultElementTy = ResultTy->getPointerElementType(); 740 741 // If this is a constant expr gep that is effectively computing an 742 // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12' 743 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 744 if (!isa<ConstantInt>(Ops[i])) { 745 746 // If this is "gep i8* Ptr, (sub 0, V)", fold this as: 747 // "inttoptr (sub (ptrtoint Ptr), V)" 748 if (Ops.size() == 2 && ResultElementTy->isIntegerTy(8)) { 749 ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[1]); 750 assert((!CE || CE->getType() == IntPtrTy) && 751 "CastGEPIndices didn't canonicalize index types!"); 752 if (CE && CE->getOpcode() == Instruction::Sub && 753 CE->getOperand(0)->isNullValue()) { 754 Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType()); 755 Res = ConstantExpr::getSub(Res, CE->getOperand(1)); 756 Res = ConstantExpr::getIntToPtr(Res, ResultTy); 757 if (ConstantExpr *ResCE = dyn_cast<ConstantExpr>(Res)) 758 Res = ConstantFoldConstantExpression(ResCE, DL, TLI); 759 return Res; 760 } 761 } 762 return nullptr; 763 } 764 765 unsigned BitWidth = DL.getTypeSizeInBits(IntPtrTy); 766 APInt Offset = 767 APInt(BitWidth, 768 DL.getIndexedOffset( 769 Ptr->getType(), 770 makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1))); 771 Ptr = StripPtrCastKeepAS(Ptr); 772 773 // If this is a GEP of a GEP, fold it all into a single GEP. 774 while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) { 775 SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end()); 776 777 // Do not try the incorporate the sub-GEP if some index is not a number. 778 bool AllConstantInt = true; 779 for (unsigned i = 0, e = NestedOps.size(); i != e; ++i) 780 if (!isa<ConstantInt>(NestedOps[i])) { 781 AllConstantInt = false; 782 break; 783 } 784 if (!AllConstantInt) 785 break; 786 787 Ptr = cast<Constant>(GEP->getOperand(0)); 788 Offset += APInt(BitWidth, DL.getIndexedOffset(Ptr->getType(), NestedOps)); 789 Ptr = StripPtrCastKeepAS(Ptr); 790 } 791 792 // If the base value for this address is a literal integer value, fold the 793 // getelementptr to the resulting integer value casted to the pointer type. 794 APInt BasePtr(BitWidth, 0); 795 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) { 796 if (CE->getOpcode() == Instruction::IntToPtr) { 797 if (ConstantInt *Base = dyn_cast<ConstantInt>(CE->getOperand(0))) 798 BasePtr = Base->getValue().zextOrTrunc(BitWidth); 799 } 800 } 801 802 if (Ptr->isNullValue() || BasePtr != 0) { 803 Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr); 804 return ConstantExpr::getIntToPtr(C, ResultTy); 805 } 806 807 // Otherwise form a regular getelementptr. Recompute the indices so that 808 // we eliminate over-indexing of the notional static type array bounds. 809 // This makes it easy to determine if the getelementptr is "inbounds". 810 // Also, this helps GlobalOpt do SROA on GlobalVariables. 811 Type *Ty = Ptr->getType(); 812 assert(Ty->isPointerTy() && "Forming regular GEP of non-pointer type"); 813 SmallVector<Constant *, 32> NewIdxs; 814 815 do { 816 if (SequentialType *ATy = dyn_cast<SequentialType>(Ty)) { 817 if (ATy->isPointerTy()) { 818 // The only pointer indexing we'll do is on the first index of the GEP. 819 if (!NewIdxs.empty()) 820 break; 821 822 // Only handle pointers to sized types, not pointers to functions. 823 if (!ATy->getElementType()->isSized()) 824 return nullptr; 825 } 826 827 // Determine which element of the array the offset points into. 828 APInt ElemSize(BitWidth, DL.getTypeAllocSize(ATy->getElementType())); 829 if (ElemSize == 0) 830 // The element size is 0. This may be [0 x Ty]*, so just use a zero 831 // index for this level and proceed to the next level to see if it can 832 // accommodate the offset. 833 NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0)); 834 else { 835 // The element size is non-zero divide the offset by the element 836 // size (rounding down), to compute the index at this level. 837 APInt NewIdx = Offset.udiv(ElemSize); 838 Offset -= NewIdx * ElemSize; 839 NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx)); 840 } 841 Ty = ATy->getElementType(); 842 } else if (StructType *STy = dyn_cast<StructType>(Ty)) { 843 // If we end up with an offset that isn't valid for this struct type, we 844 // can't re-form this GEP in a regular form, so bail out. The pointer 845 // operand likely went through casts that are necessary to make the GEP 846 // sensible. 847 const StructLayout &SL = *DL.getStructLayout(STy); 848 if (Offset.uge(SL.getSizeInBytes())) 849 break; 850 851 // Determine which field of the struct the offset points into. The 852 // getZExtValue is fine as we've already ensured that the offset is 853 // within the range representable by the StructLayout API. 854 unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue()); 855 NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 856 ElIdx)); 857 Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx)); 858 Ty = STy->getTypeAtIndex(ElIdx); 859 } else { 860 // We've reached some non-indexable type. 861 break; 862 } 863 } while (Ty != ResultElementTy); 864 865 // If we haven't used up the entire offset by descending the static 866 // type, then the offset is pointing into the middle of an indivisible 867 // member, so we can't simplify it. 868 if (Offset != 0) 869 return nullptr; 870 871 // Create a GEP. 872 Constant *C = ConstantExpr::getGetElementPtr(SrcTy, Ptr, NewIdxs); 873 assert(C->getType()->getPointerElementType() == Ty && 874 "Computed GetElementPtr has unexpected type!"); 875 876 // If we ended up indexing a member with a type that doesn't match 877 // the type of what the original indices indexed, add a cast. 878 if (Ty != ResultElementTy) 879 C = FoldBitCast(C, ResultTy, DL); 880 881 return C; 882 } 883 884 885 886 //===----------------------------------------------------------------------===// 887 // Constant Folding public APIs 888 //===----------------------------------------------------------------------===// 889 890 /// Try to constant fold the specified instruction. 891 /// If successful, the constant result is returned, if not, null is returned. 892 /// Note that this fails if not all of the operands are constant. Otherwise, 893 /// this function can only fail when attempting to fold instructions like loads 894 /// and stores, which have no constant expression form. 895 Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL, 896 const TargetLibraryInfo *TLI) { 897 // Handle PHI nodes quickly here... 898 if (PHINode *PN = dyn_cast<PHINode>(I)) { 899 Constant *CommonValue = nullptr; 900 901 for (Value *Incoming : PN->incoming_values()) { 902 // If the incoming value is undef then skip it. Note that while we could 903 // skip the value if it is equal to the phi node itself we choose not to 904 // because that would break the rule that constant folding only applies if 905 // all operands are constants. 906 if (isa<UndefValue>(Incoming)) 907 continue; 908 // If the incoming value is not a constant, then give up. 909 Constant *C = dyn_cast<Constant>(Incoming); 910 if (!C) 911 return nullptr; 912 // Fold the PHI's operands. 913 if (ConstantExpr *NewC = dyn_cast<ConstantExpr>(C)) 914 C = ConstantFoldConstantExpression(NewC, DL, TLI); 915 // If the incoming value is a different constant to 916 // the one we saw previously, then give up. 917 if (CommonValue && C != CommonValue) 918 return nullptr; 919 CommonValue = C; 920 } 921 922 923 // If we reach here, all incoming values are the same constant or undef. 924 return CommonValue ? CommonValue : UndefValue::get(PN->getType()); 925 } 926 927 // Scan the operand list, checking to see if they are all constants, if so, 928 // hand off to ConstantFoldInstOperands. 929 SmallVector<Constant*, 8> Ops; 930 for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i) { 931 Constant *Op = dyn_cast<Constant>(*i); 932 if (!Op) 933 return nullptr; // All operands not constant! 934 935 // Fold the Instruction's operands. 936 if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(Op)) 937 Op = ConstantFoldConstantExpression(NewCE, DL, TLI); 938 939 Ops.push_back(Op); 940 } 941 942 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 943 return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1], 944 DL, TLI); 945 946 if (const LoadInst *LI = dyn_cast<LoadInst>(I)) 947 return ConstantFoldLoadInst(LI, DL); 948 949 if (InsertValueInst *IVI = dyn_cast<InsertValueInst>(I)) { 950 return ConstantExpr::getInsertValue( 951 cast<Constant>(IVI->getAggregateOperand()), 952 cast<Constant>(IVI->getInsertedValueOperand()), 953 IVI->getIndices()); 954 } 955 956 if (ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I)) { 957 return ConstantExpr::getExtractValue( 958 cast<Constant>(EVI->getAggregateOperand()), 959 EVI->getIndices()); 960 } 961 962 return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Ops, DL, TLI); 963 } 964 965 static Constant * 966 ConstantFoldConstantExpressionImpl(const ConstantExpr *CE, const DataLayout &DL, 967 const TargetLibraryInfo *TLI, 968 SmallPtrSetImpl<ConstantExpr *> &FoldedOps) { 969 SmallVector<Constant *, 8> Ops; 970 for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end(); i != e; 971 ++i) { 972 Constant *NewC = cast<Constant>(*i); 973 // Recursively fold the ConstantExpr's operands. If we have already folded 974 // a ConstantExpr, we don't have to process it again. 975 if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(NewC)) { 976 if (FoldedOps.insert(NewCE).second) 977 NewC = ConstantFoldConstantExpressionImpl(NewCE, DL, TLI, FoldedOps); 978 } 979 Ops.push_back(NewC); 980 } 981 982 if (CE->isCompare()) 983 return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1], 984 DL, TLI); 985 return ConstantFoldInstOperands(CE->getOpcode(), CE->getType(), Ops, DL, TLI); 986 } 987 988 /// Attempt to fold the constant expression 989 /// using the specified DataLayout. If successful, the constant result is 990 /// result is returned, if not, null is returned. 991 Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE, 992 const DataLayout &DL, 993 const TargetLibraryInfo *TLI) { 994 SmallPtrSet<ConstantExpr *, 4> FoldedOps; 995 return ConstantFoldConstantExpressionImpl(CE, DL, TLI, FoldedOps); 996 } 997 998 /// Attempt to constant fold an instruction with the 999 /// specified opcode and operands. If successful, the constant result is 1000 /// returned, if not, null is returned. Note that this function can fail when 1001 /// attempting to fold instructions like loads and stores, which have no 1002 /// constant expression form. 1003 /// 1004 /// TODO: This function neither utilizes nor preserves nsw/nuw/inbounds/etc 1005 /// information, due to only being passed an opcode and operands. Constant 1006 /// folding using this function strips this information. 1007 /// 1008 Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy, 1009 ArrayRef<Constant *> Ops, 1010 const DataLayout &DL, 1011 const TargetLibraryInfo *TLI) { 1012 // Handle easy binops first. 1013 if (Instruction::isBinaryOp(Opcode)) { 1014 if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1])) { 1015 if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], DL)) 1016 return C; 1017 } 1018 1019 return ConstantExpr::get(Opcode, Ops[0], Ops[1]); 1020 } 1021 1022 switch (Opcode) { 1023 default: return nullptr; 1024 case Instruction::ICmp: 1025 case Instruction::FCmp: llvm_unreachable("Invalid for compares"); 1026 case Instruction::Call: 1027 if (Function *F = dyn_cast<Function>(Ops.back())) 1028 if (canConstantFoldCallTo(F)) 1029 return ConstantFoldCall(F, Ops.slice(0, Ops.size() - 1), TLI); 1030 return nullptr; 1031 case Instruction::PtrToInt: 1032 // If the input is a inttoptr, eliminate the pair. This requires knowing 1033 // the width of a pointer, so it can't be done in ConstantExpr::getCast. 1034 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) { 1035 if (CE->getOpcode() == Instruction::IntToPtr) { 1036 Constant *Input = CE->getOperand(0); 1037 unsigned InWidth = Input->getType()->getScalarSizeInBits(); 1038 unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType()); 1039 if (PtrWidth < InWidth) { 1040 Constant *Mask = 1041 ConstantInt::get(CE->getContext(), 1042 APInt::getLowBitsSet(InWidth, PtrWidth)); 1043 Input = ConstantExpr::getAnd(Input, Mask); 1044 } 1045 // Do a zext or trunc to get to the dest size. 1046 return ConstantExpr::getIntegerCast(Input, DestTy, false); 1047 } 1048 } 1049 return ConstantExpr::getCast(Opcode, Ops[0], DestTy); 1050 case Instruction::IntToPtr: 1051 // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if 1052 // the int size is >= the ptr size and the address spaces are the same. 1053 // This requires knowing the width of a pointer, so it can't be done in 1054 // ConstantExpr::getCast. 1055 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) { 1056 if (CE->getOpcode() == Instruction::PtrToInt) { 1057 Constant *SrcPtr = CE->getOperand(0); 1058 unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType()); 1059 unsigned MidIntSize = CE->getType()->getScalarSizeInBits(); 1060 1061 if (MidIntSize >= SrcPtrSize) { 1062 unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace(); 1063 if (SrcAS == DestTy->getPointerAddressSpace()) 1064 return FoldBitCast(CE->getOperand(0), DestTy, DL); 1065 } 1066 } 1067 } 1068 1069 return ConstantExpr::getCast(Opcode, Ops[0], DestTy); 1070 case Instruction::Trunc: 1071 case Instruction::ZExt: 1072 case Instruction::SExt: 1073 case Instruction::FPTrunc: 1074 case Instruction::FPExt: 1075 case Instruction::UIToFP: 1076 case Instruction::SIToFP: 1077 case Instruction::FPToUI: 1078 case Instruction::FPToSI: 1079 case Instruction::AddrSpaceCast: 1080 return ConstantExpr::getCast(Opcode, Ops[0], DestTy); 1081 case Instruction::BitCast: 1082 return FoldBitCast(Ops[0], DestTy, DL); 1083 case Instruction::Select: 1084 return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]); 1085 case Instruction::ExtractElement: 1086 return ConstantExpr::getExtractElement(Ops[0], Ops[1]); 1087 case Instruction::InsertElement: 1088 return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]); 1089 case Instruction::ShuffleVector: 1090 return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]); 1091 case Instruction::GetElementPtr: { 1092 Type *SrcTy = nullptr; 1093 if (Constant *C = CastGEPIndices(SrcTy, Ops, DestTy, DL, TLI)) 1094 return C; 1095 if (Constant *C = SymbolicallyEvaluateGEP(SrcTy, Ops, DestTy, DL, TLI)) 1096 return C; 1097 1098 return ConstantExpr::getGetElementPtr(SrcTy, Ops[0], Ops.slice(1)); 1099 } 1100 } 1101 } 1102 1103 /// Attempt to constant fold a compare 1104 /// instruction (icmp/fcmp) with the specified operands. If it fails, it 1105 /// returns a constant expression of the specified operands. 1106 Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate, 1107 Constant *Ops0, Constant *Ops1, 1108 const DataLayout &DL, 1109 const TargetLibraryInfo *TLI) { 1110 // fold: icmp (inttoptr x), null -> icmp x, 0 1111 // fold: icmp (ptrtoint x), 0 -> icmp x, null 1112 // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y 1113 // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y 1114 // 1115 // FIXME: The following comment is out of data and the DataLayout is here now. 1116 // ConstantExpr::getCompare cannot do this, because it doesn't have DL 1117 // around to know if bit truncation is happening. 1118 if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops0)) { 1119 if (Ops1->isNullValue()) { 1120 if (CE0->getOpcode() == Instruction::IntToPtr) { 1121 Type *IntPtrTy = DL.getIntPtrType(CE0->getType()); 1122 // Convert the integer value to the right size to ensure we get the 1123 // proper extension or truncation. 1124 Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0), 1125 IntPtrTy, false); 1126 Constant *Null = Constant::getNullValue(C->getType()); 1127 return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI); 1128 } 1129 1130 // Only do this transformation if the int is intptrty in size, otherwise 1131 // there is a truncation or extension that we aren't modeling. 1132 if (CE0->getOpcode() == Instruction::PtrToInt) { 1133 Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType()); 1134 if (CE0->getType() == IntPtrTy) { 1135 Constant *C = CE0->getOperand(0); 1136 Constant *Null = Constant::getNullValue(C->getType()); 1137 return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI); 1138 } 1139 } 1140 } 1141 1142 if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) { 1143 if (CE0->getOpcode() == CE1->getOpcode()) { 1144 if (CE0->getOpcode() == Instruction::IntToPtr) { 1145 Type *IntPtrTy = DL.getIntPtrType(CE0->getType()); 1146 1147 // Convert the integer value to the right size to ensure we get the 1148 // proper extension or truncation. 1149 Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0), 1150 IntPtrTy, false); 1151 Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0), 1152 IntPtrTy, false); 1153 return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI); 1154 } 1155 1156 // Only do this transformation if the int is intptrty in size, otherwise 1157 // there is a truncation or extension that we aren't modeling. 1158 if (CE0->getOpcode() == Instruction::PtrToInt) { 1159 Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType()); 1160 if (CE0->getType() == IntPtrTy && 1161 CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) { 1162 return ConstantFoldCompareInstOperands( 1163 Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI); 1164 } 1165 } 1166 } 1167 } 1168 1169 // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0) 1170 // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0) 1171 if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) && 1172 CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) { 1173 Constant *LHS = ConstantFoldCompareInstOperands( 1174 Predicate, CE0->getOperand(0), Ops1, DL, TLI); 1175 Constant *RHS = ConstantFoldCompareInstOperands( 1176 Predicate, CE0->getOperand(1), Ops1, DL, TLI); 1177 unsigned OpC = 1178 Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or; 1179 Constant *Ops[] = { LHS, RHS }; 1180 return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, DL, TLI); 1181 } 1182 } 1183 1184 return ConstantExpr::getCompare(Predicate, Ops0, Ops1); 1185 } 1186 1187 1188 /// Given a constant and a getelementptr constantexpr, return the constant value 1189 /// being addressed by the constant expression, or null if something is funny 1190 /// and we can't decide. 1191 Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C, 1192 ConstantExpr *CE) { 1193 if (!CE->getOperand(1)->isNullValue()) 1194 return nullptr; // Do not allow stepping over the value! 1195 1196 // Loop over all of the operands, tracking down which value we are 1197 // addressing. 1198 for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) { 1199 C = C->getAggregateElement(CE->getOperand(i)); 1200 if (!C) 1201 return nullptr; 1202 } 1203 return C; 1204 } 1205 1206 /// Given a constant and getelementptr indices (with an *implied* zero pointer 1207 /// index that is not in the list), return the constant value being addressed by 1208 /// a virtual load, or null if something is funny and we can't decide. 1209 Constant *llvm::ConstantFoldLoadThroughGEPIndices(Constant *C, 1210 ArrayRef<Constant*> Indices) { 1211 // Loop over all of the operands, tracking down which value we are 1212 // addressing. 1213 for (unsigned i = 0, e = Indices.size(); i != e; ++i) { 1214 C = C->getAggregateElement(Indices[i]); 1215 if (!C) 1216 return nullptr; 1217 } 1218 return C; 1219 } 1220 1221 1222 //===----------------------------------------------------------------------===// 1223 // Constant Folding for Calls 1224 // 1225 1226 /// Return true if it's even possible to fold a call to the specified function. 1227 bool llvm::canConstantFoldCallTo(const Function *F) { 1228 switch (F->getIntrinsicID()) { 1229 case Intrinsic::fabs: 1230 case Intrinsic::minnum: 1231 case Intrinsic::maxnum: 1232 case Intrinsic::log: 1233 case Intrinsic::log2: 1234 case Intrinsic::log10: 1235 case Intrinsic::exp: 1236 case Intrinsic::exp2: 1237 case Intrinsic::floor: 1238 case Intrinsic::ceil: 1239 case Intrinsic::sqrt: 1240 case Intrinsic::sin: 1241 case Intrinsic::cos: 1242 case Intrinsic::trunc: 1243 case Intrinsic::rint: 1244 case Intrinsic::nearbyint: 1245 case Intrinsic::pow: 1246 case Intrinsic::powi: 1247 case Intrinsic::bswap: 1248 case Intrinsic::ctpop: 1249 case Intrinsic::ctlz: 1250 case Intrinsic::cttz: 1251 case Intrinsic::fma: 1252 case Intrinsic::fmuladd: 1253 case Intrinsic::copysign: 1254 case Intrinsic::round: 1255 case Intrinsic::sadd_with_overflow: 1256 case Intrinsic::uadd_with_overflow: 1257 case Intrinsic::ssub_with_overflow: 1258 case Intrinsic::usub_with_overflow: 1259 case Intrinsic::smul_with_overflow: 1260 case Intrinsic::umul_with_overflow: 1261 case Intrinsic::convert_from_fp16: 1262 case Intrinsic::convert_to_fp16: 1263 case Intrinsic::x86_sse_cvtss2si: 1264 case Intrinsic::x86_sse_cvtss2si64: 1265 case Intrinsic::x86_sse_cvttss2si: 1266 case Intrinsic::x86_sse_cvttss2si64: 1267 case Intrinsic::x86_sse2_cvtsd2si: 1268 case Intrinsic::x86_sse2_cvtsd2si64: 1269 case Intrinsic::x86_sse2_cvttsd2si: 1270 case Intrinsic::x86_sse2_cvttsd2si64: 1271 return true; 1272 default: 1273 return false; 1274 case 0: break; 1275 } 1276 1277 if (!F->hasName()) 1278 return false; 1279 StringRef Name = F->getName(); 1280 1281 // In these cases, the check of the length is required. We don't want to 1282 // return true for a name like "cos\0blah" which strcmp would return equal to 1283 // "cos", but has length 8. 1284 switch (Name[0]) { 1285 default: 1286 return false; 1287 case 'a': 1288 return Name == "acos" || Name == "asin" || Name == "atan" || 1289 Name == "atan2" || Name == "acosf" || Name == "asinf" || 1290 Name == "atanf" || Name == "atan2f"; 1291 case 'c': 1292 return Name == "ceil" || Name == "cos" || Name == "cosh" || 1293 Name == "ceilf" || Name == "cosf" || Name == "coshf"; 1294 case 'e': 1295 return Name == "exp" || Name == "exp2" || Name == "expf" || Name == "exp2f"; 1296 case 'f': 1297 return Name == "fabs" || Name == "floor" || Name == "fmod" || 1298 Name == "fabsf" || Name == "floorf" || Name == "fmodf"; 1299 case 'l': 1300 return Name == "log" || Name == "log10" || Name == "logf" || 1301 Name == "log10f"; 1302 case 'p': 1303 return Name == "pow" || Name == "powf"; 1304 case 's': 1305 return Name == "sin" || Name == "sinh" || Name == "sqrt" || 1306 Name == "sinf" || Name == "sinhf" || Name == "sqrtf"; 1307 case 't': 1308 return Name == "tan" || Name == "tanh" || Name == "tanf" || Name == "tanhf"; 1309 } 1310 } 1311 1312 static Constant *GetConstantFoldFPValue(double V, Type *Ty) { 1313 if (Ty->isHalfTy()) { 1314 APFloat APF(V); 1315 bool unused; 1316 APF.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &unused); 1317 return ConstantFP::get(Ty->getContext(), APF); 1318 } 1319 if (Ty->isFloatTy()) 1320 return ConstantFP::get(Ty->getContext(), APFloat((float)V)); 1321 if (Ty->isDoubleTy()) 1322 return ConstantFP::get(Ty->getContext(), APFloat(V)); 1323 llvm_unreachable("Can only constant fold half/float/double"); 1324 1325 } 1326 1327 namespace { 1328 /// Clear the floating-point exception state. 1329 static inline void llvm_fenv_clearexcept() { 1330 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT 1331 feclearexcept(FE_ALL_EXCEPT); 1332 #endif 1333 errno = 0; 1334 } 1335 1336 /// Test if a floating-point exception was raised. 1337 static inline bool llvm_fenv_testexcept() { 1338 int errno_val = errno; 1339 if (errno_val == ERANGE || errno_val == EDOM) 1340 return true; 1341 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT 1342 if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT)) 1343 return true; 1344 #endif 1345 return false; 1346 } 1347 } // End namespace 1348 1349 static Constant *ConstantFoldFP(double (*NativeFP)(double), double V, 1350 Type *Ty) { 1351 llvm_fenv_clearexcept(); 1352 V = NativeFP(V); 1353 if (llvm_fenv_testexcept()) { 1354 llvm_fenv_clearexcept(); 1355 return nullptr; 1356 } 1357 1358 return GetConstantFoldFPValue(V, Ty); 1359 } 1360 1361 static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), 1362 double V, double W, Type *Ty) { 1363 llvm_fenv_clearexcept(); 1364 V = NativeFP(V, W); 1365 if (llvm_fenv_testexcept()) { 1366 llvm_fenv_clearexcept(); 1367 return nullptr; 1368 } 1369 1370 return GetConstantFoldFPValue(V, Ty); 1371 } 1372 1373 /// Attempt to fold an SSE floating point to integer conversion of a constant 1374 /// floating point. If roundTowardZero is false, the default IEEE rounding is 1375 /// used (toward nearest, ties to even). This matches the behavior of the 1376 /// non-truncating SSE instructions in the default rounding mode. The desired 1377 /// integer type Ty is used to select how many bits are available for the 1378 /// result. Returns null if the conversion cannot be performed, otherwise 1379 /// returns the Constant value resulting from the conversion. 1380 static Constant *ConstantFoldConvertToInt(const APFloat &Val, 1381 bool roundTowardZero, Type *Ty) { 1382 // All of these conversion intrinsics form an integer of at most 64bits. 1383 unsigned ResultWidth = Ty->getIntegerBitWidth(); 1384 assert(ResultWidth <= 64 && 1385 "Can only constant fold conversions to 64 and 32 bit ints"); 1386 1387 uint64_t UIntVal; 1388 bool isExact = false; 1389 APFloat::roundingMode mode = roundTowardZero? APFloat::rmTowardZero 1390 : APFloat::rmNearestTiesToEven; 1391 APFloat::opStatus status = Val.convertToInteger(&UIntVal, ResultWidth, 1392 /*isSigned=*/true, mode, 1393 &isExact); 1394 if (status != APFloat::opOK && status != APFloat::opInexact) 1395 return nullptr; 1396 return ConstantInt::get(Ty, UIntVal, /*isSigned=*/true); 1397 } 1398 1399 static double getValueAsDouble(ConstantFP *Op) { 1400 Type *Ty = Op->getType(); 1401 1402 if (Ty->isFloatTy()) 1403 return Op->getValueAPF().convertToFloat(); 1404 1405 if (Ty->isDoubleTy()) 1406 return Op->getValueAPF().convertToDouble(); 1407 1408 bool unused; 1409 APFloat APF = Op->getValueAPF(); 1410 APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &unused); 1411 return APF.convertToDouble(); 1412 } 1413 1414 static Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, 1415 Type *Ty, ArrayRef<Constant *> Operands, 1416 const TargetLibraryInfo *TLI) { 1417 if (Operands.size() == 1) { 1418 if (ConstantFP *Op = dyn_cast<ConstantFP>(Operands[0])) { 1419 if (IntrinsicID == Intrinsic::convert_to_fp16) { 1420 APFloat Val(Op->getValueAPF()); 1421 1422 bool lost = false; 1423 Val.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &lost); 1424 1425 return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt()); 1426 } 1427 1428 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy()) 1429 return nullptr; 1430 1431 if (IntrinsicID == Intrinsic::round) { 1432 APFloat V = Op->getValueAPF(); 1433 V.roundToIntegral(APFloat::rmNearestTiesToAway); 1434 return ConstantFP::get(Ty->getContext(), V); 1435 } 1436 1437 if (IntrinsicID == Intrinsic::floor) { 1438 APFloat V = Op->getValueAPF(); 1439 V.roundToIntegral(APFloat::rmTowardNegative); 1440 return ConstantFP::get(Ty->getContext(), V); 1441 } 1442 1443 if (IntrinsicID == Intrinsic::ceil) { 1444 APFloat V = Op->getValueAPF(); 1445 V.roundToIntegral(APFloat::rmTowardPositive); 1446 return ConstantFP::get(Ty->getContext(), V); 1447 } 1448 1449 if (IntrinsicID == Intrinsic::trunc) { 1450 APFloat V = Op->getValueAPF(); 1451 V.roundToIntegral(APFloat::rmTowardZero); 1452 return ConstantFP::get(Ty->getContext(), V); 1453 } 1454 1455 if (IntrinsicID == Intrinsic::rint) { 1456 APFloat V = Op->getValueAPF(); 1457 V.roundToIntegral(APFloat::rmNearestTiesToEven); 1458 return ConstantFP::get(Ty->getContext(), V); 1459 } 1460 1461 if (IntrinsicID == Intrinsic::nearbyint) { 1462 APFloat V = Op->getValueAPF(); 1463 V.roundToIntegral(APFloat::rmNearestTiesToEven); 1464 return ConstantFP::get(Ty->getContext(), V); 1465 } 1466 1467 /// We only fold functions with finite arguments. Folding NaN and inf is 1468 /// likely to be aborted with an exception anyway, and some host libms 1469 /// have known errors raising exceptions. 1470 if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity()) 1471 return nullptr; 1472 1473 /// Currently APFloat versions of these functions do not exist, so we use 1474 /// the host native double versions. Float versions are not called 1475 /// directly but for all these it is true (float)(f((double)arg)) == 1476 /// f(arg). Long double not supported yet. 1477 double V = getValueAsDouble(Op); 1478 1479 switch (IntrinsicID) { 1480 default: break; 1481 case Intrinsic::fabs: 1482 return ConstantFoldFP(fabs, V, Ty); 1483 case Intrinsic::log2: 1484 return ConstantFoldFP(Log2, V, Ty); 1485 case Intrinsic::log: 1486 return ConstantFoldFP(log, V, Ty); 1487 case Intrinsic::log10: 1488 return ConstantFoldFP(log10, V, Ty); 1489 case Intrinsic::exp: 1490 return ConstantFoldFP(exp, V, Ty); 1491 case Intrinsic::exp2: 1492 return ConstantFoldFP(exp2, V, Ty); 1493 case Intrinsic::sin: 1494 return ConstantFoldFP(sin, V, Ty); 1495 case Intrinsic::cos: 1496 return ConstantFoldFP(cos, V, Ty); 1497 } 1498 1499 if (!TLI) 1500 return nullptr; 1501 1502 switch (Name[0]) { 1503 case 'a': 1504 if ((Name == "acos" && TLI->has(LibFunc::acos)) || 1505 (Name == "acosf" && TLI->has(LibFunc::acosf))) 1506 return ConstantFoldFP(acos, V, Ty); 1507 else if ((Name == "asin" && TLI->has(LibFunc::asin)) || 1508 (Name == "asinf" && TLI->has(LibFunc::asinf))) 1509 return ConstantFoldFP(asin, V, Ty); 1510 else if ((Name == "atan" && TLI->has(LibFunc::atan)) || 1511 (Name == "atanf" && TLI->has(LibFunc::atanf))) 1512 return ConstantFoldFP(atan, V, Ty); 1513 break; 1514 case 'c': 1515 if ((Name == "ceil" && TLI->has(LibFunc::ceil)) || 1516 (Name == "ceilf" && TLI->has(LibFunc::ceilf))) 1517 return ConstantFoldFP(ceil, V, Ty); 1518 else if ((Name == "cos" && TLI->has(LibFunc::cos)) || 1519 (Name == "cosf" && TLI->has(LibFunc::cosf))) 1520 return ConstantFoldFP(cos, V, Ty); 1521 else if ((Name == "cosh" && TLI->has(LibFunc::cosh)) || 1522 (Name == "coshf" && TLI->has(LibFunc::coshf))) 1523 return ConstantFoldFP(cosh, V, Ty); 1524 break; 1525 case 'e': 1526 if ((Name == "exp" && TLI->has(LibFunc::exp)) || 1527 (Name == "expf" && TLI->has(LibFunc::expf))) 1528 return ConstantFoldFP(exp, V, Ty); 1529 if ((Name == "exp2" && TLI->has(LibFunc::exp2)) || 1530 (Name == "exp2f" && TLI->has(LibFunc::exp2f))) 1531 // Constant fold exp2(x) as pow(2,x) in case the host doesn't have a 1532 // C99 library. 1533 return ConstantFoldBinaryFP(pow, 2.0, V, Ty); 1534 break; 1535 case 'f': 1536 if ((Name == "fabs" && TLI->has(LibFunc::fabs)) || 1537 (Name == "fabsf" && TLI->has(LibFunc::fabsf))) 1538 return ConstantFoldFP(fabs, V, Ty); 1539 else if ((Name == "floor" && TLI->has(LibFunc::floor)) || 1540 (Name == "floorf" && TLI->has(LibFunc::floorf))) 1541 return ConstantFoldFP(floor, V, Ty); 1542 break; 1543 case 'l': 1544 if ((Name == "log" && V > 0 && TLI->has(LibFunc::log)) || 1545 (Name == "logf" && V > 0 && TLI->has(LibFunc::logf))) 1546 return ConstantFoldFP(log, V, Ty); 1547 else if ((Name == "log10" && V > 0 && TLI->has(LibFunc::log10)) || 1548 (Name == "log10f" && V > 0 && TLI->has(LibFunc::log10f))) 1549 return ConstantFoldFP(log10, V, Ty); 1550 else if (IntrinsicID == Intrinsic::sqrt && 1551 (Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())) { 1552 if (V >= -0.0) 1553 return ConstantFoldFP(sqrt, V, Ty); 1554 else { 1555 // Unlike the sqrt definitions in C/C++, POSIX, and IEEE-754 - which 1556 // all guarantee or favor returning NaN - the square root of a 1557 // negative number is not defined for the LLVM sqrt intrinsic. 1558 // This is because the intrinsic should only be emitted in place of 1559 // libm's sqrt function when using "no-nans-fp-math". 1560 return UndefValue::get(Ty); 1561 } 1562 } 1563 break; 1564 case 's': 1565 if ((Name == "sin" && TLI->has(LibFunc::sin)) || 1566 (Name == "sinf" && TLI->has(LibFunc::sinf))) 1567 return ConstantFoldFP(sin, V, Ty); 1568 else if ((Name == "sinh" && TLI->has(LibFunc::sinh)) || 1569 (Name == "sinhf" && TLI->has(LibFunc::sinhf))) 1570 return ConstantFoldFP(sinh, V, Ty); 1571 else if ((Name == "sqrt" && V >= 0 && TLI->has(LibFunc::sqrt)) || 1572 (Name == "sqrtf" && V >= 0 && TLI->has(LibFunc::sqrtf))) 1573 return ConstantFoldFP(sqrt, V, Ty); 1574 break; 1575 case 't': 1576 if ((Name == "tan" && TLI->has(LibFunc::tan)) || 1577 (Name == "tanf" && TLI->has(LibFunc::tanf))) 1578 return ConstantFoldFP(tan, V, Ty); 1579 else if ((Name == "tanh" && TLI->has(LibFunc::tanh)) || 1580 (Name == "tanhf" && TLI->has(LibFunc::tanhf))) 1581 return ConstantFoldFP(tanh, V, Ty); 1582 break; 1583 default: 1584 break; 1585 } 1586 return nullptr; 1587 } 1588 1589 if (ConstantInt *Op = dyn_cast<ConstantInt>(Operands[0])) { 1590 switch (IntrinsicID) { 1591 case Intrinsic::bswap: 1592 return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap()); 1593 case Intrinsic::ctpop: 1594 return ConstantInt::get(Ty, Op->getValue().countPopulation()); 1595 case Intrinsic::convert_from_fp16: { 1596 APFloat Val(APFloat::IEEEhalf, Op->getValue()); 1597 1598 bool lost = false; 1599 APFloat::opStatus status = Val.convert( 1600 Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost); 1601 1602 // Conversion is always precise. 1603 (void)status; 1604 assert(status == APFloat::opOK && !lost && 1605 "Precision lost during fp16 constfolding"); 1606 1607 return ConstantFP::get(Ty->getContext(), Val); 1608 } 1609 default: 1610 return nullptr; 1611 } 1612 } 1613 1614 // Support ConstantVector in case we have an Undef in the top. 1615 if (isa<ConstantVector>(Operands[0]) || 1616 isa<ConstantDataVector>(Operands[0])) { 1617 Constant *Op = cast<Constant>(Operands[0]); 1618 switch (IntrinsicID) { 1619 default: break; 1620 case Intrinsic::x86_sse_cvtss2si: 1621 case Intrinsic::x86_sse_cvtss2si64: 1622 case Intrinsic::x86_sse2_cvtsd2si: 1623 case Intrinsic::x86_sse2_cvtsd2si64: 1624 if (ConstantFP *FPOp = 1625 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U))) 1626 return ConstantFoldConvertToInt(FPOp->getValueAPF(), 1627 /*roundTowardZero=*/false, Ty); 1628 case Intrinsic::x86_sse_cvttss2si: 1629 case Intrinsic::x86_sse_cvttss2si64: 1630 case Intrinsic::x86_sse2_cvttsd2si: 1631 case Intrinsic::x86_sse2_cvttsd2si64: 1632 if (ConstantFP *FPOp = 1633 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U))) 1634 return ConstantFoldConvertToInt(FPOp->getValueAPF(), 1635 /*roundTowardZero=*/true, Ty); 1636 } 1637 } 1638 1639 if (isa<UndefValue>(Operands[0])) { 1640 if (IntrinsicID == Intrinsic::bswap) 1641 return Operands[0]; 1642 return nullptr; 1643 } 1644 1645 return nullptr; 1646 } 1647 1648 if (Operands.size() == 2) { 1649 if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) { 1650 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy()) 1651 return nullptr; 1652 double Op1V = getValueAsDouble(Op1); 1653 1654 if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) { 1655 if (Op2->getType() != Op1->getType()) 1656 return nullptr; 1657 1658 double Op2V = getValueAsDouble(Op2); 1659 if (IntrinsicID == Intrinsic::pow) { 1660 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty); 1661 } 1662 if (IntrinsicID == Intrinsic::copysign) { 1663 APFloat V1 = Op1->getValueAPF(); 1664 APFloat V2 = Op2->getValueAPF(); 1665 V1.copySign(V2); 1666 return ConstantFP::get(Ty->getContext(), V1); 1667 } 1668 1669 if (IntrinsicID == Intrinsic::minnum) { 1670 const APFloat &C1 = Op1->getValueAPF(); 1671 const APFloat &C2 = Op2->getValueAPF(); 1672 return ConstantFP::get(Ty->getContext(), minnum(C1, C2)); 1673 } 1674 1675 if (IntrinsicID == Intrinsic::maxnum) { 1676 const APFloat &C1 = Op1->getValueAPF(); 1677 const APFloat &C2 = Op2->getValueAPF(); 1678 return ConstantFP::get(Ty->getContext(), maxnum(C1, C2)); 1679 } 1680 1681 if (!TLI) 1682 return nullptr; 1683 if ((Name == "pow" && TLI->has(LibFunc::pow)) || 1684 (Name == "powf" && TLI->has(LibFunc::powf))) 1685 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty); 1686 if ((Name == "fmod" && TLI->has(LibFunc::fmod)) || 1687 (Name == "fmodf" && TLI->has(LibFunc::fmodf))) 1688 return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty); 1689 if ((Name == "atan2" && TLI->has(LibFunc::atan2)) || 1690 (Name == "atan2f" && TLI->has(LibFunc::atan2f))) 1691 return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty); 1692 } else if (ConstantInt *Op2C = dyn_cast<ConstantInt>(Operands[1])) { 1693 if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy()) 1694 return ConstantFP::get(Ty->getContext(), 1695 APFloat((float)std::pow((float)Op1V, 1696 (int)Op2C->getZExtValue()))); 1697 if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy()) 1698 return ConstantFP::get(Ty->getContext(), 1699 APFloat((float)std::pow((float)Op1V, 1700 (int)Op2C->getZExtValue()))); 1701 if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy()) 1702 return ConstantFP::get(Ty->getContext(), 1703 APFloat((double)std::pow((double)Op1V, 1704 (int)Op2C->getZExtValue()))); 1705 } 1706 return nullptr; 1707 } 1708 1709 if (ConstantInt *Op1 = dyn_cast<ConstantInt>(Operands[0])) { 1710 if (ConstantInt *Op2 = dyn_cast<ConstantInt>(Operands[1])) { 1711 switch (IntrinsicID) { 1712 default: break; 1713 case Intrinsic::sadd_with_overflow: 1714 case Intrinsic::uadd_with_overflow: 1715 case Intrinsic::ssub_with_overflow: 1716 case Intrinsic::usub_with_overflow: 1717 case Intrinsic::smul_with_overflow: 1718 case Intrinsic::umul_with_overflow: { 1719 APInt Res; 1720 bool Overflow; 1721 switch (IntrinsicID) { 1722 default: llvm_unreachable("Invalid case"); 1723 case Intrinsic::sadd_with_overflow: 1724 Res = Op1->getValue().sadd_ov(Op2->getValue(), Overflow); 1725 break; 1726 case Intrinsic::uadd_with_overflow: 1727 Res = Op1->getValue().uadd_ov(Op2->getValue(), Overflow); 1728 break; 1729 case Intrinsic::ssub_with_overflow: 1730 Res = Op1->getValue().ssub_ov(Op2->getValue(), Overflow); 1731 break; 1732 case Intrinsic::usub_with_overflow: 1733 Res = Op1->getValue().usub_ov(Op2->getValue(), Overflow); 1734 break; 1735 case Intrinsic::smul_with_overflow: 1736 Res = Op1->getValue().smul_ov(Op2->getValue(), Overflow); 1737 break; 1738 case Intrinsic::umul_with_overflow: 1739 Res = Op1->getValue().umul_ov(Op2->getValue(), Overflow); 1740 break; 1741 } 1742 Constant *Ops[] = { 1743 ConstantInt::get(Ty->getContext(), Res), 1744 ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow) 1745 }; 1746 return ConstantStruct::get(cast<StructType>(Ty), Ops); 1747 } 1748 case Intrinsic::cttz: 1749 if (Op2->isOne() && Op1->isZero()) // cttz(0, 1) is undef. 1750 return UndefValue::get(Ty); 1751 return ConstantInt::get(Ty, Op1->getValue().countTrailingZeros()); 1752 case Intrinsic::ctlz: 1753 if (Op2->isOne() && Op1->isZero()) // ctlz(0, 1) is undef. 1754 return UndefValue::get(Ty); 1755 return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros()); 1756 } 1757 } 1758 1759 return nullptr; 1760 } 1761 return nullptr; 1762 } 1763 1764 if (Operands.size() != 3) 1765 return nullptr; 1766 1767 if (const ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) { 1768 if (const ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) { 1769 if (const ConstantFP *Op3 = dyn_cast<ConstantFP>(Operands[2])) { 1770 switch (IntrinsicID) { 1771 default: break; 1772 case Intrinsic::fma: 1773 case Intrinsic::fmuladd: { 1774 APFloat V = Op1->getValueAPF(); 1775 APFloat::opStatus s = V.fusedMultiplyAdd(Op2->getValueAPF(), 1776 Op3->getValueAPF(), 1777 APFloat::rmNearestTiesToEven); 1778 if (s != APFloat::opInvalidOp) 1779 return ConstantFP::get(Ty->getContext(), V); 1780 1781 return nullptr; 1782 } 1783 } 1784 } 1785 } 1786 } 1787 1788 return nullptr; 1789 } 1790 1791 static Constant *ConstantFoldVectorCall(StringRef Name, unsigned IntrinsicID, 1792 VectorType *VTy, 1793 ArrayRef<Constant *> Operands, 1794 const TargetLibraryInfo *TLI) { 1795 SmallVector<Constant *, 4> Result(VTy->getNumElements()); 1796 SmallVector<Constant *, 4> Lane(Operands.size()); 1797 Type *Ty = VTy->getElementType(); 1798 1799 for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) { 1800 // Gather a column of constants. 1801 for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) { 1802 Constant *Agg = Operands[J]->getAggregateElement(I); 1803 if (!Agg) 1804 return nullptr; 1805 1806 Lane[J] = Agg; 1807 } 1808 1809 // Use the regular scalar folding to simplify this column. 1810 Constant *Folded = ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI); 1811 if (!Folded) 1812 return nullptr; 1813 Result[I] = Folded; 1814 } 1815 1816 return ConstantVector::get(Result); 1817 } 1818 1819 /// Attempt to constant fold a call to the specified function 1820 /// with the specified arguments, returning null if unsuccessful. 1821 Constant * 1822 llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands, 1823 const TargetLibraryInfo *TLI) { 1824 if (!F->hasName()) 1825 return nullptr; 1826 StringRef Name = F->getName(); 1827 1828 Type *Ty = F->getReturnType(); 1829 1830 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) 1831 return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands, TLI); 1832 1833 return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI); 1834 } 1835