//===-- ConstantFolding.cpp - Fold instructions into constants -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FEnv.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include <cerrno>
#include <cmath>
using namespace llvm;

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//

/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
/// DataLayout.  This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
static Constant *FoldBitCast(Constant *C, Type *DestTy,
                             const DataLayout &TD) {
  // Catch the obvious splat cases.
  if (C->isNullValue() && !DestTy->isX86_MMXTy())
    return Constant::getNullValue(DestTy);
  if (C->isAllOnesValue() && !DestTy->isX86_MMXTy())
    return Constant::getAllOnesValue(DestTy);

  // Handle a vector->integer cast.
  if (IntegerType *IT = dyn_cast<IntegerType>(DestTy)) {
    VectorType *VTy = dyn_cast<VectorType>(C->getType());
    if (VTy == 0)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned NumSrcElts = VTy->getNumElements();
    Type *SrcEltTy = VTy->getElementType();

    // If the vector is a vector of floating point, convert it to a vector of
    // integers to simplify things.
    if (SrcEltTy->isFloatingPointTy()) {
      unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
      Type *SrcIVTy =
        VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
      // Ask IR to do the conversion now that #elts line up.
      C = ConstantExpr::getBitCast(C, SrcIVTy);
    }

    ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(C);
    if (CDV == 0)
      return ConstantExpr::getBitCast(C, DestTy);

    // Now that we know that the input value is a vector of integers, just
    // shift and insert them into our result.
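    // (Added illustration) The loop packs elements most-significant-first:
    // on little endian it walks the vector backwards, so bitcasting
    // <2 x i32> <i32 1, i32 2> to i64 yields 0x0000000200000001 there and
    // 0x0000000100000002 on big endian.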
    unsigned BitShift = TD.getTypeAllocSizeInBits(SrcEltTy);
    APInt Result(IT->getBitWidth(), 0);
    for (unsigned i = 0; i != NumSrcElts; ++i) {
      Result <<= BitShift;
      if (TD.isLittleEndian())
        Result |= CDV->getElementAsInteger(NumSrcElts-i-1);
      else
        Result |= CDV->getElementAsInteger(i);
    }

    return ConstantInt::get(IT, Result);
  }

  // The code below only handles casts to vectors currently.
  VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
  if (DestVTy == 0)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, TD);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = DestVTy->getNumElements();
  unsigned NumSrcElt = C->getType()->getVectorNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = C->getType()->getVectorElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing.  For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

  // First things first: we only want to think about integers here, so if we
  // have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    Type *DestIVTy =
      VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, TD);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer; if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    Type *SrcIVTy =
      VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    // If IR wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same.  Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
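  // (Added illustration) In the little-endian example above, each i64 is
  // split low half first, so <2 x i64> <i64 0, i64 1> expands element 1 into
  // the pair <i32 1, i32 0>.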
  bool isLittleEndian = TD.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++));
        if (!Src)  // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantExpr::getZExt(Src, Elt->getType());

        // Shift it to the right place, depending on endianness.
        Src = ConstantExpr::getShl(Src,
                                   ConstantInt::get(Src->getType(), ShiftAmt));
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantExpr::getOr(Elt, Src);
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i));
    if (!Src)  // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      Constant *Elt = ConstantExpr::getLShr(Src,
                                  ConstantInt::get(Src->getType(), ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate and remember this piece.
      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
    }
  }

  return ConstantVector::get(Result);
}


/// IsConstantOffsetFromGlobal - If this constant is actually a constant offset
/// from a global, return the global and the constant.  Because of
/// constantexprs, this function is recursive.
static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                       APInt &Offset, const DataLayout &TD) {
  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    Offset.clearAllBits();
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  if (GEPOperator *GEP = dyn_cast<GEPOperator>(CE)) {
    // If the base isn't a global+constant, we aren't either.
    if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD))
      return false;

    // Otherwise, add any offset that our operands provide.
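    // (Added illustration) For the GEP above, getelementptr
    // ([5 x i32]* @a, i32 0, i32 5) contributes 5 * 4 = 20 bytes to Offset.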
    return GEP->accumulateConstantOffset(TD, Offset);
  }

  return false;
}

/// ReadDataFromGlobal - Recursive helper to read bits out of global.  C is the
/// constant being copied out of.  ByteOffset is an offset into C.  CurPtr is
/// the pointer to copy results into and BytesLeft is the number of bytes left
/// in the CurPtr buffer.  TD is the target data.
static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
                               unsigned char *CurPtr, unsigned BytesLeft,
                               const DataLayout &TD) {
  assert(ByteOffset <= TD.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() > 64 ||
        (CI->getBitWidth() & 7) != 0)
      return false;

    uint64_t Val = CI->getZExtValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      int n = ByteOffset;
      if (!TD.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = (unsigned char)(Val >> (n * 8));
      ++ByteOffset;
    }
    return true;
  }

  if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), TD);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD);
    }
    if (CFP->getType()->isFloatTy()) {
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), TD);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD);
    }
    if (CFP->getType()->isHalfTy()) {
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), TD);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD);
    }
    return false;
  }

  if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = TD.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (1) {
      // If the element access is to the element itself and not to tail
      // padding, read the bytes from the element.
      uint64_t EltSize = TD.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, TD))
        return false;

      ++Index;

      // Check to see if we read from the last struct element; if so we're
      // done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset-CurEltOffset-ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset-CurEltOffset-ByteOffset;
      BytesLeft -= NextEltOffset-CurEltOffset-ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    Type *EltTy = cast<SequentialType>(C->getType())->getElementType();
    uint64_t EltSize = TD.getTypeAllocSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;
    uint64_t NumElts;
    if (ArrayType *AT = dyn_cast<ArrayType>(C->getType()))
      NumElts = AT->getNumElements();
    else
      NumElts = cast<VectorType>(C->getType())->getNumElements();

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, TD))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getContext()))
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, TD);
  }

  // Otherwise, unknown initializer type.
  return false;
}

static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
                                                 const DataLayout &TD) {
  Type *LoadTy = cast<PointerType>(C->getType())->getElementType();
  IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    // If this is a float/double load, we can try folding it as an int32/64
    // load and then bitcast the result.  This can be useful for union cases.
    // Note that address spaces don't matter here since we're not going to
    // result in an actual new load.
    Type *MapTy;
    if (LoadTy->isHalfTy())
      MapTy = Type::getInt16PtrTy(C->getContext());
    else if (LoadTy->isFloatTy())
      MapTy = Type::getInt32PtrTy(C->getContext());
    else if (LoadTy->isDoubleTy())
      MapTy = Type::getInt64PtrTy(C->getContext());
    else if (LoadTy->isVectorTy()) {
      MapTy = IntegerType::get(C->getContext(),
                               TD.getTypeAllocSizeInBits(LoadTy));
      MapTy = PointerType::getUnqual(MapTy);
    } else
      return 0;

    C = FoldBitCast(C, MapTy, TD);
    if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, TD))
      return FoldBitCast(Res, LoadTy, TD);
    return 0;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0) return 0;

  GlobalValue *GVal;
  APInt Offset(TD.getPointerSizeInBits(), 0);
  if (!IsConstantOffsetFromGlobal(C, GVal, Offset, TD))
    return 0;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(GVal);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      !GV->getInitializer()->getType()->isSized())
    return 0;

  // If we're loading off the beginning of the global, some bytes may be valid,
  // but we don't try to handle this.
  if (Offset.isNegative()) return 0;

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset.getZExtValue() >=
      TD.getTypeAllocSize(GV->getInitializer()->getType()))
    return UndefValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  if (!ReadDataFromGlobal(GV->getInitializer(), Offset.getZExtValue(), RawBytes,
                          BytesLoaded, TD))
    return 0;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (TD.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded-1-i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}

/// ConstantFoldLoadFromConstPtr - Return the value that a load from C would
/// produce if it is constant and determinable.  If this is not determinable,
/// return null.
Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
                                             const DataLayout *TD) {
  // First, try the easy cases:
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      return GV->getInitializer();

  // If the loaded value isn't a constant expr, we can't handle it.
  ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return 0;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0)))
      if (GV->isConstant() && GV->hasDefinitiveInitializer())
        if (Constant *V =
              ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
          return V;
  }

  // Instead of loading a constant C string, use the corresponding integer
  // value directly if the string length is small enough.
  StringRef Str;
  if (TD && getConstantStringInfo(CE, Str) && !Str.empty()) {
    unsigned StrLen = Str.size();
    Type *Ty = cast<PointerType>(CE->getType())->getElementType();
    unsigned NumBits = Ty->getPrimitiveSizeInBits();
    // Replace load with immediate integer if the result is an integer or fp
    // value.
    if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
        (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
      APInt StrVal(NumBits, 0);
      APInt SingleChar(NumBits, 0);
      if (TD->isLittleEndian()) {
        for (signed i = StrLen-1; i >= 0; i--) {
          SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
          StrVal = (StrVal << 8) | SingleChar;
        }
      } else {
        for (unsigned i = 0; i < StrLen; i++) {
          SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
          StrVal = (StrVal << 8) | SingleChar;
        }
        // Append NUL at the end.
        SingleChar = 0;
        StrVal = (StrVal << 8) | SingleChar;
      }

      Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
      if (Ty->isFloatingPointTy())
        Res = ConstantExpr::getBitCast(Res, Ty);
      return Res;
    }
  }

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
  if (GlobalVariable *GV =
        dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, TD))) {
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      Type *ResTy = cast<PointerType>(C->getType())->getElementType();
      if (GV->getInitializer()->isNullValue())
        return Constant::getNullValue(ResTy);
      if (isa<UndefValue>(GV->getInitializer()))
        return UndefValue::get(ResTy);
    }
  }

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
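  // (Added commentary) e.g. a float loaded through an i32* bitcast of a
  // constant global: FoldReinterpretLoadFromConstPtr reassembles the
  // initializer's raw bytes in the target's endianness.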
  if (TD)
    return FoldReinterpretLoadFromConstPtr(CE, *TD);
  return 0;
}

static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){
  if (LI->isVolatile()) return 0;

  if (Constant *C = dyn_cast<Constant>(LI->getOperand(0)))
    return ConstantFoldLoadFromConstPtr(C, TD);

  return 0;
}

/// SymbolicallyEvaluateBinop - One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together.  If target data info is available, it is provided as DL,
/// otherwise DL is null.
static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
                                           Constant *Op1, const DataLayout *DL){
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.


  if (Opc == Instruction::And && DL) {
    unsigned BitWidth = DL->getTypeSizeInBits(Op0->getType()->getScalarType());
    APInt KnownZero0(BitWidth, 0), KnownOne0(BitWidth, 0);
    APInt KnownZero1(BitWidth, 0), KnownOne1(BitWidth, 0);
    ComputeMaskedBits(Op0, KnownZero0, KnownOne0, DL);
    ComputeMaskedBits(Op1, KnownZero1, KnownOne1, DL);
    if ((KnownOne1 | KnownZero0).isAllOnesValue()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((KnownOne0 | KnownZero1).isAllOnesValue()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    APInt KnownZero = KnownZero0 | KnownZero1;
    APInt KnownOne = KnownOne0 & KnownOne1;
    if ((KnownZero | KnownOne).isAllOnesValue()) {
      return ConstantInt::get(Op0->getType(), KnownOne);
    }
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant.  This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub && DL) {
    GlobalValue *GV1, *GV2;
    unsigned PtrSize = DL->getPointerSizeInBits();
    unsigned OpSize = DL->getTypeSizeInBits(Op0->getType());
    APInt Offs1(PtrSize, 0), Offs2(PtrSize, 0);

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, *DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *DL) &&
          GV1 == GV2) {
        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth, so we have to convert to the
        // right size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return 0;
}

/// CastGEPIndices - If array indices are not pointer-sized integers,
/// explicitly cast them so that they aren't implicitly casted by the
/// getelementptr.
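/// For example (added illustration), with 64-bit pointers the i16 indices in
///   getelementptr [3 x i32]* @g, i16 0, i16 2
/// are sign-extended to i64 before the GEP is reformed.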
static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
                                Type *ResultTy, const DataLayout *TD,
                                const TargetLibraryInfo *TLI) {
  if (!TD) return 0;
  Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(Ops[0]->getType(),
                                                        Ops.slice(1, i-1)))) &&
        Ops[i]->getType() != IntPtrTy) {
      Any = true;
      NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                      true,
                                                                      IntPtrTy,
                                                                      true),
                                              Ops[i], IntPtrTy));
    } else
      NewIdxs.push_back(Ops[i]);
  }
  if (!Any) return 0;

  Constant *C =
    ConstantExpr::getGetElementPtr(Ops[0], NewIdxs);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
      C = Folded;
  return C;
}

/// Strip the pointer casts, but preserve the address space information.
static Constant* StripPtrCastKeepAS(Constant* Ptr) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  PointerType *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = cast<Constant>(Ptr->stripPointerCasts());
  PointerType *NewPtrTy = cast<PointerType>(Ptr->getType());

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    NewPtrTy = NewPtrTy->getElementType()->getPointerTo(
      OldPtrTy->getAddressSpace());
    Ptr = ConstantExpr::getBitCast(Ptr, NewPtrTy);
  }
  return Ptr;
}

/// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP
/// constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
                                         Type *ResultTy, const DataLayout *TD,
                                         const TargetLibraryInfo *TLI) {
  Constant *Ptr = Ops[0];
  if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized() ||
      !Ptr->getType()->isPointerTy())
    return 0;

  Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext());

  // If this is a constant expr gep that is effectively computing an
  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'.
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i])) {

      // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
      // "inttoptr (sub (ptrtoint Ptr), V)"
      if (Ops.size() == 2 &&
          cast<PointerType>(ResultTy)->getElementType()->isIntegerTy(8)) {
        ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[1]);
        assert((CE == 0 || CE->getType() == IntPtrTy) &&
               "CastGEPIndices didn't canonicalize index types!");
        if (CE && CE->getOpcode() == Instruction::Sub &&
            CE->getOperand(0)->isNullValue()) {
          Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
          Res = ConstantExpr::getSub(Res, CE->getOperand(1));
          Res = ConstantExpr::getIntToPtr(Res, ResultTy);
          if (ConstantExpr *ResCE = dyn_cast<ConstantExpr>(Res))
            Res = ConstantFoldConstantExpression(ResCE, TD, TLI);
          return Res;
        }
      }
      return 0;
    }

  unsigned BitWidth = TD->getTypeSizeInBits(IntPtrTy);
  APInt Offset =
    APInt(BitWidth, TD->getIndexedOffset(Ptr->getType(),
                                         makeArrayRef((Value *const*)
                                                        Ops.data() + 1,
                                                      Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr);

  // If this is a GEP of a GEP, fold it all into a single GEP.
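  // (Added illustration) e.g. the offsets of
  //   getelementptr (getelementptr @g, i64 0, i32 1), i64 0, i32 2
  // are accumulated into a single Offset against the underlying object @g.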
  while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
    SmallVector<Value *, 4> NestedOps(GEP->op_begin()+1, GEP->op_end());

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (unsigned i = 0, e = NestedOps.size(); i != e; ++i)
      if (!isa<ConstantInt>(NestedOps[i])) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    Offset += APInt(BitWidth,
                    TD->getIndexedOffset(Ptr->getType(), NestedOps));
    Ptr = StripPtrCastKeepAS(Ptr);
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
    if (CE->getOpcode() == Instruction::IntToPtr)
      if (ConstantInt *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
  if (Ptr->isNullValue() || BasePtr != 0) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset+BasePtr);
    return ConstantExpr::getIntToPtr(C, ResultTy);
  }

  // Otherwise form a regular getelementptr.  Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.
  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Forming regular GEP of non-pointer type");
  SmallVector<Constant*, 32> NewIdxs;
  do {
    if (SequentialType *ATy = dyn_cast<SequentialType>(Ty)) {
      if (ATy->isPointerTy()) {
        // The only pointer indexing we'll do is on the first index of the GEP.
        if (!NewIdxs.empty())
          break;

        // Only handle pointers to sized types, not pointers to functions.
        if (!ATy->getElementType()->isSized())
          return 0;
      }

      // Determine which element of the array the offset points into.
      APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType()));
      IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext());
      if (ElemSize == 0)
        // The element size is 0.  This may be [0 x Ty]*, so just use a zero
        // index for this level and proceed to the next level to see if it can
        // accommodate the offset.
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
      else {
        // The element size is non-zero; divide the offset by the element
        // size (rounding down) to compute the index at this level.
        APInt NewIdx = Offset.udiv(ElemSize);
        Offset -= NewIdx * ElemSize;
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
      }
      Ty = ATy->getElementType();
    } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
      // If we end up with an offset that isn't valid for this struct type, we
      // can't re-form this GEP in a regular form, so bail out.  The pointer
      // operand likely went through casts that are necessary to make the GEP
      // sensible.
      const StructLayout &SL = *TD->getStructLayout(STy);
      if (Offset.uge(SL.getSizeInBytes()))
        break;

      // Determine which field of the struct the offset points into.  The
      // getZExtValue is fine as we've already ensured that the offset is
      // within the range representable by the StructLayout API.
      unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
      NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                         ElIdx));
      Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
      Ty = STy->getTypeAtIndex(ElIdx);
    } else {
      // We've reached some non-indexable type.
      break;
    }
  } while (Ty != cast<PointerType>(ResultTy)->getElementType());

  // If we haven't used up the entire offset by descending the static
  // type, then the offset is pointing into the middle of an indivisible
  // member, so we can't simplify it.
  if (Offset != 0)
    return 0;

  // Create a GEP.
  Constant *C =
    ConstantExpr::getGetElementPtr(Ptr, NewIdxs);
  assert(cast<PointerType>(C->getType())->getElementType() == Ty &&
         "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
  if (Ty != cast<PointerType>(ResultTy)->getElementType())
    C = FoldBitCast(C, ResultTy, *TD);

  return C;
}



//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//

/// ConstantFoldInstruction - Try to constant fold the specified instruction.
/// If successful, the constant result is returned; if not, null is returned.
/// Note that this fails if not all of the operands are constant.  Otherwise,
/// this function can only fail when attempting to fold instructions like loads
/// and stores, which have no constant expression form.
Constant *llvm::ConstantFoldInstruction(Instruction *I,
                                        const DataLayout *TD,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = 0;

    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *Incoming = PN->getIncomingValue(i);
      // If the incoming value is undef then skip it.  Note that while we could
      // skip the value if it is equal to the phi node itself we choose not to
      // because that would break the rule that constant folding only applies
      // if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      Constant *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return 0;
      // Fold the PHI's operands.
      if (ConstantExpr *NewC = dyn_cast<ConstantExpr>(C))
        C = ConstantFoldConstantExpression(NewC, TD, TLI);
      // If the incoming value is a different constant from the one we saw
      // previously, then give up.
      if (CommonValue && C != CommonValue)
        return 0;
      CommonValue = C;
    }


    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants; if so,
  // hand off to ConstantFoldInstOperands.
  SmallVector<Constant*, 8> Ops;
  for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i) {
    Constant *Op = dyn_cast<Constant>(*i);
    if (!Op)
      return 0;  // All operands not constant!

    // Fold the Instruction's operands.
    if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(Op))
      Op = ConstantFoldConstantExpression(NewCE, TD, TLI);

    Ops.push_back(Op);
  }

  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
                                           TD, TLI);

  if (const LoadInst *LI = dyn_cast<LoadInst>(I))
    return ConstantFoldLoadInst(LI, TD);

  if (InsertValueInst *IVI = dyn_cast<InsertValueInst>(I))
    return ConstantExpr::getInsertValue(
                                cast<Constant>(IVI->getAggregateOperand()),
                                cast<Constant>(IVI->getInsertedValueOperand()),
                                IVI->getIndices());

  if (ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I))
    return ConstantExpr::getExtractValue(
                                    cast<Constant>(EVI->getAggregateOperand()),
                                    EVI->getIndices());

  return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Ops, TD, TLI);
}

static Constant *
ConstantFoldConstantExpressionImpl(const ConstantExpr *CE, const DataLayout *TD,
                                   const TargetLibraryInfo *TLI,
                                   SmallPtrSet<ConstantExpr *, 4> &FoldedOps) {
  SmallVector<Constant *, 8> Ops;
  for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end(); i != e;
       ++i) {
    Constant *NewC = cast<Constant>(*i);
    // Recursively fold the ConstantExpr's operands.  If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(NewC)) {
      if (FoldedOps.insert(NewCE))
        NewC = ConstantFoldConstantExpressionImpl(NewCE, TD, TLI, FoldedOps);
    }
    Ops.push_back(NewC);
  }

  if (CE->isCompare())
    return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
                                           TD, TLI);
  return ConstantFoldInstOperands(CE->getOpcode(), CE->getType(), Ops, TD, TLI);
}

/// ConstantFoldConstantExpression - Attempt to fold the constant expression
/// using the specified DataLayout.  If successful, the constant result is
/// returned; if not, null is returned.
Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
                                               const DataLayout *TD,
                                               const TargetLibraryInfo *TLI) {
  SmallPtrSet<ConstantExpr *, 4> FoldedOps;
  return ConstantFoldConstantExpressionImpl(CE, TD, TLI, FoldedOps);
}

/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
/// specified opcode and operands.  If successful, the constant result is
/// returned; if not, null is returned.  Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
///
/// TODO: This function neither utilizes nor preserves nsw/nuw/inbounds/etc
/// information, due to only being passed an opcode and operands.  Constant
/// folding using this function strips this information.
///
Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout *TD,
                                         const TargetLibraryInfo *TLI) {
  // Handle easy binops first.
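  // (Added commentary) For binary operators we first try
  // SymbolicallyEvaluateBinop, which can use DataLayout, and otherwise fall
  // back to ConstantExpr::get.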
  if (Instruction::isBinaryOp(Opcode)) {
    if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1]))
      if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD))
        return C;

    return ConstantExpr::get(Opcode, Ops[0], Ops[1]);
  }

  switch (Opcode) {
  default: return 0;
  case Instruction::ICmp:
  case Instruction::FCmp: llvm_unreachable("Invalid for compares");
  case Instruction::Call:
    if (Function *F = dyn_cast<Function>(Ops.back()))
      if (canConstantFoldCallTo(F))
        return ConstantFoldCall(F, Ops.slice(0, Ops.size() - 1), TLI);
    return 0;
  case Instruction::PtrToInt:
    // If the input is an inttoptr, eliminate the pair.  This requires knowing
    // the width of a pointer, so it can't be done in ConstantExpr::getCast.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) {
      if (TD && CE->getOpcode() == Instruction::IntToPtr) {
        Constant *Input = CE->getOperand(0);
        unsigned InWidth = Input->getType()->getScalarSizeInBits();
        if (TD->getPointerSizeInBits() < InWidth) {
          Constant *Mask =
            ConstantInt::get(CE->getContext(), APInt::getLowBitsSet(InWidth,
                                                  TD->getPointerSizeInBits()));
          Input = ConstantExpr::getAnd(Input, Mask);
        }
        // Do a zext or trunc to get to the dest size.
        return ConstantExpr::getIntegerCast(Input, DestTy, false);
      }
    }
    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size.  This requires knowing the width of a
    // pointer, so it can't be done in ConstantExpr::getCast.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0]))
      if (TD &&
          TD->getPointerSizeInBits() <= CE->getType()->getScalarSizeInBits() &&
          CE->getOpcode() == Instruction::PtrToInt)
        return FoldBitCast(CE->getOperand(0), DestTy, *TD);

    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::BitCast:
    if (TD)
      return FoldBitCast(Ops[0], DestTy, *TD);
    return ConstantExpr::getBitCast(Ops[0], DestTy);
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
  case Instruction::GetElementPtr:
    if (Constant *C = CastGEPIndices(Ops, DestTy, TD, TLI))
      return C;
    if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, TD, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1));
  }
}

/// ConstantFoldCompareInstOperands - Attempt to constant fold a compare
/// instruction (icmp/fcmp) with the specified operands.  If it fails, it
/// returns a constant expression of the specified operands.
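/// For example (added illustration), icmp eq (ptrtoint i8* @g to i64), 0
/// folds to icmp eq i8* @g, null when i64 is the pointer-sized type.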
///
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                Constant *Ops0, Constant *Ops1,
                                                const DataLayout *TD,
                                                const TargetLibraryInfo *TLI) {
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // ConstantExpr::getCompare cannot do this, because it doesn't have TD
  // around to know if bit truncation is happening.
  if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (TD && Ops1->isNullValue()) {
      Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
      }

      // Only do this transformation if the int is IntPtrTy in size, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt &&
          CE0->getType() == IntPtrTy) {
        Constant *C = CE0->getOperand(0);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
      }
    }

    if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (TD && CE0->getOpcode() == CE1->getOpcode()) {
        Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());

        if (CE0->getOpcode() == Instruction::IntToPtr) {
          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          return ConstantFoldCompareInstOperands(Predicate, C0, C1, TD, TLI);
        }

        // Only do this transformation if the int is IntPtrTy in size,
        // otherwise there is a truncation or extension that we aren't
        // modeling.
        if ((CE0->getOpcode() == Instruction::PtrToInt &&
             CE0->getType() == IntPtrTy &&
             CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()))
          return ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0),
                                                 CE1->getOperand(0), TD, TLI);
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS =
        ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), Ops1,
                                        TD, TLI);
      Constant *RHS =
        ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(1), Ops1,
                                        TD, TLI);
      unsigned OpC =
        Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      Constant *Ops[] = { LHS, RHS };
      return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, TD, TLI);
    }
  }

  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}


/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
/// getelementptr constantexpr, return the constant value being addressed by
/// the constant expression, or null if something is funny and we can't decide.
Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
                                                       ConstantExpr *CE) {
  if (!CE->getOperand(1)->isNullValue())
    return 0;  // Do not allow stepping over the value!

  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
    C = C->getAggregateElement(CE->getOperand(i));
    if (C == 0) return 0;
  }
  return C;
}

/// ConstantFoldLoadThroughGEPIndices - Given a constant and getelementptr
/// indices (with an *implied* zero pointer index that is not in the list),
/// return the constant value being addressed by a virtual load, or null if
/// something is funny and we can't decide.
Constant *llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
                                                  ArrayRef<Constant*> Indices) {
  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
    C = C->getAggregateElement(Indices[i]);
    if (C == 0) return 0;
  }
  return C;
}


//===----------------------------------------------------------------------===//
//  Constant Folding for Calls
//

/// canConstantFoldCallTo - Return true if it's even possible to fold a call to
/// the specified function.
bool
llvm::canConstantFoldCallTo(const Function *F) {
  switch (F->getIntrinsicID()) {
  case Intrinsic::fabs:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::floor:
  case Intrinsic::sqrt:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
    return true;
  default:
    return false;
  case 0: break;
  }

  if (!F->hasName()) return false;
  StringRef Name = F->getName();

  // In these cases, the check of the length is required.  We don't want to
  // return true for a name like "cos\0blah", which strcmp would compare equal
  // to "cos" but which has length 8.
  switch (Name[0]) {
  default: return false;
  case 'a':
    return Name == "acos" || Name == "asin" || Name == "atan" ||
           Name == "atan2";
  case 'c':
    return Name == "cos" || Name == "ceil" || Name == "cosf" || Name == "cosh";
  case 'e':
    return Name == "exp" || Name == "exp2";
  case 'f':
    return Name == "fabs" || Name == "fmod" || Name == "floor";
  case 'l':
    return Name == "log" || Name == "log10";
  case 'p':
    return Name == "pow";
  case 's':
    return Name == "sin" || Name == "sinh" || Name == "sqrt" ||
           Name == "sinf" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanh";
  }
}

static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
                                Type *Ty) {
  sys::llvm_fenv_clearexcept();
  V = NativeFP(V);
  if (sys::llvm_fenv_testexcept()) {
    sys::llvm_fenv_clearexcept();
    return 0;
  }

  if (Ty->isHalfTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isFloatTy())
    return ConstantFP::get(Ty->getContext(), APFloat((float)V));
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold half/float/double");
}

static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
                                      double V, double W, Type *Ty) {
  sys::llvm_fenv_clearexcept();
  V = NativeFP(V, W);
  if (sys::llvm_fenv_testexcept()) {
    sys::llvm_fenv_clearexcept();
    return 0;
  }

  if (Ty->isHalfTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isFloatTy())
    return ConstantFP::get(Ty->getContext(), APFloat((float)V));
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold half/float/double");
}

/// ConstantFoldConvertToInt - Attempt to fold an SSE floating-point-to-integer
/// conversion of a constant floating point.  If roundTowardZero is false, the
/// default IEEE rounding is used (toward nearest, ties to even).  This matches
/// the behavior of the non-truncating SSE instructions in the default rounding
/// mode.  The desired integer type Ty is used to select how many bits are
/// available for the result.  Returns null if the conversion cannot be
/// performed, otherwise returns the Constant value resulting from the
/// conversion.
static Constant *ConstantFoldConvertToInt(const APFloat &Val,
                                          bool roundTowardZero, Type *Ty) {
  // All of these conversion intrinsics form an integer of at most 64 bits.
  unsigned ResultWidth = cast<IntegerType>(Ty)->getBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  uint64_t UIntVal;
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
                                               : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status = Val.convertToInteger(&UIntVal, ResultWidth,
                                                  /*isSigned=*/true, mode,
                                                  &isExact);
  if (status != APFloat::opOK && status != APFloat::opInexact)
    return 0;
  return ConstantInt::get(Ty, UIntVal, /*isSigned=*/true);
}

/// ConstantFoldCall - Attempt to constant fold a call to the specified
/// function with the specified arguments, returning null if unsuccessful.
Constant *
llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
                       const TargetLibraryInfo *TLI) {
  if (!F->hasName()) return 0;
  StringRef Name = F->getName();

  Type *Ty = F->getReturnType();
  if (Operands.size() == 1) {
    if (ConstantFP *Op = dyn_cast<ConstantFP>(Operands[0])) {
      if (F->getIntrinsicID() == Intrinsic::convert_to_fp16) {
        APFloat Val(Op->getValueAPF());

        bool lost = false;
        Val.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &lost);

        return ConstantInt::get(F->getContext(), Val.bitcastToAPInt());
      }
      if (!TLI)
        return 0;

      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
        return 0;

      /// We only fold functions with finite arguments.  Folding NaN and inf is
      /// likely to be aborted with an exception anyway, and some host libms
      /// have known errors raising exceptions.
      if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
        return 0;

      /// Currently APFloat versions of these functions do not exist, so we use
      /// the host native double versions.  Float versions are not called
      /// directly but for all these it is true (float)(f((double)arg)) ==
      /// f(arg).  Long double not supported yet.
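      // (Added commentary) Widen the operand to a host double for the libm
      // call; half has no native host type, so it goes through APFloat.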
      double V;
      if (Ty->isFloatTy())
        V = Op->getValueAPF().convertToFloat();
      else if (Ty->isDoubleTy())
        V = Op->getValueAPF().convertToDouble();
      else {
        bool unused;
        APFloat APF = Op->getValueAPF();
        APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &unused);
        V = APF.convertToDouble();
      }

      switch (F->getIntrinsicID()) {
      default: break;
      case Intrinsic::fabs:
        return ConstantFoldFP(fabs, V, Ty);
#if HAVE_LOG2
      case Intrinsic::log2:
        return ConstantFoldFP(log2, V, Ty);
#endif
#if HAVE_LOG
      case Intrinsic::log:
        return ConstantFoldFP(log, V, Ty);
#endif
#if HAVE_LOG10
      case Intrinsic::log10:
        return ConstantFoldFP(log10, V, Ty);
#endif
#if HAVE_EXP
      case Intrinsic::exp:
        return ConstantFoldFP(exp, V, Ty);
#endif
#if HAVE_EXP2
      case Intrinsic::exp2:
        return ConstantFoldFP(exp2, V, Ty);
#endif
      case Intrinsic::floor:
        return ConstantFoldFP(floor, V, Ty);
      }

      switch (Name[0]) {
      case 'a':
        if (Name == "acos" && TLI->has(LibFunc::acos))
          return ConstantFoldFP(acos, V, Ty);
        else if (Name == "asin" && TLI->has(LibFunc::asin))
          return ConstantFoldFP(asin, V, Ty);
        else if (Name == "atan" && TLI->has(LibFunc::atan))
          return ConstantFoldFP(atan, V, Ty);
        break;
      case 'c':
        if (Name == "ceil" && TLI->has(LibFunc::ceil))
          return ConstantFoldFP(ceil, V, Ty);
        else if (Name == "cos" && TLI->has(LibFunc::cos))
          return ConstantFoldFP(cos, V, Ty);
        else if (Name == "cosh" && TLI->has(LibFunc::cosh))
          return ConstantFoldFP(cosh, V, Ty);
        else if (Name == "cosf" && TLI->has(LibFunc::cosf))
          return ConstantFoldFP(cos, V, Ty);
        break;
      case 'e':
        if (Name == "exp" && TLI->has(LibFunc::exp))
          return ConstantFoldFP(exp, V, Ty);

        if (Name == "exp2" && TLI->has(LibFunc::exp2)) {
          // Constant fold exp2(x) as pow(2, x) in case the host doesn't have a
          // C99 library.
          return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
        }
        break;
      case 'f':
        if (Name == "fabs" && TLI->has(LibFunc::fabs))
          return ConstantFoldFP(fabs, V, Ty);
        else if (Name == "floor" && TLI->has(LibFunc::floor))
          return ConstantFoldFP(floor, V, Ty);
        break;
      case 'l':
        if (Name == "log" && V > 0 && TLI->has(LibFunc::log))
          return ConstantFoldFP(log, V, Ty);
        else if (Name == "log10" && V > 0 && TLI->has(LibFunc::log10))
          return ConstantFoldFP(log10, V, Ty);
        else if (F->getIntrinsicID() == Intrinsic::sqrt &&
                 (Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())) {
          if (V >= -0.0)
            return ConstantFoldFP(sqrt, V, Ty);
          else // Undefined
            return Constant::getNullValue(Ty);
        }
        break;
      case 's':
        if (Name == "sin" && TLI->has(LibFunc::sin))
          return ConstantFoldFP(sin, V, Ty);
        else if (Name == "sinh" && TLI->has(LibFunc::sinh))
          return ConstantFoldFP(sinh, V, Ty);
        else if (Name == "sqrt" && V >= 0 && TLI->has(LibFunc::sqrt))
          return ConstantFoldFP(sqrt, V, Ty);
        else if (Name == "sqrtf" && V >= 0 && TLI->has(LibFunc::sqrtf))
          return ConstantFoldFP(sqrt, V, Ty);
        else if (Name == "sinf" && TLI->has(LibFunc::sinf))
          return ConstantFoldFP(sin, V, Ty);
        break;
      case 't':
        if (Name == "tan" && TLI->has(LibFunc::tan))
          return ConstantFoldFP(tan, V, Ty);
        else if (Name == "tanh" && TLI->has(LibFunc::tanh))
          return ConstantFoldFP(tanh, V, Ty);
        break;
      default:
        break;
      }
      return 0;
    }

    if (ConstantInt *Op = dyn_cast<ConstantInt>(Operands[0])) {
      switch (F->getIntrinsicID()) {
      case Intrinsic::bswap:
        return ConstantInt::get(F->getContext(), Op->getValue().byteSwap());
      case Intrinsic::ctpop:
        return ConstantInt::get(Ty, Op->getValue().countPopulation());
      case Intrinsic::convert_from_fp16: {
        APFloat Val(APFloat::IEEEhalf, Op->getValue());

        bool lost = false;
        APFloat::opStatus status =
          Val.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &lost);

        // Conversion is always precise.
        (void)status;
        assert(status == APFloat::opOK && !lost &&
               "Precision lost during fp16 constfolding");

        return ConstantFP::get(F->getContext(), Val);
      }
      default:
        return 0;
      }
    }

    // Support ConstantVector in case we have an Undef in the top.
    if (isa<ConstantVector>(Operands[0]) ||
        isa<ConstantDataVector>(Operands[0])) {
      Constant *Op = cast<Constant>(Operands[0]);
      switch (F->getIntrinsicID()) {
      default: break;
      case Intrinsic::x86_sse_cvtss2si:
      case Intrinsic::x86_sse_cvtss2si64:
      case Intrinsic::x86_sse2_cvtsd2si:
      case Intrinsic::x86_sse2_cvtsd2si64:
        if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldConvertToInt(FPOp->getValueAPF(),
                                          /*roundTowardZero=*/false, Ty);
        break; // Don't fall through to the truncating conversions below.
      case Intrinsic::x86_sse_cvttss2si:
      case Intrinsic::x86_sse_cvttss2si64:
      case Intrinsic::x86_sse2_cvttsd2si:
      case Intrinsic::x86_sse2_cvttsd2si64:
        if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldConvertToInt(FPOp->getValueAPF(),
                                          /*roundTowardZero=*/true, Ty);
      }
    }

    if (isa<UndefValue>(Operands[0])) {
      if (F->getIntrinsicID() == Intrinsic::bswap)
        return Operands[0];
      return 0;
    }

    return 0;
  }

  if (Operands.size() == 2) {
    if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
        return 0;
      double Op1V;
      if (Ty->isFloatTy())
        Op1V = Op1->getValueAPF().convertToFloat();
      else if (Ty->isDoubleTy())
        Op1V = Op1->getValueAPF().convertToDouble();
      else {
        bool unused;
        APFloat APF = Op1->getValueAPF();
        APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &unused);
        Op1V = APF.convertToDouble();
      }

      if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
        if (Op2->getType() != Op1->getType())
          return 0;

        double Op2V;
        if (Ty->isFloatTy())
          Op2V = Op2->getValueAPF().convertToFloat();
        else if (Ty->isDoubleTy())
          Op2V = Op2->getValueAPF().convertToDouble();
        else {
          bool unused;
          APFloat APF = Op2->getValueAPF();
          APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven,
                      &unused);
          Op2V = APF.convertToDouble();
        }

        if (F->getIntrinsicID() == Intrinsic::pow) {
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        }
        if (!TLI)
          return 0;
        if (Name == "pow" && TLI->has(LibFunc::pow))
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        if (Name == "fmod" && TLI->has(LibFunc::fmod))
          return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty);
        if (Name == "atan2" && TLI->has(LibFunc::atan2))
          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
      } else if (ConstantInt *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
        if (F->getIntrinsicID() == Intrinsic::powi && Ty->isHalfTy())
          return ConstantFP::get(F->getContext(),
                                 APFloat((float)std::pow((float)Op1V,
                                                 (int)Op2C->getZExtValue())));
        if (F->getIntrinsicID() == Intrinsic::powi && Ty->isFloatTy())
          return ConstantFP::get(F->getContext(),
                                 APFloat((float)std::pow((float)Op1V,
                                                 (int)Op2C->getZExtValue())));
        if (F->getIntrinsicID() == Intrinsic::powi && Ty->isDoubleTy())
          return ConstantFP::get(F->getContext(),
                                 APFloat((double)std::pow((double)Op1V,
                                                 (int)Op2C->getZExtValue())));
      }
      return 0;
    }

    if (ConstantInt *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
      if (ConstantInt *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
        switch (F->getIntrinsicID()) {
        default: break;
        case Intrinsic::sadd_with_overflow:
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::ssub_with_overflow:
        case Intrinsic::usub_with_overflow:
        case Intrinsic::smul_with_overflow:
        case Intrinsic::umul_with_overflow: {
          APInt Res;
          bool Overflow;
          switch (F->getIntrinsicID()) {
          default: llvm_unreachable("Invalid case");
          case Intrinsic::sadd_with_overflow:
            Res = Op1->getValue().sadd_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::uadd_with_overflow:
            Res = Op1->getValue().uadd_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::ssub_with_overflow:
            Res = Op1->getValue().ssub_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::usub_with_overflow:
            Res = Op1->getValue().usub_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::smul_with_overflow:
            Res = Op1->getValue().smul_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::umul_with_overflow:
            Res = Op1->getValue().umul_ov(Op2->getValue(), Overflow);
            break;
          }
          Constant *Ops[] = {
            ConstantInt::get(F->getContext(), Res),
            ConstantInt::get(Type::getInt1Ty(F->getContext()), Overflow)
          };
          return ConstantStruct::get(cast<StructType>(F->getReturnType()), Ops);
        }
        case Intrinsic::cttz:
          if (Op2->isOne() && Op1->isZero()) // cttz(0, 1) is undef.
            return UndefValue::get(Ty);
          return ConstantInt::get(Ty, Op1->getValue().countTrailingZeros());
        case Intrinsic::ctlz:
          if (Op2->isOne() && Op1->isZero()) // ctlz(0, 1) is undef.
            return UndefValue::get(Ty);
          return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
        }
      }

      return 0;
    }
    return 0;
  }
  return 0;
}