//===- InstCombineCompares.cpp --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitICmp and visitFCmp functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;

static ConstantInt *getOne(Constant *C) {
  return ConstantInt::get(cast<IntegerType>(C->getType()), 1);
}

/// AddOne - Add one to a ConstantInt.
static Constant *AddOne(Constant *C) {
  return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
}

/// SubOne - Subtract one from a ConstantInt.
static Constant *SubOne(Constant *C) {
  return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
}

static ConstantInt *ExtractElement(Constant *V, Constant *Idx) {
  return cast<ConstantInt>(ConstantExpr::getExtractElement(V, Idx));
}

static bool HasAddOverflow(ConstantInt *Result,
                           ConstantInt *In1, ConstantInt *In2,
                           bool IsSigned) {
  if (!IsSigned)
    return Result->getValue().ult(In1->getValue());

  if (In2->isNegative())
    return Result->getValue().sgt(In1->getValue());
  return Result->getValue().slt(In1->getValue());
}

/// AddWithOverflow - Compute Result = In1+In2, returning true if the result
/// overflowed for this type.
static bool AddWithOverflow(Constant *&Result, Constant *In1,
                            Constant *In2, bool IsSigned = false) {
  Result = ConstantExpr::getAdd(In1, In2);

  if (VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
      Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
      if (HasAddOverflow(ExtractElement(Result, Idx),
                         ExtractElement(In1, Idx),
                         ExtractElement(In2, Idx),
                         IsSigned))
        return true;
    }
    return false;
  }

  return HasAddOverflow(cast<ConstantInt>(Result),
                        cast<ConstantInt>(In1), cast<ConstantInt>(In2),
                        IsSigned);
}

static bool HasSubOverflow(ConstantInt *Result,
                           ConstantInt *In1, ConstantInt *In2,
                           bool IsSigned) {
  if (!IsSigned)
    return Result->getValue().ugt(In1->getValue());

  if (In2->isNegative())
    return Result->getValue().slt(In1->getValue());

  return Result->getValue().sgt(In1->getValue());
}
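// Illustrative note (not in the original source): the unsigned checks rely on
// wrap-around. For i8, 200 + 100 wraps to 44 and 44 <u 200, so HasAddOverflow
// reports overflow; likewise 50 - 100 wraps to 206 and 206 >u 50, so
// HasSubOverflow reports overflow. In the signed case, the sign of In2 decides
// which direction the result must move relative to In1.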
/// SubWithOverflow - Compute Result = In1-In2, returning true if the result
/// overflowed for this type.
static bool SubWithOverflow(Constant *&Result, Constant *In1,
                            Constant *In2, bool IsSigned = false) {
  Result = ConstantExpr::getSub(In1, In2);

  if (VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
      Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
      if (HasSubOverflow(ExtractElement(Result, Idx),
                         ExtractElement(In1, Idx),
                         ExtractElement(In2, Idx),
                         IsSigned))
        return true;
    }
    return false;
  }

  return HasSubOverflow(cast<ConstantInt>(Result),
                        cast<ConstantInt>(In1), cast<ConstantInt>(In2),
                        IsSigned);
}

/// isSignBitCheck - Given an exploded icmp instruction, return true if the
/// comparison only checks the sign bit.  If it only checks the sign bit, set
/// TrueIfSigned if the result of the comparison is true when the input value
/// is signed.
static bool isSignBitCheck(ICmpInst::Predicate pred, ConstantInt *RHS,
                           bool &TrueIfSigned) {
  switch (pred) {
  case ICmpInst::ICMP_SLT:   // True if LHS s< 0
    TrueIfSigned = true;
    return RHS->isZero();
  case ICmpInst::ICMP_SLE:   // True if LHS s<= RHS and RHS == -1
    TrueIfSigned = true;
    return RHS->isAllOnesValue();
  case ICmpInst::ICMP_SGT:   // True if LHS s> -1
    TrueIfSigned = false;
    return RHS->isAllOnesValue();
  case ICmpInst::ICMP_UGT:
    // True if LHS u> RHS and RHS == high-bit-mask - 1
    TrueIfSigned = true;
    return RHS->isMaxValue(true);
  case ICmpInst::ICMP_UGE:
    // True if LHS u>= RHS and RHS == high-bit-mask (2^7, 2^15, 2^31, etc)
    TrueIfSigned = true;
    return RHS->getValue().isSignBit();
  default:
    return false;
  }
}

// isHighOnes - Return true if the constant is of the form 1+0+.  This is the
// same as lowones(~X).
static bool isHighOnes(const ConstantInt *CI) {
  return (~CI->getValue() + 1).isPowerOf2();
}

/// ComputeSignedMinMaxValuesFromKnownBits - Given a signed integer type and a
/// set of known zero and one bits, compute the maximum and minimum values that
/// could have the specified known zero and known one bits, returning them in
/// min/max.
static void ComputeSignedMinMaxValuesFromKnownBits(const APInt& KnownZero,
                                                   const APInt& KnownOne,
                                                   APInt& Min, APInt& Max) {
  assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
         KnownZero.getBitWidth() == Min.getBitWidth() &&
         KnownZero.getBitWidth() == Max.getBitWidth() &&
         "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(KnownZero|KnownOne);

  // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
  // bit if it is unknown.
  Min = KnownOne;
  Max = KnownOne|UnknownBits;

  if (UnknownBits.isNegative()) { // Sign bit is unknown
    Min.setBit(Min.getBitWidth()-1);
    Max.clearBit(Max.getBitWidth()-1);
  }
}
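// Worked example (illustrative, not from the original source): for a 4-bit
// value with KnownZero = 0b0100 and KnownOne = 0b0001, the unknown bits are
// 0b1010.  Min starts as 0b0001 and Max as 0b1011; because the sign bit is
// unknown, it is set in Min (0b1001 = -7) and cleared in Max (0b0011 = 3),
// giving the signed range [-7, 3].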
// ComputeUnsignedMinMaxValuesFromKnownBits - Given an unsigned integer type
// and a set of known zero and one bits, compute the maximum and minimum values
// that could have the specified known zero and known one bits, returning them
// in min/max.
static void ComputeUnsignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
                                                     const APInt &KnownOne,
                                                     APInt &Min, APInt &Max) {
  assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
         KnownZero.getBitWidth() == Min.getBitWidth() &&
         KnownZero.getBitWidth() == Max.getBitWidth() &&
         "Ty, KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(KnownZero|KnownOne);

  // The minimum value is when the unknown bits are all zeros.
  Min = KnownOne;
  // The maximum value is when the unknown bits are all ones.
  Max = KnownOne|UnknownBits;
}


/// FoldCmpLoadFromIndexedGlobal - Called when we see this pattern:
///   cmp pred (load (gep GV, ...)), cmpcst
/// where GV is a global variable with a constant initializer.  Try to simplify
/// this into some simple computation that does not need the load.  For example
/// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
///
/// If AndCst is non-null, then the loaded value is masked with that constant
/// before doing the comparison.  This handles cases like "A[i]&4 == 0".
Instruction *InstCombiner::
FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
                             CmpInst &ICI, ConstantInt *AndCst) {
  // We need TD information to know the pointer size unless this is inbounds.
  if (!GEP->isInBounds() && TD == 0) return 0;

  ConstantArray *Init = dyn_cast<ConstantArray>(GV->getInitializer());
  if (Init == 0 || Init->getNumOperands() > 1024) return 0;

  // There are many forms of this optimization we can handle, for now, just do
  // the simple index into a single-dimensional array.
  //
  // Require: GEP GV, 0, i {{, constant indices}}
  if (GEP->getNumOperands() < 3 ||
      !isa<ConstantInt>(GEP->getOperand(1)) ||
      !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
      isa<Constant>(GEP->getOperand(2)))
    return 0;

  // Check that indices after the variable are constants and in-range for the
  // type they index.  Collect the indices.  This is typically for arrays of
  // structs.
  SmallVector<unsigned, 4> LaterIndices;

  Type *EltTy = cast<ArrayType>(Init->getType())->getElementType();
  for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
    ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (Idx == 0) return 0;  // Variable index.

    uint64_t IdxVal = Idx->getZExtValue();
    if ((unsigned)IdxVal != IdxVal) return 0;  // Too large array index.

    if (StructType *STy = dyn_cast<StructType>(EltTy))
      EltTy = STy->getElementType(IdxVal);
    else if (ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
      if (IdxVal >= ATy->getNumElements()) return 0;
      EltTy = ATy->getElementType();
    } else {
      return 0; // Unknown type.
    }

    LaterIndices.push_back(IdxVal);
  }

  enum { Overdefined = -3, Undefined = -2 };

  // Variables for our state machines.

  // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
  // "i == 47 | i == 87", where 47 is the first index the condition is true
  // for, and 87 is the second (and last) index.  FirstTrueElement is -2 when
  // undefined, otherwise set to the first true element.  SecondTrueElement is
  // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
  int FirstTrueElement = Undefined, SecondTrueElement = Undefined;

  // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
  // form "i != 47 & i != 87".  Same state transitions as for true elements.
  int FirstFalseElement = Undefined, SecondFalseElement = Undefined;

  /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
  /// define a state machine that triggers for ranges of values that the index
  /// is true or false for.  This triggers on things like "abbbbc"[i] == 'b'.
  /// This is -2 when undefined, -3 when overdefined, and otherwise the last
  /// index in the range (inclusive).  We use -2 for undefined here because we
  /// use relative comparisons and don't want 0-1 to match -1.
  int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;

  // MagicBitvector - This is a magic bitvector where we set a bit if the
  // comparison is true for element 'i'.  If there are 64 elements or less in
  // the array, this will fully represent all the comparison results.
  uint64_t MagicBitvector = 0;


  // Scan the array and see if one of our patterns matches.
  Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
  for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
    Constant *Elt = Init->getOperand(i);

    // If this is indexing an array of structures, get the structure element.
    if (!LaterIndices.empty())
      Elt = ConstantExpr::getExtractValue(Elt, LaterIndices);

    // If the element is masked, handle it.
    if (AndCst) Elt = ConstantExpr::getAnd(Elt, AndCst);

    // Find out if the comparison would be true or false for the i'th element.
    Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
                                                  CompareRHS, TD);
    // If the result is undef for this element, ignore it.
    if (isa<UndefValue>(C)) {
      // Extend range state machines to cover this element in case there is an
      // undef in the middle of the range.
      if (TrueRangeEnd == (int)i-1)
        TrueRangeEnd = i;
      if (FalseRangeEnd == (int)i-1)
        FalseRangeEnd = i;
      continue;
    }

    // If we can't compute the result for any of the elements, we have to give
    // up evaluating the entire conditional.
    if (!isa<ConstantInt>(C)) return 0;

    // Otherwise, we know if the comparison is true or false for this element,
    // update our state machines.
    bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();

    // State machine for single/double/range index comparison.
    if (IsTrueForElt) {
      // Update the TrueElement state machine.
      if (FirstTrueElement == Undefined)
        FirstTrueElement = TrueRangeEnd = i;  // First true element.
      else {
        // Update double-compare state machine.
        if (SecondTrueElement == Undefined)
          SecondTrueElement = i;
        else
          SecondTrueElement = Overdefined;

        // Update range state machine.
        if (TrueRangeEnd == (int)i-1)
          TrueRangeEnd = i;
        else
          TrueRangeEnd = Overdefined;
      }
    } else {
      // Update the FalseElement state machine.
      if (FirstFalseElement == Undefined)
        FirstFalseElement = FalseRangeEnd = i; // First false element.
      else {
        // Update double-compare state machine.
        if (SecondFalseElement == Undefined)
          SecondFalseElement = i;
        else
          SecondFalseElement = Overdefined;

        // Update range state machine.
        if (FalseRangeEnd == (int)i-1)
          FalseRangeEnd = i;
        else
          FalseRangeEnd = Overdefined;
      }
    }


    // If this element is in range, update our magic bitvector.
    if (i < 64 && IsTrueForElt)
      MagicBitvector |= 1ULL << i;

    // If all of our states become overdefined, bail out early.  Since the
    // predicate is expensive, only check it every 8 elements.  This is only
    // really useful for really huge arrays.
    if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
        SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
        FalseRangeEnd == Overdefined)
      return 0;
  }

  // Now that we've scanned the entire array, emit our new comparison(s).  We
  // order the state machines in complexity of the generated code.
  Value *Idx = GEP->getOperand(2);

  // If the index is larger than the pointer size of the target, truncate the
  // index down like the GEP would do implicitly.  We don't have to do this for
  // an inbounds GEP because the index can't be out of range.
  if (!GEP->isInBounds() &&
      Idx->getType()->getPrimitiveSizeInBits() > TD->getPointerSizeInBits())
    Idx = Builder->CreateTrunc(Idx, TD->getIntPtrType(Idx->getContext()));

  // If the comparison is only true for one or two elements, emit direct
  // comparisons.
  if (SecondTrueElement != Overdefined) {
    // None true -> false.
    if (FirstTrueElement == Undefined)
      return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(GEP->getContext()));

    Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);

    // True for one element -> 'i == 47'.
    if (SecondTrueElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);

    // True for two elements -> 'i == 47 | i == 72'.
    Value *C1 = Builder->CreateICmpEQ(Idx, FirstTrueIdx);
    Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
    Value *C2 = Builder->CreateICmpEQ(Idx, SecondTrueIdx);
    return BinaryOperator::CreateOr(C1, C2);
  }

  // If the comparison is only false for one or two elements, emit direct
  // comparisons.
  if (SecondFalseElement != Overdefined) {
    // None false -> true.
    if (FirstFalseElement == Undefined)
      return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(GEP->getContext()));

    Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);

    // False for one element -> 'i != 47'.
    if (SecondFalseElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);

    // False for two elements -> 'i != 47 & i != 72'.
    Value *C1 = Builder->CreateICmpNE(Idx, FirstFalseIdx);
    Value *SecondFalseIdx = ConstantInt::get(Idx->getType(), SecondFalseElement);
    Value *C2 = Builder->CreateICmpNE(Idx, SecondFalseIdx);
    return BinaryOperator::CreateAnd(C1, C2);
  }

  // If the comparison can be replaced with a range comparison for the elements
  // where it is true, emit the range check.
  if (TrueRangeEnd != Overdefined) {
    assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");

    // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
    if (FirstTrueElement) {
      Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
      Idx = Builder->CreateAdd(Idx, Offs);
    }

    Value *End = ConstantInt::get(Idx->getType(),
                                  TrueRangeEnd-FirstTrueElement+1);
    return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
  }

  // False range check.
  if (FalseRangeEnd != Overdefined) {
    assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
    // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
    if (FirstFalseElement) {
      Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
      Idx = Builder->CreateAdd(Idx, Offs);
    }

    Value *End = ConstantInt::get(Idx->getType(),
                                  FalseRangeEnd-FirstFalseElement);
    return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
  }


  // If a 32-bit or 64-bit magic bitvector captures the entire comparison state
  // of this load, replace it with computation that does:
  //   ((magic_cst >> i) & 1) != 0
  if (Init->getNumOperands() <= 32 ||
      (TD && Init->getNumOperands() <= 64 && TD->isLegalInteger(64))) {
    Type *Ty;
    if (Init->getNumOperands() <= 32)
      Ty = Type::getInt32Ty(Init->getContext());
    else
      Ty = Type::getInt64Ty(Init->getContext());
    Value *V = Builder->CreateIntCast(Idx, Ty, false);
    V = Builder->CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
    V = Builder->CreateAnd(ConstantInt::get(Ty, 1), V);
    return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
  }

  return 0;
}

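// Illustrative sketch (not part of the original source): given a constant
// global such as
//   @str = constant [7 x i8] c"abbbbca"
// the comparison "icmp eq i8 (load (gep @str, 0, %i)), 98" (98 == 'b') is true
// exactly for indices 1..4, so the range state machine rewrites it to
// "icmp ult (add %i, -1), 4".  When no single/double/range pattern fits but
// the array has at most 64 elements, the magic-bitvector path above emits
// roughly "icmp ne (and 1, (lshr MAGIC, %i)), 0" instead.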

/// EvaluateGEPOffsetExpression - Return a value that can be used to compare
/// the *offset* implied by a GEP to zero.  For example, if we have &A[i], we
/// want to return 'i' for "icmp ne i, 0".  Note that, in general, indices can
/// be complex, and scales are involved.  The above expression would also be
/// legal to codegen as "icmp ne (i*4), 0" (assuming A is a pointer to i32).
/// This latter form is less amenable to optimization though, and we are
/// allowed to generate the first by knowing that pointer arithmetic doesn't
/// overflow.
///
/// If we can't emit an optimized form for this expression, this returns null.
///
static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
  TargetData &TD = *IC.getTargetData();
  gep_type_iterator GTI = gep_type_begin(GEP);

  // Check to see if this gep only has a single variable index.  If so, and if
  // any constant indices are a multiple of its scale, then we can compute this
  // in terms of the scale of the variable index.  For example, if the GEP
  // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
  // because the expression will cross zero at the same point.
  unsigned i, e = GEP->getNumOperands();
  int64_t Offset = 0;
  for (i = 1; i != e; ++i, ++GTI) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      // Compute the aggregate offset of constant indices.
      if (CI->isZero()) continue;

      // Handle a struct index, which adds its field offset to the pointer.
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
      } else {
        uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
        Offset += Size*CI->getSExtValue();
      }
    } else {
      // Found our variable index.
      break;
    }
  }

  // If there are no variable indices, we must have a constant offset, just
  // evaluate it the general way.
  if (i == e) return 0;

  Value *VariableIdx = GEP->getOperand(i);
  // Determine the scale factor of the variable element.  For example, this is
  // 4 if the variable index is into an array of i32.
  uint64_t VariableScale = TD.getTypeAllocSize(GTI.getIndexedType());

  // Verify that there are no other variable indices.  If so, emit the hard way.
  for (++i, ++GTI; i != e; ++i, ++GTI) {
    ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!CI) return 0;

    // Compute the aggregate offset of constant indices.
    if (CI->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += Size*CI->getSExtValue();
    }
  }

  // Okay, we know we have a single variable index, which must be a
  // pointer/array/vector index.  If there is no offset, life is simple, return
  // the index.
  unsigned IntPtrWidth = TD.getPointerSizeInBits();
  if (Offset == 0) {
    // Cast to the intptr type in case a truncation occurs.  If an extension is
    // needed, we don't need to bother extending: the extension won't affect
    // where the computation crosses zero.
    if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) {
      Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
      VariableIdx = IC.Builder->CreateTrunc(VariableIdx, IntPtrTy);
    }
    return VariableIdx;
  }

  // Otherwise, there is an index.  The computation we will do will be modulo
  // the pointer size, so get it.
  uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);

  Offset &= PtrSizeMask;
  VariableScale &= PtrSizeMask;

  // To do this transformation, any constant index must be a multiple of the
  // variable scale factor.  For example, we can evaluate "12 + 4*i" as "3 + i",
  // but we can't evaluate "10 + 3*i" in terms of i.  Check that the offset is a
  // multiple of the variable scale.
  int64_t NewOffs = Offset / (int64_t)VariableScale;
  if (Offset != NewOffs*(int64_t)VariableScale)
    return 0;

  // Okay, we can do this evaluation.  Start by converting the index to intptr.
  Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
  if (VariableIdx->getType() != IntPtrTy)
    VariableIdx = IC.Builder->CreateIntCast(VariableIdx, IntPtrTy,
                                            true /*Signed*/);
  Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
  return IC.Builder->CreateAdd(VariableIdx, OffsetVal, "offset");
}

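// Illustrative sketch (not part of the original source): for a GEP such as
//   getelementptr inbounds {i32, i32, i32, [8 x i32]}* %S, i32 0, i32 3, i32 %i
// the implied byte offset is 12 + 4*%i.  Since 12 is a multiple of the
// variable scale 4, the routine above returns "add %i, 3", which crosses zero
// at the same point as the real offset and is cheaper to compare against 0.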
/// FoldGEPICmp - Fold comparisons between a GEP instruction and something
/// else.  At this point we know that the GEP is on the LHS of the comparison.
Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                                       ICmpInst::Predicate Cond,
                                       Instruction &I) {
  // Look through bitcasts.
  if (BitCastInst *BCI = dyn_cast<BitCastInst>(RHS))
    RHS = BCI->getOperand(0);

  Value *PtrBase = GEPLHS->getOperand(0);
  if (TD && PtrBase == RHS && GEPLHS->isInBounds()) {
    // ((gep Ptr, OFFSET) cmp Ptr)   ---> (OFFSET cmp 0).
    // This transformation (ignoring the base and scales) is valid because we
    // know pointers can't overflow since the gep is inbounds.  See if we can
    // output an optimized form.
    Value *Offset = EvaluateGEPOffsetExpression(GEPLHS, *this);

    // If not, synthesize the offset the hard way.
    if (Offset == 0)
      Offset = EmitGEPOffset(GEPLHS);
    return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
                        Constant::getNullValue(Offset->getType()));
  } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
    // If the base pointers are different, but the indices are the same, just
    // compare the base pointer.
    if (PtrBase != GEPRHS->getOperand(0)) {
      bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
      IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
                        GEPRHS->getOperand(0)->getType();
      if (IndicesTheSame)
        for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
          if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
            IndicesTheSame = false;
            break;
          }

      // If all indices are the same, just compare the base pointers.
      if (IndicesTheSame)
        return new ICmpInst(ICmpInst::getSignedPredicate(Cond),
                            GEPLHS->getOperand(0), GEPRHS->getOperand(0));

      // Otherwise, the base pointers are different and the indices are
      // different, bail out.
      return 0;
    }

    // If one of the GEPs has all zero indices, recurse.
    bool AllZeros = true;
    for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
      if (!isa<Constant>(GEPLHS->getOperand(i)) ||
          !cast<Constant>(GEPLHS->getOperand(i))->isNullValue()) {
        AllZeros = false;
        break;
      }
    if (AllZeros)
      return FoldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
                         ICmpInst::getSwappedPredicate(Cond), I);

    // If the other GEP has all zero indices, recurse.
    AllZeros = true;
    for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
      if (!isa<Constant>(GEPRHS->getOperand(i)) ||
          !cast<Constant>(GEPRHS->getOperand(i))->isNullValue()) {
        AllZeros = false;
        break;
      }
    if (AllZeros)
      return FoldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);

    bool GEPsInBounds = GEPLHS->isInBounds() && GEPRHS->isInBounds();
    if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
      // If the GEPs only differ by one index, compare it.
      unsigned NumDifferences = 0;  // Keep track of # differences.
      unsigned DiffOperand = 0;     // The operand that differs.
      for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
        if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
          if (GEPLHS->getOperand(i)->getType()->getPrimitiveSizeInBits() !=
              GEPRHS->getOperand(i)->getType()->getPrimitiveSizeInBits()) {
            // Irreconcilable differences.
            NumDifferences = 2;
            break;
          } else {
            if (NumDifferences++) break;
            DiffOperand = i;
          }
        }

      if (NumDifferences == 0)   // SAME GEP?
        return ReplaceInstUsesWith(I, // No comparison is needed here.
                         ConstantInt::get(Type::getInt1Ty(I.getContext()),
                                          ICmpInst::isTrueWhenEqual(Cond)));

      else if (NumDifferences == 1 && GEPsInBounds) {
        Value *LHSV = GEPLHS->getOperand(DiffOperand);
        Value *RHSV = GEPRHS->getOperand(DiffOperand);
        // Make sure we do a signed comparison here.
        return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
      }
    }

    // Only lower this if the icmp is the only user of the GEP or if we expect
    // the result to fold to a constant!
    if (TD &&
        GEPsInBounds &&
        (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
        (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
      // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)  --->  (OFFSET1 cmp OFFSET2)
      Value *L = EmitGEPOffset(GEPLHS);
      Value *R = EmitGEPOffset(GEPRHS);
      return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
    }
  }
  return 0;
}

/// FoldICmpAddOpCst - Fold "icmp pred (X+CI), X".
Instruction *InstCombiner::FoldICmpAddOpCst(ICmpInst &ICI,
                                            Value *X, ConstantInt *CI,
                                            ICmpInst::Predicate Pred,
                                            Value *TheAdd) {
  // If we have X+0, exit early (simplifying logic below) and let it get folded
  // elsewhere.   icmp X+0, X  -> icmp X, X
  if (CI->isZero()) {
    bool isTrue = ICmpInst::isTrueWhenEqual(Pred);
    return ReplaceInstUsesWith(ICI, ConstantInt::get(ICI.getType(), isTrue));
  }

  // (X+4) == X -> false.
  if (Pred == ICmpInst::ICMP_EQ)
    return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(X->getContext()));

  // (X+4) != X -> true.
  if (Pred == ICmpInst::ICMP_NE)
    return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(X->getContext()));

  // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
  // so the values can never be equal.  Similarly for all other "or equals"
  // operators.

  // (X+1) <u X        --> X >u (MAXUINT-1)        --> X == 255
  // (X+2) <u X        --> X >u (MAXUINT-2)        --> X > 253
  // (X+MAXUINT) <u X  --> X >u (MAXUINT-MAXUINT)  --> X != 0
  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
    Value *R =
      ConstantExpr::getSub(ConstantInt::getAllOnesValue(CI->getType()), CI);
    return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
  }

  // (X+1) >u X        --> X <u (0-1)        --> X != 255
  // (X+2) >u X        --> X <u (0-2)        --> X <u 254
  // (X+MAXUINT) >u X  --> X <u (0-MAXUINT)  --> X <u 1  --> X == 0
  if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
    return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantExpr::getNeg(CI));

  unsigned BitWidth = CI->getType()->getPrimitiveSizeInBits();
  ConstantInt *SMax = ConstantInt::get(X->getContext(),
                                       APInt::getSignedMaxValue(BitWidth));

  // (X+ 1) <s X       --> X >s (MAXSINT-1)        --> X == 127
  // (X+ 2) <s X       --> X >s (MAXSINT-2)        --> X >s 125
  // (X+MAXSINT) <s X  --> X >s (MAXSINT-MAXSINT)  --> X >s 0
  // (X+MINSINT) <s X  --> X >s (MAXSINT-MINSINT)  --> X >s -1
  // (X+ -2) <s X      --> X >s (MAXSINT- -2)      --> X >s 126
  // (X+ -1) <s X      --> X >s (MAXSINT- -1)      --> X != 127
  if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
    return new ICmpInst(ICmpInst::ICMP_SGT, X, ConstantExpr::getSub(SMax, CI));

  // (X+ 1) >s X       --> X <s (MAXSINT-(1-1))       --> X != 127
  // (X+ 2) >s X       --> X <s (MAXSINT-(2-1))       --> X <s 126
  // (X+MAXSINT) >s X  --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
  // (X+MINSINT) >s X  --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
  // (X+ -2) >s X      --> X <s (MAXSINT-(-2-1))      --> X <s -126
  // (X+ -1) >s X      --> X <s (MAXSINT-(-1-1))      --> X == -128

  assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
  Constant *C = ConstantInt::get(X->getContext(), CI->getValue()-1);
  return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantExpr::getSub(SMax, C));
}

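// Worked i8 example (illustrative, not in the original source): for
// "icmp ugt (add i8 %X, 5), %X" the code above emits "icmp ult i8 %X, -5",
// i.e. %X <u 251, which is exactly the set of values for which X+5 does not
// wrap around.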
/// FoldICmpDivCst - Fold "icmp pred, ([su]div X, DivRHS), CmpRHS" where DivRHS
/// and CmpRHS are both known to be integer constants.
Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
                                          ConstantInt *DivRHS) {
  ConstantInt *CmpRHS = cast<ConstantInt>(ICI.getOperand(1));
  const APInt &CmpRHSV = CmpRHS->getValue();

  // FIXME: If the operand types don't match the type of the divide
  // then don't attempt this transform. The code below doesn't have the
  // logic to deal with a signed divide and an unsigned compare (and
  // vice versa). This is because (x /s C1) <s C2  produces different
  // results than (x /s C1) <u C2 or (x /u C1) <s C2 or even
  // (x /u C1) <u C2.  Simply casting the operands and result won't
  // work. :(  The if statement below tests that condition and bails
  // if it finds it.
  bool DivIsSigned = DivI->getOpcode() == Instruction::SDiv;
  if (!ICI.isEquality() && DivIsSigned != ICI.isSigned())
    return 0;
  if (DivRHS->isZero())
    return 0; // The ProdOV computation fails on divide by zero.
  if (DivIsSigned && DivRHS->isAllOnesValue())
    return 0; // The overflow computation also screws up here.
  if (DivRHS->isOne()) {
    // This eliminates some funny cases with INT_MIN.
    ICI.setOperand(0, DivI->getOperand(0));   // X/1 == X.
    return &ICI;
  }

  // Compute Prod = CI * DivRHS. We are essentially solving an equation
  // of form X/C1=C2. We solve for X by multiplying C1 (DivRHS) and
  // C2 (CI). By solving for X we can turn this into a range check
  // instead of computing a divide.
  Constant *Prod = ConstantExpr::getMul(CmpRHS, DivRHS);

  // Determine if the product overflows by seeing if the product is
  // not equal to the divide. Make sure we do the same kind of divide
  // as in the LHS instruction that we're folding.
  bool ProdOV = (DivIsSigned ? ConstantExpr::getSDiv(Prod, DivRHS) :
                 ConstantExpr::getUDiv(Prod, DivRHS)) != CmpRHS;

  // Get the ICmp opcode.
  ICmpInst::Predicate Pred = ICI.getPredicate();

  /// If the division is known to be exact, then there is no remainder from the
  /// divide, so the covered range size is unit, otherwise it is the divisor.
  ConstantInt *RangeSize = DivI->isExact() ? getOne(Prod) : DivRHS;

  // Figure out the interval that is being checked.  For example, a comparison
  // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
  // Compute this interval based on the constants involved and the signedness
  // of the compare/divide.  This computes a half-open interval, keeping track
  // of whether either value in the interval overflows.  After analysis each
  // overflow variable is set to 0 if its corresponding bound variable is
  // valid, -1 if overflowed off the bottom end, or +1 if overflowed off the
  // top end.
  int LoOverflow = 0, HiOverflow = 0;
  Constant *LoBound = 0, *HiBound = 0;

  if (!DivIsSigned) {  // udiv
    // e.g. X/5 op 3  --> [15, 20)
    LoBound = Prod;
    HiOverflow = LoOverflow = ProdOV;
    if (!HiOverflow) {
      // If this is not an exact divide, then many values in the range collapse
      // to the same result value.
      HiOverflow = AddWithOverflow(HiBound, LoBound, RangeSize, false);
    }

  } else if (DivRHS->getValue().isStrictlyPositive()) { // Divisor is > 0.
    if (CmpRHSV == 0) {       // (X / pos) op 0
      // Can't overflow.  e.g.  X/2 op 0 --> [-1, 2)
      LoBound = ConstantExpr::getNeg(SubOne(RangeSize));
      HiBound = RangeSize;
    } else if (CmpRHSV.isStrictlyPositive()) {   // (X / pos) op pos
      LoBound = Prod;                            // e.g.   X/5 op 3 --> [15, 20)
      HiOverflow = LoOverflow = ProdOV;
      if (!HiOverflow)
        HiOverflow = AddWithOverflow(HiBound, Prod, RangeSize, true);
    } else {                       // (X / pos) op neg
      // e.g. X/5 op -3  --> [-15-4, -15+1) --> [-19, -14)
      HiBound = AddOne(Prod);
      LoOverflow = HiOverflow = ProdOV ? -1 : 0;
      if (!LoOverflow) {
        ConstantInt *DivNeg = cast<ConstantInt>(ConstantExpr::getNeg(RangeSize));
        LoOverflow = AddWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
      }
    }
  } else if (DivRHS->isNegative()) { // Divisor is < 0.
    if (DivI->isExact())
      RangeSize = cast<ConstantInt>(ConstantExpr::getNeg(RangeSize));
    if (CmpRHSV == 0) {       // (X / neg) op 0
      // e.g. X/-5 op 0  --> [-4, 5)
      LoBound = AddOne(RangeSize);
      HiBound = cast<ConstantInt>(ConstantExpr::getNeg(RangeSize));
      if (HiBound == DivRHS) {     // -INTMIN = INTMIN
        HiOverflow = 1;            // [INTMIN+1, overflow)
        HiBound = 0;               // e.g. X/INTMIN = 0 --> X > INTMIN
      }
    } else if (CmpRHSV.isStrictlyPositive()) {   // (X / neg) op pos
      // e.g. X/-5 op 3  --> [-19, -14)
      HiBound = AddOne(Prod);
      HiOverflow = LoOverflow = ProdOV ? -1 : 0;
      if (!LoOverflow)
        LoOverflow = AddWithOverflow(LoBound, HiBound, RangeSize, true) ? -1 : 0;
    } else {                       // (X / neg) op neg
      LoBound = Prod;       // e.g. X/-5 op -3  --> [15, 20)
      LoOverflow = HiOverflow = ProdOV;
      if (!HiOverflow)
        HiOverflow = SubWithOverflow(HiBound, Prod, RangeSize, true);
    }

    // Dividing by a negative swaps the condition.  LT <-> GT
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  Value *X = DivI->getOperand(0);
  switch (Pred) {
  default: llvm_unreachable("Unhandled icmp opcode!");
  case ICmpInst::ICMP_EQ:
    if (LoOverflow && HiOverflow)
      return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(ICI.getContext()));
    if (HiOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
                          ICmpInst::ICMP_UGE, X, LoBound);
    if (LoOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
                          ICmpInst::ICMP_ULT, X, HiBound);
    return ReplaceInstUsesWith(ICI, InsertRangeTest(X, LoBound, HiBound,
                                                    DivIsSigned, true));
  case ICmpInst::ICMP_NE:
    if (LoOverflow && HiOverflow)
      return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(ICI.getContext()));
    if (HiOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
                          ICmpInst::ICMP_ULT, X, LoBound);
    if (LoOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
                          ICmpInst::ICMP_UGE, X, HiBound);
    return ReplaceInstUsesWith(ICI, InsertRangeTest(X, LoBound, HiBound,
                                                    DivIsSigned, false));
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_SLT:
    if (LoOverflow == +1)   // Low bound is greater than input range.
      return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(ICI.getContext()));
    if (LoOverflow == -1)   // Low bound is less than input range.
      return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(ICI.getContext()));
    return new ICmpInst(Pred, X, LoBound);
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_SGT:
    if (HiOverflow == +1)       // High bound greater than input range.
      return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(ICI.getContext()));
    if (HiOverflow == -1)       // High bound less than input range.
      return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(ICI.getContext()));
    if (Pred == ICmpInst::ICMP_UGT)
      return new ICmpInst(ICmpInst::ICMP_UGE, X, HiBound);
    return new ICmpInst(ICmpInst::ICMP_SGE, X, HiBound);
  }
}

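// Worked example (illustrative, not in the original source): for
// "icmp eq (udiv i8 %X, 5), 3" the product is 15, so the checked interval is
// [15, 20) and InsertRangeTest emits the equivalent of
// "icmp ult (add i8 %X, -15), 5".  Dividing by a negative constant swaps the
// predicate, e.g. "icmp slt (sdiv i8 %X, -5), 3" becomes "icmp sge i8 %X, -14".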
/// FoldICmpShrCst - Handle "icmp(([al]shr X, cst1), cst2)".
Instruction *InstCombiner::FoldICmpShrCst(ICmpInst &ICI, BinaryOperator *Shr,
                                          ConstantInt *ShAmt) {
  const APInt &CmpRHSV = cast<ConstantInt>(ICI.getOperand(1))->getValue();

  // Check that the shift amount is in range.  If not, don't perform
  // undefined shifts.  When the shift is visited it will be
  // simplified.
  uint32_t TypeBits = CmpRHSV.getBitWidth();
  uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
  if (ShAmtVal >= TypeBits || ShAmtVal == 0)
    return 0;

  if (!ICI.isEquality()) {
    // If we have an unsigned comparison and an ashr, we can't simplify this.
    // Similarly for signed comparisons with lshr.
    if (ICI.isSigned() != (Shr->getOpcode() == Instruction::AShr))
      return 0;

    // Otherwise, all lshr and most exact ashr's are equivalent to a udiv/sdiv
    // by a power of 2.  Since we already have logic to simplify these,
    // transform to div and then simplify the resultant comparison.
    if (Shr->getOpcode() == Instruction::AShr &&
        (!Shr->isExact() || ShAmtVal == TypeBits - 1))
      return 0;

    // Revisit the shift (to delete it).
    Worklist.Add(Shr);

    Constant *DivCst =
      ConstantInt::get(Shr->getType(), APInt::getOneBitSet(TypeBits, ShAmtVal));

    Value *Tmp =
      Shr->getOpcode() == Instruction::AShr ?
      Builder->CreateSDiv(Shr->getOperand(0), DivCst, "", Shr->isExact()) :
      Builder->CreateUDiv(Shr->getOperand(0), DivCst, "", Shr->isExact());

    ICI.setOperand(0, Tmp);

    // If the builder folded the binop, just return it.
    BinaryOperator *TheDiv = dyn_cast<BinaryOperator>(Tmp);
    if (TheDiv == 0)
      return &ICI;

    // Otherwise, fold this div/compare.
    assert(TheDiv->getOpcode() == Instruction::SDiv ||
           TheDiv->getOpcode() == Instruction::UDiv);

    Instruction *Res = FoldICmpDivCst(ICI, TheDiv, cast<ConstantInt>(DivCst));
    assert(Res && "This div/cst should have folded!");
    return Res;
  }


  // If we are comparing against bits always shifted out, the
  // comparison cannot succeed.
  APInt Comp = CmpRHSV << ShAmtVal;
  ConstantInt *ShiftedCmpRHS = ConstantInt::get(ICI.getContext(), Comp);
  if (Shr->getOpcode() == Instruction::LShr)
    Comp = Comp.lshr(ShAmtVal);
  else
    Comp = Comp.ashr(ShAmtVal);

  if (Comp != CmpRHSV) { // Comparing against a bit that we know is zero.
    bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
    Constant *Cst = ConstantInt::get(Type::getInt1Ty(ICI.getContext()),
                                     IsICMP_NE);
    return ReplaceInstUsesWith(ICI, Cst);
  }

  // Otherwise, check to see if the bits shifted out are known to be zero.
  // If so, we can compare against the unshifted value:
  //  (X & 4) >> 1 == 2  --> (X & 4) == 4.
  if (Shr->hasOneUse() && Shr->isExact())
    return new ICmpInst(ICI.getPredicate(), Shr->getOperand(0), ShiftedCmpRHS);

  if (Shr->hasOneUse()) {
    // Otherwise strength reduce the shift into an and.
    APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
    Constant *Mask = ConstantInt::get(ICI.getContext(), Val);

    Value *And = Builder->CreateAnd(Shr->getOperand(0),
                                    Mask, Shr->getName()+".mask");
    return new ICmpInst(ICI.getPredicate(), And, ShiftedCmpRHS);
  }
  return 0;
}


/// visitICmpInstWithInstAndIntCst - Handle "icmp (instr, intcst)".
///
Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
                                                          Instruction *LHSI,
                                                          ConstantInt *RHS) {
  const APInt &RHSV = RHS->getValue();

  switch (LHSI->getOpcode()) {
  case Instruction::Trunc:
    if (ICI.isEquality() && LHSI->hasOneUse()) {
      // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
      // of the high bits truncated out of x are known.
      unsigned DstBits = LHSI->getType()->getPrimitiveSizeInBits(),
             SrcBits = LHSI->getOperand(0)->getType()->getPrimitiveSizeInBits();
      APInt Mask(APInt::getHighBitsSet(SrcBits, SrcBits-DstBits));
      APInt KnownZero(SrcBits, 0), KnownOne(SrcBits, 0);
      ComputeMaskedBits(LHSI->getOperand(0), Mask, KnownZero, KnownOne);

      // If all the high bits are known, we can do this xform.
      if ((KnownZero|KnownOne).countLeadingOnes() >= SrcBits-DstBits) {
        // Pull in the high bits from known-ones set.
        APInt NewRHS = RHS->getValue().zext(SrcBits);
        NewRHS |= KnownOne;
        return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0),
                            ConstantInt::get(ICI.getContext(), NewRHS));
      }
    }
    break;

  case Instruction::Xor:         // (icmp pred (xor X, XorCST), CI)
    if (ConstantInt *XorCST = dyn_cast<ConstantInt>(LHSI->getOperand(1))) {
      // If this is a comparison that tests the signbit (X < 0) or (x > -1),
      // fold the xor.
      if ((ICI.getPredicate() == ICmpInst::ICMP_SLT && RHSV == 0) ||
          (ICI.getPredicate() == ICmpInst::ICMP_SGT && RHSV.isAllOnesValue())) {
        Value *CompareVal = LHSI->getOperand(0);

        // If the sign bit of the XorCST is not set, there is no change to
        // the operation, just stop using the Xor.
        if (!XorCST->isNegative()) {
          ICI.setOperand(0, CompareVal);
          Worklist.Add(LHSI);
          return &ICI;
        }

        // Was the old condition true if the operand is positive?
        bool isTrueIfPositive = ICI.getPredicate() == ICmpInst::ICMP_SGT;

        // If so, the new one isn't.
        isTrueIfPositive ^= true;

        if (isTrueIfPositive)
          return new ICmpInst(ICmpInst::ICMP_SGT, CompareVal,
                              SubOne(RHS));
        else
          return new ICmpInst(ICmpInst::ICMP_SLT, CompareVal,
                              AddOne(RHS));
      }

      if (LHSI->hasOneUse()) {
        // (icmp u/s (xor A SignBit), C) -> (icmp s/u A, (xor C SignBit))
        if (!ICI.isEquality() && XorCST->getValue().isSignBit()) {
          const APInt &SignBit = XorCST->getValue();
          ICmpInst::Predicate Pred = ICI.isSigned()
                                       ? ICI.getUnsignedPredicate()
                                       : ICI.getSignedPredicate();
          return new ICmpInst(Pred, LHSI->getOperand(0),
                              ConstantInt::get(ICI.getContext(),
                                               RHSV ^ SignBit));
        }

        // (icmp u/s (xor A ~SignBit), C) -> (icmp s/u (xor C ~SignBit), A)
        if (!ICI.isEquality() && XorCST->isMaxValue(true)) {
          const APInt &NotSignBit = XorCST->getValue();
          ICmpInst::Predicate Pred = ICI.isSigned()
                                       ? ICI.getUnsignedPredicate()
                                       : ICI.getSignedPredicate();
          Pred = ICI.getSwappedPredicate(Pred);
          return new ICmpInst(Pred, LHSI->getOperand(0),
                              ConstantInt::get(ICI.getContext(),
                                               RHSV ^ NotSignBit));
        }
      }
    }
    break;
  case Instruction::And:         // (icmp pred (and X, AndCST), RHS)
    if (LHSI->hasOneUse() && isa<ConstantInt>(LHSI->getOperand(1)) &&
        LHSI->getOperand(0)->hasOneUse()) {
      ConstantInt *AndCST = cast<ConstantInt>(LHSI->getOperand(1));

      // If the LHS is an AND of a truncating cast, we can widen the
      // and/compare to be the input width without changing the value
      // produced, eliminating a cast.
      if (TruncInst *Cast = dyn_cast<TruncInst>(LHSI->getOperand(0))) {
        // We can do this transformation if either the AND constant does not
        // have its sign bit set or if it is an equality comparison.
        // Extending a relational comparison when we're checking the sign
        // bit would not work.
        if (ICI.isEquality() ||
            (!AndCST->isNegative() && RHSV.isNonNegative())) {
          Value *NewAnd =
            Builder->CreateAnd(Cast->getOperand(0),
                               ConstantExpr::getZExt(AndCST, Cast->getSrcTy()));
          NewAnd->takeName(LHSI);
          return new ICmpInst(ICI.getPredicate(), NewAnd,
                              ConstantExpr::getZExt(RHS, Cast->getSrcTy()));
        }
      }

      // If the LHS is an AND of a zext, and we have an equality compare, we
      // can shrink the and/compare to the smaller type, eliminating the cast.
      if (ZExtInst *Cast = dyn_cast<ZExtInst>(LHSI->getOperand(0))) {
        IntegerType *Ty = cast<IntegerType>(Cast->getSrcTy());
        // Make sure we don't compare the upper bits, SimplifyDemandedBits
        // should fold the icmp to true/false in that case.
        if (ICI.isEquality() && RHSV.getActiveBits() <= Ty->getBitWidth()) {
          Value *NewAnd =
            Builder->CreateAnd(Cast->getOperand(0),
                               ConstantExpr::getTrunc(AndCST, Ty));
          NewAnd->takeName(LHSI);
          return new ICmpInst(ICI.getPredicate(), NewAnd,
                              ConstantExpr::getTrunc(RHS, Ty));
        }
      }

      // If this is: (X >> C1) & C2 != C3 (where any shift and any compare
      // could exist), turn it into (X & (C2 << C1)) != (C3 << C1).  This
      // happens a LOT in code produced by the C front-end, for bitfield
      // access.
      BinaryOperator *Shift = dyn_cast<BinaryOperator>(LHSI->getOperand(0));
      if (Shift && !Shift->isShift())
        Shift = 0;

      ConstantInt *ShAmt;
      ShAmt = Shift ? dyn_cast<ConstantInt>(Shift->getOperand(1)) : 0;
      Type *Ty = Shift ? Shift->getType() : 0;  // Type of the shift.
      Type *AndTy = AndCST->getType();          // Type of the and.

      // We can fold this as long as we can't shift unknown bits
      // into the mask.  This can only happen with signed shift
      // rights, as they sign-extend.
      if (ShAmt) {
        bool CanFold = Shift->isLogicalShift();
        if (!CanFold) {
          // To test for the bad case of the signed shr, see if any
          // of the bits shifted in could be tested after the mask.
          uint32_t TyBits = Ty->getPrimitiveSizeInBits();
          int ShAmtVal = TyBits - ShAmt->getLimitedValue(TyBits);

          uint32_t BitWidth = AndTy->getPrimitiveSizeInBits();
          if ((APInt::getHighBitsSet(BitWidth, BitWidth-ShAmtVal) &
               AndCST->getValue()) == 0)
            CanFold = true;
        }

        if (CanFold) {
          Constant *NewCst;
          if (Shift->getOpcode() == Instruction::Shl)
            NewCst = ConstantExpr::getLShr(RHS, ShAmt);
          else
            NewCst = ConstantExpr::getShl(RHS, ShAmt);

          // Check to see if we are shifting out any of the bits being
          // compared.
          if (ConstantExpr::get(Shift->getOpcode(),
                                NewCst, ShAmt) != RHS) {
            // If we shifted bits out, the fold is not going to work out.
            // As a special case, check to see if this means that the
            // result is always true or false now.
            if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
              return ReplaceInstUsesWith(ICI,
                                       ConstantInt::getFalse(ICI.getContext()));
            if (ICI.getPredicate() == ICmpInst::ICMP_NE)
              return ReplaceInstUsesWith(ICI,
                                       ConstantInt::getTrue(ICI.getContext()));
          } else {
            ICI.setOperand(1, NewCst);
            Constant *NewAndCST;
            if (Shift->getOpcode() == Instruction::Shl)
              NewAndCST = ConstantExpr::getLShr(AndCST, ShAmt);
            else
              NewAndCST = ConstantExpr::getShl(AndCST, ShAmt);
            LHSI->setOperand(1, NewAndCST);
            LHSI->setOperand(0, Shift->getOperand(0));
            Worklist.Add(Shift); // Shift is dead.
            return &ICI;
          }
        }
      }

      // Turn ((X >> Y) & C) == 0  into  (X & (C << Y)) == 0.  The latter is
      // preferable because it allows the C<<Y expression to be hoisted out
      // of a loop if Y is invariant and X is not.
      if (Shift && Shift->hasOneUse() && RHSV == 0 &&
          ICI.isEquality() && !Shift->isArithmeticShift() &&
          !isa<Constant>(Shift->getOperand(0))) {
        // Compute C << Y.
        Value *NS;
        if (Shift->getOpcode() == Instruction::LShr) {
          NS = Builder->CreateShl(AndCST, Shift->getOperand(1), "tmp");
        } else {
          // Insert a logical shift.
          NS = Builder->CreateLShr(AndCST, Shift->getOperand(1), "tmp");
        }

        // Compute X & (C << Y).
        Value *NewAnd =
          Builder->CreateAnd(Shift->getOperand(0), NS, LHSI->getName());

        ICI.setOperand(0, NewAnd);
        return &ICI;
      }
    }

    // Try to optimize things like "A[i]&42 == 0" to index computations.
    if (LoadInst *LI = dyn_cast<LoadInst>(LHSI->getOperand(0))) {
      if (GetElementPtrInst *GEP =
            dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
        if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
          if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
              !LI->isVolatile() && isa<ConstantInt>(LHSI->getOperand(1))) {
            ConstantInt *C = cast<ConstantInt>(LHSI->getOperand(1));
            if (Instruction *Res = FoldCmpLoadFromIndexedGlobal(GEP, GV, ICI, C))
              return Res;
          }
    }
    break;

  case Instruction::Or: {
    if (!ICI.isEquality() || !RHS->isNullValue() || !LHSI->hasOneUse())
      break;
    Value *P, *Q;
    if (match(LHSI, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) {
      // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
      // -> and (icmp eq P, null), (icmp eq Q, null).
      Value *ICIP = Builder->CreateICmp(ICI.getPredicate(), P,
                                        Constant::getNullValue(P->getType()));
      Value *ICIQ = Builder->CreateICmp(ICI.getPredicate(), Q,
                                        Constant::getNullValue(Q->getType()));
      Instruction *Op;
      if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
        Op = BinaryOperator::CreateAnd(ICIP, ICIQ);
      else
        Op = BinaryOperator::CreateOr(ICIP, ICIQ);
      return Op;
    }
    break;
  }

  case Instruction::Shl: {       // (icmp pred (shl X, ShAmt), CI)
    ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1));
    if (!ShAmt) break;

    uint32_t TypeBits = RHSV.getBitWidth();

    // Check that the shift amount is in range.  If not, don't perform
    // undefined shifts.  When the shift is visited it will be
    // simplified.
    if (ShAmt->uge(TypeBits))
      break;

    if (ICI.isEquality()) {
      // If we are comparing against bits always shifted out, the
      // comparison cannot succeed.
      Constant *Comp =
        ConstantExpr::getShl(ConstantExpr::getLShr(RHS, ShAmt),
                             ShAmt);
      if (Comp != RHS) { // Comparing against a bit that we know is zero.
        bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
        Constant *Cst =
          ConstantInt::get(Type::getInt1Ty(ICI.getContext()), IsICMP_NE);
        return ReplaceInstUsesWith(ICI, Cst);
      }

      // If the shift is NUW, then it is just shifting out zeros, no need for
      // an AND.
      if (cast<BinaryOperator>(LHSI)->hasNoUnsignedWrap())
        return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0),
                            ConstantExpr::getLShr(RHS, ShAmt));

      if (LHSI->hasOneUse()) {
        // Otherwise strength reduce the shift into an and.
        uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
        Constant *Mask =
          ConstantInt::get(ICI.getContext(), APInt::getLowBitsSet(TypeBits,
                                                       TypeBits-ShAmtVal));

        Value *And =
          Builder->CreateAnd(LHSI->getOperand(0), Mask, LHSI->getName()+".mask");
        return new ICmpInst(ICI.getPredicate(), And,
                            ConstantExpr::getLShr(RHS, ShAmt));
      }
    }

    // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
    bool TrueIfSigned = false;
    if (LHSI->hasOneUse() &&
        isSignBitCheck(ICI.getPredicate(), RHS, TrueIfSigned)) {
      // (X << 31) <s 0  --> (X&1) != 0
      Constant *Mask = ConstantInt::get(LHSI->getOperand(0)->getType(),
                                        APInt::getOneBitSet(TypeBits,
                                            TypeBits-ShAmt->getZExtValue()-1));
      Value *And =
        Builder->CreateAnd(LHSI->getOperand(0), Mask, LHSI->getName()+".mask");
      return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
                          And, Constant::getNullValue(And->getType()));
    }
    break;
  }

  case Instruction::LShr:         // (icmp pred (shr X, ShAmt), CI)
  case Instruction::AShr: {
    // Handle equality comparisons of shift-by-constant.
    BinaryOperator *BO = cast<BinaryOperator>(LHSI);
    if (ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1))) {
      if (Instruction *Res = FoldICmpShrCst(ICI, BO, ShAmt))
        return Res;
    }

    // Handle exact shr's.
    if (ICI.isEquality() && BO->isExact() && BO->hasOneUse()) {
      if (RHSV.isMinValue())
        return new ICmpInst(ICI.getPredicate(), BO->getOperand(0), RHS);
    }
    break;
  }

  case Instruction::SDiv:
  case Instruction::UDiv:
    // Fold: icmp pred ([us]div X, C1), C2 -> range test
    // Fold this div into the comparison, producing a range check.
    // Determine, based on the divide type, what the range is being
    // checked.  If there is an overflow on the low or high side, remember
    // it, otherwise compute the range [low, hi) bounding the new value.
    // See: InsertRangeTest above for the kinds of replacements possible.
    if (ConstantInt *DivRHS = dyn_cast<ConstantInt>(LHSI->getOperand(1)))
      if (Instruction *R = FoldICmpDivCst(ICI, cast<BinaryOperator>(LHSI),
                                          DivRHS))
        return R;
    break;

  case Instruction::Add:
    // Fold: icmp pred (add X, C1), C2
    if (!ICI.isEquality()) {
      ConstantInt *LHSC = dyn_cast<ConstantInt>(LHSI->getOperand(1));
      if (!LHSC) break;
      const APInt &LHSV = LHSC->getValue();

      ConstantRange CR = ICI.makeConstantRange(ICI.getPredicate(), RHSV)
                            .subtract(LHSV);

      if (ICI.isSigned()) {
        if (CR.getLower().isSignBit()) {
          return new ICmpInst(ICmpInst::ICMP_SLT, LHSI->getOperand(0),
                              ConstantInt::get(ICI.getContext(), CR.getUpper()));
        } else if (CR.getUpper().isSignBit()) {
          return new ICmpInst(ICmpInst::ICMP_SGE, LHSI->getOperand(0),
                              ConstantInt::get(ICI.getContext(), CR.getLower()));
        }
      } else {
        if (CR.getLower().isMinValue()) {
          return new ICmpInst(ICmpInst::ICMP_ULT, LHSI->getOperand(0),
                              ConstantInt::get(ICI.getContext(), CR.getUpper()));
        } else if (CR.getUpper().isMinValue()) {
          return new ICmpInst(ICmpInst::ICMP_UGE, LHSI->getOperand(0),
                              ConstantInt::get(ICI.getContext(), CR.getLower()));
        }
      }
    }
    break;
  }

  // Simplify icmp_eq and icmp_ne instructions with integer constant RHS.
  if (ICI.isEquality()) {
    bool isICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;

    // If the first operand is (add|sub|and|or|xor|rem) with a constant, and
    // the second operand is a constant, simplify a bit.
    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(LHSI)) {
      switch (BO->getOpcode()) {
      case Instruction::SRem:
        // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
        if (RHSV == 0 && isa<ConstantInt>(BO->getOperand(1)) &&
            BO->hasOneUse()) {
          const APInt &V = cast<ConstantInt>(BO->getOperand(1))->getValue();
          if (V.sgt(1) && V.isPowerOf2()) {
            Value *NewRem =
              Builder->CreateURem(BO->getOperand(0), BO->getOperand(1),
                                  BO->getName());
            return new ICmpInst(ICI.getPredicate(), NewRem,
                                Constant::getNullValue(BO->getType()));
          }
        }
        break;
      case Instruction::Add:
        // Replace ((add A, B) != C) with (A != C-B) if B & C are constants.
        if (ConstantInt *BOp1C = dyn_cast<ConstantInt>(BO->getOperand(1))) {
          if (BO->hasOneUse())
            return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
                                ConstantExpr::getSub(RHS, BOp1C));
        } else if (RHSV == 0) {
          // Replace ((add A, B) != 0) with (A != -B) if A or B is
          // efficiently invertible, or if the add has just this one use.
          Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);

          if (Value *NegVal = dyn_castNegVal(BOp1))
            return new ICmpInst(ICI.getPredicate(), BOp0, NegVal);
          if (Value *NegVal = dyn_castNegVal(BOp0))
            return new ICmpInst(ICI.getPredicate(), NegVal, BOp1);
          if (BO->hasOneUse()) {
            Value *Neg = Builder->CreateNeg(BOp1);
            Neg->takeName(BO);
            return new ICmpInst(ICI.getPredicate(), BOp0, Neg);
          }
        }
        break;
      case Instruction::Xor:
        // For the xor case, we can xor two constants together, eliminating
        // the explicit xor.
        if (Constant *BOC = dyn_cast<Constant>(BO->getOperand(1))) {
          return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
                              ConstantExpr::getXor(RHS, BOC));
        } else if (RHSV == 0) {
          // Replace ((xor A, B) != 0) with (A != B)
          return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
                              BO->getOperand(1));
        }
        break;
      case Instruction::Sub:
        // Replace ((sub A, B) != C) with (B != A-C) if A & C are constants.
        if (ConstantInt *BOp0C = dyn_cast<ConstantInt>(BO->getOperand(0))) {
          if (BO->hasOneUse())
            return new ICmpInst(ICI.getPredicate(), BO->getOperand(1),
                                ConstantExpr::getSub(BOp0C, RHS));
        } else if (RHSV == 0) {
          // Replace ((sub A, B) != 0) with (A != B)
          return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
                              BO->getOperand(1));
        }
        break;
      case Instruction::Or:
        // If bits are being or'd in that are not present in the constant we
        // are comparing against, then the comparison could never succeed!
        if (ConstantInt *BOC = dyn_cast<ConstantInt>(BO->getOperand(1))) {
          Constant *NotCI = ConstantExpr::getNot(RHS);
          if (!ConstantExpr::getAnd(BOC, NotCI)->isNullValue())
            return ReplaceInstUsesWith(ICI,
                             ConstantInt::get(Type::getInt1Ty(ICI.getContext()),
                                              isICMP_NE));
        }
        break;

      case Instruction::And:
        if (ConstantInt *BOC = dyn_cast<ConstantInt>(BO->getOperand(1))) {
          // If bits are being compared against that are and'd out, then the
          // comparison can never succeed!
          if ((RHSV & ~BOC->getValue()) != 0)
            return ReplaceInstUsesWith(ICI,
                             ConstantInt::get(Type::getInt1Ty(ICI.getContext()),
                                              isICMP_NE));

          // If we have ((X & C) == C), turn it into ((X & C) != 0).
          if (RHS == BOC && RHSV.isPowerOf2())
            return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ :
                                ICmpInst::ICMP_NE, LHSI,
                                Constant::getNullValue(RHS->getType()));

          // Don't perform the following transforms if the AND has multiple uses
          if (!BO->hasOneUse())
            break;

          // Replace (and X, (1 << size(X)-1) != 0) with x s< 0
          if (BOC->getValue().isSignBit()) {
            Value *X = BO->getOperand(0);
            Constant *Zero = Constant::getNullValue(X->getType());
            ICmpInst::Predicate pred = isICMP_NE ?
              ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
            return new ICmpInst(pred, X, Zero);
          }

          // ((X & ~7) == 0) --> X < 8
          if (RHSV == 0 && isHighOnes(BOC)) {
            Value *X = BO->getOperand(0);
            Constant *NegX = ConstantExpr::getNeg(BOC);
            ICmpInst::Predicate pred = isICMP_NE ?
              ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
            return new ICmpInst(pred, X, NegX);
          }
        }
      default: break;
      }
    } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(LHSI)) {
      // Handle icmp {eq|ne} <intrinsic>, intcst.
1481 switch (II->getIntrinsicID()) { 1482 case Intrinsic::bswap: 1483 Worklist.Add(II); 1484 ICI.setOperand(0, II->getArgOperand(0)); 1485 ICI.setOperand(1, ConstantInt::get(II->getContext(), RHSV.byteSwap())); 1486 return &ICI; 1487 case Intrinsic::ctlz: 1488 case Intrinsic::cttz: 1489 // ctz(A) == bitwidth(a) -> A == 0 and likewise for != 1490 if (RHSV == RHS->getType()->getBitWidth()) { 1491 Worklist.Add(II); 1492 ICI.setOperand(0, II->getArgOperand(0)); 1493 ICI.setOperand(1, ConstantInt::get(RHS->getType(), 0)); 1494 return &ICI; 1495 } 1496 break; 1497 case Intrinsic::ctpop: 1498 // popcount(A) == 0 -> A == 0 and likewise for != 1499 if (RHS->isZero()) { 1500 Worklist.Add(II); 1501 ICI.setOperand(0, II->getArgOperand(0)); 1502 ICI.setOperand(1, RHS); 1503 return &ICI; 1504 } 1505 break; 1506 default: 1507 break; 1508 } 1509 } 1510 } 1511 return 0; 1512 } 1513 1514 /// visitICmpInstWithCastAndCast - Handle icmp (cast x to y), (cast/cst). 1515 /// We only handle extending casts so far. 1516 /// 1517 Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) { 1518 const CastInst *LHSCI = cast<CastInst>(ICI.getOperand(0)); 1519 Value *LHSCIOp = LHSCI->getOperand(0); 1520 Type *SrcTy = LHSCIOp->getType(); 1521 Type *DestTy = LHSCI->getType(); 1522 Value *RHSCIOp; 1523 1524 // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the 1525 // integer type is the same size as the pointer type. 1526 if (TD && LHSCI->getOpcode() == Instruction::PtrToInt && 1527 TD->getPointerSizeInBits() == 1528 cast<IntegerType>(DestTy)->getBitWidth()) { 1529 Value *RHSOp = 0; 1530 if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1))) { 1531 RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy); 1532 } else if (PtrToIntInst *RHSC = dyn_cast<PtrToIntInst>(ICI.getOperand(1))) { 1533 RHSOp = RHSC->getOperand(0); 1534 // If the pointer types don't match, insert a bitcast. 1535 if (LHSCIOp->getType() != RHSOp->getType()) 1536 RHSOp = Builder->CreateBitCast(RHSOp, LHSCIOp->getType()); 1537 } 1538 1539 if (RHSOp) 1540 return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSOp); 1541 } 1542 1543 // The code below only handles extension cast instructions, so far. 1544 // Enforce this. 1545 if (LHSCI->getOpcode() != Instruction::ZExt && 1546 LHSCI->getOpcode() != Instruction::SExt) 1547 return 0; 1548 1549 bool isSignedExt = LHSCI->getOpcode() == Instruction::SExt; 1550 bool isSignedCmp = ICI.isSigned(); 1551 1552 if (CastInst *CI = dyn_cast<CastInst>(ICI.getOperand(1))) { 1553 // Not an extension from the same type? 1554 RHSCIOp = CI->getOperand(0); 1555 if (RHSCIOp->getType() != LHSCIOp->getType()) 1556 return 0; 1557 1558 // If the signedness of the two casts doesn't agree (i.e. one is a sext 1559 // and the other is a zext), then we can't handle this. 1560 if (CI->getOpcode() != LHSCI->getOpcode()) 1561 return 0; 1562 1563 // Deal with equality cases early. 1564 if (ICI.isEquality()) 1565 return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp); 1566 1567 // A signed comparison of sign extended values simplifies into a 1568 // signed comparison. 1569 if (isSignedCmp && isSignedExt) 1570 return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp); 1571 1572 // The other three cases all fold into an unsigned comparison. 
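    // (The three remaining combinations -- zext with a signed compare, zext
    //  with an unsigned compare, and sext with an unsigned compare -- each
    //  give the same result as an unsigned compare of the narrower operands,
    //  so they all share the single return below.)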
1573 return new ICmpInst(ICI.getUnsignedPredicate(), LHSCIOp, RHSCIOp); 1574 } 1575 1576 // If we aren't dealing with a constant on the RHS, exit early 1577 ConstantInt *CI = dyn_cast<ConstantInt>(ICI.getOperand(1)); 1578 if (!CI) 1579 return 0; 1580 1581 // Compute the constant that would happen if we truncated to SrcTy then 1582 // reextended to DestTy. 1583 Constant *Res1 = ConstantExpr::getTrunc(CI, SrcTy); 1584 Constant *Res2 = ConstantExpr::getCast(LHSCI->getOpcode(), 1585 Res1, DestTy); 1586 1587 // If the re-extended constant didn't change... 1588 if (Res2 == CI) { 1589 // Deal with equality cases early. 1590 if (ICI.isEquality()) 1591 return new ICmpInst(ICI.getPredicate(), LHSCIOp, Res1); 1592 1593 // A signed comparison of sign extended values simplifies into a 1594 // signed comparison. 1595 if (isSignedExt && isSignedCmp) 1596 return new ICmpInst(ICI.getPredicate(), LHSCIOp, Res1); 1597 1598 // The other three cases all fold into an unsigned comparison. 1599 return new ICmpInst(ICI.getUnsignedPredicate(), LHSCIOp, Res1); 1600 } 1601 1602 // The re-extended constant changed so the constant cannot be represented 1603 // in the shorter type. Consequently, we cannot emit a simple comparison. 1604 // All the cases that fold to true or false will have already been handled 1605 // by SimplifyICmpInst, so only deal with the tricky case. 1606 1607 if (isSignedCmp || !isSignedExt) 1608 return 0; 1609 1610 // Evaluate the comparison for LT (we invert for GT below). LE and GE cases 1611 // should have been folded away previously and not enter in here. 1612 1613 // We're performing an unsigned comp with a sign extended value. 1614 // This is true if the input is >= 0. [aka >s -1] 1615 Constant *NegOne = Constant::getAllOnesValue(SrcTy); 1616 Value *Result = Builder->CreateICmpSGT(LHSCIOp, NegOne, ICI.getName()); 1617 1618 // Finally, return the value computed. 1619 if (ICI.getPredicate() == ICmpInst::ICMP_ULT) 1620 return ReplaceInstUsesWith(ICI, Result); 1621 1622 assert(ICI.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!"); 1623 return BinaryOperator::CreateNot(Result); 1624 } 1625 1626 /// ProcessUGT_ADDCST_ADD - The caller has matched a pattern of the form: 1627 /// I = icmp ugt (add (add A, B), CI2), CI1 1628 /// If this is of the form: 1629 /// sum = a + b 1630 /// if (sum+128 >u 255) 1631 /// Then replace it with llvm.sadd.with.overflow.i8. 1632 /// 1633 static Instruction *ProcessUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B, 1634 ConstantInt *CI2, ConstantInt *CI1, 1635 InstCombiner &IC) { 1636 // The transformation we're trying to do here is to transform this into an 1637 // llvm.sadd.with.overflow. To do this, we have to replace the original add 1638 // with a narrower add, and discard the add-with-constant that is part of the 1639 // range check (if we can't eliminate it, this isn't profitable). 1640 1641 // In order to eliminate the add-with-constant, the compare can be its only 1642 // use. 1643 Instruction *AddWithCst = cast<Instruction>(I.getOperand(0)); 1644 if (!AddWithCst->hasOneUse()) return 0; 1645 1646 // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow. 1647 if (!CI2->getValue().isPowerOf2()) return 0; 1648 unsigned NewWidth = CI2->getValue().countTrailingZeros(); 1649 if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31) return 0; 1650 1651 // The width of the new add formed is 1 more than the bias. 1652 ++NewWidth; 1653 1654 // Check to see that CI1 is an all-ones value with NewWidth bits. 
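  // For example, in the motivating "if (sum+128 >u 255)" i8 pattern the
  // comparison is done in i32: CI2 is 128 (2^7, so NewWidth becomes 8) and
  // CI1 is 255, i.e. exactly the low 8 bits set within the i32 constant.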
1655 if (CI1->getBitWidth() == NewWidth || 1656 CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth)) 1657 return 0; 1658 1659 // In order to replace the original add with a narrower 1660 // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant 1661 // and truncates that discard the high bits of the add. Verify that this is 1662 // the case. 1663 Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0)); 1664 for (Value::use_iterator UI = OrigAdd->use_begin(), E = OrigAdd->use_end(); 1665 UI != E; ++UI) { 1666 if (*UI == AddWithCst) continue; 1667 1668 // Only accept truncates for now. We would really like a nice recursive 1669 // predicate like SimplifyDemandedBits, but which goes downwards the use-def 1670 // chain to see which bits of a value are actually demanded. If the 1671 // original add had another add which was then immediately truncated, we 1672 // could still do the transformation. 1673 TruncInst *TI = dyn_cast<TruncInst>(*UI); 1674 if (TI == 0 || 1675 TI->getType()->getPrimitiveSizeInBits() > NewWidth) return 0; 1676 } 1677 1678 // If the pattern matches, truncate the inputs to the narrower type and 1679 // use the sadd_with_overflow intrinsic to efficiently compute both the 1680 // result and the overflow bit. 1681 Module *M = I.getParent()->getParent()->getParent(); 1682 1683 Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth); 1684 Value *F = Intrinsic::getDeclaration(M, Intrinsic::sadd_with_overflow, 1685 NewType); 1686 1687 InstCombiner::BuilderTy *Builder = IC.Builder; 1688 1689 // Put the new code above the original add, in case there are any uses of the 1690 // add between the add and the compare. 1691 Builder->SetInsertPoint(OrigAdd); 1692 1693 Value *TruncA = Builder->CreateTrunc(A, NewType, A->getName()+".trunc"); 1694 Value *TruncB = Builder->CreateTrunc(B, NewType, B->getName()+".trunc"); 1695 CallInst *Call = Builder->CreateCall2(F, TruncA, TruncB, "sadd"); 1696 Value *Add = Builder->CreateExtractValue(Call, 0, "sadd.result"); 1697 Value *ZExt = Builder->CreateZExt(Add, OrigAdd->getType()); 1698 1699 // The inner add was the result of the narrow add, zero extended to the 1700 // wider type. Replace it with the result computed by the intrinsic. 1701 IC.ReplaceInstUsesWith(*OrigAdd, ZExt); 1702 1703 // The original icmp gets replaced with the overflow value. 1704 return ExtractValueInst::Create(Call, 1, "sadd.overflow"); 1705 } 1706 1707 static Instruction *ProcessUAddIdiom(Instruction &I, Value *OrigAddV, 1708 InstCombiner &IC) { 1709 // Don't bother doing this transformation for pointers, don't do it for 1710 // vectors. 1711 if (!isa<IntegerType>(OrigAddV->getType())) return 0; 1712 1713 // If the add is a constant expr, then we don't bother transforming it. 1714 Instruction *OrigAdd = dyn_cast<Instruction>(OrigAddV); 1715 if (OrigAdd == 0) return 0; 1716 1717 Value *LHS = OrigAdd->getOperand(0), *RHS = OrigAdd->getOperand(1); 1718 1719 // Put the new code above the original add, in case there are any uses of the 1720 // add between the add and the compare. 
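  // Rough shape of the rewrite, as a sketch (value names illustrative):
  //   %add = add i32 %a, %b
  //   %cmp = icmp ult i32 %add, %a                  ; "(a+b) <u a" idiom
  // becomes
  //   %res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  //   %add = extractvalue { i32, i1 } %res, 0
  //   %cmp = extractvalue { i32, i1 } %res, 1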
1721 InstCombiner::BuilderTy *Builder = IC.Builder; 1722 Builder->SetInsertPoint(OrigAdd); 1723 1724 Module *M = I.getParent()->getParent()->getParent(); 1725 Type *Ty = LHS->getType(); 1726 Value *F = Intrinsic::getDeclaration(M, Intrinsic::uadd_with_overflow, Ty); 1727 CallInst *Call = Builder->CreateCall2(F, LHS, RHS, "uadd"); 1728 Value *Add = Builder->CreateExtractValue(Call, 0); 1729 1730 IC.ReplaceInstUsesWith(*OrigAdd, Add); 1731 1732 // The original icmp gets replaced with the overflow value. 1733 return ExtractValueInst::Create(Call, 1, "uadd.overflow"); 1734 } 1735 1736 // DemandedBitsLHSMask - When performing a comparison against a constant, 1737 // it is possible that not all the bits in the LHS are demanded. This helper 1738 // method computes the mask that IS demanded. 1739 static APInt DemandedBitsLHSMask(ICmpInst &I, 1740 unsigned BitWidth, bool isSignCheck) { 1741 if (isSignCheck) 1742 return APInt::getSignBit(BitWidth); 1743 1744 ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1)); 1745 if (!CI) return APInt::getAllOnesValue(BitWidth); 1746 const APInt &RHS = CI->getValue(); 1747 1748 switch (I.getPredicate()) { 1749 // For a UGT comparison, we don't care about any bits that 1750 // correspond to the trailing ones of the comparand. The value of these 1751 // bits doesn't impact the outcome of the comparison, because any value 1752 // greater than the RHS must differ in a bit higher than these due to carry. 1753 case ICmpInst::ICMP_UGT: { 1754 unsigned trailingOnes = RHS.countTrailingOnes(); 1755 APInt lowBitsSet = APInt::getLowBitsSet(BitWidth, trailingOnes); 1756 return ~lowBitsSet; 1757 } 1758 1759 // Similarly, for a ULT comparison, we don't care about the trailing zeros. 1760 // Any value less than the RHS must differ in a higher bit because of carries. 1761 case ICmpInst::ICMP_ULT: { 1762 unsigned trailingZeros = RHS.countTrailingZeros(); 1763 APInt lowBitsSet = APInt::getLowBitsSet(BitWidth, trailingZeros); 1764 return ~lowBitsSet; 1765 } 1766 1767 default: 1768 return APInt::getAllOnesValue(BitWidth); 1769 } 1770 1771 } 1772 1773 Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { 1774 bool Changed = false; 1775 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 1776 1777 /// Orders the operands of the compare so that they are listed from most 1778 /// complex to least complex. This puts constants before unary operators, 1779 /// before binary operators. 
1780 if (getComplexity(Op0) < getComplexity(Op1)) {
1781 I.swapOperands();
1782 std::swap(Op0, Op1);
1783 Changed = true;
1784 }
1785
1786 if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, TD))
1787 return ReplaceInstUsesWith(I, V);
1788
1789 Type *Ty = Op0->getType();
1790
1791 // icmp's with boolean values can always be turned into bitwise operations
1792 if (Ty->isIntegerTy(1)) {
1793 switch (I.getPredicate()) {
1794 default: llvm_unreachable("Invalid icmp instruction!");
1795 case ICmpInst::ICMP_EQ: { // icmp eq i1 A, B -> ~(A^B)
1796 Value *Xor = Builder->CreateXor(Op0, Op1, I.getName()+"tmp");
1797 return BinaryOperator::CreateNot(Xor);
1798 }
1799 case ICmpInst::ICMP_NE: // icmp ne i1 A, B -> A^B
1800 return BinaryOperator::CreateXor(Op0, Op1);
1801
1802 case ICmpInst::ICMP_UGT:
1803 std::swap(Op0, Op1); // Change icmp ugt -> icmp ult
1804 // FALL THROUGH
1805 case ICmpInst::ICMP_ULT:{ // icmp ult i1 A, B -> ~A & B
1806 Value *Not = Builder->CreateNot(Op0, I.getName()+"tmp");
1807 return BinaryOperator::CreateAnd(Not, Op1);
1808 }
1809 case ICmpInst::ICMP_SGT:
1810 std::swap(Op0, Op1); // Change icmp sgt -> icmp slt
1811 // FALL THROUGH
1812 case ICmpInst::ICMP_SLT: { // icmp slt i1 A, B -> A & ~B
1813 Value *Not = Builder->CreateNot(Op1, I.getName()+"tmp");
1814 return BinaryOperator::CreateAnd(Not, Op0);
1815 }
1816 case ICmpInst::ICMP_UGE:
1817 std::swap(Op0, Op1); // Change icmp uge -> icmp ule
1818 // FALL THROUGH
1819 case ICmpInst::ICMP_ULE: { // icmp ule i1 A, B -> ~A | B
1820 Value *Not = Builder->CreateNot(Op0, I.getName()+"tmp");
1821 return BinaryOperator::CreateOr(Not, Op1);
1822 }
1823 case ICmpInst::ICMP_SGE:
1824 std::swap(Op0, Op1); // Change icmp sge -> icmp sle
1825 // FALL THROUGH
1826 case ICmpInst::ICMP_SLE: { // icmp sle i1 A, B -> A | ~B
1827 Value *Not = Builder->CreateNot(Op1, I.getName()+"tmp");
1828 return BinaryOperator::CreateOr(Not, Op0);
1829 }
1830 }
1831 }
1832
1833 unsigned BitWidth = 0;
1834 if (Ty->isIntOrIntVectorTy())
1835 BitWidth = Ty->getScalarSizeInBits();
1836 else if (TD) // Pointers require TD info to get their size.
1837 BitWidth = TD->getTypeSizeInBits(Ty->getScalarType());
1838
1839 bool isSignBit = false;
1840
1841 // See if we are doing a comparison with a constant.
1842 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
1843 Value *A = 0, *B = 0;
1844
1845 // Match the following pattern, which is a common idiom when writing
1846 // overflow-safe integer arithmetic functions. The source performs an
1847 // addition in a wider type, and explicitly checks for overflow using
1848 // comparisons against INT_MIN and INT_MAX. Simplify this by using the
1849 // sadd_with_overflow intrinsic.
1850 //
1851 // TODO: This could probably be generalized to handle other overflow-safe
1852 // operations if we worked out the formulas to compute the appropriate
1853 // magic constants.
1854 //
1855 // sum = a + b
1856 // if (sum+128 >u 255) ...
-> llvm.sadd.with.overflow.i8 1857 { 1858 ConstantInt *CI2; // I = icmp ugt (add (add A, B), CI2), CI 1859 if (I.getPredicate() == ICmpInst::ICMP_UGT && 1860 match(Op0, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2)))) 1861 if (Instruction *Res = ProcessUGT_ADDCST_ADD(I, A, B, CI2, CI, *this)) 1862 return Res; 1863 } 1864 1865 // (icmp ne/eq (sub A B) 0) -> (icmp ne/eq A, B) 1866 if (I.isEquality() && CI->isZero() && 1867 match(Op0, m_Sub(m_Value(A), m_Value(B)))) { 1868 // (icmp cond A B) if cond is equality 1869 return new ICmpInst(I.getPredicate(), A, B); 1870 } 1871 1872 // If we have an icmp le or icmp ge instruction, turn it into the 1873 // appropriate icmp lt or icmp gt instruction. This allows us to rely on 1874 // them being folded in the code below. The SimplifyICmpInst code has 1875 // already handled the edge cases for us, so we just assert on them. 1876 switch (I.getPredicate()) { 1877 default: break; 1878 case ICmpInst::ICMP_ULE: 1879 assert(!CI->isMaxValue(false)); // A <=u MAX -> TRUE 1880 return new ICmpInst(ICmpInst::ICMP_ULT, Op0, 1881 ConstantInt::get(CI->getContext(), CI->getValue()+1)); 1882 case ICmpInst::ICMP_SLE: 1883 assert(!CI->isMaxValue(true)); // A <=s MAX -> TRUE 1884 return new ICmpInst(ICmpInst::ICMP_SLT, Op0, 1885 ConstantInt::get(CI->getContext(), CI->getValue()+1)); 1886 case ICmpInst::ICMP_UGE: 1887 assert(!CI->isMinValue(false)); // A >=u MIN -> TRUE 1888 return new ICmpInst(ICmpInst::ICMP_UGT, Op0, 1889 ConstantInt::get(CI->getContext(), CI->getValue()-1)); 1890 case ICmpInst::ICMP_SGE: 1891 assert(!CI->isMinValue(true)); // A >=s MIN -> TRUE 1892 return new ICmpInst(ICmpInst::ICMP_SGT, Op0, 1893 ConstantInt::get(CI->getContext(), CI->getValue()-1)); 1894 } 1895 1896 // If this comparison is a normal comparison, it demands all 1897 // bits, if it is a sign bit comparison, it only demands the sign bit. 1898 bool UnusedBit; 1899 isSignBit = isSignBitCheck(I.getPredicate(), CI, UnusedBit); 1900 } 1901 1902 // See if we can fold the comparison based on range information we can get 1903 // by checking whether bits are known to be zero or one in the input. 1904 if (BitWidth != 0) { 1905 APInt Op0KnownZero(BitWidth, 0), Op0KnownOne(BitWidth, 0); 1906 APInt Op1KnownZero(BitWidth, 0), Op1KnownOne(BitWidth, 0); 1907 1908 if (SimplifyDemandedBits(I.getOperandUse(0), 1909 DemandedBitsLHSMask(I, BitWidth, isSignBit), 1910 Op0KnownZero, Op0KnownOne, 0)) 1911 return &I; 1912 if (SimplifyDemandedBits(I.getOperandUse(1), 1913 APInt::getAllOnesValue(BitWidth), 1914 Op1KnownZero, Op1KnownOne, 0)) 1915 return &I; 1916 1917 // Given the known and unknown bits, compute a range that the LHS could be 1918 // in. Compute the Min, Max and RHS values based on the known bits. For the 1919 // EQ and NE we use unsigned values. 1920 APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0); 1921 APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0); 1922 if (I.isSigned()) { 1923 ComputeSignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne, 1924 Op0Min, Op0Max); 1925 ComputeSignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne, 1926 Op1Min, Op1Max); 1927 } else { 1928 ComputeUnsignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne, 1929 Op0Min, Op0Max); 1930 ComputeUnsignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne, 1931 Op1Min, Op1Max); 1932 } 1933 1934 // If Min and Max are known to be the same, then SimplifyDemandedBits 1935 // figured out that the LHS is a constant. Just constant fold this now so 1936 // that code below can assume that Min != Max. 
1937 if (!isa<Constant>(Op0) && Op0Min == Op0Max) 1938 return new ICmpInst(I.getPredicate(), 1939 ConstantInt::get(Op0->getType(), Op0Min), Op1); 1940 if (!isa<Constant>(Op1) && Op1Min == Op1Max) 1941 return new ICmpInst(I.getPredicate(), Op0, 1942 ConstantInt::get(Op1->getType(), Op1Min)); 1943 1944 // Based on the range information we know about the LHS, see if we can 1945 // simplify this comparison. For example, (x&4) < 8 is always true. 1946 switch (I.getPredicate()) { 1947 default: llvm_unreachable("Unknown icmp opcode!"); 1948 case ICmpInst::ICMP_EQ: { 1949 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max)) 1950 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 1951 1952 // If all bits are known zero except for one, then we know at most one 1953 // bit is set. If the comparison is against zero, then this is a check 1954 // to see if *that* bit is set. 1955 APInt Op0KnownZeroInverted = ~Op0KnownZero; 1956 if (~Op1KnownZero == 0 && Op0KnownZeroInverted.isPowerOf2()) { 1957 // If the LHS is an AND with the same constant, look through it. 1958 Value *LHS = 0; 1959 ConstantInt *LHSC = 0; 1960 if (!match(Op0, m_And(m_Value(LHS), m_ConstantInt(LHSC))) || 1961 LHSC->getValue() != Op0KnownZeroInverted) 1962 LHS = Op0; 1963 1964 // If the LHS is 1 << x, and we know the result is a power of 2 like 8, 1965 // then turn "((1 << x)&8) == 0" into "x != 3". 1966 Value *X = 0; 1967 if (match(LHS, m_Shl(m_One(), m_Value(X)))) { 1968 unsigned CmpVal = Op0KnownZeroInverted.countTrailingZeros(); 1969 return new ICmpInst(ICmpInst::ICMP_NE, X, 1970 ConstantInt::get(X->getType(), CmpVal)); 1971 } 1972 1973 // If the LHS is 8 >>u x, and we know the result is a power of 2 like 1, 1974 // then turn "((8 >>u x)&1) == 0" into "x != 3". 1975 const APInt *CI; 1976 if (Op0KnownZeroInverted == 1 && 1977 match(LHS, m_LShr(m_Power2(CI), m_Value(X)))) 1978 return new ICmpInst(ICmpInst::ICMP_NE, X, 1979 ConstantInt::get(X->getType(), 1980 CI->countTrailingZeros())); 1981 } 1982 1983 break; 1984 } 1985 case ICmpInst::ICMP_NE: { 1986 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max)) 1987 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 1988 1989 // If all bits are known zero except for one, then we know at most one 1990 // bit is set. If the comparison is against zero, then this is a check 1991 // to see if *that* bit is set. 1992 APInt Op0KnownZeroInverted = ~Op0KnownZero; 1993 if (~Op1KnownZero == 0 && Op0KnownZeroInverted.isPowerOf2()) { 1994 // If the LHS is an AND with the same constant, look through it. 1995 Value *LHS = 0; 1996 ConstantInt *LHSC = 0; 1997 if (!match(Op0, m_And(m_Value(LHS), m_ConstantInt(LHSC))) || 1998 LHSC->getValue() != Op0KnownZeroInverted) 1999 LHS = Op0; 2000 2001 // If the LHS is 1 << x, and we know the result is a power of 2 like 8, 2002 // then turn "((1 << x)&8) != 0" into "x == 3". 2003 Value *X = 0; 2004 if (match(LHS, m_Shl(m_One(), m_Value(X)))) { 2005 unsigned CmpVal = Op0KnownZeroInverted.countTrailingZeros(); 2006 return new ICmpInst(ICmpInst::ICMP_EQ, X, 2007 ConstantInt::get(X->getType(), CmpVal)); 2008 } 2009 2010 // If the LHS is 8 >>u x, and we know the result is a power of 2 like 1, 2011 // then turn "((8 >>u x)&1) != 0" into "x == 3". 
2012 const APInt *CI;
2013 if (Op0KnownZeroInverted == 1 &&
2014 match(LHS, m_LShr(m_Power2(CI), m_Value(X))))
2015 return new ICmpInst(ICmpInst::ICMP_EQ, X,
2016 ConstantInt::get(X->getType(),
2017 CI->countTrailingZeros()));
2018 }
2019
2020 break;
2021 }
2022 case ICmpInst::ICMP_ULT:
2023 if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
2024 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
2025 if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
2026 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
2027 if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
2028 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
2029 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
2030 if (Op1Max == Op0Min+1) // A <u C -> A == C-1 if min(A)+1 == C
2031 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
2032 ConstantInt::get(CI->getContext(), CI->getValue()-1));
2033
2034 // (x <u 2147483648) -> (x >s -1) -> true if sign bit clear
2035 if (CI->isMinValue(true))
2036 return new ICmpInst(ICmpInst::ICMP_SGT, Op0,
2037 Constant::getAllOnesValue(Op0->getType()));
2038 }
2039 break;
2040 case ICmpInst::ICMP_UGT:
2041 if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
2042 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
2043 if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= min(B)
2044 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
2045
2046 if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
2047 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
2048 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
2049 if (Op1Min == Op0Max-1) // A >u C -> A == C+1 if max(A)-1 == C
2050 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
2051 ConstantInt::get(CI->getContext(), CI->getValue()+1));
2052
2053 // (x >u 2147483647) -> (x <s 0) -> true if sign bit set
2054 if (CI->isMaxValue(true))
2055 return new ICmpInst(ICmpInst::ICMP_SLT, Op0,
2056 Constant::getNullValue(Op0->getType()));
2057 }
2058 break;
2059 case ICmpInst::ICMP_SLT:
2060 if (Op0Max.slt(Op1Min)) // A <s B -> true if max(A) < min(B)
2061 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
2062 if (Op0Min.sge(Op1Max)) // A <s B -> false if min(A) >= max(B)
2063 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
2064 if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
2065 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
2066 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
2067 if (Op1Max == Op0Min+1) // A <s C -> A == C-1 if min(A)+1 == C
2068 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
2069 ConstantInt::get(CI->getContext(), CI->getValue()-1));
2070 }
2071 break;
2072 case ICmpInst::ICMP_SGT:
2073 if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
2074 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
2075 if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
2076 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
2077
2078 if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
2079 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
2080 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
2081 if (Op1Min == Op0Max-1) // A >s C -> A == C+1 if max(A)-1 == C
2082 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
2083 ConstantInt::get(CI->getContext(), CI->getValue()+1));
2084 }
2085 break;
2086 case ICmpInst::ICMP_SGE:
2087 assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
2088 if (Op0Min.sge(Op1Max)) // A >=s B -> true if
min(A) >= max(B) 2089 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 2090 if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B) 2091 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 2092 break; 2093 case ICmpInst::ICMP_SLE: 2094 assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!"); 2095 if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B) 2096 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 2097 if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B) 2098 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 2099 break; 2100 case ICmpInst::ICMP_UGE: 2101 assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!"); 2102 if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B) 2103 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 2104 if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B) 2105 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 2106 break; 2107 case ICmpInst::ICMP_ULE: 2108 assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!"); 2109 if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B) 2110 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 2111 if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B) 2112 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 2113 break; 2114 } 2115 2116 // Turn a signed comparison into an unsigned one if both operands 2117 // are known to have the same sign. 2118 if (I.isSigned() && 2119 ((Op0KnownZero.isNegative() && Op1KnownZero.isNegative()) || 2120 (Op0KnownOne.isNegative() && Op1KnownOne.isNegative()))) 2121 return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1); 2122 } 2123 2124 // Test if the ICmpInst instruction is used exclusively by a select as 2125 // part of a minimum or maximum operation. If so, refrain from doing 2126 // any other folding. This helps out other analyses which understand 2127 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution 2128 // and CodeGen. And in this case, at least one of the comparison 2129 // operands has at least one user besides the compare (the select), 2130 // which would often largely negate the benefit of folding anyway. 2131 if (I.hasOneUse()) 2132 if (SelectInst *SI = dyn_cast<SelectInst>(*I.use_begin())) 2133 if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) || 2134 (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1)) 2135 return 0; 2136 2137 // See if we are doing a comparison between a constant and an instruction that 2138 // can be folded into the comparison. 
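  // (For example, via the divide handling above, something like
  //    icmp ult (udiv i32 %x, 10), 5
  //  can become icmp ult i32 %x, 50, since x/10 < 5 iff x < 50 unsigned.)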
2139 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) { 2140 // Since the RHS is a ConstantInt (CI), if the left hand side is an 2141 // instruction, see if that instruction also has constants so that the 2142 // instruction can be folded into the icmp 2143 if (Instruction *LHSI = dyn_cast<Instruction>(Op0)) 2144 if (Instruction *Res = visitICmpInstWithInstAndIntCst(I, LHSI, CI)) 2145 return Res; 2146 } 2147 2148 // Handle icmp with constant (but not simple integer constant) RHS 2149 if (Constant *RHSC = dyn_cast<Constant>(Op1)) { 2150 if (Instruction *LHSI = dyn_cast<Instruction>(Op0)) 2151 switch (LHSI->getOpcode()) { 2152 case Instruction::GetElementPtr: 2153 // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null 2154 if (RHSC->isNullValue() && 2155 cast<GetElementPtrInst>(LHSI)->hasAllZeroIndices()) 2156 return new ICmpInst(I.getPredicate(), LHSI->getOperand(0), 2157 Constant::getNullValue(LHSI->getOperand(0)->getType())); 2158 break; 2159 case Instruction::PHI: 2160 // Only fold icmp into the PHI if the phi and icmp are in the same 2161 // block. If in the same block, we're encouraging jump threading. If 2162 // not, we are just pessimizing the code by making an i1 phi. 2163 if (LHSI->getParent() == I.getParent()) 2164 if (Instruction *NV = FoldOpIntoPhi(I)) 2165 return NV; 2166 break; 2167 case Instruction::Select: { 2168 // If either operand of the select is a constant, we can fold the 2169 // comparison into the select arms, which will cause one to be 2170 // constant folded and the select turned into a bitwise or. 2171 Value *Op1 = 0, *Op2 = 0; 2172 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) 2173 Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC); 2174 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) 2175 Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC); 2176 2177 // We only want to perform this transformation if it will not lead to 2178 // additional code. This is true if either both sides of the select 2179 // fold to a constant (in which case the icmp is replaced with a select 2180 // which will usually simplify) or this is the only user of the 2181 // select (in which case we are trading a select+icmp for a simpler 2182 // select+icmp). 2183 if ((Op1 && Op2) || (LHSI->hasOneUse() && (Op1 || Op2))) { 2184 if (!Op1) 2185 Op1 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(1), 2186 RHSC, I.getName()); 2187 if (!Op2) 2188 Op2 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(2), 2189 RHSC, I.getName()); 2190 return SelectInst::Create(LHSI->getOperand(0), Op1, Op2); 2191 } 2192 break; 2193 } 2194 case Instruction::IntToPtr: 2195 // icmp pred inttoptr(X), null -> icmp pred X, 0 2196 if (RHSC->isNullValue() && TD && 2197 TD->getIntPtrType(RHSC->getContext()) == 2198 LHSI->getOperand(0)->getType()) 2199 return new ICmpInst(I.getPredicate(), LHSI->getOperand(0), 2200 Constant::getNullValue(LHSI->getOperand(0)->getType())); 2201 break; 2202 2203 case Instruction::Load: 2204 // Try to optimize things like "A[i] > 4" to index computations. 2205 if (GetElementPtrInst *GEP = 2206 dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) { 2207 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) 2208 if (GV->isConstant() && GV->hasDefinitiveInitializer() && 2209 !cast<LoadInst>(LHSI)->isVolatile()) 2210 if (Instruction *Res = FoldCmpLoadFromIndexedGlobal(GEP, GV, I)) 2211 return Res; 2212 } 2213 break; 2214 } 2215 } 2216 2217 // If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now. 
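  // (Typical shape, sketched: two GEPs off the same base pointer, e.g.
  //    icmp eq (gep %base, i64 %i), (gep %base, i64 %j),
  //  can generally be reduced to a comparison of the indices, %i vs. %j.)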
2218 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0)) 2219 if (Instruction *NI = FoldGEPICmp(GEP, Op1, I.getPredicate(), I)) 2220 return NI; 2221 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1)) 2222 if (Instruction *NI = FoldGEPICmp(GEP, Op0, 2223 ICmpInst::getSwappedPredicate(I.getPredicate()), I)) 2224 return NI; 2225 2226 // Test to see if the operands of the icmp are casted versions of other 2227 // values. If the ptr->ptr cast can be stripped off both arguments, we do so 2228 // now. 2229 if (BitCastInst *CI = dyn_cast<BitCastInst>(Op0)) { 2230 if (Op0->getType()->isPointerTy() && 2231 (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) { 2232 // We keep moving the cast from the left operand over to the right 2233 // operand, where it can often be eliminated completely. 2234 Op0 = CI->getOperand(0); 2235 2236 // If operand #1 is a bitcast instruction, it must also be a ptr->ptr cast 2237 // so eliminate it as well. 2238 if (BitCastInst *CI2 = dyn_cast<BitCastInst>(Op1)) 2239 Op1 = CI2->getOperand(0); 2240 2241 // If Op1 is a constant, we can fold the cast into the constant. 2242 if (Op0->getType() != Op1->getType()) { 2243 if (Constant *Op1C = dyn_cast<Constant>(Op1)) { 2244 Op1 = ConstantExpr::getBitCast(Op1C, Op0->getType()); 2245 } else { 2246 // Otherwise, cast the RHS right before the icmp 2247 Op1 = Builder->CreateBitCast(Op1, Op0->getType()); 2248 } 2249 } 2250 return new ICmpInst(I.getPredicate(), Op0, Op1); 2251 } 2252 } 2253 2254 if (isa<CastInst>(Op0)) { 2255 // Handle the special case of: icmp (cast bool to X), <cst> 2256 // This comes up when you have code like 2257 // int X = A < B; 2258 // if (X) ... 2259 // For generality, we handle any zero-extension of any operand comparison 2260 // with a constant or another cast from the same type. 2261 if (isa<Constant>(Op1) || isa<CastInst>(Op1)) 2262 if (Instruction *R = visitICmpInstWithCastAndCast(I)) 2263 return R; 2264 } 2265 2266 // Special logic for binary operators. 2267 BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0); 2268 BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1); 2269 if (BO0 || BO1) { 2270 CmpInst::Predicate Pred = I.getPredicate(); 2271 bool NoOp0WrapProblem = false, NoOp1WrapProblem = false; 2272 if (BO0 && isa<OverflowingBinaryOperator>(BO0)) 2273 NoOp0WrapProblem = ICmpInst::isEquality(Pred) || 2274 (CmpInst::isUnsigned(Pred) && BO0->hasNoUnsignedWrap()) || 2275 (CmpInst::isSigned(Pred) && BO0->hasNoSignedWrap()); 2276 if (BO1 && isa<OverflowingBinaryOperator>(BO1)) 2277 NoOp1WrapProblem = ICmpInst::isEquality(Pred) || 2278 (CmpInst::isUnsigned(Pred) && BO1->hasNoUnsignedWrap()) || 2279 (CmpInst::isSigned(Pred) && BO1->hasNoSignedWrap()); 2280 2281 // Analyze the case when either Op0 or Op1 is an add instruction. 2282 // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null). 2283 Value *A = 0, *B = 0, *C = 0, *D = 0; 2284 if (BO0 && BO0->getOpcode() == Instruction::Add) 2285 A = BO0->getOperand(0), B = BO0->getOperand(1); 2286 if (BO1 && BO1->getOpcode() == Instruction::Add) 2287 C = BO1->getOperand(0), D = BO1->getOperand(1); 2288 2289 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow. 2290 if ((A == Op1 || B == Op1) && NoOp0WrapProblem) 2291 return new ICmpInst(Pred, A == Op1 ? B : A, 2292 Constant::getNullValue(Op1->getType())); 2293 2294 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow. 2295 if ((C == Op0 || D == Op0) && NoOp1WrapProblem) 2296 return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()), 2297 C == Op0 ? 
D : C); 2298 2299 // icmp (X+Y), (X+Z) -> icmp Y, Z for equalities or if there is no overflow. 2300 if (A && C && (A == C || A == D || B == C || B == D) && 2301 NoOp0WrapProblem && NoOp1WrapProblem && 2302 // Try not to increase register pressure. 2303 BO0->hasOneUse() && BO1->hasOneUse()) { 2304 // Determine Y and Z in the form icmp (X+Y), (X+Z). 2305 Value *Y = (A == C || A == D) ? B : A; 2306 Value *Z = (C == A || C == B) ? D : C; 2307 return new ICmpInst(Pred, Y, Z); 2308 } 2309 2310 // Analyze the case when either Op0 or Op1 is a sub instruction. 2311 // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null). 2312 A = 0; B = 0; C = 0; D = 0; 2313 if (BO0 && BO0->getOpcode() == Instruction::Sub) 2314 A = BO0->getOperand(0), B = BO0->getOperand(1); 2315 if (BO1 && BO1->getOpcode() == Instruction::Sub) 2316 C = BO1->getOperand(0), D = BO1->getOperand(1); 2317 2318 // icmp (X-Y), X -> icmp 0, Y for equalities or if there is no overflow. 2319 if (A == Op1 && NoOp0WrapProblem) 2320 return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B); 2321 2322 // icmp X, (X-Y) -> icmp Y, 0 for equalities or if there is no overflow. 2323 if (C == Op0 && NoOp1WrapProblem) 2324 return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType())); 2325 2326 // icmp (Y-X), (Z-X) -> icmp Y, Z for equalities or if there is no overflow. 2327 if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem && 2328 // Try not to increase register pressure. 2329 BO0->hasOneUse() && BO1->hasOneUse()) 2330 return new ICmpInst(Pred, A, C); 2331 2332 // icmp (X-Y), (X-Z) -> icmp Z, Y for equalities or if there is no overflow. 2333 if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem && 2334 // Try not to increase register pressure. 2335 BO0->hasOneUse() && BO1->hasOneUse()) 2336 return new ICmpInst(Pred, D, B); 2337 2338 BinaryOperator *SRem = NULL; 2339 // icmp (srem X, Y), Y 2340 if (BO0 && BO0->getOpcode() == Instruction::SRem && 2341 Op1 == BO0->getOperand(1)) 2342 SRem = BO0; 2343 // icmp Y, (srem X, Y) 2344 else if (BO1 && BO1->getOpcode() == Instruction::SRem && 2345 Op0 == BO1->getOperand(1)) 2346 SRem = BO1; 2347 if (SRem) { 2348 // We don't check hasOneUse to avoid increasing register pressure because 2349 // the value we use is the same value this instruction was already using. 2350 switch (SRem == BO0 ? 
ICmpInst::getSwappedPredicate(Pred) : Pred) { 2351 default: break; 2352 case ICmpInst::ICMP_EQ: 2353 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 2354 case ICmpInst::ICMP_NE: 2355 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 2356 case ICmpInst::ICMP_SGT: 2357 case ICmpInst::ICMP_SGE: 2358 return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1), 2359 Constant::getAllOnesValue(SRem->getType())); 2360 case ICmpInst::ICMP_SLT: 2361 case ICmpInst::ICMP_SLE: 2362 return new ICmpInst(ICmpInst::ICMP_SLT, SRem->getOperand(1), 2363 Constant::getNullValue(SRem->getType())); 2364 } 2365 } 2366 2367 if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() && 2368 BO0->hasOneUse() && BO1->hasOneUse() && 2369 BO0->getOperand(1) == BO1->getOperand(1)) { 2370 switch (BO0->getOpcode()) { 2371 default: break; 2372 case Instruction::Add: 2373 case Instruction::Sub: 2374 case Instruction::Xor: 2375 if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b 2376 return new ICmpInst(I.getPredicate(), BO0->getOperand(0), 2377 BO1->getOperand(0)); 2378 // icmp u/s (a ^ signbit), (b ^ signbit) --> icmp s/u a, b 2379 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO0->getOperand(1))) { 2380 if (CI->getValue().isSignBit()) { 2381 ICmpInst::Predicate Pred = I.isSigned() 2382 ? I.getUnsignedPredicate() 2383 : I.getSignedPredicate(); 2384 return new ICmpInst(Pred, BO0->getOperand(0), 2385 BO1->getOperand(0)); 2386 } 2387 2388 if (CI->isMaxValue(true)) { 2389 ICmpInst::Predicate Pred = I.isSigned() 2390 ? I.getUnsignedPredicate() 2391 : I.getSignedPredicate(); 2392 Pred = I.getSwappedPredicate(Pred); 2393 return new ICmpInst(Pred, BO0->getOperand(0), 2394 BO1->getOperand(0)); 2395 } 2396 } 2397 break; 2398 case Instruction::Mul: 2399 if (!I.isEquality()) 2400 break; 2401 2402 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO0->getOperand(1))) { 2403 // a * Cst icmp eq/ne b * Cst --> a & Mask icmp b & Mask 2404 // Mask = -1 >> count-trailing-zeros(Cst). 2405 if (!CI->isZero() && !CI->isOne()) { 2406 const APInt &AP = CI->getValue(); 2407 ConstantInt *Mask = ConstantInt::get(I.getContext(), 2408 APInt::getLowBitsSet(AP.getBitWidth(), 2409 AP.getBitWidth() - 2410 AP.countTrailingZeros())); 2411 Value *And1 = Builder->CreateAnd(BO0->getOperand(0), Mask); 2412 Value *And2 = Builder->CreateAnd(BO1->getOperand(0), Mask); 2413 return new ICmpInst(I.getPredicate(), And1, And2); 2414 } 2415 } 2416 break; 2417 case Instruction::UDiv: 2418 case Instruction::LShr: 2419 if (I.isSigned()) 2420 break; 2421 // fall-through 2422 case Instruction::SDiv: 2423 case Instruction::AShr: 2424 if (!BO0->isExact() || !BO1->isExact()) 2425 break; 2426 return new ICmpInst(I.getPredicate(), BO0->getOperand(0), 2427 BO1->getOperand(0)); 2428 case Instruction::Shl: { 2429 bool NUW = BO0->hasNoUnsignedWrap() && BO1->hasNoUnsignedWrap(); 2430 bool NSW = BO0->hasNoSignedWrap() && BO1->hasNoSignedWrap(); 2431 if (!NUW && !NSW) 2432 break; 2433 if (!NSW && I.isSigned()) 2434 break; 2435 return new ICmpInst(I.getPredicate(), BO0->getOperand(0), 2436 BO1->getOperand(0)); 2437 } 2438 } 2439 } 2440 } 2441 2442 { Value *A, *B; 2443 // ~x < ~y --> y < x 2444 // ~x < cst --> ~cst < x 2445 if (match(Op0, m_Not(m_Value(A)))) { 2446 if (match(Op1, m_Not(m_Value(B)))) 2447 return new ICmpInst(I.getPredicate(), B, A); 2448 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1)) 2449 return new ICmpInst(I.getPredicate(), ConstantExpr::getNot(RHSC), A); 2450 } 2451 2452 // (a+b) <u a --> llvm.uadd.with.overflow. 
2453 // (a+b) <u b --> llvm.uadd.with.overflow. 2454 if (I.getPredicate() == ICmpInst::ICMP_ULT && 2455 match(Op0, m_Add(m_Value(A), m_Value(B))) && 2456 (Op1 == A || Op1 == B)) 2457 if (Instruction *R = ProcessUAddIdiom(I, Op0, *this)) 2458 return R; 2459 2460 // a >u (a+b) --> llvm.uadd.with.overflow. 2461 // b >u (a+b) --> llvm.uadd.with.overflow. 2462 if (I.getPredicate() == ICmpInst::ICMP_UGT && 2463 match(Op1, m_Add(m_Value(A), m_Value(B))) && 2464 (Op0 == A || Op0 == B)) 2465 if (Instruction *R = ProcessUAddIdiom(I, Op1, *this)) 2466 return R; 2467 } 2468 2469 if (I.isEquality()) { 2470 Value *A, *B, *C, *D; 2471 2472 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) { 2473 if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0 2474 Value *OtherVal = A == Op1 ? B : A; 2475 return new ICmpInst(I.getPredicate(), OtherVal, 2476 Constant::getNullValue(A->getType())); 2477 } 2478 2479 if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) { 2480 // A^c1 == C^c2 --> A == C^(c1^c2) 2481 ConstantInt *C1, *C2; 2482 if (match(B, m_ConstantInt(C1)) && 2483 match(D, m_ConstantInt(C2)) && Op1->hasOneUse()) { 2484 Constant *NC = ConstantInt::get(I.getContext(), 2485 C1->getValue() ^ C2->getValue()); 2486 Value *Xor = Builder->CreateXor(C, NC, "tmp"); 2487 return new ICmpInst(I.getPredicate(), A, Xor); 2488 } 2489 2490 // A^B == A^D -> B == D 2491 if (A == C) return new ICmpInst(I.getPredicate(), B, D); 2492 if (A == D) return new ICmpInst(I.getPredicate(), B, C); 2493 if (B == C) return new ICmpInst(I.getPredicate(), A, D); 2494 if (B == D) return new ICmpInst(I.getPredicate(), A, C); 2495 } 2496 } 2497 2498 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && 2499 (A == Op0 || B == Op0)) { 2500 // A == (A^B) -> B == 0 2501 Value *OtherVal = A == Op0 ? B : A; 2502 return new ICmpInst(I.getPredicate(), OtherVal, 2503 Constant::getNullValue(A->getType())); 2504 } 2505 2506 // (X&Z) == (Y&Z) -> (X^Y) & Z == 0 2507 if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B)))) && 2508 match(Op1, m_OneUse(m_And(m_Value(C), m_Value(D))))) { 2509 Value *X = 0, *Y = 0, *Z = 0; 2510 2511 if (A == C) { 2512 X = B; Y = D; Z = A; 2513 } else if (A == D) { 2514 X = B; Y = C; Z = A; 2515 } else if (B == C) { 2516 X = A; Y = D; Z = B; 2517 } else if (B == D) { 2518 X = A; Y = C; Z = B; 2519 } 2520 2521 if (X) { // Build (X^Y) & Z 2522 Op1 = Builder->CreateXor(X, Y, "tmp"); 2523 Op1 = Builder->CreateAnd(Op1, Z, "tmp"); 2524 I.setOperand(0, Op1); 2525 I.setOperand(1, Constant::getNullValue(Op1->getType())); 2526 return &I; 2527 } 2528 } 2529 2530 // Transform "icmp eq (trunc (lshr(X, cst1)), cst" to 2531 // "icmp (and X, mask), cst" 2532 uint64_t ShAmt = 0; 2533 ConstantInt *Cst1; 2534 if (Op0->hasOneUse() && 2535 match(Op0, m_Trunc(m_OneUse(m_LShr(m_Value(A), 2536 m_ConstantInt(ShAmt))))) && 2537 match(Op1, m_ConstantInt(Cst1)) && 2538 // Only do this when A has multiple uses. This is most important to do 2539 // when it exposes other optimizations. 
2540 !A->hasOneUse()) { 2541 unsigned ASize =cast<IntegerType>(A->getType())->getPrimitiveSizeInBits(); 2542 2543 if (ShAmt < ASize) { 2544 APInt MaskV = 2545 APInt::getLowBitsSet(ASize, Op0->getType()->getPrimitiveSizeInBits()); 2546 MaskV <<= ShAmt; 2547 2548 APInt CmpV = Cst1->getValue().zext(ASize); 2549 CmpV <<= ShAmt; 2550 2551 Value *Mask = Builder->CreateAnd(A, Builder->getInt(MaskV)); 2552 return new ICmpInst(I.getPredicate(), Mask, Builder->getInt(CmpV)); 2553 } 2554 } 2555 } 2556 2557 { 2558 Value *X; ConstantInt *Cst; 2559 // icmp X+Cst, X 2560 if (match(Op0, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op1 == X) 2561 return FoldICmpAddOpCst(I, X, Cst, I.getPredicate(), Op0); 2562 2563 // icmp X, X+Cst 2564 if (match(Op1, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op0 == X) 2565 return FoldICmpAddOpCst(I, X, Cst, I.getSwappedPredicate(), Op1); 2566 } 2567 return Changed ? &I : 0; 2568 } 2569 2570 2571 2572 2573 2574 2575 /// FoldFCmp_IntToFP_Cst - Fold fcmp ([us]itofp x, cst) if possible. 2576 /// 2577 Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I, 2578 Instruction *LHSI, 2579 Constant *RHSC) { 2580 if (!isa<ConstantFP>(RHSC)) return 0; 2581 const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF(); 2582 2583 // Get the width of the mantissa. We don't want to hack on conversions that 2584 // might lose information from the integer, e.g. "i64 -> float" 2585 int MantissaWidth = LHSI->getType()->getFPMantissaWidth(); 2586 if (MantissaWidth == -1) return 0; // Unknown. 2587 2588 // Check to see that the input is converted from an integer type that is small 2589 // enough that preserves all bits. TODO: check here for "known" sign bits. 2590 // This would allow us to handle (fptosi (x >>s 62) to float) if x is i64 f.e. 2591 unsigned InputSize = LHSI->getOperand(0)->getType()->getScalarSizeInBits(); 2592 2593 // If this is a uitofp instruction, we need an extra bit to hold the sign. 2594 bool LHSUnsigned = isa<UIToFPInst>(LHSI); 2595 if (LHSUnsigned) 2596 ++InputSize; 2597 2598 // If the conversion would lose info, don't hack on this. 2599 if ((int)InputSize > MantissaWidth) 2600 return 0; 2601 2602 // Otherwise, we can potentially simplify the comparison. We know that it 2603 // will always come through as an integer value and we know the constant is 2604 // not a NAN (it would have been previously simplified). 2605 assert(!RHS.isNaN() && "NaN comparison not already folded!"); 2606 2607 ICmpInst::Predicate Pred; 2608 switch (I.getPredicate()) { 2609 default: llvm_unreachable("Unexpected predicate!"); 2610 case FCmpInst::FCMP_UEQ: 2611 case FCmpInst::FCMP_OEQ: 2612 Pred = ICmpInst::ICMP_EQ; 2613 break; 2614 case FCmpInst::FCMP_UGT: 2615 case FCmpInst::FCMP_OGT: 2616 Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT; 2617 break; 2618 case FCmpInst::FCMP_UGE: 2619 case FCmpInst::FCMP_OGE: 2620 Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE; 2621 break; 2622 case FCmpInst::FCMP_ULT: 2623 case FCmpInst::FCMP_OLT: 2624 Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT; 2625 break; 2626 case FCmpInst::FCMP_ULE: 2627 case FCmpInst::FCMP_OLE: 2628 Pred = LHSUnsigned ? 
ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE; 2629 break; 2630 case FCmpInst::FCMP_UNE: 2631 case FCmpInst::FCMP_ONE: 2632 Pred = ICmpInst::ICMP_NE; 2633 break; 2634 case FCmpInst::FCMP_ORD: 2635 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext())); 2636 case FCmpInst::FCMP_UNO: 2637 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext())); 2638 } 2639 2640 IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType()); 2641 2642 // Now we know that the APFloat is a normal number, zero or inf. 2643 2644 // See if the FP constant is too large for the integer. For example, 2645 // comparing an i8 to 300.0. 2646 unsigned IntWidth = IntTy->getScalarSizeInBits(); 2647 2648 if (!LHSUnsigned) { 2649 // If the RHS value is > SignedMax, fold the comparison. This handles +INF 2650 // and large values. 2651 APFloat SMax(RHS.getSemantics(), APFloat::fcZero, false); 2652 SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true, 2653 APFloat::rmNearestTiesToEven); 2654 if (SMax.compare(RHS) == APFloat::cmpLessThan) { // smax < 13123.0 2655 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT || 2656 Pred == ICmpInst::ICMP_SLE) 2657 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext())); 2658 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext())); 2659 } 2660 } else { 2661 // If the RHS value is > UnsignedMax, fold the comparison. This handles 2662 // +INF and large values. 2663 APFloat UMax(RHS.getSemantics(), APFloat::fcZero, false); 2664 UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false, 2665 APFloat::rmNearestTiesToEven); 2666 if (UMax.compare(RHS) == APFloat::cmpLessThan) { // umax < 13123.0 2667 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT || 2668 Pred == ICmpInst::ICMP_ULE) 2669 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext())); 2670 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext())); 2671 } 2672 } 2673 2674 if (!LHSUnsigned) { 2675 // See if the RHS value is < SignedMin. 2676 APFloat SMin(RHS.getSemantics(), APFloat::fcZero, false); 2677 SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true, 2678 APFloat::rmNearestTiesToEven); 2679 if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0 2680 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT || 2681 Pred == ICmpInst::ICMP_SGE) 2682 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext())); 2683 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext())); 2684 } 2685 } 2686 2687 // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or 2688 // [0, UMAX], but it may still be fractional. See if it is fractional by 2689 // casting the FP value to the integer value and back, checking for equality. 2690 // Don't do this for zero, because -0.0 is not fractional. 2691 Constant *RHSInt = LHSUnsigned 2692 ? ConstantExpr::getFPToUI(RHSC, IntTy) 2693 : ConstantExpr::getFPToSI(RHSC, IntTy); 2694 if (!RHS.isZero()) { 2695 bool Equal = LHSUnsigned 2696 ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC 2697 : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC; 2698 if (!Equal) { 2699 // If we had a comparison against a fractional value, we have to adjust 2700 // the compare predicate and sometimes the value. RHSC is rounded towards 2701 // zero at this point. 
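      // Worked example (illustrative): with a signed i32 operand and
      // RHSC == 4.4, RHSInt is 4 (rounded toward zero), so
      //   (float)x <  4.4   ==>   x <= 4     (SLT is relaxed to SLE)
      // while with RHSC == -4.4, RHSInt is -4 and SLT stays SLT:
      //   (float)x < -4.4   ==>   x < -4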
2702 switch (Pred) { 2703 default: llvm_unreachable("Unexpected integer comparison!"); 2704 case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true 2705 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext())); 2706 case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false 2707 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext())); 2708 case ICmpInst::ICMP_ULE: 2709 // (float)int <= 4.4 --> int <= 4 2710 // (float)int <= -4.4 --> false 2711 if (RHS.isNegative()) 2712 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext())); 2713 break; 2714 case ICmpInst::ICMP_SLE: 2715 // (float)int <= 4.4 --> int <= 4 2716 // (float)int <= -4.4 --> int < -4 2717 if (RHS.isNegative()) 2718 Pred = ICmpInst::ICMP_SLT; 2719 break; 2720 case ICmpInst::ICMP_ULT: 2721 // (float)int < -4.4 --> false 2722 // (float)int < 4.4 --> int <= 4 2723 if (RHS.isNegative()) 2724 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext())); 2725 Pred = ICmpInst::ICMP_ULE; 2726 break; 2727 case ICmpInst::ICMP_SLT: 2728 // (float)int < -4.4 --> int < -4 2729 // (float)int < 4.4 --> int <= 4 2730 if (!RHS.isNegative()) 2731 Pred = ICmpInst::ICMP_SLE; 2732 break; 2733 case ICmpInst::ICMP_UGT: 2734 // (float)int > 4.4 --> int > 4 2735 // (float)int > -4.4 --> true 2736 if (RHS.isNegative()) 2737 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext())); 2738 break; 2739 case ICmpInst::ICMP_SGT: 2740 // (float)int > 4.4 --> int > 4 2741 // (float)int > -4.4 --> int >= -4 2742 if (RHS.isNegative()) 2743 Pred = ICmpInst::ICMP_SGE; 2744 break; 2745 case ICmpInst::ICMP_UGE: 2746 // (float)int >= -4.4 --> true 2747 // (float)int >= 4.4 --> int > 4 2748 if (!RHS.isNegative()) 2749 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext())); 2750 Pred = ICmpInst::ICMP_UGT; 2751 break; 2752 case ICmpInst::ICMP_SGE: 2753 // (float)int >= -4.4 --> int >= -4 2754 // (float)int >= 4.4 --> int > 4 2755 if (!RHS.isNegative()) 2756 Pred = ICmpInst::ICMP_SGT; 2757 break; 2758 } 2759 } 2760 } 2761 2762 // Lower this FP comparison into an appropriate integer version of the 2763 // comparison. 2764 return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt); 2765 } 2766 2767 Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) { 2768 bool Changed = false; 2769 2770 /// Orders the operands of the compare so that they are listed from most 2771 /// complex to least complex. This puts constants before unary operators, 2772 /// before binary operators. 2773 if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) { 2774 I.swapOperands(); 2775 Changed = true; 2776 } 2777 2778 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 2779 2780 if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1, TD)) 2781 return ReplaceInstUsesWith(I, V); 2782 2783 // Simplify 'fcmp pred X, X' 2784 if (Op0 == Op1) { 2785 switch (I.getPredicate()) { 2786 default: llvm_unreachable("Unknown predicate!"); 2787 case FCmpInst::FCMP_UNO: // True if unordered: isnan(X) | isnan(Y) 2788 case FCmpInst::FCMP_ULT: // True if unordered or less than 2789 case FCmpInst::FCMP_UGT: // True if unordered or greater than 2790 case FCmpInst::FCMP_UNE: // True if unordered or not equal 2791 // Canonicalize these to be 'fcmp uno %X, 0.0'. 
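      // (With identical operands, each of these predicates is true exactly
      //  when %X is a NaN, which is also what 'fcmp uno %X, 0.0' tests,
      //  since 0.0 is never NaN.)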
2792 I.setPredicate(FCmpInst::FCMP_UNO); 2793 I.setOperand(1, Constant::getNullValue(Op0->getType())); 2794 return &I; 2795 2796 case FCmpInst::FCMP_ORD: // True if ordered (no nans) 2797 case FCmpInst::FCMP_OEQ: // True if ordered and equal 2798 case FCmpInst::FCMP_OGE: // True if ordered and greater than or equal 2799 case FCmpInst::FCMP_OLE: // True if ordered and less than or equal 2800 // Canonicalize these to be 'fcmp ord %X, 0.0'. 2801 I.setPredicate(FCmpInst::FCMP_ORD); 2802 I.setOperand(1, Constant::getNullValue(Op0->getType())); 2803 return &I; 2804 } 2805 } 2806 2807 // Handle fcmp with constant RHS 2808 if (Constant *RHSC = dyn_cast<Constant>(Op1)) { 2809 if (Instruction *LHSI = dyn_cast<Instruction>(Op0)) 2810 switch (LHSI->getOpcode()) { 2811 case Instruction::FPExt: { 2812 // fcmp (fpext x), C -> fcmp x, (fptrunc C) if fptrunc is lossless 2813 FPExtInst *LHSExt = cast<FPExtInst>(LHSI); 2814 ConstantFP *RHSF = dyn_cast<ConstantFP>(RHSC); 2815 if (!RHSF) 2816 break; 2817 2818 // We can't convert a PPC double double. 2819 if (RHSF->getType()->isPPC_FP128Ty()) 2820 break; 2821 2822 const fltSemantics *Sem; 2823 // FIXME: This shouldn't be here. 2824 if (LHSExt->getSrcTy()->isFloatTy()) 2825 Sem = &APFloat::IEEEsingle; 2826 else if (LHSExt->getSrcTy()->isDoubleTy()) 2827 Sem = &APFloat::IEEEdouble; 2828 else if (LHSExt->getSrcTy()->isFP128Ty()) 2829 Sem = &APFloat::IEEEquad; 2830 else if (LHSExt->getSrcTy()->isX86_FP80Ty()) 2831 Sem = &APFloat::x87DoubleExtended; 2832 else 2833 break; 2834 2835 bool Lossy; 2836 APFloat F = RHSF->getValueAPF(); 2837 F.convert(*Sem, APFloat::rmNearestTiesToEven, &Lossy); 2838 2839 // Avoid lossy conversions and denormals. 2840 if (!Lossy && 2841 F.compare(APFloat::getSmallestNormalized(*Sem)) != 2842 APFloat::cmpLessThan) 2843 return new FCmpInst(I.getPredicate(), LHSExt->getOperand(0), 2844 ConstantFP::get(RHSC->getContext(), F)); 2845 break; 2846 } 2847 case Instruction::PHI: 2848 // Only fold fcmp into the PHI if the phi and fcmp are in the same 2849 // block. If in the same block, we're encouraging jump threading. If 2850 // not, we are just pessimizing the code by making an i1 phi. 2851 if (LHSI->getParent() == I.getParent()) 2852 if (Instruction *NV = FoldOpIntoPhi(I)) 2853 return NV; 2854 break; 2855 case Instruction::SIToFP: 2856 case Instruction::UIToFP: 2857 if (Instruction *NV = FoldFCmp_IntToFP_Cst(I, LHSI, RHSC)) 2858 return NV; 2859 break; 2860 case Instruction::Select: { 2861 // If either operand of the select is a constant, we can fold the 2862 // comparison into the select arms, which will cause one to be 2863 // constant folded and the select turned into a bitwise or. 2864 Value *Op1 = 0, *Op2 = 0; 2865 if (LHSI->hasOneUse()) { 2866 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) { 2867 // Fold the known value into the constant operand. 2868 Op1 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC); 2869 // Insert a new FCmp of the other select operand. 2870 Op2 = Builder->CreateFCmp(I.getPredicate(), 2871 LHSI->getOperand(2), RHSC, I.getName()); 2872 } else if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) { 2873 // Fold the known value into the constant operand. 2874 Op2 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC); 2875 // Insert a new FCmp of the other select operand. 
2876 Op1 = Builder->CreateFCmp(I.getPredicate(), LHSI->getOperand(1), 2877 RHSC, I.getName()); 2878 } 2879 } 2880 2881 if (Op1) 2882 return SelectInst::Create(LHSI->getOperand(0), Op1, Op2); 2883 break; 2884 } 2885 case Instruction::FSub: { 2886 // fcmp pred (fneg x), C -> fcmp swap(pred) x, -C 2887 Value *Op; 2888 if (match(LHSI, m_FNeg(m_Value(Op)))) 2889 return new FCmpInst(I.getSwappedPredicate(), Op, 2890 ConstantExpr::getFNeg(RHSC)); 2891 break; 2892 } 2893 case Instruction::Load: 2894 if (GetElementPtrInst *GEP = 2895 dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) { 2896 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) 2897 if (GV->isConstant() && GV->hasDefinitiveInitializer() && 2898 !cast<LoadInst>(LHSI)->isVolatile()) 2899 if (Instruction *Res = FoldCmpLoadFromIndexedGlobal(GEP, GV, I)) 2900 return Res; 2901 } 2902 break; 2903 } 2904 } 2905 2906 // fcmp pred (fneg x), (fneg y) -> fcmp swap(pred) x, y 2907 Value *X, *Y; 2908 if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y)))) 2909 return new FCmpInst(I.getSwappedPredicate(), X, Y); 2910 2911 // fcmp (fpext x), (fpext y) -> fcmp x, y 2912 if (FPExtInst *LHSExt = dyn_cast<FPExtInst>(Op0)) 2913 if (FPExtInst *RHSExt = dyn_cast<FPExtInst>(Op1)) 2914 if (LHSExt->getSrcTy() == RHSExt->getSrcTy()) 2915 return new FCmpInst(I.getPredicate(), LHSExt->getOperand(0), 2916 RHSExt->getOperand(0)); 2917 2918 return Changed ? &I : 0; 2919 } 2920