//===- InstCombineAndOrXor.cpp --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitAnd, visitOr, and visitXor functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/Intrinsics.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;


/// AddOne - Add one to a ConstantInt.
static Constant *AddOne(Constant *C) {
  return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
}
/// SubOne - Subtract one from a ConstantInt.
static Constant *SubOne(ConstantInt *C) {
  return ConstantInt::get(C->getContext(), C->getValue()-1);
}

/// isFreeToInvert - Return true if the specified value is free to invert (apply
/// ~ to). This happens in cases where the ~ can be eliminated.
static inline bool isFreeToInvert(Value *V) {
  // ~(~(X)) -> X.
  if (BinaryOperator::isNot(V))
    return true;

  // Constants can be considered to be not'ed values.
  if (isa<ConstantInt>(V))
    return true;

  // Compares can be inverted if they have a single use.
  if (CmpInst *CI = dyn_cast<CmpInst>(V))
    return CI->hasOneUse();

  return false;
}

static inline Value *dyn_castNotVal(Value *V) {
  // If this is not(not(x)) don't return that this is a not: we want the two
  // not's to be folded first.
  if (BinaryOperator::isNot(V)) {
    Value *Operand = BinaryOperator::getNotArgument(V);
    if (!isFreeToInvert(Operand))
      return Operand;
  }

  // Constants can be considered to be not'ed values...
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantInt::get(C->getType(), ~C->getValue());
  return 0;
}


/// getICmpCode - Encode an icmp predicate into a three bit mask. These bits
/// are carefully arranged to allow folding of expressions such as:
///
///      (A < B) | (A > B) --> (A != B)
///
/// Note that this is only valid if the first and second predicates have the
/// same sign. It is illegal to do: (A u< B) | (A s> B)
///
/// Three bits are used to represent the condition, as follows:
///   0  A > B
///   1  A == B
///   2  A < B
///
/// <=>  Value  Definition
/// 000    0    Always false
/// 001    1    A >  B
/// 010    2    A == B
/// 011    3    A >= B
/// 100    4    A <  B
/// 101    5    A != B
/// 110    6    A <= B
/// 111    7    Always true
///
static unsigned getICmpCode(const ICmpInst *ICI) {
  switch (ICI->getPredicate()) {
    // False -> 0
  case ICmpInst::ICMP_UGT: return 1;  // 001
  case ICmpInst::ICMP_SGT: return 1;  // 001
  case ICmpInst::ICMP_EQ:  return 2;  // 010
  case ICmpInst::ICMP_UGE: return 3;  // 011
  case ICmpInst::ICMP_SGE: return 3;  // 011
  case ICmpInst::ICMP_ULT: return 4;  // 100
  case ICmpInst::ICMP_SLT: return 4;  // 100
  case ICmpInst::ICMP_NE:  return 5;  // 101
  case ICmpInst::ICMP_ULE: return 6;  // 110
  case ICmpInst::ICMP_SLE: return 6;  // 110
    // True -> 7
  default:
    llvm_unreachable("Invalid ICmp predicate!");
    return 0;
  }
}
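// For illustration, combining the codes above: getICmpCode(ICMP_SLT) == 4 and
// getICmpCode(ICMP_SGT) == 1, so (A s< B) | (A s> B) yields code 4 | 1 == 5,
// which getICmpValue (below) turns back into ICMP_NE, i.e. (A != B). In the
// same way, AND'ing the codes for SGE (3) and SLE (6) gives 2, i.e. A == B.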
/// getFCmpCode - Similar to getICmpCode but for FCmpInst. This encodes a fcmp
/// predicate into a three bit mask. It also returns whether it is an ordered
/// predicate by reference.
static unsigned getFCmpCode(FCmpInst::Predicate CC, bool &isOrdered) {
  isOrdered = false;
  switch (CC) {
  case FCmpInst::FCMP_ORD: isOrdered = true; return 0;  // 000
  case FCmpInst::FCMP_UNO:                   return 0;  // 000
  case FCmpInst::FCMP_OGT: isOrdered = true; return 1;  // 001
  case FCmpInst::FCMP_UGT:                   return 1;  // 001
  case FCmpInst::FCMP_OEQ: isOrdered = true; return 2;  // 010
  case FCmpInst::FCMP_UEQ:                   return 2;  // 010
  case FCmpInst::FCMP_OGE: isOrdered = true; return 3;  // 011
  case FCmpInst::FCMP_UGE:                   return 3;  // 011
  case FCmpInst::FCMP_OLT: isOrdered = true; return 4;  // 100
  case FCmpInst::FCMP_ULT:                   return 4;  // 100
  case FCmpInst::FCMP_ONE: isOrdered = true; return 5;  // 101
  case FCmpInst::FCMP_UNE:                   return 5;  // 101
  case FCmpInst::FCMP_OLE: isOrdered = true; return 6;  // 110
  case FCmpInst::FCMP_ULE:                   return 6;  // 110
    // True -> 7
  default:
    // Not expecting FCMP_FALSE and FCMP_TRUE.
    llvm_unreachable("Unexpected FCmp predicate!");
    return 0;
  }
}

/// getICmpValue - This is the complement of getICmpCode, which turns an
/// opcode and two operands into either a constant true or false, or a brand
/// new ICmp instruction. The sign is passed in to determine which kind
/// of predicate to use in the new icmp instruction.
static Value *getICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
                           InstCombiner::BuilderTy *Builder) {
  CmpInst::Predicate Pred;
  switch (Code) {
  default: assert(0 && "Illegal ICmp code!");
  case 0: // False.
    return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
  case 1: Pred = Sign ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
  case 2: Pred = ICmpInst::ICMP_EQ; break;
  case 3: Pred = Sign ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
  case 4: Pred = Sign ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
  case 5: Pred = ICmpInst::ICMP_NE; break;
  case 6: Pred = Sign ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
  case 7: // True.
    return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1);
  }
  return Builder->CreateICmp(Pred, LHS, RHS);
}

/// getFCmpValue - This is the complement of getFCmpCode, which turns an
/// opcode and two operands into either a constant or an FCmp instruction.
/// isordered is passed in to determine which kind of predicate to use in the
/// new fcmp instruction.
static Value *getFCmpValue(bool isordered, unsigned code,
                           Value *LHS, Value *RHS,
                           InstCombiner::BuilderTy *Builder) {
  CmpInst::Predicate Pred;
  switch (code) {
  default: assert(0 && "Illegal FCmp code!");
  case 0: Pred = isordered ? FCmpInst::FCMP_ORD : FCmpInst::FCMP_UNO; break;
  case 1: Pred = isordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT; break;
  case 2: Pred = isordered ? FCmpInst::FCMP_OEQ : FCmpInst::FCMP_UEQ; break;
  case 3: Pred = isordered ? FCmpInst::FCMP_OGE : FCmpInst::FCMP_UGE; break;
  case 4: Pred = isordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT; break;
  case 5: Pred = isordered ? FCmpInst::FCMP_ONE : FCmpInst::FCMP_UNE; break;
  case 6: Pred = isordered ? FCmpInst::FCMP_OLE : FCmpInst::FCMP_ULE; break;
  case 7:
    if (!isordered) return ConstantInt::getTrue(LHS->getContext());
    Pred = FCmpInst::FCMP_ORD; break;
  }
  return Builder->CreateFCmp(Pred, LHS, RHS);
}
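// For illustration: getFCmpCode(FCMP_OLT, isOrdered) returns 4 and sets
// isOrdered; FCMP_ULT also returns 4 but leaves isOrdered false. Rebuilding
// code 4 with getFCmpValue(true, 4, L, R, Builder) therefore produces
// "fcmp olt L, R", while passing isordered=false produces "fcmp ult L, R".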
/// PredicatesFoldable - Return true if both predicates match sign or if at
/// least one of them is an equality comparison (which is signless).
static bool PredicatesFoldable(ICmpInst::Predicate p1, ICmpInst::Predicate p2) {
  return (CmpInst::isSigned(p1) == CmpInst::isSigned(p2)) ||
         (CmpInst::isSigned(p1) && ICmpInst::isEquality(p2)) ||
         (CmpInst::isSigned(p2) && ICmpInst::isEquality(p1));
}
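// For illustration: ICMP_SLT and ICMP_SGT are foldable (both signed), and so
// are ICMP_ULT and ICMP_EQ (equality is signless), but ICMP_ULT and ICMP_SGT
// are not, which matches the sign restriction documented on getICmpCode.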
// OptAndOp - This handles expressions of the form ((val OP C1) & C2), where
// the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'. Op is
// guaranteed to be a binary operator.
Instruction *InstCombiner::OptAndOp(Instruction *Op,
                                    ConstantInt *OpRHS,
                                    ConstantInt *AndRHS,
                                    BinaryOperator &TheAnd) {
  Value *X = Op->getOperand(0);
  Constant *Together = 0;
  if (!Op->isShift())
    Together = ConstantExpr::getAnd(AndRHS, OpRHS);

  switch (Op->getOpcode()) {
  case Instruction::Xor:
    if (Op->hasOneUse()) {
      // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
      Value *And = Builder->CreateAnd(X, AndRHS);
      And->takeName(Op);
      return BinaryOperator::CreateXor(And, Together);
    }
    break;
  case Instruction::Or:
    if (Op->hasOneUse()){
      if (Together != OpRHS) {
        // (X | C1) & C2 --> (X | (C1&C2)) & C2
        Value *Or = Builder->CreateOr(X, Together);
        Or->takeName(Op);
        return BinaryOperator::CreateAnd(Or, AndRHS);
      }

      ConstantInt *TogetherCI = dyn_cast<ConstantInt>(Together);
      if (TogetherCI && !TogetherCI->isZero()){
        // (X | C1) & C2 --> (X & (C2^(C1&C2))) | C1
        // NOTE: This reduces the number of bits set in the & mask, which
        // can expose opportunities for store narrowing.
        Together = ConstantExpr::getXor(AndRHS, Together);
        Value *And = Builder->CreateAnd(X, Together);
        And->takeName(Op);
        return BinaryOperator::CreateOr(And, OpRHS);
      }
    }

    break;
  case Instruction::Add:
    if (Op->hasOneUse()) {
      // Adding a one to a single bit bit-field should be turned into an XOR
      // of the bit. First thing to check is to see if this AND is with a
      // single bit constant.
      const APInt &AndRHSV = cast<ConstantInt>(AndRHS)->getValue();

      // If there is only one bit set.
      if (AndRHSV.isPowerOf2()) {
        // Ok, at this point, we know that we are masking the result of the
        // ADD down to exactly one bit. If the constant we are adding has
        // no bits set below this bit, then we can eliminate the ADD.
        const APInt& AddRHS = cast<ConstantInt>(OpRHS)->getValue();

        // Check to see if any bits below the one bit set in AndRHSV are set.
        if ((AddRHS & (AndRHSV-1)) == 0) {
          // If not, the only thing that can affect the output of the AND is
          // the bit specified by AndRHSV. If that bit is set, the effect of
          // the XOR is to toggle the bit. If it is clear, then the ADD has
          // no effect.
          if ((AddRHS & AndRHSV) == 0) { // Bit is not set, noop
            TheAnd.setOperand(0, X);
            return &TheAnd;
          } else {
            // Pull the XOR out of the AND.
            Value *NewAnd = Builder->CreateAnd(X, AndRHS);
            NewAnd->takeName(Op);
            return BinaryOperator::CreateXor(NewAnd, AndRHS);
          }
        }
      }
    }
    break;

  case Instruction::Shl: {
    // We know that the AND will not produce any of the bits shifted in, so if
    // the anded constant includes them, clear them now!
    //
    uint32_t BitWidth = AndRHS->getType()->getBitWidth();
    uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
    APInt ShlMask(APInt::getHighBitsSet(BitWidth, BitWidth-OpRHSVal));
    ConstantInt *CI = ConstantInt::get(AndRHS->getContext(),
                                       AndRHS->getValue() & ShlMask);

    if (CI->getValue() == ShlMask)
      // Masking out bits that the shift already masks.
      return ReplaceInstUsesWith(TheAnd, Op);   // No need for the and.

    if (CI != AndRHS) {                         // Reducing bits set in and.
      TheAnd.setOperand(1, CI);
      return &TheAnd;
    }
    break;
  }
  case Instruction::LShr: {
    // We know that the AND will not produce any of the bits shifted in, so if
    // the anded constant includes them, clear them now! This only applies to
    // unsigned shifts, because a signed shr may bring in set bits!
    //
    uint32_t BitWidth = AndRHS->getType()->getBitWidth();
    uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
    APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
    ConstantInt *CI = ConstantInt::get(Op->getContext(),
                                       AndRHS->getValue() & ShrMask);

    if (CI->getValue() == ShrMask)
      // Masking out bits that the shift already masks.
      return ReplaceInstUsesWith(TheAnd, Op);

    if (CI != AndRHS) {
      TheAnd.setOperand(1, CI);  // Reduce bits set in and cst.
      return &TheAnd;
    }
    break;
  }
  case Instruction::AShr:
    // Signed shr.
    // See if this is shifting in some sign extension, then masking it out
    // with an and.
    if (Op->hasOneUse()) {
      uint32_t BitWidth = AndRHS->getType()->getBitWidth();
      uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
      APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
      Constant *C = ConstantInt::get(Op->getContext(),
                                     AndRHS->getValue() & ShrMask);
      if (C == AndRHS) {          // Masking out bits shifted in.
        // (Val ashr C1) & C2 -> (Val lshr C1) & C2
        // Make the argument unsigned.
        Value *ShVal = Op->getOperand(0);
        ShVal = Builder->CreateLShr(ShVal, OpRHS, Op->getName());
        return BinaryOperator::CreateAnd(ShVal, AndRHS, TheAnd.getName());
      }
    }
    break;
  }
  return 0;
}
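// Concrete examples of the folds above, for illustration (i8 operands):
//   ((X | 1) & 0xFF) has Together == OpRHS == 1, so it becomes
//   ((X & 0xFE) | 1), shrinking the AND mask as noted for store narrowing;
//   ((X ashr 4) & 0x0F) keeps only bits below the shifted-in sign bits, so it
//   is rewritten as ((X lshr 4) & 0x0F).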
/// InsertRangeTest - Emit a computation of: (V >= Lo && V < Hi) if Inside is
/// true, otherwise (V < Lo || V >= Hi). In practice, we emit the more efficient
/// (V-Lo) <u Hi-Lo. This method expects that Lo <= Hi. isSigned indicates
/// whether to treat V, Lo, and Hi as signed or not.
Value *InstCombiner::InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
                                     bool isSigned, bool Inside) {
  assert(cast<ConstantInt>(ConstantExpr::getICmp((isSigned ?
            ICmpInst::ICMP_SLE:ICmpInst::ICMP_ULE), Lo, Hi))->getZExtValue() &&
         "Lo is not <= Hi in range emission code!");

  if (Inside) {
    if (Lo == Hi)  // Trivially false.
      return ConstantInt::getFalse(V->getContext());

    // V >= Min && V < Hi --> V < Hi
    if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
      ICmpInst::Predicate pred = (isSigned ?
        ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT);
      return Builder->CreateICmp(pred, V, Hi);
    }

    // Emit V-Lo <u Hi-Lo
    Constant *NegLo = ConstantExpr::getNeg(Lo);
    Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
    Constant *UpperBound = ConstantExpr::getAdd(NegLo, Hi);
    return Builder->CreateICmpULT(Add, UpperBound);
  }

  if (Lo == Hi)  // Trivially true.
    return ConstantInt::getTrue(V->getContext());

  // V < Min || V >= Hi -> V > Hi-1
  Hi = SubOne(cast<ConstantInt>(Hi));
  if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
    ICmpInst::Predicate pred = (isSigned ?
      ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT);
    return Builder->CreateICmp(pred, V, Hi);
  }

  // Emit V-Lo >u Hi-1-Lo
  // Note that Hi has already had one subtracted from it, above.
  ConstantInt *NegLo = cast<ConstantInt>(ConstantExpr::getNeg(Lo));
  Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
  Constant *LowerBound = ConstantExpr::getAdd(NegLo, Hi);
  return Builder->CreateICmpUGT(Add, LowerBound);
}

// isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s with
// any number of 0s on either side. The 1s are allowed to wrap from LSB to
// MSB, so 0x0000FFF0, 0x0000FFFF, and 0xFF0000FF are all runs. 0x0F0F0000 is
// not, since all 1s are not contiguous.
static bool isRunOfOnes(ConstantInt *Val, uint32_t &MB, uint32_t &ME) {
  const APInt& V = Val->getValue();
  uint32_t BitWidth = Val->getType()->getBitWidth();
  if (!APIntOps::isShiftedMask(BitWidth, V)) return false;

  // look for the first zero bit after the run of ones
  MB = BitWidth - ((V - 1) ^ V).countLeadingZeros();
  // look for the first non-zero bit
  ME = V.getActiveBits();
  return true;
}
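// Worked example of the MB/ME convention, for illustration: for a 32-bit Val
// of 0x00000FF0 the run occupies bits 4..11, so isRunOfOnes returns MB == 5
// and ME == 12, i.e. the 1-based positions of the lowest and highest set bits.
// FoldLogicalPlusAnd below relies on exactly this convention.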
/// FoldLogicalPlusAnd - This is part of an expression (LHS +/- RHS) & Mask,
/// where isSub determines whether the operator is a sub. If we can fold one of
/// the following xforms:
///
/// ((A & N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == Mask
/// ((A | N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
/// ((A ^ N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
///
/// return (A +/- B).
///
Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
                                        ConstantInt *Mask, bool isSub,
                                        Instruction &I) {
  Instruction *LHSI = dyn_cast<Instruction>(LHS);
  if (!LHSI || LHSI->getNumOperands() != 2 ||
      !isa<ConstantInt>(LHSI->getOperand(1))) return 0;

  ConstantInt *N = cast<ConstantInt>(LHSI->getOperand(1));

  switch (LHSI->getOpcode()) {
  default: return 0;
  case Instruction::And:
    if (ConstantExpr::getAnd(N, Mask) == Mask) {
      // If the AndRHS is a power of two minus one (0+1+), this is simple.
      if ((Mask->getValue().countLeadingZeros() +
           Mask->getValue().countPopulation()) ==
          Mask->getValue().getBitWidth())
        break;

      // Otherwise, if Mask is 0+1+0+, and if B is known to have the low 0+
      // part, we don't need any explicit masks to take them out of A. If that
      // is all N is, ignore it.
      uint32_t MB = 0, ME = 0;
      if (isRunOfOnes(Mask, MB, ME)) {  // begin/end bit of run, inclusive
        uint32_t BitWidth = cast<IntegerType>(RHS->getType())->getBitWidth();
        APInt Mask(APInt::getLowBitsSet(BitWidth, MB-1));
        if (MaskedValueIsZero(RHS, Mask))
          break;
      }
    }
    return 0;
  case Instruction::Or:
  case Instruction::Xor:
    // If the AndRHS is a power of two minus one (0+1+), and N&Mask == 0
    if ((Mask->getValue().countLeadingZeros() +
         Mask->getValue().countPopulation()) == Mask->getValue().getBitWidth()
        && ConstantExpr::getAnd(N, Mask)->isNullValue())
      break;
    return 0;
  }

  if (isSub)
    return Builder->CreateSub(LHSI->getOperand(0), RHS, "fold");
  return Builder->CreateAdd(LHSI->getOperand(0), RHS, "fold");
}
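// Example of the xforms listed above, for illustration, with a 16-bit Mask of
// 0x00FF (a 0+1+ pattern):
//   ((A & 0x0FFF) + B) & 0x00FF  satisfies N & Mask == Mask and becomes
//   (A + B) & 0x00FF;
//   ((A | 0xFF00) - B) & 0x00FF  satisfies N & Mask == 0 and becomes
//   (A - B) & 0x00FF.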
/// enum for classifying (icmp eq (A & B), C) and (icmp ne (A & B), C)
/// One of A and B is considered the mask, the other the value. This is
/// described as the "AMask" or "BMask" part of the enum. If the enum
/// contains only "Mask", then both A and B can be considered masks.
/// If A is the mask, then it was proven that (A & C) == C. This
/// is trivial if C == A, or C == 0. If both A and C are constants, this
/// proof is also easy.
/// For the following explanations we assume that A is the mask.
/// The part "AllOnes" declares that the comparison is true only
/// if (A & B) == A, or all bits of A are set in B.
///   Example: (icmp eq (A & 3), 3) -> FoldMskICmp_AMask_AllOnes
/// The part "AllZeroes" declares that the comparison is true only
/// if (A & B) == 0, or all bits of A are cleared in B.
///   Example: (icmp eq (A & 3), 0) -> FoldMskICmp_Mask_AllZeroes
/// The part "Mixed" declares that (A & B) == C and C might or might not
/// contain any number of one bits and zero bits.
///   Example: (icmp eq (A & 3), 1) -> FoldMskICmp_AMask_Mixed
/// The part "Not" means that in the above descriptions "==" should be replaced
/// by "!=".
///   Example: (icmp ne (A & 3), 3) -> FoldMskICmp_AMask_NotAllOnes
/// If the mask A contains a single bit, then the following is equivalent:
///    (icmp eq (A & B), A) equals (icmp ne (A & B), 0)
///    (icmp ne (A & B), A) equals (icmp eq (A & B), 0)
enum MaskedICmpType {
  FoldMskICmp_AMask_AllOnes     =   1,
  FoldMskICmp_AMask_NotAllOnes  =   2,
  FoldMskICmp_BMask_AllOnes     =   4,
  FoldMskICmp_BMask_NotAllOnes  =   8,
  FoldMskICmp_Mask_AllZeroes    =  16,
  FoldMskICmp_Mask_NotAllZeroes =  32,
  FoldMskICmp_AMask_Mixed       =  64,
  FoldMskICmp_AMask_NotMixed    = 128,
  FoldMskICmp_BMask_Mixed       = 256,
  FoldMskICmp_BMask_NotMixed    = 512
};

/// return the set of pattern classes (from MaskedICmpType)
/// that (icmp SCC (A & B), C) satisfies
static unsigned getTypeOfMaskedICmp(Value* A, Value* B, Value* C,
                                    ICmpInst::Predicate SCC)
{
  ConstantInt *ACst = dyn_cast<ConstantInt>(A);
  ConstantInt *BCst = dyn_cast<ConstantInt>(B);
  ConstantInt *CCst = dyn_cast<ConstantInt>(C);
  bool icmp_eq = (SCC == ICmpInst::ICMP_EQ);
  bool icmp_abit = (ACst != 0 && !ACst->isZero() &&
                    ACst->getValue().isPowerOf2());
  bool icmp_bbit = (BCst != 0 && !BCst->isZero() &&
                    BCst->getValue().isPowerOf2());
  unsigned result = 0;
  if (CCst != 0 && CCst->isZero()) {
    // if C is zero, then both A and B qualify as mask
    result |= (icmp_eq ? (FoldMskICmp_Mask_AllZeroes |
                          FoldMskICmp_Mask_AllZeroes |
                          FoldMskICmp_AMask_Mixed |
                          FoldMskICmp_BMask_Mixed)
                       : (FoldMskICmp_Mask_NotAllZeroes |
                          FoldMskICmp_Mask_NotAllZeroes |
                          FoldMskICmp_AMask_NotMixed |
                          FoldMskICmp_BMask_NotMixed));
    if (icmp_abit)
      result |= (icmp_eq ? (FoldMskICmp_AMask_NotAllOnes |
                            FoldMskICmp_AMask_NotMixed)
                         : (FoldMskICmp_AMask_AllOnes |
                            FoldMskICmp_AMask_Mixed));
    if (icmp_bbit)
      result |= (icmp_eq ? (FoldMskICmp_BMask_NotAllOnes |
                            FoldMskICmp_BMask_NotMixed)
                         : (FoldMskICmp_BMask_AllOnes |
                            FoldMskICmp_BMask_Mixed));
    return result;
  }
  if (A == C) {
    result |= (icmp_eq ? (FoldMskICmp_AMask_AllOnes |
                          FoldMskICmp_AMask_Mixed)
                       : (FoldMskICmp_AMask_NotAllOnes |
                          FoldMskICmp_AMask_NotMixed));
    if (icmp_abit)
      result |= (icmp_eq ? (FoldMskICmp_Mask_NotAllZeroes |
                            FoldMskICmp_AMask_NotMixed)
                         : (FoldMskICmp_Mask_AllZeroes |
                            FoldMskICmp_AMask_Mixed));
  } else if (ACst != 0 && CCst != 0 &&
             ConstantExpr::getAnd(ACst, CCst) == CCst) {
    result |= (icmp_eq ? FoldMskICmp_AMask_Mixed
                       : FoldMskICmp_AMask_NotMixed);
  }
  if (B == C) {
    result |= (icmp_eq ? (FoldMskICmp_BMask_AllOnes |
                          FoldMskICmp_BMask_Mixed)
                       : (FoldMskICmp_BMask_NotAllOnes |
                          FoldMskICmp_BMask_NotMixed));
    if (icmp_bbit)
      result |= (icmp_eq ? (FoldMskICmp_Mask_NotAllZeroes |
                            FoldMskICmp_BMask_NotMixed)
                         : (FoldMskICmp_Mask_AllZeroes |
                            FoldMskICmp_BMask_Mixed));
  } else if (BCst != 0 && CCst != 0 &&
             ConstantExpr::getAnd(BCst, CCst) == CCst) {
    result |= (icmp_eq ? FoldMskICmp_BMask_Mixed
                       : FoldMskICmp_BMask_NotMixed);
  }
  return result;
}
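// A concrete classification, for illustration: for (icmp eq (X & 8), 0), C is
// zero, so the result includes FoldMskICmp_Mask_AllZeroes, and because 8 is a
// single bit the BMask_NotAllOnes and BMask_NotMixed bits are added as well.
// Two such tests against the same X are what foldLogOpOfMaskedICmps below
// merges into a single masked compare, e.g.
//   (icmp eq (X & 8), 0) & (icmp eq (X & 4), 0) -> (icmp eq (X & 12), 0).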
/// foldLogOpOfMaskedICmpsHelper:
/// handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
/// return the set of pattern classes (from MaskedICmpType)
/// that both LHS and RHS satisfy
static unsigned foldLogOpOfMaskedICmpsHelper(Value*& A,
                                             Value*& B, Value*& C,
                                             Value*& D, Value*& E,
                                             ICmpInst *LHS, ICmpInst *RHS) {
  ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();
  if (LHSCC != ICmpInst::ICMP_EQ && LHSCC != ICmpInst::ICMP_NE) return 0;
  if (RHSCC != ICmpInst::ICMP_EQ && RHSCC != ICmpInst::ICMP_NE) return 0;
  if (LHS->getOperand(0)->getType() != RHS->getOperand(0)->getType()) return 0;
  // vectors are not (yet?) supported
  if (LHS->getOperand(0)->getType()->isVectorTy()) return 0;

  // Here comes the tricky part:
  // LHS might be of the form L11 & L12 == X, X == L21 & L22,
  // and L11 & L12 == L21 & L22. The same goes for RHS.
  // Now we must find those components L** and R** that are equal, so
  // that we can extract the parameters A, B, C, D, and E for the canonical
  // pattern above.
  Value *L1 = LHS->getOperand(0);
  Value *L2 = LHS->getOperand(1);
  Value *L11,*L12,*L21,*L22;
  if (match(L1, m_And(m_Value(L11), m_Value(L12)))) {
    if (!match(L2, m_And(m_Value(L21), m_Value(L22))))
      L21 = L22 = 0;
  } else {
    if (!match(L2, m_And(m_Value(L11), m_Value(L12))))
      return 0;
    std::swap(L1, L2);
    L21 = L22 = 0;
  }

  Value *R1 = RHS->getOperand(0);
  Value *R2 = RHS->getOperand(1);
  Value *R11,*R12;
  bool ok = false;
  if (match(R1, m_And(m_Value(R11), m_Value(R12)))) {
    if (R11 != 0 && (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22)) {
      A = R11; D = R12; E = R2; ok = true;
    } else if (R12 != 0 &&
               (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22)) {
      A = R12; D = R11; E = R2; ok = true;
    }
  }
  if (!ok && match(R2, m_And(m_Value(R11), m_Value(R12)))) {
    if (R11 != 0 && (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22)) {
      A = R11; D = R12; E = R1; ok = true;
    } else if (R12 != 0 &&
               (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22)) {
      A = R12; D = R11; E = R1; ok = true;
    } else {
      return 0;
    }
  }
  if (!ok)
    return 0;

  if (L11 == A) {
    B = L12; C = L2;
  } else if (L12 == A) {
    B = L11; C = L2;
  } else if (L21 == A) {
    B = L22; C = L1;
  } else if (L22 == A) {
    B = L21; C = L1;
  }

  unsigned left_type = getTypeOfMaskedICmp(A, B, C, LHSCC);
  unsigned right_type = getTypeOfMaskedICmp(A, D, E, RHSCC);
  return left_type & right_type;
}

/// foldLogOpOfMaskedICmps:
/// try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
/// into a single (icmp(A & X) ==/!= Y)
static Value* foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS,
                                     ICmpInst::Predicate NEWCC,
                                     llvm::InstCombiner::BuilderTy* Builder) {
  Value *A = 0, *B = 0, *C = 0, *D = 0, *E = 0;
  unsigned mask = foldLogOpOfMaskedICmpsHelper(A, B, C, D, E, LHS, RHS);
  if (mask == 0) return 0;

  if (NEWCC == ICmpInst::ICMP_NE)
    mask >>= 1; // treat "Not"-states as normal states

  if (mask & FoldMskICmp_Mask_AllZeroes) {
    // (icmp eq (A & B), 0) & (icmp eq (A & D), 0)
    // -> (icmp eq (A & (B|D)), 0)
    Value* newOr = Builder->CreateOr(B, D);
    Value* newAnd = Builder->CreateAnd(A, newOr);
    // we can't use C as zero, because we might actually handle
    // (icmp ne (A & B), B) & (icmp ne (A & D), D)
    // with B and D having a single bit set
    Value* zero = Constant::getNullValue(A->getType());
    return Builder->CreateICmp(NEWCC, newAnd, zero);
  } else if (mask & FoldMskICmp_BMask_AllOnes) {
    // (icmp eq (A & B), B) & (icmp eq (A & D), D)
    // -> (icmp eq (A & (B|D)), (B|D))
    Value* newOr = Builder->CreateOr(B, D);
    Value* newAnd = Builder->CreateAnd(A, newOr);
    return Builder->CreateICmp(NEWCC, newAnd, newOr);
  } else if (mask & FoldMskICmp_AMask_AllOnes) {
    // (icmp eq (A & B), A) & (icmp eq (A & D), A)
    // -> (icmp eq (A & (B&D)), A)
    Value* newAnd1 = Builder->CreateAnd(B, D);
    Value* newAnd = Builder->CreateAnd(A, newAnd1);
    return Builder->CreateICmp(NEWCC, newAnd, A);
  } else if (mask & FoldMskICmp_BMask_Mixed) {
    // (icmp eq (A & B), C) & (icmp eq (A & D), E)
    // We already know that B & C == C && D & E == E.
    // If we can prove that (B & D) & (C ^ E) == 0, that is, the bits of
    // C and E, which are shared by both the mask B and the mask D, don't
    // contradict, then we can transform to
    // -> (icmp eq (A & (B|D)), (C|E))
    // Currently, we only handle the case of B, C, D, and E being constant.
    ConstantInt *BCst = dyn_cast<ConstantInt>(B);
    if (BCst == 0) return 0;
    ConstantInt *DCst = dyn_cast<ConstantInt>(D);
    if (DCst == 0) return 0;
    // we can't simply use C and E, because we might actually handle
    // (icmp ne (A & B), B) & (icmp eq (A & D), D)
    // with B and D having a single bit set

    ConstantInt *CCst = dyn_cast<ConstantInt>(C);
    if (CCst == 0) return 0;
    if (LHS->getPredicate() != NEWCC)
      CCst = dyn_cast<ConstantInt>( ConstantExpr::getXor(BCst, CCst) );
    ConstantInt *ECst = dyn_cast<ConstantInt>(E);
    if (ECst == 0) return 0;
    if (RHS->getPredicate() != NEWCC)
      ECst = dyn_cast<ConstantInt>( ConstantExpr::getXor(DCst, ECst) );
    ConstantInt* MCst = dyn_cast<ConstantInt>(
      ConstantExpr::getAnd(ConstantExpr::getAnd(BCst, DCst),
                           ConstantExpr::getXor(CCst, ECst)) );
    // if there is a conflict we should actually return false for the
    // whole construct
    if (!MCst->isZero())
      return 0;
    Value *newOr1 = Builder->CreateOr(B, D);
    Value *newOr2 = ConstantExpr::getOr(CCst, ECst);
    Value *newAnd = Builder->CreateAnd(A, newOr1);
    return Builder->CreateICmp(NEWCC, newAnd, newOr2);
  }
  return 0;
}

/// FoldAndOfICmps - Fold (icmp)&(icmp) if possible.
Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
  ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();

  // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
  if (PredicatesFoldable(LHSCC, RHSCC)) {
    if (LHS->getOperand(0) == RHS->getOperand(1) &&
        LHS->getOperand(1) == RHS->getOperand(0))
      LHS->swapOperands();
    if (LHS->getOperand(0) == RHS->getOperand(0) &&
        LHS->getOperand(1) == RHS->getOperand(1)) {
      Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
      unsigned Code = getICmpCode(LHS) & getICmpCode(RHS);
      bool isSigned = LHS->isSigned() || RHS->isSigned();
      return getICmpValue(isSigned, Code, Op0, Op1, Builder);
    }
  }

  // handle (roughly): (icmp eq (A & B), C) & (icmp eq (A & D), E)
  if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, ICmpInst::ICMP_EQ, Builder))
    return V;

  // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
  Value *Val = LHS->getOperand(0), *Val2 = RHS->getOperand(0);
  ConstantInt *LHSCst = dyn_cast<ConstantInt>(LHS->getOperand(1));
  ConstantInt *RHSCst = dyn_cast<ConstantInt>(RHS->getOperand(1));
  if (LHSCst == 0 || RHSCst == 0) return 0;

  if (LHSCst == RHSCst && LHSCC == RHSCC) {
    // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
    // where C is a power of 2
    if (LHSCC == ICmpInst::ICMP_ULT &&
        LHSCst->getValue().isPowerOf2()) {
      Value *NewOr = Builder->CreateOr(Val, Val2);
      return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
    }

    // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
    if (LHSCC == ICmpInst::ICMP_EQ && LHSCst->isZero()) {
      Value *NewOr = Builder->CreateOr(Val, Val2);
      return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
    }

    // (icmp slt A, 0) & (icmp slt B, 0) --> (icmp slt (A&B), 0)
    if (LHSCC == ICmpInst::ICMP_SLT && LHSCst->isZero()) {
      Value *NewAnd = Builder->CreateAnd(Val, Val2);
      return Builder->CreateICmp(LHSCC, NewAnd, LHSCst);
    }

    // (icmp sgt A, -1) & (icmp sgt B, -1) --> (icmp sgt (A|B), -1)
    if (LHSCC == ICmpInst::ICMP_SGT && LHSCst->isAllOnesValue()) {
      Value *NewOr = Builder->CreateOr(Val, Val2);
      return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
    }
  }

  // (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2
  // where CMAX is the all ones value for the truncated type,
  // iff the lower bits of C2 and CA are zero.
  if (LHSCC == RHSCC && ICmpInst::isEquality(LHSCC) &&
      LHS->hasOneUse() && RHS->hasOneUse()) {
    Value *V;
    ConstantInt *AndCst, *SmallCst = 0, *BigCst = 0;

    // (trunc x) == C1 & (and x, CA) == C2
    if (match(Val2, m_Trunc(m_Value(V))) &&
        match(Val, m_And(m_Specific(V), m_ConstantInt(AndCst)))) {
      SmallCst = RHSCst;
      BigCst = LHSCst;
    }
    // (and x, CA) == C2 & (trunc x) == C1
    else if (match(Val, m_Trunc(m_Value(V))) &&
             match(Val2, m_And(m_Specific(V), m_ConstantInt(AndCst)))) {
      SmallCst = LHSCst;
      BigCst = RHSCst;
    }

    if (SmallCst && BigCst) {
      unsigned BigBitSize = BigCst->getType()->getBitWidth();
      unsigned SmallBitSize = SmallCst->getType()->getBitWidth();

      // Check that the low bits are zero.
      APInt Low = APInt::getLowBitsSet(BigBitSize, SmallBitSize);
      if ((Low & AndCst->getValue()) == 0 && (Low & BigCst->getValue()) == 0) {
        Value *NewAnd = Builder->CreateAnd(V, Low | AndCst->getValue());
        APInt N = SmallCst->getValue().zext(BigBitSize) | BigCst->getValue();
        Value *NewVal = ConstantInt::get(AndCst->getType()->getContext(), N);
        return Builder->CreateICmp(LHSCC, NewAnd, NewVal);
      }
    }
  }

  // From here on, we only handle:
  // (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
  if (Val != Val2) return 0;

  // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
  if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
      RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
      LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
      RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
    return 0;

  // Make a constant range that's the intersection of the two icmp ranges.
  // If the intersection is empty, we know that the result is false.
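  // For example (illustrative): for (icmp ult X, 5) & (icmp ugt X, 10) the
  // two regions are [0, 5) and [11, UMAX], which do not intersect, so the
  // whole expression is folded to false here.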
  ConstantRange LHSRange =
    ConstantRange::makeICmpRegion(LHSCC, LHSCst->getValue());
  ConstantRange RHSRange =
    ConstantRange::makeICmpRegion(RHSCC, RHSCst->getValue());

  if (LHSRange.intersectWith(RHSRange).isEmptySet())
    return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);

  // We can't fold (ugt x, C) & (sgt x, C2).
  if (!PredicatesFoldable(LHSCC, RHSCC))
    return 0;

  // Ensure that the larger constant is on the RHS.
  bool ShouldSwap;
  if (CmpInst::isSigned(LHSCC) ||
      (ICmpInst::isEquality(LHSCC) &&
       CmpInst::isSigned(RHSCC)))
    ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
  else
    ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());

  if (ShouldSwap) {
    std::swap(LHS, RHS);
    std::swap(LHSCst, RHSCst);
    std::swap(LHSCC, RHSCC);
  }

  // At this point, we know we have two icmp instructions
  // comparing a value against two constants and and'ing the result
  // together. Because of the above check, we know that we only have
  // icmp eq, icmp ne, icmp [su]lt, and icmp [su]gt here. We also know
  // (from the icmp folding check above), that the two constants
  // are not equal and that the larger constant is on the RHS.
  assert(LHSCst != RHSCst && "Compares not folded above?");

  switch (LHSCC) {
  default: llvm_unreachable("Unknown integer condition code!");
  case ICmpInst::ICMP_EQ:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_NE:         // (X == 13 & X != 15) -> X == 13
    case ICmpInst::ICMP_ULT:        // (X == 13 & X <  15) -> X == 13
    case ICmpInst::ICMP_SLT:        // (X == 13 & X <  15) -> X == 13
      return LHS;
    }
  case ICmpInst::ICMP_NE:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_ULT:
      if (LHSCst == SubOne(RHSCst)) // (X != 13 & X u< 14) -> X < 13
        return Builder->CreateICmpULT(Val, LHSCst);
      break;                        // (X != 13 & X u< 15) -> no change
    case ICmpInst::ICMP_SLT:
      if (LHSCst == SubOne(RHSCst)) // (X != 13 & X s< 14) -> X < 13
        return Builder->CreateICmpSLT(Val, LHSCst);
      break;                        // (X != 13 & X s< 15) -> no change
    case ICmpInst::ICMP_EQ:         // (X != 13 & X == 15) -> X == 15
    case ICmpInst::ICMP_UGT:        // (X != 13 & X u> 15) -> X u> 15
    case ICmpInst::ICMP_SGT:        // (X != 13 & X s> 15) -> X s> 15
      return RHS;
    case ICmpInst::ICMP_NE:
      if (LHSCst == SubOne(RHSCst)){// (X != 13 & X != 14) -> X-13 >u 1
        Constant *AddCST = ConstantExpr::getNeg(LHSCst);
        Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
        return Builder->CreateICmpUGT(Add, ConstantInt::get(Add->getType(), 1));
      }
      break;                        // (X != 13 & X != 15) -> no change
    }
    break;
  case ICmpInst::ICMP_ULT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:         // (X u< 13 & X == 15) -> false
    case ICmpInst::ICMP_UGT:        // (X u< 13 & X u> 15) -> false
      return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
    case ICmpInst::ICMP_SGT:        // (X u< 13 & X s> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:         // (X u< 13 & X != 15) -> X u< 13
    case ICmpInst::ICMP_ULT:        // (X u< 13 & X u< 15) -> X u< 13
      return LHS;
    case ICmpInst::ICMP_SLT:        // (X u< 13 & X s< 15) -> no change
      break;
    }
    break;
  case ICmpInst::ICMP_SLT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_UGT:        // (X s< 13 & X u> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:         // (X s< 13 & X != 15) -> X < 13
    case ICmpInst::ICMP_SLT:        // (X s< 13 & X s< 15) -> X < 13
      return LHS;
    case ICmpInst::ICMP_ULT:        // (X s< 13 & X u< 15) -> no change
      break;
    }
    break;
  case ICmpInst::ICMP_UGT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:         // (X u> 13 & X == 15) -> X == 15
    case ICmpInst::ICMP_UGT:        // (X u> 13 & X u> 15) -> X u> 15
      return RHS;
    case ICmpInst::ICMP_SGT:        // (X u> 13 & X s> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:
      if (RHSCst == AddOne(LHSCst)) // (X u> 13 & X != 14) -> X u> 14
        return Builder->CreateICmp(LHSCC, Val, RHSCst);
      break;                        // (X u> 13 & X != 15) -> no change
    case ICmpInst::ICMP_ULT:        // (X u> 13 & X u< 15) -> (X-14) <u 1
      return InsertRangeTest(Val, AddOne(LHSCst), RHSCst, false, true);
    case ICmpInst::ICMP_SLT:        // (X u> 13 & X s< 15) -> no change
      break;
    }
    break;
  case ICmpInst::ICMP_SGT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:         // (X s> 13 & X == 15) -> X == 15
    case ICmpInst::ICMP_SGT:        // (X s> 13 & X s> 15) -> X s> 15
      return RHS;
    case ICmpInst::ICMP_UGT:        // (X s> 13 & X u> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:
      if (RHSCst == AddOne(LHSCst)) // (X s> 13 & X != 14) -> X s> 14
        return Builder->CreateICmp(LHSCC, Val, RHSCst);
      break;                        // (X s> 13 & X != 15) -> no change
    case ICmpInst::ICMP_SLT:        // (X s> 13 & X s< 15) -> (X-14) s< 1
      return InsertRangeTest(Val, AddOne(LHSCst), RHSCst, true, true);
    case ICmpInst::ICMP_ULT:        // (X s> 13 & X u< 15) -> no change
      break;
    }
    break;
  }

  return 0;
}

/// FoldAndOfFCmps - Optimize (fcmp)&(fcmp). NOTE: Unlike the rest of
/// instcombine, this returns a Value which should already be inserted into the
/// function.
Value *InstCombiner::FoldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
  if (LHS->getPredicate() == FCmpInst::FCMP_ORD &&
      RHS->getPredicate() == FCmpInst::FCMP_ORD) {
    // (fcmp ord x, c) & (fcmp ord y, c) -> (fcmp ord x, y)
    if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
      if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
        // If either of the constants is a NaN, then the whole thing returns
        // false.
        if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
          return ConstantInt::getFalse(LHS->getContext());
        return Builder->CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
      }

    // Handle vector zeros. This occurs because the canonical form of
    // "fcmp ord x,x" is "fcmp ord x, 0".
    if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
        isa<ConstantAggregateZero>(RHS->getOperand(1)))
      return Builder->CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
    return 0;
  }

  Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
  Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
  FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();


  if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
    // Swap RHS operands to match LHS.
    Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
    std::swap(Op1LHS, Op1RHS);
  }

  if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
    // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
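    // For instance (illustrative): (fcmp ord x, y) & (fcmp ueq x, y) becomes
    // fcmp oeq x, y, and (fcmp uno x, y) & (fcmp oeq x, y) becomes false; the
    // ordered/unordered bookkeeping for these cases is handled below through
    // getFCmpCode and getFCmpValue.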
    if (Op0CC == Op1CC)
      return Builder->CreateFCmp((FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS);
    if (Op0CC == FCmpInst::FCMP_FALSE || Op1CC == FCmpInst::FCMP_FALSE)
      return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
    if (Op0CC == FCmpInst::FCMP_TRUE)
      return RHS;
    if (Op1CC == FCmpInst::FCMP_TRUE)
      return LHS;

    bool Op0Ordered;
    bool Op1Ordered;
    unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
    unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
    if (Op1Pred == 0) {
      std::swap(LHS, RHS);
      std::swap(Op0Pred, Op1Pred);
      std::swap(Op0Ordered, Op1Ordered);
    }
    if (Op0Pred == 0) {
      // uno && ueq -> uno && (uno || eq) -> ueq
      // ord && olt -> ord && (ord && lt) -> olt
      if (Op0Ordered == Op1Ordered)
        return RHS;

      // uno && oeq -> uno && (ord && eq) -> false
      // uno && ord -> false
      if (!Op0Ordered)
        return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
      // ord && ueq -> ord && (uno || eq) -> oeq
      return getFCmpValue(true, Op1Pred, Op0LHS, Op0RHS, Builder);
    }
  }

  return 0;
}


Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyAndInst(Op0, Op1, TD))
    return ReplaceInstUsesWith(I, V);

  // (A|B)&(A|C) -> A|(B&C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return ReplaceInstUsesWith(I, V);

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
    const APInt &AndRHSMask = AndRHS->getValue();

    // Optimize a variety of ((val OP C1) & C2) combinations...
    if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
      Value *Op0LHS = Op0I->getOperand(0);
      Value *Op0RHS = Op0I->getOperand(1);
      switch (Op0I->getOpcode()) {
      default: break;
      case Instruction::Xor:
      case Instruction::Or: {
        // If the mask is only needed on one incoming arm, push it up.
        if (!Op0I->hasOneUse()) break;

        APInt NotAndRHS(~AndRHSMask);
        if (MaskedValueIsZero(Op0LHS, NotAndRHS)) {
          // Not masking anything out for the LHS, move to RHS.
          Value *NewRHS = Builder->CreateAnd(Op0RHS, AndRHS,
                                             Op0RHS->getName()+".masked");
          return BinaryOperator::Create(Op0I->getOpcode(), Op0LHS, NewRHS);
        }
        if (!isa<Constant>(Op0RHS) &&
            MaskedValueIsZero(Op0RHS, NotAndRHS)) {
          // Not masking anything out for the RHS, move to LHS.
          Value *NewLHS = Builder->CreateAnd(Op0LHS, AndRHS,
                                             Op0LHS->getName()+".masked");
          return BinaryOperator::Create(Op0I->getOpcode(), NewLHS, Op0RHS);
        }

        break;
      }
      case Instruction::Add:
        // ((A & N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == AndRHS.
        // ((A | N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
        // ((A ^ N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
        if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, false, I))
          return BinaryOperator::CreateAnd(V, AndRHS);
        if (Value *V = FoldLogicalPlusAnd(Op0RHS, Op0LHS, AndRHS, false, I))
          return BinaryOperator::CreateAnd(V, AndRHS);  // Add commutes
        break;

      case Instruction::Sub:
        // ((A & N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == AndRHS.
        // ((A | N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
        // ((A ^ N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
        if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, true, I))
          return BinaryOperator::CreateAnd(V, AndRHS);

        // (A - N) & AndRHS -> -N & AndRHS iff A&AndRHS==0 and AndRHS
        // has 1's for all bits that the subtraction with A might affect.
        if (Op0I->hasOneUse() && !match(Op0LHS, m_Zero())) {
          uint32_t BitWidth = AndRHSMask.getBitWidth();
          uint32_t Zeros = AndRHSMask.countLeadingZeros();
          APInt Mask = APInt::getLowBitsSet(BitWidth, BitWidth - Zeros);

          if (MaskedValueIsZero(Op0LHS, Mask)) {
            Value *NewNeg = Builder->CreateNeg(Op0RHS);
            return BinaryOperator::CreateAnd(NewNeg, AndRHS);
          }
        }
        break;

      case Instruction::Shl:
      case Instruction::LShr:
        // (1 << x) & 1 --> zext(x == 0)
        // (1 >> x) & 1 --> zext(x == 0)
        if (AndRHSMask == 1 && Op0LHS == AndRHS) {
          Value *NewICmp =
            Builder->CreateICmpEQ(Op0RHS, Constant::getNullValue(I.getType()));
          return new ZExtInst(NewICmp, I.getType());
        }
        break;
      }

      if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1)))
        if (Instruction *Res = OptAndOp(Op0I, Op0CI, AndRHS, I))
          return Res;
    }

    // If this is an integer truncation, and if the source is an 'and' with
    // immediate, transform it. This frequently occurs for bitfield accesses.
    {
      Value *X = 0; ConstantInt *YC = 0;
      if (match(Op0, m_Trunc(m_And(m_Value(X), m_ConstantInt(YC))))) {
        // Change: and (trunc (and X, YC) to T), C2
        // into  : and (trunc X to T), trunc(YC) & C2
        // This will fold the two constants together, which may allow
        // other simplifications.
        Value *NewCast = Builder->CreateTrunc(X, I.getType(), "and.shrunk");
        Constant *C3 = ConstantExpr::getTrunc(YC, I.getType());
        C3 = ConstantExpr::getAnd(C3, AndRHS);
        return BinaryOperator::CreateAnd(NewCast, C3);
      }
    }

    // Try to fold constant and into select arguments.
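    // For example (illustrative, assuming the usual constant-folding behavior
    // of FoldOpIntoSelect): (select %c, i32 7, i32 2) & 1 becomes
    // select %c, i32 1, i32 0, since the 'and' can be applied to both
    // constant arms.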
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;
    if (isa<PHINode>(Op0))
      if (Instruction *NV = FoldOpIntoPhi(I))
        return NV;
  }


  // (~A & ~B) == (~(A | B)) - De Morgan's Law
  if (Value *Op0NotVal = dyn_castNotVal(Op0))
    if (Value *Op1NotVal = dyn_castNotVal(Op1))
      if (Op0->hasOneUse() && Op1->hasOneUse()) {
        Value *Or = Builder->CreateOr(Op0NotVal, Op1NotVal,
                                      I.getName()+".demorgan");
        return BinaryOperator::CreateNot(Or);
      }

  {
    Value *A = 0, *B = 0, *C = 0, *D = 0;
    // (A|B) & ~(A&B) -> A^B
    if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
        match(Op1, m_Not(m_And(m_Value(C), m_Value(D)))) &&
        ((A == C && B == D) || (A == D && B == C)))
      return BinaryOperator::CreateXor(A, B);

    // ~(A&B) & (A|B) -> A^B
    if (match(Op1, m_Or(m_Value(A), m_Value(B))) &&
        match(Op0, m_Not(m_And(m_Value(C), m_Value(D)))) &&
        ((A == C && B == D) || (A == D && B == C)))
      return BinaryOperator::CreateXor(A, B);

    // A&(A^B) => A & ~B
    {
      Value *tmpOp0 = Op0;
      Value *tmpOp1 = Op1;
      if (Op0->hasOneUse() &&
          match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
        if (A == Op1 || B == Op1 ) {
          tmpOp1 = Op0;
          tmpOp0 = Op1;
          // Simplify below
        }
      }

      if (tmpOp1->hasOneUse() &&
          match(tmpOp1, m_Xor(m_Value(A), m_Value(B)))) {
        if (B == tmpOp0) {
          std::swap(A, B);
        }
        // Notice that the pattern (A&(~B)) is actually (A&(-1^B)), so if
        // A is originally -1 (or a vector of -1 and undefs), then we enter
        // an endless loop. By checking that A is non-constant we ensure that
        // we will never get to the loop.
        if (A == tmpOp0 && !isa<Constant>(A)) // A&(A^B) -> A & ~B
          return BinaryOperator::CreateAnd(A, Builder->CreateNot(B));
      }
    }

    // (A&((~A)|B)) -> A&B
    if (match(Op0, m_Or(m_Not(m_Specific(Op1)), m_Value(A))) ||
        match(Op0, m_Or(m_Value(A), m_Not(m_Specific(Op1)))))
      return BinaryOperator::CreateAnd(A, Op1);
    if (match(Op1, m_Or(m_Not(m_Specific(Op0)), m_Value(A))) ||
        match(Op1, m_Or(m_Value(A), m_Not(m_Specific(Op0)))))
      return BinaryOperator::CreateAnd(A, Op0);
  }

  if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1))
    if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0))
      if (Value *Res = FoldAndOfICmps(LHS, RHS))
        return ReplaceInstUsesWith(I, Res);

  // If and'ing two fcmp, try combine them into one.
  if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
    if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
      if (Value *Res = FoldAndOfFCmps(LHS, RHS))
        return ReplaceInstUsesWith(I, Res);


  // fold (and (cast A), (cast B)) -> (cast (and A, B))
  if (CastInst *Op0C = dyn_cast<CastInst>(Op0))
    if (CastInst *Op1C = dyn_cast<CastInst>(Op1)) {
      Type *SrcTy = Op0C->getOperand(0)->getType();
      if (Op0C->getOpcode() == Op1C->getOpcode() && // same cast kind ?
          SrcTy == Op1C->getOperand(0)->getType() &&
          SrcTy->isIntOrIntVectorTy()) {
        Value *Op0COp = Op0C->getOperand(0), *Op1COp = Op1C->getOperand(0);

        // Only do this if the casts both really cause code to be generated.
        if (ShouldOptimizeCast(Op0C->getOpcode(), Op0COp, I.getType()) &&
            ShouldOptimizeCast(Op1C->getOpcode(), Op1COp, I.getType())) {
          Value *NewOp = Builder->CreateAnd(Op0COp, Op1COp, I.getName());
          return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
        }

        // If this is and(cast(icmp), cast(icmp)), try to fold this even if the
        // cast is otherwise not optimizable. This happens for vector sexts.
        if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1COp))
          if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0COp))
            if (Value *Res = FoldAndOfICmps(LHS, RHS))
              return CastInst::Create(Op0C->getOpcode(), Res, I.getType());

        // If this is and(cast(fcmp), cast(fcmp)), try to fold this even if the
        // cast is otherwise not optimizable. This happens for vector sexts.
        if (FCmpInst *RHS = dyn_cast<FCmpInst>(Op1COp))
          if (FCmpInst *LHS = dyn_cast<FCmpInst>(Op0COp))
            if (Value *Res = FoldAndOfFCmps(LHS, RHS))
              return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
      }
    }

  // (X >> Z) & (Y >> Z) -> (X&Y) >> Z for all shifts.
  if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
    if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
      if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
          SI0->getOperand(1) == SI1->getOperand(1) &&
          (SI0->hasOneUse() || SI1->hasOneUse())) {
        Value *NewOp =
          Builder->CreateAnd(SI0->getOperand(0), SI1->getOperand(0),
                             SI0->getName());
        return BinaryOperator::Create(SI1->getOpcode(), NewOp,
                                      SI1->getOperand(1));
      }
  }

  return Changed ? &I : 0;
}

/// CollectBSwapParts - Analyze the specified subexpression and see if it is
/// capable of providing pieces of a bswap. The subexpression provides pieces
/// of a bswap if it is proven that each of the non-zero bytes in the output of
/// the expression came from the corresponding "byte swapped" byte in some
/// other value. For example, if the current subexpression is
/// "(shl i32 %X, 24)" then we know that the expression deposits the low byte
/// of %X into the high byte of the bswap result and that all other bytes are
/// zero. If this expression is accepted, the high byte of ByteValues is set to
/// X to indicate a correct match.
///
/// This function returns true if the match was unsuccessful and false if it
/// was successful. On entry to the function the "OverallLeftShift" is a signed
/// integer value indicating the number of bytes that the subexpression is
/// later shifted. For example, if the expression is later right shifted by 16
/// bits, the OverallLeftShift value would be -2 on entry. This is used to
/// specify which byte of ByteValues is actually being set.
///
/// Similarly, ByteMask is a bitmask where a bit is clear if its corresponding
/// byte is masked to zero by a user. For example, in (X & 255), X will be
/// processed with a bytemask of 1. Because bytemask is 32-bits, this limits
/// this function to working on up to 32-byte (256 bit) values. ByteMask is
/// always in the local (OverallLeftShift) coordinate space.
///
static bool CollectBSwapParts(Value *V, int OverallLeftShift, uint32_t ByteMask,
                              SmallVector<Value*, 8> &ByteValues) {
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // If this is an or instruction, it may be an inner node of the bswap.
    if (I->getOpcode() == Instruction::Or) {
      return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
                               ByteValues) ||
             CollectBSwapParts(I->getOperand(1), OverallLeftShift, ByteMask,
                               ByteValues);
    }

    // If this is a logical shift by a constant multiple of 8, recurse with
    // OverallLeftShift and ByteMask adjusted.
    if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
      unsigned ShAmt =
        cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
      // Ensure the shift amount is defined and of a byte value.
      if ((ShAmt & 7) || (ShAmt > 8*ByteValues.size()))
        return true;

      unsigned ByteShift = ShAmt >> 3;
      if (I->getOpcode() == Instruction::Shl) {
        // X << 2 -> collect(X, +2)
        OverallLeftShift += ByteShift;
        ByteMask >>= ByteShift;
      } else {
        // X >>u 2 -> collect(X, -2)
        OverallLeftShift -= ByteShift;
        ByteMask <<= ByteShift;
        ByteMask &= (~0U >> (32-ByteValues.size()));
      }

      if (OverallLeftShift >= (int)ByteValues.size()) return true;
      if (OverallLeftShift <= -(int)ByteValues.size()) return true;

      return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
                               ByteValues);
    }

    // If this is a logical 'and' with a mask that clears bytes, clear the
    // corresponding bytes in ByteMask.
    if (I->getOpcode() == Instruction::And &&
        isa<ConstantInt>(I->getOperand(1))) {
      // Scan every byte of the and mask, seeing if the byte is either 0 or 255.
      unsigned NumBytes = ByteValues.size();
      APInt Byte(I->getType()->getPrimitiveSizeInBits(), 255);
      const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();

      for (unsigned i = 0; i != NumBytes; ++i, Byte <<= 8) {
        // If this byte is masked out by a later operation, we don't care what
        // the and mask is.
        if ((ByteMask & (1 << i)) == 0)
          continue;

        // If the AndMask is all zeros for this byte, clear the bit.
        APInt MaskB = AndMask & Byte;
        if (MaskB == 0) {
          ByteMask &= ~(1U << i);
          continue;
        }

        // If the AndMask is not all ones for this byte, it's not a bytezap.
        if (MaskB != Byte)
          return true;

        // Otherwise, this byte is kept.
      }

      return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
                               ByteValues);
    }
  }

  // Okay, we got to something that isn't a shift, 'or' or 'and'. This must be
  // the input value to the bswap. Some observations: 1) if more than one byte
  // is demanded from this input, then it could not be successfully assembled
  // into a byteswap. At least one of the two bytes would not be aligned with
  // their ultimate destination.
  if (!isPowerOf2_32(ByteMask)) return true;
  unsigned InputByteNo = CountTrailingZeros_32(ByteMask);

  // 2) The input and ultimate destinations must line up: if byte 3 of an i32
  // is demanded, it needs to go into byte 0 of the result. This means that the
  // byte needs to be shifted until it lands in the right byte bucket. The
  // shift amount depends on the position: if the byte is coming from the high
  // part of the value (e.g. byte 3) then it must be shifted right. If from the
  // low part, it must be shifted left.
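  // For illustration: in an i32 bswap built from (shl i32 %X, 24), the input
  // byte is InputByteNo == 0 and OverallLeftShift == +3, so DestByteNo below
  // is 3, and ByteValues.size()-1-DestByteNo == 0 matches InputByteNo.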
  unsigned DestByteNo = InputByteNo + OverallLeftShift;
  if (InputByteNo < ByteValues.size()/2) {
    if (ByteValues.size()-1-DestByteNo != InputByteNo)
      return true;
  } else {
    if (ByteValues.size()-1-DestByteNo != InputByteNo)
      return true;
  }

  // If the destination byte value is already defined, the values are or'd
  // together, which isn't a bswap (unless it's an or of the same bits).
  if (ByteValues[DestByteNo] && ByteValues[DestByteNo] != V)
    return true;
  ByteValues[DestByteNo] = V;
  return false;
}

/// MatchBSwap - Given an OR instruction, check to see if this is a bswap idiom.
/// If so, insert the new bswap intrinsic and return it.
Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
  IntegerType *ITy = dyn_cast<IntegerType>(I.getType());
  if (!ITy || ITy->getBitWidth() % 16 ||
      // ByteMask only allows up to 32-byte values.
      ITy->getBitWidth() > 32*8)
    return 0;   // Can only bswap pairs of bytes. Can't do vectors.

  /// ByteValues - For each byte of the result, we keep track of which value
  /// defines each byte.
  SmallVector<Value*, 8> ByteValues;
  ByteValues.resize(ITy->getBitWidth()/8);

  // Try to find all the pieces corresponding to the bswap.
  uint32_t ByteMask = ~0U >> (32-ByteValues.size());
  if (CollectBSwapParts(&I, 0, ByteMask, ByteValues))
    return 0;

  // Check to see if all of the bytes come from the same value.
  Value *V = ByteValues[0];
  if (V == 0) return 0;  // Didn't find a byte? Must be zero.

  // Check to make sure that all of the bytes come from the same value.
  for (unsigned i = 1, e = ByteValues.size(); i != e; ++i)
    if (ByteValues[i] != V)
      return 0;
  Module *M = I.getParent()->getParent()->getParent();
  Function *F = Intrinsic::getDeclaration(M, Intrinsic::bswap, ITy);
  return CallInst::Create(F, V);
}

/// MatchSelectFromAndOr - We have an expression of the form (A&C)|(B&D). Check
/// if A is (cond?-1:0) and either B or D is ~(cond?-1:0) or (cond?0:-1); if
/// so, we can simplify this expression to "cond ? C : D or B".
static Instruction *MatchSelectFromAndOr(Value *A, Value *B,
                                         Value *C, Value *D) {
  // If A is not a select of -1/0, this cannot match.
  Value *Cond = 0;
  if (!match(A, m_SExt(m_Value(Cond))) ||
      !Cond->getType()->isIntegerTy(1))
    return 0;

  // ((cond?-1:0)&C) | (B&(cond?0:-1)) -> cond ? C : B.
  if (match(D, m_Not(m_SExt(m_Specific(Cond)))))
    return SelectInst::Create(Cond, C, B);
  if (match(D, m_SExt(m_Not(m_Specific(Cond)))))
    return SelectInst::Create(Cond, C, B);

  // ((cond?-1:0)&C) | ((cond?0:-1)&D) -> cond ? C : D.
  if (match(B, m_Not(m_SExt(m_Specific(Cond)))))
    return SelectInst::Create(Cond, C, D);
  if (match(B, m_SExt(m_Not(m_Specific(Cond)))))
    return SelectInst::Create(Cond, C, D);
  return 0;
}

/// FoldOrOfICmps - Fold (icmp)|(icmp) if possible.
Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
  ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();

  // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
  if (PredicatesFoldable(LHSCC, RHSCC)) {
    if (LHS->getOperand(0) == RHS->getOperand(1) &&
        LHS->getOperand(1) == RHS->getOperand(0))
      LHS->swapOperands();
    if (LHS->getOperand(0) == RHS->getOperand(0) &&
        LHS->getOperand(1) == RHS->getOperand(1)) {
      Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
      unsigned Code = getICmpCode(LHS) | getICmpCode(RHS);
      bool isSigned = LHS->isSigned() || RHS->isSigned();
      return getICmpValue(isSigned, Code, Op0, Op1, Builder);
    }
  }

  // handle (roughly):
  // (icmp ne (A & B), C) | (icmp ne (A & D), E)
  if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, ICmpInst::ICMP_NE, Builder))
    return V;

  // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
  Value *Val = LHS->getOperand(0), *Val2 = RHS->getOperand(0);
  ConstantInt *LHSCst = dyn_cast<ConstantInt>(LHS->getOperand(1));
  ConstantInt *RHSCst = dyn_cast<ConstantInt>(RHS->getOperand(1));
  if (LHSCst == 0 || RHSCst == 0) return 0;

  if (LHSCst == RHSCst && LHSCC == RHSCC) {
    // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
    if (LHSCC == ICmpInst::ICMP_NE && LHSCst->isZero()) {
      Value *NewOr = Builder->CreateOr(Val, Val2);
      return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
    }

    // (icmp slt A, 0) | (icmp slt B, 0) --> (icmp slt (A|B), 0)
    if (LHSCC == ICmpInst::ICMP_SLT && LHSCst->isZero()) {
      Value *NewOr = Builder->CreateOr(Val, Val2);
      return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
    }

    // (icmp sgt A, -1) | (icmp sgt B, -1) --> (icmp sgt (A&B), -1)
    if (LHSCC == ICmpInst::ICMP_SGT && LHSCst->isAllOnesValue()) {
      Value *NewAnd = Builder->CreateAnd(Val, Val2);
      return Builder->CreateICmp(LHSCC, NewAnd, LHSCst);
    }
  }

  // (icmp ult (X + CA), C1) | (icmp eq X, C2) -> (icmp ule (X + CA), C1)
  //   iff C2 + CA == C1.
  if (LHSCC == ICmpInst::ICMP_ULT && RHSCC == ICmpInst::ICMP_EQ) {
    ConstantInt *AddCst;
    if (match(Val, m_Add(m_Specific(Val2), m_ConstantInt(AddCst))))
      if (RHSCst->getValue() + AddCst->getValue() == LHSCst->getValue())
        return Builder->CreateICmpULE(Val, LHSCst);
  }

  // From here on, we only handle:
  //   (icmp1 A, C1) | (icmp2 A, C2) --> something simpler.
  if (Val != Val2) return 0;

  // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
  if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
      RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
      LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
      RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
    return 0;

  // We can't fold (ugt x, C) | (sgt x, C2).
  if (!PredicatesFoldable(LHSCC, RHSCC))
    return 0;

  // Ensure that the larger constant is on the RHS.
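  // The constants must be ordered with the same signedness that the predicates
  // use, so decide here between a signed and an unsigned comparison.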
  bool ShouldSwap;
  if (CmpInst::isSigned(LHSCC) ||
      (ICmpInst::isEquality(LHSCC) &&
       CmpInst::isSigned(RHSCC)))
    ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
  else
    ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());

  if (ShouldSwap) {
    std::swap(LHS, RHS);
    std::swap(LHSCst, RHSCst);
    std::swap(LHSCC, RHSCC);
  }

  // At this point, we know we have two icmp instructions comparing a value
  // against two constants and or'ing the results together.  Because of the
  // above check, we know that we only have ICMP_EQ, ICMP_NE, ICMP_LT, and
  // ICMP_GT here.  We also know (from the icmp folding check above) that the
  // two constants are not equal.
  assert(LHSCst != RHSCst && "Compares not folded above?");

  switch (LHSCC) {
  default: llvm_unreachable("Unknown integer condition code!");
  case ICmpInst::ICMP_EQ:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:
      if (LHSCst == SubOne(RHSCst)) {
        // (X == 13 | X == 14) -> X-13 <u 2
        Constant *AddCST = ConstantExpr::getNeg(LHSCst);
        Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
        AddCST = ConstantExpr::getSub(AddOne(RHSCst), LHSCst);
        return Builder->CreateICmpULT(Add, AddCST);
      }
      break;                          // (X == 13 | X == 15) -> no change
    case ICmpInst::ICMP_UGT:          // (X == 13 | X u> 14) -> no change
    case ICmpInst::ICMP_SGT:          // (X == 13 | X s> 14) -> no change
      break;
    case ICmpInst::ICMP_NE:           // (X == 13 | X != 15) -> X != 15
    case ICmpInst::ICMP_ULT:          // (X == 13 | X u< 15) -> X u< 15
    case ICmpInst::ICMP_SLT:          // (X == 13 | X s< 15) -> X s< 15
      return RHS;
    }
    break;
  case ICmpInst::ICMP_NE:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:           // (X != 13 | X == 15) -> X != 13
    case ICmpInst::ICMP_UGT:          // (X != 13 | X u> 15) -> X != 13
    case ICmpInst::ICMP_SGT:          // (X != 13 | X s> 15) -> X != 13
      return LHS;
    case ICmpInst::ICMP_NE:           // (X != 13 | X != 15) -> true
    case ICmpInst::ICMP_ULT:          // (X != 13 | X u< 15) -> true
    case ICmpInst::ICMP_SLT:          // (X != 13 | X s< 15) -> true
      return ConstantInt::getTrue(LHS->getContext());
    }
    break;
  case ICmpInst::ICMP_ULT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:           // (X u< 13 | X == 14) -> no change
      break;
    case ICmpInst::ICMP_UGT:          // (X u< 13 | X u> 15) -> (X-13) u> 2
      // If RHSCst is [us]MAXINT, the RHS compare is always false.  Not
      // handling this can cause overflow.
      if (RHSCst->isMaxValue(false))
        return LHS;
      return InsertRangeTest(Val, LHSCst, AddOne(RHSCst), false, false);
    case ICmpInst::ICMP_SGT:          // (X u< 13 | X s> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:           // (X u< 13 | X != 15) -> X != 15
    case ICmpInst::ICMP_ULT:          // (X u< 13 | X u< 15) -> X u< 15
      return RHS;
    case ICmpInst::ICMP_SLT:          // (X u< 13 | X s< 15) -> no change
      break;
    }
    break;
  case ICmpInst::ICMP_SLT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:           // (X s< 13 | X == 14) -> no change
      break;
    case ICmpInst::ICMP_SGT:          // (X s< 13 | X s> 15) -> (X-13) s> 2
      // If RHSCst is [us]MAXINT, the RHS compare is always false.  Not
      // handling this can cause overflow.
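      // (AddOne(RHSCst) below would otherwise wrap around.)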
      if (RHSCst->isMaxValue(true))
        return LHS;
      return InsertRangeTest(Val, LHSCst, AddOne(RHSCst), true, false);
    case ICmpInst::ICMP_UGT:          // (X s< 13 | X u> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:           // (X s< 13 | X != 15) -> X != 15
    case ICmpInst::ICMP_SLT:          // (X s< 13 | X s< 15) -> X s< 15
      return RHS;
    case ICmpInst::ICMP_ULT:          // (X s< 13 | X u< 15) -> no change
      break;
    }
    break;
  case ICmpInst::ICMP_UGT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:           // (X u> 13 | X == 15) -> X u> 13
    case ICmpInst::ICMP_UGT:          // (X u> 13 | X u> 15) -> X u> 13
      return LHS;
    case ICmpInst::ICMP_SGT:          // (X u> 13 | X s> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:           // (X u> 13 | X != 15) -> true
    case ICmpInst::ICMP_ULT:          // (X u> 13 | X u< 15) -> true
      return ConstantInt::getTrue(LHS->getContext());
    case ICmpInst::ICMP_SLT:          // (X u> 13 | X s< 15) -> no change
      break;
    }
    break;
  case ICmpInst::ICMP_SGT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:           // (X s> 13 | X == 15) -> X s> 13
    case ICmpInst::ICMP_SGT:          // (X s> 13 | X s> 15) -> X s> 13
      return LHS;
    case ICmpInst::ICMP_UGT:          // (X s> 13 | X u> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:           // (X s> 13 | X != 15) -> true
    case ICmpInst::ICMP_SLT:          // (X s> 13 | X s< 15) -> true
      return ConstantInt::getTrue(LHS->getContext());
    case ICmpInst::ICMP_ULT:          // (X s> 13 | X u< 15) -> no change
      break;
    }
    break;
  }
  return 0;
}

/// FoldOrOfFCmps - Optimize (fcmp)|(fcmp).  NOTE: Unlike the rest of
/// instcombine, this returns a Value which should already be inserted into the
/// function.
Value *InstCombiner::FoldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
  if (LHS->getPredicate() == FCmpInst::FCMP_UNO &&
      RHS->getPredicate() == FCmpInst::FCMP_UNO &&
      LHS->getOperand(0)->getType() == RHS->getOperand(0)->getType()) {
    if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
      if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
        // If either of the constants is a NaN, then the whole thing returns
        // true.
        if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
          return ConstantInt::getTrue(LHS->getContext());

        // Otherwise, no need to compare the two constants; compare the
        // rest.
        return Builder->CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0));
      }

    // Handle vector zeros.  This occurs because the canonical form of
    // "fcmp uno x,x" is "fcmp uno x, 0".
    if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
        isa<ConstantAggregateZero>(RHS->getOperand(1)))
      return Builder->CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0));

    return 0;
  }

  Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
  Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
  FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();

  if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
    // Swap RHS operands to match LHS.
    Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
    std::swap(Op1LHS, Op1RHS);
  }
  if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
    // Simplify (fcmp cc0 x, y) | (fcmp cc1 x, y).
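    // Handle the trivial cases first: identical predicates, or a side that is
    // always true or always false.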
    if (Op0CC == Op1CC)
      return Builder->CreateFCmp((FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS);
    if (Op0CC == FCmpInst::FCMP_TRUE || Op1CC == FCmpInst::FCMP_TRUE)
      return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1);
    if (Op0CC == FCmpInst::FCMP_FALSE)
      return RHS;
    if (Op1CC == FCmpInst::FCMP_FALSE)
      return LHS;
    bool Op0Ordered;
    bool Op1Ordered;
    unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
    unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
    if (Op0Ordered == Op1Ordered) {
      // If both are ordered or unordered, return a new fcmp with
      // or'ed predicates.
      return getFCmpValue(Op0Ordered, Op0Pred|Op1Pred, Op0LHS, Op0RHS, Builder);
    }
  }
  return 0;
}

/// FoldOrWithConstants - This helper function folds:
///
///     ((A | B) & C1) | (B & C2)
///
/// into:
///
///     (A & C1) | B
///
/// when the XOR of the two constants is "all ones" (-1).
Instruction *InstCombiner::FoldOrWithConstants(BinaryOperator &I, Value *Op,
                                               Value *A, Value *B, Value *C) {
  ConstantInt *CI1 = dyn_cast<ConstantInt>(C);
  if (!CI1) return 0;

  Value *V1 = 0;
  ConstantInt *CI2 = 0;
  if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2)))) return 0;

  APInt Xor = CI1->getValue() ^ CI2->getValue();
  if (!Xor.isAllOnesValue()) return 0;

  if (V1 == A || V1 == B) {
    Value *NewOp = Builder->CreateAnd((V1 == A) ? B : A, CI1);
    return BinaryOperator::CreateOr(NewOp, V1);
  }

  return 0;
}

Instruction *InstCombiner::visitOr(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyOrInst(Op0, Op1, TD))
    return ReplaceInstUsesWith(I, V);

  // (A&B)|(A&C) -> A&(B|C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return ReplaceInstUsesWith(I, V);

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
    ConstantInt *C1 = 0; Value *X = 0;
    // (X & C1) | C2 --> (X | C2) & (C1|C2)
    // iff (C1 & C2) == 0.
    if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1))) &&
        (RHS->getValue() & C1->getValue()) != 0 &&
        Op0->hasOneUse()) {
      Value *Or = Builder->CreateOr(X, RHS);
      Or->takeName(Op0);
      return BinaryOperator::CreateAnd(Or,
                         ConstantInt::get(I.getContext(),
                                          RHS->getValue() | C1->getValue()));
    }

    // (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2)
    if (match(Op0, m_Xor(m_Value(X), m_ConstantInt(C1))) &&
        Op0->hasOneUse()) {
      Value *Or = Builder->CreateOr(X, RHS);
      Or->takeName(Op0);
      return BinaryOperator::CreateXor(Or,
                         ConstantInt::get(I.getContext(),
                                          C1->getValue() & ~RHS->getValue()));
    }

    // Try to fold constant and into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

    if (isa<PHINode>(Op0))
      if (Instruction *NV = FoldOpIntoPhi(I))
        return NV;
  }

  Value *A = 0, *B = 0;
  ConstantInt *C1 = 0, *C2 = 0;

  // (A | B) | C  and  A | (B | C)                  -> bswap if possible.
  // (A >> B) | (C << D)  and  (A << B) | (C >> D)  -> bswap if possible.
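  // Only try MatchBSwap when the operands have a shape that could form a
  // bswap: an inner 'or', or a pair of logical shifts.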
  if (match(Op0, m_Or(m_Value(), m_Value())) ||
      match(Op1, m_Or(m_Value(), m_Value())) ||
      (match(Op0, m_LogicalShift(m_Value(), m_Value())) &&
       match(Op1, m_LogicalShift(m_Value(), m_Value())))) {
    if (Instruction *BSwap = MatchBSwap(I))
      return BSwap;
  }

  // (X^C)|Y -> (X|Y)^C iff Y&C == 0
  if (Op0->hasOneUse() &&
      match(Op0, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
      MaskedValueIsZero(Op1, C1->getValue())) {
    Value *NOr = Builder->CreateOr(A, Op1);
    NOr->takeName(Op0);
    return BinaryOperator::CreateXor(NOr, C1);
  }

  // Y|(X^C) -> (X|Y)^C iff Y&C == 0
  if (Op1->hasOneUse() &&
      match(Op1, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
      MaskedValueIsZero(Op0, C1->getValue())) {
    Value *NOr = Builder->CreateOr(A, Op0);
    NOr->takeName(Op0);
    return BinaryOperator::CreateXor(NOr, C1);
  }

  // (A & C)|(B & D)
  Value *C = 0, *D = 0;
  if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
      match(Op1, m_And(m_Value(B), m_Value(D)))) {
    Value *V1 = 0, *V2 = 0;
    C1 = dyn_cast<ConstantInt>(C);
    C2 = dyn_cast<ConstantInt>(D);
    if (C1 && C2) {  // (A & C1)|(B & C2)
      // If we have: ((V + N) & C1) | (V & C2)
      // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
      // replace with V+N.
      if (C1->getValue() == ~C2->getValue()) {
        if ((C2->getValue() & (C2->getValue()+1)) == 0 && // C2 == 0+1+
            match(A, m_Add(m_Value(V1), m_Value(V2)))) {
          // Add commutes, try both ways.
          if (V1 == B && MaskedValueIsZero(V2, C2->getValue()))
            return ReplaceInstUsesWith(I, A);
          if (V2 == B && MaskedValueIsZero(V1, C2->getValue()))
            return ReplaceInstUsesWith(I, A);
        }
        // Or commutes, try both ways.
        if ((C1->getValue() & (C1->getValue()+1)) == 0 &&
            match(B, m_Add(m_Value(V1), m_Value(V2)))) {
          // Add commutes, try both ways.
          if (V1 == A && MaskedValueIsZero(V2, C1->getValue()))
            return ReplaceInstUsesWith(I, B);
          if (V2 == A && MaskedValueIsZero(V1, C1->getValue()))
            return ReplaceInstUsesWith(I, B);
        }
      }

      if ((C1->getValue() & C2->getValue()) == 0) {
        // ((V | N) & C1) | (V & C2) --> (V|N) & (C1|C2)
        // iff (C1&C2) == 0 and (N&~C1) == 0
        if (match(A, m_Or(m_Value(V1), m_Value(V2))) &&
            ((V1 == B && MaskedValueIsZero(V2, ~C1->getValue())) ||  // (V|N)
             (V2 == B && MaskedValueIsZero(V1, ~C1->getValue()))))   // (N|V)
          return BinaryOperator::CreateAnd(A,
                               ConstantInt::get(A->getContext(),
                                                C1->getValue()|C2->getValue()));
        // Or commutes, try both ways.
        if (match(B, m_Or(m_Value(V1), m_Value(V2))) &&
            ((V1 == A && MaskedValueIsZero(V2, ~C2->getValue())) ||  // (V|N)
             (V2 == A && MaskedValueIsZero(V1, ~C2->getValue()))))   // (N|V)
          return BinaryOperator::CreateAnd(B,
                               ConstantInt::get(B->getContext(),
                                                C1->getValue()|C2->getValue()));

        // ((V|C3)&C1) | ((V|C4)&C2) --> (V|C3|C4)&(C1|C2)
        // iff (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0.
        ConstantInt *C3 = 0, *C4 = 0;
        if (match(A, m_Or(m_Value(V1), m_ConstantInt(C3))) &&
            (C3->getValue() & ~C1->getValue()) == 0 &&
            match(B, m_Or(m_Specific(V1), m_ConstantInt(C4))) &&
            (C4->getValue() & ~C2->getValue()) == 0) {
          V2 = Builder->CreateOr(V1, ConstantExpr::getOr(C3, C4), "bitfield");
          return BinaryOperator::CreateAnd(V2,
                               ConstantInt::get(B->getContext(),
                                                C1->getValue()|C2->getValue()));
        }
      }
    }

    // (A & (C0?-1:0)) | (B & ~(C0?-1:0)) -> C0 ? A : B, and commuted variants.
    // Don't do this for vector select idioms; the code generator doesn't
    // handle them well yet.
    if (!I.getType()->isVectorTy()) {
      if (Instruction *Match = MatchSelectFromAndOr(A, B, C, D))
        return Match;
      if (Instruction *Match = MatchSelectFromAndOr(B, A, D, C))
        return Match;
      if (Instruction *Match = MatchSelectFromAndOr(C, B, A, D))
        return Match;
      if (Instruction *Match = MatchSelectFromAndOr(D, A, B, C))
        return Match;
    }

    // ((A&~B)|(~A&B)) -> A^B
    if ((match(C, m_Not(m_Specific(D))) &&
         match(B, m_Not(m_Specific(A)))))
      return BinaryOperator::CreateXor(A, D);
    // ((~B&A)|(~A&B)) -> A^B
    if ((match(A, m_Not(m_Specific(D))) &&
         match(B, m_Not(m_Specific(C)))))
      return BinaryOperator::CreateXor(C, D);
    // ((A&~B)|(B&~A)) -> A^B
    if ((match(C, m_Not(m_Specific(B))) &&
         match(D, m_Not(m_Specific(A)))))
      return BinaryOperator::CreateXor(A, B);
    // ((~B&A)|(B&~A)) -> A^B
    if ((match(A, m_Not(m_Specific(B))) &&
         match(D, m_Not(m_Specific(C)))))
      return BinaryOperator::CreateXor(C, B);

    // ((A|B)&1)|(B&-2) -> (A&1) | B
    if (match(A, m_Or(m_Value(V1), m_Specific(B))) ||
        match(A, m_Or(m_Specific(B), m_Value(V1)))) {
      Instruction *Ret = FoldOrWithConstants(I, Op1, V1, B, C);
      if (Ret) return Ret;
    }
    // (B&-2)|((A|B)&1) -> (A&1) | B
    if (match(B, m_Or(m_Specific(A), m_Value(V1))) ||
        match(B, m_Or(m_Value(V1), m_Specific(A)))) {
      Instruction *Ret = FoldOrWithConstants(I, Op0, A, V1, D);
      if (Ret) return Ret;
    }
  }

  // (X >> Z) | (Y >> Z)  -> (X|Y) >> Z  for all shifts.
  if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
    if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
      if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
          SI0->getOperand(1) == SI1->getOperand(1) &&
          (SI0->hasOneUse() || SI1->hasOneUse())) {
        Value *NewOp = Builder->CreateOr(SI0->getOperand(0), SI1->getOperand(0),
                                         SI0->getName());
        return BinaryOperator::Create(SI1->getOpcode(), NewOp,
                                      SI1->getOperand(1));
      }
  }

  // (~A | ~B) == (~(A & B)) - De Morgan's Law
  if (Value *Op0NotVal = dyn_castNotVal(Op0))
    if (Value *Op1NotVal = dyn_castNotVal(Op1))
      if (Op0->hasOneUse() && Op1->hasOneUse()) {
        Value *And = Builder->CreateAnd(Op0NotVal, Op1NotVal,
                                        I.getName()+".demorgan");
        return BinaryOperator::CreateNot(And);
      }

  // Canonicalize xor to the RHS.
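  // This lets the xor-related folds below look only at Op1.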
  if (match(Op0, m_Xor(m_Value(), m_Value())))
    std::swap(Op0, Op1);

  // A | ( A ^ B) -> A |  B
  // A | (~A ^ B) -> A | ~B
  if (match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
    if (Op0 == A || Op0 == B)
      return BinaryOperator::CreateOr(A, B);

    if (Op1->hasOneUse() && match(A, m_Not(m_Specific(Op0)))) {
      Value *Not = Builder->CreateNot(B, B->getName()+".not");
      return BinaryOperator::CreateOr(Not, Op0);
    }
    if (Op1->hasOneUse() && match(B, m_Not(m_Specific(Op0)))) {
      Value *Not = Builder->CreateNot(A, A->getName()+".not");
      return BinaryOperator::CreateOr(Not, Op0);
    }
  }

  // A | ~(A | B) -> A | ~B
  // A | ~(A ^ B) -> A | ~B
  if (match(Op1, m_Not(m_Value(A))))
    if (BinaryOperator *B = dyn_cast<BinaryOperator>(A))
      if ((Op0 == B->getOperand(0) || Op0 == B->getOperand(1)) &&
          Op1->hasOneUse() && (B->getOpcode() == Instruction::Or ||
                               B->getOpcode() == Instruction::Xor)) {
        Value *NotOp = Op0 == B->getOperand(0) ? B->getOperand(1) :
                                                 B->getOperand(0);
        Value *Not = Builder->CreateNot(NotOp, NotOp->getName()+".not");
        return BinaryOperator::CreateOr(Not, Op0);
      }

  if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
    if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
      if (Value *Res = FoldOrOfICmps(LHS, RHS))
        return ReplaceInstUsesWith(I, Res);

  // (fcmp uno x, c) | (fcmp uno y, c)  -> (fcmp uno x, y)
  if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
    if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
      if (Value *Res = FoldOrOfFCmps(LHS, RHS))
        return ReplaceInstUsesWith(I, Res);

  // fold (or (cast A), (cast B)) -> (cast (or A, B))
  if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
    CastInst *Op1C = dyn_cast<CastInst>(Op1);
    if (Op1C && Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind?
      Type *SrcTy = Op0C->getOperand(0)->getType();
      if (SrcTy == Op1C->getOperand(0)->getType() &&
          SrcTy->isIntOrIntVectorTy()) {
        Value *Op0COp = Op0C->getOperand(0), *Op1COp = Op1C->getOperand(0);

        if ((!isa<ICmpInst>(Op0COp) || !isa<ICmpInst>(Op1COp)) &&
            // Only do this if the casts both really cause code to be
            // generated.
            ShouldOptimizeCast(Op0C->getOpcode(), Op0COp, I.getType()) &&
            ShouldOptimizeCast(Op1C->getOpcode(), Op1COp, I.getType())) {
          Value *NewOp = Builder->CreateOr(Op0COp, Op1COp, I.getName());
          return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
        }

        // If this is or(cast(icmp), cast(icmp)), try to fold this even if the
        // cast is otherwise not optimizable.  This happens for vector sexts.
        if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1COp))
          if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0COp))
            if (Value *Res = FoldOrOfICmps(LHS, RHS))
              return CastInst::Create(Op0C->getOpcode(), Res, I.getType());

        // If this is or(cast(fcmp), cast(fcmp)), try to fold this even if the
        // cast is otherwise not optimizable.  This happens for vector sexts.
        if (FCmpInst *RHS = dyn_cast<FCmpInst>(Op1COp))
          if (FCmpInst *LHS = dyn_cast<FCmpInst>(Op0COp))
            if (Value *Res = FoldOrOfFCmps(LHS, RHS))
              return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
      }
    }
  }

  // or(sext(A), B) -> A ? -1 : B where A is an i1
  // or(A, sext(B)) -> B ? -1 : A where B is an i1
  if (match(Op0, m_SExt(m_Value(A))) && A->getType()->isIntegerTy(1))
    return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op1);
  if (match(Op1, m_SExt(m_Value(A))) && A->getType()->isIntegerTy(1))
    return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op0);

  // Note: If we've gotten to the point of visiting the outer OR, then the
  // inner one couldn't be simplified.  If it was a constant, then it won't
  // be simplified by a later pass either, so we try swapping the inner/outer
  // ORs in the hope that we'll be able to simplify it this way.
  // (X|C) | V --> (X|V) | C
  if (Op0->hasOneUse() && !isa<ConstantInt>(Op1) &&
      match(Op0, m_Or(m_Value(A), m_ConstantInt(C1)))) {
    Value *Inner = Builder->CreateOr(A, Op1);
    Inner->takeName(Op0);
    return BinaryOperator::CreateOr(Inner, C1);
  }

  return Changed ? &I : 0;
}

Instruction *InstCombiner::visitXor(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyXorInst(Op0, Op1, TD))
    return ReplaceInstUsesWith(I, V);

  // (A&B)^(A&C) -> A&(B^C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return ReplaceInstUsesWith(I, V);

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  // Is this a ~ operation?
  if (Value *NotOp = dyn_castNotVal(&I)) {
    if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(NotOp)) {
      if (Op0I->getOpcode() == Instruction::And ||
          Op0I->getOpcode() == Instruction::Or) {
        // ~(~X & Y) --> (X | ~Y) - De Morgan's Law
        // ~(~X | Y) --> (X & ~Y) - De Morgan's Law
        if (dyn_castNotVal(Op0I->getOperand(1)))
          Op0I->swapOperands();
        if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0))) {
          Value *NotY =
            Builder->CreateNot(Op0I->getOperand(1),
                               Op0I->getOperand(1)->getName()+".not");
          if (Op0I->getOpcode() == Instruction::And)
            return BinaryOperator::CreateOr(Op0NotVal, NotY);
          return BinaryOperator::CreateAnd(Op0NotVal, NotY);
        }

        // ~(X & Y) --> (~X | ~Y) - De Morgan's Law
        // ~(X | Y) --> (~X & ~Y) - De Morgan's Law
        if (isFreeToInvert(Op0I->getOperand(0)) &&
            isFreeToInvert(Op0I->getOperand(1))) {
          Value *NotX =
            Builder->CreateNot(Op0I->getOperand(0), "notlhs");
          Value *NotY =
            Builder->CreateNot(Op0I->getOperand(1), "notrhs");
          if (Op0I->getOpcode() == Instruction::And)
            return BinaryOperator::CreateOr(NotX, NotY);
          return BinaryOperator::CreateAnd(NotX, NotY);
        }

      } else if (Op0I->getOpcode() == Instruction::AShr) {
        // ~(~X >>s Y) --> (X >>s Y)
        if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0)))
          return BinaryOperator::CreateAShr(Op0NotVal, Op0I->getOperand(1));
      }
    }
  }

  if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
    if (RHS->isOne() && Op0->hasOneUse())
      // xor (cmp A, B), true = not (cmp A, B) = !cmp A, B
      if (CmpInst *CI = dyn_cast<CmpInst>(Op0))
        return CmpInst::Create(CI->getOpcode(),
                               CI->getInversePredicate(),
                               CI->getOperand(0), CI->getOperand(1));

    // fold (xor(zext(cmp)), 1) and (xor(sext(cmp)), -1) to ext(!cmp).
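    // The constant must be exactly the extension of i1 true (1 for zext,
    // -1 for sext) for the fold below to be valid.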
    if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
      if (CmpInst *CI = dyn_cast<CmpInst>(Op0C->getOperand(0))) {
        if (CI->hasOneUse() && Op0C->hasOneUse()) {
          Instruction::CastOps Opcode = Op0C->getOpcode();
          if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
              (RHS == ConstantExpr::getCast(Opcode,
                                        ConstantInt::getTrue(I.getContext()),
                                            Op0C->getDestTy()))) {
            CI->setPredicate(CI->getInversePredicate());
            return CastInst::Create(Opcode, CI, Op0C->getType());
          }
        }
      }
    }

    if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
      // ~(c-X) == X-c-1 == X+(-c-1)
      if (Op0I->getOpcode() == Instruction::Sub && RHS->isAllOnesValue())
        if (Constant *Op0I0C = dyn_cast<Constant>(Op0I->getOperand(0))) {
          Constant *NegOp0I0C = ConstantExpr::getNeg(Op0I0C);
          Constant *ConstantRHS = ConstantExpr::getSub(NegOp0I0C,
                                              ConstantInt::get(I.getType(), 1));
          return BinaryOperator::CreateAdd(Op0I->getOperand(1), ConstantRHS);
        }

      if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
        if (Op0I->getOpcode() == Instruction::Add) {
          // ~(X+c) --> (-c-1)-X
          if (RHS->isAllOnesValue()) {
            Constant *NegOp0CI = ConstantExpr::getNeg(Op0CI);
            return BinaryOperator::CreateSub(
                                   ConstantExpr::getSub(NegOp0CI,
                                             ConstantInt::get(I.getType(), 1)),
                                   Op0I->getOperand(0));
          } else if (RHS->getValue().isSignBit()) {
            // (X + C) ^ signbit -> (X + C + signbit)
            Constant *C = ConstantInt::get(I.getContext(),
                                           RHS->getValue() + Op0CI->getValue());
            return BinaryOperator::CreateAdd(Op0I->getOperand(0), C);
          }
        } else if (Op0I->getOpcode() == Instruction::Or) {
          // (X|C1)^C2 -> X^(C1|C2) iff (X&C1) == 0
          if (MaskedValueIsZero(Op0I->getOperand(0), Op0CI->getValue())) {
            Constant *NewRHS = ConstantExpr::getOr(Op0CI, RHS);
            // Anything in both C1 and C2 is known to be zero, remove it from
            // NewRHS.
            Constant *CommonBits = ConstantExpr::getAnd(Op0CI, RHS);
            NewRHS = ConstantExpr::getAnd(NewRHS,
                                          ConstantExpr::getNot(CommonBits));
            Worklist.Add(Op0I);
            I.setOperand(0, Op0I->getOperand(0));
            I.setOperand(1, NewRHS);
            return &I;
          }
        }
      }
    }

    // Try to fold constant and into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;
    if (isa<PHINode>(Op0))
      if (Instruction *NV = FoldOpIntoPhi(I))
        return NV;
  }

  BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1);
  if (Op1I) {
    Value *A, *B;
    if (match(Op1I, m_Or(m_Value(A), m_Value(B)))) {
      if (A == Op0) {                                // B^(B|A) == (A|B)^B
        Op1I->swapOperands();
        I.swapOperands();
        std::swap(Op0, Op1);
      } else if (B == Op0) {                         // B^(A|B) == (A|B)^B
        I.swapOperands();     // Simplified below.
        std::swap(Op0, Op1);
      }
    } else if (match(Op1I, m_And(m_Value(A), m_Value(B))) &&
               Op1I->hasOneUse()) {
      if (A == Op0) {                                // A^(A&B) -> A^(B&A)
        Op1I->swapOperands();
        std::swap(A, B);
      }
      if (B == Op0) {                                // A^(B&A) -> (B&A)^A
        I.swapOperands();     // Simplified below.
        std::swap(Op0, Op1);
      }
    }
  }

  BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0);
  if (Op0I) {
    Value *A, *B;
    if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
        Op0I->hasOneUse()) {
      if (A == Op1)                                  // (B|A)^B == (A|B)^B
        std::swap(A, B);
      if (B == Op1)                                  // (A|B)^B == A & ~B
        return BinaryOperator::CreateAnd(A, Builder->CreateNot(Op1));
    } else if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
               Op0I->hasOneUse()) {
      if (A == Op1)                                  // (A&B)^A -> (B&A)^A
        std::swap(A, B);
      if (B == Op1 &&                                // (B&A)^A == ~B & A
          !isa<ConstantInt>(Op1)) {  // Canonical form is (B&C)^C
        return BinaryOperator::CreateAnd(Builder->CreateNot(A), Op1);
      }
    }
  }

  // (X >> Z) ^ (Y >> Z)  -> (X^Y) >> Z  for all shifts.
  if (Op0I && Op1I && Op0I->isShift() &&
      Op0I->getOpcode() == Op1I->getOpcode() &&
      Op0I->getOperand(1) == Op1I->getOperand(1) &&
      (Op0I->hasOneUse() || Op1I->hasOneUse())) {
    Value *NewOp =
      Builder->CreateXor(Op0I->getOperand(0), Op1I->getOperand(0),
                         Op0I->getName());
    return BinaryOperator::Create(Op1I->getOpcode(), NewOp,
                                  Op1I->getOperand(1));
  }

  if (Op0I && Op1I) {
    Value *A, *B, *C, *D;
    // (A & B)^(A | B) -> A ^ B
    if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
        match(Op1I, m_Or(m_Value(C), m_Value(D)))) {
      if ((A == C && B == D) || (A == D && B == C))
        return BinaryOperator::CreateXor(A, B);
    }
    // (A | B)^(A & B) -> A ^ B
    if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
        match(Op1I, m_And(m_Value(C), m_Value(D)))) {
      if ((A == C && B == D) || (A == D && B == C))
        return BinaryOperator::CreateXor(A, B);
    }
  }

  // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
  if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
    if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
      if (PredicatesFoldable(LHS->getPredicate(), RHS->getPredicate())) {
        if (LHS->getOperand(0) == RHS->getOperand(1) &&
            LHS->getOperand(1) == RHS->getOperand(0))
          LHS->swapOperands();
        if (LHS->getOperand(0) == RHS->getOperand(0) &&
            LHS->getOperand(1) == RHS->getOperand(1)) {
          Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
          unsigned Code = getICmpCode(LHS) ^ getICmpCode(RHS);
          bool isSigned = LHS->isSigned() || RHS->isSigned();
          return ReplaceInstUsesWith(I,
                                 getICmpValue(isSigned, Code, Op0, Op1,
                                              Builder));
        }
      }

  // fold (xor (cast A), (cast B)) -> (cast (xor A, B))
  if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
    if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
      if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind?
        Type *SrcTy = Op0C->getOperand(0)->getType();
        if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isIntegerTy() &&
            // Only do this if the casts both really cause code to be generated.
            ShouldOptimizeCast(Op0C->getOpcode(), Op0C->getOperand(0),
                               I.getType()) &&
            ShouldOptimizeCast(Op1C->getOpcode(), Op1C->getOperand(0),
                               I.getType())) {
          Value *NewOp = Builder->CreateXor(Op0C->getOperand(0),
                                            Op1C->getOperand(0), I.getName());
          return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
        }
      }
  }

  return Changed ? &I : 0;
}