//===- InstCombineAndOrXor.cpp --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitAnd, visitOr, and visitXor functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Transforms/Utils/CmpInstAnalysis.h"
using namespace llvm;
using namespace PatternMatch;


/// AddOne - Add one to a ConstantInt.
static Constant *AddOne(ConstantInt *C) {
  return ConstantInt::get(C->getContext(), C->getValue() + 1);
}
/// SubOne - Subtract one from a ConstantInt.
static Constant *SubOne(ConstantInt *C) {
  return ConstantInt::get(C->getContext(), C->getValue() - 1);
}

/// isFreeToInvert - Return true if the specified value is free to invert (apply
/// ~ to). This happens in cases where the ~ can be eliminated.
static inline bool isFreeToInvert(Value *V) {
  // ~(~(X)) -> X.
  if (BinaryOperator::isNot(V))
    return true;

  // Constants can be considered to be not'ed values.
  if (isa<ConstantInt>(V))
    return true;

  // Compares can be inverted if they have a single use.
  if (CmpInst *CI = dyn_cast<CmpInst>(V))
    return CI->hasOneUse();

  return false;
}

static inline Value *dyn_castNotVal(Value *V) {
  // If this is not(not(x)) don't return that this is a not: we want the two
  // not's to be folded first.
  if (BinaryOperator::isNot(V)) {
    Value *Operand = BinaryOperator::getNotArgument(V);
    if (!isFreeToInvert(Operand))
      return Operand;
  }

  // Constants can be considered to be not'ed values...
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantInt::get(C->getType(), ~C->getValue());
  return 0;
}

/// getFCmpCode - Similar to getICmpCode but for FCmpInst. This encodes a fcmp
/// predicate into a three bit mask. It also returns whether it is an ordered
/// predicate by reference.
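/// For example, FCMP_OLT and FCMP_ULT both encode to 4 (binary 100); only the
/// returned isOrdered flag distinguishes them.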
static unsigned getFCmpCode(FCmpInst::Predicate CC, bool &isOrdered) {
  isOrdered = false;
  switch (CC) {
  case FCmpInst::FCMP_ORD: isOrdered = true; return 0;  // 000
  case FCmpInst::FCMP_UNO:                   return 0;  // 000
  case FCmpInst::FCMP_OGT: isOrdered = true; return 1;  // 001
  case FCmpInst::FCMP_UGT:                   return 1;  // 001
  case FCmpInst::FCMP_OEQ: isOrdered = true; return 2;  // 010
  case FCmpInst::FCMP_UEQ:                   return 2;  // 010
  case FCmpInst::FCMP_OGE: isOrdered = true; return 3;  // 011
  case FCmpInst::FCMP_UGE:                   return 3;  // 011
  case FCmpInst::FCMP_OLT: isOrdered = true; return 4;  // 100
  case FCmpInst::FCMP_ULT:                   return 4;  // 100
  case FCmpInst::FCMP_ONE: isOrdered = true; return 5;  // 101
  case FCmpInst::FCMP_UNE:                   return 5;  // 101
  case FCmpInst::FCMP_OLE: isOrdered = true; return 6;  // 110
  case FCmpInst::FCMP_ULE:                   return 6;  // 110
    // True -> 7
  default:
    // Not expecting FCMP_FALSE and FCMP_TRUE.
    llvm_unreachable("Unexpected FCmp predicate!");
  }
}

/// getNewICmpValue - This is the complement of getICmpCode, which turns an
/// opcode and two operands into either a constant true or false, or a brand
/// new ICmp instruction. The sign is passed in to determine which kind
/// of predicate to use in the new icmp instruction.
static Value *getNewICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
                              InstCombiner::BuilderTy *Builder) {
  ICmpInst::Predicate NewPred;
  if (Value *NewConstant = getICmpValue(Sign, Code, LHS, RHS, NewPred))
    return NewConstant;
  return Builder->CreateICmp(NewPred, LHS, RHS);
}

/// getFCmpValue - This is the complement of getFCmpCode, which turns an
/// opcode and two operands into either a constant or a new FCmp instruction.
/// isordered is passed in to determine which kind of predicate to use in the
/// new fcmp instruction.
static Value *getFCmpValue(bool isordered, unsigned code,
                           Value *LHS, Value *RHS,
                           InstCombiner::BuilderTy *Builder) {
  CmpInst::Predicate Pred;
  switch (code) {
  default: llvm_unreachable("Illegal FCmp code!");
  case 0: Pred = isordered ? FCmpInst::FCMP_ORD : FCmpInst::FCMP_UNO; break;
  case 1: Pred = isordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT; break;
  case 2: Pred = isordered ? FCmpInst::FCMP_OEQ : FCmpInst::FCMP_UEQ; break;
  case 3: Pred = isordered ? FCmpInst::FCMP_OGE : FCmpInst::FCMP_UGE; break;
  case 4: Pred = isordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT; break;
  case 5: Pred = isordered ? FCmpInst::FCMP_ONE : FCmpInst::FCMP_UNE; break;
  case 6: Pred = isordered ? FCmpInst::FCMP_OLE : FCmpInst::FCMP_ULE; break;
  case 7:
    if (!isordered) return ConstantInt::getTrue(LHS->getContext());
    Pred = FCmpInst::FCMP_ORD; break;
  }
  return Builder->CreateFCmp(Pred, LHS, RHS);
}

// OptAndOp - This handles expressions of the form ((val OP C1) & C2).  Where
// the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'.  Op is
// guaranteed to be a binary operator.
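// For example, ((X ^ 12) & 10) becomes ((X & 10) ^ 8), since 12 & 10 == 8.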
Instruction *InstCombiner::OptAndOp(Instruction *Op,
                                    ConstantInt *OpRHS,
                                    ConstantInt *AndRHS,
                                    BinaryOperator &TheAnd) {
  Value *X = Op->getOperand(0);
  Constant *Together = 0;
  if (!Op->isShift())
    Together = ConstantExpr::getAnd(AndRHS, OpRHS);

  switch (Op->getOpcode()) {
  case Instruction::Xor:
    if (Op->hasOneUse()) {
      // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
      Value *And = Builder->CreateAnd(X, AndRHS);
      And->takeName(Op);
      return BinaryOperator::CreateXor(And, Together);
    }
    break;
  case Instruction::Or:
    if (Op->hasOneUse()) {
      if (Together != OpRHS) {
        // (X | C1) & C2 --> (X | (C1&C2)) & C2
        Value *Or = Builder->CreateOr(X, Together);
        Or->takeName(Op);
        return BinaryOperator::CreateAnd(Or, AndRHS);
      }

      ConstantInt *TogetherCI = dyn_cast<ConstantInt>(Together);
      if (TogetherCI && !TogetherCI->isZero()) {
        // (X | C1) & C2 --> (X & (C2^(C1&C2))) | C1
        // NOTE: This reduces the number of bits set in the & mask, which
        // can expose opportunities for store narrowing.
        Together = ConstantExpr::getXor(AndRHS, Together);
        Value *And = Builder->CreateAnd(X, Together);
        And->takeName(Op);
        return BinaryOperator::CreateOr(And, OpRHS);
      }
    }

    break;
  case Instruction::Add:
    if (Op->hasOneUse()) {
      // Adding a one to a single bit bit-field should be turned into an XOR
      // of the bit.  First thing to check is to see if this AND is with a
      // single bit constant.
      const APInt &AndRHSV = AndRHS->getValue();

      // If there is only one bit set.
      if (AndRHSV.isPowerOf2()) {
        // Ok, at this point, we know that we are masking the result of the
        // ADD down to exactly one bit.  If the constant we are adding has
        // no bits set below this bit, then we can eliminate the ADD.
        const APInt& AddRHS = OpRHS->getValue();

        // Check to see if any bits below the one bit set in AndRHSV are set.
        if ((AddRHS & (AndRHSV - 1)) == 0) {
          // If not, the only thing that can affect the output of the AND is
          // the bit specified by AndRHSV.  If that bit is set, the effect of
          // the XOR is to toggle the bit.  If it is clear, then the ADD has
          // no effect.
          if ((AddRHS & AndRHSV) == 0) { // Bit is not set, noop
            TheAnd.setOperand(0, X);
            return &TheAnd;
          } else {
            // Pull the XOR out of the AND.
            Value *NewAnd = Builder->CreateAnd(X, AndRHS);
            NewAnd->takeName(Op);
            return BinaryOperator::CreateXor(NewAnd, AndRHS);
          }
        }
      }
    }
    break;

  case Instruction::Shl: {
    // We know that the AND will not produce any of the bits shifted in, so if
    // the anded constant includes them, clear them now!
    //
    uint32_t BitWidth = AndRHS->getType()->getBitWidth();
    uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
    APInt ShlMask(APInt::getHighBitsSet(BitWidth, BitWidth - OpRHSVal));
    ConstantInt *CI = Builder->getInt(AndRHS->getValue() & ShlMask);

    if (CI->getValue() == ShlMask)
      // Masking out bits that the shift already masks.
      return ReplaceInstUsesWith(TheAnd, Op);   // No need for the and.

    if (CI != AndRHS) {                         // Reducing bits set in and.
      TheAnd.setOperand(1, CI);
      return &TheAnd;
    }
    break;
  }
  case Instruction::LShr: {
    // We know that the AND will not produce any of the bits shifted in, so if
    // the anded constant includes them, clear them now!  This only applies to
    // unsigned shifts, because a signed shr may bring in set bits!
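    // (The ashr case below handles the sign-extension case explicitly.)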
    //
    uint32_t BitWidth = AndRHS->getType()->getBitWidth();
    uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
    APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
    ConstantInt *CI = Builder->getInt(AndRHS->getValue() & ShrMask);

    if (CI->getValue() == ShrMask)
      // Masking out bits that the shift already masks.
      return ReplaceInstUsesWith(TheAnd, Op);

    if (CI != AndRHS) {
      TheAnd.setOperand(1, CI);  // Reduce bits set in and cst.
      return &TheAnd;
    }
    break;
  }
  case Instruction::AShr:
    // Signed shr.
    // See if this is shifting in some sign extension, then masking it out
    // with an and.
    if (Op->hasOneUse()) {
      uint32_t BitWidth = AndRHS->getType()->getBitWidth();
      uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
      APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
      Constant *C = Builder->getInt(AndRHS->getValue() & ShrMask);
      if (C == AndRHS) {          // Masking out bits shifted in.
        // (Val ashr C1) & C2 -> (Val lshr C1) & C2
        // Make the argument unsigned.
        Value *ShVal = Op->getOperand(0);
        ShVal = Builder->CreateLShr(ShVal, OpRHS, Op->getName());
        return BinaryOperator::CreateAnd(ShVal, AndRHS, TheAnd.getName());
      }
    }
    break;
  }
  return 0;
}

/// Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise
/// (V < Lo || V >= Hi).  In practice, we emit the more efficient
/// (V-Lo) <u Hi-Lo.  This method expects that Lo <= Hi.  isSigned indicates
/// whether to treat V, Lo, and Hi as signed or not.  IB is the location to
/// insert new instructions.
Value *InstCombiner::InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
                                     bool isSigned, bool Inside) {
  assert(cast<ConstantInt>(ConstantExpr::getICmp((isSigned ?
            ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE), Lo, Hi))->getZExtValue() &&
         "Lo is not <= Hi in range emission code!");

  if (Inside) {
    if (Lo == Hi)  // Trivially false.
      return Builder->getFalse();

    // V >= Min && V < Hi --> V < Hi
    if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
      ICmpInst::Predicate pred = (isSigned ?
        ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT);
      return Builder->CreateICmp(pred, V, Hi);
    }

    // Emit V-Lo <u Hi-Lo
    Constant *NegLo = ConstantExpr::getNeg(Lo);
    Value *Add = Builder->CreateAdd(V, NegLo, V->getName() + ".off");
    Constant *UpperBound = ConstantExpr::getAdd(NegLo, Hi);
    return Builder->CreateICmpULT(Add, UpperBound);
  }

  if (Lo == Hi)  // Trivially true.
    return Builder->getTrue();

  // V < Min || V >= Hi -> V > Hi-1
  Hi = SubOne(cast<ConstantInt>(Hi));
  if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
    ICmpInst::Predicate pred = (isSigned ?
      ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT);
    return Builder->CreateICmp(pred, V, Hi);
  }

  // Emit V-Lo >u Hi-1-Lo
  // Note that Hi has already had one subtracted from it, above.
  ConstantInt *NegLo = cast<ConstantInt>(ConstantExpr::getNeg(Lo));
  Value *Add = Builder->CreateAdd(V, NegLo, V->getName() + ".off");
  Constant *LowerBound = ConstantExpr::getAdd(NegLo, Hi);
  return Builder->CreateICmpUGT(Add, LowerBound);
}

// isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s with
// any number of 0s on either side.  The 1s are allowed to wrap from LSB to
// MSB, so 0x000FFF0, 0x0000FFFF, and 0xFF0000FF are all runs.  0x0F0F0000 is
// not, since all 1s are not contiguous.
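// For example, on a 16-bit value the mask 0x0FF0 yields MB == 5 and ME == 12,
// the 1-based begin/end positions of the run (bits 4..11 counting from zero).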
static bool isRunOfOnes(ConstantInt *Val, uint32_t &MB, uint32_t &ME) {
  const APInt& V = Val->getValue();
  uint32_t BitWidth = Val->getType()->getBitWidth();
  if (!APIntOps::isShiftedMask(BitWidth, V)) return false;

  // look for the first zero bit after the run of ones
  MB = BitWidth - ((V - 1) ^ V).countLeadingZeros();
  // look for the first non-zero bit
  ME = V.getActiveBits();
  return true;
}

/// FoldLogicalPlusAnd - This is part of an expression (LHS +/- RHS) & Mask,
/// where isSub determines whether the operator is a sub.  If we can fold one of
/// the following xforms:
///
/// ((A & N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == Mask
/// ((A | N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
/// ((A ^ N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
///
/// return (A +/- B).
///
Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
                                        ConstantInt *Mask, bool isSub,
                                        Instruction &I) {
  Instruction *LHSI = dyn_cast<Instruction>(LHS);
  if (!LHSI || LHSI->getNumOperands() != 2 ||
      !isa<ConstantInt>(LHSI->getOperand(1))) return 0;

  ConstantInt *N = cast<ConstantInt>(LHSI->getOperand(1));

  switch (LHSI->getOpcode()) {
  default: return 0;
  case Instruction::And:
    if (ConstantExpr::getAnd(N, Mask) == Mask) {
      // If the AndRHS is a power of two minus one (0+1+), this is simple.
      if ((Mask->getValue().countLeadingZeros() +
           Mask->getValue().countPopulation()) ==
          Mask->getValue().getBitWidth())
        break;

      // Otherwise, if Mask is 0+1+0+, and if B is known to have the low 0+
      // part, we don't need any explicit masks to take them out of A.  If that
      // is all N is, ignore it.
      uint32_t MB = 0, ME = 0;
      if (isRunOfOnes(Mask, MB, ME)) {  // begin/end bit of run, inclusive
        uint32_t BitWidth = cast<IntegerType>(RHS->getType())->getBitWidth();
        APInt Mask(APInt::getLowBitsSet(BitWidth, MB - 1));
        if (MaskedValueIsZero(RHS, Mask))
          break;
      }
    }
    return 0;
  case Instruction::Or:
  case Instruction::Xor:
    // If the AndRHS is a power of two minus one (0+1+), and N&Mask == 0
    if ((Mask->getValue().countLeadingZeros() +
         Mask->getValue().countPopulation()) == Mask->getValue().getBitWidth()
        && ConstantExpr::getAnd(N, Mask)->isNullValue())
      break;
    return 0;
  }

  if (isSub)
    return Builder->CreateSub(LHSI->getOperand(0), RHS, "fold");
  return Builder->CreateAdd(LHSI->getOperand(0), RHS, "fold");
}
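
// For example, ((A & 0xFF) + B) & 0x0F becomes (A + B) & 0x0F, because
// 0xFF & 0x0F == 0x0F (the first xform above).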

/// enum for classifying (icmp eq (A & B), C) and (icmp ne (A & B), C)
/// One of A and B is considered the mask, the other the value.  This is
/// described as the "AMask" or "BMask" part of the enum.  If the enum
/// contains only "Mask", then both A and B can be considered masks.
/// If A is the mask, then it was proven that (A & C) == C.  This
/// is trivial if C == A, or C == 0.  If both A and C are constants, this
/// proof is also easy.
/// For the following explanations we assume that A is the mask.
/// The part "AllOnes" declares that the comparison is true only
/// if (A & B) == A, or all bits of A are set in B.
///   Example: (icmp eq (A & 3), 3) -> FoldMskICmp_AMask_AllOnes
/// The part "AllZeroes" declares that the comparison is true only
/// if (A & B) == 0, or all bits of A are cleared in B.
///   Example: (icmp eq (A & 3), 0) -> FoldMskICmp_Mask_AllZeroes
/// The part "Mixed" declares that (A & B) == C and C might or might not
/// contain any number of one bits and zero bits.
///   Example: (icmp eq (A & 3), 1) -> FoldMskICmp_AMask_Mixed
/// The part "Not" means that in the above descriptions "==" should be replaced
/// by "!=".
///   Example: (icmp ne (A & 3), 3) -> FoldMskICmp_AMask_NotAllOnes
/// If the mask A contains a single bit, then the following is equivalent:
///    (icmp eq (A & B), A) equals (icmp ne (A & B), 0)
///    (icmp ne (A & B), A) equals (icmp eq (A & B), 0)
enum MaskedICmpType {
  FoldMskICmp_AMask_AllOnes     =   1,
  FoldMskICmp_AMask_NotAllOnes  =   2,
  FoldMskICmp_BMask_AllOnes     =   4,
  FoldMskICmp_BMask_NotAllOnes  =   8,
  FoldMskICmp_Mask_AllZeroes    =  16,
  FoldMskICmp_Mask_NotAllZeroes =  32,
  FoldMskICmp_AMask_Mixed       =  64,
  FoldMskICmp_AMask_NotMixed    = 128,
  FoldMskICmp_BMask_Mixed       = 256,
  FoldMskICmp_BMask_NotMixed    = 512
};

/// return the set of pattern classes (from MaskedICmpType)
/// that (icmp SCC (A & B), C) satisfies
static unsigned getTypeOfMaskedICmp(Value* A, Value* B, Value* C,
                                    ICmpInst::Predicate SCC)
{
  ConstantInt *ACst = dyn_cast<ConstantInt>(A);
  ConstantInt *BCst = dyn_cast<ConstantInt>(B);
  ConstantInt *CCst = dyn_cast<ConstantInt>(C);
  bool icmp_eq = (SCC == ICmpInst::ICMP_EQ);
  bool icmp_abit = (ACst != 0 && !ACst->isZero() &&
                    ACst->getValue().isPowerOf2());
  bool icmp_bbit = (BCst != 0 && !BCst->isZero() &&
                    BCst->getValue().isPowerOf2());
  unsigned result = 0;
  if (CCst != 0 && CCst->isZero()) {
    // if C is zero, then both A and B qualify as mask
    result |= (icmp_eq ? (FoldMskICmp_Mask_AllZeroes |
                          FoldMskICmp_Mask_AllZeroes |
                          FoldMskICmp_AMask_Mixed |
                          FoldMskICmp_BMask_Mixed)
                       : (FoldMskICmp_Mask_NotAllZeroes |
                          FoldMskICmp_Mask_NotAllZeroes |
                          FoldMskICmp_AMask_NotMixed |
                          FoldMskICmp_BMask_NotMixed));
    if (icmp_abit)
      result |= (icmp_eq ? (FoldMskICmp_AMask_NotAllOnes |
                            FoldMskICmp_AMask_NotMixed)
                         : (FoldMskICmp_AMask_AllOnes |
                            FoldMskICmp_AMask_Mixed));
    if (icmp_bbit)
      result |= (icmp_eq ? (FoldMskICmp_BMask_NotAllOnes |
                            FoldMskICmp_BMask_NotMixed)
                         : (FoldMskICmp_BMask_AllOnes |
                            FoldMskICmp_BMask_Mixed));
    return result;
  }
  if (A == C) {
    result |= (icmp_eq ? (FoldMskICmp_AMask_AllOnes |
                          FoldMskICmp_AMask_Mixed)
                       : (FoldMskICmp_AMask_NotAllOnes |
                          FoldMskICmp_AMask_NotMixed));
    if (icmp_abit)
      result |= (icmp_eq ? (FoldMskICmp_Mask_NotAllZeroes |
                            FoldMskICmp_AMask_NotMixed)
                         : (FoldMskICmp_Mask_AllZeroes |
                            FoldMskICmp_AMask_Mixed));
  } else if (ACst != 0 && CCst != 0 &&
             ConstantExpr::getAnd(ACst, CCst) == CCst) {
    result |= (icmp_eq ? FoldMskICmp_AMask_Mixed
                       : FoldMskICmp_AMask_NotMixed);
  }
  if (B == C) {
    result |= (icmp_eq ? (FoldMskICmp_BMask_AllOnes |
                          FoldMskICmp_BMask_Mixed)
                       : (FoldMskICmp_BMask_NotAllOnes |
                          FoldMskICmp_BMask_NotMixed));
    if (icmp_bbit)
      result |= (icmp_eq ? (FoldMskICmp_Mask_NotAllZeroes |
                            FoldMskICmp_BMask_NotMixed)
                         : (FoldMskICmp_Mask_AllZeroes |
                            FoldMskICmp_BMask_Mixed));
  } else if (BCst != 0 && CCst != 0 &&
             ConstantExpr::getAnd(BCst, CCst) == CCst) {
    result |= (icmp_eq ? FoldMskICmp_BMask_Mixed
                       : FoldMskICmp_BMask_NotMixed);
  }
  return result;
}

/// decomposeBitTestICmp - Decompose an icmp into the form ((X & Y) pred Z)
/// if possible.  The returned predicate is either == or !=.  Returns false if
/// decomposition fails.
static bool decomposeBitTestICmp(const ICmpInst *I, ICmpInst::Predicate &Pred,
                                 Value *&X, Value *&Y, Value *&Z) {
  // X < 0 is equivalent to (X & SignBit) != 0.
  if (I->getPredicate() == ICmpInst::ICMP_SLT)
    if (ConstantInt *C = dyn_cast<ConstantInt>(I->getOperand(1)))
      if (C->isZero()) {
        X = I->getOperand(0);
        Y = ConstantInt::get(I->getContext(),
                             APInt::getSignBit(C->getBitWidth()));
        Pred = ICmpInst::ICMP_NE;
        Z = C;
        return true;
      }

  // X > -1 is equivalent to (X & SignBit) == 0.
  if (I->getPredicate() == ICmpInst::ICMP_SGT)
    if (ConstantInt *C = dyn_cast<ConstantInt>(I->getOperand(1)))
      if (C->isAllOnesValue()) {
        X = I->getOperand(0);
        Y = ConstantInt::get(I->getContext(),
                             APInt::getSignBit(C->getBitWidth()));
        Pred = ICmpInst::ICMP_EQ;
        Z = ConstantInt::getNullValue(C->getType());
        return true;
      }

  return false;
}

/// foldLogOpOfMaskedICmpsHelper:
/// handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
/// return the set of pattern classes (from MaskedICmpType)
/// that both LHS and RHS satisfy
static unsigned foldLogOpOfMaskedICmpsHelper(Value*& A,
                                             Value*& B, Value*& C,
                                             Value*& D, Value*& E,
                                             ICmpInst *LHS, ICmpInst *RHS,
                                             ICmpInst::Predicate &LHSCC,
                                             ICmpInst::Predicate &RHSCC) {
  if (LHS->getOperand(0)->getType() != RHS->getOperand(0)->getType()) return 0;
  // vectors are not (yet?) supported
  if (LHS->getOperand(0)->getType()->isVectorTy()) return 0;

  // Here comes the tricky part:
  // LHS might be of the form L11 & L12 == X, X == L21 & L22,
  // and L11 & L12 == L21 & L22. The same goes for RHS.
  // Now we must find those components L** and R** that are equal, so
  // that we can extract the parameters A, B, C, D, and E for the canonical
  // pattern above.
  Value *L1 = LHS->getOperand(0);
  Value *L2 = LHS->getOperand(1);
  Value *L11, *L12, *L21, *L22;
  // Check whether the icmp can be decomposed into a bit test.
  if (decomposeBitTestICmp(LHS, LHSCC, L11, L12, L2)) {
    L21 = L22 = L1 = 0;
  } else {
    // Look for ANDs in the LHS icmp.
    if (match(L1, m_And(m_Value(L11), m_Value(L12)))) {
      if (!match(L2, m_And(m_Value(L21), m_Value(L22))))
        L21 = L22 = 0;
    } else {
      if (!match(L2, m_And(m_Value(L11), m_Value(L12))))
        return 0;
      std::swap(L1, L2);
      L21 = L22 = 0;
    }
  }

  // Bail if LHS was an icmp that can't be decomposed into an equality.
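  // (decomposeBitTestICmp always produces an equality predicate, but the
  // m_And path above leaves LHSCC as the original predicate.)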
  if (!ICmpInst::isEquality(LHSCC))
    return 0;

  Value *R1 = RHS->getOperand(0);
  Value *R2 = RHS->getOperand(1);
  Value *R11, *R12;
  bool ok = false;
  if (decomposeBitTestICmp(RHS, RHSCC, R11, R12, R2)) {
    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11; D = R12;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12; D = R11;
    } else {
      return 0;
    }
    E = R2; R1 = 0; ok = true;
  } else if (match(R1, m_And(m_Value(R11), m_Value(R12)))) {
    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11; D = R12; E = R2; ok = true;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12; D = R11; E = R2; ok = true;
    }
  }

  // Bail if RHS was an icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(RHSCC))
    return 0;

  // Look for ANDs on the right side of the RHS icmp.
  if (!ok && match(R2, m_And(m_Value(R11), m_Value(R12)))) {
    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11; D = R12; E = R1; ok = true;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12; D = R11; E = R1; ok = true;
    } else {
      return 0;
    }
  }
  if (!ok)
    return 0;

  if (L11 == A) {
    B = L12; C = L2;
  } else if (L12 == A) {
    B = L11; C = L2;
  } else if (L21 == A) {
    B = L22; C = L1;
  } else if (L22 == A) {
    B = L21; C = L1;
  }

  unsigned left_type = getTypeOfMaskedICmp(A, B, C, LHSCC);
  unsigned right_type = getTypeOfMaskedICmp(A, D, E, RHSCC);
  return left_type & right_type;
}
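
// A concrete instance of the fold implemented below:
//   (icmp eq (X & 4), 0) & (icmp eq (X & 8), 0)  -->  (icmp eq (X & 12), 0)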
/// foldLogOpOfMaskedICmps:
/// try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
/// into a single (icmp(A & X) ==/!= Y)
static Value* foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS,
                                     ICmpInst::Predicate NEWCC,
                                     llvm::InstCombiner::BuilderTy* Builder) {
  Value *A = 0, *B = 0, *C = 0, *D = 0, *E = 0;
  ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();
  unsigned mask = foldLogOpOfMaskedICmpsHelper(A, B, C, D, E, LHS, RHS,
                                               LHSCC, RHSCC);
  if (mask == 0) return 0;
  assert(ICmpInst::isEquality(LHSCC) && ICmpInst::isEquality(RHSCC) &&
         "foldLogOpOfMaskedICmpsHelper must return an equality predicate.");

  if (NEWCC == ICmpInst::ICMP_NE)
    mask >>= 1; // treat "Not"-states as normal states

  if (mask & FoldMskICmp_Mask_AllZeroes) {
    // (icmp eq (A & B), 0) & (icmp eq (A & D), 0)
    // -> (icmp eq (A & (B|D)), 0)
    Value* newOr = Builder->CreateOr(B, D);
    Value* newAnd = Builder->CreateAnd(A, newOr);
    // we can't use C as zero, because we might actually handle
    //   (icmp ne (A & B), B) & (icmp ne (A & D), D)
    // with B and D having a single bit set
    Value* zero = Constant::getNullValue(A->getType());
    return Builder->CreateICmp(NEWCC, newAnd, zero);
  }
  if (mask & FoldMskICmp_BMask_AllOnes) {
    // (icmp eq (A & B), B) & (icmp eq (A & D), D)
    // -> (icmp eq (A & (B|D)), (B|D))
    Value* newOr = Builder->CreateOr(B, D);
    Value* newAnd = Builder->CreateAnd(A, newOr);
    return Builder->CreateICmp(NEWCC, newAnd, newOr);
  }
  if (mask & FoldMskICmp_AMask_AllOnes) {
    // (icmp eq (A & B), A) & (icmp eq (A & D), A)
    // -> (icmp eq (A & (B&D)), A)
    Value* newAnd1 = Builder->CreateAnd(B, D);
    Value* newAnd = Builder->CreateAnd(A, newAnd1);
    return Builder->CreateICmp(NEWCC, newAnd, A);
  }
  if (mask & FoldMskICmp_BMask_Mixed) {
    // (icmp eq (A & B), C) & (icmp eq (A & D), E)
    // We already know that B & C == C && D & E == E.
    // If we can prove that (B & D) & (C ^ E) == 0, that is, the bits of
    // C and E, which are shared by both the mask B and the mask D, don't
    // contradict, then we can transform to
    // -> (icmp eq (A & (B|D)), (C|E))
    // Currently, we only handle the case of B, C, D, and E being constant.
    ConstantInt *BCst = dyn_cast<ConstantInt>(B);
    if (BCst == 0) return 0;
    ConstantInt *DCst = dyn_cast<ConstantInt>(D);
    if (DCst == 0) return 0;
    // we can't simply use C and E, because we might actually handle
    //   (icmp ne (A & B), B) & (icmp eq (A & D), D)
    // with B and D having a single bit set

    ConstantInt *CCst = dyn_cast<ConstantInt>(C);
    if (CCst == 0) return 0;
    if (LHSCC != NEWCC)
      CCst = dyn_cast<ConstantInt>( ConstantExpr::getXor(BCst, CCst) );
    ConstantInt *ECst = dyn_cast<ConstantInt>(E);
    if (ECst == 0) return 0;
    if (RHSCC != NEWCC)
      ECst = dyn_cast<ConstantInt>( ConstantExpr::getXor(DCst, ECst) );
    ConstantInt* MCst = dyn_cast<ConstantInt>(
      ConstantExpr::getAnd(ConstantExpr::getAnd(BCst, DCst),
                           ConstantExpr::getXor(CCst, ECst)) );
    // if there is a conflict we should actually return a false for the
    // whole construct
    if (!MCst->isZero())
      return 0;
    Value *newOr1 = Builder->CreateOr(B, D);
    Value *newOr2 = ConstantExpr::getOr(CCst, ECst);
    Value *newAnd = Builder->CreateAnd(A, newOr1);
    return Builder->CreateICmp(NEWCC, newAnd, newOr2);
  }
  return 0;
}

/// FoldAndOfICmps - Fold (icmp)&(icmp) if possible.
Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
  ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();

  // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
  if (PredicatesFoldable(LHSCC, RHSCC)) {
    if (LHS->getOperand(0) == RHS->getOperand(1) &&
        LHS->getOperand(1) == RHS->getOperand(0))
      LHS->swapOperands();
    if (LHS->getOperand(0) == RHS->getOperand(0) &&
        LHS->getOperand(1) == RHS->getOperand(1)) {
      Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
      unsigned Code = getICmpCode(LHS) & getICmpCode(RHS);
      bool isSigned = LHS->isSigned() || RHS->isSigned();
      return getNewICmpValue(isSigned, Code, Op0, Op1, Builder);
    }
  }

  // handle (roughly):  (icmp eq (A & B), C) & (icmp eq (A & D), E)
  if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, ICmpInst::ICMP_EQ, Builder))
    return V;

  // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
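  // (The folds below that need a single common operand check Val == Val2
  // further down; the earlier ones can work on two different values.)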
  Value *Val = LHS->getOperand(0), *Val2 = RHS->getOperand(0);
  ConstantInt *LHSCst = dyn_cast<ConstantInt>(LHS->getOperand(1));
  ConstantInt *RHSCst = dyn_cast<ConstantInt>(RHS->getOperand(1));
  if (LHSCst == 0 || RHSCst == 0) return 0;

  if (LHSCst == RHSCst && LHSCC == RHSCC) {
    // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
    // where C is a power of 2
    if (LHSCC == ICmpInst::ICMP_ULT &&
        LHSCst->getValue().isPowerOf2()) {
      Value *NewOr = Builder->CreateOr(Val, Val2);
      return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
    }

    // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
    if (LHSCC == ICmpInst::ICMP_EQ && LHSCst->isZero()) {
      Value *NewOr = Builder->CreateOr(Val, Val2);
      return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
    }
  }

  // (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2
  // where CMAX is the all ones value for the truncated type,
  // iff the lower bits of C2 and CA are zero.
  if (LHSCC == ICmpInst::ICMP_EQ && LHSCC == RHSCC &&
      LHS->hasOneUse() && RHS->hasOneUse()) {
    Value *V;
    ConstantInt *AndCst, *SmallCst = 0, *BigCst = 0;

    // (trunc x) == C1 & (and x, CA) == C2
    // (and x, CA) == C2 & (trunc x) == C1
    if (match(Val2, m_Trunc(m_Value(V))) &&
        match(Val, m_And(m_Specific(V), m_ConstantInt(AndCst)))) {
      SmallCst = RHSCst;
      BigCst = LHSCst;
    } else if (match(Val, m_Trunc(m_Value(V))) &&
               match(Val2, m_And(m_Specific(V), m_ConstantInt(AndCst)))) {
      SmallCst = LHSCst;
      BigCst = RHSCst;
    }

    if (SmallCst && BigCst) {
      unsigned BigBitSize = BigCst->getType()->getBitWidth();
      unsigned SmallBitSize = SmallCst->getType()->getBitWidth();

      // Check that the low bits are zero.
      APInt Low = APInt::getLowBitsSet(BigBitSize, SmallBitSize);
      if ((Low & AndCst->getValue()) == 0 && (Low & BigCst->getValue()) == 0) {
        Value *NewAnd = Builder->CreateAnd(V, Low | AndCst->getValue());
        APInt N = SmallCst->getValue().zext(BigBitSize) | BigCst->getValue();
        Value *NewVal = ConstantInt::get(AndCst->getType()->getContext(), N);
        return Builder->CreateICmp(LHSCC, NewAnd, NewVal);
      }
    }
  }

  // From here on, we only handle:
  //    (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
  if (Val != Val2) return 0;

  // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
  if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
      RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
      LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
      RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
    return 0;

  // Make a constant range that's the intersection of the two icmp ranges.
  // If the intersection is empty, we know that the result is false.
  ConstantRange LHSRange =
    ConstantRange::makeICmpRegion(LHSCC, LHSCst->getValue());
  ConstantRange RHSRange =
    ConstantRange::makeICmpRegion(RHSCC, RHSCst->getValue());

  if (LHSRange.intersectWith(RHSRange).isEmptySet())
    return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);

  // We can't fold (ugt x, C) & (sgt x, C2).
  if (!PredicatesFoldable(LHSCC, RHSCC))
    return 0;

  // Ensure that the larger constant is on the RHS.
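  // (Compare the constants as signed when a signed predicate is involved,
  // unsigned otherwise.)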
  bool ShouldSwap;
  if (CmpInst::isSigned(LHSCC) ||
      (ICmpInst::isEquality(LHSCC) &&
       CmpInst::isSigned(RHSCC)))
    ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
  else
    ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());

  if (ShouldSwap) {
    std::swap(LHS, RHS);
    std::swap(LHSCst, RHSCst);
    std::swap(LHSCC, RHSCC);
  }

  // At this point, we know we have two icmp instructions
  // comparing a value against two constants and and'ing the result
  // together.  Because of the above check, we know that we only have
  // icmp eq, icmp ne, icmp [su]lt, and icmp [su]gt here.  We also know
  // (from the icmp folding check above) that the two constants
  // are not equal and that the larger constant is on the RHS.
  assert(LHSCst != RHSCst && "Compares not folded above?");

  switch (LHSCC) {
  default: llvm_unreachable("Unknown integer condition code!");
  case ICmpInst::ICMP_EQ:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_NE:         // (X == 13 & X != 15) -> X == 13
    case ICmpInst::ICMP_ULT:        // (X == 13 & X <  15) -> X == 13
    case ICmpInst::ICMP_SLT:        // (X == 13 & X <  15) -> X == 13
      return LHS;
    }
  case ICmpInst::ICMP_NE:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_ULT:
      if (LHSCst == SubOne(RHSCst)) // (X != 13 & X u< 14) -> X < 13
        return Builder->CreateICmpULT(Val, LHSCst);
      break;                        // (X != 13 & X u< 15) -> no change
    case ICmpInst::ICMP_SLT:
      if (LHSCst == SubOne(RHSCst)) // (X != 13 & X s< 14) -> X < 13
        return Builder->CreateICmpSLT(Val, LHSCst);
      break;                        // (X != 13 & X s< 15) -> no change
    case ICmpInst::ICMP_EQ:         // (X != 13 & X == 15) -> X == 15
    case ICmpInst::ICMP_UGT:        // (X != 13 & X u> 15) -> X u> 15
    case ICmpInst::ICMP_SGT:        // (X != 13 & X s> 15) -> X s> 15
      return RHS;
    case ICmpInst::ICMP_NE:
      if (LHSCst == SubOne(RHSCst)) { // (X != 13 & X != 14) -> X-13 >u 1
        Constant *AddCST = ConstantExpr::getNeg(LHSCst);
        Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
        return Builder->CreateICmpUGT(Add, ConstantInt::get(Add->getType(), 1));
      }
      break;                        // (X != 13 & X != 15) -> no change
    }
    break;
  case ICmpInst::ICMP_ULT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:         // (X u< 13 & X == 15) -> false
    case ICmpInst::ICMP_UGT:        // (X u< 13 & X u> 15) -> false
      return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
    case ICmpInst::ICMP_SGT:        // (X u< 13 & X s> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:         // (X u< 13 & X != 15) -> X u< 13
    case ICmpInst::ICMP_ULT:        // (X u< 13 & X u< 15) -> X u< 13
      return LHS;
    case ICmpInst::ICMP_SLT:        // (X u< 13 & X s< 15) -> no change
      break;
    }
    break;
  case ICmpInst::ICMP_SLT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_UGT:        // (X s< 13 & X u> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:         // (X s< 13 & X != 15) -> X < 13
    case ICmpInst::ICMP_SLT:        // (X s< 13 & X s< 15) -> X < 13
      return LHS;
    case ICmpInst::ICMP_ULT:        // (X s< 13 & X u< 15) -> no change
      break;
    }
    break;
  case ICmpInst::ICMP_UGT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:         // (X u> 13 & X == 15) -> X == 15
    case ICmpInst::ICMP_UGT:        // (X u> 13 & X u> 15) -> X u> 15
      return RHS;
    case ICmpInst::ICMP_SGT:        // (X u> 13 & X s> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:
      if (RHSCst == AddOne(LHSCst)) // (X u> 13 & X != 14) -> X u> 14
        return Builder->CreateICmp(LHSCC, Val, RHSCst);
      break;                        // (X u> 13 & X != 15) -> no change
    case ICmpInst::ICMP_ULT:        // (X u> 13 & X u< 15) -> (X-14) <u 1
      return InsertRangeTest(Val, AddOne(LHSCst), RHSCst, false, true);
    case ICmpInst::ICMP_SLT:        // (X u> 13 & X s< 15) -> no change
      break;
    }
    break;
  case ICmpInst::ICMP_SGT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:         // (X s> 13 & X == 15) -> X == 15
    case ICmpInst::ICMP_SGT:        // (X s> 13 & X s> 15) -> X s> 15
      return RHS;
    case ICmpInst::ICMP_UGT:        // (X s> 13 & X u> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:
      if (RHSCst == AddOne(LHSCst)) // (X s> 13 & X != 14) -> X s> 14
        return Builder->CreateICmp(LHSCC, Val, RHSCst);
      break;                        // (X s> 13 & X != 15) -> no change
    case ICmpInst::ICMP_SLT:        // (X s> 13 & X s< 15) -> (X-14) s< 1
      return InsertRangeTest(Val, AddOne(LHSCst), RHSCst, true, true);
    case ICmpInst::ICMP_ULT:        // (X s> 13 & X u< 15) -> no change
      break;
    }
    break;
  }

  return 0;
}

/// FoldAndOfFCmps - Optimize (fcmp)&(fcmp).  NOTE: Unlike the rest of
/// instcombine, this returns a Value which should already be inserted into the
/// function.
Value *InstCombiner::FoldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
  if (LHS->getPredicate() == FCmpInst::FCMP_ORD &&
      RHS->getPredicate() == FCmpInst::FCMP_ORD) {
    if (LHS->getOperand(0)->getType() != RHS->getOperand(0)->getType())
      return 0;

    // (fcmp ord x, c) & (fcmp ord y, c)  -> (fcmp ord x, y)
    if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
      if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
        // If either of the constants are nans, then the whole thing returns
        // false.
        if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
          return Builder->getFalse();
        return Builder->CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
      }

    // Handle vector zeros.  This occurs because the canonical form of
    // "fcmp ord x,x" is "fcmp ord x, 0".
    if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
        isa<ConstantAggregateZero>(RHS->getOperand(1)))
      return Builder->CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
    return 0;
  }

  Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
  Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
  FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();


  if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
    // Swap RHS operands to match LHS.
    Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
    std::swap(Op1LHS, Op1RHS);
  }

  if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
    // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
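    // For example, (fcmp ord x, y) & (fcmp ult x, y) simplifies to
    // (fcmp olt x, y) via the ordered/unordered reasoning below.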
    if (Op0CC == Op1CC)
      return Builder->CreateFCmp((FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS);
    if (Op0CC == FCmpInst::FCMP_FALSE || Op1CC == FCmpInst::FCMP_FALSE)
      return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
    if (Op0CC == FCmpInst::FCMP_TRUE)
      return RHS;
    if (Op1CC == FCmpInst::FCMP_TRUE)
      return LHS;

    bool Op0Ordered;
    bool Op1Ordered;
    unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
    unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
    // uno && ord -> false
    if (Op0Pred == 0 && Op1Pred == 0 && Op0Ordered != Op1Ordered)
      return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
    if (Op1Pred == 0) {
      std::swap(LHS, RHS);
      std::swap(Op0Pred, Op1Pred);
      std::swap(Op0Ordered, Op1Ordered);
    }
    if (Op0Pred == 0) {
      // uno && ueq -> uno && (uno || eq) -> uno
      // ord && olt -> ord && (ord && lt) -> olt
      if (!Op0Ordered && (Op0Ordered == Op1Ordered))
        return LHS;
      if (Op0Ordered && (Op0Ordered == Op1Ordered))
        return RHS;

      // uno && oeq -> uno && (ord && eq) -> false
      if (!Op0Ordered)
        return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
      // ord && ueq -> ord && (uno || eq) -> oeq
      return getFCmpValue(true, Op1Pred, Op0LHS, Op0RHS, Builder);
    }
  }

  return 0;
}


Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyAndInst(Op0, Op1, TD))
    return ReplaceInstUsesWith(I, V);

  // (A|B)&(A|C) -> A|(B&C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return ReplaceInstUsesWith(I, V);

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
    const APInt &AndRHSMask = AndRHS->getValue();

    // Optimize a variety of ((val OP C1) & C2) combinations...
    if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
      Value *Op0LHS = Op0I->getOperand(0);
      Value *Op0RHS = Op0I->getOperand(1);
      switch (Op0I->getOpcode()) {
      default: break;
      case Instruction::Xor:
      case Instruction::Or: {
        // If the mask is only needed on one incoming arm, push it up.
        if (!Op0I->hasOneUse()) break;

        APInt NotAndRHS(~AndRHSMask);
        if (MaskedValueIsZero(Op0LHS, NotAndRHS)) {
          // Not masking anything out for the LHS, move to RHS.
          Value *NewRHS = Builder->CreateAnd(Op0RHS, AndRHS,
                                             Op0RHS->getName()+".masked");
          return BinaryOperator::Create(Op0I->getOpcode(), Op0LHS, NewRHS);
        }
        if (!isa<Constant>(Op0RHS) &&
            MaskedValueIsZero(Op0RHS, NotAndRHS)) {
          // Not masking anything out for the RHS, move to LHS.
          Value *NewLHS = Builder->CreateAnd(Op0LHS, AndRHS,
                                             Op0LHS->getName()+".masked");
          return BinaryOperator::Create(Op0I->getOpcode(), NewLHS, Op0RHS);
        }

        break;
      }
      case Instruction::Add:
        // ((A & N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == AndRHS.
        // ((A | N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
        // ((A ^ N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
        if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, false, I))
          return BinaryOperator::CreateAnd(V, AndRHS);
        if (Value *V = FoldLogicalPlusAnd(Op0RHS, Op0LHS, AndRHS, false, I))
          return BinaryOperator::CreateAnd(V, AndRHS);  // Add commutes
        break;

      case Instruction::Sub:
        // ((A & N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == AndRHS.
        // ((A | N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
        // ((A ^ N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
        if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, true, I))
          return BinaryOperator::CreateAnd(V, AndRHS);

        // (A - N) & AndRHS -> -N & AndRHS iff A&AndRHS==0 and AndRHS
        // has 1's for all bits that the subtraction with A might affect.
        if (Op0I->hasOneUse() && !match(Op0LHS, m_Zero())) {
          uint32_t BitWidth = AndRHSMask.getBitWidth();
          uint32_t Zeros = AndRHSMask.countLeadingZeros();
          APInt Mask = APInt::getLowBitsSet(BitWidth, BitWidth - Zeros);

          if (MaskedValueIsZero(Op0LHS, Mask)) {
            Value *NewNeg = Builder->CreateNeg(Op0RHS);
            return BinaryOperator::CreateAnd(NewNeg, AndRHS);
          }
        }
        break;

      case Instruction::Shl:
      case Instruction::LShr:
        // (1 << x) & 1 --> zext(x == 0)
        // (1 >> x) & 1 --> zext(x == 0)
        if (AndRHSMask == 1 && Op0LHS == AndRHS) {
          Value *NewICmp =
            Builder->CreateICmpEQ(Op0RHS, Constant::getNullValue(I.getType()));
          return new ZExtInst(NewICmp, I.getType());
        }
        break;
      }

      if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1)))
        if (Instruction *Res = OptAndOp(Op0I, Op0CI, AndRHS, I))
          return Res;
    }

    // If this is an integer truncation, and if the source is an 'and' with
    // immediate, transform it.  This frequently occurs for bitfield accesses.
    {
      Value *X = 0; ConstantInt *YC = 0;
      if (match(Op0, m_Trunc(m_And(m_Value(X), m_ConstantInt(YC))))) {
        // Change: and (trunc (and X, YC) to T), C2
        // into  : and (trunc X to T), trunc(YC) & C2
        // This will fold the two constants together, which may allow
        // other simplifications.
        Value *NewCast = Builder->CreateTrunc(X, I.getType(), "and.shrunk");
        Constant *C3 = ConstantExpr::getTrunc(YC, I.getType());
        C3 = ConstantExpr::getAnd(C3, AndRHS);
        return BinaryOperator::CreateAnd(NewCast, C3);
      }
    }

    // Try to fold constant and into select arguments.
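    // For example, ((select Cond, 7, 0) & 1) can become (select Cond, 1, 0)
    // when FoldOpIntoSelect can apply the 'and' to both constant arms.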
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;
    if (isa<PHINode>(Op0))
      if (Instruction *NV = FoldOpIntoPhi(I))
        return NV;
  }


  // (~A & ~B) == (~(A | B)) - De Morgan's Law
  if (Value *Op0NotVal = dyn_castNotVal(Op0))
    if (Value *Op1NotVal = dyn_castNotVal(Op1))
      if (Op0->hasOneUse() && Op1->hasOneUse()) {
        Value *Or = Builder->CreateOr(Op0NotVal, Op1NotVal,
                                      I.getName()+".demorgan");
        return BinaryOperator::CreateNot(Or);
      }

  {
    Value *A = 0, *B = 0, *C = 0, *D = 0;
    // (A|B) & ~(A&B) -> A^B
    if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
        match(Op1, m_Not(m_And(m_Value(C), m_Value(D)))) &&
        ((A == C && B == D) || (A == D && B == C)))
      return BinaryOperator::CreateXor(A, B);

    // ~(A&B) & (A|B) -> A^B
    if (match(Op1, m_Or(m_Value(A), m_Value(B))) &&
        match(Op0, m_Not(m_And(m_Value(C), m_Value(D)))) &&
        ((A == C && B == D) || (A == D && B == C)))
      return BinaryOperator::CreateXor(A, B);

    // A&(A^B) => A & ~B
    {
      Value *tmpOp0 = Op0;
      Value *tmpOp1 = Op1;
      if (Op0->hasOneUse() &&
          match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
        if (A == Op1 || B == Op1) {
          tmpOp1 = Op0;
          tmpOp0 = Op1;
          // Simplify below
        }
      }

      if (tmpOp1->hasOneUse() &&
          match(tmpOp1, m_Xor(m_Value(A), m_Value(B)))) {
        if (B == tmpOp0) {
          std::swap(A, B);
        }
        // Notice that the pattern (A&(~B)) is actually (A&(-1^B)), so if
        // A is originally -1 (or a vector of -1 and undefs), then we enter
        // an endless loop.  By checking that A is non-constant we ensure that
        // we will never get to the loop.
        if (A == tmpOp0 && !isa<Constant>(A)) // A&(A^B) -> A & ~B
          return BinaryOperator::CreateAnd(A, Builder->CreateNot(B));
      }
    }

    // (A&((~A)|B)) -> A&B
    if (match(Op0, m_Or(m_Not(m_Specific(Op1)), m_Value(A))) ||
        match(Op0, m_Or(m_Value(A), m_Not(m_Specific(Op1)))))
      return BinaryOperator::CreateAnd(A, Op1);
    if (match(Op1, m_Or(m_Not(m_Specific(Op0)), m_Value(A))) ||
        match(Op1, m_Or(m_Value(A), m_Not(m_Specific(Op0)))))
      return BinaryOperator::CreateAnd(A, Op0);
  }

  if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1))
    if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0))
      if (Value *Res = FoldAndOfICmps(LHS, RHS))
        return ReplaceInstUsesWith(I, Res);

  // If and'ing two fcmp, try combine them into one.
  if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
    if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
      if (Value *Res = FoldAndOfFCmps(LHS, RHS))
        return ReplaceInstUsesWith(I, Res);


  // fold (and (cast A), (cast B)) -> (cast (and A, B))
  if (CastInst *Op0C = dyn_cast<CastInst>(Op0))
    if (CastInst *Op1C = dyn_cast<CastInst>(Op1)) {
      Type *SrcTy = Op0C->getOperand(0)->getType();
      if (Op0C->getOpcode() == Op1C->getOpcode() && // same cast kind ?
          SrcTy == Op1C->getOperand(0)->getType() &&
          SrcTy->isIntOrIntVectorTy()) {
        Value *Op0COp = Op0C->getOperand(0), *Op1COp = Op1C->getOperand(0);

        // Only do this if the casts both really cause code to be generated.
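        // (ShouldOptimizeCast is expected to reject casts that are effectively
        // free, e.g. no-op bitcasts, where hoisting the 'and' would not help.)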
        if (ShouldOptimizeCast(Op0C->getOpcode(), Op0COp, I.getType()) &&
            ShouldOptimizeCast(Op1C->getOpcode(), Op1COp, I.getType())) {
          Value *NewOp = Builder->CreateAnd(Op0COp, Op1COp, I.getName());
          return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
        }

        // If this is and(cast(icmp), cast(icmp)), try to fold this even if the
        // cast is otherwise not optimizable.  This happens for vector sexts.
        if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1COp))
          if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0COp))
            if (Value *Res = FoldAndOfICmps(LHS, RHS))
              return CastInst::Create(Op0C->getOpcode(), Res, I.getType());

        // If this is and(cast(fcmp), cast(fcmp)), try to fold this even if the
        // cast is otherwise not optimizable.  This happens for vector sexts.
        if (FCmpInst *RHS = dyn_cast<FCmpInst>(Op1COp))
          if (FCmpInst *LHS = dyn_cast<FCmpInst>(Op0COp))
            if (Value *Res = FoldAndOfFCmps(LHS, RHS))
              return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
      }
    }

  // (X >> Z) & (Y >> Z)  -> (X&Y) >> Z  for all shifts.
  if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
    if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
      if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
          SI0->getOperand(1) == SI1->getOperand(1) &&
          (SI0->hasOneUse() || SI1->hasOneUse())) {
        Value *NewOp =
          Builder->CreateAnd(SI0->getOperand(0), SI1->getOperand(0),
                             SI0->getName());
        return BinaryOperator::Create(SI1->getOpcode(), NewOp,
                                      SI1->getOperand(1));
      }
  }

  {
    Value *X = 0;
    bool OpsSwapped = false;
    // Canonicalize SExt or Not to the LHS
    if (match(Op1, m_SExt(m_Value())) ||
        match(Op1, m_Not(m_Value()))) {
      std::swap(Op0, Op1);
      OpsSwapped = true;
    }

    // Fold (and (sext bool to A), B) --> (select bool, B, 0)
    if (match(Op0, m_SExt(m_Value(X))) &&
        X->getType()->getScalarType()->isIntegerTy(1)) {
      Value *Zero = Constant::getNullValue(Op1->getType());
      return SelectInst::Create(X, Op1, Zero);
    }

    // Fold (and ~(sext bool to A), B) --> (select bool, 0, B)
    if (match(Op0, m_Not(m_SExt(m_Value(X)))) &&
        X->getType()->getScalarType()->isIntegerTy(1)) {
      Value *Zero = Constant::getNullValue(Op0->getType());
      return SelectInst::Create(X, Zero, Op1);
    }

    if (OpsSwapped)
      std::swap(Op0, Op1);
  }

  return Changed ? &I : 0;
}
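
// The two helpers below recognize byte-swap idioms such as (for an i32 x):
//   (x >> 24) | ((x >> 8) & 0xff00) | ((x << 8) & 0xff0000) | (x << 24)
// and turn them into a single llvm.bswap intrinsic call.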

/// CollectBSwapParts - Analyze the specified subexpression and see if it is
/// capable of providing pieces of a bswap.  The subexpression provides pieces
/// of a bswap if it is proven that each of the non-zero bytes in the output of
/// the expression came from the corresponding "byte swapped" byte in some other
/// value.  For example, if the current subexpression is "(shl i32 %X, 24)" then
/// we know that the expression deposits the low byte of %X into the high byte
/// of the bswap result and that all other bytes are zero.  This expression is
/// accepted and the high byte of ByteValues is set to X to indicate a correct
/// match.
///
/// This function returns true if the match was unsuccessful and false if it
/// succeeded.  On entry to the function the "OverallLeftShift" is a signed
/// integer value indicating the number of bytes that the subexpression is
/// later shifted.  For example, if the expression is later right shifted by
/// 16 bits, the OverallLeftShift value would be -2 on entry.  This is used to
/// specify which byte of ByteValues is actually being set.
///
/// Similarly, ByteMask is a bitmask where a bit is clear if its corresponding
/// byte is masked to zero by a user.  For example, in (X & 255), X will be
/// processed with a bytemask of 1.  Because bytemask is 32-bits, this limits
/// this function to working on up to 32-byte (256 bit) values.  ByteMask is
/// always in the local (OverallLeftShift) coordinate space.
///
static bool CollectBSwapParts(Value *V, int OverallLeftShift, uint32_t ByteMask,
                              SmallVectorImpl<Value *> &ByteValues) {
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // If this is an or instruction, it may be an inner node of the bswap.
    if (I->getOpcode() == Instruction::Or) {
      return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
                               ByteValues) ||
             CollectBSwapParts(I->getOperand(1), OverallLeftShift, ByteMask,
                               ByteValues);
    }

    // If this is a logical shift by a constant multiple of 8, recurse with
    // OverallLeftShift and ByteMask adjusted.
    if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
      unsigned ShAmt =
        cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
      // Ensure the shift amount is a multiple of 8 and in range.
      if ((ShAmt & 7) || (ShAmt > 8*ByteValues.size()))
        return true;

      unsigned ByteShift = ShAmt >> 3;
      if (I->getOpcode() == Instruction::Shl) {
        // X << 2 -> collect(X, +2)
        OverallLeftShift += ByteShift;
        ByteMask >>= ByteShift;
      } else {
        // X >>u 2 -> collect(X, -2)
        OverallLeftShift -= ByteShift;
        ByteMask <<= ByteShift;
        ByteMask &= (~0U >> (32 - ByteValues.size()));
      }

      if (OverallLeftShift >= (int)ByteValues.size()) return true;
      if (OverallLeftShift <= -(int)ByteValues.size()) return true;

      return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
                               ByteValues);
    }

    // If this is a logical 'and' with a mask that clears bytes, clear the
    // corresponding bytes in ByteMask.
    if (I->getOpcode() == Instruction::And &&
        isa<ConstantInt>(I->getOperand(1))) {
      // Scan every byte of the and mask, seeing if the byte is either 0 or 255.
      unsigned NumBytes = ByteValues.size();
      APInt Byte(I->getType()->getPrimitiveSizeInBits(), 255);
      const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();

      for (unsigned i = 0; i != NumBytes; ++i, Byte <<= 8) {
        // If this byte is masked out by a later operation, we don't care what
        // the and mask is.
        if ((ByteMask & (1 << i)) == 0)
          continue;

        // If the AndMask is all zeros for this byte, clear the bit.
        APInt MaskB = AndMask & Byte;
        if (MaskB == 0) {
          ByteMask &= ~(1U << i);
          continue;
        }

        // If the AndMask is not all ones for this byte, it's not a bytezap.
        if (MaskB != Byte)
          return true;

        // Otherwise, this byte is kept.
      }

      return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
                               ByteValues);
    }
  }

  // Okay, we got to something that isn't a shift, 'or' or 'and'.  This must be
  // the input value to the bswap.
  // Some observations: 1) if more than one byte is demanded from this input,
  // then it could not be successfully assembled into a byteswap.  At least one
  // of the two bytes would not be aligned with their ultimate destination.
  if (!isPowerOf2_32(ByteMask)) return true;
  unsigned InputByteNo = countTrailingZeros(ByteMask);

  // 2) The input and ultimate destinations must line up: if byte 3 of an i32
  // is demanded, it needs to go into byte 0 of the result.  This means that the
  // byte needs to be shifted until it lands in the right byte bucket.  The
  // shift amount depends on the position: if the byte is coming from the high
  // part of the value (e.g. byte 3) then it must be shifted right.  If from the
  // low part, it must be shifted left.
  unsigned DestByteNo = InputByteNo + OverallLeftShift;
  if (ByteValues.size()-1-DestByteNo != InputByteNo)
    return true;

  // If the destination byte value is already defined, the values are or'd
  // together, which isn't a bswap (unless it's an or of the same bits).
  if (ByteValues[DestByteNo] && ByteValues[DestByteNo] != V)
    return true;
  ByteValues[DestByteNo] = V;
  return false;
}

/// MatchBSwap - Given an OR instruction, check to see if this is a bswap idiom.
/// If so, insert the new bswap intrinsic and return it.
Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
  IntegerType *ITy = dyn_cast<IntegerType>(I.getType());
  if (!ITy || ITy->getBitWidth() % 16 ||
      // ByteMask only allows up to 32-byte values.
      ITy->getBitWidth() > 32*8)
    return 0;   // Can only bswap pairs of bytes.  Can't do vectors.

  /// ByteValues - For each byte of the result, we keep track of which value
  /// defines each byte.
  SmallVector<Value*, 8> ByteValues;
  ByteValues.resize(ITy->getBitWidth()/8);

  // Try to find all the pieces corresponding to the bswap.
  uint32_t ByteMask = ~0U >> (32-ByteValues.size());
  if (CollectBSwapParts(&I, 0, ByteMask, ByteValues))
    return 0;

  // Check to see if all of the bytes come from the same value.
  Value *V = ByteValues[0];
  if (V == 0) return 0;  // Didn't find a byte?  Must be zero.

  // Check to make sure that all of the bytes come from the same value.
  for (unsigned i = 1, e = ByteValues.size(); i != e; ++i)
    if (ByteValues[i] != V)
      return 0;
  Module *M = I.getParent()->getParent()->getParent();
  Function *F = Intrinsic::getDeclaration(M, Intrinsic::bswap, ITy);
  return CallInst::Create(F, V);
}

/// MatchSelectFromAndOr - We have an expression of the form (A&C)|(B&D).  Check
/// to see if A is (cond?-1:0) and either B or D is ~(cond?-1:0) or (cond?0:-1).
/// If so, we can simplify this expression to "cond ? C : D or B".
static Instruction *MatchSelectFromAndOr(Value *A, Value *B,
                                         Value *C, Value *D) {
  // If A is not a select of -1/0, this cannot match.
  Value *Cond = 0;
  if (!match(A, m_SExt(m_Value(Cond))) ||
      !Cond->getType()->isIntegerTy(1))
    return 0;

  // ((cond?-1:0)&C) | (B&(cond?0:-1)) -> cond ? C : B.
  if (match(D, m_Not(m_SExt(m_Specific(Cond)))))
    return SelectInst::Create(Cond, C, B);
  if (match(D, m_SExt(m_Not(m_Specific(Cond)))))
    return SelectInst::Create(Cond, C, B);

  // ((cond?-1:0)&C) | ((cond?0:-1)&D) -> cond ? C : D.
1450 if (match(B, m_Not(m_SExt(m_Specific(Cond))))) 1451 return SelectInst::Create(Cond, C, D); 1452 if (match(B, m_SExt(m_Not(m_Specific(Cond))))) 1453 return SelectInst::Create(Cond, C, D); 1454 return 0; 1455 } 1456 1457 /// FoldOrOfICmps - Fold (icmp)|(icmp) if possible. 1458 Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) { 1459 ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate(); 1460 1461 // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B) 1462 if (PredicatesFoldable(LHSCC, RHSCC)) { 1463 if (LHS->getOperand(0) == RHS->getOperand(1) && 1464 LHS->getOperand(1) == RHS->getOperand(0)) 1465 LHS->swapOperands(); 1466 if (LHS->getOperand(0) == RHS->getOperand(0) && 1467 LHS->getOperand(1) == RHS->getOperand(1)) { 1468 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1); 1469 unsigned Code = getICmpCode(LHS) | getICmpCode(RHS); 1470 bool isSigned = LHS->isSigned() || RHS->isSigned(); 1471 return getNewICmpValue(isSigned, Code, Op0, Op1, Builder); 1472 } 1473 } 1474 1475 // handle (roughly): 1476 // (icmp ne (A & B), C) | (icmp ne (A & D), E) 1477 if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, ICmpInst::ICMP_NE, Builder)) 1478 return V; 1479 1480 Value *Val = LHS->getOperand(0), *Val2 = RHS->getOperand(0); 1481 ConstantInt *LHSCst = dyn_cast<ConstantInt>(LHS->getOperand(1)); 1482 ConstantInt *RHSCst = dyn_cast<ConstantInt>(RHS->getOperand(1)); 1483 1484 if (LHS->hasOneUse() || RHS->hasOneUse()) { 1485 // (icmp eq B, 0) | (icmp ult A, B) -> (icmp ule A, B-1) 1486 // (icmp eq B, 0) | (icmp ugt B, A) -> (icmp ule A, B-1) 1487 Value *A = 0, *B = 0; 1488 if (LHSCC == ICmpInst::ICMP_EQ && LHSCst && LHSCst->isZero()) { 1489 B = Val; 1490 if (RHSCC == ICmpInst::ICMP_ULT && Val == RHS->getOperand(1)) 1491 A = Val2; 1492 else if (RHSCC == ICmpInst::ICMP_UGT && Val == Val2) 1493 A = RHS->getOperand(1); 1494 } 1495 // (icmp ult A, B) | (icmp eq B, 0) -> (icmp ule A, B-1) 1496 // (icmp ugt B, A) | (icmp eq B, 0) -> (icmp ule A, B-1) 1497 else if (RHSCC == ICmpInst::ICMP_EQ && RHSCst && RHSCst->isZero()) { 1498 B = Val2; 1499 if (LHSCC == ICmpInst::ICMP_ULT && Val2 == LHS->getOperand(1)) 1500 A = Val; 1501 else if (LHSCC == ICmpInst::ICMP_UGT && Val2 == Val) 1502 A = LHS->getOperand(1); 1503 } 1504 if (A && B) 1505 return Builder->CreateICmp( 1506 ICmpInst::ICMP_UGE, 1507 Builder->CreateAdd(B, ConstantInt::getSigned(B->getType(), -1)), A); 1508 } 1509 1510 // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2). 1511 if (LHSCst == 0 || RHSCst == 0) return 0; 1512 1513 if (LHSCst == RHSCst && LHSCC == RHSCC) { 1514 // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0) 1515 if (LHSCC == ICmpInst::ICMP_NE && LHSCst->isZero()) { 1516 Value *NewOr = Builder->CreateOr(Val, Val2); 1517 return Builder->CreateICmp(LHSCC, NewOr, LHSCst); 1518 } 1519 } 1520 1521 // (icmp ult (X + CA), C1) | (icmp eq X, C2) -> (icmp ule (X + CA), C1) 1522 // iff C2 + CA == C1. 1523 if (LHSCC == ICmpInst::ICMP_ULT && RHSCC == ICmpInst::ICMP_EQ) { 1524 ConstantInt *AddCst; 1525 if (match(Val, m_Add(m_Specific(Val2), m_ConstantInt(AddCst)))) 1526 if (RHSCst->getValue() + AddCst->getValue() == LHSCst->getValue()) 1527 return Builder->CreateICmpULE(Val, LHSCst); 1528 } 1529 1530 // From here on, we only handle: 1531 // (icmp1 A, C1) | (icmp2 A, C2) --> something simpler. 1532 if (Val != Val2) return 0; 1533 1534 // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere. 
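  // (So by the time the switch below runs, only EQ, NE and the strict LT/GT
  // predicates remain.) As an illustration of the kind of rewrite it performs,
  // the EQ/EQ case turns two equality tests against adjacent constants into a
  // single unsigned range check; a minimal sketch in C (illustrative only):
  //
  //   bool before(uint32_t x) { return x == 13 || x == 14; }
  //   bool after (uint32_t x) { return (x - 13u) < 2u; }   // X-13 <u 2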
1535 if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE || 1536 RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE || 1537 LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE || 1538 RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE) 1539 return 0; 1540 1541 // We can't fold (ugt x, C) | (sgt x, C2). 1542 if (!PredicatesFoldable(LHSCC, RHSCC)) 1543 return 0; 1544 1545 // Ensure that the larger constant is on the RHS. 1546 bool ShouldSwap; 1547 if (CmpInst::isSigned(LHSCC) || 1548 (ICmpInst::isEquality(LHSCC) && 1549 CmpInst::isSigned(RHSCC))) 1550 ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue()); 1551 else 1552 ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue()); 1553 1554 if (ShouldSwap) { 1555 std::swap(LHS, RHS); 1556 std::swap(LHSCst, RHSCst); 1557 std::swap(LHSCC, RHSCC); 1558 } 1559 1560 // At this point, we know we have two icmp instructions 1561 // comparing a value against two constants and or'ing the result 1562 // together. Because of the above check, we know that we only have 1563 // ICMP_EQ, ICMP_NE, ICMP_LT, and ICMP_GT here. We also know (from the 1564 // icmp folding check above), that the two constants are not 1565 // equal. 1566 assert(LHSCst != RHSCst && "Compares not folded above?"); 1567 1568 switch (LHSCC) { 1569 default: llvm_unreachable("Unknown integer condition code!"); 1570 case ICmpInst::ICMP_EQ: 1571 switch (RHSCC) { 1572 default: llvm_unreachable("Unknown integer condition code!"); 1573 case ICmpInst::ICMP_EQ: 1574 if (LHS->getOperand(0) == RHS->getOperand(0)) { 1575 // if LHSCst and RHSCst differ only by one bit: 1576 // (A == C1 || A == C2) -> (A & ~(C1 ^ C2)) == C1 1577 assert(LHSCst->getValue().ule(RHSCst->getValue())); 1578 1579 APInt Xor = LHSCst->getValue() ^ RHSCst->getValue(); 1580 if (Xor.isPowerOf2()) { 1581 Value *NegCst = Builder->getInt(~Xor); 1582 Value *And = Builder->CreateAnd(LHS->getOperand(0), NegCst); 1583 return Builder->CreateICmp(ICmpInst::ICMP_EQ, And, LHSCst); 1584 } 1585 } 1586 1587 if (LHSCst == SubOne(RHSCst)) { 1588 // (X == 13 | X == 14) -> X-13 <u 2 1589 Constant *AddCST = ConstantExpr::getNeg(LHSCst); 1590 Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off"); 1591 AddCST = ConstantExpr::getSub(AddOne(RHSCst), LHSCst); 1592 return Builder->CreateICmpULT(Add, AddCST); 1593 } 1594 1595 break; // (X == 13 | X == 15) -> no change 1596 case ICmpInst::ICMP_UGT: // (X == 13 | X u> 14) -> no change 1597 case ICmpInst::ICMP_SGT: // (X == 13 | X s> 14) -> no change 1598 break; 1599 case ICmpInst::ICMP_NE: // (X == 13 | X != 15) -> X != 15 1600 case ICmpInst::ICMP_ULT: // (X == 13 | X u< 15) -> X u< 15 1601 case ICmpInst::ICMP_SLT: // (X == 13 | X s< 15) -> X s< 15 1602 return RHS; 1603 } 1604 break; 1605 case ICmpInst::ICMP_NE: 1606 switch (RHSCC) { 1607 default: llvm_unreachable("Unknown integer condition code!"); 1608 case ICmpInst::ICMP_EQ: // (X != 13 | X == 15) -> X != 13 1609 case ICmpInst::ICMP_UGT: // (X != 13 | X u> 15) -> X != 13 1610 case ICmpInst::ICMP_SGT: // (X != 13 | X s> 15) -> X != 13 1611 return LHS; 1612 case ICmpInst::ICMP_NE: // (X != 13 | X != 15) -> true 1613 case ICmpInst::ICMP_ULT: // (X != 13 | X u< 15) -> true 1614 case ICmpInst::ICMP_SLT: // (X != 13 | X s< 15) -> true 1615 return Builder->getTrue(); 1616 } 1617 case ICmpInst::ICMP_ULT: 1618 switch (RHSCC) { 1619 default: llvm_unreachable("Unknown integer condition code!"); 1620 case ICmpInst::ICMP_EQ: // (X u< 13 | X == 14) -> no change 1621 break; 1622 case ICmpInst::ICMP_UGT: // (X u< 13 | X u>
15) -> (X-13) u> 2 1623 // If RHSCst is [us]MAXINT, it is always false. Not handling 1624 // this can cause overflow. 1625 if (RHSCst->isMaxValue(false)) 1626 return LHS; 1627 return InsertRangeTest(Val, LHSCst, AddOne(RHSCst), false, false); 1628 case ICmpInst::ICMP_SGT: // (X u< 13 | X s> 15) -> no change 1629 break; 1630 case ICmpInst::ICMP_NE: // (X u< 13 | X != 15) -> X != 15 1631 case ICmpInst::ICMP_ULT: // (X u< 13 | X u< 15) -> X u< 15 1632 return RHS; 1633 case ICmpInst::ICMP_SLT: // (X u< 13 | X s< 15) -> no change 1634 break; 1635 } 1636 break; 1637 case ICmpInst::ICMP_SLT: 1638 switch (RHSCC) { 1639 default: llvm_unreachable("Unknown integer condition code!"); 1640 case ICmpInst::ICMP_EQ: // (X s< 13 | X == 14) -> no change 1641 break; 1642 case ICmpInst::ICMP_SGT: // (X s< 13 | X s> 15) -> (X-13) s> 2 1643 // If RHSCst is [us]MAXINT, it is always false. Not handling 1644 // this can cause overflow. 1645 if (RHSCst->isMaxValue(true)) 1646 return LHS; 1647 return InsertRangeTest(Val, LHSCst, AddOne(RHSCst), true, false); 1648 case ICmpInst::ICMP_UGT: // (X s< 13 | X u> 15) -> no change 1649 break; 1650 case ICmpInst::ICMP_NE: // (X s< 13 | X != 15) -> X != 15 1651 case ICmpInst::ICMP_SLT: // (X s< 13 | X s< 15) -> X s< 15 1652 return RHS; 1653 case ICmpInst::ICMP_ULT: // (X s< 13 | X u< 15) -> no change 1654 break; 1655 } 1656 break; 1657 case ICmpInst::ICMP_UGT: 1658 switch (RHSCC) { 1659 default: llvm_unreachable("Unknown integer condition code!"); 1660 case ICmpInst::ICMP_EQ: // (X u> 13 | X == 15) -> X u> 13 1661 case ICmpInst::ICMP_UGT: // (X u> 13 | X u> 15) -> X u> 13 1662 return LHS; 1663 case ICmpInst::ICMP_SGT: // (X u> 13 | X s> 15) -> no change 1664 break; 1665 case ICmpInst::ICMP_NE: // (X u> 13 | X != 15) -> true 1666 case ICmpInst::ICMP_ULT: // (X u> 13 | X u< 15) -> true 1667 return Builder->getTrue(); 1668 case ICmpInst::ICMP_SLT: // (X u> 13 | X s< 15) -> no change 1669 break; 1670 } 1671 break; 1672 case ICmpInst::ICMP_SGT: 1673 switch (RHSCC) { 1674 default: llvm_unreachable("Unknown integer condition code!"); 1675 case ICmpInst::ICMP_EQ: // (X s> 13 | X == 15) -> X > 13 1676 case ICmpInst::ICMP_SGT: // (X s> 13 | X s> 15) -> X > 13 1677 return LHS; 1678 case ICmpInst::ICMP_UGT: // (X s> 13 | X u> 15) -> no change 1679 break; 1680 case ICmpInst::ICMP_NE: // (X s> 13 | X != 15) -> true 1681 case ICmpInst::ICMP_SLT: // (X s> 13 | X s< 15) -> true 1682 return Builder->getTrue(); 1683 case ICmpInst::ICMP_ULT: // (X s> 13 | X u< 15) -> no change 1684 break; 1685 } 1686 break; 1687 } 1688 return 0; 1689 } 1690 1691 /// FoldOrOfFCmps - Optimize (fcmp)|(fcmp). NOTE: Unlike the rest of 1692 /// instcombine, this returns a Value which should already be inserted into the 1693 /// function. 1694 Value *InstCombiner::FoldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS) { 1695 if (LHS->getPredicate() == FCmpInst::FCMP_UNO && 1696 RHS->getPredicate() == FCmpInst::FCMP_UNO && 1697 LHS->getOperand(0)->getType() == RHS->getOperand(0)->getType()) { 1698 if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1))) 1699 if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) { 1700 // If either of the constants are nans, then the whole thing returns 1701 // true. 1702 if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN()) 1703 return Builder->getTrue(); 1704 1705 // Otherwise, no need to compare the two constants, compare the 1706 // rest. 1707 return Builder->CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0)); 1708 } 1709 1710 // Handle vector zeros. 
This occurs because the canonical form of 1711 // "fcmp uno x,x" is "fcmp uno x, 0". 1712 if (isa<ConstantAggregateZero>(LHS->getOperand(1)) && 1713 isa<ConstantAggregateZero>(RHS->getOperand(1))) 1714 return Builder->CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0)); 1715 1716 return 0; 1717 } 1718 1719 Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1); 1720 Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1); 1721 FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate(); 1722 1723 if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) { 1724 // Swap RHS operands to match LHS. 1725 Op1CC = FCmpInst::getSwappedPredicate(Op1CC); 1726 std::swap(Op1LHS, Op1RHS); 1727 } 1728 if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) { 1729 // Simplify (fcmp cc0 x, y) | (fcmp cc1 x, y). 1730 if (Op0CC == Op1CC) 1731 return Builder->CreateFCmp((FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS); 1732 if (Op0CC == FCmpInst::FCMP_TRUE || Op1CC == FCmpInst::FCMP_TRUE) 1733 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1); 1734 if (Op0CC == FCmpInst::FCMP_FALSE) 1735 return RHS; 1736 if (Op1CC == FCmpInst::FCMP_FALSE) 1737 return LHS; 1738 bool Op0Ordered; 1739 bool Op1Ordered; 1740 unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered); 1741 unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered); 1742 if (Op0Ordered == Op1Ordered) { 1743 // If both are ordered or unordered, return a new fcmp with 1744 // or'ed predicates. 1745 return getFCmpValue(Op0Ordered, Op0Pred|Op1Pred, Op0LHS, Op0RHS, Builder); 1746 } 1747 } 1748 return 0; 1749 } 1750 1751 /// FoldOrWithConstants - This helper function folds: 1752 /// 1753 /// ((A | B) & C1) | (B & C2) 1754 /// 1755 /// into: 1756 /// 1757 /// (A & C1) | B 1758 /// 1759 /// when the XOR of the two constants is "all ones" (-1). 1760 Instruction *InstCombiner::FoldOrWithConstants(BinaryOperator &I, Value *Op, 1761 Value *A, Value *B, Value *C) { 1762 ConstantInt *CI1 = dyn_cast<ConstantInt>(C); 1763 if (!CI1) return 0; 1764 1765 Value *V1 = 0; 1766 ConstantInt *CI2 = 0; 1767 if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2)))) return 0; 1768 1769 APInt Xor = CI1->getValue() ^ CI2->getValue(); 1770 if (!Xor.isAllOnesValue()) return 0; 1771 1772 if (V1 == A || V1 == B) { 1773 Value *NewOp = Builder->CreateAnd((V1 == A) ? B : A, CI1); 1774 return BinaryOperator::CreateOr(NewOp, V1); 1775 } 1776 1777 return 0; 1778 } 1779 1780 Instruction *InstCombiner::visitOr(BinaryOperator &I) { 1781 bool Changed = SimplifyAssociativeOrCommutative(I); 1782 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 1783 1784 if (Value *V = SimplifyOrInst(Op0, Op1, TD)) 1785 return ReplaceInstUsesWith(I, V); 1786 1787 // (A&B)|(A&C) -> A&(B|C) etc 1788 if (Value *V = SimplifyUsingDistributiveLaws(I)) 1789 return ReplaceInstUsesWith(I, V); 1790 1791 // See if we can simplify any instructions used by the instruction whose sole 1792 // purpose is to compute bits we don't care about. 1793 if (SimplifyDemandedInstructionBits(I)) 1794 return &I; 1795 1796 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) { 1797 ConstantInt *C1 = 0; Value *X = 0; 1798 // (X & C1) | C2 --> (X | C2) & (C1|C2) 1799 // iff (C1 & C2) == 0. 
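    // The rewrite above is a pure bit identity; a minimal sketch with concrete
    // constants (illustrative values only, C1 = 0x0ff0 and C2 = 0x00ff):
    //
    //   uint32_t before(uint32_t x) { return (x & 0x0ff0u) | 0x00ffu; }
    //   uint32_t after (uint32_t x) { return (x | 0x00ffu) & 0x0fffu; }
    //   // 0x0fff == (0x0ff0 | 0x00ff), and both functions agree for all x.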
1800 if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1))) && 1801 (RHS->getValue() & C1->getValue()) != 0 && 1802 Op0->hasOneUse()) { 1803 Value *Or = Builder->CreateOr(X, RHS); 1804 Or->takeName(Op0); 1805 return BinaryOperator::CreateAnd(Or, 1806 Builder->getInt(RHS->getValue() | C1->getValue())); 1807 } 1808 1809 // (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2) 1810 if (match(Op0, m_Xor(m_Value(X), m_ConstantInt(C1))) && 1811 Op0->hasOneUse()) { 1812 Value *Or = Builder->CreateOr(X, RHS); 1813 Or->takeName(Op0); 1814 return BinaryOperator::CreateXor(Or, 1815 Builder->getInt(C1->getValue() & ~RHS->getValue())); 1816 } 1817 1818 // Try to fold constant and into select arguments. 1819 if (SelectInst *SI = dyn_cast<SelectInst>(Op0)) 1820 if (Instruction *R = FoldOpIntoSelect(I, SI)) 1821 return R; 1822 1823 if (isa<PHINode>(Op0)) 1824 if (Instruction *NV = FoldOpIntoPhi(I)) 1825 return NV; 1826 } 1827 1828 Value *A = 0, *B = 0; 1829 ConstantInt *C1 = 0, *C2 = 0; 1830 1831 // (A | B) | C and A | (B | C) -> bswap if possible. 1832 // (A >> B) | (C << D) and (A << B) | (B >> C) -> bswap if possible. 1833 if (match(Op0, m_Or(m_Value(), m_Value())) || 1834 match(Op1, m_Or(m_Value(), m_Value())) || 1835 (match(Op0, m_LogicalShift(m_Value(), m_Value())) && 1836 match(Op1, m_LogicalShift(m_Value(), m_Value())))) { 1837 if (Instruction *BSwap = MatchBSwap(I)) 1838 return BSwap; 1839 } 1840 1841 // (X^C)|Y -> (X|Y)^C iff Y&C == 0 1842 if (Op0->hasOneUse() && 1843 match(Op0, m_Xor(m_Value(A), m_ConstantInt(C1))) && 1844 MaskedValueIsZero(Op1, C1->getValue())) { 1845 Value *NOr = Builder->CreateOr(A, Op1); 1846 NOr->takeName(Op0); 1847 return BinaryOperator::CreateXor(NOr, C1); 1848 } 1849 1850 // Y|(X^C) -> (X|Y)^C iff Y&C == 0 1851 if (Op1->hasOneUse() && 1852 match(Op1, m_Xor(m_Value(A), m_ConstantInt(C1))) && 1853 MaskedValueIsZero(Op0, C1->getValue())) { 1854 Value *NOr = Builder->CreateOr(A, Op0); 1855 NOr->takeName(Op0); 1856 return BinaryOperator::CreateXor(NOr, C1); 1857 } 1858 1859 // (A & C)|(B & D) 1860 Value *C = 0, *D = 0; 1861 if (match(Op0, m_And(m_Value(A), m_Value(C))) && 1862 match(Op1, m_And(m_Value(B), m_Value(D)))) { 1863 Value *V1 = 0, *V2 = 0; 1864 C1 = dyn_cast<ConstantInt>(C); 1865 C2 = dyn_cast<ConstantInt>(D); 1866 if (C1 && C2) { // (A & C1)|(B & C2) 1867 // If we have: ((V + N) & C1) | (V & C2) 1868 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0 1869 // replace with V+N. 1870 if (C1->getValue() == ~C2->getValue()) { 1871 if ((C2->getValue() & (C2->getValue()+1)) == 0 && // C2 == 0+1+ 1872 match(A, m_Add(m_Value(V1), m_Value(V2)))) { 1873 // Add commutes, try both ways. 1874 if (V1 == B && MaskedValueIsZero(V2, C2->getValue())) 1875 return ReplaceInstUsesWith(I, A); 1876 if (V2 == B && MaskedValueIsZero(V1, C2->getValue())) 1877 return ReplaceInstUsesWith(I, A); 1878 } 1879 // Or commutes, try both ways. 1880 if ((C1->getValue() & (C1->getValue()+1)) == 0 && 1881 match(B, m_Add(m_Value(V1), m_Value(V2)))) { 1882 // Add commutes, try both ways. 
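        // A worked instance of the (V + N) fold (illustrative numbers only;
        // in this block the roles of the two masks are mirrored): with a low
        // mask 0x00ff, high mask 0xff00, V = 0x1234 and N = 0x1100 (so
        // N & 0x00ff == 0), ((V + N) & 0xff00) | (V & 0x00ff)
        //   == 0x2300 | 0x0034 == 0x2334 == V + N.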
1883 if (V1 == A && MaskedValueIsZero(V2, C1->getValue())) 1884 return ReplaceInstUsesWith(I, B); 1885 if (V2 == A && MaskedValueIsZero(V1, C1->getValue())) 1886 return ReplaceInstUsesWith(I, B); 1887 } 1888 } 1889 1890 if ((C1->getValue() & C2->getValue()) == 0) { 1891 // ((V | N) & C1) | (V & C2) --> (V|N) & (C1|C2) 1892 // iff (C1&C2) == 0 and (N&~C1) == 0 1893 if (match(A, m_Or(m_Value(V1), m_Value(V2))) && 1894 ((V1 == B && MaskedValueIsZero(V2, ~C1->getValue())) || // (V|N) 1895 (V2 == B && MaskedValueIsZero(V1, ~C1->getValue())))) // (N|V) 1896 return BinaryOperator::CreateAnd(A, 1897 Builder->getInt(C1->getValue()|C2->getValue())); 1898 // Or commutes, try both ways. 1899 if (match(B, m_Or(m_Value(V1), m_Value(V2))) && 1900 ((V1 == A && MaskedValueIsZero(V2, ~C2->getValue())) || // (V|N) 1901 (V2 == A && MaskedValueIsZero(V1, ~C2->getValue())))) // (N|V) 1902 return BinaryOperator::CreateAnd(B, 1903 Builder->getInt(C1->getValue()|C2->getValue())); 1904 1905 // ((V|C3)&C1) | ((V|C4)&C2) --> (V|C3|C4)&(C1|C2) 1906 // iff (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0. 1907 ConstantInt *C3 = 0, *C4 = 0; 1908 if (match(A, m_Or(m_Value(V1), m_ConstantInt(C3))) && 1909 (C3->getValue() & ~C1->getValue()) == 0 && 1910 match(B, m_Or(m_Specific(V1), m_ConstantInt(C4))) && 1911 (C4->getValue() & ~C2->getValue()) == 0) { 1912 V2 = Builder->CreateOr(V1, ConstantExpr::getOr(C3, C4), "bitfield"); 1913 return BinaryOperator::CreateAnd(V2, 1914 Builder->getInt(C1->getValue()|C2->getValue())); 1915 } 1916 } 1917 } 1918 1919 // (A & (C0?-1:0)) | (B & ~(C0?-1:0)) -> C0 ? A : B, and commuted variants. 1920 // Don't do this for vector select idioms, the code generator doesn't handle 1921 // them well yet. 1922 if (!I.getType()->isVectorTy()) { 1923 if (Instruction *Match = MatchSelectFromAndOr(A, B, C, D)) 1924 return Match; 1925 if (Instruction *Match = MatchSelectFromAndOr(B, A, D, C)) 1926 return Match; 1927 if (Instruction *Match = MatchSelectFromAndOr(C, B, A, D)) 1928 return Match; 1929 if (Instruction *Match = MatchSelectFromAndOr(D, A, B, C)) 1930 return Match; 1931 } 1932 1933 // ((A&~B)|(~A&B)) -> A^B 1934 if ((match(C, m_Not(m_Specific(D))) && 1935 match(B, m_Not(m_Specific(A))))) 1936 return BinaryOperator::CreateXor(A, D); 1937 // ((~B&A)|(~A&B)) -> A^B 1938 if ((match(A, m_Not(m_Specific(D))) && 1939 match(B, m_Not(m_Specific(C))))) 1940 return BinaryOperator::CreateXor(C, D); 1941 // ((A&~B)|(B&~A)) -> A^B 1942 if ((match(C, m_Not(m_Specific(B))) && 1943 match(D, m_Not(m_Specific(A))))) 1944 return BinaryOperator::CreateXor(A, B); 1945 // ((~B&A)|(B&~A)) -> A^B 1946 if ((match(A, m_Not(m_Specific(B))) && 1947 match(D, m_Not(m_Specific(C))))) 1948 return BinaryOperator::CreateXor(C, B); 1949 1950 // ((A|B)&1)|(B&-2) -> (A&1) | B 1951 if (match(A, m_Or(m_Value(V1), m_Specific(B))) || 1952 match(A, m_Or(m_Specific(B), m_Value(V1)))) { 1953 Instruction *Ret = FoldOrWithConstants(I, Op1, V1, B, C); 1954 if (Ret) return Ret; 1955 } 1956 // (B&-2)|((A|B)&1) -> (A&1) | B 1957 if (match(B, m_Or(m_Specific(A), m_Value(V1))) || 1958 match(B, m_Or(m_Value(V1), m_Specific(A)))) { 1959 Instruction *Ret = FoldOrWithConstants(I, Op0, A, V1, D); 1960 if (Ret) return Ret; 1961 } 1962 } 1963 1964 // (X >> Z) | (Y >> Z) -> (X|Y) >> Z for all shifts. 
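  // A minimal sketch of that identity in C, assuming logical shifts and an
  // in-range shift amount (names illustrative only):
  //
  //   uint32_t before(uint32_t x, uint32_t y) { return (x >> 3) | (y >> 3); }
  //   uint32_t after (uint32_t x, uint32_t y) { return (x | y) >> 3; }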
1965 if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) { 1966 if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0)) 1967 if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() && 1968 SI0->getOperand(1) == SI1->getOperand(1) && 1969 (SI0->hasOneUse() || SI1->hasOneUse())) { 1970 Value *NewOp = Builder->CreateOr(SI0->getOperand(0), SI1->getOperand(0), 1971 SI0->getName()); 1972 return BinaryOperator::Create(SI1->getOpcode(), NewOp, 1973 SI1->getOperand(1)); 1974 } 1975 } 1976 1977 // (~A | ~B) == (~(A & B)) - De Morgan's Law 1978 if (Value *Op0NotVal = dyn_castNotVal(Op0)) 1979 if (Value *Op1NotVal = dyn_castNotVal(Op1)) 1980 if (Op0->hasOneUse() && Op1->hasOneUse()) { 1981 Value *And = Builder->CreateAnd(Op0NotVal, Op1NotVal, 1982 I.getName()+".demorgan"); 1983 return BinaryOperator::CreateNot(And); 1984 } 1985 1986 // Canonicalize xor to the RHS. 1987 bool SwappedForXor = false; 1988 if (match(Op0, m_Xor(m_Value(), m_Value()))) { 1989 std::swap(Op0, Op1); 1990 SwappedForXor = true; 1991 } 1992 1993 // A | ( A ^ B) -> A | B 1994 // A | (~A ^ B) -> A | ~B 1995 // (A & B) | (A ^ B) 1996 if (match(Op1, m_Xor(m_Value(A), m_Value(B)))) { 1997 if (Op0 == A || Op0 == B) 1998 return BinaryOperator::CreateOr(A, B); 1999 2000 if (match(Op0, m_And(m_Specific(A), m_Specific(B))) || 2001 match(Op0, m_And(m_Specific(B), m_Specific(A)))) 2002 return BinaryOperator::CreateOr(A, B); 2003 2004 if (Op1->hasOneUse() && match(A, m_Not(m_Specific(Op0)))) { 2005 Value *Not = Builder->CreateNot(B, B->getName()+".not"); 2006 return BinaryOperator::CreateOr(Not, Op0); 2007 } 2008 if (Op1->hasOneUse() && match(B, m_Not(m_Specific(Op0)))) { 2009 Value *Not = Builder->CreateNot(A, A->getName()+".not"); 2010 return BinaryOperator::CreateOr(Not, Op0); 2011 } 2012 } 2013 2014 // A | ~(A | B) -> A | ~B 2015 // A | ~(A ^ B) -> A | ~B 2016 if (match(Op1, m_Not(m_Value(A)))) 2017 if (BinaryOperator *B = dyn_cast<BinaryOperator>(A)) 2018 if ((Op0 == B->getOperand(0) || Op0 == B->getOperand(1)) && 2019 Op1->hasOneUse() && (B->getOpcode() == Instruction::Or || 2020 B->getOpcode() == Instruction::Xor)) { 2021 Value *NotOp = Op0 == B->getOperand(0) ? B->getOperand(1) : 2022 B->getOperand(0); 2023 Value *Not = Builder->CreateNot(NotOp, NotOp->getName()+".not"); 2024 return BinaryOperator::CreateOr(Not, Op0); 2025 } 2026 2027 if (SwappedForXor) 2028 std::swap(Op0, Op1); 2029 2030 if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1))) 2031 if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0))) 2032 if (Value *Res = FoldOrOfICmps(LHS, RHS)) 2033 return ReplaceInstUsesWith(I, Res); 2034 2035 // (fcmp uno x, c) | (fcmp uno y, c) -> (fcmp uno x, y) 2036 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) 2037 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1))) 2038 if (Value *Res = FoldOrOfFCmps(LHS, RHS)) 2039 return ReplaceInstUsesWith(I, Res); 2040 2041 // fold (or (cast A), (cast B)) -> (cast (or A, B)) 2042 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) { 2043 CastInst *Op1C = dyn_cast<CastInst>(Op1); 2044 if (Op1C && Op0C->getOpcode() == Op1C->getOpcode()) {// same cast kind ? 2045 Type *SrcTy = Op0C->getOperand(0)->getType(); 2046 if (SrcTy == Op1C->getOperand(0)->getType() && 2047 SrcTy->isIntOrIntVectorTy()) { 2048 Value *Op0COp = Op0C->getOperand(0), *Op1COp = Op1C->getOperand(0); 2049 2050 if ((!isa<ICmpInst>(Op0COp) || !isa<ICmpInst>(Op1COp)) && 2051 // Only do this if the casts both really cause code to be 2052 // generated. 
2053 ShouldOptimizeCast(Op0C->getOpcode(), Op0COp, I.getType()) && 2054 ShouldOptimizeCast(Op1C->getOpcode(), Op1COp, I.getType())) { 2055 Value *NewOp = Builder->CreateOr(Op0COp, Op1COp, I.getName()); 2056 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType()); 2057 } 2058 2059 // If this is or(cast(icmp), cast(icmp)), try to fold this even if the 2060 // cast is otherwise not optimizable. This happens for vector sexts. 2061 if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1COp)) 2062 if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0COp)) 2063 if (Value *Res = FoldOrOfICmps(LHS, RHS)) 2064 return CastInst::Create(Op0C->getOpcode(), Res, I.getType()); 2065 2066 // If this is or(cast(fcmp), cast(fcmp)), try to fold this even if the 2067 // cast is otherwise not optimizable. This happens for vector sexts. 2068 if (FCmpInst *RHS = dyn_cast<FCmpInst>(Op1COp)) 2069 if (FCmpInst *LHS = dyn_cast<FCmpInst>(Op0COp)) 2070 if (Value *Res = FoldOrOfFCmps(LHS, RHS)) 2071 return CastInst::Create(Op0C->getOpcode(), Res, I.getType()); 2072 } 2073 } 2074 } 2075 2076 // or(sext(A), B) -> A ? -1 : B where A is an i1 2077 // or(A, sext(B)) -> B ? -1 : A where B is an i1 2078 if (match(Op0, m_SExt(m_Value(A))) && A->getType()->isIntegerTy(1)) 2079 return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op1); 2080 if (match(Op1, m_SExt(m_Value(A))) && A->getType()->isIntegerTy(1)) 2081 return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op0); 2082 2083 // Note: If we've gotten to the point of visiting the outer OR, then the 2084 // inner one couldn't be simplified. If it was a constant, then it won't 2085 // be simplified by a later pass either, so we try swapping the inner/outer 2086 // ORs in the hopes that we'll be able to simplify it this way. 2087 // (X|C) | V --> (X|V) | C 2088 if (Op0->hasOneUse() && !isa<ConstantInt>(Op1) && 2089 match(Op0, m_Or(m_Value(A), m_ConstantInt(C1)))) { 2090 Value *Inner = Builder->CreateOr(A, Op1); 2091 Inner->takeName(Op0); 2092 return BinaryOperator::CreateOr(Inner, C1); 2093 } 2094 2095 // Change (or (bool?A:B),(bool?C:D)) --> (bool?(or A,C):(or B,D)) 2096 // Since this OR statement hasn't been optimized further yet, we hope 2097 // that this transformation will allow the new ORs to be optimized. 2098 { 2099 Value *X = 0, *Y = 0; 2100 if (Op0->hasOneUse() && Op1->hasOneUse() && 2101 match(Op0, m_Select(m_Value(X), m_Value(A), m_Value(B))) && 2102 match(Op1, m_Select(m_Value(Y), m_Value(C), m_Value(D))) && X == Y) { 2103 Value *orTrue = Builder->CreateOr(A, C); 2104 Value *orFalse = Builder->CreateOr(B, D); 2105 return SelectInst::Create(X, orTrue, orFalse); 2106 } 2107 } 2108 2109 return Changed ? &I : 0; 2110 } 2111 2112 Instruction *InstCombiner::visitXor(BinaryOperator &I) { 2113 bool Changed = SimplifyAssociativeOrCommutative(I); 2114 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 2115 2116 if (Value *V = SimplifyXorInst(Op0, Op1, TD)) 2117 return ReplaceInstUsesWith(I, V); 2118 2119 // (A&B)^(A&C) -> A&(B^C) etc 2120 if (Value *V = SimplifyUsingDistributiveLaws(I)) 2121 return ReplaceInstUsesWith(I, V); 2122 2123 // See if we can simplify any instructions used by the instruction whose sole 2124 // purpose is to compute bits we don't care about. 2125 if (SimplifyDemandedInstructionBits(I)) 2126 return &I; 2127 2128 // Is this a ~ operation? 
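  // The rewrites below rely on a few plain bit identities (shown in C
  // notation, illustrative only):
  //
  //   ~(~x & y) == x | ~y     // De Morgan, one operand already inverted
  //   ~( x & y) == ~x | ~y    // De Morgan, profitable when both nots are free
  //   ~( x | y) == ~x & ~y
  //   ~(~x >> n) == x >> n    // for arithmetic shift right: bitwise-not
  //                           // commutes with ashr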
2129 if (Value *NotOp = dyn_castNotVal(&I)) { 2130 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(NotOp)) { 2131 if (Op0I->getOpcode() == Instruction::And || 2132 Op0I->getOpcode() == Instruction::Or) { 2133 // ~(~X & Y) --> (X | ~Y) - De Morgan's Law 2134 // ~(~X | Y) === (X & ~Y) - De Morgan's Law 2135 if (dyn_castNotVal(Op0I->getOperand(1))) 2136 Op0I->swapOperands(); 2137 if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0))) { 2138 Value *NotY = 2139 Builder->CreateNot(Op0I->getOperand(1), 2140 Op0I->getOperand(1)->getName()+".not"); 2141 if (Op0I->getOpcode() == Instruction::And) 2142 return BinaryOperator::CreateOr(Op0NotVal, NotY); 2143 return BinaryOperator::CreateAnd(Op0NotVal, NotY); 2144 } 2145 2146 // ~(X & Y) --> (~X | ~Y) - De Morgan's Law 2147 // ~(X | Y) === (~X & ~Y) - De Morgan's Law 2148 if (isFreeToInvert(Op0I->getOperand(0)) && 2149 isFreeToInvert(Op0I->getOperand(1))) { 2150 Value *NotX = 2151 Builder->CreateNot(Op0I->getOperand(0), "notlhs"); 2152 Value *NotY = 2153 Builder->CreateNot(Op0I->getOperand(1), "notrhs"); 2154 if (Op0I->getOpcode() == Instruction::And) 2155 return BinaryOperator::CreateOr(NotX, NotY); 2156 return BinaryOperator::CreateAnd(NotX, NotY); 2157 } 2158 2159 } else if (Op0I->getOpcode() == Instruction::AShr) { 2160 // ~(~X >>s Y) --> (X >>s Y) 2161 if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0))) 2162 return BinaryOperator::CreateAShr(Op0NotVal, Op0I->getOperand(1)); 2163 } 2164 } 2165 } 2166 2167 2168 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) { 2169 if (RHS->isOne() && Op0->hasOneUse()) 2170 // xor (cmp A, B), true = not (cmp A, B) = !cmp A, B 2171 if (CmpInst *CI = dyn_cast<CmpInst>(Op0)) 2172 return CmpInst::Create(CI->getOpcode(), 2173 CI->getInversePredicate(), 2174 CI->getOperand(0), CI->getOperand(1)); 2175 2176 // fold (xor(zext(cmp)), 1) and (xor(sext(cmp)), -1) to ext(!cmp). 
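    // A minimal sketch of those two folds in C (names illustrative only; the
    // casts stand in for zext/sext of an i1 compare):
    //
    //   unsigned zext_form(int a, int b) { return (unsigned)(a < b) ^ 1u; }
    //   // == (unsigned)(a >= b), i.e. zext(!cmp)
    //   int sext_form(int a, int b) { return -(a < b) ^ -1; }
    //   // == -(a >= b), i.e. sext(!cmp)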
2177 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) { 2178 if (CmpInst *CI = dyn_cast<CmpInst>(Op0C->getOperand(0))) { 2179 if (CI->hasOneUse() && Op0C->hasOneUse()) { 2180 Instruction::CastOps Opcode = Op0C->getOpcode(); 2181 if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) && 2182 (RHS == ConstantExpr::getCast(Opcode, Builder->getTrue(), 2183 Op0C->getDestTy()))) { 2184 CI->setPredicate(CI->getInversePredicate()); 2185 return CastInst::Create(Opcode, CI, Op0C->getType()); 2186 } 2187 } 2188 } 2189 } 2190 2191 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) { 2192 // ~(c-X) == X-c-1 == X+(-c-1) 2193 if (Op0I->getOpcode() == Instruction::Sub && RHS->isAllOnesValue()) 2194 if (Constant *Op0I0C = dyn_cast<Constant>(Op0I->getOperand(0))) { 2195 Constant *NegOp0I0C = ConstantExpr::getNeg(Op0I0C); 2196 Constant *ConstantRHS = ConstantExpr::getSub(NegOp0I0C, 2197 ConstantInt::get(I.getType(), 1)); 2198 return BinaryOperator::CreateAdd(Op0I->getOperand(1), ConstantRHS); 2199 } 2200 2201 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) { 2202 if (Op0I->getOpcode() == Instruction::Add) { 2203 // ~(X-c) --> (-c-1)-X 2204 if (RHS->isAllOnesValue()) { 2205 Constant *NegOp0CI = ConstantExpr::getNeg(Op0CI); 2206 return BinaryOperator::CreateSub( 2207 ConstantExpr::getSub(NegOp0CI, 2208 ConstantInt::get(I.getType(), 1)), 2209 Op0I->getOperand(0)); 2210 } else if (RHS->getValue().isSignBit()) { 2211 // (X + C) ^ signbit -> (X + C + signbit) 2212 Constant *C = Builder->getInt(RHS->getValue() + Op0CI->getValue()); 2213 return BinaryOperator::CreateAdd(Op0I->getOperand(0), C); 2214 2215 } 2216 } else if (Op0I->getOpcode() == Instruction::Or) { 2217 // (X|C1)^C2 -> X^(C1|C2) iff X&~C1 == 0 2218 if (MaskedValueIsZero(Op0I->getOperand(0), Op0CI->getValue())) { 2219 Constant *NewRHS = ConstantExpr::getOr(Op0CI, RHS); 2220 // Anything in both C1 and C2 is known to be zero, remove it from 2221 // NewRHS. 2222 Constant *CommonBits = ConstantExpr::getAnd(Op0CI, RHS); 2223 NewRHS = ConstantExpr::getAnd(NewRHS, 2224 ConstantExpr::getNot(CommonBits)); 2225 Worklist.Add(Op0I); 2226 I.setOperand(0, Op0I->getOperand(0)); 2227 I.setOperand(1, NewRHS); 2228 return &I; 2229 } 2230 } else if (Op0I->getOpcode() == Instruction::LShr) { 2231 // ((X^C1) >> C2) ^ C3 -> (X>>C2) ^ ((C1>>C2)^C3) 2232 // E1 = "X ^ C1" 2233 BinaryOperator *E1; 2234 ConstantInt *C1; 2235 if (Op0I->hasOneUse() && 2236 (E1 = dyn_cast<BinaryOperator>(Op0I->getOperand(0))) && 2237 E1->getOpcode() == Instruction::Xor && 2238 (C1 = dyn_cast<ConstantInt>(E1->getOperand(1)))) { 2239 // fold (C1 >> C2) ^ C3 2240 ConstantInt *C2 = Op0CI, *C3 = RHS; 2241 APInt FoldConst = C1->getValue().lshr(C2->getValue()); 2242 FoldConst ^= C3->getValue(); 2243 // Prepare the two operands. 2244 Value *Opnd0 = Builder->CreateLShr(E1->getOperand(0), C2); 2245 Opnd0->takeName(Op0I); 2246 cast<Instruction>(Opnd0)->setDebugLoc(I.getDebugLoc()); 2247 Value *FoldVal = ConstantInt::get(Opnd0->getType(), FoldConst); 2248 2249 return BinaryOperator::CreateXor(Opnd0, FoldVal); 2250 } 2251 } 2252 } 2253 } 2254 2255 // Try to fold constant and into select arguments. 
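    // For instance, xor with a constant distributes over both arms of a
    // select, which is roughly what FoldOpIntoSelect exploits when the arms
    // are constants; a minimal sketch in C (names illustrative only):
    //
    //   uint32_t before(bool c, uint32_t a, uint32_t b) { return (c ? a : b) ^ 7u; }
    //   uint32_t after (bool c, uint32_t a, uint32_t b) { return c ? a ^ 7u : b ^ 7u; }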
2256 if (SelectInst *SI = dyn_cast<SelectInst>(Op0)) 2257 if (Instruction *R = FoldOpIntoSelect(I, SI)) 2258 return R; 2259 if (isa<PHINode>(Op0)) 2260 if (Instruction *NV = FoldOpIntoPhi(I)) 2261 return NV; 2262 } 2263 2264 BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1); 2265 if (Op1I) { 2266 Value *A, *B; 2267 if (match(Op1I, m_Or(m_Value(A), m_Value(B)))) { 2268 if (A == Op0) { // B^(B|A) == (A|B)^B 2269 Op1I->swapOperands(); 2270 I.swapOperands(); 2271 std::swap(Op0, Op1); 2272 } else if (B == Op0) { // B^(A|B) == (A|B)^B 2273 I.swapOperands(); // Simplified below. 2274 std::swap(Op0, Op1); 2275 } 2276 } else if (match(Op1I, m_And(m_Value(A), m_Value(B))) && 2277 Op1I->hasOneUse()){ 2278 if (A == Op0) { // A^(A&B) -> A^(B&A) 2279 Op1I->swapOperands(); 2280 std::swap(A, B); 2281 } 2282 if (B == Op0) { // A^(B&A) -> (B&A)^A 2283 I.swapOperands(); // Simplified below. 2284 std::swap(Op0, Op1); 2285 } 2286 } 2287 } 2288 2289 BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0); 2290 if (Op0I) { 2291 Value *A, *B; 2292 if (match(Op0I, m_Or(m_Value(A), m_Value(B))) && 2293 Op0I->hasOneUse()) { 2294 if (A == Op1) // (B|A)^B == (A|B)^B 2295 std::swap(A, B); 2296 if (B == Op1) // (A|B)^B == A & ~B 2297 return BinaryOperator::CreateAnd(A, Builder->CreateNot(Op1)); 2298 } else if (match(Op0I, m_And(m_Value(A), m_Value(B))) && 2299 Op0I->hasOneUse()){ 2300 if (A == Op1) // (A&B)^A -> (B&A)^A 2301 std::swap(A, B); 2302 if (B == Op1 && // (B&A)^A == ~B & A 2303 !isa<ConstantInt>(Op1)) { // Canonical form is (B&C)^C 2304 return BinaryOperator::CreateAnd(Builder->CreateNot(A), Op1); 2305 } 2306 } 2307 } 2308 2309 // (X >> Z) ^ (Y >> Z) -> (X^Y) >> Z for all shifts. 2310 if (Op0I && Op1I && Op0I->isShift() && 2311 Op0I->getOpcode() == Op1I->getOpcode() && 2312 Op0I->getOperand(1) == Op1I->getOperand(1) && 2313 (Op0I->hasOneUse() || Op1I->hasOneUse())) { 2314 Value *NewOp = 2315 Builder->CreateXor(Op0I->getOperand(0), Op1I->getOperand(0), 2316 Op0I->getName()); 2317 return BinaryOperator::Create(Op1I->getOpcode(), NewOp, 2318 Op1I->getOperand(1)); 2319 } 2320 2321 if (Op0I && Op1I) { 2322 Value *A, *B, *C, *D; 2323 // (A & B)^(A | B) -> A ^ B 2324 if (match(Op0I, m_And(m_Value(A), m_Value(B))) && 2325 match(Op1I, m_Or(m_Value(C), m_Value(D)))) { 2326 if ((A == C && B == D) || (A == D && B == C)) 2327 return BinaryOperator::CreateXor(A, B); 2328 } 2329 // (A | B)^(A & B) -> A ^ B 2330 if (match(Op0I, m_Or(m_Value(A), m_Value(B))) && 2331 match(Op1I, m_And(m_Value(C), m_Value(D)))) { 2332 if ((A == C && B == D) || (A == D && B == C)) 2333 return BinaryOperator::CreateXor(A, B); 2334 } 2335 } 2336 2337 // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B) 2338 if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1))) 2339 if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0))) 2340 if (PredicatesFoldable(LHS->getPredicate(), RHS->getPredicate())) { 2341 if (LHS->getOperand(0) == RHS->getOperand(1) && 2342 LHS->getOperand(1) == RHS->getOperand(0)) 2343 LHS->swapOperands(); 2344 if (LHS->getOperand(0) == RHS->getOperand(0) && 2345 LHS->getOperand(1) == RHS->getOperand(1)) { 2346 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1); 2347 unsigned Code = getICmpCode(LHS) ^ getICmpCode(RHS); 2348 bool isSigned = LHS->isSigned() || RHS->isSigned(); 2349 return ReplaceInstUsesWith(I, 2350 getNewICmpValue(isSigned, Code, Op0, Op1, 2351 Builder)); 2352 } 2353 } 2354 2355 // fold (xor (cast A), (cast B)) -> (cast (xor A, B)) 2356 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) { 2357 if (CastInst *Op1C = 
dyn_cast<CastInst>(Op1)) 2358 if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind? 2359 Type *SrcTy = Op0C->getOperand(0)->getType(); 2360 if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isIntegerTy() && 2361 // Only do this if the casts both really cause code to be generated. 2362 ShouldOptimizeCast(Op0C->getOpcode(), Op0C->getOperand(0), 2363 I.getType()) && 2364 ShouldOptimizeCast(Op1C->getOpcode(), Op1C->getOperand(0), 2365 I.getType())) { 2366 Value *NewOp = Builder->CreateXor(Op0C->getOperand(0), 2367 Op1C->getOperand(0), I.getName()); 2368 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType()); 2369 } 2370 } 2371 } 2372 2373 return Changed ? &I : 0; 2374 } 2375
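// A minimal sketch of the final cast fold above in C (names illustrative
// only; the widening casts stand in for a matching pair of zexts):
//
//   uint32_t before(uint8_t a, uint8_t b) { return (uint32_t)a ^ (uint32_t)b; }
//   uint32_t after (uint8_t a, uint8_t b) { return (uint32_t)(uint8_t)(a ^ b); }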