//===- InstCombineAndOrXor.cpp --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitAnd, visitOr, and visitXor functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// Similar to getICmpCode but for FCmpInst. This encodes an fcmp predicate
/// into a four-bit mask.
static unsigned getFCmpCode(FCmpInst::Predicate CC) {
  assert(FCmpInst::FCMP_FALSE <= CC && CC <= FCmpInst::FCMP_TRUE &&
         "Unexpected FCmp predicate!");
  // Take advantage of the bit pattern of FCmpInst::Predicate here.
  //                                                 U L G E
  static_assert(FCmpInst::FCMP_FALSE ==  0, "");  // 0 0 0 0
  static_assert(FCmpInst::FCMP_OEQ   ==  1, "");  // 0 0 0 1
  static_assert(FCmpInst::FCMP_OGT   ==  2, "");  // 0 0 1 0
  static_assert(FCmpInst::FCMP_OGE   ==  3, "");  // 0 0 1 1
  static_assert(FCmpInst::FCMP_OLT   ==  4, "");  // 0 1 0 0
  static_assert(FCmpInst::FCMP_OLE   ==  5, "");  // 0 1 0 1
  static_assert(FCmpInst::FCMP_ONE   ==  6, "");  // 0 1 1 0
  static_assert(FCmpInst::FCMP_ORD   ==  7, "");  // 0 1 1 1
  static_assert(FCmpInst::FCMP_UNO   ==  8, "");  // 1 0 0 0
  static_assert(FCmpInst::FCMP_UEQ   ==  9, "");  // 1 0 0 1
  static_assert(FCmpInst::FCMP_UGT   == 10, "");  // 1 0 1 0
  static_assert(FCmpInst::FCMP_UGE   == 11, "");  // 1 0 1 1
  static_assert(FCmpInst::FCMP_ULT   == 12, "");  // 1 1 0 0
  static_assert(FCmpInst::FCMP_ULE   == 13, "");  // 1 1 0 1
  static_assert(FCmpInst::FCMP_UNE   == 14, "");  // 1 1 1 0
  static_assert(FCmpInst::FCMP_TRUE  == 15, "");  // 1 1 1 1
  return CC;
}

/// This is the complement of getICmpCode, which turns an opcode and two
/// operands into either a constant true or false, or a brand new ICmp
/// instruction. The sign is passed in to determine which kind of predicate to
/// use in the new icmp instruction.
static Value *getNewICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
                              InstCombiner::BuilderTy &Builder) {
  ICmpInst::Predicate NewPred;
  if (Value *NewConstant = getICmpValue(Sign, Code, LHS, RHS, NewPred))
    return NewConstant;
  return Builder.CreateICmp(NewPred, LHS, RHS);
}

/// This is the complement of getFCmpCode, which turns an opcode and two
/// operands into either an FCmp instruction, or a true/false constant.
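/// For example, code 5 (0101, the L and E bits) yields 'fcmp ole LHS, RHS'.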
static Value *getFCmpValue(unsigned Code, Value *LHS, Value *RHS,
                           InstCombiner::BuilderTy &Builder) {
  const auto Pred = static_cast<FCmpInst::Predicate>(Code);
  assert(FCmpInst::FCMP_FALSE <= Pred && Pred <= FCmpInst::FCMP_TRUE &&
         "Unexpected FCmp predicate!");
  if (Pred == FCmpInst::FCMP_FALSE)
    return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
  if (Pred == FCmpInst::FCMP_TRUE)
    return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1);
  return Builder.CreateFCmp(Pred, LHS, RHS);
}

/// Transform BITWISE_OP(BSWAP(A), BSWAP(B)) or
/// BITWISE_OP(BSWAP(A), Constant) to BSWAP(BITWISE_OP(A, B)).
/// \param I Binary operator to transform.
/// \return Pointer to node that must replace the original binary operator, or
///         null pointer if no transformation was made.
static Value *SimplifyBSwap(BinaryOperator &I,
                            InstCombiner::BuilderTy &Builder) {
  assert(I.isBitwiseLogicOp() && "Unexpected opcode for bswap simplifying");

  Value *OldLHS = I.getOperand(0);
  Value *OldRHS = I.getOperand(1);

  Value *NewLHS;
  if (!match(OldLHS, m_BSwap(m_Value(NewLHS))))
    return nullptr;

  Value *NewRHS;
  const APInt *C;

  if (match(OldRHS, m_BSwap(m_Value(NewRHS)))) {
    // OP( BSWAP(x), BSWAP(y) ) -> BSWAP( OP(x, y) )
    if (!OldLHS->hasOneUse() && !OldRHS->hasOneUse())
      return nullptr;
    // NewRHS initialized by the matcher.
  } else if (match(OldRHS, m_APInt(C))) {
    // OP( BSWAP(x), CONSTANT ) -> BSWAP( OP(x, BSWAP(CONSTANT) ) )
    if (!OldLHS->hasOneUse())
      return nullptr;
    NewRHS = ConstantInt::get(I.getType(), C->byteSwap());
  } else
    return nullptr;

  Value *BinOp = Builder.CreateBinOp(I.getOpcode(), NewLHS, NewRHS);
  Function *F = Intrinsic::getDeclaration(I.getModule(), Intrinsic::bswap,
                                          I.getType());
  return Builder.CreateCall(F, BinOp);
}

/// This handles expressions of the form ((val OP C1) & C2), where
/// the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'.
Instruction *InstCombiner::OptAndOp(BinaryOperator *Op,
                                    ConstantInt *OpRHS,
                                    ConstantInt *AndRHS,
                                    BinaryOperator &TheAnd) {
  Value *X = Op->getOperand(0);

  switch (Op->getOpcode()) {
  default: break;
  case Instruction::Add:
    if (Op->hasOneUse()) {
      // Adding a one to a single-bit bit-field should be turned into an XOR
      // of the bit. First thing to check is to see if this AND is with a
      // single bit constant.
      const APInt &AndRHSV = AndRHS->getValue();

      // If there is only one bit set.
      if (AndRHSV.isPowerOf2()) {
        // Ok, at this point, we know that we are masking the result of the
        // ADD down to exactly one bit. If the constant we are adding has
        // no bits set below this bit, then we can eliminate the ADD.
        const APInt &AddRHS = OpRHS->getValue();

        // Check to see if any bits below the one bit set in AndRHSV are set.
        if ((AddRHS & (AndRHSV - 1)).isNullValue()) {
          // If not, the only thing that can affect the output of the AND is
          // the bit specified by AndRHSV. If that bit is set, the effect of
          // the XOR is to toggle the bit. If it is clear, then the ADD has
          // no effect.
          if ((AddRHS & AndRHSV).isNullValue()) { // Bit is not set, noop
            TheAnd.setOperand(0, X);
            return &TheAnd;
          } else {
            // Pull the XOR out of the AND.
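            // Since AddRHS sets the masked bit and no bits below it, the add
            // simply flips that bit in the result:
            //   ((X + AddRHS) & AndRHSV) == ((X & AndRHSV) ^ AndRHSV)
            // e.g. with AndRHS = 4, AddRHS = 4: ((X + 4) & 4) == (X & 4) ^ 4.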
            Value *NewAnd = Builder.CreateAnd(X, AndRHS);
            NewAnd->takeName(Op);
            return BinaryOperator::CreateXor(NewAnd, AndRHS);
          }
        }
      }
    }
    break;
  }
  return nullptr;
}

/// Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise
/// (V < Lo || V >= Hi). This method expects that Lo <= Hi. IsSigned indicates
/// whether to treat V, Lo, and Hi as signed or not.
Value *InstCombiner::insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi,
                                     bool isSigned, bool Inside) {
  assert((isSigned ? Lo.sle(Hi) : Lo.ule(Hi)) &&
         "Lo is not <= Hi in range emission code!");

  Type *Ty = V->getType();
  if (Lo == Hi)
    return Inside ? ConstantInt::getFalse(Ty) : ConstantInt::getTrue(Ty);

  // V >= Min && V <  Hi --> V <  Hi
  // V <  Min || V >= Hi --> V >= Hi
  ICmpInst::Predicate Pred = Inside ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
  if (isSigned ? Lo.isMinSignedValue() : Lo.isMinValue()) {
    Pred = isSigned ? ICmpInst::getSignedPredicate(Pred) : Pred;
    return Builder.CreateICmp(Pred, V, ConstantInt::get(Ty, Hi));
  }

  // V >= Lo && V <  Hi --> V - Lo u<  Hi - Lo
  // V <  Lo || V >= Hi --> V - Lo u>= Hi - Lo
  Value *VMinusLo =
      Builder.CreateSub(V, ConstantInt::get(Ty, Lo), V->getName() + ".off");
  Constant *HiMinusLo = ConstantInt::get(Ty, Hi - Lo);
  return Builder.CreateICmp(Pred, VMinusLo, HiMinusLo);
}

/// Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns
/// that can be simplified.
/// One of A and B is considered the mask. The other is the value. This is
/// described as the "AMask" or "BMask" part of the enum. If the enum contains
/// only "Mask", then both A and B can be considered masks. If A is the mask,
/// then it was proven that (A & C) == C. This is trivial if C == A or C == 0.
/// If both A and C are constants, this proof is also easy.
/// For the following explanations, we assume that A is the mask.
///
/// "AllOnes" declares that the comparison is true only if (A & B) == A or all
/// bits of A are set in B.
///   Example: (icmp eq (A & 3), 3) -> AMask_AllOnes
///
/// "AllZeros" declares that the comparison is true only if (A & B) == 0 or all
/// bits of A are cleared in B.
///   Example: (icmp eq (A & 3), 0) -> Mask_AllZeros
///
/// "Mixed" declares that (A & B) == C and C might or might not contain any
/// number of one bits and zero bits.
///   Example: (icmp eq (A & 3), 1) -> AMask_Mixed
///
/// "Not" means that in the above descriptions "==" should be replaced by "!=".
///   Example: (icmp ne (A & 3), 3) -> AMask_NotAllOnes
///
/// If the mask A contains a single bit, then the following is equivalent:
///   (icmp eq (A & B), A) equals (icmp ne (A & B), 0)
///   (icmp ne (A & B), A) equals (icmp eq (A & B), 0)
enum MaskedICmpType {
  AMask_AllOnes      = 1,
  AMask_NotAllOnes   = 2,
  BMask_AllOnes      = 4,
  BMask_NotAllOnes   = 8,
  Mask_AllZeros      = 16,
  Mask_NotAllZeros   = 32,
  AMask_Mixed        = 64,
  AMask_NotMixed     = 128,
  BMask_Mixed        = 256,
  BMask_NotMixed     = 512
};

/// Return the set of patterns (from MaskedICmpType) that (icmp SCC (A & B), C)
/// satisfies.
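/// For example, (icmp eq (X & 3), 0) lets both X and 3 qualify as the mask,
/// so the returned set is Mask_AllZeros | AMask_Mixed | BMask_Mixed.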
static unsigned getMaskedICmpType(Value *A, Value *B, Value *C,
                                  ICmpInst::Predicate Pred) {
  ConstantInt *ACst = dyn_cast<ConstantInt>(A);
  ConstantInt *BCst = dyn_cast<ConstantInt>(B);
  ConstantInt *CCst = dyn_cast<ConstantInt>(C);
  bool IsEq = (Pred == ICmpInst::ICMP_EQ);
  bool IsAPow2 = (ACst && !ACst->isZero() && ACst->getValue().isPowerOf2());
  bool IsBPow2 = (BCst && !BCst->isZero() && BCst->getValue().isPowerOf2());
  unsigned MaskVal = 0;
  if (CCst && CCst->isZero()) {
    // If C is zero, then both A and B qualify as mask.
    MaskVal |= (IsEq ? (Mask_AllZeros | AMask_Mixed | BMask_Mixed)
                     : (Mask_NotAllZeros | AMask_NotMixed | BMask_NotMixed));
    if (IsAPow2)
      MaskVal |= (IsEq ? (AMask_NotAllOnes | AMask_NotMixed)
                       : (AMask_AllOnes | AMask_Mixed));
    if (IsBPow2)
      MaskVal |= (IsEq ? (BMask_NotAllOnes | BMask_NotMixed)
                       : (BMask_AllOnes | BMask_Mixed));
    return MaskVal;
  }

  if (A == C) {
    MaskVal |= (IsEq ? (AMask_AllOnes | AMask_Mixed)
                     : (AMask_NotAllOnes | AMask_NotMixed));
    if (IsAPow2)
      MaskVal |= (IsEq ? (Mask_NotAllZeros | AMask_NotMixed)
                       : (Mask_AllZeros | AMask_Mixed));
  } else if (ACst && CCst && ConstantExpr::getAnd(ACst, CCst) == CCst) {
    MaskVal |= (IsEq ? AMask_Mixed : AMask_NotMixed);
  }

  if (B == C) {
    MaskVal |= (IsEq ? (BMask_AllOnes | BMask_Mixed)
                     : (BMask_NotAllOnes | BMask_NotMixed));
    if (IsBPow2)
      MaskVal |= (IsEq ? (Mask_NotAllZeros | BMask_NotMixed)
                       : (Mask_AllZeros | BMask_Mixed));
  } else if (BCst && CCst && ConstantExpr::getAnd(BCst, CCst) == CCst) {
    MaskVal |= (IsEq ? BMask_Mixed : BMask_NotMixed);
  }

  return MaskVal;
}

/// Convert an analysis of a masked ICmp into its equivalent if all boolean
/// operations had the opposite sense. Since each "NotXXX" flag (recording !=)
/// is adjacent to the corresponding normal flag (recording ==), this just
/// involves swapping those bits over.
static unsigned conjugateICmpMask(unsigned Mask) {
  unsigned NewMask;
  NewMask = (Mask & (AMask_AllOnes | BMask_AllOnes | Mask_AllZeros |
                     AMask_Mixed | BMask_Mixed))
            << 1;

  NewMask |= (Mask & (AMask_NotAllOnes | BMask_NotAllOnes | Mask_NotAllZeros |
                      AMask_NotMixed | BMask_NotMixed))
             >> 1;

  return NewMask;
}

// Adapts the external decomposeBitTestICmp for local use.
static bool decomposeBitTestICmp(Value *LHS, Value *RHS,
                                 CmpInst::Predicate &Pred, Value *&X,
                                 Value *&Y, Value *&Z) {
  APInt Mask;
  if (!llvm::decomposeBitTestICmp(LHS, RHS, Pred, X, Mask))
    return false;

  Y = ConstantInt::get(X->getType(), Mask);
  Z = ConstantInt::get(X->getType(), 0);
  return true;
}

/// Handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E).
/// Return the pattern classes (from MaskedICmpType) for the left hand side and
/// the right hand side as a pair.
/// LHS and RHS are the left hand side and the right hand side ICmps and PredL
/// and PredR are their predicates, respectively.
static Optional<std::pair<unsigned, unsigned>>
getMaskedTypeForICmpPair(Value *&A, Value *&B, Value *&C, Value *&D, Value *&E,
                         ICmpInst *LHS, ICmpInst *RHS,
                         ICmpInst::Predicate &PredL,
                         ICmpInst::Predicate &PredR) {
  // Vectors are not (yet?) supported. Don't support pointers either.
  if (!LHS->getOperand(0)->getType()->isIntegerTy() ||
      !RHS->getOperand(0)->getType()->isIntegerTy())
    return None;

  // Here comes the tricky part:
  // LHS might be of the form L11 & L12 == X, X == L21 & L22,
  // and L11 & L12 == L21 & L22. The same goes for RHS.
  // Now we must find those components L** and R** that are equal, so
  // that we can extract the parameters A, B, C, D, and E for the canonical
  // pattern above.
  Value *L1 = LHS->getOperand(0);
  Value *L2 = LHS->getOperand(1);
  Value *L11, *L12, *L21, *L22;
  // Check whether the icmp can be decomposed into a bit test.
  if (decomposeBitTestICmp(L1, L2, PredL, L11, L12, L2)) {
    L21 = L22 = L1 = nullptr;
  } else {
    // Look for ANDs in the LHS icmp.
    if (!match(L1, m_And(m_Value(L11), m_Value(L12)))) {
      // Any icmp can be viewed as being trivially masked; if it allows us to
      // remove one, it's worth it.
      L11 = L1;
      L12 = Constant::getAllOnesValue(L1->getType());
    }

    if (!match(L2, m_And(m_Value(L21), m_Value(L22)))) {
      L21 = L2;
      L22 = Constant::getAllOnesValue(L2->getType());
    }
  }

  // Bail if LHS was an icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(PredL))
    return None;

  Value *R1 = RHS->getOperand(0);
  Value *R2 = RHS->getOperand(1);
  Value *R11, *R12;
  bool Ok = false;
  if (decomposeBitTestICmp(R1, R2, PredR, R11, R12, R2)) {
    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
    } else {
      return None;
    }
    E = R2;
    R1 = nullptr;
    Ok = true;
  } else {
    if (!match(R1, m_And(m_Value(R11), m_Value(R12)))) {
      // As before, model no mask as a trivial mask if it'll let us do an
      // optimization.
      R11 = R1;
      R12 = Constant::getAllOnesValue(R1->getType());
    }

    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
      E = R2;
      Ok = true;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
      E = R2;
      Ok = true;
    }
  }

  // Bail if RHS was an icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(PredR))
    return None;

  // Look for ANDs on the right side of the RHS icmp.
  if (!Ok) {
    if (!match(R2, m_And(m_Value(R11), m_Value(R12)))) {
      R11 = R2;
      R12 = Constant::getAllOnesValue(R2->getType());
    }

    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
      E = R1;
      Ok = true;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
      E = R1;
      Ok = true;
    } else {
      return None;
    }
  }
  if (!Ok)
    return None;

  if (L11 == A) {
    B = L12;
    C = L2;
  } else if (L12 == A) {
    B = L11;
    C = L2;
  } else if (L21 == A) {
    B = L22;
    C = L1;
  } else if (L22 == A) {
    B = L21;
    C = L1;
  }

  unsigned LeftType = getMaskedICmpType(A, B, C, PredL);
  unsigned RightType = getMaskedICmpType(A, D, E, PredR);
  return Optional<std::pair<unsigned, unsigned>>(
      std::make_pair(LeftType, RightType));
}

/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E) into a single
/// (icmp(A & X) ==/!= Y), where the left-hand side is of type Mask_NotAllZeros
/// and the right-hand side is of type BMask_Mixed.
/// For example,
///   (icmp (A & 12) != 0) & (icmp (A & 15) == 8) -> (icmp (A & 15) == 8).
static Value *foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
    ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
    Value *A, Value *B, Value *C, Value *D, Value *E,
    ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
    llvm::InstCombiner::BuilderTy &Builder) {
  // We are given the canonical form:
  //   (icmp ne (A & B), 0) & (icmp eq (A & D), E)
  // where D & E == E.
  //
  // If IsAnd is false, we get it in negated form:
  //   (icmp eq (A & B), 0) | (icmp ne (A & D), E) ->
  //     !((icmp ne (A & B), 0) & (icmp eq (A & D), E)).
  //
  // We currently handle only the case where B, C, D, and E are constants.
  //
  ConstantInt *BCst = dyn_cast<ConstantInt>(B);
  if (!BCst)
    return nullptr;
  ConstantInt *CCst = dyn_cast<ConstantInt>(C);
  if (!CCst)
    return nullptr;
  ConstantInt *DCst = dyn_cast<ConstantInt>(D);
  if (!DCst)
    return nullptr;
  ConstantInt *ECst = dyn_cast<ConstantInt>(E);
  if (!ECst)
    return nullptr;

  ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;

  // Update E to the canonical form when D is a power of two and RHS is
  // canonicalized as,
  //   (icmp ne (A & D), 0) -> (icmp eq (A & D), D) or
  //   (icmp ne (A & D), D) -> (icmp eq (A & D), 0).
  if (PredR != NewCC)
    ECst = cast<ConstantInt>(ConstantExpr::getXor(DCst, ECst));

  // If B or D is zero, skip: LHS or RHS can be trivially folded by other
  // folding rules and this pattern won't apply any more.
  if (BCst->getValue() == 0 || DCst->getValue() == 0)
    return nullptr;

  // If B and D don't intersect, i.e. (B & D) == 0, there is no folding because
  // we can't deduce anything from it.
  // For example,
  //   (icmp ne (A & 12), 0) & (icmp eq (A & 3), 1) -> no folding.
  if ((BCst->getValue() & DCst->getValue()) == 0)
    return nullptr;

  // If the following two conditions are met:
  //
  // 1. mask B covers only a single bit that's not covered by mask D, that is,
  //    (B & (B ^ D)) is a power of 2 (in other words, B minus the intersection
  //    of B and D has only one bit set), and
  //
  // 2. RHS (and E) indicates that the rest of B's bits (those shared with D)
  //    are zero, that is, ((B & D) & E) == 0,
  //
  // then that single bit in B must be one, and thus the whole expression can
  // be folded to
  //   (A & (B | D)) == (B & (B ^ D)) | E.
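  // (For instance, B = 12 and D = 7 give B ^ D = 11 and B & (B ^ D) = 8, a
  // power of 2, which explains the constants in the examples below.)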
  //
  // For example,
  //   (icmp ne (A & 12), 0) & (icmp eq (A & 7), 1) -> (icmp eq (A & 15), 9)
  //   (icmp ne (A & 15), 0) & (icmp eq (A & 7), 0) -> (icmp eq (A & 15), 8)
  if ((((BCst->getValue() & DCst->getValue()) & ECst->getValue()) == 0) &&
      (BCst->getValue() & (BCst->getValue() ^ DCst->getValue())).isPowerOf2()) {
    APInt BorD = BCst->getValue() | DCst->getValue();
    APInt BandBxorDorE =
        (BCst->getValue() & (BCst->getValue() ^ DCst->getValue())) |
        ECst->getValue();
    Value *NewMask = ConstantInt::get(BCst->getType(), BorD);
    Value *NewMaskedValue = ConstantInt::get(BCst->getType(), BandBxorDorE);
    Value *NewAnd = Builder.CreateAnd(A, NewMask);
    return Builder.CreateICmp(NewCC, NewAnd, NewMaskedValue);
  }

  auto IsSubSetOrEqual = [](ConstantInt *C1, ConstantInt *C2) {
    return (C1->getValue() & C2->getValue()) == C1->getValue();
  };
  auto IsSuperSetOrEqual = [](ConstantInt *C1, ConstantInt *C2) {
    return (C1->getValue() & C2->getValue()) == C2->getValue();
  };

  // In the following, we consider only the cases where B is a superset of D,
  // B is a subset of D, or B == D, because otherwise there's at least one bit
  // covered by B but not D, in which case we can't deduce much from it, so
  // no folding (aside from the single must-be-one bit case right above).
  // For example,
  //   (icmp ne (A & 14), 0) & (icmp eq (A & 3), 1) -> no folding.
  if (!IsSubSetOrEqual(BCst, DCst) && !IsSuperSetOrEqual(BCst, DCst))
    return nullptr;

  // At this point, either B is a superset of D, B is a subset of D, or B == D.

  // If E is zero and B is a subset of (or equal to) D, LHS and RHS contradict
  // and the whole expression becomes false (or true if negated); otherwise,
  // no folding.
  // For example,
  //   (icmp ne (A & 3), 0) & (icmp eq (A & 7), 0) -> false.
  //   (icmp ne (A & 15), 0) & (icmp eq (A & 3), 0) -> no folding.
  if (ECst->isZero()) {
    if (IsSubSetOrEqual(BCst, DCst))
      return ConstantInt::get(LHS->getType(), !IsAnd);
    return nullptr;
  }

  // At this point, B, D, and E aren't zero, and either (B & D) == B,
  // (B & D) == D, or B == D. If B is a superset of (or equal to) D, since E
  // is not zero, LHS is subsumed by RHS (RHS implies LHS), so the whole
  // expression becomes RHS. For example,
  //   (icmp ne (A & 255), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  //   (icmp ne (A & 15), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  if (IsSuperSetOrEqual(BCst, DCst))
    return RHS;
  // Otherwise, B is a subset of D. If B and E have a common bit set,
  // i.e. (B & E) != 0, then LHS is subsumed by RHS. For example,
  //   (icmp ne (A & 12), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  assert(IsSubSetOrEqual(BCst, DCst) && "Precondition due to above code");
  if ((BCst->getValue() & ECst->getValue()) != 0)
    return RHS;
  // Otherwise, LHS and RHS contradict and the whole expression becomes false
  // (or true if negated). For example,
  //   (icmp ne (A & 7), 0) & (icmp eq (A & 15), 8) -> false.
  //   (icmp ne (A & 6), 0) & (icmp eq (A & 15), 8) -> false.
  return ConstantInt::get(LHS->getType(), !IsAnd);
}

/// Try to fold (icmp(A & B) ==/!= 0) &/| (icmp(A & D) ==/!= E) into a single
/// (icmp(A & X) ==/!= Y), where the left-hand side and the right-hand side
/// aren't of the common mask pattern type.
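/// For example, in (icmp ne (A & 12), 0) & (icmp eq (A & 15), 8), the left
/// hand side is of type Mask_NotAllZeros and the right hand side is of type
/// BMask_Mixed, so the pair qualifies for the fold below.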
static Value *foldLogOpOfMaskedICmpsAsymmetric(
    ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
    Value *A, Value *B, Value *C, Value *D, Value *E,
    ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
    unsigned LHSMask, unsigned RHSMask,
    llvm::InstCombiner::BuilderTy &Builder) {
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  // Handle Mask_NotAllZeros-BMask_Mixed cases.
  // (icmp ne/eq (A & B), C) &/| (icmp eq/ne (A & D), E), or
  // (icmp eq/ne (A & B), C) &/| (icmp ne/eq (A & D), E)
  // which gets swapped to
  //   (icmp ne/eq (A & D), E) &/| (icmp eq/ne (A & B), C).
  if (!IsAnd) {
    LHSMask = conjugateICmpMask(LHSMask);
    RHSMask = conjugateICmpMask(RHSMask);
  }
  if ((LHSMask & Mask_NotAllZeros) && (RHSMask & BMask_Mixed)) {
    if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
            LHS, RHS, IsAnd, A, B, C, D, E,
            PredL, PredR, Builder)) {
      return V;
    }
  } else if ((LHSMask & BMask_Mixed) && (RHSMask & Mask_NotAllZeros)) {
    if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
            RHS, LHS, IsAnd, A, D, E, B, C,
            PredR, PredL, Builder)) {
      return V;
    }
  }
  return nullptr;
}

/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
/// into a single (icmp(A & X) ==/!= Y).
static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
                                     llvm::InstCombiner::BuilderTy &Builder) {
  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
  ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
  Optional<std::pair<unsigned, unsigned>> MaskPair =
      getMaskedTypeForICmpPair(A, B, C, D, E, LHS, RHS, PredL, PredR);
  if (!MaskPair)
    return nullptr;
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  unsigned LHSMask = MaskPair->first;
  unsigned RHSMask = MaskPair->second;
  unsigned Mask = LHSMask & RHSMask;
  if (Mask == 0) {
    // Even if the two sides don't share a common pattern, check if folding can
    // still happen.
    if (Value *V = foldLogOpOfMaskedICmpsAsymmetric(
            LHS, RHS, IsAnd, A, B, C, D, E, PredL, PredR, LHSMask, RHSMask,
            Builder))
      return V;
    return nullptr;
  }

  // In full generality:
  //     (icmp (A & B) Op C) | (icmp (A & D) Op E)
  // ==  ![ (icmp (A & B) !Op C) & (icmp (A & D) !Op E) ]
  //
  // If the latter can be converted into (icmp (A & X) Op Y) then the former is
  // equivalent to (icmp (A & X) !Op Y).
  //
  // Therefore, we can pretend for the rest of this function that we're dealing
  // with the conjunction, provided we flip the sense of any comparisons (both
  // input and output).

  // In most cases we're going to produce an EQ for the "&&" case.
  ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
  if (!IsAnd) {
    // Convert the masking analysis into its equivalent with negated
    // comparisons.
    Mask = conjugateICmpMask(Mask);
  }

  if (Mask & Mask_AllZeros) {
    // (icmp eq (A & B), 0) & (icmp eq (A & D), 0)
    // -> (icmp eq (A & (B|D)), 0)
    Value *NewOr = Builder.CreateOr(B, D);
    Value *NewAnd = Builder.CreateAnd(A, NewOr);
    // We can't use C as zero because we might actually handle
    //   (icmp ne (A & B), B) & (icmp ne (A & D), D)
    // with B and D each having a single bit set.
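    // For a single-bit B, (icmp ne (A & B), B) is exactly (icmp eq (A & B), 0),
    // which is why that pattern still classifies as Mask_AllZeros even though
    // C is B, not zero.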
    Value *Zero = Constant::getNullValue(A->getType());
    return Builder.CreateICmp(NewCC, NewAnd, Zero);
  }
  if (Mask & BMask_AllOnes) {
    // (icmp eq (A & B), B) & (icmp eq (A & D), D)
    // -> (icmp eq (A & (B|D)), (B|D))
    Value *NewOr = Builder.CreateOr(B, D);
    Value *NewAnd = Builder.CreateAnd(A, NewOr);
    return Builder.CreateICmp(NewCC, NewAnd, NewOr);
  }
  if (Mask & AMask_AllOnes) {
    // (icmp eq (A & B), A) & (icmp eq (A & D), A)
    // -> (icmp eq (A & (B&D)), A)
    Value *NewAnd1 = Builder.CreateAnd(B, D);
    Value *NewAnd2 = Builder.CreateAnd(A, NewAnd1);
    return Builder.CreateICmp(NewCC, NewAnd2, A);
  }

  // Remaining cases assume at least that B and D are constant, and depend on
  // their actual values. This isn't strictly necessary, just a "handle the
  // easy cases for now" decision.
  ConstantInt *BCst = dyn_cast<ConstantInt>(B);
  if (!BCst)
    return nullptr;
  ConstantInt *DCst = dyn_cast<ConstantInt>(D);
  if (!DCst)
    return nullptr;

  if (Mask & (Mask_NotAllZeros | BMask_NotAllOnes)) {
    // (icmp ne (A & B), 0) & (icmp ne (A & D), 0) and
    // (icmp ne (A & B), B) & (icmp ne (A & D), D)
    // -> (icmp ne (A & B), 0) or (icmp ne (A & D), 0)
    // Only valid if one of the masks is a superset of the other (check "B&D"
    // is the same as either B or D).
    APInt NewMask = BCst->getValue() & DCst->getValue();

    if (NewMask == BCst->getValue())
      return LHS;
    else if (NewMask == DCst->getValue())
      return RHS;
  }

  if (Mask & AMask_NotAllOnes) {
    // (icmp ne (A & B), B) & (icmp ne (A & D), D)
    // -> (icmp ne (A & B), A) or (icmp ne (A & D), A)
    // Only valid if one of the masks is a superset of the other (check "B|D"
    // is the same as either B or D).
    APInt NewMask = BCst->getValue() | DCst->getValue();

    if (NewMask == BCst->getValue())
      return LHS;
    else if (NewMask == DCst->getValue())
      return RHS;
  }

  if (Mask & BMask_Mixed) {
    // (icmp eq (A & B), C) & (icmp eq (A & D), E)
    // We already know that B & C == C && D & E == E.
    // If we can prove that (B & D) & (C ^ E) == 0, that is, the bits of
    // C and E, which are shared by both the mask B and the mask D, don't
    // contradict, then we can transform to
    // -> (icmp eq (A & (B|D)), (C|E))
    // Currently, we only handle the case of B, C, D, and E being constant.
    // We can't simply use C and E because we might actually handle
    //   (icmp ne (A & B), B) & (icmp eq (A & D), D)
    // with B and D each having a single bit set.
    ConstantInt *CCst = dyn_cast<ConstantInt>(C);
    if (!CCst)
      return nullptr;
    ConstantInt *ECst = dyn_cast<ConstantInt>(E);
    if (!ECst)
      return nullptr;
    if (PredL != NewCC)
      CCst = cast<ConstantInt>(ConstantExpr::getXor(BCst, CCst));
    if (PredR != NewCC)
      ECst = cast<ConstantInt>(ConstantExpr::getXor(DCst, ECst));

    // If there is a conflict, we should actually return a false for the
    // whole construct.
    if (((BCst->getValue() & DCst->getValue()) &
         (CCst->getValue() ^ ECst->getValue())).getBoolValue())
      return ConstantInt::get(LHS->getType(), !IsAnd);

    Value *NewOr1 = Builder.CreateOr(B, D);
    Value *NewOr2 = ConstantExpr::getOr(CCst, ECst);
    Value *NewAnd = Builder.CreateAnd(A, NewOr1);
    return Builder.CreateICmp(NewCC, NewAnd, NewOr2);
  }

  return nullptr;
}

/// Try to fold a signed range check with lower bound 0 to an unsigned icmp.
/// Example: (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
/// If \p Inverted is true then the check is for the inverted range, e.g.
/// (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
Value *InstCombiner::simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                        bool Inverted) {
  // Check the lower range comparison, e.g. x >= 0
  // InstCombine already ensured that if there is a constant it's on the RHS.
  ConstantInt *RangeStart = dyn_cast<ConstantInt>(Cmp0->getOperand(1));
  if (!RangeStart)
    return nullptr;

  ICmpInst::Predicate Pred0 =
      Inverted ? Cmp0->getInversePredicate() : Cmp0->getPredicate();

  // Accept x > -1 or x >= 0 (after potentially inverting the predicate).
  if (!((Pred0 == ICmpInst::ICMP_SGT && RangeStart->isMinusOne()) ||
        (Pred0 == ICmpInst::ICMP_SGE && RangeStart->isZero())))
    return nullptr;

  ICmpInst::Predicate Pred1 =
      Inverted ? Cmp1->getInversePredicate() : Cmp1->getPredicate();

  Value *Input = Cmp0->getOperand(0);
  Value *RangeEnd;
  if (Cmp1->getOperand(0) == Input) {
    // For the upper range compare we have: icmp x, n
    RangeEnd = Cmp1->getOperand(1);
  } else if (Cmp1->getOperand(1) == Input) {
    // For the upper range compare we have: icmp n, x
    RangeEnd = Cmp1->getOperand(0);
    Pred1 = ICmpInst::getSwappedPredicate(Pred1);
  } else {
    return nullptr;
  }

  // Check the upper range comparison, e.g. x < n
  ICmpInst::Predicate NewPred;
  switch (Pred1) {
  case ICmpInst::ICMP_SLT: NewPred = ICmpInst::ICMP_ULT; break;
  case ICmpInst::ICMP_SLE: NewPred = ICmpInst::ICMP_ULE; break;
  default: return nullptr;
  }

  // This simplification is only valid if the upper range is not negative.
  KnownBits Known = computeKnownBits(RangeEnd, /*Depth=*/0, Cmp1);
  if (!Known.isNonNegative())
    return nullptr;

  if (Inverted)
    NewPred = ICmpInst::getInversePredicate(NewPred);

  return Builder.CreateICmp(NewPred, Input, RangeEnd);
}

static Value *
foldAndOrOfEqualityCmpsWithConstants(ICmpInst *LHS, ICmpInst *RHS,
                                     bool JoinedByAnd,
                                     InstCombiner::BuilderTy &Builder) {
  Value *X = LHS->getOperand(0);
  if (X != RHS->getOperand(0))
    return nullptr;

  const APInt *C1, *C2;
  if (!match(LHS->getOperand(1), m_APInt(C1)) ||
      !match(RHS->getOperand(1), m_APInt(C2)))
    return nullptr;

  // We only handle (X != C1 && X != C2) and (X == C1 || X == C2).
  ICmpInst::Predicate Pred = LHS->getPredicate();
  if (Pred != RHS->getPredicate())
    return nullptr;
  if (JoinedByAnd && Pred != ICmpInst::ICMP_NE)
    return nullptr;
  if (!JoinedByAnd && Pred != ICmpInst::ICMP_EQ)
    return nullptr;

  // The larger unsigned constant goes on the right.
  if (C1->ugt(*C2))
    std::swap(C1, C2);

  APInt Xor = *C1 ^ *C2;
  if (Xor.isPowerOf2()) {
    // If LHSC and RHSC differ by only one bit, then set that bit in X and
    // compare against the larger constant:
    // (X == C1 || X == C2) --> (X | (C1 ^ C2)) == C2
    // (X != C1 && X != C2) --> (X | (C1 ^ C2)) != C2
    // We choose an 'or' with a Pow2 constant rather than the inverse mask with
    // 'and' because that may lead to smaller codegen from a smaller constant.
    Value *Or = Builder.CreateOr(X, ConstantInt::get(X->getType(), Xor));
    return Builder.CreateICmp(Pred, Or, ConstantInt::get(X->getType(), *C2));
  }

  // Special case: get the ordering right when the values wrap around zero.
  // I.e., we assumed the constants were unsigned when swapping earlier.
  if (C1->isNullValue() && C2->isAllOnesValue())
    std::swap(C1, C2);

  if (*C1 == *C2 - 1) {
    // (X == 13 || X == 14) --> X - 13 <=u 1
    // (X != 13 && X != 14) --> X - 13  >u 1
    // An 'add' is the canonical IR form, so favor that over a 'sub'.
    Value *Add = Builder.CreateAdd(X, ConstantInt::get(X->getType(), -(*C1)));
    auto NewPred = JoinedByAnd ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_ULE;
    return Builder.CreateICmp(NewPred, Add, ConstantInt::get(X->getType(), 1));
  }

  return nullptr;
}

// Fold (iszero(A & K1) | iszero(A & K2)) -> (A & (K1 | K2)) != (K1 | K2)
// Fold (!iszero(A & K1) & !iszero(A & K2)) -> (A & (K1 | K2)) == (K1 | K2)
Value *InstCombiner::foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS, ICmpInst *RHS,
                                                   bool JoinedByAnd,
                                                   Instruction &CxtI) {
  ICmpInst::Predicate Pred = LHS->getPredicate();
  if (Pred != RHS->getPredicate())
    return nullptr;
  if (JoinedByAnd && Pred != ICmpInst::ICMP_NE)
    return nullptr;
  if (!JoinedByAnd && Pred != ICmpInst::ICMP_EQ)
    return nullptr;

  // TODO: Support vector splats.
  ConstantInt *LHSC = dyn_cast<ConstantInt>(LHS->getOperand(1));
  ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS->getOperand(1));
  if (!LHSC || !RHSC || !LHSC->isZero() || !RHSC->isZero())
    return nullptr;

  Value *A, *B, *C, *D;
  if (match(LHS->getOperand(0), m_And(m_Value(A), m_Value(B))) &&
      match(RHS->getOperand(0), m_And(m_Value(C), m_Value(D)))) {
    if (A == D || B == D)
      std::swap(C, D);
    if (B == C)
      std::swap(A, B);

    if (A == C &&
        isKnownToBeAPowerOfTwo(B, false, 0, &CxtI) &&
        isKnownToBeAPowerOfTwo(D, false, 0, &CxtI)) {
      Value *Mask = Builder.CreateOr(B, D);
      Value *Masked = Builder.CreateAnd(A, Mask);
      auto NewPred = JoinedByAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
      return Builder.CreateICmp(NewPred, Masked, Mask);
    }
  }

  return nullptr;
}

/// Fold (icmp)&(icmp) if possible.
Value *InstCombiner::foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS,
                                    Instruction &CxtI) {
  // Fold (!iszero(A & K1) & !iszero(A & K2)) -> (A & (K1 | K2)) == (K1 | K2)
  // if K1 and K2 are a one-bit mask.
  if (Value *V = foldAndOrOfICmpsOfAndWithPow2(LHS, RHS, true, CxtI))
    return V;

  ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();

  // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
  if (PredicatesFoldable(PredL, PredR)) {
    if (LHS->getOperand(0) == RHS->getOperand(1) &&
        LHS->getOperand(1) == RHS->getOperand(0))
      LHS->swapOperands();
    if (LHS->getOperand(0) == RHS->getOperand(0) &&
        LHS->getOperand(1) == RHS->getOperand(1)) {
      Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
      unsigned Code = getICmpCode(LHS) & getICmpCode(RHS);
      bool isSigned = LHS->isSigned() || RHS->isSigned();
      return getNewICmpValue(isSigned, Code, Op0, Op1, Builder);
    }
  }

  // Handle (roughly): (icmp eq (A & B), C) & (icmp eq (A & D), E).
  if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, true, Builder))
    return V;

  // E.g. (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
  if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/false))
    return V;

  // E.g. (icmp slt x, n) & (icmp sge x, 0) --> icmp ult x, n
  if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/false))
    return V;

  if (Value *V = foldAndOrOfEqualityCmpsWithConstants(LHS, RHS, true, Builder))
    return V;

  // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
  Value *LHS0 = LHS->getOperand(0), *RHS0 = RHS->getOperand(0);
  ConstantInt *LHSC = dyn_cast<ConstantInt>(LHS->getOperand(1));
  ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS->getOperand(1));
  if (!LHSC || !RHSC)
    return nullptr;

  if (LHSC == RHSC && PredL == PredR) {
    // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
    // where C is a power of 2, or
    // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
    if ((PredL == ICmpInst::ICMP_ULT && LHSC->getValue().isPowerOf2()) ||
        (PredL == ICmpInst::ICMP_EQ && LHSC->isZero())) {
      Value *NewOr = Builder.CreateOr(LHS0, RHS0);
      return Builder.CreateICmp(PredL, NewOr, LHSC);
    }
  }

  // (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2
  // where CMAX is the all-ones value for the truncated type,
  // iff the lower bits of C2 and CA are zero.
  if (PredL == ICmpInst::ICMP_EQ && PredL == PredR && LHS->hasOneUse() &&
      RHS->hasOneUse()) {
    Value *V;
    ConstantInt *AndC, *SmallC = nullptr, *BigC = nullptr;

    // (trunc x) == C1 & (and x, CA) == C2
    // (and x, CA) == C2 & (trunc x) == C1
    if (match(RHS0, m_Trunc(m_Value(V))) &&
        match(LHS0, m_And(m_Specific(V), m_ConstantInt(AndC)))) {
      SmallC = RHSC;
      BigC = LHSC;
    } else if (match(LHS0, m_Trunc(m_Value(V))) &&
               match(RHS0, m_And(m_Specific(V), m_ConstantInt(AndC)))) {
      SmallC = LHSC;
      BigC = RHSC;
    }

    if (SmallC && BigC) {
      unsigned BigBitSize = BigC->getType()->getBitWidth();
      unsigned SmallBitSize = SmallC->getType()->getBitWidth();

      // Check that the low bits are zero.
      APInt Low = APInt::getLowBitsSet(BigBitSize, SmallBitSize);
      if ((Low & AndC->getValue()).isNullValue() &&
          (Low & BigC->getValue()).isNullValue()) {
        Value *NewAnd = Builder.CreateAnd(V, Low | AndC->getValue());
        APInt N = SmallC->getValue().zext(BigBitSize) | BigC->getValue();
        Value *NewVal = ConstantInt::get(AndC->getType()->getContext(), N);
        return Builder.CreateICmp(PredL, NewAnd, NewVal);
      }
    }
  }

  // From here on, we only handle:
  //   (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
  if (LHS0 != RHS0)
    return nullptr;

  // ICMP_[US][GL]E X, C is folded to ICMP_[US][GL]T elsewhere.
  if (PredL == ICmpInst::ICMP_UGE || PredL == ICmpInst::ICMP_ULE ||
      PredR == ICmpInst::ICMP_UGE || PredR == ICmpInst::ICMP_ULE ||
      PredL == ICmpInst::ICMP_SGE || PredL == ICmpInst::ICMP_SLE ||
      PredR == ICmpInst::ICMP_SGE || PredR == ICmpInst::ICMP_SLE)
    return nullptr;

  // We can't fold (ugt x, C) & (sgt x, C2).
  if (!PredicatesFoldable(PredL, PredR))
    return nullptr;

  // Ensure that the larger constant is on the RHS.
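  // E.g., (X u< 15) & (X u> 13) is swapped to (X u> 13) & (X u< 15), which the
  // switch below turns into the range test (X - 14) u< 1.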
  bool ShouldSwap;
  if (CmpInst::isSigned(PredL) ||
      (ICmpInst::isEquality(PredL) && CmpInst::isSigned(PredR)))
    ShouldSwap = LHSC->getValue().sgt(RHSC->getValue());
  else
    ShouldSwap = LHSC->getValue().ugt(RHSC->getValue());

  if (ShouldSwap) {
    std::swap(LHS, RHS);
    std::swap(LHSC, RHSC);
    std::swap(PredL, PredR);
  }

  // At this point, we know we have two icmp instructions
  // comparing a value against two constants and and'ing the result
  // together. Because of the above check, we know that we only have
  // icmp eq, icmp ne, icmp [su]lt, and icmp [su]gt here. We also know
  // (from the icmp folding check above) that the two constants
  // are not equal and that the larger constant is on the RHS.
  assert(LHSC != RHSC && "Compares not folded above?");

  switch (PredL) {
  default:
    llvm_unreachable("Unknown integer condition code!");
  case ICmpInst::ICMP_NE:
    switch (PredR) {
    default:
      llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_ULT:
      if (LHSC == SubOne(RHSC)) // (X != 13 & X u< 14) -> X < 13
        return Builder.CreateICmpULT(LHS0, LHSC);
      if (LHSC->isZero()) // (X != 0 & X u< 14) -> X-1 u< 13
        return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(),
                               false, true);
      break; // (X != 13 & X u< 15) -> no change
    case ICmpInst::ICMP_SLT:
      if (LHSC == SubOne(RHSC)) // (X != 13 & X s< 14) -> X < 13
        return Builder.CreateICmpSLT(LHS0, LHSC);
      break; // (X != 13 & X s< 15) -> no change
    case ICmpInst::ICMP_NE:
      // Potential folds for this case should already be handled.
      break;
    }
    break;
  case ICmpInst::ICMP_UGT:
    switch (PredR) {
    default:
      llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_NE:
      if (RHSC == AddOne(LHSC)) // (X u> 13 & X != 14) -> X u> 14
        return Builder.CreateICmp(PredL, LHS0, RHSC);
      break; // (X u> 13 & X != 15) -> no change
    case ICmpInst::ICMP_ULT: // (X u> 13 & X u< 15) -> (X-14) <u 1
      return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(),
                             false, true);
    }
    break;
  case ICmpInst::ICMP_SGT:
    switch (PredR) {
    default:
      llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_NE:
      if (RHSC == AddOne(LHSC)) // (X s> 13 & X != 14) -> X s> 14
        return Builder.CreateICmp(PredL, LHS0, RHSC);
      break; // (X s> 13 & X != 15) -> no change
    case ICmpInst::ICMP_SLT: // (X s> 13 & X s< 15) -> (X-14) s< 1
      return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(),
                             true, true);
    }
    break;
  }

  return nullptr;
}

Value *InstCombiner::foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS,
                                      bool IsAnd) {
  Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
  Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
  FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();

  if (LHS0 == RHS1 && RHS0 == LHS1) {
    // Swap RHS operands to match LHS.
    PredR = FCmpInst::getSwappedPredicate(PredR);
    std::swap(RHS0, RHS1);
  }

  // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
  // Suppose the relation between x and y is R, where R is one of
  // U(1000), L(0100), G(0010) or E(0001), and CC0 and CC1 are the bitmasks for
  // testing the desired relations.
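  // For instance, 'ole' is 0101 (L|E) and 'one' is 0110 (L|G), so and'ing
  // their codes yields 0100 (L), i.e. 'fcmp olt x, y'.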
  //
  // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
  //    bool(R & CC0) && bool(R & CC1)
  //  = bool((R & CC0) & (R & CC1))
  //  = bool(R & (CC0 & CC1)) <= by re-association, commutation, and idempotency
  //
  // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
  //    bool(R & CC0) || bool(R & CC1)
  //  = bool((R & CC0) | (R & CC1))
  //  = bool(R & (CC0 | CC1)) <= by reversed distribution (contribution? ;)
  if (LHS0 == RHS0 && LHS1 == RHS1) {
    unsigned FCmpCodeL = getFCmpCode(PredL);
    unsigned FCmpCodeR = getFCmpCode(PredR);
    unsigned NewPred = IsAnd ? FCmpCodeL & FCmpCodeR : FCmpCodeL | FCmpCodeR;
    return getFCmpValue(NewPred, LHS0, LHS1, Builder);
  }

  if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
      (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO && !IsAnd)) {
    if (LHS0->getType() != RHS0->getType())
      return nullptr;

    // FCmp canonicalization ensures that (fcmp ord/uno X, X) and
    // (fcmp ord/uno X, C) will be transformed to (fcmp X, +0.0).
    if (match(LHS1, m_PosZeroFP()) && match(RHS1, m_PosZeroFP()))
      // Ignore the constants because they are obviously not NANs:
      // (fcmp ord x, 0.0) & (fcmp ord y, 0.0) -> (fcmp ord x, y)
      // (fcmp uno x, 0.0) | (fcmp uno y, 0.0) -> (fcmp uno x, y)
      return Builder.CreateFCmp(PredL, LHS0, RHS0);
  }

  return nullptr;
}

/// Match De Morgan's Laws:
///   (~A & ~B) == (~(A | B))
///   (~A | ~B) == (~(A & B))
static Instruction *matchDeMorgansLaws(BinaryOperator &I,
                                       InstCombiner::BuilderTy &Builder) {
  auto Opcode = I.getOpcode();
  assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
         "Trying to match De Morgan's Laws with something other than and/or");

  // Flip the logic operation.
  Opcode = (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;

  Value *A, *B;
  if (match(I.getOperand(0), m_OneUse(m_Not(m_Value(A)))) &&
      match(I.getOperand(1), m_OneUse(m_Not(m_Value(B)))) &&
      !IsFreeToInvert(A, A->hasOneUse()) &&
      !IsFreeToInvert(B, B->hasOneUse())) {
    Value *AndOr = Builder.CreateBinOp(Opcode, A, B, I.getName() + ".demorgan");
    return BinaryOperator::CreateNot(AndOr);
  }

  return nullptr;
}

bool InstCombiner::shouldOptimizeCast(CastInst *CI) {
  Value *CastSrc = CI->getOperand(0);

  // Noop casts and casts of constants should be eliminated trivially.
  if (CI->getSrcTy() == CI->getDestTy() || isa<Constant>(CastSrc))
    return false;

  // If this cast is paired with another cast that can be eliminated, we prefer
  // to have it eliminated.
  if (const auto *PrecedingCI = dyn_cast<CastInst>(CastSrc))
    if (isEliminableCastPair(PrecedingCI, CI))
      return false;

  return true;
}

/// Fold {and,or,xor} (cast X), C.
static Instruction *foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast,
                                          InstCombiner::BuilderTy &Builder) {
  Constant *C = dyn_cast<Constant>(Logic.getOperand(1));
  if (!C)
    return nullptr;

  auto LogicOpc = Logic.getOpcode();
  Type *DestTy = Logic.getType();
  Type *SrcTy = Cast->getSrcTy();

  // Move the logic operation ahead of a zext or sext if the constant is
  // unchanged in the smaller source type. Performing the logic in a smaller
  // type may provide more information to later folds, and the smaller logic
  // instruction may be cheaper (particularly in the case of vectors).
  Value *X;
  if (match(Cast, m_OneUse(m_ZExt(m_Value(X))))) {
    Constant *TruncC = ConstantExpr::getTrunc(C, SrcTy);
    Constant *ZextTruncC = ConstantExpr::getZExt(TruncC, DestTy);
    if (ZextTruncC == C) {
      // LogicOpc (zext X), C --> zext (LogicOpc X, C)
      Value *NewOp = Builder.CreateBinOp(LogicOpc, X, TruncC);
      return new ZExtInst(NewOp, DestTy);
    }
  }

  if (match(Cast, m_OneUse(m_SExt(m_Value(X))))) {
    Constant *TruncC = ConstantExpr::getTrunc(C, SrcTy);
    Constant *SextTruncC = ConstantExpr::getSExt(TruncC, DestTy);
    if (SextTruncC == C) {
      // LogicOpc (sext X), C --> sext (LogicOpc X, C)
      Value *NewOp = Builder.CreateBinOp(LogicOpc, X, TruncC);
      return new SExtInst(NewOp, DestTy);
    }
  }

  return nullptr;
}

/// Fold {and,or,xor} (cast X), Y.
Instruction *InstCombiner::foldCastedBitwiseLogic(BinaryOperator &I) {
  auto LogicOpc = I.getOpcode();
  assert(I.isBitwiseLogicOp() && "Unexpected opcode for bitwise logic folding");

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  CastInst *Cast0 = dyn_cast<CastInst>(Op0);
  if (!Cast0)
    return nullptr;

  // This must be a cast from an integer or integer vector source type to allow
  // transformation of the logic operation to the source type.
  Type *DestTy = I.getType();
  Type *SrcTy = Cast0->getSrcTy();
  if (!SrcTy->isIntOrIntVectorTy())
    return nullptr;

  if (Instruction *Ret = foldLogicCastConstant(I, Cast0, Builder))
    return Ret;

  CastInst *Cast1 = dyn_cast<CastInst>(Op1);
  if (!Cast1)
    return nullptr;

  // Both operands of the logic operation are casts. The casts must be of the
  // same type for reduction.
  auto CastOpcode = Cast0->getOpcode();
  if (CastOpcode != Cast1->getOpcode() || SrcTy != Cast1->getSrcTy())
    return nullptr;

  Value *Cast0Src = Cast0->getOperand(0);
  Value *Cast1Src = Cast1->getOperand(0);

  // fold logic(cast(A), cast(B)) -> cast(logic(A, B))
  if (shouldOptimizeCast(Cast0) && shouldOptimizeCast(Cast1)) {
    Value *NewOp = Builder.CreateBinOp(LogicOpc, Cast0Src, Cast1Src,
                                       I.getName());
    return CastInst::Create(CastOpcode, NewOp, DestTy);
  }

  // For now, only 'and'/'or' have optimizations after this.
  if (LogicOpc == Instruction::Xor)
    return nullptr;

  // If this is logic(cast(icmp), cast(icmp)), try to fold this even if the
  // cast is otherwise not optimizable. This happens for vector sexts.
  ICmpInst *ICmp0 = dyn_cast<ICmpInst>(Cast0Src);
  ICmpInst *ICmp1 = dyn_cast<ICmpInst>(Cast1Src);
  if (ICmp0 && ICmp1) {
    Value *Res = LogicOpc == Instruction::And ? foldAndOfICmps(ICmp0, ICmp1, I)
                                              : foldOrOfICmps(ICmp0, ICmp1, I);
    if (Res)
      return CastInst::Create(CastOpcode, Res, DestTy);
    return nullptr;
  }

  // If this is logic(cast(fcmp), cast(fcmp)), try to fold this even if the
  // cast is otherwise not optimizable. This happens for vector sexts.
  FCmpInst *FCmp0 = dyn_cast<FCmpInst>(Cast0Src);
  FCmpInst *FCmp1 = dyn_cast<FCmpInst>(Cast1Src);
  if (FCmp0 && FCmp1)
    if (Value *R = foldLogicOfFCmps(FCmp0, FCmp1, LogicOpc == Instruction::And))
      return CastInst::Create(CastOpcode, R, DestTy);

  return nullptr;
}

static Instruction *foldAndToXor(BinaryOperator &I,
                                 InstCombiner::BuilderTy &Builder) {
  assert(I.getOpcode() == Instruction::And);
  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  Value *A, *B;

  // Operand complexity canonicalization guarantees that the 'or' is Op0.
  // (A | B) & ~(A & B) --> A ^ B
  // (A | B) & ~(B & A) --> A ^ B
  if (match(&I, m_BinOp(m_Or(m_Value(A), m_Value(B)),
                        m_Not(m_c_And(m_Deferred(A), m_Deferred(B))))))
    return BinaryOperator::CreateXor(A, B);

  // (A | ~B) & (~A | B) --> ~(A ^ B)
  // (A | ~B) & (B | ~A) --> ~(A ^ B)
  // (~B | A) & (~A | B) --> ~(A ^ B)
  // (~B | A) & (B | ~A) --> ~(A ^ B)
  if (Op0->hasOneUse() || Op1->hasOneUse())
    if (match(&I, m_BinOp(m_c_Or(m_Value(A), m_Not(m_Value(B))),
                          m_c_Or(m_Not(m_Deferred(A)), m_Deferred(B)))))
      return BinaryOperator::CreateNot(Builder.CreateXor(A, B));

  return nullptr;
}

static Instruction *foldOrToXor(BinaryOperator &I,
                                InstCombiner::BuilderTy &Builder) {
  assert(I.getOpcode() == Instruction::Or);
  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  Value *A, *B;

  // Operand complexity canonicalization guarantees that the 'and' is Op0.
  // (A & B) | ~(A | B) --> ~(A ^ B)
  // (A & B) | ~(B | A) --> ~(A ^ B)
  if (Op0->hasOneUse() || Op1->hasOneUse())
    if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
        match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
      return BinaryOperator::CreateNot(Builder.CreateXor(A, B));

  // (A & ~B) | (~A & B) --> A ^ B
  // (A & ~B) | (B & ~A) --> A ^ B
  // (~B & A) | (~A & B) --> A ^ B
  // (~B & A) | (B & ~A) --> A ^ B
  if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
      match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B))))
    return BinaryOperator::CreateXor(A, B);

  return nullptr;
}

/// Return true if a constant shift amount is always less than the specified
/// bit-width. If not, the shift could create poison in the narrower type.
static bool canNarrowShiftAmt(Constant *C, unsigned BitWidth) {
  if (auto *ScalarC = dyn_cast<ConstantInt>(C))
    return ScalarC->getZExtValue() < BitWidth;

  if (C->getType()->isVectorTy()) {
    // Check each element of a constant vector.
    unsigned NumElts = C->getType()->getVectorNumElements();
    for (unsigned i = 0; i != NumElts; ++i) {
      Constant *Elt = C->getAggregateElement(i);
      if (!Elt)
        return false;
      if (isa<UndefValue>(Elt))
        continue;
      auto *CI = dyn_cast<ConstantInt>(Elt);
      if (!CI || CI->getZExtValue() >= BitWidth)
        return false;
    }
    return true;
  }

  // The constant is a constant expression or unknown.
  return false;
}

/// Try to use narrower ops (sink zext ops) for an 'and' with binop operand and
/// a common zext operand: and (binop (zext X), C), (zext X).
Instruction *InstCombiner::narrowMaskedBinOp(BinaryOperator &And) {
  // This transform could also apply to {or, and, xor}, but there are better
  // folds for those cases, so we don't expect those patterns here. AShr is not
  // handled because it should always be transformed to LShr in this sequence.
  // The subtract transform is different because it has a constant on the left.
  // Add/mul commute the constant to RHS; sub with constant RHS becomes add.
  Value *Op0 = And.getOperand(0), *Op1 = And.getOperand(1);
  Constant *C;
  if (!match(Op0, m_OneUse(m_Add(m_Specific(Op1), m_Constant(C)))) &&
      !match(Op0, m_OneUse(m_Mul(m_Specific(Op1), m_Constant(C)))) &&
      !match(Op0, m_OneUse(m_LShr(m_Specific(Op1), m_Constant(C)))) &&
      !match(Op0, m_OneUse(m_Shl(m_Specific(Op1), m_Constant(C)))) &&
      !match(Op0, m_OneUse(m_Sub(m_Constant(C), m_Specific(Op1)))))
    return nullptr;

  Value *X;
  if (!match(Op1, m_ZExt(m_Value(X))) || Op1->hasNUsesOrMore(3))
    return nullptr;

  Type *Ty = And.getType();
  if (!isa<VectorType>(Ty) && !shouldChangeType(Ty, X->getType()))
    return nullptr;

  // If we're narrowing a shift, the shift amount must be safe (less than the
  // width) in the narrower type. If the shift amount is greater, instsimplify
  // usually handles that case, but we can't guarantee/assert it.
  Instruction::BinaryOps Opc = cast<BinaryOperator>(Op0)->getOpcode();
  if (Opc == Instruction::LShr || Opc == Instruction::Shl)
    if (!canNarrowShiftAmt(C, X->getType()->getScalarSizeInBits()))
      return nullptr;

  // and (sub C, (zext X)), (zext X) --> zext (and (sub C', X), X)
  // and (binop (zext X), C), (zext X) --> zext (and (binop X, C'), X)
  Value *NewC = ConstantExpr::getTrunc(C, X->getType());
  Value *NewBO = Opc == Instruction::Sub ? Builder.CreateBinOp(Opc, NewC, X)
                                         : Builder.CreateBinOp(Opc, X, NewC);
  return new ZExtInst(Builder.CreateAnd(NewBO, X), Ty);
}

// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
// here. We should standardize that construct where it is needed or choose some
// other way to ensure that commutated variants of patterns are not missed.
Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
  if (Value *V = SimplifyAndInst(I.getOperand(0), I.getOperand(1),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldShuffledBinop(I))
    return X;

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  // Do this before using distributive laws to catch simple and/or/not
  // patterns.
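  // E.g., (A | B) & ~(A & B) --> A ^ B.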
  if (Instruction *Xor = foldAndToXor(I, Builder))
    return Xor;

  // (A|B)&(A|C) -> A|(B&C) etc.
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyBSwap(I, Builder))
    return replaceInstUsesWith(I, V);

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  const APInt *C;
  if (match(Op1, m_APInt(C))) {
    Value *X, *Y;
    if (match(Op0, m_OneUse(m_LogicalShift(m_One(), m_Value(X)))) &&
        C->isOneValue()) {
      // (1 << X) & 1 --> zext(X == 0)
      // (1 >> X) & 1 --> zext(X == 0)
      Value *IsZero = Builder.CreateICmpEQ(X, ConstantInt::get(I.getType(), 0));
      return new ZExtInst(IsZero, I.getType());
    }

    const APInt *XorC;
    if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_APInt(XorC))))) {
      // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
      Constant *NewC = ConstantInt::get(I.getType(), *C & *XorC);
      Value *And = Builder.CreateAnd(X, Op1);
      And->takeName(Op0);
      return BinaryOperator::CreateXor(And, NewC);
    }

    const APInt *OrC;
    if (match(Op0, m_OneUse(m_Or(m_Value(X), m_APInt(OrC))))) {
      // (X | C1) & C2 --> (X & C2^(C1&C2)) | (C1&C2)
      // NOTE: This reduces the number of bits set in the & mask, which
      // can expose opportunities for store narrowing for scalars.
      // NOTE: SimplifyDemandedBits should have already removed bits from C1
      // that aren't set in C2. That means we could replace (C1&C2) with C1 in
      // the above, but this feels safer.
      APInt Together = *C & *OrC;
      Value *And = Builder.CreateAnd(X, ConstantInt::get(I.getType(),
                                                         Together ^ *C));
      And->takeName(Op0);
      return BinaryOperator::CreateOr(And, ConstantInt::get(I.getType(),
                                                            Together));
    }

    // If the mask is only needed on one incoming arm, push the 'and' op up.
    if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_Value(Y)))) ||
        match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
      APInt NotAndMask(~(*C));
      BinaryOperator::BinaryOps BinOp = cast<BinaryOperator>(Op0)->getOpcode();
      if (MaskedValueIsZero(X, NotAndMask, 0, &I)) {
        // Not masking anything out for the LHS, move mask to RHS.
        // and ({x}or X, Y), C --> {x}or X, (and Y, C)
        Value *NewRHS = Builder.CreateAnd(Y, Op1, Y->getName() + ".masked");
        return BinaryOperator::Create(BinOp, X, NewRHS);
      }
      if (!isa<Constant>(Y) && MaskedValueIsZero(Y, NotAndMask, 0, &I)) {
        // Not masking anything out for the RHS, move mask to LHS.
        // and ({x}or X, Y), C --> {x}or (and X, C), Y
        Value *NewLHS = Builder.CreateAnd(X, Op1, X->getName() + ".masked");
        return BinaryOperator::Create(BinOp, NewLHS, Y);
      }
    }
  }

  if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
    const APInt &AndRHSMask = AndRHS->getValue();

    // Optimize a variety of ((val OP C1) & C2) combinations...
    if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
      // ((C1 OP zext(X)) & C2) -> zext((C1 OP X) & C2) if C2 fits in the
      // bitwidth of X and OP behaves well when given trunc(C1) and X.
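      // E.g., ((zext i8 X to i32) ^ 20) & 15 --> zext ((X ^ 20) & 15) to i32.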
1495 switch (Op0I->getOpcode()) { 1496 default: 1497 break; 1498 case Instruction::Xor: 1499 case Instruction::Or: 1500 case Instruction::Mul: 1501 case Instruction::Add: 1502 case Instruction::Sub: 1503 Value *X; 1504 ConstantInt *C1; 1505 if (match(Op0I, m_c_BinOp(m_ZExt(m_Value(X)), m_ConstantInt(C1)))) { 1506 if (AndRHSMask.isIntN(X->getType()->getScalarSizeInBits())) { 1507 auto *TruncC1 = ConstantExpr::getTrunc(C1, X->getType()); 1508 Value *BinOp; 1509 Value *Op0LHS = Op0I->getOperand(0); 1510 if (isa<ZExtInst>(Op0LHS)) 1511 BinOp = Builder.CreateBinOp(Op0I->getOpcode(), X, TruncC1); 1512 else 1513 BinOp = Builder.CreateBinOp(Op0I->getOpcode(), TruncC1, X); 1514 auto *TruncC2 = ConstantExpr::getTrunc(AndRHS, X->getType()); 1515 auto *And = Builder.CreateAnd(BinOp, TruncC2); 1516 return new ZExtInst(And, I.getType()); 1517 } 1518 } 1519 } 1520 1521 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) 1522 if (Instruction *Res = OptAndOp(Op0I, Op0CI, AndRHS, I)) 1523 return Res; 1524 } 1525 1526 // If this is an integer truncation, and if the source is an 'and' with 1527 // immediate, transform it. This frequently occurs for bitfield accesses. 1528 { 1529 Value *X = nullptr; ConstantInt *YC = nullptr; 1530 if (match(Op0, m_Trunc(m_And(m_Value(X), m_ConstantInt(YC))))) { 1531 // Change: and (trunc (and X, YC) to T), C2 1532 // into : and (trunc X to T), trunc(YC) & C2 1533 // This will fold the two constants together, which may allow 1534 // other simplifications. 1535 Value *NewCast = Builder.CreateTrunc(X, I.getType(), "and.shrunk"); 1536 Constant *C3 = ConstantExpr::getTrunc(YC, I.getType()); 1537 C3 = ConstantExpr::getAnd(C3, AndRHS); 1538 return BinaryOperator::CreateAnd(NewCast, C3); 1539 } 1540 } 1541 } 1542 1543 if (Instruction *Z = narrowMaskedBinOp(I)) 1544 return Z; 1545 1546 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I)) 1547 return FoldedLogic; 1548 1549 if (Instruction *DeMorgan = matchDeMorgansLaws(I, Builder)) 1550 return DeMorgan; 1551 1552 { 1553 Value *A, *B, *C; 1554 // A & (A ^ B) --> A & ~B 1555 if (match(Op1, m_OneUse(m_c_Xor(m_Specific(Op0), m_Value(B))))) 1556 return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(B)); 1557 // (A ^ B) & A --> A & ~B 1558 if (match(Op0, m_OneUse(m_c_Xor(m_Specific(Op1), m_Value(B))))) 1559 return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(B)); 1560 1561 // (A ^ B) & ((B ^ C) ^ A) -> (A ^ B) & ~C 1562 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) 1563 if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A)))) 1564 if (Op1->hasOneUse() || IsFreeToInvert(C, C->hasOneUse())) 1565 return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(C)); 1566 1567 // ((A ^ C) ^ B) & (B ^ A) -> (B ^ A) & ~C 1568 if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B)))) 1569 if (match(Op1, m_Xor(m_Specific(B), m_Specific(A)))) 1570 if (Op0->hasOneUse() || IsFreeToInvert(C, C->hasOneUse())) 1571 return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(C)); 1572 1573 // (A | B) & ((~A) ^ B) -> (A & B) 1574 // (A | B) & (B ^ (~A)) -> (A & B) 1575 // (B | A) & ((~A) ^ B) -> (A & B) 1576 // (B | A) & (B ^ (~A)) -> (A & B) 1577 if (match(Op1, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) && 1578 match(Op0, m_c_Or(m_Specific(A), m_Specific(B)))) 1579 return BinaryOperator::CreateAnd(A, B); 1580 1581 // ((~A) ^ B) & (A | B) -> (A & B) 1582 // ((~A) ^ B) & (B | A) -> (A & B) 1583 // (B ^ (~A)) & (A | B) -> (A & B) 1584 // (B ^ (~A)) & (B | A) -> (A & B) 1585 if (match(Op0, m_c_Xor(m_Not(m_Value(A)), 
m_Value(B))) &&
1586       match(Op1, m_c_Or(m_Specific(A), m_Specific(B))))
1587     return BinaryOperator::CreateAnd(A, B);
1588   }
1589 
1590   {
1591     ICmpInst *LHS = dyn_cast<ICmpInst>(Op0);
1592     ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
1593     if (LHS && RHS)
1594       if (Value *Res = foldAndOfICmps(LHS, RHS, I))
1595         return replaceInstUsesWith(I, Res);
1596 
1597     // TODO: Make this recursive; it's a little tricky because an arbitrary
1598     // number of 'and' instructions might have to be created.
1599     Value *X, *Y;
1600     if (LHS && match(Op1, m_OneUse(m_And(m_Value(X), m_Value(Y))))) {
1601       if (auto *Cmp = dyn_cast<ICmpInst>(X))
1602         if (Value *Res = foldAndOfICmps(LHS, Cmp, I))
1603           return replaceInstUsesWith(I, Builder.CreateAnd(Res, Y));
1604       if (auto *Cmp = dyn_cast<ICmpInst>(Y))
1605         if (Value *Res = foldAndOfICmps(LHS, Cmp, I))
1606           return replaceInstUsesWith(I, Builder.CreateAnd(Res, X));
1607     }
1608     if (RHS && match(Op0, m_OneUse(m_And(m_Value(X), m_Value(Y))))) {
1609       if (auto *Cmp = dyn_cast<ICmpInst>(X))
1610         if (Value *Res = foldAndOfICmps(Cmp, RHS, I))
1611           return replaceInstUsesWith(I, Builder.CreateAnd(Res, Y));
1612       if (auto *Cmp = dyn_cast<ICmpInst>(Y))
1613         if (Value *Res = foldAndOfICmps(Cmp, RHS, I))
1614           return replaceInstUsesWith(I, Builder.CreateAnd(Res, X));
1615     }
1616   }
1617 
1618   if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
1619     if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
1620       if (Value *Res = foldLogicOfFCmps(LHS, RHS, true))
1621         return replaceInstUsesWith(I, Res);
1622 
1623   if (Instruction *CastedAnd = foldCastedBitwiseLogic(I))
1624     return CastedAnd;
1625 
1626   // and(sext(A), B) / and(B, sext(A)) --> A ? B : 0, where A is i1 or <N x i1>.
1627   Value *A;
1628   if (match(Op0, m_OneUse(m_SExt(m_Value(A)))) &&
1629       A->getType()->isIntOrIntVectorTy(1))
1630     return SelectInst::Create(A, Op1, Constant::getNullValue(I.getType()));
1631   if (match(Op1, m_OneUse(m_SExt(m_Value(A)))) &&
1632       A->getType()->isIntOrIntVectorTy(1))
1633     return SelectInst::Create(A, Op0, Constant::getNullValue(I.getType()));
1634 
1635   return nullptr;
1636 }
1637 
1638 /// Given an OR instruction, check to see if this is a bswap idiom. If so,
1639 /// insert the new intrinsic and return it.
1640 Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
1641   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1642 
1643   // Look through zero extends.
1644   if (Instruction *Ext = dyn_cast<ZExtInst>(Op0))
1645     Op0 = Ext->getOperand(0);
1646 
1647   if (Instruction *Ext = dyn_cast<ZExtInst>(Op1))
1648     Op1 = Ext->getOperand(0);
1649 
1650   // (A | B) | C and A | (B | C) -> bswap if possible.
1651   bool OrOfOrs = match(Op0, m_Or(m_Value(), m_Value())) ||
1652                  match(Op1, m_Or(m_Value(), m_Value()));
1653 
1654   // (A >> B) | (C << D) and (A << B) | (C >> D) -> bswap if possible.
1655   bool OrOfShifts = match(Op0, m_LogicalShift(m_Value(), m_Value())) &&
1656                     match(Op1, m_LogicalShift(m_Value(), m_Value()));
1657 
1658   // (A & B) | (C & D) -> bswap if possible.
1659   bool OrOfAnds = match(Op0, m_And(m_Value(), m_Value())) &&
1660                   match(Op1, m_And(m_Value(), m_Value()));
1661 
1662   // (A << B) | (C & D) -> bswap if possible.
1663   // The bigger pattern here is ((A & C1) << C2) | ((B >> C2) & C1), which is a
1664   // part of the bswap idiom for specific values of C1, C2 (e.g. C1 = 16711935,
1665   // C2 = 8 for i32).
1666   // This pattern can occur when the operands of the 'or' are not canonicalized
1667   // for some reason (not having only one use, for example).
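// For illustration (hypothetical i32 values): with C1 = 0x00FF00FF and
// C2 = 8, ((A & 0x00FF00FF) << 8) | ((A >> 8) & 0x00FF00FF) swaps each pair
// of adjacent bytes (b3 b2 b1 b0 --> b2 b3 b0 b1), one stage of a full
// byte-swap.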
1668 bool OrOfAndAndSh = (match(Op0, m_LogicalShift(m_Value(), m_Value())) && 1669 match(Op1, m_And(m_Value(), m_Value()))) || 1670 (match(Op0, m_And(m_Value(), m_Value())) && 1671 match(Op1, m_LogicalShift(m_Value(), m_Value()))); 1672 1673 if (!OrOfOrs && !OrOfShifts && !OrOfAnds && !OrOfAndAndSh) 1674 return nullptr; 1675 1676 SmallVector<Instruction*, 4> Insts; 1677 if (!recognizeBSwapOrBitReverseIdiom(&I, true, false, Insts)) 1678 return nullptr; 1679 Instruction *LastInst = Insts.pop_back_val(); 1680 LastInst->removeFromParent(); 1681 1682 for (auto *Inst : Insts) 1683 Worklist.Add(Inst); 1684 return LastInst; 1685 } 1686 1687 /// If all elements of two constant vectors are 0/-1 and inverses, return true. 1688 static bool areInverseVectorBitmasks(Constant *C1, Constant *C2) { 1689 unsigned NumElts = C1->getType()->getVectorNumElements(); 1690 for (unsigned i = 0; i != NumElts; ++i) { 1691 Constant *EltC1 = C1->getAggregateElement(i); 1692 Constant *EltC2 = C2->getAggregateElement(i); 1693 if (!EltC1 || !EltC2) 1694 return false; 1695 1696 // One element must be all ones, and the other must be all zeros. 1697 if (!((match(EltC1, m_Zero()) && match(EltC2, m_AllOnes())) || 1698 (match(EltC2, m_Zero()) && match(EltC1, m_AllOnes())))) 1699 return false; 1700 } 1701 return true; 1702 } 1703 1704 /// We have an expression of the form (A & C) | (B & D). If A is a scalar or 1705 /// vector composed of all-zeros or all-ones values and is the bitwise 'not' of 1706 /// B, it can be used as the condition operand of a select instruction. 1707 static Value *getSelectCondition(Value *A, Value *B, 1708 InstCombiner::BuilderTy &Builder) { 1709 // If these are scalars or vectors of i1, A can be used directly. 1710 Type *Ty = A->getType(); 1711 if (match(A, m_Not(m_Specific(B))) && Ty->isIntOrIntVectorTy(1)) 1712 return A; 1713 1714 // If A and B are sign-extended, look through the sexts to find the booleans. 1715 Value *Cond; 1716 Value *NotB; 1717 if (match(A, m_SExt(m_Value(Cond))) && 1718 Cond->getType()->isIntOrIntVectorTy(1) && 1719 match(B, m_OneUse(m_Not(m_Value(NotB))))) { 1720 NotB = peekThroughBitcast(NotB, true); 1721 if (match(NotB, m_SExt(m_Specific(Cond)))) 1722 return Cond; 1723 } 1724 1725 // All scalar (and most vector) possibilities should be handled now. 1726 // Try more matches that only apply to non-splat constant vectors. 1727 if (!Ty->isVectorTy()) 1728 return nullptr; 1729 1730 // If both operands are constants, see if the constants are inverse bitmasks. 1731 Constant *AC, *BC; 1732 if (match(A, m_Constant(AC)) && match(B, m_Constant(BC)) && 1733 areInverseVectorBitmasks(AC, BC)) { 1734 return Builder.CreateZExtOrTrunc(AC, CmpInst::makeCmpResultType(Ty)); 1735 } 1736 1737 // If both operands are xor'd with constants using the same sexted boolean 1738 // operand, see if the constants are inverse bitmasks. 1739 if (match(A, (m_Xor(m_SExt(m_Value(Cond)), m_Constant(AC)))) && 1740 match(B, (m_Xor(m_SExt(m_Specific(Cond)), m_Constant(BC)))) && 1741 Cond->getType()->isIntOrIntVectorTy(1) && 1742 areInverseVectorBitmasks(AC, BC)) { 1743 AC = ConstantExpr::getTrunc(AC, CmpInst::makeCmpResultType(Ty)); 1744 return Builder.CreateXor(Cond, AC); 1745 } 1746 return nullptr; 1747 } 1748 1749 /// We have an expression of the form (A & C) | (B & D). Try to simplify this 1750 /// to "A' ? C : D", where A' is a boolean or vector of booleans. 
1751 static Value *matchSelectFromAndOr(Value *A, Value *C, Value *B, Value *D,
1752                                    InstCombiner::BuilderTy &Builder) {
1753   // The potential condition of the select may be bitcasted. In that case, look
1754   // through its bitcast and the corresponding bitcast of the 'not' condition.
1755   Type *OrigType = A->getType();
1756   A = peekThroughBitcast(A, true);
1757   B = peekThroughBitcast(B, true);
1758 
1759   if (Value *Cond = getSelectCondition(A, B, Builder)) {
1760     // ((bc Cond) & C) | ((bc ~Cond) & D) --> bc (select Cond, (bc C), (bc D))
1761     // The bitcasts will either all exist or all not exist. The builder will
1762     // not create unnecessary casts if the types already match.
1763     Value *BitcastC = Builder.CreateBitCast(C, A->getType());
1764     Value *BitcastD = Builder.CreateBitCast(D, A->getType());
1765     Value *Select = Builder.CreateSelect(Cond, BitcastC, BitcastD);
1766     return Builder.CreateBitCast(Select, OrigType);
1767   }
1768 
1769   return nullptr;
1770 }
1771 
1772 /// Fold (icmp)|(icmp) if possible.
1773 Value *InstCombiner::foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
1774                                    Instruction &CxtI) {
1775   // Fold (iszero(A & K1) | iszero(A & K2)) -> (A & (K1 | K2)) != (K1 | K2)
1776   // if K1 and K2 are one-bit masks.
1777   if (Value *V = foldAndOrOfICmpsOfAndWithPow2(LHS, RHS, false, CxtI))
1778     return V;
1779 
1780   ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1781 
1782   ConstantInt *LHSC = dyn_cast<ConstantInt>(LHS->getOperand(1));
1783   ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS->getOperand(1));
1784 
1785   // Fold (icmp ult/ule (A + C1), C3) | (icmp ult/ule (A + C2), C3)
1786   // --> (icmp ult/ule ((A & ~(C1 ^ C2)) + max(C1, C2)), C3)
1787   // The original condition actually refers to the following two ranges:
1788   // [MAX_UINT-C1+1, MAX_UINT-C1+1+C3] and [MAX_UINT-C2+1, MAX_UINT-C2+1+C3]
1789   // We can fold these two ranges if:
1790   // 1) C1 and C2 are unsigned-greater than C3.
1791   // 2) The two ranges are separated.
1792   // 3) C1 ^ C2 is a one-bit mask.
1793   // 4) LowRange1 ^ LowRange2 and HighRange1 ^ HighRange2 are one-bit masks.
1794   // This implies all values in the two ranges differ by exactly one bit.
1795 
1796   if ((PredL == ICmpInst::ICMP_ULT || PredL == ICmpInst::ICMP_ULE) &&
1797       PredL == PredR && LHSC && RHSC && LHS->hasOneUse() && RHS->hasOneUse() &&
1798       LHSC->getType() == RHSC->getType() &&
1799       LHSC->getValue() == RHSC->getValue()) {
1800 
1801     Value *LAdd = LHS->getOperand(0);
1802     Value *RAdd = RHS->getOperand(0);
1803 
1804     Value *LAddOpnd, *RAddOpnd;
1805     ConstantInt *LAddC, *RAddC;
1806     if (match(LAdd, m_Add(m_Value(LAddOpnd), m_ConstantInt(LAddC))) &&
1807         match(RAdd, m_Add(m_Value(RAddOpnd), m_ConstantInt(RAddC))) &&
1808         LAddC->getValue().ugt(LHSC->getValue()) &&
1809         RAddC->getValue().ugt(LHSC->getValue())) {
1810 
1811       APInt DiffC = LAddC->getValue() ^ RAddC->getValue();
1812       if (LAddOpnd == RAddOpnd && DiffC.isPowerOf2()) {
1813         ConstantInt *MaxAddC = nullptr;
1814         if (LAddC->getValue().ult(RAddC->getValue()))
1815           MaxAddC = RAddC;
1816         else
1817           MaxAddC = LAddC;
1818 
1819         APInt RRangeLow = -RAddC->getValue();
1820         APInt RRangeHigh = RRangeLow + LHSC->getValue();
1821         APInt LRangeLow = -LAddC->getValue();
1822         APInt LRangeHigh = LRangeLow + LHSC->getValue();
1823         APInt LowRangeDiff = RRangeLow ^ LRangeLow;
1824         APInt HighRangeDiff = RRangeHigh ^ LRangeHigh;
1825         APInt RangeDiff = LRangeLow.sgt(RRangeLow) ?
LRangeLow - RRangeLow 1826 : RRangeLow - LRangeLow; 1827 1828 if (LowRangeDiff.isPowerOf2() && LowRangeDiff == HighRangeDiff && 1829 RangeDiff.ugt(LHSC->getValue())) { 1830 Value *MaskC = ConstantInt::get(LAddC->getType(), ~DiffC); 1831 1832 Value *NewAnd = Builder.CreateAnd(LAddOpnd, MaskC); 1833 Value *NewAdd = Builder.CreateAdd(NewAnd, MaxAddC); 1834 return Builder.CreateICmp(LHS->getPredicate(), NewAdd, LHSC); 1835 } 1836 } 1837 } 1838 } 1839 1840 // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B) 1841 if (PredicatesFoldable(PredL, PredR)) { 1842 if (LHS->getOperand(0) == RHS->getOperand(1) && 1843 LHS->getOperand(1) == RHS->getOperand(0)) 1844 LHS->swapOperands(); 1845 if (LHS->getOperand(0) == RHS->getOperand(0) && 1846 LHS->getOperand(1) == RHS->getOperand(1)) { 1847 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1); 1848 unsigned Code = getICmpCode(LHS) | getICmpCode(RHS); 1849 bool isSigned = LHS->isSigned() || RHS->isSigned(); 1850 return getNewICmpValue(isSigned, Code, Op0, Op1, Builder); 1851 } 1852 } 1853 1854 // handle (roughly): 1855 // (icmp ne (A & B), C) | (icmp ne (A & D), E) 1856 if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, false, Builder)) 1857 return V; 1858 1859 Value *LHS0 = LHS->getOperand(0), *RHS0 = RHS->getOperand(0); 1860 if (LHS->hasOneUse() || RHS->hasOneUse()) { 1861 // (icmp eq B, 0) | (icmp ult A, B) -> (icmp ule A, B-1) 1862 // (icmp eq B, 0) | (icmp ugt B, A) -> (icmp ule A, B-1) 1863 Value *A = nullptr, *B = nullptr; 1864 if (PredL == ICmpInst::ICMP_EQ && LHSC && LHSC->isZero()) { 1865 B = LHS0; 1866 if (PredR == ICmpInst::ICMP_ULT && LHS0 == RHS->getOperand(1)) 1867 A = RHS0; 1868 else if (PredR == ICmpInst::ICMP_UGT && LHS0 == RHS0) 1869 A = RHS->getOperand(1); 1870 } 1871 // (icmp ult A, B) | (icmp eq B, 0) -> (icmp ule A, B-1) 1872 // (icmp ugt B, A) | (icmp eq B, 0) -> (icmp ule A, B-1) 1873 else if (PredR == ICmpInst::ICMP_EQ && RHSC && RHSC->isZero()) { 1874 B = RHS0; 1875 if (PredL == ICmpInst::ICMP_ULT && RHS0 == LHS->getOperand(1)) 1876 A = LHS0; 1877 else if (PredL == ICmpInst::ICMP_UGT && LHS0 == RHS0) 1878 A = LHS->getOperand(1); 1879 } 1880 if (A && B) 1881 return Builder.CreateICmp( 1882 ICmpInst::ICMP_UGE, 1883 Builder.CreateAdd(B, ConstantInt::getSigned(B->getType(), -1)), A); 1884 } 1885 1886 // E.g. (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n 1887 if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/true)) 1888 return V; 1889 1890 // E.g. (icmp sgt x, n) | (icmp slt x, 0) --> icmp ugt x, n 1891 if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/true)) 1892 return V; 1893 1894 if (Value *V = foldAndOrOfEqualityCmpsWithConstants(LHS, RHS, false, Builder)) 1895 return V; 1896 1897 // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2). 1898 if (!LHSC || !RHSC) 1899 return nullptr; 1900 1901 if (LHSC == RHSC && PredL == PredR) { 1902 // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0) 1903 if (PredL == ICmpInst::ICMP_NE && LHSC->isZero()) { 1904 Value *NewOr = Builder.CreateOr(LHS0, RHS0); 1905 return Builder.CreateICmp(PredL, NewOr, LHSC); 1906 } 1907 } 1908 1909 // (icmp ult (X + CA), C1) | (icmp eq X, C2) -> (icmp ule (X + CA), C1) 1910 // iff C2 + CA == C1. 
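// For illustration (hypothetical i8 values): with CA = 1, C1 = 5, C2 = 4,
//   (icmp ult (add i8 %x, 1), 5) | (icmp eq i8 %x, 4)
// the eq arm tests exactly the boundary value (%x + 1 == 5), so the union
// is simply
//   icmp ule (add i8 %x, 1), 5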
1911 if (PredL == ICmpInst::ICMP_ULT && PredR == ICmpInst::ICMP_EQ) { 1912 ConstantInt *AddC; 1913 if (match(LHS0, m_Add(m_Specific(RHS0), m_ConstantInt(AddC)))) 1914 if (RHSC->getValue() + AddC->getValue() == LHSC->getValue()) 1915 return Builder.CreateICmpULE(LHS0, LHSC); 1916 } 1917 1918 // From here on, we only handle: 1919 // (icmp1 A, C1) | (icmp2 A, C2) --> something simpler. 1920 if (LHS0 != RHS0) 1921 return nullptr; 1922 1923 // ICMP_[US][GL]E X, C is folded to ICMP_[US][GL]T elsewhere. 1924 if (PredL == ICmpInst::ICMP_UGE || PredL == ICmpInst::ICMP_ULE || 1925 PredR == ICmpInst::ICMP_UGE || PredR == ICmpInst::ICMP_ULE || 1926 PredL == ICmpInst::ICMP_SGE || PredL == ICmpInst::ICMP_SLE || 1927 PredR == ICmpInst::ICMP_SGE || PredR == ICmpInst::ICMP_SLE) 1928 return nullptr; 1929 1930 // We can't fold (ugt x, C) | (sgt x, C2). 1931 if (!PredicatesFoldable(PredL, PredR)) 1932 return nullptr; 1933 1934 // Ensure that the larger constant is on the RHS. 1935 bool ShouldSwap; 1936 if (CmpInst::isSigned(PredL) || 1937 (ICmpInst::isEquality(PredL) && CmpInst::isSigned(PredR))) 1938 ShouldSwap = LHSC->getValue().sgt(RHSC->getValue()); 1939 else 1940 ShouldSwap = LHSC->getValue().ugt(RHSC->getValue()); 1941 1942 if (ShouldSwap) { 1943 std::swap(LHS, RHS); 1944 std::swap(LHSC, RHSC); 1945 std::swap(PredL, PredR); 1946 } 1947 1948 // At this point, we know we have two icmp instructions 1949 // comparing a value against two constants and or'ing the result 1950 // together. Because of the above check, we know that we only have 1951 // ICMP_EQ, ICMP_NE, ICMP_LT, and ICMP_GT here. We also know (from the 1952 // icmp folding check above), that the two constants are not 1953 // equal. 1954 assert(LHSC != RHSC && "Compares not folded above?"); 1955 1956 switch (PredL) { 1957 default: 1958 llvm_unreachable("Unknown integer condition code!"); 1959 case ICmpInst::ICMP_EQ: 1960 switch (PredR) { 1961 default: 1962 llvm_unreachable("Unknown integer condition code!"); 1963 case ICmpInst::ICMP_EQ: 1964 // Potential folds for this case should already be handled. 1965 break; 1966 case ICmpInst::ICMP_UGT: // (X == 13 | X u> 14) -> no change 1967 case ICmpInst::ICMP_SGT: // (X == 13 | X s> 14) -> no change 1968 break; 1969 } 1970 break; 1971 case ICmpInst::ICMP_ULT: 1972 switch (PredR) { 1973 default: 1974 llvm_unreachable("Unknown integer condition code!"); 1975 case ICmpInst::ICMP_EQ: // (X u< 13 | X == 14) -> no change 1976 break; 1977 case ICmpInst::ICMP_UGT: // (X u< 13 | X u> 15) -> (X-13) u> 2 1978 assert(!RHSC->isMaxValue(false) && "Missed icmp simplification"); 1979 return insertRangeTest(LHS0, LHSC->getValue(), RHSC->getValue() + 1, 1980 false, false); 1981 } 1982 break; 1983 case ICmpInst::ICMP_SLT: 1984 switch (PredR) { 1985 default: 1986 llvm_unreachable("Unknown integer condition code!"); 1987 case ICmpInst::ICMP_EQ: // (X s< 13 | X == 14) -> no change 1988 break; 1989 case ICmpInst::ICMP_SGT: // (X s< 13 | X s> 15) -> (X-13) s> 2 1990 assert(!RHSC->isMaxValue(true) && "Missed icmp simplification"); 1991 return insertRangeTest(LHS0, LHSC->getValue(), RHSC->getValue() + 1, true, 1992 false); 1993 } 1994 break; 1995 } 1996 return nullptr; 1997 } 1998 1999 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches 2000 // here. We should standardize that construct where it is needed or choose some 2001 // other way to ensure that commutated variants of patterns are not missed. 
2002 Instruction *InstCombiner::visitOr(BinaryOperator &I) { 2003 if (Value *V = SimplifyOrInst(I.getOperand(0), I.getOperand(1), 2004 SQ.getWithInstruction(&I))) 2005 return replaceInstUsesWith(I, V); 2006 2007 if (SimplifyAssociativeOrCommutative(I)) 2008 return &I; 2009 2010 if (Instruction *X = foldShuffledBinop(I)) 2011 return X; 2012 2013 // See if we can simplify any instructions used by the instruction whose sole 2014 // purpose is to compute bits we don't care about. 2015 if (SimplifyDemandedInstructionBits(I)) 2016 return &I; 2017 2018 // Do this before using distributive laws to catch simple and/or/not patterns. 2019 if (Instruction *Xor = foldOrToXor(I, Builder)) 2020 return Xor; 2021 2022 // (A&B)|(A&C) -> A&(B|C) etc 2023 if (Value *V = SimplifyUsingDistributiveLaws(I)) 2024 return replaceInstUsesWith(I, V); 2025 2026 if (Value *V = SimplifyBSwap(I, Builder)) 2027 return replaceInstUsesWith(I, V); 2028 2029 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I)) 2030 return FoldedLogic; 2031 2032 // Given an OR instruction, check to see if this is a bswap. 2033 if (Instruction *BSwap = MatchBSwap(I)) 2034 return BSwap; 2035 2036 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 2037 { 2038 Value *A; 2039 const APInt *C; 2040 // (X^C)|Y -> (X|Y)^C iff Y&C == 0 2041 if (match(Op0, m_OneUse(m_Xor(m_Value(A), m_APInt(C)))) && 2042 MaskedValueIsZero(Op1, *C, 0, &I)) { 2043 Value *NOr = Builder.CreateOr(A, Op1); 2044 NOr->takeName(Op0); 2045 return BinaryOperator::CreateXor(NOr, 2046 ConstantInt::get(NOr->getType(), *C)); 2047 } 2048 2049 // Y|(X^C) -> (X|Y)^C iff Y&C == 0 2050 if (match(Op1, m_OneUse(m_Xor(m_Value(A), m_APInt(C)))) && 2051 MaskedValueIsZero(Op0, *C, 0, &I)) { 2052 Value *NOr = Builder.CreateOr(A, Op0); 2053 NOr->takeName(Op0); 2054 return BinaryOperator::CreateXor(NOr, 2055 ConstantInt::get(NOr->getType(), *C)); 2056 } 2057 } 2058 2059 Value *A, *B; 2060 2061 // (A & C)|(B & D) 2062 Value *C = nullptr, *D = nullptr; 2063 if (match(Op0, m_And(m_Value(A), m_Value(C))) && 2064 match(Op1, m_And(m_Value(B), m_Value(D)))) { 2065 ConstantInt *C1 = dyn_cast<ConstantInt>(C); 2066 ConstantInt *C2 = dyn_cast<ConstantInt>(D); 2067 if (C1 && C2) { // (A & C1)|(B & C2) 2068 Value *V1 = nullptr, *V2 = nullptr; 2069 if ((C1->getValue() & C2->getValue()).isNullValue()) { 2070 // ((V | N) & C1) | (V & C2) --> (V|N) & (C1|C2) 2071 // iff (C1&C2) == 0 and (N&~C1) == 0 2072 if (match(A, m_Or(m_Value(V1), m_Value(V2))) && 2073 ((V1 == B && 2074 MaskedValueIsZero(V2, ~C1->getValue(), 0, &I)) || // (V|N) 2075 (V2 == B && 2076 MaskedValueIsZero(V1, ~C1->getValue(), 0, &I)))) // (N|V) 2077 return BinaryOperator::CreateAnd(A, 2078 Builder.getInt(C1->getValue()|C2->getValue())); 2079 // Or commutes, try both ways. 2080 if (match(B, m_Or(m_Value(V1), m_Value(V2))) && 2081 ((V1 == A && 2082 MaskedValueIsZero(V2, ~C2->getValue(), 0, &I)) || // (V|N) 2083 (V2 == A && 2084 MaskedValueIsZero(V1, ~C2->getValue(), 0, &I)))) // (N|V) 2085 return BinaryOperator::CreateAnd(B, 2086 Builder.getInt(C1->getValue()|C2->getValue())); 2087 2088 // ((V|C3)&C1) | ((V|C4)&C2) --> (V|C3|C4)&(C1|C2) 2089 // iff (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0. 
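// For illustration (hypothetical i8 masks): with C1 = 0x0F, C2 = 0xF0,
// C3 = 0x03, C4 = 0x30:
//   ((V | 0x03) & 0x0F) | ((V | 0x30) & 0xF0)
//     = ((V & 0x0F) | 0x03) | ((V & 0xF0) | 0x30)
//     = V | 0x33
// which is (V | C3 | C4) & (C1 | C2) with C1 | C2 == 0xFF.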
2090         ConstantInt *C3 = nullptr, *C4 = nullptr;
2091         if (match(A, m_Or(m_Value(V1), m_ConstantInt(C3))) &&
2092             (C3->getValue() & ~C1->getValue()).isNullValue() &&
2093             match(B, m_Or(m_Specific(V1), m_ConstantInt(C4))) &&
2094             (C4->getValue() & ~C2->getValue()).isNullValue()) {
2095           V2 = Builder.CreateOr(V1, ConstantExpr::getOr(C3, C4), "bitfield");
2096           return BinaryOperator::CreateAnd(V2,
2097                                 Builder.getInt(C1->getValue()|C2->getValue()));
2098         }
2099       }
2100 
2101       if (C1->getValue() == ~C2->getValue()) {
2102         Value *X;
2103 
2104         // ((X|B)&C1)|(B&C2) -> (X&C1) | B iff C1 == ~C2
2105         if (match(A, m_c_Or(m_Value(X), m_Specific(B))))
2106           return BinaryOperator::CreateOr(Builder.CreateAnd(X, C1), B);
2107         // (A&C1)|((X|A)&C2) -> (X&C2) | A iff C1 == ~C2
2108         if (match(B, m_c_Or(m_Specific(A), m_Value(X))))
2109           return BinaryOperator::CreateOr(Builder.CreateAnd(X, C2), A);
2110 
2111         // ((X^B)&C1)|(B&C2) -> (X&C1) ^ B iff C1 == ~C2
2112         if (match(A, m_c_Xor(m_Value(X), m_Specific(B))))
2113           return BinaryOperator::CreateXor(Builder.CreateAnd(X, C1), B);
2114         // (A&C1)|((X^A)&C2) -> (X&C2) ^ A iff C1 == ~C2
2115         if (match(B, m_c_Xor(m_Specific(A), m_Value(X))))
2116           return BinaryOperator::CreateXor(Builder.CreateAnd(X, C2), A);
2117       }
2118     }
2119 
2120     // Don't try to form a select if it's unlikely that we'll get rid of at
2121     // least one of the operands. A select is generally more expensive than the
2122     // 'or' that it is replacing.
2123     if (Op0->hasOneUse() || Op1->hasOneUse()) {
2124       // (Cond & C) | (~Cond & D) -> Cond ? C : D, and commuted variants.
2125       if (Value *V = matchSelectFromAndOr(A, C, B, D, Builder))
2126         return replaceInstUsesWith(I, V);
2127       if (Value *V = matchSelectFromAndOr(A, C, D, B, Builder))
2128         return replaceInstUsesWith(I, V);
2129       if (Value *V = matchSelectFromAndOr(C, A, B, D, Builder))
2130         return replaceInstUsesWith(I, V);
2131       if (Value *V = matchSelectFromAndOr(C, A, D, B, Builder))
2132         return replaceInstUsesWith(I, V);
2133       if (Value *V = matchSelectFromAndOr(B, D, A, C, Builder))
2134         return replaceInstUsesWith(I, V);
2135       if (Value *V = matchSelectFromAndOr(B, D, C, A, Builder))
2136         return replaceInstUsesWith(I, V);
2137       if (Value *V = matchSelectFromAndOr(D, B, A, C, Builder))
2138         return replaceInstUsesWith(I, V);
2139       if (Value *V = matchSelectFromAndOr(D, B, C, A, Builder))
2140         return replaceInstUsesWith(I, V);
2141     }
2142   }
2143 
2144   // (A ^ B) | ((B ^ C) ^ A) -> (A ^ B) | C
2145   if (match(Op0, m_Xor(m_Value(A), m_Value(B))))
2146     if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A))))
2147       return BinaryOperator::CreateOr(Op0, C);
2148 
2149   // ((A ^ C) ^ B) | (B ^ A) -> (B ^ A) | C
2150   if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))))
2151     if (match(Op1, m_Xor(m_Specific(B), m_Specific(A))))
2152       return BinaryOperator::CreateOr(Op1, C);
2153 
2154   // ((B | C) & A) | B -> B | (A & C)
2155   if (match(Op0, m_And(m_Or(m_Specific(Op1), m_Value(C)), m_Value(A))))
2156     return BinaryOperator::CreateOr(Op1, Builder.CreateAnd(A, C));
2157 
2158   if (Instruction *DeMorgan = matchDeMorgansLaws(I, Builder))
2159     return DeMorgan;
2160 
2161   // Canonicalize xor to the RHS.
2162 bool SwappedForXor = false; 2163 if (match(Op0, m_Xor(m_Value(), m_Value()))) { 2164 std::swap(Op0, Op1); 2165 SwappedForXor = true; 2166 } 2167 2168 // A | ( A ^ B) -> A | B 2169 // A | (~A ^ B) -> A | ~B 2170 // (A & B) | (A ^ B) 2171 if (match(Op1, m_Xor(m_Value(A), m_Value(B)))) { 2172 if (Op0 == A || Op0 == B) 2173 return BinaryOperator::CreateOr(A, B); 2174 2175 if (match(Op0, m_And(m_Specific(A), m_Specific(B))) || 2176 match(Op0, m_And(m_Specific(B), m_Specific(A)))) 2177 return BinaryOperator::CreateOr(A, B); 2178 2179 if (Op1->hasOneUse() && match(A, m_Not(m_Specific(Op0)))) { 2180 Value *Not = Builder.CreateNot(B, B->getName() + ".not"); 2181 return BinaryOperator::CreateOr(Not, Op0); 2182 } 2183 if (Op1->hasOneUse() && match(B, m_Not(m_Specific(Op0)))) { 2184 Value *Not = Builder.CreateNot(A, A->getName() + ".not"); 2185 return BinaryOperator::CreateOr(Not, Op0); 2186 } 2187 } 2188 2189 // A | ~(A | B) -> A | ~B 2190 // A | ~(A ^ B) -> A | ~B 2191 if (match(Op1, m_Not(m_Value(A)))) 2192 if (BinaryOperator *B = dyn_cast<BinaryOperator>(A)) 2193 if ((Op0 == B->getOperand(0) || Op0 == B->getOperand(1)) && 2194 Op1->hasOneUse() && (B->getOpcode() == Instruction::Or || 2195 B->getOpcode() == Instruction::Xor)) { 2196 Value *NotOp = Op0 == B->getOperand(0) ? B->getOperand(1) : 2197 B->getOperand(0); 2198 Value *Not = Builder.CreateNot(NotOp, NotOp->getName() + ".not"); 2199 return BinaryOperator::CreateOr(Not, Op0); 2200 } 2201 2202 if (SwappedForXor) 2203 std::swap(Op0, Op1); 2204 2205 { 2206 ICmpInst *LHS = dyn_cast<ICmpInst>(Op0); 2207 ICmpInst *RHS = dyn_cast<ICmpInst>(Op1); 2208 if (LHS && RHS) 2209 if (Value *Res = foldOrOfICmps(LHS, RHS, I)) 2210 return replaceInstUsesWith(I, Res); 2211 2212 // TODO: Make this recursive; it's a little tricky because an arbitrary 2213 // number of 'or' instructions might have to be created. 2214 Value *X, *Y; 2215 if (LHS && match(Op1, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) { 2216 if (auto *Cmp = dyn_cast<ICmpInst>(X)) 2217 if (Value *Res = foldOrOfICmps(LHS, Cmp, I)) 2218 return replaceInstUsesWith(I, Builder.CreateOr(Res, Y)); 2219 if (auto *Cmp = dyn_cast<ICmpInst>(Y)) 2220 if (Value *Res = foldOrOfICmps(LHS, Cmp, I)) 2221 return replaceInstUsesWith(I, Builder.CreateOr(Res, X)); 2222 } 2223 if (RHS && match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) { 2224 if (auto *Cmp = dyn_cast<ICmpInst>(X)) 2225 if (Value *Res = foldOrOfICmps(Cmp, RHS, I)) 2226 return replaceInstUsesWith(I, Builder.CreateOr(Res, Y)); 2227 if (auto *Cmp = dyn_cast<ICmpInst>(Y)) 2228 if (Value *Res = foldOrOfICmps(Cmp, RHS, I)) 2229 return replaceInstUsesWith(I, Builder.CreateOr(Res, X)); 2230 } 2231 } 2232 2233 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) 2234 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1))) 2235 if (Value *Res = foldLogicOfFCmps(LHS, RHS, false)) 2236 return replaceInstUsesWith(I, Res); 2237 2238 if (Instruction *CastedOr = foldCastedBitwiseLogic(I)) 2239 return CastedOr; 2240 2241 // or(sext(A), B) / or(B, sext(A)) --> A ? -1 : B, where A is i1 or <N x i1>. 
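// For illustration: sext of an i1 produces 0 or -1, so (hypothetical IR)
//   %s = sext i1 %a to i32
//   %r = or i32 %s, %b
// yields -1 when %a is true and %b when %a is false, i.e. select %a, -1, %b.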
2242 if (match(Op0, m_OneUse(m_SExt(m_Value(A)))) && 2243 A->getType()->isIntOrIntVectorTy(1)) 2244 return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op1); 2245 if (match(Op1, m_OneUse(m_SExt(m_Value(A)))) && 2246 A->getType()->isIntOrIntVectorTy(1)) 2247 return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op0); 2248 2249 // Note: If we've gotten to the point of visiting the outer OR, then the 2250 // inner one couldn't be simplified. If it was a constant, then it won't 2251 // be simplified by a later pass either, so we try swapping the inner/outer 2252 // ORs in the hopes that we'll be able to simplify it this way. 2253 // (X|C) | V --> (X|V) | C 2254 ConstantInt *C1; 2255 if (Op0->hasOneUse() && !isa<ConstantInt>(Op1) && 2256 match(Op0, m_Or(m_Value(A), m_ConstantInt(C1)))) { 2257 Value *Inner = Builder.CreateOr(A, Op1); 2258 Inner->takeName(Op0); 2259 return BinaryOperator::CreateOr(Inner, C1); 2260 } 2261 2262 // Change (or (bool?A:B),(bool?C:D)) --> (bool?(or A,C):(or B,D)) 2263 // Since this OR statement hasn't been optimized further yet, we hope 2264 // that this transformation will allow the new ORs to be optimized. 2265 { 2266 Value *X = nullptr, *Y = nullptr; 2267 if (Op0->hasOneUse() && Op1->hasOneUse() && 2268 match(Op0, m_Select(m_Value(X), m_Value(A), m_Value(B))) && 2269 match(Op1, m_Select(m_Value(Y), m_Value(C), m_Value(D))) && X == Y) { 2270 Value *orTrue = Builder.CreateOr(A, C); 2271 Value *orFalse = Builder.CreateOr(B, D); 2272 return SelectInst::Create(X, orTrue, orFalse); 2273 } 2274 } 2275 2276 return nullptr; 2277 } 2278 2279 /// A ^ B can be specified using other logic ops in a variety of patterns. We 2280 /// can fold these early and efficiently by morphing an existing instruction. 2281 static Instruction *foldXorToXor(BinaryOperator &I, 2282 InstCombiner::BuilderTy &Builder) { 2283 assert(I.getOpcode() == Instruction::Xor); 2284 Value *Op0 = I.getOperand(0); 2285 Value *Op1 = I.getOperand(1); 2286 Value *A, *B; 2287 2288 // There are 4 commuted variants for each of the basic patterns. 2289 2290 // (A & B) ^ (A | B) -> A ^ B 2291 // (A & B) ^ (B | A) -> A ^ B 2292 // (A | B) ^ (A & B) -> A ^ B 2293 // (A | B) ^ (B & A) -> A ^ B 2294 if (match(&I, m_c_Xor(m_And(m_Value(A), m_Value(B)), 2295 m_c_Or(m_Deferred(A), m_Deferred(B))))) { 2296 I.setOperand(0, A); 2297 I.setOperand(1, B); 2298 return &I; 2299 } 2300 2301 // (A | ~B) ^ (~A | B) -> A ^ B 2302 // (~B | A) ^ (~A | B) -> A ^ B 2303 // (~A | B) ^ (A | ~B) -> A ^ B 2304 // (B | ~A) ^ (A | ~B) -> A ^ B 2305 if (match(&I, m_Xor(m_c_Or(m_Value(A), m_Not(m_Value(B))), 2306 m_c_Or(m_Not(m_Deferred(A)), m_Deferred(B))))) { 2307 I.setOperand(0, A); 2308 I.setOperand(1, B); 2309 return &I; 2310 } 2311 2312 // (A & ~B) ^ (~A & B) -> A ^ B 2313 // (~B & A) ^ (~A & B) -> A ^ B 2314 // (~A & B) ^ (A & ~B) -> A ^ B 2315 // (B & ~A) ^ (A & ~B) -> A ^ B 2316 if (match(&I, m_Xor(m_c_And(m_Value(A), m_Not(m_Value(B))), 2317 m_c_And(m_Not(m_Deferred(A)), m_Deferred(B))))) { 2318 I.setOperand(0, A); 2319 I.setOperand(1, B); 2320 return &I; 2321 } 2322 2323 // For the remaining cases we need to get rid of one of the operands. 2324 if (!Op0->hasOneUse() && !Op1->hasOneUse()) 2325 return nullptr; 2326 2327 // (A | B) ^ ~(A & B) -> ~(A ^ B) 2328 // (A | B) ^ ~(B & A) -> ~(A ^ B) 2329 // (A & B) ^ ~(A | B) -> ~(A ^ B) 2330 // (A & B) ^ ~(B | A) -> ~(A ^ B) 2331 // Complexity sorting ensures the not will be on the right side. 
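// Per-bit check for the first pair (each row is one bit of A and B):
//   A B | A|B  ~(A&B) | (A|B) ^ ~(A&B)  ~(A ^ B)
//   0 0 |  0     1    |        1            1
//   0 1 |  1     1    |        0            0
//   1 0 |  1     1    |        0            0
//   1 1 |  1     0    |        1            1
// so (A | B) ^ ~(A & B) agrees with ~(A ^ B) on every bit.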
2332 if ((match(Op0, m_Or(m_Value(A), m_Value(B))) && 2333 match(Op1, m_Not(m_c_And(m_Specific(A), m_Specific(B))))) || 2334 (match(Op0, m_And(m_Value(A), m_Value(B))) && 2335 match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))) 2336 return BinaryOperator::CreateNot(Builder.CreateXor(A, B)); 2337 2338 return nullptr; 2339 } 2340 2341 Value *InstCombiner::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS) { 2342 if (PredicatesFoldable(LHS->getPredicate(), RHS->getPredicate())) { 2343 if (LHS->getOperand(0) == RHS->getOperand(1) && 2344 LHS->getOperand(1) == RHS->getOperand(0)) 2345 LHS->swapOperands(); 2346 if (LHS->getOperand(0) == RHS->getOperand(0) && 2347 LHS->getOperand(1) == RHS->getOperand(1)) { 2348 // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B) 2349 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1); 2350 unsigned Code = getICmpCode(LHS) ^ getICmpCode(RHS); 2351 bool isSigned = LHS->isSigned() || RHS->isSigned(); 2352 return getNewICmpValue(isSigned, Code, Op0, Op1, Builder); 2353 } 2354 } 2355 2356 // TODO: This can be generalized to compares of non-signbits using 2357 // decomposeBitTestICmp(). It could be enhanced more by using (something like) 2358 // foldLogOpOfMaskedICmps(). 2359 ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate(); 2360 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1); 2361 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1); 2362 if ((LHS->hasOneUse() || RHS->hasOneUse()) && 2363 LHS0->getType() == RHS0->getType()) { 2364 // (X > -1) ^ (Y > -1) --> (X ^ Y) < 0 2365 // (X < 0) ^ (Y < 0) --> (X ^ Y) < 0 2366 if ((PredL == CmpInst::ICMP_SGT && match(LHS1, m_AllOnes()) && 2367 PredR == CmpInst::ICMP_SGT && match(RHS1, m_AllOnes())) || 2368 (PredL == CmpInst::ICMP_SLT && match(LHS1, m_Zero()) && 2369 PredR == CmpInst::ICMP_SLT && match(RHS1, m_Zero()))) { 2370 Value *Zero = ConstantInt::getNullValue(LHS0->getType()); 2371 return Builder.CreateICmpSLT(Builder.CreateXor(LHS0, RHS0), Zero); 2372 } 2373 // (X > -1) ^ (Y < 0) --> (X ^ Y) > -1 2374 // (X < 0) ^ (Y > -1) --> (X ^ Y) > -1 2375 if ((PredL == CmpInst::ICMP_SGT && match(LHS1, m_AllOnes()) && 2376 PredR == CmpInst::ICMP_SLT && match(RHS1, m_Zero())) || 2377 (PredL == CmpInst::ICMP_SLT && match(LHS1, m_Zero()) && 2378 PredR == CmpInst::ICMP_SGT && match(RHS1, m_AllOnes()))) { 2379 Value *MinusOne = ConstantInt::getAllOnesValue(LHS0->getType()); 2380 return Builder.CreateICmpSGT(Builder.CreateXor(LHS0, RHS0), MinusOne); 2381 } 2382 } 2383 2384 // Instead of trying to imitate the folds for and/or, decompose this 'xor' 2385 // into those logic ops. That is, try to turn this into an and-of-icmps 2386 // because we have many folds for that pattern. 2387 // 2388 // This is based on a truth table definition of xor: 2389 // X ^ Y --> (X | Y) & !(X & Y) 2390 if (Value *OrICmp = SimplifyBinOp(Instruction::Or, LHS, RHS, SQ)) { 2391 // TODO: If OrICmp is true, then the definition of xor simplifies to !(X&Y). 2392 // TODO: If OrICmp is false, the whole thing is false (InstSimplify?). 2393 if (Value *AndICmp = SimplifyBinOp(Instruction::And, LHS, RHS, SQ)) { 2394 // TODO: Independently handle cases where the 'and' side is a constant. 
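// For illustration (a hypothetical case): LHS = (%x != 5), RHS = (%x u< 5).
// Then LHS | RHS simplifies to LHS and LHS & RHS simplifies to RHS, so
//   LHS ^ RHS == LHS & !RHS == (%x != 5) & (%x u>= 5) == (%x u> 5).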
2395 if (OrICmp == LHS && AndICmp == RHS && RHS->hasOneUse()) { 2396 // (LHS | RHS) & !(LHS & RHS) --> LHS & !RHS 2397 RHS->setPredicate(RHS->getInversePredicate()); 2398 return Builder.CreateAnd(LHS, RHS); 2399 } 2400 if (OrICmp == RHS && AndICmp == LHS && LHS->hasOneUse()) { 2401 // !(LHS & RHS) & (LHS | RHS) --> !LHS & RHS 2402 LHS->setPredicate(LHS->getInversePredicate()); 2403 return Builder.CreateAnd(LHS, RHS); 2404 } 2405 } 2406 } 2407 2408 return nullptr; 2409 } 2410 2411 /// If we have a masked merge, in the canonical form of: 2412 /// (assuming that A only has one use.) 2413 /// | A | |B| 2414 /// ((x ^ y) & M) ^ y 2415 /// | D | 2416 /// * If M is inverted: 2417 /// | D | 2418 /// ((x ^ y) & ~M) ^ y 2419 /// We can canonicalize by swapping the final xor operand 2420 /// to eliminate the 'not' of the mask. 2421 /// ((x ^ y) & M) ^ x 2422 /// * If M is a constant, and D has one use, we transform to 'and' / 'or' ops 2423 /// because that shortens the dependency chain and improves analysis: 2424 /// (x & M) | (y & ~M) 2425 static Instruction *visitMaskedMerge(BinaryOperator &I, 2426 InstCombiner::BuilderTy &Builder) { 2427 Value *B, *X, *D; 2428 Value *M; 2429 if (!match(&I, m_c_Xor(m_Value(B), 2430 m_OneUse(m_c_And( 2431 m_CombineAnd(m_c_Xor(m_Deferred(B), m_Value(X)), 2432 m_Value(D)), 2433 m_Value(M)))))) 2434 return nullptr; 2435 2436 Value *NotM; 2437 if (match(M, m_Not(m_Value(NotM)))) { 2438 // De-invert the mask and swap the value in B part. 2439 Value *NewA = Builder.CreateAnd(D, NotM); 2440 return BinaryOperator::CreateXor(NewA, X); 2441 } 2442 2443 Constant *C; 2444 if (D->hasOneUse() && match(M, m_Constant(C))) { 2445 // Unfold. 2446 Value *LHS = Builder.CreateAnd(X, C); 2447 Value *NotC = Builder.CreateNot(C); 2448 Value *RHS = Builder.CreateAnd(B, NotC); 2449 return BinaryOperator::CreateOr(LHS, RHS); 2450 } 2451 2452 return nullptr; 2453 } 2454 2455 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches 2456 // here. We should standardize that construct where it is needed or choose some 2457 // other way to ensure that commutated variants of patterns are not missed. 2458 Instruction *InstCombiner::visitXor(BinaryOperator &I) { 2459 if (Value *V = SimplifyXorInst(I.getOperand(0), I.getOperand(1), 2460 SQ.getWithInstruction(&I))) 2461 return replaceInstUsesWith(I, V); 2462 2463 if (SimplifyAssociativeOrCommutative(I)) 2464 return &I; 2465 2466 if (Instruction *X = foldShuffledBinop(I)) 2467 return X; 2468 2469 if (Instruction *NewXor = foldXorToXor(I, Builder)) 2470 return NewXor; 2471 2472 // (A&B)^(A&C) -> A&(B^C) etc 2473 if (Value *V = SimplifyUsingDistributiveLaws(I)) 2474 return replaceInstUsesWith(I, V); 2475 2476 // See if we can simplify any instructions used by the instruction whose sole 2477 // purpose is to compute bits we don't care about. 2478 if (SimplifyDemandedInstructionBits(I)) 2479 return &I; 2480 2481 if (Value *V = SimplifyBSwap(I, Builder)) 2482 return replaceInstUsesWith(I, V); 2483 2484 // A^B --> A|B iff A and B have no bits set in common. 2485 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 2486 if (haveNoCommonBitsSet(Op0, Op1, DL, &AC, &I, &DT)) 2487 return BinaryOperator::CreateOr(Op0, Op1); 2488 2489 // Apply DeMorgan's Law for 'nand' / 'nor' logic with an inverted operand. 2490 Value *X, *Y; 2491 2492 // We must eliminate the and/or (one-use) for these transforms to not increase 2493 // the instruction count. 
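// e.g. ~(~X & Y) takes three instructions (two 'xor -1' plus the 'and'),
// while (X | ~Y) takes two; the swap only pays off if the inner 'and' dies.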
2494 // ~(~X & Y) --> (X | ~Y) 2495 // ~(Y & ~X) --> (X | ~Y) 2496 if (match(&I, m_Not(m_OneUse(m_c_And(m_Not(m_Value(X)), m_Value(Y)))))) { 2497 Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not"); 2498 return BinaryOperator::CreateOr(X, NotY); 2499 } 2500 // ~(~X | Y) --> (X & ~Y) 2501 // ~(Y | ~X) --> (X & ~Y) 2502 if (match(&I, m_Not(m_OneUse(m_c_Or(m_Not(m_Value(X)), m_Value(Y)))))) { 2503 Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not"); 2504 return BinaryOperator::CreateAnd(X, NotY); 2505 } 2506 2507 if (Instruction *Xor = visitMaskedMerge(I, Builder)) 2508 return Xor; 2509 2510 // Is this a 'not' (~) fed by a binary operator? 2511 BinaryOperator *NotVal; 2512 if (match(&I, m_Not(m_BinOp(NotVal)))) { 2513 if (NotVal->getOpcode() == Instruction::And || 2514 NotVal->getOpcode() == Instruction::Or) { 2515 // Apply DeMorgan's Law when inverts are free: 2516 // ~(X & Y) --> (~X | ~Y) 2517 // ~(X | Y) --> (~X & ~Y) 2518 if (IsFreeToInvert(NotVal->getOperand(0), 2519 NotVal->getOperand(0)->hasOneUse()) && 2520 IsFreeToInvert(NotVal->getOperand(1), 2521 NotVal->getOperand(1)->hasOneUse())) { 2522 Value *NotX = Builder.CreateNot(NotVal->getOperand(0), "notlhs"); 2523 Value *NotY = Builder.CreateNot(NotVal->getOperand(1), "notrhs"); 2524 if (NotVal->getOpcode() == Instruction::And) 2525 return BinaryOperator::CreateOr(NotX, NotY); 2526 return BinaryOperator::CreateAnd(NotX, NotY); 2527 } 2528 } 2529 2530 // ~(X - Y) --> ~X + Y 2531 if (match(NotVal, m_OneUse(m_Sub(m_Value(X), m_Value(Y))))) 2532 return BinaryOperator::CreateAdd(Builder.CreateNot(X), Y); 2533 2534 // ~(~X >>s Y) --> (X >>s Y) 2535 if (match(NotVal, m_AShr(m_Not(m_Value(X)), m_Value(Y)))) 2536 return BinaryOperator::CreateAShr(X, Y); 2537 2538 // If we are inverting a right-shifted constant, we may be able to eliminate 2539 // the 'not' by inverting the constant and using the opposite shift type. 2540 // Canonicalization rules ensure that only a negative constant uses 'ashr', 2541 // but we must check that in case that transform has not fired yet. 
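// For illustration (hypothetical i8 values): C = -8 (0b11111000), Y = 2:
//   ~(-8 >>s 2) = ~(-2) = 1, and (~-8) >>u 2 = 7 >>u 2 = 1.
// Inverting the constant turns the replicated sign bits into leading zeros,
// so a logical shift reproduces the inverted result.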
2542     Constant *C;
2543     if (match(NotVal, m_AShr(m_Constant(C), m_Value(Y))) &&
2544         match(C, m_Negative())) {
2545       // ~(C >>s Y) --> ~C >>u Y (when inverting the replicated sign bits)
2546       Constant *NotC = ConstantExpr::getNot(C);
2547       return BinaryOperator::CreateLShr(NotC, Y);
2548     }
2549 
2550     if (match(NotVal, m_LShr(m_Constant(C), m_Value(Y))) &&
2551         match(C, m_NonNegative())) {
2552       // ~(C >>u Y) --> ~C >>s Y (when inverting the replicated sign bits)
2553       Constant *NotC = ConstantExpr::getNot(C);
2554       return BinaryOperator::CreateAShr(NotC, Y);
2555     }
2556   }
2557 
2558   // not (cmp A, B) = !cmp A, B
2559   CmpInst::Predicate Pred;
2560   if (match(&I, m_Not(m_OneUse(m_Cmp(Pred, m_Value(), m_Value()))))) {
2561     cast<CmpInst>(Op0)->setPredicate(CmpInst::getInversePredicate(Pred));
2562     return replaceInstUsesWith(I, Op0);
2563   }
2564 
2565   {
2566     const APInt *RHSC;
2567     if (match(Op1, m_APInt(RHSC))) {
2568       Value *X;
2569       const APInt *C;
2570       if (match(Op0, m_Sub(m_APInt(C), m_Value(X)))) {
2571         // ~(C - X) == X - C - 1 == X + (-C - 1)
2572         if (RHSC->isAllOnesValue()) {
2573           Constant *NewC = ConstantInt::get(I.getType(), -(*C) - 1);
2574           return BinaryOperator::CreateAdd(X, NewC);
2575         }
2576         if (RHSC->isSignMask()) {
2577           // (C - X) ^ signmask -> (C + signmask - X)
2578           Constant *NewC = ConstantInt::get(I.getType(), *C + *RHSC);
2579           return BinaryOperator::CreateSub(NewC, X);
2580         }
2581       } else if (match(Op0, m_Add(m_Value(X), m_APInt(C)))) {
2582         // ~(X + C) == -X - C - 1 == (-C - 1) - X
2583         if (RHSC->isAllOnesValue()) {
2584           Constant *NewC = ConstantInt::get(I.getType(), -(*C) - 1);
2585           return BinaryOperator::CreateSub(NewC, X);
2586         }
2587         if (RHSC->isSignMask()) {
2588           // (X + C) ^ signmask -> (X + C + signmask)
2589           Constant *NewC = ConstantInt::get(I.getType(), *C + *RHSC);
2590           return BinaryOperator::CreateAdd(X, NewC);
2591         }
2592       }
2593 
2594       // (X|C1)^C2 -> X^(C1^C2) iff X & C1 == 0
2595       if (match(Op0, m_Or(m_Value(X), m_APInt(C))) &&
2596           MaskedValueIsZero(X, *C, 0, &I)) {
2597         Constant *NewC = ConstantInt::get(I.getType(), *C ^ *RHSC);
2598         Worklist.Add(cast<Instruction>(Op0));
2599         I.setOperand(0, X);
2600         I.setOperand(1, NewC);
2601         return &I;
2602       }
2603     }
2604   }
2605 
2606   if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1)) {
2607     if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
2608       if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
2609         if (Op0I->getOpcode() == Instruction::LShr) {
2610           // ((X^C1) >> C2) ^ C3 -> (X>>C2) ^ ((C1>>C2)^C3)
2611           // E1 = "X ^ C1"
2612           BinaryOperator *E1;
2613           ConstantInt *C1;
2614           if (Op0I->hasOneUse() &&
2615               (E1 = dyn_cast<BinaryOperator>(Op0I->getOperand(0))) &&
2616               E1->getOpcode() == Instruction::Xor &&
2617               (C1 = dyn_cast<ConstantInt>(E1->getOperand(1)))) {
2618             // fold (C1 >> C2) ^ C3
2619             ConstantInt *C2 = Op0CI, *C3 = RHSC;
2620             APInt FoldConst = C1->getValue().lshr(C2->getValue());
2621             FoldConst ^= C3->getValue();
2622             // Prepare the two operands.
2623 Value *Opnd0 = Builder.CreateLShr(E1->getOperand(0), C2); 2624 Opnd0->takeName(Op0I); 2625 cast<Instruction>(Opnd0)->setDebugLoc(I.getDebugLoc()); 2626 Value *FoldVal = ConstantInt::get(Opnd0->getType(), FoldConst); 2627 2628 return BinaryOperator::CreateXor(Opnd0, FoldVal); 2629 } 2630 } 2631 } 2632 } 2633 } 2634 2635 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I)) 2636 return FoldedLogic; 2637 2638 { 2639 Value *A, *B; 2640 if (match(Op1, m_OneUse(m_Or(m_Value(A), m_Value(B))))) { 2641 if (A == Op0) { // A^(A|B) == A^(B|A) 2642 cast<BinaryOperator>(Op1)->swapOperands(); 2643 std::swap(A, B); 2644 } 2645 if (B == Op0) { // A^(B|A) == (B|A)^A 2646 I.swapOperands(); // Simplified below. 2647 std::swap(Op0, Op1); 2648 } 2649 } else if (match(Op1, m_OneUse(m_And(m_Value(A), m_Value(B))))) { 2650 if (A == Op0) { // A^(A&B) -> A^(B&A) 2651 cast<BinaryOperator>(Op1)->swapOperands(); 2652 std::swap(A, B); 2653 } 2654 if (B == Op0) { // A^(B&A) -> (B&A)^A 2655 I.swapOperands(); // Simplified below. 2656 std::swap(Op0, Op1); 2657 } 2658 } 2659 } 2660 2661 { 2662 Value *A, *B; 2663 if (match(Op0, m_OneUse(m_Or(m_Value(A), m_Value(B))))) { 2664 if (A == Op1) // (B|A)^B == (A|B)^B 2665 std::swap(A, B); 2666 if (B == Op1) // (A|B)^B == A & ~B 2667 return BinaryOperator::CreateAnd(A, Builder.CreateNot(Op1)); 2668 } else if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B))))) { 2669 if (A == Op1) // (A&B)^A -> (B&A)^A 2670 std::swap(A, B); 2671 const APInt *C; 2672 if (B == Op1 && // (B&A)^A == ~B & A 2673 !match(Op1, m_APInt(C))) { // Canonical form is (B&C)^C 2674 return BinaryOperator::CreateAnd(Builder.CreateNot(A), Op1); 2675 } 2676 } 2677 } 2678 2679 { 2680 Value *A, *B, *C, *D; 2681 // (A ^ C)^(A | B) -> ((~A) & B) ^ C 2682 if (match(Op0, m_Xor(m_Value(D), m_Value(C))) && 2683 match(Op1, m_Or(m_Value(A), m_Value(B)))) { 2684 if (D == A) 2685 return BinaryOperator::CreateXor( 2686 Builder.CreateAnd(Builder.CreateNot(A), B), C); 2687 if (D == B) 2688 return BinaryOperator::CreateXor( 2689 Builder.CreateAnd(Builder.CreateNot(B), A), C); 2690 } 2691 // (A | B)^(A ^ C) -> ((~A) & B) ^ C 2692 if (match(Op0, m_Or(m_Value(A), m_Value(B))) && 2693 match(Op1, m_Xor(m_Value(D), m_Value(C)))) { 2694 if (D == A) 2695 return BinaryOperator::CreateXor( 2696 Builder.CreateAnd(Builder.CreateNot(A), B), C); 2697 if (D == B) 2698 return BinaryOperator::CreateXor( 2699 Builder.CreateAnd(Builder.CreateNot(B), A), C); 2700 } 2701 // (A & B) ^ (A ^ B) -> (A | B) 2702 if (match(Op0, m_And(m_Value(A), m_Value(B))) && 2703 match(Op1, m_c_Xor(m_Specific(A), m_Specific(B)))) 2704 return BinaryOperator::CreateOr(A, B); 2705 // (A ^ B) ^ (A & B) -> (A | B) 2706 if (match(Op0, m_Xor(m_Value(A), m_Value(B))) && 2707 match(Op1, m_c_And(m_Specific(A), m_Specific(B)))) 2708 return BinaryOperator::CreateOr(A, B); 2709 } 2710 2711 // (A & ~B) ^ ~A -> ~(A & B) 2712 // (~B & A) ^ ~A -> ~(A & B) 2713 Value *A, *B; 2714 if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) && 2715 match(Op1, m_Not(m_Specific(A)))) 2716 return BinaryOperator::CreateNot(Builder.CreateAnd(A, B)); 2717 2718 if (auto *LHS = dyn_cast<ICmpInst>(I.getOperand(0))) 2719 if (auto *RHS = dyn_cast<ICmpInst>(I.getOperand(1))) 2720 if (Value *V = foldXorOfICmps(LHS, RHS)) 2721 return replaceInstUsesWith(I, V); 2722 2723 if (Instruction *CastedXor = foldCastedBitwiseLogic(I)) 2724 return CastedXor; 2725 2726 // Canonicalize a shifty way to code absolute value to the common pattern. 2727 // There are 4 potential commuted variants. 
Move the 'ashr' candidate to Op1. 2728 // We're relying on the fact that we only do this transform when the shift has 2729 // exactly 2 uses and the add has exactly 1 use (otherwise, we might increase 2730 // instructions). 2731 if (Op0->hasNUses(2)) 2732 std::swap(Op0, Op1); 2733 2734 const APInt *ShAmt; 2735 Type *Ty = I.getType(); 2736 if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) && 2737 Op1->hasNUses(2) && *ShAmt == Ty->getScalarSizeInBits() - 1 && 2738 match(Op0, m_OneUse(m_c_Add(m_Specific(A), m_Specific(Op1))))) { 2739 // B = ashr i32 A, 31 ; smear the sign bit 2740 // xor (add A, B), B ; add -1 and flip bits if negative 2741 // --> (A < 0) ? -A : A 2742 Value *Cmp = Builder.CreateICmpSLT(A, ConstantInt::getNullValue(Ty)); 2743 // Copy the nuw/nsw flags from the add to the negate. 2744 auto *Add = cast<BinaryOperator>(Op0); 2745 Value *Neg = Builder.CreateNeg(A, "", Add->hasNoUnsignedWrap(), 2746 Add->hasNoSignedWrap()); 2747 return SelectInst::Create(Cmp, Neg, A); 2748 } 2749 2750 // Eliminate a bitwise 'not' op of 'not' min/max by inverting the min/max: 2751 // 2752 // %notx = xor i32 %x, -1 2753 // %cmp1 = icmp sgt i32 %notx, %y 2754 // %smax = select i1 %cmp1, i32 %notx, i32 %y 2755 // %res = xor i32 %smax, -1 2756 // => 2757 // %noty = xor i32 %y, -1 2758 // %cmp2 = icmp slt %x, %noty 2759 // %res = select i1 %cmp2, i32 %x, i32 %noty 2760 // 2761 // Same is applicable for smin/umax/umin. 2762 { 2763 Value *LHS, *RHS; 2764 SelectPatternFlavor SPF = matchSelectPattern(Op0, LHS, RHS).Flavor; 2765 if (Op0->hasOneUse() && SelectPatternResult::isMinOrMax(SPF) && 2766 match(Op1, m_AllOnes())) { 2767 2768 Value *X; 2769 if (match(RHS, m_Not(m_Value(X)))) 2770 std::swap(RHS, LHS); 2771 2772 if (match(LHS, m_Not(m_Value(X)))) { 2773 Value *NotY = Builder.CreateNot(RHS); 2774 return SelectInst::Create( 2775 Builder.CreateICmp(getInverseMinMaxPred(SPF), X, NotY), X, NotY); 2776 } 2777 } 2778 } 2779 2780 return nullptr; 2781 } 2782