//===- InstCombineSimplifyDemanded.cpp ------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains logic for simplifying instructions based on information
// about how they are used.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instcombine"

namespace {

struct AMDGPUImageDMaskIntrinsic {
  unsigned Intr;
};

#define GET_AMDGPUImageDMaskIntrinsicTable_IMPL
#include "InstCombineTables.inc"

} // end anonymous namespace

/// Check to see if the specified operand of the specified instruction is a
/// constant integer. If so, check to see if there are any bits set in the
/// constant that are not demanded. If so, shrink the constant and return true.
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
                                   const APInt &Demanded) {
  assert(I && "No instruction?");
  assert(OpNo < I->getNumOperands() && "Operand index too large");

  // The operand must be a constant integer or splat integer.
  Value *Op = I->getOperand(OpNo);
  const APInt *C;
  if (!match(Op, m_APInt(C)))
    return false;

  // If there are no bits set that aren't demanded, nothing to do.
  if (C->isSubsetOf(Demanded))
    return false;

  // This instruction is producing bits that are not demanded. Shrink the RHS.
  I->setOperand(OpNo, ConstantInt::get(Op->getType(), *C & Demanded));

  return true;
}



/// Inst is an integer instruction that SimplifyDemandedBits knows about. See if
/// the instruction has any properties that allow us to simplify its operands.
bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
  unsigned BitWidth = Inst.getType()->getScalarSizeInBits();
  KnownBits Known(BitWidth);
  APInt DemandedMask(APInt::getAllOnesValue(BitWidth));

  Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask, Known,
                                     0, &Inst);
  if (!V) return false;
  if (V == &Inst) return true;
  replaceInstUsesWith(Inst, V);
  return true;
}

/// This form of SimplifyDemandedBits simplifies the specified instruction
/// operand if possible, updating it in place. It returns true if it made any
/// change and false otherwise.
bool InstCombiner::SimplifyDemandedBits(Instruction *I, unsigned OpNo,
                                        const APInt &DemandedMask,
                                        KnownBits &Known,
                                        unsigned Depth) {
  Use &U = I->getOperandUse(OpNo);
  Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask, Known,
                                          Depth, I);
  if (!NewVal) return false;
  U = NewVal;
  return true;
}


/// This function attempts to replace V with a simpler value based on the
/// demanded bits. When this function is called, it is known that only the bits
/// set in DemandedMask of the result of V are ever used downstream.
/// Consequently, depending on the mask and V, it may be possible to replace V
/// with a constant or one of its operands. In such cases, this function does
/// the replacement and returns true.
/// In all other cases, it returns false after analyzing the expression and
/// setting Known.One to all the bits that are known to be one in the
/// expression. Known.Zero contains all the bits that are known to be zero in
/// the expression. These are provided to potentially allow the caller (which
/// might recursively be SimplifyDemandedBits itself) to simplify the
/// expression.
/// Known.One and Known.Zero always follow the invariant that:
///   Known.One & Known.Zero == 0.
/// That is, a bit can't be both 1 and 0. Note that the bits in Known.One and
/// Known.Zero may only be accurate for those bits set in DemandedMask. Note
/// also that the bitwidth of V, DemandedMask, Known.Zero and Known.One must all
/// be the same.
///
/// This returns null if it did not change anything and it permits no
/// simplification. This returns V itself if it did some simplification of V's
/// operands based on the information about what bits are demanded. This returns
/// some other non-null value if it found out that V is equal to another value
/// in the context where the specified bits are demanded, but not for all users.
Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
                                             KnownBits &Known, unsigned Depth,
                                             Instruction *CxtI) {
  assert(V != nullptr && "Null pointer of Value???");
  assert(Depth <= 6 && "Limit Search Depth");
  uint32_t BitWidth = DemandedMask.getBitWidth();
  Type *VTy = V->getType();
  assert(
      (!VTy->isIntOrIntVectorTy() || VTy->getScalarSizeInBits() == BitWidth) &&
      Known.getBitWidth() == BitWidth &&
      "Value *V, DemandedMask and Known must have same BitWidth");

  if (isa<Constant>(V)) {
    computeKnownBits(V, Known, Depth, CxtI);
    return nullptr;
  }

  Known.resetAll();
  if (DemandedMask.isNullValue()) // Not demanding any bits from V.
    return UndefValue::get(VTy);

  if (Depth == 6) // Limit search depth.
    return nullptr;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) {
    computeKnownBits(V, Known, Depth, CxtI);
    return nullptr; // Only analyze instructions.
  }

  // If there are multiple uses of this value and we aren't at the root, then
  // we can't do any simplifications of the operands, because DemandedMask
  // only reflects the bits demanded by *one* of the users.
  if (Depth != 0 && !I->hasOneUse())
    return SimplifyMultipleUseDemandedBits(I, DemandedMask, Known, Depth, CxtI);

  KnownBits LHSKnown(BitWidth), RHSKnown(BitWidth);

  // If this is the root being simplified, allow it to have multiple uses,
  // just set the DemandedMask to all bits so that we can try to simplify the
  // operands. This allows visitTruncInst (for example) to simplify the
  // operand of a trunc without duplicating all the logic below.
  if (Depth == 0 && !V->hasOneUse())
    DemandedMask.setAllBits();

  switch (I->getOpcode()) {
  default:
    computeKnownBits(I, Known, Depth, CxtI);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.Zero, LHSKnown,
                             Depth + 1))
      return I;
    assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
    assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");

    // Output known-0 bits are known to be clear if zero in either the
    // LHS | RHS.
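    // e.g. with RHSKnown.Zero == 0b0100 and LHSKnown.Zero == 0b0001, the
    // 'and' result is known zero in 0b0101; a result bit is only known one
    // when it is known one on both sides.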
    APInt IKnownZero = RHSKnown.Zero | LHSKnown.Zero;
    // Output known-1 bits are only known if set in both the LHS & RHS.
    APInt IKnownOne = RHSKnown.One & LHSKnown.One;

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
      return Constant::getIntegerValue(VTy, IKnownOne);

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if (DemandedMask.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
      return I->getOperand(1);

    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(I, 1, DemandedMask & ~LHSKnown.Zero))
      return I;

    Known.Zero = std::move(IKnownZero);
    Known.One = std::move(IKnownOne);
    break;
  }
  case Instruction::Or: {
    // If either the LHS or the RHS are One, the result is One.
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.One, LHSKnown,
                             Depth + 1))
      return I;
    assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
    assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    APInt IKnownZero = RHSKnown.Zero & LHSKnown.Zero;
    // Output known-1 bits are known to be set if set in either the LHS | RHS.
    APInt IKnownOne = RHSKnown.One | LHSKnown.One;

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
      return Constant::getIntegerValue(VTy, IKnownOne);

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or'.
    if (DemandedMask.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
      return I->getOperand(1);

    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(I, 1, DemandedMask))
      return I;

    Known.Zero = std::move(IKnownZero);
    Known.One = std::move(IKnownOne);
    break;
  }
  case Instruction::Xor: {
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask, LHSKnown, Depth + 1))
      return I;
    assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
    assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt IKnownZero = (RHSKnown.Zero & LHSKnown.Zero) |
                       (RHSKnown.One & LHSKnown.One);
    // Output known-1 bits are known to be set if set in only one of the LHS,
    // RHS.
    APInt IKnownOne = (RHSKnown.Zero & LHSKnown.One) |
                      (RHSKnown.One & LHSKnown.Zero);

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
      return Constant::getIntegerValue(VTy, IKnownOne);

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'xor'.
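    // e.g. X ^ 0 == X: wherever the demanded bits of one operand are known
    // zero, the xor simply passes the other operand through.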
    if (DemandedMask.isSubsetOf(RHSKnown.Zero))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(LHSKnown.Zero))
      return I->getOperand(1);

    // If all of the demanded bits are known to be zero on one side or the
    // other, turn this into an *inclusive* or.
    // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.Zero)) {
      Instruction *Or =
          BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
                                   I->getName());
      return InsertNewInstWith(Or, *I);
    }

    // If all of the demanded bits on one side are known, and all of the set
    // bits on that side are also known to be set on the other side, turn this
    // into an AND, as we know the bits will be cleared.
    // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
    if (DemandedMask.isSubsetOf(RHSKnown.Zero|RHSKnown.One) &&
        RHSKnown.One.isSubsetOf(LHSKnown.One)) {
      Constant *AndC = Constant::getIntegerValue(VTy,
                                                 ~RHSKnown.One & DemandedMask);
      Instruction *And = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
      return InsertNewInstWith(And, *I);
    }

    // If the RHS is a constant, see if we can simplify it.
    // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
    if (ShrinkDemandedConstant(I, 1, DemandedMask))
      return I;

    // If our LHS is an 'and' and if it has one use, and if any of the bits we
    // are flipping are known to be set, then the xor is just resetting those
    // bits to zero. We can just knock out bits from the 'and' and the 'xor',
    // simplifying both of them.
    if (Instruction *LHSInst = dyn_cast<Instruction>(I->getOperand(0)))
      if (LHSInst->getOpcode() == Instruction::And && LHSInst->hasOneUse() &&
          isa<ConstantInt>(I->getOperand(1)) &&
          isa<ConstantInt>(LHSInst->getOperand(1)) &&
          (LHSKnown.One & RHSKnown.One & DemandedMask) != 0) {
        ConstantInt *AndRHS = cast<ConstantInt>(LHSInst->getOperand(1));
        ConstantInt *XorRHS = cast<ConstantInt>(I->getOperand(1));
        APInt NewMask = ~(LHSKnown.One & RHSKnown.One & DemandedMask);

        Constant *AndC =
            ConstantInt::get(I->getType(), NewMask & AndRHS->getValue());
        Instruction *NewAnd = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
        InsertNewInstWith(NewAnd, *I);

        Constant *XorC =
            ConstantInt::get(I->getType(), NewMask & XorRHS->getValue());
        Instruction *NewXor = BinaryOperator::CreateXor(NewAnd, XorC);
        return InsertNewInstWith(NewXor, *I);
      }

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    Known.Zero = std::move(IKnownZero);
    // Output known-1 bits are known to be set if set in only one of the LHS,
    // RHS.
    Known.One = std::move(IKnownOne);
    break;
  }
  case Instruction::Select:
    // If this is a select as part of a min/max pattern, don't simplify any
    // further in case we break the structure.
    Value *LHS, *RHS;
    if (matchSelectPattern(I, LHS, RHS).Flavor != SPF_UNKNOWN)
      return nullptr;

    if (SimplifyDemandedBits(I, 2, DemandedMask, RHSKnown, Depth + 1) ||
        SimplifyDemandedBits(I, 1, DemandedMask, LHSKnown, Depth + 1))
      return I;
    assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
    assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
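    // e.g. select %c, i16 0xFF00, i16 0x00FF with DemandedMask == 0x00FF
    // shrinks the constants to 0x0000 and 0x00FF respectively.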
    if (ShrinkDemandedConstant(I, 1, DemandedMask) ||
        ShrinkDemandedConstant(I, 2, DemandedMask))
      return I;

    // Only known if known in both the LHS and RHS.
    Known.One = RHSKnown.One & LHSKnown.One;
    Known.Zero = RHSKnown.Zero & LHSKnown.Zero;
    break;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    APInt InputDemandedMask = DemandedMask.zextOrTrunc(SrcBitWidth);
    KnownBits InputKnown(SrcBitWidth);
    if (SimplifyDemandedBits(I, 0, InputDemandedMask, InputKnown, Depth + 1))
      return I;
    Known = InputKnown.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      Known.Zero.setBitsFrom(SrcBitWidth);
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    break;
  }
  case Instruction::BitCast:
    if (!I->getOperand(0)->getType()->isIntOrIntVectorTy())
      return nullptr;  // vector->int or fp->int?

    if (VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
      if (VectorType *SrcVTy =
            dyn_cast<VectorType>(I->getOperand(0)->getType())) {
        if (DstVTy->getNumElements() != SrcVTy->getNumElements())
          // Don't touch a bitcast between vectors of different element counts.
          return nullptr;
      } else
        // Don't touch a scalar-to-vector bitcast.
        return nullptr;
    } else if (I->getOperand(0)->getType()->isVectorTy())
      // Don't touch a vector-to-scalar bitcast.
      return nullptr;

    if (SimplifyDemandedBits(I, 0, DemandedMask, Known, Depth + 1))
      return I;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    break;
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    APInt InputDemandedBits = DemandedMask.trunc(SrcBitWidth);

    // If any of the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    if (DemandedMask.getActiveBits() > SrcBitWidth)
      InputDemandedBits.setBit(SrcBitWidth-1);

    KnownBits InputKnown(SrcBitWidth);
    if (SimplifyDemandedBits(I, 0, InputDemandedBits, InputKnown, Depth + 1))
      return I;

    // If the input sign bit is known zero, or if the NewBits are not demanded,
    // convert this into a zero extension.
    if (InputKnown.isNonNegative() ||
        DemandedMask.getActiveBits() <= SrcBitWidth) {
      // Convert to ZExt cast.
      CastInst *NewCast = new ZExtInst(I->getOperand(0), VTy, I->getName());
      return InsertNewInstWith(NewCast, *I);
    }

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = InputKnown.sext(BitWidth);
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    break;
  }
  case Instruction::Add:
  case Instruction::Sub: {
    // If the high-bits of an ADD/SUB are not demanded, then we do not care
    // about the high bits of the operands.
    unsigned NLZ = DemandedMask.countLeadingZeros();
    // Right fill the mask of bits for this ADD/SUB to demand the most
    // significant bit and all those below it.
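    // e.g. with BitWidth == 8 and DemandedMask == 0b00101000, NLZ == 2 and
    // DemandedFromOps == 0b00111111: carries only propagate upward, so bits
    // above the highest demanded bit never influence a demanded result bit.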
    APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
    if (ShrinkDemandedConstant(I, 0, DemandedFromOps) ||
        SimplifyDemandedBits(I, 0, DemandedFromOps, LHSKnown, Depth + 1) ||
        ShrinkDemandedConstant(I, 1, DemandedFromOps) ||
        SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnown, Depth + 1)) {
      if (NLZ > 0) {
        // Disable the nsw and nuw flags here: We can no longer guarantee that
        // we won't wrap after simplification. Removing the nsw/nuw flags is
        // legal here because the top bit is not demanded.
        BinaryOperator &BinOP = *cast<BinaryOperator>(I);
        BinOP.setHasNoSignedWrap(false);
        BinOP.setHasNoUnsignedWrap(false);
      }
      return I;
    }

    // If we are known to be adding/subtracting zeros to every bit below
    // the highest demanded bit, we just return the other side.
    if (DemandedFromOps.isSubsetOf(RHSKnown.Zero))
      return I->getOperand(0);
    // We can't do this with the LHS for subtraction, unless we are only
    // demanding the LSB.
    if ((I->getOpcode() == Instruction::Add ||
         DemandedFromOps.isOneValue()) &&
        DemandedFromOps.isSubsetOf(LHSKnown.Zero))
      return I->getOperand(1);

    // Otherwise just compute the known bits of the result.
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    Known = KnownBits::computeForAddSub(I->getOpcode() == Instruction::Add,
                                        NSW, LHSKnown, RHSKnown);
    break;
  }
  case Instruction::Shl: {
    const APInt *SA;
    if (match(I->getOperand(1), m_APInt(SA))) {
      const APInt *ShrAmt;
      if (match(I->getOperand(0), m_Shr(m_Value(), m_APInt(ShrAmt))))
        if (Instruction *Shr = dyn_cast<Instruction>(I->getOperand(0)))
          if (Value *R = simplifyShrShlDemandedBits(Shr, *ShrAmt, I, *SA,
                                                    DemandedMask, Known))
            return R;

      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
      APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));

      // If the shift is NUW/NSW, then it does demand the high bits.
      ShlOperator *IOp = cast<ShlOperator>(I);
      if (IOp->hasNoSignedWrap())
        DemandedMaskIn.setHighBits(ShiftAmt+1);
      else if (IOp->hasNoUnsignedWrap())
        DemandedMaskIn.setHighBits(ShiftAmt);

      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
        return I;
      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero <<= ShiftAmt;
      Known.One  <<= ShiftAmt;
      // low bits known zero.
      if (ShiftAmt)
        Known.Zero.setLowBits(ShiftAmt);
    }
    break;
  }
  case Instruction::LShr: {
    const APInt *SA;
    if (match(I->getOperand(1), m_APInt(SA))) {
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);

      // Unsigned shift right.
      APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));

      // If the shift is exact, then it does demand the low bits (and knows
      // that they are zero).
      if (cast<LShrOperator>(I)->isExact())
        DemandedMaskIn.setLowBits(ShiftAmt);

      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
        return I;
      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero.lshrInPlace(ShiftAmt);
      Known.One.lshrInPlace(ShiftAmt);
      if (ShiftAmt)
        Known.Zero.setHighBits(ShiftAmt);  // high bits known zero.
    }
    break;
  }
  case Instruction::AShr: {
    // If this is an arithmetic shift right and only the low-bit is set, we
    // can always convert this into a logical shr, even if the shift amount
    // is variable.
    // The low bit of the shift cannot be an input sign bit unless the shift
    // amount is >= the size of the datatype, which is undefined.
    if (DemandedMask.isOneValue()) {
      // Perform the logical shift right.
      Instruction *NewVal = BinaryOperator::CreateLShr(
          I->getOperand(0), I->getOperand(1), I->getName());
      return InsertNewInstWith(NewVal, *I);
    }

    // If the sign bit is the only bit demanded by this ashr, then there is no
    // need to do it, the shift doesn't change the high bit.
    if (DemandedMask.isSignMask())
      return I->getOperand(0);

    const APInt *SA;
    if (match(I->getOperand(1), m_APInt(SA))) {
      uint32_t ShiftAmt = SA->getLimitedValue(BitWidth-1);

      // Signed shift right.
      APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
      // If any of the high bits are demanded, we should set the sign bit as
      // demanded.
      if (DemandedMask.countLeadingZeros() <= ShiftAmt)
        DemandedMaskIn.setSignBit();

      // If the shift is exact, then it does demand the low bits (and knows
      // that they are zero).
      if (cast<AShrOperator>(I)->isExact())
        DemandedMaskIn.setLowBits(ShiftAmt);

      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
        return I;

      unsigned SignBits = ComputeNumSignBits(I->getOperand(0), Depth + 1, CxtI);

      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      // Compute the new bits that are at the top now plus sign bits.
      APInt HighBits(APInt::getHighBitsSet(
          BitWidth, std::min(SignBits + ShiftAmt - 1, BitWidth)));
      Known.Zero.lshrInPlace(ShiftAmt);
      Known.One.lshrInPlace(ShiftAmt);

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
      assert(BitWidth > ShiftAmt && "Shift amount not saturated?");
      if (Known.Zero[BitWidth-ShiftAmt-1] ||
          !DemandedMask.intersects(HighBits)) {
        BinaryOperator *LShr = BinaryOperator::CreateLShr(I->getOperand(0),
                                                          I->getOperand(1));
        LShr->setIsExact(cast<BinaryOperator>(I)->isExact());
        return InsertNewInstWith(LShr, *I);
      } else if (Known.One[BitWidth-ShiftAmt-1]) { // New bits are known one.
        Known.One |= HighBits;
      }
    }
    break;
  }
  case Instruction::UDiv: {
    // UDiv doesn't demand low bits that are zero in the divisor.
    const APInt *SA;
    if (match(I->getOperand(1), m_APInt(SA))) {
      // If the division is exact, then it does demand the low bits.
      if (cast<UDivOperator>(I)->isExact())
        break;

      // FIXME: Take the demanded mask of the result into account.
      unsigned RHSTrailingZeros = SA->countTrailingZeros();
      APInt DemandedMaskIn =
          APInt::getHighBitsSet(BitWidth, BitWidth - RHSTrailingZeros);
      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, LHSKnown, Depth + 1))
        return I;

      // Propagate zero bits from the input.
      Known.Zero.setHighBits(std::min(
          BitWidth, LHSKnown.Zero.countLeadingOnes() + RHSTrailingZeros));
    }
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // X % -1 demands all the bits because we don't want to introduce
      // INT_MIN % -1 (== undef) by accident.
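      // (Simplifying the operands under a narrower mask could manufacture
      // exactly that case out of a previously well-defined remainder.)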
      if (Rem->isMinusOne())
        break;
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        if (DemandedMask.ult(RA))    // srem won't affect demanded bits
          return I->getOperand(0);

        APInt LowBits = RA - 1;
        APInt Mask2 = LowBits | APInt::getSignMask(BitWidth);
        if (SimplifyDemandedBits(I, 0, Mask2, LHSKnown, Depth + 1))
          return I;

        // The low bits of LHS are unchanged by the srem.
        Known.Zero = LHSKnown.Zero & LowBits;
        Known.One = LHSKnown.One & LowBits;

        // If LHS is non-negative or has all low bits zero, then the upper
        // bits are all zero.
        if (LHSKnown.isNonNegative() || LowBits.isSubsetOf(LHSKnown.Zero))
          Known.Zero |= ~LowBits;

        // If LHS is negative and not all low bits are zero, then the upper
        // bits are all one.
        if (LHSKnown.isNegative() && LowBits.intersects(LHSKnown.One))
          Known.One |= ~LowBits;

        assert(!Known.hasConflict() && "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    if (DemandedMask.isSignBitSet()) {
      computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);
      // If it's known zero, our sign bit is also zero.
      if (LHSKnown.isNonNegative())
        Known.makeNonNegative();
    }
    break;
  case Instruction::URem: {
    KnownBits Known2(BitWidth);
    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
    if (SimplifyDemandedBits(I, 0, AllOnes, Known2, Depth + 1) ||
        SimplifyDemandedBits(I, 1, AllOnes, Known2, Depth + 1))
      return I;

    unsigned Leaders = Known2.countMinLeadingZeros();
    Known.Zero = APInt::getHighBitsSet(BitWidth, Leaders) & DemandedMask;
    break;
  }
  case Instruction::Call:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bswap: {
        // If the only bits demanded come from one byte of the bswap result,
        // just shift the input byte into position to eliminate the bswap.
        unsigned NLZ = DemandedMask.countLeadingZeros();
        unsigned NTZ = DemandedMask.countTrailingZeros();

        // Round NTZ down to the next byte. If we have 11 trailing zeros, then
        // we need all the bits down to bit 8. Likewise, round NLZ. If we
        // have 14 leading zeros, round to 8.
        NLZ &= ~7;
        NTZ &= ~7;
        // If we need exactly one byte, we can do this transformation.
        if (BitWidth-NLZ-NTZ == 8) {
          unsigned ResultBit = NTZ;
          unsigned InputBit = BitWidth-NTZ-8;

          // Replace this with either a left or right shift to get the byte
          // into the right place.
          Instruction *NewVal;
          if (InputBit > ResultBit)
            NewVal = BinaryOperator::CreateLShr(II->getArgOperand(0),
                    ConstantInt::get(I->getType(), InputBit-ResultBit));
          else
            NewVal = BinaryOperator::CreateShl(II->getArgOperand(0),
                    ConstantInt::get(I->getType(), ResultBit-InputBit));
          NewVal->takeName(I);
          return InsertNewInstWith(NewVal, *I);
        }

        // TODO: Could compute known zero/one bits based on the input.
        break;
      }
      case Intrinsic::x86_mmx_pmovmskb:
      case Intrinsic::x86_sse_movmsk_ps:
      case Intrinsic::x86_sse2_movmsk_pd:
      case Intrinsic::x86_sse2_pmovmskb_128:
      case Intrinsic::x86_avx_movmsk_ps_256:
      case Intrinsic::x86_avx_movmsk_pd_256:
      case Intrinsic::x86_avx2_pmovmskb: {
        // MOVMSK copies the vector elements' sign bits to the low bits
        // and zeros the high bits.
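        // e.g. movmskps on a <4 x float> packs four sign bits into bits 0-3
        // of the i32 result; bits 4-31 are always zero.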
        unsigned ArgWidth;
        if (II->getIntrinsicID() == Intrinsic::x86_mmx_pmovmskb) {
          ArgWidth = 8; // Arg is x86_mmx, but treated as <8 x i8>.
        } else {
          auto Arg = II->getArgOperand(0);
          auto ArgType = cast<VectorType>(Arg->getType());
          ArgWidth = ArgType->getNumElements();
        }

        // If we don't need any of low bits then return zero,
        // we know that DemandedMask is non-zero already.
        APInt DemandedElts = DemandedMask.zextOrTrunc(ArgWidth);
        if (DemandedElts.isNullValue())
          return ConstantInt::getNullValue(VTy);

        // We know that the upper bits are set to zero.
        Known.Zero.setBitsFrom(ArgWidth);
        return nullptr;
      }
      case Intrinsic::x86_sse42_crc32_64_64:
        Known.Zero.setBitsFrom(32);
        return nullptr;
      }
    }
    computeKnownBits(V, Known, Depth, CxtI);
    break;
  }

  // If the client is only demanding bits that we know, return the known
  // constant.
  if (DemandedMask.isSubsetOf(Known.Zero|Known.One))
    return Constant::getIntegerValue(VTy, Known.One);
  return nullptr;
}

/// Helper routine of SimplifyDemandedUseBits. It computes Known
/// bits. It also tries to handle simplifications that can be done based on
/// DemandedMask, but without modifying the Instruction.
Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I,
                                                     const APInt &DemandedMask,
                                                     KnownBits &Known,
                                                     unsigned Depth,
                                                     Instruction *CxtI) {
  unsigned BitWidth = DemandedMask.getBitWidth();
  Type *ITy = I->getType();

  KnownBits LHSKnown(BitWidth);
  KnownBits RHSKnown(BitWidth);

  // Despite the fact that we can't simplify this instruction in all of its
  // users' contexts, we can at least compute the known bits, and we can
  // do simplifications that apply to *just* the one user if we know that
  // this instruction has a simpler value in that context.
  switch (I->getOpcode()) {
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
    computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1,
                     CxtI);

    // Output known-0 bits are known to be clear if zero in either the
    // LHS | RHS.
    APInt IKnownZero = RHSKnown.Zero | LHSKnown.Zero;
    // Output known-1 bits are only known if set in both the LHS & RHS.
    APInt IKnownOne = RHSKnown.One & LHSKnown.One;

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
      return Constant::getIntegerValue(ITy, IKnownOne);

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and' in this
    // context.
    if (DemandedMask.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
      return I->getOperand(1);

    Known.Zero = std::move(IKnownZero);
    Known.One = std::move(IKnownOne);
    break;
  }
  case Instruction::Or: {
    // We can simplify (X|Y) -> X or Y in the user's context if we know that
    // only bits from X or Y are demanded.

    // If either the LHS or the RHS are One, the result is One.
    computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
    computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1,
                     CxtI);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
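    // e.g. if RHSKnown.One == 0b0011, the low two bits of the 'or' are known
    // one no matter what the LHS holds.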
    APInt IKnownZero = RHSKnown.Zero & LHSKnown.Zero;
    // Output known-1 bits are known to be set if set in either the LHS | RHS.
    APInt IKnownOne = RHSKnown.One | LHSKnown.One;

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
      return Constant::getIntegerValue(ITy, IKnownOne);

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or' in this
    // context.
    if (DemandedMask.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
      return I->getOperand(1);

    Known.Zero = std::move(IKnownZero);
    Known.One = std::move(IKnownOne);
    break;
  }
  case Instruction::Xor: {
    // We can simplify (X^Y) -> X or Y in the user's context if we know that
    // only bits from X or Y are demanded.

    computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
    computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1,
                     CxtI);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt IKnownZero = (RHSKnown.Zero & LHSKnown.Zero) |
                       (RHSKnown.One & LHSKnown.One);
    // Output known-1 bits are known to be set if set in only one of the LHS,
    // RHS.
    APInt IKnownOne = (RHSKnown.Zero & LHSKnown.One) |
                      (RHSKnown.One & LHSKnown.Zero);

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
      return Constant::getIntegerValue(ITy, IKnownOne);

    // If all of the demanded bits are known zero on one side, return the
    // other.
    if (DemandedMask.isSubsetOf(RHSKnown.Zero))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(LHSKnown.Zero))
      return I->getOperand(1);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    Known.Zero = std::move(IKnownZero);
    // Output known-1 bits are known to be set if set in only one of the LHS,
    // RHS.
    Known.One = std::move(IKnownOne);
    break;
  }
  default:
    // Compute the Known bits to simplify things downstream.
    computeKnownBits(I, Known, Depth, CxtI);

    // If this user is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(Known.Zero|Known.One))
      return Constant::getIntegerValue(ITy, Known.One);

    break;
  }

  return nullptr;
}


/// Helper routine of SimplifyDemandedUseBits. It tries to simplify
/// "E1 = (X lsr C1) << C2", where the C1 and C2 are constant, into
/// "E2 = X << (C2 - C1)" or "E2 = X >> (C1 - C2)", depending on the sign
/// of "C2-C1".
///
/// Suppose E1 and E2 are generally different in bits S={bm, bm+1,
/// ..., bn}, without considering the specific value X is holding.
/// This transformation is legal iff one of the following conditions holds:
///  1) All the bits in S are 0, in which case E1 == E2.
///  2) We don't care about those bits in S, per the input DemandedMask.
///  3) Combination of 1) and 2): some bits in S are 0, and we don't care
///     about the rest.
///
/// Currently we only test condition 2).
///
/// As with SimplifyDemandedUseBits, it returns NULL if the simplification was
/// not successful.
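///
/// e.g. for i8, E1 = (X lshr 1) shl 4 and E2 = X shl 3 agree on every bit
/// except bit 3 (zero in E1, X's bit 0 in E2), so the rewrite is legal
/// whenever bit 3 is not demanded.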
Value *
InstCombiner::simplifyShrShlDemandedBits(Instruction *Shr, const APInt &ShrOp1,
                                         Instruction *Shl, const APInt &ShlOp1,
                                         const APInt &DemandedMask,
                                         KnownBits &Known) {
  if (!ShlOp1 || !ShrOp1)
    return nullptr; // No-op.

  Value *VarX = Shr->getOperand(0);
  Type *Ty = VarX->getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();
  if (ShlOp1.uge(BitWidth) || ShrOp1.uge(BitWidth))
    return nullptr; // Undef.

  unsigned ShlAmt = ShlOp1.getZExtValue();
  unsigned ShrAmt = ShrOp1.getZExtValue();

  Known.One.clearAllBits();
  Known.Zero.setLowBits(ShlAmt - 1);
  Known.Zero &= DemandedMask;

  APInt BitMask1(APInt::getAllOnesValue(BitWidth));
  APInt BitMask2(APInt::getAllOnesValue(BitWidth));

  bool isLshr = (Shr->getOpcode() == Instruction::LShr);
  BitMask1 = isLshr ? (BitMask1.lshr(ShrAmt) << ShlAmt) :
                      (BitMask1.ashr(ShrAmt) << ShlAmt);

  if (ShrAmt <= ShlAmt) {
    BitMask2 <<= (ShlAmt - ShrAmt);
  } else {
    BitMask2 = isLshr ? BitMask2.lshr(ShrAmt - ShlAmt):
                        BitMask2.ashr(ShrAmt - ShlAmt);
  }

  // Check if condition-2 (see the comment to this function) is satisfied.
  if ((BitMask1 & DemandedMask) == (BitMask2 & DemandedMask)) {
    if (ShrAmt == ShlAmt)
      return VarX;

    if (!Shr->hasOneUse())
      return nullptr;

    BinaryOperator *New;
    if (ShrAmt < ShlAmt) {
      Constant *Amt = ConstantInt::get(VarX->getType(), ShlAmt - ShrAmt);
      New = BinaryOperator::CreateShl(VarX, Amt);
      BinaryOperator *Orig = cast<BinaryOperator>(Shl);
      New->setHasNoSignedWrap(Orig->hasNoSignedWrap());
      New->setHasNoUnsignedWrap(Orig->hasNoUnsignedWrap());
    } else {
      Constant *Amt = ConstantInt::get(VarX->getType(), ShrAmt - ShlAmt);
      New = isLshr ? BinaryOperator::CreateLShr(VarX, Amt) :
                     BinaryOperator::CreateAShr(VarX, Amt);
      if (cast<BinaryOperator>(Shr)->isExact())
        New->setIsExact(true);
    }

    return InsertNewInstWith(New, *Shl);
  }

  return nullptr;
}

/// Implement SimplifyDemandedVectorElts for amdgcn buffer and image
/// intrinsics.
Value *InstCombiner::simplifyAMDGCNMemoryIntrinsicDemanded(IntrinsicInst *II,
                                                           APInt DemandedElts,
                                                           int DMaskIdx) {
  unsigned VWidth = II->getType()->getVectorNumElements();
  if (VWidth == 1)
    return nullptr;

  ConstantInt *NewDMask = nullptr;

  if (DMaskIdx < 0) {
    // Pretend that a prefix of elements is demanded to simplify the code
    // below.
    DemandedElts = (1 << DemandedElts.getActiveBits()) - 1;
  } else {
    ConstantInt *DMask = dyn_cast<ConstantInt>(II->getArgOperand(DMaskIdx));
    if (!DMask)
      return nullptr; // non-constant dmask is not supported by codegen

    unsigned DMaskVal = DMask->getZExtValue() & 0xf;

    // Mask off values that are undefined because the dmask doesn't cover them
    DemandedElts &= (1 << countPopulation(DMaskVal)) - 1;

    unsigned NewDMaskVal = 0;
    unsigned OrigLoadIdx = 0;
    for (unsigned SrcIdx = 0; SrcIdx < 4; ++SrcIdx) {
      const unsigned Bit = 1 << SrcIdx;
      if (!!(DMaskVal & Bit)) {
        if (!!DemandedElts[OrigLoadIdx])
          NewDMaskVal |= Bit;
        OrigLoadIdx++;
      }
    }

    if (DMaskVal != NewDMaskVal)
      NewDMask = ConstantInt::get(DMask->getType(), NewDMaskVal);
  }

  // TODO: Handle 3 vectors when supported in code gen.
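  // e.g. if only elements 0 and 2 of a <4 x float> load are demanded, the
  // replacement built below is a <2 x float> load whose results are shuffled
  // back into the original vector width.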
  unsigned NewNumElts = PowerOf2Ceil(DemandedElts.countPopulation());
  if (!NewNumElts)
    return UndefValue::get(II->getType());

  if (NewNumElts >= VWidth && DemandedElts.isMask()) {
    if (NewDMask)
      II->setArgOperand(DMaskIdx, NewDMask);
    return nullptr;
  }

  // Determine the overload types of the original intrinsic.
  auto IID = II->getIntrinsicID();
  SmallVector<Intrinsic::IITDescriptor, 16> Table;
  getIntrinsicInfoTableEntries(IID, Table);
  ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;

  FunctionType *FTy = II->getCalledFunction()->getFunctionType();
  SmallVector<Type *, 6> OverloadTys;
  Intrinsic::matchIntrinsicType(FTy->getReturnType(), TableRef, OverloadTys);
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
    Intrinsic::matchIntrinsicType(FTy->getParamType(i), TableRef, OverloadTys);

  // Get the new return type overload of the intrinsic.
  Module *M = II->getParent()->getParent()->getParent();
  Type *EltTy = II->getType()->getVectorElementType();
  Type *NewTy = (NewNumElts == 1) ? EltTy : VectorType::get(EltTy, NewNumElts);

  OverloadTys[0] = NewTy;
  Function *NewIntrin = Intrinsic::getDeclaration(M, IID, OverloadTys);

  SmallVector<Value *, 16> Args;
  for (unsigned I = 0, E = II->getNumArgOperands(); I != E; ++I)
    Args.push_back(II->getArgOperand(I));

  if (NewDMask)
    Args[DMaskIdx] = NewDMask;

  IRBuilderBase::InsertPointGuard Guard(Builder);
  Builder.SetInsertPoint(II);

  CallInst *NewCall = Builder.CreateCall(NewIntrin, Args);
  NewCall->takeName(II);
  NewCall->copyMetadata(*II);

  if (NewNumElts == 1) {
    return Builder.CreateInsertElement(UndefValue::get(II->getType()), NewCall,
                                       DemandedElts.countTrailingZeros());
  }

  SmallVector<uint32_t, 8> EltMask;
  unsigned NewLoadIdx = 0;
  for (unsigned OrigLoadIdx = 0; OrigLoadIdx < VWidth; ++OrigLoadIdx) {
    if (!!DemandedElts[OrigLoadIdx])
      EltMask.push_back(NewLoadIdx++);
    else
      EltMask.push_back(NewNumElts);
  }

  Value *Shuffle =
      Builder.CreateShuffleVector(NewCall, UndefValue::get(NewTy), EltMask);

  return Shuffle;
}

/// The specified value produces a vector with any number of elements.
/// DemandedElts contains the set of elements that are actually used by the
/// caller. This method analyzes which elements of the operand are undef and
/// returns that information in UndefElts.
///
/// If the information about demanded elements can be used to simplify the
/// operation, the operation is simplified and the resultant value is
/// returned. This returns null if no change was made.
Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                                APInt &UndefElts,
                                                unsigned Depth) {
  unsigned VWidth = V->getType()->getVectorNumElements();
  APInt EltMask(APInt::getAllOnesValue(VWidth));
  assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");

  if (isa<UndefValue>(V)) {
    // If the entire vector is undefined, just return this info.
    UndefElts = EltMask;
    return nullptr;
  }

  if (DemandedElts.isNullValue()) { // If nothing is demanded, provide undef.
    UndefElts = EltMask;
    return UndefValue::get(V->getType());
  }

  UndefElts = 0;

  // Handle ConstantAggregateZero, ConstantVector, ConstantDataSequential.
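  // e.g. <4 x i32> <i32 1, i32 2, i32 3, i32 4> with only element 0 demanded
  // becomes <i32 1, i32 undef, i32 undef, i32 undef>, UndefElts == 0b1110.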
  if (Constant *C = dyn_cast<Constant>(V)) {
    // Check if this is identity. If so, return 0 since we are not simplifying
    // anything.
    if (DemandedElts.isAllOnesValue())
      return nullptr;

    Type *EltTy = cast<VectorType>(V->getType())->getElementType();
    Constant *Undef = UndefValue::get(EltTy);

    SmallVector<Constant*, 16> Elts;
    for (unsigned i = 0; i != VWidth; ++i) {
      if (!DemandedElts[i]) {   // If not demanded, set to undef.
        Elts.push_back(Undef);
        UndefElts.setBit(i);
        continue;
      }

      Constant *Elt = C->getAggregateElement(i);
      if (!Elt) return nullptr;

      if (isa<UndefValue>(Elt)) {   // Already undef.
        Elts.push_back(Undef);
        UndefElts.setBit(i);
      } else {                      // Otherwise, defined.
        Elts.push_back(Elt);
      }
    }

    // If we changed the constant, return it.
    Constant *NewCV = ConstantVector::get(Elts);
    return NewCV != C ? NewCV : nullptr;
  }

  // Limit search depth.
  if (Depth == 10)
    return nullptr;

  // If multiple users are using the root value, proceed with
  // simplification conservatively assuming that all elements
  // are needed.
  if (!V->hasOneUse()) {
    // Quit if we find multiple users of a non-root value though.
    // They'll be handled when it's their turn to be visited by
    // the main instcombine process.
    if (Depth != 0)
      // TODO: Just compute the UndefElts information recursively.
      return nullptr;

    // Conservatively assume that all elements are needed.
    DemandedElts = EltMask;
  }

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr;        // Only analyze instructions.

  bool MadeChange = false;
  APInt UndefElts2(VWidth, 0);
  APInt UndefElts3(VWidth, 0);
  Value *TmpV;
  switch (I->getOpcode()) {
  default: break;

  case Instruction::InsertElement: {
    // If this is a variable index, we don't know which element it overwrites,
    // so demand exactly the same input as we produce.
    ConstantInt *Idx = dyn_cast<ConstantInt>(I->getOperand(2));
    if (!Idx) {
      // Note that we can't propagate undef elt info, because we don't know
      // which elt is getting updated.
      TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
      break;
    }

    // The element inserted overwrites whatever was there, so the input
    // demanded set is simpler than the output set.
    unsigned IdxNo = Idx->getZExtValue();
    APInt PreInsertDemandedElts = DemandedElts;
    if (IdxNo < VWidth)
      PreInsertDemandedElts.clearBit(IdxNo);
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), PreInsertDemandedElts,
                                      UndefElts, Depth + 1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }

    // If this is inserting an element that isn't demanded, remove this
    // insertelement.
    if (IdxNo >= VWidth || !DemandedElts[IdxNo]) {
      Worklist.Add(I);
      return I->getOperand(0);
    }

    // The inserted element is defined.
    UndefElts.clearBit(IdxNo);
    break;
  }
  case Instruction::ShuffleVector: {
    ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
    unsigned LHSVWidth =
      Shuffle->getOperand(0)->getType()->getVectorNumElements();
    APInt LeftDemanded(LHSVWidth, 0), RightDemanded(LHSVWidth, 0);
    for (unsigned i = 0; i < VWidth; i++) {
      if (DemandedElts[i]) {
        unsigned MaskVal = Shuffle->getMaskValue(i);
        if (MaskVal != -1u) {
          assert(MaskVal < LHSVWidth * 2 &&
                 "shufflevector mask index out of range!");
          if (MaskVal < LHSVWidth)
            LeftDemanded.setBit(MaskVal);
          else
            RightDemanded.setBit(MaskVal - LHSVWidth);
        }
      }
    }

    APInt LHSUndefElts(LHSVWidth, 0);
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), LeftDemanded,
                                      LHSUndefElts, Depth + 1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }

    APInt RHSUndefElts(LHSVWidth, 0);
    TmpV = SimplifyDemandedVectorElts(I->getOperand(1), RightDemanded,
                                      RHSUndefElts, Depth + 1);
    if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }

    bool NewUndefElts = false;
    unsigned LHSIdx = -1u, LHSValIdx = -1u;
    unsigned RHSIdx = -1u, RHSValIdx = -1u;
    bool LHSUniform = true;
    bool RHSUniform = true;
    for (unsigned i = 0; i < VWidth; i++) {
      unsigned MaskVal = Shuffle->getMaskValue(i);
      if (MaskVal == -1u) {
        UndefElts.setBit(i);
      } else if (!DemandedElts[i]) {
        NewUndefElts = true;
        UndefElts.setBit(i);
      } else if (MaskVal < LHSVWidth) {
        if (LHSUndefElts[MaskVal]) {
          NewUndefElts = true;
          UndefElts.setBit(i);
        } else {
          LHSIdx = LHSIdx == -1u ? i : LHSVWidth;
          LHSValIdx = LHSValIdx == -1u ? MaskVal : LHSVWidth;
          LHSUniform = LHSUniform && (MaskVal == i);
        }
      } else {
        if (RHSUndefElts[MaskVal - LHSVWidth]) {
          NewUndefElts = true;
          UndefElts.setBit(i);
        } else {
          RHSIdx = RHSIdx == -1u ? i : LHSVWidth;
          RHSValIdx = RHSValIdx == -1u ? MaskVal - LHSVWidth : LHSVWidth;
          RHSUniform = RHSUniform && (MaskVal - LHSVWidth == i);
        }
      }
    }

    // Try to transform a shuffle with a constant vector, where only a single
    // element of that constant is used, into a single insertelement
    // instruction.
    // shufflevector V, C, <v1, v2, .., ci, .., vm> ->
    // insertelement V, C[ci], ci-n
    if (LHSVWidth == Shuffle->getType()->getNumElements()) {
      Value *Op = nullptr;
      Constant *Value = nullptr;
      unsigned Idx = -1u;

      // Find constant vector with the single element in shuffle (LHS or RHS).
      if (LHSIdx < LHSVWidth && RHSUniform) {
        if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(0))) {
          Op = Shuffle->getOperand(1);
          Value = CV->getOperand(LHSValIdx);
          Idx = LHSIdx;
        }
      }
      if (RHSIdx < LHSVWidth && LHSUniform) {
        if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(1))) {
          Op = Shuffle->getOperand(0);
          Value = CV->getOperand(RHSValIdx);
          Idx = RHSIdx;
        }
      }
      // Found constant vector with single element - convert to insertelement.
      if (Op && Value) {
        Instruction *New = InsertElementInst::Create(
            Op, Value, ConstantInt::get(Type::getInt32Ty(I->getContext()), Idx),
            Shuffle->getName());
        InsertNewInstWith(New, *Shuffle);
        return New;
      }
    }
    if (NewUndefElts) {
      // Add additional discovered undefs.
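      // Rewrite the shuffle mask in place so every lane we just proved undef
      // becomes an explicit undef mask element for later passes as well.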
      SmallVector<Constant*, 16> Elts;
      for (unsigned i = 0; i < VWidth; ++i) {
        if (UndefElts[i])
          Elts.push_back(UndefValue::get(Type::getInt32Ty(I->getContext())));
        else
          Elts.push_back(ConstantInt::get(Type::getInt32Ty(I->getContext()),
                                          Shuffle->getMaskValue(i)));
      }
      I->setOperand(2, ConstantVector::get(Elts));
      MadeChange = true;
    }
    break;
  }
  case Instruction::Select: {
    APInt LeftDemanded(DemandedElts), RightDemanded(DemandedElts);
    if (ConstantVector* CV = dyn_cast<ConstantVector>(I->getOperand(0))) {
      for (unsigned i = 0; i < VWidth; i++) {
        Constant *CElt = CV->getAggregateElement(i);
        // Method isNullValue always returns false when called on a
        // ConstantExpr. If CElt is a ConstantExpr then skip it in order to
        // avoid propagating incorrect information.
        if (isa<ConstantExpr>(CElt))
          continue;
        if (CElt->isNullValue())
          LeftDemanded.clearBit(i);
        else
          RightDemanded.clearBit(i);
      }
    }

    TmpV = SimplifyDemandedVectorElts(I->getOperand(1), LeftDemanded, UndefElts,
                                      Depth + 1);
    if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }

    TmpV = SimplifyDemandedVectorElts(I->getOperand(2), RightDemanded,
                                      UndefElts2, Depth + 1);
    if (TmpV) { I->setOperand(2, TmpV); MadeChange = true; }

    // Output elements are undefined if both are undefined.
    UndefElts &= UndefElts2;
    break;
  }
  case Instruction::BitCast: {
    // Vector->vector casts only.
    VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
    if (!VTy) break;
    unsigned InVWidth = VTy->getNumElements();
    APInt InputDemandedElts(InVWidth, 0);
    UndefElts2 = APInt(InVWidth, 0);
    unsigned Ratio;

    if (VWidth == InVWidth) {
      // If we are converting from <4 x i32> -> <4 x f32>, we demand the same
      // elements as are demanded of us.
      Ratio = 1;
      InputDemandedElts = DemandedElts;
    } else if ((VWidth % InVWidth) == 0) {
      // If the number of elements in the output is a multiple of the number
      // of elements in the input then an input element is live if any of the
      // corresponding output elements are live.
      Ratio = VWidth / InVWidth;
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
        if (DemandedElts[OutIdx])
          InputDemandedElts.setBit(OutIdx / Ratio);
    } else if ((InVWidth % VWidth) == 0) {
      // If the number of elements in the input is a multiple of the number of
      // elements in the output then an input element is live if the
      // corresponding output element is live.
      Ratio = InVWidth / VWidth;
      for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
        if (DemandedElts[InIdx / Ratio])
          InputDemandedElts.setBit(InIdx);
    } else {
      // Unsupported so far.
      break;
    }

    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), InputDemandedElts,
                                      UndefElts2, Depth + 1);
    if (TmpV) {
      I->setOperand(0, TmpV);
      MadeChange = true;
    }

    if (VWidth == InVWidth) {
      UndefElts = UndefElts2;
    } else if ((VWidth % InVWidth) == 0) {
      // If the number of elements in the output is a multiple of the number
      // of elements in the input then an output element is undef if the
      // corresponding input element is undef.
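      // e.g. bitcast <2 x i64> to <4 x i32> (Ratio == 2): if input element 1
      // is undef, output elements 2 and 3 are undef.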
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
        if (UndefElts2[OutIdx / Ratio])
          UndefElts.setBit(OutIdx);
    } else if ((InVWidth % VWidth) == 0) {
      // If the number of elements in the input is a multiple of the number of
      // elements in the output then an output element is undef if all of the
      // corresponding input elements are undef.
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) {
        APInt SubUndef = UndefElts2.lshr(OutIdx * Ratio).zextOrTrunc(Ratio);
        if (SubUndef.countPopulation() == Ratio)
          UndefElts.setBit(OutIdx);
      }
    } else {
      llvm_unreachable("Unimp");
    }
    break;
  }
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    // div/rem demand all inputs, because they don't want divide by zero.
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts, UndefElts,
                                      Depth + 1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
    TmpV = SimplifyDemandedVectorElts(I->getOperand(1), DemandedElts,
                                      UndefElts2, Depth + 1);
    if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }

    // Output elements are undefined if both are undefined. Consider things
    // like undef&0. The result is known zero, not undef.
    UndefElts &= UndefElts2;
    break;
  case Instruction::FPTrunc:
  case Instruction::FPExt:
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts, UndefElts,
                                      Depth + 1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
    break;

  case Instruction::Call: {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
    if (!II) break;
    switch (II->getIntrinsicID()) {
    case Intrinsic::x86_xop_vfrcz_ss:
    case Intrinsic::x86_xop_vfrcz_sd:
      // The instructions for these intrinsics are specified to zero the upper
      // bits, not pass them through like other scalar intrinsics. So we
      // shouldn't just use Arg0 if DemandedElts[0] is clear like we do for
      // other intrinsics. Instead we should return a zero vector.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return ConstantAggregateZero::get(II->getType());
      }

      // Only the lower element is used.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // Only the lower element is undefined. The high elements are zero.
      UndefElts = UndefElts[0];
      break;

    // Unary scalar-as-vector operations that work column-wise.
    case Intrinsic::x86_sse_rcp_ss:
    case Intrinsic::x86_sse_rsqrt_ss:
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }
      // TODO: If only low elt lower SQRT to FSQRT (with rounding/exceptions
      // checks).
      break;

    // Binary scalar-as-vector operations that work column-wise. The high
    // elements come from operand 0. The low element is a function of both
    // operands.
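    // e.g. min_ss(<a0,a1,a2,a3>, <b0,b1,b2,b3>) == <min(a0,b0), a1, a2, a3>,
    // so only the low element of operand 1 is ever demanded.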
    case Intrinsic::x86_sse_min_ss:
    case Intrinsic::x86_sse_max_ss:
    case Intrinsic::x86_sse_cmp_ss:
    case Intrinsic::x86_sse2_min_sd:
    case Intrinsic::x86_sse2_max_sd:
    case Intrinsic::x86_sse2_cmp_sd: {
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }

      // Only lower element is used for operand 1.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }

      // Lower element is undefined if both lower elements are undefined.
      // Consider things like undef&0. The result is known zero, not undef.
      if (!UndefElts2[0])
        UndefElts.clearBit(0);

      break;
    }

    // Binary scalar-as-vector operations that work column-wise. The high
    // elements come from operand 0 and the low element comes from operand 1.
    case Intrinsic::x86_sse41_round_ss:
    case Intrinsic::x86_sse41_round_sd: {
      // Don't use the low element of operand 0.
      APInt DemandedElts2 = DemandedElts;
      DemandedElts2.clearBit(0);
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts2,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }

      // Only lower element is used for operand 1.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }

      // Take the high undef elements from operand 0 and take the lower
      // element from operand 1.
      UndefElts.clearBit(0);
      UndefElts |= UndefElts2[0];
      break;
    }

    // Three input scalar-as-vector operations that work column-wise. The high
    // elements come from operand 0 and the low element is a function of all
    // three inputs.
    case Intrinsic::x86_avx512_mask_add_ss_round:
    case Intrinsic::x86_avx512_mask_div_ss_round:
    case Intrinsic::x86_avx512_mask_mul_ss_round:
    case Intrinsic::x86_avx512_mask_sub_ss_round:
    case Intrinsic::x86_avx512_mask_max_ss_round:
    case Intrinsic::x86_avx512_mask_min_ss_round:
    case Intrinsic::x86_avx512_mask_add_sd_round:
    case Intrinsic::x86_avx512_mask_div_sd_round:
    case Intrinsic::x86_avx512_mask_mul_sd_round:
    case Intrinsic::x86_avx512_mask_sub_sd_round:
    case Intrinsic::x86_avx512_mask_max_sd_round:
    case Intrinsic::x86_avx512_mask_min_sd_round:
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }

      // Only the lower element is used for operands 1 and 2.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(2), DemandedElts,
                                        UndefElts3, Depth + 1);
      if (TmpV) { II->setArgOperand(2, TmpV); MadeChange = true; }

      // Lower element is undefined if all three lower elements are undefined.
      // Consider things like undef&0. The result is known zero, not undef.
      if (!UndefElts2[0] || !UndefElts3[0])
        UndefElts.clearBit(0);

      break;

    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packuswb_128:
    case Intrinsic::x86_sse41_packusdw:
    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packusdw:
    case Intrinsic::x86_avx2_packuswb:
    case Intrinsic::x86_avx512_packssdw_512:
    case Intrinsic::x86_avx512_packsswb_512:
    case Intrinsic::x86_avx512_packusdw_512:
    case Intrinsic::x86_avx512_packuswb_512: {
      auto *Ty0 = II->getArgOperand(0)->getType();
      unsigned InnerVWidth = Ty0->getVectorNumElements();
      assert(VWidth == (InnerVWidth * 2) && "Unexpected input size");

      unsigned NumLanes = Ty0->getPrimitiveSizeInBits() / 128;
      unsigned VWidthPerLane = VWidth / NumLanes;
      unsigned InnerVWidthPerLane = InnerVWidth / NumLanes;

      // Per lane, pack the elements of the first input and then the second.
      // e.g.
      // v8i16 PACK(v4i32 X, v4i32 Y) - (X[0..3],Y[0..3])
      // v32i8 PACK(v16i16 X, v16i16 Y) - (X[0..7],Y[0..7]),(X[8..15],Y[8..15])
      for (int OpNum = 0; OpNum != 2; ++OpNum) {
        APInt OpDemandedElts(InnerVWidth, 0);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          unsigned LaneIdx = Lane * VWidthPerLane;
          for (unsigned Elt = 0; Elt != InnerVWidthPerLane; ++Elt) {
            unsigned Idx = LaneIdx + Elt + InnerVWidthPerLane * OpNum;
            if (DemandedElts[Idx])
              OpDemandedElts.setBit((Lane * InnerVWidthPerLane) + Elt);
          }
        }

        // Demand elements from the operand.
        auto *Op = II->getArgOperand(OpNum);
        APInt OpUndefElts(InnerVWidth, 0);
        TmpV = SimplifyDemandedVectorElts(Op, OpDemandedElts, OpUndefElts,
                                          Depth + 1);
        if (TmpV) {
          II->setArgOperand(OpNum, TmpV);
          MadeChange = true;
        }

        // Pack the operand's UNDEF elements, one lane at a time.
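        // Inverse of the demand loop above: shift each lane's undef bits into
        // the slot this operand occupies in the packed result.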
        OpUndefElts = OpUndefElts.zext(VWidth);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          APInt LaneElts = OpUndefElts.lshr(InnerVWidthPerLane * Lane);
          LaneElts = LaneElts.getLoBits(InnerVWidthPerLane);
          LaneElts <<= InnerVWidthPerLane * (2 * Lane + OpNum);
          UndefElts |= LaneElts;
        }
      }
      break;
    }

    // PSHUFB
    case Intrinsic::x86_ssse3_pshuf_b_128:
    case Intrinsic::x86_avx2_pshuf_b:
    case Intrinsic::x86_avx512_pshuf_b_512:
    // PERMILVAR
    case Intrinsic::x86_avx_vpermilvar_ps:
    case Intrinsic::x86_avx_vpermilvar_ps_256:
    case Intrinsic::x86_avx512_vpermilvar_ps_512:
    case Intrinsic::x86_avx_vpermilvar_pd:
    case Intrinsic::x86_avx_vpermilvar_pd_256:
    case Intrinsic::x86_avx512_vpermilvar_pd_512:
    // PERMV
    case Intrinsic::x86_avx2_permd:
    case Intrinsic::x86_avx2_permps: {
      Value *Op1 = II->getArgOperand(1);
      TmpV = SimplifyDemandedVectorElts(Op1, DemandedElts, UndefElts,
                                        Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }
      break;
    }

    // SSE4A instructions leave the upper 64-bits of the 128-bit result
    // in an undefined state.
    case Intrinsic::x86_sse4a_extrq:
    case Intrinsic::x86_sse4a_extrqi:
    case Intrinsic::x86_sse4a_insertq:
    case Intrinsic::x86_sse4a_insertqi:
      UndefElts.setHighBits(VWidth / 2);
      break;
    case Intrinsic::amdgcn_buffer_load:
    case Intrinsic::amdgcn_buffer_load_format:
      return simplifyAMDGCNMemoryIntrinsicDemanded(II, DemandedElts);
    default: {
      if (getAMDGPUImageDMaskIntrinsic(II->getIntrinsicID()))
        return simplifyAMDGCNMemoryIntrinsicDemanded(II, DemandedElts, 0);

      break;
    }
    }
    break;
  }
  }
  return MadeChange ? I : nullptr;
}