//===- InstCombineInternal.h - InstCombine pass internals -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file provides internal interfaces used to implement the InstCombine.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
#define LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <cstdint>

#define DEBUG_TYPE "instcombine"

namespace llvm {

class APInt;
class AssumptionCache;
class CallSite;
class DataLayout;
class DominatorTree;
class GEPOperator;
class GlobalVariable;
class LoopInfo;
class OptimizationRemarkEmitter;
class TargetLibraryInfo;
class User;

/// Assign a complexity or rank value to LLVM Values. This is used to reduce
/// the amount of pattern matching needed for compares and commutative
/// instructions. For example, if we have:
///      icmp ugt X, Constant
/// or
///      xor (add X, Constant), cast Z
///
/// We do not have to consider the commuted variants of these patterns because
/// canonicalization based on complexity guarantees the above ordering.
///
/// This routine maps IR values to various complexity ranks:
///   0 -> undef
///   1 -> Constants
///   2 -> Other non-instructions
///   3 -> Arguments
///   4 -> Cast and (f)neg/not instructions
///   5 -> Other instructions
static inline unsigned getComplexity(Value *V) {
  if (isa<Instruction>(V)) {
    if (isa<CastInst>(V) || BinaryOperator::isNeg(V) ||
        BinaryOperator::isFNeg(V) || BinaryOperator::isNot(V))
      return 4;
    return 5;
  }
  if (isa<Argument>(V))
    return 3;
  return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
}
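
// Illustrative sketch (hypothetical helper, not part of the original header):
// how the ranking above is typically consumed. A commutative instruction is
// canonicalized so that the operand with the higher complexity rank comes
// first, which is why transforms only need to match one operand order.
static inline bool exampleCanonicalizeCommutativeOperands(BinaryOperator &BO) {
  if (!BO.isCommutative())
    return false;
  if (getComplexity(BO.getOperand(0)) >= getComplexity(BO.getOperand(1)))
    return false;
  BO.swapOperands(); // Put the more complex value on the left.
  return true;
}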

/// Predicate canonicalization reduces the number of patterns that need to be
/// matched by other transforms. For example, we may swap the operands of a
/// conditional branch or select to create a compare with a canonical
/// (inverted) predicate which is then more likely to be matched with other
/// values.
static inline bool isCanonicalPredicate(CmpInst::Predicate Pred) {
  switch (Pred) {
  case CmpInst::ICMP_NE:
  case CmpInst::ICMP_ULE:
  case CmpInst::ICMP_SLE:
  case CmpInst::ICMP_UGE:
  case CmpInst::ICMP_SGE:
  // TODO: There are 16 FCMP predicates. Should others be (not) canonical?
  case CmpInst::FCMP_ONE:
  case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_OGE:
    return false;
  default:
    return true;
  }
}

/// Return the source operand of a potentially bitcasted value while optionally
/// checking if it has one use. If there is no bitcast or the one use check is
/// not met, return the input value itself.
static inline Value *peekThroughBitcast(Value *V, bool OneUseOnly = false) {
  if (auto *BitCast = dyn_cast<BitCastInst>(V))
    if (!OneUseOnly || BitCast->hasOneUse())
      return BitCast->getOperand(0);

  // V is not a bitcast or V has more than one use and OneUseOnly is true.
  return V;
}

/// Add one to a Constant.
static inline Constant *AddOne(Constant *C) {
  return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
}

/// Subtract one from a Constant.
static inline Constant *SubOne(Constant *C) {
  return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
}

/// Return true if the specified value is free to invert (apply ~ to).
/// This happens in cases where the ~ can be eliminated. If WillInvertAllUses
/// is true, work under the assumption that the caller intends to remove all
/// uses of V and only keep uses of ~V.
static inline bool IsFreeToInvert(Value *V, bool WillInvertAllUses) {
  // ~(~(X)) -> X.
  if (BinaryOperator::isNot(V))
    return true;

  // Constants can be considered to be not'ed values.
  if (isa<ConstantInt>(V))
    return true;

  // A vector of constant integers can be inverted easily.
  if (V->getType()->isVectorTy() && isa<Constant>(V)) {
    unsigned NumElts = V->getType()->getVectorNumElements();
    for (unsigned i = 0; i != NumElts; ++i) {
      Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
      if (!Elt)
        return false;

      if (isa<UndefValue>(Elt))
        continue;

      if (!isa<ConstantInt>(Elt))
        return false;
    }
    return true;
  }

  // Compares can be inverted if all of their uses are being modified to use
  // the ~V.
  if (isa<CmpInst>(V))
    return WillInvertAllUses;

  // If `V` is of the form `A + Constant` then `-1 - V` can be folded into
  // `(-1 - Constant) - A` if we are willing to invert all of the uses.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V))
    if (BO->getOpcode() == Instruction::Add ||
        BO->getOpcode() == Instruction::Sub)
      if (isa<Constant>(BO->getOperand(0)) || isa<Constant>(BO->getOperand(1)))
        return WillInvertAllUses;

  return false;
}
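
// Illustrative sketch (hypothetical helper, not part of the original header):
// a typical query built on IsFreeToInvert. De Morgan style rewrites such as
// ~(A & B) -> ~A | ~B are only profitable when the inverted operands come for
// free, i.e. no extra 'xor ..., -1' instructions need to be created.
static inline bool exampleDeMorganIsProfitable(Value *A, Value *B) {
  return IsFreeToInvert(A, A->hasOneUse()) &&
         IsFreeToInvert(B, B->hasOneUse());
}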

/// Specific patterns of overflow check idioms that we match.
enum OverflowCheckFlavor {
  OCF_UNSIGNED_ADD,
  OCF_SIGNED_ADD,
  OCF_UNSIGNED_SUB,
  OCF_SIGNED_SUB,
  OCF_UNSIGNED_MUL,
  OCF_SIGNED_MUL,

  OCF_INVALID
};

/// Returns the OverflowCheckFlavor corresponding to an overflow_with_op
/// intrinsic.
static inline OverflowCheckFlavor
IntrinsicIDToOverflowCheckFlavor(unsigned ID) {
  switch (ID) {
  default:
    return OCF_INVALID;
  case Intrinsic::uadd_with_overflow:
    return OCF_UNSIGNED_ADD;
  case Intrinsic::sadd_with_overflow:
    return OCF_SIGNED_ADD;
  case Intrinsic::usub_with_overflow:
    return OCF_UNSIGNED_SUB;
  case Intrinsic::ssub_with_overflow:
    return OCF_SIGNED_SUB;
  case Intrinsic::umul_with_overflow:
    return OCF_UNSIGNED_MUL;
  case Intrinsic::smul_with_overflow:
    return OCF_SIGNED_MUL;
  }
}

/// Some binary operators require special handling to avoid poison and
/// undefined behavior. If a constant vector has undef elements, replace those
/// undefs with identity constants if possible because those are always safe to
/// execute. If no identity constant exists, replace undef with some other safe
/// constant.
static inline Constant *getSafeVectorConstantForBinop(
    BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant) {
  assert(In->getType()->isVectorTy() && "Not expecting scalars here");

  Type *EltTy = In->getType()->getVectorElementType();
  auto *SafeC = ConstantExpr::getBinOpIdentity(Opcode, EltTy, IsRHSConstant);
  if (!SafeC) {
    // TODO: Should this be available as a constant utility function? It is
    // similar to getBinOpAbsorber().
    if (IsRHSConstant) {
      switch (Opcode) {
      case Instruction::SRem: // X % 1 = 0
      case Instruction::URem: // X %u 1 = 0
        SafeC = ConstantInt::get(EltTy, 1);
        break;
      case Instruction::FRem: // X % 1.0 (doesn't simplify, but it is safe)
        SafeC = ConstantFP::get(EltTy, 1.0);
        break;
      default:
        llvm_unreachable("Only rem opcodes have no identity constant for RHS");
      }
    } else {
      switch (Opcode) {
      case Instruction::Shl:  // 0 << X = 0
      case Instruction::LShr: // 0 >>u X = 0
      case Instruction::AShr: // 0 >> X = 0
      case Instruction::SDiv: // 0 / X = 0
      case Instruction::UDiv: // 0 /u X = 0
      case Instruction::SRem: // 0 % X = 0
      case Instruction::URem: // 0 %u X = 0
      case Instruction::Sub:  // 0 - X (doesn't simplify, but it is safe)
      case Instruction::FSub: // 0.0 - X (doesn't simplify, but it is safe)
      case Instruction::FDiv: // 0.0 / X (doesn't simplify, but it is safe)
      case Instruction::FRem: // 0.0 % X = 0
        SafeC = Constant::getNullValue(EltTy);
        break;
      default:
        llvm_unreachable("Expected to find identity constant for opcode");
      }
    }
  }
  assert(SafeC && "Must have safe constant for binop");
  unsigned NumElts = In->getType()->getVectorNumElements();
  SmallVector<Constant *, 16> Out(NumElts);
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *C = In->getAggregateElement(i);
    Out[i] = isa<UndefValue>(C) ? SafeC : C;
  }
  return ConstantVector::get(Out);
}
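
// Illustrative sketch (hypothetical helper, not part of the original header):
// replacing the undef lane of an 'add' operand. For In = <i32 7, i32 undef>,
// the identity element 0 is substituted, giving <i32 7, i32 0>, which stays
// safe to execute even if a transform speculates the add.
static inline Constant *exampleSafeAddOperand(Constant *In) {
  return getSafeVectorConstantForBinop(Instruction::Add, In,
                                       /*IsRHSConstant=*/true);
}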

/// The core instruction combiner logic.
///
/// This class provides both the logic to recursively visit instructions and
/// combine them.
class LLVM_LIBRARY_VISIBILITY InstCombiner
    : public InstVisitor<InstCombiner, Instruction *> {
  // FIXME: These members shouldn't be public.
public:
  /// A worklist of the instructions that need to be simplified.
  InstCombineWorklist &Worklist;

  /// An IRBuilder that automatically inserts new instructions into the
  /// worklist.
  using BuilderTy = IRBuilder<TargetFolder, IRBuilderCallbackInserter>;
  BuilderTy &Builder;

private:
  // Mode in which we are running the combiner.
  const bool MinimizeSize;

  /// Enable combines that trigger rarely but are costly in compile time.
  const bool ExpensiveCombines;

  AliasAnalysis *AA;

  // Required analyses.
  AssumptionCache &AC;
  TargetLibraryInfo &TLI;
  DominatorTree &DT;
  const DataLayout &DL;
  const SimplifyQuery SQ;
  OptimizationRemarkEmitter &ORE;

  // Optional analyses. When non-null, these can both be used to do better
  // combining and will be updated to reflect any changes.
  LoopInfo *LI;

  bool MadeIRChange = false;

public:
  InstCombiner(InstCombineWorklist &Worklist, BuilderTy &Builder,
               bool MinimizeSize, bool ExpensiveCombines, AliasAnalysis *AA,
               AssumptionCache &AC, TargetLibraryInfo &TLI, DominatorTree &DT,
               OptimizationRemarkEmitter &ORE, const DataLayout &DL,
               LoopInfo *LI)
      : Worklist(Worklist), Builder(Builder), MinimizeSize(MinimizeSize),
        ExpensiveCombines(ExpensiveCombines), AA(AA), AC(AC), TLI(TLI), DT(DT),
        DL(DL), SQ(DL, &TLI, &DT, &AC), ORE(ORE), LI(LI) {}

  /// Run the combiner over the entire worklist until it is empty.
  ///
  /// \returns true if the IR is changed.
  bool run();

  AssumptionCache &getAssumptionCache() const { return AC; }

  const DataLayout &getDataLayout() const { return DL; }

  DominatorTree &getDominatorTree() const { return DT; }

  LoopInfo *getLoopInfo() const { return LI; }

  TargetLibraryInfo &getTargetLibraryInfo() const { return TLI; }

  // Visitation implementation - Implement instruction combining for different
  // instruction types. The semantics are as follows:
  //
  // Return Value:
  //   null        - No change was made.
  //   I           - Change was made, I is still valid, but it may be dead.
  //   otherwise   - Change was made, replace I with the returned instruction.
  //
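  // For example (illustrative sketch, not a real handler): a fold that
  // rewrites 'sub X, 0' would follow the contract above roughly as
  //   if (match(I.getOperand(1), m_Zero()))              // PatternMatch helper
  //     return replaceInstUsesWith(I, I.getOperand(0));  // change was made
  //   return nullptr;                                    // no change
  //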
  Instruction *visitAdd(BinaryOperator &I);
  Instruction *visitFAdd(BinaryOperator &I);
  Value *OptimizePointerDifference(Value *LHS, Value *RHS, Type *Ty);
  Instruction *visitSub(BinaryOperator &I);
  Instruction *visitFSub(BinaryOperator &I);
  Instruction *visitMul(BinaryOperator &I);
  Instruction *visitFMul(BinaryOperator &I);
  Instruction *visitURem(BinaryOperator &I);
  Instruction *visitSRem(BinaryOperator &I);
  Instruction *visitFRem(BinaryOperator &I);
  bool simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I);
  Instruction *commonRemTransforms(BinaryOperator &I);
  Instruction *commonIRemTransforms(BinaryOperator &I);
  Instruction *commonDivTransforms(BinaryOperator &I);
  Instruction *commonIDivTransforms(BinaryOperator &I);
  Instruction *visitUDiv(BinaryOperator &I);
  Instruction *visitSDiv(BinaryOperator &I);
  Instruction *visitFDiv(BinaryOperator &I);
  Value *simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, bool Inverted);
  Instruction *visitAnd(BinaryOperator &I);
  Instruction *visitOr(BinaryOperator &I);
  Instruction *visitXor(BinaryOperator &I);
  Instruction *visitShl(BinaryOperator &I);
  Instruction *visitAShr(BinaryOperator &I);
  Instruction *visitLShr(BinaryOperator &I);
  Instruction *commonShiftTransforms(BinaryOperator &I);
  Instruction *visitFCmpInst(FCmpInst &I);
  Instruction *visitICmpInst(ICmpInst &I);
  Instruction *FoldShiftByConstant(Value *Op0, Constant *Op1,
                                   BinaryOperator &I);
  Instruction *commonCastTransforms(CastInst &CI);
  Instruction *commonPointerCastTransforms(CastInst &CI);
  Instruction *visitTrunc(TruncInst &CI);
  Instruction *visitZExt(ZExtInst &CI);
  Instruction *visitSExt(SExtInst &CI);
  Instruction *visitFPTrunc(FPTruncInst &CI);
  Instruction *visitFPExt(CastInst &CI);
  Instruction *visitFPToUI(FPToUIInst &FI);
  Instruction *visitFPToSI(FPToSIInst &FI);
  Instruction *visitUIToFP(CastInst &CI);
  Instruction *visitSIToFP(CastInst &CI);
  Instruction *visitPtrToInt(PtrToIntInst &CI);
  Instruction *visitIntToPtr(IntToPtrInst &CI);
  Instruction *visitBitCast(BitCastInst &CI);
  Instruction *visitAddrSpaceCast(AddrSpaceCastInst &CI);
  Instruction *FoldItoFPtoI(Instruction &FI);
  Instruction *visitSelectInst(SelectInst &SI);
  Instruction *visitCallInst(CallInst &CI);
  Instruction *visitInvokeInst(InvokeInst &II);

  Instruction *SliceUpIllegalIntegerPHI(PHINode &PN);
  Instruction *visitPHINode(PHINode &PN);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
  Instruction *visitAllocaInst(AllocaInst &AI);
  Instruction *visitAllocSite(Instruction &FI);
  Instruction *visitFree(CallInst &FI);
  Instruction *visitLoadInst(LoadInst &LI);
  Instruction *visitStoreInst(StoreInst &SI);
  Instruction *visitBranchInst(BranchInst &BI);
  Instruction *visitFenceInst(FenceInst &FI);
  Instruction *visitSwitchInst(SwitchInst &SI);
  Instruction *visitReturnInst(ReturnInst &RI);
  Instruction *visitInsertValueInst(InsertValueInst &IV);
  Instruction *visitInsertElementInst(InsertElementInst &IE);
  Instruction *visitExtractElementInst(ExtractElementInst &EI);
  Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
  Instruction *visitExtractValueInst(ExtractValueInst &EV);
  Instruction *visitLandingPadInst(LandingPadInst &LI);
  Instruction *visitVAStartInst(VAStartInst &I);
  Instruction *visitVACopyInst(VACopyInst &I);

  /// Specify what to return for unhandled instructions.
  Instruction *visitInstruction(Instruction &I) { return nullptr; }
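
  // Illustrative note (assumption about typical use, mirroring the driver in
  // run()): the InstVisitor base class dispatches on the opcode, so a loop can
  // simply do
  //   Instruction *Result = visit(*I);
  //   if (Result && Result != I) { /* replace I with Result */ }
  //   else if (Result)           { /* I was modified in place  */ }
  // and anything without a dedicated handler falls back to visitInstruction().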

  /// True when DB dominates all uses of DI except UI.
  /// UI must be in the same block as DI.
  /// The routine checks that the DI parent and DB are different.
  bool dominatesAllUses(const Instruction *DI, const Instruction *UI,
                        const BasicBlock *DB) const;

  /// Try to replace select with select operand SIOpd in SI-ICmp sequence.
  bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp,
                                 const unsigned SIOpd);

  /// Try to replace instruction \p I with value \p V which are pointers
  /// in different address space.
  /// \return true if successful.
  bool replacePointer(Instruction &I, Value *V);

private:
  bool shouldChangeType(unsigned FromBitWidth, unsigned ToBitWidth) const;
  bool shouldChangeType(Type *From, Type *To) const;
  Value *dyn_castNegVal(Value *V) const;
  Type *FindElementAtOffset(PointerType *PtrTy, int64_t Offset,
                            SmallVectorImpl<Value *> &NewIndices);

  /// Classify whether a cast is worth optimizing.
  ///
  /// This is a helper to decide whether the simplification of
  /// logic(cast(A), cast(B)) to cast(logic(A, B)) should be performed.
  ///
  /// \param CI The cast we are interested in.
  ///
  /// \return true if this cast actually results in any code being generated
  /// and if it cannot already be eliminated by some other transformation.
  bool shouldOptimizeCast(CastInst *CI);

  /// Try to optimize a sequence of instructions checking if an operation
  /// on LHS and RHS overflows.
  ///
  /// If this overflow check is done via one of the overflow check intrinsics,
  /// then CtxI has to be the call instruction calling that intrinsic. If this
  /// overflow check is done by arithmetic followed by a compare, then CtxI has
  /// to be the arithmetic instruction.
  ///
  /// If a simplification is possible, stores the simplified result of the
  /// operation in OperationResult and the result of the overflow check in
  /// OverflowResult, and returns true. If no simplification is possible,
  /// returns false.
  bool OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS, Value *RHS,
                             Instruction &CtxI, Value *&OperationResult,
                             Constant *&OverflowResult);
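
  // The two shapes of overflow check mentioned above look like this in IR
  // (illustrative): either the dedicated intrinsic
  //   %res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  //   %ov  = extractvalue { i32, i1 } %res, 1
  // or plain arithmetic followed by a compare, e.g. for an unsigned add
  //   %s  = add i32 %a, %b
  //   %ov = icmp ult i32 %s, %a     ; overflowed iff the sum wrapped below %a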

  Instruction *visitCallSite(CallSite CS);
  Instruction *tryOptimizeCall(CallInst *CI);
  bool transformConstExprCastCall(CallSite CS);
  Instruction *transformCallThroughTrampoline(CallSite CS,
                                              IntrinsicInst *Tramp);

  /// Transform (zext icmp) to bitwise / integer operations in order to
  /// eliminate it.
  ///
  /// \param ICI The icmp of the (zext icmp) pair we are interested in.
  /// \param CI The zext of the (zext icmp) pair we are interested in.
  /// \param DoTransform Pass false to just test whether the given (zext icmp)
  ///                    would be transformed. Pass true to actually perform
  ///                    the transformation.
  ///
  /// \return null if the transformation cannot be performed. If the
  /// transformation can be performed the new instruction that replaces the
  /// (zext icmp) pair will be returned (if \p DoTransform is false the
  /// unmodified \p ICI will be returned in this case).
  Instruction *transformZExtICmp(ICmpInst *ICI, ZExtInst &CI,
                                 bool DoTransform = true);

  Instruction *transformSExtICmp(ICmpInst *ICI, Instruction &CI);

  bool willNotOverflowSignedAdd(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedAdd(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedAdd(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedAdd(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowSignedSub(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedSub(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedSub(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedSub(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowSignedMul(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedMul(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedMul(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedMul(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }
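
  // For instance (illustrative, mirroring how these wrappers are typically
  // used by the visit methods): proving that an 'add' cannot wrap lets a
  // transform attach the corresponding flag,
  //   if (willNotOverflowSignedAdd(LHS, RHS, I))
  //     I.setHasNoSignedWrap(true);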

  Value *EmitGEPOffset(User *GEP);
  Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
  Value *EvaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask);
  Instruction *foldCastedBitwiseLogic(BinaryOperator &I);
  Instruction *narrowBinOp(TruncInst &Trunc);
  Instruction *narrowMaskedBinOp(BinaryOperator &And);
  Instruction *narrowRotate(TruncInst &Trunc);
  Instruction *optimizeBitCastFromPhi(CastInst &CI, PHINode *PN);

  /// Determine if a pair of casts can be replaced by a single cast.
  ///
  /// \param CI1 The first of a pair of casts.
  /// \param CI2 The second of a pair of casts.
  ///
  /// \return 0 if the cast pair cannot be eliminated, otherwise returns an
  /// Instruction::CastOps value for a cast that can replace the pair, casting
  /// CI1->getSrcTy() to CI2->getDstTy().
  ///
  /// \see CastInst::isEliminableCastPair
  Instruction::CastOps isEliminableCastPair(const CastInst *CI1,
                                            const CastInst *CI2);

  Value *foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS, Instruction &CxtI);
  Value *foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS, Instruction &CxtI);
  Value *foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS);

  /// Optimize (fcmp)&(fcmp) or (fcmp)|(fcmp).
  /// NOTE: Unlike most of instcombine, this returns a Value which should
  /// already be inserted into the function.
  Value *foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd);

  Value *foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS, ICmpInst *RHS,
                                       bool JoinedByAnd, Instruction &CxtI);

public:
  /// Inserts an instruction \p New before instruction \p Old.
  ///
  /// Also adds the new instruction to the worklist and returns \p New so that
  /// it is suitable for use as the return from the visitation patterns.
  Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
    assert(New && !New->getParent() &&
           "New instruction already inserted into a basic block!");
    BasicBlock *BB = Old.getParent();
    BB->getInstList().insert(Old.getIterator(), New); // Insert inst
    Worklist.Add(New);
    return New;
  }

  /// Same as InsertNewInstBefore, but also sets the debug loc.
  Instruction *InsertNewInstWith(Instruction *New, Instruction &Old) {
    New->setDebugLoc(Old.getDebugLoc());
    return InsertNewInstBefore(New, Old);
  }

  /// A combiner-aware RAUW-like routine.
  ///
  /// This method is to be used when an instruction is found to be dead,
  /// replaceable with another preexisting expression. Here we add all uses of
  /// I to the worklist, replace all uses of I with the new value, then return
  /// I, so that the inst combiner will know that I was modified.
  Instruction *replaceInstUsesWith(Instruction &I, Value *V) {
    // If there are no uses to replace, then we return nullptr to indicate that
    // no changes were made to the program.
    if (I.use_empty()) return nullptr;

    Worklist.AddUsersToWorkList(I); // Add all modified instrs to worklist.

    // If we are replacing the instruction with itself, this must be in a
    // segment of unreachable code, so just clobber the instruction.
    if (&I == V)
      V = UndefValue::get(I.getType());

    LLVM_DEBUG(dbgs() << "IC: Replacing " << I << "\n"
                      << "    with " << *V << '\n');

    I.replaceAllUsesWith(V);
    return &I;
  }

  /// Creates a result tuple for an overflow intrinsic \p II with a given
  /// \p Result and a constant \p Overflow value.
  Instruction *CreateOverflowTuple(IntrinsicInst *II, Value *Result,
                                   Constant *Overflow) {
    Constant *V[] = {UndefValue::get(Result->getType()), Overflow};
    StructType *ST = cast<StructType>(II->getType());
    Constant *Struct = ConstantStruct::get(ST, V);
    return InsertValueInst::Create(Struct, Result, 0);
  }
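
  // For example (illustrative): a fold that proves an @llvm.sadd.with.overflow
  // call never overflows could finish with
  //   if (willNotOverflowSignedAdd(LHS, RHS, *II))
  //     return CreateOverflowTuple(II, Builder.CreateNSWAdd(LHS, RHS),
  //                                Builder.getFalse());
  // so the overflow bit of the returned tuple becomes the constant 'false'.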

  /// Combiner aware instruction erasure.
  ///
  /// When dealing with an instruction that has side effects or produces a void
  /// value, we can't rely on DCE to delete the instruction. Instead, visit
  /// methods should return the value returned by this function.
  Instruction *eraseInstFromFunction(Instruction &I) {
    LLVM_DEBUG(dbgs() << "IC: ERASE " << I << '\n');
    assert(I.use_empty() && "Cannot erase instruction that is used!");
    salvageDebugInfo(I);

    // Make sure that we reprocess all operands now that we reduced their
    // use counts.
    if (I.getNumOperands() < 8) {
      for (Use &Operand : I.operands())
        if (auto *Inst = dyn_cast<Instruction>(Operand))
          Worklist.Add(Inst);
    }
    Worklist.Remove(&I);
    I.eraseFromParent();
    MadeIRChange = true;
    return nullptr; // Don't do anything with I; it has been erased.
  }

  void computeKnownBits(const Value *V, KnownBits &Known,
                        unsigned Depth, const Instruction *CxtI) const {
    llvm::computeKnownBits(V, Known, DL, Depth, &AC, CxtI, &DT);
  }

  KnownBits computeKnownBits(const Value *V, unsigned Depth,
                             const Instruction *CxtI) const {
    return llvm::computeKnownBits(V, DL, Depth, &AC, CxtI, &DT);
  }

  bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero = false,
                              unsigned Depth = 0,
                              const Instruction *CxtI = nullptr) {
    return llvm::isKnownToBeAPowerOfTwo(V, DL, OrZero, Depth, &AC, CxtI, &DT);
  }

  bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth = 0,
                         const Instruction *CxtI = nullptr) const {
    return llvm::MaskedValueIsZero(V, Mask, DL, Depth, &AC, CxtI, &DT);
  }

  unsigned ComputeNumSignBits(const Value *Op, unsigned Depth = 0,
                              const Instruction *CxtI = nullptr) const {
    return llvm::ComputeNumSignBits(Op, DL, Depth, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedMul(const Value *LHS,
                                             const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedAdd(const Value *LHS,
                                             const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedSub(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  /// Maximum size of array considered when transforming.
  uint64_t MaxArraySizeForCombine;

private:
  /// Performs a few simplifications for operators which are associative
  /// or commutative.
  bool SimplifyAssociativeOrCommutative(BinaryOperator &I);

  /// Tries to simplify binary operations which some other binary
  /// operation distributes over.
  ///
  /// It does this either by factorizing out common terms (e.g. "(A*B)+(A*C)"
  /// -> "A*(B+C)") or by expanding out if this results in simplifications
  /// (e.g. "A & (B | C) -> (A&B) | (A&C)" if this is a win). Returns the
  /// simplified value, or null if it didn't simplify.
  Value *SimplifyUsingDistributiveLaws(BinaryOperator &I);

  /// Tries to simplify add operations using the definition of remainder.
  ///
  /// The definition of remainder is X % C = X - (X / C) * C. The add
  /// expression X % C0 + ((X / C0) % C1) * C0 can be simplified to
  /// X % (C0 * C1).
  Value *SimplifyAddWithRemainder(BinaryOperator &I);
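
  // A quick sanity check of that identity (illustrative, unsigned values):
  // with X = 23, C0 = 4, C1 = 5 we get X % 4 = 3 and ((X / 4) % 5) * 4 =
  // (5 % 5) * 4 = 0, so the sum is 3, matching 23 % (4 * 5) = 23 % 20 = 3.
  // In general, writing X = Q * (C0 * C1) + R with 0 <= R < C0 * C1, both
  // sides of the identity reduce to R.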

  // Binary Op helper for select operations where the expression can be
  // efficiently reorganized.
  Value *SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS,
                                        Value *RHS);

  /// This tries to simplify binary operations by factorizing out common terms
  /// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
  Value *tryFactorization(BinaryOperator &, Instruction::BinaryOps, Value *,
                          Value *, Value *, Value *);

  /// Match a select chain which produces one of three values based on whether
  /// the LHS is less than, equal to, or greater than RHS respectively.
  /// Return true if we matched a three way compare idiom. The LHS, RHS, Less,
  /// Equal and Greater values are saved in the matching process and returned
  /// to the caller.
  bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, Value *&RHS,
                               ConstantInt *&Less, ConstantInt *&Equal,
                               ConstantInt *&Greater);

  /// Attempts to replace V with a simpler value based on the demanded
  /// bits.
  Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask, KnownBits &Known,
                                 unsigned Depth, Instruction *CxtI);
  bool SimplifyDemandedBits(Instruction *I, unsigned Op,
                            const APInt &DemandedMask, KnownBits &Known,
                            unsigned Depth = 0);

  /// Helper routine of SimplifyDemandedUseBits. It computes KnownZero/KnownOne
  /// bits. It also tries to handle simplifications that can be done based on
  /// DemandedMask, but without modifying the Instruction.
  Value *SimplifyMultipleUseDemandedBits(Instruction *I,
                                         const APInt &DemandedMask,
                                         KnownBits &Known,
                                         unsigned Depth, Instruction *CxtI);

  /// Helper routine of SimplifyDemandedUseBits. It tries to simplify demanded
  /// bits for the "r1 = shr x, c1; r2 = shl r1, c2" instruction sequence.
  Value *simplifyShrShlDemandedBits(
      Instruction *Shr, const APInt &ShrOp1, Instruction *Shl,
      const APInt &ShlOp1, const APInt &DemandedMask, KnownBits &Known);

  /// Tries to simplify operands to an integer instruction based on its
  /// demanded bits.
  bool SimplifyDemandedInstructionBits(Instruction &Inst);

  Value *simplifyAMDGCNMemoryIntrinsicDemanded(IntrinsicInst *II,
                                               APInt DemandedElts,
                                               int DmaskIdx = -1);

  Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                    APInt &UndefElts, unsigned Depth = 0);

  /// Canonicalize the position of binops relative to shufflevector.
  Instruction *foldShuffledBinop(BinaryOperator &Inst);

  /// Given a binary operator, cast instruction, or select which has a PHI node
  /// as operand #0, see if we can fold the instruction into the PHI (which is
  /// only possible if all operands to the PHI are constants).
  Instruction *foldOpIntoPhi(Instruction &I, PHINode *PN);

  /// Given an instruction with a select as one operand and a constant as the
  /// other operand, try to fold the binary operator into the select arguments.
  /// This also works for Cast instructions, which obviously do not have a
  /// second operand.
  Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI);
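
  // For instance (illustrative): with a constant second operand,
  //   add (select i1 %c, i32 1, i32 2), 4
  // can be folded into the select arms to give
  //   select i1 %c, i32 5, i32 6
  // because both arms simplify to constants.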

  /// This is a convenience wrapper function for the above two functions.
  Instruction *foldBinOpIntoSelectOrPhi(BinaryOperator &I);

  Instruction *foldAddWithConstant(BinaryOperator &Add);

  /// Try to rotate an operation below a PHI node, using PHI nodes for
  /// its operands.
  Instruction *FoldPHIArgOpIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgBinOpIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgGEPIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgLoadIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgZextsIntoPHI(PHINode &PN);

  /// If an integer typed PHI has only one use which is an IntToPtr operation,
  /// replace the PHI with an existing pointer typed PHI if it exists.
  /// Otherwise insert a new pointer typed PHI and replace the original one.
  Instruction *FoldIntegerTypedPHI(PHINode &PN);

  /// Helper function for FoldPHIArgXIntoPHI() to set debug location for the
  /// folded operation.
  void PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN);

  Instruction *foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                           ICmpInst::Predicate Cond, Instruction &I);
  Instruction *foldAllocaCmp(ICmpInst &ICI, const AllocaInst *Alloca,
                             const Value *Other);
  Instruction *foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
                                            GlobalVariable *GV, CmpInst &ICI,
                                            ConstantInt *AndCst = nullptr);
  Instruction *foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
                                    Constant *RHSC);
  Instruction *foldICmpAddOpConst(Value *X, ConstantInt *CI,
                                  ICmpInst::Predicate Pred);
  Instruction *foldICmpWithCastAndCast(ICmpInst &ICI);

  Instruction *foldICmpUsingKnownBits(ICmpInst &Cmp);
  Instruction *foldICmpWithConstant(ICmpInst &Cmp);
  Instruction *foldICmpInstWithConstant(ICmpInst &Cmp);
  Instruction *foldICmpInstWithConstantNotInt(ICmpInst &Cmp);
  Instruction *foldICmpBinOp(ICmpInst &Cmp);
  Instruction *foldICmpEquality(ICmpInst &Cmp);
  Instruction *foldICmpWithZero(ICmpInst &Cmp);

  Instruction *foldICmpSelectConstant(ICmpInst &Cmp, SelectInst *Select,
                                      ConstantInt *C);
  Instruction *foldICmpBitCastConstant(ICmpInst &Cmp, BitCastInst *Bitcast,
                                       const APInt &C);
  Instruction *foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc,
                                     const APInt &C);
  Instruction *foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And,
                                   const APInt &C);
  Instruction *foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor,
                                   const APInt &C);
  Instruction *foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or,
                                  const APInt &C);
  Instruction *foldICmpMulConstant(ICmpInst &Cmp, BinaryOperator *Mul,
                                   const APInt &C);
  Instruction *foldICmpShlConstant(ICmpInst &Cmp, BinaryOperator *Shl,
                                   const APInt &C);
  Instruction *foldICmpShrConstant(ICmpInst &Cmp, BinaryOperator *Shr,
                                   const APInt &C);
  Instruction *foldICmpUDivConstant(ICmpInst &Cmp, BinaryOperator *UDiv,
                                    const APInt &C);
  Instruction *foldICmpDivConstant(ICmpInst &Cmp, BinaryOperator *Div,
                                   const APInt &C);
  Instruction *foldICmpSubConstant(ICmpInst &Cmp, BinaryOperator *Sub,
                                   const APInt &C);
  Instruction *foldICmpAddConstant(ICmpInst &Cmp, BinaryOperator *Add,
                                   const APInt &C);
  Instruction *foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And,
                                     const APInt &C1);
  Instruction *foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
                                const APInt &C1, const APInt &C2);
  Instruction *foldICmpShrConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1,
                                     const APInt &C2);
  Instruction *foldICmpShlConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1,
                                     const APInt &C2);

  Instruction *foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
                                                 BinaryOperator *BO,
                                                 const APInt &C);
  Instruction *foldICmpIntrinsicWithConstant(ICmpInst &ICI, const APInt &C);

  // Helpers of visitSelectInst().
  Instruction *foldSelectExtConst(SelectInst &Sel);
  Instruction *foldSelectOpOp(SelectInst &SI, Instruction *TI, Instruction *FI);
  Instruction *foldSelectIntoOp(SelectInst &SI, Value *, Value *);
  Instruction *foldSPFofSPF(Instruction *Inner, SelectPatternFlavor SPF1,
                            Value *A, Value *B, Instruction &Outer,
                            SelectPatternFlavor SPF2, Value *C);
  Instruction *foldSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI);

  Instruction *OptAndOp(BinaryOperator *Op, ConstantInt *OpRHS,
                        ConstantInt *AndRHS, BinaryOperator &TheAnd);

  Value *insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi,
                         bool isSigned, bool Inside);
  Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocaInst &AI);
  Instruction *MatchBSwap(BinaryOperator &I);
  bool SimplifyStoreAtEndOfBlock(StoreInst &SI);

  Instruction *SimplifyAnyMemTransfer(AnyMemTransferInst *MI);
  Instruction *SimplifyAnyMemSet(AnyMemSetInst *MI);

  Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned);

  /// Returns a value X such that Val = X * Scale, or null if none.
  ///
  /// If the multiplication is known not to overflow then NoSignedWrap is set.
  Value *Descale(Value *Val, APInt Scale, bool &NoSignedWrap);
};

} // end namespace llvm

#undef DEBUG_TYPE

#endif // LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H