Home | History | Annotate | Download | only in Analysis
      1 //===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This file contains routines that help analyze properties that chains of
     11 // computations have.
     12 //
     13 //===----------------------------------------------------------------------===//
     14 
     15 #ifndef LLVM_ANALYSIS_VALUETRACKING_H
     16 #define LLVM_ANALYSIS_VALUETRACKING_H
     17 
     18 #include "llvm/IR/CallSite.h"
     19 #include "llvm/IR/Instruction.h"
     20 #include "llvm/IR/IntrinsicInst.h"
     21 #include "llvm/Support/DataTypes.h"
     22 
     23 namespace llvm {
     24 template <typename T> class ArrayRef;
     25   class APInt;
     26   class AddOperator;
     27   class AssumptionCache;
     28   class DataLayout;
     29   class DominatorTree;
     30   class GEPOperator;
     31   class Instruction;
     32   struct KnownBits;
     33   class Loop;
     34   class LoopInfo;
     35   class OptimizationRemarkEmitter;
     36   class MDNode;
     37   class StringRef;
     38   class TargetLibraryInfo;
     39   class Value;
     40 
     41   namespace Intrinsic {
     42   enum ID : unsigned;
     43   }
     44 
  /// Determine which bits of V are known to be either zero or one and return
  /// them in the KnownBits structure \p Known.
     47   ///
     48   /// This function is defined on values with integer type, values with pointer
     49   /// type, and vectors of integers.  In the case
     50   /// where V is a vector, the known zero and known one values are the
     51   /// same width as the vector element, and the bit is set only if it is true
     52   /// for all of the elements in the vector.
     53   void computeKnownBits(const Value *V, KnownBits &Known,
     54                         const DataLayout &DL, unsigned Depth = 0,
     55                         AssumptionCache *AC = nullptr,
     56                         const Instruction *CxtI = nullptr,
     57                         const DominatorTree *DT = nullptr,
     58                         OptimizationRemarkEmitter *ORE = nullptr);
     59   /// Returns the known bits rather than passing by reference.
     60   KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
     61                              unsigned Depth = 0, AssumptionCache *AC = nullptr,
     62                              const Instruction *CxtI = nullptr,
     63                              const DominatorTree *DT = nullptr,
     64                              OptimizationRemarkEmitter *ORE = nullptr);
  /// Compute known bits from the range metadata.
  /// \p Known is populated with the bits known to be zero or one from
  /// \p Ranges.
     68   void computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
     69                                          KnownBits &Known);
     70   /// Return true if LHS and RHS have no common bits set.
     71   bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
     72                            const DataLayout &DL,
     73                            AssumptionCache *AC = nullptr,
     74                            const Instruction *CxtI = nullptr,
     75                            const DominatorTree *DT = nullptr);
     76 
     77   /// Return true if the given value is known to have exactly one bit set when
     78   /// defined. For vectors return true if every element is known to be a power
     79   /// of two when defined. Supports values with integer or pointer type and
     80   /// vectors of integers. If 'OrZero' is set, then return true if the given
     81   /// value is either a power of two or zero.
     82   bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
     83                               bool OrZero = false, unsigned Depth = 0,
     84                               AssumptionCache *AC = nullptr,
     85                               const Instruction *CxtI = nullptr,
     86                               const DominatorTree *DT = nullptr);
     87 
     88   bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);
     89 
     90   /// Return true if the given value is known to be non-zero when defined. For
     91   /// vectors, return true if every element is known to be non-zero when
     92   /// defined. For pointers, if the context instruction and dominator tree are
     93   /// specified, perform context-sensitive analysis and return true if the
     94   /// pointer couldn't possibly be null at the specified instruction.
     95   /// Supports values with integer or pointer type and vectors of integers.
     96   bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0,
     97                       AssumptionCache *AC = nullptr,
     98                       const Instruction *CxtI = nullptr,
     99                       const DominatorTree *DT = nullptr);
    100 
  /// Returns true if the given value is known to be non-negative.
    102   bool isKnownNonNegative(const Value *V, const DataLayout &DL,
    103                           unsigned Depth = 0,
    104                           AssumptionCache *AC = nullptr,
    105                           const Instruction *CxtI = nullptr,
    106                           const DominatorTree *DT = nullptr);
    107 
  /// Returns true if the given value is known to be positive (i.e. non-negative
    109   /// and non-zero).
    110   bool isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth = 0,
    111                        AssumptionCache *AC = nullptr,
    112                        const Instruction *CxtI = nullptr,
    113                        const DominatorTree *DT = nullptr);
    114 
  /// Returns true if the given value is known to be negative (i.e. non-positive
    116   /// and non-zero).
    117   bool isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth = 0,
    118                        AssumptionCache *AC = nullptr,
    119                        const Instruction *CxtI = nullptr,
    120                        const DominatorTree *DT = nullptr);
    121 
    122   /// Return true if the given values are known to be non-equal when defined.
    123   /// Supports scalar integer types only.
    124   bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL,
    125                       AssumptionCache *AC = nullptr,
    126                       const Instruction *CxtI = nullptr,
    127                       const DominatorTree *DT = nullptr);
    128 
    129   /// Return true if 'V & Mask' is known to be zero. We use this predicate to
    130   /// simplify operations downstream. Mask is known to be zero for bits that V
    131   /// cannot have.
    132   ///
    133   /// This function is defined on values with integer type, values with pointer
    134   /// type, and vectors of integers.  In the case
    135   /// where V is a vector, the mask, known zero, and known one values are the
    136   /// same width as the vector element, and the bit is set only if it is true
    137   /// for all of the elements in the vector.
    138   bool MaskedValueIsZero(const Value *V, const APInt &Mask,
    139                          const DataLayout &DL,
    140                          unsigned Depth = 0, AssumptionCache *AC = nullptr,
    141                          const Instruction *CxtI = nullptr,
    142                          const DominatorTree *DT = nullptr);
    143 
    144   /// Return the number of times the sign bit of the register is replicated into
    145   /// the other bits. We know that at least 1 bit is always equal to the sign
    146   /// bit (itself), but other cases can give us information. For example,
    147   /// immediately after an "ashr X, 2", we know that the top 3 bits are all
    148   /// equal to each other, so we return 3. For vectors, return the number of
  /// sign bits for the vector element with the minimum number of known sign
    150   /// bits.
    151   unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
    152                               unsigned Depth = 0, AssumptionCache *AC = nullptr,
    153                               const Instruction *CxtI = nullptr,
    154                               const DominatorTree *DT = nullptr);
    155 
    156   /// This function computes the integer multiple of Base that equals V. If
    157   /// successful, it returns true and returns the multiple in Multiple. If
    158   /// unsuccessful, it returns false. Also, if V can be simplified to an
    159   /// integer, then the simplified V is returned in Val. Look through sext only
    160   /// if LookThroughSExt=true.
    161   bool ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
    162                        bool LookThroughSExt = false,
    163                        unsigned Depth = 0);
    164 
    165   /// Map a call instruction to an intrinsic ID.  Libcalls which have equivalent
    166   /// intrinsics are treated as-if they were intrinsics.
    167   Intrinsic::ID getIntrinsicForCallSite(ImmutableCallSite ICS,
    168                                         const TargetLibraryInfo *TLI);
    169 
    170   /// Return true if we can prove that the specified FP value is never equal to
    171   /// -0.0.
    172   bool CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
    173                             unsigned Depth = 0);
    174 
    175   /// Return true if we can prove that the specified FP value is either NaN or
    176   /// never less than -0.0.
    177   ///
    178   ///      NaN --> true
    179   ///       +0 --> true
    180   ///       -0 --> true
    181   ///   x > +0 --> true
    182   ///   x < -0 --> false
    183   ///
    184   bool CannotBeOrderedLessThanZero(const Value *V, const TargetLibraryInfo *TLI);
    185 
    186   /// Return true if we can prove that the specified FP value's sign bit is 0.
    187   ///
    188   ///      NaN --> true/false (depending on the NaN's sign bit)
    189   ///       +0 --> true
    190   ///       -0 --> false
    191   ///   x > +0 --> true
    192   ///   x < -0 --> false
    193   ///
    194   bool SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI);
    195 
    196   /// If the specified value can be set by repeating the same byte in memory,
    197   /// return the i8 value that it is represented with. This is true for all i8
    198   /// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double
    199   /// 0.0 etc. If the value can't be handled with a repeated byte store (e.g.
    200   /// i16 0x1234), return null.
    201   Value *isBytewiseValue(Value *V);
    202 
  /// Given an aggregate and a sequence of indices, see if the scalar value
  /// indexed is already around as a register, for example if it were inserted
  /// directly into the aggregate.
    206   ///
    207   /// If InsertBefore is not null, this function will duplicate (modified)
    208   /// insertvalues when a part of a nested struct is extracted.
    209   Value *FindInsertedValue(Value *V,
    210                            ArrayRef<unsigned> idx_range,
    211                            Instruction *InsertBefore = nullptr);
    212 
    213   /// Analyze the specified pointer to see if it can be expressed as a base
    214   /// pointer plus a constant offset. Return the base and offset to the caller.
    215   Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
    216                                           const DataLayout &DL);
    217   static inline const Value *
    218   GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
    219                                    const DataLayout &DL) {
    220     return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset,
    221                                             DL);
    222   }
    223 
  /// Returns true if the GEP is based on a pointer to a string (array of
  /// \p CharSize integers) and is indexing into this string.
    226   bool isGEPBasedOnPointerToString(const GEPOperator *GEP,
    227                                    unsigned CharSize = 8);
    228 
    229   /// Represents offset+length into a ConstantDataArray.
    230   struct ConstantDataArraySlice {
    231     /// ConstantDataArray pointer. nullptr indicates a zeroinitializer (a valid
    232     /// initializer, it just doesn't fit the ConstantDataArray interface).
    233     const ConstantDataArray *Array;
    234     /// Slice starts at this Offset.
    235     uint64_t Offset;
    236     /// Length of the slice.
    237     uint64_t Length;
    238 
    239     /// Moves the Offset and adjusts Length accordingly.
    240     void move(uint64_t Delta) {
    241       assert(Delta < Length);
    242       Offset += Delta;
    243       Length -= Delta;
    244     }
    245     /// Convenience accessor for elements in the slice.
    246     uint64_t operator[](unsigned I) const {
    247       return Array==nullptr ? 0 : Array->getElementAsInteger(I + Offset);
    248     }
    249   };
    250 
  /// Returns true if the value \p V is a pointer into a ConstantDataArray.
    252   /// If successful \p Index will point to a ConstantDataArray info object
    253   /// with an appropriate offset.
    254   bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
    255                                 unsigned ElementSize, uint64_t Offset = 0);
    256 
    257   /// This function computes the length of a null-terminated C string pointed to
    258   /// by V. If successful, it returns true and returns the string in Str. If
    259   /// unsuccessful, it returns false. This does not include the trailing null
    260   /// character by default. If TrimAtNul is set to false, then this returns any
    261   /// trailing null characters as well as any other characters that come after
    262   /// it.
    263   bool getConstantStringInfo(const Value *V, StringRef &Str,
    264                              uint64_t Offset = 0, bool TrimAtNul = true);
    265 
    266   /// If we can compute the length of the string pointed to by the specified
    267   /// pointer, return 'len+1'.  If we can't, return 0.
    268   uint64_t GetStringLength(const Value *V, unsigned CharSize = 8);
    269 
    270   /// This method strips off any GEP address adjustments and pointer casts from
    271   /// the specified value, returning the original object being addressed. Note
    272   /// that the returned value has pointer type if the specified value does. If
    273   /// the MaxLookup value is non-zero, it limits the number of instructions to
    274   /// be stripped off.
    275   Value *GetUnderlyingObject(Value *V, const DataLayout &DL,
    276                              unsigned MaxLookup = 6);
    277   static inline const Value *GetUnderlyingObject(const Value *V,
    278                                                  const DataLayout &DL,
    279                                                  unsigned MaxLookup = 6) {
    280     return GetUnderlyingObject(const_cast<Value *>(V), DL, MaxLookup);
    281   }
    282 
    283   /// \brief This method is similar to GetUnderlyingObject except that it can
    284   /// look through phi and select instructions and return multiple objects.
    285   ///
    286   /// If LoopInfo is passed, loop phis are further analyzed.  If a pointer
    287   /// accesses different objects in each iteration, we don't look through the
    288   /// phi node. E.g. consider this loop nest:
    289   ///
    290   ///   int **A;
    291   ///   for (i)
    292   ///     for (j) {
    293   ///        A[i][j] = A[i-1][j] * B[j]
    294   ///     }
    295   ///
    296   /// This is transformed by Load-PRE to stash away A[i] for the next iteration
    297   /// of the outer loop:
    298   ///
    299   ///   Curr = A[0];          // Prev_0
    300   ///   for (i: 1..N) {
    301   ///     Prev = Curr;        // Prev = PHI (Prev_0, Curr)
    302   ///     Curr = A[i];
    303   ///     for (j: 0..N) {
    304   ///        Curr[j] = Prev[j] * B[j]
    305   ///     }
    306   ///   }
    307   ///
    308   /// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects
    309   /// should not assume that Curr and Prev share the same underlying object thus
    310   /// it shouldn't look through the phi above.
    311   void GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
    312                             const DataLayout &DL, LoopInfo *LI = nullptr,
    313                             unsigned MaxLookup = 6);
    314 
    315   /// Return true if the only users of this pointer are lifetime markers.
    316   bool onlyUsedByLifetimeMarkers(const Value *V);
    317 
    318   /// Return true if the instruction does not have any effects besides
    319   /// calculating the result and does not have undefined behavior.
    320   ///
    321   /// This method never returns true for an instruction that returns true for
    322   /// mayHaveSideEffects; however, this method also does some other checks in
    323   /// addition. It checks for undefined behavior, like dividing by zero or
    324   /// loading from an invalid pointer (but not for undefined results, like a
    325   /// shift with a shift amount larger than the width of the result). It checks
    326   /// for malloc and alloca because speculatively executing them might cause a
    327   /// memory leak. It also returns false for instructions related to control
    328   /// flow, specifically terminators and PHI nodes.
    329   ///
    330   /// If the CtxI is specified this method performs context-sensitive analysis
    331   /// and returns true if it is safe to execute the instruction immediately
    332   /// before the CtxI.
    333   ///
    334   /// If the CtxI is NOT specified this method only looks at the instruction
    335   /// itself and its operands, so if this method returns true, it is safe to
    336   /// move the instruction as long as the correct dominance relationships for
    337   /// the operands and users hold.
    338   ///
    339   /// This method can return true for instructions that read memory;
    340   /// for such instructions, moving them may change the resulting value.
    341   bool isSafeToSpeculativelyExecute(const Value *V,
    342                                     const Instruction *CtxI = nullptr,
    343                                     const DominatorTree *DT = nullptr);
    344 
    345   /// Returns true if the result or effects of the given instructions \p I
    346   /// depend on or influence global memory.
    347   /// Memory dependence arises for example if the instruction reads from
    348   /// memory or may produce effects or undefined behaviour. Memory dependent
  /// instructions generally cannot be reordered with respect to other memory
    350   /// dependent instructions or moved into non-dominated basic blocks.
    351   /// Instructions which just compute a value based on the values of their
    352   /// operands are not memory dependent.
    353   bool mayBeMemoryDependent(const Instruction &I);
    354 
    355   /// Return true if this pointer couldn't possibly be null by its definition.
    356   /// This returns true for allocas, non-extern-weak globals, and byval
    357   /// arguments.
    358   bool isKnownNonNull(const Value *V);
    359 
    360   /// Return true if this pointer couldn't possibly be null. If the context
    361   /// instruction and dominator tree are specified, perform context-sensitive
    362   /// analysis and return true if the pointer couldn't possibly be null at the
    363   /// specified instruction.
    364   bool isKnownNonNullAt(const Value *V,
    365                         const Instruction *CtxI = nullptr,
    366                         const DominatorTree *DT = nullptr);
    367 
    368   /// Return true if it is valid to use the assumptions provided by an
    369   /// assume intrinsic, I, at the point in the control-flow identified by the
    370   /// context instruction, CxtI.
    371   bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
    372                                const DominatorTree *DT = nullptr);
    373 
    374   enum class OverflowResult { AlwaysOverflows, MayOverflow, NeverOverflows };
    375   OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
    376                                                const Value *RHS,
    377                                                const DataLayout &DL,
    378                                                AssumptionCache *AC,
    379                                                const Instruction *CxtI,
    380                                                const DominatorTree *DT);
    381   OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
    382                                                const Value *RHS,
    383                                                const DataLayout &DL,
    384                                                AssumptionCache *AC,
    385                                                const Instruction *CxtI,
    386                                                const DominatorTree *DT);
    387   OverflowResult computeOverflowForSignedAdd(const Value *LHS, const Value *RHS,
    388                                              const DataLayout &DL,
    389                                              AssumptionCache *AC = nullptr,
    390                                              const Instruction *CxtI = nullptr,
    391                                              const DominatorTree *DT = nullptr);
    392   /// This version also leverages the sign bit of Add if known.
    393   OverflowResult computeOverflowForSignedAdd(const AddOperator *Add,
    394                                              const DataLayout &DL,
    395                                              AssumptionCache *AC = nullptr,
    396                                              const Instruction *CxtI = nullptr,
    397                                              const DominatorTree *DT = nullptr);
    398 
    399   /// Returns true if the arithmetic part of the \p II 's result is
    400   /// used only along the paths control dependent on the computation
    401   /// not overflowing, \p II being an <op>.with.overflow intrinsic.
    402   bool isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
    403                                  const DominatorTree &DT);
    404 
    405   /// Return true if this function can prove that the instruction I will
    406   /// always transfer execution to one of its successors (including the next
    407   /// instruction that follows within a basic block). E.g. this is not
    408   /// guaranteed for function calls that could loop infinitely.
    409   ///
    410   /// In other words, this function returns false for instructions that may
    411   /// transfer execution or fail to transfer execution in a way that is not
    412   /// captured in the CFG nor in the sequence of instructions within a basic
    413   /// block.
    414   ///
    415   /// Undefined behavior is assumed not to happen, so e.g. division is
    416   /// guaranteed to transfer execution to the following instruction even
    417   /// though division by zero might cause undefined behavior.
    418   bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I);
    419 
    420   /// Return true if this function can prove that the instruction I
    421   /// is executed for every iteration of the loop L.
    422   ///
    423   /// Note that this currently only considers the loop header.
    424   bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
    425                                               const Loop *L);
    426 
    427   /// Return true if this function can prove that I is guaranteed to yield
    428   /// full-poison (all bits poison) if at least one of its operands are
    429   /// full-poison (all bits poison).
    430   ///
    431   /// The exact rules for how poison propagates through instructions have
    432   /// not been settled as of 2015-07-10, so this function is conservative
    433   /// and only considers poison to be propagated in uncontroversial
    434   /// cases. There is no attempt to track values that may be only partially
    435   /// poison.
    436   bool propagatesFullPoison(const Instruction *I);
    437 
    438   /// Return either nullptr or an operand of I such that I will trigger
    439   /// undefined behavior if I is executed and that operand has a full-poison
    440   /// value (all bits poison).
    441   const Value *getGuaranteedNonFullPoisonOp(const Instruction *I);
    442 
    443   /// Return true if this function can prove that if PoisonI is executed
    444   /// and yields a full-poison value (all bits poison), then that will
    445   /// trigger undefined behavior.
    446   ///
    447   /// Note that this currently only considers the basic block that is
    448   /// the parent of I.
    449   bool programUndefinedIfFullPoison(const Instruction *PoisonI);
    450 
  /// \brief Specific patterns of select instructions we can match.
  enum SelectPatternFlavor {
    SPF_UNKNOWN = 0,
    SPF_SMIN,                   ///< Signed minimum
    SPF_UMIN,                   ///< Unsigned minimum
    SPF_SMAX,                   ///< Signed maximum
    SPF_UMAX,                   ///< Unsigned maximum
    SPF_FMINNUM,                ///< Floating point minnum
    SPF_FMAXNUM,                ///< Floating point maxnum
    SPF_ABS,                    ///< Absolute value
    SPF_NABS                    ///< Negated absolute value
  };
  /// \brief Behavior when a floating point min/max is given one NaN and one
  /// non-NaN as input.
  enum SelectPatternNaNBehavior {
    SPNB_NA = 0,                ///< NaN behavior not applicable.
    SPNB_RETURNS_NAN,           ///< Given one NaN input, returns the NaN.
    SPNB_RETURNS_OTHER,         ///< Given one NaN input, returns the non-NaN.
    SPNB_RETURNS_ANY            ///< Given one NaN input, can return either (or
                                ///< it has been determined that no operands can
                                ///< be NaN).
  };
    473   struct SelectPatternResult {
    474     SelectPatternFlavor Flavor;
    475     SelectPatternNaNBehavior NaNBehavior; /// Only applicable if Flavor is
    476                                           /// SPF_FMINNUM or SPF_FMAXNUM.
    477     bool Ordered;               /// When implementing this min/max pattern as
    478                                 /// fcmp; select, does the fcmp have to be
    479                                 /// ordered?
    480 
    481     /// \brief Return true if \p SPF is a min or a max pattern.
    482     static bool isMinOrMax(SelectPatternFlavor SPF) {
    483       return !(SPF == SPF_UNKNOWN || SPF == SPF_ABS || SPF == SPF_NABS);
    484     }
    485   };
    486   /// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind
    487   /// and providing the out parameter results if we successfully match.
    488   ///
    489   /// If CastOp is not nullptr, also match MIN/MAX idioms where the type does
    490   /// not match that of the original select. If this is the case, the cast
    491   /// operation (one of Trunc,SExt,Zext) that must be done to transform the
    492   /// type of LHS and RHS into the type of V is returned in CastOp.
    493   ///
    494   /// For example:
    495   ///   %1 = icmp slt i32 %a, i32 4
    496   ///   %2 = sext i32 %a to i64
    497   ///   %3 = select i1 %1, i64 %2, i64 4
    498   ///
    499   /// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
    500   ///
    501   SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
    502                                          Instruction::CastOps *CastOp = nullptr);
    503   static inline SelectPatternResult
    504   matchSelectPattern(const Value *V, const Value *&LHS, const Value *&RHS,
    505                      Instruction::CastOps *CastOp = nullptr) {
    506     Value *L = const_cast<Value*>(LHS);
    507     Value *R = const_cast<Value*>(RHS);
    508     auto Result = matchSelectPattern(const_cast<Value*>(V), L, R);
    509     LHS = L;
    510     RHS = R;
    511     return Result;
    512   }
    513 
    514   /// Return true if RHS is known to be implied true by LHS.  Return false if
    515   /// RHS is known to be implied false by LHS.  Otherwise, return None if no
    516   /// implication can be made.
    517   /// A & B must be i1 (boolean) values or a vector of such values. Note that
    518   /// the truth table for implication is the same as <=u on i1 values (but not
    519   /// <=s!).  The truth table for both is:
    520   ///    | T | F (B)
    521   ///  T | T | F
    522   ///  F | T | T
    523   /// (A)
    524   Optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
    525                                     const DataLayout &DL,
    526                                     bool InvertAPred = false,
    527                                     unsigned Depth = 0,
    528                                     AssumptionCache *AC = nullptr,
    529                                     const Instruction *CxtI = nullptr,
    530                                     const DominatorTree *DT = nullptr);
    531 } // end namespace llvm
    532 
    533 #endif
    534