//===- AggressiveInstCombine.cpp ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the aggressive expression pattern combiner classes.
// Currently, it handles expression patterns for:
//  * Truncate instruction
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h"
#include "AggressiveInstCombineInternal.h"
#include "llvm-c/Initialization.h"
#include "llvm-c/Transforms/Scalar.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "aggressive-instcombine"

namespace {
/// Contains expression pattern combiner logic.
/// This class provides the logic to match and combine expression patterns.
/// It differs from the InstCombiner class in that each pattern combiner runs
/// only once, as opposed to InstCombine's multi-iteration approach, which
/// allows a pattern combiner to have higher complexity than the O(1) cost
/// required by the instruction combiner.
class AggressiveInstCombinerLegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  AggressiveInstCombinerLegacyPass() : FunctionPass(ID) {
    initializeAggressiveInstCombinerLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Run all expression pattern optimizations on the given function \p F.
  ///
  /// \param F function to optimize.
  /// \returns true if the IR is changed.
  bool runOnFunction(Function &F) override;
};
} // namespace

/// This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and
/// the bit indexes (Mask) needed by a masked compare. If we're matching a chain
/// of 'and' ops, then we also need to capture the fact that we saw an
/// "and X, 1", so that's an extra return value for that case.
struct MaskOps {
  Value *Root;
  APInt Mask;
  bool MatchAndChain;
  bool FoundAnd1;

  MaskOps(unsigned BitWidth, bool MatchAnds) :
      Root(nullptr), Mask(APInt::getNullValue(BitWidth)),
      MatchAndChain(MatchAnds), FoundAnd1(false) {}
};

/// This is a recursive helper for foldAnyOrAllBitsSet() that walks through a
/// chain of 'and' or 'or' instructions looking for shift ops of a common source
/// value. Examples:
///   or (or (or X, (X >> 3)), (X >> 5)), (X >> 8)
/// returns { X, 0x129 }
///   and (and (X >> 1), 1), (X >> 4)
/// returns { X, 0x12 }
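///
/// As an illustrative sketch (value names assumed, not from this file), the
/// first example corresponds to IR along the lines of:
///   %s3 = lshr i32 %x, 3
///   %o1 = or i32 %x, %s3
///   %s5 = lshr i32 %x, 5
///   %o2 = or i32 %o1, %s5
///   %s8 = lshr i32 %x, 8
///   %o3 = or i32 %o2, %s8
/// Walking this chain sets bits 0, 3, 5, and 8 in MOps.Mask (0x129) and
/// leaves MOps.Root == %x.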
static bool matchAndOrChain(Value *V, MaskOps &MOps) {
  Value *Op0, *Op1;
  if (MOps.MatchAndChain) {
    // Recurse through a chain of 'and' operands. This requires an extra check
    // vs. the 'or' matcher: we must find an "and X, 1" instruction somewhere
    // in the chain to know that all of the high bits are cleared.
    if (match(V, m_And(m_Value(Op0), m_One()))) {
      MOps.FoundAnd1 = true;
      return matchAndOrChain(Op0, MOps);
    }
    if (match(V, m_And(m_Value(Op0), m_Value(Op1))))
      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
  } else {
    // Recurse through a chain of 'or' operands.
    if (match(V, m_Or(m_Value(Op0), m_Value(Op1))))
      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
  }

  // We need a shift-right or a bare value representing a compare of bit 0 of
  // the original source operand.
  Value *Candidate;
  uint64_t BitIndex = 0;
  if (!match(V, m_LShr(m_Value(Candidate), m_ConstantInt(BitIndex))))
    Candidate = V;

  // Initialize result source operand.
  if (!MOps.Root)
    MOps.Root = Candidate;

  // If the shift constant is out of range, this code hasn't been simplified;
  // bail out.
  if (BitIndex >= MOps.Mask.getBitWidth())
    return false;

  // Fill in the mask bit derived from the shift constant.
  MOps.Mask.setBit(BitIndex);
  return MOps.Root == Candidate;
}

/// Match patterns that correspond to "any-bits-set" and "all-bits-set".
/// These will include a chain of 'or' or 'and'-shifted bits from a
/// common source value:
/// and (or  (lshr X, C), ...), 1 --> (X & CMask) != 0
/// and (and (lshr X, C), ...), 1 --> (X & CMask) == CMask
/// Note: "any-bits-clear" and "all-bits-clear" are variations of these patterns
/// that differ only with a final 'not' of the result. We expect that final
/// 'not' to be folded with the compare that we create here (invert predicate).
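///
/// As a minimal before/after sketch (assumed value names, not taken from the
/// tests), the 'any-bits-set' form
///   %t0 = lshr i32 %x, 3
///   %t1 = or i32 %x, %t0
///   %t2 = and i32 %t1, 1
/// is rewritten to
///   %m = and i32 %x, 9        ; CMask has bits 0 and 3 set
///   %c = icmp ne i32 %m, 0
///   %r = zext i1 %c to i32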
static bool foldAnyOrAllBitsSet(Instruction &I) {
  // The 'any-bits-set' ('or' chain) pattern is simpler to match because the
  // "and X, 1" instruction must be the final op in the sequence.
  bool MatchAllBitsSet;
  if (match(&I, m_c_And(m_OneUse(m_And(m_Value(), m_Value())), m_Value())))
    MatchAllBitsSet = true;
  else if (match(&I, m_And(m_OneUse(m_Or(m_Value(), m_Value())), m_One())))
    MatchAllBitsSet = false;
  else
    return false;

  MaskOps MOps(I.getType()->getScalarSizeInBits(), MatchAllBitsSet);
  if (MatchAllBitsSet) {
    if (!matchAndOrChain(cast<BinaryOperator>(&I), MOps) || !MOps.FoundAnd1)
      return false;
  } else {
    if (!matchAndOrChain(cast<BinaryOperator>(&I)->getOperand(0), MOps))
      return false;
  }

  // The pattern was found. Create a masked compare that replaces all of the
  // shift and logic ops.
  IRBuilder<> Builder(&I);
  Constant *Mask = ConstantInt::get(I.getType(), MOps.Mask);
  Value *And = Builder.CreateAnd(MOps.Root, Mask);
  Value *Cmp = MatchAllBitsSet ? Builder.CreateICmpEQ(And, Mask) :
                                 Builder.CreateIsNotNull(And);
  Value *Zext = Builder.CreateZExt(Cmp, I.getType());
  I.replaceAllUsesWith(Zext);
  return true;
}

/// This is the entry point for folds that could be implemented in regular
/// InstCombine, but they are separated because they are not expected to
/// occur frequently and/or have more than a constant-length pattern match.
static bool foldUnusualPatterns(Function &F, DominatorTree &DT) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    // Ignore unreachable basic blocks.
    if (!DT.isReachableFromEntry(&BB))
      continue;
    // Do not delete instructions in this loop; that would invalidate the
    // iterator.
    // Walk the block backwards for efficiency. We're matching a chain of
    // use->defs, so we're more likely to succeed by starting from the bottom.
    // Also, we want to avoid matching partial patterns.
    // TODO: It would be more efficient if we removed dead instructions
    // iteratively in this loop rather than waiting until the end.
    for (Instruction &I : make_range(BB.rbegin(), BB.rend()))
      MadeChange |= foldAnyOrAllBitsSet(I);
  }

  // We're done with transforms, so remove dead instructions.
  if (MadeChange)
    for (BasicBlock &BB : F)
      SimplifyInstructionsInBlock(&BB);

  return MadeChange;
}

/// This is the entry point for all transforms. Pass manager differences are
/// handled in the callers of this function.
static bool runImpl(Function &F, TargetLibraryInfo &TLI, DominatorTree &DT) {
  bool MadeChange = false;
  const DataLayout &DL = F.getParent()->getDataLayout();
  TruncInstCombine TIC(TLI, DL, DT);
  MadeChange |= TIC.run(F);
  MadeChange |= foldUnusualPatterns(F, DT);
  return MadeChange;
}

void AggressiveInstCombinerLegacyPass::getAnalysisUsage(
    AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addPreserved<BasicAAWrapperPass>();
  AU.addPreserved<DominatorTreeWrapperPass>();
  AU.addPreserved<GlobalsAAWrapperPass>();
}

bool AggressiveInstCombinerLegacyPass::runOnFunction(Function &F) {
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  return runImpl(F, TLI, DT);
}

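// A minimal usage sketch (assumed, not part of this file): with the new pass
// manager, this pass is typically scheduled as a function pass, e.g.
//   FunctionPassManager FPM;
//   FPM.addPass(AggressiveInstCombinePass());
//   FPM.run(F, FAM); // FAM: a FunctionAnalysisManager with the required
//                    // analyses (TargetLibraryAnalysis, DominatorTreeAnalysis)
//                    // registered.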
PreservedAnalyses AggressiveInstCombinePass::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  if (!runImpl(F, TLI, DT)) {
    // No changes, all analyses are preserved.
    return PreservedAnalyses::all();
  }
  // Mark all the analyses that instcombine updates as preserved.
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<AAManager>();
  PA.preserve<GlobalsAA>();
  return PA;
}

char AggressiveInstCombinerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(AggressiveInstCombinerLegacyPass,
                      "aggressive-instcombine",
                      "Combine pattern based expressions", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(AggressiveInstCombinerLegacyPass, "aggressive-instcombine",
                    "Combine pattern based expressions", false, false)

// Initialization Routines
void llvm::initializeAggressiveInstCombine(PassRegistry &Registry) {
  initializeAggressiveInstCombinerLegacyPassPass(Registry);
}

void LLVMInitializeAggressiveInstCombiner(LLVMPassRegistryRef R) {
  initializeAggressiveInstCombinerLegacyPassPass(*unwrap(R));
}

FunctionPass *llvm::createAggressiveInstCombinerPass() {
  return new AggressiveInstCombinerLegacyPass();
}

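// A minimal usage sketch of the C API wrapper below (assumed, not part of
// this file):
//   LLVMPassManagerRef PM = LLVMCreateFunctionPassManagerForModule(M);
//   LLVMAddAggressiveInstCombinerPass(PM);
//   LLVMInitializeFunctionPassManager(PM);
//   LLVMRunFunctionPassManager(PM, F);
//   LLVMFinalizeFunctionPassManager(PM);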
void LLVMAddAggressiveInstCombinerPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createAggressiveInstCombinerPass());
}
    258