//===--- AArch64StorePairSuppress.cpp --- Suppress store pair formation ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass identifies floating point stores that should not be combined into
// store pairs. Later we may do the same for floating point loads.
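//
// Suppression only marks a store's MachineMemOperand (via
// AArch64InstrInfo::suppressLdStPair); no instructions are rewritten here, and
// the downstream load/store pairing optimization is expected to skip the
// flagged stores.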
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-stp-suppress"

namespace {
class AArch64StorePairSuppress : public MachineFunctionPass {
  const AArch64InstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const MachineRegisterInfo *MRI;
  TargetSchedModel SchedModel;
  MachineTraceMetrics *Traces;
  MachineTraceMetrics::Ensemble *MinInstr;

public:
  static char ID;
  AArch64StorePairSuppress() : MachineFunctionPass(ID) {}

  const char *getPassName() const override {
    return "AArch64 Store Pair Suppression";
  }

  bool runOnMachineFunction(MachineFunction &F) override;

private:
  bool shouldAddSTPToBlock(const MachineBasicBlock *BB);

  bool isNarrowFPStore(const MachineInstr &MI);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<MachineTraceMetrics>();
    AU.addPreserved<MachineTraceMetrics>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};
char AArch64StorePairSuppress::ID = 0;
} // anonymous

FunctionPass *llvm::createAArch64StorePairSuppressPass() {
  return new AArch64StorePairSuppress();
}

/// Return true if an STP can be added to this block without increasing the
/// critical resource height. STP is good to form in Ld/St limited blocks and
/// bad to form in floating-point limited blocks. This is true independent of
/// the critical path. If the critical path is longer than the resource height,
/// the extra vector ops can limit physreg renaming. Otherwise, it could simply
/// oversaturate the vector units.
bool AArch64StorePairSuppress::shouldAddSTPToBlock(const MachineBasicBlock *BB) {
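  // Lazily build the minimum-instruction-count trace ensemble the first time a
  // block is queried; runOnMachineFunction() resets it for each new function.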
  if (!MinInstr)
    MinInstr = Traces->getEnsemble(MachineTraceMetrics::TS_MinInstrCount);

  MachineTraceMetrics::Trace BBTrace = MinInstr->getTrace(BB);
  unsigned ResLength = BBTrace.getResourceLength();

  // Get the machine model's scheduling class for STPDi.
  // Bypass TargetSchedule's SchedClass resolution since we only have an opcode.
  unsigned SCIdx = TII->get(AArch64::STPDi).getSchedClass();
  const MCSchedClassDesc *SCDesc =
      SchedModel.getMCSchedModel()->getSchedClassDesc(SCIdx);

  // If a subtarget does not define resources for STPDi, bail here.
  if (SCDesc->isValid() && !SCDesc->isVariant()) {
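    // Recompute the trace's resource length as if one extra instruction with
    // the STP scheduling class were issued in this block.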
    unsigned ResLenWithSTP = BBTrace.getResourceLength(None, SCDesc);
    if (ResLenWithSTP > ResLength) {
      DEBUG(dbgs() << "  Suppress STP in BB: " << BB->getNumber()
                   << " resources " << ResLength << " -> " << ResLenWithSTP
                   << "\n");
      return false;
    }
  }
  return true;
}

/// Return true if this is a floating-point store smaller than the V reg. On
/// Cyclone, these require a vector shuffle before storing a pair.
/// Ideally we would call getMatchingPairOpcode() and have the machine model
/// tell us if it's profitable with no CPU knowledge here.
///
/// FIXME: We plan to develop a decent Target abstraction for simple loads and
/// stores. Until then use a nasty switch similar to AArch64LoadStoreOptimizer.
bool AArch64StorePairSuppress::isNarrowFPStore(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
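  // 32-bit (S) and 64-bit (D) scalar FP stores, in both the scaled and
  // unscaled addressing forms.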
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STURSi:
  case AArch64::STURDi:
    return true;
  }
}

bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
  const TargetSubtargetInfo &ST = MF.getSubtarget();
  TII = static_cast<const AArch64InstrInfo *>(ST.getInstrInfo());
  TRI = ST.getRegisterInfo();
  MRI = &MF.getRegInfo();
  SchedModel.init(ST.getSchedModel(), &ST, TII);
  Traces = &getAnalysis<MachineTraceMetrics>();
  MinInstr = nullptr;

  DEBUG(dbgs() << "*** " << getPassName() << ": " << MF.getName() << '\n');

  if (!SchedModel.hasInstrSchedModel()) {
    DEBUG(dbgs() << "  Skipping pass: no machine model present.\n");
    return false;
  }

  // Check for a sequence of stores to the same base address. We don't need to
  // precisely determine whether a store pair can be formed. But we do want to
  // filter out most situations where we can't form store pairs to avoid
  // computing trace metrics in those cases.
  for (auto &MBB : MF) {
    bool SuppressSTP = false;
    unsigned PrevBaseReg = 0;
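    // Remember the base register of the previous narrow FP store; a second
    // narrow store off the same base marks a likely store pair candidate.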
    for (auto &MI : MBB) {
      if (!isNarrowFPStore(MI))
        continue;
      unsigned BaseReg;
      unsigned Offset;
      if (TII->getMemOpBaseRegImmOfs(&MI, BaseReg, Offset, TRI)) {
        if (PrevBaseReg == BaseReg) {
          // If this block can take STPs, skip ahead to the next block.
          if (!SuppressSTP && shouldAddSTPToBlock(MI.getParent()))
            break;
          // Otherwise, continue unpairing the stores in this block.
          DEBUG(dbgs() << "Unpairing store " << MI << "\n");
          SuppressSTP = true;
          TII->suppressLdStPair(&MI);
        }
        PrevBaseReg = BaseReg;
      } else
        PrevBaseReg = 0;
    }
  }
  // This pass just sets some internal MachineMemOperand flags. It can't really
  // invalidate anything.
  return false;
}