//===-- llvm/Target/TargetSchedule.cpp - Sched Machine Model ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a wrapper around MCSchedModel that allows the interface
// to benefit from information currently only available in TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

static cl::opt<bool> EnableSchedModel("schedmodel", cl::Hidden, cl::init(true),
  cl::desc("Use TargetSchedModel for latency lookup"));

static cl::opt<bool> EnableSchedItins("scheditins", cl::Hidden, cl::init(true),
  cl::desc("Use InstrItineraryData for latency lookup"));

bool TargetSchedModel::hasInstrSchedModel() const {
  return EnableSchedModel && SchedModel.hasInstrSchedModel();
}

bool TargetSchedModel::hasInstrItineraries() const {
  return EnableSchedItins && !InstrItins.isEmpty();
}

static unsigned gcd(unsigned Dividend, unsigned Divisor) {
  // Dividend and Divisor will be naturally swapped as needed.
  while (Divisor) {
    unsigned Rem = Dividend % Divisor;
    Dividend = Divisor;
    Divisor = Rem;
  }
  return Dividend;
}

static unsigned lcm(unsigned A, unsigned B) {
  unsigned LCM = (uint64_t(A) * B) / gcd(A, B);
  assert((LCM >= A && LCM >= B) && "LCM overflow");
  return LCM;
}

void TargetSchedModel::init(const MCSchedModel &sm,
                            const TargetSubtargetInfo *sti,
                            const TargetInstrInfo *tii) {
  SchedModel = sm;
  STI = sti;
  TII = tii;
  STI->initInstrItins(InstrItins);

  unsigned NumRes = SchedModel.getNumProcResourceKinds();
  ResourceFactors.resize(NumRes);
  ResourceLCM = SchedModel.IssueWidth;
  for (unsigned Idx = 0; Idx < NumRes; ++Idx) {
    unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits;
    if (NumUnits > 0)
      ResourceLCM = lcm(ResourceLCM, NumUnits);
  }
  MicroOpFactor = ResourceLCM / SchedModel.IssueWidth;
  for (unsigned Idx = 0; Idx < NumRes; ++Idx) {
    unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits;
    ResourceFactors[Idx] = NumUnits ? (ResourceLCM / NumUnits) : 0;
  }
}

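// Illustrative worked example (editor's sketch, not part of the upstream
// sources): for a hypothetical model with IssueWidth = 2 and two resource
// kinds providing 3 and 4 units, ResourceLCM = lcm(lcm(2, 3), 4) = 12, so
// MicroOpFactor = 12 / 2 = 6 and the per-resource factors are 12 / 3 = 4 and
// 12 / 4 = 3. Multiplying raw micro-op and resource counts by these factors
// puts them on one common integer scale, so cycle estimates can be compared
// without fractional arithmetic.
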
unsigned TargetSchedModel::getNumMicroOps(const MachineInstr *MI,
                                          const MCSchedClassDesc *SC) const {
  if (hasInstrItineraries()) {
    int UOps = InstrItins.getNumMicroOps(MI->getDesc().getSchedClass());
    return (UOps >= 0) ? UOps : TII->getNumMicroOps(&InstrItins, *MI);
  }
  if (hasInstrSchedModel()) {
    if (!SC)
      SC = resolveSchedClass(MI);
    if (SC->isValid())
      return SC->NumMicroOps;
  }
  return MI->isTransient() ? 0 : 1;
}

// The machine model may explicitly specify an invalid latency, which
// effectively means infinite latency. Since users of the TargetSchedule API
// don't know how to handle this, we convert it to a very large latency that is
// easy to distinguish when debugging the DAG but won't induce overflow.
static unsigned capLatency(int Cycles) {
  return Cycles >= 0 ? Cycles : 1000;
}

/// Return the MCSchedClassDesc for this instruction. Some SchedClasses require
/// evaluation of predicates that depend on instruction operands or flags.
const MCSchedClassDesc *TargetSchedModel::
resolveSchedClass(const MachineInstr *MI) const {
  // Get the definition's scheduling class descriptor from this machine model.
  unsigned SchedClass = MI->getDesc().getSchedClass();
  const MCSchedClassDesc *SCDesc = SchedModel.getSchedClassDesc(SchedClass);
  if (!SCDesc->isValid())
    return SCDesc;

#ifndef NDEBUG
  unsigned NIter = 0;
#endif
  while (SCDesc->isVariant()) {
    assert(++NIter < 6 && "Variants are nested deeper than the magic number");

    SchedClass = STI->resolveSchedClass(SchedClass, MI, this);
    SCDesc = SchedModel.getSchedClassDesc(SchedClass);
  }
  return SCDesc;
}

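// Illustrative usage sketch (editor's note, not part of the upstream sources):
// a client that wants per-operand data typically resolves the class once and
// then reads its entries. 'TSM' stands for a hypothetical TargetSchedModel
// instance and 'MI' for any MachineInstr the caller already holds.
//
//   const MCSchedClassDesc *SC = TSM.resolveSchedClass(MI);
//   if (SC->isValid()) {
//     unsigned UOps = SC->NumMicroOps;                   // decode cost
//     unsigned NumWrites = SC->NumWriteLatencyEntries;   // per-def latencies
//   }
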
/// Find the def index of this operand. This index maps to the machine model and
/// is independent of use operands. Def operands may be reordered with uses or
/// merged with uses without affecting the def index (e.g. before/after
/// regalloc). However, an instruction's def operands must never be reordered
/// with respect to each other.
static unsigned findDefIdx(const MachineInstr *MI, unsigned DefOperIdx) {
  unsigned DefIdx = 0;
  for (unsigned i = 0; i != DefOperIdx; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isDef())
      ++DefIdx;
  }
  return DefIdx;
}

/// Find the use index of this operand. This is independent of the instruction's
/// def operands.
///
/// Note that uses are not determined by the operand's isUse property, which
/// is simply the inverse of isDef. Here we consider any readsReg operand to be
/// a "use". The machine model allows an operand to be both a Def and Use.
static unsigned findUseIdx(const MachineInstr *MI, unsigned UseOperIdx) {
  unsigned UseIdx = 0;
  for (unsigned i = 0; i != UseOperIdx; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.readsReg())
      ++UseIdx;
  }
  return UseIdx;
}

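// Illustrative worked example (editor's sketch, not part of the upstream
// sources): for a hypothetical instruction whose operand list is
// [def0, def1, use0, use1], findDefIdx(MI, 1) == 1 because exactly one def
// precedes operand 1, and findUseIdx(MI, 3) == 1 because, assuming the defs do
// not also read their registers, only operand 2 reads a register before
// operand 3. These machine-model indices stay stable even when defs and uses
// are interleaved differently in the MI operand list.
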
// Top-level API for clients that know the operand indices.
unsigned TargetSchedModel::computeOperandLatency(
  const MachineInstr *DefMI, unsigned DefOperIdx,
  const MachineInstr *UseMI, unsigned UseOperIdx) const {

  if (!hasInstrSchedModel() && !hasInstrItineraries())
    return TII->defaultDefLatency(SchedModel, *DefMI);

  if (hasInstrItineraries()) {
    int OperLatency = 0;
    if (UseMI) {
      OperLatency = TII->getOperandLatency(&InstrItins, *DefMI, DefOperIdx,
                                           *UseMI, UseOperIdx);
    } else {
      unsigned DefClass = DefMI->getDesc().getSchedClass();
      OperLatency = InstrItins.getOperandCycle(DefClass, DefOperIdx);
    }
    if (OperLatency >= 0)
      return OperLatency;

    // No operand latency was found.
    unsigned InstrLatency = TII->getInstrLatency(&InstrItins, *DefMI);

    // Expected latency is the max of the stage latency and itinerary props.
    // Rather than directly querying InstrItins stage latency, we call a TII
    // hook to allow subtargets to specialize latency. This hook is only
    // applicable to the InstrItins model. InstrSchedModel should model all
    // special cases without TII hooks.
    InstrLatency =
        std::max(InstrLatency, TII->defaultDefLatency(SchedModel, *DefMI));
    return InstrLatency;
  }
  // hasInstrSchedModel()
  const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
  unsigned DefIdx = findDefIdx(DefMI, DefOperIdx);
  if (DefIdx < SCDesc->NumWriteLatencyEntries) {
    // Lookup the definition's write latency in SubtargetInfo.
    const MCWriteLatencyEntry *WLEntry =
      STI->getWriteLatencyEntry(SCDesc, DefIdx);
    unsigned WriteID = WLEntry->WriteResourceID;
    unsigned Latency = capLatency(WLEntry->Cycles);
    if (!UseMI)
      return Latency;

    // Lookup the use's latency adjustment in SubtargetInfo.
    const MCSchedClassDesc *UseDesc = resolveSchedClass(UseMI);
    if (UseDesc->NumReadAdvanceEntries == 0)
      return Latency;
    unsigned UseIdx = findUseIdx(UseMI, UseOperIdx);
    int Advance = STI->getReadAdvanceCycles(UseDesc, UseIdx, WriteID);
    if (Advance > 0 && (unsigned)Advance > Latency) // unsigned wrap
      return 0;
    return Latency - Advance;
  }
  // If DefIdx does not exist in the model (e.g. implicit defs), then return
  // unit latency (defaultDefLatency may be too conservative).
#ifndef NDEBUG
  if (SCDesc->isValid() && !DefMI->getOperand(DefOperIdx).isImplicit()
      && !DefMI->getDesc().OpInfo[DefOperIdx].isOptionalDef()
      && SchedModel.isComplete()) {
    errs() << "DefIdx " << DefIdx << " exceeds machine model writes for "
           << *DefMI << " (Try with MCSchedModel.CompleteModel set to false)";
    llvm_unreachable("incomplete machine model");
  }
#endif
  // FIXME: Automatically giving all implicit defs defaultDefLatency is
  // undesirable. We should only do it for defs that are known to the MC
  // desc like flags. Truly implicit defs should get 1 cycle latency.
  return DefMI->isTransient() ? 0 : TII->defaultDefLatency(SchedModel, *DefMI);
}

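// Illustrative usage sketch (editor's note, not part of the upstream sources):
// a scheduler client building a data-dependence edge might query the model as
// below. 'TSM' is a hypothetical TargetSchedModel instance and 'Dep' a
// hypothetical SDep-like edge object held by the caller.
//
//   unsigned Latency =
//       TSM.computeOperandLatency(DefMI, DefOperIdx, UseMI, UseOperIdx);
//   Dep.setLatency(Latency);
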
unsigned
TargetSchedModel::computeInstrLatency(const MCSchedClassDesc &SCDesc) const {
  unsigned Latency = 0;
  for (unsigned DefIdx = 0, DefEnd = SCDesc.NumWriteLatencyEntries;
       DefIdx != DefEnd; ++DefIdx) {
    // Lookup the definition's write latency in SubtargetInfo.
    const MCWriteLatencyEntry *WLEntry =
      STI->getWriteLatencyEntry(&SCDesc, DefIdx);
    Latency = std::max(Latency, capLatency(WLEntry->Cycles));
  }
  return Latency;
}

unsigned TargetSchedModel::computeInstrLatency(unsigned Opcode) const {
  assert(hasInstrSchedModel() && "Only call this function with a SchedModel");

  unsigned SCIdx = TII->get(Opcode).getSchedClass();
  const MCSchedClassDesc *SCDesc = SchedModel.getSchedClassDesc(SCIdx);

  if (SCDesc->isValid() && !SCDesc->isVariant())
    return computeInstrLatency(*SCDesc);

  llvm_unreachable("No MI sched latency");
}

unsigned
TargetSchedModel::computeInstrLatency(const MachineInstr *MI,
                                      bool UseDefaultDefLatency) const {
  // For the itinerary model, fall back to the old subtarget hook.
  // Allow subtargets to compute Bundle latencies outside the machine model.
  if (hasInstrItineraries() || MI->isBundle() ||
      (!hasInstrSchedModel() && !UseDefaultDefLatency))
    return TII->getInstrLatency(&InstrItins, *MI);

  if (hasInstrSchedModel()) {
    const MCSchedClassDesc *SCDesc = resolveSchedClass(MI);
    if (SCDesc->isValid())
      return computeInstrLatency(*SCDesc);
  }
  return TII->defaultDefLatency(SchedModel, *MI);
}

unsigned TargetSchedModel::
computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
                     const MachineInstr *DepMI) const {
  if (!SchedModel.isOutOfOrder())
    return 1;

  // An out-of-order processor can dispatch WAW dependencies in the same cycle.

  // Treat predication as a data dependency for out-of-order CPUs. In-order
  // CPUs do not need to treat predicated writes specially.
  //
  // TODO: The following hack exists because predication passes do not
  // correctly append imp-use operands, and readsReg() strangely returns false
  // for predicated defs.
  unsigned Reg = DefMI->getOperand(DefOperIdx).getReg();
  const MachineFunction &MF = *DefMI->getParent()->getParent();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!DepMI->readsRegister(Reg, TRI) && TII->isPredicated(*DepMI))
    return computeInstrLatency(DefMI);

  // If we have a per-operand scheduling model, check whether this def writes
  // an unbuffered resource. If so, it is treated like an in-order CPU.
  if (hasInstrSchedModel()) {
    const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
    if (SCDesc->isValid()) {
      for (const MCWriteProcResEntry *PRI = STI->getWriteProcResBegin(SCDesc),
             *PRE = STI->getWriteProcResEnd(SCDesc); PRI != PRE; ++PRI) {
        if (!SchedModel.getProcResource(PRI->ProcResourceIdx)->BufferSize)
          return 1;
      }
    }
  }
  return 0;
}
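
// Illustrative usage sketch (editor's note, not part of the upstream sources):
// when adding an output (WAW) dependence edge, a scheduler client might use
// the result directly as the edge latency; 'TSM' and 'OutDep' are hypothetical
// names for the caller's TargetSchedModel instance and dependence edge.
//
//   unsigned OutLat = TSM.computeOutputLatency(DefMI, DefOperIdx, DepMI);
//   OutDep.setLatency(OutLat);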
    301