//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <algorithm> // std::max
#include <cctype>
#include <cstring>   // strncmp, strlen
using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() {
}

const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return 0;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return 0;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}
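
// Usage sketch (illustrative, not code from this file): a pass that wants to
// constrain a virtual register to the class required by operand OpIdx of MI
// might do:
//   const TargetRegisterClass *OpRC =
//     TII->getRegClass(MI->getDesc(), OpIdx, TRI, MF);
//   if (OpRC)
//     MRI.constrainRegClass(MI->getOperand(OpIdx).getReg(), OpRC);
// A null result means the operand has no fixed register class.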

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overloaded in the target code to do that.
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
                                             const MCAsmInfo &MAI) const {
  // Count the number of instructions in the asm.
  bool atInsnStart = true;
  unsigned Length = 0;
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0)
      atInsnStart = true;
    // Check for a comment before counting, so that a comment at the start of
    // an instruction is not itself counted as an instruction.
    if (atInsnStart && strncmp(Str, MAI.getCommentString(),
                               strlen(MAI.getCommentString())) == 0)
      atInsnStart = false;
    if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      Length += MAI.getMaxInstLength();
      atInsnStart = false;
    }
  }

  return Length;
}
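
// For example, with SeparatorString ";" and comment string "#", the string
// "mov r0, r1\n# update\nadd r0, r0, r1" counts two instructions (the
// comment line is skipped), giving an estimate of 2 * MAI.getMaxInstLength().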

/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save the debug location before erasing the instruction that owns it.
  DebugLoc DL = Tail->getDebugLoc();

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    InsertBranch(*MBB, NewDest, 0, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

// commuteInstruction - The default implementation of this method just
// exchanges the two operands returned by findCommutedOpIndices.
MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
                                                  bool NewMI) const {
  const MCInstrDesc &MCID = MI->getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI->getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return 0;
  unsigned Idx1, Idx2;
  if (!findCommutedOpIndices(MI, Idx1, Idx2)) {
    std::string msg;
    raw_string_ostream Msg(msg);
    Msg << "Don't know how to commute: " << *MI;
    report_fatal_error(Msg.str());
  }

  assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");
  unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
  unsigned Reg1 = MI->getOperand(Idx1).getReg();
  unsigned Reg2 = MI->getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI->getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI->getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI->getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI->getOperand(Idx1).isKill();
  bool Reg2IsKill = MI->getOperand(Idx2).isKill();
  // If the destination is tied to either of the commuted source registers,
  // it must be updated to refer to the other source.
  if (HasDef && Reg0 == Reg1 &&
      MI->getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI->getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI->getParent()->getParent();
    MI = MF.CloneMachineInstr(MI);
  }

  if (HasDef) {
    MI->getOperand(0).setReg(Reg0);
    MI->getOperand(0).setSubReg(SubReg0);
  }
  MI->getOperand(Idx2).setReg(Reg1);
  MI->getOperand(Idx1).setReg(Reg2);
  MI->getOperand(Idx2).setSubReg(SubReg1);
  MI->getOperand(Idx1).setSubReg(SubReg2);
  MI->getOperand(Idx2).setIsKill(Reg1IsKill);
  MI->getOperand(Idx1).setIsKill(Reg2IsKill);
  return MI;
}
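
// Illustration (ADDrr is a hypothetical two-address instruction whose def is
// tied to the first source): commuting
//   %vreg0<def,tied1> = ADDrr %vreg0<kill,tied0>, %vreg2
// swaps the sources and rewrites the tied def to match:
//   %vreg2<def,tied1> = ADDrr %vreg2<tied0>, %vreg0<kill>
// Note how the kill flag travels with the register it applies to.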

/// findCommutedOpIndices - If specified MI is commutable, return the two
/// operand indices that would swap value. Return false if the instruction
/// is not in a form which this routine understands.
bool TargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI->isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isCommutable())
    return false;
  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  SrcOpIdx1 = MCID.getNumDefs();
  SrcOpIdx2 = SrcOpIdx1 + 1;
  if (!MI->getOperand(SrcOpIdx1).isReg() ||
      !MI->getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}
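
// For a plain "v0 = op v1, v2" instruction with one def, this default yields
// SrcOpIdx1 == 1 and SrcOpIdx2 == 2. Targets whose commutable operands sit
// elsewhere (e.g. behind predicate operands) must override this hook.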

bool
TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  if (!MI->isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI->isBranch() && !MI->isBarrier())
    return true;
  if (!MI->isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(MachineInstr *MI,
                            const SmallVectorImpl<MachineOperand> &Pred) const {
  bool MadeChange = false;

  assert(!MI->isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}
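
// Example (ARM-style predication, purely illustrative): Pred typically holds
// { condition-code immediate, flags register }, e.g. { ARMCC::EQ, %CPSR }.
// The loop above copies each element of Pred into the corresponding
// predicate operand slot of MI, turning an unconditional instruction into a
// conditional one.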

bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isLoad() && (*o)->getValue())
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
  }
  return false;
}

bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
                                          const MachineMemOperand *&MMO,
                                          int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isStore() && (*o)->getValue())
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
  }
  return false;
}

void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg,
                                    unsigned SubIdx,
                                    const MachineInstr *Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool
TargetInstrInfo::produceSameValue(const MachineInstr *MI0,
                                  const MachineInstr *MI1,
                                  const MachineRegisterInfo *MRI) const {
  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr *TargetInstrInfo::duplicate(MachineInstr *Orig,
                                         MachineFunction &MF) const {
  assert(!Orig->isNotDuplicable() &&
         "Instruction cannot be duplicated");
  return MF.CloneMachineInstr(Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
                                              unsigned FoldIdx) {
  assert(MI->isCopy() && "MI must be a COPY instruction");
  if (MI->getNumOperands() != 2)
    return 0;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI->getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return 0;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : 0;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return 0;
}
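
// For instance, folding operand 0 of "%vreg0 = COPY %EAX" (with %vreg0 in
// GR32) lets the caller emit a store of %EAX to %vreg0's stack slot, while
// folding operand 1 of "%EAX = COPY %vreg0" becomes a load into %EAX. The
// register names are illustrative x86 examples only.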

bool TargetInstrInfo::
canFoldMemoryOperand(const MachineInstr *MI,
                     const SmallVectorImpl<unsigned> &Ops) const {
  return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
}

/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   int FI) const {
  unsigned Flags = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI->getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI->getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // Ask the target to do the actual folding.
  if (MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI)) {
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    const MachineFrameInfo &MFI = *MF.getFrameInfo();
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                              Flags, MFI.getObjectSize(FI),
                              MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
    return MBB->insert(MI, NewMI);
  }

  // Straight COPY may fold as load/store.
  if (!MI->isCopy() || Ops.size() != 1)
    return 0;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return 0;

  const MachineOperand &MO = MI->getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return --Pos;
}
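
// Typical caller pattern (a sketch of how a spiller might use this; the
// variable names are illustrative):
//   SmallVector<unsigned, 1> Ops;
//   Ops.push_back(OpIdx);  // Operand to rewrite as a stack slot reference.
//   if (MachineInstr *FoldMI = TII->foldMemoryOperand(MI, Ops, StackSlot)) {
//     // FoldMI has already been inserted before MI, so the original
//     // instruction can simply be removed.
//     MI->eraseFromParent();
//   }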

/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const {
  assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
  if (!NewMI) return 0;

  NewMI = MBB.insert(MI, NewMI);

  // Copy the memoperands from the load to the folded instruction.
  NewMI->setMemRefs(LoadMI->memoperands_begin(),
                    LoadMI->memoperands_end());

  return NewMI;
}

bool TargetInstrInfo::
isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetMachine &TM = MF.getTarget();
  const TargetInstrInfo &TII = *TM.getInstrInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI->getNumOperands() || !MI->getOperand(0).isReg())
    return false;
  unsigned DefReg = MI->getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register.  Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
      MI->getOperand(0).getSubReg() && MI->readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (TII.isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI->isNotDuplicable() || MI->mayStore() ||
      MI->hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI->isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI->mayLoad() && !MI->isInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg, MF))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def.  There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}
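
// Concretely (x86 opcodes used purely as illustration): "%vreg0 = MOV32ri 42"
// passes every check above (one virtual def, no register uses), while
// "%vreg0 = ADD32rr %vreg1, %vreg2" fails the final virtual-register-use
// check, since rematerializing it would extend the live ranges of %vreg1 and
// %vreg2.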

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI->isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI))
    return true;

  return false;
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetMachine *TM,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                               const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "misched");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}
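
// A target with real structural hazards would typically override
// CreateTargetHazardRecognizer to build a scoreboard from its own
// itineraries instead of accepting the issue-everything default, e.g.
// (sketch, assuming the target provides itineraries):
//   return new ScoreboardHazardRecognizer(TM->getInstrItineraryData(), DAG,
//                                         "pre-RA-sched");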

//===----------------------------------------------------------------------===//
//  SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
//  MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned
TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                const MachineInstr *MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI->getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel *SchedModel,
                                            const MachineInstr *DefMI) const {
  if (DefMI->isTransient())
    return 0;
  if (DefMI->mayLoad())
    return SchedModel->LoadLatency;
  if (isHighLatencyDef(DefMI->getOpcode()))
    return SchedModel->HighLatency;
  return 1;
}

unsigned TargetInstrInfo::
getInstrLatency(const InstrItineraryData *ItinData,
                const MachineInstr *MI,
                unsigned *PredCost) const {
  // Default to one cycle (two for loads) when there is no itinerary. However,
  // an "empty" itinerary may still have a MinLatency property, which
  // getStageLatency checks.
  if (!ItinData)
    return MI->mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI->getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr *DefMI,
                                       unsigned DefIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI->getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

/// Both DefMI and UseMI must be valid.  By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::
getOperandLatency(const InstrItineraryData *ItinData,
                  const MachineInstr *DefMI, unsigned DefIdx,
                  const MachineInstr *UseMI, unsigned UseIdx) const {
  unsigned DefClass = DefMI->getDesc().getSchedClass();
  unsigned UseClass = UseMI->getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

/// If we can determine the operand latency from the def only, without
/// itinerary lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
  const InstrItineraryData *ItinData,
  const MachineInstr *DefMI, bool FindMin) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  // Return a latency based on the itinerary properties and defining
  // instruction if possible. Some common subtargets don't require per-operand
  // latency, especially for minimum latencies.
  if (FindMin) {
    // If MinLatency is valid, call getInstrLatency. This uses Stage latency if
    // it exists before defaulting to MinLatency.
    if (ItinData->SchedModel->MinLatency >= 0)
      return getInstrLatency(ItinData, DefMI);

    // If MinLatency is invalid, OperandLatency is interpreted as MinLatency.
    // For empty itineraries, short-circuit the check and default to one cycle.
    if (ItinData->isEmpty())
      return 1;
  } else if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required
  return -1;
}
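
// In other words: with no itinerary data at all the answer comes from
// getInstrLatency; with an empty itinerary and FindMin == false it is
// defaultDefLatency (0 for transient defs, LoadLatency for loads, etc.);
// only when real per-operand itinerary entries exist must the caller fall
// through to getOperandLatency.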

/// computeOperandLatency - Compute and return the latency of the given data
/// dependent def and use when the operand indices are already known. UseMI may
/// be NULL for an unknown use.
///
/// FindMin may be set to get the minimum vs. expected latency. Minimum
/// latency is used for scheduling groups, while expected latency is for
/// instruction cost and critical path.
///
/// Depending on the subtarget's itinerary properties, this may or may not need
/// to call getOperandLatency(). For most subtargets, we don't need DefIdx or
/// UseIdx to compute min latency.
unsigned TargetInstrInfo::
computeOperandLatency(const InstrItineraryData *ItinData,
                      const MachineInstr *DefMI, unsigned DefIdx,
                      const MachineInstr *UseMI, unsigned UseIdx,
                      bool FindMin) const {

  int DefLatency = computeDefOperandLatency(ItinData, DefMI, FindMin);
  if (DefLatency >= 0)
    return DefLatency;

  assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail");

  int OperLatency = 0;
  if (UseMI)
    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
  else {
    unsigned DefClass = DefMI->getDesc().getSchedClass();
    OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
  }
  if (OperLatency >= 0)
    return OperLatency;

  // No operand latency was found.
  unsigned InstrLatency = getInstrLatency(ItinData, DefMI);

  // Expected latency is the max of the stage latency and itinerary props.
  if (!FindMin)
    InstrLatency = std::max(InstrLatency,
                            defaultDefLatency(ItinData->SchedModel, DefMI));
  return InstrLatency;
}