//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <algorithm>
#include <cctype>
#include <cstring>

using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() {
}

const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
                                             const MCAsmInfo &MAI) const {
  // Count the number of instructions in the asm.
  bool atInsnStart = true;
  unsigned InstCount = 0;
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      atInsnStart = true;
    } else if (strncmp(Str, MAI.getCommentString(),
                       strlen(MAI.getCommentString())) == 0) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      atInsnStart = false;
    }

    if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      ++InstCount;
      atInsnStart = false;
    }
  }

  return InstCount * MAI.getMaxInstLength();
}

/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    InsertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

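/// Default implementation of commuteInstructionImpl: swap the register
/// operands at Idx1 and Idx2, carrying the kill/undef/internal-read flags
/// along and rewriting a def that is tied to one of the commuted operands.
/// Targets with more exotic commutation rules should override this.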
MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::commuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
  unsigned Reg1 = MI.getOperand(Idx1).getReg();
  unsigned Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // If the destination is tied to either of the commuted source registers,
  // then it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getParent()->getParent();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  return CommutedMI;
}

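/// Public entry point for commuting. Either index may be
/// CommuteAnyOperandIndex, in which case findCommutedOpIndices() is asked to
/// pick a concrete pair of commutable operands before delegating to
/// commuteInstructionImpl().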
MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done by the findCommutedOpIndices()
  // call below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

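/// Bind the requested result indices to the instruction's commutable operand
/// pair (CommutableOpIdx1, CommutableOpIdx2). A result index equal to
/// CommuteAnyOperandIndex takes whichever commutable index is still free;
/// a fixed result index must name one of the commutable pair. For example,
/// given the pair (1, 2), the requests (Any, Any), (Any, 1), and (2, 1) all
/// succeed, while (1, 3) returns false.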
bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}

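/// Default implementation of findCommutedOpIndices: assume the two register
/// operands immediately after the defs (v0 = op v1, v2) are the commutable
/// pair. Targets whose commutable instructions are shaped differently must
/// override this.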
bool TargetInstrInfo::findCommutedOpIndices(MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

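/// Returns true for any terminator that has not been predicated away:
/// conditional branches always count, and any other terminator counts unless
/// it is predicable and isPredicated() reports an active predicate.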
bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}

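/// Default implementation of PredicateInstruction: copy the register,
/// immediate, or basic-block value from each operand in Pred into the
/// corresponding predicate operand slot of MI. Returns true if any predicate
/// operand was rewritten.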
bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

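/// Scan MI's memory operands for a load from a fixed stack object; on a
/// match, report the frame index and the matching MachineMemOperand.
/// hasStoreToStackSlot below is the store-side counterpart.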
bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr &MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad()) {
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
              (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}

bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr &MI,
                                          const MachineMemOperand *&MMO,
                                          int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore()) {
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
              (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}

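/// Compute the byte size and byte offset of a spill slot for SubIdx within a
/// register of class RC (the whole register if SubIdx is 0). Returns false
/// if the subregister's size or offset is not byte-aligned. On big-endian
/// targets the offset is mirrored so it remains correct within the
/// full-register slot.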
bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  if (!SubIdx) {
    Size = RC->getSize();
    Offset = 0;
    return true;
  }
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size to be consistent with
  // MCRegisterClass::getSize().
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(RC->getSize() >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = RC->getSize() - (Offset + Size);
  }
  return true;
}

void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr *TargetInstrInfo::duplicate(MachineInstr &Orig,
                                         MachineFunction &MF) const {
  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
  return MF.CloneMachineInstr(&Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              unsigned FoldIdx) {
  assert(MI.isCopy() && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

void TargetInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  llvm_unreachable("Not a MachO target");
}

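/// Fold the operands listed in Ops into a new STACKMAP/PATCHPOINT
/// instruction as frame-index references. Every requested operand must be
/// part of the stackmap's live values (at or beyond StartIdx); otherwise
/// nullptr is returned.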
static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP:
    StartIdx = 2; // Skip ID, nShadowBytes.
    break;
  case TargetOpcode::PATCHPOINT: {
    // For PatchPoint, the call args are not foldable.
    PatchPointOpers opers(&MI);
    StartIdx = opers.getVarIdx();
    break;
  }
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }

  // Return nullptr if any operands requested for folding are not foldable
  // (not part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < StartIdx)
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, metadata, or function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    MIB.addOperand(MI.getOperand(i));

  for (unsigned i = StartIdx; i < MI.getNumOperands(); ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
        MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else
      MIB.addOperand(MO);
  }
  return NewMI;
}

/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS) const {
  unsigned Flags = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI.getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS);
  }

  if (NewMI) {
    NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    const MachineFrameInfo &MFI = *MF.getFrameInfo();
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), Flags, MFI.getObjectSize(FI),
        MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!MI.isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return &*--Pos;
}

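/// Return true if both source operands of Inst are virtual registers with
/// unique defs in MBB, i.e. values the machine combiner can reassociate
/// without leaving the current trace.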
bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && TargetRegisterInfo::isVirtualRegister(Op1.getReg()))
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && TargetRegisterInfo::isVirtualRegister(Op2.getReg()))
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And they need to be in the trace (otherwise, they won't have a depth).
  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
}

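/// Check whether the instruction feeding one of Inst's sources is a
/// reassociable sibling: same opcode, reassociable operands in the same
/// block, and a result used only by Inst. Commuted is set when the sibling
/// feeds the second source operand rather than the first.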
bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned AssocOpcode = Inst.getOpcode();

  // If only one operand has the same opcode and it's the second source operand,
  // the operands must be commuted.
  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 3. The previous instruction's result must only be used by Inst.
  return MI1->getOpcode() == AssocOpcode &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return isAssociativeAndCommutative(Inst) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.

// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}

/// Return true when a code sequence can improve loop throughput.
bool
TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
  return false;
}

/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev,
    MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // This array encodes the operand index for each parameter because the
  // operands may be commuted. Each row corresponds to a pattern value,
  // and each column specifies the index of A, B, X, Y.
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  int Row;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
  default: llvm_unreachable("unexpected MachineCombinerPattern");
  }

  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
  MachineOperand &OpC = Root.getOperand(0);

  unsigned RegA = OpA.getReg();
  unsigned RegB = OpB.getReg();
  unsigned RegX = OpX.getReg();
  unsigned RegY = OpY.getReg();
  unsigned RegC = OpC.getReg();

  if (TargetRegisterInfo::isVirtualRegister(RegA))
    MRI.constrainRegClass(RegA, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegB))
    MRI.constrainRegClass(RegB, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegX))
    MRI.constrainRegClass(RegX, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegY))
    MRI.constrainRegClass(RegY, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegC))
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  unsigned NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  unsigned Opcode = Root.getOpcode();
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY));
  MachineInstrBuilder MIB2 =
      BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(true));

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);
}

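/// Expand a machine-combiner pattern into concrete instructions: find the
/// Prev instruction implied by the pattern (the unique def of Root's first
/// or second source operand) and let reassociateOps() build the replacement
/// sequence.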
void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getParent()->getParent()->getRegInfo();

  // Select the previous instruction in the sequence based on the input pattern.
  MachineInstr *Prev = nullptr;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_XA_BY:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_YB:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
    break;
  default:
    break;
  }

  assert(Prev && "Unknown pattern for machine combiner");

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}

/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    assert(MI.getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI) return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(LoadMI.memoperands_begin(), LoadMI.memoperands_end());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

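/// Conservative target-independent rematerialization test: MI must define
/// operand 0, be duplicable, neither store nor have unmodeled side effects,
/// load only from invariant memory, and touch no register whose value could
/// differ at the insertion point (constant physregs excepted).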
bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
    const MachineInstr &MI, AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  unsigned DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register.  Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
      MI.getOperand(0).getSubReg() && MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg, MF))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def.  There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

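/// Return the stack-pointer adjustment (in bytes, after alignment by the
/// frame lowering) performed by a call frame setup or destroy pseudo, with
/// the sign chosen according to the stack growth direction; any other
/// instruction yields 0.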
int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
    TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (MI.getOpcode() != FrameSetupOpcode &&
      MI.getOpcode() != FrameDestroyOpcode)
    return 0;

  int SPAdj = MI.getOperand(0).getImm();
  SPAdj = TFI->alignSPAdjust(SPAdj);

  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                               const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "misched");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

//===----------------------------------------------------------------------===//
//  SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
//  MachineInstr latency interface.
//===----------------------------------------------------------------------===//

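/// Return the number of micro-ops MI expands to according to the itinerary.
/// A negative NumMicroOps entry means the count is dynamic, and the target
/// must override this hook; without an itinerary the default is one.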
unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                         const MachineInstr &MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI.getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr &DefMI) const {
  if (DefMI.isTransient())
    return 0;
  if (DefMI.mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI.getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
  return 0;
}

unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI,
                                          unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI.mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx) const {
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI.getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

/// Both DefMI and UseMI must be valid.  By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx,
                                       const MachineInstr &UseMI,
                                       unsigned UseIdx) const {
  unsigned DefClass = DefMI.getDesc().getSchedClass();
  unsigned UseClass = UseMI.getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

/// If we can determine the operand latency from the def only, without itinerary
/// lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
    const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required
  return -1;
}

unsigned TargetInstrInfo::computeOperandLatency(
    const InstrItineraryData *ItinData, const MachineInstr &DefMI,
    unsigned DefIdx, const MachineInstr *UseMI, unsigned UseIdx) const {

  int DefLatency = computeDefOperandLatency(ItinData, DefMI);
  if (DefLatency >= 0)
    return DefLatency;

  assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail");

  int OperLatency = 0;
  if (UseMI)
    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, *UseMI, UseIdx);
  else {
    unsigned DefClass = DefMI.getDesc().getSchedClass();
    OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
  }
  if (OperLatency >= 0)
    return OperLatency;

  // No operand latency was found.
  unsigned InstrLatency = getInstrLatency(ItinData, DefMI);

  // Expected latency is the max of the stage latency and itinerary props.
  InstrLatency = std::max(InstrLatency,
                          defaultDefLatency(ItinData->SchedModel, DefMI));
  return InstrLatency;
}

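/// Decompose a REG_SEQUENCE, or a target instruction that is
/// REG_SEQUENCE-like, into its (Reg:SubReg, SubIdx) input pairs; the "-like"
/// case is delegated to the target via getRegSequenceLikeInputs().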
bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() || MI.isRegSequenceLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}

bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() || MI.isExtractSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  // Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() || MI.isInsertSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  // Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}
   1196