//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <algorithm> // std::find, std::max
#include <cctype>
#include <cstring>   // strncmp, strlen
using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() {
}

const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
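///
/// For example, assuming MaxInstLength is 4 and the target's comment string
/// is ";", the string "jmp foo\n; fix up\nnop" measures as two instructions,
/// giving a conservative estimate of 8 bytes.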
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
                                             const MCAsmInfo &MAI) const {
  // Count the number of instructions in the asm.
  bool atInsnStart = true;
  unsigned Length = 0;
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0)
      atInsnStart = true;
    // Check for a comment before counting the instruction, so that comment
    // text is never measured as an instruction.
    if (atInsnStart && strncmp(Str, MAI.getCommentString(),
                               strlen(MAI.getCommentString())) == 0)
      atInsnStart = false;
    if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      Length += MAI.getMaxInstLength();
      atInsnStart = false;
    }
  }

  return Length;
}

/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If NewDest isn't the layout successor of MBB, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    InsertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(),
                 Tail->getDebugLoc());
  MBB->addSuccessor(NewDest);
}

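/// Default commuting: swap the two given register operands, transferring
/// their sub-registers and kill/undef/internal-read flags, and retarget a
/// tied def if one of the sources was tied to it. For example (illustrative
/// opcode and vreg numbers), commuting
///   %vreg2 = XOR %vreg0, %vreg1
/// with Idx1 = 1 and Idx2 = 2 yields
///   %vreg2 = XOR %vreg1, %vreg0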
MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr *MI,
                                                      bool NewMI,
                                                      unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI->getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI->getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::commuteInstructionImpl(): not commutable operands.");
  assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
  unsigned Reg1 = MI->getOperand(Idx1).getReg();
  unsigned Reg2 = MI->getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI->getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI->getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI->getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI->getOperand(Idx1).isKill();
  bool Reg2IsKill = MI->getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI->getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI->getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI->getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI->getOperand(Idx2).isInternalRead();
  // If the destination is tied to either of the commuted source registers,
  // it must be updated as well.
  if (HasDef && Reg0 == Reg1 &&
      MI->getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI->getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI->getParent()->getParent();
    MI = MF.CloneMachineInstr(MI);
  }

  if (HasDef) {
    MI->getOperand(0).setReg(Reg0);
    MI->getOperand(0).setSubReg(SubReg0);
  }
  MI->getOperand(Idx2).setReg(Reg1);
  MI->getOperand(Idx1).setReg(Reg2);
  MI->getOperand(Idx2).setSubReg(SubReg1);
  MI->getOperand(Idx1).setSubReg(SubReg2);
  MI->getOperand(Idx2).setIsKill(Reg1IsKill);
  MI->getOperand(Idx1).setIsKill(Reg2IsKill);
  MI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  MI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  MI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  MI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  return MI;
}

MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
                                                  bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, this method is free to choose any
  // commutable operand; the choice is made by findCommutedOpIndices(),
  // called below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI->isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

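/// Resolve a pair of requested commutation indices against the pair of
/// operand indices that are actually commutable. For example, if operands 1
/// and 2 are commutable, a query of (CommuteAnyOperandIndex, 2) resolves
/// ResultIdx1 to 1, while a fully specified query (1, 2) is simply verified.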
bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}

bool TargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI->isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI->getOperand(SrcOpIdx1).isReg() ||
      !MI->getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool
TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  if (!MI->isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI->isBranch() && !MI->isBarrier())
    return true;
  if (!MI->isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(
    MachineInstr *MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI->isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isLoad()) {
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
              (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}

bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
                                          const MachineMemOperand *&MMO,
                                          int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isStore()) {
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
              (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}

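/// Compute the byte size and byte offset of a spill slot for one sub-register
/// of RC. For example, a 64-bit sub-register at bit offset 64 of a 16-byte
/// register class yields Size = 8 and Offset = 8 on a little-endian target;
/// on a big-endian target the offset flips to 0.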
bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  if (!SubIdx) {
    Size = RC->getSize();
    Offset = 0;
    return true;
  }
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size to be consistent with
  // MCRegisterClass::getSize().
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(RC->getSize() >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = RC->getSize() - (Offset + Size);
  }
  return true;
}

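/// The default rematerialization: clone Orig, retarget its definition to
/// DestReg:SubIdx, and insert the clone before I.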
void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg,
                                    unsigned SubIdx,
                                    const MachineInstr *Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool
TargetInstrInfo::produceSameValue(const MachineInstr *MI0,
                                  const MachineInstr *MI1,
                                  const MachineRegisterInfo *MRI) const {
  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr *TargetInstrInfo::duplicate(MachineInstr *Orig,
                                         MachineFunction &MF) const {
  assert(!Orig->isNotDuplicable() &&
         "Instruction cannot be duplicated");
  return MF.CloneMachineInstr(Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
                                              unsigned FoldIdx) {
  assert(MI->isCopy() && "MI must be a COPY instruction");
  if (MI->getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI->getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

void TargetInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  llvm_unreachable("Not a MachO target");
}

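// Fold stackmap/patchpoint operands onto a stack slot: each requested
// live-value operand is rewritten as the four-operand indirect form
// (IndirectMemRefOp, SpillSize, FrameIndex, SpillOffset).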
static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  switch (MI->getOpcode()) {
  case TargetOpcode::STACKMAP:
    StartIdx = 2; // Skip ID, nShadowBytes.
    break;
  case TargetOpcode::PATCHPOINT: {
    // For PatchPoint, the call args are not foldable.
    PatchPointOpers opers(MI);
    StartIdx = opers.getVarIdx();
    break;
  }
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }

  // Return nullptr if any operands requested for folding are not foldable (not
  // part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < StartIdx)
      return nullptr;
  }

  MachineInstr *NewMI =
    MF.CreateMachineInstr(TII.get(MI->getOpcode()), MI->getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the metadata, or the function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    MIB.addOperand(MI->getOperand(i));

  for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
        MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else
      MIB.addOperand(MO);
  }
  return NewMI;
}

/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                                 ArrayRef<unsigned> Ops,
                                                 int FI) const {
  unsigned Flags = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI->getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI->getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  MachineInstr *NewMI = nullptr;

  if (MI->getOpcode() == TargetOpcode::STACKMAP ||
      MI->getOpcode() == TargetOpcode::PATCHPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI);
  }

  if (NewMI) {
    NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    const MachineFrameInfo &MFI = *MF.getFrameInfo();
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), Flags, MFI.getObjectSize(FI),
        MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!MI->isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI->getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return --Pos;
}

bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && TargetRegisterInfo::isVirtualRegister(Op1.getReg()))
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && TargetRegisterInfo::isVirtualRegister(Op2.getReg()))
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And they need to be in the trace (otherwise, they won't have a depth).
  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
}

bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned AssocOpcode = Inst.getOpcode();

  // If only one operand has the same opcode and it's the second source operand,
  // the operands must be commuted.
  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 3. The previous instruction's result must only be used by Inst.
  return MI1->getOpcode() == AssocOpcode &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return isAssociativeAndCommutative(Inst) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.

// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
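// The REASSOC_* pattern names encode operand order: in REASSOC_AX_BY, Prev
// computes (A op X) and Root computes (B op Y), where B is Prev's result;
// the swapped spellings (XA_*, *_YB) mark the corresponding operands as
// commuted.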
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {

  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}

/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev,
    MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // This array encodes the operand index for each parameter because the
  // operands may be commuted. Each row corresponds to a pattern value,
  // and each column specifies the index of A, B, X, Y.
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  int Row;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
  default: llvm_unreachable("unexpected MachineCombinerPattern");
  }

  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
  MachineOperand &OpC = Root.getOperand(0);

  unsigned RegA = OpA.getReg();
  unsigned RegB = OpB.getReg();
  unsigned RegX = OpX.getReg();
  unsigned RegY = OpY.getReg();
  unsigned RegC = OpC.getReg();

  if (TargetRegisterInfo::isVirtualRegister(RegA))
    MRI.constrainRegClass(RegA, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegB))
    MRI.constrainRegClass(RegB, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegX))
    MRI.constrainRegClass(RegX, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegY))
    MRI.constrainRegClass(RegY, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegC))
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  unsigned NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  unsigned Opcode = Root.getOpcode();
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY));
  MachineInstrBuilder MIB2 =
      BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(true));

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);
}

void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getParent()->getParent()->getRegInfo();

  // Select the previous instruction in the sequence based on the input pattern.
  MachineInstr *Prev = nullptr;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_XA_BY:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_YB:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
    break;
  default:
    break;
  }

  assert(Prev && "Unknown pattern for machine combiner");

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}

/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr *LoadMI) const {
  assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI->getOpcode() == TargetOpcode::STACKMAP ||
       MI->getOpcode() == TargetOpcode::PATCHPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI);
  }

  if (!NewMI) return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI->memoperands_empty()) {
    NewMI->setMemRefs(LoadMI->memoperands_begin(),
                      LoadMI->memoperands_end());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MI->memoperands_begin(),
                      MI->memoperands_end());
    for (MachineInstr::mmo_iterator I = LoadMI->memoperands_begin(),
           E = LoadMI->memoperands_end(); I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

bool TargetInstrInfo::
isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI->getNumOperands() || !MI->getOperand(0).isReg())
    return false;
  unsigned DefReg = MI->getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register.  Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
      MI->getOperand(0).getSubReg() && MI->readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI->isNotDuplicable() || MI->mayStore() ||
      MI->hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI->isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI->mayLoad() && !MI->isInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg, MF))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def.  There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

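/// Compute the stack-pointer adjustment performed by a call frame setup or
/// destroy pseudo-instruction. For example, on a target whose stack grows
/// down, a 16-byte call frame setup yields +16 and the matching destroy
/// yields -16 (after alignment via alignSPAdjust).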
int TargetInstrInfo::getSPAdjust(const MachineInstr *MI) const {
  const MachineFunction *MF = MI->getParent()->getParent();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
    TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (MI->getOpcode() != FrameSetupOpcode &&
      MI->getOpcode() != FrameDestroyOpcode)
    return 0;

  int SPAdj = MI->getOperand(0).getImm();
  SPAdj = TFI->alignSPAdjust(SPAdj);

  if ((!StackGrowsDown && MI->getOpcode() == FrameSetupOpcode) ||
       (StackGrowsDown && MI->getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI->isTerminator() || MI->isPosition())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                               const ScheduleDAG *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "misched");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

//===----------------------------------------------------------------------===//
//  SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
//  MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned
TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                const MachineInstr *MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI->getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr *DefMI) const {
  if (DefMI->isTransient())
    return 0;
  if (DefMI->mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI->getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}

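// By default, predication is assumed to add no extra cycles; targets where
// predicated execution has a cost should override this.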
unsigned TargetInstrInfo::getPredicationCost(const MachineInstr *) const {
  return 0;
}

unsigned TargetInstrInfo::
getInstrLatency(const InstrItineraryData *ItinData,
                const MachineInstr *MI,
                unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI->mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI->getDesc().getSchedClass());
}

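// A def has "low" latency when its itinerary operand cycle is known and is
// at most one cycle.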
bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr *DefMI,
                                       unsigned DefIdx) const {
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI->getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

/// Both DefMI and UseMI must be valid.  By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::
getOperandLatency(const InstrItineraryData *ItinData,
                  const MachineInstr *DefMI, unsigned DefIdx,
                  const MachineInstr *UseMI, unsigned UseIdx) const {
  unsigned DefClass = DefMI->getDesc().getSchedClass();
  unsigned UseClass = UseMI->getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

/// If we can determine the operand latency from the def only, without
/// itinerary lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
  const InstrItineraryData *ItinData,
  const MachineInstr *DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required
  return -1;
}

/// computeOperandLatency - Compute and return the latency of the given data
/// dependent def and use when the operand indices are already known. UseMI may
/// be NULL for an unknown use.
///
/// Depending on the subtarget's itinerary properties, this may or may not need
/// to call getOperandLatency(). For most subtargets, we don't need DefIdx or
/// UseIdx to compute the latency.
unsigned TargetInstrInfo::
computeOperandLatency(const InstrItineraryData *ItinData,
                      const MachineInstr *DefMI, unsigned DefIdx,
                      const MachineInstr *UseMI, unsigned UseIdx) const {

  int DefLatency = computeDefOperandLatency(ItinData, DefMI);
  if (DefLatency >= 0)
    return DefLatency;

  assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail");

  int OperLatency = 0;
  if (UseMI)
    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
  else {
    unsigned DefClass = DefMI->getDesc().getSchedClass();
    OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
  }
  if (OperLatency >= 0)
    return OperLatency;

  // No operand latency was found.
  unsigned InstrLatency = getInstrLatency(ItinData, DefMI);

  // Expected latency is the max of the stage latency and itinerary props.
  InstrLatency = std::max(InstrLatency,
                          defaultDefLatency(ItinData->SchedModel, DefMI));
  return InstrLatency;
}

bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() ||
          MI.isRegSequenceLike()) && "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}

bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() ||
      MI.isExtractSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  // Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() ||
      MI.isInsertSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  // Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}
   1211