//===-- TargetInstrInfo.cpp - Target Instruction Information -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <algorithm>
#include <cctype>
#include <cstring>
using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() {
}

const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}
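
// Editorial sketch (not in the original source): a client resolving the
// register class for an operand might use this hook as follows; `Desc`,
// `TRI`, and `MF` are assumed to be in scope.
//
//   if (const TargetRegisterClass *RC =
//           TII->getRegClass(Desc, /*OpNum=*/1, TRI, MF)) {
//     // RC constrains allocation for the operand. A null result means the
//     // operand has no fixed class (e.g. the INSERT_SUBREG case above) or
//     // that OpNum is out of range.
//   }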

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
                                             const MCAsmInfo &MAI) const {
  // Count the number of instructions in the asm.
  bool atInsnStart = true;
  unsigned Length = 0;
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0)
      atInsnStart = true;
    // Check for comments before counting: a comment at the start of an
    // instruction position runs to the next separator or newline and must
    // not be measured as an instruction.
    if (atInsnStart && strncmp(Str, MAI.getCommentString(),
                               strlen(MAI.getCommentString())) == 0)
      atInsnStart = false;
    if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      Length += MAI.getMaxInstLength();
      atInsnStart = false;
    }
  }

  return Length;
}
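
// Worked example (editorial): with MaxInstLength == 4 and SeparatorString
// == ";", the string "movl %eax, %ebx\n\tcmpl %ecx, %ebx" contains two
// instruction starts, so getInlineAsmLength returns 8. A line holding only
// a comment contributes nothing, because the comment check above clears
// atInsnStart before any length is added.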

/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    InsertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(),
                 Tail->getDebugLoc());
  MBB->addSuccessor(NewDest);
}
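
// Effect sketch (editorial): given
//   BB0: ... ; INST_A ; INST_B ; JMP BB2
// calling ReplaceTailWithBranchTo(INST_A, BB3) erases INST_A through the JMP,
// drops BB0's old CFG edges, and leaves
//   BB0: ... ; JMP BB3
// (the JMP is omitted when BB3 is the layout fallthrough), with BB3 as BB0's
// only successor.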

// commuteInstruction - The default implementation of this method just exchanges
// the two operands returned by findCommutedOpIndices.
MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
                                                  bool NewMI) const {
  const MCInstrDesc &MCID = MI->getDesc();
  bool HasDef = MCID.getNumDefs() > 0;
  if (HasDef && !MI->getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;
  unsigned Idx1, Idx2;
  if (!findCommutedOpIndices(MI, Idx1, Idx2)) {
    assert(MI->isCommutable() && "Precondition violation: MI must be commutable.");
    return nullptr;
  }

  assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");
  unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
  unsigned Reg1 = MI->getOperand(Idx1).getReg();
  unsigned Reg2 = MI->getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI->getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI->getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI->getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI->getOperand(Idx1).isKill();
  bool Reg2IsKill = MI->getOperand(Idx2).isKill();
  // If the destination is tied to either of the commuted source registers,
  // it must be updated to follow the register it is tied to.
  if (HasDef && Reg0 == Reg1 &&
      MI->getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI->getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI->getParent()->getParent();
    MI = MF.CloneMachineInstr(MI);
  }

  if (HasDef) {
    MI->getOperand(0).setReg(Reg0);
    MI->getOperand(0).setSubReg(SubReg0);
  }
  MI->getOperand(Idx2).setReg(Reg1);
  MI->getOperand(Idx1).setReg(Reg2);
  MI->getOperand(Idx2).setSubReg(SubReg1);
  MI->getOperand(Idx1).setSubReg(SubReg2);
  MI->getOperand(Idx2).setIsKill(Reg1IsKill);
  MI->getOperand(Idx1).setIsKill(Reg2IsKill);
  return MI;
}
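
// Tied-def example (editorial; virtual-register syntax is illustrative):
// commuting the two-address instruction
//   %v1<def,tied> = ADD %v1<kill>, %v2
// must yield
//   %v2<def,tied> = ADD %v2, %v1<kill>
// i.e. the def follows its tied source to the new index and the kill flags
// travel with the registers; without the Reg0/SubReg0 fixup above the result
// would still define %v1 and violate the tied-operand constraint.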

/// findCommutedOpIndices - If the specified MI is commutable, return the two
/// operand indices whose values would swap. Return false if the instruction
/// is not in a form which this routine understands.
bool TargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI->isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isCommutable())
    return false;
  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  SrcOpIdx1 = MCID.getNumDefs();
  SrcOpIdx2 = SrcOpIdx1 + 1;
  if (!MI->getOperand(SrcOpIdx1).isReg() ||
      !MI->getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool
TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  if (!MI->isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI->isBranch() && !MI->isBarrier())
    return true;
  if (!MI->isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(MachineInstr *MI,
                            const SmallVectorImpl<MachineOperand> &Pred) const {
  bool MadeChange = false;

  assert(!MI->isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}
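
// Usage sketch (editorial, ARM-flavored): a client predicating an
// instruction builds the predicate operands first and lets this hook rewrite
// the predicate slots in place:
//
//   SmallVector<MachineOperand, 2> Cond;
//   Cond.push_back(MachineOperand::CreateImm(ARMCC::EQ));
//   Cond.push_back(MachineOperand::CreateReg(ARM::CPSR, /*isDef=*/false));
//   if (TII->PredicateInstruction(MI, Cond))
//     ; // MI now executes only when the EQ condition holds.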

bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isLoad()) {
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
              (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}

bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
                                          const MachineMemOperand *&MMO,
                                          int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isStore()) {
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
              (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  if (!SubIdx) {
    Size = RC->getSize();
    Offset = 0;
    return true;
  }
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size to be consistent with
  // MCRegisterClass::getSize().
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(RC->getSize() >= (Offset + Size) && "bad subregister range");

  // On big-endian targets the subregister occupies the high end of the spill
  // slot, so mirror the offset.
  if (!MF.getTarget().getDataLayout()->isLittleEndian()) {
    Offset = RC->getSize() - (Offset + Size);
  }
  return true;
}
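
// Worked example (editorial): for a 16-byte register class with a subregister
// index covering bits [64, 128), BitSize = 64 and BitOffset = 64, giving
// Size = 8 and Offset = 8 on a little-endian target; on a big-endian target
// the final flip yields Offset = 16 - (8 + 8) = 0.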

void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg,
                                    unsigned SubIdx,
                                    const MachineInstr *Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool
TargetInstrInfo::produceSameValue(const MachineInstr *MI0,
                                  const MachineInstr *MI1,
                                  const MachineRegisterInfo *MRI) const {
  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr *TargetInstrInfo::duplicate(MachineInstr *Orig,
                                         MachineFunction &MF) const {
  assert(!Orig->isNotDuplicable() &&
         "Instruction cannot be duplicated");
  return MF.CloneMachineInstr(Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
                                              unsigned FoldIdx) {
  assert(MI->isCopy() && "MI must be a COPY instruction");
  if (MI->getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI->getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveReg))
    return RC->contains(LiveReg) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

void TargetInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  llvm_unreachable("Not a MachO target");
}

bool TargetInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                           ArrayRef<unsigned> Ops) const {
  return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
}

static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  switch (MI->getOpcode()) {
  case TargetOpcode::STACKMAP:
    StartIdx = 2; // Skip ID, nShadowBytes.
    break;
  case TargetOpcode::PATCHPOINT: {
    // For PATCHPOINT, the call args are not foldable.
    PatchPointOpers opers(MI);
    StartIdx = opers.getVarIdx();
    break;
  }
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }

  // Return nullptr if any operands requested for folding are not foldable
  // (not part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < StartIdx)
      return nullptr;
  }

  MachineInstr *NewMI =
    MF.CreateMachineInstr(TII.get(MI->getOpcode()), MI->getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // Copy the operands before the foldable range unchanged: the ID, the meta
  // data, and any call arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    MIB.addOperand(MI->getOperand(i));

  for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
        MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else
      MIB.addOperand(MO);
  }
  return NewMI;
}
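
// Layout sketch (editorial): a stackmap
//   STACKMAP <id>, <nShadowBytes>, %v0, %v1
// with Ops = {3} (the %v1 operand) is rewritten so that live value is
// described indirectly through the spill slot:
//   STACKMAP <id>, <nShadowBytes>, %v0,
//            IndirectMemRefOp, <SpillSize>, <FrameIndex>, <SpillOffset>
// Operands below StartIdx (the ID and shadow bytes here, or everything up to
// the call arguments for PATCHPOINT) are never folded.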

/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                                 ArrayRef<unsigned> Ops,
                                                 int FI) const {
  unsigned Flags = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI->getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI->getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  MachineInstr *NewMI = nullptr;

  if (MI->getOpcode() == TargetOpcode::STACKMAP ||
      MI->getOpcode() == TargetOpcode::PATCHPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI);
  }

  if (NewMI) {
    NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    const MachineFrameInfo &MFI = *MF.getFrameInfo();
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                              Flags, MFI.getObjectSize(FI),
                              MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
    return MBB->insert(MI, NewMI);
  }

  // Straight COPY may fold as load/store.
  if (!MI->isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI->getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return --Pos;
}
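
// COPY-folding sketch (editorial): if MI is
//   %v0 = COPY %v1<kill>
// and Ops = {0} (the def), the tail of this function emits a store of %v1 to
// the stack slot via storeRegToStackSlot; had Ops named the use instead, it
// would emit a load of the slot into %v0. The new memory instruction is
// inserted immediately before the COPY and returned; removing the COPY is
// left to the caller.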

/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr *LoadMI) const {
  assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI->getOpcode() == TargetOpcode::STACKMAP ||
       MI->getOpcode() == TargetOpcode::PATCHPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
  }

  if (!NewMI) return nullptr;

  NewMI = MBB.insert(MI, NewMI);

  // Copy the memoperands from the load to the folded instruction.
  if (MI->memoperands_empty()) {
    NewMI->setMemRefs(LoadMI->memoperands_begin(),
                      LoadMI->memoperands_end());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MI->memoperands_begin(),
                      MI->memoperands_end());
    for (MachineInstr::mmo_iterator I = LoadMI->memoperands_begin(),
           E = LoadMI->memoperands_end(); I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

bool TargetInstrInfo::
isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI->getNumOperands() || !MI->getOperand(0).isReg())
    return false;
  unsigned DefReg = MI->getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register.  Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
      MI->getOperand(0).getSubReg() && MI->readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI->isNotDuplicable() || MI->mayStore() ||
      MI->hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI->isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI->mayLoad() && !MI->isInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg, MF))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def.  There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}
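
// Examples of the distinction drawn above (editorial): a constant
// materialization such as
//   %v0 = MOVi 42
// passes every check (no side effects, no register uses) and can be
// recomputed at any use, whereas
//   %v2 = ADD %v0, %v1
// reads virtual registers, so rematting it would lengthen the live ranges of
// %v0 and %v1 and it is rejected as non-trivial.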

int TargetInstrInfo::getSPAdjust(const MachineInstr *MI) const {
  const MachineFunction *MF = MI->getParent()->getParent();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
    TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  int FrameSetupOpcode = getCallFrameSetupOpcode();
  int FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (MI->getOpcode() != FrameSetupOpcode &&
      MI->getOpcode() != FrameDestroyOpcode)
    return 0;

  int SPAdj = MI->getOperand(0).getImm();

  if ((!StackGrowsDown && MI->getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI->getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}
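
// Sign convention example (editorial): on a stack-grows-down target,
//   ADJCALLSTACKDOWN 16
// returns +16 while the matching ADJCALLSTACKUP 16 returns -16; on a
// stack-grows-up target the signs flip, which is exactly the negation
// performed above.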

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI->isTerminator() || MI->isPosition())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI))
    return true;

  return false;
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                               const ScheduleDAG *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "misched");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

//===----------------------------------------------------------------------===//
//  SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
//  MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned
TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                const MachineInstr *MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI->getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr *DefMI) const {
  if (DefMI->isTransient())
    return 0;
  if (DefMI->mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI->getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr *) const {
  return 0;
}

unsigned TargetInstrInfo::
getInstrLatency(const InstrItineraryData *ItinData,
                const MachineInstr *MI,
                unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI->mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI->getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr *DefMI,
                                       unsigned DefIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI->getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

/// Both DefMI and UseMI must be valid.  By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::
getOperandLatency(const InstrItineraryData *ItinData,
                  const MachineInstr *DefMI, unsigned DefIdx,
                  const MachineInstr *UseMI, unsigned UseIdx) const {
  unsigned DefClass = DefMI->getDesc().getSchedClass();
  unsigned UseClass = UseMI->getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

/// If we can determine the operand latency from the def only, without
/// itinerary lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
  const InstrItineraryData *ItinData,
  const MachineInstr *DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required
  return -1;
}

/// computeOperandLatency - Compute and return the latency of the given data
/// dependent def and use when the operand indices are already known. UseMI
/// may be NULL for an unknown use.
///
/// Depending on the subtarget's itinerary properties, this may or may not
/// need to call getOperandLatency(). For most subtargets, we don't need
/// DefIdx or UseIdx to compute min latency.
unsigned TargetInstrInfo::
computeOperandLatency(const InstrItineraryData *ItinData,
                      const MachineInstr *DefMI, unsigned DefIdx,
                      const MachineInstr *UseMI, unsigned UseIdx) const {

  int DefLatency = computeDefOperandLatency(ItinData, DefMI);
  if (DefLatency >= 0)
    return DefLatency;

  assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail");

  int OperLatency = 0;
  if (UseMI)
    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
  else {
    unsigned DefClass = DefMI->getDesc().getSchedClass();
    OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
  }
  if (OperLatency >= 0)
    return OperLatency;

  // No operand latency was found.
  unsigned InstrLatency = getInstrLatency(ItinData, DefMI);

  // Expected latency is the max of the stage latency and itinerary props.
  InstrLatency = std::max(InstrLatency,
                          defaultDefLatency(ItinData->SchedModel, DefMI));
  return InstrLatency;
}
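
// Resolution order illustrated (editorial): computeDefOperandLatency answers
// directly when there is no itinerary (via getInstrLatency) or only an empty
// one (via defaultDefLatency); otherwise the per-operand itinerary lookup
// above runs, and if it yields no cycle count the code falls back to the
// instruction's stage latency, taking the max with defaultDefLatency so a
// load is never reported faster than the model's LoadLatency.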

bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() ||
          MI.isRegSequenceLike()) && "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}

bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() ||
      MI.isExtractSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  // Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() ||
      MI.isInsertSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  // Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}
    949