//===-- X86InstrInfo.h - X86 Instruction Information ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_X86INSTRINFO_H
#define LLVM_LIB_TARGET_X86_X86INSTRINFO_H

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86RegisterInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Target/TargetInstrInfo.h"

#define GET_INSTRINFO_HEADER
#include "X86GenInstrInfo.inc"

namespace llvm {
  class MachineInstrBuilder;
  class X86RegisterInfo;
  class X86Subtarget;

namespace X86 {
  // X86 specific condition code. These correspond to X86_*_COND in
  // X86InstrInfo.td. They must be kept in sync.
  enum CondCode {
    COND_A  = 0,
    COND_AE = 1,
    COND_B  = 2,
    COND_BE = 3,
    COND_E  = 4,
    COND_G  = 5,
    COND_GE = 6,
    COND_L  = 7,
    COND_LE = 8,
    COND_NE = 9,
    COND_NO = 10,
    COND_NP = 11,
    COND_NS = 12,
    COND_O  = 13,
    COND_P  = 14,
    COND_S  = 15,
    LAST_VALID_COND = COND_S,

    // Artificial condition codes. These are used by AnalyzeBranch
    // to indicate a block terminated with two conditional branches to
    // the same location. This occurs in code using FCMP_OEQ or FCMP_UNE,
    // which can't be represented on x86 with a single condition. These
    // are never used in MachineInstrs.
    COND_NE_OR_P,
    COND_NP_OR_E,

    COND_INVALID
  };

  // Turn condition code into conditional branch opcode.
  unsigned GetCondBranchFromCond(CondCode CC);

  /// \brief Return a set opcode for the given condition and whether it has
  /// a memory operand.
  unsigned getSETFromCond(CondCode CC, bool HasMemoryOperand = false);

  /// \brief Return a cmov opcode for the given condition, register size in
  /// bytes, and operand type.
  unsigned getCMovFromCond(CondCode CC, unsigned RegBytes,
                           bool HasMemoryOperand = false);

  // Turn CMov opcode into condition code.
  CondCode getCondFromCMovOpc(unsigned Opc);

  /// GetOppositeBranchCondition - Return the inverse of the specified cond,
  /// e.g. turning COND_E to COND_NE.
  CondCode GetOppositeBranchCondition(CondCode CC);
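
  // A minimal usage sketch (illustrative only, not part of this interface):
  // a caller lowering a select on the 'equal' condition might combine the
  // helpers above as follows, assuming a 32-bit register operand:
  //     X86::CondCode CC = X86::COND_E;
  //     unsigned CMovOpc = X86::getCMovFromCond(CC, 4 /*RegBytes*/);
  //     X86::CondCode Inv = X86::GetOppositeBranchCondition(CC); // COND_NE
  //     unsigned SetOpc = X86::getSETFromCond(Inv);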
}  // end namespace X86;


/// isGlobalStubReference - Return true if the specified TargetFlag operand is
/// a reference to a stub for a global, not the global itself.
inline static bool isGlobalStubReference(unsigned char TargetFlag) {
  switch (TargetFlag) {
  case X86II::MO_DLLIMPORT: // dllimport stub.
  case X86II::MO_GOTPCREL:  // rip-relative GOT reference.
  case X86II::MO_GOT:       // normal GOT reference.
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:        // Normal $non_lazy_ptr ref.
  case X86II::MO_DARWIN_NONLAZY:                 // Normal $non_lazy_ptr ref.
  case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: // Hidden $non_lazy_ptr ref.
    return true;
  default:
    return false;
  }
}

/// isGlobalRelativeToPICBase - Return true if the specified global value
/// reference is relative to a 32-bit PIC base (X86ISD::GlobalBaseReg).  If this
/// is true, the addressing mode has the PIC base register added in (e.g. EBX).
inline static bool isGlobalRelativeToPICBase(unsigned char TargetFlag) {
  switch (TargetFlag) {
  case X86II::MO_GOTOFF:                         // isPICStyleGOT: local global.
  case X86II::MO_GOT:                            // isPICStyleGOT: other global.
  case X86II::MO_PIC_BASE_OFFSET:                // Darwin local global.
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:        // Darwin/32 external global.
  case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: // Darwin/32 hidden global.
  case X86II::MO_TLVP:                           // ??? Pretty sure..
    return true;
  default:
    return false;
  }
}

inline static bool isScale(const MachineOperand &MO) {
  return MO.isImm() &&
    (MO.getImm() == 1 || MO.getImm() == 2 ||
     MO.getImm() == 4 || MO.getImm() == 8);
}
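
// A sketch of the operand layout the helpers below assume: an x86 memory
// reference occupies X86::AddrNumOperands (5) consecutive MachineOperands
// starting at index Op, in the order defined in X86BaseInfo.h:
//   Op + X86::AddrBaseReg     base register
//   Op + X86::AddrScaleAmt    scale immediate (1, 2, 4 or 8, see isScale)
//   Op + X86::AddrIndexReg    index register
//   Op + X86::AddrDisp        displacement (imm, global, CPI or JTI)
//   Op + X86::AddrSegmentReg  segment register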

inline static bool isLeaMem(const MachineInstr *MI, unsigned Op) {
  if (MI->getOperand(Op).isFI()) return true;
  return Op+X86::AddrSegmentReg <= MI->getNumOperands() &&
    MI->getOperand(Op+X86::AddrBaseReg).isReg() &&
    isScale(MI->getOperand(Op+X86::AddrScaleAmt)) &&
    MI->getOperand(Op+X86::AddrIndexReg).isReg() &&
    (MI->getOperand(Op+X86::AddrDisp).isImm() ||
     MI->getOperand(Op+X86::AddrDisp).isGlobal() ||
     MI->getOperand(Op+X86::AddrDisp).isCPI() ||
     MI->getOperand(Op+X86::AddrDisp).isJTI());
}

inline static bool isMem(const MachineInstr *MI, unsigned Op) {
  if (MI->getOperand(Op).isFI()) return true;
  return Op+X86::AddrNumOperands <= MI->getNumOperands() &&
    MI->getOperand(Op+X86::AddrSegmentReg).isReg() &&
    isLeaMem(MI, Op);
}

class X86InstrInfo final : public X86GenInstrInfo {
  X86Subtarget &Subtarget;
  const X86RegisterInfo RI;

  /// RegOp2MemOpTable2Addr, RegOp2MemOpTable0, RegOp2MemOpTable1,
  /// RegOp2MemOpTable2, RegOp2MemOpTable3, RegOp2MemOpTable4 - Load / store
  /// folding opcode maps.
  ///
  typedef DenseMap<unsigned,
                   std::pair<unsigned, unsigned> > RegOp2MemOpTableType;
  RegOp2MemOpTableType RegOp2MemOpTable2Addr;
  RegOp2MemOpTableType RegOp2MemOpTable0;
  RegOp2MemOpTableType RegOp2MemOpTable1;
  RegOp2MemOpTableType RegOp2MemOpTable2;
  RegOp2MemOpTableType RegOp2MemOpTable3;
  RegOp2MemOpTableType RegOp2MemOpTable4;
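
  // A sketch of how these maps are used: each RegOp2MemOpTable* map is keyed
  // by a register-form opcode and maps it to the corresponding memory-form
  // opcode plus folding flags (see AddTableEntry below, which also fills the
  // reverse MemOp2RegOpTable used for unfolding).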

  /// MemOp2RegOpTable - Load / store unfolding opcode map.
  ///
  typedef DenseMap<unsigned,
                   std::pair<unsigned, unsigned> > MemOp2RegOpTableType;
  MemOp2RegOpTableType MemOp2RegOpTable;

  static void AddTableEntry(RegOp2MemOpTableType &R2MTable,
                            MemOp2RegOpTableType &M2RTable,
                            unsigned RegOp, unsigned MemOp, unsigned Flags);

  virtual void anchor();

  bool AnalyzeBranchImpl(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                         MachineBasicBlock *&FBB,
                         SmallVectorImpl<MachineOperand> &Cond,
                         SmallVectorImpl<MachineInstr *> &CondBranches,
                         bool AllowModify) const;

public:
  explicit X86InstrInfo(X86Subtarget &STI);

  /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info.  As
  /// such, whenever a client has an instance of instruction info, it should
  /// always be able to get register info as well (through this method).
  ///
  const X86RegisterInfo &getRegisterInfo() const { return RI; }

  /// getSPAdjust - This returns the stack pointer adjustment made by
  /// this instruction. For x86, we need to handle more complex call
  /// sequences involving PUSHes.
  int getSPAdjust(const MachineInstr *MI) const override;

  /// isCoalescableExtInstr - Return true if the instruction is a "coalescable"
  /// extension instruction. That is, it's like a copy where it's legal for the
  /// source to overlap the destination. e.g. X86::MOVSX64rr32. If this returns
  /// true, then it's expected the pre-extension value is available as a subreg
  /// of the result register. This also returns the sub-register index in
  /// SubIdx.
  bool isCoalescableExtInstr(const MachineInstr &MI,
                             unsigned &SrcReg, unsigned &DstReg,
                             unsigned &SubIdx) const override;

  unsigned isLoadFromStackSlot(const MachineInstr *MI,
                               int &FrameIndex) const override;
  /// isLoadFromStackSlotPostFE - Check for post-frame ptr elimination
  /// stack locations as well.  This uses a heuristic so it isn't
  /// reliable for correctness.
  unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                     int &FrameIndex) const override;

  unsigned isStoreToStackSlot(const MachineInstr *MI,
                              int &FrameIndex) const override;
  /// isStoreToStackSlotPostFE - Check for post-frame ptr elimination
  /// stack locations as well.  This uses a heuristic so it isn't
  /// reliable for correctness.
  unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
                                    int &FrameIndex) const override;

  bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
                                         AliasAnalysis *AA) const override;
  void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                     unsigned DestReg, unsigned SubIdx,
                     const MachineInstr *Orig,
                     const TargetRegisterInfo &TRI) const override;

  /// Given an operand within a MachineInstr, insert preceding code to put it
  /// into the right format for a particular kind of LEA instruction. This may
  /// involve using an appropriate super-register instead (with an implicit use
  /// of the original) or creating a new virtual register and inserting COPY
  /// instructions to get the data into the right class.
  ///
  /// Reference parameters are set to indicate how caller should add this
  /// operand to the LEA instruction.
  bool classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
                      unsigned LEAOpcode, bool AllowSP,
                      unsigned &NewSrc, bool &isKill,
                      bool &isUndef, MachineOperand &ImplicitOp) const;

  /// convertToThreeAddress - This method must be implemented by targets that
  /// set the M_CONVERTIBLE_TO_3_ADDR flag.  When this flag is set, the target
  /// may be able to convert a two-address instruction into a true
  /// three-address instruction on demand.  This allows the X86 target (for
  /// example) to convert ADD and SHL instructions into LEA instructions if they
  /// would require register copies due to two-addressness.
  ///
  /// This method returns a null pointer if the transformation cannot be
  /// performed, otherwise it returns the new instruction.
  ///
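  /// For example (an illustrative sketch), the two-address
  ///     %reg1 = ADD32rr %reg1, %reg2
  /// may be rewritten as the three-address
  ///     %reg3 = LEA32r %reg1, 1, %reg2, 0, %noreg
  /// so that the destination is no longer tied to one of the sources.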
  MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
                                      MachineBasicBlock::iterator &MBBI,
                                      LiveVariables *LV) const override;

  /// Returns true iff the routine could find two commutable operands in the
  /// given machine instruction.
  /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments. Their
  /// input values may be re-defined by this method only if they are not
  /// pre-defined, which is indicated by passing the special value
  /// 'CommuteAnyOperandIndex' for them.
  /// If both indices are pre-defined and refer to some operands, then the
  /// method simply returns true if the corresponding operands are commutable
  /// and returns false otherwise.
  ///
  /// For example, calling this method this way:
  ///     unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
  ///     findCommutedOpIndices(MI, Op1, Op2);
  /// can be interpreted as a query asking to find an operand that would be
  /// commutable with operand #1.
  bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
                             unsigned &SrcOpIdx2) const override;

  /// Returns true if the routine could find two commutable operands
  /// in the given FMA instruction. Otherwise, returns false.
  ///
  /// \p SrcOpIdx1 and \p SrcOpIdx2 are INPUT and OUTPUT arguments.
  /// The output indices of the commuted operands are returned in these
  /// arguments. Also, the input values of these arguments may be preset either
  /// to indices of operands that must be commuted or be equal to a special
  /// value 'CommuteAnyOperandIndex' which means that the corresponding
  /// operand index is not set and this method is free to pick any of the
  /// available commutable operands.
  ///
  /// For example, calling this method this way:
  ///     unsigned Idx1 = 1, Idx2 = CommuteAnyOperandIndex;
  ///     findFMA3CommutedOpIndices(MI, Idx1, Idx2);
  /// can be interpreted as a query asking if the operand #1 can be swapped
  /// with any other available operand (e.g. operand #2, operand #3, etc.).
  ///
  /// The returned FMA opcode may differ from the opcode in the given MI.
  /// For example, commuting the operands #1 and #3 in the following FMA
  ///     FMA213 #1, #2, #3
  /// results in an instruction with the adjusted opcode:
  ///     FMA231 #3, #2, #1
  bool findFMA3CommutedOpIndices(MachineInstr *MI,
                                 unsigned &SrcOpIdx1,
                                 unsigned &SrcOpIdx2) const;

  /// Returns an adjusted FMA opcode that must be used in an FMA instruction
  /// that performs the same computations as the given MI but which has the
  /// operands \p SrcOpIdx1 and \p SrcOpIdx2 commuted.
  /// It may return 0 if it is unsafe to commute the operands.
  ///
  /// The returned FMA opcode may differ from the opcode in the given \p MI.
  /// For example, commuting the operands #1 and #3 in the following FMA
  ///     FMA213 #1, #2, #3
  /// results in an instruction with the adjusted opcode:
  ///     FMA231 #3, #2, #1
  unsigned getFMA3OpcodeToCommuteOperands(MachineInstr *MI,
                                          unsigned SrcOpIdx1,
                                          unsigned SrcOpIdx2) const;

  // Branch analysis.
  bool isUnpredicatedTerminator(const MachineInstr* MI) const override;
  bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify) const override;

  bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
                             unsigned &Offset,
                             const TargetRegisterInfo *TRI) const override;
  bool AnalyzeBranchPredicate(MachineBasicBlock &MBB,
                              TargetInstrInfo::MachineBranchPredicate &MBP,
                              bool AllowModify = false) const override;

  unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
  unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
                        DebugLoc DL) const override;
  bool canInsertSelect(const MachineBasicBlock&, ArrayRef<MachineOperand> Cond,
                       unsigned, unsigned, int&, int&, int&) const override;
  void insertSelect(MachineBasicBlock &MBB,
                    MachineBasicBlock::iterator MI, DebugLoc DL,
                    unsigned DstReg, ArrayRef<MachineOperand> Cond,
                    unsigned TrueReg, unsigned FalseReg) const override;
  void copyPhysReg(MachineBasicBlock &MBB,
                   MachineBasicBlock::iterator MI, DebugLoc DL,
                   unsigned DestReg, unsigned SrcReg,
                   bool KillSrc) const override;
  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI,
                           unsigned SrcReg, bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI) const override;

  void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
                      SmallVectorImpl<MachineOperand> &Addr,
                      const TargetRegisterClass *RC,
                      MachineInstr::mmo_iterator MMOBegin,
                      MachineInstr::mmo_iterator MMOEnd,
                      SmallVectorImpl<MachineInstr*> &NewMIs) const;

  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI,
                            unsigned DestReg, int FrameIndex,
                            const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI) const override;

  void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                       SmallVectorImpl<MachineOperand> &Addr,
                       const TargetRegisterClass *RC,
                       MachineInstr::mmo_iterator MMOBegin,
                       MachineInstr::mmo_iterator MMOEnd,
                       SmallVectorImpl<MachineInstr*> &NewMIs) const;

  bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;

  /// foldMemoryOperand - If this target supports it, fold a load or store of
  /// the specified stack slot into the specified machine instruction for the
  /// specified operand(s).  If this is possible, the target should perform the
  /// folding and return true, otherwise it should return false.  If it folds
  /// the instruction, it is likely that the MachineInstruction the iterator
  /// references has been changed.
  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                      ArrayRef<unsigned> Ops,
                                      MachineBasicBlock::iterator InsertPt,
                                      int FrameIndex) const override;

  /// foldMemoryOperand - Same as the previous version except it allows folding
  /// of any load and store from / to any address, not just from a specific
  /// stack slot.
  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                      ArrayRef<unsigned> Ops,
                                      MachineBasicBlock::iterator InsertPt,
                                      MachineInstr *LoadMI) const override;

  /// unfoldMemoryOperand - Separate a single instruction which folded a load or
  /// a store or a load and a store into two or more instructions. If this is
  /// possible, returns true as well as the new instructions by reference.
  bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                         unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
                         SmallVectorImpl<MachineInstr*> &NewMIs) const override;

  bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                           SmallVectorImpl<SDNode*> &NewNodes) const override;

  /// getOpcodeAfterMemoryUnfold - Returns the opcode of the would be new
  /// instruction after load / store are unfolded from an instruction of the
  /// specified opcode. It returns zero if the specified unfolding is not
  /// possible. If LoadRegIndex is non-null, it is filled in with the operand
  /// index of the operand which will hold the register holding the loaded
  /// value.
  unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
                              bool UnfoldLoad, bool UnfoldStore,
                              unsigned *LoadRegIndex = nullptr) const override;

  /// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler
  /// to determine if two loads are loading from the same base address. It
  /// should only return true if the base pointers are the same and the
  /// only difference between the two addresses is the offset. It also returns
  /// the offsets by reference.
  bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1,
                               int64_t &Offset2) const override;

  /// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
  /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
  /// be scheduled together. On some targets, if two loads are loading from
  /// addresses in the same cache line, it's better if they are scheduled
  /// together. This function takes two integers that represent the load offsets
  /// from the common base address. It returns true if it decides it's desirable
  /// to schedule the two loads together. "NumLoads" is the number of loads that
  /// have already been scheduled after Load1.
  bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                               int64_t Offset1, int64_t Offset2,
                               unsigned NumLoads) const override;

  bool shouldScheduleAdjacent(MachineInstr* First,
                              MachineInstr *Second) const override;

  void getNoopForMachoTarget(MCInst &NopInst) const override;

  bool
  ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;

  /// isSafeToMoveRegClassDefs - Return true if it's safe to move a machine
  /// instruction that defines the specified register class.
  bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const override;

  /// isSafeToClobberEFLAGS - Return true if it's safe to insert an instruction
  /// that would clobber the EFLAGS condition register. Note the result may be
  /// conservative. If it cannot definitely determine the safety after visiting
  /// a few instructions in each direction, it assumes it's not safe.
  bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) const;

  /// True if MI has a condition code def, e.g. EFLAGS, that is
  /// not marked dead.
  bool hasLiveCondCodeDef(MachineInstr *MI) const;

  /// getGlobalBaseReg - Return a virtual register initialized with the
  /// global base register value. Output instructions required to
  /// initialize the register in the function entry block, if necessary.
  ///
  unsigned getGlobalBaseReg(MachineFunction *MF) const;

  std::pair<uint16_t, uint16_t>
  getExecutionDomain(const MachineInstr *MI) const override;

  void setExecutionDomain(MachineInstr *MI, unsigned Domain) const override;

  unsigned
    getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
                                 const TargetRegisterInfo *TRI) const override;
  unsigned getUndefRegClearance(const MachineInstr *MI, unsigned &OpNum,
                                const TargetRegisterInfo *TRI) const override;
  void breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
                                 const TargetRegisterInfo *TRI) const override;

  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                      unsigned OpNum,
                                      ArrayRef<MachineOperand> MOs,
                                      MachineBasicBlock::iterator InsertPt,
                                      unsigned Size, unsigned Alignment,
                                      bool AllowCommute) const;

  void
  getUnconditionalBranch(MCInst &Branch,
                         const MCSymbolRefExpr *BranchTarget) const override;

  void getTrap(MCInst &MI) const override;

  unsigned getJumpInstrTableEntryBound() const override;

  bool isHighLatencyDef(int opc) const override;

  bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
                             const MachineRegisterInfo *MRI,
                             const MachineInstr *DefMI, unsigned DefIdx,
                             const MachineInstr *UseMI,
                             unsigned UseIdx) const override;

  bool useMachineCombiner() const override {
    return true;
  }

  bool isAssociativeAndCommutative(const MachineInstr &Inst) const override;

  bool hasReassociableOperands(const MachineInstr &Inst,
                               const MachineBasicBlock *MBB) const override;

  void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
                             MachineInstr &NewMI1,
                             MachineInstr &NewMI2) const override;

  /// analyzeCompare - For a comparison instruction, return the source registers
  /// in SrcReg and SrcReg2 if it has two register operands, and the value it
  /// compares against in CmpValue. Return true if the comparison instruction
  /// can be analyzed.
  bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
                      unsigned &SrcReg2, int &CmpMask,
                      int &CmpValue) const override;

  /// optimizeCompareInstr - Check if there exists an earlier instruction that
  /// operates on the same source operands and sets flags in the same way as
  /// Compare; remove Compare if possible.
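  ///
  /// For example (an illustrative sketch), in a sequence such as
  ///     %reg2 = SUB32rr %reg0, %reg1, implicit-def %eflags
  ///     CMP32rr %reg0, %reg1, implicit-def %eflags
  /// the CMP recomputes the flags the SUB already produced, so the CMP can be
  /// removed and later users of EFLAGS can rely on the SUB's definition.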
  bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
                            unsigned SrcReg2, int CmpMask, int CmpValue,
                            const MachineRegisterInfo *MRI) const override;

  /// optimizeLoadInstr - Try to remove the load by folding it to a register
  /// operand at the use. We fold the load instructions if and only if the
  /// def and use are in the same BB. We only look at one load and see
  /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
  /// defined by the load we are trying to fold. DefMI returns the machine
  /// instruction that defines FoldAsLoadDefReg, and the function returns
  /// the machine instruction generated due to folding.
  MachineInstr* optimizeLoadInstr(MachineInstr *MI,
                                  const MachineRegisterInfo *MRI,
                                  unsigned &FoldAsLoadDefReg,
                                  MachineInstr *&DefMI) const override;

  std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned TF) const override;

  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override;

protected:
  /// Commutes the operands in the given instruction by changing the operands
  /// order and/or changing the instruction's opcode and/or the immediate value
  /// operand.
  ///
  /// The arguments 'CommuteOpIdx1' and 'CommuteOpIdx2' specify the operands
  /// to be commuted.
  ///
  /// Do not call this method for a non-commutable instruction or
  /// non-commutable operands.
  /// Even if the instruction is commutable, the method may still
  /// fail to commute the operands; a null pointer is returned in such cases.
  MachineInstr *commuteInstructionImpl(MachineInstr *MI, bool NewMI,
                                       unsigned CommuteOpIdx1,
                                       unsigned CommuteOpIdx2) const override;

private:
  MachineInstr * convertToThreeAddressWithLEA(unsigned MIOpc,
                                              MachineFunction::iterator &MFI,
                                              MachineBasicBlock::iterator &MBBI,
                                              LiveVariables *LV) const;

  /// Handles memory folding for special case instructions, for instance those
  /// requiring custom manipulation of the address.
  MachineInstr *foldMemoryOperandCustom(MachineFunction &MF, MachineInstr *MI,
                                        unsigned OpNum,
                                        ArrayRef<MachineOperand> MOs,
                                        MachineBasicBlock::iterator InsertPt,
                                        unsigned Size, unsigned Align) const;

  /// isFrameOperand - Return true and the FrameIndex if the specified
  /// operand and following operands form a reference to the stack frame.
  bool isFrameOperand(const MachineInstr *MI, unsigned int Op,
                      int &FrameIndex) const;

  /// Expand the MOVImmSExti8 pseudo-instructions.
  bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB) const;
};

} // End llvm namespace

#endif