//===-- X86InstrInfo.h - X86 Instruction Information ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_X86INSTRINFO_H
#define LLVM_LIB_TARGET_X86_X86INSTRINFO_H

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86RegisterInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Target/TargetInstrInfo.h"

#define GET_INSTRINFO_HEADER
#include "X86GenInstrInfo.inc"

namespace llvm {
  class MachineInstrBuilder;
  class X86RegisterInfo;
  class X86Subtarget;

namespace X86 {
  // X86 specific condition code. These correspond to X86_*_COND in
  // X86InstrInfo.td. They must be kept in synch.
enum CondCode {
  COND_A = 0,
  COND_AE = 1,
  COND_B = 2,
  COND_BE = 3,
  COND_E = 4,
  COND_G = 5,
  COND_GE = 6,
  COND_L = 7,
  COND_LE = 8,
  COND_NE = 9,
  COND_NO = 10,
  COND_NP = 11,
  COND_NS = 12,
  COND_O = 13,
  COND_P = 14,
  COND_S = 15,
  LAST_VALID_COND = COND_S,

  // Artificial condition codes. These are used by AnalyzeBranch
  // to indicate a block terminated with two conditional branches that together
  // form a compound condition. They occur in code using FCMP_OEQ or FCMP_UNE,
  // which can't be represented on x86 with a single condition. These
  // are never used in MachineInstrs and are inverses of one another.
  COND_NE_OR_P,
  COND_E_AND_NP,

  COND_INVALID
};

// Turn condition code into conditional branch opcode.
unsigned GetCondBranchFromCond(CondCode CC);

/// \brief Return a set opcode for the given condition and whether it has
/// a memory operand.
unsigned getSETFromCond(CondCode CC, bool HasMemoryOperand = false);

/// \brief Return a cmov opcode for the given condition, register size in
/// bytes, and operand type.
unsigned getCMovFromCond(CondCode CC, unsigned RegBytes,
                         bool HasMemoryOperand = false);
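//
// Illustrative use (a sketch; the concrete opcode names such as X86::SETEr
// and X86::CMOVE32rr are assumptions taken from the X86 instruction
// definitions, not guaranteed by this interface):
//   unsigned SetOpc  = getSETFromCond(X86::COND_E);     // set byte if equal
//   unsigned CMovOpc = getCMovFromCond(X86::COND_E, 4); // 32-bit cmov if equal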

// Turn CMov opcode into condition code.
CondCode getCondFromCMovOpc(unsigned Opc);

/// GetOppositeBranchCondition - Return the inverse of the specified cond,
/// e.g. turning COND_E to COND_NE.
CondCode GetOppositeBranchCondition(CondCode CC);
}  // end namespace X86;


/// isGlobalStubReference - Return true if the specified TargetFlag operand is
/// a reference to a stub for a global, not the global itself.
inline static bool isGlobalStubReference(unsigned char TargetFlag) {
  switch (TargetFlag) {
  case X86II::MO_DLLIMPORT: // dllimport stub.
  case X86II::MO_GOTPCREL:  // rip-relative GOT reference.
  case X86II::MO_GOT:       // normal GOT reference.
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:        // Normal $non_lazy_ptr ref.
  case X86II::MO_DARWIN_NONLAZY:                 // Normal $non_lazy_ptr ref.
    return true;
  default:
    return false;
  }
}
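
// Typical use on a global-address operand (a sketch; assumes the operand's
// target flags were set by the X86 target when lowering the global):
//   const MachineOperand &MO = MI.getOperand(OpNo);
//   if (MO.isGlobal() && isGlobalStubReference(MO.getTargetFlags()))
//     ; // the operand refers to the global's stub, not the global itself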

/// isGlobalRelativeToPICBase - Return true if the specified global value
/// reference is relative to a 32-bit PIC base (X86ISD::GlobalBaseReg).  If this
/// is true, the addressing mode has the PIC base register added in (e.g. EBX).
inline static bool isGlobalRelativeToPICBase(unsigned char TargetFlag) {
  switch (TargetFlag) {
  case X86II::MO_GOTOFF:                         // isPICStyleGOT: local global.
  case X86II::MO_GOT:                            // isPICStyleGOT: other global.
  case X86II::MO_PIC_BASE_OFFSET:                // Darwin local global.
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:        // Darwin/32 external global.
  case X86II::MO_TLVP:                           // ??? Pretty sure..
    return true;
  default:
    return false;
  }
}

inline static bool isScale(const MachineOperand &MO) {
  return MO.isImm() &&
    (MO.getImm() == 1 || MO.getImm() == 2 ||
     MO.getImm() == 4 || MO.getImm() == 8);
}

inline static bool isLeaMem(const MachineInstr &MI, unsigned Op) {
  if (MI.getOperand(Op).isFI())
    return true;
  return Op + X86::AddrSegmentReg <= MI.getNumOperands() &&
         MI.getOperand(Op + X86::AddrBaseReg).isReg() &&
         isScale(MI.getOperand(Op + X86::AddrScaleAmt)) &&
         MI.getOperand(Op + X86::AddrIndexReg).isReg() &&
         (MI.getOperand(Op + X86::AddrDisp).isImm() ||
          MI.getOperand(Op + X86::AddrDisp).isGlobal() ||
          MI.getOperand(Op + X86::AddrDisp).isCPI() ||
          MI.getOperand(Op + X86::AddrDisp).isJTI());
}

inline static bool isMem(const MachineInstr &MI, unsigned Op) {
  if (MI.getOperand(Op).isFI())
    return true;
  return Op + X86::AddrNumOperands <= MI.getNumOperands() &&
         MI.getOperand(Op + X86::AddrSegmentReg).isReg() && isLeaMem(MI, Op);
}
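
// An x86 memory reference occupies X86::AddrNumOperands consecutive operands
// starting at Op, laid out as { base reg, scale imm, index reg, displacement,
// segment reg }.  Illustrative access (a sketch; assumes the displacement is
// an immediate):
//   if (isMem(MI, Op)) {
//     unsigned BaseReg = MI.getOperand(Op + X86::AddrBaseReg).getReg();
//     int64_t Disp     = MI.getOperand(Op + X86::AddrDisp).getImm();
//   }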

class X86InstrInfo final : public X86GenInstrInfo {
  X86Subtarget &Subtarget;
  const X86RegisterInfo RI;

  /// RegOp2MemOpTable2Addr, RegOp2MemOpTable0, RegOp2MemOpTable1,
  /// RegOp2MemOpTable2, RegOp2MemOpTable3, RegOp2MemOpTable4 - Load / store
  /// folding opcode maps.
  ///
  typedef DenseMap<unsigned,
                   std::pair<uint16_t, uint16_t> > RegOp2MemOpTableType;
  RegOp2MemOpTableType RegOp2MemOpTable2Addr;
  RegOp2MemOpTableType RegOp2MemOpTable0;
  RegOp2MemOpTableType RegOp2MemOpTable1;
  RegOp2MemOpTableType RegOp2MemOpTable2;
  RegOp2MemOpTableType RegOp2MemOpTable3;
  RegOp2MemOpTableType RegOp2MemOpTable4;

  /// MemOp2RegOpTable - Load / store unfolding opcode map.
  ///
  typedef DenseMap<unsigned,
                   std::pair<uint16_t, uint16_t> > MemOp2RegOpTableType;
  MemOp2RegOpTableType MemOp2RegOpTable;

  static void AddTableEntry(RegOp2MemOpTableType &R2MTable,
                            MemOp2RegOpTableType &M2RTable,
                            uint16_t RegOp, uint16_t MemOp, uint16_t Flags);

  virtual void anchor();

  bool AnalyzeBranchImpl(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                         MachineBasicBlock *&FBB,
                         SmallVectorImpl<MachineOperand> &Cond,
                         SmallVectorImpl<MachineInstr *> &CondBranches,
                         bool AllowModify) const;

public:
  explicit X86InstrInfo(X86Subtarget &STI);

  /// getRegisterInfo - TargetInstrInfo is a superset of register info.  As
  /// such, whenever a client has an instance of instruction info, it should
  /// always be able to get register info as well (through this method).
  ///
  const X86RegisterInfo &getRegisterInfo() const { return RI; }

  /// getSPAdjust - This returns the stack pointer adjustment made by
  /// this instruction. For x86, we need to handle more complex call
  /// sequences involving PUSHes.
  int getSPAdjust(const MachineInstr &MI) const override;

  /// isCoalescableExtInstr - Return true if the instruction is a "coalescable"
  /// extension instruction. That is, it's like a copy where it's legal for the
  /// source to overlap the destination. e.g. X86::MOVSX64rr32. If this returns
  /// true, then it's expected the pre-extension value is available as a subreg
  /// of the result register. This also returns the sub-register index in
  /// SubIdx.
  bool isCoalescableExtInstr(const MachineInstr &MI,
                             unsigned &SrcReg, unsigned &DstReg,
                             unsigned &SubIdx) const override;

  unsigned isLoadFromStackSlot(const MachineInstr &MI,
                               int &FrameIndex) const override;
  /// isLoadFromStackSlotPostFE - Check for post-frame ptr elimination
  /// stack locations as well.  This uses a heuristic so it isn't
  /// reliable for correctness.
  unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI,
                                     int &FrameIndex) const override;

  unsigned isStoreToStackSlot(const MachineInstr &MI,
                              int &FrameIndex) const override;
  /// isStoreToStackSlotPostFE - Check for post-frame ptr elimination
  /// stack locations as well.  This uses a heuristic so it isn't
  /// reliable for correctness.
  unsigned isStoreToStackSlotPostFE(const MachineInstr &MI,
                                    int &FrameIndex) const override;

  bool isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                         AliasAnalysis *AA) const override;
  void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                     unsigned DestReg, unsigned SubIdx,
                     const MachineInstr &Orig,
                     const TargetRegisterInfo &TRI) const override;

  /// Given an operand within a MachineInstr, insert preceding code to put it
  /// into the right format for a particular kind of LEA instruction. This may
  /// involve using an appropriate super-register instead (with an implicit use
  /// of the original) or creating a new virtual register and inserting COPY
  /// instructions to get the data into the right class.
  ///
  /// Reference parameters are set to indicate how the caller should add this
  /// operand to the LEA instruction.
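  ///
  /// Illustrative caller pattern (a sketch; the chosen opcode and the builder
  /// details are assumptions, not the exact in-tree code):
  ///     unsigned NewSrc; bool isKill, isUndef;
  ///     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
  ///     if (classifyLEAReg(MI, Src, X86::LEA64_32r, /*AllowSP=*/false,
  ///                        NewSrc, isKill, isUndef, ImplicitOp))
  ///       MIB.addReg(NewSrc, getKillRegState(isKill) |
  ///                          getUndefRegState(isUndef));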
  bool classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
                      unsigned LEAOpcode, bool AllowSP, unsigned &NewSrc,
                      bool &isKill, bool &isUndef,
                      MachineOperand &ImplicitOp) const;

  /// convertToThreeAddress - This method must be implemented by targets that
  /// set the M_CONVERTIBLE_TO_3_ADDR flag.  When this flag is set, the target
  /// may be able to convert a two-address instruction into a true
  /// three-address instruction on demand.  This allows the X86 target (for
  /// example) to convert ADD and SHL instructions into LEA instructions if they
  /// would require register copies due to two-addressness.
  ///
  /// This method returns a null pointer if the transformation cannot be
  /// performed, otherwise it returns the new instruction.
  ///
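  /// For example (an illustrative sketch in MIR-like notation), the
  /// two-address form
  ///     %dst = ADD32ri %src, 5
  /// can be rewritten as the three-address
  ///     %dst = LEA32r %src, 1, %noreg, 5, %noreg
  /// so that %src does not have to be copied into %dst first.
  ///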
  MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
                                      MachineInstr &MI,
                                      LiveVariables *LV) const override;

  /// Returns true iff the routine could find two commutable operands in the
  /// given machine instruction.
  /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments. Their
  /// input values can be re-defined in this method only if the input values
  /// are not pre-defined, which is designated by the special value
  /// 'CommuteAnyOperandIndex' assigned to them.
  /// If both indices are pre-defined and refer to some operands, then the
  /// method simply returns true if the corresponding operands are commutable
  /// and returns false otherwise.
  ///
  /// For example, calling this method this way:
  ///     unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
  ///     findCommutedOpIndices(MI, Op1, Op2);
  /// can be interpreted as a query asking to find an operand that would be
  /// commutable with operand #1.
  bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
                             unsigned &SrcOpIdx2) const override;

  /// Returns true if the routine could find two commutable operands
  /// in the given FMA instruction. Otherwise, returns false.
  ///
  /// \p SrcOpIdx1 and \p SrcOpIdx2 are INPUT and OUTPUT arguments.
  /// The output indices of the commuted operands are returned in these
  /// arguments. Also, the input values of these arguments may be preset either
  /// to indices of operands that must be commuted or be equal to a special
  /// value 'CommuteAnyOperandIndex' which means that the corresponding
  /// operand index is not set and this method is free to pick any of the
  /// available commutable operands.
  ///
  /// For example, calling this method this way:
  ///     unsigned Idx1 = 1, Idx2 = CommuteAnyOperandIndex;
  ///     findFMA3CommutedOpIndices(MI, Idx1, Idx2);
  /// can be interpreted as a query asking if operand #1 can be swapped
  /// with any other available operand (e.g. operand #2, operand #3, etc.).
  ///
  /// The returned FMA opcode may differ from the opcode in the given MI.
  /// For example, commuting operands #1 and #3 in the following FMA
  ///     FMA213 #1, #2, #3
  /// results in an instruction with the adjusted opcode:
  ///     FMA231 #3, #2, #1
  bool findFMA3CommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
                                 unsigned &SrcOpIdx2) const;

  /// Returns an adjusted FMA opcode that must be used in an FMA instruction
  /// that performs the same computations as the given MI but which has the
  /// operands \p SrcOpIdx1 and \p SrcOpIdx2 commuted.
  /// It may return 0 if it is unsafe to commute the operands.
  ///
  /// The returned FMA opcode may differ from the opcode in the given \p MI.
  /// For example, commuting operands #1 and #3 in the following FMA
  ///     FMA213 #1, #2, #3
  /// results in an instruction with the adjusted opcode:
  ///     FMA231 #3, #2, #1
  unsigned getFMA3OpcodeToCommuteOperands(MachineInstr &MI, unsigned SrcOpIdx1,
                                          unsigned SrcOpIdx2) const;

  // Branch analysis.
  bool isUnpredicatedTerminator(const MachineInstr &MI) const override;
  bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify) const override;

  bool getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
                             int64_t &Offset,
                             const TargetRegisterInfo *TRI) const override;
  bool analyzeBranchPredicate(MachineBasicBlock &MBB,
                              TargetInstrInfo::MachineBranchPredicate &MBP,
                              bool AllowModify = false) const override;

  unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
  unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
                        const DebugLoc &DL) const override;
  bool canInsertSelect(const MachineBasicBlock&, ArrayRef<MachineOperand> Cond,
                       unsigned, unsigned, int&, int&, int&) const override;
  void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                    const DebugLoc &DL, unsigned DstReg,
                    ArrayRef<MachineOperand> Cond, unsigned TrueReg,
                    unsigned FalseReg) const override;
  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                   const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
                   bool KillSrc) const override;
  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI,
                           unsigned SrcReg, bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI) const override;

  void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
                      SmallVectorImpl<MachineOperand> &Addr,
                      const TargetRegisterClass *RC,
                      MachineInstr::mmo_iterator MMOBegin,
                      MachineInstr::mmo_iterator MMOEnd,
                      SmallVectorImpl<MachineInstr*> &NewMIs) const;

  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI,
                            unsigned DestReg, int FrameIndex,
                            const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI) const override;

  void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                       SmallVectorImpl<MachineOperand> &Addr,
                       const TargetRegisterClass *RC,
                       MachineInstr::mmo_iterator MMOBegin,
                       MachineInstr::mmo_iterator MMOEnd,
                       SmallVectorImpl<MachineInstr*> &NewMIs) const;

  bool expandPostRAPseudo(MachineInstr &MI) const override;

  /// foldMemoryOperand - If this target supports it, fold a load or store of
  /// the specified stack slot into the specified machine instruction for the
  /// specified operand(s).  If this is possible, the target should perform the
  /// folding and return true, otherwise it should return false.  If it folds
  /// the instruction, it is likely that the MachineInstr the iterator
  /// references has been changed.
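  ///
  /// For illustration (a sketch in MIR-like notation, not the exact in-tree
  /// output), folding a reload from a stack slot into an add:
  ///     %r1 = MOV32rm %stack.0, 1, %noreg, 0, %noreg
  ///     %r2 = ADD32rr %r2, %r1
  /// can become
  ///     %r2 = ADD32rm %r2, %stack.0, 1, %noreg, 0, %noreg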
  MachineInstr *
  foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
                        ArrayRef<unsigned> Ops,
                        MachineBasicBlock::iterator InsertPt, int FrameIndex,
                        LiveIntervals *LIS = nullptr) const override;

  /// foldMemoryOperand - Same as the previous version except it allows folding
  /// of any load and store from / to any address, not just from a specific
  /// stack slot.
  MachineInstr *foldMemoryOperandImpl(
      MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
      MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
      LiveIntervals *LIS = nullptr) const override;

  /// unfoldMemoryOperand - Separate a single instruction which folded a load or
  /// a store or a load and a store into two or more instructions. If this is
  /// possible, returns true as well as the new instructions by reference.
  bool
  unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, unsigned Reg,
                      bool UnfoldLoad, bool UnfoldStore,
                      SmallVectorImpl<MachineInstr *> &NewMIs) const override;

  bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                           SmallVectorImpl<SDNode*> &NewNodes) const override;

  /// getOpcodeAfterMemoryUnfold - Returns the opcode of the would-be new
  /// instruction after the load / store is unfolded from an instruction of the
  /// specified opcode. It returns zero if the specified unfolding is not
  /// possible. If LoadRegIndex is non-null, it is filled in with the operand
  /// index of the operand which will hold the register holding the loaded
  /// value.
  unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
                              bool UnfoldLoad, bool UnfoldStore,
                              unsigned *LoadRegIndex = nullptr) const override;

  /// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler
  /// to determine if two loads are loading from the same base address. It
  /// should only return true if the base pointers are the same and the
  /// only difference between the two addresses is the offset. It also returns
  /// the offsets by reference.
  bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1,
                               int64_t &Offset2) const override;

  /// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
  /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
  /// be scheduled together. On some targets if two loads are loading from
  /// addresses in the same cache line, it's better if they are scheduled
  /// together. This function takes two integers that represent the load offsets
  /// from the common base address. It returns true if it decides it's desirable
  /// to schedule the two loads together. "NumLoads" is the number of loads that
  /// have already been scheduled after Load1.
  bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                               int64_t Offset1, int64_t Offset2,
                               unsigned NumLoads) const override;

  bool shouldScheduleAdjacent(MachineInstr &First,
                              MachineInstr &Second) const override;

  void getNoopForMachoTarget(MCInst &NopInst) const override;

  bool
  ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;

  /// isSafeToMoveRegClassDefs - Return true if it's safe to move a machine
  /// instruction that defines the specified register class.
  bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const override;

  /// isSafeToClobberEFLAGS - Return true if it's safe to insert an instruction
  /// that would clobber the EFLAGS condition register. Note the result may be
  /// conservative. If it cannot definitely determine the safety after visiting
  /// a few instructions in each direction it assumes it's not safe.
  bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) const;

  /// True if MI has a condition code def, e.g. EFLAGS, that is
  /// not marked dead.
  bool hasLiveCondCodeDef(MachineInstr &MI) const;

  /// getGlobalBaseReg - Return a virtual register initialized with the
  /// global base register value. Output instructions required to
  /// initialize the register in the function entry block, if necessary.
  ///
  unsigned getGlobalBaseReg(MachineFunction *MF) const;

  std::pair<uint16_t, uint16_t>
  getExecutionDomain(const MachineInstr &MI) const override;

  void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override;

  unsigned
  getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum,
                               const TargetRegisterInfo *TRI) const override;
  unsigned getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
                                const TargetRegisterInfo *TRI) const override;
  void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum,
                                 const TargetRegisterInfo *TRI) const override;

  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
                                      unsigned OpNum,
                                      ArrayRef<MachineOperand> MOs,
                                      MachineBasicBlock::iterator InsertPt,
                                      unsigned Size, unsigned Alignment,
                                      bool AllowCommute) const;

  void
  getUnconditionalBranch(MCInst &Branch,
                         const MCSymbolRefExpr *BranchTarget) const override;

  void getTrap(MCInst &MI) const override;

  unsigned getJumpInstrTableEntryBound() const override;

  bool isHighLatencyDef(int opc) const override;

  bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
                             const MachineRegisterInfo *MRI,
                             const MachineInstr &DefMI, unsigned DefIdx,
                             const MachineInstr &UseMI,
                             unsigned UseIdx) const override;

  bool useMachineCombiner() const override {
    return true;
  }

  bool isAssociativeAndCommutative(const MachineInstr &Inst) const override;

  bool hasReassociableOperands(const MachineInstr &Inst,
                               const MachineBasicBlock *MBB) const override;

  void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
                             MachineInstr &NewMI1,
                             MachineInstr &NewMI2) const override;

  /// analyzeCompare - For a comparison instruction, return the source registers
  /// in SrcReg and SrcReg2 if it has two register operands, and the value it
  /// compares against in CmpValue. Return true if the comparison instruction
  /// can be analyzed.
  bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
                      unsigned &SrcReg2, int &CmpMask,
                      int &CmpValue) const override;

  /// optimizeCompareInstr - Check if there exists an earlier instruction that
  /// operates on the same source operands and sets flags in the same way as
  /// Compare; remove Compare if possible.
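  ///
  /// For example (an illustrative sketch), in
  ///     %d = SUB32rr %a, %b, implicit-def %eflags
  ///     CMP32rr %a, %b, implicit-def %eflags
  ///     JE_1 %bb.target
  /// the CMP32rr is redundant because the preceding SUB32rr already sets
  /// EFLAGS for the same operands, so the compare can be removed.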
  bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
                            unsigned SrcReg2, int CmpMask, int CmpValue,
                            const MachineRegisterInfo *MRI) const override;

  /// optimizeLoadInstr - Try to remove the load by folding it to a register
  /// operand at the use. We fold the load instructions if and only if the
  /// def and use are in the same BB. We only look at one load and see
  /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
  /// defined by the load we are trying to fold. DefMI returns the machine
  /// instruction that defines FoldAsLoadDefReg, and the function returns
  /// the machine instruction generated due to folding.
  MachineInstr *optimizeLoadInstr(MachineInstr &MI,
                                  const MachineRegisterInfo *MRI,
                                  unsigned &FoldAsLoadDefReg,
                                  MachineInstr *&DefMI) const override;

  std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned TF) const override;

  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override;

protected:
  /// Commutes the operands in the given instruction by changing the operand
  /// order and/or changing the instruction's opcode and/or the immediate value
  /// operand.
  ///
  /// The arguments 'CommuteOpIdx1' and 'CommuteOpIdx2' specify the operands
  /// to be commuted.
  ///
  /// Do not call this method for a non-commutable instruction or
  /// non-commutable operands.
  /// Even though the instruction is commutable, the method may still
  /// fail to commute the operands; a null pointer is returned in such cases.
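  ///
  /// For example (behavior sketched from the x86 implementation; details vary
  /// by opcode), commuting the two register operands of a SHRD instruction
  /// switches it to the corresponding SHLD opcode and replaces the immediate
  /// shift amount I with BitWidth - I, while FMA opcodes are adjusted via the
  /// FMA3 helpers declared above.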
  MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                       unsigned CommuteOpIdx1,
                                       unsigned CommuteOpIdx2) const override;

private:
  MachineInstr *convertToThreeAddressWithLEA(unsigned MIOpc,
                                             MachineFunction::iterator &MFI,
                                             MachineInstr &MI,
                                             LiveVariables *LV) const;

  /// Handles memory folding for special case instructions, for instance those
  /// requiring custom manipulation of the address.
  MachineInstr *foldMemoryOperandCustom(MachineFunction &MF, MachineInstr &MI,
                                        unsigned OpNum,
                                        ArrayRef<MachineOperand> MOs,
                                        MachineBasicBlock::iterator InsertPt,
                                        unsigned Size, unsigned Align) const;

  /// isFrameOperand - Return true and the FrameIndex if the specified
  /// operand and following operands form a reference to the stack frame.
  bool isFrameOperand(const MachineInstr &MI, unsigned int Op,
                      int &FrameIndex) const;

  /// Expand the MOVImmSExti8 pseudo-instructions.
  bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB) const;
};

} // End llvm namespace

#endif