//===-- AMDGPUInstrInfo.h - AMDGPU Instruction Information ------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Contains the definition of a TargetInstrInfo class that is common
/// to all AMD GPUs.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_R600_AMDGPUINSTRINFO_H
#define LLVM_LIB_TARGET_R600_AMDGPUINSTRINFO_H

#include "AMDGPURegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include <map>

#define GET_INSTRINFO_HEADER
#define GET_INSTRINFO_ENUM
#define GET_INSTRINFO_OPERAND_ENUM
#include "AMDGPUGenInstrInfo.inc"

#define OPCODE_IS_ZERO_INT AMDGPU::PRED_SETE_INT
#define OPCODE_IS_NOT_ZERO_INT AMDGPU::PRED_SETNE_INT
#define OPCODE_IS_ZERO AMDGPU::PRED_SETE
#define OPCODE_IS_NOT_ZERO AMDGPU::PRED_SETNE

namespace llvm {

class AMDGPUSubtarget;
class MachineFunction;
class MachineInstr;
class MachineInstrBuilder;

class AMDGPUInstrInfo : public AMDGPUGenInstrInfo {
private:
  const AMDGPURegisterInfo RI;
  virtual void anchor();
protected:
  const AMDGPUSubtarget &ST;
public:
  explicit AMDGPUInstrInfo(const AMDGPUSubtarget &st);

  virtual const AMDGPURegisterInfo &getRegisterInfo() const = 0;

  bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
                             unsigned &DstReg, unsigned &SubIdx) const override;

  unsigned isLoadFromStackSlot(const MachineInstr *MI,
                               int &FrameIndex) const override;
  unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                     int &FrameIndex) const override;
  bool hasLoadFromStackSlot(const MachineInstr *MI,
                            const MachineMemOperand *&MMO,
                            int &FrameIndex) const override;
  unsigned isStoreFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
  unsigned isStoreFromStackSlotPostFE(const MachineInstr *MI,
                                      int &FrameIndex) const;
  bool hasStoreFromStackSlot(const MachineInstr *MI,
                             const MachineMemOperand *&MMO,
                             int &FrameIndex) const;

  MachineInstr *
  convertToThreeAddress(MachineFunction::iterator &MFI,
                        MachineBasicBlock::iterator &MBBI,
                        LiveVariables *LV) const override;


  bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;

  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI,
                           unsigned SrcReg, bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI) const override;
  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI,
                            unsigned DestReg, int FrameIndex,
                            const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI) const override;

protected:
  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                      ArrayRef<unsigned> Ops,
                                      MachineBasicBlock::iterator InsertPt,
                                      int FrameIndex) const override;
  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                      ArrayRef<unsigned> Ops,
                                      MachineBasicBlock::iterator InsertPt,
                                      MachineInstr *LoadMI) const override;

public:
  /// \returns the smallest register index that will be accessed by an indirect
  /// read or write or -1 if indirect addressing is not used by this program.
  int getIndirectIndexBegin(const MachineFunction &MF) const;

  /// \returns the largest register index that will be accessed by an indirect
  /// read or write or -1 if indirect addressing is not used by this program.
  int getIndirectIndexEnd(const MachineFunction &MF) const;

  bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                        unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
                        SmallVectorImpl<MachineInstr *> &NewMIs) const override;
  bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                           SmallVectorImpl<SDNode *> &NewNodes) const override;
  unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
                               bool UnfoldLoad, bool UnfoldStore,
                               unsigned *LoadRegIndex = nullptr) const override;

  bool enableClusterLoads() const override;

  bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                               int64_t Offset1, int64_t Offset2,
                               unsigned NumLoads) const override;

  bool
  ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
  void insertNoop(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator MI) const override;
  bool isPredicated(const MachineInstr *MI) const override;
  bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
                         ArrayRef<MachineOperand> Pred2) const override;
  bool DefinesPredicate(MachineInstr *MI,
                        std::vector<MachineOperand> &Pred) const override;
  bool isPredicable(MachineInstr *MI) const override;
  bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const override;

  // Helper functions that check the opcode for status information
  bool isRegisterStore(const MachineInstr &MI) const;
  bool isRegisterLoad(const MachineInstr &MI) const;

  /// \brief Return a target-specific opcode if Opcode is a pseudo instruction.
  /// Return -1 if the target-specific opcode for the pseudo instruction does
  /// not exist. If Opcode is not a pseudo instruction, it is returned unchanged.
  int pseudoToMCOpcode(int Opcode) const;

  /// \brief Return the descriptor of the target-specific machine instruction
  /// that corresponds to the specified pseudo or native opcode.
  const MCInstrDesc &getMCOpcodeFromPseudo(unsigned Opcode) const {
    return get(pseudoToMCOpcode(Opcode));
  }

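  // Illustrative usage sketch (an assumption about a typical caller, not a
  // statement of how any particular pass is implemented): a lowering step
  // could map a pseudo opcode to its MC opcode and bail out when no hardware
  // equivalent exists. TII, MI and OutMI below are hypothetical names.
  //
  //   int MCOpcode = TII->pseudoToMCOpcode(MI->getOpcode());
  //   if (MCOpcode == -1)
  //     report_fatal_error("Pseudo instruction has no MC equivalent");
  //   OutMI.setOpcode(MCOpcode);
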
  ArrayRef<std::pair<int, const char *>>
  getSerializableTargetIndices() const override;

//===---------------------------------------------------------------------===//
// Pure virtual functions to be implemented by sub-classes.
//===---------------------------------------------------------------------===//

  virtual bool isMov(unsigned opcode) const = 0;

  /// \brief Calculate the "Indirect Address" for the given \p RegIndex and
  ///        \p Channel
  ///
  /// We model indirect addressing using a virtual address space that can be
  /// accessed with loads and stores.  The "Indirect Address" is the memory
  /// address in this virtual address space that maps to the given \p RegIndex
  /// and \p Channel.
  virtual unsigned calculateIndirectAddress(unsigned RegIndex,
                                            unsigned Channel) const = 0;
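  //
  // A minimal sketch of the mapping described above (illustrative only; the
  // actual formula is whatever the subtarget's implementation chooses). With
  // four channels per register index, an implementation might compute:
  //
  //   return RegIndex * 4 + Channel;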

  /// \returns The register class to be used for loading and storing values
  /// from an "Indirect Address".
  virtual const TargetRegisterClass *getIndirectAddrRegClass() const = 0;

  /// \brief Build instruction(s) for an indirect register write.
  ///
  /// \returns The instruction that performs the indirect register write
  virtual MachineInstrBuilder buildIndirectWrite(MachineBasicBlock *MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned ValueReg, unsigned Address,
                                    unsigned OffsetReg) const = 0;

  /// \brief Build instruction(s) for an indirect register read.
  ///
  /// \returns The instruction that performs the indirect register read
  virtual MachineInstrBuilder buildIndirectRead(MachineBasicBlock *MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned ValueReg, unsigned Address,
                                    unsigned OffsetReg) const = 0;

  /// \brief Build a MOV instruction.
  virtual MachineInstr *buildMovInstr(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator I,
                                      unsigned DstReg, unsigned SrcReg) const = 0;

  /// \brief Given a MIMG \p Opcode that writes all 4 channels, return the
  /// equivalent opcode that writes \p Channels channels.
  int getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const;
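  //
  // For example (illustrative; the opcode name below is hypothetical):
  //
  //   // Select the single-channel variant of a 4-channel image sample:
  //   int NewOpcode = TII->getMaskedMIMGOp(AMDGPU::IMAGE_SAMPLE_V4_V2, 1);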

};

namespace AMDGPU {
  LLVM_READONLY
  int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex);
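
  // Illustrative usage sketch (the OpName entry used here, src0, is an
  // assumption about the TableGen-generated operand-name enum):
  //
  //   int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
  //                                            AMDGPU::OpName::src0);
  //   if (Src0Idx != -1) {
  //     const MachineOperand &Src0 = MI.getOperand(Src0Idx);
  //     // ... inspect or rewrite the named operand ...
  //   }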
}  // End namespace AMDGPU

} // End llvm namespace

#define AMDGPU_FLAG_REGISTER_LOAD  (UINT64_C(1) << 63)
#define AMDGPU_FLAG_REGISTER_STORE (UINT64_C(1) << 62)
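
// These two flags are intended to be encoded in an instruction's TSFlags
// (bits 63 and 62). A sketch of how a query such as isRegisterLoad() might
// test them (an assumption about the implementation, not quoted from it):
//
//   return (get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD) != 0;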

#endif