//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Implementation of the TargetInstrInfo class that is common to all
/// AMD GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
#include "AMDIL.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

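// GET_INSTRINFO_CTOR and GET_INSTRMAP_INFO select which pieces of the
// TableGen-generated AMDGPUGenInstrInfo.inc are expanded here: the
// AMDGPUGenInstrInfo constructor and the instruction mapping tables,
// respectively.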
#define GET_INSTRINFO_CTOR
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"

using namespace llvm;

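// The two integer arguments forwarded to AMDGPUGenInstrInfo are the
// call-frame setup and destroy opcodes expected by the TargetInstrInfo
// base class.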
AMDGPUInstrInfo::AMDGPUInstrInfo(TargetMachine &tm)
  : AMDGPUGenInstrInfo(0, 0), RI(tm, *this), TM(tm) { }

const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
  return RI;
}

bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                           unsigned &SrcReg, unsigned &DstReg,
                                           unsigned &SubIdx) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                             int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                   int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                          const MachineMemOperand *&MMO,
                                          int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

MachineInstr *
AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                      MachineBasicBlock::iterator &MBBI,
                                      LiveVariables *LV) const {
  // TODO: Implement this function
  return NULL;
}
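
/// Advance \p iter to the next branch instruction in \p MBB. Returns true
/// with \p iter left on the branch if one is found, and false with \p iter
/// at MBB.end() otherwise.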
bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
                                        MachineBasicBlock &MBB) const {
  while (iter != MBB.end()) {
    switch (iter->getOpcode()) {
    default:
      break;
    case AMDGPU::BRANCH_COND_i32:
    case AMDGPU::BRANCH_COND_f32:
    case AMDGPU::BRANCH:
      return true;
    }
    ++iter;
  }
  return false;
}

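/// Scan backwards from the end of \p MBB and return an iterator just past
/// the last non-flow-control instruction, i.e. the point where the trailing
/// run of ENDLOOP/ENDIF/ELSE instructions begins. For example, for a block
/// ending in [ADD, ENDIF, ENDLOOP] this returns the iterator pointing at
/// ENDIF.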
MachineBasicBlock::iterator skipFlowControl(MachineBasicBlock *MBB) {
  MachineBasicBlock::iterator tmp = MBB->end();
  if (!MBB->size()) {
    return MBB->end();
  }
  while (--tmp) {
    if (tmp->getOpcode() == AMDGPU::ENDLOOP
        || tmp->getOpcode() == AMDGPU::ENDIF
        || tmp->getOpcode() == AMDGPU::ELSE) {
      if (tmp == MBB->begin()) {
        return tmp;
      } else {
        continue;
      }
    } else {
      return ++tmp;
    }
  }
  return MBB->end();
}

void
AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MI,
                                    unsigned SrcReg, bool isKill,
                                    int FrameIndex,
                                    const TargetRegisterClass *RC,
                                    const TargetRegisterInfo *TRI) const {
  assert(!"Not Implemented");
}

void
AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MI,
                                     unsigned DestReg, int FrameIndex,
                                     const TargetRegisterClass *RC,
                                     const TargetRegisterInfo *TRI) const {
  assert(!"Not Implemented");
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                      MachineInstr *MI,
                                      const SmallVectorImpl<unsigned> &Ops,
                                      int FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                      MachineInstr *MI,
                                      const SmallVectorImpl<unsigned> &Ops,
                                      MachineInstr *LoadMI) const {
  // TODO: Implement this function
  return 0;
}

bool
AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                     const SmallVectorImpl<unsigned> &Ops) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                 unsigned Reg, bool UnfoldLoad,
                                 bool UnfoldStore,
                                 SmallVectorImpl<MachineInstr*> &NewMIs) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                    SmallVectorImpl<SDNode*> &NewNodes) const {
  // TODO: Implement this function
  return false;
}

unsigned
AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                           bool UnfoldLoad, bool UnfoldStore,
                                           unsigned *LoadRegIndex) const {
  // TODO: Implement this function
  return 0;
}

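/// Load-clustering heuristic for the scheduler: cluster runs of fewer than
/// 16 loads whose offsets differ by less than 16. For example, two loads at
/// offsets 0 and 8 are scheduled near each other, while loads 32 apart are
/// not.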
bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                             int64_t Offset1, int64_t Offset2,
                                             unsigned NumLoads) const {
  assert(Offset2 > Offset1 &&
         "Second offset should be larger than first offset!");
  // If we have fewer than 16 loads in a row, and the offsets are within 16,
  // then schedule together.
  // TODO: Make the loads schedule near if they fit in a cache line.
  return (NumLoads < 16 && (Offset2 - Offset1) < 16);
}

bool
AMDGPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
  const {
  // TODO: Implement this function
  return true;
}

void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MI) const {
  // TODO: Implement this function
}

bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                  const SmallVectorImpl<MachineOperand> &Pred2)
  const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
                                      std::vector<MachineOperand> &Pred) const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
  // TODO: Implement this function
  return MI->getDesc().isPredicable();
}

bool
AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  // TODO: Implement this function
  return true;
}

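// AMDGPU_FLAG_REGISTER_STORE and AMDGPU_FLAG_REGISTER_LOAD are
// target-specific TSFlags bits set on instruction definitions in TableGen;
// the queries below simply test those bits on the instruction's descriptor.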
bool AMDGPUInstrInfo::isRegisterStore(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
}

bool AMDGPUInstrInfo::isRegisterLoad(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
}

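/// Rewrite the register classes of \p MI's virtual register defs to classes
/// supported by the target ISA, as given by
/// AMDGPURegisterInfo::getISARegClass().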
void AMDGPUInstrInfo::convertToISA(MachineInstr &MI, MachineFunction &MF,
                                   DebugLoc DL) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AMDGPURegisterInfo &RI = getRegisterInfo();

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    // Convert the destination register class to one supported by the ISA.
    if (MO.isReg() && MO.isDef()) {
      if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        const TargetRegisterClass *oldRegClass = MRI.getRegClass(MO.getReg());
        const TargetRegisterClass *newRegClass = RI.getISARegClass(oldRegClass);

        assert(newRegClass);

        MRI.setRegClass(MO.getReg(), newRegClass);
      }
    }
  }
}