Home | History | Annotate | Download | only in R600
      1 //===-- SIInstrInfo.cpp - SI Instruction Information  ---------------------===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 /// \file
     11 /// \brief SI Implementation of TargetInstrInfo.
     12 //
     13 //===----------------------------------------------------------------------===//
     14 
     15 
     16 #include "SIInstrInfo.h"
     17 #include "AMDGPUTargetMachine.h"
     18 #include "llvm/CodeGen/MachineInstrBuilder.h"
     19 #include "llvm/CodeGen/MachineRegisterInfo.h"
     20 #include "llvm/MC/MCInstrDesc.h"
     21 #include <stdio.h>
     22 
     23 using namespace llvm;
     24 
     25 SIInstrInfo::SIInstrInfo(AMDGPUTargetMachine &tm)
     26   : AMDGPUInstrInfo(tm),
     27     RI(tm, *this)
     28     { }
     29 
     30 const SIRegisterInfo &SIInstrInfo::getRegisterInfo() const {
     31   return RI;
     32 }
     33 
     34 void
     35 SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
     36                          MachineBasicBlock::iterator MI, DebugLoc DL,
     37                          unsigned DestReg, unsigned SrcReg,
     38                          bool KillSrc) const {
     39 
     40   // If we are trying to copy to or from SCC, there is a bug somewhere else in
     41   // the backend.  While it may be theoretically possible to do this, it should
     42   // never be necessary.
     43   assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);
     44 
     45   const int16_t Sub0_15[] = {
     46     AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
     47     AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
     48     AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
     49     AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
     50   };
     51 
     52   const int16_t Sub0_7[] = {
     53     AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
     54     AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
     55   };
     56 
     57   const int16_t Sub0_3[] = {
     58     AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
     59   };
     60 
     61   const int16_t Sub0_1[] = {
     62     AMDGPU::sub0, AMDGPU::sub1, 0
     63   };
     64 
     65   unsigned Opcode;
     66   const int16_t *SubIndices;
     67 
     68   if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
     69     assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
     70     BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
     71             .addReg(SrcReg, getKillRegState(KillSrc));
     72     return;
     73 
     74   } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
     75     assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
     76     BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
     77             .addReg(SrcReg, getKillRegState(KillSrc));
     78     return;
     79 
     80   } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
     81     assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
     82     Opcode = AMDGPU::S_MOV_B32;
     83     SubIndices = Sub0_3;
     84 
     85   } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
     86     assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
     87     Opcode = AMDGPU::S_MOV_B32;
     88     SubIndices = Sub0_7;
     89 
     90   } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
     91     assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
     92     Opcode = AMDGPU::S_MOV_B32;
     93     SubIndices = Sub0_15;
     94 
     95   } else if (AMDGPU::VReg_32RegClass.contains(DestReg)) {
     96     assert(AMDGPU::VReg_32RegClass.contains(SrcReg) ||
     97 	   AMDGPU::SReg_32RegClass.contains(SrcReg));
     98     BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
     99             .addReg(SrcReg, getKillRegState(KillSrc));
    100     return;
    101 
    102   } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
    103     assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
    104 	   AMDGPU::SReg_64RegClass.contains(SrcReg));
    105     Opcode = AMDGPU::V_MOV_B32_e32;
    106     SubIndices = Sub0_1;
    107 
    108   } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
    109     assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
    110 	   AMDGPU::SReg_128RegClass.contains(SrcReg));
    111     Opcode = AMDGPU::V_MOV_B32_e32;
    112     SubIndices = Sub0_3;
    113 
    114   } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
    115     assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
    116 	   AMDGPU::SReg_256RegClass.contains(SrcReg));
    117     Opcode = AMDGPU::V_MOV_B32_e32;
    118     SubIndices = Sub0_7;
    119 
    120   } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
    121     assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
    122 	   AMDGPU::SReg_512RegClass.contains(SrcReg));
    123     Opcode = AMDGPU::V_MOV_B32_e32;
    124     SubIndices = Sub0_15;
    125 
    126   } else {
    127     llvm_unreachable("Can't copy register!");
    128   }
    129 
    130   while (unsigned SubIdx = *SubIndices++) {
    131     MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
    132       get(Opcode), RI.getSubReg(DestReg, SubIdx));
    133 
    134     Builder.addReg(RI.getSubReg(SrcReg, SubIdx), getKillRegState(KillSrc));
    135 
    136     if (*SubIndices)
    137       Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
    138   }
    139 }
    140 
    141 MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
    142                                               bool NewMI) const {
    143 
    144   if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg() ||
    145       !MI->getOperand(2).isReg())
    146     return 0;
    147 
    148   return TargetInstrInfo::commuteInstruction(MI, NewMI);
    149 }
    150 
    151 MachineInstr * SIInstrInfo::getMovImmInstr(MachineFunction *MF, unsigned DstReg,
    152                                            int64_t Imm) const {
    153   MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::V_MOV_B32_e32), DebugLoc());
    154   MachineInstrBuilder MIB(*MF, MI);
    155   MIB.addReg(DstReg, RegState::Define);
    156   MIB.addImm(Imm);
    157 
    158   return MI;
    159 
    160 }
    161 
    162 bool SIInstrInfo::isMov(unsigned Opcode) const {
    163   switch(Opcode) {
    164   default: return false;
    165   case AMDGPU::S_MOV_B32:
    166   case AMDGPU::S_MOV_B64:
    167   case AMDGPU::V_MOV_B32_e32:
    168   case AMDGPU::V_MOV_B32_e64:
    169     return true;
    170   }
    171 }
    172 
    173 bool
    174 SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
    175   return RC != &AMDGPU::EXECRegRegClass;
    176 }
    177 
    178 //===----------------------------------------------------------------------===//
    179 // Indirect addressing callbacks
    180 //===----------------------------------------------------------------------===//
    181 
    182 unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
    183                                                  unsigned Channel) const {
    184   assert(Channel == 0);
    185   return RegIndex;
    186 }
    187 
    188 
// Indirect addressing is not yet implemented for SI; reaching this is a bug
// in the caller.
int SIInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  llvm_unreachable("Unimplemented");
}
    192 
// Indirect addressing is not yet implemented for SI; reaching this is a bug
// in the caller.
int SIInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  llvm_unreachable("Unimplemented");
}
    196 
// Indirect addressing is not yet implemented for SI; reaching this is a bug
// in the caller.
const TargetRegisterClass *SIInstrInfo::getIndirectAddrStoreRegClass(
                                                     unsigned SourceReg) const {
  llvm_unreachable("Unimplemented");
}
    201 
// Indirect addressing is not yet implemented for SI; reaching this is a bug
// in the caller.
const TargetRegisterClass *SIInstrInfo::getIndirectAddrLoadRegClass() const {
  llvm_unreachable("Unimplemented");
}
    205 
// Indirect addressing is not yet implemented for SI; reaching this is a bug
// in the caller.
MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
                                   MachineBasicBlock *MBB,
                                   MachineBasicBlock::iterator I,
                                   unsigned ValueReg,
                                   unsigned Address, unsigned OffsetReg) const {
  llvm_unreachable("Unimplemented");
}
    213 
// Indirect addressing is not yet implemented for SI; reaching this is a bug
// in the caller.
MachineInstrBuilder SIInstrInfo::buildIndirectRead(
                                   MachineBasicBlock *MBB,
                                   MachineBasicBlock::iterator I,
                                   unsigned ValueReg,
                                   unsigned Address, unsigned OffsetReg) const {
  llvm_unreachable("Unimplemented");
}
    221 
// Indirect addressing is not yet implemented for SI; reaching this is a bug
// in the caller.
const TargetRegisterClass *SIInstrInfo::getSuperIndirectRegClass() const {
  llvm_unreachable("Unimplemented");
}
    225