//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Implementation of the TargetInstrInfo class that is common to all
/// AMD GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

#define GET_INSTRINFO_CTOR
#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"

using namespace llvm;

AMDGPUInstrInfo::AMDGPUInstrInfo(TargetMachine &tm)
  : AMDGPUGenInstrInfo(0, 0), RI(tm), TM(tm) { }

const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
  return RI;
}

bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                            unsigned &SrcReg, unsigned &DstReg,
                                            unsigned &SubIdx) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
                                                     int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
                                            const MachineMemOperand *&MMO,
                                            int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

MachineInstr *
AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                       MachineBasicBlock::iterator &MBBI,
                                       LiveVariables *LV) const {
  // TODO: Implement this function
  return NULL;
}

bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
                                         MachineBasicBlock &MBB) const {
  while (iter != MBB.end()) {
    switch (iter->getOpcode()) {
    default:
      break;
    case AMDGPU::BRANCH_COND_i32:
    case AMDGPU::BRANCH_COND_f32:
    case AMDGPU::BRANCH:
      return true;
    }
    ++iter;
  }
  return false;
}

void
AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MI,
                                     unsigned SrcReg, bool isKill,
                                     int FrameIndex,
                                     const TargetRegisterClass *RC,
                                     const TargetRegisterInfo *TRI) const {
  assert(!"Not Implemented");
}

void
AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned DestReg, int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  assert(!"Not Implemented");
}
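// Memory-operand folding and unfolding are not implemented for this target
// yet; the overrides below conservatively report that nothing can be folded
// or unfolded.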
MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       int FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       MachineInstr *LoadMI) const {
  // TODO: Implement this function
  return 0;
}

bool
AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                      const SmallVectorImpl<unsigned> &Ops) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                     unsigned Reg, bool UnfoldLoad,
                                     bool UnfoldStore,
                                     SmallVectorImpl<MachineInstr*> &NewMIs) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                     SmallVectorImpl<SDNode*> &NewNodes) const {
  // TODO: Implement this function
  return false;
}

unsigned
AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                            bool UnfoldLoad, bool UnfoldStore,
                                            unsigned *LoadRegIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                              int64_t Offset1, int64_t Offset2,
                                              unsigned NumLoads) const {
  assert(Offset2 > Offset1 &&
         "Second offset should be larger than first offset!");
  // If we have fewer than 16 loads in a row, and the offsets are within 16,
  // then schedule them together.
  // TODO: Make the loads schedule near if it fits in a cacheline.
  return (NumLoads < 16 && (Offset2 - Offset1) < 16);
}

bool
AMDGPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
  const {
  // TODO: Implement this function
  return true;
}

void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  // TODO: Implement this function
}

bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                   const SmallVectorImpl<MachineOperand> &Pred2)
  const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
                                       std::vector<MachineOperand> &Pred) const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
  // TODO: Implement this function
  return MI->getDesc().isPredicable();
}

bool
AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  // TODO: Implement this function
  return true;
}

bool AMDGPUInstrInfo::isRegisterStore(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
}

bool AMDGPUInstrInfo::isRegisterLoad(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
}
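// convertToISA rewrites the register classes of an instruction's virtual
// register definitions to classes the target ISA can encode directly; the
// mapping is provided by AMDGPURegisterInfo::getISARegClass().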
void AMDGPUInstrInfo::convertToISA(MachineInstr &MI, MachineFunction &MF,
                                   DebugLoc DL) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AMDGPURegisterInfo &RI = getRegisterInfo();

  for (unsigned i = 0; i < MI.getNumOperands(); i++) {
    MachineOperand &MO = MI.getOperand(i);
    // Convert dst regclass to one that is supported by the ISA.
    if (MO.isReg() && MO.isDef()) {
      if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        const TargetRegisterClass *oldRegClass = MRI.getRegClass(MO.getReg());
        const TargetRegisterClass *newRegClass = RI.getISARegClass(oldRegClass);

        assert(newRegClass);

        MRI.setRegClass(MO.getReg(), newRegClass);
      }
    }
  }
}