//===-- R600EmitClauseMarkers.cpp - Emit CF_ALU ---------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Add CF_ALU. R600 ALU instructions are grouped into clauses, each of which
/// can hold up to 128 ALU instructions; these instructions can access up to
/// 4 prefetched lines of 16 constant registers from the constant buffers.
/// Such ALU clauses are initiated by CF_ALU instructions.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "R600Defines.h"
#include "R600InstrInfo.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

namespace llvm {
void initializeR600EmitClauseMarkersPass(PassRegistry&);
}

namespace {

class R600EmitClauseMarkers : public MachineFunctionPass {

private:
  const R600InstrInfo *TII;
  int Address;

  unsigned OccupiedDwords(MachineInstr *MI) const {
    switch (MI->getOpcode()) {
    case AMDGPU::INTERP_PAIR_XY:
    case AMDGPU::INTERP_PAIR_ZW:
    case AMDGPU::INTERP_VEC_LOAD:
    case AMDGPU::DOT_4:
      return 4;
    case AMDGPU::KILL:
      return 0;
    default:
      break;
    }

    // These will be expanded to two ALU instructions in the
    // ExpandSpecialInstructions pass.
    if (TII->isLDSRetInstr(MI->getOpcode()))
      return 2;

    if (TII->isVector(*MI) ||
        TII->isCubeOp(MI->getOpcode()) ||
        TII->isReductionOp(MI->getOpcode()))
      return 4;

    unsigned NumLiteral = 0;
    for (MachineInstr::mop_iterator It = MI->operands_begin(),
                                    E = MI->operands_end(); It != E; ++It) {
      MachineOperand &MO = *It;
      if (MO.isReg() && MO.getReg() == AMDGPU::ALU_LITERAL_X)
        ++NumLiteral;
    }
    return 1 + NumLiteral;
  }

  bool isALU(const MachineInstr *MI) const {
    if (TII->isALUInstr(MI->getOpcode()))
      return true;
    if (TII->isVector(*MI) || TII->isCubeOp(MI->getOpcode()))
      return true;
    switch (MI->getOpcode()) {
    case AMDGPU::PRED_X:
    case AMDGPU::INTERP_PAIR_XY:
    case AMDGPU::INTERP_PAIR_ZW:
    case AMDGPU::INTERP_VEC_LOAD:
    case AMDGPU::COPY:
    case AMDGPU::DOT_4:
      return true;
    default:
      return false;
    }
  }

  bool IsTrivialInst(MachineInstr *MI) const {
    switch (MI->getOpcode()) {
    case AMDGPU::KILL:
    case AMDGPU::RETURN:
    case AMDGPU::IMPLICIT_DEF:
      return true;
    default:
      return false;
    }
  }

  std::pair<unsigned, unsigned> getAccessedBankLine(unsigned Sel) const {
    // Sel is (512 + (kc_bank << 12) + ConstIndex) << 2
    // (See also R600ISelLowering.cpp)
    // ConstIndex value is in [0, 4095].
    return std::pair<unsigned, unsigned>(
        ((Sel >> 2) - 512) >> 12, // KC_BANK
        // Line Number of ConstIndex
        // A line contains 16 constant registers; however, a KCX bank can lock
        // two lines at a time, so we want an even line number. The line number
        // could be retrieved with (>> 4); using (>> 5) << 1 yields the even
        // number instead.
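        // Worked example (assumed values, for illustration only): with
        // kc_bank = 1 and ConstIndex = 35, Sel = (512 + (1 << 12) + 35) << 2,
        // so KC_BANK = ((Sel >> 2) - 512) >> 12 = 1 and the locked line pair
        // starts at ((35 & 4095) >> 5) << 1 = 2, covering constants 32..63.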
        ((((Sel >> 2) - 512) & 4095) >> 5) << 1);
  }

  bool SubstituteKCacheBank(MachineInstr *MI,
                            std::vector<std::pair<unsigned, unsigned> > &CachedConsts,
                            bool UpdateInstr = true) const {
    std::vector<std::pair<unsigned, unsigned> > UsedKCache;

    if (!TII->isALUInstr(MI->getOpcode()) && MI->getOpcode() != AMDGPU::DOT_4)
      return true;

    const SmallVectorImpl<std::pair<MachineOperand *, int64_t> > &Consts =
        TII->getSrcs(MI);
    assert((TII->isALUInstr(MI->getOpcode()) ||
            MI->getOpcode() == AMDGPU::DOT_4) && "Can't assign Const");
    for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
      if (Consts[i].first->getReg() != AMDGPU::ALU_CONST)
        continue;
      unsigned Sel = Consts[i].second;
      unsigned Chan = Sel & 3, Index = ((Sel >> 2) - 512) & 31;
      unsigned KCacheIndex = Index * 4 + Chan;
      const std::pair<unsigned, unsigned> &BankLine = getAccessedBankLine(Sel);
      if (CachedConsts.empty()) {
        CachedConsts.push_back(BankLine);
        UsedKCache.push_back(std::pair<unsigned, unsigned>(0, KCacheIndex));
        continue;
      }
      if (CachedConsts[0] == BankLine) {
        UsedKCache.push_back(std::pair<unsigned, unsigned>(0, KCacheIndex));
        continue;
      }
      if (CachedConsts.size() == 1) {
        CachedConsts.push_back(BankLine);
        UsedKCache.push_back(std::pair<unsigned, unsigned>(1, KCacheIndex));
        continue;
      }
      if (CachedConsts[1] == BankLine) {
        UsedKCache.push_back(std::pair<unsigned, unsigned>(1, KCacheIndex));
        continue;
      }
      return false;
    }

    if (!UpdateInstr)
      return true;

    for (unsigned i = 0, j = 0, n = Consts.size(); i < n; ++i) {
      if (Consts[i].first->getReg() != AMDGPU::ALU_CONST)
        continue;
      switch (UsedKCache[j].first) {
      case 0:
        Consts[i].first->setReg(
            AMDGPU::R600_KC0RegClass.getRegister(UsedKCache[j].second));
        break;
      case 1:
        Consts[i].first->setReg(
            AMDGPU::R600_KC1RegClass.getRegister(UsedKCache[j].second));
        break;
      default:
        llvm_unreachable("Wrong Cache Line");
      }
      j++;
    }
    return true;
  }

  bool canClauseLocalKillFitInClause(
                        unsigned AluInstCount,
                        std::vector<std::pair<unsigned, unsigned> > KCacheBanks,
                        MachineBasicBlock::iterator Def,
                        MachineBasicBlock::iterator BBEnd) {
    const R600RegisterInfo &TRI = TII->getRegisterInfo();
    for (MachineInstr::const_mop_iterator
           MOI = Def->operands_begin(),
           MOE = Def->operands_end(); MOI != MOE; ++MOI) {
      if (!MOI->isReg() || !MOI->isDef() ||
          TRI.isPhysRegLiveAcrossClauses(MOI->getReg()))
        continue;

      // Def defines a clause local register, so check that its use will fit
      // in the clause.
      unsigned LastUseCount = 0;
      for (MachineBasicBlock::iterator UseI = Def; UseI != BBEnd; ++UseI) {
        AluInstCount += OccupiedDwords(UseI);
        // Make sure we won't need to end the clause due to KCache limitations.
        if (!SubstituteKCacheBank(UseI, KCacheBanks, false))
          return false;

        // We have reached the maximum instruction limit before finding the
        // use that kills this register, so we cannot use this def in the
        // current clause.
        if (AluInstCount >= TII->getMaxAlusPerClause())
          return false;

        // Register kill flags have been cleared by the time we get to this
        // pass, but it is safe to assume that all uses of this register
        // occur in the same basic block as its definition, because
        // it is illegal for the scheduler to schedule them in
        // different blocks.
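        // Record how far into the clause the most recent use of this def
        // occurs; the check after the loop only needs the position of the
        // last use to decide whether the def-use chain fits in the clause.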
        if (UseI->findRegisterUseOperandIdx(MOI->getReg()) != -1)
          LastUseCount = AluInstCount;

        if (UseI != Def && UseI->findRegisterDefOperandIdx(MOI->getReg()) != -1)
          break;
      }
      if (LastUseCount)
        return LastUseCount <= TII->getMaxAlusPerClause();
      llvm_unreachable("Clause local register live at end of clause.");
    }
    return true;
  }

  MachineBasicBlock::iterator
  MakeALUClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator I) {
    MachineBasicBlock::iterator ClauseHead = I;
    std::vector<std::pair<unsigned, unsigned> > KCacheBanks;
    bool PushBeforeModifier = false;
    unsigned AluInstCount = 0;
    for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
      if (IsTrivialInst(I))
        continue;
      if (!isALU(I))
        break;
      if (AluInstCount > TII->getMaxAlusPerClause())
        break;
      if (I->getOpcode() == AMDGPU::PRED_X) {
        // We put PRED_X in its own clause to ensure that ifcvt won't create
        // clauses with more than 128 insts. IfCvt only converts "then" and
        // "else" branches with fewer than ~60 insts each, so converted clauses
        // can't exceed ~121 insts (the predicate setter needs to be in the
        // same clause as the predicated ALU instructions).
        if (AluInstCount > 0)
          break;
        if (TII->getFlagOp(I).getImm() & MO_FLAG_PUSH)
          PushBeforeModifier = true;
        AluInstCount++;
        continue;
      }
      // XXX: GROUP_BARRIER instructions cannot be in the same ALU clause as:
      //
      // * KILL or INTERP instructions
      // * Any instruction that sets UPDATE_EXEC_MASK or UPDATE_PRED bits
      // * Uses waterfalling (i.e. INDEX_MODE = AR.X)
      //
      // XXX: These checks have not been implemented yet.
      if (TII->mustBeLastInClause(I->getOpcode())) {
        I++;
        break;
      }

      // If this instruction defines a clause local register, make sure
      // its use can fit in this clause.
      if (!canClauseLocalKillFitInClause(AluInstCount, KCacheBanks, I, E))
        break;

      if (!SubstituteKCacheBank(I, KCacheBanks))
        break;
      AluInstCount += OccupiedDwords(I);
    }
    unsigned Opcode = PushBeforeModifier ?
        AMDGPU::CF_ALU_PUSH_BEFORE : AMDGPU::CF_ALU;
    BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead), TII->get(Opcode))
        // The ADDR field is not read until the R600ControlFlowFinalizer pass,
        // so putting 0 here would be functionally safe. However, if we always
        // emitted 0, the ifcvt pass could assume that identical ALU clause
        // starters at the beginning of the true and false branches can be
        // factorized, which is not the case, so give each clause a distinct
        // address instead.
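        // Operand order mirrors the trailing comments below: address, the two
        // kcache bank selects, their modes, their line numbers, the
        // instruction count, and the enable bit. The mode value 2 is assumed
        // to request locking a pair of cache lines, matching the even line
        // number computed in getAccessedBankLine().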
        .addImm(Address++) // ADDR
        .addImm(KCacheBanks.empty() ? 0 : KCacheBanks[0].first) // KB0
        .addImm((KCacheBanks.size() < 2) ? 0 : KCacheBanks[1].first) // KB1
        .addImm(KCacheBanks.empty() ? 0 : 2) // KM0
        .addImm((KCacheBanks.size() < 2) ? 0 : 2) // KM1
        .addImm(KCacheBanks.empty() ? 0 : KCacheBanks[0].second) // KLINE0
        .addImm((KCacheBanks.size() < 2) ? 0 : KCacheBanks[1].second) // KLINE1
        .addImm(AluInstCount) // COUNT
        .addImm(1); // Enabled
    return I;
  }

public:
  static char ID;

  R600EmitClauseMarkers() : MachineFunctionPass(ID), TII(nullptr), Address(0) {
    initializeR600EmitClauseMarkersPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    TII = static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo());

    for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
         BB != BB_E; ++BB) {
      MachineBasicBlock &MBB = *BB;
      MachineBasicBlock::iterator I = MBB.begin();
      if (I->getOpcode() == AMDGPU::CF_ALU)
        continue; // BB was already parsed
      for (MachineBasicBlock::iterator E = MBB.end(); I != E;) {
        if (isALU(I))
          I = MakeALUClause(MBB, I);
        else
          ++I;
      }
    }
    return false;
  }

  const char *getPassName() const override {
    return "R600 Emit Clause Markers Pass";
  }
};

char R600EmitClauseMarkers::ID = 0;

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(R600EmitClauseMarkers, "emitclausemarkers",
                      "R600 Emit Clause Markers", false, false)
INITIALIZE_PASS_END(R600EmitClauseMarkers, "emitclausemarkers",
                    "R600 Emit Clause Markers", false, false)

llvm::FunctionPass *llvm::createR600EmitClauseMarkers() {
  return new R600EmitClauseMarkers();
}
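// Usage sketch (illustrative; the actual hook lives in the target's pass
// configuration, not in this file): the pass is created through the factory
// above and is expected to run before control-flow finalization, e.g.
//
//   addPass(createR600EmitClauseMarkers());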