Lines Matching defs:AMDGPU

44 assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);
47 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
48 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
49 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
50 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
54 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
55 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
59 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
63 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, 0
67 AMDGPU::sub0, AMDGPU::sub1, 0
73 if (AMDGPU::M0 == DestReg) {
78 if (!I->definesRegister(AMDGPU::M0))
82 if (Opc != TargetOpcode::COPY && Opc != AMDGPU::S_MOV_B32)
93 if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
94 assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
95 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
99 } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
100 assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
101 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
105 } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
106 assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
107 Opcode = AMDGPU::S_MOV_B32;
110 } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
111 assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
112 Opcode = AMDGPU::S_MOV_B32;
115 } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
116 assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
117 Opcode = AMDGPU::S_MOV_B32;
120 } else if (AMDGPU::VReg_32RegClass.contains(DestReg)) {
121 assert(AMDGPU::VReg_32RegClass.contains(SrcReg) ||
122 AMDGPU::SReg_32RegClass.contains(SrcReg));
123 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
127 } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
128 assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
129 AMDGPU::SReg_64RegClass.contains(SrcReg));
130 Opcode = AMDGPU::V_MOV_B32_e32;
133 } else if (AMDGPU::VReg_96RegClass.contains(DestReg)) {
134 assert(AMDGPU::VReg_96RegClass.contains(SrcReg));
135 Opcode = AMDGPU::V_MOV_B32_e32;
138 } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
139 assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
140 AMDGPU::SReg_128RegClass.contains(SrcReg));
141 Opcode = AMDGPU::V_MOV_B32_e32;
144 } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
145 assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
146 AMDGPU::SReg_256RegClass.contains(SrcReg));
147 Opcode = AMDGPU::V_MOV_B32_e32;
150 } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
151 assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
152 AMDGPU::SReg_512RegClass.contains(SrcReg));
153 Opcode = AMDGPU::V_MOV_B32_e32;
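
The copyPhysReg fragments above all reduce to one pattern: a copy between wide register classes is decomposed into one 32-bit move per sub-register, driven by the null-terminated sub-index tables at lines 47-67. A minimal sketch of that expansion loop (not the verbatim function), assuming the usual SIInstrInfo context where get(), RI, MBB, MI, and DL are in scope and Opcode/SubIndices were selected by the class checks above:

    // Emit one 32-bit move per sub-register; keep the full source alive
    // (implicit use) until the last piece has been copied.
    while (unsigned SubIdx = *SubIndices++) {
      MachineInstrBuilder Builder = BuildMI(MBB, MI, DL, get(Opcode),
                                            RI.getSubReg(DestReg, SubIdx));
      Builder.addReg(RI.getSubReg(SrcReg, SubIdx), getKillRegState(KillSrc));
      if (*SubIndices)
        Builder.addReg(SrcReg, RegState::Implicit);
    }
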
175 if ((NewOpc = AMDGPU::getCommuteRev(Opcode)) != -1)
179 if ((NewOpc = AMDGPU::getCommuteOrig(Opcode)) != -1)
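
The two table probes at lines 175-179 suggest the shape of commuteOpcode: consult the reverse-commute table, then the original-opcode table, and fall back to the input. A hedged sketch:

    // Sketch: table-driven opcode commutation with identity fallback.
    int NewOpc;
    if ((NewOpc = AMDGPU::getCommuteRev(Opcode)) != -1)
      return NewOpc;  // Opcode is a commuted form; return the original.
    if ((NewOpc = AMDGPU::getCommuteOrig(Opcode)) != -1)
      return NewOpc;  // Opcode has a commuted form; return it.
    return Opcode;
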
200 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), AMDGPU::VGPR0)
202 } else if (TRI->getCommonSubClass(RC, &AMDGPU::SGPR_32RegClass)) {
206 BuildMI(MBB, MI, DL, get(AMDGPU::V_WRITELANE_B32), TgtReg)
224 case 64: Opcode = AMDGPU::SI_SPILL_S64_SAVE; break;
225 case 128: Opcode = AMDGPU::SI_SPILL_S128_SAVE; break;
226 case 256: Opcode = AMDGPU::SI_SPILL_S256_SAVE; break;
227 case 512: Opcode = AMDGPU::SI_SPILL_S512_SAVE; break;
251 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
256 case 32: Opcode = AMDGPU::SI_SPILL_S32_RESTORE; break;
257 case 64: Opcode = AMDGPU::SI_SPILL_S64_RESTORE; break;
258 case 128: Opcode = AMDGPU::SI_SPILL_S128_RESTORE; break;
259 case 256: Opcode = AMDGPU::SI_SPILL_S256_RESTORE; break;
260 case 512: Opcode = AMDGPU::SI_SPILL_S512_RESTORE; break;
278 case AMDGPU::SI_SPILL_S512_SAVE:
279 case AMDGPU::SI_SPILL_S512_RESTORE:
281 case AMDGPU::SI_SPILL_S256_SAVE:
282 case AMDGPU::SI_SPILL_S256_RESTORE:
284 case AMDGPU::SI_SPILL_S128_SAVE:
285 case AMDGPU::SI_SPILL_S128_RESTORE:
287 case AMDGPU::SI_SPILL_S64_SAVE:
288 case AMDGPU::SI_SPILL_S64_RESTORE:
290 case AMDGPU::SI_SPILL_S32_RESTORE:
305 BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(AMDGPU::S_NOP))
319 case AMDGPU::SI_SPILL_S512_SAVE:
320 case AMDGPU::SI_SPILL_S256_SAVE:
321 case AMDGPU::SI_SPILL_S128_SAVE:
322 case AMDGPU::SI_SPILL_S64_SAVE: {
329 &AMDGPU::SGPR_32RegClass, i);
332 BuildMI(MBB, MI, DL, get(AMDGPU::V_WRITELANE_B32),
342 case AMDGPU::SI_SPILL_S512_RESTORE:
343 case AMDGPU::SI_SPILL_S256_RESTORE:
344 case AMDGPU::SI_SPILL_S128_RESTORE:
345 case AMDGPU::SI_SPILL_S64_RESTORE:
346 case AMDGPU::SI_SPILL_S32_RESTORE: {
353 &AMDGPU::SGPR_32RegClass, i);
356 BuildMI(MBB, MI, DL, get(AMDGPU::V_READLANE_B32), SubReg)
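
The SI_SPILL_S*_SAVE/RESTORE expansions move SGPRs through VGPR lanes: each 32-bit piece of the super-register is written into a lane with V_WRITELANE_B32 (line 332) and read back with V_READLANE_B32 (line 356). A sketch of the save side, assuming the getPhysRegSubReg helper whose arguments appear at lines 329 and 353, and a scratch register ScratchVGPR (illustrative name; the real lane and slot bookkeeping is more involved):

    // Write each 32-bit SGPR piece into lane i of the scratch VGPR.
    for (unsigned i = 0; i < NumSubRegs; ++i) {
      unsigned SubReg = RI.getPhysRegSubReg(SuperReg,
                                            &AMDGPU::SGPR_32RegClass, i);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_WRITELANE_B32), ScratchVGPR)
              .addReg(SubReg)   // value to store
              .addImm(i);       // lane index (illustrative)
    }
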
389 (MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(),
390 AMDGPU::OpName::abs)).getImm() ||
391 MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(),
392 AMDGPU::OpName::neg)).getImm()))
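
Lines 389-392 show the named-operand idiom used throughout this file: AMDGPU::getNamedOperandIdx returns the operand index for a given opcode, or -1 when that opcode lacks the operand. A defensive sketch of the same lookup:

    // Sketch: read the 'abs' modifier, tolerating opcodes without one.
    int AbsIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                            AMDGPU::OpName::abs);
    bool HasAbs = AbsIdx != -1 && MI->getOperand(AbsIdx).getImm();
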
414 return BuildMI(*MBB, I, MBB->findDebugLoc(I), get(AMDGPU::V_MOV_B32_e32),
421 case AMDGPU::S_MOV_B32:
422 case AMDGPU::S_MOV_B64:
423 case AMDGPU::V_MOV_B32_e32:
424 case AMDGPU::V_MOV_B32_e64:
431 return RC != &AMDGPU::EXECRegRegClass;
439 case AMDGPU::S_MOV_B32:
440 case AMDGPU::S_MOV_B64:
441 case AMDGPU::V_MOV_B32_e32:
447 namespace AMDGPU {
455 return ::AMDGPU::isDS(Opcode) != -1;
547 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
548 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
549 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
602 unsigned SGPRUsed = AMDGPU::NoRegister;
609 if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
613 if (MO.getReg() == AMDGPU::M0 || MO.getReg() == AMDGPU::VCC ||
615 (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
616 AMDGPU::SGPR_64RegClass.contains(MO.getReg())))) {
659 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 ||
660 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) {
680 default: return AMDGPU::INSTRUCTION_LIST_END;
681 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
682 case AMDGPU::COPY: return AMDGPU::COPY;
683 case AMDGPU::PHI: return AMDGPU::PHI;
684 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
685 case AMDGPU::S_MOV_B32:
687 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
688 case AMDGPU::S_ADD_I32: return AMDGPU::V_ADD_I32_e32;
689 case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
690 case AMDGPU::S_SUB_I32: return AMDGPU::V_SUB_I32_e32;
691 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
692 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e32;
693 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e32;
694 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e32;
695 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e32;
696 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e32;
697 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e32;
698 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e32;
699 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
700 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
701 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
702 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
703 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
704 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
705 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32;
706 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32;
707 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32;
708 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32;
709 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
710 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
711 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
712 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
713 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
714 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
715 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
716 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
717 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
718 case AMDGPU::S_LOAD_DWORD_IMM:
719 case AMDGPU::S_LOAD_DWORD_SGPR: return AMDGPU::BUFFER_LOAD_DWORD_ADDR64;
720 case AMDGPU::S_LOAD_DWORDX2_IMM:
721 case AMDGPU::S_LOAD_DWORDX2_SGPR: return AMDGPU::BUFFER_LOAD_DWORDX2_ADDR64;
722 case AMDGPU::S_LOAD_DWORDX4_IMM:
723 case AMDGPU::S_LOAD_DWORDX4_SGPR: return AMDGPU::BUFFER_LOAD_DWORDX4_ADDR64;
724 case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e32;
725 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
726 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
731 return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END;
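
The large switch above, getVALUOp, maps each scalar-ALU opcode to its vector-ALU counterpart and yields INSTRUCTION_LIST_END when no counterpart exists; line 731 turns that into a predicate. A sketch of how the worklist code below plausibly consumes the mapping:

    // Sketch: rewrite an SALU instruction to its VALU form in place.
    unsigned NewOpcode = getVALUOp(*Inst);
    if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END)
      continue;                     // no VALU equivalent; leave it alone
    Inst->setDesc(get(NewOpcode));  // swap the opcode descriptor
    // ...then legalize operands and enqueue users of the result.
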
748 case AMDGPU::COPY:
749 case AMDGPU::REG_SEQUENCE:
750 case AMDGPU::PHI:
751 case AMDGPU::INSERT_SUBREG:
764 unsigned Opcode = AMDGPU::V_MOV_B32_e32;
766 Opcode = AMDGPU::COPY;
768 Opcode = AMDGPU::S_MOV_B32;
813 if (SubIdx == AMDGPU::sub0)
815 if (SubIdx == AMDGPU::sub1)
833 unsigned LoDst = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
834 unsigned HiDst = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
837 MachineInstr *Lo = BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32),
840 MachineInstr *Hi = BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32),
846 .addImm(AMDGPU::sub0)
848 .addImm(AMDGPU::sub1);
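
Lines 833-848 split a 64-bit scalar immediate move into two 32-bit halves that a REG_SEQUENCE reassembles. Condensed, with Imm as the 64-bit immediate and Dst as the 64-bit destination (names illustrative):

    // Sketch: materialize a 64-bit immediate via two S_MOV_B32s.
    unsigned LoDst = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    unsigned HiDst = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32), LoDst)
            .addImm(Imm & 0xFFFFFFFF);   // low 32 bits
    BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32), HiDst)
            .addImm(Imm >> 32);          // high 32 bits
    BuildMI(*MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), Dst)
            .addReg(LoDst).addImm(AMDGPU::sub0)
            .addReg(HiDst).addImm(AMDGPU::sub1);
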
858 int Src0Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
859 AMDGPU::OpName::src0);
860 int Src1Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
861 AMDGPU::OpName::src1);
862 int Src2Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
863 AMDGPU::OpName::src2);
872 bool ReadsVCC = MI->readsRegister(AMDGPU::VCC, &RI);
901 unsigned SGPRReg = AMDGPU::NoRegister;
912 assert(MO.getReg() != AMDGPU::SCC && "SCC operand to VOP3 instruction");
914 if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
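
The SGPRReg tracking at lines 901-914 encodes a VOP3 constraint: the source operands may together read at most one distinct SGPR. Roughly, per source operand:

    // Sketch: accept the first SGPR seen (or a repeat of it); any second
    // distinct SGPR must first be copied into a VGPR.
    if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
      SGPRReg = MO.getReg();        // this SGPR read is legal
    } else {
      legalizeOpWithMove(MI, Idx);  // assumed helper: replace the operand
                                    // with a VGPR copy of the SGPR
    }
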
933 if (MI->getOpcode() == AMDGPU::REG_SEQUENCE ||
934 MI->getOpcode() == AMDGPU::PHI) {
970 if (MI->getOpcode() == AMDGPU::REG_SEQUENCE) {
979 get(AMDGPU::COPY), DstReg)
987 if (MI->getOpcode() == AMDGPU::INSERT_SUBREG) {
995 BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::COPY), NewSrc0)
1006 int SRsrcIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
1007 AMDGPU::OpName::srsrc);
1008 int VAddrIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
1009 AMDGPU::OpName::vaddr);
1027 unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
1028 unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
1029 unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
1030 unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
1031 unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
1032 unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
1033 unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
1037 &AMDGPU::VReg_128RegClass, AMDGPU::sub0, &AMDGPU::VReg_32RegClass);
1041 &AMDGPU::VReg_128RegClass, AMDGPU::sub1, &AMDGPU::VReg_32RegClass);
1045 &AMDGPU::VReg_64RegClass, AMDGPU::sub0, &AMDGPU::VReg_32RegClass);
1049 &AMDGPU::VReg_64RegClass, AMDGPU::sub1, &AMDGPU::VReg_32RegClass);
1052 BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_ADD_I32_e32),
1056 .addReg(AMDGPU::VCC, RegState::Define | RegState::Implicit);
1059 BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_ADDC_U32_e32),
1063 .addReg(AMDGPU::VCC, RegState::ImplicitDefine)
1064 .addReg(AMDGPU::VCC, RegState::Implicit);
1067 BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
1070 .addImm(AMDGPU::sub0)
1072 .addImm(AMDGPU::sub1);
1075 BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B64),
1080 BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
1082 .addImm(AMDGPU::RSRC_DATA_FORMAT & 0xFFFFFFFF);
1085 BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
1087 .addImm(AMDGPU::RSRC_DATA_FORMAT >> 32);
1090 BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
1093 .addImm(AMDGPU::sub0_sub1)
1095 .addImm(AMDGPU::sub2)
1097 .addImm(AMDGPU::sub3);
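
Lines 1027-1097 legalize a MUBUF instruction whose resource pointer landed in VGPRs: the pointer is folded into the 64-bit vaddr with V_ADD_I32_e32/V_ADDC_U32_e32 (carry threaded through VCC), and a fresh 128-bit descriptor is assembled from a zero base plus the two halves of RSRC_DATA_FORMAT. A sketch of the descriptor assembly, using the virtual registers created at lines 1030-1033:

    // Sketch: NewSRsrc = { 0 (64-bit base), RSRC_DATA_FORMAT lo, hi }.
    BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B64), Zero64)
            .addImm(0);
    BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
            .addImm(AMDGPU::RSRC_DATA_FORMAT & 0xFFFFFFFF);
    BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
            .addImm(AMDGPU::RSRC_DATA_FORMAT >> 32);
    BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewSRsrc)
            .addReg(Zero64).addImm(AMDGPU::sub0_sub1)
            .addReg(SRsrcFormatLo).addImm(AMDGPU::sub2)
            .addReg(SRsrcFormatHi).addImm(AMDGPU::sub3);
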
1110 case AMDGPU::S_LOAD_DWORD_IMM:
1111 case AMDGPU::S_LOAD_DWORD_SGPR:
1112 case AMDGPU::S_LOAD_DWORDX2_IMM:
1113 case AMDGPU::S_LOAD_DWORDX2_SGPR:
1114 case AMDGPU::S_LOAD_DWORDX4_IMM:
1115 case AMDGPU::S_LOAD_DWORDX4_SGPR:
1128 RegOffset = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
1130 BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
1134 BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
1141 unsigned SRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
1143 unsigned DWord1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
1144 unsigned DWord2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
1145 unsigned DWord3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
1147 BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord1)
1149 BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord2)
1150 .addImm(AMDGPU::RSRC_DATA_FORMAT & 0xFFFFFFFF);
1151 BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord3)
1152 .addImm(AMDGPU::RSRC_DATA_FORMAT >> 32);
1153 BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), SRsrc)
1155 .addImm(AMDGPU::sub0)
1157 .addImm(AMDGPU::sub1)
1159 .addImm(AMDGPU::sub2)
1161 .addImm(AMDGPU::sub3);
1192 case AMDGPU::S_MOV_B64: {
1216 case AMDGPU::S_AND_B64:
1217 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32);
1221 case AMDGPU::S_OR_B64:
1222 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32);
1226 case AMDGPU::S_XOR_B64:
1227 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32);
1231 case AMDGPU::S_NOT_B64:
1232 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32);
1236 case AMDGPU::S_BCNT1_I32_B64:
1241 case AMDGPU::S_BFE_U64:
1242 case AMDGPU::S_BFE_I64:
1243 case AMDGPU::S_BFM_B64:
1247 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
1263 if (Op.isReg() && Op.getReg() == AMDGPU::SCC)
1267 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
1270 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
1282 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
1290 if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
1321 case AMDGPU::COPY:
1322 case AMDGPU::PHI:
1323 case AMDGPU::REG_SEQUENCE:
1324 case AMDGPU::INSERT_SUBREG:
1363 return &AMDGPU::VReg_32RegClass;
1382 &AMDGPU::SGPR_32RegClass;
1384 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
1387 AMDGPU::sub0, Src0SubRC);
1390 const TargetRegisterClass *DestSubRC = RI.getSubRegClass(DestRC, AMDGPU::sub0);
1397 AMDGPU::sub1, Src0SubRC);
1406 .addImm(AMDGPU::sub0)
1408 .addImm(AMDGPU::sub1);
1435 &AMDGPU::SGPR_32RegClass;
1437 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
1440 &AMDGPU::SGPR_32RegClass;
1442 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
1445 AMDGPU::sub0, Src0SubRC);
1447 AMDGPU::sub0, Src1SubRC);
1450 const TargetRegisterClass *DestSubRC = RI.getSubRegClass(DestRC, AMDGPU::sub0);
1458 AMDGPU::sub1, Src0SubRC);
1460 AMDGPU::sub1, Src1SubRC);
1470 .addImm(AMDGPU::sub0)
1472 .addImm(AMDGPU::sub1);
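
splitScalar64BitBinaryOp (lines 1435-1472) halves a 64-bit scalar bitwise op: extract sub0 and sub1 of each source, apply the 32-bit opcode to each pair, and recombine. Sketched, with Opcode32 standing for the 32-bit opcode (e.g. S_AND_B32) and the SrcReg*Sub* operands as produced by buildExtractSubReg above:

    // Sketch: dest = REG_SEQUENCE(op32(lo0, lo1), op32(hi0, hi1)).
    unsigned DestLo = MRI.createVirtualRegister(DestSubRC);
    unsigned DestHi = MRI.createVirtualRegister(DestSubRC);
    BuildMI(MBB, MII, DL, get(Opcode32), DestLo)
            .addOperand(SrcReg0Sub0).addOperand(SrcReg1Sub0);
    BuildMI(MBB, MII, DL, get(Opcode32), DestHi)
            .addOperand(SrcReg0Sub1).addOperand(SrcReg1Sub1);
    BuildMI(MBB, MII, DL, get(AMDGPU::REG_SEQUENCE), FullDestReg)
            .addReg(DestLo).addImm(AMDGPU::sub0)
            .addReg(DestHi).addImm(AMDGPU::sub1);
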
1493 const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e32);
1496 &AMDGPU::SGPR_32RegClass;
1498 unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1499 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1501 const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);
1504 AMDGPU::sub0, SrcSubRC);
1506 AMDGPU::sub1, SrcSubRC);
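
The V_BCNT_U32_B32_e32 lowering above works because the VALU popcount adds its second operand into the result, so a 64-bit count chains two 32-bit counts: bcnt(hi, bcnt(lo, 0)). A sketch over the registers created at lines 1498-1499:

    // Sketch: 64-bit popcount as two accumulating 32-bit popcounts.
    BuildMI(MBB, MII, DL, InstDesc, MidReg)
            .addOperand(SrcRegSub0)   // low 32 bits
            .addImm(0);               // accumulator starts at zero
    BuildMI(MBB, MII, DL, InstDesc, ResultReg)
            .addOperand(SrcRegSub1)   // high 32 bits
            .addReg(MidReg);          // add in the low-half count
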
1546 unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
1549 return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_DST_V1))
1564 unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
1567 return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_SRC))
1586 Reserved.set(AMDGPU::VReg_32RegClass.getRegister(Index));
1589 Reserved.set(AMDGPU::VReg_64RegClass.getRegister(Index));
1592 Reserved.set(AMDGPU::VReg_96RegClass.getRegister(Index));
1595 Reserved.set(AMDGPU::VReg_128RegClass.getRegister(Index));
1598 Reserved.set(AMDGPU::VReg_256RegClass.getRegister(Index));
1601 Reserved.set(AMDGPU::VReg_512RegClass.getRegister(Index));