1 //===-- SIInstructions.td - SI Instruction Defintions ---------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // This file was originally auto-generated from a GPU register header file and 10 // all the instruction definitions were originally commented out. Instructions 11 // that are not yet supported remain commented out. 12 //===----------------------------------------------------------------------===// 13 14 class InterpSlots { 15 int P0 = 2; 16 int P10 = 0; 17 int P20 = 1; 18 } 19 def INTERP : InterpSlots; 20 21 def InterpSlot : Operand<i32> { 22 let PrintMethod = "printInterpSlot"; 23 } 24 25 def SendMsgImm : Operand<i32> { 26 let PrintMethod = "printSendMsg"; 27 } 28 29 def isSI : Predicate<"Subtarget.getGeneration() " 30 ">= AMDGPUSubtarget::SOUTHERN_ISLANDS">; 31 32 def isCI : Predicate<"Subtarget.getGeneration() " 33 ">= AMDGPUSubtarget::SEA_ISLANDS">; 34 35 def isCFDepth0 : Predicate<"isCFDepth0()">; 36 37 def WAIT_FLAG : InstFlag<"printWaitFlag">; 38 39 let SubtargetPredicate = isSI in { 40 let OtherPredicates = [isCFDepth0] in { 41 42 //===----------------------------------------------------------------------===// 43 // SMRD Instructions 44 //===----------------------------------------------------------------------===// 45 46 let mayLoad = 1 in { 47 48 // We are using the SGPR_32 and not the SReg_32 register class for 32-bit 49 // SMRD instructions, because the SGPR_32 register class does not include M0 50 // and writing to M0 from an SMRD instruction will hang the GPU. 51 defm S_LOAD_DWORD : SMRD_Helper <0x00, "S_LOAD_DWORD", SReg_64, SGPR_32>; 52 defm S_LOAD_DWORDX2 : SMRD_Helper <0x01, "S_LOAD_DWORDX2", SReg_64, SReg_64>; 53 defm S_LOAD_DWORDX4 : SMRD_Helper <0x02, "S_LOAD_DWORDX4", SReg_64, SReg_128>; 54 defm S_LOAD_DWORDX8 : SMRD_Helper <0x03, "S_LOAD_DWORDX8", SReg_64, SReg_256>; 55 defm S_LOAD_DWORDX16 : SMRD_Helper <0x04, "S_LOAD_DWORDX16", SReg_64, SReg_512>; 56 57 defm S_BUFFER_LOAD_DWORD : SMRD_Helper < 58 0x08, "S_BUFFER_LOAD_DWORD", SReg_128, SGPR_32 59 >; 60 61 defm S_BUFFER_LOAD_DWORDX2 : SMRD_Helper < 62 0x09, "S_BUFFER_LOAD_DWORDX2", SReg_128, SReg_64 63 >; 64 65 defm S_BUFFER_LOAD_DWORDX4 : SMRD_Helper < 66 0x0a, "S_BUFFER_LOAD_DWORDX4", SReg_128, SReg_128 67 >; 68 69 defm S_BUFFER_LOAD_DWORDX8 : SMRD_Helper < 70 0x0b, "S_BUFFER_LOAD_DWORDX8", SReg_128, SReg_256 71 >; 72 73 defm S_BUFFER_LOAD_DWORDX16 : SMRD_Helper < 74 0x0c, "S_BUFFER_LOAD_DWORDX16", SReg_128, SReg_512 75 >; 76 77 } // mayLoad = 1 78 79 //def S_MEMTIME : SMRD_ <0x0000001e, "S_MEMTIME", []>; 80 //def S_DCACHE_INV : SMRD_ <0x0000001f, "S_DCACHE_INV", []>; 81 82 //===----------------------------------------------------------------------===// 83 // SOP1 Instructions 84 //===----------------------------------------------------------------------===// 85 86 let neverHasSideEffects = 1 in { 87 88 let isMoveImm = 1 in { 89 def S_MOV_B32 : SOP1_32 <0x00000003, "S_MOV_B32", []>; 90 def S_MOV_B64 : SOP1_64 <0x00000004, "S_MOV_B64", []>; 91 def S_CMOV_B32 : SOP1_32 <0x00000005, "S_CMOV_B32", []>; 92 def S_CMOV_B64 : SOP1_64 <0x00000006, "S_CMOV_B64", []>; 93 } // End isMoveImm = 1 94 95 def S_NOT_B32 : SOP1_32 <0x00000007, "S_NOT_B32", 96 [(set i32:$dst, (not i32:$src0))] 97 >; 98 99 def S_NOT_B64 : SOP1_64 <0x00000008, "S_NOT_B64", 100 [(set i64:$dst, (not i64:$src0))] 101 >; 102 def S_WQM_B32 : SOP1_32 <0x00000009, "S_WQM_B32", []>; 103 def S_WQM_B64 : SOP1_64 <0x0000000a, "S_WQM_B64", []>; 104 def S_BREV_B32 : SOP1_32 <0x0000000b, "S_BREV_B32", 105 [(set i32:$dst, (AMDGPUbrev i32:$src0))] 106 >; 107 def S_BREV_B64 : SOP1_64 <0x0000000c, "S_BREV_B64", []>; 108 } // End neverHasSideEffects = 1 109 110 ////def S_BCNT0_I32_B32 : SOP1_BCNT0 <0x0000000d, "S_BCNT0_I32_B32", []>; 111 ////def S_BCNT0_I32_B64 : SOP1_BCNT0 <0x0000000e, "S_BCNT0_I32_B64", []>; 112 def S_BCNT1_I32_B32 : SOP1_32 <0x0000000f, "S_BCNT1_I32_B32", 113 [(set i32:$dst, (ctpop i32:$src0))] 114 >; 115 def S_BCNT1_I32_B64 : SOP1_32_64 <0x00000010, "S_BCNT1_I32_B64", []>; 116 117 ////def S_FF0_I32_B32 : SOP1_32 <0x00000011, "S_FF0_I32_B32", []>; 118 ////def S_FF0_I32_B64 : SOP1_FF0 <0x00000012, "S_FF0_I32_B64", []>; 119 def S_FF1_I32_B32 : SOP1_32 <0x00000013, "S_FF1_I32_B32", 120 [(set i32:$dst, (cttz_zero_undef i32:$src0))] 121 >; 122 ////def S_FF1_I32_B64 : SOP1_FF1 <0x00000014, "S_FF1_I32_B64", []>; 123 124 def S_FLBIT_I32_B32 : SOP1_32 <0x00000015, "S_FLBIT_I32_B32", 125 [(set i32:$dst, (ctlz_zero_undef i32:$src0))] 126 >; 127 128 //def S_FLBIT_I32_B64 : SOP1_32 <0x00000016, "S_FLBIT_I32_B64", []>; 129 def S_FLBIT_I32 : SOP1_32 <0x00000017, "S_FLBIT_I32", []>; 130 //def S_FLBIT_I32_I64 : SOP1_32 <0x00000018, "S_FLBIT_I32_I64", []>; 131 def S_SEXT_I32_I8 : SOP1_32 <0x00000019, "S_SEXT_I32_I8", 132 [(set i32:$dst, (sext_inreg i32:$src0, i8))] 133 >; 134 def S_SEXT_I32_I16 : SOP1_32 <0x0000001a, "S_SEXT_I32_I16", 135 [(set i32:$dst, (sext_inreg i32:$src0, i16))] 136 >; 137 138 ////def S_BITSET0_B32 : SOP1_BITSET0 <0x0000001b, "S_BITSET0_B32", []>; 139 ////def S_BITSET0_B64 : SOP1_BITSET0 <0x0000001c, "S_BITSET0_B64", []>; 140 ////def S_BITSET1_B32 : SOP1_BITSET1 <0x0000001d, "S_BITSET1_B32", []>; 141 ////def S_BITSET1_B64 : SOP1_BITSET1 <0x0000001e, "S_BITSET1_B64", []>; 142 def S_GETPC_B64 : SOP1_64 <0x0000001f, "S_GETPC_B64", []>; 143 def S_SETPC_B64 : SOP1_64 <0x00000020, "S_SETPC_B64", []>; 144 def S_SWAPPC_B64 : SOP1_64 <0x00000021, "S_SWAPPC_B64", []>; 145 def S_RFE_B64 : SOP1_64 <0x00000022, "S_RFE_B64", []>; 146 147 let hasSideEffects = 1, Uses = [EXEC], Defs = [EXEC] in { 148 149 def S_AND_SAVEEXEC_B64 : SOP1_64 <0x00000024, "S_AND_SAVEEXEC_B64", []>; 150 def S_OR_SAVEEXEC_B64 : SOP1_64 <0x00000025, "S_OR_SAVEEXEC_B64", []>; 151 def S_XOR_SAVEEXEC_B64 : SOP1_64 <0x00000026, "S_XOR_SAVEEXEC_B64", []>; 152 def S_ANDN2_SAVEEXEC_B64 : SOP1_64 <0x00000027, "S_ANDN2_SAVEEXEC_B64", []>; 153 def S_ORN2_SAVEEXEC_B64 : SOP1_64 <0x00000028, "S_ORN2_SAVEEXEC_B64", []>; 154 def S_NAND_SAVEEXEC_B64 : SOP1_64 <0x00000029, "S_NAND_SAVEEXEC_B64", []>; 155 def S_NOR_SAVEEXEC_B64 : SOP1_64 <0x0000002a, "S_NOR_SAVEEXEC_B64", []>; 156 def S_XNOR_SAVEEXEC_B64 : SOP1_64 <0x0000002b, "S_XNOR_SAVEEXEC_B64", []>; 157 158 } // End hasSideEffects = 1 159 160 def S_QUADMASK_B32 : SOP1_32 <0x0000002c, "S_QUADMASK_B32", []>; 161 def S_QUADMASK_B64 : SOP1_64 <0x0000002d, "S_QUADMASK_B64", []>; 162 def S_MOVRELS_B32 : SOP1_32 <0x0000002e, "S_MOVRELS_B32", []>; 163 def S_MOVRELS_B64 : SOP1_64 <0x0000002f, "S_MOVRELS_B64", []>; 164 def S_MOVRELD_B32 : SOP1_32 <0x00000030, "S_MOVRELD_B32", []>; 165 def S_MOVRELD_B64 : SOP1_64 <0x00000031, "S_MOVRELD_B64", []>; 166 //def S_CBRANCH_JOIN : SOP1_ <0x00000032, "S_CBRANCH_JOIN", []>; 167 def S_MOV_REGRD_B32 : SOP1_32 <0x00000033, "S_MOV_REGRD_B32", []>; 168 def S_ABS_I32 : SOP1_32 <0x00000034, "S_ABS_I32", []>; 169 def S_MOV_FED_B32 : SOP1_32 <0x00000035, "S_MOV_FED_B32", []>; 170 171 //===----------------------------------------------------------------------===// 172 // SOP2 Instructions 173 //===----------------------------------------------------------------------===// 174 175 let Defs = [SCC] in { // Carry out goes to SCC 176 let isCommutable = 1 in { 177 def S_ADD_U32 : SOP2_32 <0x00000000, "S_ADD_U32", []>; 178 def S_ADD_I32 : SOP2_32 <0x00000002, "S_ADD_I32", 179 [(set i32:$dst, (add SSrc_32:$src0, SSrc_32:$src1))] 180 >; 181 } // End isCommutable = 1 182 183 def S_SUB_U32 : SOP2_32 <0x00000001, "S_SUB_U32", []>; 184 def S_SUB_I32 : SOP2_32 <0x00000003, "S_SUB_I32", 185 [(set i32:$dst, (sub SSrc_32:$src0, SSrc_32:$src1))] 186 >; 187 188 let Uses = [SCC] in { // Carry in comes from SCC 189 let isCommutable = 1 in { 190 def S_ADDC_U32 : SOP2_32 <0x00000004, "S_ADDC_U32", 191 [(set i32:$dst, (adde (i32 SSrc_32:$src0), (i32 SSrc_32:$src1)))]>; 192 } // End isCommutable = 1 193 194 def S_SUBB_U32 : SOP2_32 <0x00000005, "S_SUBB_U32", 195 [(set i32:$dst, (sube (i32 SSrc_32:$src0), (i32 SSrc_32:$src1)))]>; 196 } // End Uses = [SCC] 197 } // End Defs = [SCC] 198 199 def S_MIN_I32 : SOP2_32 <0x00000006, "S_MIN_I32", 200 [(set i32:$dst, (AMDGPUsmin i32:$src0, i32:$src1))] 201 >; 202 def S_MIN_U32 : SOP2_32 <0x00000007, "S_MIN_U32", 203 [(set i32:$dst, (AMDGPUumin i32:$src0, i32:$src1))] 204 >; 205 def S_MAX_I32 : SOP2_32 <0x00000008, "S_MAX_I32", 206 [(set i32:$dst, (AMDGPUsmax i32:$src0, i32:$src1))] 207 >; 208 def S_MAX_U32 : SOP2_32 <0x00000009, "S_MAX_U32", 209 [(set i32:$dst, (AMDGPUumax i32:$src0, i32:$src1))] 210 >; 211 212 def S_CSELECT_B32 : SOP2 < 213 0x0000000a, (outs SReg_32:$dst), 214 (ins SReg_32:$src0, SReg_32:$src1, SCCReg:$scc), "S_CSELECT_B32", 215 [] 216 >; 217 218 def S_CSELECT_B64 : SOP2_64 <0x0000000b, "S_CSELECT_B64", []>; 219 220 def S_AND_B32 : SOP2_32 <0x0000000e, "S_AND_B32", 221 [(set i32:$dst, (and i32:$src0, i32:$src1))] 222 >; 223 224 def S_AND_B64 : SOP2_64 <0x0000000f, "S_AND_B64", 225 [(set i64:$dst, (and i64:$src0, i64:$src1))] 226 >; 227 228 def S_OR_B32 : SOP2_32 <0x00000010, "S_OR_B32", 229 [(set i32:$dst, (or i32:$src0, i32:$src1))] 230 >; 231 232 def S_OR_B64 : SOP2_64 <0x00000011, "S_OR_B64", 233 [(set i64:$dst, (or i64:$src0, i64:$src1))] 234 >; 235 236 def S_XOR_B32 : SOP2_32 <0x00000012, "S_XOR_B32", 237 [(set i32:$dst, (xor i32:$src0, i32:$src1))] 238 >; 239 240 def S_XOR_B64 : SOP2_64 <0x00000013, "S_XOR_B64", 241 [(set i64:$dst, (xor i64:$src0, i64:$src1))] 242 >; 243 def S_ANDN2_B32 : SOP2_32 <0x00000014, "S_ANDN2_B32", []>; 244 def S_ANDN2_B64 : SOP2_64 <0x00000015, "S_ANDN2_B64", []>; 245 def S_ORN2_B32 : SOP2_32 <0x00000016, "S_ORN2_B32", []>; 246 def S_ORN2_B64 : SOP2_64 <0x00000017, "S_ORN2_B64", []>; 247 def S_NAND_B32 : SOP2_32 <0x00000018, "S_NAND_B32", []>; 248 def S_NAND_B64 : SOP2_64 <0x00000019, "S_NAND_B64", []>; 249 def S_NOR_B32 : SOP2_32 <0x0000001a, "S_NOR_B32", []>; 250 def S_NOR_B64 : SOP2_64 <0x0000001b, "S_NOR_B64", []>; 251 def S_XNOR_B32 : SOP2_32 <0x0000001c, "S_XNOR_B32", []>; 252 def S_XNOR_B64 : SOP2_64 <0x0000001d, "S_XNOR_B64", []>; 253 254 // Use added complexity so these patterns are preferred to the VALU patterns. 255 let AddedComplexity = 1 in { 256 257 def S_LSHL_B32 : SOP2_32 <0x0000001e, "S_LSHL_B32", 258 [(set i32:$dst, (shl i32:$src0, i32:$src1))] 259 >; 260 def S_LSHL_B64 : SOP2_SHIFT_64 <0x0000001f, "S_LSHL_B64", 261 [(set i64:$dst, (shl i64:$src0, i32:$src1))] 262 >; 263 def S_LSHR_B32 : SOP2_32 <0x00000020, "S_LSHR_B32", 264 [(set i32:$dst, (srl i32:$src0, i32:$src1))] 265 >; 266 def S_LSHR_B64 : SOP2_SHIFT_64 <0x00000021, "S_LSHR_B64", 267 [(set i64:$dst, (srl i64:$src0, i32:$src1))] 268 >; 269 def S_ASHR_I32 : SOP2_32 <0x00000022, "S_ASHR_I32", 270 [(set i32:$dst, (sra i32:$src0, i32:$src1))] 271 >; 272 def S_ASHR_I64 : SOP2_SHIFT_64 <0x00000023, "S_ASHR_I64", 273 [(set i64:$dst, (sra i64:$src0, i32:$src1))] 274 >; 275 276 } // End AddedComplexity = 1 277 278 def S_BFM_B32 : SOP2_32 <0x00000024, "S_BFM_B32", []>; 279 def S_BFM_B64 : SOP2_64 <0x00000025, "S_BFM_B64", []>; 280 def S_MUL_I32 : SOP2_32 <0x00000026, "S_MUL_I32", []>; 281 def S_BFE_U32 : SOP2_32 <0x00000027, "S_BFE_U32", []>; 282 def S_BFE_I32 : SOP2_32 <0x00000028, "S_BFE_I32", []>; 283 def S_BFE_U64 : SOP2_64 <0x00000029, "S_BFE_U64", []>; 284 def S_BFE_I64 : SOP2_64 <0x0000002a, "S_BFE_I64", []>; 285 //def S_CBRANCH_G_FORK : SOP2_ <0x0000002b, "S_CBRANCH_G_FORK", []>; 286 def S_ABSDIFF_I32 : SOP2_32 <0x0000002c, "S_ABSDIFF_I32", []>; 287 288 //===----------------------------------------------------------------------===// 289 // SOPC Instructions 290 //===----------------------------------------------------------------------===// 291 292 def S_CMP_EQ_I32 : SOPC_32 <0x00000000, "S_CMP_EQ_I32">; 293 def S_CMP_LG_I32 : SOPC_32 <0x00000001, "S_CMP_LG_I32">; 294 def S_CMP_GT_I32 : SOPC_32 <0x00000002, "S_CMP_GT_I32">; 295 def S_CMP_GE_I32 : SOPC_32 <0x00000003, "S_CMP_GE_I32">; 296 def S_CMP_LT_I32 : SOPC_32 <0x00000004, "S_CMP_LT_I32">; 297 def S_CMP_LE_I32 : SOPC_32 <0x00000005, "S_CMP_LE_I32">; 298 def S_CMP_EQ_U32 : SOPC_32 <0x00000006, "S_CMP_EQ_U32">; 299 def S_CMP_LG_U32 : SOPC_32 <0x00000007, "S_CMP_LG_U32">; 300 def S_CMP_GT_U32 : SOPC_32 <0x00000008, "S_CMP_GT_U32">; 301 def S_CMP_GE_U32 : SOPC_32 <0x00000009, "S_CMP_GE_U32">; 302 def S_CMP_LT_U32 : SOPC_32 <0x0000000a, "S_CMP_LT_U32">; 303 def S_CMP_LE_U32 : SOPC_32 <0x0000000b, "S_CMP_LE_U32">; 304 ////def S_BITCMP0_B32 : SOPC_BITCMP0 <0x0000000c, "S_BITCMP0_B32", []>; 305 ////def S_BITCMP1_B32 : SOPC_BITCMP1 <0x0000000d, "S_BITCMP1_B32", []>; 306 ////def S_BITCMP0_B64 : SOPC_BITCMP0 <0x0000000e, "S_BITCMP0_B64", []>; 307 ////def S_BITCMP1_B64 : SOPC_BITCMP1 <0x0000000f, "S_BITCMP1_B64", []>; 308 //def S_SETVSKIP : SOPC_ <0x00000010, "S_SETVSKIP", []>; 309 310 //===----------------------------------------------------------------------===// 311 // SOPK Instructions 312 //===----------------------------------------------------------------------===// 313 314 def S_MOVK_I32 : SOPK_32 <0x00000000, "S_MOVK_I32", []>; 315 def S_CMOVK_I32 : SOPK_32 <0x00000002, "S_CMOVK_I32", []>; 316 317 /* 318 This instruction is disabled for now until we can figure out how to teach 319 the instruction selector to correctly use the S_CMP* vs V_CMP* 320 instructions. 321 322 When this instruction is enabled the code generator sometimes produces this 323 invalid sequence: 324 325 SCC = S_CMPK_EQ_I32 SGPR0, imm 326 VCC = COPY SCC 327 VGPR0 = V_CNDMASK VCC, VGPR0, VGPR1 328 329 def S_CMPK_EQ_I32 : SOPK < 330 0x00000003, (outs SCCReg:$dst), (ins SReg_32:$src0, i32imm:$src1), 331 "S_CMPK_EQ_I32", 332 [(set i1:$dst, (setcc i32:$src0, imm:$src1, SETEQ))] 333 >; 334 */ 335 336 let isCompare = 1, Defs = [SCC] in { 337 def S_CMPK_LG_I32 : SOPK_32 <0x00000004, "S_CMPK_LG_I32", []>; 338 def S_CMPK_GT_I32 : SOPK_32 <0x00000005, "S_CMPK_GT_I32", []>; 339 def S_CMPK_GE_I32 : SOPK_32 <0x00000006, "S_CMPK_GE_I32", []>; 340 def S_CMPK_LT_I32 : SOPK_32 <0x00000007, "S_CMPK_LT_I32", []>; 341 def S_CMPK_LE_I32 : SOPK_32 <0x00000008, "S_CMPK_LE_I32", []>; 342 def S_CMPK_EQ_U32 : SOPK_32 <0x00000009, "S_CMPK_EQ_U32", []>; 343 def S_CMPK_LG_U32 : SOPK_32 <0x0000000a, "S_CMPK_LG_U32", []>; 344 def S_CMPK_GT_U32 : SOPK_32 <0x0000000b, "S_CMPK_GT_U32", []>; 345 def S_CMPK_GE_U32 : SOPK_32 <0x0000000c, "S_CMPK_GE_U32", []>; 346 def S_CMPK_LT_U32 : SOPK_32 <0x0000000d, "S_CMPK_LT_U32", []>; 347 def S_CMPK_LE_U32 : SOPK_32 <0x0000000e, "S_CMPK_LE_U32", []>; 348 } // End isCompare = 1, Defs = [SCC] 349 350 let Defs = [SCC], isCommutable = 1 in { 351 def S_ADDK_I32 : SOPK_32 <0x0000000f, "S_ADDK_I32", []>; 352 def S_MULK_I32 : SOPK_32 <0x00000010, "S_MULK_I32", []>; 353 } 354 355 //def S_CBRANCH_I_FORK : SOPK_ <0x00000011, "S_CBRANCH_I_FORK", []>; 356 def S_GETREG_B32 : SOPK_32 <0x00000012, "S_GETREG_B32", []>; 357 def S_SETREG_B32 : SOPK_32 <0x00000013, "S_SETREG_B32", []>; 358 def S_GETREG_REGRD_B32 : SOPK_32 <0x00000014, "S_GETREG_REGRD_B32", []>; 359 //def S_SETREG_IMM32_B32 : SOPK_32 <0x00000015, "S_SETREG_IMM32_B32", []>; 360 //def EXP : EXP_ <0x00000000, "EXP", []>; 361 362 } // End let OtherPredicates = [isCFDepth0] 363 364 //===----------------------------------------------------------------------===// 365 // SOPP Instructions 366 //===----------------------------------------------------------------------===// 367 368 def S_NOP : SOPP <0x00000000, (ins i16imm:$SIMM16), "S_NOP $SIMM16", []>; 369 370 let isTerminator = 1 in { 371 372 def S_ENDPGM : SOPP <0x00000001, (ins), "S_ENDPGM", 373 [(IL_retflag)]> { 374 let SIMM16 = 0; 375 let isBarrier = 1; 376 let hasCtrlDep = 1; 377 } 378 379 let isBranch = 1 in { 380 def S_BRANCH : SOPP < 381 0x00000002, (ins brtarget:$target), "S_BRANCH $target", 382 [(br bb:$target)]> { 383 let isBarrier = 1; 384 } 385 386 let DisableEncoding = "$scc" in { 387 def S_CBRANCH_SCC0 : SOPP < 388 0x00000004, (ins brtarget:$target, SCCReg:$scc), 389 "S_CBRANCH_SCC0 $target", [] 390 >; 391 def S_CBRANCH_SCC1 : SOPP < 392 0x00000005, (ins brtarget:$target, SCCReg:$scc), 393 "S_CBRANCH_SCC1 $target", 394 [] 395 >; 396 } // End DisableEncoding = "$scc" 397 398 def S_CBRANCH_VCCZ : SOPP < 399 0x00000006, (ins brtarget:$target, VCCReg:$vcc), 400 "S_CBRANCH_VCCZ $target", 401 [] 402 >; 403 def S_CBRANCH_VCCNZ : SOPP < 404 0x00000007, (ins brtarget:$target, VCCReg:$vcc), 405 "S_CBRANCH_VCCNZ $target", 406 [] 407 >; 408 409 let DisableEncoding = "$exec" in { 410 def S_CBRANCH_EXECZ : SOPP < 411 0x00000008, (ins brtarget:$target, EXECReg:$exec), 412 "S_CBRANCH_EXECZ $target", 413 [] 414 >; 415 def S_CBRANCH_EXECNZ : SOPP < 416 0x00000009, (ins brtarget:$target, EXECReg:$exec), 417 "S_CBRANCH_EXECNZ $target", 418 [] 419 >; 420 } // End DisableEncoding = "$exec" 421 422 423 } // End isBranch = 1 424 } // End isTerminator = 1 425 426 let hasSideEffects = 1 in { 427 def S_BARRIER : SOPP <0x0000000a, (ins), "S_BARRIER", 428 [(int_AMDGPU_barrier_local)] 429 > { 430 let SIMM16 = 0; 431 let isBarrier = 1; 432 let hasCtrlDep = 1; 433 let mayLoad = 1; 434 let mayStore = 1; 435 } 436 437 def S_WAITCNT : SOPP <0x0000000c, (ins WAIT_FLAG:$simm16), "S_WAITCNT $simm16", 438 [] 439 >; 440 //def S_SETHALT : SOPP_ <0x0000000d, "S_SETHALT", []>; 441 //def S_SLEEP : SOPP_ <0x0000000e, "S_SLEEP", []>; 442 //def S_SETPRIO : SOPP_ <0x0000000f, "S_SETPRIO", []>; 443 444 let Uses = [EXEC] in { 445 def S_SENDMSG : SOPP <0x00000010, (ins SendMsgImm:$simm16, M0Reg:$m0), "S_SENDMSG $simm16", 446 [(int_SI_sendmsg imm:$simm16, M0Reg:$m0)] 447 > { 448 let DisableEncoding = "$m0"; 449 } 450 } // End Uses = [EXEC] 451 452 //def S_SENDMSGHALT : SOPP_ <0x00000011, "S_SENDMSGHALT", []>; 453 //def S_TRAP : SOPP_ <0x00000012, "S_TRAP", []>; 454 //def S_ICACHE_INV : SOPP_ <0x00000013, "S_ICACHE_INV", []>; 455 //def S_INCPERFLEVEL : SOPP_ <0x00000014, "S_INCPERFLEVEL", []>; 456 //def S_DECPERFLEVEL : SOPP_ <0x00000015, "S_DECPERFLEVEL", []>; 457 //def S_TTRACEDATA : SOPP_ <0x00000016, "S_TTRACEDATA", []>; 458 } // End hasSideEffects 459 460 //===----------------------------------------------------------------------===// 461 // VOPC Instructions 462 //===----------------------------------------------------------------------===// 463 464 let isCompare = 1 in { 465 466 defm V_CMP_F_F32 : VOPC_32 <0x00000000, "V_CMP_F_F32">; 467 defm V_CMP_LT_F32 : VOPC_32 <0x00000001, "V_CMP_LT_F32", f32, COND_OLT>; 468 defm V_CMP_EQ_F32 : VOPC_32 <0x00000002, "V_CMP_EQ_F32", f32, COND_OEQ>; 469 defm V_CMP_LE_F32 : VOPC_32 <0x00000003, "V_CMP_LE_F32", f32, COND_OLE>; 470 defm V_CMP_GT_F32 : VOPC_32 <0x00000004, "V_CMP_GT_F32", f32, COND_OGT>; 471 defm V_CMP_LG_F32 : VOPC_32 <0x00000005, "V_CMP_LG_F32">; 472 defm V_CMP_GE_F32 : VOPC_32 <0x00000006, "V_CMP_GE_F32", f32, COND_OGE>; 473 defm V_CMP_O_F32 : VOPC_32 <0x00000007, "V_CMP_O_F32", f32, COND_O>; 474 defm V_CMP_U_F32 : VOPC_32 <0x00000008, "V_CMP_U_F32", f32, COND_UO>; 475 defm V_CMP_NGE_F32 : VOPC_32 <0x00000009, "V_CMP_NGE_F32">; 476 defm V_CMP_NLG_F32 : VOPC_32 <0x0000000a, "V_CMP_NLG_F32">; 477 defm V_CMP_NGT_F32 : VOPC_32 <0x0000000b, "V_CMP_NGT_F32">; 478 defm V_CMP_NLE_F32 : VOPC_32 <0x0000000c, "V_CMP_NLE_F32">; 479 defm V_CMP_NEQ_F32 : VOPC_32 <0x0000000d, "V_CMP_NEQ_F32", f32, COND_UNE>; 480 defm V_CMP_NLT_F32 : VOPC_32 <0x0000000e, "V_CMP_NLT_F32">; 481 defm V_CMP_TRU_F32 : VOPC_32 <0x0000000f, "V_CMP_TRU_F32">; 482 483 let hasSideEffects = 1 in { 484 485 defm V_CMPX_F_F32 : VOPCX_32 <0x00000010, "V_CMPX_F_F32">; 486 defm V_CMPX_LT_F32 : VOPCX_32 <0x00000011, "V_CMPX_LT_F32">; 487 defm V_CMPX_EQ_F32 : VOPCX_32 <0x00000012, "V_CMPX_EQ_F32">; 488 defm V_CMPX_LE_F32 : VOPCX_32 <0x00000013, "V_CMPX_LE_F32">; 489 defm V_CMPX_GT_F32 : VOPCX_32 <0x00000014, "V_CMPX_GT_F32">; 490 defm V_CMPX_LG_F32 : VOPCX_32 <0x00000015, "V_CMPX_LG_F32">; 491 defm V_CMPX_GE_F32 : VOPCX_32 <0x00000016, "V_CMPX_GE_F32">; 492 defm V_CMPX_O_F32 : VOPCX_32 <0x00000017, "V_CMPX_O_F32">; 493 defm V_CMPX_U_F32 : VOPCX_32 <0x00000018, "V_CMPX_U_F32">; 494 defm V_CMPX_NGE_F32 : VOPCX_32 <0x00000019, "V_CMPX_NGE_F32">; 495 defm V_CMPX_NLG_F32 : VOPCX_32 <0x0000001a, "V_CMPX_NLG_F32">; 496 defm V_CMPX_NGT_F32 : VOPCX_32 <0x0000001b, "V_CMPX_NGT_F32">; 497 defm V_CMPX_NLE_F32 : VOPCX_32 <0x0000001c, "V_CMPX_NLE_F32">; 498 defm V_CMPX_NEQ_F32 : VOPCX_32 <0x0000001d, "V_CMPX_NEQ_F32">; 499 defm V_CMPX_NLT_F32 : VOPCX_32 <0x0000001e, "V_CMPX_NLT_F32">; 500 defm V_CMPX_TRU_F32 : VOPCX_32 <0x0000001f, "V_CMPX_TRU_F32">; 501 502 } // End hasSideEffects = 1 503 504 defm V_CMP_F_F64 : VOPC_64 <0x00000020, "V_CMP_F_F64">; 505 defm V_CMP_LT_F64 : VOPC_64 <0x00000021, "V_CMP_LT_F64", f64, COND_OLT>; 506 defm V_CMP_EQ_F64 : VOPC_64 <0x00000022, "V_CMP_EQ_F64", f64, COND_OEQ>; 507 defm V_CMP_LE_F64 : VOPC_64 <0x00000023, "V_CMP_LE_F64", f64, COND_OLE>; 508 defm V_CMP_GT_F64 : VOPC_64 <0x00000024, "V_CMP_GT_F64", f64, COND_OGT>; 509 defm V_CMP_LG_F64 : VOPC_64 <0x00000025, "V_CMP_LG_F64">; 510 defm V_CMP_GE_F64 : VOPC_64 <0x00000026, "V_CMP_GE_F64", f64, COND_OGE>; 511 defm V_CMP_O_F64 : VOPC_64 <0x00000027, "V_CMP_O_F64", f64, COND_O>; 512 defm V_CMP_U_F64 : VOPC_64 <0x00000028, "V_CMP_U_F64", f64, COND_UO>; 513 defm V_CMP_NGE_F64 : VOPC_64 <0x00000029, "V_CMP_NGE_F64">; 514 defm V_CMP_NLG_F64 : VOPC_64 <0x0000002a, "V_CMP_NLG_F64">; 515 defm V_CMP_NGT_F64 : VOPC_64 <0x0000002b, "V_CMP_NGT_F64">; 516 defm V_CMP_NLE_F64 : VOPC_64 <0x0000002c, "V_CMP_NLE_F64">; 517 defm V_CMP_NEQ_F64 : VOPC_64 <0x0000002d, "V_CMP_NEQ_F64", f64, COND_UNE>; 518 defm V_CMP_NLT_F64 : VOPC_64 <0x0000002e, "V_CMP_NLT_F64">; 519 defm V_CMP_TRU_F64 : VOPC_64 <0x0000002f, "V_CMP_TRU_F64">; 520 521 let hasSideEffects = 1 in { 522 523 defm V_CMPX_F_F64 : VOPCX_64 <0x00000030, "V_CMPX_F_F64">; 524 defm V_CMPX_LT_F64 : VOPCX_64 <0x00000031, "V_CMPX_LT_F64">; 525 defm V_CMPX_EQ_F64 : VOPCX_64 <0x00000032, "V_CMPX_EQ_F64">; 526 defm V_CMPX_LE_F64 : VOPCX_64 <0x00000033, "V_CMPX_LE_F64">; 527 defm V_CMPX_GT_F64 : VOPCX_64 <0x00000034, "V_CMPX_GT_F64">; 528 defm V_CMPX_LG_F64 : VOPCX_64 <0x00000035, "V_CMPX_LG_F64">; 529 defm V_CMPX_GE_F64 : VOPCX_64 <0x00000036, "V_CMPX_GE_F64">; 530 defm V_CMPX_O_F64 : VOPCX_64 <0x00000037, "V_CMPX_O_F64">; 531 defm V_CMPX_U_F64 : VOPCX_64 <0x00000038, "V_CMPX_U_F64">; 532 defm V_CMPX_NGE_F64 : VOPCX_64 <0x00000039, "V_CMPX_NGE_F64">; 533 defm V_CMPX_NLG_F64 : VOPCX_64 <0x0000003a, "V_CMPX_NLG_F64">; 534 defm V_CMPX_NGT_F64 : VOPCX_64 <0x0000003b, "V_CMPX_NGT_F64">; 535 defm V_CMPX_NLE_F64 : VOPCX_64 <0x0000003c, "V_CMPX_NLE_F64">; 536 defm V_CMPX_NEQ_F64 : VOPCX_64 <0x0000003d, "V_CMPX_NEQ_F64">; 537 defm V_CMPX_NLT_F64 : VOPCX_64 <0x0000003e, "V_CMPX_NLT_F64">; 538 defm V_CMPX_TRU_F64 : VOPCX_64 <0x0000003f, "V_CMPX_TRU_F64">; 539 540 } // End hasSideEffects = 1 541 542 defm V_CMPS_F_F32 : VOPC_32 <0x00000040, "V_CMPS_F_F32">; 543 defm V_CMPS_LT_F32 : VOPC_32 <0x00000041, "V_CMPS_LT_F32">; 544 defm V_CMPS_EQ_F32 : VOPC_32 <0x00000042, "V_CMPS_EQ_F32">; 545 defm V_CMPS_LE_F32 : VOPC_32 <0x00000043, "V_CMPS_LE_F32">; 546 defm V_CMPS_GT_F32 : VOPC_32 <0x00000044, "V_CMPS_GT_F32">; 547 defm V_CMPS_LG_F32 : VOPC_32 <0x00000045, "V_CMPS_LG_F32">; 548 defm V_CMPS_GE_F32 : VOPC_32 <0x00000046, "V_CMPS_GE_F32">; 549 defm V_CMPS_O_F32 : VOPC_32 <0x00000047, "V_CMPS_O_F32">; 550 defm V_CMPS_U_F32 : VOPC_32 <0x00000048, "V_CMPS_U_F32">; 551 defm V_CMPS_NGE_F32 : VOPC_32 <0x00000049, "V_CMPS_NGE_F32">; 552 defm V_CMPS_NLG_F32 : VOPC_32 <0x0000004a, "V_CMPS_NLG_F32">; 553 defm V_CMPS_NGT_F32 : VOPC_32 <0x0000004b, "V_CMPS_NGT_F32">; 554 defm V_CMPS_NLE_F32 : VOPC_32 <0x0000004c, "V_CMPS_NLE_F32">; 555 defm V_CMPS_NEQ_F32 : VOPC_32 <0x0000004d, "V_CMPS_NEQ_F32">; 556 defm V_CMPS_NLT_F32 : VOPC_32 <0x0000004e, "V_CMPS_NLT_F32">; 557 defm V_CMPS_TRU_F32 : VOPC_32 <0x0000004f, "V_CMPS_TRU_F32">; 558 559 let hasSideEffects = 1 in { 560 561 defm V_CMPSX_F_F32 : VOPCX_32 <0x00000050, "V_CMPSX_F_F32">; 562 defm V_CMPSX_LT_F32 : VOPCX_32 <0x00000051, "V_CMPSX_LT_F32">; 563 defm V_CMPSX_EQ_F32 : VOPCX_32 <0x00000052, "V_CMPSX_EQ_F32">; 564 defm V_CMPSX_LE_F32 : VOPCX_32 <0x00000053, "V_CMPSX_LE_F32">; 565 defm V_CMPSX_GT_F32 : VOPCX_32 <0x00000054, "V_CMPSX_GT_F32">; 566 defm V_CMPSX_LG_F32 : VOPCX_32 <0x00000055, "V_CMPSX_LG_F32">; 567 defm V_CMPSX_GE_F32 : VOPCX_32 <0x00000056, "V_CMPSX_GE_F32">; 568 defm V_CMPSX_O_F32 : VOPCX_32 <0x00000057, "V_CMPSX_O_F32">; 569 defm V_CMPSX_U_F32 : VOPCX_32 <0x00000058, "V_CMPSX_U_F32">; 570 defm V_CMPSX_NGE_F32 : VOPCX_32 <0x00000059, "V_CMPSX_NGE_F32">; 571 defm V_CMPSX_NLG_F32 : VOPCX_32 <0x0000005a, "V_CMPSX_NLG_F32">; 572 defm V_CMPSX_NGT_F32 : VOPCX_32 <0x0000005b, "V_CMPSX_NGT_F32">; 573 defm V_CMPSX_NLE_F32 : VOPCX_32 <0x0000005c, "V_CMPSX_NLE_F32">; 574 defm V_CMPSX_NEQ_F32 : VOPCX_32 <0x0000005d, "V_CMPSX_NEQ_F32">; 575 defm V_CMPSX_NLT_F32 : VOPCX_32 <0x0000005e, "V_CMPSX_NLT_F32">; 576 defm V_CMPSX_TRU_F32 : VOPCX_32 <0x0000005f, "V_CMPSX_TRU_F32">; 577 578 } // End hasSideEffects = 1 579 580 defm V_CMPS_F_F64 : VOPC_64 <0x00000060, "V_CMPS_F_F64">; 581 defm V_CMPS_LT_F64 : VOPC_64 <0x00000061, "V_CMPS_LT_F64">; 582 defm V_CMPS_EQ_F64 : VOPC_64 <0x00000062, "V_CMPS_EQ_F64">; 583 defm V_CMPS_LE_F64 : VOPC_64 <0x00000063, "V_CMPS_LE_F64">; 584 defm V_CMPS_GT_F64 : VOPC_64 <0x00000064, "V_CMPS_GT_F64">; 585 defm V_CMPS_LG_F64 : VOPC_64 <0x00000065, "V_CMPS_LG_F64">; 586 defm V_CMPS_GE_F64 : VOPC_64 <0x00000066, "V_CMPS_GE_F64">; 587 defm V_CMPS_O_F64 : VOPC_64 <0x00000067, "V_CMPS_O_F64">; 588 defm V_CMPS_U_F64 : VOPC_64 <0x00000068, "V_CMPS_U_F64">; 589 defm V_CMPS_NGE_F64 : VOPC_64 <0x00000069, "V_CMPS_NGE_F64">; 590 defm V_CMPS_NLG_F64 : VOPC_64 <0x0000006a, "V_CMPS_NLG_F64">; 591 defm V_CMPS_NGT_F64 : VOPC_64 <0x0000006b, "V_CMPS_NGT_F64">; 592 defm V_CMPS_NLE_F64 : VOPC_64 <0x0000006c, "V_CMPS_NLE_F64">; 593 defm V_CMPS_NEQ_F64 : VOPC_64 <0x0000006d, "V_CMPS_NEQ_F64">; 594 defm V_CMPS_NLT_F64 : VOPC_64 <0x0000006e, "V_CMPS_NLT_F64">; 595 defm V_CMPS_TRU_F64 : VOPC_64 <0x0000006f, "V_CMPS_TRU_F64">; 596 597 let hasSideEffects = 1, Defs = [EXEC] in { 598 599 defm V_CMPSX_F_F64 : VOPC_64 <0x00000070, "V_CMPSX_F_F64">; 600 defm V_CMPSX_LT_F64 : VOPC_64 <0x00000071, "V_CMPSX_LT_F64">; 601 defm V_CMPSX_EQ_F64 : VOPC_64 <0x00000072, "V_CMPSX_EQ_F64">; 602 defm V_CMPSX_LE_F64 : VOPC_64 <0x00000073, "V_CMPSX_LE_F64">; 603 defm V_CMPSX_GT_F64 : VOPC_64 <0x00000074, "V_CMPSX_GT_F64">; 604 defm V_CMPSX_LG_F64 : VOPC_64 <0x00000075, "V_CMPSX_LG_F64">; 605 defm V_CMPSX_GE_F64 : VOPC_64 <0x00000076, "V_CMPSX_GE_F64">; 606 defm V_CMPSX_O_F64 : VOPC_64 <0x00000077, "V_CMPSX_O_F64">; 607 defm V_CMPSX_U_F64 : VOPC_64 <0x00000078, "V_CMPSX_U_F64">; 608 defm V_CMPSX_NGE_F64 : VOPC_64 <0x00000079, "V_CMPSX_NGE_F64">; 609 defm V_CMPSX_NLG_F64 : VOPC_64 <0x0000007a, "V_CMPSX_NLG_F64">; 610 defm V_CMPSX_NGT_F64 : VOPC_64 <0x0000007b, "V_CMPSX_NGT_F64">; 611 defm V_CMPSX_NLE_F64 : VOPC_64 <0x0000007c, "V_CMPSX_NLE_F64">; 612 defm V_CMPSX_NEQ_F64 : VOPC_64 <0x0000007d, "V_CMPSX_NEQ_F64">; 613 defm V_CMPSX_NLT_F64 : VOPC_64 <0x0000007e, "V_CMPSX_NLT_F64">; 614 defm V_CMPSX_TRU_F64 : VOPC_64 <0x0000007f, "V_CMPSX_TRU_F64">; 615 616 } // End hasSideEffects = 1, Defs = [EXEC] 617 618 defm V_CMP_F_I32 : VOPC_32 <0x00000080, "V_CMP_F_I32">; 619 defm V_CMP_LT_I32 : VOPC_32 <0x00000081, "V_CMP_LT_I32", i32, COND_SLT>; 620 defm V_CMP_EQ_I32 : VOPC_32 <0x00000082, "V_CMP_EQ_I32", i32, COND_EQ>; 621 defm V_CMP_LE_I32 : VOPC_32 <0x00000083, "V_CMP_LE_I32", i32, COND_SLE>; 622 defm V_CMP_GT_I32 : VOPC_32 <0x00000084, "V_CMP_GT_I32", i32, COND_SGT>; 623 defm V_CMP_NE_I32 : VOPC_32 <0x00000085, "V_CMP_NE_I32", i32, COND_NE>; 624 defm V_CMP_GE_I32 : VOPC_32 <0x00000086, "V_CMP_GE_I32", i32, COND_SGE>; 625 defm V_CMP_T_I32 : VOPC_32 <0x00000087, "V_CMP_T_I32">; 626 627 let hasSideEffects = 1 in { 628 629 defm V_CMPX_F_I32 : VOPCX_32 <0x00000090, "V_CMPX_F_I32">; 630 defm V_CMPX_LT_I32 : VOPCX_32 <0x00000091, "V_CMPX_LT_I32">; 631 defm V_CMPX_EQ_I32 : VOPCX_32 <0x00000092, "V_CMPX_EQ_I32">; 632 defm V_CMPX_LE_I32 : VOPCX_32 <0x00000093, "V_CMPX_LE_I32">; 633 defm V_CMPX_GT_I32 : VOPCX_32 <0x00000094, "V_CMPX_GT_I32">; 634 defm V_CMPX_NE_I32 : VOPCX_32 <0x00000095, "V_CMPX_NE_I32">; 635 defm V_CMPX_GE_I32 : VOPCX_32 <0x00000096, "V_CMPX_GE_I32">; 636 defm V_CMPX_T_I32 : VOPCX_32 <0x00000097, "V_CMPX_T_I32">; 637 638 } // End hasSideEffects = 1 639 640 defm V_CMP_F_I64 : VOPC_64 <0x000000a0, "V_CMP_F_I64">; 641 defm V_CMP_LT_I64 : VOPC_64 <0x000000a1, "V_CMP_LT_I64", i64, COND_SLT>; 642 defm V_CMP_EQ_I64 : VOPC_64 <0x000000a2, "V_CMP_EQ_I64", i64, COND_EQ>; 643 defm V_CMP_LE_I64 : VOPC_64 <0x000000a3, "V_CMP_LE_I64", i64, COND_SLE>; 644 defm V_CMP_GT_I64 : VOPC_64 <0x000000a4, "V_CMP_GT_I64", i64, COND_SGT>; 645 defm V_CMP_NE_I64 : VOPC_64 <0x000000a5, "V_CMP_NE_I64", i64, COND_NE>; 646 defm V_CMP_GE_I64 : VOPC_64 <0x000000a6, "V_CMP_GE_I64", i64, COND_SGE>; 647 defm V_CMP_T_I64 : VOPC_64 <0x000000a7, "V_CMP_T_I64">; 648 649 let hasSideEffects = 1 in { 650 651 defm V_CMPX_F_I64 : VOPCX_64 <0x000000b0, "V_CMPX_F_I64">; 652 defm V_CMPX_LT_I64 : VOPCX_64 <0x000000b1, "V_CMPX_LT_I64">; 653 defm V_CMPX_EQ_I64 : VOPCX_64 <0x000000b2, "V_CMPX_EQ_I64">; 654 defm V_CMPX_LE_I64 : VOPCX_64 <0x000000b3, "V_CMPX_LE_I64">; 655 defm V_CMPX_GT_I64 : VOPCX_64 <0x000000b4, "V_CMPX_GT_I64">; 656 defm V_CMPX_NE_I64 : VOPCX_64 <0x000000b5, "V_CMPX_NE_I64">; 657 defm V_CMPX_GE_I64 : VOPCX_64 <0x000000b6, "V_CMPX_GE_I64">; 658 defm V_CMPX_T_I64 : VOPCX_64 <0x000000b7, "V_CMPX_T_I64">; 659 660 } // End hasSideEffects = 1 661 662 defm V_CMP_F_U32 : VOPC_32 <0x000000c0, "V_CMP_F_U32">; 663 defm V_CMP_LT_U32 : VOPC_32 <0x000000c1, "V_CMP_LT_U32", i32, COND_ULT>; 664 defm V_CMP_EQ_U32 : VOPC_32 <0x000000c2, "V_CMP_EQ_U32", i32, COND_EQ>; 665 defm V_CMP_LE_U32 : VOPC_32 <0x000000c3, "V_CMP_LE_U32", i32, COND_ULE>; 666 defm V_CMP_GT_U32 : VOPC_32 <0x000000c4, "V_CMP_GT_U32", i32, COND_UGT>; 667 defm V_CMP_NE_U32 : VOPC_32 <0x000000c5, "V_CMP_NE_U32", i32, COND_NE>; 668 defm V_CMP_GE_U32 : VOPC_32 <0x000000c6, "V_CMP_GE_U32", i32, COND_UGE>; 669 defm V_CMP_T_U32 : VOPC_32 <0x000000c7, "V_CMP_T_U32">; 670 671 let hasSideEffects = 1 in { 672 673 defm V_CMPX_F_U32 : VOPCX_32 <0x000000d0, "V_CMPX_F_U32">; 674 defm V_CMPX_LT_U32 : VOPCX_32 <0x000000d1, "V_CMPX_LT_U32">; 675 defm V_CMPX_EQ_U32 : VOPCX_32 <0x000000d2, "V_CMPX_EQ_U32">; 676 defm V_CMPX_LE_U32 : VOPCX_32 <0x000000d3, "V_CMPX_LE_U32">; 677 defm V_CMPX_GT_U32 : VOPCX_32 <0x000000d4, "V_CMPX_GT_U32">; 678 defm V_CMPX_NE_U32 : VOPCX_32 <0x000000d5, "V_CMPX_NE_U32">; 679 defm V_CMPX_GE_U32 : VOPCX_32 <0x000000d6, "V_CMPX_GE_U32">; 680 defm V_CMPX_T_U32 : VOPCX_32 <0x000000d7, "V_CMPX_T_U32">; 681 682 } // End hasSideEffects = 1 683 684 defm V_CMP_F_U64 : VOPC_64 <0x000000e0, "V_CMP_F_U64">; 685 defm V_CMP_LT_U64 : VOPC_64 <0x000000e1, "V_CMP_LT_U64", i64, COND_ULT>; 686 defm V_CMP_EQ_U64 : VOPC_64 <0x000000e2, "V_CMP_EQ_U64", i64, COND_EQ>; 687 defm V_CMP_LE_U64 : VOPC_64 <0x000000e3, "V_CMP_LE_U64", i64, COND_ULE>; 688 defm V_CMP_GT_U64 : VOPC_64 <0x000000e4, "V_CMP_GT_U64", i64, COND_UGT>; 689 defm V_CMP_NE_U64 : VOPC_64 <0x000000e5, "V_CMP_NE_U64", i64, COND_NE>; 690 defm V_CMP_GE_U64 : VOPC_64 <0x000000e6, "V_CMP_GE_U64", i64, COND_UGE>; 691 defm V_CMP_T_U64 : VOPC_64 <0x000000e7, "V_CMP_T_U64">; 692 693 let hasSideEffects = 1 in { 694 695 defm V_CMPX_F_U64 : VOPCX_64 <0x000000f0, "V_CMPX_F_U64">; 696 defm V_CMPX_LT_U64 : VOPCX_64 <0x000000f1, "V_CMPX_LT_U64">; 697 defm V_CMPX_EQ_U64 : VOPCX_64 <0x000000f2, "V_CMPX_EQ_U64">; 698 defm V_CMPX_LE_U64 : VOPCX_64 <0x000000f3, "V_CMPX_LE_U64">; 699 defm V_CMPX_GT_U64 : VOPCX_64 <0x000000f4, "V_CMPX_GT_U64">; 700 defm V_CMPX_NE_U64 : VOPCX_64 <0x000000f5, "V_CMPX_NE_U64">; 701 defm V_CMPX_GE_U64 : VOPCX_64 <0x000000f6, "V_CMPX_GE_U64">; 702 defm V_CMPX_T_U64 : VOPCX_64 <0x000000f7, "V_CMPX_T_U64">; 703 704 } // End hasSideEffects = 1 705 706 defm V_CMP_CLASS_F32 : VOPC_32 <0x00000088, "V_CMP_CLASS_F32">; 707 708 let hasSideEffects = 1 in { 709 defm V_CMPX_CLASS_F32 : VOPCX_32 <0x00000098, "V_CMPX_CLASS_F32">; 710 } // End hasSideEffects = 1 711 712 defm V_CMP_CLASS_F64 : VOPC_64 <0x000000a8, "V_CMP_CLASS_F64">; 713 714 let hasSideEffects = 1 in { 715 defm V_CMPX_CLASS_F64 : VOPCX_64 <0x000000b8, "V_CMPX_CLASS_F64">; 716 } // End hasSideEffects = 1 717 718 } // End isCompare = 1 719 720 //===----------------------------------------------------------------------===// 721 // DS Instructions 722 //===----------------------------------------------------------------------===// 723 724 725 def DS_ADD_U32 : DS_1A1D_NORET <0x0, "DS_ADD_U32", VReg_32>; 726 def DS_SUB_U32 : DS_1A1D_NORET <0x1, "DS_SUB_U32", VReg_32>; 727 def DS_RSUB_U32 : DS_1A1D_NORET <0x2, "DS_RSUB_U32", VReg_32>; 728 def DS_INC_U32 : DS_1A1D_NORET <0x3, "DS_INC_U32", VReg_32>; 729 def DS_DEC_U32 : DS_1A1D_NORET <0x4, "DS_DEC_U32", VReg_32>; 730 def DS_MIN_I32 : DS_1A1D_NORET <0x5, "DS_MIN_I32", VReg_32>; 731 def DS_MAX_I32 : DS_1A1D_NORET <0x6, "DS_MAX_I32", VReg_32>; 732 def DS_MIN_U32 : DS_1A1D_NORET <0x7, "DS_MIN_U32", VReg_32>; 733 def DS_MAX_U32 : DS_1A1D_NORET <0x8, "DS_MAX_U32", VReg_32>; 734 def DS_AND_B32 : DS_1A1D_NORET <0x9, "DS_AND_B32", VReg_32>; 735 def DS_OR_B32 : DS_1A1D_NORET <0xa, "DS_OR_B32", VReg_32>; 736 def DS_XOR_B32 : DS_1A1D_NORET <0xb, "DS_XOR_B32", VReg_32>; 737 def DS_MSKOR_B32 : DS_1A1D_NORET <0xc, "DS_MSKOR_B32", VReg_32>; 738 def DS_CMPST_B32 : DS_1A2D_NORET <0x10, "DS_CMPST_B32", VReg_32>; 739 def DS_CMPST_F32 : DS_1A2D_NORET <0x11, "DS_CMPST_F32", VReg_32>; 740 def DS_MIN_F32 : DS_1A1D_NORET <0x12, "DS_MIN_F32", VReg_32>; 741 def DS_MAX_F32 : DS_1A1D_NORET <0x13, "DS_MAX_F32", VReg_32>; 742 743 def DS_ADD_RTN_U32 : DS_1A1D_RET <0x20, "DS_ADD_RTN_U32", VReg_32>; 744 def DS_SUB_RTN_U32 : DS_1A1D_RET <0x21, "DS_SUB_RTN_U32", VReg_32>; 745 def DS_RSUB_RTN_U32 : DS_1A1D_RET <0x22, "DS_RSUB_RTN_U32", VReg_32>; 746 def DS_INC_RTN_U32 : DS_1A1D_RET <0x23, "DS_INC_RTN_U32", VReg_32>; 747 def DS_DEC_RTN_U32 : DS_1A1D_RET <0x24, "DS_DEC_RTN_U32", VReg_32>; 748 def DS_MIN_RTN_I32 : DS_1A1D_RET <0x25, "DS_MIN_RTN_I32", VReg_32>; 749 def DS_MAX_RTN_I32 : DS_1A1D_RET <0x26, "DS_MAX_RTN_I32", VReg_32>; 750 def DS_MIN_RTN_U32 : DS_1A1D_RET <0x27, "DS_MIN_RTN_U32", VReg_32>; 751 def DS_MAX_RTN_U32 : DS_1A1D_RET <0x28, "DS_MAX_RTN_U32", VReg_32>; 752 def DS_AND_RTN_B32 : DS_1A1D_RET <0x29, "DS_AND_RTN_B32", VReg_32>; 753 def DS_OR_RTN_B32 : DS_1A1D_RET <0x2a, "DS_OR_RTN_B32", VReg_32>; 754 def DS_XOR_RTN_B32 : DS_1A1D_RET <0x2b, "DS_XOR_RTN_B32", VReg_32>; 755 def DS_MSKOR_RTN_B32 : DS_1A1D_RET <0x2c, "DS_MSKOR_RTN_B32", VReg_32>; 756 def DS_WRXCHG_RTN_B32 : DS_1A1D_RET <0x2d, "DS_WRXCHG_RTN_B32", VReg_32>; 757 //def DS_WRXCHG2_RTN_B32 : DS_2A0D_RET <0x2e, "DS_WRXCHG2_RTN_B32", VReg_32>; 758 //def DS_WRXCHG2ST64_RTN_B32 : DS_2A0D_RET <0x2f, "DS_WRXCHG2_RTN_B32", VReg_32>; 759 def DS_CMPST_RTN_B32 : DS_1A2D_RET <0x30, "DS_CMPST_RTN_B32", VReg_32>; 760 def DS_CMPST_RTN_F32 : DS_1A2D_RET <0x31, "DS_CMPST_RTN_F32", VReg_32>; 761 def DS_MIN_RTN_F32 : DS_1A1D_RET <0x32, "DS_MIN_RTN_F32", VReg_32>; 762 def DS_MAX_RTN_F32 : DS_1A1D_RET <0x33, "DS_MAX_RTN_F32", VReg_32>; 763 764 let SubtargetPredicate = isCI in { 765 def DS_WRAP_RTN_F32 : DS_1A1D_RET <0x34, "DS_WRAP_RTN_F32", VReg_32>; 766 } // End isCI 767 768 769 def DS_ADD_U64 : DS_1A1D_NORET <0x40, "DS_ADD_U64", VReg_32>; 770 def DS_SUB_U64 : DS_1A1D_NORET <0x41, "DS_SUB_U64", VReg_32>; 771 def DS_RSUB_U64 : DS_1A1D_NORET <0x42, "DS_RSUB_U64", VReg_32>; 772 def DS_INC_U64 : DS_1A1D_NORET <0x43, "DS_INC_U64", VReg_32>; 773 def DS_DEC_U64 : DS_1A1D_NORET <0x44, "DS_DEC_U64", VReg_32>; 774 def DS_MIN_I64 : DS_1A1D_NORET <0x45, "DS_MIN_I64", VReg_64>; 775 def DS_MAX_I64 : DS_1A1D_NORET <0x46, "DS_MAX_I64", VReg_64>; 776 def DS_MIN_U64 : DS_1A1D_NORET <0x47, "DS_MIN_U64", VReg_64>; 777 def DS_MAX_U64 : DS_1A1D_NORET <0x48, "DS_MAX_U64", VReg_64>; 778 def DS_AND_B64 : DS_1A1D_NORET <0x49, "DS_AND_B64", VReg_64>; 779 def DS_OR_B64 : DS_1A1D_NORET <0x4a, "DS_OR_B64", VReg_64>; 780 def DS_XOR_B64 : DS_1A1D_NORET <0x4b, "DS_XOR_B64", VReg_64>; 781 def DS_MSKOR_B64 : DS_1A1D_NORET <0x4c, "DS_MSKOR_B64", VReg_64>; 782 def DS_CMPST_B64 : DS_1A2D_NORET <0x50, "DS_CMPST_B64", VReg_64>; 783 def DS_CMPST_F64 : DS_1A2D_NORET <0x51, "DS_CMPST_F64", VReg_64>; 784 def DS_MIN_F64 : DS_1A1D_NORET <0x52, "DS_MIN_F64", VReg_64>; 785 def DS_MAX_F64 : DS_1A1D_NORET <0x53, "DS_MAX_F64", VReg_64>; 786 787 def DS_ADD_RTN_U64 : DS_1A1D_RET <0x60, "DS_ADD_RTN_U64", VReg_64>; 788 def DS_SUB_RTN_U64 : DS_1A1D_RET <0x61, "DS_SUB_RTN_U64", VReg_64>; 789 def DS_RSUB_RTN_U64 : DS_1A1D_RET <0x62, "DS_RSUB_RTN_U64", VReg_64>; 790 def DS_INC_RTN_U64 : DS_1A1D_RET <0x63, "DS_INC_RTN_U64", VReg_64>; 791 def DS_DEC_RTN_U64 : DS_1A1D_RET <0x64, "DS_DEC_RTN_U64", VReg_64>; 792 def DS_MIN_RTN_I64 : DS_1A1D_RET <0x65, "DS_MIN_RTN_I64", VReg_64>; 793 def DS_MAX_RTN_I64 : DS_1A1D_RET <0x66, "DS_MAX_RTN_I64", VReg_64>; 794 def DS_MIN_RTN_U64 : DS_1A1D_RET <0x67, "DS_MIN_RTN_U64", VReg_64>; 795 def DS_MAX_RTN_U64 : DS_1A1D_RET <0x68, "DS_MAX_RTN_U64", VReg_64>; 796 def DS_AND_RTN_B64 : DS_1A1D_RET <0x69, "DS_AND_RTN_B64", VReg_64>; 797 def DS_OR_RTN_B64 : DS_1A1D_RET <0x6a, "DS_OR_RTN_B64", VReg_64>; 798 def DS_XOR_RTN_B64 : DS_1A1D_RET <0x6b, "DS_XOR_RTN_B64", VReg_64>; 799 def DS_MSKOR_RTN_B64 : DS_1A1D_RET <0x6c, "DS_MSKOR_RTN_B64", VReg_64>; 800 def DS_WRXCHG_RTN_B64 : DS_1A1D_RET <0x6d, "DS_WRXCHG_RTN_B64", VReg_64>; 801 //def DS_WRXCHG2_RTN_B64 : DS_2A0D_RET <0x6e, "DS_WRXCHG2_RTN_B64", VReg_64>; 802 //def DS_WRXCHG2ST64_RTN_B64 : DS_2A0D_RET <0x6f, "DS_WRXCHG2_RTN_B64", VReg_64>; 803 def DS_CMPST_RTN_B64 : DS_1A2D_RET <0x70, "DS_CMPST_RTN_B64", VReg_64>; 804 def DS_CMPST_RTN_F64 : DS_1A2D_RET <0x71, "DS_CMPST_RTN_F64", VReg_64>; 805 def DS_MIN_RTN_F64 : DS_1A1D_RET <0x72, "DS_MIN_F64", VReg_64>; 806 def DS_MAX_RTN_F64 : DS_1A1D_RET <0x73, "DS_MAX_F64", VReg_64>; 807 808 //let SubtargetPredicate = isCI in { 809 // DS_CONDXCHG32_RTN_B64 810 // DS_CONDXCHG32_RTN_B128 811 //} // End isCI 812 813 // TODO: _SRC2_* forms 814 815 def DS_WRITE_B32 : DS_Store_Helper <0x0000000d, "DS_WRITE_B32", VReg_32>; 816 def DS_WRITE_B8 : DS_Store_Helper <0x00000001e, "DS_WRITE_B8", VReg_32>; 817 def DS_WRITE_B16 : DS_Store_Helper <0x00000001f, "DS_WRITE_B16", VReg_32>; 818 def DS_WRITE_B64 : DS_Store_Helper <0x00000004d, "DS_WRITE_B64", VReg_64>; 819 820 def DS_READ_B32 : DS_Load_Helper <0x00000036, "DS_READ_B32", VReg_32>; 821 def DS_READ_I8 : DS_Load_Helper <0x00000039, "DS_READ_I8", VReg_32>; 822 def DS_READ_U8 : DS_Load_Helper <0x0000003a, "DS_READ_U8", VReg_32>; 823 def DS_READ_I16 : DS_Load_Helper <0x0000003b, "DS_READ_I16", VReg_32>; 824 def DS_READ_U16 : DS_Load_Helper <0x0000003c, "DS_READ_U16", VReg_32>; 825 def DS_READ_B64 : DS_Load_Helper <0x00000076, "DS_READ_B64", VReg_64>; 826 827 // 2 forms. 828 def DS_WRITE2_B32 : DS_Load2_Helper <0x0000000E, "DS_WRITE2_B32", VReg_64>; 829 def DS_WRITE2_B64 : DS_Load2_Helper <0x0000004E, "DS_WRITE2_B64", VReg_128>; 830 831 def DS_READ2_B32 : DS_Load2_Helper <0x00000037, "DS_READ2_B32", VReg_64>; 832 def DS_READ2_B64 : DS_Load2_Helper <0x00000075, "DS_READ2_B64", VReg_128>; 833 834 // TODO: DS_READ2ST64_B32, DS_READ2ST64_B64, 835 // DS_WRITE2ST64_B32, DS_WRITE2ST64_B64 836 837 //===----------------------------------------------------------------------===// 838 // MUBUF Instructions 839 //===----------------------------------------------------------------------===// 840 841 //def BUFFER_LOAD_FORMAT_X : MUBUF_ <0x00000000, "BUFFER_LOAD_FORMAT_X", []>; 842 //def BUFFER_LOAD_FORMAT_XY : MUBUF_ <0x00000001, "BUFFER_LOAD_FORMAT_XY", []>; 843 //def BUFFER_LOAD_FORMAT_XYZ : MUBUF_ <0x00000002, "BUFFER_LOAD_FORMAT_XYZ", []>; 844 defm BUFFER_LOAD_FORMAT_XYZW : MUBUF_Load_Helper <0x00000003, "BUFFER_LOAD_FORMAT_XYZW", VReg_128>; 845 //def BUFFER_STORE_FORMAT_X : MUBUF_ <0x00000004, "BUFFER_STORE_FORMAT_X", []>; 846 //def BUFFER_STORE_FORMAT_XY : MUBUF_ <0x00000005, "BUFFER_STORE_FORMAT_XY", []>; 847 //def BUFFER_STORE_FORMAT_XYZ : MUBUF_ <0x00000006, "BUFFER_STORE_FORMAT_XYZ", []>; 848 //def BUFFER_STORE_FORMAT_XYZW : MUBUF_ <0x00000007, "BUFFER_STORE_FORMAT_XYZW", []>; 849 defm BUFFER_LOAD_UBYTE : MUBUF_Load_Helper < 850 0x00000008, "BUFFER_LOAD_UBYTE", VReg_32, i32, az_extloadi8_global 851 >; 852 defm BUFFER_LOAD_SBYTE : MUBUF_Load_Helper < 853 0x00000009, "BUFFER_LOAD_SBYTE", VReg_32, i32, sextloadi8_global 854 >; 855 defm BUFFER_LOAD_USHORT : MUBUF_Load_Helper < 856 0x0000000a, "BUFFER_LOAD_USHORT", VReg_32, i32, az_extloadi16_global 857 >; 858 defm BUFFER_LOAD_SSHORT : MUBUF_Load_Helper < 859 0x0000000b, "BUFFER_LOAD_SSHORT", VReg_32, i32, sextloadi16_global 860 >; 861 defm BUFFER_LOAD_DWORD : MUBUF_Load_Helper < 862 0x0000000c, "BUFFER_LOAD_DWORD", VReg_32, i32, global_load 863 >; 864 defm BUFFER_LOAD_DWORDX2 : MUBUF_Load_Helper < 865 0x0000000d, "BUFFER_LOAD_DWORDX2", VReg_64, v2i32, global_load 866 >; 867 defm BUFFER_LOAD_DWORDX4 : MUBUF_Load_Helper < 868 0x0000000e, "BUFFER_LOAD_DWORDX4", VReg_128, v4i32, global_load 869 >; 870 871 def BUFFER_STORE_BYTE : MUBUF_Store_Helper < 872 0x00000018, "BUFFER_STORE_BYTE", VReg_32, i32, truncstorei8_global 873 >; 874 875 def BUFFER_STORE_SHORT : MUBUF_Store_Helper < 876 0x0000001a, "BUFFER_STORE_SHORT", VReg_32, i32, truncstorei16_global 877 >; 878 879 def BUFFER_STORE_DWORD : MUBUF_Store_Helper < 880 0x0000001c, "BUFFER_STORE_DWORD", VReg_32, i32, global_store 881 >; 882 883 def BUFFER_STORE_DWORDX2 : MUBUF_Store_Helper < 884 0x0000001d, "BUFFER_STORE_DWORDX2", VReg_64, v2i32, global_store 885 >; 886 887 def BUFFER_STORE_DWORDX4 : MUBUF_Store_Helper < 888 0x0000001e, "BUFFER_STORE_DWORDX4", VReg_128, v4i32, global_store 889 >; 890 //def BUFFER_ATOMIC_SWAP : MUBUF_ <0x00000030, "BUFFER_ATOMIC_SWAP", []>; 891 //def BUFFER_ATOMIC_CMPSWAP : MUBUF_ <0x00000031, "BUFFER_ATOMIC_CMPSWAP", []>; 892 //def BUFFER_ATOMIC_ADD : MUBUF_ <0x00000032, "BUFFER_ATOMIC_ADD", []>; 893 //def BUFFER_ATOMIC_SUB : MUBUF_ <0x00000033, "BUFFER_ATOMIC_SUB", []>; 894 //def BUFFER_ATOMIC_RSUB : MUBUF_ <0x00000034, "BUFFER_ATOMIC_RSUB", []>; 895 //def BUFFER_ATOMIC_SMIN : MUBUF_ <0x00000035, "BUFFER_ATOMIC_SMIN", []>; 896 //def BUFFER_ATOMIC_UMIN : MUBUF_ <0x00000036, "BUFFER_ATOMIC_UMIN", []>; 897 //def BUFFER_ATOMIC_SMAX : MUBUF_ <0x00000037, "BUFFER_ATOMIC_SMAX", []>; 898 //def BUFFER_ATOMIC_UMAX : MUBUF_ <0x00000038, "BUFFER_ATOMIC_UMAX", []>; 899 //def BUFFER_ATOMIC_AND : MUBUF_ <0x00000039, "BUFFER_ATOMIC_AND", []>; 900 //def BUFFER_ATOMIC_OR : MUBUF_ <0x0000003a, "BUFFER_ATOMIC_OR", []>; 901 //def BUFFER_ATOMIC_XOR : MUBUF_ <0x0000003b, "BUFFER_ATOMIC_XOR", []>; 902 //def BUFFER_ATOMIC_INC : MUBUF_ <0x0000003c, "BUFFER_ATOMIC_INC", []>; 903 //def BUFFER_ATOMIC_DEC : MUBUF_ <0x0000003d, "BUFFER_ATOMIC_DEC", []>; 904 //def BUFFER_ATOMIC_FCMPSWAP : MUBUF_ <0x0000003e, "BUFFER_ATOMIC_FCMPSWAP", []>; 905 //def BUFFER_ATOMIC_FMIN : MUBUF_ <0x0000003f, "BUFFER_ATOMIC_FMIN", []>; 906 //def BUFFER_ATOMIC_FMAX : MUBUF_ <0x00000040, "BUFFER_ATOMIC_FMAX", []>; 907 //def BUFFER_ATOMIC_SWAP_X2 : MUBUF_X2 <0x00000050, "BUFFER_ATOMIC_SWAP_X2", []>; 908 //def BUFFER_ATOMIC_CMPSWAP_X2 : MUBUF_X2 <0x00000051, "BUFFER_ATOMIC_CMPSWAP_X2", []>; 909 //def BUFFER_ATOMIC_ADD_X2 : MUBUF_X2 <0x00000052, "BUFFER_ATOMIC_ADD_X2", []>; 910 //def BUFFER_ATOMIC_SUB_X2 : MUBUF_X2 <0x00000053, "BUFFER_ATOMIC_SUB_X2", []>; 911 //def BUFFER_ATOMIC_RSUB_X2 : MUBUF_X2 <0x00000054, "BUFFER_ATOMIC_RSUB_X2", []>; 912 //def BUFFER_ATOMIC_SMIN_X2 : MUBUF_X2 <0x00000055, "BUFFER_ATOMIC_SMIN_X2", []>; 913 //def BUFFER_ATOMIC_UMIN_X2 : MUBUF_X2 <0x00000056, "BUFFER_ATOMIC_UMIN_X2", []>; 914 //def BUFFER_ATOMIC_SMAX_X2 : MUBUF_X2 <0x00000057, "BUFFER_ATOMIC_SMAX_X2", []>; 915 //def BUFFER_ATOMIC_UMAX_X2 : MUBUF_X2 <0x00000058, "BUFFER_ATOMIC_UMAX_X2", []>; 916 //def BUFFER_ATOMIC_AND_X2 : MUBUF_X2 <0x00000059, "BUFFER_ATOMIC_AND_X2", []>; 917 //def BUFFER_ATOMIC_OR_X2 : MUBUF_X2 <0x0000005a, "BUFFER_ATOMIC_OR_X2", []>; 918 //def BUFFER_ATOMIC_XOR_X2 : MUBUF_X2 <0x0000005b, "BUFFER_ATOMIC_XOR_X2", []>; 919 //def BUFFER_ATOMIC_INC_X2 : MUBUF_X2 <0x0000005c, "BUFFER_ATOMIC_INC_X2", []>; 920 //def BUFFER_ATOMIC_DEC_X2 : MUBUF_X2 <0x0000005d, "BUFFER_ATOMIC_DEC_X2", []>; 921 //def BUFFER_ATOMIC_FCMPSWAP_X2 : MUBUF_X2 <0x0000005e, "BUFFER_ATOMIC_FCMPSWAP_X2", []>; 922 //def BUFFER_ATOMIC_FMIN_X2 : MUBUF_X2 <0x0000005f, "BUFFER_ATOMIC_FMIN_X2", []>; 923 //def BUFFER_ATOMIC_FMAX_X2 : MUBUF_X2 <0x00000060, "BUFFER_ATOMIC_FMAX_X2", []>; 924 //def BUFFER_WBINVL1_SC : MUBUF_WBINVL1 <0x00000070, "BUFFER_WBINVL1_SC", []>; 925 //def BUFFER_WBINVL1 : MUBUF_WBINVL1 <0x00000071, "BUFFER_WBINVL1", []>; 926 927 //===----------------------------------------------------------------------===// 928 // MTBUF Instructions 929 //===----------------------------------------------------------------------===// 930 931 //def TBUFFER_LOAD_FORMAT_X : MTBUF_ <0x00000000, "TBUFFER_LOAD_FORMAT_X", []>; 932 //def TBUFFER_LOAD_FORMAT_XY : MTBUF_ <0x00000001, "TBUFFER_LOAD_FORMAT_XY", []>; 933 //def TBUFFER_LOAD_FORMAT_XYZ : MTBUF_ <0x00000002, "TBUFFER_LOAD_FORMAT_XYZ", []>; 934 def TBUFFER_LOAD_FORMAT_XYZW : MTBUF_Load_Helper <0x00000003, "TBUFFER_LOAD_FORMAT_XYZW", VReg_128>; 935 def TBUFFER_STORE_FORMAT_X : MTBUF_Store_Helper <0x00000004, "TBUFFER_STORE_FORMAT_X", VReg_32>; 936 def TBUFFER_STORE_FORMAT_XY : MTBUF_Store_Helper <0x00000005, "TBUFFER_STORE_FORMAT_XY", VReg_64>; 937 def TBUFFER_STORE_FORMAT_XYZ : MTBUF_Store_Helper <0x00000006, "TBUFFER_STORE_FORMAT_XYZ", VReg_128>; 938 def TBUFFER_STORE_FORMAT_XYZW : MTBUF_Store_Helper <0x00000007, "TBUFFER_STORE_FORMAT_XYZW", VReg_128>; 939 940 //===----------------------------------------------------------------------===// 941 // MIMG Instructions 942 //===----------------------------------------------------------------------===// 943 944 defm IMAGE_LOAD : MIMG_NoSampler <0x00000000, "IMAGE_LOAD">; 945 defm IMAGE_LOAD_MIP : MIMG_NoSampler <0x00000001, "IMAGE_LOAD_MIP">; 946 //def IMAGE_LOAD_PCK : MIMG_NoPattern_ <"IMAGE_LOAD_PCK", 0x00000002>; 947 //def IMAGE_LOAD_PCK_SGN : MIMG_NoPattern_ <"IMAGE_LOAD_PCK_SGN", 0x00000003>; 948 //def IMAGE_LOAD_MIP_PCK : MIMG_NoPattern_ <"IMAGE_LOAD_MIP_PCK", 0x00000004>; 949 //def IMAGE_LOAD_MIP_PCK_SGN : MIMG_NoPattern_ <"IMAGE_LOAD_MIP_PCK_SGN", 0x00000005>; 950 //def IMAGE_STORE : MIMG_NoPattern_ <"IMAGE_STORE", 0x00000008>; 951 //def IMAGE_STORE_MIP : MIMG_NoPattern_ <"IMAGE_STORE_MIP", 0x00000009>; 952 //def IMAGE_STORE_PCK : MIMG_NoPattern_ <"IMAGE_STORE_PCK", 0x0000000a>; 953 //def IMAGE_STORE_MIP_PCK : MIMG_NoPattern_ <"IMAGE_STORE_MIP_PCK", 0x0000000b>; 954 defm IMAGE_GET_RESINFO : MIMG_NoSampler <0x0000000e, "IMAGE_GET_RESINFO">; 955 //def IMAGE_ATOMIC_SWAP : MIMG_NoPattern_ <"IMAGE_ATOMIC_SWAP", 0x0000000f>; 956 //def IMAGE_ATOMIC_CMPSWAP : MIMG_NoPattern_ <"IMAGE_ATOMIC_CMPSWAP", 0x00000010>; 957 //def IMAGE_ATOMIC_ADD : MIMG_NoPattern_ <"IMAGE_ATOMIC_ADD", 0x00000011>; 958 //def IMAGE_ATOMIC_SUB : MIMG_NoPattern_ <"IMAGE_ATOMIC_SUB", 0x00000012>; 959 //def IMAGE_ATOMIC_RSUB : MIMG_NoPattern_ <"IMAGE_ATOMIC_RSUB", 0x00000013>; 960 //def IMAGE_ATOMIC_SMIN : MIMG_NoPattern_ <"IMAGE_ATOMIC_SMIN", 0x00000014>; 961 //def IMAGE_ATOMIC_UMIN : MIMG_NoPattern_ <"IMAGE_ATOMIC_UMIN", 0x00000015>; 962 //def IMAGE_ATOMIC_SMAX : MIMG_NoPattern_ <"IMAGE_ATOMIC_SMAX", 0x00000016>; 963 //def IMAGE_ATOMIC_UMAX : MIMG_NoPattern_ <"IMAGE_ATOMIC_UMAX", 0x00000017>; 964 //def IMAGE_ATOMIC_AND : MIMG_NoPattern_ <"IMAGE_ATOMIC_AND", 0x00000018>; 965 //def IMAGE_ATOMIC_OR : MIMG_NoPattern_ <"IMAGE_ATOMIC_OR", 0x00000019>; 966 //def IMAGE_ATOMIC_XOR : MIMG_NoPattern_ <"IMAGE_ATOMIC_XOR", 0x0000001a>; 967 //def IMAGE_ATOMIC_INC : MIMG_NoPattern_ <"IMAGE_ATOMIC_INC", 0x0000001b>; 968 //def IMAGE_ATOMIC_DEC : MIMG_NoPattern_ <"IMAGE_ATOMIC_DEC", 0x0000001c>; 969 //def IMAGE_ATOMIC_FCMPSWAP : MIMG_NoPattern_ <"IMAGE_ATOMIC_FCMPSWAP", 0x0000001d>; 970 //def IMAGE_ATOMIC_FMIN : MIMG_NoPattern_ <"IMAGE_ATOMIC_FMIN", 0x0000001e>; 971 //def IMAGE_ATOMIC_FMAX : MIMG_NoPattern_ <"IMAGE_ATOMIC_FMAX", 0x0000001f>; 972 defm IMAGE_SAMPLE : MIMG_Sampler <0x00000020, "IMAGE_SAMPLE">; 973 //def IMAGE_SAMPLE_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_CL", 0x00000021>; 974 defm IMAGE_SAMPLE_D : MIMG_Sampler <0x00000022, "IMAGE_SAMPLE_D">; 975 //def IMAGE_SAMPLE_D_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_D_CL", 0x00000023>; 976 defm IMAGE_SAMPLE_L : MIMG_Sampler <0x00000024, "IMAGE_SAMPLE_L">; 977 defm IMAGE_SAMPLE_B : MIMG_Sampler <0x00000025, "IMAGE_SAMPLE_B">; 978 //def IMAGE_SAMPLE_B_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_B_CL", 0x00000026>; 979 //def IMAGE_SAMPLE_LZ : MIMG_NoPattern_ <"IMAGE_SAMPLE_LZ", 0x00000027>; 980 defm IMAGE_SAMPLE_C : MIMG_Sampler <0x00000028, "IMAGE_SAMPLE_C">; 981 //def IMAGE_SAMPLE_C_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_CL", 0x00000029>; 982 defm IMAGE_SAMPLE_C_D : MIMG_Sampler <0x0000002a, "IMAGE_SAMPLE_C_D">; 983 //def IMAGE_SAMPLE_C_D_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_D_CL", 0x0000002b>; 984 defm IMAGE_SAMPLE_C_L : MIMG_Sampler <0x0000002c, "IMAGE_SAMPLE_C_L">; 985 defm IMAGE_SAMPLE_C_B : MIMG_Sampler <0x0000002d, "IMAGE_SAMPLE_C_B">; 986 //def IMAGE_SAMPLE_C_B_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_B_CL", 0x0000002e>; 987 //def IMAGE_SAMPLE_C_LZ : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_LZ", 0x0000002f>; 988 //def IMAGE_SAMPLE_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_O", 0x00000030>; 989 //def IMAGE_SAMPLE_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_CL_O", 0x00000031>; 990 //def IMAGE_SAMPLE_D_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_D_O", 0x00000032>; 991 //def IMAGE_SAMPLE_D_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_D_CL_O", 0x00000033>; 992 //def IMAGE_SAMPLE_L_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_L_O", 0x00000034>; 993 //def IMAGE_SAMPLE_B_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_B_O", 0x00000035>; 994 //def IMAGE_SAMPLE_B_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_B_CL_O", 0x00000036>; 995 //def IMAGE_SAMPLE_LZ_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_LZ_O", 0x00000037>; 996 //def IMAGE_SAMPLE_C_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_O", 0x00000038>; 997 //def IMAGE_SAMPLE_C_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_CL_O", 0x00000039>; 998 //def IMAGE_SAMPLE_C_D_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_D_O", 0x0000003a>; 999 //def IMAGE_SAMPLE_C_D_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_D_CL_O", 0x0000003b>; 1000 //def IMAGE_SAMPLE_C_L_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_L_O", 0x0000003c>; 1001 //def IMAGE_SAMPLE_C_B_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_B_O", 0x0000003d>; 1002 //def IMAGE_SAMPLE_C_B_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_B_CL_O", 0x0000003e>; 1003 //def IMAGE_SAMPLE_C_LZ_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_LZ_O", 0x0000003f>; 1004 defm IMAGE_GATHER4 : MIMG_Gather <0x00000040, "IMAGE_GATHER4">; 1005 defm IMAGE_GATHER4_CL : MIMG_Gather <0x00000041, "IMAGE_GATHER4_CL">; 1006 defm IMAGE_GATHER4_L : MIMG_Gather <0x00000044, "IMAGE_GATHER4_L">; 1007 defm IMAGE_GATHER4_B : MIMG_Gather <0x00000045, "IMAGE_GATHER4_B">; 1008 defm IMAGE_GATHER4_B_CL : MIMG_Gather <0x00000046, "IMAGE_GATHER4_B_CL">; 1009 defm IMAGE_GATHER4_LZ : MIMG_Gather <0x00000047, "IMAGE_GATHER4_LZ">; 1010 defm IMAGE_GATHER4_C : MIMG_Gather <0x00000048, "IMAGE_GATHER4_C">; 1011 defm IMAGE_GATHER4_C_CL : MIMG_Gather <0x00000049, "IMAGE_GATHER4_C_CL">; 1012 defm IMAGE_GATHER4_C_L : MIMG_Gather <0x0000004c, "IMAGE_GATHER4_C_L">; 1013 defm IMAGE_GATHER4_C_B : MIMG_Gather <0x0000004d, "IMAGE_GATHER4_C_B">; 1014 defm IMAGE_GATHER4_C_B_CL : MIMG_Gather <0x0000004e, "IMAGE_GATHER4_C_B_CL">; 1015 defm IMAGE_GATHER4_C_LZ : MIMG_Gather <0x0000004f, "IMAGE_GATHER4_C_LZ">; 1016 defm IMAGE_GATHER4_O : MIMG_Gather <0x00000050, "IMAGE_GATHER4_O">; 1017 defm IMAGE_GATHER4_CL_O : MIMG_Gather <0x00000051, "IMAGE_GATHER4_CL_O">; 1018 defm IMAGE_GATHER4_L_O : MIMG_Gather <0x00000054, "IMAGE_GATHER4_L_O">; 1019 defm IMAGE_GATHER4_B_O : MIMG_Gather <0x00000055, "IMAGE_GATHER4_B_O">; 1020 defm IMAGE_GATHER4_B_CL_O : MIMG_Gather <0x00000056, "IMAGE_GATHER4_B_CL_O">; 1021 defm IMAGE_GATHER4_LZ_O : MIMG_Gather <0x00000057, "IMAGE_GATHER4_LZ_O">; 1022 defm IMAGE_GATHER4_C_O : MIMG_Gather <0x00000058, "IMAGE_GATHER4_C_O">; 1023 defm IMAGE_GATHER4_C_CL_O : MIMG_Gather <0x00000059, "IMAGE_GATHER4_C_CL_O">; 1024 defm IMAGE_GATHER4_C_L_O : MIMG_Gather <0x0000005c, "IMAGE_GATHER4_C_L_O">; 1025 defm IMAGE_GATHER4_C_B_O : MIMG_Gather <0x0000005d, "IMAGE_GATHER4_C_B_O">; 1026 defm IMAGE_GATHER4_C_B_CL_O : MIMG_Gather <0x0000005e, "IMAGE_GATHER4_C_B_CL_O">; 1027 defm IMAGE_GATHER4_C_LZ_O : MIMG_Gather <0x0000005f, "IMAGE_GATHER4_C_LZ_O">; 1028 defm IMAGE_GET_LOD : MIMG_Sampler <0x00000060, "IMAGE_GET_LOD">; 1029 //def IMAGE_SAMPLE_CD : MIMG_NoPattern_ <"IMAGE_SAMPLE_CD", 0x00000068>; 1030 //def IMAGE_SAMPLE_CD_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_CD_CL", 0x00000069>; 1031 //def IMAGE_SAMPLE_C_CD : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_CD", 0x0000006a>; 1032 //def IMAGE_SAMPLE_C_CD_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_CD_CL", 0x0000006b>; 1033 //def IMAGE_SAMPLE_CD_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_CD_O", 0x0000006c>; 1034 //def IMAGE_SAMPLE_CD_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_CD_CL_O", 0x0000006d>; 1035 //def IMAGE_SAMPLE_C_CD_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_CD_O", 0x0000006e>; 1036 //def IMAGE_SAMPLE_C_CD_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_CD_CL_O", 0x0000006f>; 1037 //def IMAGE_RSRC256 : MIMG_NoPattern_RSRC256 <"IMAGE_RSRC256", 0x0000007e>; 1038 //def IMAGE_SAMPLER : MIMG_NoPattern_ <"IMAGE_SAMPLER", 0x0000007f>; 1039 1040 //===----------------------------------------------------------------------===// 1041 // VOP1 Instructions 1042 //===----------------------------------------------------------------------===// 1043 1044 //def V_NOP : VOP1_ <0x00000000, "V_NOP", []>; 1045 1046 let neverHasSideEffects = 1, isMoveImm = 1 in { 1047 defm V_MOV_B32 : VOP1_32 <0x00000001, "V_MOV_B32", []>; 1048 } // End neverHasSideEffects = 1, isMoveImm = 1 1049 1050 let Uses = [EXEC] in { 1051 1052 def V_READFIRSTLANE_B32 : VOP1 < 1053 0x00000002, 1054 (outs SReg_32:$vdst), 1055 (ins VReg_32:$src0), 1056 "V_READFIRSTLANE_B32 $vdst, $src0", 1057 [] 1058 >; 1059 1060 } 1061 1062 defm V_CVT_I32_F64 : VOP1_32_64 <0x00000003, "V_CVT_I32_F64", 1063 [(set i32:$dst, (fp_to_sint f64:$src0))] 1064 >; 1065 defm V_CVT_F64_I32 : VOP1_64_32 <0x00000004, "V_CVT_F64_I32", 1066 [(set f64:$dst, (sint_to_fp i32:$src0))] 1067 >; 1068 defm V_CVT_F32_I32 : VOP1_32 <0x00000005, "V_CVT_F32_I32", 1069 [(set f32:$dst, (sint_to_fp i32:$src0))] 1070 >; 1071 defm V_CVT_F32_U32 : VOP1_32 <0x00000006, "V_CVT_F32_U32", 1072 [(set f32:$dst, (uint_to_fp i32:$src0))] 1073 >; 1074 defm V_CVT_U32_F32 : VOP1_32 <0x00000007, "V_CVT_U32_F32", 1075 [(set i32:$dst, (fp_to_uint f32:$src0))] 1076 >; 1077 defm V_CVT_I32_F32 : VOP1_32 <0x00000008, "V_CVT_I32_F32", 1078 [(set i32:$dst, (fp_to_sint f32:$src0))] 1079 >; 1080 defm V_MOV_FED_B32 : VOP1_32 <0x00000009, "V_MOV_FED_B32", []>; 1081 defm V_CVT_F16_F32 : VOP1_32 <0x0000000a, "V_CVT_F16_F32", 1082 [(set i32:$dst, (f32_to_f16 f32:$src0))] 1083 >; 1084 defm V_CVT_F32_F16 : VOP1_32 <0x0000000b, "V_CVT_F32_F16", 1085 [(set f32:$dst, (f16_to_f32 i32:$src0))] 1086 >; 1087 //defm V_CVT_RPI_I32_F32 : VOP1_32 <0x0000000c, "V_CVT_RPI_I32_F32", []>; 1088 //defm V_CVT_FLR_I32_F32 : VOP1_32 <0x0000000d, "V_CVT_FLR_I32_F32", []>; 1089 //defm V_CVT_OFF_F32_I4 : VOP1_32 <0x0000000e, "V_CVT_OFF_F32_I4", []>; 1090 defm V_CVT_F32_F64 : VOP1_32_64 <0x0000000f, "V_CVT_F32_F64", 1091 [(set f32:$dst, (fround f64:$src0))] 1092 >; 1093 defm V_CVT_F64_F32 : VOP1_64_32 <0x00000010, "V_CVT_F64_F32", 1094 [(set f64:$dst, (fextend f32:$src0))] 1095 >; 1096 defm V_CVT_F32_UBYTE0 : VOP1_32 <0x00000011, "V_CVT_F32_UBYTE0", 1097 [(set f32:$dst, (AMDGPUcvt_f32_ubyte0 i32:$src0))] 1098 >; 1099 defm V_CVT_F32_UBYTE1 : VOP1_32 <0x00000012, "V_CVT_F32_UBYTE1", 1100 [(set f32:$dst, (AMDGPUcvt_f32_ubyte1 i32:$src0))] 1101 >; 1102 defm V_CVT_F32_UBYTE2 : VOP1_32 <0x00000013, "V_CVT_F32_UBYTE2", 1103 [(set f32:$dst, (AMDGPUcvt_f32_ubyte2 i32:$src0))] 1104 >; 1105 defm V_CVT_F32_UBYTE3 : VOP1_32 <0x00000014, "V_CVT_F32_UBYTE3", 1106 [(set f32:$dst, (AMDGPUcvt_f32_ubyte3 i32:$src0))] 1107 >; 1108 defm V_CVT_U32_F64 : VOP1_32_64 <0x00000015, "V_CVT_U32_F64", 1109 [(set i32:$dst, (fp_to_uint f64:$src0))] 1110 >; 1111 defm V_CVT_F64_U32 : VOP1_64_32 <0x00000016, "V_CVT_F64_U32", 1112 [(set f64:$dst, (uint_to_fp i32:$src0))] 1113 >; 1114 1115 defm V_FRACT_F32 : VOP1_32 <0x00000020, "V_FRACT_F32", 1116 [(set f32:$dst, (AMDGPUfract f32:$src0))] 1117 >; 1118 defm V_TRUNC_F32 : VOP1_32 <0x00000021, "V_TRUNC_F32", 1119 [(set f32:$dst, (ftrunc f32:$src0))] 1120 >; 1121 defm V_CEIL_F32 : VOP1_32 <0x00000022, "V_CEIL_F32", 1122 [(set f32:$dst, (fceil f32:$src0))] 1123 >; 1124 defm V_RNDNE_F32 : VOP1_32 <0x00000023, "V_RNDNE_F32", 1125 [(set f32:$dst, (frint f32:$src0))] 1126 >; 1127 defm V_FLOOR_F32 : VOP1_32 <0x00000024, "V_FLOOR_F32", 1128 [(set f32:$dst, (ffloor f32:$src0))] 1129 >; 1130 defm V_EXP_F32 : VOP1_32 <0x00000025, "V_EXP_F32", 1131 [(set f32:$dst, (fexp2 f32:$src0))] 1132 >; 1133 defm V_LOG_CLAMP_F32 : VOP1_32 <0x00000026, "V_LOG_CLAMP_F32", []>; 1134 defm V_LOG_F32 : VOP1_32 <0x00000027, "V_LOG_F32", 1135 [(set f32:$dst, (flog2 f32:$src0))] 1136 >; 1137 1138 defm V_RCP_CLAMP_F32 : VOP1_32 <0x00000028, "V_RCP_CLAMP_F32", []>; 1139 defm V_RCP_LEGACY_F32 : VOP1_32 <0x00000029, "V_RCP_LEGACY_F32", []>; 1140 defm V_RCP_F32 : VOP1_32 <0x0000002a, "V_RCP_F32", 1141 [(set f32:$dst, (AMDGPUrcp f32:$src0))] 1142 >; 1143 defm V_RCP_IFLAG_F32 : VOP1_32 <0x0000002b, "V_RCP_IFLAG_F32", []>; 1144 defm V_RSQ_CLAMP_F32 : VOP1_32 <0x0000002c, "V_RSQ_CLAMP_F32", 1145 [(set f32:$dst, (AMDGPUrsq_clamped f32:$src0))] 1146 >; 1147 defm V_RSQ_LEGACY_F32 : VOP1_32 < 1148 0x0000002d, "V_RSQ_LEGACY_F32", 1149 [(set f32:$dst, (AMDGPUrsq_legacy f32:$src0))] 1150 >; 1151 defm V_RSQ_F32 : VOP1_32 <0x0000002e, "V_RSQ_F32", 1152 [(set f32:$dst, (AMDGPUrsq f32:$src0))] 1153 >; 1154 defm V_RCP_F64 : VOP1_64 <0x0000002f, "V_RCP_F64", 1155 [(set f64:$dst, (AMDGPUrcp f64:$src0))] 1156 >; 1157 defm V_RCP_CLAMP_F64 : VOP1_64 <0x00000030, "V_RCP_CLAMP_F64", []>; 1158 defm V_RSQ_F64 : VOP1_64 <0x00000031, "V_RSQ_F64", 1159 [(set f64:$dst, (AMDGPUrsq f64:$src0))] 1160 >; 1161 defm V_RSQ_CLAMP_F64 : VOP1_64 <0x00000032, "V_RSQ_CLAMP_F64", 1162 [(set f64:$dst, (AMDGPUrsq_clamped f64:$src0))] 1163 >; 1164 defm V_SQRT_F32 : VOP1_32 <0x00000033, "V_SQRT_F32", 1165 [(set f32:$dst, (fsqrt f32:$src0))] 1166 >; 1167 defm V_SQRT_F64 : VOP1_64 <0x00000034, "V_SQRT_F64", 1168 [(set f64:$dst, (fsqrt f64:$src0))] 1169 >; 1170 defm V_SIN_F32 : VOP1_32 <0x00000035, "V_SIN_F32", []>; 1171 defm V_COS_F32 : VOP1_32 <0x00000036, "V_COS_F32", []>; 1172 defm V_NOT_B32 : VOP1_32 <0x00000037, "V_NOT_B32", []>; 1173 defm V_BFREV_B32 : VOP1_32 <0x00000038, "V_BFREV_B32", []>; 1174 defm V_FFBH_U32 : VOP1_32 <0x00000039, "V_FFBH_U32", []>; 1175 defm V_FFBL_B32 : VOP1_32 <0x0000003a, "V_FFBL_B32", []>; 1176 defm V_FFBH_I32 : VOP1_32 <0x0000003b, "V_FFBH_I32", []>; 1177 //defm V_FREXP_EXP_I32_F64 : VOP1_32 <0x0000003c, "V_FREXP_EXP_I32_F64", []>; 1178 defm V_FREXP_MANT_F64 : VOP1_64 <0x0000003d, "V_FREXP_MANT_F64", []>; 1179 defm V_FRACT_F64 : VOP1_64 <0x0000003e, "V_FRACT_F64", []>; 1180 //defm V_FREXP_EXP_I32_F32 : VOP1_32 <0x0000003f, "V_FREXP_EXP_I32_F32", []>; 1181 defm V_FREXP_MANT_F32 : VOP1_32 <0x00000040, "V_FREXP_MANT_F32", []>; 1182 //def V_CLREXCP : VOP1_ <0x00000041, "V_CLREXCP", []>; 1183 defm V_MOVRELD_B32 : VOP1_32 <0x00000042, "V_MOVRELD_B32", []>; 1184 defm V_MOVRELS_B32 : VOP1_32 <0x00000043, "V_MOVRELS_B32", []>; 1185 defm V_MOVRELSD_B32 : VOP1_32 <0x00000044, "V_MOVRELSD_B32", []>; 1186 1187 1188 //===----------------------------------------------------------------------===// 1189 // VINTRP Instructions 1190 //===----------------------------------------------------------------------===// 1191 1192 def V_INTERP_P1_F32 : VINTRP < 1193 0x00000000, 1194 (outs VReg_32:$dst), 1195 (ins VReg_32:$i, i32imm:$attr_chan, i32imm:$attr, M0Reg:$m0), 1196 "V_INTERP_P1_F32 $dst, $i, $attr_chan, $attr, [$m0]", 1197 []> { 1198 let DisableEncoding = "$m0"; 1199 } 1200 1201 def V_INTERP_P2_F32 : VINTRP < 1202 0x00000001, 1203 (outs VReg_32:$dst), 1204 (ins VReg_32:$src0, VReg_32:$j, i32imm:$attr_chan, i32imm:$attr, M0Reg:$m0), 1205 "V_INTERP_P2_F32 $dst, [$src0], $j, $attr_chan, $attr, [$m0]", 1206 []> { 1207 1208 let Constraints = "$src0 = $dst"; 1209 let DisableEncoding = "$src0,$m0"; 1210 1211 } 1212 1213 def V_INTERP_MOV_F32 : VINTRP < 1214 0x00000002, 1215 (outs VReg_32:$dst), 1216 (ins InterpSlot:$src0, i32imm:$attr_chan, i32imm:$attr, M0Reg:$m0), 1217 "V_INTERP_MOV_F32 $dst, $src0, $attr_chan, $attr, [$m0]", 1218 []> { 1219 let DisableEncoding = "$m0"; 1220 } 1221 1222 //===----------------------------------------------------------------------===// 1223 // VOP2 Instructions 1224 //===----------------------------------------------------------------------===// 1225 1226 def V_CNDMASK_B32_e32 : VOP2 <0x00000000, (outs VReg_32:$dst), 1227 (ins VSrc_32:$src0, VReg_32:$src1, VCCReg:$vcc), 1228 "V_CNDMASK_B32_e32 $dst, $src0, $src1, [$vcc]", 1229 [] 1230 >{ 1231 let DisableEncoding = "$vcc"; 1232 } 1233 1234 def V_CNDMASK_B32_e64 : VOP3 <0x00000100, (outs VReg_32:$dst), 1235 (ins VSrc_32:$src0, VSrc_32:$src1, SSrc_64:$src2, 1236 InstFlag:$abs, InstFlag:$clamp, InstFlag:$omod, InstFlag:$neg), 1237 "V_CNDMASK_B32_e64 $dst, $src0, $src1, $src2, $abs, $clamp, $omod, $neg", 1238 [(set i32:$dst, (select i1:$src2, i32:$src1, i32:$src0))] 1239 > { 1240 let src0_modifiers = 0; 1241 let src1_modifiers = 0; 1242 let src2_modifiers = 0; 1243 } 1244 1245 def V_READLANE_B32 : VOP2 < 1246 0x00000001, 1247 (outs SReg_32:$vdst), 1248 (ins VReg_32:$src0, SSrc_32:$vsrc1), 1249 "V_READLANE_B32 $vdst, $src0, $vsrc1", 1250 [] 1251 >; 1252 1253 def V_WRITELANE_B32 : VOP2 < 1254 0x00000002, 1255 (outs VReg_32:$vdst), 1256 (ins SReg_32:$src0, SSrc_32:$vsrc1), 1257 "V_WRITELANE_B32 $vdst, $src0, $vsrc1", 1258 [] 1259 >; 1260 1261 let isCommutable = 1 in { 1262 defm V_ADD_F32 : VOP2_32 <0x00000003, "V_ADD_F32", 1263 [(set f32:$dst, (fadd f32:$src0, f32:$src1))] 1264 >; 1265 1266 defm V_SUB_F32 : VOP2_32 <0x00000004, "V_SUB_F32", 1267 [(set f32:$dst, (fsub f32:$src0, f32:$src1))] 1268 >; 1269 defm V_SUBREV_F32 : VOP2_32 <0x00000005, "V_SUBREV_F32", [], "V_SUB_F32">; 1270 } // End isCommutable = 1 1271 1272 defm V_MAC_LEGACY_F32 : VOP2_32 <0x00000006, "V_MAC_LEGACY_F32", []>; 1273 1274 let isCommutable = 1 in { 1275 1276 defm V_MUL_LEGACY_F32 : VOP2_32 < 1277 0x00000007, "V_MUL_LEGACY_F32", 1278 [(set f32:$dst, (int_AMDGPU_mul f32:$src0, f32:$src1))] 1279 >; 1280 1281 defm V_MUL_F32 : VOP2_32 <0x00000008, "V_MUL_F32", 1282 [(set f32:$dst, (fmul f32:$src0, f32:$src1))] 1283 >; 1284 1285 1286 defm V_MUL_I32_I24 : VOP2_32 <0x00000009, "V_MUL_I32_I24", 1287 [(set i32:$dst, (AMDGPUmul_i24 i32:$src0, i32:$src1))] 1288 >; 1289 //defm V_MUL_HI_I32_I24 : VOP2_32 <0x0000000a, "V_MUL_HI_I32_I24", []>; 1290 defm V_MUL_U32_U24 : VOP2_32 <0x0000000b, "V_MUL_U32_U24", 1291 [(set i32:$dst, (AMDGPUmul_u24 i32:$src0, i32:$src1))] 1292 >; 1293 //defm V_MUL_HI_U32_U24 : VOP2_32 <0x0000000c, "V_MUL_HI_U32_U24", []>; 1294 1295 1296 defm V_MIN_LEGACY_F32 : VOP2_32 <0x0000000d, "V_MIN_LEGACY_F32", 1297 [(set f32:$dst, (AMDGPUfmin f32:$src0, f32:$src1))] 1298 >; 1299 1300 defm V_MAX_LEGACY_F32 : VOP2_32 <0x0000000e, "V_MAX_LEGACY_F32", 1301 [(set f32:$dst, (AMDGPUfmax f32:$src0, f32:$src1))] 1302 >; 1303 1304 defm V_MIN_F32 : VOP2_32 <0x0000000f, "V_MIN_F32", []>; 1305 defm V_MAX_F32 : VOP2_32 <0x00000010, "V_MAX_F32", []>; 1306 defm V_MIN_I32 : VOP2_32 <0x00000011, "V_MIN_I32", 1307 [(set i32:$dst, (AMDGPUsmin i32:$src0, i32:$src1))]>; 1308 defm V_MAX_I32 : VOP2_32 <0x00000012, "V_MAX_I32", 1309 [(set i32:$dst, (AMDGPUsmax i32:$src0, i32:$src1))]>; 1310 defm V_MIN_U32 : VOP2_32 <0x00000013, "V_MIN_U32", 1311 [(set i32:$dst, (AMDGPUumin i32:$src0, i32:$src1))]>; 1312 defm V_MAX_U32 : VOP2_32 <0x00000014, "V_MAX_U32", 1313 [(set i32:$dst, (AMDGPUumax i32:$src0, i32:$src1))]>; 1314 1315 defm V_LSHR_B32 : VOP2_32 <0x00000015, "V_LSHR_B32", 1316 [(set i32:$dst, (srl i32:$src0, i32:$src1))] 1317 >; 1318 1319 defm V_LSHRREV_B32 : VOP2_32 <0x00000016, "V_LSHRREV_B32", [], "V_LSHR_B32">; 1320 1321 defm V_ASHR_I32 : VOP2_32 <0x00000017, "V_ASHR_I32", 1322 [(set i32:$dst, (sra i32:$src0, i32:$src1))] 1323 >; 1324 defm V_ASHRREV_I32 : VOP2_32 <0x00000018, "V_ASHRREV_I32", [], "V_ASHR_I32">; 1325 1326 let hasPostISelHook = 1 in { 1327 1328 defm V_LSHL_B32 : VOP2_32 <0x00000019, "V_LSHL_B32", 1329 [(set i32:$dst, (shl i32:$src0, i32:$src1))] 1330 >; 1331 1332 } 1333 defm V_LSHLREV_B32 : VOP2_32 <0x0000001a, "V_LSHLREV_B32", [], "V_LSHL_B32">; 1334 1335 defm V_AND_B32 : VOP2_32 <0x0000001b, "V_AND_B32", 1336 [(set i32:$dst, (and i32:$src0, i32:$src1))]>; 1337 defm V_OR_B32 : VOP2_32 <0x0000001c, "V_OR_B32", 1338 [(set i32:$dst, (or i32:$src0, i32:$src1))] 1339 >; 1340 defm V_XOR_B32 : VOP2_32 <0x0000001d, "V_XOR_B32", 1341 [(set i32:$dst, (xor i32:$src0, i32:$src1))] 1342 >; 1343 1344 } // End isCommutable = 1 1345 1346 defm V_BFM_B32 : VOP2_32 <0x0000001e, "V_BFM_B32", 1347 [(set i32:$dst, (AMDGPUbfm i32:$src0, i32:$src1))]>; 1348 defm V_MAC_F32 : VOP2_32 <0x0000001f, "V_MAC_F32", []>; 1349 defm V_MADMK_F32 : VOP2_32 <0x00000020, "V_MADMK_F32", []>; 1350 defm V_MADAK_F32 : VOP2_32 <0x00000021, "V_MADAK_F32", []>; 1351 defm V_BCNT_U32_B32 : VOP2_32 <0x00000022, "V_BCNT_U32_B32", []>; 1352 defm V_MBCNT_LO_U32_B32 : VOP2_32 <0x00000023, "V_MBCNT_LO_U32_B32", []>; 1353 defm V_MBCNT_HI_U32_B32 : VOP2_32 <0x00000024, "V_MBCNT_HI_U32_B32", []>; 1354 1355 let isCommutable = 1, Defs = [VCC] in { // Carry-out goes to VCC 1356 // No patterns so that the scalar instructions are always selected. 1357 // The scalar versions will be replaced with vector when needed later. 1358 defm V_ADD_I32 : VOP2b_32 <0x00000025, "V_ADD_I32", 1359 [(set i32:$dst, (add i32:$src0, i32:$src1))], VSrc_32>; 1360 defm V_SUB_I32 : VOP2b_32 <0x00000026, "V_SUB_I32", 1361 [(set i32:$dst, (sub i32:$src0, i32:$src1))], VSrc_32>; 1362 defm V_SUBREV_I32 : VOP2b_32 <0x00000027, "V_SUBREV_I32", [], VSrc_32, 1363 "V_SUB_I32">; 1364 1365 let Uses = [VCC] in { // Carry-in comes from VCC 1366 defm V_ADDC_U32 : VOP2b_32 <0x00000028, "V_ADDC_U32", 1367 [(set i32:$dst, (adde i32:$src0, i32:$src1))], VReg_32>; 1368 defm V_SUBB_U32 : VOP2b_32 <0x00000029, "V_SUBB_U32", 1369 [(set i32:$dst, (sube i32:$src0, i32:$src1))], VReg_32>; 1370 defm V_SUBBREV_U32 : VOP2b_32 <0x0000002a, "V_SUBBREV_U32", [], VReg_32, 1371 "V_SUBB_U32">; 1372 } // End Uses = [VCC] 1373 } // End isCommutable = 1, Defs = [VCC] 1374 1375 defm V_LDEXP_F32 : VOP2_32 <0x0000002b, "V_LDEXP_F32", []>; 1376 ////def V_CVT_PKACCUM_U8_F32 : VOP2_U8 <0x0000002c, "V_CVT_PKACCUM_U8_F32", []>; 1377 ////def V_CVT_PKNORM_I16_F32 : VOP2_I16 <0x0000002d, "V_CVT_PKNORM_I16_F32", []>; 1378 ////def V_CVT_PKNORM_U16_F32 : VOP2_U16 <0x0000002e, "V_CVT_PKNORM_U16_F32", []>; 1379 defm V_CVT_PKRTZ_F16_F32 : VOP2_32 <0x0000002f, "V_CVT_PKRTZ_F16_F32", 1380 [(set i32:$dst, (int_SI_packf16 f32:$src0, f32:$src1))] 1381 >; 1382 ////def V_CVT_PK_U16_U32 : VOP2_U16 <0x00000030, "V_CVT_PK_U16_U32", []>; 1383 ////def V_CVT_PK_I16_I32 : VOP2_I16 <0x00000031, "V_CVT_PK_I16_I32", []>; 1384 1385 //===----------------------------------------------------------------------===// 1386 // VOP3 Instructions 1387 //===----------------------------------------------------------------------===// 1388 1389 let neverHasSideEffects = 1 in { 1390 1391 defm V_MAD_LEGACY_F32 : VOP3_32 <0x00000140, "V_MAD_LEGACY_F32", []>; 1392 defm V_MAD_F32 : VOP3_32 <0x00000141, "V_MAD_F32", 1393 [(set f32:$dst, (fadd (fmul f32:$src0, f32:$src1), f32:$src2))] 1394 >; 1395 defm V_MAD_I32_I24 : VOP3_32 <0x00000142, "V_MAD_I32_I24", 1396 [(set i32:$dst, (AMDGPUmad_i24 i32:$src0, i32:$src1, i32:$src2))] 1397 >; 1398 defm V_MAD_U32_U24 : VOP3_32 <0x00000143, "V_MAD_U32_U24", 1399 [(set i32:$dst, (AMDGPUmad_u24 i32:$src0, i32:$src1, i32:$src2))] 1400 >; 1401 1402 } // End neverHasSideEffects 1403 1404 defm V_CUBEID_F32 : VOP3_32 <0x00000144, "V_CUBEID_F32", []>; 1405 defm V_CUBESC_F32 : VOP3_32 <0x00000145, "V_CUBESC_F32", []>; 1406 defm V_CUBETC_F32 : VOP3_32 <0x00000146, "V_CUBETC_F32", []>; 1407 defm V_CUBEMA_F32 : VOP3_32 <0x00000147, "V_CUBEMA_F32", []>; 1408 1409 let neverHasSideEffects = 1, mayLoad = 0, mayStore = 0 in { 1410 defm V_BFE_U32 : VOP3_32 <0x00000148, "V_BFE_U32", 1411 [(set i32:$dst, (AMDGPUbfe_u32 i32:$src0, i32:$src1, i32:$src2))]>; 1412 defm V_BFE_I32 : VOP3_32 <0x00000149, "V_BFE_I32", 1413 [(set i32:$dst, (AMDGPUbfe_i32 i32:$src0, i32:$src1, i32:$src2))]>; 1414 } 1415 1416 defm V_BFI_B32 : VOP3_32 <0x0000014a, "V_BFI_B32", 1417 [(set i32:$dst, (AMDGPUbfi i32:$src0, i32:$src1, i32:$src2))]>; 1418 defm V_FMA_F32 : VOP3_32 <0x0000014b, "V_FMA_F32", 1419 [(set f32:$dst, (fma f32:$src0, f32:$src1, f32:$src2))] 1420 >; 1421 def V_FMA_F64 : VOP3_64 <0x0000014c, "V_FMA_F64", 1422 [(set f64:$dst, (fma f64:$src0, f64:$src1, f64:$src2))] 1423 >; 1424 //def V_LERP_U8 : VOP3_U8 <0x0000014d, "V_LERP_U8", []>; 1425 defm V_ALIGNBIT_B32 : VOP3_32 <0x0000014e, "V_ALIGNBIT_B32", []>; 1426 1427 defm V_ALIGNBYTE_B32 : VOP3_32 <0x0000014f, "V_ALIGNBYTE_B32", []>; 1428 defm V_MULLIT_F32 : VOP3_32 <0x00000150, "V_MULLIT_F32", []>; 1429 ////def V_MIN3_F32 : VOP3_MIN3 <0x00000151, "V_MIN3_F32", []>; 1430 ////def V_MIN3_I32 : VOP3_MIN3 <0x00000152, "V_MIN3_I32", []>; 1431 ////def V_MIN3_U32 : VOP3_MIN3 <0x00000153, "V_MIN3_U32", []>; 1432 ////def V_MAX3_F32 : VOP3_MAX3 <0x00000154, "V_MAX3_F32", []>; 1433 ////def V_MAX3_I32 : VOP3_MAX3 <0x00000155, "V_MAX3_I32", []>; 1434 ////def V_MAX3_U32 : VOP3_MAX3 <0x00000156, "V_MAX3_U32", []>; 1435 ////def V_MED3_F32 : VOP3_MED3 <0x00000157, "V_MED3_F32", []>; 1436 ////def V_MED3_I32 : VOP3_MED3 <0x00000158, "V_MED3_I32", []>; 1437 ////def V_MED3_U32 : VOP3_MED3 <0x00000159, "V_MED3_U32", []>; 1438 //def V_SAD_U8 : VOP3_U8 <0x0000015a, "V_SAD_U8", []>; 1439 //def V_SAD_HI_U8 : VOP3_U8 <0x0000015b, "V_SAD_HI_U8", []>; 1440 //def V_SAD_U16 : VOP3_U16 <0x0000015c, "V_SAD_U16", []>; 1441 defm V_SAD_U32 : VOP3_32 <0x0000015d, "V_SAD_U32", []>; 1442 ////def V_CVT_PK_U8_F32 : VOP3_U8 <0x0000015e, "V_CVT_PK_U8_F32", []>; 1443 defm V_DIV_FIXUP_F32 : VOP3_32 <0x0000015f, "V_DIV_FIXUP_F32", 1444 [(set f32:$dst, (AMDGPUdiv_fixup f32:$src0, f32:$src1, f32:$src2))] 1445 >; 1446 def V_DIV_FIXUP_F64 : VOP3_64 <0x00000160, "V_DIV_FIXUP_F64", 1447 [(set f64:$dst, (AMDGPUdiv_fixup f64:$src0, f64:$src1, f64:$src2))] 1448 >; 1449 1450 def V_LSHL_B64 : VOP3_64_32 <0x00000161, "V_LSHL_B64", 1451 [(set i64:$dst, (shl i64:$src0, i32:$src1))] 1452 >; 1453 def V_LSHR_B64 : VOP3_64_32 <0x00000162, "V_LSHR_B64", 1454 [(set i64:$dst, (srl i64:$src0, i32:$src1))] 1455 >; 1456 def V_ASHR_I64 : VOP3_64_32 <0x00000163, "V_ASHR_I64", 1457 [(set i64:$dst, (sra i64:$src0, i32:$src1))] 1458 >; 1459 1460 let isCommutable = 1 in { 1461 1462 def V_ADD_F64 : VOP3_64 <0x00000164, "V_ADD_F64", []>; 1463 def V_MUL_F64 : VOP3_64 <0x00000165, "V_MUL_F64", []>; 1464 def V_MIN_F64 : VOP3_64 <0x00000166, "V_MIN_F64", []>; 1465 def V_MAX_F64 : VOP3_64 <0x00000167, "V_MAX_F64", []>; 1466 1467 } // isCommutable = 1 1468 1469 def V_LDEXP_F64 : VOP3_64 <0x00000168, "V_LDEXP_F64", []>; 1470 1471 let isCommutable = 1 in { 1472 1473 defm V_MUL_LO_U32 : VOP3_32 <0x00000169, "V_MUL_LO_U32", []>; 1474 defm V_MUL_HI_U32 : VOP3_32 <0x0000016a, "V_MUL_HI_U32", []>; 1475 defm V_MUL_LO_I32 : VOP3_32 <0x0000016b, "V_MUL_LO_I32", []>; 1476 defm V_MUL_HI_I32 : VOP3_32 <0x0000016c, "V_MUL_HI_I32", []>; 1477 1478 } // isCommutable = 1 1479 1480 def V_DIV_SCALE_F32 : VOP3b_32 <0x0000016d, "V_DIV_SCALE_F32", []>; 1481 1482 // Double precision division pre-scale. 1483 def V_DIV_SCALE_F64 : VOP3b_64 <0x0000016e, "V_DIV_SCALE_F64", []>; 1484 1485 defm V_DIV_FMAS_F32 : VOP3_32 <0x0000016f, "V_DIV_FMAS_F32", 1486 [(set f32:$dst, (AMDGPUdiv_fmas f32:$src0, f32:$src1, f32:$src2))] 1487 >; 1488 def V_DIV_FMAS_F64 : VOP3_64 <0x00000170, "V_DIV_FMAS_F64", 1489 [(set f64:$dst, (AMDGPUdiv_fmas f64:$src0, f64:$src1, f64:$src2))] 1490 >; 1491 //def V_MSAD_U8 : VOP3_U8 <0x00000171, "V_MSAD_U8", []>; 1492 //def V_QSAD_U8 : VOP3_U8 <0x00000172, "V_QSAD_U8", []>; 1493 //def V_MQSAD_U8 : VOP3_U8 <0x00000173, "V_MQSAD_U8", []>; 1494 def V_TRIG_PREOP_F64 : VOP3_64_32 <0x00000174, "V_TRIG_PREOP_F64", 1495 [(set f64:$dst, (AMDGPUtrig_preop f64:$src0, i32:$src1))] 1496 >; 1497 1498 //===----------------------------------------------------------------------===// 1499 // Pseudo Instructions 1500 //===----------------------------------------------------------------------===// 1501 1502 let isCodeGenOnly = 1, isPseudo = 1 in { 1503 1504 def V_MOV_I1 : InstSI < 1505 (outs VReg_1:$dst), 1506 (ins i1imm:$src), 1507 "", [(set i1:$dst, (imm:$src))] 1508 >; 1509 1510 def V_AND_I1 : InstSI < 1511 (outs VReg_1:$dst), (ins VReg_1:$src0, VReg_1:$src1), "", 1512 [(set i1:$dst, (and i1:$src0, i1:$src1))] 1513 >; 1514 1515 def V_OR_I1 : InstSI < 1516 (outs VReg_1:$dst), (ins VReg_1:$src0, VReg_1:$src1), "", 1517 [(set i1:$dst, (or i1:$src0, i1:$src1))] 1518 >; 1519 1520 // SI pseudo instructions. These are used by the CFG structurizer pass 1521 // and should be lowered to ISA instructions prior to codegen. 1522 1523 let mayLoad = 1, mayStore = 1, hasSideEffects = 1, 1524 Uses = [EXEC], Defs = [EXEC] in { 1525 1526 let isBranch = 1, isTerminator = 1 in { 1527 1528 def SI_IF: InstSI < 1529 (outs SReg_64:$dst), 1530 (ins SReg_64:$vcc, brtarget:$target), 1531 "", 1532 [(set i64:$dst, (int_SI_if i1:$vcc, bb:$target))] 1533 >; 1534 1535 def SI_ELSE : InstSI < 1536 (outs SReg_64:$dst), 1537 (ins SReg_64:$src, brtarget:$target), 1538 "", 1539 [(set i64:$dst, (int_SI_else i64:$src, bb:$target))] 1540 > { 1541 let Constraints = "$src = $dst"; 1542 } 1543 1544 def SI_LOOP : InstSI < 1545 (outs), 1546 (ins SReg_64:$saved, brtarget:$target), 1547 "SI_LOOP $saved, $target", 1548 [(int_SI_loop i64:$saved, bb:$target)] 1549 >; 1550 1551 } // end isBranch = 1, isTerminator = 1 1552 1553 def SI_BREAK : InstSI < 1554 (outs SReg_64:$dst), 1555 (ins SReg_64:$src), 1556 "SI_ELSE $dst, $src", 1557 [(set i64:$dst, (int_SI_break i64:$src))] 1558 >; 1559 1560 def SI_IF_BREAK : InstSI < 1561 (outs SReg_64:$dst), 1562 (ins SReg_64:$vcc, SReg_64:$src), 1563 "SI_IF_BREAK $dst, $vcc, $src", 1564 [(set i64:$dst, (int_SI_if_break i1:$vcc, i64:$src))] 1565 >; 1566 1567 def SI_ELSE_BREAK : InstSI < 1568 (outs SReg_64:$dst), 1569 (ins SReg_64:$src0, SReg_64:$src1), 1570 "SI_ELSE_BREAK $dst, $src0, $src1", 1571 [(set i64:$dst, (int_SI_else_break i64:$src0, i64:$src1))] 1572 >; 1573 1574 def SI_END_CF : InstSI < 1575 (outs), 1576 (ins SReg_64:$saved), 1577 "SI_END_CF $saved", 1578 [(int_SI_end_cf i64:$saved)] 1579 >; 1580 1581 def SI_KILL : InstSI < 1582 (outs), 1583 (ins VSrc_32:$src), 1584 "SI_KILL $src", 1585 [(int_AMDGPU_kill f32:$src)] 1586 >; 1587 1588 } // end mayLoad = 1, mayStore = 1, hasSideEffects = 1 1589 // Uses = [EXEC], Defs = [EXEC] 1590 1591 let Uses = [EXEC], Defs = [EXEC,VCC,M0] in { 1592 1593 //defm SI_ : RegisterLoadStore <VReg_32, FRAMEri, ADDRIndirect>; 1594 1595 let UseNamedOperandTable = 1 in { 1596 1597 def SI_RegisterLoad : InstSI < 1598 (outs VReg_32:$dst, SReg_64:$temp), 1599 (ins FRAMEri32:$addr, i32imm:$chan), 1600 "", [] 1601 > { 1602 let isRegisterLoad = 1; 1603 let mayLoad = 1; 1604 } 1605 1606 class SIRegStore<dag outs> : InstSI < 1607 outs, 1608 (ins VReg_32:$val, FRAMEri32:$addr, i32imm:$chan), 1609 "", [] 1610 > { 1611 let isRegisterStore = 1; 1612 let mayStore = 1; 1613 } 1614 1615 let usesCustomInserter = 1 in { 1616 def SI_RegisterStorePseudo : SIRegStore<(outs)>; 1617 } // End usesCustomInserter = 1 1618 def SI_RegisterStore : SIRegStore<(outs SReg_64:$temp)>; 1619 1620 1621 } // End UseNamedOperandTable = 1 1622 1623 def SI_INDIRECT_SRC : InstSI < 1624 (outs VReg_32:$dst, SReg_64:$temp), 1625 (ins unknown:$src, VSrc_32:$idx, i32imm:$off), 1626 "SI_INDIRECT_SRC $dst, $temp, $src, $idx, $off", 1627 [] 1628 >; 1629 1630 class SI_INDIRECT_DST<RegisterClass rc> : InstSI < 1631 (outs rc:$dst, SReg_64:$temp), 1632 (ins unknown:$src, VSrc_32:$idx, i32imm:$off, VReg_32:$val), 1633 "SI_INDIRECT_DST $dst, $temp, $src, $idx, $off, $val", 1634 [] 1635 > { 1636 let Constraints = "$src = $dst"; 1637 } 1638 1639 def SI_INDIRECT_DST_V1 : SI_INDIRECT_DST<VReg_32>; 1640 def SI_INDIRECT_DST_V2 : SI_INDIRECT_DST<VReg_64>; 1641 def SI_INDIRECT_DST_V4 : SI_INDIRECT_DST<VReg_128>; 1642 def SI_INDIRECT_DST_V8 : SI_INDIRECT_DST<VReg_256>; 1643 def SI_INDIRECT_DST_V16 : SI_INDIRECT_DST<VReg_512>; 1644 1645 } // Uses = [EXEC,VCC,M0], Defs = [EXEC,VCC,M0] 1646 1647 let usesCustomInserter = 1 in { 1648 1649 // This pseudo instruction takes a pointer as input and outputs a resource 1650 // constant that can be used with the ADDR64 MUBUF instructions. 1651 def SI_ADDR64_RSRC : InstSI < 1652 (outs SReg_128:$srsrc), 1653 (ins SSrc_64:$ptr), 1654 "", [] 1655 >; 1656 1657 def V_SUB_F64 : InstSI < 1658 (outs VReg_64:$dst), 1659 (ins VReg_64:$src0, VReg_64:$src1), 1660 "V_SUB_F64 $dst, $src0, $src1", 1661 [(set f64:$dst, (fsub f64:$src0, f64:$src1))] 1662 >; 1663 1664 } // end usesCustomInserter 1665 1666 multiclass SI_SPILL_SGPR <RegisterClass sgpr_class> { 1667 1668 def _SAVE : InstSI < 1669 (outs VReg_32:$dst), 1670 (ins sgpr_class:$src, i32imm:$frame_idx), 1671 "", [] 1672 >; 1673 1674 def _RESTORE : InstSI < 1675 (outs sgpr_class:$dst), 1676 (ins VReg_32:$src, i32imm:$frame_idx), 1677 "", [] 1678 >; 1679 1680 } 1681 1682 defm SI_SPILL_S32 : SI_SPILL_SGPR <SReg_32>; 1683 defm SI_SPILL_S64 : SI_SPILL_SGPR <SReg_64>; 1684 defm SI_SPILL_S128 : SI_SPILL_SGPR <SReg_128>; 1685 defm SI_SPILL_S256 : SI_SPILL_SGPR <SReg_256>; 1686 defm SI_SPILL_S512 : SI_SPILL_SGPR <SReg_512>; 1687 1688 } // end IsCodeGenOnly, isPseudo 1689 1690 } // end SubtargetPredicate = SI 1691 1692 let Predicates = [isSI] in { 1693 1694 def : Pat< 1695 (int_AMDGPU_cndlt f32:$src0, f32:$src1, f32:$src2), 1696 (V_CNDMASK_B32_e64 $src2, $src1, (V_CMP_GT_F32_e64 0, $src0)) 1697 >; 1698 1699 def : Pat < 1700 (int_AMDGPU_kilp), 1701 (SI_KILL 0xbf800000) 1702 >; 1703 1704 /* int_SI_vs_load_input */ 1705 def : Pat< 1706 (SIload_input v4i32:$tlst, imm:$attr_offset, i32:$buf_idx_vgpr), 1707 (BUFFER_LOAD_FORMAT_XYZW_IDXEN $tlst, $buf_idx_vgpr, imm:$attr_offset, 0, 0, 0, 0) 1708 >; 1709 1710 /* int_SI_export */ 1711 def : Pat < 1712 (int_SI_export imm:$en, imm:$vm, imm:$done, imm:$tgt, imm:$compr, 1713 f32:$src0, f32:$src1, f32:$src2, f32:$src3), 1714 (EXP imm:$en, imm:$tgt, imm:$compr, imm:$done, imm:$vm, 1715 $src0, $src1, $src2, $src3) 1716 >; 1717 1718 //===----------------------------------------------------------------------===// 1719 // SMRD Patterns 1720 //===----------------------------------------------------------------------===// 1721 1722 multiclass SMRD_Pattern <SMRD Instr_IMM, SMRD Instr_SGPR, ValueType vt> { 1723 1724 // 1. Offset as 8bit DWORD immediate 1725 def : Pat < 1726 (constant_load (add i64:$sbase, (i64 IMM8bitDWORD:$offset))), 1727 (vt (Instr_IMM $sbase, (as_dword_i32imm $offset))) 1728 >; 1729 1730 // 2. Offset loaded in an 32bit SGPR 1731 def : Pat < 1732 (constant_load (add i64:$sbase, (i64 IMM32bit:$offset))), 1733 (vt (Instr_SGPR $sbase, (S_MOV_B32 (i32 (as_i32imm $offset))))) 1734 >; 1735 1736 // 3. No offset at all 1737 def : Pat < 1738 (constant_load i64:$sbase), 1739 (vt (Instr_IMM $sbase, 0)) 1740 >; 1741 } 1742 1743 defm : SMRD_Pattern <S_LOAD_DWORD_IMM, S_LOAD_DWORD_SGPR, f32>; 1744 defm : SMRD_Pattern <S_LOAD_DWORD_IMM, S_LOAD_DWORD_SGPR, i32>; 1745 defm : SMRD_Pattern <S_LOAD_DWORDX2_IMM, S_LOAD_DWORDX2_SGPR, v2i32>; 1746 defm : SMRD_Pattern <S_LOAD_DWORDX4_IMM, S_LOAD_DWORDX4_SGPR, v4i32>; 1747 defm : SMRD_Pattern <S_LOAD_DWORDX8_IMM, S_LOAD_DWORDX8_SGPR, v32i8>; 1748 defm : SMRD_Pattern <S_LOAD_DWORDX8_IMM, S_LOAD_DWORDX8_SGPR, v8i32>; 1749 defm : SMRD_Pattern <S_LOAD_DWORDX16_IMM, S_LOAD_DWORDX16_SGPR, v16i32>; 1750 1751 // 1. Offset as 8bit DWORD immediate 1752 def : Pat < 1753 (SIload_constant v4i32:$sbase, IMM8bitDWORD:$offset), 1754 (S_BUFFER_LOAD_DWORD_IMM $sbase, (as_dword_i32imm $offset)) 1755 >; 1756 1757 // 2. Offset loaded in an 32bit SGPR 1758 def : Pat < 1759 (SIload_constant v4i32:$sbase, imm:$offset), 1760 (S_BUFFER_LOAD_DWORD_SGPR $sbase, (S_MOV_B32 imm:$offset)) 1761 >; 1762 1763 } // Predicates = [isSI] in { 1764 1765 //===----------------------------------------------------------------------===// 1766 // SOP1 Patterns 1767 //===----------------------------------------------------------------------===// 1768 1769 let Predicates = [isSI, isCFDepth0] in { 1770 1771 def : Pat < 1772 (i64 (ctpop i64:$src)), 1773 (INSERT_SUBREG (INSERT_SUBREG (i64 (IMPLICIT_DEF)), 1774 (S_BCNT1_I32_B64 $src), sub0), 1775 (S_MOV_B32 0), sub1) 1776 >; 1777 1778 } // Predicates = [isSI, isCFDepth0] 1779 1780 let Predicates = [isSI] in { 1781 //===----------------------------------------------------------------------===// 1782 // SOP2 Patterns 1783 //===----------------------------------------------------------------------===// 1784 1785 def : Pat < 1786 (i1 (xor i1:$src0, i1:$src1)), 1787 (S_XOR_B64 $src0, $src1) 1788 >; 1789 1790 //===----------------------------------------------------------------------===// 1791 // SOPP Patterns 1792 //===----------------------------------------------------------------------===// 1793 1794 def : Pat < 1795 (int_AMDGPU_barrier_global), 1796 (S_BARRIER) 1797 >; 1798 1799 //===----------------------------------------------------------------------===// 1800 // VOP1 Patterns 1801 //===----------------------------------------------------------------------===// 1802 1803 def : RcpPat<V_RCP_F32_e32, f32>; 1804 def : RcpPat<V_RCP_F64_e32, f64>; 1805 defm : RsqPat<V_RSQ_F32_e32, f32>; 1806 defm : RsqPat<V_RSQ_F64_e32, f64>; 1807 1808 //===----------------------------------------------------------------------===// 1809 // VOP2 Patterns 1810 //===----------------------------------------------------------------------===// 1811 1812 class BinOp64Pat <SDNode node, Instruction inst> : Pat < 1813 (node i64:$src0, i64:$src1), 1814 (INSERT_SUBREG (INSERT_SUBREG (i64 (IMPLICIT_DEF)), 1815 (inst (EXTRACT_SUBREG i64:$src0, sub0), 1816 (EXTRACT_SUBREG i64:$src1, sub0)), sub0), 1817 (inst (EXTRACT_SUBREG i64:$src0, sub1), 1818 (EXTRACT_SUBREG i64:$src1, sub1)), sub1) 1819 >; 1820 1821 def : BinOp64Pat <or, V_OR_B32_e32>; 1822 def : BinOp64Pat <xor, V_XOR_B32_e32>; 1823 1824 class SextInReg <ValueType vt, int ShiftAmt> : Pat < 1825 (sext_inreg i32:$src0, vt), 1826 (V_ASHRREV_I32_e32 ShiftAmt, (V_LSHLREV_B32_e32 ShiftAmt, $src0)) 1827 >; 1828 1829 def : SextInReg <i8, 24>; 1830 def : SextInReg <i16, 16>; 1831 1832 def : Pat < 1833 (i32 (add (i32 (ctpop i32:$popcnt)), i32:$val)), 1834 (V_BCNT_U32_B32_e32 $popcnt, $val) 1835 >; 1836 1837 def : Pat < 1838 (i32 (ctpop i32:$popcnt)), 1839 (V_BCNT_U32_B32_e64 $popcnt, 0, 0, 0) 1840 >; 1841 1842 def : Pat < 1843 (i64 (ctpop i64:$src)), 1844 (INSERT_SUBREG 1845 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), 1846 (V_BCNT_U32_B32_e32 (EXTRACT_SUBREG $src, sub1), 1847 (V_BCNT_U32_B32_e64 (EXTRACT_SUBREG $src, sub0), 0, 0, 0)), 1848 sub0), 1849 (V_MOV_B32_e32 0), sub1) 1850 >; 1851 1852 /********** ======================= **********/ 1853 /********** Image sampling patterns **********/ 1854 /********** ======================= **********/ 1855 1856 class SampleRawPattern<SDPatternOperator name, MIMG opcode, ValueType vt> : Pat < 1857 (name vt:$addr, v32i8:$rsrc, v16i8:$sampler, i32:$dmask, i32:$unorm, 1858 i32:$r128, i32:$da, i32:$glc, i32:$slc, i32:$tfe, i32:$lwe), 1859 (opcode (as_i32imm $dmask), (as_i1imm $unorm), (as_i1imm $glc), (as_i1imm $da), 1860 (as_i1imm $r128), (as_i1imm $tfe), (as_i1imm $lwe), (as_i1imm $slc), 1861 $addr, $rsrc, $sampler) 1862 >; 1863 1864 // Only the variants which make sense are defined. 1865 def : SampleRawPattern<int_SI_gather4, IMAGE_GATHER4_V4_V2, v2i32>; 1866 def : SampleRawPattern<int_SI_gather4, IMAGE_GATHER4_V4_V4, v4i32>; 1867 def : SampleRawPattern<int_SI_gather4_cl, IMAGE_GATHER4_CL_V4_V4, v4i32>; 1868 def : SampleRawPattern<int_SI_gather4_l, IMAGE_GATHER4_L_V4_V4, v4i32>; 1869 def : SampleRawPattern<int_SI_gather4_b, IMAGE_GATHER4_B_V4_V4, v4i32>; 1870 def : SampleRawPattern<int_SI_gather4_b_cl, IMAGE_GATHER4_B_CL_V4_V4, v4i32>; 1871 def : SampleRawPattern<int_SI_gather4_b_cl, IMAGE_GATHER4_B_CL_V4_V8, v8i32>; 1872 def : SampleRawPattern<int_SI_gather4_lz, IMAGE_GATHER4_LZ_V4_V2, v2i32>; 1873 def : SampleRawPattern<int_SI_gather4_lz, IMAGE_GATHER4_LZ_V4_V4, v4i32>; 1874 1875 def : SampleRawPattern<int_SI_gather4_c, IMAGE_GATHER4_C_V4_V4, v4i32>; 1876 def : SampleRawPattern<int_SI_gather4_c_cl, IMAGE_GATHER4_C_CL_V4_V4, v4i32>; 1877 def : SampleRawPattern<int_SI_gather4_c_cl, IMAGE_GATHER4_C_CL_V4_V8, v8i32>; 1878 def : SampleRawPattern<int_SI_gather4_c_l, IMAGE_GATHER4_C_L_V4_V4, v4i32>; 1879 def : SampleRawPattern<int_SI_gather4_c_l, IMAGE_GATHER4_C_L_V4_V8, v8i32>; 1880 def : SampleRawPattern<int_SI_gather4_c_b, IMAGE_GATHER4_C_B_V4_V4, v4i32>; 1881 def : SampleRawPattern<int_SI_gather4_c_b, IMAGE_GATHER4_C_B_V4_V8, v8i32>; 1882 def : SampleRawPattern<int_SI_gather4_c_b_cl, IMAGE_GATHER4_C_B_CL_V4_V8, v8i32>; 1883 def : SampleRawPattern<int_SI_gather4_c_lz, IMAGE_GATHER4_C_LZ_V4_V4, v4i32>; 1884 1885 def : SampleRawPattern<int_SI_gather4_o, IMAGE_GATHER4_O_V4_V4, v4i32>; 1886 def : SampleRawPattern<int_SI_gather4_cl_o, IMAGE_GATHER4_CL_O_V4_V4, v4i32>; 1887 def : SampleRawPattern<int_SI_gather4_cl_o, IMAGE_GATHER4_CL_O_V4_V8, v8i32>; 1888 def : SampleRawPattern<int_SI_gather4_l_o, IMAGE_GATHER4_L_O_V4_V4, v4i32>; 1889 def : SampleRawPattern<int_SI_gather4_l_o, IMAGE_GATHER4_L_O_V4_V8, v8i32>; 1890 def : SampleRawPattern<int_SI_gather4_b_o, IMAGE_GATHER4_B_O_V4_V4, v4i32>; 1891 def : SampleRawPattern<int_SI_gather4_b_o, IMAGE_GATHER4_B_O_V4_V8, v8i32>; 1892 def : SampleRawPattern<int_SI_gather4_b_cl_o, IMAGE_GATHER4_B_CL_O_V4_V8, v8i32>; 1893 def : SampleRawPattern<int_SI_gather4_lz_o, IMAGE_GATHER4_LZ_O_V4_V4, v4i32>; 1894 1895 def : SampleRawPattern<int_SI_gather4_c_o, IMAGE_GATHER4_C_O_V4_V4, v4i32>; 1896 def : SampleRawPattern<int_SI_gather4_c_o, IMAGE_GATHER4_C_O_V4_V8, v8i32>; 1897 def : SampleRawPattern<int_SI_gather4_c_cl_o, IMAGE_GATHER4_C_CL_O_V4_V8, v8i32>; 1898 def : SampleRawPattern<int_SI_gather4_c_l_o, IMAGE_GATHER4_C_L_O_V4_V8, v8i32>; 1899 def : SampleRawPattern<int_SI_gather4_c_b_o, IMAGE_GATHER4_C_B_O_V4_V8, v8i32>; 1900 def : SampleRawPattern<int_SI_gather4_c_b_cl_o, IMAGE_GATHER4_C_B_CL_O_V4_V8, v8i32>; 1901 def : SampleRawPattern<int_SI_gather4_c_lz_o, IMAGE_GATHER4_C_LZ_O_V4_V4, v4i32>; 1902 def : SampleRawPattern<int_SI_gather4_c_lz_o, IMAGE_GATHER4_C_LZ_O_V4_V8, v8i32>; 1903 1904 def : SampleRawPattern<int_SI_getlod, IMAGE_GET_LOD_V4_V1, i32>; 1905 def : SampleRawPattern<int_SI_getlod, IMAGE_GET_LOD_V4_V2, v2i32>; 1906 def : SampleRawPattern<int_SI_getlod, IMAGE_GET_LOD_V4_V4, v4i32>; 1907 1908 /* SIsample for simple 1D texture lookup */ 1909 def : Pat < 1910 (SIsample i32:$addr, v32i8:$rsrc, v4i32:$sampler, imm), 1911 (IMAGE_SAMPLE_V4_V1 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler) 1912 >; 1913 1914 class SamplePattern<SDNode name, MIMG opcode, ValueType vt> : Pat < 1915 (name vt:$addr, v32i8:$rsrc, v4i32:$sampler, imm), 1916 (opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler) 1917 >; 1918 1919 class SampleRectPattern<SDNode name, MIMG opcode, ValueType vt> : Pat < 1920 (name vt:$addr, v32i8:$rsrc, v4i32:$sampler, TEX_RECT), 1921 (opcode 0xf, 1, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler) 1922 >; 1923 1924 class SampleArrayPattern<SDNode name, MIMG opcode, ValueType vt> : Pat < 1925 (name vt:$addr, v32i8:$rsrc, v4i32:$sampler, TEX_ARRAY), 1926 (opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc, $sampler) 1927 >; 1928 1929 class SampleShadowPattern<SDNode name, MIMG opcode, 1930 ValueType vt> : Pat < 1931 (name vt:$addr, v32i8:$rsrc, v4i32:$sampler, TEX_SHADOW), 1932 (opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler) 1933 >; 1934 1935 class SampleShadowArrayPattern<SDNode name, MIMG opcode, 1936 ValueType vt> : Pat < 1937 (name vt:$addr, v32i8:$rsrc, v4i32:$sampler, TEX_SHADOW_ARRAY), 1938 (opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc, $sampler) 1939 >; 1940 1941 /* SIsample* for texture lookups consuming more address parameters */ 1942 multiclass SamplePatterns<MIMG sample, MIMG sample_c, MIMG sample_l, 1943 MIMG sample_c_l, MIMG sample_b, MIMG sample_c_b, 1944 MIMG sample_d, MIMG sample_c_d, ValueType addr_type> { 1945 def : SamplePattern <SIsample, sample, addr_type>; 1946 def : SampleRectPattern <SIsample, sample, addr_type>; 1947 def : SampleArrayPattern <SIsample, sample, addr_type>; 1948 def : SampleShadowPattern <SIsample, sample_c, addr_type>; 1949 def : SampleShadowArrayPattern <SIsample, sample_c, addr_type>; 1950 1951 def : SamplePattern <SIsamplel, sample_l, addr_type>; 1952 def : SampleArrayPattern <SIsamplel, sample_l, addr_type>; 1953 def : SampleShadowPattern <SIsamplel, sample_c_l, addr_type>; 1954 def : SampleShadowArrayPattern <SIsamplel, sample_c_l, addr_type>; 1955 1956 def : SamplePattern <SIsampleb, sample_b, addr_type>; 1957 def : SampleArrayPattern <SIsampleb, sample_b, addr_type>; 1958 def : SampleShadowPattern <SIsampleb, sample_c_b, addr_type>; 1959 def : SampleShadowArrayPattern <SIsampleb, sample_c_b, addr_type>; 1960 1961 def : SamplePattern <SIsampled, sample_d, addr_type>; 1962 def : SampleArrayPattern <SIsampled, sample_d, addr_type>; 1963 def : SampleShadowPattern <SIsampled, sample_c_d, addr_type>; 1964 def : SampleShadowArrayPattern <SIsampled, sample_c_d, addr_type>; 1965 } 1966 1967 defm : SamplePatterns<IMAGE_SAMPLE_V4_V2, IMAGE_SAMPLE_C_V4_V2, 1968 IMAGE_SAMPLE_L_V4_V2, IMAGE_SAMPLE_C_L_V4_V2, 1969 IMAGE_SAMPLE_B_V4_V2, IMAGE_SAMPLE_C_B_V4_V2, 1970 IMAGE_SAMPLE_D_V4_V2, IMAGE_SAMPLE_C_D_V4_V2, 1971 v2i32>; 1972 defm : SamplePatterns<IMAGE_SAMPLE_V4_V4, IMAGE_SAMPLE_C_V4_V4, 1973 IMAGE_SAMPLE_L_V4_V4, IMAGE_SAMPLE_C_L_V4_V4, 1974 IMAGE_SAMPLE_B_V4_V4, IMAGE_SAMPLE_C_B_V4_V4, 1975 IMAGE_SAMPLE_D_V4_V4, IMAGE_SAMPLE_C_D_V4_V4, 1976 v4i32>; 1977 defm : SamplePatterns<IMAGE_SAMPLE_V4_V8, IMAGE_SAMPLE_C_V4_V8, 1978 IMAGE_SAMPLE_L_V4_V8, IMAGE_SAMPLE_C_L_V4_V8, 1979 IMAGE_SAMPLE_B_V4_V8, IMAGE_SAMPLE_C_B_V4_V8, 1980 IMAGE_SAMPLE_D_V4_V8, IMAGE_SAMPLE_C_D_V4_V8, 1981 v8i32>; 1982 defm : SamplePatterns<IMAGE_SAMPLE_V4_V16, IMAGE_SAMPLE_C_V4_V16, 1983 IMAGE_SAMPLE_L_V4_V16, IMAGE_SAMPLE_C_L_V4_V16, 1984 IMAGE_SAMPLE_B_V4_V16, IMAGE_SAMPLE_C_B_V4_V16, 1985 IMAGE_SAMPLE_D_V4_V16, IMAGE_SAMPLE_C_D_V4_V16, 1986 v16i32>; 1987 1988 /* int_SI_imageload for texture fetches consuming varying address parameters */ 1989 class ImageLoadPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat < 1990 (name addr_type:$addr, v32i8:$rsrc, imm), 1991 (opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc) 1992 >; 1993 1994 class ImageLoadArrayPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat < 1995 (name addr_type:$addr, v32i8:$rsrc, TEX_ARRAY), 1996 (opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc) 1997 >; 1998 1999 class ImageLoadMSAAPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat < 2000 (name addr_type:$addr, v32i8:$rsrc, TEX_MSAA), 2001 (opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc) 2002 >; 2003 2004 class ImageLoadArrayMSAAPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat < 2005 (name addr_type:$addr, v32i8:$rsrc, TEX_ARRAY_MSAA), 2006 (opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc) 2007 >; 2008 2009 multiclass ImageLoadPatterns<MIMG opcode, ValueType addr_type> { 2010 def : ImageLoadPattern <int_SI_imageload, opcode, addr_type>; 2011 def : ImageLoadArrayPattern <int_SI_imageload, opcode, addr_type>; 2012 } 2013 2014 multiclass ImageLoadMSAAPatterns<MIMG opcode, ValueType addr_type> { 2015 def : ImageLoadMSAAPattern <int_SI_imageload, opcode, addr_type>; 2016 def : ImageLoadArrayMSAAPattern <int_SI_imageload, opcode, addr_type>; 2017 } 2018 2019 defm : ImageLoadPatterns<IMAGE_LOAD_MIP_V4_V2, v2i32>; 2020 defm : ImageLoadPatterns<IMAGE_LOAD_MIP_V4_V4, v4i32>; 2021 2022 defm : ImageLoadMSAAPatterns<IMAGE_LOAD_V4_V2, v2i32>; 2023 defm : ImageLoadMSAAPatterns<IMAGE_LOAD_V4_V4, v4i32>; 2024 2025 /* Image resource information */ 2026 def : Pat < 2027 (int_SI_resinfo i32:$mipid, v32i8:$rsrc, imm), 2028 (IMAGE_GET_RESINFO_V4_V1 0xf, 0, 0, 0, 0, 0, 0, 0, (V_MOV_B32_e32 $mipid), $rsrc) 2029 >; 2030 2031 def : Pat < 2032 (int_SI_resinfo i32:$mipid, v32i8:$rsrc, TEX_ARRAY), 2033 (IMAGE_GET_RESINFO_V4_V1 0xf, 0, 0, 1, 0, 0, 0, 0, (V_MOV_B32_e32 $mipid), $rsrc) 2034 >; 2035 2036 def : Pat < 2037 (int_SI_resinfo i32:$mipid, v32i8:$rsrc, TEX_ARRAY_MSAA), 2038 (IMAGE_GET_RESINFO_V4_V1 0xf, 0, 0, 1, 0, 0, 0, 0, (V_MOV_B32_e32 $mipid), $rsrc) 2039 >; 2040 2041 /********** ============================================ **********/ 2042 /********** Extraction, Insertion, Building and Casting **********/ 2043 /********** ============================================ **********/ 2044 2045 foreach Index = 0-2 in { 2046 def Extract_Element_v2i32_#Index : Extract_Element < 2047 i32, v2i32, Index, !cast<SubRegIndex>(sub#Index) 2048 >; 2049 def Insert_Element_v2i32_#Index : Insert_Element < 2050 i32, v2i32, Index, !cast<SubRegIndex>(sub#Index) 2051 >; 2052 2053 def Extract_Element_v2f32_#Index : Extract_Element < 2054 f32, v2f32, Index, !cast<SubRegIndex>(sub#Index) 2055 >; 2056 def Insert_Element_v2f32_#Index : Insert_Element < 2057 f32, v2f32, Index, !cast<SubRegIndex>(sub#Index) 2058 >; 2059 } 2060 2061 foreach Index = 0-3 in { 2062 def Extract_Element_v4i32_#Index : Extract_Element < 2063 i32, v4i32, Index, !cast<SubRegIndex>(sub#Index) 2064 >; 2065 def Insert_Element_v4i32_#Index : Insert_Element < 2066 i32, v4i32, Index, !cast<SubRegIndex>(sub#Index) 2067 >; 2068 2069 def Extract_Element_v4f32_#Index : Extract_Element < 2070 f32, v4f32, Index, !cast<SubRegIndex>(sub#Index) 2071 >; 2072 def Insert_Element_v4f32_#Index : Insert_Element < 2073 f32, v4f32, Index, !cast<SubRegIndex>(sub#Index) 2074 >; 2075 } 2076 2077 foreach Index = 0-7 in { 2078 def Extract_Element_v8i32_#Index : Extract_Element < 2079 i32, v8i32, Index, !cast<SubRegIndex>(sub#Index) 2080 >; 2081 def Insert_Element_v8i32_#Index : Insert_Element < 2082 i32, v8i32, Index, !cast<SubRegIndex>(sub#Index) 2083 >; 2084 2085 def Extract_Element_v8f32_#Index : Extract_Element < 2086 f32, v8f32, Index, !cast<SubRegIndex>(sub#Index) 2087 >; 2088 def Insert_Element_v8f32_#Index : Insert_Element < 2089 f32, v8f32, Index, !cast<SubRegIndex>(sub#Index) 2090 >; 2091 } 2092 2093 foreach Index = 0-15 in { 2094 def Extract_Element_v16i32_#Index : Extract_Element < 2095 i32, v16i32, Index, !cast<SubRegIndex>(sub#Index) 2096 >; 2097 def Insert_Element_v16i32_#Index : Insert_Element < 2098 i32, v16i32, Index, !cast<SubRegIndex>(sub#Index) 2099 >; 2100 2101 def Extract_Element_v16f32_#Index : Extract_Element < 2102 f32, v16f32, Index, !cast<SubRegIndex>(sub#Index) 2103 >; 2104 def Insert_Element_v16f32_#Index : Insert_Element < 2105 f32, v16f32, Index, !cast<SubRegIndex>(sub#Index) 2106 >; 2107 } 2108 2109 def : BitConvert <i32, f32, SReg_32>; 2110 def : BitConvert <i32, f32, VReg_32>; 2111 2112 def : BitConvert <f32, i32, SReg_32>; 2113 def : BitConvert <f32, i32, VReg_32>; 2114 2115 def : BitConvert <i64, f64, VReg_64>; 2116 2117 def : BitConvert <f64, i64, VReg_64>; 2118 2119 def : BitConvert <v2f32, v2i32, VReg_64>; 2120 def : BitConvert <v2i32, v2f32, VReg_64>; 2121 def : BitConvert <v2i32, i64, VReg_64>; 2122 def : BitConvert <i64, v2i32, VReg_64>; 2123 def : BitConvert <v2f32, i64, VReg_64>; 2124 def : BitConvert <i64, v2f32, VReg_64>; 2125 def : BitConvert <v2i32, f64, VReg_64>; 2126 def : BitConvert <f64, v2i32, VReg_64>; 2127 def : BitConvert <v4f32, v4i32, VReg_128>; 2128 def : BitConvert <v4i32, v4f32, VReg_128>; 2129 2130 def : BitConvert <v8f32, v8i32, SReg_256>; 2131 def : BitConvert <v8i32, v8f32, SReg_256>; 2132 def : BitConvert <v8i32, v32i8, SReg_256>; 2133 def : BitConvert <v32i8, v8i32, SReg_256>; 2134 def : BitConvert <v8i32, v32i8, VReg_256>; 2135 def : BitConvert <v8i32, v8f32, VReg_256>; 2136 def : BitConvert <v8f32, v8i32, VReg_256>; 2137 def : BitConvert <v32i8, v8i32, VReg_256>; 2138 2139 def : BitConvert <v16i32, v16f32, VReg_512>; 2140 def : BitConvert <v16f32, v16i32, VReg_512>; 2141 2142 /********** =================== **********/ 2143 /********** Src & Dst modifiers **********/ 2144 /********** =================== **********/ 2145 2146 def FCLAMP_SI : AMDGPUShaderInst < 2147 (outs VReg_32:$dst), 2148 (ins VSrc_32:$src0), 2149 "FCLAMP_SI $dst, $src0", 2150 [] 2151 > { 2152 let usesCustomInserter = 1; 2153 } 2154 2155 def : Pat < 2156 (AMDGPUclamp f32:$src, (f32 FP_ZERO), (f32 FP_ONE)), 2157 (FCLAMP_SI f32:$src) 2158 >; 2159 2160 /********** ================================ **********/ 2161 /********** Floating point absolute/negative **********/ 2162 /********** ================================ **********/ 2163 2164 // Manipulate the sign bit directly, as e.g. using the source negation modifier 2165 // in V_ADD_F32_e64 $src, 0, [...] does not result in -0.0 for $src == +0.0, 2166 // breaking the piglit *s-floatBitsToInt-neg* tests 2167 2168 // TODO: Look into not implementing isFNegFree/isFAbsFree for SI, and possibly 2169 // removing these patterns 2170 2171 def : Pat < 2172 (fneg (fabs f32:$src)), 2173 (V_OR_B32_e32 $src, (V_MOV_B32_e32 0x80000000)) /* Set sign bit */ 2174 >; 2175 2176 def FABS_SI : AMDGPUShaderInst < 2177 (outs VReg_32:$dst), 2178 (ins VSrc_32:$src0), 2179 "FABS_SI $dst, $src0", 2180 [] 2181 > { 2182 let usesCustomInserter = 1; 2183 } 2184 2185 def : Pat < 2186 (fabs f32:$src), 2187 (FABS_SI f32:$src) 2188 >; 2189 2190 def FNEG_SI : AMDGPUShaderInst < 2191 (outs VReg_32:$dst), 2192 (ins VSrc_32:$src0), 2193 "FNEG_SI $dst, $src0", 2194 [] 2195 > { 2196 let usesCustomInserter = 1; 2197 } 2198 2199 def : Pat < 2200 (fneg f32:$src), 2201 (FNEG_SI f32:$src) 2202 >; 2203 2204 /********** ================== **********/ 2205 /********** Immediate Patterns **********/ 2206 /********** ================== **********/ 2207 2208 def : Pat < 2209 (SGPRImm<(i32 imm)>:$imm), 2210 (S_MOV_B32 imm:$imm) 2211 >; 2212 2213 def : Pat < 2214 (SGPRImm<(f32 fpimm)>:$imm), 2215 (S_MOV_B32 fpimm:$imm) 2216 >; 2217 2218 def : Pat < 2219 (i32 imm:$imm), 2220 (V_MOV_B32_e32 imm:$imm) 2221 >; 2222 2223 def : Pat < 2224 (f32 fpimm:$imm), 2225 (V_MOV_B32_e32 fpimm:$imm) 2226 >; 2227 2228 def : Pat < 2229 (i64 InlineImm<i64>:$imm), 2230 (S_MOV_B64 InlineImm<i64>:$imm) 2231 >; 2232 2233 /********** ===================== **********/ 2234 /********** Interpolation Paterns **********/ 2235 /********** ===================== **********/ 2236 2237 def : Pat < 2238 (int_SI_fs_constant imm:$attr_chan, imm:$attr, i32:$params), 2239 (V_INTERP_MOV_F32 INTERP.P0, imm:$attr_chan, imm:$attr, $params) 2240 >; 2241 2242 def : Pat < 2243 (int_SI_fs_interp imm:$attr_chan, imm:$attr, M0Reg:$params, v2i32:$ij), 2244 (V_INTERP_P2_F32 (V_INTERP_P1_F32 (EXTRACT_SUBREG v2i32:$ij, sub0), 2245 imm:$attr_chan, imm:$attr, i32:$params), 2246 (EXTRACT_SUBREG $ij, sub1), 2247 imm:$attr_chan, imm:$attr, $params) 2248 >; 2249 2250 /********** ================== **********/ 2251 /********** Intrinsic Patterns **********/ 2252 /********** ================== **********/ 2253 2254 /* llvm.AMDGPU.pow */ 2255 def : POW_Common <V_LOG_F32_e32, V_EXP_F32_e32, V_MUL_LEGACY_F32_e32>; 2256 2257 def : Pat < 2258 (int_AMDGPU_div f32:$src0, f32:$src1), 2259 (V_MUL_LEGACY_F32_e32 $src0, (V_RCP_LEGACY_F32_e32 $src1)) 2260 >; 2261 2262 def : Pat< 2263 (fdiv f32:$src0, f32:$src1), 2264 (V_MUL_F32_e32 $src0, (V_RCP_F32_e32 $src1)) 2265 >; 2266 2267 def : Pat< 2268 (fdiv f64:$src0, f64:$src1), 2269 (V_MUL_F64 $src0, (V_RCP_F64_e32 $src1), (i64 0)) 2270 >; 2271 2272 def : Pat < 2273 (fcos f32:$src0), 2274 (V_COS_F32_e32 (V_MUL_F32_e32 $src0, (V_MOV_B32_e32 CONST.TWO_PI_INV))) 2275 >; 2276 2277 def : Pat < 2278 (fsin f32:$src0), 2279 (V_SIN_F32_e32 (V_MUL_F32_e32 $src0, (V_MOV_B32_e32 CONST.TWO_PI_INV))) 2280 >; 2281 2282 def : Pat < 2283 (int_AMDGPU_cube v4f32:$src), 2284 (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), 2285 (V_CUBETC_F32 (EXTRACT_SUBREG $src, sub0), 2286 (EXTRACT_SUBREG $src, sub1), 2287 (EXTRACT_SUBREG $src, sub2)), 2288 sub0), 2289 (V_CUBESC_F32 (EXTRACT_SUBREG $src, sub0), 2290 (EXTRACT_SUBREG $src, sub1), 2291 (EXTRACT_SUBREG $src, sub2)), 2292 sub1), 2293 (V_CUBEMA_F32 (EXTRACT_SUBREG $src, sub0), 2294 (EXTRACT_SUBREG $src, sub1), 2295 (EXTRACT_SUBREG $src, sub2)), 2296 sub2), 2297 (V_CUBEID_F32 (EXTRACT_SUBREG $src, sub0), 2298 (EXTRACT_SUBREG $src, sub1), 2299 (EXTRACT_SUBREG $src, sub2)), 2300 sub3) 2301 >; 2302 2303 def : Pat < 2304 (i32 (sext i1:$src0)), 2305 (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src0) 2306 >; 2307 2308 class Ext32Pat <SDNode ext> : Pat < 2309 (i32 (ext i1:$src0)), 2310 (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src0) 2311 >; 2312 2313 def : Ext32Pat <zext>; 2314 def : Ext32Pat <anyext>; 2315 2316 // Offset in an 32Bit VGPR 2317 def : Pat < 2318 (SIload_constant v4i32:$sbase, i32:$voff), 2319 (BUFFER_LOAD_DWORD_OFFEN $sbase, $voff, 0, 0, 0, 0) 2320 >; 2321 2322 // The multiplication scales from [0,1] to the unsigned integer range 2323 def : Pat < 2324 (AMDGPUurecip i32:$src0), 2325 (V_CVT_U32_F32_e32 2326 (V_MUL_F32_e32 CONST.FP_UINT_MAX_PLUS_1, 2327 (V_RCP_IFLAG_F32_e32 (V_CVT_F32_U32_e32 $src0)))) 2328 >; 2329 2330 def : Pat < 2331 (int_SI_tid), 2332 (V_MBCNT_HI_U32_B32_e32 0xffffffff, 2333 (V_MBCNT_LO_U32_B32_e64 0xffffffff, 0, 0, 0)) 2334 >; 2335 2336 //===----------------------------------------------------------------------===// 2337 // VOP3 Patterns 2338 //===----------------------------------------------------------------------===// 2339 2340 def : IMad24Pat<V_MAD_I32_I24>; 2341 def : UMad24Pat<V_MAD_U32_U24>; 2342 2343 def : Pat < 2344 (fadd f64:$src0, f64:$src1), 2345 (V_ADD_F64 $src0, $src1, (i64 0)) 2346 >; 2347 2348 def : Pat < 2349 (fmul f64:$src0, f64:$src1), 2350 (V_MUL_F64 $src0, $src1, (i64 0)) 2351 >; 2352 2353 def : Pat < 2354 (mul i32:$src0, i32:$src1), 2355 (V_MUL_LO_I32 $src0, $src1, (i32 0)) 2356 >; 2357 2358 def : Pat < 2359 (mulhu i32:$src0, i32:$src1), 2360 (V_MUL_HI_U32 $src0, $src1, (i32 0)) 2361 >; 2362 2363 def : Pat < 2364 (mulhs i32:$src0, i32:$src1), 2365 (V_MUL_HI_I32 $src0, $src1, (i32 0)) 2366 >; 2367 2368 defm : BFIPatterns <V_BFI_B32, S_MOV_B32>; 2369 def : ROTRPattern <V_ALIGNBIT_B32>; 2370 2371 /********** ======================= **********/ 2372 /********** Load/Store Patterns **********/ 2373 /********** ======================= **********/ 2374 2375 multiclass DSReadPat <DS inst, ValueType vt, PatFrag frag> { 2376 def : Pat < 2377 (vt (frag (add i32:$ptr, (i32 IMM16bit:$offset)))), 2378 (inst (i1 0), $ptr, (as_i16imm $offset)) 2379 >; 2380 2381 def : Pat < 2382 (frag i32:$src0), 2383 (vt (inst 0, $src0, 0)) 2384 >; 2385 } 2386 2387 defm : DSReadPat <DS_READ_I8, i32, sextloadi8_local>; 2388 defm : DSReadPat <DS_READ_U8, i32, az_extloadi8_local>; 2389 defm : DSReadPat <DS_READ_I16, i32, sextloadi16_local>; 2390 defm : DSReadPat <DS_READ_U16, i32, az_extloadi16_local>; 2391 defm : DSReadPat <DS_READ_B32, i32, local_load>; 2392 defm : DSReadPat <DS_READ_B64, v2i32, local_load>; 2393 2394 multiclass DSWritePat <DS inst, ValueType vt, PatFrag frag> { 2395 def : Pat < 2396 (frag vt:$value, (add i32:$ptr, (i32 IMM16bit:$offset))), 2397 (inst (i1 0), $ptr, $value, (as_i16imm $offset)) 2398 >; 2399 2400 def : Pat < 2401 (frag vt:$val, i32:$ptr), 2402 (inst 0, $ptr, $val, 0) 2403 >; 2404 } 2405 2406 defm : DSWritePat <DS_WRITE_B8, i32, truncstorei8_local>; 2407 defm : DSWritePat <DS_WRITE_B16, i32, truncstorei16_local>; 2408 defm : DSWritePat <DS_WRITE_B32, i32, local_store>; 2409 defm : DSWritePat <DS_WRITE_B64, v2i32, local_store>; 2410 2411 multiclass DSAtomicRetPat<DS inst, ValueType vt, PatFrag frag> { 2412 def : Pat < 2413 (frag (add i32:$ptr, (i32 IMM16bit:$offset)), vt:$value), 2414 (inst (i1 0), $ptr, $value, (as_i16imm $offset)) 2415 >; 2416 2417 def : Pat < 2418 (frag i32:$ptr, vt:$val), 2419 (inst 0, $ptr, $val, 0) 2420 >; 2421 } 2422 2423 // Special case of DSAtomicRetPat for add / sub 1 -> inc / dec 2424 // 2425 // We need to use something for the data0, so we set a register to 2426 // -1. For the non-rtn variants, the manual says it does 2427 // DS[A] = (DS[A] >= D0) ? 0 : DS[A] + 1, and setting D0 to uint_max 2428 // will always do the increment so I'm assuming it's the same. 2429 // 2430 // We also load this -1 with s_mov_b32 / s_mov_b64 even though this 2431 // needs to be a VGPR. The SGPR copy pass will fix this, and it's 2432 // easier since there is no v_mov_b64. 2433 multiclass DSAtomicIncRetPat<DS inst, ValueType vt, 2434 Instruction LoadImm, PatFrag frag> { 2435 def : Pat < 2436 (frag (add i32:$ptr, (i32 IMM16bit:$offset)), (vt 1)), 2437 (inst (i1 0), $ptr, (LoadImm (vt -1)), (as_i16imm $offset)) 2438 >; 2439 2440 def : Pat < 2441 (frag i32:$ptr, (vt 1)), 2442 (inst 0, $ptr, (LoadImm (vt -1)), 0) 2443 >; 2444 } 2445 2446 multiclass DSAtomicCmpXChg <DS inst, ValueType vt, PatFrag frag> { 2447 def : Pat < 2448 (frag (add i32:$ptr, (i32 IMM16bit:$offset)), vt:$cmp, vt:$swap), 2449 (inst (i1 0), $ptr, $cmp, $swap, (as_i16imm $offset)) 2450 >; 2451 2452 def : Pat < 2453 (frag i32:$ptr, vt:$cmp, vt:$swap), 2454 (inst 0, $ptr, $cmp, $swap, 0) 2455 >; 2456 } 2457 2458 2459 // 32-bit atomics. 2460 defm : DSAtomicIncRetPat<DS_INC_RTN_U32, i32, 2461 S_MOV_B32, atomic_load_add_local>; 2462 defm : DSAtomicIncRetPat<DS_DEC_RTN_U32, i32, 2463 S_MOV_B32, atomic_load_sub_local>; 2464 2465 defm : DSAtomicRetPat<DS_WRXCHG_RTN_B32, i32, atomic_swap_local>; 2466 defm : DSAtomicRetPat<DS_ADD_RTN_U32, i32, atomic_load_add_local>; 2467 defm : DSAtomicRetPat<DS_SUB_RTN_U32, i32, atomic_load_sub_local>; 2468 defm : DSAtomicRetPat<DS_AND_RTN_B32, i32, atomic_load_and_local>; 2469 defm : DSAtomicRetPat<DS_OR_RTN_B32, i32, atomic_load_or_local>; 2470 defm : DSAtomicRetPat<DS_XOR_RTN_B32, i32, atomic_load_xor_local>; 2471 defm : DSAtomicRetPat<DS_MIN_RTN_I32, i32, atomic_load_min_local>; 2472 defm : DSAtomicRetPat<DS_MAX_RTN_I32, i32, atomic_load_max_local>; 2473 defm : DSAtomicRetPat<DS_MIN_RTN_U32, i32, atomic_load_umin_local>; 2474 defm : DSAtomicRetPat<DS_MAX_RTN_U32, i32, atomic_load_umax_local>; 2475 2476 defm : DSAtomicCmpXChg<DS_CMPST_RTN_B32, i32, atomic_cmp_swap_32_local>; 2477 2478 // 64-bit atomics. 2479 defm : DSAtomicIncRetPat<DS_INC_RTN_U64, i64, 2480 S_MOV_B64, atomic_load_add_local>; 2481 defm : DSAtomicIncRetPat<DS_DEC_RTN_U64, i64, 2482 S_MOV_B64, atomic_load_sub_local>; 2483 2484 defm : DSAtomicRetPat<DS_WRXCHG_RTN_B64, i64, atomic_swap_local>; 2485 defm : DSAtomicRetPat<DS_ADD_RTN_U64, i64, atomic_load_add_local>; 2486 defm : DSAtomicRetPat<DS_SUB_RTN_U64, i64, atomic_load_sub_local>; 2487 defm : DSAtomicRetPat<DS_AND_RTN_B64, i64, atomic_load_and_local>; 2488 defm : DSAtomicRetPat<DS_OR_RTN_B64, i64, atomic_load_or_local>; 2489 defm : DSAtomicRetPat<DS_XOR_RTN_B64, i64, atomic_load_xor_local>; 2490 defm : DSAtomicRetPat<DS_MIN_RTN_I64, i64, atomic_load_min_local>; 2491 defm : DSAtomicRetPat<DS_MAX_RTN_I64, i64, atomic_load_max_local>; 2492 defm : DSAtomicRetPat<DS_MIN_RTN_U64, i64, atomic_load_umin_local>; 2493 defm : DSAtomicRetPat<DS_MAX_RTN_U64, i64, atomic_load_umax_local>; 2494 2495 defm : DSAtomicCmpXChg<DS_CMPST_RTN_B64, i64, atomic_cmp_swap_64_local>; 2496 2497 2498 //===----------------------------------------------------------------------===// 2499 // MUBUF Patterns 2500 //===----------------------------------------------------------------------===// 2501 2502 multiclass MUBUFLoad_Pattern <MUBUF Instr_ADDR64, ValueType vt, 2503 PatFrag constant_ld> { 2504 def : Pat < 2505 (vt (constant_ld (add i64:$ptr, i64:$offset))), 2506 (Instr_ADDR64 (SI_ADDR64_RSRC $ptr), $offset, 0) 2507 >; 2508 } 2509 2510 defm : MUBUFLoad_Pattern <BUFFER_LOAD_SBYTE_ADDR64, i32, 2511 sextloadi8_constant>; 2512 defm : MUBUFLoad_Pattern <BUFFER_LOAD_UBYTE_ADDR64, i32, 2513 az_extloadi8_constant>; 2514 defm : MUBUFLoad_Pattern <BUFFER_LOAD_SSHORT_ADDR64, i32, 2515 sextloadi16_constant>; 2516 defm : MUBUFLoad_Pattern <BUFFER_LOAD_USHORT_ADDR64, i32, 2517 az_extloadi16_constant>; 2518 defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORD_ADDR64, i32, 2519 constant_load>; 2520 defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX2_ADDR64, v2i32, 2521 constant_load>; 2522 defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX4_ADDR64, v4i32, 2523 constant_load>; 2524 2525 // BUFFER_LOAD_DWORD*, addr64=0 2526 multiclass MUBUF_Load_Dword <ValueType vt, MUBUF offset, MUBUF offen, MUBUF idxen, 2527 MUBUF bothen> { 2528 2529 def : Pat < 2530 (vt (int_SI_buffer_load_dword v4i32:$rsrc, i32:$vaddr, i32:$soffset, 2531 imm:$offset, 0, 0, imm:$glc, imm:$slc, 2532 imm:$tfe)), 2533 (offset $rsrc, $vaddr, (as_i16imm $offset), $soffset, (as_i1imm $glc), 2534 (as_i1imm $slc), (as_i1imm $tfe)) 2535 >; 2536 2537 def : Pat < 2538 (vt (int_SI_buffer_load_dword v4i32:$rsrc, i32:$vaddr, i32:$soffset, 2539 imm, 1, 0, imm:$glc, imm:$slc, 2540 imm:$tfe)), 2541 (offen $rsrc, $vaddr, $soffset, (as_i1imm $glc), (as_i1imm $slc), 2542 (as_i1imm $tfe)) 2543 >; 2544 2545 def : Pat < 2546 (vt (int_SI_buffer_load_dword v4i32:$rsrc, i32:$vaddr, i32:$soffset, 2547 imm:$offset, 0, 1, imm:$glc, imm:$slc, 2548 imm:$tfe)), 2549 (idxen $rsrc, $vaddr, (as_i16imm $offset), $soffset, (as_i1imm $glc), 2550 (as_i1imm $slc), (as_i1imm $tfe)) 2551 >; 2552 2553 def : Pat < 2554 (vt (int_SI_buffer_load_dword v4i32:$rsrc, v2i32:$vaddr, i32:$soffset, 2555 imm, 1, 1, imm:$glc, imm:$slc, 2556 imm:$tfe)), 2557 (bothen $rsrc, $vaddr, $soffset, (as_i1imm $glc), (as_i1imm $slc), 2558 (as_i1imm $tfe)) 2559 >; 2560 } 2561 2562 defm : MUBUF_Load_Dword <i32, BUFFER_LOAD_DWORD_OFFSET, BUFFER_LOAD_DWORD_OFFEN, 2563 BUFFER_LOAD_DWORD_IDXEN, BUFFER_LOAD_DWORD_BOTHEN>; 2564 defm : MUBUF_Load_Dword <v2i32, BUFFER_LOAD_DWORDX2_OFFSET, BUFFER_LOAD_DWORDX2_OFFEN, 2565 BUFFER_LOAD_DWORDX2_IDXEN, BUFFER_LOAD_DWORDX2_BOTHEN>; 2566 defm : MUBUF_Load_Dword <v4i32, BUFFER_LOAD_DWORDX4_OFFSET, BUFFER_LOAD_DWORDX4_OFFEN, 2567 BUFFER_LOAD_DWORDX4_IDXEN, BUFFER_LOAD_DWORDX4_BOTHEN>; 2568 2569 //===----------------------------------------------------------------------===// 2570 // MTBUF Patterns 2571 //===----------------------------------------------------------------------===// 2572 2573 // TBUFFER_STORE_FORMAT_*, addr64=0 2574 class MTBUF_StoreResource <ValueType vt, int num_channels, MTBUF opcode> : Pat< 2575 (SItbuffer_store v4i32:$rsrc, vt:$vdata, num_channels, i32:$vaddr, 2576 i32:$soffset, imm:$inst_offset, imm:$dfmt, 2577 imm:$nfmt, imm:$offen, imm:$idxen, 2578 imm:$glc, imm:$slc, imm:$tfe), 2579 (opcode 2580 $vdata, (as_i16imm $inst_offset), (as_i1imm $offen), (as_i1imm $idxen), 2581 (as_i1imm $glc), 0, (as_i8imm $dfmt), (as_i8imm $nfmt), $vaddr, $rsrc, 2582 (as_i1imm $slc), (as_i1imm $tfe), $soffset) 2583 >; 2584 2585 def : MTBUF_StoreResource <i32, 1, TBUFFER_STORE_FORMAT_X>; 2586 def : MTBUF_StoreResource <v2i32, 2, TBUFFER_STORE_FORMAT_XY>; 2587 def : MTBUF_StoreResource <v4i32, 3, TBUFFER_STORE_FORMAT_XYZ>; 2588 def : MTBUF_StoreResource <v4i32, 4, TBUFFER_STORE_FORMAT_XYZW>; 2589 2590 let SubtargetPredicate = isCI in { 2591 2592 // Sea island new arithmetic instructinos 2593 let neverHasSideEffects = 1 in { 2594 defm V_TRUNC_F64 : VOP1_64 <0x00000017, "V_TRUNC_F64", 2595 [(set f64:$dst, (ftrunc f64:$src0))] 2596 >; 2597 defm V_CEIL_F64 : VOP1_64 <0x00000018, "V_CEIL_F64", 2598 [(set f64:$dst, (fceil f64:$src0))] 2599 >; 2600 defm V_FLOOR_F64 : VOP1_64 <0x0000001A, "V_FLOOR_F64", 2601 [(set f64:$dst, (ffloor f64:$src0))] 2602 >; 2603 defm V_RNDNE_F64 : VOP1_64 <0x00000019, "V_RNDNE_F64", 2604 [(set f64:$dst, (frint f64:$src0))] 2605 >; 2606 2607 defm V_QSAD_PK_U16_U8 : VOP3_32 <0x00000173, "V_QSAD_PK_U16_U8", []>; 2608 defm V_MQSAD_U16_U8 : VOP3_32 <0x000000172, "V_MQSAD_U16_U8", []>; 2609 defm V_MQSAD_U32_U8 : VOP3_32 <0x00000175, "V_MQSAD_U32_U8", []>; 2610 def V_MAD_U64_U32 : VOP3_64 <0x00000176, "V_MAD_U64_U32", []>; 2611 2612 // XXX - Does this set VCC? 2613 def V_MAD_I64_I32 : VOP3_64 <0x00000177, "V_MAD_I64_I32", []>; 2614 } // End neverHasSideEffects = 1 2615 2616 // Remaining instructions: 2617 // FLAT_* 2618 // S_CBRANCH_CDBGUSER 2619 // S_CBRANCH_CDBGSYS 2620 // S_CBRANCH_CDBGSYS_OR_USER 2621 // S_CBRANCH_CDBGSYS_AND_USER 2622 // S_DCACHE_INV_VOL 2623 // V_EXP_LEGACY_F32 2624 // V_LOG_LEGACY_F32 2625 // DS_NOP 2626 // DS_GWS_SEMA_RELEASE_ALL 2627 // DS_WRAP_RTN_B32 2628 // DS_CNDXCHG32_RTN_B64 2629 // DS_WRITE_B96 2630 // DS_WRITE_B128 2631 // DS_CONDXCHG32_RTN_B128 2632 // DS_READ_B96 2633 // DS_READ_B128 2634 // BUFFER_LOAD_DWORDX3 2635 // BUFFER_STORE_DWORDX3 2636 2637 } // End iSCI 2638 2639 2640 /********** ====================== **********/ 2641 /********** Indirect adressing **********/ 2642 /********** ====================== **********/ 2643 2644 multiclass SI_INDIRECT_Pattern <ValueType vt, ValueType eltvt, SI_INDIRECT_DST IndDst> { 2645 2646 // 1. Extract with offset 2647 def : Pat< 2648 (vector_extract vt:$vec, (add i32:$idx, imm:$off)), 2649 (eltvt (SI_INDIRECT_SRC (IMPLICIT_DEF), $vec, $idx, imm:$off)) 2650 >; 2651 2652 // 2. Extract without offset 2653 def : Pat< 2654 (vector_extract vt:$vec, i32:$idx), 2655 (eltvt (SI_INDIRECT_SRC (IMPLICIT_DEF), $vec, $idx, 0)) 2656 >; 2657 2658 // 3. Insert with offset 2659 def : Pat< 2660 (vector_insert vt:$vec, eltvt:$val, (add i32:$idx, imm:$off)), 2661 (IndDst (IMPLICIT_DEF), $vec, $idx, imm:$off, $val) 2662 >; 2663 2664 // 4. Insert without offset 2665 def : Pat< 2666 (vector_insert vt:$vec, eltvt:$val, i32:$idx), 2667 (IndDst (IMPLICIT_DEF), $vec, $idx, 0, $val) 2668 >; 2669 } 2670 2671 defm : SI_INDIRECT_Pattern <v2f32, f32, SI_INDIRECT_DST_V2>; 2672 defm : SI_INDIRECT_Pattern <v4f32, f32, SI_INDIRECT_DST_V4>; 2673 defm : SI_INDIRECT_Pattern <v8f32, f32, SI_INDIRECT_DST_V8>; 2674 defm : SI_INDIRECT_Pattern <v16f32, f32, SI_INDIRECT_DST_V16>; 2675 2676 defm : SI_INDIRECT_Pattern <v2i32, i32, SI_INDIRECT_DST_V2>; 2677 defm : SI_INDIRECT_Pattern <v4i32, i32, SI_INDIRECT_DST_V4>; 2678 defm : SI_INDIRECT_Pattern <v8i32, i32, SI_INDIRECT_DST_V8>; 2679 defm : SI_INDIRECT_Pattern <v16i32, i32, SI_INDIRECT_DST_V16>; 2680 2681 //===----------------------------------------------------------------------===// 2682 // Conversion Patterns 2683 //===----------------------------------------------------------------------===// 2684 2685 def : Pat<(i32 (sext_inreg i32:$src, i1)), 2686 (S_BFE_I32 i32:$src, 65536)>; // 0 | 1 << 16 2687 2688 // TODO: Match 64-bit BFE. SI has a 64-bit BFE, but it's scalar only so it 2689 // might not be worth the effort, and will need to expand to shifts when 2690 // fixing SGPR copies. 2691 2692 // Handle sext_inreg in i64 2693 def : Pat < 2694 (i64 (sext_inreg i64:$src, i1)), 2695 (INSERT_SUBREG (INSERT_SUBREG (i64 (IMPLICIT_DEF)), 2696 (S_BFE_I32 (EXTRACT_SUBREG i64:$src, sub0), 65536), sub0), // 0 | 1 << 16 2697 (S_MOV_B32 -1), sub1) 2698 >; 2699 2700 def : Pat < 2701 (i64 (sext_inreg i64:$src, i8)), 2702 (INSERT_SUBREG (INSERT_SUBREG (i64 (IMPLICIT_DEF)), 2703 (S_SEXT_I32_I8 (EXTRACT_SUBREG i64:$src, sub0)), sub0), 2704 (S_MOV_B32 -1), sub1) 2705 >; 2706 2707 def : Pat < 2708 (i64 (sext_inreg i64:$src, i16)), 2709 (INSERT_SUBREG (INSERT_SUBREG (i64 (IMPLICIT_DEF)), 2710 (S_SEXT_I32_I16 (EXTRACT_SUBREG i64:$src, sub0)), sub0), 2711 (S_MOV_B32 -1), sub1) 2712 >; 2713 2714 class ZExt_i64_i32_Pat <SDNode ext> : Pat < 2715 (i64 (ext i32:$src)), 2716 (INSERT_SUBREG (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $src, sub0), 2717 (S_MOV_B32 0), sub1) 2718 >; 2719 2720 class ZExt_i64_i1_Pat <SDNode ext> : Pat < 2721 (i64 (ext i1:$src)), 2722 (INSERT_SUBREG 2723 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), 2724 (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src), sub0), 2725 (S_MOV_B32 0), sub1) 2726 >; 2727 2728 2729 def : ZExt_i64_i32_Pat<zext>; 2730 def : ZExt_i64_i32_Pat<anyext>; 2731 def : ZExt_i64_i1_Pat<zext>; 2732 def : ZExt_i64_i1_Pat<anyext>; 2733 2734 def : Pat < 2735 (i64 (sext i32:$src)), 2736 (INSERT_SUBREG 2737 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $src, sub0), 2738 (S_ASHR_I32 $src, 31), sub1) 2739 >; 2740 2741 def : Pat < 2742 (i64 (sext i1:$src)), 2743 (INSERT_SUBREG 2744 (INSERT_SUBREG 2745 (i64 (IMPLICIT_DEF)), 2746 (V_CNDMASK_B32_e64 0, -1, $src), sub0), 2747 (V_CNDMASK_B32_e64 0, -1, $src), sub1) 2748 >; 2749 2750 def : Pat < 2751 (f32 (sint_to_fp i1:$src)), 2752 (V_CNDMASK_B32_e64 (i32 0), CONST.FP32_NEG_ONE, $src) 2753 >; 2754 2755 def : Pat < 2756 (f32 (uint_to_fp i1:$src)), 2757 (V_CNDMASK_B32_e64 (i32 0), CONST.FP32_ONE, $src) 2758 >; 2759 2760 def : Pat < 2761 (f64 (sint_to_fp i1:$src)), 2762 (V_CVT_F64_I32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src)) 2763 >; 2764 2765 def : Pat < 2766 (f64 (uint_to_fp i1:$src)), 2767 (V_CVT_F64_U32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src)) 2768 >; 2769 2770 //===----------------------------------------------------------------------===// 2771 // Miscellaneous Patterns 2772 //===----------------------------------------------------------------------===// 2773 2774 def : Pat < 2775 (i32 (trunc i64:$a)), 2776 (EXTRACT_SUBREG $a, sub0) 2777 >; 2778 2779 def : Pat < 2780 (i1 (trunc i32:$a)), 2781 (V_CMP_EQ_I32_e64 (V_AND_B32_e32 (i32 1), $a), 1) 2782 >; 2783 2784 // V_ADD_I32_e32/S_ADD_I32 produces carry in VCC/SCC. For the vector 2785 // case, the sgpr-copies pass will fix this to use the vector version. 2786 def : Pat < 2787 (i32 (addc i32:$src0, i32:$src1)), 2788 (S_ADD_I32 $src0, $src1) 2789 >; 2790 2791 //============================================================================// 2792 // Miscellaneous Optimization Patterns 2793 //============================================================================// 2794 2795 def : SHA256MaPattern <V_BFI_B32, V_XOR_B32_e32>; 2796 2797 } // End isSI predicate 2798