//===-- SIInstructions.td - SI Instruction Definitions --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This file was originally auto-generated from a GPU register header file and
// all the instruction definitions were originally commented out. Instructions
// that are not yet supported remain commented out.
//===----------------------------------------------------------------------===//

class InterpSlots {
  int P0 = 2;
  int P10 = 0;
  int P20 = 1;
}
def INTERP : InterpSlots;

def isGCN : Predicate<"Subtarget->getGeneration() "
                      ">= SISubtarget::SOUTHERN_ISLANDS">,
            AssemblerPredicate<"FeatureGCN">;
def isSI : Predicate<"Subtarget->getGeneration() "
                     "== SISubtarget::SOUTHERN_ISLANDS">,
           AssemblerPredicate<"FeatureSouthernIslands">;


def has16BankLDS : Predicate<"Subtarget->getLDSBankCount() == 16">;
def has32BankLDS : Predicate<"Subtarget->getLDSBankCount() == 32">;

let SubtargetPredicate = isGCN in {

//===----------------------------------------------------------------------===//
// EXP Instructions
//===----------------------------------------------------------------------===//

defm EXP : EXP_m;

//===----------------------------------------------------------------------===//
// SMRD Instructions
//===----------------------------------------------------------------------===//

// We are using the SReg_32_XM0 and not the SReg_32 register class for 32-bit
// SMRD instructions, because the SReg_32_XM0 register class does not include M0
// and writing to M0 from an SMRD instruction will hang the GPU.
defm S_LOAD_DWORD : SMRD_Helper <smrd<0x00>, "s_load_dword", SReg_64, SReg_32_XM0>;
defm S_LOAD_DWORDX2 : SMRD_Helper <smrd<0x01>, "s_load_dwordx2", SReg_64, SReg_64>;
defm S_LOAD_DWORDX4 : SMRD_Helper <smrd<0x02>, "s_load_dwordx4", SReg_64, SReg_128>;
defm S_LOAD_DWORDX8 : SMRD_Helper <smrd<0x03>, "s_load_dwordx8", SReg_64, SReg_256>;
defm S_LOAD_DWORDX16 : SMRD_Helper <smrd<0x04>, "s_load_dwordx16", SReg_64, SReg_512>;

defm S_BUFFER_LOAD_DWORD : SMRD_Helper <
  smrd<0x08>, "s_buffer_load_dword", SReg_128, SReg_32_XM0
>;

defm S_BUFFER_LOAD_DWORDX2 : SMRD_Helper <
  smrd<0x09>, "s_buffer_load_dwordx2", SReg_128, SReg_64
>;

defm S_BUFFER_LOAD_DWORDX4 : SMRD_Helper <
  smrd<0x0a>, "s_buffer_load_dwordx4", SReg_128, SReg_128
>;

defm S_BUFFER_LOAD_DWORDX8 : SMRD_Helper <
  smrd<0x0b>, "s_buffer_load_dwordx8", SReg_128, SReg_256
>;

defm S_BUFFER_LOAD_DWORDX16 : SMRD_Helper <
  smrd<0x0c>, "s_buffer_load_dwordx16", SReg_128, SReg_512
>;

let mayStore = ? in {
// FIXME: mayStore = ? is a workaround for tablegen bug for different
// inferred mayStore flags for the instruction pattern vs. standalone
// Pat. Each considers the other contradictory.

defm S_MEMTIME : SMRD_Special <smrd<0x1e, 0x24>, "s_memtime",
  (outs SReg_64:$sdst), ?, " $sdst", [(set i64:$sdst, (int_amdgcn_s_memtime))]
>;
}

defm S_DCACHE_INV : SMRD_Inval <smrd<0x1f, 0x20>, "s_dcache_inv",
  int_amdgcn_s_dcache_inv>;

//===----------------------------------------------------------------------===//
// SOP1 Instructions
//===----------------------------------------------------------------------===//

let isMoveImm = 1 in {
  let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
    defm S_MOV_B32 : SOP1_32 <sop1<0x03, 0x00>, "s_mov_b32", []>;
    defm S_MOV_B64 : SOP1_64 <sop1<0x04, 0x01>, "s_mov_b64", []>;
  } // End isReMaterializable = 1, isAsCheapAsAMove = 1

  let Uses = [SCC] in {
    defm S_CMOV_B32 : SOP1_32 <sop1<0x05, 0x02>, "s_cmov_b32", []>;
    defm S_CMOV_B64 : SOP1_64 <sop1<0x06, 0x03>, "s_cmov_b64", []>;
  } // End Uses = [SCC]
} // End isMoveImm = 1

let Defs = [SCC] in {
defm S_NOT_B32 : SOP1_32 <sop1<0x07, 0x04>, "s_not_b32",
  [(set i32:$sdst, (not i32:$src0))]
>;

defm S_NOT_B64 : SOP1_64 <sop1<0x08, 0x05>, "s_not_b64",
  [(set i64:$sdst, (not i64:$src0))]
>;
defm S_WQM_B32 : SOP1_32 <sop1<0x09, 0x06>, "s_wqm_b32", []>;
defm S_WQM_B64 : SOP1_64 <sop1<0x0a, 0x07>, "s_wqm_b64", []>;
} // End Defs = [SCC]


defm S_BREV_B32 : SOP1_32 <sop1<0x0b, 0x08>, "s_brev_b32",
  [(set i32:$sdst, (bitreverse i32:$src0))]
>;
defm S_BREV_B64 : SOP1_64 <sop1<0x0c, 0x09>, "s_brev_b64", []>;

let Defs = [SCC] in {
defm S_BCNT0_I32_B32 : SOP1_32 <sop1<0x0d, 0x0a>, "s_bcnt0_i32_b32", []>;
defm S_BCNT0_I32_B64 : SOP1_32_64 <sop1<0x0e, 0x0b>, "s_bcnt0_i32_b64", []>;
defm S_BCNT1_I32_B32 : SOP1_32 <sop1<0x0f, 0x0c>, "s_bcnt1_i32_b32",
  [(set i32:$sdst, (ctpop i32:$src0))]
>;
defm S_BCNT1_I32_B64 : SOP1_32_64 <sop1<0x10, 0x0d>, "s_bcnt1_i32_b64", []>;
} // End Defs = [SCC]

defm S_FF0_I32_B32 : SOP1_32 <sop1<0x11, 0x0e>, "s_ff0_i32_b32", []>;
defm S_FF0_I32_B64 : SOP1_32_64 <sop1<0x12, 0x0f>, "s_ff0_i32_b64", []>;
defm S_FF1_I32_B32 : SOP1_32 <sop1<0x13, 0x10>, "s_ff1_i32_b32",
  [(set i32:$sdst, (cttz_zero_undef i32:$src0))]
>;
defm S_FF1_I32_B64 : SOP1_32_64 <sop1<0x14, 0x11>, "s_ff1_i32_b64", []>;

defm S_FLBIT_I32_B32 : SOP1_32 <sop1<0x15, 0x12>, "s_flbit_i32_b32",
  [(set i32:$sdst, (AMDGPUffbh_u32 i32:$src0))]
>;

defm S_FLBIT_I32_B64 : SOP1_32_64 <sop1<0x16, 0x13>, "s_flbit_i32_b64", []>;
defm S_FLBIT_I32 : SOP1_32 <sop1<0x17, 0x14>, "s_flbit_i32",
  [(set i32:$sdst, (int_AMDGPU_flbit_i32 i32:$src0))]
>;
defm S_FLBIT_I32_I64 : SOP1_32_64 <sop1<0x18, 0x15>, "s_flbit_i32_i64", []>;
defm S_SEXT_I32_I8 : SOP1_32 <sop1<0x19, 0x16>, "s_sext_i32_i8",
  [(set i32:$sdst, (sext_inreg i32:$src0, i8))]
>;
defm S_SEXT_I32_I16 : SOP1_32 <sop1<0x1a, 0x17>, "s_sext_i32_i16",
  [(set i32:$sdst, (sext_inreg i32:$src0, i16))]
>;

defm S_BITSET0_B32 : SOP1_32 <sop1<0x1b, 0x18>, "s_bitset0_b32", []>;
defm S_BITSET0_B64 : SOP1_64_32 <sop1<0x1c, 0x19>, "s_bitset0_b64", []>;
defm S_BITSET1_B32 : SOP1_32 <sop1<0x1d, 0x1a>, "s_bitset1_b32", []>;
defm S_BITSET1_B64 : SOP1_64_32 <sop1<0x1e, 0x1b>, "s_bitset1_b64", []>;
defm S_GETPC_B64 : SOP1_64_0 <sop1<0x1f, 0x1c>, "s_getpc_b64", []>;
defm S_SETPC_B64 : SOP1_1 <sop1<0x20, 0x1d>, "s_setpc_b64", []>;
defm S_SWAPPC_B64 : SOP1_64 <sop1<0x21, 0x1e>, "s_swappc_b64", []>;
defm S_RFE_B64 : SOP1_1 <sop1<0x22, 0x1f>,
"s_rfe_b64", []>; 160 161 let hasSideEffects = 1, Uses = [EXEC], Defs = [EXEC, SCC] in { 162 163 defm S_AND_SAVEEXEC_B64 : SOP1_64 <sop1<0x24, 0x20>, "s_and_saveexec_b64", []>; 164 defm S_OR_SAVEEXEC_B64 : SOP1_64 <sop1<0x25, 0x21>, "s_or_saveexec_b64", []>; 165 defm S_XOR_SAVEEXEC_B64 : SOP1_64 <sop1<0x26, 0x22>, "s_xor_saveexec_b64", []>; 166 defm S_ANDN2_SAVEEXEC_B64 : SOP1_64 <sop1<0x27, 0x23>, "s_andn2_saveexec_b64", []>; 167 defm S_ORN2_SAVEEXEC_B64 : SOP1_64 <sop1<0x28, 0x24>, "s_orn2_saveexec_b64", []>; 168 defm S_NAND_SAVEEXEC_B64 : SOP1_64 <sop1<0x29, 0x25>, "s_nand_saveexec_b64", []>; 169 defm S_NOR_SAVEEXEC_B64 : SOP1_64 <sop1<0x2a, 0x26>, "s_nor_saveexec_b64", []>; 170 defm S_XNOR_SAVEEXEC_B64 : SOP1_64 <sop1<0x2b, 0x27>, "s_xnor_saveexec_b64", []>; 171 172 } // End hasSideEffects = 1, Uses = [EXEC], Defs = [EXEC, SCC] 173 174 defm S_QUADMASK_B32 : SOP1_32 <sop1<0x2c, 0x28>, "s_quadmask_b32", []>; 175 defm S_QUADMASK_B64 : SOP1_64 <sop1<0x2d, 0x29>, "s_quadmask_b64", []>; 176 177 let Uses = [M0] in { 178 defm S_MOVRELS_B32 : SOP1_32 <sop1<0x2e, 0x2a>, "s_movrels_b32", []>; 179 defm S_MOVRELS_B64 : SOP1_64 <sop1<0x2f, 0x2b>, "s_movrels_b64", []>; 180 defm S_MOVRELD_B32 : SOP1_32 <sop1<0x30, 0x2c>, "s_movreld_b32", []>; 181 defm S_MOVRELD_B64 : SOP1_64 <sop1<0x31, 0x2d>, "s_movreld_b64", []>; 182 } // End Uses = [M0] 183 184 defm S_CBRANCH_JOIN : SOP1_1 <sop1<0x32, 0x2e>, "s_cbranch_join", []>; 185 defm S_MOV_REGRD_B32 : SOP1_32 <sop1<0x33, 0x2f>, "s_mov_regrd_b32", []>; 186 let Defs = [SCC] in { 187 defm S_ABS_I32 : SOP1_32 <sop1<0x34, 0x30>, "s_abs_i32", []>; 188 } // End Defs = [SCC] 189 defm S_MOV_FED_B32 : SOP1_32 <sop1<0x35, 0x31>, "s_mov_fed_b32", []>; 190 191 //===----------------------------------------------------------------------===// 192 // SOP2 Instructions 193 //===----------------------------------------------------------------------===// 194 195 let Defs = [SCC] in { // Carry out goes to SCC 196 let isCommutable = 1 in { 197 defm S_ADD_U32 : SOP2_32 <sop2<0x00>, "s_add_u32", []>; 198 defm S_ADD_I32 : SOP2_32 <sop2<0x02>, "s_add_i32", 199 [(set i32:$sdst, (add SSrc_32:$src0, SSrc_32:$src1))] 200 >; 201 } // End isCommutable = 1 202 203 defm S_SUB_U32 : SOP2_32 <sop2<0x01>, "s_sub_u32", []>; 204 defm S_SUB_I32 : SOP2_32 <sop2<0x03>, "s_sub_i32", 205 [(set i32:$sdst, (sub SSrc_32:$src0, SSrc_32:$src1))] 206 >; 207 208 let Uses = [SCC] in { // Carry in comes from SCC 209 let isCommutable = 1 in { 210 defm S_ADDC_U32 : SOP2_32 <sop2<0x04>, "s_addc_u32", 211 [(set i32:$sdst, (adde (i32 SSrc_32:$src0), (i32 SSrc_32:$src1)))]>; 212 } // End isCommutable = 1 213 214 defm S_SUBB_U32 : SOP2_32 <sop2<0x05>, "s_subb_u32", 215 [(set i32:$sdst, (sube (i32 SSrc_32:$src0), (i32 SSrc_32:$src1)))]>; 216 } // End Uses = [SCC] 217 218 defm S_MIN_I32 : SOP2_32 <sop2<0x06>, "s_min_i32", 219 [(set i32:$sdst, (smin i32:$src0, i32:$src1))] 220 >; 221 defm S_MIN_U32 : SOP2_32 <sop2<0x07>, "s_min_u32", 222 [(set i32:$sdst, (umin i32:$src0, i32:$src1))] 223 >; 224 defm S_MAX_I32 : SOP2_32 <sop2<0x08>, "s_max_i32", 225 [(set i32:$sdst, (smax i32:$src0, i32:$src1))] 226 >; 227 defm S_MAX_U32 : SOP2_32 <sop2<0x09>, "s_max_u32", 228 [(set i32:$sdst, (umax i32:$src0, i32:$src1))] 229 >; 230 } // End Defs = [SCC] 231 232 233 let Uses = [SCC] in { 234 defm S_CSELECT_B32 : SOP2_32 <sop2<0x0a>, "s_cselect_b32", []>; 235 defm S_CSELECT_B64 : SOP2_64 <sop2<0x0b>, "s_cselect_b64", []>; 236 } // End Uses = [SCC] 237 238 let Defs = [SCC] in { 239 defm S_AND_B32 : SOP2_32 <sop2<0x0e, 0x0c>, "s_and_b32", 
240 [(set i32:$sdst, (and i32:$src0, i32:$src1))] 241 >; 242 243 defm S_AND_B64 : SOP2_64 <sop2<0x0f, 0x0d>, "s_and_b64", 244 [(set i64:$sdst, (and i64:$src0, i64:$src1))] 245 >; 246 247 defm S_OR_B32 : SOP2_32 <sop2<0x10, 0x0e>, "s_or_b32", 248 [(set i32:$sdst, (or i32:$src0, i32:$src1))] 249 >; 250 251 defm S_OR_B64 : SOP2_64 <sop2<0x11, 0x0f>, "s_or_b64", 252 [(set i64:$sdst, (or i64:$src0, i64:$src1))] 253 >; 254 255 defm S_XOR_B32 : SOP2_32 <sop2<0x12, 0x10>, "s_xor_b32", 256 [(set i32:$sdst, (xor i32:$src0, i32:$src1))] 257 >; 258 259 defm S_XOR_B64 : SOP2_64 <sop2<0x13, 0x11>, "s_xor_b64", 260 [(set i64:$sdst, (xor i64:$src0, i64:$src1))] 261 >; 262 defm S_ANDN2_B32 : SOP2_32 <sop2<0x14, 0x12>, "s_andn2_b32", []>; 263 defm S_ANDN2_B64 : SOP2_64 <sop2<0x15, 0x13>, "s_andn2_b64", []>; 264 defm S_ORN2_B32 : SOP2_32 <sop2<0x16, 0x14>, "s_orn2_b32", []>; 265 defm S_ORN2_B64 : SOP2_64 <sop2<0x17, 0x15>, "s_orn2_b64", []>; 266 defm S_NAND_B32 : SOP2_32 <sop2<0x18, 0x16>, "s_nand_b32", []>; 267 defm S_NAND_B64 : SOP2_64 <sop2<0x19, 0x17>, "s_nand_b64", []>; 268 defm S_NOR_B32 : SOP2_32 <sop2<0x1a, 0x18>, "s_nor_b32", []>; 269 defm S_NOR_B64 : SOP2_64 <sop2<0x1b, 0x19>, "s_nor_b64", []>; 270 defm S_XNOR_B32 : SOP2_32 <sop2<0x1c, 0x1a>, "s_xnor_b32", []>; 271 defm S_XNOR_B64 : SOP2_64 <sop2<0x1d, 0x1b>, "s_xnor_b64", []>; 272 } // End Defs = [SCC] 273 274 // Use added complexity so these patterns are preferred to the VALU patterns. 275 let AddedComplexity = 1 in { 276 let Defs = [SCC] in { 277 278 defm S_LSHL_B32 : SOP2_32 <sop2<0x1e, 0x1c>, "s_lshl_b32", 279 [(set i32:$sdst, (shl i32:$src0, i32:$src1))] 280 >; 281 defm S_LSHL_B64 : SOP2_64_32 <sop2<0x1f, 0x1d>, "s_lshl_b64", 282 [(set i64:$sdst, (shl i64:$src0, i32:$src1))] 283 >; 284 defm S_LSHR_B32 : SOP2_32 <sop2<0x20, 0x1e>, "s_lshr_b32", 285 [(set i32:$sdst, (srl i32:$src0, i32:$src1))] 286 >; 287 defm S_LSHR_B64 : SOP2_64_32 <sop2<0x21, 0x1f>, "s_lshr_b64", 288 [(set i64:$sdst, (srl i64:$src0, i32:$src1))] 289 >; 290 defm S_ASHR_I32 : SOP2_32 <sop2<0x22, 0x20>, "s_ashr_i32", 291 [(set i32:$sdst, (sra i32:$src0, i32:$src1))] 292 >; 293 defm S_ASHR_I64 : SOP2_64_32 <sop2<0x23, 0x21>, "s_ashr_i64", 294 [(set i64:$sdst, (sra i64:$src0, i32:$src1))] 295 >; 296 } // End Defs = [SCC] 297 298 defm S_BFM_B32 : SOP2_32 <sop2<0x24, 0x22>, "s_bfm_b32", 299 [(set i32:$sdst, (AMDGPUbfm i32:$src0, i32:$src1))]>; 300 defm S_BFM_B64 : SOP2_64_32_32 <sop2<0x25, 0x23>, "s_bfm_b64", []>; 301 defm S_MUL_I32 : SOP2_32 <sop2<0x26, 0x24>, "s_mul_i32", 302 [(set i32:$sdst, (mul i32:$src0, i32:$src1))] 303 >; 304 305 } // End AddedComplexity = 1 306 307 let Defs = [SCC] in { 308 defm S_BFE_U32 : SOP2_32 <sop2<0x27, 0x25>, "s_bfe_u32", []>; 309 defm S_BFE_I32 : SOP2_32 <sop2<0x28, 0x26>, "s_bfe_i32", []>; 310 defm S_BFE_U64 : SOP2_64_32 <sop2<0x29, 0x27>, "s_bfe_u64", []>; 311 defm S_BFE_I64 : SOP2_64_32 <sop2<0x2a, 0x28>, "s_bfe_i64", []>; 312 } // End Defs = [SCC] 313 314 let sdst = 0 in { 315 defm S_CBRANCH_G_FORK : SOP2_m < 316 sop2<0x2b, 0x29>, "s_cbranch_g_fork", (outs), 317 (ins SReg_64:$src0, SReg_64:$src1), "s_cbranch_g_fork $src0, $src1", [] 318 >; 319 } 320 321 let Defs = [SCC] in { 322 defm S_ABSDIFF_I32 : SOP2_32 <sop2<0x2c, 0x2a>, "s_absdiff_i32", []>; 323 } // End Defs = [SCC] 324 325 //===----------------------------------------------------------------------===// 326 // SOPC Instructions 327 //===----------------------------------------------------------------------===// 328 329 def S_CMP_EQ_I32 : SOPC_CMP_32 <0x00000000, "s_cmp_eq_i32", 
  COND_EQ>;
def S_CMP_LG_I32 : SOPC_CMP_32 <0x00000001, "s_cmp_lg_i32", COND_NE>;
def S_CMP_GT_I32 : SOPC_CMP_32 <0x00000002, "s_cmp_gt_i32", COND_SGT>;
def S_CMP_GE_I32 : SOPC_CMP_32 <0x00000003, "s_cmp_ge_i32", COND_SGE>;
def S_CMP_LT_I32 : SOPC_CMP_32 <0x00000004, "s_cmp_lt_i32", COND_SLT>;
def S_CMP_LE_I32 : SOPC_CMP_32 <0x00000005, "s_cmp_le_i32", COND_SLE>;
def S_CMP_EQ_U32 : SOPC_CMP_32 <0x00000006, "s_cmp_eq_u32", COND_EQ>;
def S_CMP_LG_U32 : SOPC_CMP_32 <0x00000007, "s_cmp_lg_u32", COND_NE>;
def S_CMP_GT_U32 : SOPC_CMP_32 <0x00000008, "s_cmp_gt_u32", COND_UGT>;
def S_CMP_GE_U32 : SOPC_CMP_32 <0x00000009, "s_cmp_ge_u32", COND_UGE>;
def S_CMP_LT_U32 : SOPC_CMP_32 <0x0000000a, "s_cmp_lt_u32", COND_ULT>;
def S_CMP_LE_U32 : SOPC_CMP_32 <0x0000000b, "s_cmp_le_u32", COND_ULE>;
def S_BITCMP0_B32 : SOPC_32 <0x0000000c, "s_bitcmp0_b32">;
def S_BITCMP1_B32 : SOPC_32 <0x0000000d, "s_bitcmp1_b32">;
def S_BITCMP0_B64 : SOPC_64_32 <0x0000000e, "s_bitcmp0_b64">;
def S_BITCMP1_B64 : SOPC_64_32 <0x0000000f, "s_bitcmp1_b64">;
def S_SETVSKIP : SOPC_32 <0x00000010, "s_setvskip">;

//===----------------------------------------------------------------------===//
// SOPK Instructions
//===----------------------------------------------------------------------===//

let isReMaterializable = 1, isMoveImm = 1 in {
defm S_MOVK_I32 : SOPK_32 <sopk<0x00>, "s_movk_i32", []>;
} // End isReMaterializable = 1
let Uses = [SCC] in {
  defm S_CMOVK_I32 : SOPK_32 <sopk<0x02, 0x01>, "s_cmovk_i32", []>;
}

let isCompare = 1 in {

/*
This instruction is disabled for now until we can figure out how to teach
the instruction selector to correctly use the S_CMP* vs V_CMP*
instructions.

When this instruction is enabled the code generator sometimes produces this
invalid sequence:

SCC = S_CMPK_EQ_I32 SGPR0, imm
VCC = COPY SCC
VGPR0 = V_CNDMASK VCC, VGPR0, VGPR1

defm S_CMPK_EQ_I32 : SOPK_SCC <sopk<0x03, 0x02>, "s_cmpk_eq_i32",
  [(set i1:$dst, (setcc i32:$src0, imm:$src1, SETEQ))]
>;
*/

defm S_CMPK_EQ_I32 : SOPK_SCC <sopk<0x03, 0x02>, "s_cmpk_eq_i32", []>;
defm S_CMPK_LG_I32 : SOPK_SCC <sopk<0x04, 0x03>, "s_cmpk_lg_i32", []>;
defm S_CMPK_GT_I32 : SOPK_SCC <sopk<0x05, 0x04>, "s_cmpk_gt_i32", []>;
defm S_CMPK_GE_I32 : SOPK_SCC <sopk<0x06, 0x05>, "s_cmpk_ge_i32", []>;
defm S_CMPK_LT_I32 : SOPK_SCC <sopk<0x07, 0x06>, "s_cmpk_lt_i32", []>;
defm S_CMPK_LE_I32 : SOPK_SCC <sopk<0x08, 0x07>, "s_cmpk_le_i32", []>;
defm S_CMPK_EQ_U32 : SOPK_SCC <sopk<0x09, 0x08>, "s_cmpk_eq_u32", []>;
defm S_CMPK_LG_U32 : SOPK_SCC <sopk<0x0a, 0x09>, "s_cmpk_lg_u32", []>;
defm S_CMPK_GT_U32 : SOPK_SCC <sopk<0x0b, 0x0a>, "s_cmpk_gt_u32", []>;
defm S_CMPK_GE_U32 : SOPK_SCC <sopk<0x0c, 0x0b>, "s_cmpk_ge_u32", []>;
defm S_CMPK_LT_U32 : SOPK_SCC <sopk<0x0d, 0x0c>, "s_cmpk_lt_u32", []>;
defm S_CMPK_LE_U32 : SOPK_SCC <sopk<0x0e, 0x0d>, "s_cmpk_le_u32", []>;
} // End isCompare = 1

let Defs = [SCC], isCommutable = 1, DisableEncoding = "$src0",
    Constraints = "$sdst = $src0" in {
defm S_ADDK_I32 : SOPK_32TIE <sopk<0x0f, 0x0e>, "s_addk_i32", []>;
defm S_MULK_I32 : SOPK_32TIE <sopk<0x10, 0x0f>, "s_mulk_i32", []>;
}

defm S_CBRANCH_I_FORK : SOPK_m <
  sopk<0x11, 0x10>, "s_cbranch_i_fork", (outs),
  (ins SReg_64:$sdst, u16imm:$simm16), " $sdst, $simm16"
>;

let mayLoad = 1 in {
defm S_GETREG_B32 : SOPK_m <
  sopk<0x12, 0x11>, "s_getreg_b32", (outs SReg_32:$sdst),
  (ins hwreg:$simm16), " $sdst, $simm16"
>;
}

defm S_SETREG_B32 : SOPK_m <
  sopk<0x13, 0x12>, "s_setreg_b32", (outs),
  (ins SReg_32:$sdst, hwreg:$simm16), " $simm16, $sdst"
>;
// FIXME: Not on SI?
414 //defm S_GETREG_REGRD_B32 : SOPK_32 <sopk<0x14, 0x13>, "s_getreg_regrd_b32", []>; 415 defm S_SETREG_IMM32_B32 : SOPK_IMM32 < 416 sopk<0x15, 0x14>, "s_setreg_imm32_b32", (outs), 417 (ins i32imm:$imm, hwreg:$simm16), " $simm16, $imm" 418 >; 419 420 //===----------------------------------------------------------------------===// 421 // SOPP Instructions 422 //===----------------------------------------------------------------------===// 423 424 def S_NOP : SOPP <0x00000000, (ins i16imm:$simm16), "s_nop $simm16">; 425 426 let isTerminator = 1 in { 427 428 def S_ENDPGM : SOPP <0x00000001, (ins), "s_endpgm", 429 [(AMDGPUendpgm)]> { 430 let simm16 = 0; 431 let isBarrier = 1; 432 let hasCtrlDep = 1; 433 let hasSideEffects = 1; 434 } 435 436 let isBranch = 1 in { 437 def S_BRANCH : SOPP < 438 0x00000002, (ins sopp_brtarget:$simm16), "s_branch $simm16", 439 [(br bb:$simm16)]> { 440 let isBarrier = 1; 441 } 442 443 let Uses = [SCC] in { 444 def S_CBRANCH_SCC0 : SOPP < 445 0x00000004, (ins sopp_brtarget:$simm16), 446 "s_cbranch_scc0 $simm16" 447 >; 448 def S_CBRANCH_SCC1 : SOPP < 449 0x00000005, (ins sopp_brtarget:$simm16), 450 "s_cbranch_scc1 $simm16", 451 [(si_uniform_br_scc SCC, bb:$simm16)] 452 >; 453 } // End Uses = [SCC] 454 455 let Uses = [VCC] in { 456 def S_CBRANCH_VCCZ : SOPP < 457 0x00000006, (ins sopp_brtarget:$simm16), 458 "s_cbranch_vccz $simm16" 459 >; 460 def S_CBRANCH_VCCNZ : SOPP < 461 0x00000007, (ins sopp_brtarget:$simm16), 462 "s_cbranch_vccnz $simm16" 463 >; 464 } // End Uses = [VCC] 465 466 let Uses = [EXEC] in { 467 def S_CBRANCH_EXECZ : SOPP < 468 0x00000008, (ins sopp_brtarget:$simm16), 469 "s_cbranch_execz $simm16" 470 >; 471 def S_CBRANCH_EXECNZ : SOPP < 472 0x00000009, (ins sopp_brtarget:$simm16), 473 "s_cbranch_execnz $simm16" 474 >; 475 } // End Uses = [EXEC] 476 477 478 } // End isBranch = 1 479 } // End isTerminator = 1 480 481 let hasSideEffects = 1 in { 482 def S_BARRIER : SOPP <0x0000000a, (ins), "s_barrier", 483 [(int_amdgcn_s_barrier)] 484 > { 485 let SchedRW = [WriteBarrier]; 486 let simm16 = 0; 487 let mayLoad = 1; 488 let mayStore = 1; 489 let isConvergent = 1; 490 } 491 492 let mayLoad = 1, mayStore = 1, hasSideEffects = 1 in 493 def S_WAITCNT : SOPP <0x0000000c, (ins WAIT_FLAG:$simm16), "s_waitcnt $simm16">; 494 def S_SETHALT : SOPP <0x0000000d, (ins i16imm:$simm16), "s_sethalt $simm16">; 495 496 // On SI the documentation says sleep for approximately 64 * low 2 497 // bits, consistent with the reported maximum of 448. On VI the 498 // maximum reported is 960 cycles, so 960 / 64 = 15 max, so is the 499 // maximum really 15 on VI? 500 def S_SLEEP : SOPP <0x0000000e, (ins i32imm:$simm16), 501 "s_sleep $simm16", [(int_amdgcn_s_sleep SIMM16bit:$simm16)]> { 502 let hasSideEffects = 1; 503 let mayLoad = 1; 504 let mayStore = 1; 505 } 506 507 def S_SETPRIO : SOPP <0x0000000f, (ins i16imm:$simm16), "s_setprio $simm16">; 508 509 let Uses = [EXEC, M0] in { 510 // FIXME: Should this be mayLoad+mayStore? 
511 def S_SENDMSG : SOPP <0x00000010, (ins SendMsgImm:$simm16), "s_sendmsg $simm16", 512 [(AMDGPUsendmsg (i32 imm:$simm16))] 513 >; 514 } // End Uses = [EXEC, M0] 515 516 def S_SENDMSGHALT : SOPP <0x00000011, (ins SendMsgImm:$simm16), "s_sendmsghalt $simm16">; 517 def S_TRAP : SOPP <0x00000012, (ins i16imm:$simm16), "s_trap $simm16">; 518 def S_ICACHE_INV : SOPP <0x00000013, (ins), "s_icache_inv"> { 519 let simm16 = 0; 520 } 521 def S_INCPERFLEVEL : SOPP <0x00000014, (ins i16imm:$simm16), "s_incperflevel $simm16">; 522 def S_DECPERFLEVEL : SOPP <0x00000015, (ins i16imm:$simm16), "s_decperflevel $simm16">; 523 def S_TTRACEDATA : SOPP <0x00000016, (ins), "s_ttracedata"> { 524 let simm16 = 0; 525 } 526 } // End hasSideEffects 527 528 //===----------------------------------------------------------------------===// 529 // VOPC Instructions 530 //===----------------------------------------------------------------------===// 531 532 let isCompare = 1, isCommutable = 1 in { 533 534 defm V_CMP_F_F32 : VOPC_F32 <vopc<0x0, 0x40>, "v_cmp_f_f32">; 535 defm V_CMP_LT_F32 : VOPC_F32 <vopc<0x1, 0x41>, "v_cmp_lt_f32", COND_OLT, "v_cmp_gt_f32">; 536 defm V_CMP_EQ_F32 : VOPC_F32 <vopc<0x2, 0x42>, "v_cmp_eq_f32", COND_OEQ>; 537 defm V_CMP_LE_F32 : VOPC_F32 <vopc<0x3, 0x43>, "v_cmp_le_f32", COND_OLE, "v_cmp_ge_f32">; 538 defm V_CMP_GT_F32 : VOPC_F32 <vopc<0x4, 0x44>, "v_cmp_gt_f32", COND_OGT>; 539 defm V_CMP_LG_F32 : VOPC_F32 <vopc<0x5, 0x45>, "v_cmp_lg_f32", COND_ONE>; 540 defm V_CMP_GE_F32 : VOPC_F32 <vopc<0x6, 0x46>, "v_cmp_ge_f32", COND_OGE>; 541 defm V_CMP_O_F32 : VOPC_F32 <vopc<0x7, 0x47>, "v_cmp_o_f32", COND_O>; 542 defm V_CMP_U_F32 : VOPC_F32 <vopc<0x8, 0x48>, "v_cmp_u_f32", COND_UO>; 543 defm V_CMP_NGE_F32 : VOPC_F32 <vopc<0x9, 0x49>, "v_cmp_nge_f32", COND_ULT, "v_cmp_nle_f32">; 544 defm V_CMP_NLG_F32 : VOPC_F32 <vopc<0xa, 0x4a>, "v_cmp_nlg_f32", COND_UEQ>; 545 defm V_CMP_NGT_F32 : VOPC_F32 <vopc<0xb, 0x4b>, "v_cmp_ngt_f32", COND_ULE, "v_cmp_nlt_f32">; 546 defm V_CMP_NLE_F32 : VOPC_F32 <vopc<0xc, 0x4c>, "v_cmp_nle_f32", COND_UGT>; 547 defm V_CMP_NEQ_F32 : VOPC_F32 <vopc<0xd, 0x4d>, "v_cmp_neq_f32", COND_UNE>; 548 defm V_CMP_NLT_F32 : VOPC_F32 <vopc<0xe, 0x4e>, "v_cmp_nlt_f32", COND_UGE>; 549 defm V_CMP_TRU_F32 : VOPC_F32 <vopc<0xf, 0x4f>, "v_cmp_tru_f32">; 550 551 552 defm V_CMPX_F_F32 : VOPCX_F32 <vopc<0x10, 0x50>, "v_cmpx_f_f32">; 553 defm V_CMPX_LT_F32 : VOPCX_F32 <vopc<0x11, 0x51>, "v_cmpx_lt_f32", "v_cmpx_gt_f32">; 554 defm V_CMPX_EQ_F32 : VOPCX_F32 <vopc<0x12, 0x52>, "v_cmpx_eq_f32">; 555 defm V_CMPX_LE_F32 : VOPCX_F32 <vopc<0x13, 0x53>, "v_cmpx_le_f32", "v_cmpx_ge_f32">; 556 defm V_CMPX_GT_F32 : VOPCX_F32 <vopc<0x14, 0x54>, "v_cmpx_gt_f32">; 557 defm V_CMPX_LG_F32 : VOPCX_F32 <vopc<0x15, 0x55>, "v_cmpx_lg_f32">; 558 defm V_CMPX_GE_F32 : VOPCX_F32 <vopc<0x16, 0x56>, "v_cmpx_ge_f32">; 559 defm V_CMPX_O_F32 : VOPCX_F32 <vopc<0x17, 0x57>, "v_cmpx_o_f32">; 560 defm V_CMPX_U_F32 : VOPCX_F32 <vopc<0x18, 0x58>, "v_cmpx_u_f32">; 561 defm V_CMPX_NGE_F32 : VOPCX_F32 <vopc<0x19, 0x59>, "v_cmpx_nge_f32">; 562 defm V_CMPX_NLG_F32 : VOPCX_F32 <vopc<0x1a, 0x5a>, "v_cmpx_nlg_f32">; 563 defm V_CMPX_NGT_F32 : VOPCX_F32 <vopc<0x1b, 0x5b>, "v_cmpx_ngt_f32">; 564 defm V_CMPX_NLE_F32 : VOPCX_F32 <vopc<0x1c, 0x5c>, "v_cmpx_nle_f32">; 565 defm V_CMPX_NEQ_F32 : VOPCX_F32 <vopc<0x1d, 0x5d>, "v_cmpx_neq_f32">; 566 defm V_CMPX_NLT_F32 : VOPCX_F32 <vopc<0x1e, 0x5e>, "v_cmpx_nlt_f32">; 567 defm V_CMPX_TRU_F32 : VOPCX_F32 <vopc<0x1f, 0x5f>, "v_cmpx_tru_f32">; 568 569 570 defm V_CMP_F_F64 : VOPC_F64 <vopc<0x20, 0x60>, 
"v_cmp_f_f64">; 571 defm V_CMP_LT_F64 : VOPC_F64 <vopc<0x21, 0x61>, "v_cmp_lt_f64", COND_OLT, "v_cmp_gt_f64">; 572 defm V_CMP_EQ_F64 : VOPC_F64 <vopc<0x22, 0x62>, "v_cmp_eq_f64", COND_OEQ>; 573 defm V_CMP_LE_F64 : VOPC_F64 <vopc<0x23, 0x63>, "v_cmp_le_f64", COND_OLE, "v_cmp_ge_f64">; 574 defm V_CMP_GT_F64 : VOPC_F64 <vopc<0x24, 0x64>, "v_cmp_gt_f64", COND_OGT>; 575 defm V_CMP_LG_F64 : VOPC_F64 <vopc<0x25, 0x65>, "v_cmp_lg_f64", COND_ONE>; 576 defm V_CMP_GE_F64 : VOPC_F64 <vopc<0x26, 0x66>, "v_cmp_ge_f64", COND_OGE>; 577 defm V_CMP_O_F64 : VOPC_F64 <vopc<0x27, 0x67>, "v_cmp_o_f64", COND_O>; 578 defm V_CMP_U_F64 : VOPC_F64 <vopc<0x28, 0x68>, "v_cmp_u_f64", COND_UO>; 579 defm V_CMP_NGE_F64 : VOPC_F64 <vopc<0x29, 0x69>, "v_cmp_nge_f64", COND_ULT, "v_cmp_nle_f64">; 580 defm V_CMP_NLG_F64 : VOPC_F64 <vopc<0x2a, 0x6a>, "v_cmp_nlg_f64", COND_UEQ>; 581 defm V_CMP_NGT_F64 : VOPC_F64 <vopc<0x2b, 0x6b>, "v_cmp_ngt_f64", COND_ULE, "v_cmp_nlt_f64">; 582 defm V_CMP_NLE_F64 : VOPC_F64 <vopc<0x2c, 0x6c>, "v_cmp_nle_f64", COND_UGT>; 583 defm V_CMP_NEQ_F64 : VOPC_F64 <vopc<0x2d, 0x6d>, "v_cmp_neq_f64", COND_UNE>; 584 defm V_CMP_NLT_F64 : VOPC_F64 <vopc<0x2e, 0x6e>, "v_cmp_nlt_f64", COND_UGE>; 585 defm V_CMP_TRU_F64 : VOPC_F64 <vopc<0x2f, 0x6f>, "v_cmp_tru_f64">; 586 587 588 defm V_CMPX_F_F64 : VOPCX_F64 <vopc<0x30, 0x70>, "v_cmpx_f_f64">; 589 defm V_CMPX_LT_F64 : VOPCX_F64 <vopc<0x31, 0x71>, "v_cmpx_lt_f64", "v_cmpx_gt_f64">; 590 defm V_CMPX_EQ_F64 : VOPCX_F64 <vopc<0x32, 0x72>, "v_cmpx_eq_f64">; 591 defm V_CMPX_LE_F64 : VOPCX_F64 <vopc<0x33, 0x73>, "v_cmpx_le_f64", "v_cmpx_ge_f64">; 592 defm V_CMPX_GT_F64 : VOPCX_F64 <vopc<0x34, 0x74>, "v_cmpx_gt_f64">; 593 defm V_CMPX_LG_F64 : VOPCX_F64 <vopc<0x35, 0x75>, "v_cmpx_lg_f64">; 594 defm V_CMPX_GE_F64 : VOPCX_F64 <vopc<0x36, 0x76>, "v_cmpx_ge_f64">; 595 defm V_CMPX_O_F64 : VOPCX_F64 <vopc<0x37, 0x77>, "v_cmpx_o_f64">; 596 defm V_CMPX_U_F64 : VOPCX_F64 <vopc<0x38, 0x78>, "v_cmpx_u_f64">; 597 defm V_CMPX_NGE_F64 : VOPCX_F64 <vopc<0x39, 0x79>, "v_cmpx_nge_f64", "v_cmpx_nle_f64">; 598 defm V_CMPX_NLG_F64 : VOPCX_F64 <vopc<0x3a, 0x7a>, "v_cmpx_nlg_f64">; 599 defm V_CMPX_NGT_F64 : VOPCX_F64 <vopc<0x3b, 0x7b>, "v_cmpx_ngt_f64", "v_cmpx_nlt_f64">; 600 defm V_CMPX_NLE_F64 : VOPCX_F64 <vopc<0x3c, 0x7c>, "v_cmpx_nle_f64">; 601 defm V_CMPX_NEQ_F64 : VOPCX_F64 <vopc<0x3d, 0x7d>, "v_cmpx_neq_f64">; 602 defm V_CMPX_NLT_F64 : VOPCX_F64 <vopc<0x3e, 0x7e>, "v_cmpx_nlt_f64">; 603 defm V_CMPX_TRU_F64 : VOPCX_F64 <vopc<0x3f, 0x7f>, "v_cmpx_tru_f64">; 604 605 606 let SubtargetPredicate = isSICI in { 607 608 defm V_CMPS_F_F32 : VOPC_F32 <vopc<0x40>, "v_cmps_f_f32">; 609 defm V_CMPS_LT_F32 : VOPC_F32 <vopc<0x41>, "v_cmps_lt_f32", COND_NULL, "v_cmps_gt_f32">; 610 defm V_CMPS_EQ_F32 : VOPC_F32 <vopc<0x42>, "v_cmps_eq_f32">; 611 defm V_CMPS_LE_F32 : VOPC_F32 <vopc<0x43>, "v_cmps_le_f32", COND_NULL, "v_cmps_ge_f32">; 612 defm V_CMPS_GT_F32 : VOPC_F32 <vopc<0x44>, "v_cmps_gt_f32">; 613 defm V_CMPS_LG_F32 : VOPC_F32 <vopc<0x45>, "v_cmps_lg_f32">; 614 defm V_CMPS_GE_F32 : VOPC_F32 <vopc<0x46>, "v_cmps_ge_f32">; 615 defm V_CMPS_O_F32 : VOPC_F32 <vopc<0x47>, "v_cmps_o_f32">; 616 defm V_CMPS_U_F32 : VOPC_F32 <vopc<0x48>, "v_cmps_u_f32">; 617 defm V_CMPS_NGE_F32 : VOPC_F32 <vopc<0x49>, "v_cmps_nge_f32", COND_NULL, "v_cmps_nle_f32">; 618 defm V_CMPS_NLG_F32 : VOPC_F32 <vopc<0x4a>, "v_cmps_nlg_f32">; 619 defm V_CMPS_NGT_F32 : VOPC_F32 <vopc<0x4b>, "v_cmps_ngt_f32", COND_NULL, "v_cmps_nlt_f32">; 620 defm V_CMPS_NLE_F32 : VOPC_F32 <vopc<0x4c>, "v_cmps_nle_f32">; 621 defm V_CMPS_NEQ_F32 : VOPC_F32 
<vopc<0x4d>, "v_cmps_neq_f32">; 622 defm V_CMPS_NLT_F32 : VOPC_F32 <vopc<0x4e>, "v_cmps_nlt_f32">; 623 defm V_CMPS_TRU_F32 : VOPC_F32 <vopc<0x4f>, "v_cmps_tru_f32">; 624 625 626 defm V_CMPSX_F_F32 : VOPCX_F32 <vopc<0x50>, "v_cmpsx_f_f32">; 627 defm V_CMPSX_LT_F32 : VOPCX_F32 <vopc<0x51>, "v_cmpsx_lt_f32", "v_cmpsx_gt_f32">; 628 defm V_CMPSX_EQ_F32 : VOPCX_F32 <vopc<0x52>, "v_cmpsx_eq_f32">; 629 defm V_CMPSX_LE_F32 : VOPCX_F32 <vopc<0x53>, "v_cmpsx_le_f32", "v_cmpsx_ge_f32">; 630 defm V_CMPSX_GT_F32 : VOPCX_F32 <vopc<0x54>, "v_cmpsx_gt_f32">; 631 defm V_CMPSX_LG_F32 : VOPCX_F32 <vopc<0x55>, "v_cmpsx_lg_f32">; 632 defm V_CMPSX_GE_F32 : VOPCX_F32 <vopc<0x56>, "v_cmpsx_ge_f32">; 633 defm V_CMPSX_O_F32 : VOPCX_F32 <vopc<0x57>, "v_cmpsx_o_f32">; 634 defm V_CMPSX_U_F32 : VOPCX_F32 <vopc<0x58>, "v_cmpsx_u_f32">; 635 defm V_CMPSX_NGE_F32 : VOPCX_F32 <vopc<0x59>, "v_cmpsx_nge_f32", "v_cmpsx_nle_f32">; 636 defm V_CMPSX_NLG_F32 : VOPCX_F32 <vopc<0x5a>, "v_cmpsx_nlg_f32">; 637 defm V_CMPSX_NGT_F32 : VOPCX_F32 <vopc<0x5b>, "v_cmpsx_ngt_f32", "v_cmpsx_nlt_f32">; 638 defm V_CMPSX_NLE_F32 : VOPCX_F32 <vopc<0x5c>, "v_cmpsx_nle_f32">; 639 defm V_CMPSX_NEQ_F32 : VOPCX_F32 <vopc<0x5d>, "v_cmpsx_neq_f32">; 640 defm V_CMPSX_NLT_F32 : VOPCX_F32 <vopc<0x5e>, "v_cmpsx_nlt_f32">; 641 defm V_CMPSX_TRU_F32 : VOPCX_F32 <vopc<0x5f>, "v_cmpsx_tru_f32">; 642 643 644 defm V_CMPS_F_F64 : VOPC_F64 <vopc<0x60>, "v_cmps_f_f64">; 645 defm V_CMPS_LT_F64 : VOPC_F64 <vopc<0x61>, "v_cmps_lt_f64", COND_NULL, "v_cmps_gt_f64">; 646 defm V_CMPS_EQ_F64 : VOPC_F64 <vopc<0x62>, "v_cmps_eq_f64">; 647 defm V_CMPS_LE_F64 : VOPC_F64 <vopc<0x63>, "v_cmps_le_f64", COND_NULL, "v_cmps_ge_f64">; 648 defm V_CMPS_GT_F64 : VOPC_F64 <vopc<0x64>, "v_cmps_gt_f64">; 649 defm V_CMPS_LG_F64 : VOPC_F64 <vopc<0x65>, "v_cmps_lg_f64">; 650 defm V_CMPS_GE_F64 : VOPC_F64 <vopc<0x66>, "v_cmps_ge_f64">; 651 defm V_CMPS_O_F64 : VOPC_F64 <vopc<0x67>, "v_cmps_o_f64">; 652 defm V_CMPS_U_F64 : VOPC_F64 <vopc<0x68>, "v_cmps_u_f64">; 653 defm V_CMPS_NGE_F64 : VOPC_F64 <vopc<0x69>, "v_cmps_nge_f64", COND_NULL, "v_cmps_nle_f64">; 654 defm V_CMPS_NLG_F64 : VOPC_F64 <vopc<0x6a>, "v_cmps_nlg_f64">; 655 defm V_CMPS_NGT_F64 : VOPC_F64 <vopc<0x6b>, "v_cmps_ngt_f64", COND_NULL, "v_cmps_nlt_f64">; 656 defm V_CMPS_NLE_F64 : VOPC_F64 <vopc<0x6c>, "v_cmps_nle_f64">; 657 defm V_CMPS_NEQ_F64 : VOPC_F64 <vopc<0x6d>, "v_cmps_neq_f64">; 658 defm V_CMPS_NLT_F64 : VOPC_F64 <vopc<0x6e>, "v_cmps_nlt_f64">; 659 defm V_CMPS_TRU_F64 : VOPC_F64 <vopc<0x6f>, "v_cmps_tru_f64">; 660 661 662 defm V_CMPSX_F_F64 : VOPCX_F64 <vopc<0x70>, "v_cmpsx_f_f64">; 663 defm V_CMPSX_LT_F64 : VOPCX_F64 <vopc<0x71>, "v_cmpsx_lt_f64", "v_cmpsx_gt_f64">; 664 defm V_CMPSX_EQ_F64 : VOPCX_F64 <vopc<0x72>, "v_cmpsx_eq_f64">; 665 defm V_CMPSX_LE_F64 : VOPCX_F64 <vopc<0x73>, "v_cmpsx_le_f64", "v_cmpsx_ge_f64">; 666 defm V_CMPSX_GT_F64 : VOPCX_F64 <vopc<0x74>, "v_cmpsx_gt_f64">; 667 defm V_CMPSX_LG_F64 : VOPCX_F64 <vopc<0x75>, "v_cmpsx_lg_f64">; 668 defm V_CMPSX_GE_F64 : VOPCX_F64 <vopc<0x76>, "v_cmpsx_ge_f64">; 669 defm V_CMPSX_O_F64 : VOPCX_F64 <vopc<0x77>, "v_cmpsx_o_f64">; 670 defm V_CMPSX_U_F64 : VOPCX_F64 <vopc<0x78>, "v_cmpsx_u_f64">; 671 defm V_CMPSX_NGE_F64 : VOPCX_F64 <vopc<0x79>, "v_cmpsx_nge_f64", "v_cmpsx_nle_f64">; 672 defm V_CMPSX_NLG_F64 : VOPCX_F64 <vopc<0x7a>, "v_cmpsx_nlg_f64">; 673 defm V_CMPSX_NGT_F64 : VOPCX_F64 <vopc<0x7b>, "v_cmpsx_ngt_f64", "v_cmpsx_nlt_f64">; 674 defm V_CMPSX_NLE_F64 : VOPCX_F64 <vopc<0x7c>, "v_cmpsx_nle_f64">; 675 defm V_CMPSX_NEQ_F64 : VOPCX_F64 <vopc<0x7d>, "v_cmpsx_neq_f64">; 
676 defm V_CMPSX_NLT_F64 : VOPCX_F64 <vopc<0x7e>, "v_cmpsx_nlt_f64">; 677 defm V_CMPSX_TRU_F64 : VOPCX_F64 <vopc<0x7f>, "v_cmpsx_tru_f64">; 678 679 } // End SubtargetPredicate = isSICI 680 681 defm V_CMP_F_I32 : VOPC_I32 <vopc<0x80, 0xc0>, "v_cmp_f_i32">; 682 defm V_CMP_LT_I32 : VOPC_I32 <vopc<0x81, 0xc1>, "v_cmp_lt_i32", COND_SLT, "v_cmp_gt_i32">; 683 defm V_CMP_EQ_I32 : VOPC_I32 <vopc<0x82, 0xc2>, "v_cmp_eq_i32", COND_EQ>; 684 defm V_CMP_LE_I32 : VOPC_I32 <vopc<0x83, 0xc3>, "v_cmp_le_i32", COND_SLE, "v_cmp_ge_i32">; 685 defm V_CMP_GT_I32 : VOPC_I32 <vopc<0x84, 0xc4>, "v_cmp_gt_i32", COND_SGT>; 686 defm V_CMP_NE_I32 : VOPC_I32 <vopc<0x85, 0xc5>, "v_cmp_ne_i32", COND_NE>; 687 defm V_CMP_GE_I32 : VOPC_I32 <vopc<0x86, 0xc6>, "v_cmp_ge_i32", COND_SGE>; 688 defm V_CMP_T_I32 : VOPC_I32 <vopc<0x87, 0xc7>, "v_cmp_t_i32">; 689 690 691 defm V_CMPX_F_I32 : VOPCX_I32 <vopc<0x90, 0xd0>, "v_cmpx_f_i32">; 692 defm V_CMPX_LT_I32 : VOPCX_I32 <vopc<0x91, 0xd1>, "v_cmpx_lt_i32", "v_cmpx_gt_i32">; 693 defm V_CMPX_EQ_I32 : VOPCX_I32 <vopc<0x92, 0xd2>, "v_cmpx_eq_i32">; 694 defm V_CMPX_LE_I32 : VOPCX_I32 <vopc<0x93, 0xd3>, "v_cmpx_le_i32", "v_cmpx_ge_i32">; 695 defm V_CMPX_GT_I32 : VOPCX_I32 <vopc<0x94, 0xd4>, "v_cmpx_gt_i32">; 696 defm V_CMPX_NE_I32 : VOPCX_I32 <vopc<0x95, 0xd5>, "v_cmpx_ne_i32">; 697 defm V_CMPX_GE_I32 : VOPCX_I32 <vopc<0x96, 0xd6>, "v_cmpx_ge_i32">; 698 defm V_CMPX_T_I32 : VOPCX_I32 <vopc<0x97, 0xd7>, "v_cmpx_t_i32">; 699 700 701 defm V_CMP_F_I64 : VOPC_I64 <vopc<0xa0, 0xe0>, "v_cmp_f_i64">; 702 defm V_CMP_LT_I64 : VOPC_I64 <vopc<0xa1, 0xe1>, "v_cmp_lt_i64", COND_SLT, "v_cmp_gt_i64">; 703 defm V_CMP_EQ_I64 : VOPC_I64 <vopc<0xa2, 0xe2>, "v_cmp_eq_i64", COND_EQ>; 704 defm V_CMP_LE_I64 : VOPC_I64 <vopc<0xa3, 0xe3>, "v_cmp_le_i64", COND_SLE, "v_cmp_ge_i64">; 705 defm V_CMP_GT_I64 : VOPC_I64 <vopc<0xa4, 0xe4>, "v_cmp_gt_i64", COND_SGT>; 706 defm V_CMP_NE_I64 : VOPC_I64 <vopc<0xa5, 0xe5>, "v_cmp_ne_i64", COND_NE>; 707 defm V_CMP_GE_I64 : VOPC_I64 <vopc<0xa6, 0xe6>, "v_cmp_ge_i64", COND_SGE>; 708 defm V_CMP_T_I64 : VOPC_I64 <vopc<0xa7, 0xe7>, "v_cmp_t_i64">; 709 710 711 defm V_CMPX_F_I64 : VOPCX_I64 <vopc<0xb0, 0xf0>, "v_cmpx_f_i64">; 712 defm V_CMPX_LT_I64 : VOPCX_I64 <vopc<0xb1, 0xf1>, "v_cmpx_lt_i64", "v_cmpx_gt_i64">; 713 defm V_CMPX_EQ_I64 : VOPCX_I64 <vopc<0xb2, 0xf2>, "v_cmpx_eq_i64">; 714 defm V_CMPX_LE_I64 : VOPCX_I64 <vopc<0xb3, 0xf3>, "v_cmpx_le_i64", "v_cmpx_ge_i64">; 715 defm V_CMPX_GT_I64 : VOPCX_I64 <vopc<0xb4, 0xf4>, "v_cmpx_gt_i64">; 716 defm V_CMPX_NE_I64 : VOPCX_I64 <vopc<0xb5, 0xf5>, "v_cmpx_ne_i64">; 717 defm V_CMPX_GE_I64 : VOPCX_I64 <vopc<0xb6, 0xf6>, "v_cmpx_ge_i64">; 718 defm V_CMPX_T_I64 : VOPCX_I64 <vopc<0xb7, 0xf7>, "v_cmpx_t_i64">; 719 720 721 defm V_CMP_F_U32 : VOPC_I32 <vopc<0xc0, 0xc8>, "v_cmp_f_u32">; 722 defm V_CMP_LT_U32 : VOPC_I32 <vopc<0xc1, 0xc9>, "v_cmp_lt_u32", COND_ULT, "v_cmp_gt_u32">; 723 defm V_CMP_EQ_U32 : VOPC_I32 <vopc<0xc2, 0xca>, "v_cmp_eq_u32", COND_EQ>; 724 defm V_CMP_LE_U32 : VOPC_I32 <vopc<0xc3, 0xcb>, "v_cmp_le_u32", COND_ULE, "v_cmp_ge_u32">; 725 defm V_CMP_GT_U32 : VOPC_I32 <vopc<0xc4, 0xcc>, "v_cmp_gt_u32", COND_UGT>; 726 defm V_CMP_NE_U32 : VOPC_I32 <vopc<0xc5, 0xcd>, "v_cmp_ne_u32", COND_NE>; 727 defm V_CMP_GE_U32 : VOPC_I32 <vopc<0xc6, 0xce>, "v_cmp_ge_u32", COND_UGE>; 728 defm V_CMP_T_U32 : VOPC_I32 <vopc<0xc7, 0xcf>, "v_cmp_t_u32">; 729 730 731 defm V_CMPX_F_U32 : VOPCX_I32 <vopc<0xd0, 0xd8>, "v_cmpx_f_u32">; 732 defm V_CMPX_LT_U32 : VOPCX_I32 <vopc<0xd1, 0xd9>, "v_cmpx_lt_u32", "v_cmpx_gt_u32">; 733 defm V_CMPX_EQ_U32 : VOPCX_I32 
  <vopc<0xd2, 0xda>, "v_cmpx_eq_u32">;
defm V_CMPX_LE_U32 : VOPCX_I32 <vopc<0xd3, 0xdb>, "v_cmpx_le_u32", "v_cmpx_ge_u32">;
defm V_CMPX_GT_U32 : VOPCX_I32 <vopc<0xd4, 0xdc>, "v_cmpx_gt_u32">;
defm V_CMPX_NE_U32 : VOPCX_I32 <vopc<0xd5, 0xdd>, "v_cmpx_ne_u32">;
defm V_CMPX_GE_U32 : VOPCX_I32 <vopc<0xd6, 0xde>, "v_cmpx_ge_u32">;
defm V_CMPX_T_U32 : VOPCX_I32 <vopc<0xd7, 0xdf>, "v_cmpx_t_u32">;


defm V_CMP_F_U64 : VOPC_I64 <vopc<0xe0, 0xe8>, "v_cmp_f_u64">;
defm V_CMP_LT_U64 : VOPC_I64 <vopc<0xe1, 0xe9>, "v_cmp_lt_u64", COND_ULT, "v_cmp_gt_u64">;
defm V_CMP_EQ_U64 : VOPC_I64 <vopc<0xe2, 0xea>, "v_cmp_eq_u64", COND_EQ>;
defm V_CMP_LE_U64 : VOPC_I64 <vopc<0xe3, 0xeb>, "v_cmp_le_u64", COND_ULE, "v_cmp_ge_u64">;
defm V_CMP_GT_U64 : VOPC_I64 <vopc<0xe4, 0xec>, "v_cmp_gt_u64", COND_UGT>;
defm V_CMP_NE_U64 : VOPC_I64 <vopc<0xe5, 0xed>, "v_cmp_ne_u64", COND_NE>;
defm V_CMP_GE_U64 : VOPC_I64 <vopc<0xe6, 0xee>, "v_cmp_ge_u64", COND_UGE>;
defm V_CMP_T_U64 : VOPC_I64 <vopc<0xe7, 0xef>, "v_cmp_t_u64">;

defm V_CMPX_F_U64 : VOPCX_I64 <vopc<0xf0, 0xf8>, "v_cmpx_f_u64">;
defm V_CMPX_LT_U64 : VOPCX_I64 <vopc<0xf1, 0xf9>, "v_cmpx_lt_u64", "v_cmpx_gt_u64">;
defm V_CMPX_EQ_U64 : VOPCX_I64 <vopc<0xf2, 0xfa>, "v_cmpx_eq_u64">;
defm V_CMPX_LE_U64 : VOPCX_I64 <vopc<0xf3, 0xfb>, "v_cmpx_le_u64", "v_cmpx_ge_u64">;
defm V_CMPX_GT_U64 : VOPCX_I64 <vopc<0xf4, 0xfc>, "v_cmpx_gt_u64">;
defm V_CMPX_NE_U64 : VOPCX_I64 <vopc<0xf5, 0xfd>, "v_cmpx_ne_u64">;
defm V_CMPX_GE_U64 : VOPCX_I64 <vopc<0xf6, 0xfe>, "v_cmpx_ge_u64">;
defm V_CMPX_T_U64 : VOPCX_I64 <vopc<0xf7, 0xff>, "v_cmpx_t_u64">;

} // End isCompare = 1, isCommutable = 1

defm V_CMP_CLASS_F32 : VOPC_CLASS_F32 <vopc<0x88, 0x10>, "v_cmp_class_f32">;
defm V_CMPX_CLASS_F32 : VOPCX_CLASS_F32 <vopc<0x98, 0x11>, "v_cmpx_class_f32">;
defm V_CMP_CLASS_F64 : VOPC_CLASS_F64 <vopc<0xa8, 0x12>, "v_cmp_class_f64">;
defm V_CMPX_CLASS_F64 : VOPCX_CLASS_F64 <vopc<0xb8, 0x13>, "v_cmpx_class_f64">;

//===----------------------------------------------------------------------===//
// DS Instructions
//===----------------------------------------------------------------------===//

defm DS_ADD_U32 : DS_1A1D_NORET <0x0, "ds_add_u32", VGPR_32>;
defm DS_SUB_U32 : DS_1A1D_NORET <0x1, "ds_sub_u32", VGPR_32>;
defm DS_RSUB_U32 : DS_1A1D_NORET <0x2, "ds_rsub_u32", VGPR_32>;
defm DS_INC_U32 : DS_1A1D_NORET <0x3, "ds_inc_u32", VGPR_32>;
defm DS_DEC_U32 : DS_1A1D_NORET <0x4, "ds_dec_u32", VGPR_32>;
defm DS_MIN_I32 : DS_1A1D_NORET <0x5, "ds_min_i32", VGPR_32>;
defm DS_MAX_I32 : DS_1A1D_NORET <0x6, "ds_max_i32", VGPR_32>;
defm DS_MIN_U32 : DS_1A1D_NORET <0x7, "ds_min_u32", VGPR_32>;
defm DS_MAX_U32 : DS_1A1D_NORET <0x8, "ds_max_u32", VGPR_32>;
defm DS_AND_B32 : DS_1A1D_NORET <0x9, "ds_and_b32", VGPR_32>;
defm DS_OR_B32 : DS_1A1D_NORET <0xa, "ds_or_b32", VGPR_32>;
defm DS_XOR_B32 : DS_1A1D_NORET <0xb, "ds_xor_b32", VGPR_32>;
defm DS_MSKOR_B32 : DS_1A2D_NORET <0xc, "ds_mskor_b32", VGPR_32>;
let mayLoad = 0 in {
defm DS_WRITE_B32 : DS_1A1D_NORET <0xd, "ds_write_b32", VGPR_32>;
defm DS_WRITE2_B32 : DS_1A2D_Off8_NORET <0xe, "ds_write2_b32", VGPR_32>;
defm DS_WRITE2ST64_B32 : DS_1A2D_Off8_NORET <0xf, "ds_write2st64_b32", VGPR_32>;
}
defm DS_CMPST_B32 : DS_1A2D_NORET <0x10, "ds_cmpst_b32", VGPR_32>;
defm DS_CMPST_F32 : DS_1A2D_NORET <0x11, "ds_cmpst_f32", VGPR_32>;
defm DS_MIN_F32 : DS_1A2D_NORET
<0x12, "ds_min_f32", VGPR_32>; 791 defm DS_MAX_F32 : DS_1A2D_NORET <0x13, "ds_max_f32", VGPR_32>; 792 793 defm DS_GWS_INIT : DS_1A_GDS <0x19, "ds_gws_init">; 794 defm DS_GWS_SEMA_V : DS_1A_GDS <0x1a, "ds_gws_sema_v">; 795 defm DS_GWS_SEMA_BR : DS_1A_GDS <0x1b, "ds_gws_sema_br">; 796 defm DS_GWS_SEMA_P : DS_1A_GDS <0x1c, "ds_gws_sema_p">; 797 defm DS_GWS_BARRIER : DS_1A_GDS <0x1d, "ds_gws_barrier">; 798 let mayLoad = 0 in { 799 defm DS_WRITE_B8 : DS_1A1D_NORET <0x1e, "ds_write_b8", VGPR_32>; 800 defm DS_WRITE_B16 : DS_1A1D_NORET <0x1f, "ds_write_b16", VGPR_32>; 801 } 802 defm DS_ADD_RTN_U32 : DS_1A1D_RET <0x20, "ds_add_rtn_u32", VGPR_32, "ds_add_u32">; 803 defm DS_SUB_RTN_U32 : DS_1A1D_RET <0x21, "ds_sub_rtn_u32", VGPR_32, "ds_sub_u32">; 804 defm DS_RSUB_RTN_U32 : DS_1A1D_RET <0x22, "ds_rsub_rtn_u32", VGPR_32, "ds_rsub_u32">; 805 defm DS_INC_RTN_U32 : DS_1A1D_RET <0x23, "ds_inc_rtn_u32", VGPR_32, "ds_inc_u32">; 806 defm DS_DEC_RTN_U32 : DS_1A1D_RET <0x24, "ds_dec_rtn_u32", VGPR_32, "ds_dec_u32">; 807 defm DS_MIN_RTN_I32 : DS_1A1D_RET <0x25, "ds_min_rtn_i32", VGPR_32, "ds_min_i32">; 808 defm DS_MAX_RTN_I32 : DS_1A1D_RET <0x26, "ds_max_rtn_i32", VGPR_32, "ds_max_i32">; 809 defm DS_MIN_RTN_U32 : DS_1A1D_RET <0x27, "ds_min_rtn_u32", VGPR_32, "ds_min_u32">; 810 defm DS_MAX_RTN_U32 : DS_1A1D_RET <0x28, "ds_max_rtn_u32", VGPR_32, "ds_max_u32">; 811 defm DS_AND_RTN_B32 : DS_1A1D_RET <0x29, "ds_and_rtn_b32", VGPR_32, "ds_and_b32">; 812 defm DS_OR_RTN_B32 : DS_1A1D_RET <0x2a, "ds_or_rtn_b32", VGPR_32, "ds_or_b32">; 813 defm DS_XOR_RTN_B32 : DS_1A1D_RET <0x2b, "ds_xor_rtn_b32", VGPR_32, "ds_xor_b32">; 814 defm DS_MSKOR_RTN_B32 : DS_1A2D_RET <0x2c, "ds_mskor_rtn_b32", VGPR_32, "ds_mskor_b32">; 815 defm DS_WRXCHG_RTN_B32 : DS_1A1D_RET <0x2d, "ds_wrxchg_rtn_b32", VGPR_32>; 816 defm DS_WRXCHG2_RTN_B32 : DS_1A2D_RET < 817 0x2e, "ds_wrxchg2_rtn_b32", VReg_64, "", VGPR_32 818 >; 819 defm DS_WRXCHG2ST64_RTN_B32 : DS_1A2D_RET < 820 0x2f, "ds_wrxchg2st64_rtn_b32", VReg_64, "", VGPR_32 821 >; 822 defm DS_CMPST_RTN_B32 : DS_1A2D_RET <0x30, "ds_cmpst_rtn_b32", VGPR_32, "ds_cmpst_b32">; 823 defm DS_CMPST_RTN_F32 : DS_1A2D_RET <0x31, "ds_cmpst_rtn_f32", VGPR_32, "ds_cmpst_f32">; 824 defm DS_MIN_RTN_F32 : DS_1A2D_RET <0x32, "ds_min_rtn_f32", VGPR_32, "ds_min_f32">; 825 defm DS_MAX_RTN_F32 : DS_1A2D_RET <0x33, "ds_max_rtn_f32", VGPR_32, "ds_max_f32">; 826 827 let Uses = [EXEC], mayLoad =0, mayStore = 0, isConvergent = 1 in { 828 defm DS_SWIZZLE_B32 : DS_1A_RET_ <dsop<0x35, 0x3d>, "ds_swizzle_b32", VGPR_32>; 829 } 830 831 let mayStore = 0 in { 832 defm DS_READ_B32 : DS_1A_RET <0x36, "ds_read_b32", VGPR_32>; 833 defm DS_READ2_B32 : DS_1A_Off8_RET <0x37, "ds_read2_b32", VReg_64>; 834 defm DS_READ2ST64_B32 : DS_1A_Off8_RET <0x38, "ds_read2st64_b32", VReg_64>; 835 defm DS_READ_I8 : DS_1A_RET <0x39, "ds_read_i8", VGPR_32>; 836 defm DS_READ_U8 : DS_1A_RET <0x3a, "ds_read_u8", VGPR_32>; 837 defm DS_READ_I16 : DS_1A_RET <0x3b, "ds_read_i16", VGPR_32>; 838 defm DS_READ_U16 : DS_1A_RET <0x3c, "ds_read_u16", VGPR_32>; 839 } 840 defm DS_CONSUME : DS_0A_RET <0x3d, "ds_consume">; 841 defm DS_APPEND : DS_0A_RET <0x3e, "ds_append">; 842 defm DS_ORDERED_COUNT : DS_1A_RET_GDS <0x3f, "ds_ordered_count">; 843 defm DS_ADD_U64 : DS_1A1D_NORET <0x40, "ds_add_u64", VReg_64>; 844 defm DS_SUB_U64 : DS_1A1D_NORET <0x41, "ds_sub_u64", VReg_64>; 845 defm DS_RSUB_U64 : DS_1A1D_NORET <0x42, "ds_rsub_u64", VReg_64>; 846 defm DS_INC_U64 : DS_1A1D_NORET <0x43, "ds_inc_u64", VReg_64>; 847 defm DS_DEC_U64 : DS_1A1D_NORET <0x44, "ds_dec_u64", VReg_64>; 
848 defm DS_MIN_I64 : DS_1A1D_NORET <0x45, "ds_min_i64", VReg_64>; 849 defm DS_MAX_I64 : DS_1A1D_NORET <0x46, "ds_max_i64", VReg_64>; 850 defm DS_MIN_U64 : DS_1A1D_NORET <0x47, "ds_min_u64", VReg_64>; 851 defm DS_MAX_U64 : DS_1A1D_NORET <0x48, "ds_max_u64", VReg_64>; 852 defm DS_AND_B64 : DS_1A1D_NORET <0x49, "ds_and_b64", VReg_64>; 853 defm DS_OR_B64 : DS_1A1D_NORET <0x4a, "ds_or_b64", VReg_64>; 854 defm DS_XOR_B64 : DS_1A1D_NORET <0x4b, "ds_xor_b64", VReg_64>; 855 defm DS_MSKOR_B64 : DS_1A2D_NORET <0x4c, "ds_mskor_b64", VReg_64>; 856 let mayLoad = 0 in { 857 defm DS_WRITE_B64 : DS_1A1D_NORET <0x4d, "ds_write_b64", VReg_64>; 858 defm DS_WRITE2_B64 : DS_1A2D_Off8_NORET <0x4E, "ds_write2_b64", VReg_64>; 859 defm DS_WRITE2ST64_B64 : DS_1A2D_Off8_NORET <0x4f, "ds_write2st64_b64", VReg_64>; 860 } 861 defm DS_CMPST_B64 : DS_1A2D_NORET <0x50, "ds_cmpst_b64", VReg_64>; 862 defm DS_CMPST_F64 : DS_1A2D_NORET <0x51, "ds_cmpst_f64", VReg_64>; 863 defm DS_MIN_F64 : DS_1A1D_NORET <0x52, "ds_min_f64", VReg_64>; 864 defm DS_MAX_F64 : DS_1A1D_NORET <0x53, "ds_max_f64", VReg_64>; 865 866 defm DS_ADD_RTN_U64 : DS_1A1D_RET <0x60, "ds_add_rtn_u64", VReg_64, "ds_add_u64">; 867 defm DS_SUB_RTN_U64 : DS_1A1D_RET <0x61, "ds_sub_rtn_u64", VReg_64, "ds_sub_u64">; 868 defm DS_RSUB_RTN_U64 : DS_1A1D_RET <0x62, "ds_rsub_rtn_u64", VReg_64, "ds_rsub_u64">; 869 defm DS_INC_RTN_U64 : DS_1A1D_RET <0x63, "ds_inc_rtn_u64", VReg_64, "ds_inc_u64">; 870 defm DS_DEC_RTN_U64 : DS_1A1D_RET <0x64, "ds_dec_rtn_u64", VReg_64, "ds_dec_u64">; 871 defm DS_MIN_RTN_I64 : DS_1A1D_RET <0x65, "ds_min_rtn_i64", VReg_64, "ds_min_i64">; 872 defm DS_MAX_RTN_I64 : DS_1A1D_RET <0x66, "ds_max_rtn_i64", VReg_64, "ds_max_i64">; 873 defm DS_MIN_RTN_U64 : DS_1A1D_RET <0x67, "ds_min_rtn_u64", VReg_64, "ds_min_u64">; 874 defm DS_MAX_RTN_U64 : DS_1A1D_RET <0x68, "ds_max_rtn_u64", VReg_64, "ds_max_u64">; 875 defm DS_AND_RTN_B64 : DS_1A1D_RET <0x69, "ds_and_rtn_b64", VReg_64, "ds_and_b64">; 876 defm DS_OR_RTN_B64 : DS_1A1D_RET <0x6a, "ds_or_rtn_b64", VReg_64, "ds_or_b64">; 877 defm DS_XOR_RTN_B64 : DS_1A1D_RET <0x6b, "ds_xor_rtn_b64", VReg_64, "ds_xor_b64">; 878 defm DS_MSKOR_RTN_B64 : DS_1A2D_RET <0x6c, "ds_mskor_rtn_b64", VReg_64, "ds_mskor_b64">; 879 defm DS_WRXCHG_RTN_B64 : DS_1A1D_RET <0x6d, "ds_wrxchg_rtn_b64", VReg_64, "ds_wrxchg_b64">; 880 defm DS_WRXCHG2_RTN_B64 : DS_1A2D_RET <0x6e, "ds_wrxchg2_rtn_b64", VReg_128, "ds_wrxchg2_b64", VReg_64>; 881 defm DS_WRXCHG2ST64_RTN_B64 : DS_1A2D_RET <0x6f, "ds_wrxchg2st64_rtn_b64", VReg_128, "ds_wrxchg2st64_b64", VReg_64>; 882 defm DS_CMPST_RTN_B64 : DS_1A2D_RET <0x70, "ds_cmpst_rtn_b64", VReg_64, "ds_cmpst_b64">; 883 defm DS_CMPST_RTN_F64 : DS_1A2D_RET <0x71, "ds_cmpst_rtn_f64", VReg_64, "ds_cmpst_f64">; 884 defm DS_MIN_RTN_F64 : DS_1A1D_RET <0x72, "ds_min_rtn_f64", VReg_64, "ds_min_f64">; 885 defm DS_MAX_RTN_F64 : DS_1A1D_RET <0x73, "ds_max_rtn_f64", VReg_64, "ds_max_f64">; 886 887 let mayStore = 0 in { 888 defm DS_READ_B64 : DS_1A_RET <0x76, "ds_read_b64", VReg_64>; 889 defm DS_READ2_B64 : DS_1A_Off8_RET <0x77, "ds_read2_b64", VReg_128>; 890 defm DS_READ2ST64_B64 : DS_1A_Off8_RET <0x78, "ds_read2st64_b64", VReg_128>; 891 } 892 893 defm DS_ADD_SRC2_U32 : DS_1A <0x80, "ds_add_src2_u32">; 894 defm DS_SUB_SRC2_U32 : DS_1A <0x81, "ds_sub_src2_u32">; 895 defm DS_RSUB_SRC2_U32 : DS_1A <0x82, "ds_rsub_src2_u32">; 896 defm DS_INC_SRC2_U32 : DS_1A <0x83, "ds_inc_src2_u32">; 897 defm DS_DEC_SRC2_U32 : DS_1A <0x84, "ds_dec_src2_u32">; 898 defm DS_MIN_SRC2_I32 : DS_1A <0x85, "ds_min_src2_i32">; 899 defm DS_MAX_SRC2_I32 : 
  DS_1A <0x86, "ds_max_src2_i32">;
defm DS_MIN_SRC2_U32 : DS_1A <0x87, "ds_min_src2_u32">;
defm DS_MAX_SRC2_U32 : DS_1A <0x88, "ds_max_src2_u32">;
defm DS_AND_SRC2_B32 : DS_1A <0x89, "ds_and_src2_b32">;
defm DS_OR_SRC2_B32 : DS_1A <0x8a, "ds_or_src2_b32">;
defm DS_XOR_SRC2_B32 : DS_1A <0x8b, "ds_xor_src2_b32">;
defm DS_WRITE_SRC2_B32 : DS_1A_Off8_NORET <0x8d, "ds_write_src2_b32">;

defm DS_MIN_SRC2_F32 : DS_1A <0x92, "ds_min_src2_f32">;
defm DS_MAX_SRC2_F32 : DS_1A <0x93, "ds_max_src2_f32">;

defm DS_ADD_SRC2_U64 : DS_1A <0xc0, "ds_add_src2_u64">;
defm DS_SUB_SRC2_U64 : DS_1A <0xc1, "ds_sub_src2_u64">;
defm DS_RSUB_SRC2_U64 : DS_1A <0xc2, "ds_rsub_src2_u64">;
defm DS_INC_SRC2_U64 : DS_1A <0xc3, "ds_inc_src2_u64">;
defm DS_DEC_SRC2_U64 : DS_1A <0xc4, "ds_dec_src2_u64">;
defm DS_MIN_SRC2_I64 : DS_1A <0xc5, "ds_min_src2_i64">;
defm DS_MAX_SRC2_I64 : DS_1A <0xc6, "ds_max_src2_i64">;
defm DS_MIN_SRC2_U64 : DS_1A <0xc7, "ds_min_src2_u64">;
defm DS_MAX_SRC2_U64 : DS_1A <0xc8, "ds_max_src2_u64">;
defm DS_AND_SRC2_B64 : DS_1A <0xc9, "ds_and_src2_b64">;
defm DS_OR_SRC2_B64 : DS_1A <0xca, "ds_or_src2_b64">;
defm DS_XOR_SRC2_B64 : DS_1A <0xcb, "ds_xor_src2_b64">;
defm DS_WRITE_SRC2_B64 : DS_1A_Off8_NORET <0xcd, "ds_write_src2_b64">;

defm DS_MIN_SRC2_F64 : DS_1A <0xd2, "ds_min_src2_f64">;
defm DS_MAX_SRC2_F64 : DS_1A <0xd3, "ds_max_src2_f64">;

//===----------------------------------------------------------------------===//
// MUBUF Instructions
//===----------------------------------------------------------------------===//

defm BUFFER_LOAD_FORMAT_X : MUBUF_Load_Helper <
  mubuf<0x00>, "buffer_load_format_x", VGPR_32
>;
defm BUFFER_LOAD_FORMAT_XY : MUBUF_Load_Helper <
  mubuf<0x01>, "buffer_load_format_xy", VReg_64
>;
defm BUFFER_LOAD_FORMAT_XYZ : MUBUF_Load_Helper <
  mubuf<0x02>, "buffer_load_format_xyz", VReg_96
>;
defm BUFFER_LOAD_FORMAT_XYZW : MUBUF_Load_Helper <
  mubuf<0x03>, "buffer_load_format_xyzw", VReg_128
>;
defm BUFFER_STORE_FORMAT_X : MUBUF_Store_Helper <
  mubuf<0x04>, "buffer_store_format_x", VGPR_32
>;
defm BUFFER_STORE_FORMAT_XY : MUBUF_Store_Helper <
  mubuf<0x05>, "buffer_store_format_xy", VReg_64
>;
defm BUFFER_STORE_FORMAT_XYZ : MUBUF_Store_Helper <
  mubuf<0x06>, "buffer_store_format_xyz", VReg_96
>;
defm BUFFER_STORE_FORMAT_XYZW : MUBUF_Store_Helper <
  mubuf<0x07>, "buffer_store_format_xyzw", VReg_128
>;
defm BUFFER_LOAD_UBYTE : MUBUF_Load_Helper <
  mubuf<0x08, 0x10>, "buffer_load_ubyte", VGPR_32, i32, mubuf_az_extloadi8
>;
defm BUFFER_LOAD_SBYTE : MUBUF_Load_Helper <
  mubuf<0x09, 0x11>, "buffer_load_sbyte", VGPR_32, i32, mubuf_sextloadi8
>;
defm BUFFER_LOAD_USHORT : MUBUF_Load_Helper <
  mubuf<0x0a, 0x12>, "buffer_load_ushort", VGPR_32, i32, mubuf_az_extloadi16
>;
defm BUFFER_LOAD_SSHORT : MUBUF_Load_Helper <
  mubuf<0x0b, 0x13>, "buffer_load_sshort", VGPR_32, i32, mubuf_sextloadi16
>;
defm BUFFER_LOAD_DWORD : MUBUF_Load_Helper <
  mubuf<0x0c, 0x14>, "buffer_load_dword", VGPR_32, i32, mubuf_load
>;
defm BUFFER_LOAD_DWORDX2 : MUBUF_Load_Helper <
  mubuf<0x0d, 0x15>, "buffer_load_dwordx2", VReg_64, v2i32, mubuf_load
>;
defm BUFFER_LOAD_DWORDX4 : MUBUF_Load_Helper <
  mubuf<0x0e, 0x17>, "buffer_load_dwordx4", VReg_128, v4i32, mubuf_load
>;

defm BUFFER_STORE_BYTE : MUBUF_Store_Helper <
  mubuf<0x18>,
"buffer_store_byte", VGPR_32, i32, truncstorei8_global 979 >; 980 981 defm BUFFER_STORE_SHORT : MUBUF_Store_Helper < 982 mubuf<0x1a>, "buffer_store_short", VGPR_32, i32, truncstorei16_global 983 >; 984 985 defm BUFFER_STORE_DWORD : MUBUF_Store_Helper < 986 mubuf<0x1c>, "buffer_store_dword", VGPR_32, i32, global_store 987 >; 988 989 defm BUFFER_STORE_DWORDX2 : MUBUF_Store_Helper < 990 mubuf<0x1d>, "buffer_store_dwordx2", VReg_64, v2i32, global_store 991 >; 992 993 defm BUFFER_STORE_DWORDX4 : MUBUF_Store_Helper < 994 mubuf<0x1e, 0x1f>, "buffer_store_dwordx4", VReg_128, v4i32, global_store 995 >; 996 997 defm BUFFER_ATOMIC_SWAP : MUBUF_Atomic < 998 mubuf<0x30, 0x40>, "buffer_atomic_swap", VGPR_32, i32, atomic_swap_global 999 >; 1000 defm BUFFER_ATOMIC_CMPSWAP : MUBUF_Atomic < 1001 mubuf<0x31, 0x41>, "buffer_atomic_cmpswap", VReg_64, v2i32, null_frag 1002 >; 1003 defm BUFFER_ATOMIC_ADD : MUBUF_Atomic < 1004 mubuf<0x32, 0x42>, "buffer_atomic_add", VGPR_32, i32, atomic_add_global 1005 >; 1006 defm BUFFER_ATOMIC_SUB : MUBUF_Atomic < 1007 mubuf<0x33, 0x43>, "buffer_atomic_sub", VGPR_32, i32, atomic_sub_global 1008 >; 1009 //def BUFFER_ATOMIC_RSUB : MUBUF_ <mubuf<0x34>, "buffer_atomic_rsub", []>; // isn't on CI & VI 1010 defm BUFFER_ATOMIC_SMIN : MUBUF_Atomic < 1011 mubuf<0x35, 0x44>, "buffer_atomic_smin", VGPR_32, i32, atomic_min_global 1012 >; 1013 defm BUFFER_ATOMIC_UMIN : MUBUF_Atomic < 1014 mubuf<0x36, 0x45>, "buffer_atomic_umin", VGPR_32, i32, atomic_umin_global 1015 >; 1016 defm BUFFER_ATOMIC_SMAX : MUBUF_Atomic < 1017 mubuf<0x37, 0x46>, "buffer_atomic_smax", VGPR_32, i32, atomic_max_global 1018 >; 1019 defm BUFFER_ATOMIC_UMAX : MUBUF_Atomic < 1020 mubuf<0x38, 0x47>, "buffer_atomic_umax", VGPR_32, i32, atomic_umax_global 1021 >; 1022 defm BUFFER_ATOMIC_AND : MUBUF_Atomic < 1023 mubuf<0x39, 0x48>, "buffer_atomic_and", VGPR_32, i32, atomic_and_global 1024 >; 1025 defm BUFFER_ATOMIC_OR : MUBUF_Atomic < 1026 mubuf<0x3a, 0x49>, "buffer_atomic_or", VGPR_32, i32, atomic_or_global 1027 >; 1028 defm BUFFER_ATOMIC_XOR : MUBUF_Atomic < 1029 mubuf<0x3b, 0x4a>, "buffer_atomic_xor", VGPR_32, i32, atomic_xor_global 1030 >; 1031 defm BUFFER_ATOMIC_INC : MUBUF_Atomic < 1032 mubuf<0x3c, 0x4b>, "buffer_atomic_inc", VGPR_32, i32, atomic_inc_global 1033 >; 1034 defm BUFFER_ATOMIC_DEC : MUBUF_Atomic < 1035 mubuf<0x3d, 0x4c>, "buffer_atomic_dec", VGPR_32, i32, atomic_dec_global 1036 >; 1037 1038 //def BUFFER_ATOMIC_FCMPSWAP : MUBUF_Atomic <mubuf<0x3e>, "buffer_atomic_fcmpswap", []>; // isn't on VI 1039 //def BUFFER_ATOMIC_FMIN : MUBUF_Atomic <mubuf<0x3f>, "buffer_atomic_fmin", []>; // isn't on VI 1040 //def BUFFER_ATOMIC_FMAX : MUBUF_Atomic <mubuf<0x40>, "buffer_atomic_fmax", []>; // isn't on VI 1041 defm BUFFER_ATOMIC_SWAP_X2 : MUBUF_Atomic < 1042 mubuf<0x50, 0x60>, "buffer_atomic_swap_x2", VReg_64, i64, atomic_swap_global 1043 >; 1044 defm BUFFER_ATOMIC_CMPSWAP_X2 : MUBUF_Atomic < 1045 mubuf<0x51, 0x61>, "buffer_atomic_cmpswap_x2", VReg_128, v2i64, null_frag 1046 >; 1047 defm BUFFER_ATOMIC_ADD_X2 : MUBUF_Atomic < 1048 mubuf<0x52, 0x62>, "buffer_atomic_add_x2", VReg_64, i64, atomic_add_global 1049 >; 1050 defm BUFFER_ATOMIC_SUB_X2 : MUBUF_Atomic < 1051 mubuf<0x53, 0x63>, "buffer_atomic_sub_x2", VReg_64, i64, atomic_sub_global 1052 >; 1053 //defm BUFFER_ATOMIC_RSUB_X2 : MUBUF_Atomic <mubuf<0x54>, "buffer_atomic_rsub_x2", []>; // isn't on CI & VI 1054 defm BUFFER_ATOMIC_SMIN_X2 : MUBUF_Atomic < 1055 mubuf<0x55, 0x64>, "buffer_atomic_smin_x2", VReg_64, i64, atomic_min_global 1056 >; 1057 defm BUFFER_ATOMIC_UMIN_X2 : 
MUBUF_Atomic < 1058 mubuf<0x56, 0x65>, "buffer_atomic_umin_x2", VReg_64, i64, atomic_umin_global 1059 >; 1060 defm BUFFER_ATOMIC_SMAX_X2 : MUBUF_Atomic < 1061 mubuf<0x57, 0x66>, "buffer_atomic_smax_x2", VReg_64, i64, atomic_max_global 1062 >; 1063 defm BUFFER_ATOMIC_UMAX_X2 : MUBUF_Atomic < 1064 mubuf<0x58, 0x67>, "buffer_atomic_umax_x2", VReg_64, i64, atomic_umax_global 1065 >; 1066 defm BUFFER_ATOMIC_AND_X2 : MUBUF_Atomic < 1067 mubuf<0x59, 0x68>, "buffer_atomic_and_x2", VReg_64, i64, atomic_and_global 1068 >; 1069 defm BUFFER_ATOMIC_OR_X2 : MUBUF_Atomic < 1070 mubuf<0x5a, 0x69>, "buffer_atomic_or_x2", VReg_64, i64, atomic_or_global 1071 >; 1072 defm BUFFER_ATOMIC_XOR_X2 : MUBUF_Atomic < 1073 mubuf<0x5b, 0x6a>, "buffer_atomic_xor_x2", VReg_64, i64, atomic_xor_global 1074 >; 1075 defm BUFFER_ATOMIC_INC_X2 : MUBUF_Atomic < 1076 mubuf<0x5c, 0x6b>, "buffer_atomic_inc_x2", VReg_64, i64, atomic_inc_global 1077 >; 1078 defm BUFFER_ATOMIC_DEC_X2 : MUBUF_Atomic < 1079 mubuf<0x5d, 0x6c>, "buffer_atomic_dec_x2", VReg_64, i64, atomic_dec_global 1080 >; 1081 //def BUFFER_ATOMIC_FCMPSWAP_X2 : MUBUF_X2 <mubuf<0x5e>, "buffer_atomic_fcmpswap_x2", []>; // isn't on VI 1082 //def BUFFER_ATOMIC_FMIN_X2 : MUBUF_X2 <mubuf<0x5f>, "buffer_atomic_fmin_x2", []>; // isn't on VI 1083 //def BUFFER_ATOMIC_FMAX_X2 : MUBUF_X2 <mubuf<0x60>, "buffer_atomic_fmax_x2", []>; // isn't on VI 1084 1085 let SubtargetPredicate = isSI, DisableVIDecoder = 1 in { 1086 defm BUFFER_WBINVL1_SC : MUBUF_Invalidate <mubuf<0x70>, "buffer_wbinvl1_sc", int_amdgcn_buffer_wbinvl1_sc>; // isn't on CI & VI 1087 } 1088 1089 defm BUFFER_WBINVL1 : MUBUF_Invalidate <mubuf<0x71, 0x3e>, "buffer_wbinvl1", int_amdgcn_buffer_wbinvl1>; 1090 1091 //===----------------------------------------------------------------------===// 1092 // MTBUF Instructions 1093 //===----------------------------------------------------------------------===// 1094 1095 //def TBUFFER_LOAD_FORMAT_X : MTBUF_ <0x00000000, "tbuffer_load_format_x", []>; 1096 //def TBUFFER_LOAD_FORMAT_XY : MTBUF_ <0x00000001, "tbuffer_load_format_xy", []>; 1097 //def TBUFFER_LOAD_FORMAT_XYZ : MTBUF_ <0x00000002, "tbuffer_load_format_xyz", []>; 1098 defm TBUFFER_LOAD_FORMAT_XYZW : MTBUF_Load_Helper <0x00000003, "tbuffer_load_format_xyzw", VReg_128>; 1099 defm TBUFFER_STORE_FORMAT_X : MTBUF_Store_Helper <0x00000004, "tbuffer_store_format_x", VGPR_32>; 1100 defm TBUFFER_STORE_FORMAT_XY : MTBUF_Store_Helper <0x00000005, "tbuffer_store_format_xy", VReg_64>; 1101 defm TBUFFER_STORE_FORMAT_XYZ : MTBUF_Store_Helper <0x00000006, "tbuffer_store_format_xyz", VReg_128>; 1102 defm TBUFFER_STORE_FORMAT_XYZW : MTBUF_Store_Helper <0x00000007, "tbuffer_store_format_xyzw", VReg_128>; 1103 1104 //===----------------------------------------------------------------------===// 1105 // MIMG Instructions 1106 //===----------------------------------------------------------------------===// 1107 1108 defm IMAGE_LOAD : MIMG_NoSampler <0x00000000, "image_load">; 1109 defm IMAGE_LOAD_MIP : MIMG_NoSampler <0x00000001, "image_load_mip">; 1110 //def IMAGE_LOAD_PCK : MIMG_NoPattern_ <"image_load_pck", 0x00000002>; 1111 //def IMAGE_LOAD_PCK_SGN : MIMG_NoPattern_ <"image_load_pck_sgn", 0x00000003>; 1112 //def IMAGE_LOAD_MIP_PCK : MIMG_NoPattern_ <"image_load_mip_pck", 0x00000004>; 1113 //def IMAGE_LOAD_MIP_PCK_SGN : MIMG_NoPattern_ <"image_load_mip_pck_sgn", 0x00000005>; 1114 defm IMAGE_STORE : MIMG_Store <0x00000008, "image_store">; 1115 defm IMAGE_STORE_MIP : MIMG_Store <0x00000009, "image_store_mip">; 1116 //def IMAGE_STORE_PCK : 
MIMG_NoPattern_ <"image_store_pck", 0x0000000a>; 1117 //def IMAGE_STORE_MIP_PCK : MIMG_NoPattern_ <"image_store_mip_pck", 0x0000000b>; 1118 defm IMAGE_GET_RESINFO : MIMG_NoSampler <0x0000000e, "image_get_resinfo">; 1119 defm IMAGE_ATOMIC_SWAP : MIMG_Atomic <mimg<0x0f, 0x10>, "image_atomic_swap">; 1120 defm IMAGE_ATOMIC_CMPSWAP : MIMG_Atomic <mimg<0x10, 0x11>, "image_atomic_cmpswap", VReg_64>; 1121 defm IMAGE_ATOMIC_ADD : MIMG_Atomic <mimg<0x11, 0x12>, "image_atomic_add">; 1122 defm IMAGE_ATOMIC_SUB : MIMG_Atomic <mimg<0x12, 0x13>, "image_atomic_sub">; 1123 //def IMAGE_ATOMIC_RSUB : MIMG_NoPattern_ <"image_atomic_rsub", 0x00000013>; -- not on VI 1124 defm IMAGE_ATOMIC_SMIN : MIMG_Atomic <mimg<0x14>, "image_atomic_smin">; 1125 defm IMAGE_ATOMIC_UMIN : MIMG_Atomic <mimg<0x15>, "image_atomic_umin">; 1126 defm IMAGE_ATOMIC_SMAX : MIMG_Atomic <mimg<0x16>, "image_atomic_smax">; 1127 defm IMAGE_ATOMIC_UMAX : MIMG_Atomic <mimg<0x17>, "image_atomic_umax">; 1128 defm IMAGE_ATOMIC_AND : MIMG_Atomic <mimg<0x18>, "image_atomic_and">; 1129 defm IMAGE_ATOMIC_OR : MIMG_Atomic <mimg<0x19>, "image_atomic_or">; 1130 defm IMAGE_ATOMIC_XOR : MIMG_Atomic <mimg<0x1a>, "image_atomic_xor">; 1131 defm IMAGE_ATOMIC_INC : MIMG_Atomic <mimg<0x1b>, "image_atomic_inc">; 1132 defm IMAGE_ATOMIC_DEC : MIMG_Atomic <mimg<0x1c>, "image_atomic_dec">; 1133 //def IMAGE_ATOMIC_FCMPSWAP : MIMG_NoPattern_ <"image_atomic_fcmpswap", 0x0000001d>; -- not on VI 1134 //def IMAGE_ATOMIC_FMIN : MIMG_NoPattern_ <"image_atomic_fmin", 0x0000001e>; -- not on VI 1135 //def IMAGE_ATOMIC_FMAX : MIMG_NoPattern_ <"image_atomic_fmax", 0x0000001f>; -- not on VI 1136 defm IMAGE_SAMPLE : MIMG_Sampler_WQM <0x00000020, "image_sample">; 1137 defm IMAGE_SAMPLE_CL : MIMG_Sampler_WQM <0x00000021, "image_sample_cl">; 1138 defm IMAGE_SAMPLE_D : MIMG_Sampler <0x00000022, "image_sample_d">; 1139 defm IMAGE_SAMPLE_D_CL : MIMG_Sampler <0x00000023, "image_sample_d_cl">; 1140 defm IMAGE_SAMPLE_L : MIMG_Sampler <0x00000024, "image_sample_l">; 1141 defm IMAGE_SAMPLE_B : MIMG_Sampler_WQM <0x00000025, "image_sample_b">; 1142 defm IMAGE_SAMPLE_B_CL : MIMG_Sampler_WQM <0x00000026, "image_sample_b_cl">; 1143 defm IMAGE_SAMPLE_LZ : MIMG_Sampler <0x00000027, "image_sample_lz">; 1144 defm IMAGE_SAMPLE_C : MIMG_Sampler_WQM <0x00000028, "image_sample_c">; 1145 defm IMAGE_SAMPLE_C_CL : MIMG_Sampler_WQM <0x00000029, "image_sample_c_cl">; 1146 defm IMAGE_SAMPLE_C_D : MIMG_Sampler <0x0000002a, "image_sample_c_d">; 1147 defm IMAGE_SAMPLE_C_D_CL : MIMG_Sampler <0x0000002b, "image_sample_c_d_cl">; 1148 defm IMAGE_SAMPLE_C_L : MIMG_Sampler <0x0000002c, "image_sample_c_l">; 1149 defm IMAGE_SAMPLE_C_B : MIMG_Sampler_WQM <0x0000002d, "image_sample_c_b">; 1150 defm IMAGE_SAMPLE_C_B_CL : MIMG_Sampler_WQM <0x0000002e, "image_sample_c_b_cl">; 1151 defm IMAGE_SAMPLE_C_LZ : MIMG_Sampler <0x0000002f, "image_sample_c_lz">; 1152 defm IMAGE_SAMPLE_O : MIMG_Sampler_WQM <0x00000030, "image_sample_o">; 1153 defm IMAGE_SAMPLE_CL_O : MIMG_Sampler_WQM <0x00000031, "image_sample_cl_o">; 1154 defm IMAGE_SAMPLE_D_O : MIMG_Sampler <0x00000032, "image_sample_d_o">; 1155 defm IMAGE_SAMPLE_D_CL_O : MIMG_Sampler <0x00000033, "image_sample_d_cl_o">; 1156 defm IMAGE_SAMPLE_L_O : MIMG_Sampler <0x00000034, "image_sample_l_o">; 1157 defm IMAGE_SAMPLE_B_O : MIMG_Sampler_WQM <0x00000035, "image_sample_b_o">; 1158 defm IMAGE_SAMPLE_B_CL_O : MIMG_Sampler_WQM <0x00000036, "image_sample_b_cl_o">; 1159 defm IMAGE_SAMPLE_LZ_O : MIMG_Sampler <0x00000037, "image_sample_lz_o">; 1160 defm IMAGE_SAMPLE_C_O : MIMG_Sampler_WQM 
<0x00000038, "image_sample_c_o">; 1161 defm IMAGE_SAMPLE_C_CL_O : MIMG_Sampler_WQM <0x00000039, "image_sample_c_cl_o">; 1162 defm IMAGE_SAMPLE_C_D_O : MIMG_Sampler <0x0000003a, "image_sample_c_d_o">; 1163 defm IMAGE_SAMPLE_C_D_CL_O : MIMG_Sampler <0x0000003b, "image_sample_c_d_cl_o">; 1164 defm IMAGE_SAMPLE_C_L_O : MIMG_Sampler <0x0000003c, "image_sample_c_l_o">; 1165 defm IMAGE_SAMPLE_C_B_O : MIMG_Sampler_WQM <0x0000003d, "image_sample_c_b_o">; 1166 defm IMAGE_SAMPLE_C_B_CL_O : MIMG_Sampler_WQM <0x0000003e, "image_sample_c_b_cl_o">; 1167 defm IMAGE_SAMPLE_C_LZ_O : MIMG_Sampler <0x0000003f, "image_sample_c_lz_o">; 1168 defm IMAGE_GATHER4 : MIMG_Gather_WQM <0x00000040, "image_gather4">; 1169 defm IMAGE_GATHER4_CL : MIMG_Gather_WQM <0x00000041, "image_gather4_cl">; 1170 defm IMAGE_GATHER4_L : MIMG_Gather <0x00000044, "image_gather4_l">; 1171 defm IMAGE_GATHER4_B : MIMG_Gather_WQM <0x00000045, "image_gather4_b">; 1172 defm IMAGE_GATHER4_B_CL : MIMG_Gather_WQM <0x00000046, "image_gather4_b_cl">; 1173 defm IMAGE_GATHER4_LZ : MIMG_Gather <0x00000047, "image_gather4_lz">; 1174 defm IMAGE_GATHER4_C : MIMG_Gather_WQM <0x00000048, "image_gather4_c">; 1175 defm IMAGE_GATHER4_C_CL : MIMG_Gather_WQM <0x00000049, "image_gather4_c_cl">; 1176 defm IMAGE_GATHER4_C_L : MIMG_Gather <0x0000004c, "image_gather4_c_l">; 1177 defm IMAGE_GATHER4_C_B : MIMG_Gather_WQM <0x0000004d, "image_gather4_c_b">; 1178 defm IMAGE_GATHER4_C_B_CL : MIMG_Gather_WQM <0x0000004e, "image_gather4_c_b_cl">; 1179 defm IMAGE_GATHER4_C_LZ : MIMG_Gather <0x0000004f, "image_gather4_c_lz">; 1180 defm IMAGE_GATHER4_O : MIMG_Gather_WQM <0x00000050, "image_gather4_o">; 1181 defm IMAGE_GATHER4_CL_O : MIMG_Gather_WQM <0x00000051, "image_gather4_cl_o">; 1182 defm IMAGE_GATHER4_L_O : MIMG_Gather <0x00000054, "image_gather4_l_o">; 1183 defm IMAGE_GATHER4_B_O : MIMG_Gather_WQM <0x00000055, "image_gather4_b_o">; 1184 defm IMAGE_GATHER4_B_CL_O : MIMG_Gather <0x00000056, "image_gather4_b_cl_o">; 1185 defm IMAGE_GATHER4_LZ_O : MIMG_Gather <0x00000057, "image_gather4_lz_o">; 1186 defm IMAGE_GATHER4_C_O : MIMG_Gather_WQM <0x00000058, "image_gather4_c_o">; 1187 defm IMAGE_GATHER4_C_CL_O : MIMG_Gather_WQM <0x00000059, "image_gather4_c_cl_o">; 1188 defm IMAGE_GATHER4_C_L_O : MIMG_Gather <0x0000005c, "image_gather4_c_l_o">; 1189 defm IMAGE_GATHER4_C_B_O : MIMG_Gather_WQM <0x0000005d, "image_gather4_c_b_o">; 1190 defm IMAGE_GATHER4_C_B_CL_O : MIMG_Gather_WQM <0x0000005e, "image_gather4_c_b_cl_o">; 1191 defm IMAGE_GATHER4_C_LZ_O : MIMG_Gather <0x0000005f, "image_gather4_c_lz_o">; 1192 defm IMAGE_GET_LOD : MIMG_Sampler_WQM <0x00000060, "image_get_lod">; 1193 defm IMAGE_SAMPLE_CD : MIMG_Sampler <0x00000068, "image_sample_cd">; 1194 defm IMAGE_SAMPLE_CD_CL : MIMG_Sampler <0x00000069, "image_sample_cd_cl">; 1195 defm IMAGE_SAMPLE_C_CD : MIMG_Sampler <0x0000006a, "image_sample_c_cd">; 1196 defm IMAGE_SAMPLE_C_CD_CL : MIMG_Sampler <0x0000006b, "image_sample_c_cd_cl">; 1197 defm IMAGE_SAMPLE_CD_O : MIMG_Sampler <0x0000006c, "image_sample_cd_o">; 1198 defm IMAGE_SAMPLE_CD_CL_O : MIMG_Sampler <0x0000006d, "image_sample_cd_cl_o">; 1199 defm IMAGE_SAMPLE_C_CD_O : MIMG_Sampler <0x0000006e, "image_sample_c_cd_o">; 1200 defm IMAGE_SAMPLE_C_CD_CL_O : MIMG_Sampler <0x0000006f, "image_sample_c_cd_cl_o">; 1201 //def IMAGE_RSRC256 : MIMG_NoPattern_RSRC256 <"image_rsrc256", 0x0000007e>; 1202 //def IMAGE_SAMPLER : MIMG_NoPattern_ <"image_sampler", 0x0000007f>; 1203 1204 //===----------------------------------------------------------------------===// 1205 // VOP1 Instructions 1206 
//===----------------------------------------------------------------------===// 1207 1208 let vdst = 0, src0 = 0, VOPAsmPrefer32Bit = 1 in { 1209 defm V_NOP : VOP1Inst <vop1<0x0>, "v_nop", VOP_NONE>; 1210 } 1211 1212 let isMoveImm = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in { 1213 defm V_MOV_B32 : VOP1Inst <vop1<0x1>, "v_mov_b32", VOP_I32_I32>; 1214 } // End isMoveImm = 1 1215 1216 let Uses = [EXEC] in { 1217 1218 // FIXME: Specify SchedRW for READFIRSTLANE_B32 1219 1220 def V_READFIRSTLANE_B32 : VOP1 < 1221 0x00000002, 1222 (outs SReg_32:$vdst), 1223 (ins VS_32:$src0), 1224 "v_readfirstlane_b32 $vdst, $src0", 1225 [] 1226 > { 1227 let isConvergent = 1; 1228 } 1229 1230 } 1231 1232 let SchedRW = [WriteQuarterRate32] in { 1233 1234 defm V_CVT_I32_F64 : VOP1Inst <vop1<0x3>, "v_cvt_i32_f64", 1235 VOP_I32_F64, fp_to_sint 1236 >; 1237 defm V_CVT_F64_I32 : VOP1Inst <vop1<0x4>, "v_cvt_f64_i32", 1238 VOP_F64_I32, sint_to_fp 1239 >; 1240 defm V_CVT_F32_I32 : VOP1Inst <vop1<0x5>, "v_cvt_f32_i32", 1241 VOP_F32_I32, sint_to_fp 1242 >; 1243 defm V_CVT_F32_U32 : VOP1Inst <vop1<0x6>, "v_cvt_f32_u32", 1244 VOP_F32_I32, uint_to_fp 1245 >; 1246 defm V_CVT_U32_F32 : VOP1Inst <vop1<0x7>, "v_cvt_u32_f32", 1247 VOP_I32_F32, fp_to_uint 1248 >; 1249 defm V_CVT_I32_F32 : VOP1Inst <vop1<0x8>, "v_cvt_i32_f32", 1250 VOP_I32_F32, fp_to_sint 1251 >; 1252 defm V_CVT_F16_F32 : VOP1Inst <vop1<0xa>, "v_cvt_f16_f32", 1253 VOP_I32_F32, fp_to_f16 1254 >; 1255 defm V_CVT_F32_F16 : VOP1Inst <vop1<0xb>, "v_cvt_f32_f16", 1256 VOP_F32_I32, f16_to_fp 1257 >; 1258 defm V_CVT_RPI_I32_F32 : VOP1Inst <vop1<0xc>, "v_cvt_rpi_i32_f32", 1259 VOP_I32_F32, cvt_rpi_i32_f32>; 1260 defm V_CVT_FLR_I32_F32 : VOP1Inst <vop1<0xd>, "v_cvt_flr_i32_f32", 1261 VOP_I32_F32, cvt_flr_i32_f32>; 1262 defm V_CVT_OFF_F32_I4 : VOP1Inst <vop1<0x0e>, "v_cvt_off_f32_i4", VOP_F32_I32>; 1263 defm V_CVT_F32_F64 : VOP1Inst <vop1<0xf>, "v_cvt_f32_f64", 1264 VOP_F32_F64, fround 1265 >; 1266 defm V_CVT_F64_F32 : VOP1Inst <vop1<0x10>, "v_cvt_f64_f32", 1267 VOP_F64_F32, fextend 1268 >; 1269 defm V_CVT_F32_UBYTE0 : VOP1Inst <vop1<0x11>, "v_cvt_f32_ubyte0", 1270 VOP_F32_I32, AMDGPUcvt_f32_ubyte0 1271 >; 1272 defm V_CVT_F32_UBYTE1 : VOP1Inst <vop1<0x12>, "v_cvt_f32_ubyte1", 1273 VOP_F32_I32, AMDGPUcvt_f32_ubyte1 1274 >; 1275 defm V_CVT_F32_UBYTE2 : VOP1Inst <vop1<0x13>, "v_cvt_f32_ubyte2", 1276 VOP_F32_I32, AMDGPUcvt_f32_ubyte2 1277 >; 1278 defm V_CVT_F32_UBYTE3 : VOP1Inst <vop1<0x14>, "v_cvt_f32_ubyte3", 1279 VOP_F32_I32, AMDGPUcvt_f32_ubyte3 1280 >; 1281 defm V_CVT_U32_F64 : VOP1Inst <vop1<0x15>, "v_cvt_u32_f64", 1282 VOP_I32_F64, fp_to_uint 1283 >; 1284 defm V_CVT_F64_U32 : VOP1Inst <vop1<0x16>, "v_cvt_f64_u32", 1285 VOP_F64_I32, uint_to_fp 1286 >; 1287 1288 } // End SchedRW = [WriteQuarterRate32] 1289 1290 defm V_FRACT_F32 : VOP1Inst <vop1<0x20, 0x1b>, "v_fract_f32", 1291 VOP_F32_F32, AMDGPUfract 1292 >; 1293 defm V_TRUNC_F32 : VOP1Inst <vop1<0x21, 0x1c>, "v_trunc_f32", 1294 VOP_F32_F32, ftrunc 1295 >; 1296 defm V_CEIL_F32 : VOP1Inst <vop1<0x22, 0x1d>, "v_ceil_f32", 1297 VOP_F32_F32, fceil 1298 >; 1299 defm V_RNDNE_F32 : VOP1Inst <vop1<0x23, 0x1e>, "v_rndne_f32", 1300 VOP_F32_F32, frint 1301 >; 1302 defm V_FLOOR_F32 : VOP1Inst <vop1<0x24, 0x1f>, "v_floor_f32", 1303 VOP_F32_F32, ffloor 1304 >; 1305 defm V_EXP_F32 : VOP1Inst <vop1<0x25, 0x20>, "v_exp_f32", 1306 VOP_F32_F32, fexp2 1307 >; 1308 1309 let SchedRW = [WriteQuarterRate32] in { 1310 1311 defm V_LOG_F32 : VOP1Inst <vop1<0x27, 0x21>, "v_log_f32", 1312 VOP_F32_F32, flog2 1313 >; 1314 defm V_RCP_F32 : 
VOP1Inst <vop1<0x2a, 0x22>, "v_rcp_f32", 1315 VOP_F32_F32, AMDGPUrcp 1316 >; 1317 defm V_RCP_IFLAG_F32 : VOP1Inst <vop1<0x2b, 0x23>, "v_rcp_iflag_f32", 1318 VOP_F32_F32 1319 >; 1320 defm V_RSQ_F32 : VOP1Inst <vop1<0x2e, 0x24>, "v_rsq_f32", 1321 VOP_F32_F32, AMDGPUrsq 1322 >; 1323 1324 } // End SchedRW = [WriteQuarterRate32] 1325 1326 let SchedRW = [WriteDouble] in { 1327 1328 defm V_RCP_F64 : VOP1Inst <vop1<0x2f, 0x25>, "v_rcp_f64", 1329 VOP_F64_F64, AMDGPUrcp 1330 >; 1331 defm V_RSQ_F64 : VOP1Inst <vop1<0x31, 0x26>, "v_rsq_f64", 1332 VOP_F64_F64, AMDGPUrsq 1333 >; 1334 1335 } // End SchedRW = [WriteDouble]; 1336 1337 defm V_SQRT_F32 : VOP1Inst <vop1<0x33, 0x27>, "v_sqrt_f32", 1338 VOP_F32_F32, fsqrt 1339 >; 1340 1341 let SchedRW = [WriteDouble] in { 1342 1343 defm V_SQRT_F64 : VOP1Inst <vop1<0x34, 0x28>, "v_sqrt_f64", 1344 VOP_F64_F64, fsqrt 1345 >; 1346 1347 } // End SchedRW = [WriteDouble] 1348 1349 let SchedRW = [WriteQuarterRate32] in { 1350 1351 defm V_SIN_F32 : VOP1Inst <vop1<0x35, 0x29>, "v_sin_f32", 1352 VOP_F32_F32, AMDGPUsin 1353 >; 1354 defm V_COS_F32 : VOP1Inst <vop1<0x36, 0x2a>, "v_cos_f32", 1355 VOP_F32_F32, AMDGPUcos 1356 >; 1357 1358 } // End SchedRW = [WriteQuarterRate32] 1359 1360 defm V_NOT_B32 : VOP1Inst <vop1<0x37, 0x2b>, "v_not_b32", VOP_I32_I32>; 1361 defm V_BFREV_B32 : VOP1Inst <vop1<0x38, 0x2c>, "v_bfrev_b32", VOP_I32_I32>; 1362 defm V_FFBH_U32 : VOP1Inst <vop1<0x39, 0x2d>, "v_ffbh_u32", VOP_I32_I32>; 1363 defm V_FFBL_B32 : VOP1Inst <vop1<0x3a, 0x2e>, "v_ffbl_b32", VOP_I32_I32>; 1364 defm V_FFBH_I32 : VOP1Inst <vop1<0x3b, 0x2f>, "v_ffbh_i32", VOP_I32_I32>; 1365 defm V_FREXP_EXP_I32_F64 : VOP1Inst <vop1<0x3c,0x30>, "v_frexp_exp_i32_f64", 1366 VOP_I32_F64, int_amdgcn_frexp_exp 1367 >; 1368 1369 let SchedRW = [WriteDoubleAdd] in { 1370 defm V_FREXP_MANT_F64 : VOP1Inst <vop1<0x3d, 0x31>, "v_frexp_mant_f64", 1371 VOP_F64_F64, int_amdgcn_frexp_mant 1372 >; 1373 1374 defm V_FRACT_F64 : VOP1Inst <vop1<0x3e, 0x32>, "v_fract_f64", 1375 VOP_F64_F64, AMDGPUfract 1376 >; 1377 } // End SchedRW = [WriteDoubleAdd] 1378 1379 1380 defm V_FREXP_EXP_I32_F32 : VOP1Inst <vop1<0x3f, 0x33>, "v_frexp_exp_i32_f32", 1381 VOP_I32_F32, int_amdgcn_frexp_exp 1382 >; 1383 defm V_FREXP_MANT_F32 : VOP1Inst <vop1<0x40, 0x34>, "v_frexp_mant_f32", 1384 VOP_F32_F32, int_amdgcn_frexp_mant 1385 >; 1386 let vdst = 0, src0 = 0, VOPAsmPrefer32Bit = 1 in { 1387 defm V_CLREXCP : VOP1Inst <vop1<0x41,0x35>, "v_clrexcp", VOP_NO_EXT<VOP_NONE>>; 1388 } 1389 1390 let Uses = [M0, EXEC] in { 1391 defm V_MOVRELD_B32 : VOP1Inst <vop1<0x42, 0x36>, "v_movreld_b32", VOP_NO_EXT<VOP_I32_I32>>; 1392 defm V_MOVRELS_B32 : VOP1Inst <vop1<0x43, 0x37>, "v_movrels_b32", VOP_NO_EXT<VOP_I32_I32>>; 1393 defm V_MOVRELSD_B32 : VOP1Inst <vop1<0x44, 0x38>, "v_movrelsd_b32", VOP_NO_EXT<VOP_I32_I32>>; 1394 } // End Uses = [M0, EXEC] 1395 1396 // These instruction only exist on SI and CI 1397 let SubtargetPredicate = isSICI in { 1398 1399 let SchedRW = [WriteQuarterRate32] in { 1400 1401 defm V_MOV_FED_B32 : VOP1InstSI <vop1<0x9>, "v_mov_fed_b32", VOP_I32_I32>; 1402 defm V_LOG_CLAMP_F32 : VOP1InstSI <vop1<0x26>, "v_log_clamp_f32", 1403 VOP_F32_F32, int_amdgcn_log_clamp>; 1404 defm V_RCP_CLAMP_F32 : VOP1InstSI <vop1<0x28>, "v_rcp_clamp_f32", VOP_F32_F32>; 1405 defm V_RCP_LEGACY_F32 : VOP1InstSI <vop1<0x29>, "v_rcp_legacy_f32", VOP_F32_F32>; 1406 defm V_RSQ_CLAMP_F32 : VOP1InstSI <vop1<0x2c>, "v_rsq_clamp_f32", 1407 VOP_F32_F32, AMDGPUrsq_clamp 1408 >; 1409 defm V_RSQ_LEGACY_F32 : VOP1InstSI <vop1<0x2d>, "v_rsq_legacy_f32", 1410 VOP_F32_F32, 
  AMDGPUrsq_legacy
>;

} // End SchedRW = [WriteQuarterRate32]

let SchedRW = [WriteDouble] in {

defm V_RCP_CLAMP_F64 : VOP1InstSI <vop1<0x30>, "v_rcp_clamp_f64", VOP_F64_F64>;
defm V_RSQ_CLAMP_F64 : VOP1InstSI <vop1<0x32>, "v_rsq_clamp_f64",
  VOP_F64_F64, AMDGPUrsq_clamp
>;

} // End SchedRW = [WriteDouble]

} // End SubtargetPredicate = isSICI

//===----------------------------------------------------------------------===//
// VINTRP Instructions
//===----------------------------------------------------------------------===//

let Uses = [M0, EXEC] in {

// FIXME: Specify SchedRW for VINTRP instructions.

multiclass V_INTERP_P1_F32_m : VINTRP_m <
  0x00000000,
  (outs VGPR_32:$dst),
  (ins VGPR_32:$i, i32imm:$attr_chan, i32imm:$attr),
  "v_interp_p1_f32 $dst, $i, $attr_chan, $attr, [m0]",
  [(set f32:$dst, (AMDGPUinterp_p1 i32:$i, (i32 imm:$attr_chan),
                                   (i32 imm:$attr)))]
>;

let OtherPredicates = [has32BankLDS] in {

defm V_INTERP_P1_F32 : V_INTERP_P1_F32_m;

} // End OtherPredicates = [has32BankLDS]

let OtherPredicates = [has16BankLDS], Constraints = "@earlyclobber $dst", isAsmParserOnly=1 in {

defm V_INTERP_P1_F32_16bank : V_INTERP_P1_F32_m;

} // End OtherPredicates = [has16BankLDS], Constraints = "@earlyclobber $dst", isAsmParserOnly=1

let DisableEncoding = "$src0", Constraints = "$src0 = $dst" in {

defm V_INTERP_P2_F32 : VINTRP_m <
  0x00000001,
  (outs VGPR_32:$dst),
  (ins VGPR_32:$src0, VGPR_32:$j, i32imm:$attr_chan, i32imm:$attr),
  "v_interp_p2_f32 $dst, [$src0], $j, $attr_chan, $attr, [m0]",
  [(set f32:$dst, (AMDGPUinterp_p2 f32:$src0, i32:$j, (i32 imm:$attr_chan),
                                   (i32 imm:$attr)))]>;

} // End DisableEncoding = "$src0", Constraints = "$src0 = $dst"

defm V_INTERP_MOV_F32 : VINTRP_m <
  0x00000002,
  (outs VGPR_32:$dst),
  (ins InterpSlot:$src0, i32imm:$attr_chan, i32imm:$attr),
  "v_interp_mov_f32 $dst, $src0, $attr_chan, $attr, [m0]",
  [(set f32:$dst, (AMDGPUinterp_mov (i32 imm:$src0), (i32 imm:$attr_chan),
                                    (i32 imm:$attr)))]>;

} // End Uses = [M0, EXEC]

//===----------------------------------------------------------------------===//
// VOP2 Instructions
//===----------------------------------------------------------------------===//

defm V_CNDMASK_B32 : VOP2eInst <vop2<0x0, 0x0>, "v_cndmask_b32",
  VOP2e_I32_I32_I32_I1
>;

let isCommutable = 1 in {
defm V_ADD_F32 : VOP2Inst <vop2<0x3, 0x1>, "v_add_f32",
  VOP_F32_F32_F32, fadd
>;

defm V_SUB_F32 : VOP2Inst <vop2<0x4, 0x2>, "v_sub_f32", VOP_F32_F32_F32, fsub>;
defm V_SUBREV_F32 : VOP2Inst <vop2<0x5, 0x3>, "v_subrev_f32",
  VOP_F32_F32_F32, null_frag, "v_sub_f32"
>;
} // End isCommutable = 1

let isCommutable = 1 in {

defm V_MUL_LEGACY_F32 : VOP2Inst <vop2<0x7, 0x4>, "v_mul_legacy_f32",
  VOP_F32_F32_F32
>;

defm V_MUL_F32 : VOP2Inst <vop2<0x8, 0x5>, "v_mul_f32",
  VOP_F32_F32_F32, fmul
>;

defm V_MUL_I32_I24 : VOP2Inst <vop2<0x9, 0x6>, "v_mul_i32_i24",
  VOP_I32_I32_I32, AMDGPUmul_i24
>;

defm V_MUL_HI_I32_I24 : VOP2Inst <vop2<0xa,0x7>, "v_mul_hi_i32_i24",
  VOP_I32_I32_I32
>;

defm V_MUL_U32_U24 : VOP2Inst <vop2<0xb, 0x8>, "v_mul_u32_u24",
  VOP_I32_I32_I32, AMDGPUmul_u24
>;

defm V_MUL_HI_U32_U24 : VOP2Inst <vop2<0xc,0x9>, "v_mul_hi_u32_u24",
  VOP_I32_I32_I32
>;

defm V_MIN_F32 : VOP2Inst <vop2<0xf, 0xa>, "v_min_f32", VOP_F32_F32_F32,
  fminnum>;
defm V_MAX_F32 : VOP2Inst <vop2<0x10, 0xb>, "v_max_f32", VOP_F32_F32_F32,
  fmaxnum>;
defm V_MIN_I32 : VOP2Inst <vop2<0x11, 0xc>, "v_min_i32", VOP_I32_I32_I32>;
defm V_MAX_I32 : VOP2Inst <vop2<0x12, 0xd>, "v_max_i32", VOP_I32_I32_I32>;
defm V_MIN_U32 : VOP2Inst <vop2<0x13, 0xe>, "v_min_u32", VOP_I32_I32_I32>;
defm V_MAX_U32 : VOP2Inst <vop2<0x14, 0xf>, "v_max_u32", VOP_I32_I32_I32>;

defm V_LSHRREV_B32 : VOP2Inst <
  vop2<0x16, 0x10>, "v_lshrrev_b32", VOP_I32_I32_I32, null_frag,
  "v_lshr_b32"
>;

defm V_ASHRREV_I32 : VOP2Inst <
  vop2<0x18, 0x11>, "v_ashrrev_i32", VOP_I32_I32_I32, null_frag,
  "v_ashr_i32"
>;

defm V_LSHLREV_B32 : VOP2Inst <
  vop2<0x1a, 0x12>, "v_lshlrev_b32", VOP_I32_I32_I32, null_frag,
  "v_lshl_b32"
>;

defm V_AND_B32 : VOP2Inst <vop2<0x1b, 0x13>, "v_and_b32", VOP_I32_I32_I32>;
defm V_OR_B32 : VOP2Inst <vop2<0x1c, 0x14>, "v_or_b32", VOP_I32_I32_I32>;
defm V_XOR_B32 : VOP2Inst <vop2<0x1d, 0x15>, "v_xor_b32", VOP_I32_I32_I32>;

let Constraints = "$vdst = $src2", DisableEncoding="$src2",
    isConvertibleToThreeAddress = 1 in {
defm V_MAC_F32 : VOP2Inst <vop2<0x1f, 0x16>, "v_mac_f32", VOP_MAC>;
}
} // End isCommutable = 1

defm V_MADMK_F32 : VOP2MADK <vop2<0x20, 0x17>, "v_madmk_f32", VOP_MADMK>;

let isCommutable = 1 in {
defm V_MADAK_F32 : VOP2MADK <vop2<0x21, 0x18>, "v_madak_f32", VOP_MADAK>;
} // End isCommutable = 1

let isCommutable = 1 in {
// No patterns so that the scalar instructions are always selected.
// The scalar versions will be replaced with vector ones when needed later.

// V_ADD_I32, V_SUB_I32, and V_SUBREV_I32 were renamed to *_U32 in VI,
// but the VI instructions behave the same as the SI versions.
defm V_ADD_I32 : VOP2bInst <vop2<0x25, 0x19>, "v_add_i32",
  VOP2b_I32_I1_I32_I32
>;
defm V_SUB_I32 : VOP2bInst <vop2<0x26, 0x1a>, "v_sub_i32", VOP2b_I32_I1_I32_I32>;

defm V_SUBREV_I32 : VOP2bInst <vop2<0x27, 0x1b>, "v_subrev_i32",
  VOP2b_I32_I1_I32_I32, null_frag, "v_sub_i32"
>;

defm V_ADDC_U32 : VOP2bInst <vop2<0x28, 0x1c>, "v_addc_u32",
  VOP2b_I32_I1_I32_I32_I1
>;
defm V_SUBB_U32 : VOP2bInst <vop2<0x29, 0x1d>, "v_subb_u32",
  VOP2b_I32_I1_I32_I32_I1
>;
defm V_SUBBREV_U32 : VOP2bInst <vop2<0x2a, 0x1e>, "v_subbrev_u32",
  VOP2b_I32_I1_I32_I32_I1, null_frag, "v_subb_u32"
>;

} // End isCommutable = 1

// These are special and do not read the exec mask.
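//
// Illustrative sketch (not output generated from these definitions) of how
// the lane-crossing moves are used:
//   v_readlane_b32  s4, v0, 5    ; copy lane 5 of v0 into s4 for the whole wave
//   v_writelane_b32 v0, s4, 7    ; write s4 into lane 7 of v0
// Both address a specific lane regardless of EXEC, which is why the
// definitions below leave Uses empty and are marked convergent.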
1590 let isConvergent = 1, Uses = []<Register> in { 1591 1592 defm V_READLANE_B32 : VOP2SI_3VI_m < 1593 vop3 <0x001, 0x289>, 1594 "v_readlane_b32", 1595 (outs SReg_32:$vdst), 1596 (ins VS_32:$src0, SCSrc_32:$src1), 1597 "v_readlane_b32 $vdst, $src0, $src1" 1598 >; 1599 1600 defm V_WRITELANE_B32 : VOP2SI_3VI_m < 1601 vop3 <0x002, 0x28a>, 1602 "v_writelane_b32", 1603 (outs VGPR_32:$vdst), 1604 (ins SReg_32:$src0, SCSrc_32:$src1), 1605 "v_writelane_b32 $vdst, $src0, $src1" 1606 >; 1607 1608 } // End isConvergent = 1 1609 1610 // These instructions only exist on SI and CI 1611 let SubtargetPredicate = isSICI in { 1612 1613 let isCommutable = 1 in { 1614 defm V_MAC_LEGACY_F32 : VOP2InstSI <vop2<0x6>, "v_mac_legacy_f32", 1615 VOP_F32_F32_F32 1616 >; 1617 } // End isCommutable = 1 1618 1619 defm V_MIN_LEGACY_F32 : VOP2InstSI <vop2<0xd>, "v_min_legacy_f32", 1620 VOP_F32_F32_F32, AMDGPUfmin_legacy 1621 >; 1622 defm V_MAX_LEGACY_F32 : VOP2InstSI <vop2<0xe>, "v_max_legacy_f32", 1623 VOP_F32_F32_F32, AMDGPUfmax_legacy 1624 >; 1625 1626 let isCommutable = 1 in { 1627 defm V_LSHR_B32 : VOP2InstSI <vop2<0x15>, "v_lshr_b32", VOP_I32_I32_I32>; 1628 defm V_ASHR_I32 : VOP2InstSI <vop2<0x17>, "v_ashr_i32", VOP_I32_I32_I32>; 1629 defm V_LSHL_B32 : VOP2InstSI <vop2<0x19>, "v_lshl_b32", VOP_I32_I32_I32>; 1630 } // End isCommutable = 1 1631 } // End let SubtargetPredicate = SICI 1632 1633 defm V_BFM_B32 : VOP2_VI3_Inst <vop23<0x1e, 0x293>, "v_bfm_b32", 1634 VOP_I32_I32_I32 1635 >; 1636 defm V_BCNT_U32_B32 : VOP2_VI3_Inst <vop23<0x22, 0x28b>, "v_bcnt_u32_b32", 1637 VOP_I32_I32_I32 1638 >; 1639 defm V_MBCNT_LO_U32_B32 : VOP2_VI3_Inst <vop23<0x23, 0x28c>, "v_mbcnt_lo_u32_b32", 1640 VOP_I32_I32_I32, int_amdgcn_mbcnt_lo 1641 >; 1642 defm V_MBCNT_HI_U32_B32 : VOP2_VI3_Inst <vop23<0x24, 0x28d>, "v_mbcnt_hi_u32_b32", 1643 VOP_I32_I32_I32, int_amdgcn_mbcnt_hi 1644 >; 1645 defm V_LDEXP_F32 : VOP2_VI3_Inst <vop23<0x2b, 0x288>, "v_ldexp_f32", 1646 VOP_F32_F32_I32, AMDGPUldexp 1647 >; 1648 1649 defm V_CVT_PKACCUM_U8_F32 : VOP2_VI3_Inst <vop23<0x2c, 0x1f0>, "v_cvt_pkaccum_u8_f32", 1650 VOP_I32_F32_I32>; // TODO: set "Uses = dst" 1651 1652 defm V_CVT_PKNORM_I16_F32 : VOP2_VI3_Inst <vop23<0x2d, 0x294>, "v_cvt_pknorm_i16_f32", 1653 VOP_I32_F32_F32 1654 >; 1655 defm V_CVT_PKNORM_U16_F32 : VOP2_VI3_Inst <vop23<0x2e, 0x295>, "v_cvt_pknorm_u16_f32", 1656 VOP_I32_F32_F32 1657 >; 1658 defm V_CVT_PKRTZ_F16_F32 : VOP2_VI3_Inst <vop23<0x2f, 0x296>, "v_cvt_pkrtz_f16_f32", 1659 VOP_I32_F32_F32, int_SI_packf16 1660 >; 1661 defm V_CVT_PK_U16_U32 : VOP2_VI3_Inst <vop23<0x30, 0x297>, "v_cvt_pk_u16_u32", 1662 VOP_I32_I32_I32 1663 >; 1664 defm V_CVT_PK_I16_I32 : VOP2_VI3_Inst <vop23<0x31, 0x298>, "v_cvt_pk_i16_i32", 1665 VOP_I32_I32_I32 1666 >; 1667 1668 //===----------------------------------------------------------------------===// 1669 // VOP3 Instructions 1670 //===----------------------------------------------------------------------===// 1671 1672 let isCommutable = 1 in { 1673 defm V_MAD_LEGACY_F32 : VOP3Inst <vop3<0x140, 0x1c0>, "v_mad_legacy_f32", 1674 VOP_F32_F32_F32_F32 1675 >; 1676 1677 defm V_MAD_F32 : VOP3Inst <vop3<0x141, 0x1c1>, "v_mad_f32", 1678 VOP_F32_F32_F32_F32, fmad 1679 >; 1680 1681 defm V_MAD_I32_I24 : VOP3Inst <vop3<0x142, 0x1c2>, "v_mad_i32_i24", 1682 VOP_I32_I32_I32_I32, AMDGPUmad_i24 1683 >; 1684 defm V_MAD_U32_U24 : VOP3Inst <vop3<0x143, 0x1c3>, "v_mad_u32_u24", 1685 VOP_I32_I32_I32_I32, AMDGPUmad_u24 1686 >; 1687 } // End isCommutable = 1 1688 1689 defm V_CUBEID_F32 : VOP3Inst <vop3<0x144, 0x1c4>, "v_cubeid_f32", 
1690 VOP_F32_F32_F32_F32, int_amdgcn_cubeid 1691 >; 1692 defm V_CUBESC_F32 : VOP3Inst <vop3<0x145, 0x1c5>, "v_cubesc_f32", 1693 VOP_F32_F32_F32_F32, int_amdgcn_cubesc 1694 >; 1695 defm V_CUBETC_F32 : VOP3Inst <vop3<0x146, 0x1c6>, "v_cubetc_f32", 1696 VOP_F32_F32_F32_F32, int_amdgcn_cubetc 1697 >; 1698 defm V_CUBEMA_F32 : VOP3Inst <vop3<0x147, 0x1c7>, "v_cubema_f32", 1699 VOP_F32_F32_F32_F32, int_amdgcn_cubema 1700 >; 1701 1702 defm V_BFE_U32 : VOP3Inst <vop3<0x148, 0x1c8>, "v_bfe_u32", 1703 VOP_I32_I32_I32_I32, AMDGPUbfe_u32 1704 >; 1705 defm V_BFE_I32 : VOP3Inst <vop3<0x149, 0x1c9>, "v_bfe_i32", 1706 VOP_I32_I32_I32_I32, AMDGPUbfe_i32 1707 >; 1708 1709 defm V_BFI_B32 : VOP3Inst <vop3<0x14a, 0x1ca>, "v_bfi_b32", 1710 VOP_I32_I32_I32_I32, AMDGPUbfi 1711 >; 1712 1713 let isCommutable = 1 in { 1714 defm V_FMA_F32 : VOP3Inst <vop3<0x14b, 0x1cb>, "v_fma_f32", 1715 VOP_F32_F32_F32_F32, fma 1716 >; 1717 defm V_FMA_F64 : VOP3Inst <vop3<0x14c, 0x1cc>, "v_fma_f64", 1718 VOP_F64_F64_F64_F64, fma 1719 >; 1720 1721 defm V_LERP_U8 : VOP3Inst <vop3<0x14d, 0x1cd>, "v_lerp_u8", 1722 VOP_I32_I32_I32_I32, int_amdgcn_lerp 1723 >; 1724 } // End isCommutable = 1 1725 1726 //def V_LERP_U8 : VOP3_U8 <0x0000014d, "v_lerp_u8", []>; 1727 defm V_ALIGNBIT_B32 : VOP3Inst <vop3<0x14e, 0x1ce>, "v_alignbit_b32", 1728 VOP_I32_I32_I32_I32 1729 >; 1730 defm V_ALIGNBYTE_B32 : VOP3Inst <vop3<0x14f, 0x1cf>, "v_alignbyte_b32", 1731 VOP_I32_I32_I32_I32 1732 >; 1733 1734 defm V_MIN3_F32 : VOP3Inst <vop3<0x151, 0x1d0>, "v_min3_f32", 1735 VOP_F32_F32_F32_F32, AMDGPUfmin3>; 1736 1737 defm V_MIN3_I32 : VOP3Inst <vop3<0x152, 0x1d1>, "v_min3_i32", 1738 VOP_I32_I32_I32_I32, AMDGPUsmin3 1739 >; 1740 defm V_MIN3_U32 : VOP3Inst <vop3<0x153, 0x1d2>, "v_min3_u32", 1741 VOP_I32_I32_I32_I32, AMDGPUumin3 1742 >; 1743 defm V_MAX3_F32 : VOP3Inst <vop3<0x154, 0x1d3>, "v_max3_f32", 1744 VOP_F32_F32_F32_F32, AMDGPUfmax3 1745 >; 1746 defm V_MAX3_I32 : VOP3Inst <vop3<0x155, 0x1d4>, "v_max3_i32", 1747 VOP_I32_I32_I32_I32, AMDGPUsmax3 1748 >; 1749 defm V_MAX3_U32 : VOP3Inst <vop3<0x156, 0x1d5>, "v_max3_u32", 1750 VOP_I32_I32_I32_I32, AMDGPUumax3 1751 >; 1752 defm V_MED3_F32 : VOP3Inst <vop3<0x157, 0x1d6>, "v_med3_f32", 1753 VOP_F32_F32_F32_F32, AMDGPUfmed3 1754 >; 1755 defm V_MED3_I32 : VOP3Inst <vop3<0x158, 0x1d7>, "v_med3_i32", 1756 VOP_I32_I32_I32_I32, AMDGPUsmed3 1757 >; 1758 defm V_MED3_U32 : VOP3Inst <vop3<0x159, 0x1d8>, "v_med3_u32", 1759 VOP_I32_I32_I32_I32, AMDGPUumed3 1760 >; 1761 1762 //def V_SAD_U8 : VOP3_U8 <0x0000015a, "v_sad_u8", []>; 1763 //def V_SAD_HI_U8 : VOP3_U8 <0x0000015b, "v_sad_hi_u8", []>; 1764 //def V_SAD_U16 : VOP3_U16 <0x0000015c, "v_sad_u16", []>; 1765 defm V_SAD_U32 : VOP3Inst <vop3<0x15d, 0x1dc>, "v_sad_u32", 1766 VOP_I32_I32_I32_I32 1767 >; 1768 //def V_CVT_PK_U8_F32 : VOP3_U8 <0x0000015e, "v_cvt_pk_u8_f32", []>; 1769 defm V_DIV_FIXUP_F32 : VOP3Inst < 1770 vop3<0x15f, 0x1de>, "v_div_fixup_f32", VOP_F32_F32_F32_F32, AMDGPUdiv_fixup 1771 >; 1772 1773 let SchedRW = [WriteDoubleAdd] in { 1774 1775 defm V_DIV_FIXUP_F64 : VOP3Inst < 1776 vop3<0x160, 0x1df>, "v_div_fixup_f64", VOP_F64_F64_F64_F64, AMDGPUdiv_fixup 1777 >; 1778 1779 } // End SchedRW = [WriteDouble] 1780 1781 let SchedRW = [WriteDoubleAdd] in { 1782 let isCommutable = 1 in { 1783 1784 defm V_ADD_F64 : VOP3Inst <vop3<0x164, 0x280>, "v_add_f64", 1785 VOP_F64_F64_F64, fadd, 1 1786 >; 1787 defm V_MUL_F64 : VOP3Inst <vop3<0x165, 0x281>, "v_mul_f64", 1788 VOP_F64_F64_F64, fmul, 1 1789 >; 1790 1791 defm V_MIN_F64 : VOP3Inst <vop3<0x166, 0x282>, "v_min_f64", 1792 
VOP_F64_F64_F64, fminnum, 1 1793 >; 1794 defm V_MAX_F64 : VOP3Inst <vop3<0x167, 0x283>, "v_max_f64", 1795 VOP_F64_F64_F64, fmaxnum, 1 1796 >; 1797 1798 } // End isCommutable = 1 1799 1800 defm V_LDEXP_F64 : VOP3Inst <vop3<0x168, 0x284>, "v_ldexp_f64", 1801 VOP_F64_F64_I32, AMDGPUldexp, 1 1802 >; 1803 1804 } // End let SchedRW = [WriteDoubleAdd] 1805 1806 let isCommutable = 1, SchedRW = [WriteQuarterRate32] in { 1807 1808 defm V_MUL_LO_U32 : VOP3Inst <vop3<0x169, 0x285>, "v_mul_lo_u32", 1809 VOP_I32_I32_I32 1810 >; 1811 defm V_MUL_HI_U32 : VOP3Inst <vop3<0x16a, 0x286>, "v_mul_hi_u32", 1812 VOP_I32_I32_I32, mulhu 1813 >; 1814 1815 let DisableVIDecoder=1 in { // removed from VI as identical to V_MUL_LO_U32 1816 defm V_MUL_LO_I32 : VOP3Inst <vop3<0x16b, 0x285>, "v_mul_lo_i32", 1817 VOP_I32_I32_I32 1818 >; 1819 } 1820 1821 defm V_MUL_HI_I32 : VOP3Inst <vop3<0x16c, 0x287>, "v_mul_hi_i32", 1822 VOP_I32_I32_I32, mulhs 1823 >; 1824 1825 } // End isCommutable = 1, SchedRW = [WriteQuarterRate32] 1826 1827 let SchedRW = [WriteFloatFMA, WriteSALU] in { 1828 defm V_DIV_SCALE_F32 : VOP3bInst <vop3<0x16d, 0x1e0>, "v_div_scale_f32", 1829 VOP3b_F32_I1_F32_F32_F32, [], 1 1830 >; 1831 } 1832 1833 let SchedRW = [WriteDouble, WriteSALU] in { 1834 // Double precision division pre-scale. 1835 defm V_DIV_SCALE_F64 : VOP3bInst <vop3<0x16e, 0x1e1>, "v_div_scale_f64", 1836 VOP3b_F64_I1_F64_F64_F64, [], 1 1837 >; 1838 } // End SchedRW = [WriteDouble] 1839 1840 let isCommutable = 1, Uses = [VCC, EXEC] in { 1841 1842 let SchedRW = [WriteFloatFMA] in { 1843 // v_div_fmas_f32: 1844 // result = src0 * src1 + src2 1845 // if (vcc) 1846 // result *= 2^32 1847 // 1848 defm V_DIV_FMAS_F32 : VOP3_VCC_Inst <vop3<0x16f, 0x1e2>, "v_div_fmas_f32", 1849 VOP_F32_F32_F32_F32, AMDGPUdiv_fmas 1850 >; 1851 } 1852 1853 let SchedRW = [WriteDouble] in { 1854 // v_div_fmas_f64: 1855 // result = src0 * src1 + src2 1856 // if (vcc) 1857 // result *= 2^64 1858 // 1859 defm V_DIV_FMAS_F64 : VOP3_VCC_Inst <vop3<0x170, 0x1e3>, "v_div_fmas_f64", 1860 VOP_F64_F64_F64_F64, AMDGPUdiv_fmas 1861 >; 1862 1863 } // End SchedRW = [WriteDouble] 1864 } // End isCommutable = 1, Uses = [VCC, EXEC] 1865 1866 //def V_MSAD_U8 : VOP3_U8 <0x00000171, "v_msad_u8", []>; 1867 //def V_QSAD_U8 : VOP3_U8 <0x00000172, "v_qsad_u8", []>; 1868 //def V_MQSAD_U8 : VOP3_U8 <0x00000173, "v_mqsad_u8", []>; 1869 1870 let SchedRW = [WriteDouble] in { 1871 defm V_TRIG_PREOP_F64 : VOP3Inst < 1872 vop3<0x174, 0x292>, "v_trig_preop_f64", VOP_F64_F64_I32, AMDGPUtrig_preop 1873 >; 1874 1875 } // End SchedRW = [WriteDouble] 1876 1877 // These instructions only exist on SI and CI 1878 let SubtargetPredicate = isSICI in { 1879 1880 defm V_LSHL_B64 : VOP3Inst <vop3<0x161>, "v_lshl_b64", VOP_I64_I64_I32>; 1881 defm V_LSHR_B64 : VOP3Inst <vop3<0x162>, "v_lshr_b64", VOP_I64_I64_I32>; 1882 defm V_ASHR_I64 : VOP3Inst <vop3<0x163>, "v_ashr_i64", VOP_I64_I64_I32>; 1883 1884 defm V_MULLIT_F32 : VOP3Inst <vop3<0x150>, "v_mullit_f32", 1885 VOP_F32_F32_F32_F32>; 1886 1887 } // End SubtargetPredicate = isSICI 1888 1889 let SubtargetPredicate = isVI, DisableSIDecoder = 1 in { 1890 1891 defm V_LSHLREV_B64 : VOP3Inst <vop3<0, 0x28f>, "v_lshlrev_b64", 1892 VOP_I64_I32_I64 1893 >; 1894 defm V_LSHRREV_B64 : VOP3Inst <vop3<0, 0x290>, "v_lshrrev_b64", 1895 VOP_I64_I32_I64 1896 >; 1897 defm V_ASHRREV_I64 : VOP3Inst <vop3<0, 0x291>, "v_ashrrev_i64", 1898 VOP_I64_I32_I64 1899 >; 1900 1901 } // End SubtargetPredicate = isVI 1902 1903 //===----------------------------------------------------------------------===// 
1904 // Pseudo Instructions 1905 //===----------------------------------------------------------------------===// 1906 1907 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC] in { 1908 1909 // For use in patterns 1910 def V_CNDMASK_B64_PSEUDO : VOP3Common <(outs VReg_64:$vdst), 1911 (ins VSrc_64:$src0, VSrc_64:$src1, SSrc_64:$src2), "", []> { 1912 let isPseudo = 1; 1913 let isCodeGenOnly = 1; 1914 } 1915 1916 // 64-bit vector move instruction. This is mainly used by the SIFoldOperands 1917 // pass to enable folding of inline immediates. 1918 def V_MOV_B64_PSEUDO : PseudoInstSI <(outs VReg_64:$vdst), (ins VSrc_64:$src0)> { 1919 let VALU = 1; 1920 } 1921 } // End let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC] 1922 1923 let usesCustomInserter = 1, SALU = 1 in { 1924 def GET_GROUPSTATICSIZE : PseudoInstSI <(outs SReg_32:$sdst), (ins), 1925 [(set SReg_32:$sdst, (int_amdgcn_groupstaticsize))]>; 1926 } // End let usesCustomInserter = 1, SALU = 1 1927 1928 // SI pseudo instructions. These are used by the CFG structurizer pass 1929 // and should be lowered to ISA instructions prior to codegen. 1930 1931 let hasSideEffects = 1 in { 1932 1933 // Dummy terminator instruction to use after control flow instructions 1934 // replaced with exec mask operations. 1935 def SI_MASK_BRANCH : PseudoInstSI < 1936 (outs), (ins brtarget:$target, SReg_64:$dst)> { 1937 let isBranch = 1; 1938 let isTerminator = 1; 1939 let isBarrier = 1; 1940 let SALU = 1; 1941 } 1942 1943 let Uses = [EXEC], Defs = [EXEC, SCC] in { 1944 1945 let isBranch = 1, isTerminator = 1 in { 1946 1947 def SI_IF: PseudoInstSI < 1948 (outs SReg_64:$dst), (ins SReg_64:$vcc, brtarget:$target), 1949 [(set i64:$dst, (int_amdgcn_if i1:$vcc, bb:$target))]> { 1950 let Constraints = ""; 1951 } 1952 1953 def SI_ELSE : PseudoInstSI < 1954 (outs SReg_64:$dst), (ins SReg_64:$src, brtarget:$target), 1955 [(set i64:$dst, (int_amdgcn_else i64:$src, bb:$target))]> { 1956 let Constraints = "$src = $dst"; 1957 } 1958 1959 def SI_LOOP : PseudoInstSI < 1960 (outs), (ins SReg_64:$saved, brtarget:$target), 1961 [(int_amdgcn_loop i64:$saved, bb:$target)] 1962 >; 1963 1964 } // End isBranch = 1, isTerminator = 1 1965 1966 1967 def SI_BREAK : PseudoInstSI < 1968 (outs SReg_64:$dst), (ins SReg_64:$src), 1969 [(set i64:$dst, (int_amdgcn_break i64:$src))] 1970 >; 1971 1972 def SI_IF_BREAK : PseudoInstSI < 1973 (outs SReg_64:$dst), (ins SReg_64:$vcc, SReg_64:$src), 1974 [(set i64:$dst, (int_amdgcn_if_break i1:$vcc, i64:$src))] 1975 >; 1976 1977 def SI_ELSE_BREAK : PseudoInstSI < 1978 (outs SReg_64:$dst), (ins SReg_64:$src0, SReg_64:$src1), 1979 [(set i64:$dst, (int_amdgcn_else_break i64:$src0, i64:$src1))] 1980 >; 1981 1982 def SI_END_CF : PseudoInstSI < 1983 (outs), (ins SReg_64:$saved), 1984 [(int_amdgcn_end_cf i64:$saved)] 1985 >; 1986 1987 } // End Uses = [EXEC], Defs = [EXEC, SCC] 1988 1989 let Uses = [EXEC], Defs = [EXEC,VCC] in { 1990 def SI_KILL : PseudoInstSI < 1991 (outs), (ins VSrc_32:$src), 1992 [(int_AMDGPU_kill f32:$src)]> { 1993 let isConvergent = 1; 1994 let usesCustomInserter = 1; 1995 } 1996 1997 def SI_KILL_TERMINATOR : PseudoInstSI < 1998 (outs), (ins VSrc_32:$src)> { 1999 let isTerminator = 1; 2000 } 2001 2002 } // End Uses = [EXEC], Defs = [EXEC,VCC] 2003 2004 } // End mayLoad = 1, mayStore = 1, hasSideEffects = 1 2005 2006 def SI_PS_LIVE : PseudoInstSI < 2007 (outs SReg_64:$dst), (ins), 2008 [(set i1:$dst, (int_amdgcn_ps_live))]> { 2009 let SALU = 1; 2010 } 2011 2012 // Used as an isel pseudo to directly emit 
initialization with an 2013 // s_mov_b32 rather than a copy of another initialized 2014 // register. MachineCSE skips copies, and we don't want to have to 2015 // fold operands before it runs. 2016 def SI_INIT_M0 : PseudoInstSI <(outs), (ins SSrc_32:$src)> { 2017 let Defs = [M0]; 2018 let usesCustomInserter = 1; 2019 let isAsCheapAsAMove = 1; 2020 let SALU = 1; 2021 let isReMaterializable = 1; 2022 } 2023 2024 def SI_RETURN : PseudoInstSI < 2025 (outs), (ins variable_ops), [(AMDGPUreturn)]> { 2026 let isTerminator = 1; 2027 let isBarrier = 1; 2028 let isReturn = 1; 2029 let hasSideEffects = 1; 2030 let SALU = 1; 2031 let hasNoSchedulingInfo = 1; 2032 } 2033 2034 let Uses = [EXEC], Defs = [EXEC, VCC, M0], 2035 UseNamedOperandTable = 1 in { 2036 2037 class SI_INDIRECT_SRC<RegisterClass rc> : PseudoInstSI < 2038 (outs VGPR_32:$vdst, SReg_64:$sdst), 2039 (ins rc:$src, VS_32:$idx, i32imm:$offset)>; 2040 2041 class SI_INDIRECT_DST<RegisterClass rc> : PseudoInstSI < 2042 (outs rc:$vdst, SReg_64:$sdst), 2043 (ins unknown:$src, VS_32:$idx, i32imm:$offset, VGPR_32:$val)> { 2044 let Constraints = "$src = $vdst"; 2045 } 2046 2047 // TODO: We can support indirect SGPR access. 2048 def SI_INDIRECT_SRC_V1 : SI_INDIRECT_SRC<VGPR_32>; 2049 def SI_INDIRECT_SRC_V2 : SI_INDIRECT_SRC<VReg_64>; 2050 def SI_INDIRECT_SRC_V4 : SI_INDIRECT_SRC<VReg_128>; 2051 def SI_INDIRECT_SRC_V8 : SI_INDIRECT_SRC<VReg_256>; 2052 def SI_INDIRECT_SRC_V16 : SI_INDIRECT_SRC<VReg_512>; 2053 2054 def SI_INDIRECT_DST_V1 : SI_INDIRECT_DST<VGPR_32>; 2055 def SI_INDIRECT_DST_V2 : SI_INDIRECT_DST<VReg_64>; 2056 def SI_INDIRECT_DST_V4 : SI_INDIRECT_DST<VReg_128>; 2057 def SI_INDIRECT_DST_V8 : SI_INDIRECT_DST<VReg_256>; 2058 def SI_INDIRECT_DST_V16 : SI_INDIRECT_DST<VReg_512>; 2059 2060 } // End Uses = [EXEC], Defs = [EXEC,VCC,M0] 2061 2062 multiclass SI_SPILL_SGPR <RegisterClass sgpr_class> { 2063 let UseNamedOperandTable = 1, Uses = [EXEC] in { 2064 def _SAVE : PseudoInstSI < 2065 (outs), 2066 (ins sgpr_class:$src, i32imm:$frame_idx)> { 2067 let mayStore = 1; 2068 let mayLoad = 0; 2069 } 2070 2071 def _RESTORE : PseudoInstSI < 2072 (outs sgpr_class:$dst), 2073 (ins i32imm:$frame_idx)> { 2074 let mayStore = 0; 2075 let mayLoad = 1; 2076 } 2077 } // End UseNamedOperandTable = 1 2078 } 2079 2080 // It's unclear whether you can use M0 as the output of v_readlane_b32 2081 // instructions, so use SReg_32_XM0 register class for spills to prevent 2082 // this from happening. 
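//
// A sketch of the intended lowering (assuming SGPR spills are routed through
// a VGPR lane by the spill/restore code; names of the expanded operands are
// illustrative only):
//   SI_SPILL_S32_SAVE s4, <fi#0>     ->  v_writelane_b32 vN, s4, <lane>
//   SI_SPILL_S32_RESTORE <fi#0>      ->  v_readlane_b32  s4, vN, <lane>
// Because v_readlane_b32 defines the restored SGPR, restricting these pseudos
// to SReg_32_XM0 keeps M0 from ever being that destination.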
2083 defm SI_SPILL_S32 : SI_SPILL_SGPR <SReg_32_XM0>; 2084 defm SI_SPILL_S64 : SI_SPILL_SGPR <SReg_64>; 2085 defm SI_SPILL_S128 : SI_SPILL_SGPR <SReg_128>; 2086 defm SI_SPILL_S256 : SI_SPILL_SGPR <SReg_256>; 2087 defm SI_SPILL_S512 : SI_SPILL_SGPR <SReg_512>; 2088 2089 multiclass SI_SPILL_VGPR <RegisterClass vgpr_class> { 2090 let UseNamedOperandTable = 1, VGPRSpill = 1, Uses = [EXEC] in { 2091 def _SAVE : PseudoInstSI < 2092 (outs), 2093 (ins vgpr_class:$src, i32imm:$frame_idx, SReg_128:$scratch_rsrc, 2094 SReg_32:$scratch_offset, i32imm:$offset)> { 2095 let mayStore = 1; 2096 let mayLoad = 0; 2097 } 2098 2099 def _RESTORE : PseudoInstSI < 2100 (outs vgpr_class:$dst), 2101 (ins i32imm:$frame_idx, SReg_128:$scratch_rsrc, SReg_32:$scratch_offset, 2102 i32imm:$offset)> { 2103 let mayStore = 0; 2104 let mayLoad = 1; 2105 } 2106 } // End UseNamedOperandTable = 1, VGPRSpill = 1 2107 } 2108 2109 defm SI_SPILL_V32 : SI_SPILL_VGPR <VGPR_32>; 2110 defm SI_SPILL_V64 : SI_SPILL_VGPR <VReg_64>; 2111 defm SI_SPILL_V96 : SI_SPILL_VGPR <VReg_96>; 2112 defm SI_SPILL_V128 : SI_SPILL_VGPR <VReg_128>; 2113 defm SI_SPILL_V256 : SI_SPILL_VGPR <VReg_256>; 2114 defm SI_SPILL_V512 : SI_SPILL_VGPR <VReg_512>; 2115 2116 let Defs = [SCC] in { 2117 2118 def SI_PC_ADD_REL_OFFSET : PseudoInstSI < 2119 (outs SReg_64:$dst), 2120 (ins si_ga:$ptr), 2121 [(set SReg_64:$dst, (i64 (SIpc_add_rel_offset (tglobaladdr:$ptr))))]> { 2122 let SALU = 1; 2123 } 2124 2125 } // End Defs = [SCC] 2126 2127 } // End SubtargetPredicate = isGCN 2128 2129 let Predicates = [isGCN] in { 2130 2131 def : Pat < 2132 (int_AMDGPU_kilp), 2133 (SI_KILL 0xbf800000) 2134 >; 2135 2136 /* int_SI_vs_load_input */ 2137 def : Pat< 2138 (SIload_input v4i32:$tlst, imm:$attr_offset, i32:$buf_idx_vgpr), 2139 (BUFFER_LOAD_FORMAT_XYZW_IDXEN $buf_idx_vgpr, $tlst, 0, imm:$attr_offset, 0, 0, 0) 2140 >; 2141 2142 def : Pat < 2143 (int_SI_export imm:$en, imm:$vm, imm:$done, imm:$tgt, imm:$compr, 2144 f32:$src0, f32:$src1, f32:$src2, f32:$src3), 2145 (EXP imm:$en, imm:$tgt, imm:$compr, imm:$done, imm:$vm, 2146 $src0, $src1, $src2, $src3) 2147 >; 2148 2149 //===----------------------------------------------------------------------===// 2150 // buffer_load/store_format patterns 2151 //===----------------------------------------------------------------------===// 2152 2153 multiclass MUBUF_LoadIntrinsicPat<SDPatternOperator name, ValueType vt, 2154 string opcode> { 2155 def : Pat< 2156 (vt (name v4i32:$rsrc, 0, 2157 (MUBUFIntrinsicOffset i32:$soffset, i16:$offset), 2158 imm:$glc, imm:$slc)), 2159 (!cast<MUBUF>(opcode # _OFFSET) $rsrc, $soffset, (as_i16imm $offset), 2160 (as_i1imm $glc), (as_i1imm $slc), 0) 2161 >; 2162 2163 def : Pat< 2164 (vt (name v4i32:$rsrc, i32:$vindex, 2165 (MUBUFIntrinsicOffset i32:$soffset, i16:$offset), 2166 imm:$glc, imm:$slc)), 2167 (!cast<MUBUF>(opcode # _IDXEN) $vindex, $rsrc, $soffset, (as_i16imm $offset), 2168 (as_i1imm $glc), (as_i1imm $slc), 0) 2169 >; 2170 2171 def : Pat< 2172 (vt (name v4i32:$rsrc, 0, 2173 (MUBUFIntrinsicVOffset i32:$soffset, i16:$offset, i32:$voffset), 2174 imm:$glc, imm:$slc)), 2175 (!cast<MUBUF>(opcode # _OFFEN) $voffset, $rsrc, $soffset, (as_i16imm $offset), 2176 (as_i1imm $glc), (as_i1imm $slc), 0) 2177 >; 2178 2179 def : Pat< 2180 (vt (name v4i32:$rsrc, i32:$vindex, 2181 (MUBUFIntrinsicVOffset i32:$soffset, i16:$offset, i32:$voffset), 2182 imm:$glc, imm:$slc)), 2183 (!cast<MUBUF>(opcode # _BOTHEN) 2184 (REG_SEQUENCE VReg_64, $vindex, sub0, $voffset, sub1), 2185 $rsrc, $soffset, (as_i16imm $offset), 2186 (as_i1imm 
$glc), (as_i1imm $slc), 0) 2187 >; 2188 } 2189 2190 defm : MUBUF_LoadIntrinsicPat<int_amdgcn_buffer_load_format, f32, "BUFFER_LOAD_FORMAT_X">; 2191 defm : MUBUF_LoadIntrinsicPat<int_amdgcn_buffer_load_format, v2f32, "BUFFER_LOAD_FORMAT_XY">; 2192 defm : MUBUF_LoadIntrinsicPat<int_amdgcn_buffer_load_format, v4f32, "BUFFER_LOAD_FORMAT_XYZW">; 2193 defm : MUBUF_LoadIntrinsicPat<int_amdgcn_buffer_load, f32, "BUFFER_LOAD_DWORD">; 2194 defm : MUBUF_LoadIntrinsicPat<int_amdgcn_buffer_load, v2f32, "BUFFER_LOAD_DWORDX2">; 2195 defm : MUBUF_LoadIntrinsicPat<int_amdgcn_buffer_load, v4f32, "BUFFER_LOAD_DWORDX4">; 2196 2197 multiclass MUBUF_StoreIntrinsicPat<SDPatternOperator name, ValueType vt, 2198 string opcode> { 2199 def : Pat< 2200 (name vt:$vdata, v4i32:$rsrc, 0, 2201 (MUBUFIntrinsicOffset i32:$soffset, i16:$offset), 2202 imm:$glc, imm:$slc), 2203 (!cast<MUBUF>(opcode # _OFFSET) $vdata, $rsrc, $soffset, (as_i16imm $offset), 2204 (as_i1imm $glc), (as_i1imm $slc), 0) 2205 >; 2206 2207 def : Pat< 2208 (name vt:$vdata, v4i32:$rsrc, i32:$vindex, 2209 (MUBUFIntrinsicOffset i32:$soffset, i16:$offset), 2210 imm:$glc, imm:$slc), 2211 (!cast<MUBUF>(opcode # _IDXEN) $vdata, $vindex, $rsrc, $soffset, 2212 (as_i16imm $offset), (as_i1imm $glc), 2213 (as_i1imm $slc), 0) 2214 >; 2215 2216 def : Pat< 2217 (name vt:$vdata, v4i32:$rsrc, 0, 2218 (MUBUFIntrinsicVOffset i32:$soffset, i16:$offset, i32:$voffset), 2219 imm:$glc, imm:$slc), 2220 (!cast<MUBUF>(opcode # _OFFEN) $vdata, $voffset, $rsrc, $soffset, 2221 (as_i16imm $offset), (as_i1imm $glc), 2222 (as_i1imm $slc), 0) 2223 >; 2224 2225 def : Pat< 2226 (name vt:$vdata, v4i32:$rsrc, i32:$vindex, 2227 (MUBUFIntrinsicVOffset i32:$soffset, i16:$offset, i32:$voffset), 2228 imm:$glc, imm:$slc), 2229 (!cast<MUBUF>(opcode # _BOTHEN) 2230 $vdata, 2231 (REG_SEQUENCE VReg_64, $vindex, sub0, $voffset, sub1), 2232 $rsrc, $soffset, (as_i16imm $offset), 2233 (as_i1imm $glc), (as_i1imm $slc), 0) 2234 >; 2235 } 2236 2237 defm : MUBUF_StoreIntrinsicPat<int_amdgcn_buffer_store_format, f32, "BUFFER_STORE_FORMAT_X">; 2238 defm : MUBUF_StoreIntrinsicPat<int_amdgcn_buffer_store_format, v2f32, "BUFFER_STORE_FORMAT_XY">; 2239 defm : MUBUF_StoreIntrinsicPat<int_amdgcn_buffer_store_format, v4f32, "BUFFER_STORE_FORMAT_XYZW">; 2240 defm : MUBUF_StoreIntrinsicPat<int_amdgcn_buffer_store, f32, "BUFFER_STORE_DWORD">; 2241 defm : MUBUF_StoreIntrinsicPat<int_amdgcn_buffer_store, v2f32, "BUFFER_STORE_DWORDX2">; 2242 defm : MUBUF_StoreIntrinsicPat<int_amdgcn_buffer_store, v4f32, "BUFFER_STORE_DWORDX4">; 2243 2244 //===----------------------------------------------------------------------===// 2245 // buffer_atomic patterns 2246 //===----------------------------------------------------------------------===// 2247 multiclass BufferAtomicPatterns<SDPatternOperator name, string opcode> { 2248 def : Pat< 2249 (name i32:$vdata_in, v4i32:$rsrc, 0, 2250 (MUBUFIntrinsicOffset i32:$soffset, i16:$offset), 2251 imm:$slc), 2252 (!cast<MUBUF>(opcode # _RTN_OFFSET) $vdata_in, $rsrc, $soffset, 2253 (as_i16imm $offset), (as_i1imm $slc)) 2254 >; 2255 2256 def : Pat< 2257 (name i32:$vdata_in, v4i32:$rsrc, i32:$vindex, 2258 (MUBUFIntrinsicOffset i32:$soffset, i16:$offset), 2259 imm:$slc), 2260 (!cast<MUBUF>(opcode # _RTN_IDXEN) $vdata_in, $vindex, $rsrc, $soffset, 2261 (as_i16imm $offset), (as_i1imm $slc)) 2262 >; 2263 2264 def : Pat< 2265 (name i32:$vdata_in, v4i32:$rsrc, 0, 2266 (MUBUFIntrinsicVOffset i32:$soffset, i16:$offset, i32:$voffset), 2267 imm:$slc), 2268 (!cast<MUBUF>(opcode # _RTN_OFFEN) $vdata_in, 
$voffset, $rsrc, $soffset, 2269 (as_i16imm $offset), (as_i1imm $slc)) 2270 >; 2271 2272 def : Pat< 2273 (name i32:$vdata_in, v4i32:$rsrc, i32:$vindex, 2274 (MUBUFIntrinsicVOffset i32:$soffset, i16:$offset, i32:$voffset), 2275 imm:$slc), 2276 (!cast<MUBUF>(opcode # _RTN_BOTHEN) 2277 $vdata_in, 2278 (REG_SEQUENCE VReg_64, $vindex, sub0, $voffset, sub1), 2279 $rsrc, $soffset, (as_i16imm $offset), (as_i1imm $slc)) 2280 >; 2281 } 2282 2283 defm : BufferAtomicPatterns<int_amdgcn_buffer_atomic_swap, "BUFFER_ATOMIC_SWAP">; 2284 defm : BufferAtomicPatterns<int_amdgcn_buffer_atomic_add, "BUFFER_ATOMIC_ADD">; 2285 defm : BufferAtomicPatterns<int_amdgcn_buffer_atomic_sub, "BUFFER_ATOMIC_SUB">; 2286 defm : BufferAtomicPatterns<int_amdgcn_buffer_atomic_smin, "BUFFER_ATOMIC_SMIN">; 2287 defm : BufferAtomicPatterns<int_amdgcn_buffer_atomic_umin, "BUFFER_ATOMIC_UMIN">; 2288 defm : BufferAtomicPatterns<int_amdgcn_buffer_atomic_smax, "BUFFER_ATOMIC_SMAX">; 2289 defm : BufferAtomicPatterns<int_amdgcn_buffer_atomic_umax, "BUFFER_ATOMIC_UMAX">; 2290 defm : BufferAtomicPatterns<int_amdgcn_buffer_atomic_and, "BUFFER_ATOMIC_AND">; 2291 defm : BufferAtomicPatterns<int_amdgcn_buffer_atomic_or, "BUFFER_ATOMIC_OR">; 2292 defm : BufferAtomicPatterns<int_amdgcn_buffer_atomic_xor, "BUFFER_ATOMIC_XOR">; 2293 2294 def : Pat< 2295 (int_amdgcn_buffer_atomic_cmpswap 2296 i32:$data, i32:$cmp, v4i32:$rsrc, 0, 2297 (MUBUFIntrinsicOffset i32:$soffset, i16:$offset), 2298 imm:$slc), 2299 (EXTRACT_SUBREG 2300 (BUFFER_ATOMIC_CMPSWAP_RTN_OFFSET 2301 (REG_SEQUENCE VReg_64, $data, sub0, $cmp, sub1), 2302 $rsrc, $soffset, (as_i16imm $offset), (as_i1imm $slc)), 2303 sub0) 2304 >; 2305 2306 def : Pat< 2307 (int_amdgcn_buffer_atomic_cmpswap 2308 i32:$data, i32:$cmp, v4i32:$rsrc, i32:$vindex, 2309 (MUBUFIntrinsicOffset i32:$soffset, i16:$offset), 2310 imm:$slc), 2311 (EXTRACT_SUBREG 2312 (BUFFER_ATOMIC_CMPSWAP_RTN_IDXEN 2313 (REG_SEQUENCE VReg_64, $data, sub0, $cmp, sub1), 2314 $vindex, $rsrc, $soffset, (as_i16imm $offset), (as_i1imm $slc)), 2315 sub0) 2316 >; 2317 2318 def : Pat< 2319 (int_amdgcn_buffer_atomic_cmpswap 2320 i32:$data, i32:$cmp, v4i32:$rsrc, 0, 2321 (MUBUFIntrinsicVOffset i32:$soffset, i16:$offset, i32:$voffset), 2322 imm:$slc), 2323 (EXTRACT_SUBREG 2324 (BUFFER_ATOMIC_CMPSWAP_RTN_OFFEN 2325 (REG_SEQUENCE VReg_64, $data, sub0, $cmp, sub1), 2326 $voffset, $rsrc, $soffset, (as_i16imm $offset), (as_i1imm $slc)), 2327 sub0) 2328 >; 2329 2330 def : Pat< 2331 (int_amdgcn_buffer_atomic_cmpswap 2332 i32:$data, i32:$cmp, v4i32:$rsrc, i32:$vindex, 2333 (MUBUFIntrinsicVOffset i32:$soffset, i16:$offset, i32:$voffset), 2334 imm:$slc), 2335 (EXTRACT_SUBREG 2336 (BUFFER_ATOMIC_CMPSWAP_RTN_BOTHEN 2337 (REG_SEQUENCE VReg_64, $data, sub0, $cmp, sub1), 2338 (REG_SEQUENCE VReg_64, $vindex, sub0, $voffset, sub1), 2339 $rsrc, $soffset, (as_i16imm $offset), (as_i1imm $slc)), 2340 sub0) 2341 >; 2342 2343 2344 //===----------------------------------------------------------------------===// 2345 // S_GETREG_B32 Intrinsic Pattern. 2346 //===----------------------------------------------------------------------===// 2347 def : Pat < 2348 (int_amdgcn_s_getreg imm:$simm16), 2349 (S_GETREG_B32 (as_i16imm $simm16)) 2350 >; 2351 2352 //===----------------------------------------------------------------------===// 2353 // DS_SWIZZLE Intrinsic Pattern. 
//===----------------------------------------------------------------------===//
def : Pat <
  (int_amdgcn_ds_swizzle i32:$src, imm:$offset16),
  (DS_SWIZZLE_B32 $src, (as_i16imm $offset16), (i1 0))
>;

//===----------------------------------------------------------------------===//
// SMRD Patterns
//===----------------------------------------------------------------------===//

multiclass SMRD_Pattern <string Instr, ValueType vt> {

  // 1. IMM offset
  def : Pat <
    (smrd_load (SMRDImm i64:$sbase, i32:$offset)),
    (vt (!cast<SMRD>(Instr#"_IMM") $sbase, $offset))
  >;

  // 2. SGPR offset
  def : Pat <
    (smrd_load (SMRDSgpr i64:$sbase, i32:$offset)),
    (vt (!cast<SMRD>(Instr#"_SGPR") $sbase, $offset))
  >;

  def : Pat <
    (smrd_load (SMRDImm32 i64:$sbase, i32:$offset)),
    (vt (!cast<SMRD>(Instr#"_IMM_ci") $sbase, $offset))
  > {
    let Predicates = [isCIOnly];
  }
}

// Global and constant loads can be selected to either MUBUF or SMRD
// instructions, but SMRD instructions are faster so we want the instruction
// selector to prefer those.
let AddedComplexity = 100 in {

defm : SMRD_Pattern <"S_LOAD_DWORD", i32>;
defm : SMRD_Pattern <"S_LOAD_DWORDX2", v2i32>;
defm : SMRD_Pattern <"S_LOAD_DWORDX4", v4i32>;
defm : SMRD_Pattern <"S_LOAD_DWORDX8", v8i32>;
defm : SMRD_Pattern <"S_LOAD_DWORDX16", v16i32>;

// 1. Offset as an immediate
def : Pat <
  (SIload_constant v4i32:$sbase, (SMRDBufferImm i32:$offset)),
  (S_BUFFER_LOAD_DWORD_IMM $sbase, $offset)
>;

// 2. Offset loaded in a 32-bit SGPR
def : Pat <
  (SIload_constant v4i32:$sbase, (SMRDBufferSgpr i32:$offset)),
  (S_BUFFER_LOAD_DWORD_SGPR $sbase, $offset)
>;

let Predicates = [isCI] in {

def : Pat <
  (SIload_constant v4i32:$sbase, (SMRDBufferImm32 i32:$offset)),
  (S_BUFFER_LOAD_DWORD_IMM_ci $sbase, $offset)
>;

} // End Predicates = [isCI]

} // End AddedComplexity = 100

//===----------------------------------------------------------------------===//
// SOP1 Patterns
//===----------------------------------------------------------------------===//

def : Pat <
  (i64 (ctpop i64:$src)),
    (i64 (REG_SEQUENCE SReg_64,
     (i32 (COPY_TO_REGCLASS (S_BCNT1_I32_B64 $src), SReg_32)), sub0,
     (S_MOV_B32 0), sub1))
>;

def : Pat <
  (i32 (smax i32:$x, (i32 (ineg i32:$x)))),
  (S_ABS_I32 $x)
>;

//===----------------------------------------------------------------------===//
// SOP2 Patterns
//===----------------------------------------------------------------------===//

// V_ADD_I32_e32/S_ADD_U32 produce carry in VCC/SCC. For the vector
// case, the sgpr-copies pass will fix this to use the vector version.
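//
// Illustrative sketch of what that means for a 32-bit add with carry-out:
//   (i32 (addc i32:$a, i32:$b))  --isel-->  s_add_u32 s0, s1, s2  ; carry in SCC
// and if the operands turn out to live in VGPRs, the SGPR-copies fixup
// rewrites it to the vector form:
//   v_add_i32_e32 v0, vcc, v1, v2                                 ; carry in VCC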
2442 def : Pat < 2443 (i32 (addc i32:$src0, i32:$src1)), 2444 (S_ADD_U32 $src0, $src1) 2445 >; 2446 2447 //===----------------------------------------------------------------------===// 2448 // SOPP Patterns 2449 //===----------------------------------------------------------------------===// 2450 2451 def : Pat < 2452 (int_amdgcn_s_waitcnt i32:$simm16), 2453 (S_WAITCNT (as_i16imm $simm16)) 2454 >; 2455 2456 // FIXME: These should be removed eventually 2457 def : Pat < 2458 (int_AMDGPU_barrier_global), 2459 (S_BARRIER) 2460 >; 2461 2462 def : Pat < 2463 (int_AMDGPU_barrier_local), 2464 (S_BARRIER) 2465 >; 2466 2467 //===----------------------------------------------------------------------===// 2468 // VOP1 Patterns 2469 //===----------------------------------------------------------------------===// 2470 2471 let Predicates = [UnsafeFPMath] in { 2472 2473 //def : RcpPat<V_RCP_F64_e32, f64>; 2474 //defm : RsqPat<V_RSQ_F64_e32, f64>; 2475 //defm : RsqPat<V_RSQ_F32_e32, f32>; 2476 2477 def : RsqPat<V_RSQ_F32_e32, f32>; 2478 def : RsqPat<V_RSQ_F64_e32, f64>; 2479 2480 // Convert (x - floor(x)) to fract(x) 2481 def : Pat < 2482 (f32 (fsub (f32 (VOP3Mods f32:$x, i32:$mods)), 2483 (f32 (ffloor (f32 (VOP3Mods f32:$x, i32:$mods)))))), 2484 (V_FRACT_F32_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE) 2485 >; 2486 2487 // Convert (x + (-floor(x))) to fract(x) 2488 def : Pat < 2489 (f64 (fadd (f64 (VOP3Mods f64:$x, i32:$mods)), 2490 (f64 (fneg (f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))))))), 2491 (V_FRACT_F64_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE) 2492 >; 2493 2494 } // End Predicates = [UnsafeFPMath] 2495 2496 //===----------------------------------------------------------------------===// 2497 // VOP2 Patterns 2498 //===----------------------------------------------------------------------===// 2499 2500 def : Pat < 2501 (i32 (add (i32 (ctpop i32:$popcnt)), i32:$val)), 2502 (V_BCNT_U32_B32_e64 $popcnt, $val) 2503 >; 2504 2505 def : Pat < 2506 (i32 (select i1:$src0, i32:$src1, i32:$src2)), 2507 (V_CNDMASK_B32_e64 $src2, $src1, $src0) 2508 >; 2509 2510 // Pattern for V_MAC_F32 2511 def : Pat < 2512 (fmad (VOP3NoMods0 f32:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omod), 2513 (VOP3NoMods f32:$src1, i32:$src1_modifiers), 2514 (VOP3NoMods f32:$src2, i32:$src2_modifiers)), 2515 (V_MAC_F32_e64 $src0_modifiers, $src0, $src1_modifiers, $src1, 2516 $src2_modifiers, $src2, $clamp, $omod) 2517 >; 2518 2519 /********** ======================= **********/ 2520 /********** Image sampling patterns **********/ 2521 /********** ======================= **********/ 2522 2523 // Image + sampler 2524 class SampleRawPattern<SDPatternOperator name, MIMG opcode, ValueType vt> : Pat < 2525 (name vt:$addr, v8i32:$rsrc, v4i32:$sampler, i32:$dmask, i32:$unorm, 2526 i32:$r128, i32:$da, i32:$glc, i32:$slc, i32:$tfe, i32:$lwe), 2527 (opcode $addr, $rsrc, $sampler, 2528 (as_i32imm $dmask), (as_i1imm $unorm), (as_i1imm $glc), (as_i1imm $slc), 2529 (as_i1imm $r128), (as_i1imm $tfe), (as_i1imm $lwe), (as_i1imm $da)) 2530 >; 2531 2532 multiclass SampleRawPatterns<SDPatternOperator name, string opcode> { 2533 def : SampleRawPattern<name, !cast<MIMG>(opcode # _V4_V1), i32>; 2534 def : SampleRawPattern<name, !cast<MIMG>(opcode # _V4_V2), v2i32>; 2535 def : SampleRawPattern<name, !cast<MIMG>(opcode # _V4_V4), v4i32>; 2536 def : SampleRawPattern<name, !cast<MIMG>(opcode # _V4_V8), v8i32>; 2537 def : SampleRawPattern<name, !cast<MIMG>(opcode # _V4_V16), v16i32>; 2538 } 2539 2540 // Image only 2541 class 
ImagePattern<SDPatternOperator name, MIMG opcode, ValueType vt> : Pat < 2542 (name vt:$addr, v8i32:$rsrc, imm:$dmask, imm:$unorm, 2543 imm:$r128, imm:$da, imm:$glc, imm:$slc, imm:$tfe, imm:$lwe), 2544 (opcode $addr, $rsrc, 2545 (as_i32imm $dmask), (as_i1imm $unorm), (as_i1imm $glc), (as_i1imm $slc), 2546 (as_i1imm $r128), (as_i1imm $tfe), (as_i1imm $lwe), (as_i1imm $da)) 2547 >; 2548 2549 multiclass ImagePatterns<SDPatternOperator name, string opcode> { 2550 def : ImagePattern<name, !cast<MIMG>(opcode # _V4_V1), i32>; 2551 def : ImagePattern<name, !cast<MIMG>(opcode # _V4_V2), v2i32>; 2552 def : ImagePattern<name, !cast<MIMG>(opcode # _V4_V4), v4i32>; 2553 } 2554 2555 class ImageLoadPattern<SDPatternOperator name, MIMG opcode, ValueType vt> : Pat < 2556 (name vt:$addr, v8i32:$rsrc, imm:$dmask, imm:$r128, imm:$da, imm:$glc, 2557 imm:$slc), 2558 (opcode $addr, $rsrc, 2559 (as_i32imm $dmask), 1, (as_i1imm $glc), (as_i1imm $slc), 2560 (as_i1imm $r128), 0, 0, (as_i1imm $da)) 2561 >; 2562 2563 multiclass ImageLoadPatterns<SDPatternOperator name, string opcode> { 2564 def : ImageLoadPattern<name, !cast<MIMG>(opcode # _V4_V1), i32>; 2565 def : ImageLoadPattern<name, !cast<MIMG>(opcode # _V4_V2), v2i32>; 2566 def : ImageLoadPattern<name, !cast<MIMG>(opcode # _V4_V4), v4i32>; 2567 } 2568 2569 class ImageStorePattern<SDPatternOperator name, MIMG opcode, ValueType vt> : Pat < 2570 (name v4f32:$data, vt:$addr, v8i32:$rsrc, i32:$dmask, imm:$r128, imm:$da, 2571 imm:$glc, imm:$slc), 2572 (opcode $data, $addr, $rsrc, 2573 (as_i32imm $dmask), 1, (as_i1imm $glc), (as_i1imm $slc), 2574 (as_i1imm $r128), 0, 0, (as_i1imm $da)) 2575 >; 2576 2577 multiclass ImageStorePatterns<SDPatternOperator name, string opcode> { 2578 def : ImageStorePattern<name, !cast<MIMG>(opcode # _V4_V1), i32>; 2579 def : ImageStorePattern<name, !cast<MIMG>(opcode # _V4_V2), v2i32>; 2580 def : ImageStorePattern<name, !cast<MIMG>(opcode # _V4_V4), v4i32>; 2581 } 2582 2583 class ImageAtomicPattern<SDPatternOperator name, MIMG opcode, ValueType vt> : Pat < 2584 (name i32:$vdata, vt:$addr, v8i32:$rsrc, imm:$r128, imm:$da, imm:$slc), 2585 (opcode $vdata, $addr, $rsrc, 1, 1, 1, (as_i1imm $slc), (as_i1imm $r128), 0, 0, (as_i1imm $da)) 2586 >; 2587 2588 multiclass ImageAtomicPatterns<SDPatternOperator name, string opcode> { 2589 def : ImageAtomicPattern<name, !cast<MIMG>(opcode # _V1), i32>; 2590 def : ImageAtomicPattern<name, !cast<MIMG>(opcode # _V2), v2i32>; 2591 def : ImageAtomicPattern<name, !cast<MIMG>(opcode # _V4), v4i32>; 2592 } 2593 2594 class ImageAtomicCmpSwapPattern<MIMG opcode, ValueType vt> : Pat < 2595 (int_amdgcn_image_atomic_cmpswap i32:$vsrc, i32:$vcmp, vt:$addr, v8i32:$rsrc, 2596 imm:$r128, imm:$da, imm:$slc), 2597 (EXTRACT_SUBREG 2598 (opcode (REG_SEQUENCE VReg_64, $vsrc, sub0, $vcmp, sub1), 2599 $addr, $rsrc, 3, 1, 1, (as_i1imm $slc), (as_i1imm $r128), 0, 0, (as_i1imm $da)), 2600 sub0) 2601 >; 2602 2603 // Basic sample 2604 defm : SampleRawPatterns<int_SI_image_sample, "IMAGE_SAMPLE">; 2605 defm : SampleRawPatterns<int_SI_image_sample_cl, "IMAGE_SAMPLE_CL">; 2606 defm : SampleRawPatterns<int_SI_image_sample_d, "IMAGE_SAMPLE_D">; 2607 defm : SampleRawPatterns<int_SI_image_sample_d_cl, "IMAGE_SAMPLE_D_CL">; 2608 defm : SampleRawPatterns<int_SI_image_sample_l, "IMAGE_SAMPLE_L">; 2609 defm : SampleRawPatterns<int_SI_image_sample_b, "IMAGE_SAMPLE_B">; 2610 defm : SampleRawPatterns<int_SI_image_sample_b_cl, "IMAGE_SAMPLE_B_CL">; 2611 defm : SampleRawPatterns<int_SI_image_sample_lz, "IMAGE_SAMPLE_LZ">; 2612 defm : 
SampleRawPatterns<int_SI_image_sample_cd, "IMAGE_SAMPLE_CD">; 2613 defm : SampleRawPatterns<int_SI_image_sample_cd_cl, "IMAGE_SAMPLE_CD_CL">; 2614 2615 // Sample with comparison 2616 defm : SampleRawPatterns<int_SI_image_sample_c, "IMAGE_SAMPLE_C">; 2617 defm : SampleRawPatterns<int_SI_image_sample_c_cl, "IMAGE_SAMPLE_C_CL">; 2618 defm : SampleRawPatterns<int_SI_image_sample_c_d, "IMAGE_SAMPLE_C_D">; 2619 defm : SampleRawPatterns<int_SI_image_sample_c_d_cl, "IMAGE_SAMPLE_C_D_CL">; 2620 defm : SampleRawPatterns<int_SI_image_sample_c_l, "IMAGE_SAMPLE_C_L">; 2621 defm : SampleRawPatterns<int_SI_image_sample_c_b, "IMAGE_SAMPLE_C_B">; 2622 defm : SampleRawPatterns<int_SI_image_sample_c_b_cl, "IMAGE_SAMPLE_C_B_CL">; 2623 defm : SampleRawPatterns<int_SI_image_sample_c_lz, "IMAGE_SAMPLE_C_LZ">; 2624 defm : SampleRawPatterns<int_SI_image_sample_c_cd, "IMAGE_SAMPLE_C_CD">; 2625 defm : SampleRawPatterns<int_SI_image_sample_c_cd_cl, "IMAGE_SAMPLE_C_CD_CL">; 2626 2627 // Sample with offsets 2628 defm : SampleRawPatterns<int_SI_image_sample_o, "IMAGE_SAMPLE_O">; 2629 defm : SampleRawPatterns<int_SI_image_sample_cl_o, "IMAGE_SAMPLE_CL_O">; 2630 defm : SampleRawPatterns<int_SI_image_sample_d_o, "IMAGE_SAMPLE_D_O">; 2631 defm : SampleRawPatterns<int_SI_image_sample_d_cl_o, "IMAGE_SAMPLE_D_CL_O">; 2632 defm : SampleRawPatterns<int_SI_image_sample_l_o, "IMAGE_SAMPLE_L_O">; 2633 defm : SampleRawPatterns<int_SI_image_sample_b_o, "IMAGE_SAMPLE_B_O">; 2634 defm : SampleRawPatterns<int_SI_image_sample_b_cl_o, "IMAGE_SAMPLE_B_CL_O">; 2635 defm : SampleRawPatterns<int_SI_image_sample_lz_o, "IMAGE_SAMPLE_LZ_O">; 2636 defm : SampleRawPatterns<int_SI_image_sample_cd_o, "IMAGE_SAMPLE_CD_O">; 2637 defm : SampleRawPatterns<int_SI_image_sample_cd_cl_o, "IMAGE_SAMPLE_CD_CL_O">; 2638 2639 // Sample with comparison and offsets 2640 defm : SampleRawPatterns<int_SI_image_sample_c_o, "IMAGE_SAMPLE_C_O">; 2641 defm : SampleRawPatterns<int_SI_image_sample_c_cl_o, "IMAGE_SAMPLE_C_CL_O">; 2642 defm : SampleRawPatterns<int_SI_image_sample_c_d_o, "IMAGE_SAMPLE_C_D_O">; 2643 defm : SampleRawPatterns<int_SI_image_sample_c_d_cl_o, "IMAGE_SAMPLE_C_D_CL_O">; 2644 defm : SampleRawPatterns<int_SI_image_sample_c_l_o, "IMAGE_SAMPLE_C_L_O">; 2645 defm : SampleRawPatterns<int_SI_image_sample_c_b_o, "IMAGE_SAMPLE_C_B_O">; 2646 defm : SampleRawPatterns<int_SI_image_sample_c_b_cl_o, "IMAGE_SAMPLE_C_B_CL_O">; 2647 defm : SampleRawPatterns<int_SI_image_sample_c_lz_o, "IMAGE_SAMPLE_C_LZ_O">; 2648 defm : SampleRawPatterns<int_SI_image_sample_c_cd_o, "IMAGE_SAMPLE_C_CD_O">; 2649 defm : SampleRawPatterns<int_SI_image_sample_c_cd_cl_o, "IMAGE_SAMPLE_C_CD_CL_O">; 2650 2651 // Gather opcodes 2652 // Only the variants which make sense are defined. 
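//
// Naming sketch (assuming the usual MIMG multiclass convention): the
// IMAGE_GATHER4*_V4_Vn variants return a 4-dword result from an n-dword
// address vector, so e.g. the v4i32 address patterns below select the
// _V4_V4 opcodes and the v8i32 ones select _V4_V8. gather4 always writes
// four components, which is why only _V4_* destinations appear here.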
// Gather opcodes
// Only the variants which make sense are defined.
def : SampleRawPattern<int_SI_gather4, IMAGE_GATHER4_V4_V2, v2i32>;
def : SampleRawPattern<int_SI_gather4, IMAGE_GATHER4_V4_V4, v4i32>;
def : SampleRawPattern<int_SI_gather4_cl, IMAGE_GATHER4_CL_V4_V4, v4i32>;
def : SampleRawPattern<int_SI_gather4_l, IMAGE_GATHER4_L_V4_V4, v4i32>;
def : SampleRawPattern<int_SI_gather4_b, IMAGE_GATHER4_B_V4_V4, v4i32>;
def : SampleRawPattern<int_SI_gather4_b_cl, IMAGE_GATHER4_B_CL_V4_V4, v4i32>;
def : SampleRawPattern<int_SI_gather4_b_cl, IMAGE_GATHER4_B_CL_V4_V8, v8i32>;
def : SampleRawPattern<int_SI_gather4_lz, IMAGE_GATHER4_LZ_V4_V2, v2i32>;
def : SampleRawPattern<int_SI_gather4_lz, IMAGE_GATHER4_LZ_V4_V4, v4i32>;

def : SampleRawPattern<int_SI_gather4_c, IMAGE_GATHER4_C_V4_V4, v4i32>;
def : SampleRawPattern<int_SI_gather4_c_cl, IMAGE_GATHER4_C_CL_V4_V4, v4i32>;
def : SampleRawPattern<int_SI_gather4_c_cl, IMAGE_GATHER4_C_CL_V4_V8, v8i32>;
def : SampleRawPattern<int_SI_gather4_c_l, IMAGE_GATHER4_C_L_V4_V4, v4i32>;
def : SampleRawPattern<int_SI_gather4_c_l, IMAGE_GATHER4_C_L_V4_V8, v8i32>;
def : SampleRawPattern<int_SI_gather4_c_b, IMAGE_GATHER4_C_B_V4_V4, v4i32>;
def : SampleRawPattern<int_SI_gather4_c_b, IMAGE_GATHER4_C_B_V4_V8, v8i32>;
def : SampleRawPattern<int_SI_gather4_c_b_cl, IMAGE_GATHER4_C_B_CL_V4_V8, v8i32>;
def : SampleRawPattern<int_SI_gather4_c_lz, IMAGE_GATHER4_C_LZ_V4_V4, v4i32>;

def : SampleRawPattern<int_SI_gather4_o, IMAGE_GATHER4_O_V4_V4, v4i32>;
def : SampleRawPattern<int_SI_gather4_cl_o, IMAGE_GATHER4_CL_O_V4_V4, v4i32>;
def : SampleRawPattern<int_SI_gather4_cl_o, IMAGE_GATHER4_CL_O_V4_V8, v8i32>;
def : SampleRawPattern<int_SI_gather4_l_o, IMAGE_GATHER4_L_O_V4_V4, v4i32>;
def : SampleRawPattern<int_SI_gather4_l_o, IMAGE_GATHER4_L_O_V4_V8, v8i32>;
def : SampleRawPattern<int_SI_gather4_b_o, IMAGE_GATHER4_B_O_V4_V4, v4i32>;
def : SampleRawPattern<int_SI_gather4_b_o, IMAGE_GATHER4_B_O_V4_V8, v8i32>;
def : SampleRawPattern<int_SI_gather4_b_cl_o, IMAGE_GATHER4_B_CL_O_V4_V8, v8i32>;
def : SampleRawPattern<int_SI_gather4_lz_o, IMAGE_GATHER4_LZ_O_V4_V4, v4i32>;

def : SampleRawPattern<int_SI_gather4_c_o, IMAGE_GATHER4_C_O_V4_V4, v4i32>;
def : SampleRawPattern<int_SI_gather4_c_o, IMAGE_GATHER4_C_O_V4_V8, v8i32>;
def : SampleRawPattern<int_SI_gather4_c_cl_o, IMAGE_GATHER4_C_CL_O_V4_V8, v8i32>;
def : SampleRawPattern<int_SI_gather4_c_l_o, IMAGE_GATHER4_C_L_O_V4_V8, v8i32>;
def : SampleRawPattern<int_SI_gather4_c_b_o, IMAGE_GATHER4_C_B_O_V4_V8, v8i32>;
def : SampleRawPattern<int_SI_gather4_c_b_cl_o, IMAGE_GATHER4_C_B_CL_O_V4_V8, v8i32>;
def : SampleRawPattern<int_SI_gather4_c_lz_o, IMAGE_GATHER4_C_LZ_O_V4_V4, v4i32>;
def : SampleRawPattern<int_SI_gather4_c_lz_o, IMAGE_GATHER4_C_LZ_O_V4_V8, v8i32>;

def : SampleRawPattern<int_SI_getlod, IMAGE_GET_LOD_V4_V1, i32>;
def : SampleRawPattern<int_SI_getlod, IMAGE_GET_LOD_V4_V2, v2i32>;
def : SampleRawPattern<int_SI_getlod, IMAGE_GET_LOD_V4_V4, v4i32>;

def : ImagePattern<int_SI_getresinfo, IMAGE_GET_RESINFO_V4_V1, i32>;
defm : ImagePatterns<int_SI_image_load, "IMAGE_LOAD">;
defm : ImagePatterns<int_SI_image_load_mip, "IMAGE_LOAD_MIP">;
defm : ImageLoadPatterns<int_amdgcn_image_load, "IMAGE_LOAD">;
defm : ImageLoadPatterns<int_amdgcn_image_load_mip, "IMAGE_LOAD_MIP">;
defm : ImageStorePatterns<int_amdgcn_image_store, "IMAGE_STORE">;
defm : ImageStorePatterns<int_amdgcn_image_store_mip, "IMAGE_STORE_MIP">;
defm : ImageAtomicPatterns<int_amdgcn_image_atomic_swap, "IMAGE_ATOMIC_SWAP">;
def : ImageAtomicCmpSwapPattern<IMAGE_ATOMIC_CMPSWAP_V1, i32>;
def : ImageAtomicCmpSwapPattern<IMAGE_ATOMIC_CMPSWAP_V2, v2i32>;
def : ImageAtomicCmpSwapPattern<IMAGE_ATOMIC_CMPSWAP_V4, v4i32>;
defm : ImageAtomicPatterns<int_amdgcn_image_atomic_add, "IMAGE_ATOMIC_ADD">;
defm : ImageAtomicPatterns<int_amdgcn_image_atomic_sub, "IMAGE_ATOMIC_SUB">;
defm : ImageAtomicPatterns<int_amdgcn_image_atomic_smin, "IMAGE_ATOMIC_SMIN">;
defm : ImageAtomicPatterns<int_amdgcn_image_atomic_umin, "IMAGE_ATOMIC_UMIN">;
defm : ImageAtomicPatterns<int_amdgcn_image_atomic_smax, "IMAGE_ATOMIC_SMAX">;
defm : ImageAtomicPatterns<int_amdgcn_image_atomic_umax, "IMAGE_ATOMIC_UMAX">;
defm : ImageAtomicPatterns<int_amdgcn_image_atomic_and, "IMAGE_ATOMIC_AND">;
defm : ImageAtomicPatterns<int_amdgcn_image_atomic_or, "IMAGE_ATOMIC_OR">;
defm : ImageAtomicPatterns<int_amdgcn_image_atomic_xor, "IMAGE_ATOMIC_XOR">;
defm : ImageAtomicPatterns<int_amdgcn_image_atomic_inc, "IMAGE_ATOMIC_INC">;
defm : ImageAtomicPatterns<int_amdgcn_image_atomic_dec, "IMAGE_ATOMIC_DEC">;

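// The image atomic selections above hard-code dmask=1, unorm=1 and glc=1;
// for MIMG atomics glc=1 requests the pre-operation value back in vdata,
// which is presumably why it is always set here. ImageAtomicCmpSwapPattern
// packs the swap and compare values into a 64-bit register pair (dmask=3,
// i.e. two data dwords) and then extracts sub0 of the result, since the
// original memory value comes back in the low dword.
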
/* SIsample for simple 1D texture lookup */
def : Pat <
  (SIsample i32:$addr, v8i32:$rsrc, v4i32:$sampler, imm),
  (IMAGE_SAMPLE_V4_V1 $addr, $rsrc, $sampler, 0xf, 0, 0, 0, 0, 0, 0, 0)
>;

class SamplePattern<SDNode name, MIMG opcode, ValueType vt> : Pat <
  (name vt:$addr, v8i32:$rsrc, v4i32:$sampler, imm),
  (opcode $addr, $rsrc, $sampler, 0xf, 0, 0, 0, 0, 0, 0, 0)
>;

class SampleRectPattern<SDNode name, MIMG opcode, ValueType vt> : Pat <
  (name vt:$addr, v8i32:$rsrc, v4i32:$sampler, TEX_RECT),
  (opcode $addr, $rsrc, $sampler, 0xf, 1, 0, 0, 0, 0, 0, 0)
>;

class SampleArrayPattern<SDNode name, MIMG opcode, ValueType vt> : Pat <
  (name vt:$addr, v8i32:$rsrc, v4i32:$sampler, TEX_ARRAY),
  (opcode $addr, $rsrc, $sampler, 0xf, 0, 0, 0, 0, 0, 0, 1)
>;

class SampleShadowPattern<SDNode name, MIMG opcode,
                          ValueType vt> : Pat <
  (name vt:$addr, v8i32:$rsrc, v4i32:$sampler, TEX_SHADOW),
  (opcode $addr, $rsrc, $sampler, 0xf, 0, 0, 0, 0, 0, 0, 0)
>;

class SampleShadowArrayPattern<SDNode name, MIMG opcode,
                               ValueType vt> : Pat <
  (name vt:$addr, v8i32:$rsrc, v4i32:$sampler, TEX_SHADOW_ARRAY),
  (opcode $addr, $rsrc, $sampler, 0xf, 0, 0, 0, 0, 0, 0, 1)
>;

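// In the Sample*Pattern classes above the hard-coded MIMG operands follow the
// same order as in ImagePattern (dmask, unorm, glc, slc, r128, tfe, lwe, da):
// dmask is always 0xf (all four channels), TEX_RECT sets the unorm bit
// (unnormalized coordinates), and the array/shadow-array variants set the
// trailing da bit.
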
/* SIsample* for texture lookups consuming more address parameters */
multiclass SamplePatterns<MIMG sample, MIMG sample_c, MIMG sample_l,
                          MIMG sample_c_l, MIMG sample_b, MIMG sample_c_b,
                          MIMG sample_d, MIMG sample_c_d, ValueType addr_type> {
  def : SamplePattern <SIsample, sample, addr_type>;
  def : SampleRectPattern <SIsample, sample, addr_type>;
  def : SampleArrayPattern <SIsample, sample, addr_type>;
  def : SampleShadowPattern <SIsample, sample_c, addr_type>;
  def : SampleShadowArrayPattern <SIsample, sample_c, addr_type>;

  def : SamplePattern <SIsamplel, sample_l, addr_type>;
  def : SampleArrayPattern <SIsamplel, sample_l, addr_type>;
  def : SampleShadowPattern <SIsamplel, sample_c_l, addr_type>;
  def : SampleShadowArrayPattern <SIsamplel, sample_c_l, addr_type>;

  def : SamplePattern <SIsampleb, sample_b, addr_type>;
  def : SampleArrayPattern <SIsampleb, sample_b, addr_type>;
  def : SampleShadowPattern <SIsampleb, sample_c_b, addr_type>;
  def : SampleShadowArrayPattern <SIsampleb, sample_c_b, addr_type>;

  def : SamplePattern <SIsampled, sample_d, addr_type>;
  def : SampleArrayPattern <SIsampled, sample_d, addr_type>;
  def : SampleShadowPattern <SIsampled, sample_c_d, addr_type>;
  def : SampleShadowArrayPattern <SIsampled, sample_c_d, addr_type>;
}

defm : SamplePatterns<IMAGE_SAMPLE_V4_V2, IMAGE_SAMPLE_C_V4_V2,
                      IMAGE_SAMPLE_L_V4_V2, IMAGE_SAMPLE_C_L_V4_V2,
                      IMAGE_SAMPLE_B_V4_V2, IMAGE_SAMPLE_C_B_V4_V2,
                      IMAGE_SAMPLE_D_V4_V2, IMAGE_SAMPLE_C_D_V4_V2,
                      v2i32>;
defm : SamplePatterns<IMAGE_SAMPLE_V4_V4, IMAGE_SAMPLE_C_V4_V4,
                      IMAGE_SAMPLE_L_V4_V4, IMAGE_SAMPLE_C_L_V4_V4,
                      IMAGE_SAMPLE_B_V4_V4, IMAGE_SAMPLE_C_B_V4_V4,
                      IMAGE_SAMPLE_D_V4_V4, IMAGE_SAMPLE_C_D_V4_V4,
                      v4i32>;
defm : SamplePatterns<IMAGE_SAMPLE_V4_V8, IMAGE_SAMPLE_C_V4_V8,
                      IMAGE_SAMPLE_L_V4_V8, IMAGE_SAMPLE_C_L_V4_V8,
                      IMAGE_SAMPLE_B_V4_V8, IMAGE_SAMPLE_C_B_V4_V8,
                      IMAGE_SAMPLE_D_V4_V8, IMAGE_SAMPLE_C_D_V4_V8,
                      v8i32>;
defm : SamplePatterns<IMAGE_SAMPLE_V4_V16, IMAGE_SAMPLE_C_V4_V16,
                      IMAGE_SAMPLE_L_V4_V16, IMAGE_SAMPLE_C_L_V4_V16,
                      IMAGE_SAMPLE_B_V4_V16, IMAGE_SAMPLE_C_B_V4_V16,
                      IMAGE_SAMPLE_D_V4_V16, IMAGE_SAMPLE_C_D_V4_V16,
                      v16i32>;

/********** ============================================ **********/
/********** Extraction, Insertion, Building and Casting  **********/
/********** ============================================ **********/

foreach Index = 0-2 in {
  def Extract_Element_v2i32_#Index : Extract_Element <
    i32, v2i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v2i32_#Index : Insert_Element <
    i32, v2i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v2f32_#Index : Extract_Element <
    f32, v2f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v2f32_#Index : Insert_Element <
    f32, v2f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

foreach Index = 0-3 in {
  def Extract_Element_v4i32_#Index : Extract_Element <
    i32, v4i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v4i32_#Index : Insert_Element <
    i32, v4i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v4f32_#Index : Extract_Element <
    f32, v4f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v4f32_#Index : Insert_Element <
    f32, v4f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

foreach Index = 0-7 in {
  def Extract_Element_v8i32_#Index : Extract_Element <
    i32, v8i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v8i32_#Index : Insert_Element <
    i32, v8i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v8f32_#Index : Extract_Element <
    f32, v8f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v8f32_#Index : Insert_Element <
    f32, v8f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

foreach Index = 0-15 in {
  def Extract_Element_v16i32_#Index : Extract_Element <
    i32, v16i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v16i32_#Index : Insert_Element <
    i32, v16i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v16f32_#Index : Extract_Element <
    f32, v16f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v16f32_#Index : Insert_Element <
    f32, v16f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

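// The # concatenations above splice the loop index into both the def name and
// the sub-register index, so a single iteration (Index = 2 of the v4f32 loop,
// for instance) should produce roughly:
//   def Extract_Element_v4f32_2 : Extract_Element <f32, v4f32, 2, sub2>;
//   def Insert_Element_v4f32_2  : Insert_Element  <f32, v4f32, 2, sub2>;
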
// FIXME: Why are only some of these type combinations defined for SReg and
// VReg?
// 32-bit bitcast
def : BitConvert <i32, f32, VGPR_32>;
def : BitConvert <f32, i32, VGPR_32>;
def : BitConvert <i32, f32, SReg_32>;
def : BitConvert <f32, i32, SReg_32>;

// 64-bit bitcast
def : BitConvert <i64, f64, VReg_64>;
def : BitConvert <f64, i64, VReg_64>;
def : BitConvert <v2i32, v2f32, VReg_64>;
def : BitConvert <v2f32, v2i32, VReg_64>;
def : BitConvert <i64, v2i32, VReg_64>;
def : BitConvert <v2i32, i64, VReg_64>;
def : BitConvert <i64, v2f32, VReg_64>;
def : BitConvert <v2f32, i64, VReg_64>;
def : BitConvert <f64, v2f32, VReg_64>;
def : BitConvert <v2f32, f64, VReg_64>;
def : BitConvert <f64, v2i32, VReg_64>;
def : BitConvert <v2i32, f64, VReg_64>;
def : BitConvert <v4i32, v4f32, VReg_128>;
def : BitConvert <v4f32, v4i32, VReg_128>;

// 128-bit bitcast
def : BitConvert <v2i64, v4i32, SReg_128>;
def : BitConvert <v4i32, v2i64, SReg_128>;
def : BitConvert <v2f64, v4f32, VReg_128>;
def : BitConvert <v2f64, v4i32, VReg_128>;
def : BitConvert <v4f32, v2f64, VReg_128>;
def : BitConvert <v4i32, v2f64, VReg_128>;
def : BitConvert <v2i64, v2f64, VReg_128>;
def : BitConvert <v2f64, v2i64, VReg_128>;

// 256-bit bitcast
def : BitConvert <v8i32, v8f32, SReg_256>;
def : BitConvert <v8f32, v8i32, SReg_256>;
def : BitConvert <v8i32, v8f32, VReg_256>;
def : BitConvert <v8f32, v8i32, VReg_256>;

// 512-bit bitcast
def : BitConvert <v16i32, v16f32, VReg_512>;
def : BitConvert <v16f32, v16i32, VReg_512>;

/********** =================== **********/
/********** Src & Dst modifiers **********/
/********** =================== **********/

def : Pat <
  (AMDGPUclamp (VOP3Mods0Clamp f32:$src0, i32:$src0_modifiers, i32:$omod),
               (f32 FP_ZERO), (f32 FP_ONE)),
  (V_ADD_F32_e64 $src0_modifiers, $src0, 0, 0, 1, $omod)
>;

/********** ================================ **********/
/********** Floating point absolute/negative **********/
/********** ================================ **********/

// Prevent expanding both fneg and fabs.

def : Pat <
  (fneg (fabs f32:$src)),
  (S_OR_B32 $src, 0x80000000) // Set sign bit
>;

// FIXME: Should use S_OR_B32
def : Pat <
  (fneg (fabs f64:$src)),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_OR_B32_e32 (EXTRACT_SUBREG f64:$src, sub1),
                  (V_MOV_B32_e32 0x80000000)), // Set sign bit.
    sub1)
>;

def : Pat <
  (fabs f32:$src),
  (V_AND_B32_e32 $src, (V_MOV_B32_e32 0x7fffffff))
>;

def : Pat <
  (fneg f32:$src),
  (V_XOR_B32_e32 $src, (V_MOV_B32_e32 0x80000000))
>;

def : Pat <
  (fabs f64:$src),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_AND_B32_e32 (EXTRACT_SUBREG f64:$src, sub1),
                   (V_MOV_B32_e32 0x7fffffff)), // Clear sign bit.
    sub1)
>;

def : Pat <
  (fneg f64:$src),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_XOR_B32_e32 (EXTRACT_SUBREG f64:$src, sub1),
                   (V_MOV_B32_e32 0x80000000)),
    sub1)
>;

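// These expansions rely on the IEEE-754 sign bit being the top bit of the
// word, so informally:
//   fabs(x)       = x & 0x7fffffff   (clear the sign bit)
//   fneg(x)       = x ^ 0x80000000   (flip the sign bit)
//   fneg(fabs(x)) = x | 0x80000000   (force the sign bit on)
// For f64 only the high dword (sub1) holds the sign, which is why the low
// dword is passed through unchanged in the 64-bit patterns above.
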
/********** ================== **********/
/********** Immediate Patterns **********/
/********** ================== **********/

def : Pat <
  (SGPRImm<(i32 imm)>:$imm),
  (S_MOV_B32 imm:$imm)
>;

def : Pat <
  (SGPRImm<(f32 fpimm)>:$imm),
  (S_MOV_B32 (f32 (bitcast_fpimm_to_i32 $imm)))
>;

def : Pat <
  (i32 imm:$imm),
  (V_MOV_B32_e32 imm:$imm)
>;

def : Pat <
  (f32 fpimm:$imm),
  (V_MOV_B32_e32 (f32 (bitcast_fpimm_to_i32 $imm)))
>;

def : Pat <
  (i64 InlineImm<i64>:$imm),
  (S_MOV_B64 InlineImm<i64>:$imm)
>;

// XXX - Should this use an s_cmp to set SCC?

// Set to sign-extended 64-bit value (true = -1, false = 0)
def : Pat <
  (i1 imm:$imm),
  (S_MOV_B64 (i64 (as_i64imm $imm)))
>;

def : Pat <
  (f64 InlineFPImm<f64>:$imm),
  (S_MOV_B64 (f64 (bitcast_fpimm_to_i64 InlineFPImm<f64>:$imm)))
>;

/********** ================== **********/
/********** Intrinsic Patterns **********/
/********** ================== **********/

def : POW_Common <V_LOG_F32_e32, V_EXP_F32_e32, V_MUL_LEGACY_F32_e32>;

def : Pat <
  (int_AMDGPU_cube v4f32:$src),
  (REG_SEQUENCE VReg_128,
    (V_CUBETC_F32 0 /* src0_modifiers */, (EXTRACT_SUBREG $src, sub0),
                  0 /* src1_modifiers */, (EXTRACT_SUBREG $src, sub1),
                  0 /* src2_modifiers */, (EXTRACT_SUBREG $src, sub2),
                  0 /* clamp */, 0 /* omod */), sub0,
    (V_CUBESC_F32 0 /* src0_modifiers */, (EXTRACT_SUBREG $src, sub0),
                  0 /* src1_modifiers */, (EXTRACT_SUBREG $src, sub1),
                  0 /* src2_modifiers */, (EXTRACT_SUBREG $src, sub2),
                  0 /* clamp */, 0 /* omod */), sub1,
    (V_CUBEMA_F32 0 /* src0_modifiers */, (EXTRACT_SUBREG $src, sub0),
                  0 /* src1_modifiers */, (EXTRACT_SUBREG $src, sub1),
                  0 /* src2_modifiers */, (EXTRACT_SUBREG $src, sub2),
                  0 /* clamp */, 0 /* omod */), sub2,
    (V_CUBEID_F32 0 /* src0_modifiers */, (EXTRACT_SUBREG $src, sub0),
                  0 /* src1_modifiers */, (EXTRACT_SUBREG $src, sub1),
                  0 /* src2_modifiers */, (EXTRACT_SUBREG $src, sub2),
                  0 /* clamp */, 0 /* omod */), sub3)
>;

def : Pat <
  (i32 (sext i1:$src0)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src0)
>;

class Ext32Pat <SDNode ext> : Pat <
  (i32 (ext i1:$src0)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src0)
>;

def : Ext32Pat <zext>;
def : Ext32Pat <anyext>;

// Offset in a 32-bit VGPR
def : Pat <
  (SIload_constant v4i32:$sbase, i32:$voff),
  (BUFFER_LOAD_DWORD_OFFEN $voff, $sbase, 0, 0, 0, 0, 0)
>;

// The multiplication scales from [0,1] to the unsigned integer range
def : Pat <
  (AMDGPUurecip i32:$src0),
  (V_CVT_U32_F32_e32
    (V_MUL_F32_e32 CONST.FP_UINT_MAX_PLUS_1,
                   (V_RCP_IFLAG_F32_e32 (V_CVT_F32_U32_e32 $src0))))
>;

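// Informally, the AMDGPUurecip expansion above computes
//   urecip(x) ~= (u32)(FP_UINT_MAX_PLUS_1 * (1.0f / (float)x))
// i.e. the reciprocal in [0,1] is rescaled by 2^32 (CONST.FP_UINT_MAX_PLUS_1
// is assumed here to be 4294967296.0f) so the result lands back in the
// unsigned 32-bit integer range.
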
//===----------------------------------------------------------------------===//
// VOP3 Patterns
//===----------------------------------------------------------------------===//

def : IMad24Pat<V_MAD_I32_I24>;
def : UMad24Pat<V_MAD_U32_U24>;

defm : BFIPatterns <V_BFI_B32, S_MOV_B32, SReg_64>;
def : ROTRPattern <V_ALIGNBIT_B32>;

/********** ======================= **********/
/**********   Load/Store Patterns   **********/
/********** ======================= **********/

class DSReadPat <DS inst, ValueType vt, PatFrag frag> : Pat <
  (vt (frag (DS1Addr1Offset i32:$ptr, i32:$offset))),
  (inst $ptr, (as_i16imm $offset), (i1 0))
>;

def : DSReadPat <DS_READ_I8, i32, si_sextload_local_i8>;
def : DSReadPat <DS_READ_U8, i32, si_az_extload_local_i8>;
def : DSReadPat <DS_READ_I16, i32, si_sextload_local_i16>;
def : DSReadPat <DS_READ_U16, i32, si_az_extload_local_i16>;
def : DSReadPat <DS_READ_B32, i32, si_load_local>;

let AddedComplexity = 100 in {

def : DSReadPat <DS_READ_B64, v2i32, si_load_local_align8>;

} // End AddedComplexity = 100

def : Pat <
  (v2i32 (si_load_local (DS64Bit4ByteAligned i32:$ptr, i8:$offset0,
                                             i8:$offset1))),
  (DS_READ2_B32 $ptr, $offset0, $offset1, (i1 0))
>;

class DSWritePat <DS inst, ValueType vt, PatFrag frag> : Pat <
  (frag vt:$value, (DS1Addr1Offset i32:$ptr, i32:$offset)),
  (inst $ptr, $value, (as_i16imm $offset), (i1 0))
>;

def : DSWritePat <DS_WRITE_B8, i32, si_truncstore_local_i8>;
def : DSWritePat <DS_WRITE_B16, i32, si_truncstore_local_i16>;
def : DSWritePat <DS_WRITE_B32, i32, si_store_local>;

let AddedComplexity = 100 in {

def : DSWritePat <DS_WRITE_B64, v2i32, si_store_local_align8>;
} // End AddedComplexity = 100

def : Pat <
  (si_store_local v2i32:$value, (DS64Bit4ByteAligned i32:$ptr, i8:$offset0,
                                                     i8:$offset1)),
  (DS_WRITE2_B32 $ptr, (EXTRACT_SUBREG $value, sub0),
                       (EXTRACT_SUBREG $value, sub1), $offset0, $offset1,
                       (i1 0))
>;

class DSAtomicRetPat<DS inst, ValueType vt, PatFrag frag> : Pat <
  (frag (DS1Addr1Offset i32:$ptr, i32:$offset), vt:$value),
  (inst $ptr, $value, (as_i16imm $offset), (i1 0))
>;

class DSAtomicCmpXChg <DS inst, ValueType vt, PatFrag frag> : Pat <
  (frag (DS1Addr1Offset i32:$ptr, i32:$offset), vt:$cmp, vt:$swap),
  (inst $ptr, $cmp, $swap, (as_i16imm $offset), (i1 0))
>;


// 32-bit atomics.
def : DSAtomicRetPat<DS_WRXCHG_RTN_B32, i32, si_atomic_swap_local>;
def : DSAtomicRetPat<DS_ADD_RTN_U32, i32, si_atomic_load_add_local>;
def : DSAtomicRetPat<DS_SUB_RTN_U32, i32, si_atomic_load_sub_local>;
def : DSAtomicRetPat<DS_INC_RTN_U32, i32, si_atomic_inc_local>;
def : DSAtomicRetPat<DS_DEC_RTN_U32, i32, si_atomic_dec_local>;
def : DSAtomicRetPat<DS_AND_RTN_B32, i32, si_atomic_load_and_local>;
def : DSAtomicRetPat<DS_OR_RTN_B32, i32, si_atomic_load_or_local>;
def : DSAtomicRetPat<DS_XOR_RTN_B32, i32, si_atomic_load_xor_local>;
def : DSAtomicRetPat<DS_MIN_RTN_I32, i32, si_atomic_load_min_local>;
def : DSAtomicRetPat<DS_MAX_RTN_I32, i32, si_atomic_load_max_local>;
def : DSAtomicRetPat<DS_MIN_RTN_U32, i32, si_atomic_load_umin_local>;
def : DSAtomicRetPat<DS_MAX_RTN_U32, i32, si_atomic_load_umax_local>;
def : DSAtomicCmpXChg<DS_CMPST_RTN_B32, i32, si_atomic_cmp_swap_32_local>;

// 64-bit atomics.
def : DSAtomicRetPat<DS_WRXCHG_RTN_B64, i64, si_atomic_swap_local>;
def : DSAtomicRetPat<DS_ADD_RTN_U64, i64, si_atomic_load_add_local>;
def : DSAtomicRetPat<DS_SUB_RTN_U64, i64, si_atomic_load_sub_local>;
def : DSAtomicRetPat<DS_INC_RTN_U64, i64, si_atomic_inc_local>;
def : DSAtomicRetPat<DS_DEC_RTN_U64, i64, si_atomic_dec_local>;
def : DSAtomicRetPat<DS_AND_RTN_B64, i64, si_atomic_load_and_local>;
def : DSAtomicRetPat<DS_OR_RTN_B64, i64, si_atomic_load_or_local>;
def : DSAtomicRetPat<DS_XOR_RTN_B64, i64, si_atomic_load_xor_local>;
def : DSAtomicRetPat<DS_MIN_RTN_I64, i64, si_atomic_load_min_local>;
def : DSAtomicRetPat<DS_MAX_RTN_I64, i64, si_atomic_load_max_local>;
def : DSAtomicRetPat<DS_MIN_RTN_U64, i64, si_atomic_load_umin_local>;
def : DSAtomicRetPat<DS_MAX_RTN_U64, i64, si_atomic_load_umax_local>;

def : DSAtomicCmpXChg<DS_CMPST_RTN_B64, i64, si_atomic_cmp_swap_64_local>;

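// In the DS patterns above, the final (i1 0) operand is presumably the gds
// bit, i.e. these selections always address LDS rather than GDS, and the
// 16-bit immediate produced by as_i16imm is the byte offset matched by
// DS1Addr1Offset.
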
//===----------------------------------------------------------------------===//
// MUBUF Patterns
//===----------------------------------------------------------------------===//

class MUBUFLoad_Pattern <MUBUF Instr_ADDR64, ValueType vt,
                         PatFrag constant_ld> : Pat <
  (vt (constant_ld (MUBUFAddr64 v4i32:$srsrc, i64:$vaddr, i32:$soffset,
                                i16:$offset, i1:$glc, i1:$slc, i1:$tfe))),
  (Instr_ADDR64 $vaddr, $srsrc, $soffset, $offset, $glc, $slc, $tfe)
>;

multiclass MUBUFLoad_Atomic_Pattern <MUBUF Instr_ADDR64, MUBUF Instr_OFFSET,
                                     ValueType vt, PatFrag atomic_ld> {
  def : Pat <
    (vt (atomic_ld (MUBUFAddr64 v4i32:$srsrc, i64:$vaddr, i32:$soffset,
                                i16:$offset, i1:$slc))),
    (Instr_ADDR64 $vaddr, $srsrc, $soffset, $offset, 1, $slc, 0)
  >;

  def : Pat <
    (vt (atomic_ld (MUBUFOffsetNoGLC v4i32:$rsrc, i32:$soffset, i16:$offset))),
    (Instr_OFFSET $rsrc, $soffset, (as_i16imm $offset), 1, 0, 0)
  >;
}

let Predicates = [isSICI] in {
def : MUBUFLoad_Pattern <BUFFER_LOAD_SBYTE_ADDR64, i32, sextloadi8_constant>;
def : MUBUFLoad_Pattern <BUFFER_LOAD_UBYTE_ADDR64, i32, az_extloadi8_constant>;
def : MUBUFLoad_Pattern <BUFFER_LOAD_SSHORT_ADDR64, i32, sextloadi16_constant>;
def : MUBUFLoad_Pattern <BUFFER_LOAD_USHORT_ADDR64, i32, az_extloadi16_constant>;

defm : MUBUFLoad_Atomic_Pattern <BUFFER_LOAD_DWORD_ADDR64, BUFFER_LOAD_DWORD_OFFSET, i32, mubuf_load_atomic>;
defm : MUBUFLoad_Atomic_Pattern <BUFFER_LOAD_DWORDX2_ADDR64, BUFFER_LOAD_DWORDX2_OFFSET, i64, mubuf_load_atomic>;
} // End Predicates = [isSICI]

class MUBUFScratchLoadPat <MUBUF Instr, ValueType vt, PatFrag ld> : Pat <
  (vt (ld (MUBUFScratch v4i32:$srsrc, i32:$vaddr,
                        i32:$soffset, u16imm:$offset))),
  (Instr $vaddr, $srsrc, $soffset, $offset, 0, 0, 0)
>;

def : MUBUFScratchLoadPat <BUFFER_LOAD_SBYTE_OFFEN, i32, sextloadi8_private>;
def : MUBUFScratchLoadPat <BUFFER_LOAD_UBYTE_OFFEN, i32, extloadi8_private>;
def : MUBUFScratchLoadPat <BUFFER_LOAD_SSHORT_OFFEN, i32, sextloadi16_private>;
def : MUBUFScratchLoadPat <BUFFER_LOAD_USHORT_OFFEN, i32, extloadi16_private>;
def : MUBUFScratchLoadPat <BUFFER_LOAD_DWORD_OFFEN, i32, load_private>;
def : MUBUFScratchLoadPat <BUFFER_LOAD_DWORDX2_OFFEN, v2i32, load_private>;
def : MUBUFScratchLoadPat <BUFFER_LOAD_DWORDX4_OFFEN, v4i32, load_private>;

// BUFFER_LOAD_DWORD*, addr64=0
multiclass MUBUF_Load_Dword <ValueType vt, MUBUF offset, MUBUF offen,
                             MUBUF idxen, MUBUF bothen> {

  def : Pat <
    (vt (int_SI_buffer_load_dword v4i32:$rsrc, (i32 imm), i32:$soffset,
                                  imm:$offset, 0, 0, imm:$glc, imm:$slc,
                                  imm:$tfe)),
    (offset $rsrc, $soffset, (as_i16imm $offset), (as_i1imm $glc),
            (as_i1imm $slc), (as_i1imm $tfe))
  >;

  def : Pat <
    (vt (int_SI_buffer_load_dword v4i32:$rsrc, i32:$vaddr, i32:$soffset,
                                  imm:$offset, 1, 0, imm:$glc, imm:$slc,
                                  imm:$tfe)),
    (offen $vaddr, $rsrc, $soffset, (as_i16imm $offset), (as_i1imm $glc),
           (as_i1imm $slc), (as_i1imm $tfe))
  >;

  def : Pat <
    (vt (int_SI_buffer_load_dword v4i32:$rsrc, i32:$vaddr, i32:$soffset,
                                  imm:$offset, 0, 1, imm:$glc, imm:$slc,
                                  imm:$tfe)),
    (idxen $vaddr, $rsrc, $soffset, (as_i16imm $offset), (as_i1imm $glc),
           (as_i1imm $slc), (as_i1imm $tfe))
  >;

  def : Pat <
    (vt (int_SI_buffer_load_dword v4i32:$rsrc, v2i32:$vaddr, i32:$soffset,
                                  imm:$offset, 1, 1, imm:$glc, imm:$slc,
                                  imm:$tfe)),
    (bothen $vaddr, $rsrc, $soffset, (as_i16imm $offset), (as_i1imm $glc),
            (as_i1imm $slc), (as_i1imm $tfe))
  >;
}

defm : MUBUF_Load_Dword <i32, BUFFER_LOAD_DWORD_OFFSET, BUFFER_LOAD_DWORD_OFFEN,
                         BUFFER_LOAD_DWORD_IDXEN, BUFFER_LOAD_DWORD_BOTHEN>;
defm : MUBUF_Load_Dword <v2i32, BUFFER_LOAD_DWORDX2_OFFSET, BUFFER_LOAD_DWORDX2_OFFEN,
                         BUFFER_LOAD_DWORDX2_IDXEN, BUFFER_LOAD_DWORDX2_BOTHEN>;
defm : MUBUF_Load_Dword <v4i32, BUFFER_LOAD_DWORDX4_OFFSET, BUFFER_LOAD_DWORDX4_OFFEN,
                         BUFFER_LOAD_DWORDX4_IDXEN, BUFFER_LOAD_DWORDX4_BOTHEN>;

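// The four Pats in MUBUF_Load_Dword differ only in the two constant operands
// that follow the offset in int_SI_buffer_load_dword, which select the
// addressing variant statically at the call site:
//   0,0 -> *_OFFSET   1,0 -> *_OFFEN   0,1 -> *_IDXEN   1,1 -> *_BOTHEN
// (the BOTHEN form takes a v2i32 vaddr holding both the index and the offset).
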
multiclass MUBUFStore_Atomic_Pattern <MUBUF Instr_ADDR64, MUBUF Instr_OFFSET,
                                      ValueType vt, PatFrag atomic_st> {
  // Store follows the atomic op convention, so the address comes first.
  def : Pat <
    (atomic_st (MUBUFAddr64 v4i32:$srsrc, i64:$vaddr, i32:$soffset,
                            i16:$offset, i1:$slc), vt:$val),
    (Instr_ADDR64 $val, $vaddr, $srsrc, $soffset, $offset, 1, $slc, 0)
  >;

  def : Pat <
    (atomic_st (MUBUFOffsetNoGLC v4i32:$rsrc, i32:$soffset, i16:$offset), vt:$val),
    (Instr_OFFSET $val, $rsrc, $soffset, (as_i16imm $offset), 1, 0, 0)
  >;
}
let Predicates = [isSICI] in {
defm : MUBUFStore_Atomic_Pattern <BUFFER_STORE_DWORD_ADDR64, BUFFER_STORE_DWORD_OFFSET, i32, global_store_atomic>;
defm : MUBUFStore_Atomic_Pattern <BUFFER_STORE_DWORDX2_ADDR64, BUFFER_STORE_DWORDX2_OFFSET, i64, global_store_atomic>;
} // End Predicates = [isSICI]

class MUBUFScratchStorePat <MUBUF Instr, ValueType vt, PatFrag st> : Pat <
  (st vt:$value, (MUBUFScratch v4i32:$srsrc, i32:$vaddr, i32:$soffset,
                               u16imm:$offset)),
  (Instr $value, $vaddr, $srsrc, $soffset, $offset, 0, 0, 0)
>;

def : MUBUFScratchStorePat <BUFFER_STORE_BYTE_OFFEN, i32, truncstorei8_private>;
def : MUBUFScratchStorePat <BUFFER_STORE_SHORT_OFFEN, i32, truncstorei16_private>;
def : MUBUFScratchStorePat <BUFFER_STORE_DWORD_OFFEN, i32, store_private>;
def : MUBUFScratchStorePat <BUFFER_STORE_DWORDX2_OFFEN, v2i32, store_private>;
def : MUBUFScratchStorePat <BUFFER_STORE_DWORDX4_OFFEN, v4i32, store_private>;

//===----------------------------------------------------------------------===//
// MTBUF Patterns
//===----------------------------------------------------------------------===//

// TBUFFER_STORE_FORMAT_*, addr64=0
class MTBUF_StoreResource <ValueType vt, int num_channels, MTBUF opcode> : Pat<
  (SItbuffer_store v4i32:$rsrc, vt:$vdata, num_channels, i32:$vaddr,
                   i32:$soffset, imm:$inst_offset, imm:$dfmt,
                   imm:$nfmt, imm:$offen, imm:$idxen,
                   imm:$glc, imm:$slc, imm:$tfe),
  (opcode
    $vdata, (as_i16imm $inst_offset), (as_i1imm $offen), (as_i1imm $idxen),
    (as_i1imm $glc), 0, (as_i8imm $dfmt), (as_i8imm $nfmt), $vaddr, $rsrc,
    (as_i1imm $slc), (as_i1imm $tfe), $soffset)
>;

def : MTBUF_StoreResource <i32, 1, TBUFFER_STORE_FORMAT_X>;
def : MTBUF_StoreResource <v2i32, 2, TBUFFER_STORE_FORMAT_XY>;
def : MTBUF_StoreResource <v4i32, 3, TBUFFER_STORE_FORMAT_XYZ>;
def : MTBUF_StoreResource <v4i32, 4, TBUFFER_STORE_FORMAT_XYZW>;

/********** ====================== **********/
/**********   Indirect addressing  **********/
/********** ====================== **********/

multiclass SI_INDIRECT_Pattern <ValueType vt, ValueType eltvt, string VecSize> {
  // Extract with offset
  def : Pat<
    (eltvt (extractelt vt:$src, (MOVRELOffset i32:$idx, (i32 imm:$offset)))),
    (!cast<Instruction>("SI_INDIRECT_SRC_"#VecSize) $src, $idx, imm:$offset)
  >;

  // Insert with offset
  def : Pat<
    (insertelt vt:$src, eltvt:$val, (MOVRELOffset i32:$idx, (i32 imm:$offset))),
    (!cast<Instruction>("SI_INDIRECT_DST_"#VecSize) $src, $idx, imm:$offset, $val)
  >;
}

defm : SI_INDIRECT_Pattern <v2f32, f32, "V2">;
defm : SI_INDIRECT_Pattern <v4f32, f32, "V4">;
defm : SI_INDIRECT_Pattern <v8f32, f32, "V8">;
defm : SI_INDIRECT_Pattern <v16f32, f32, "V16">;

defm : SI_INDIRECT_Pattern <v2i32, i32, "V2">;
defm : SI_INDIRECT_Pattern <v4i32, i32, "V4">;
defm : SI_INDIRECT_Pattern <v8i32, i32, "V8">;
defm : SI_INDIRECT_Pattern <v16i32, i32, "V16">;

//===----------------------------------------------------------------------===//
// Conversion Patterns
//===----------------------------------------------------------------------===//

def : Pat<(i32 (sext_inreg i32:$src, i1)),
          (S_BFE_I32 i32:$src, 65536)>; // 0 | 1 << 16

// Handle sext_inreg in i64
def : Pat <
  (i64 (sext_inreg i64:$src, i1)),
  (S_BFE_I64 i64:$src, 0x10000) // 0 | 1 << 16
>;

def : Pat <
  (i64 (sext_inreg i64:$src, i8)),
  (S_BFE_I64 i64:$src, 0x80000) // 0 | 8 << 16
>;

def : Pat <
  (i64 (sext_inreg i64:$src, i16)),
  (S_BFE_I64 i64:$src, 0x100000) // 0 | 16 << 16
>;

def : Pat <
  (i64 (sext_inreg i64:$src, i32)),
  (S_BFE_I64 i64:$src, 0x200000) // 0 | 32 << 16
>;

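// As the inline comments note, the S_BFE immediates pack the bitfield as
// (offset | width << 16); for example, sign-extending the low 8 bits of an
// i64 uses 0x80000 = 0 | (8 << 16), i.e. a field of width 8 starting at bit 0.
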
class ZExt_i64_i32_Pat <SDNode ext> : Pat <
  (i64 (ext i32:$src)),
  (REG_SEQUENCE SReg_64, $src, sub0, (S_MOV_B32 0), sub1)
>;

class ZExt_i64_i1_Pat <SDNode ext> : Pat <
  (i64 (ext i1:$src)),
  (REG_SEQUENCE VReg_64,
    (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src), sub0,
    (S_MOV_B32 0), sub1)
>;


def : ZExt_i64_i32_Pat<zext>;
def : ZExt_i64_i32_Pat<anyext>;
def : ZExt_i64_i1_Pat<zext>;
def : ZExt_i64_i1_Pat<anyext>;

// FIXME: We need to use COPY_TO_REGCLASS to work-around the fact that
// REG_SEQUENCE patterns don't support instructions with multiple outputs.
def : Pat <
  (i64 (sext i32:$src)),
  (REG_SEQUENCE SReg_64, $src, sub0,
    (i32 (COPY_TO_REGCLASS (S_ASHR_I32 $src, 31), SReg_32_XM0)), sub1)
>;

def : Pat <
  (i64 (sext i1:$src)),
  (REG_SEQUENCE VReg_64,
    (V_CNDMASK_B32_e64 0, -1, $src), sub0,
    (V_CNDMASK_B32_e64 0, -1, $src), sub1)
>;

// If we need to perform a logical operation on i1 values, we need to
// use vector comparisons since there is only one SCC register. Vector
// comparisons still write to a pair of SGPRs, so treat these as
// 64-bit comparisons. When legalizing SGPR copies, instructions
// resulting in the copies from SCC to these instructions will be
// moved to the VALU.
def : Pat <
  (i1 (and i1:$src0, i1:$src1)),
  (S_AND_B64 $src0, $src1)
>;

def : Pat <
  (i1 (or i1:$src0, i1:$src1)),
  (S_OR_B64 $src0, $src1)
>;

def : Pat <
  (i1 (xor i1:$src0, i1:$src1)),
  (S_XOR_B64 $src0, $src1)
>;

def : Pat <
  (f32 (sint_to_fp i1:$src)),
  (V_CNDMASK_B32_e64 (i32 0), CONST.FP32_NEG_ONE, $src)
>;

def : Pat <
  (f32 (uint_to_fp i1:$src)),
  (V_CNDMASK_B32_e64 (i32 0), CONST.FP32_ONE, $src)
>;

def : Pat <
  (f64 (sint_to_fp i1:$src)),
  (V_CVT_F64_I32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src))
>;

def : Pat <
  (f64 (uint_to_fp i1:$src)),
  (V_CVT_F64_U32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src))
>;

//===----------------------------------------------------------------------===//
// Miscellaneous Patterns
//===----------------------------------------------------------------------===//

def : Pat <
  (i32 (trunc i64:$a)),
  (EXTRACT_SUBREG $a, sub0)
>;

def : Pat <
  (i1 (trunc i32:$a)),
  (V_CMP_EQ_I32_e64 (S_AND_B32 (i32 1), $a), 1)
>;

def : Pat <
  (i1 (trunc i64:$a)),
  (V_CMP_EQ_I32_e64 (S_AND_B32 (i32 1),
                    (EXTRACT_SUBREG $a, sub0)), 1)
>;

def : Pat <
  (i32 (bswap i32:$a)),
  (V_BFI_B32 (S_MOV_B32 0x00ff00ff),
             (V_ALIGNBIT_B32 $a, $a, 24),
             (V_ALIGNBIT_B32 $a, $a, 8))
>;

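// A rough sketch of why the bswap expansion above works: with src0 == src1,
// V_ALIGNBIT_B32 acts as a 32-bit rotate right, so for a = [b3 b2 b1 b0]
//   ror(a, 24) = [b2 b1 b0 b3]    and    ror(a, 8) = [b0 b3 b2 b1].
// V_BFI_B32 with the mask 0x00ff00ff then merges bytes 2 and 0 of the first
// rotate with bytes 3 and 1 of the second, giving [b0 b1 b2 b3] = bswap(a).
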
def : Pat <
  (f32 (select i1:$src2, f32:$src1, f32:$src0)),
  (V_CNDMASK_B32_e64 $src0, $src1, $src2)
>;

multiclass BFMPatterns <ValueType vt, InstSI BFM, InstSI MOV> {
  def : Pat <
    (vt (shl (vt (add (vt (shl 1, vt:$a)), -1)), vt:$b)),
    (BFM $a, $b)
  >;

  def : Pat <
    (vt (add (vt (shl 1, vt:$a)), -1)),
    (BFM $a, (MOV 0))
  >;
}

defm : BFMPatterns <i32, S_BFM_B32, S_MOV_B32>;
// FIXME: defm : BFMPatterns <i64, S_BFM_B64, S_MOV_B64>;

def : BFEPattern <V_BFE_U32, S_MOV_B32>;

let Predicates = [isSICI] in {
def : Pat <
  (i64 (readcyclecounter)),
  (S_MEMTIME)
>;
}

def : Pat<
  (fcanonicalize f32:$src),
  (V_MUL_F32_e64 0, CONST.FP32_ONE, 0, $src, 0, 0)
>;

def : Pat<
  (fcanonicalize f64:$src),
  (V_MUL_F64 0, CONST.FP64_ONE, 0, $src, 0, 0)
>;

//===----------------------------------------------------------------------===//
// Fract Patterns
//===----------------------------------------------------------------------===//

let Predicates = [isSI] in {

// V_FRACT is buggy on SI, so the F32 version is never used and (x-floor(x)) is
// used instead. However, SI doesn't have V_FLOOR_F64, so the most efficient
// way to implement it is using V_FRACT_F64.
// The workaround for the V_FRACT bug is:
//    fract(x) = isnan(x) ? x : min(V_FRACT(x), 0.99999999999999999)

// Convert floor(x) to (x - fract(x))
def : Pat <
  (f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))),
  (V_ADD_F64
      $mods,
      $x,
      SRCMODS.NEG,
      (V_CNDMASK_B64_PSEUDO
         (V_MIN_F64
             SRCMODS.NONE,
             (V_FRACT_F64_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE),
             SRCMODS.NONE,
             (V_MOV_B64_PSEUDO 0x3fefffffffffffff),
             DSTCLAMP.NONE, DSTOMOD.NONE),
         $x,
         (V_CMP_CLASS_F64_e64 SRCMODS.NONE, $x, 3 /*NaN*/)),
      DSTCLAMP.NONE, DSTOMOD.NONE)
>;

} // End Predicates = [isSI]

//============================================================================//
// Miscellaneous Optimization Patterns
//============================================================================//

def : SHA256MaPattern <V_BFI_B32, V_XOR_B32_e64>;

def : IntMed3Pat<V_MED3_I32, smax, smax_oneuse, smin_oneuse>;
def : IntMed3Pat<V_MED3_U32, umax, umax_oneuse, umin_oneuse>;

//============================================================================//
// Assembler aliases
//============================================================================//

def : MnemonicAlias<"v_add_u32", "v_add_i32">;
def : MnemonicAlias<"v_sub_u32", "v_sub_i32">;
def : MnemonicAlias<"v_subrev_u32", "v_subrev_i32">;

} // End isGCN predicate