; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=FUNC %s

; FUNC-LABEL: {{^}}atomic_add_i32_offset:
; SI: buffer_atomic_add v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_add_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_i32_ret_offset:
; SI: buffer_atomic_add [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc {{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_add_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_i32_addr64_offset:
; SI: buffer_atomic_add v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_add_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_i32_ret_addr64_offset:
; SI: buffer_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_add_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_i32:
; SI: buffer_atomic_add v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_add_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %0 = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_i32_ret:
; SI: buffer_atomic_add [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; SI: buffer_store_dword [[RET]]
define void @atomic_add_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %0 = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_i32_addr64:
; SI: buffer_atomic_add v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_add_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_i32_ret_addr64:
; SI: buffer_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_add_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_and_i32_offset:
; SI: buffer_atomic_and v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_and_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_and_i32_ret_offset:
; SI: buffer_atomic_and [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc {{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_and_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_and_i32_addr64_offset:
; SI: buffer_atomic_and v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_and_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_and_i32_ret_addr64_offset:
; SI: buffer_atomic_and [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_and_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_and_i32:
; SI: buffer_atomic_and v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_and_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %0 = atomicrmw volatile and i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_and_i32_ret:
; SI: buffer_atomic_and [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; SI: buffer_store_dword [[RET]]
define void @atomic_and_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %0 = atomicrmw volatile and i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_and_i32_addr64:
; SI: buffer_atomic_and v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_and_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_and_i32_ret_addr64:
; SI: buffer_atomic_and [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_and_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_i32_offset:
; SI: buffer_atomic_sub v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_sub_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_i32_ret_offset:
; SI: buffer_atomic_sub [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc {{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_sub_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_i32_addr64_offset:
; SI: buffer_atomic_sub v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_sub_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_i32_ret_addr64_offset:
; SI: buffer_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_sub_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_i32:
; SI: buffer_atomic_sub v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_sub_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %0 = atomicrmw volatile sub i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_i32_ret:
; SI: buffer_atomic_sub [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; SI: buffer_store_dword [[RET]]
define void @atomic_sub_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %0 = atomicrmw volatile sub i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_i32_addr64:
; SI: buffer_atomic_sub v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_sub_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile sub i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_i32_ret_addr64:
; SI: buffer_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_sub_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile sub i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_max_i32_offset:
; SI: buffer_atomic_smax v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_max_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_max_i32_ret_offset:
; SI: buffer_atomic_smax [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc {{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_max_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_max_i32_addr64_offset:
; SI: buffer_atomic_smax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_max_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_max_i32_ret_addr64_offset:
; SI: buffer_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_max_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_max_i32:
; SI: buffer_atomic_smax v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_max_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %0 = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_max_i32_ret:
; SI: buffer_atomic_smax [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; SI: buffer_store_dword [[RET]]
define void @atomic_max_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %0 = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_max_i32_addr64:
; SI: buffer_atomic_smax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_max_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_max_i32_ret_addr64:
; SI: buffer_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_max_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_umax_i32_offset:
; SI: buffer_atomic_umax v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_umax_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umax_i32_ret_offset:
; SI: buffer_atomic_umax [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc {{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_umax_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_umax_i32_addr64_offset:
; SI: buffer_atomic_umax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_umax_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umax_i32_ret_addr64_offset:
; SI: buffer_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_umax_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_umax_i32:
; SI: buffer_atomic_umax v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_umax_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %0 = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umax_i32_ret:
; SI: buffer_atomic_umax [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; SI: buffer_store_dword [[RET]]
define void @atomic_umax_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %0 = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_umax_i32_addr64:
; SI: buffer_atomic_umax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_umax_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umax_i32_ret_addr64:
; SI: buffer_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_umax_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_min_i32_offset:
; SI: buffer_atomic_smin v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_min_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_min_i32_ret_offset:
; SI: buffer_atomic_smin [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc {{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_min_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_min_i32_addr64_offset:
; SI: buffer_atomic_smin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_min_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_min_i32_ret_addr64_offset:
; SI: buffer_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_min_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_min_i32:
; SI: buffer_atomic_smin v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_min_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %0 = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_min_i32_ret:
; SI: buffer_atomic_smin [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; SI: buffer_store_dword [[RET]]
define void @atomic_min_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %0 = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_min_i32_addr64:
; SI: buffer_atomic_smin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_min_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_min_i32_ret_addr64:
; SI: buffer_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_min_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_umin_i32_offset:
; SI: buffer_atomic_umin v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_umin_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umin_i32_ret_offset:
; SI: buffer_atomic_umin [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc {{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_umin_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_umin_i32_addr64_offset:
; SI: buffer_atomic_umin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_umin_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umin_i32_ret_addr64_offset:
; SI: buffer_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_umin_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_umin_i32:
; SI: buffer_atomic_umin v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_umin_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %0 = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umin_i32_ret:
; SI: buffer_atomic_umin [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; SI: buffer_store_dword [[RET]]
define void @atomic_umin_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %0 = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_umin_i32_addr64:
; SI: buffer_atomic_umin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_umin_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umin_i32_ret_addr64:
; SI: buffer_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_umin_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_or_i32_offset:
; SI: buffer_atomic_or v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_or_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_or_i32_ret_offset:
; SI: buffer_atomic_or [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc {{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_or_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_or_i32_addr64_offset:
; SI: buffer_atomic_or v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_or_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_or_i32_ret_addr64_offset:
; SI: buffer_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_or_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_or_i32:
; SI: buffer_atomic_or v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_or_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %0 = atomicrmw volatile or i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_or_i32_ret:
; SI: buffer_atomic_or [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; SI: buffer_store_dword [[RET]]
define void @atomic_or_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %0 = atomicrmw volatile or i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_or_i32_addr64:
; SI: buffer_atomic_or v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_or_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile or i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_or_i32_ret_addr64:
; SI: buffer_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_or_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile or i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_xchg_i32_offset:
; SI: buffer_atomic_swap v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_xchg_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_xchg_i32_ret_offset:
; SI: buffer_atomic_swap [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc {{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_xchg_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_xchg_i32_addr64_offset:
; SI: buffer_atomic_swap v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_xchg_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_xchg_i32_ret_addr64_offset:
; SI: buffer_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_xchg_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_xchg_i32:
; SI: buffer_atomic_swap v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_xchg_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %0 = atomicrmw volatile xchg i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_xchg_i32_ret:
; SI: buffer_atomic_swap [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; SI: buffer_store_dword [[RET]]
define void @atomic_xchg_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %0 = atomicrmw volatile xchg i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_xchg_i32_addr64:
; SI: buffer_atomic_swap v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_xchg_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_xchg_i32_ret_addr64:
; SI: buffer_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_xchg_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_xor_i32_offset:
; SI: buffer_atomic_xor v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_xor_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_xor_i32_ret_offset:
; SI: buffer_atomic_xor [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc {{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_xor_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_xor_i32_addr64_offset:
; SI: buffer_atomic_xor v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_xor_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_xor_i32_ret_addr64_offset:
; SI:
buffer_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}} 755 ; SI: buffer_store_dword [[RET]] 756 define void @atomic_xor_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) { 757 entry: 758 %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index 759 %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4 760 %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst 761 store i32 %0, i32 addrspace(1)* %out2 762 ret void 763 } 764 765 ; FUNC-LABEL: {{^}}atomic_xor_i32: 766 ; SI: buffer_atomic_xor v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}} 767 define void @atomic_xor_i32(i32 addrspace(1)* %out, i32 %in) { 768 entry: 769 %0 = atomicrmw volatile xor i32 addrspace(1)* %out, i32 %in seq_cst 770 ret void 771 } 772 773 ; FUNC-LABEL: {{^}}atomic_xor_i32_ret: 774 ; SI: buffer_atomic_xor [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc 775 ; SI: buffer_store_dword [[RET]] 776 define void @atomic_xor_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) { 777 entry: 778 %0 = atomicrmw volatile xor i32 addrspace(1)* %out, i32 %in seq_cst 779 store i32 %0, i32 addrspace(1)* %out2 780 ret void 781 } 782 783 ; FUNC-LABEL: {{^}}atomic_xor_i32_addr64: 784 ; SI: buffer_atomic_xor v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}} 785 define void @atomic_xor_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) { 786 entry: 787 %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index 788 %0 = atomicrmw volatile xor i32 addrspace(1)* %ptr, i32 %in seq_cst 789 ret void 790 } 791 792 ; FUNC-LABEL: {{^}}atomic_xor_i32_ret_addr64: 793 ; SI: buffer_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}} 794 ; SI: buffer_store_dword [[RET]] 795 define void @atomic_xor_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) { 796 entry: 797 %ptr = getelementptr 
i32, i32 addrspace(1)* %out, i64 %index 798 %0 = atomicrmw volatile xor i32 addrspace(1)* %ptr, i32 %in seq_cst 799 store i32 %0, i32 addrspace(1)* %out2 800 ret void 801 } 802