; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; GCN-LABEL: {{^}}atomic_add_i32_offset:
; GCN: flat_atomic_add v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}}{{$}}
define void @atomic_add_i32_offset(i32 addrspace(4)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_add_i32_ret_offset:
; GCN: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_add_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_add_i32_addr64_offset:
; GCN: flat_atomic_add v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_add_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_add_i32_ret_addr64_offset:
; GCN: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_add_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_add_i32:
; GCN: flat_atomic_add v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_add_i32(i32 addrspace(4)* %out, i32 %in) {
entry:
  %val = atomicrmw volatile add i32 addrspace(4)* %out, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_add_i32_ret:
; GCN: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_add_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
  %val = atomicrmw volatile add i32 addrspace(4)* %out, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_add_i32_addr64:
; GCN: flat_atomic_add v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_add_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %val = atomicrmw volatile add i32 addrspace(4)* %ptr, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_add_i32_ret_addr64:
; GCN: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_add_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %val = atomicrmw volatile add i32 addrspace(4)* %ptr, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_and_i32_offset:
; GCN: flat_atomic_and v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_and_i32_offset(i32 addrspace(4)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = atomicrmw volatile and i32 addrspace(4)* %gep, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_and_i32_ret_offset:
; GCN: flat_atomic_and [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_and_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = atomicrmw volatile and i32 addrspace(4)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_and_i32_addr64_offset:
; GCN: flat_atomic_and v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_and_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  %val = atomicrmw volatile and i32 addrspace(4)* %gep, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_and_i32_ret_addr64_offset:
; GCN: flat_atomic_and [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_and_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  %val = atomicrmw volatile and i32 addrspace(4)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_and_i32:
; GCN: flat_atomic_and v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_and_i32(i32 addrspace(4)* %out, i32 %in) {
entry:
  %val = atomicrmw volatile and i32 addrspace(4)* %out, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_and_i32_ret:
; GCN: flat_atomic_and [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_and_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
  %val = atomicrmw volatile and i32 addrspace(4)* %out, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_and_i32_addr64:
; GCN: flat_atomic_and v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_and_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %val = atomicrmw volatile and i32 addrspace(4)* %ptr, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_and_i32_ret_addr64:
; GCN: flat_atomic_and [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_and_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %val = atomicrmw volatile and i32 addrspace(4)* %ptr, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_sub_i32_offset:
; GCN: flat_atomic_sub v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_sub_i32_offset(i32 addrspace(4)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = atomicrmw volatile sub i32 addrspace(4)* %gep, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_sub_i32_ret_offset:
; GCN: flat_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_sub_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = atomicrmw volatile sub i32 addrspace(4)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_sub_i32_addr64_offset:
; GCN: flat_atomic_sub v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_sub_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  %val = atomicrmw volatile sub i32 addrspace(4)* %gep, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_sub_i32_ret_addr64_offset:
; GCN: flat_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_sub_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  %val = atomicrmw volatile sub i32 addrspace(4)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_sub_i32:
; GCN: flat_atomic_sub v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_sub_i32(i32 addrspace(4)* %out, i32 %in) {
entry:
  %val = atomicrmw volatile sub i32 addrspace(4)* %out, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_sub_i32_ret:
; GCN: flat_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_sub_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
  %val = atomicrmw volatile sub i32 addrspace(4)* %out, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_sub_i32_addr64:
; GCN: flat_atomic_sub v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_sub_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %val = atomicrmw volatile sub i32 addrspace(4)* %ptr, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_sub_i32_ret_addr64:
; GCN: flat_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_sub_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %val = atomicrmw volatile sub i32 addrspace(4)* %ptr, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_max_i32_offset:
; GCN: flat_atomic_smax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_max_i32_offset(i32 addrspace(4)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = atomicrmw volatile max i32 addrspace(4)* %gep, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_max_i32_ret_offset:
; GCN: flat_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_max_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = atomicrmw volatile max i32 addrspace(4)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_max_i32_addr64_offset:
; GCN: flat_atomic_smax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_max_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  %val = atomicrmw volatile max i32 addrspace(4)* %gep, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_max_i32_ret_addr64_offset:
; GCN: flat_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_max_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  %val = atomicrmw volatile max i32 addrspace(4)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_max_i32:
; GCN: flat_atomic_smax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_max_i32(i32 addrspace(4)* %out, i32 %in) {
entry:
  %val = atomicrmw volatile max i32 addrspace(4)* %out, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_max_i32_ret:
; GCN: flat_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_max_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
  %val = atomicrmw volatile max i32 addrspace(4)* %out, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_max_i32_addr64:
; GCN: flat_atomic_smax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_max_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %val = atomicrmw volatile max i32 addrspace(4)* %ptr, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_max_i32_ret_addr64:
; GCN: flat_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_max_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %val = atomicrmw volatile max i32 addrspace(4)* %ptr, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_umax_i32_offset:
; GCN: flat_atomic_umax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_umax_i32_offset(i32 addrspace(4)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = atomicrmw volatile umax i32 addrspace(4)* %gep, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_umax_i32_ret_offset:
; GCN: flat_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_umax_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = atomicrmw volatile umax i32 addrspace(4)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_umax_i32_addr64_offset:
; GCN: flat_atomic_umax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_umax_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  %val = atomicrmw volatile umax i32 addrspace(4)* %gep, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_umax_i32_ret_addr64_offset:
; GCN: flat_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_umax_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  %val = atomicrmw volatile umax i32 addrspace(4)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_umax_i32:
; GCN: flat_atomic_umax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_umax_i32(i32 addrspace(4)* %out, i32 %in) {
entry:
  %val = atomicrmw volatile umax i32 addrspace(4)* %out, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_umax_i32_ret:
; GCN: flat_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_umax_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
  %val = atomicrmw volatile umax i32 addrspace(4)* %out, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_umax_i32_addr64:
; GCN: flat_atomic_umax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_umax_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %val = atomicrmw volatile umax i32 addrspace(4)* %ptr, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_umax_i32_ret_addr64:
; GCN: flat_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_umax_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %val = atomicrmw volatile umax i32 addrspace(4)* %ptr, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_min_i32_offset:
; GCN: flat_atomic_smin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_min_i32_offset(i32 addrspace(4)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = atomicrmw volatile min i32 addrspace(4)* %gep, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_min_i32_ret_offset:
; GCN: flat_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_min_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = atomicrmw volatile min i32 addrspace(4)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_min_i32_addr64_offset:
; GCN: flat_atomic_smin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_min_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  %val = atomicrmw volatile min i32 addrspace(4)* %gep, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_min_i32_ret_addr64_offset:
; GCN: flat_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_min_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  %val = atomicrmw volatile min i32 addrspace(4)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_min_i32:
; GCN: flat_atomic_smin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_min_i32(i32 addrspace(4)* %out, i32 %in) {
entry:
  %val = atomicrmw volatile min i32 addrspace(4)* %out, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_min_i32_ret:
; GCN: flat_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_min_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
  %val = atomicrmw volatile min i32 addrspace(4)* %out, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_min_i32_addr64:
; GCN: flat_atomic_smin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_min_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %val = atomicrmw volatile min i32 addrspace(4)* %ptr, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_min_i32_ret_addr64:
; GCN: flat_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_min_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %val = atomicrmw volatile min i32 addrspace(4)* %ptr, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_umin_i32_offset:
; GCN: flat_atomic_umin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_umin_i32_offset(i32 addrspace(4)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = atomicrmw volatile umin i32 addrspace(4)* %gep, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_umin_i32_ret_offset:
; GCN: flat_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_umin_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = atomicrmw volatile umin i32 addrspace(4)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_umin_i32_addr64_offset:
; GCN: flat_atomic_umin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_umin_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  %val = atomicrmw volatile umin i32 addrspace(4)* %gep, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_umin_i32_ret_addr64_offset:
; GCN: flat_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_umin_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  %val = atomicrmw volatile umin i32 addrspace(4)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_umin_i32:
; GCN: flat_atomic_umin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_umin_i32(i32 addrspace(4)* %out, i32 %in) {
entry:
  %val = atomicrmw volatile umin i32 addrspace(4)* %out, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_umin_i32_ret:
; GCN: flat_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_umin_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
  %val = atomicrmw volatile umin i32 addrspace(4)* %out, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_umin_i32_addr64:
; GCN: flat_atomic_umin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_umin_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %val = atomicrmw volatile umin i32 addrspace(4)* %ptr, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_umin_i32_ret_addr64:
; GCN: flat_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]{{$}}
define void @atomic_umin_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %val = atomicrmw volatile umin i32 addrspace(4)* %ptr, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_or_i32_offset:
; GCN: flat_atomic_or v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}{{$}}
define void @atomic_or_i32_offset(i32 addrspace(4)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = atomicrmw volatile or i32 addrspace(4)* %gep, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_or_i32_ret_offset:
; GCN: flat_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_or_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = atomicrmw volatile or i32 addrspace(4)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_or_i32_addr64_offset:
; GCN: flat_atomic_or v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}{{$}}
define void @atomic_or_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  %val = atomicrmw volatile or i32 addrspace(4)* %gep, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_or_i32_ret_addr64_offset:
; GCN: flat_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_or_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  %val = atomicrmw volatile or i32 addrspace(4)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_or_i32:
; GCN: flat_atomic_or v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_or_i32(i32 addrspace(4)* %out, i32 %in) {
entry:
  %val = atomicrmw volatile or i32 addrspace(4)* %out, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_or_i32_ret:
; GCN: flat_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_or_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
  %val = atomicrmw volatile or i32 addrspace(4)* %out, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_or_i32_addr64:
; GCN: flat_atomic_or v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_or_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %val = atomicrmw volatile or i32 addrspace(4)* %ptr, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_or_i32_ret_addr64:
; GCN: flat_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_or_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %val = atomicrmw volatile or i32 addrspace(4)* %ptr, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_xchg_i32_offset:
; GCN: flat_atomic_swap v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}{{$}}
define void @atomic_xchg_i32_offset(i32 addrspace(4)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = atomicrmw volatile xchg i32 addrspace(4)* %gep, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_xchg_i32_ret_offset:
; GCN: flat_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_xchg_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = atomicrmw volatile xchg i32 addrspace(4)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_xchg_i32_addr64_offset:
; GCN: flat_atomic_swap v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}{{$}}
define void @atomic_xchg_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  %val = atomicrmw volatile xchg i32 addrspace(4)* %gep, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_xchg_i32_ret_addr64_offset:
; GCN: flat_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_xchg_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  %val = atomicrmw volatile xchg i32 addrspace(4)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_xchg_i32:
; GCN: flat_atomic_swap v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}}{{$}}
define void @atomic_xchg_i32(i32 addrspace(4)* %out, i32 %in) {
entry:
  %val = atomicrmw volatile xchg i32 addrspace(4)* %out, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_xchg_i32_ret:
; GCN: flat_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_xchg_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
  %val = atomicrmw volatile xchg i32 addrspace(4)* %out, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_xchg_i32_addr64:
; GCN: flat_atomic_swap v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_xchg_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %val = atomicrmw volatile xchg i32 addrspace(4)* %ptr, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_xchg_i32_ret_addr64:
; GCN: flat_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_xchg_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %val = atomicrmw volatile xchg i32 addrspace(4)* %ptr, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; CMP_SWAP

; GCN-LABEL: {{^}}atomic_cmpxchg_i32_offset:
; GCN: flat_atomic_cmpswap v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}}
define void @atomic_cmpxchg_i32_offset(i32 addrspace(4)* %out, i32 %in, i32 %old) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = cmpxchg volatile i32 addrspace(4)* %gep, i32 %old, i32 %in seq_cst seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_cmpxchg_i32_ret_offset:
; GCN: flat_atomic_cmpswap v[[RET:[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[RET]]
define void @atomic_cmpxchg_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i32 %old) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = cmpxchg volatile i32 addrspace(4)* %gep, i32 %old, i32 %in seq_cst seq_cst
  %flag = extractvalue { i32, i1 } %val, 0
  store i32 %flag, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_cmpxchg_i32_addr64_offset:
; GCN: flat_atomic_cmpswap v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}}
define void @atomic_cmpxchg_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index, i32 %old) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  %val = cmpxchg volatile i32 addrspace(4)* %gep, i32 %old, i32 %in seq_cst seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_cmpxchg_i32_ret_addr64_offset:
; GCN: flat_atomic_cmpswap v[[RET:[0-9]+]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[RET]]
define void @atomic_cmpxchg_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index, i32 %old) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  %val = cmpxchg volatile i32 addrspace(4)* %gep, i32 %old, i32 %in seq_cst seq_cst
  %flag = extractvalue { i32, i1 } %val, 0
  store i32 %flag, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_cmpxchg_i32:
; GCN: flat_atomic_cmpswap v[{{[0-9]+}}:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}}
define void @atomic_cmpxchg_i32(i32 addrspace(4)* %out, i32 %in, i32 %old) {
entry:
  %val = cmpxchg volatile i32 addrspace(4)* %out, i32 %old, i32 %in seq_cst seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_cmpxchg_i32_ret:
; GCN: flat_atomic_cmpswap v[[RET:[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}] glc
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[RET]]
define void @atomic_cmpxchg_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i32 %old) {
entry:
  %val = cmpxchg volatile i32 addrspace(4)* %out, i32 %old, i32 %in seq_cst seq_cst
  %flag = extractvalue { i32, i1 } %val, 0
  store i32 %flag, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_cmpxchg_i32_addr64:
; GCN: flat_atomic_cmpswap v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]{{$}}
define void @atomic_cmpxchg_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index, i32 %old) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %val = cmpxchg volatile i32 addrspace(4)* %ptr, i32 %old, i32 %in seq_cst seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_cmpxchg_i32_ret_addr64:
; GCN: flat_atomic_cmpswap v[[RET:[0-9]+]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[RET]]
define void @atomic_cmpxchg_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index, i32 %old) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %val = cmpxchg volatile i32 addrspace(4)* %ptr, i32 %old, i32 %in seq_cst seq_cst
  %flag = extractvalue { i32, i1 } %val, 0
  store i32 %flag, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_xor_i32_offset:
; GCN: flat_atomic_xor v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}}{{$}}
define void @atomic_xor_i32_offset(i32 addrspace(4)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = atomicrmw volatile xor i32 addrspace(4)* %gep, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_xor_i32_ret_offset:
; GCN: flat_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_xor_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = atomicrmw volatile xor i32 addrspace(4)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_xor_i32_addr64_offset:
; GCN: flat_atomic_xor v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_xor_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  %val = atomicrmw volatile xor i32 addrspace(4)* %gep, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_xor_i32_ret_addr64_offset:
; GCN: flat_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_xor_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  %val = atomicrmw volatile xor i32 addrspace(4)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_xor_i32:
; GCN: flat_atomic_xor v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}}{{$}}
define void @atomic_xor_i32(i32 addrspace(4)* %out, i32 %in) {
entry:
  %val = atomicrmw volatile xor i32 addrspace(4)* %out, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_xor_i32_ret:
; GCN: flat_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_xor_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
  %val = atomicrmw volatile xor i32 addrspace(4)* %out, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_xor_i32_addr64:
; GCN: flat_atomic_xor v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_xor_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %val = atomicrmw volatile xor i32 addrspace(4)* %ptr, i32 %in seq_cst
  ret void
}

; GCN-LABEL: {{^}}atomic_xor_i32_ret_addr64:
; GCN: flat_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_xor_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %val = atomicrmw volatile xor i32 addrspace(4)* %ptr, i32 %in seq_cst
  store i32 %val, i32 addrspace(4)* %out2
  ret void
}

; GCN-LABEL: {{^}}atomic_load_i32_offset:
; GCN: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_load_i32_offset(i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %in, i32 4
  %val = load atomic i32, i32 addrspace(4)* %gep seq_cst, align 4
  store i32 %val, i32 addrspace(4)* %out
  ret void
}

; GCN-LABEL: {{^}}atomic_load_i32:
; GCN: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}] glc
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_load_i32(i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
entry:
  %val = load atomic i32, i32 addrspace(4)* %in seq_cst, align 4
  store i32 %val, i32 addrspace(4)* %out
  ret void
}

; GCN-LABEL: {{^}}atomic_load_i32_addr64_offset:
; GCN: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_load_i32_addr64_offset(i32 addrspace(4)* %in, i32 addrspace(4)* %out, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %in, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  %val = load atomic i32, i32 addrspace(4)* %gep seq_cst, align 4
  store i32 %val, i32 addrspace(4)* %out
  ret void
}

; GCN-LABEL: {{^}}atomic_load_i32_addr64:
; GCN: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define void @atomic_load_i32_addr64(i32 addrspace(4)* %in, i32 addrspace(4)* %out, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %in, i64 %index
  %val = load atomic i32, i32 addrspace(4)* %ptr seq_cst, align 4
  store i32 %val, i32 addrspace(4)* %out
  ret void
}

; GCN-LABEL: {{^}}atomic_store_i32_offset:
; GCN: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} glc{{$}}
define void @atomic_store_i32_offset(i32 %in, i32 addrspace(4)* %out) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  store atomic i32 %in, i32 addrspace(4)* %gep seq_cst, align 4
  ret void
}

; GCN-LABEL: {{^}}atomic_store_i32:
; GCN: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} glc{{$}}
define void @atomic_store_i32(i32 %in, i32 addrspace(4)* %out) {
entry:
  store atomic i32 %in, i32 addrspace(4)* %out seq_cst, align 4
  ret void
}

; GCN-LABEL: {{^}}atomic_store_i32_addr64_offset:
; GCN: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} glc{{$}}
define void @atomic_store_i32_addr64_offset(i32 %in, i32 addrspace(4)* %out, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
  store atomic i32 %in, i32 addrspace(4)* %gep seq_cst, align 4
  ret void
}

; GCN-LABEL: {{^}}atomic_store_i32_addr64:
; GCN: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} glc{{$}}
define void @atomic_store_i32_addr64(i32 %in, i32 addrspace(4)* %out, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
  store atomic i32 %in, i32 addrspace(4)* %ptr seq_cst, align 4
  ret void
}