; AMDGPU global-memory 64-bit atomic operation tests (llc + FileCheck).
      1 ; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
      2 ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
      3 
      4 ; GCN-LABEL: {{^}}atomic_add_i64_offset:
      5 ; GCN: buffer_atomic_add_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
      6 define void @atomic_add_i64_offset(i64 addrspace(1)* %out, i64 %in) {
      7 entry:
      8   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
      9   %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
     10   ret void
     11 }
     12 
     13 ; GCN-LABEL: {{^}}atomic_add_i64_ret_offset:
     14 ; GCN: buffer_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
     15 ; GCN: buffer_store_dwordx2 [[RET]]
     16 define void @atomic_add_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
     17 entry:
     18   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
     19   %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
     20   store i64 %tmp0, i64 addrspace(1)* %out2
     21   ret void
     22 }
     23 
     24 ; GCN-LABEL: {{^}}atomic_add_i64_addr64_offset:
     25 ; CI: buffer_atomic_add_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
     26 ; VI: flat_atomic_add_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}}{{$}}
     27 define void @atomic_add_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
     28 entry:
     29   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
     30   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
     31   %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
     32   ret void
     33 }
     34 
     35 ; GCN-LABEL: {{^}}atomic_add_i64_ret_addr64_offset:
     36 ; CI: buffer_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
     37 ; VI: flat_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
     38 ; GCN: buffer_store_dwordx2 [[RET]]
     39 define void @atomic_add_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
     40 entry:
     41   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
     42   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
     43   %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
     44   store i64 %tmp0, i64 addrspace(1)* %out2
     45   ret void
     46 }
     47 
     48 ; GCN-LABEL: {{^}}atomic_add_i64:
     49 ; GCN: buffer_atomic_add_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
     50 define void @atomic_add_i64(i64 addrspace(1)* %out, i64 %in) {
     51 entry:
     52   %tmp0 = atomicrmw volatile add i64 addrspace(1)* %out, i64 %in seq_cst
     53   ret void
     54 }
     55 
     56 ; GCN-LABEL: {{^}}atomic_add_i64_ret:
     57 ; GCN: buffer_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
     58 ; GCN: buffer_store_dwordx2 [[RET]]
     59 define void @atomic_add_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
     60 entry:
     61   %tmp0 = atomicrmw volatile add i64 addrspace(1)* %out, i64 %in seq_cst
     62   store i64 %tmp0, i64 addrspace(1)* %out2
     63   ret void
     64 }
     65 
     66 ; GCN-LABEL: {{^}}atomic_add_i64_addr64:
     67 ; CI: buffer_atomic_add_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
     68 ; VI: flat_atomic_add_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
     69 define void @atomic_add_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
     70 entry:
     71   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
     72   %tmp0 = atomicrmw volatile add i64 addrspace(1)* %ptr, i64 %in seq_cst
     73   ret void
     74 }
     75 
     76 ; GCN-LABEL: {{^}}atomic_add_i64_ret_addr64:
     77 ; CI: buffer_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
     78 ; VI: flat_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
     79 ; GCN: buffer_store_dwordx2 [[RET]]
     80 define void @atomic_add_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
     81 entry:
     82   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
     83   %tmp0 = atomicrmw volatile add i64 addrspace(1)* %ptr, i64 %in seq_cst
     84   store i64 %tmp0, i64 addrspace(1)* %out2
     85   ret void
     86 }
     87 
     88 ; GCN-LABEL: {{^}}atomic_and_i64_offset:
     89 ; GCN: buffer_atomic_and_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
     90 define void @atomic_and_i64_offset(i64 addrspace(1)* %out, i64 %in) {
     91 entry:
     92   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
     93   %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst
     94   ret void
     95 }
     96 
     97 ; GCN-LABEL: {{^}}atomic_and_i64_ret_offset:
     98 ; GCN: buffer_atomic_and_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
     99 ; GCN: buffer_store_dwordx2 [[RET]]
    100 define void @atomic_and_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
    101 entry:
    102   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
    103   %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst
    104   store i64 %tmp0, i64 addrspace(1)* %out2
    105   ret void
    106 }
    107 
    108 ; GCN-LABEL: {{^}}atomic_and_i64_addr64_offset:
    109 ; CI: buffer_atomic_and_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
    110 ; VI: flat_atomic_and_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
    111 define void @atomic_and_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
    112 entry:
    113   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    114   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
    115   %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst
    116   ret void
    117 }
    118 
    119 ; GCN-LABEL: {{^}}atomic_and_i64_ret_addr64_offset:
    120 ; CI: buffer_atomic_and_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
    121 ; VI: flat_atomic_and_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
    122 ; GCN: buffer_store_dwordx2 [[RET]]
    123 define void @atomic_and_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
    124 entry:
    125   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    126   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
    127   %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst
    128   store i64 %tmp0, i64 addrspace(1)* %out2
    129   ret void
    130 }
    131 
    132 ; GCN-LABEL: {{^}}atomic_and_i64:
    133 ; GCN: buffer_atomic_and_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
    134 define void @atomic_and_i64(i64 addrspace(1)* %out, i64 %in) {
    135 entry:
    136   %tmp0 = atomicrmw volatile and i64 addrspace(1)* %out, i64 %in seq_cst
    137   ret void
    138 }
    139 
    140 ; GCN-LABEL: {{^}}atomic_and_i64_ret:
    141 ; GCN: buffer_atomic_and_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
    142 ; GCN: buffer_store_dwordx2 [[RET]]
    143 define void @atomic_and_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
    144 entry:
    145   %tmp0 = atomicrmw volatile and i64 addrspace(1)* %out, i64 %in seq_cst
    146   store i64 %tmp0, i64 addrspace(1)* %out2
    147   ret void
    148 }
    149 
    150 ; GCN-LABEL: {{^}}atomic_and_i64_addr64:
    151 ; CI: buffer_atomic_and_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
    152 ; VI: flat_atomic_and_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
    153 define void @atomic_and_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
    154 entry:
    155   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    156   %tmp0 = atomicrmw volatile and i64 addrspace(1)* %ptr, i64 %in seq_cst
    157   ret void
    158 }
    159 
    160 ; GCN-LABEL: {{^}}atomic_and_i64_ret_addr64:
    161 ; CI: buffer_atomic_and_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
    162 ; VI: flat_atomic_and_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
    163 ; GCN: buffer_store_dwordx2 [[RET]]
    164 define void @atomic_and_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
    165 entry:
    166   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    167   %tmp0 = atomicrmw volatile and i64 addrspace(1)* %ptr, i64 %in seq_cst
    168   store i64 %tmp0, i64 addrspace(1)* %out2
    169   ret void
    170 }
    171 
    172 ; GCN-LABEL: {{^}}atomic_sub_i64_offset:
    173 ; GCN: buffer_atomic_sub_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
    174 define void @atomic_sub_i64_offset(i64 addrspace(1)* %out, i64 %in) {
    175 entry:
    176   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
    177   %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst
    178   ret void
    179 }
    180 
    181 ; GCN-LABEL: {{^}}atomic_sub_i64_ret_offset:
    182 ; GCN: buffer_atomic_sub_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
    183 ; GCN: buffer_store_dwordx2 [[RET]]
    184 define void @atomic_sub_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
    185 entry:
    186   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
    187   %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst
    188   store i64 %tmp0, i64 addrspace(1)* %out2
    189   ret void
    190 }
    191 
    192 ; GCN-LABEL: {{^}}atomic_sub_i64_addr64_offset:
    193 ; CI: buffer_atomic_sub_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
    194 ; VI: flat_atomic_sub_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
    195 define void @atomic_sub_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
    196 entry:
    197   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    198   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
    199   %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst
    200   ret void
    201 }
    202 
    203 ; GCN-LABEL: {{^}}atomic_sub_i64_ret_addr64_offset:
    204 ; CI: buffer_atomic_sub_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
    205 ; VI: flat_atomic_sub_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
    206 ; GCN: buffer_store_dwordx2 [[RET]]
    207 define void @atomic_sub_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
    208 entry:
    209   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    210   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
    211   %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst
    212   store i64 %tmp0, i64 addrspace(1)* %out2
    213   ret void
    214 }
    215 
    216 ; GCN-LABEL: {{^}}atomic_sub_i64:
    217 ; GCN: buffer_atomic_sub_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
    218 define void @atomic_sub_i64(i64 addrspace(1)* %out, i64 %in) {
    219 entry:
    220   %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %out, i64 %in seq_cst
    221   ret void
    222 }
    223 
    224 ; GCN-LABEL: {{^}}atomic_sub_i64_ret:
    225 ; GCN: buffer_atomic_sub_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
    226 ; GCN: buffer_store_dwordx2 [[RET]]
    227 define void @atomic_sub_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
    228 entry:
    229   %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %out, i64 %in seq_cst
    230   store i64 %tmp0, i64 addrspace(1)* %out2
    231   ret void
    232 }
    233 
    234 ; GCN-LABEL: {{^}}atomic_sub_i64_addr64:
    235 ; CI: buffer_atomic_sub_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
    236 ; VI: flat_atomic_sub_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
    237 define void @atomic_sub_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
    238 entry:
    239   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    240   %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %ptr, i64 %in seq_cst
    241   ret void
    242 }
    243 
    244 ; GCN-LABEL: {{^}}atomic_sub_i64_ret_addr64:
    245 ; CI: buffer_atomic_sub_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
    246 ; VI: flat_atomic_sub_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
    247 ; GCN: buffer_store_dwordx2 [[RET]]
    248 define void @atomic_sub_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
    249 entry:
    250   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    251   %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %ptr, i64 %in seq_cst
    252   store i64 %tmp0, i64 addrspace(1)* %out2
    253   ret void
    254 }
    255 
    256 ; GCN-LABEL: {{^}}atomic_max_i64_offset:
    257 ; GCN: buffer_atomic_smax_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
    258 define void @atomic_max_i64_offset(i64 addrspace(1)* %out, i64 %in) {
    259 entry:
    260   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
    261   %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
    262   ret void
    263 }
    264 
    265 ; GCN-LABEL: {{^}}atomic_max_i64_ret_offset:
    266 ; GCN: buffer_atomic_smax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
    267 ; GCN: buffer_store_dwordx2 [[RET]]
    268 define void @atomic_max_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
    269 entry:
    270   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
    271   %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
    272   store i64 %tmp0, i64 addrspace(1)* %out2
    273   ret void
    274 }
    275 
    276 ; GCN-LABEL: {{^}}atomic_max_i64_addr64_offset:
    277 ; CI: buffer_atomic_smax_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
    278 ; VI: flat_atomic_smax_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
    279 define void @atomic_max_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
    280 entry:
    281   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    282   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
    283   %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
    284   ret void
    285 }
    286 
    287 ; GCN-LABEL: {{^}}atomic_max_i64_ret_addr64_offset:
    288 ; CI: buffer_atomic_smax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
    289 ; VI: flat_atomic_smax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
    290 ; GCN: buffer_store_dwordx2 [[RET]]
    291 define void @atomic_max_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
    292 entry:
    293   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    294   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
    295   %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
    296   store i64 %tmp0, i64 addrspace(1)* %out2
    297   ret void
    298 }
    299 
    300 ; GCN-LABEL: {{^}}atomic_max_i64:
    301 ; GCN: buffer_atomic_smax_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
    302 define void @atomic_max_i64(i64 addrspace(1)* %out, i64 %in) {
    303 entry:
    304   %tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in seq_cst
    305   ret void
    306 }
    307 
    308 ; GCN-LABEL: {{^}}atomic_max_i64_ret:
    309 ; GCN: buffer_atomic_smax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
    310 ; GCN: buffer_store_dwordx2 [[RET]]
    311 define void @atomic_max_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
    312 entry:
    313   %tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in seq_cst
    314   store i64 %tmp0, i64 addrspace(1)* %out2
    315   ret void
    316 }
    317 
    318 ; GCN-LABEL: {{^}}atomic_max_i64_addr64:
    319 ; CI: buffer_atomic_smax_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
    320 ; VI: flat_atomic_smax_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
    321 define void @atomic_max_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
    322 entry:
    323   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    324   %tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in seq_cst
    325   ret void
    326 }
    327 
    328 ; GCN-LABEL: {{^}}atomic_max_i64_ret_addr64:
    329 ; CI: buffer_atomic_smax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
    330 ; VI: flat_atomic_smax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
    331 ; GCN: buffer_store_dwordx2 [[RET]]
    332 define void @atomic_max_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
    333 entry:
    334   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    335   %tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in seq_cst
    336   store i64 %tmp0, i64 addrspace(1)* %out2
    337   ret void
    338 }
    339 
    340 ; GCN-LABEL: {{^}}atomic_umax_i64_offset:
    341 ; GCN: buffer_atomic_umax_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
    342 define void @atomic_umax_i64_offset(i64 addrspace(1)* %out, i64 %in) {
    343 entry:
    344   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
    345   %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
    346   ret void
    347 }
    348 
    349 ; GCN-LABEL: {{^}}atomic_umax_i64_ret_offset:
    350 ; GCN: buffer_atomic_umax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
    351 ; GCN: buffer_store_dwordx2 [[RET]]
    352 define void @atomic_umax_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
    353 entry:
    354   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
    355   %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
    356   store i64 %tmp0, i64 addrspace(1)* %out2
    357   ret void
    358 }
    359 
    360 ; GCN-LABEL: {{^}}atomic_umax_i64_addr64_offset:
    361 ; CI: buffer_atomic_umax_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
    362 ; VI: flat_atomic_umax_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
    363 define void @atomic_umax_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
    364 entry:
    365   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    366   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
    367   %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
    368   ret void
    369 }
    370 
    371 ; GCN-LABEL: {{^}}atomic_umax_i64_ret_addr64_offset:
    372 ; CI: buffer_atomic_umax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
    373 ; VI: flat_atomic_umax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
    374 ; GCN: buffer_store_dwordx2 [[RET]]
    375 define void @atomic_umax_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
    376 entry:
    377   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    378   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
    379   %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
    380   store i64 %tmp0, i64 addrspace(1)* %out2
    381   ret void
    382 }
    383 
    384 ; GCN-LABEL: {{^}}atomic_umax_i64:
    385 ; GCN: buffer_atomic_umax_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
    386 define void @atomic_umax_i64(i64 addrspace(1)* %out, i64 %in) {
    387 entry:
    388   %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in seq_cst
    389   ret void
    390 }
    391 
    392 ; GCN-LABEL: {{^}}atomic_umax_i64_ret:
    393 ; GCN: buffer_atomic_umax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
    394 ; GCN: buffer_store_dwordx2 [[RET]]
    395 define void @atomic_umax_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
    396 entry:
    397   %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in seq_cst
    398   store i64 %tmp0, i64 addrspace(1)* %out2
    399   ret void
    400 }
    401 
    402 ; GCN-LABEL: {{^}}atomic_umax_i64_addr64:
    403 ; CI: buffer_atomic_umax_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
    404 ; VI: flat_atomic_umax_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
    405 define void @atomic_umax_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
    406 entry:
    407   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    408   %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in seq_cst
    409   ret void
    410 }
    411 
    412 ; GCN-LABEL: {{^}}atomic_umax_i64_ret_addr64:
    413 ; CI: buffer_atomic_umax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
    414 ; VI: flat_atomic_umax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
    415 ; GCN: buffer_store_dwordx2 [[RET]]
    416 define void @atomic_umax_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
    417 entry:
    418   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    419   %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in seq_cst
    420   store i64 %tmp0, i64 addrspace(1)* %out2
    421   ret void
    422 }
    423 
    424 ; GCN-LABEL: {{^}}atomic_min_i64_offset:
    425 ; GCN: buffer_atomic_smin_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
    426 define void @atomic_min_i64_offset(i64 addrspace(1)* %out, i64 %in) {
    427 entry:
    428   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
    429   %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
    430   ret void
    431 }
    432 
    433 ; GCN-LABEL: {{^}}atomic_min_i64_ret_offset:
    434 ; GCN: buffer_atomic_smin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
    435 ; GCN: buffer_store_dwordx2 [[RET]]
    436 define void @atomic_min_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
    437 entry:
    438   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
    439   %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
    440   store i64 %tmp0, i64 addrspace(1)* %out2
    441   ret void
    442 }
    443 
    444 ; GCN-LABEL: {{^}}atomic_min_i64_addr64_offset:
    445 ; CI: buffer_atomic_smin_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
    446 ; VI: flat_atomic_smin_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
    447 define void @atomic_min_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
    448 entry:
    449   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    450   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
    451   %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
    452   ret void
    453 }
    454 
    455 ; GCN-LABEL: {{^}}atomic_min_i64_ret_addr64_offset:
    456 ; CI: buffer_atomic_smin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
    457 ; VI: flat_atomic_smin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
    458 ; GCN: buffer_store_dwordx2 [[RET]]
    459 define void @atomic_min_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
    460 entry:
    461   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    462   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
    463   %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
    464   store i64 %tmp0, i64 addrspace(1)* %out2
    465   ret void
    466 }
    467 
    468 ; GCN-LABEL: {{^}}atomic_min_i64:
    469 ; GCN: buffer_atomic_smin_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
    470 define void @atomic_min_i64(i64 addrspace(1)* %out, i64 %in) {
    471 entry:
    472   %tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in seq_cst
    473   ret void
    474 }
    475 
    476 ; GCN-LABEL: {{^}}atomic_min_i64_ret:
    477 ; GCN: buffer_atomic_smin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
    478 ; GCN: buffer_store_dwordx2 [[RET]]
    479 define void @atomic_min_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
    480 entry:
    481   %tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in seq_cst
    482   store i64 %tmp0, i64 addrspace(1)* %out2
    483   ret void
    484 }
    485 
    486 ; GCN-LABEL: {{^}}atomic_min_i64_addr64:
    487 ; CI: buffer_atomic_smin_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
    488 ; VI: flat_atomic_smin_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
    489 define void @atomic_min_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
    490 entry:
    491   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    492   %tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in seq_cst
    493   ret void
    494 }
    495 
    496 ; GCN-LABEL: {{^}}atomic_min_i64_ret_addr64:
    497 ; CI: buffer_atomic_smin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
    498 ; VI: flat_atomic_smin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
    499 ; GCN: buffer_store_dwordx2 [[RET]]
    500 define void @atomic_min_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
    501 entry:
    502   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    503   %tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in seq_cst
    504   store i64 %tmp0, i64 addrspace(1)* %out2
    505   ret void
    506 }
    507 
    508 ; GCN-LABEL: {{^}}atomic_umin_i64_offset:
    509 ; GCN: buffer_atomic_umin_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
    510 define void @atomic_umin_i64_offset(i64 addrspace(1)* %out, i64 %in) {
    511 entry:
    512   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
    513   %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
    514   ret void
    515 }
    516 
    517 ; GCN-LABEL: {{^}}atomic_umin_i64_ret_offset:
    518 ; GCN: buffer_atomic_umin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
    519 ; GCN: buffer_store_dwordx2 [[RET]]
    520 define void @atomic_umin_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
    521 entry:
    522   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
    523   %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
    524   store i64 %tmp0, i64 addrspace(1)* %out2
    525   ret void
    526 }
    527 
    528 ; GCN-LABEL: {{^}}atomic_umin_i64_addr64_offset:
    529 ; CI: buffer_atomic_umin_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
    530 ; VI: flat_atomic_umin_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
    531 define void @atomic_umin_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
    532 entry:
    533   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    534   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
    535   %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
    536   ret void
    537 }
    538 
    539 ; GCN-LABEL: {{^}}atomic_umin_i64_ret_addr64_offset:
    540 ; CI: buffer_atomic_umin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
    541 ; VI: flat_atomic_umin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
    542 ; GCN: buffer_store_dwordx2 [[RET]]
    543 define void @atomic_umin_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
    544 entry:
    545   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    546   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
    547   %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
    548   store i64 %tmp0, i64 addrspace(1)* %out2
    549   ret void
    550 }
    551 
    552 ; GCN-LABEL: {{^}}atomic_umin_i64:
    553 ; GCN: buffer_atomic_umin_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
    554 define void @atomic_umin_i64(i64 addrspace(1)* %out, i64 %in) {
    555 entry:
    556   %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in seq_cst
    557   ret void
    558 }
    559 
    560 ; GCN-LABEL: {{^}}atomic_umin_i64_ret:
    561 ; CI: buffer_atomic_umin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
    562 ; GCN: buffer_store_dwordx2 [[RET]]
    563 define void @atomic_umin_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
    564 entry:
    565   %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in seq_cst
    566   store i64 %tmp0, i64 addrspace(1)* %out2
    567   ret void
    568 }
    569 
    570 ; GCN-LABEL: {{^}}atomic_umin_i64_addr64:
    571 ; CI: buffer_atomic_umin_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
    572 ; VI: flat_atomic_umin_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
    573 define void @atomic_umin_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
    574 entry:
    575   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    576   %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in seq_cst
    577   ret void
    578 }
    579 
; Indexed address with used result: glc form on both subtargets, result
; written back with a buffer store.
; GCN-LABEL: {{^}}atomic_umin_i64_ret_addr64:
; CI: buffer_atomic_umin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_umin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
define void @atomic_umin_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in seq_cst
  store i64 %tmp0, i64 addrspace(1)* %out2
  ret void
}
    591 
; Constant gep of 4 x i64 (= 32 bytes) must fold into the immediate
; offset field of the buffer atomic.
; GCN-LABEL: {{^}}atomic_or_i64_offset:
; GCN: buffer_atomic_or_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
define void @atomic_or_i64_offset(i64 addrspace(1)* %out, i64 %in) {
entry:
  %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
  ret void
}
    600 
; Folded offset plus used result: glc form, value stored out.
; GCN-LABEL: {{^}}atomic_or_i64_ret_offset:
; GCN: buffer_atomic_or_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
define void @atomic_or_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
  %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
  store i64 %tmp0, i64 addrspace(1)* %out2
  ret void
}
    611 
; Indexed address + constant offset: CI folds both into addr64+offset;
; VI uses a flat atomic (offset folded into the address computation).
; GCN-LABEL: {{^}}atomic_or_i64_addr64_offset:
; CI: buffer_atomic_or_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
; VI: flat_atomic_or_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
define void @atomic_or_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
  %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
  ret void
}
    622 
; Indexed address + offset with used result: glc form on both targets.
; GCN-LABEL: {{^}}atomic_or_i64_ret_addr64_offset:
; CI: buffer_atomic_or_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
; VI: flat_atomic_or_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
define void @atomic_or_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
  %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
  store i64 %tmp0, i64 addrspace(1)* %out2
  ret void
}
    635 
; Plain pointer, unused result: no-return buffer atomic, zero offset.
; GCN-LABEL: {{^}}atomic_or_i64:
; GCN: buffer_atomic_or_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_or_i64(i64 addrspace(1)* %out, i64 %in) {
entry:
  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %out, i64 %in seq_cst
  ret void
}
    643 
; Plain pointer, used result: glc form, value stored out.
; GCN-LABEL: {{^}}atomic_or_i64_ret:
; GCN: buffer_atomic_or_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dwordx2 [[RET]]
define void @atomic_or_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %out, i64 %in seq_cst
  store i64 %tmp0, i64 addrspace(1)* %out2
  ret void
}
    653 
; Indexed address: CI addr64 buffer atomic vs VI flat atomic.
; GCN-LABEL: {{^}}atomic_or_i64_addr64:
; CI: buffer_atomic_or_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_or_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
define void @atomic_or_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %ptr, i64 %in seq_cst
  ret void
}
    663 
; Indexed address with used result: glc form on both targets.
; GCN-LABEL: {{^}}atomic_or_i64_ret_addr64:
; CI: buffer_atomic_or_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_or_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
define void @atomic_or_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %ptr, i64 %in seq_cst
  store i64 %tmp0, i64 addrspace(1)* %out2
  ret void
}
    675 
; atomicrmw xchg lowers to the 'swap' buffer atomic; 32-byte constant
; gep folds into the offset field.
; GCN-LABEL: {{^}}atomic_xchg_i64_offset:
; GCN: buffer_atomic_swap_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
define void @atomic_xchg_i64_offset(i64 addrspace(1)* %out, i64 %in) {
entry:
  %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
  ret void
}
    684 
; Folded offset plus used result: glc form, value stored out.
; GCN-LABEL: {{^}}atomic_xchg_i64_ret_offset:
; GCN: buffer_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
define void @atomic_xchg_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
  %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
  store i64 %tmp0, i64 addrspace(1)* %out2
  ret void
}
    695 
; Indexed address + offset: CI addr64 buffer form vs VI flat form.
; GCN-LABEL: {{^}}atomic_xchg_i64_addr64_offset:
; CI: buffer_atomic_swap_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
; VI: flat_atomic_swap_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}}{{$}}
define void @atomic_xchg_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
  %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
  ret void
}
    706 
; Indexed address + offset with used result: glc form on both targets.
; GCN-LABEL: {{^}}atomic_xchg_i64_ret_addr64_offset:
; CI: buffer_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
; VI: flat_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
define void @atomic_xchg_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
  %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
  store i64 %tmp0, i64 addrspace(1)* %out2
  ret void
}
    719 
; Plain pointer, unused result: no-return swap, zero offset.
; GCN-LABEL: {{^}}atomic_xchg_i64:
; GCN: buffer_atomic_swap_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_xchg_i64(i64 addrspace(1)* %out, i64 %in) {
entry:
  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %out, i64 %in seq_cst
  ret void
}
    727 
; Plain pointer, used result: glc form, value stored out.
; GCN-LABEL: {{^}}atomic_xchg_i64_ret:
; GCN: buffer_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dwordx2 [[RET]]
define void @atomic_xchg_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %out, i64 %in seq_cst
  store i64 %tmp0, i64 addrspace(1)* %out2
  ret void
}
    737 
; Indexed address: CI addr64 buffer atomic vs VI flat atomic.
; GCN-LABEL: {{^}}atomic_xchg_i64_addr64:
; CI: buffer_atomic_swap_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_swap_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
define void @atomic_xchg_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %ptr, i64 %in seq_cst
  ret void
}
    747 
    748 ; GCN-LABEL: {{^}}atomic_xchg_i64_ret_addr64:
    749 ; CI: buffer_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
    750 ; VI: flat_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]],  v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
    751 ; GCN: buffer_store_dwordx2 [[RET]]
    752 define void @atomic_xchg_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
    753 entry:
    754   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    755   %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %ptr, i64 %in seq_cst
    756   store i64 %tmp0, i64 addrspace(1)* %out2
    757   ret void
    758 }
    759 
; 32-byte constant gep folds into the buffer atomic's offset field.
; GCN-LABEL: {{^}}atomic_xor_i64_offset:
; GCN: buffer_atomic_xor_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
define void @atomic_xor_i64_offset(i64 addrspace(1)* %out, i64 %in) {
entry:
  %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
  ret void
}
    768 
; Folded offset plus used result: glc form, value stored out.
; GCN-LABEL: {{^}}atomic_xor_i64_ret_offset:
; GCN: buffer_atomic_xor_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
define void @atomic_xor_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
  %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
  store i64 %tmp0, i64 addrspace(1)* %out2
  ret void
}
    779 
; Indexed address + offset: CI addr64 buffer form vs VI flat form.
; GCN-LABEL: {{^}}atomic_xor_i64_addr64_offset:
; CI: buffer_atomic_xor_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
; VI: flat_atomic_xor_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
define void @atomic_xor_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
  %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
  ret void
}
    790 
; Indexed address + offset with used result: glc form on both targets.
; GCN-LABEL: {{^}}atomic_xor_i64_ret_addr64_offset:
; CI: buffer_atomic_xor_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
; VI: flat_atomic_xor_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
define void @atomic_xor_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
  %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
  store i64 %tmp0, i64 addrspace(1)* %out2
  ret void
}
    803 
; Plain pointer, unused result: no-return buffer atomic, zero offset.
; GCN-LABEL: {{^}}atomic_xor_i64:
; GCN: buffer_atomic_xor_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_xor_i64(i64 addrspace(1)* %out, i64 %in) {
entry:
  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %out, i64 %in seq_cst
  ret void
}
    811 
; Plain pointer, used result: glc form, value stored out.
; GCN-LABEL: {{^}}atomic_xor_i64_ret:
; GCN: buffer_atomic_xor_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dwordx2 [[RET]]
define void @atomic_xor_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %out, i64 %in seq_cst
  store i64 %tmp0, i64 addrspace(1)* %out2
  ret void
}
    821 
; Indexed address: CI addr64 buffer atomic vs VI flat atomic.
; GCN-LABEL: {{^}}atomic_xor_i64_addr64:
; CI: buffer_atomic_xor_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_xor_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
define void @atomic_xor_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %ptr, i64 %in seq_cst
  ret void
}
    831 
; Indexed address with used result: glc form on both targets.
; GCN-LABEL: {{^}}atomic_xor_i64_ret_addr64:
; CI: buffer_atomic_xor_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_xor_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
define void @atomic_xor_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %ptr, i64 %in seq_cst
  store i64 %tmp0, i64 addrspace(1)* %out2
  ret void
}
    843 
    844 
    845 
    846 
    847 
    848 
    849 
    850 
    851 
    852 ; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_offset:
    853 ; GCN: buffer_atomic_cmpswap_x2 v[{{[0-9]+}}:{{[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
    854 define void @atomic_cmpxchg_i64_offset(i64 addrspace(1)* %out, i64 %in, i64 %old) {
    855 entry:
    856   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
    857   %val = cmpxchg volatile i64 addrspace(1)* %gep, i64 %old, i64 %in seq_cst seq_cst
    858   ret void
    859 }
    860 
    861 ; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_soffset:
    862 ; GCN: s_mov_b32 [[SREG:s[0-9]+]], 0x11940
    863 ; GCN: buffer_atomic_cmpswap_x2 v[{{[0-9]+}}:{{[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], [[SREG]]{{$}}
    864 define void @atomic_cmpxchg_i64_soffset(i64 addrspace(1)* %out, i64 %in, i64 %old) {
    865 entry:
    866   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 9000
    867   %val = cmpxchg volatile i64 addrspace(1)* %gep, i64 %old, i64 %in seq_cst seq_cst
    868   ret void
    869 }
    870 
    871 ; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_ret_offset:
    872 ; GCN: buffer_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]{{:[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
    873 ; GCN: buffer_store_dwordx2 v{{\[}}[[RET]]:
    874 define void @atomic_cmpxchg_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %old) {
    875 entry:
    876   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
    877   %val = cmpxchg volatile i64 addrspace(1)* %gep, i64 %old, i64 %in seq_cst seq_cst
    878   %extract0 = extractvalue { i64, i1 } %val, 0
    879   store i64 %extract0, i64 addrspace(1)* %out2
    880   ret void
    881 }
    882 
    883 ; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_addr64_offset:
    884 ; CI: buffer_atomic_cmpswap_x2 v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
    885 
    886 ; VI: flat_atomic_cmpswap_x2 v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}}
    887 define void @atomic_cmpxchg_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index, i64 %old) {
    888 entry:
    889   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    890   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
    891   %val = cmpxchg volatile i64 addrspace(1)* %gep, i64 %old, i64 %in seq_cst seq_cst
    892   ret void
    893 }
    894 
    895 ; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_ret_addr64_offset:
    896 ; CI: buffer_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
    897 ; VI: flat_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
    898 ; GCN: buffer_store_dwordx2 v{{\[}}[[RET]]:
    899 define void @atomic_cmpxchg_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index, i64 %old) {
    900 entry:
    901   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    902   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
    903   %val = cmpxchg volatile i64 addrspace(1)* %gep, i64 %old, i64 %in seq_cst seq_cst
    904   %extract0 = extractvalue { i64, i1 } %val, 0
    905   store i64 %extract0, i64 addrspace(1)* %out2
    906   ret void
    907 }
    908 
    909 ; FUNC-LABEL: {{^}}atomic_cmpxchg_i64:
    910 ; GCN: buffer_atomic_cmpswap_x2 v[{{[0-9]+:[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
    911 define void @atomic_cmpxchg_i64(i64 addrspace(1)* %out, i64 %in, i64 %old) {
    912 entry:
    913   %val = cmpxchg volatile i64 addrspace(1)* %out, i64 %old, i64 %in seq_cst seq_cst
    914   ret void
    915 }
    916 
    917 ; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_ret:
    918 ; GCN: buffer_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
    919 ; GCN: buffer_store_dwordx2 v{{\[}}[[RET]]:
    920 define void @atomic_cmpxchg_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %old) {
    921 entry:
    922   %val = cmpxchg volatile i64 addrspace(1)* %out, i64 %old, i64 %in seq_cst seq_cst
    923   %extract0 = extractvalue { i64, i1 } %val, 0
    924   store i64 %extract0, i64 addrspace(1)* %out2
    925   ret void
    926 }
    927 
    928 ; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_addr64:
    929 ; CI: buffer_atomic_cmpswap_x2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
    930 ; VI: flat_atomic_cmpswap_x2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]{{$}}
    931 define void @atomic_cmpxchg_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index, i64 %old) {
    932 entry:
    933   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    934   %val = cmpxchg volatile i64 addrspace(1)* %ptr, i64 %old, i64 %in seq_cst seq_cst
    935   ret void
    936 }
    937 
    938 ; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_ret_addr64:
    939 ; CI: buffer_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
    940 ; VI: flat_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
    941 ; GCN: buffer_store_dwordx2 v{{\[}}[[RET]]:
    942 define void @atomic_cmpxchg_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index, i64 %old) {
    943 entry:
    944   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
    945   %val = cmpxchg volatile i64 addrspace(1)* %ptr, i64 %old, i64 %in seq_cst seq_cst
    946   %extract0 = extractvalue { i64, i1 } %val, 0
    947   store i64 %extract0, i64 addrspace(1)* %out2
    948   ret void
    949 }
    950 
    951 ; FUNC-LABEL: {{^}}atomic_load_i64_offset:
    952 ; CI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
    953 ; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
    954 ; GCN: buffer_store_dwordx2 [[RET]]
    955 define void @atomic_load_i64_offset(i64 addrspace(1)* %in, i64 addrspace(1)* %out) {
    956 entry:
    957   %gep = getelementptr i64, i64 addrspace(1)* %in, i64 4
    958   %val = load atomic i64, i64 addrspace(1)* %gep  seq_cst, align 8
    959   store i64 %val, i64 addrspace(1)* %out
    960   ret void
    961 }
    962 
    963 ; FUNC-LABEL: {{^}}atomic_load_i64:
    964 ; CI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
    965 ; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc
    966 ; GCN: buffer_store_dwordx2 [[RET]]
    967 define void @atomic_load_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %out) {
    968 entry:
    969   %val = load atomic i64, i64 addrspace(1)* %in seq_cst, align 8
    970   store i64 %val, i64 addrspace(1)* %out
    971   ret void
    972 }
    973 
    974 ; FUNC-LABEL: {{^}}atomic_load_i64_addr64_offset:
    975 ; CI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
    976 ; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
    977 ; GCN: buffer_store_dwordx2 [[RET]]
    978 define void @atomic_load_i64_addr64_offset(i64 addrspace(1)* %in, i64 addrspace(1)* %out, i64 %index) {
    979 entry:
    980   %ptr = getelementptr i64, i64 addrspace(1)* %in, i64 %index
    981   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
    982   %val = load atomic i64, i64 addrspace(1)* %gep seq_cst, align 8
    983   store i64 %val, i64 addrspace(1)* %out
    984   ret void
    985 }
    986 
    987 ; FUNC-LABEL: {{^}}atomic_load_i64_addr64:
    988 ; CI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
    989 ; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
    990 ; GCN: buffer_store_dwordx2 [[RET]]
    991 define void @atomic_load_i64_addr64(i64 addrspace(1)* %in, i64 addrspace(1)* %out, i64 %index) {
    992 entry:
    993   %ptr = getelementptr i64, i64 addrspace(1)* %in, i64 %index
    994   %val = load atomic i64, i64 addrspace(1)* %ptr seq_cst, align 8
    995   store i64 %val, i64 addrspace(1)* %out
    996   ret void
    997 }
    998 
    999 ; FUNC-LABEL: {{^}}atomic_store_i64_offset:
   1000 ; CI: buffer_store_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
   1001 ; VI: flat_store_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
   1002 define void @atomic_store_i64_offset(i64 %in, i64 addrspace(1)* %out) {
   1003 entry:
   1004   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
   1005   store atomic i64 %in, i64 addrspace(1)* %gep  seq_cst, align 8
   1006   ret void
   1007 }
   1008 
   1009 ; FUNC-LABEL: {{^}}atomic_store_i64:
   1010 ; CI: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
   1011 ; VI: flat_store_dwordx2 {{v\[[0-9]+:[0-9]\]}}, v[{{[0-9]+}}:{{[0-9]+}}] glc
   1012 define void @atomic_store_i64(i64 %in, i64 addrspace(1)* %out) {
   1013 entry:
   1014   store atomic i64 %in, i64 addrspace(1)* %out seq_cst, align 8
   1015   ret void
   1016 }
   1017 
   1018 ; FUNC-LABEL: {{^}}atomic_store_i64_addr64_offset:
   1019 ; CI: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
   1020 ; VI: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}] glc{{$}}
   1021 define void @atomic_store_i64_addr64_offset(i64 %in, i64 addrspace(1)* %out, i64 %index) {
   1022 entry:
   1023   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   1024   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
   1025   store atomic i64 %in, i64 addrspace(1)* %gep seq_cst, align 8
   1026   ret void
   1027 }
   1028 
   1029 ; FUNC-LABEL: {{^}}atomic_store_i64_addr64:
   1030 ; CI: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
   1031 ; VI: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}] glc{{$}}
   1032 define void @atomic_store_i64_addr64(i64 %in, i64 addrspace(1)* %out, i64 %index) {
   1033 entry:
   1034   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   1035   store atomic i64 %in, i64 addrspace(1)* %ptr seq_cst, align 8
   1036   ret void
   1037 }
   1038