Home | History | Annotate | Download | only in CodeGen
      1 // RUN: %clang_cc1 %s -emit-llvm -o - -ffreestanding -triple=i686-apple-darwin9 | FileCheck %s
      2 // REQUIRES: x86-registered-target
      3 
      4 // Also test serialization of atomic operations here, to avoid duplicating the
      5 // test.
      6 // RUN: %clang_cc1 %s -emit-pch -o %t -ffreestanding -triple=i686-apple-darwin9
      7 // RUN: %clang_cc1 %s -include-pch %t -ffreestanding -triple=i686-apple-darwin9 -emit-llvm -o - | FileCheck %s
      8 #ifndef ALREADY_INCLUDED
      9 #define ALREADY_INCLUDED
     10 
     11 #include <stdatomic.h>
     12 
     13 // Basic IRGen tests for __c11_atomic_* and GNU __atomic_*
     14 
     15 int fi1(_Atomic(int) *i) {
     16   // CHECK-LABEL: @fi1
     17   // CHECK: load atomic i32, i32* {{.*}} seq_cst
     18   return __c11_atomic_load(i, memory_order_seq_cst);
     19 }
     20 
     21 int fi1a(int *i) {
     22   // CHECK-LABEL: @fi1a
     23   // CHECK: load atomic i32, i32* {{.*}} seq_cst
     24   int v;
     25   __atomic_load(i, &v, memory_order_seq_cst);
     26   return v;
     27 }
     28 
     29 int fi1b(int *i) {
     30   // CHECK-LABEL: @fi1b
     31   // CHECK: load atomic i32, i32* {{.*}} seq_cst
     32   return __atomic_load_n(i, memory_order_seq_cst);
     33 }
     34 
     35 int fi1c(atomic_int *i) {
     36   // CHECK-LABEL: @fi1c
     37   // CHECK: load atomic i32, i32* {{.*}} seq_cst
     38   return atomic_load(i);
     39 }
     40 
     41 void fi2(_Atomic(int) *i) {
     42   // CHECK-LABEL: @fi2
     43   // CHECK: store atomic i32 {{.*}} seq_cst
     44   __c11_atomic_store(i, 1, memory_order_seq_cst);
     45 }
     46 
     47 void fi2a(int *i) {
     48   // CHECK-LABEL: @fi2a
     49   // CHECK: store atomic i32 {{.*}} seq_cst
     50   int v = 1;
     51   __atomic_store(i, &v, memory_order_seq_cst);
     52 }
     53 
     54 void fi2b(int *i) {
     55   // CHECK-LABEL: @fi2b
     56   // CHECK: store atomic i32 {{.*}} seq_cst
     57   __atomic_store_n(i, 1, memory_order_seq_cst);
     58 }
     59 
     60 void fi2c(atomic_int *i) {
     61   // CHECK-LABEL: @fi2c
     62   // CHECK: store atomic i32 {{.*}} seq_cst
     63   atomic_store(i, 1);
     64 }
     65 
     66 int fi3(_Atomic(int) *i) {
     67   // CHECK-LABEL: @fi3
     68   // CHECK: atomicrmw and
     69   // CHECK-NOT: and
     70   return __c11_atomic_fetch_and(i, 1, memory_order_seq_cst);
     71 }
     72 
     73 int fi3a(int *i) {
     74   // CHECK-LABEL: @fi3a
     75   // CHECK: atomicrmw xor
     76   // CHECK-NOT: xor
     77   return __atomic_fetch_xor(i, 1, memory_order_seq_cst);
     78 }
     79 
     80 int fi3b(int *i) {
     81   // CHECK-LABEL: @fi3b
     82   // CHECK: atomicrmw add
     83   // CHECK: add
     84   return __atomic_add_fetch(i, 1, memory_order_seq_cst);
     85 }
     86 
     87 int fi3c(int *i) {
     88   // CHECK-LABEL: @fi3c
     89   // CHECK: atomicrmw nand
     90   // CHECK-NOT: and
     91   return __atomic_fetch_nand(i, 1, memory_order_seq_cst);
     92 }
     93 
     94 int fi3d(int *i) {
     95   // CHECK-LABEL: @fi3d
     96   // CHECK: atomicrmw nand
     97   // CHECK: and
     98   // CHECK: xor
     99   return __atomic_nand_fetch(i, 1, memory_order_seq_cst);
    100 }
    101 
    102 int fi3e(atomic_int *i) {
    103   // CHECK-LABEL: @fi3e
    104   // CHECK: atomicrmw or
    105   // CHECK-NOT: {{ or }}
    106   return atomic_fetch_or(i, 1);
    107 }
    108 
    109 int fi3f(int *i) {
    110   // CHECK-LABEL: @fi3f
    111   // CHECK-NOT: store volatile
    112   // CHECK: atomicrmw or
    113   // CHECK-NOT: {{ or }}
    114   return __atomic_fetch_or(i, (short)1, memory_order_seq_cst);
    115 }
    116 
    117 _Bool fi4(_Atomic(int) *i) {
    118   // CHECK-LABEL: @fi4(
    119   // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
    120   // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
    121   // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
    122   // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
    123   // CHECK: store i32 [[OLD]]
    124   int cmp = 0;
    125   return __c11_atomic_compare_exchange_strong(i, &cmp, 1, memory_order_acquire, memory_order_acquire);
    126 }
    127 
    128 _Bool fi4a(int *i) {
    129   // CHECK-LABEL: @fi4a
    130   // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
    131   // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
    132   // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
    133   // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
    134   // CHECK: store i32 [[OLD]]
    135   int cmp = 0;
    136   int desired = 1;
    137   return __atomic_compare_exchange(i, &cmp, &desired, 0, memory_order_acquire, memory_order_acquire);
    138 }
    139 
    140 _Bool fi4b(int *i) {
    141   // CHECK-LABEL: @fi4b(
    142   // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg weak i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
    143   // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
    144   // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
    145   // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
    146   // CHECK: store i32 [[OLD]]
    147   int cmp = 0;
    148   return __atomic_compare_exchange_n(i, &cmp, 1, 1, memory_order_acquire, memory_order_acquire);
    149 }
    150 
    151 _Bool fi4c(atomic_int *i) {
    152   // CHECK-LABEL: @fi4c
    153   // CHECK: cmpxchg i32*
    154   int cmp = 0;
    155   return atomic_compare_exchange_strong(i, &cmp, 1);
    156 }
    157 
    158 float ff1(_Atomic(float) *d) {
    159   // CHECK-LABEL: @ff1
    160   // CHECK: load atomic i32, i32* {{.*}} monotonic
    161   return __c11_atomic_load(d, memory_order_relaxed);
    162 }
    163 
    164 void ff2(_Atomic(float) *d) {
    165   // CHECK-LABEL: @ff2
    166   // CHECK: store atomic i32 {{.*}} release
    167   __c11_atomic_store(d, 1, memory_order_release);
    168 }
    169 
    170 float ff3(_Atomic(float) *d) {
    171   return __c11_atomic_exchange(d, 2, memory_order_seq_cst);
    172 }
    173 
    174 struct S {
    175   double x;
    176 };
    177 
    178 struct S fd1(struct S *a) {
    179   // CHECK-LABEL: @fd1
    180   // CHECK: [[RETVAL:%.*]] = alloca %struct.S, align 4
    181   // CHECK: [[RET:%.*]]    = alloca %struct.S, align 4
    182   // CHECK: [[CAST:%.*]]   = bitcast %struct.S* [[RET]] to i64*
    183   // CHECK: [[CALL:%.*]]   = call i64 @__atomic_load_8(
    184   // CHECK: store i64 [[CALL]], i64* [[CAST]], align 4
    185   struct S ret;
    186   __atomic_load(a, &ret, memory_order_seq_cst);
    187   return ret;
    188 }
    189 
    190 void fd2(struct S *a, struct S *b) {
    191   // CHECK-LABEL: @fd2
    192   // CHECK:      [[A_ADDR:%.*]] = alloca %struct.S*, align 4
    193   // CHECK-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 4
    194   // CHECK-NEXT: store %struct.S* %a, %struct.S** [[A_ADDR]], align 4
    195   // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4
    196   // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
    197   // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
    198   // CHECK-NEXT: [[COERCED_A_TMP:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i64*
    199   // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64*
    200   // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast i64* [[COERCED_A_TMP]] to i8*
    201   // CHECK-NEXT: [[LOAD_B:%.*]] = load i64, i64* [[COERCED_B]], align 4
    202   // CHECK-NEXT: call void @__atomic_store_8(i8* [[COERCED_A]], i64 [[LOAD_B]],
    203   // CHECK-NEXT: ret void
    204   __atomic_store(a, b, memory_order_seq_cst);
    205 }
    206 
    207 void fd3(struct S *a, struct S *b, struct S *c) {
    208   // CHECK-LABEL: @fd3
    209   // CHECK:      [[A_ADDR:%.*]] = alloca %struct.S*, align 4
    210   // CHECK-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 4
    211   // CHECK-NEXT: [[C_ADDR:%.*]] = alloca %struct.S*, align 4
    212   // CHECK-NEXT: store %struct.S* %a, %struct.S** [[A_ADDR]], align 4
    213   // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4
    214   // CHECK-NEXT: store %struct.S* %c, %struct.S** [[C_ADDR]], align 4
    215   // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
    216   // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
    217   // CHECK-NEXT: [[LOAD_C_PTR:%.*]] = load %struct.S*, %struct.S** [[C_ADDR]], align 4
    218   // CHECK-NEXT: [[COERCED_A_TMP:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i64*
    219   // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64*
    220   // CHECK-NEXT: [[COERCED_C:%.*]] = bitcast %struct.S* [[LOAD_C_PTR]] to i64*
    221   // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast i64* [[COERCED_A_TMP]] to i8*
    222   // CHECK-NEXT: [[LOAD_B:%.*]] = load i64, i64* [[COERCED_B]], align 4
    223   // CHECK-NEXT: [[CALL:%.*]] = call i64 @__atomic_exchange_8(i8* [[COERCED_A]], i64 [[LOAD_B]],
    224   // CHECK-NEXT: store i64 [[CALL]], i64* [[COERCED_C]], align 4
    225 
    226   __atomic_exchange(a, b, c, memory_order_seq_cst);
    227 }
    228 
    229 _Bool fd4(struct S *a, struct S *b, struct S *c) {
    230   // CHECK-LABEL: @fd4
    231   // CHECK:      [[A_ADDR:%.*]] = alloca %struct.S*, align 4
    232   // CHECK-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 4
    233   // CHECK-NEXT: [[C_ADDR:%.*]] = alloca %struct.S*, align 4
    234   // CHECK:      store %struct.S* %a, %struct.S** [[A_ADDR]], align 4
    235   // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4
    236   // CHECK-NEXT: store %struct.S* %c, %struct.S** [[C_ADDR]], align 4
    237   // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
    238   // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
    239   // CHECK-NEXT: [[LOAD_C_PTR:%.*]] = load %struct.S*, %struct.S** [[C_ADDR]], align 4
    240   // CHECK-NEXT: [[COERCED_A_TMP:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i64*
    241   // CHECK-NEXT: [[COERCED_B_TMP:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64*
    242   // CHECK-NEXT: [[COERCED_C:%.*]] = bitcast %struct.S* [[LOAD_C_PTR]] to i64*
    243   // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast i64* [[COERCED_A_TMP]] to i8*
    244   // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast i64* [[COERCED_B_TMP]] to i8*
    245   // CHECK-NEXT: [[LOAD_C:%.*]] = load i64, i64* [[COERCED_C]], align 4
    246   // CHECK-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange_8(i8* [[COERCED_A]], i8* [[COERCED_B]], i64 [[LOAD_C]]
    247   // CHECK-NEXT: ret i1 [[CALL]]
    248   return __atomic_compare_exchange(a, b, c, 1, 5, 5);
    249 }
    250 
    251 int* fp1(_Atomic(int*) *p) {
    252   // CHECK-LABEL: @fp1
    253   // CHECK: load atomic i32, i32* {{.*}} seq_cst
    254   return __c11_atomic_load(p, memory_order_seq_cst);
    255 }
    256 
    257 int* fp2(_Atomic(int*) *p) {
    258   // CHECK-LABEL: @fp2
    259   // CHECK: store i32 4
    260   // CHECK: atomicrmw add {{.*}} monotonic
    261   return __c11_atomic_fetch_add(p, 1, memory_order_relaxed);
    262 }
    263 
    264 int *fp2a(int **p) {
    265   // CHECK-LABEL: @fp2a
    266   // CHECK: store i32 4
    267   // CHECK: atomicrmw sub {{.*}} monotonic
    268   // Note, the GNU builtins do not multiply by sizeof(T)!
    269   return __atomic_fetch_sub(p, 4, memory_order_relaxed);
    270 }
    271 
    272 _Complex float fc(_Atomic(_Complex float) *c) {
    273   // CHECK-LABEL: @fc
    274   // CHECK: atomicrmw xchg i64*
    275   return __c11_atomic_exchange(c, 2, memory_order_seq_cst);
    276 }
    277 
    278 typedef struct X { int x; } X;
    279 X fs(_Atomic(X) *c) {
    280   // CHECK-LABEL: @fs
    281   // CHECK: atomicrmw xchg i32*
    282   return __c11_atomic_exchange(c, (X){2}, memory_order_seq_cst);
    283 }
    284 
    285 X fsa(X *c, X *d) {
    286   // CHECK-LABEL: @fsa
    287   // CHECK: atomicrmw xchg i32*
    288   X ret;
    289   __atomic_exchange(c, d, &ret, memory_order_seq_cst);
    290   return ret;
    291 }
    292 
    293 _Bool fsb(_Bool *c) {
    294   // CHECK-LABEL: @fsb
    295   // CHECK: atomicrmw xchg i8*
    296   return __atomic_exchange_n(c, 1, memory_order_seq_cst);
    297 }
    298 
    299 char flag1;
    300 volatile char flag2;
    301 void test_and_set() {
    302   // CHECK: atomicrmw xchg i8* @flag1, i8 1 seq_cst
    303   __atomic_test_and_set(&flag1, memory_order_seq_cst);
    304   // CHECK: atomicrmw volatile xchg i8* @flag2, i8 1 acquire
    305   __atomic_test_and_set(&flag2, memory_order_acquire);
    306   // CHECK: store atomic volatile i8 0, i8* @flag2 release
    307   __atomic_clear(&flag2, memory_order_release);
    308   // CHECK: store atomic i8 0, i8* @flag1 seq_cst
    309   __atomic_clear(&flag1, memory_order_seq_cst);
    310 }
    311 
    312 struct Sixteen {
    313   char c[16];
    314 } sixteen;
    315 struct Seventeen {
    316   char c[17];
    317 } seventeen;
    318 
    319 struct Incomplete;
    320 
    321 int lock_free(struct Incomplete *incomplete) {
    322   // CHECK-LABEL: @lock_free
    323 
    324   // CHECK: call i32 @__atomic_is_lock_free(i32 3, i8* null)
    325   __c11_atomic_is_lock_free(3);
    326 
    327   // CHECK: call i32 @__atomic_is_lock_free(i32 16, i8* {{.*}}@sixteen{{.*}})
    328   __atomic_is_lock_free(16, &sixteen);
    329 
    330   // CHECK: call i32 @__atomic_is_lock_free(i32 17, i8* {{.*}}@seventeen{{.*}})
    331   __atomic_is_lock_free(17, &seventeen);
    332 
    333   // CHECK: call i32 @__atomic_is_lock_free(i32 4, {{.*}})
    334   __atomic_is_lock_free(4, incomplete);
    335 
    336   char cs[20];
    337   // CHECK: call i32 @__atomic_is_lock_free(i32 4, {{.*}})
    338   __atomic_is_lock_free(4, cs+1);
    339 
    340   // CHECK-NOT: call
    341   __atomic_always_lock_free(3, 0);
    342   __atomic_always_lock_free(16, 0);
    343   __atomic_always_lock_free(17, 0);
    344   __atomic_always_lock_free(16, &sixteen);
    345   __atomic_always_lock_free(17, &seventeen);
    346 
    347   int n;
    348   __atomic_is_lock_free(4, &n);
    349 
    350   // CHECK: ret i32 1
    351   return __c11_atomic_is_lock_free(sizeof(_Atomic(int)));
    352 }
    353 
    354 // Tests for atomic operations on big values.  These should call the functions
    355 // defined here:
    356 // http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary#The_Library_interface
    357 
    358 struct foo {
    359   int big[128];
    360 };
    361 struct bar {
    362   char c[3];
    363 };
    364 
    365 struct bar smallThing, thing1, thing2;
    366 struct foo bigThing;
    367 _Atomic(struct foo) bigAtomic;
    368 
    369 void structAtomicStore() {
    370   // CHECK-LABEL: @structAtomicStore
    371   struct foo f = {0};
    372   struct bar b = {0};
    373   __atomic_store(&smallThing, &b, 5);
    374   // CHECK: call void @__atomic_store(i32 3, i8* {{.*}} @smallThing
    375 
    376   __atomic_store(&bigThing, &f, 5);
    377   // CHECK: call void @__atomic_store(i32 512, i8* {{.*}} @bigThing
    378 }
    379 void structAtomicLoad() {
    380   // CHECK-LABEL: @structAtomicLoad
    381   struct bar b;
    382   __atomic_load(&smallThing, &b, 5);
    383   // CHECK: call void @__atomic_load(i32 3, i8* {{.*}} @smallThing
    384 
    385   struct foo f = {0};
    386   __atomic_load(&bigThing, &f, 5);
    387   // CHECK: call void @__atomic_load(i32 512, i8* {{.*}} @bigThing
    388 }
    389 struct foo structAtomicExchange() {
    390   // CHECK-LABEL: @structAtomicExchange
    391   struct foo f = {0};
    392   struct foo old;
    393   __atomic_exchange(&f, &bigThing, &old, 5);
    394   // CHECK: call void @__atomic_exchange(i32 512, {{.*}}, i8* bitcast ({{.*}} @bigThing to i8*),
    395 
    396   return __c11_atomic_exchange(&bigAtomic, f, 5);
    397   // CHECK: call void @__atomic_exchange(i32 512, i8* bitcast ({{.*}} @bigAtomic to i8*),
    398 }
    399 int structAtomicCmpExchange() {
    400   // CHECK-LABEL: @structAtomicCmpExchange
    401   // CHECK: %[[x_mem:.*]] = alloca i8
    402   _Bool x = __atomic_compare_exchange(&smallThing, &thing1, &thing2, 1, 5, 5);
    403   // CHECK: %[[call1:.*]] = call zeroext i1 @__atomic_compare_exchange(i32 3, {{.*}} @smallThing{{.*}} @thing1{{.*}} @thing2
    404   // CHECK: %[[zext1:.*]] = zext i1 %[[call1]] to i8
    405   // CHECK: store i8 %[[zext1]], i8* %[[x_mem]], align 1
    406   // CHECK: %[[x:.*]] = load i8, i8* %[[x_mem]]
    407   // CHECK: %[[x_bool:.*]] = trunc i8 %[[x]] to i1
    408   // CHECK: %[[conv1:.*]] = zext i1 %[[x_bool]] to i32
    409 
    410   struct foo f = {0};
    411   struct foo g = {0};
    412   g.big[12] = 12;
    413   return x & __c11_atomic_compare_exchange_strong(&bigAtomic, &f, g, 5, 5);
    414   // CHECK: %[[call2:.*]] = call zeroext i1 @__atomic_compare_exchange(i32 512, i8* bitcast ({{.*}} @bigAtomic to i8*),
    415   // CHECK: %[[conv2:.*]] = zext i1 %[[call2]] to i32
    416   // CHECK: %[[and:.*]] = and i32 %[[conv1]], %[[conv2]]
    417   // CHECK: ret i32 %[[and]]
    418 }
    419 
    420 // Check that no atomic operations are used in any initialisation of _Atomic
    421 // types.
    422 _Atomic(int) atomic_init_i = 42;
    423 
    424 // CHECK-LABEL: @atomic_init_foo
    425 void atomic_init_foo()
    426 {
    427   // CHECK-NOT: }
    428   // CHECK-NOT: atomic
    429   // CHECK: store
    430   _Atomic(int) j = 12;
    431 
    432   // CHECK-NOT: }
    433   // CHECK-NOT: atomic
    434   // CHECK: store
    435   __c11_atomic_init(&j, 42);
    436 
    437   // CHECK-NOT: atomic
    438   // CHECK: }
    439 }
    440 
    441 // CHECK-LABEL: @failureOrder
    442 void failureOrder(_Atomic(int) *ptr, int *ptr2) {
    443   __c11_atomic_compare_exchange_strong(ptr, ptr2, 43, memory_order_acquire, memory_order_relaxed);
    444   // CHECK: cmpxchg i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} acquire monotonic
    445 
    446   __c11_atomic_compare_exchange_weak(ptr, ptr2, 43, memory_order_seq_cst, memory_order_acquire);
    447   // CHECK: cmpxchg weak i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} seq_cst acquire
    448 
    449   // Unknown ordering: conservatively pick strongest valid option (for now!).
    450   __atomic_compare_exchange(ptr2, ptr2, ptr2, 0, memory_order_acq_rel, *ptr2);
    451   // CHECK: cmpxchg i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} acq_rel acquire
    452 
    453   // Undefined behaviour: don't really care what that last ordering is so leave
    454   // it out:
    455   __atomic_compare_exchange_n(ptr2, ptr2, 43, 1, memory_order_seq_cst, 42);
    456   // CHECK: cmpxchg weak i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} seq_cst
    457 }
    458 
    459 // CHECK-LABEL: @generalFailureOrder
    460 void generalFailureOrder(_Atomic(int) *ptr, int *ptr2, int success, int fail) {
    461   __c11_atomic_compare_exchange_strong(ptr, ptr2, 42, success, fail);
    462   // CHECK: switch i32 {{.*}}, label %[[MONOTONIC:[0-9a-zA-Z._]+]] [
    463   // CHECK-NEXT: i32 1, label %[[ACQUIRE:[0-9a-zA-Z._]+]]
    464   // CHECK-NEXT: i32 2, label %[[ACQUIRE]]
    465   // CHECK-NEXT: i32 3, label %[[RELEASE:[0-9a-zA-Z._]+]]
    466   // CHECK-NEXT: i32 4, label %[[ACQREL:[0-9a-zA-Z._]+]]
    467   // CHECK-NEXT: i32 5, label %[[SEQCST:[0-9a-zA-Z._]+]]
    468 
    469   // CHECK: [[MONOTONIC]]
    470   // CHECK: switch {{.*}}, label %[[MONOTONIC_MONOTONIC:[0-9a-zA-Z._]+]] [
    471   // CHECK-NEXT: ]
    472 
    473   // CHECK: [[ACQUIRE]]
    474   // CHECK: switch {{.*}}, label %[[ACQUIRE_MONOTONIC:[0-9a-zA-Z._]+]] [
    475   // CHECK-NEXT: i32 1, label %[[ACQUIRE_ACQUIRE:[0-9a-zA-Z._]+]]
    476   // CHECK-NEXT: i32 2, label %[[ACQUIRE_ACQUIRE:[0-9a-zA-Z._]+]]
    477   // CHECK-NEXT: ]
    478 
    479   // CHECK: [[RELEASE]]
    480   // CHECK: switch {{.*}}, label %[[RELEASE_MONOTONIC:[0-9a-zA-Z._]+]] [
    481   // CHECK-NEXT: ]
    482 
    483   // CHECK: [[ACQREL]]
    484   // CHECK: switch {{.*}}, label %[[ACQREL_MONOTONIC:[0-9a-zA-Z._]+]] [
    485   // CHECK-NEXT: i32 1, label %[[ACQREL_ACQUIRE:[0-9a-zA-Z._]+]]
    486   // CHECK-NEXT: i32 2, label %[[ACQREL_ACQUIRE:[0-9a-zA-Z._]+]]
    487   // CHECK-NEXT: ]
    488 
    489   // CHECK: [[SEQCST]]
    490   // CHECK: switch {{.*}}, label %[[SEQCST_MONOTONIC:[0-9a-zA-Z._]+]] [
    491   // CHECK-NEXT: i32 1, label %[[SEQCST_ACQUIRE:[0-9a-zA-Z._]+]]
    492   // CHECK-NEXT: i32 2, label %[[SEQCST_ACQUIRE:[0-9a-zA-Z._]+]]
    493   // CHECK-NEXT: i32 5, label %[[SEQCST_SEQCST:[0-9a-zA-Z._]+]]
    494   // CHECK-NEXT: ]
    495 
    496   // CHECK: [[MONOTONIC_MONOTONIC]]
    497   // CHECK: cmpxchg {{.*}} monotonic monotonic
    498   // CHECK: br
    499 
    500   // CHECK: [[ACQUIRE_MONOTONIC]]
    501   // CHECK: cmpxchg {{.*}} acquire monotonic
    502   // CHECK: br
    503 
    504   // CHECK: [[ACQUIRE_ACQUIRE]]
    505   // CHECK: cmpxchg {{.*}} acquire acquire
    506   // CHECK: br
    507 
    508   // CHECK: [[ACQREL_MONOTONIC]]
    509   // CHECK: cmpxchg {{.*}} acq_rel monotonic
    510   // CHECK: br
    511 
    512   // CHECK: [[ACQREL_ACQUIRE]]
    513   // CHECK: cmpxchg {{.*}} acq_rel acquire
    514   // CHECK: br
    515 
    516   // CHECK: [[SEQCST_MONOTONIC]]
    517   // CHECK: cmpxchg {{.*}} seq_cst monotonic
    518   // CHECK: br
    519 
    520   // CHECK: [[SEQCST_ACQUIRE]]
    521   // CHECK: cmpxchg {{.*}} seq_cst acquire
    522   // CHECK: br
    523 
    524   // CHECK: [[SEQCST_SEQCST]]
    525   // CHECK: cmpxchg {{.*}} seq_cst seq_cst
    526   // CHECK: br
    527 }
    528 
    529 void generalWeakness(int *ptr, int *ptr2, _Bool weak) {
    530   __atomic_compare_exchange_n(ptr, ptr2, 42, weak, memory_order_seq_cst, memory_order_seq_cst);
    531   // CHECK: switch i1 {{.*}}, label %[[WEAK:[0-9a-zA-Z._]+]] [
    532   // CHECK-NEXT: i1 false, label %[[STRONG:[0-9a-zA-Z._]+]]
    533 
    534   // CHECK: [[STRONG]]
    535   // CHECK-NOT: br
    536   // CHECK: cmpxchg {{.*}} seq_cst seq_cst
    537   // CHECK: br
    538 
    539   // CHECK: [[WEAK]]
    540   // CHECK-NOT: br
    541   // CHECK: cmpxchg weak {{.*}} seq_cst seq_cst
    542   // CHECK: br
    543 }
    544 
    545 // Having checked the flow in the previous two cases, we'll trust clang to
    546 // combine them sanely.
    547 void EMIT_ALL_THE_THINGS(int *ptr, int *ptr2, int new, _Bool weak, int success, int fail) {
    548   __atomic_compare_exchange(ptr, ptr2, &new, weak, success, fail);
    549 
    550   // CHECK: = cmpxchg {{.*}} monotonic monotonic
    551   // CHECK: = cmpxchg weak {{.*}} monotonic monotonic
    552   // CHECK: = cmpxchg {{.*}} acquire monotonic
    553   // CHECK: = cmpxchg {{.*}} acquire acquire
    554   // CHECK: = cmpxchg weak {{.*}} acquire monotonic
    555   // CHECK: = cmpxchg weak {{.*}} acquire acquire
    556   // CHECK: = cmpxchg {{.*}} release monotonic
    557   // CHECK: = cmpxchg weak {{.*}} release monotonic
    558   // CHECK: = cmpxchg {{.*}} acq_rel monotonic
    559   // CHECK: = cmpxchg {{.*}} acq_rel acquire
    560   // CHECK: = cmpxchg weak {{.*}} acq_rel monotonic
    561   // CHECK: = cmpxchg weak {{.*}} acq_rel acquire
    562   // CHECK: = cmpxchg {{.*}} seq_cst monotonic
    563   // CHECK: = cmpxchg {{.*}} seq_cst acquire
    564   // CHECK: = cmpxchg {{.*}} seq_cst seq_cst
    565   // CHECK: = cmpxchg weak {{.*}} seq_cst monotonic
    566   // CHECK: = cmpxchg weak {{.*}} seq_cst acquire
    567   // CHECK: = cmpxchg weak {{.*}} seq_cst seq_cst
    568 }
    569 
    570 int PR21643() {
    571   return __atomic_or_fetch((int __attribute__((address_space(257))) *)0x308, 1,
    572                            __ATOMIC_RELAXED);
    573   // CHECK: %[[atomictmp:.*]] = alloca i32, align 4
    574   // CHECK: %[[atomicdst:.*]] = alloca i32, align 4
    575   // CHECK: store i32 1, i32* %[[atomictmp]]
    576   // CHECK: %[[one:.*]] = load i32, i32* %[[atomictmp]], align 4
    577   // CHECK: %[[old:.*]] = atomicrmw or i32 addrspace(257)* inttoptr (i32 776 to i32 addrspace(257)*), i32 %[[one]] monotonic
    578   // CHECK: %[[new:.*]] = or i32 %[[old]], %[[one]]
    579   // CHECK: store i32 %[[new]], i32* %[[atomicdst]], align 4
    580   // CHECK: %[[ret:.*]] = load i32, i32* %[[atomicdst]], align 4
    581   // CHECK: ret i32 %[[ret]]
    582 }
    583 
    584 int PR17306_1(volatile _Atomic(int) *i) {
    585   // CHECK-LABEL: @PR17306_1
    586   // CHECK:      %[[i_addr:.*]] = alloca i32
    587   // CHECK-NEXT: %[[atomicdst:.*]] = alloca i32
    588   // CHECK-NEXT: store i32* %i, i32** %[[i_addr]]
    589   // CHECK-NEXT: %[[addr:.*]] = load i32*, i32** %[[i_addr]]
    590   // CHECK-NEXT: %[[res:.*]] = load atomic volatile i32, i32* %[[addr]] seq_cst
    591   // CHECK-NEXT: store i32 %[[res]], i32* %[[atomicdst]]
    592   // CHECK-NEXT: %[[retval:.*]] = load i32, i32* %[[atomicdst]]
    593   // CHECK-NEXT: ret i32 %[[retval]]
    594   return __c11_atomic_load(i, memory_order_seq_cst);
    595 }
    596 
    597 int PR17306_2(volatile int *i, int value) {
    598   // CHECK-LABEL: @PR17306_2
    599   // CHECK:      %[[i_addr:.*]] = alloca i32*
    600   // CHECK-NEXT: %[[value_addr:.*]] = alloca i32
    601   // CHECK-NEXT: %[[atomictmp:.*]] = alloca i32
    602   // CHECK-NEXT: %[[atomicdst:.*]] = alloca i32
    603   // CHECK-NEXT: store i32* %i, i32** %[[i_addr]]
    604   // CHECK-NEXT: store i32 %value, i32* %[[value_addr]]
    605   // CHECK-NEXT: %[[i_lval:.*]] = load i32*, i32** %[[i_addr]]
    606   // CHECK-NEXT: %[[value:.*]] = load i32, i32* %[[value_addr]]
    607   // CHECK-NEXT: store i32 %[[value]], i32* %[[atomictmp]]
    608   // CHECK-NEXT: %[[value_lval:.*]] = load i32, i32* %[[atomictmp]]
    609   // CHECK-NEXT: %[[old_val:.*]] = atomicrmw volatile add i32* %[[i_lval]], i32 %[[value_lval]] seq_cst
    610   // CHECK-NEXT: %[[new_val:.*]] = add i32 %[[old_val]], %[[value_lval]]
    611   // CHECK-NEXT: store i32 %[[new_val]], i32* %[[atomicdst]]
    612   // CHECK-NEXT: %[[retval:.*]] = load i32, i32* %[[atomicdst]]
    613   // CHECK-NEXT: ret i32 %[[retval]]
    614   return __atomic_add_fetch(i, value, memory_order_seq_cst);
    615 }
    616 
    617 #endif
    618