// RUN: %clang_cc1 %s -emit-llvm -o - -ffreestanding -ffake-address-space-map -triple=i686-apple-darwin9 | FileCheck %s
// REQUIRES: x86-registered-target

// Also test serialization of atomic operations here, to avoid duplicating the
// test.
// RUN: %clang_cc1 %s -emit-pch -o %t -ffreestanding -ffake-address-space-map -triple=i686-apple-darwin9
// RUN: %clang_cc1 %s -include-pch %t -ffreestanding -ffake-address-space-map -triple=i686-apple-darwin9 -emit-llvm -o - | FileCheck %s
#ifndef ALREADY_INCLUDED
#define ALREADY_INCLUDED

#include <stdatomic.h>

// Basic IRGen tests for __c11_atomic_* and GNU __atomic_*
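// Three flavours are exercised below: the C11 __c11_atomic_* builtins (which
// take _Atomic-qualified operands), the GNU __atomic_* builtins (which take
// ordinary pointers), and the <stdatomic.h> generic functions layered on top.
// For these lock-free widths they should all lower to native LLVM atomic
// instructions rather than libcalls.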

int fi1(_Atomic(int) *i) {
  // CHECK-LABEL: @fi1
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  return __c11_atomic_load(i, memory_order_seq_cst);
}

int fi1a(int *i) {
  // CHECK-LABEL: @fi1a
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  int v;
  __atomic_load(i, &v, memory_order_seq_cst);
  return v;
}

int fi1b(int *i) {
  // CHECK-LABEL: @fi1b
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  return __atomic_load_n(i, memory_order_seq_cst);
}

int fi1c(atomic_int *i) {
  // CHECK-LABEL: @fi1c
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  return atomic_load(i);
}

void fi2(_Atomic(int) *i) {
  // CHECK-LABEL: @fi2
  // CHECK: store atomic i32 {{.*}} seq_cst
  __c11_atomic_store(i, 1, memory_order_seq_cst);
}

void fi2a(int *i) {
  // CHECK-LABEL: @fi2a
  // CHECK: store atomic i32 {{.*}} seq_cst
  int v = 1;
  __atomic_store(i, &v, memory_order_seq_cst);
}

void fi2b(int *i) {
  // CHECK-LABEL: @fi2b
  // CHECK: store atomic i32 {{.*}} seq_cst
  __atomic_store_n(i, 1, memory_order_seq_cst);
}

void fi2c(atomic_int *i) {
  // CHECK-LABEL: @fi2c
  // CHECK: store atomic i32 {{.*}} seq_cst
  atomic_store(i, 1);
}

int fi3(_Atomic(int) *i) {
  // CHECK-LABEL: @fi3
  // CHECK: atomicrmw and
  // CHECK-NOT: and
  return __c11_atomic_fetch_and(i, 1, memory_order_seq_cst);
}

int fi3a(int *i) {
  // CHECK-LABEL: @fi3a
  // CHECK: atomicrmw xor
  // CHECK-NOT: xor
  return __atomic_fetch_xor(i, 1, memory_order_seq_cst);
}

int fi3b(int *i) {
  // CHECK-LABEL: @fi3b
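  // Unlike the fetch_* forms above, __atomic_add_fetch returns the *new*
  // value, so IRGen re-applies the add to the atomicrmw result; hence a plain
  // add instruction is expected after the atomicrmw here.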
  // CHECK: atomicrmw add
  // CHECK: add
  return __atomic_add_fetch(i, 1, memory_order_seq_cst);
}

int fi3c(int *i) {
  // CHECK-LABEL: @fi3c
  // CHECK: atomicrmw nand
  // CHECK-NOT: and
  return __atomic_fetch_nand(i, 1, memory_order_seq_cst);
}

int fi3d(int *i) {
  // CHECK-LABEL: @fi3d
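  // nand_fetch returns the new value ~(old & 1); IRGen recomputes it from the
  // atomicrmw result as (old & 1) ^ -1, hence the and/xor pair below.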
  // CHECK: atomicrmw nand
  // CHECK: and
  // CHECK: xor
  return __atomic_nand_fetch(i, 1, memory_order_seq_cst);
}

int fi3e(atomic_int *i) {
  // CHECK-LABEL: @fi3e
  // CHECK: atomicrmw or
  // CHECK-NOT: {{ or }}
  return atomic_fetch_or(i, 1);
}

int fi3f(int *i) {
  // CHECK-LABEL: @fi3f
  // CHECK-NOT: store volatile
  // CHECK: atomicrmw or
  // CHECK-NOT: {{ or }}
  return __atomic_fetch_or(i, (short)1, memory_order_seq_cst);
}

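// For compare-exchange, cmpxchg yields an {old value, success flag} pair;
// when the exchange fails, the old value must be written back to the
// 'expected' temporary. That write-back is the guarded store in the checks
// below.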
_Bool fi4(_Atomic(int) *i) {
  // CHECK-LABEL: @fi4(
  // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
  // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
  // CHECK: store i32 [[OLD]]
  int cmp = 0;
  return __c11_atomic_compare_exchange_strong(i, &cmp, 1, memory_order_acquire, memory_order_acquire);
}

_Bool fi4a(int *i) {
  // CHECK-LABEL: @fi4a
  // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
  // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
  // CHECK: store i32 [[OLD]]
  int cmp = 0;
  int desired = 1;
  return __atomic_compare_exchange(i, &cmp, &desired, 0, memory_order_acquire, memory_order_acquire);
}

_Bool fi4b(int *i) {
  // CHECK-LABEL: @fi4b(
  // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg weak i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
  // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
  // CHECK: store i32 [[OLD]]
  int cmp = 0;
  return __atomic_compare_exchange_n(i, &cmp, 1, 1, memory_order_acquire, memory_order_acquire);
}

_Bool fi4c(atomic_int *i) {
  // CHECK-LABEL: @fi4c
  // CHECK: cmpxchg i32*
  int cmp = 0;
  return atomic_compare_exchange_strong(i, &cmp, 1);
}

#define _AS1 __attribute__((address_space(1)))
_Bool fi4d(_Atomic(int) *i, int _AS1 *ptr2) {
  // CHECK-LABEL: @fi4d(
  // CHECK: [[EXPECTED:%[.0-9A-Z_a-z]+]] = load i32, i32 addrspace(1)* %{{[0-9]+}}
  // CHECK: cmpxchg i32* %{{[0-9]+}}, i32 [[EXPECTED]], i32 %{{[0-9]+}} acquire acquire
  return __c11_atomic_compare_exchange_strong(i, ptr2, 1, memory_order_acquire, memory_order_acquire);
}

float ff1(_Atomic(float) *d) {
  // CHECK-LABEL: @ff1
  // CHECK: load atomic i32, i32* {{.*}} monotonic
  return __c11_atomic_load(d, memory_order_relaxed);
}

void ff2(_Atomic(float) *d) {
  // CHECK-LABEL: @ff2
  // CHECK: store atomic i32 {{.*}} release
  __c11_atomic_store(d, 1, memory_order_release);
}

float ff3(_Atomic(float) *d) {
  return __c11_atomic_exchange(d, 2, memory_order_seq_cst);
}

struct S {
  double x;
};

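// struct S is 8 bytes in size but, as a plain (non-_Atomic) type on this i686
// target, only 4-byte aligned; the GNU builtins therefore cannot assume a
// native 8-byte atomic access and lower to the optimized
// __atomic_{load,store,exchange,compare_exchange}_8 library calls instead.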
struct S fd1(struct S *a) {
  // CHECK-LABEL: @fd1
  // CHECK: [[RETVAL:%.*]] = alloca %struct.S, align 4
  // CHECK: [[RET:%.*]]    = alloca %struct.S, align 4
  // CHECK: [[CAST:%.*]]   = bitcast %struct.S* [[RET]] to i64*
  // CHECK: [[CALL:%.*]]   = call i64 @__atomic_load_8(
  // CHECK: store i64 [[CALL]], i64* [[CAST]], align 4
  struct S ret;
  __atomic_load(a, &ret, memory_order_seq_cst);
  return ret;
}

void fd2(struct S *a, struct S *b) {
  // CHECK-LABEL: @fd2
  // CHECK:      [[A_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: store %struct.S* %a, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: [[COERCED_A_TMP:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast i64* [[COERCED_A_TMP]] to i8*
  // CHECK-NEXT: [[LOAD_B:%.*]] = load i64, i64* [[COERCED_B]], align 4
  // CHECK-NEXT: call void @__atomic_store_8(i8* [[COERCED_A]], i64 [[LOAD_B]],
  // CHECK-NEXT: ret void
  __atomic_store(a, b, memory_order_seq_cst);
}

void fd3(struct S *a, struct S *b, struct S *c) {
  // CHECK-LABEL: @fd3
  // CHECK:      [[A_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[C_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: store %struct.S* %a, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %c, %struct.S** [[C_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_C_PTR:%.*]] = load %struct.S*, %struct.S** [[C_ADDR]], align 4
  // CHECK-NEXT: [[COERCED_A_TMP:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_C:%.*]] = bitcast %struct.S* [[LOAD_C_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast i64* [[COERCED_A_TMP]] to i8*
  // CHECK-NEXT: [[LOAD_B:%.*]] = load i64, i64* [[COERCED_B]], align 4
  // CHECK-NEXT: [[CALL:%.*]] = call i64 @__atomic_exchange_8(i8* [[COERCED_A]], i64 [[LOAD_B]],
  // CHECK-NEXT: store i64 [[CALL]], i64* [[COERCED_C]], align 4

  __atomic_exchange(a, b, c, memory_order_seq_cst);
}

_Bool fd4(struct S *a, struct S *b, struct S *c) {
  // CHECK-LABEL: @fd4
  // CHECK:      [[A_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[C_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK:      store %struct.S* %a, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %c, %struct.S** [[C_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_C_PTR:%.*]] = load %struct.S*, %struct.S** [[C_ADDR]], align 4
  // CHECK-NEXT: [[COERCED_A_TMP:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_B_TMP:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_C:%.*]] = bitcast %struct.S* [[LOAD_C_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast i64* [[COERCED_A_TMP]] to i8*
  // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast i64* [[COERCED_B_TMP]] to i8*
  // CHECK-NEXT: [[LOAD_C:%.*]] = load i64, i64* [[COERCED_C]], align 4
  // CHECK-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange_8(i8* [[COERCED_A]], i8* [[COERCED_B]], i64 [[LOAD_C]]
  // CHECK-NEXT: ret i1 [[CALL]]
  return __atomic_compare_exchange(a, b, c, 1, 5, 5);
}

int* fp1(_Atomic(int*) *p) {
  // CHECK-LABEL: @fp1
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  return __c11_atomic_load(p, memory_order_seq_cst);
}

int* fp2(_Atomic(int*) *p) {
  // CHECK-LABEL: @fp2
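  // The C11 builtin scales the addend by sizeof(int) == 4 before the RMW
  // operation, which is why a constant 4 is stored and added below.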
  // CHECK: store i32 4
  // CHECK: atomicrmw add {{.*}} monotonic
  return __c11_atomic_fetch_add(p, 1, memory_order_relaxed);
}

int *fp2a(int **p) {
  // CHECK-LABEL: @fp2a
  // CHECK: store i32 4
  // CHECK: atomicrmw sub {{.*}} monotonic
  // Note, the GNU builtins do not multiply by sizeof(T)!
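  // The caller must supply the byte offset itself: the 4 below is the literal
  // argument, not 1 * sizeof(int).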
  return __atomic_fetch_sub(p, 4, memory_order_relaxed);
}

_Complex float fc(_Atomic(_Complex float) *c) {
  // CHECK-LABEL: @fc
  // CHECK: atomicrmw xchg i64*
  return __c11_atomic_exchange(c, 2, memory_order_seq_cst);
}

typedef struct X { int x; } X;
X fs(_Atomic(X) *c) {
  // CHECK-LABEL: @fs
  // CHECK: atomicrmw xchg i32*
  return __c11_atomic_exchange(c, (X){2}, memory_order_seq_cst);
}

X fsa(X *c, X *d) {
  // CHECK-LABEL: @fsa
  // CHECK: atomicrmw xchg i32*
  X ret;
  __atomic_exchange(c, d, &ret, memory_order_seq_cst);
  return ret;
}

_Bool fsb(_Bool *c) {
  // CHECK-LABEL: @fsb
  // CHECK: atomicrmw xchg i8*
  return __atomic_exchange_n(c, 1, memory_order_seq_cst);
}

char flag1;
volatile char flag2;
void test_and_set() {
  // CHECK: atomicrmw xchg i8* @flag1, i8 1 seq_cst
  __atomic_test_and_set(&flag1, memory_order_seq_cst);
  // CHECK: atomicrmw volatile xchg i8* @flag2, i8 1 acquire
  __atomic_test_and_set(&flag2, memory_order_acquire);
  // CHECK: store atomic volatile i8 0, i8* @flag2 release
  __atomic_clear(&flag2, memory_order_release);
  // CHECK: store atomic i8 0, i8* @flag1 seq_cst
  __atomic_clear(&flag1, memory_order_seq_cst);
}

struct Sixteen {
  char c[16];
} sixteen;
struct Seventeen {
  char c[17];
} seventeen;

struct Incomplete;

int lock_free(struct Incomplete *incomplete) {
  // CHECK-LABEL: @lock_free

  // CHECK: call i32 @__atomic_is_lock_free(i32 3, i8* null)
  __c11_atomic_is_lock_free(3);

  // CHECK: call i32 @__atomic_is_lock_free(i32 16, i8* {{.*}}@sixteen{{.*}})
  __atomic_is_lock_free(16, &sixteen);

  // CHECK: call i32 @__atomic_is_lock_free(i32 17, i8* {{.*}}@seventeen{{.*}})
  __atomic_is_lock_free(17, &seventeen);

  // CHECK: call i32 @__atomic_is_lock_free(i32 4, {{.*}})
  __atomic_is_lock_free(4, incomplete);

  char cs[20];
  // CHECK: call i32 @__atomic_is_lock_free(i32 4, {{.*}})
  __atomic_is_lock_free(4, cs+1);

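  // __atomic_always_lock_free must fold to a compile-time constant, so no
  // libcalls may be emitted for any of the following: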
  // CHECK-NOT: call
  __atomic_always_lock_free(3, 0);
  __atomic_always_lock_free(16, 0);
  __atomic_always_lock_free(17, 0);
  __atomic_always_lock_free(16, &sixteen);
  __atomic_always_lock_free(17, &seventeen);

  int n;
  __atomic_is_lock_free(4, &n);

  // CHECK: ret i32 1
  return __c11_atomic_is_lock_free(sizeof(_Atomic(int)));
}

// Tests for atomic operations on big values.  These should call the functions
// defined here:
// http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary#The_Library_interface
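//
// For reference, the generic entry points defined by that interface take the
// object size as their first argument, roughly:
//
//   void __atomic_load(size_t size, void *mem, void *ret, int order);
//   void __atomic_store(size_t size, void *mem, void *val, int order);
//   void __atomic_exchange(size_t size, void *mem, void *val, void *ret, int order);
//   bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
//                                  void *desired, int success, int failure);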

struct foo {
  int big[128];
};
struct bar {
  char c[3];
};

struct bar smallThing, thing1, thing2;
struct foo bigThing;
_Atomic(struct foo) bigAtomic;

void structAtomicStore() {
  // CHECK-LABEL: @structAtomicStore
  struct foo f = {0};
  struct bar b = {0};
  __atomic_store(&smallThing, &b, 5);
  // CHECK: call void @__atomic_store(i32 3, i8* {{.*}} @smallThing

  __atomic_store(&bigThing, &f, 5);
  // CHECK: call void @__atomic_store(i32 512, i8* {{.*}} @bigThing
}
void structAtomicLoad() {
  // CHECK-LABEL: @structAtomicLoad
  struct bar b;
  __atomic_load(&smallThing, &b, 5);
  // CHECK: call void @__atomic_load(i32 3, i8* {{.*}} @smallThing

  struct foo f = {0};
  __atomic_load(&bigThing, &f, 5);
  // CHECK: call void @__atomic_load(i32 512, i8* {{.*}} @bigThing
}
struct foo structAtomicExchange() {
  // CHECK-LABEL: @structAtomicExchange
  struct foo f = {0};
  struct foo old;
  __atomic_exchange(&f, &bigThing, &old, 5);
  // CHECK: call void @__atomic_exchange(i32 512, {{.*}}, i8* bitcast ({{.*}} @bigThing to i8*),

  return __c11_atomic_exchange(&bigAtomic, f, 5);
  // CHECK: call void @__atomic_exchange(i32 512, i8* bitcast ({{.*}} @bigAtomic to i8*),
}
int structAtomicCmpExchange() {
  // CHECK-LABEL: @structAtomicCmpExchange
  // CHECK: %[[x_mem:.*]] = alloca i8
  _Bool x = __atomic_compare_exchange(&smallThing, &thing1, &thing2, 1, 5, 5);
  // CHECK: %[[call1:.*]] = call zeroext i1 @__atomic_compare_exchange(i32 3, {{.*}} @smallThing{{.*}} @thing1{{.*}} @thing2
  // CHECK: %[[zext1:.*]] = zext i1 %[[call1]] to i8
  // CHECK: store i8 %[[zext1]], i8* %[[x_mem]], align 1
  // CHECK: %[[x:.*]] = load i8, i8* %[[x_mem]]
  // CHECK: %[[x_bool:.*]] = trunc i8 %[[x]] to i1
  // CHECK: %[[conv1:.*]] = zext i1 %[[x_bool]] to i32

  struct foo f = {0};
  struct foo g = {0};
  g.big[12] = 12;
  return x & __c11_atomic_compare_exchange_strong(&bigAtomic, &f, g, 5, 5);
  // CHECK: %[[call2:.*]] = call zeroext i1 @__atomic_compare_exchange(i32 512, i8* bitcast ({{.*}} @bigAtomic to i8*),
  // CHECK: %[[conv2:.*]] = zext i1 %[[call2]] to i32
  // CHECK: %[[and:.*]] = and i32 %[[conv1]], %[[conv2]]
  // CHECK: ret i32 %[[and]]
}

// Check that no atomic operations are used in any initialisation of _Atomic
// types.
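// Initialisation is not required to be atomic: the object cannot yet be
// accessed by other threads without a data race, so plain stores suffice.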
_Atomic(int) atomic_init_i = 42;

// CHECK-LABEL: @atomic_init_foo
void atomic_init_foo()
{
  // CHECK-NOT: }
  // CHECK-NOT: atomic
  // CHECK: store
  _Atomic(int) j = 12;

  // CHECK-NOT: }
  // CHECK-NOT: atomic
  // CHECK: store
  __c11_atomic_init(&j, 42);

  // CHECK-NOT: atomic
  // CHECK: }
}

// CHECK-LABEL: @failureOrder
void failureOrder(_Atomic(int) *ptr, int *ptr2) {
  __c11_atomic_compare_exchange_strong(ptr, ptr2, 43, memory_order_acquire, memory_order_relaxed);
  // CHECK: cmpxchg i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} acquire monotonic

  __c11_atomic_compare_exchange_weak(ptr, ptr2, 43, memory_order_seq_cst, memory_order_acquire);
  // CHECK: cmpxchg weak i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} seq_cst acquire

  // Unknown ordering: conservatively pick strongest valid option (for now!).
  __atomic_compare_exchange(ptr2, ptr2, ptr2, 0, memory_order_acq_rel, *ptr2);
  // CHECK: cmpxchg i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} acq_rel acquire
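  // (The failure order above is a runtime value; acquire is the strongest
  // ordering that is valid on the failure side of an acq_rel success.)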

  // Undefined behaviour: don't really care what that last ordering is so leave
  // it out:
  __atomic_compare_exchange_n(ptr2, ptr2, 43, 1, memory_order_seq_cst, 42);
  // CHECK: cmpxchg weak i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} seq_cst
}

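// When the orderings are only known at runtime, IRGen emits a two-level
// switch over the (success, failure) pair, defaulting to monotonic, with one
// cmpxchg per reachable combination: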
// CHECK-LABEL: @generalFailureOrder
void generalFailureOrder(_Atomic(int) *ptr, int *ptr2, int success, int fail) {
  __c11_atomic_compare_exchange_strong(ptr, ptr2, 42, success, fail);
  // CHECK: switch i32 {{.*}}, label %[[MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[ACQUIRE]]
  // CHECK-NEXT: i32 3, label %[[RELEASE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 4, label %[[ACQREL:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 5, label %[[SEQCST:[0-9a-zA-Z._]+]]

  // CHECK: [[MONOTONIC]]
  // CHECK: switch {{.*}}, label %[[MONOTONIC_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: ]

  // CHECK: [[ACQUIRE]]
  // CHECK: switch {{.*}}, label %[[ACQUIRE_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[ACQUIRE_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[ACQUIRE_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: ]

  // CHECK: [[RELEASE]]
  // CHECK: switch {{.*}}, label %[[RELEASE_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: ]

  // CHECK: [[ACQREL]]
  // CHECK: switch {{.*}}, label %[[ACQREL_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[ACQREL_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[ACQREL_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: ]

  // CHECK: [[SEQCST]]
  // CHECK: switch {{.*}}, label %[[SEQCST_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[SEQCST_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[SEQCST_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 5, label %[[SEQCST_SEQCST:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: ]

  // CHECK: [[MONOTONIC_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} monotonic monotonic
  // CHECK: br

  // CHECK: [[ACQUIRE_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} acquire monotonic
  // CHECK: br

  // CHECK: [[ACQUIRE_ACQUIRE]]
  // CHECK: cmpxchg {{.*}} acquire acquire
  // CHECK: br

  // CHECK: [[ACQREL_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} acq_rel monotonic
  // CHECK: br

  // CHECK: [[ACQREL_ACQUIRE]]
  // CHECK: cmpxchg {{.*}} acq_rel acquire
  // CHECK: br

  // CHECK: [[SEQCST_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} seq_cst monotonic
  // CHECK: br

  // CHECK: [[SEQCST_ACQUIRE]]
  // CHECK: cmpxchg {{.*}} seq_cst acquire
  // CHECK: br

  // CHECK: [[SEQCST_SEQCST]]
  // CHECK: cmpxchg {{.*}} seq_cst seq_cst
  // CHECK: br
}

void generalWeakness(int *ptr, int *ptr2, _Bool weak) {
  __atomic_compare_exchange_n(ptr, ptr2, 42, weak, memory_order_seq_cst, memory_order_seq_cst);
  // CHECK: switch i1 {{.*}}, label %[[WEAK:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i1 false, label %[[STRONG:[0-9a-zA-Z._]+]]

  // CHECK: [[STRONG]]
  // CHECK-NOT: br
  // CHECK: cmpxchg {{.*}} seq_cst seq_cst
  // CHECK: br

  // CHECK: [[WEAK]]
  // CHECK-NOT: br
  // CHECK: cmpxchg weak {{.*}} seq_cst seq_cst
  // CHECK: br
}

// Having checked the flow in the previous two cases, we'll trust clang to
// combine them sanely.
void EMIT_ALL_THE_THINGS(int *ptr, int *ptr2, int new, _Bool weak, int success, int fail) {
  __atomic_compare_exchange(ptr, ptr2, &new, weak, success, fail);

  // CHECK: = cmpxchg {{.*}} monotonic monotonic
  // CHECK: = cmpxchg weak {{.*}} monotonic monotonic
  // CHECK: = cmpxchg {{.*}} acquire monotonic
  // CHECK: = cmpxchg {{.*}} acquire acquire
  // CHECK: = cmpxchg weak {{.*}} acquire monotonic
  // CHECK: = cmpxchg weak {{.*}} acquire acquire
  // CHECK: = cmpxchg {{.*}} release monotonic
  // CHECK: = cmpxchg weak {{.*}} release monotonic
  // CHECK: = cmpxchg {{.*}} acq_rel monotonic
  // CHECK: = cmpxchg {{.*}} acq_rel acquire
  // CHECK: = cmpxchg weak {{.*}} acq_rel monotonic
  // CHECK: = cmpxchg weak {{.*}} acq_rel acquire
  // CHECK: = cmpxchg {{.*}} seq_cst monotonic
  // CHECK: = cmpxchg {{.*}} seq_cst acquire
  // CHECK: = cmpxchg {{.*}} seq_cst seq_cst
  // CHECK: = cmpxchg weak {{.*}} seq_cst monotonic
  // CHECK: = cmpxchg weak {{.*}} seq_cst acquire
  // CHECK: = cmpxchg weak {{.*}} seq_cst seq_cst
}

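// PR21643: the atomicrmw must operate directly on the addrspace(257) pointer
// rather than being bounced through the default address space.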
int PR21643() {
  return __atomic_or_fetch((int __attribute__((address_space(257))) *)0x308, 1,
                           __ATOMIC_RELAXED);
  // CHECK: %[[atomictmp:.*]] = alloca i32, align 4
  // CHECK: %[[atomicdst:.*]] = alloca i32, align 4
  // CHECK: store i32 1, i32* %[[atomictmp]]
  // CHECK: %[[one:.*]] = load i32, i32* %[[atomictmp]], align 4
  // CHECK: %[[old:.*]] = atomicrmw or i32 addrspace(257)* inttoptr (i32 776 to i32 addrspace(257)*), i32 %[[one]] monotonic
  // CHECK: %[[new:.*]] = or i32 %[[old]], %[[one]]
  // CHECK: store i32 %[[new]], i32* %[[atomicdst]], align 4
  // CHECK: %[[ret:.*]] = load i32, i32* %[[atomicdst]], align 4
  // CHECK: ret i32 %[[ret]]
}

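// PR17306: the volatile qualifier must be preserved on the generated atomic
// instructions (load atomic volatile / atomicrmw volatile below).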
int PR17306_1(volatile _Atomic(int) *i) {
  // CHECK-LABEL: @PR17306_1
  // CHECK:      %[[i_addr:.*]] = alloca i32
  // CHECK-NEXT: %[[atomicdst:.*]] = alloca i32
  // CHECK-NEXT: store i32* %i, i32** %[[i_addr]]
  // CHECK-NEXT: %[[addr:.*]] = load i32*, i32** %[[i_addr]]
  // CHECK-NEXT: %[[res:.*]] = load atomic volatile i32, i32* %[[addr]] seq_cst
  // CHECK-NEXT: store i32 %[[res]], i32* %[[atomicdst]]
  // CHECK-NEXT: %[[retval:.*]] = load i32, i32* %[[atomicdst]]
  // CHECK-NEXT: ret i32 %[[retval]]
  return __c11_atomic_load(i, memory_order_seq_cst);
}

int PR17306_2(volatile int *i, int value) {
  // CHECK-LABEL: @PR17306_2
  // CHECK:      %[[i_addr:.*]] = alloca i32*
  // CHECK-NEXT: %[[value_addr:.*]] = alloca i32
  // CHECK-NEXT: %[[atomictmp:.*]] = alloca i32
  // CHECK-NEXT: %[[atomicdst:.*]] = alloca i32
  // CHECK-NEXT: store i32* %i, i32** %[[i_addr]]
  // CHECK-NEXT: store i32 %value, i32* %[[value_addr]]
  // CHECK-NEXT: %[[i_lval:.*]] = load i32*, i32** %[[i_addr]]
  // CHECK-NEXT: %[[value:.*]] = load i32, i32* %[[value_addr]]
  // CHECK-NEXT: store i32 %[[value]], i32* %[[atomictmp]]
  // CHECK-NEXT: %[[value_lval:.*]] = load i32, i32* %[[atomictmp]]
  // CHECK-NEXT: %[[old_val:.*]] = atomicrmw volatile add i32* %[[i_lval]], i32 %[[value_lval]] seq_cst
  // CHECK-NEXT: %[[new_val:.*]] = add i32 %[[old_val]], %[[value_lval]]
  // CHECK-NEXT: store i32 %[[new_val]], i32* %[[atomicdst]]
  // CHECK-NEXT: %[[retval:.*]] = load i32, i32* %[[atomicdst]]
  // CHECK-NEXT: ret i32 %[[retval]]
  return __atomic_add_fetch(i, value, memory_order_seq_cst);
}

#endif