// RUN: %clang_cc1 %s -emit-llvm -o - -triple=x86_64-apple-macosx10.9.0 | FileCheck %s
// REQUIRES: x86-registered-target
// Also test serialization of atomic operations here, to avoid duplicating the
// test.
// RUN: %clang_cc1 %s -emit-pch -o %t -triple=x86_64-apple-macosx10.9.0
// RUN: %clang_cc1 %s -include-pch %t -triple=x86_64-apple-macosx10.9.0 -emit-llvm -o - | FileCheck %s
#ifndef ALREADY_INCLUDED
#define ALREADY_INCLUDED

// Basic IRGen tests for __c11_atomic_* and GNU __atomic_*

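// This enum mirrors C11's memory_order (same enumerator names, order, and
// values), so the builtins below can be given ordering constants without
// including <stdatomic.h>.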
typedef enum memory_order {
  memory_order_relaxed, memory_order_consume, memory_order_acquire,
  memory_order_release, memory_order_acq_rel, memory_order_seq_cst
} memory_order;

int fi1(_Atomic(int) *i) {
  // CHECK: @fi1
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  return __c11_atomic_load(i, memory_order_seq_cst);
}

int fi1a(int *i) {
  // CHECK: @fi1a
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  int v;
  __atomic_load(i, &v, memory_order_seq_cst);
  return v;
}

int fi1b(int *i) {
  // CHECK: @fi1b
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  return __atomic_load_n(i, memory_order_seq_cst);
}

void fi2(_Atomic(int) *i) {
  // CHECK: @fi2
  // CHECK: store atomic i32 {{.*}} seq_cst
  __c11_atomic_store(i, 1, memory_order_seq_cst);
}

void fi2a(int *i) {
  // CHECK: @fi2a
  // CHECK: store atomic i32 {{.*}} seq_cst
  int v = 1;
  __atomic_store(i, &v, memory_order_seq_cst);
}

void fi2b(int *i) {
  // CHECK: @fi2b
  // CHECK: store atomic i32 {{.*}} seq_cst
  __atomic_store_n(i, 1, memory_order_seq_cst);
}

int fi3(_Atomic(int) *i) {
  // CHECK: @fi3
  // CHECK: atomicrmw and
  // CHECK-NOT: and
  return __c11_atomic_fetch_and(i, 1, memory_order_seq_cst);
}

int fi3a(int *i) {
  // CHECK: @fi3a
  // CHECK: atomicrmw xor
  // CHECK-NOT: xor
  return __atomic_fetch_xor(i, 1, memory_order_seq_cst);
}

int fi3b(int *i) {
  // CHECK: @fi3b
  // CHECK: atomicrmw add
  // CHECK: add
  return __atomic_add_fetch(i, 1, memory_order_seq_cst);
}

int fi3c(int *i) {
  // CHECK: @fi3c
  // CHECK: atomicrmw nand
  // CHECK-NOT: and
  return __atomic_fetch_nand(i, 1, memory_order_seq_cst);
}

int fi3d(int *i) {
  // CHECK: @fi3d
  // CHECK: atomicrmw nand
  // CHECK: and
  // CHECK: xor
  return __atomic_nand_fetch(i, 1, memory_order_seq_cst);
}

_Bool fi4(_Atomic(int) *i) {
  // CHECK: @fi4
  // CHECK: cmpxchg i32*
  int cmp = 0;
  return __c11_atomic_compare_exchange_strong(i, &cmp, 1, memory_order_acquire, memory_order_acquire);
}

_Bool fi4a(int *i) {
  // CHECK: @fi4a
  // CHECK: cmpxchg i32*
  int cmp = 0;
  int desired = 1;
  return __atomic_compare_exchange(i, &cmp, &desired, 0, memory_order_acquire, memory_order_acquire);
}

_Bool fi4b(int *i) {
  // CHECK: @fi4b
  // CHECK: cmpxchg weak i32*
  int cmp = 0;
  return __atomic_compare_exchange_n(i, &cmp, 1, 1, memory_order_acquire, memory_order_acquire);
}

float ff1(_Atomic(float) *d) {
  // CHECK: @ff1
  // CHECK: load atomic i32, i32* {{.*}} monotonic
  return __c11_atomic_load(d, memory_order_relaxed);
}

void ff2(_Atomic(float) *d) {
  // CHECK: @ff2
  // CHECK: store atomic i32 {{.*}} release
  __c11_atomic_store(d, 1, memory_order_release);
}

float ff3(_Atomic(float) *d) {
  return __c11_atomic_exchange(d, 2, memory_order_seq_cst);
}

int* fp1(_Atomic(int*) *p) {
  // CHECK: @fp1
  // CHECK: load atomic i64, i64* {{.*}} seq_cst
  return __c11_atomic_load(p, memory_order_seq_cst);
}

int* fp2(_Atomic(int*) *p) {
  // CHECK: @fp2
  // CHECK: store i64 4
  // CHECK: atomicrmw add {{.*}} monotonic
  return __c11_atomic_fetch_add(p, 1, memory_order_relaxed);
}

int *fp2a(int **p) {
  // CHECK: @fp2a
  // CHECK: store i64 4
  // CHECK: atomicrmw sub {{.*}} monotonic
  // Note, the GNU builtins do not multiply by sizeof(T)!
  return __atomic_fetch_sub(p, 4, memory_order_relaxed);
}
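
// Illustrative sketch, no CHECK lines: because the GNU builtins work in
// bytes, advancing an int* by one element needs an explicit sizeof scale,
// the element-wise equivalent of __c11_atomic_fetch_add(p, 1,
// memory_order_relaxed) in fp2 above.
int *fp2b(int **p) {
  return __atomic_fetch_add(p, sizeof(int *), memory_order_relaxed);
}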

_Complex float fc(_Atomic(_Complex float) *c) {
  // CHECK: @fc
  // CHECK: atomicrmw xchg i64*
  return __c11_atomic_exchange(c, 2, memory_order_seq_cst);
}

typedef struct X { int x; } X;
X fs(_Atomic(X) *c) {
  // CHECK: @fs
  // CHECK: atomicrmw xchg i32*
  return __c11_atomic_exchange(c, (X){2}, memory_order_seq_cst);
}

X fsa(X *c, X *d) {
  // CHECK: @fsa
  // CHECK: atomicrmw xchg i32*
  X ret;
  __atomic_exchange(c, d, &ret, memory_order_seq_cst);
  return ret;
}

_Bool fsb(_Bool *c) {
  // CHECK: @fsb
  // CHECK: atomicrmw xchg i8*
  return __atomic_exchange_n(c, 1, memory_order_seq_cst);
}

char flag1;
volatile char flag2;
void test_and_set() {
  // CHECK: atomicrmw xchg i8* @flag1, i8 1 seq_cst
  __atomic_test_and_set(&flag1, memory_order_seq_cst);
  // CHECK: atomicrmw volatile xchg i8* @flag2, i8 1 acquire
  __atomic_test_and_set(&flag2, memory_order_acquire);
  // CHECK: store atomic volatile i8 0, i8* @flag2 release
  __atomic_clear(&flag2, memory_order_release);
  // CHECK: store atomic i8 0, i8* @flag1 seq_cst
  __atomic_clear(&flag1, memory_order_seq_cst);
}

struct Sixteen {
  char c[16];
} sixteen;
struct Seventeen {
  char c[17];
} seventeen;

int lock_free(struct Incomplete *incomplete) {
  // CHECK: @lock_free

  // CHECK: call i32 @__atomic_is_lock_free(i64 3, i8* null)
  __c11_atomic_is_lock_free(3);

  // CHECK: call i32 @__atomic_is_lock_free(i64 16, i8* {{.*}}@sixteen{{.*}})
  __atomic_is_lock_free(16, &sixteen);

  // CHECK: call i32 @__atomic_is_lock_free(i64 17, i8* {{.*}}@seventeen{{.*}})
  __atomic_is_lock_free(17, &seventeen);

  // CHECK: call i32 @__atomic_is_lock_free(i64 4, {{.*}})
  __atomic_is_lock_free(4, incomplete);

  char cs[20];
  // CHECK: call i32 @__atomic_is_lock_free(i64 4, {{.*}})
  __atomic_is_lock_free(4, cs+1);

  // CHECK-NOT: call
  __atomic_always_lock_free(3, 0);
  __atomic_always_lock_free(16, 0);
  __atomic_always_lock_free(17, 0);
  __atomic_always_lock_free(16, &sixteen);
  __atomic_always_lock_free(17, &seventeen);

  int n;
  __atomic_is_lock_free(4, &n);

  // CHECK: ret i32 1
  return __c11_atomic_is_lock_free(sizeof(_Atomic(int)));
}

// Tests for atomic operations on big values.  These should call the functions
// defined here:
// http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary#The_Library_interface
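//
// For reference, the generic entry points the CHECK lines below expect take
// the object size as their first argument; the signatures are paraphrased
// from that wiki page and are an illustrative sketch, not declarations to
// add to this file:
//   void  __atomic_load    (size_t size, void *mem, void *ret, int order);
//   void  __atomic_store   (size_t size, void *mem, void *val, int order);
//   void  __atomic_exchange(size_t size, void *mem, void *val, void *ret, int order);
//   _Bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
//                                   void *desired, int success, int failure);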

struct foo {
  int big[128];
};
struct bar {
  char c[3];
};

struct bar smallThing, thing1, thing2;
struct foo bigThing;
_Atomic(struct foo) bigAtomic;

void structAtomicStore() {
  // CHECK: @structAtomicStore
  struct foo f = {0};
  __c11_atomic_store(&bigAtomic, f, 5);
  // CHECK: call void @__atomic_store(i64 512, i8* bitcast ({{.*}} @bigAtomic to i8*),

  struct bar b = {0};
  __atomic_store(&smallThing, &b, 5);
  // CHECK: call void @__atomic_store(i64 3, i8* {{.*}} @smallThing

  __atomic_store(&bigThing, &f, 5);
  // CHECK: call void @__atomic_store(i64 512, i8* {{.*}} @bigThing
}
void structAtomicLoad() {
  // CHECK: @structAtomicLoad
  struct foo f = __c11_atomic_load(&bigAtomic, 5);
  // CHECK: call void @__atomic_load(i64 512, i8* bitcast ({{.*}} @bigAtomic to i8*),

  struct bar b;
  __atomic_load(&smallThing, &b, 5);
  // CHECK: call void @__atomic_load(i64 3, i8* {{.*}} @smallThing

  __atomic_load(&bigThing, &f, 5);
  // CHECK: call void @__atomic_load(i64 512, i8* {{.*}} @bigThing
}
struct foo structAtomicExchange() {
  // CHECK: @structAtomicExchange
  struct foo f = {0};
  struct foo old;
  __atomic_exchange(&f, &bigThing, &old, 5);
  // CHECK: call void @__atomic_exchange(i64 512, {{.*}}, i8* bitcast ({{.*}} @bigThing to i8*),

  return __c11_atomic_exchange(&bigAtomic, f, 5);
  // CHECK: call void @__atomic_exchange(i64 512, i8* bitcast ({{.*}} @bigAtomic to i8*),
}
int structAtomicCmpExchange() {
  // CHECK: @structAtomicCmpExchange
  _Bool x = __atomic_compare_exchange(&smallThing, &thing1, &thing2, 1, 5, 5);
  // CHECK: call zeroext i1 @__atomic_compare_exchange(i64 3, {{.*}} @smallThing{{.*}} @thing1{{.*}} @thing2

  struct foo f = {0};
  struct foo g = {0};
  g.big[12] = 12;
  return x & __c11_atomic_compare_exchange_strong(&bigAtomic, &f, g, 5, 5);
  // CHECK: call zeroext i1 @__atomic_compare_exchange(i64 512, i8* bitcast ({{.*}} @bigAtomic to i8*),
}

// Check that no atomic operations are used in any initialisation of _Atomic
// types.
_Atomic(int) atomic_init_i = 42;

// CHECK: @atomic_init_foo
void atomic_init_foo()
{
  // CHECK-NOT: }
  // CHECK-NOT: atomic
  // CHECK: store
  _Atomic(int) j = 12;

  // CHECK-NOT: }
  // CHECK-NOT: atomic
  // CHECK: store
  __c11_atomic_init(&j, 42);

  // CHECK-NOT: atomic
  // CHECK: }
}
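
// Illustrative sketch, no CHECK lines: unlike initialisation, a plain
// assignment to an _Atomic object is lowered to an atomic store (seq_cst).
void atomic_init_vs_assign(void)
{
  _Atomic(int) k = 12;       // initialisation: plain store
  __c11_atomic_init(&k, 42); // initialisation: plain store
  k = 1;                     // assignment: seq_cst atomic store
}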

#endif