; RUN: opt < %s -tsan -S | FileCheck %s
; Check that atomic memory operations are converted to calls into the ThreadSanitizer runtime.
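; Note: the trailing i32 argument of each __tsan_atomic* call encodes the memory
; order. Judging by the CHECK lines below, this version of the runtime uses a
; one-hot encoding (1 << C++11 memory_order): unordered/monotonic = 1,
; acquire = 4, release = 8, seq_cst = 32.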
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

define i8 @atomic8_load_unordered(i8* %a) nounwind uwtable {
entry:
  %0 = load atomic i8* %a unordered, align 1
  ret i8 %0
}
; CHECK: atomic8_load_unordered
; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 1)

define i8 @atomic8_load_monotonic(i8* %a) nounwind uwtable {
entry:
  %0 = load atomic i8* %a monotonic, align 1
  ret i8 %0
}
; CHECK: atomic8_load_monotonic
; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 1)

define i8 @atomic8_load_acquire(i8* %a) nounwind uwtable {
entry:
  %0 = load atomic i8* %a acquire, align 1
  ret i8 %0
}
; CHECK: atomic8_load_acquire
; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 4)

define i8 @atomic8_load_seq_cst(i8* %a) nounwind uwtable {
entry:
  %0 = load atomic i8* %a seq_cst, align 1
  ret i8 %0
}
; CHECK: atomic8_load_seq_cst
; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 32)

define void @atomic8_store_unordered(i8* %a) nounwind uwtable {
entry:
  store atomic i8 0, i8* %a unordered, align 1
  ret void
}
; CHECK: atomic8_store_unordered
; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 1)

define void @atomic8_store_monotonic(i8* %a) nounwind uwtable {
entry:
  store atomic i8 0, i8* %a monotonic, align 1
  ret void
}
; CHECK: atomic8_store_monotonic
; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 1)

define void @atomic8_store_release(i8* %a) nounwind uwtable {
entry:
  store atomic i8 0, i8* %a release, align 1
  ret void
}
; CHECK: atomic8_store_release
; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 8)

define void @atomic8_store_seq_cst(i8* %a) nounwind uwtable {
entry:
  store atomic i8 0, i8* %a seq_cst, align 1
  ret void
}
; CHECK: atomic8_store_seq_cst
; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 32)

define i16 @atomic16_load_unordered(i16* %a) nounwind uwtable {
entry:
  %0 = load atomic i16* %a unordered, align 2
  ret i16 %0
}
; CHECK: atomic16_load_unordered
; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 1)

define i16 @atomic16_load_monotonic(i16* %a) nounwind uwtable {
entry:
  %0 = load atomic i16* %a monotonic, align 2
  ret i16 %0
}
; CHECK: atomic16_load_monotonic
; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 1)

define i16 @atomic16_load_acquire(i16* %a) nounwind uwtable {
entry:
  %0 = load atomic i16* %a acquire, align 2
  ret i16 %0
}
; CHECK: atomic16_load_acquire
; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 4)

define i16 @atomic16_load_seq_cst(i16* %a) nounwind uwtable {
entry:
  %0 = load atomic i16* %a seq_cst, align 2
  ret i16 %0
}
; CHECK: atomic16_load_seq_cst
; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 32)

define void @atomic16_store_unordered(i16* %a) nounwind uwtable {
entry:
  store atomic i16 0, i16* %a unordered, align 2
  ret void
}
; CHECK: atomic16_store_unordered
; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 1)

define void @atomic16_store_monotonic(i16* %a) nounwind uwtable {
entry:
  store atomic i16 0, i16* %a monotonic, align 2
  ret void
}
; CHECK: atomic16_store_monotonic
; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 1)

define void @atomic16_store_release(i16* %a) nounwind uwtable {
entry:
  store atomic i16 0, i16* %a release, align 2
  ret void
}
; CHECK: atomic16_store_release
; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 8)

define void @atomic16_store_seq_cst(i16* %a) nounwind uwtable {
entry:
  store atomic i16 0, i16* %a seq_cst, align 2
  ret void
}
; CHECK: atomic16_store_seq_cst
; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 32)

define i32 @atomic32_load_unordered(i32* %a) nounwind uwtable {
entry:
  %0 = load atomic i32* %a unordered, align 4
  ret i32 %0
}
; CHECK: atomic32_load_unordered
; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 1)

define i32 @atomic32_load_monotonic(i32* %a) nounwind uwtable {
entry:
  %0 = load atomic i32* %a monotonic, align 4
  ret i32 %0
}
; CHECK: atomic32_load_monotonic
; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 1)

define i32 @atomic32_load_acquire(i32* %a) nounwind uwtable {
entry:
  %0 = load atomic i32* %a acquire, align 4
  ret i32 %0
}
; CHECK: atomic32_load_acquire
; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 4)

define i32 @atomic32_load_seq_cst(i32* %a) nounwind uwtable {
entry:
  %0 = load atomic i32* %a seq_cst, align 4
  ret i32 %0
}
; CHECK: atomic32_load_seq_cst
; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 32)

define void @atomic32_store_unordered(i32* %a) nounwind uwtable {
entry:
  store atomic i32 0, i32* %a unordered, align 4
  ret void
}
; CHECK: atomic32_store_unordered
; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 1)

define void @atomic32_store_monotonic(i32* %a) nounwind uwtable {
entry:
  store atomic i32 0, i32* %a monotonic, align 4
  ret void
}
; CHECK: atomic32_store_monotonic
; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 1)

define void @atomic32_store_release(i32* %a) nounwind uwtable {
entry:
  store atomic i32 0, i32* %a release, align 4
  ret void
}
; CHECK: atomic32_store_release
; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 8)

define void @atomic32_store_seq_cst(i32* %a) nounwind uwtable {
entry:
  store atomic i32 0, i32* %a seq_cst, align 4
  ret void
}
; CHECK: atomic32_store_seq_cst
; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 32)

define i64 @atomic64_load_unordered(i64* %a) nounwind uwtable {
entry:
  %0 = load atomic i64* %a unordered, align 8
  ret i64 %0
}
; CHECK: atomic64_load_unordered
; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 1)

define i64 @atomic64_load_monotonic(i64* %a) nounwind uwtable {
entry:
  %0 = load atomic i64* %a monotonic, align 8
  ret i64 %0
}
; CHECK: atomic64_load_monotonic
; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 1)

define i64 @atomic64_load_acquire(i64* %a) nounwind uwtable {
entry:
  %0 = load atomic i64* %a acquire, align 8
  ret i64 %0
}
; CHECK: atomic64_load_acquire
; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 4)

define i64 @atomic64_load_seq_cst(i64* %a) nounwind uwtable {
entry:
  %0 = load atomic i64* %a seq_cst, align 8
  ret i64 %0
}
; CHECK: atomic64_load_seq_cst
; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 32)

define void @atomic64_store_unordered(i64* %a) nounwind uwtable {
entry:
  store atomic i64 0, i64* %a unordered, align 8
  ret void
}
; CHECK: atomic64_store_unordered
; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 1)

define void @atomic64_store_monotonic(i64* %a) nounwind uwtable {
entry:
  store atomic i64 0, i64* %a monotonic, align 8
  ret void
}
; CHECK: atomic64_store_monotonic
; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 1)

define void @atomic64_store_release(i64* %a) nounwind uwtable {
entry:
  store atomic i64 0, i64* %a release, align 8
  ret void
}
; CHECK: atomic64_store_release
; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 8)

define void @atomic64_store_seq_cst(i64* %a) nounwind uwtable {
entry:
  store atomic i64 0, i64* %a seq_cst, align 8
  ret void
}
; CHECK: atomic64_store_seq_cst
; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 32)

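; 128-bit (16-byte) atomic loads and stores are instrumented the same way,
; via the __tsan_atomic128_* runtime entry points, as the CHECK lines below show.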
define i128 @atomic128_load_unordered(i128* %a) nounwind uwtable {
entry:
  %0 = load atomic i128* %a unordered, align 16
  ret i128 %0
}
; CHECK: atomic128_load_unordered
; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 1)

define i128 @atomic128_load_monotonic(i128* %a) nounwind uwtable {
entry:
  %0 = load atomic i128* %a monotonic, align 16
  ret i128 %0
}
; CHECK: atomic128_load_monotonic
; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 1)

define i128 @atomic128_load_acquire(i128* %a) nounwind uwtable {
entry:
  %0 = load atomic i128* %a acquire, align 16
  ret i128 %0
}
; CHECK: atomic128_load_acquire
; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 4)

define i128 @atomic128_load_seq_cst(i128* %a) nounwind uwtable {
entry:
  %0 = load atomic i128* %a seq_cst, align 16
  ret i128 %0
}
; CHECK: atomic128_load_seq_cst
; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 32)

define void @atomic128_store_unordered(i128* %a) nounwind uwtable {
entry:
  store atomic i128 0, i128* %a unordered, align 16
  ret void
}
; CHECK: atomic128_store_unordered
; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 1)

define void @atomic128_store_monotonic(i128* %a) nounwind uwtable {
entry:
  store atomic i128 0, i128* %a monotonic, align 16
  ret void
}
; CHECK: atomic128_store_monotonic
; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 1)

define void @atomic128_store_release(i128* %a) nounwind uwtable {
entry:
  store atomic i128 0, i128* %a release, align 16
  ret void
}
; CHECK: atomic128_store_release
; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 8)

define void @atomic128_store_seq_cst(i128* %a) nounwind uwtable {
entry:
  store atomic i128 0, i128* %a seq_cst, align 16
  ret void
}
; CHECK: atomic128_store_seq_cst
; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 32)