//===-- sanitizer_atomic_clang_other.h --------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
#define SANITIZER_ATOMIC_CLANG_OTHER_H

namespace __sanitizer {

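// Hint that the caller is in a spin-wait loop. This generic fallback has no
// architecture-specific pause/yield instruction, so it is only a compiler
// barrier; the cnt argument is unused here.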
INLINE void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
}

template<typename T>
INLINE typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned loads are atomic.
    if (mo == memory_order_relaxed) {
      v = a->val_dont_use;
    } else if (mo == memory_order_consume) {
      // Assume that processor respects data dependencies
      // (and that compiler won't break them).
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    } else if (mo == memory_order_acquire) {
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __sync_synchronize();
    } else {  // seq_cst
      // E.g. on POWER we need a hw fence even before the load.
      __sync_synchronize();
      v = a->val_dont_use;
      __sync_synchronize();
    }
  } else {
    // 64-bit load on 32-bit platform.
    // Gross, but simple and reliable.
    // Assume that it is not in read-only memory.
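    // A fetch-and-add of zero is a no-op read-modify-write, so it yields an
    // atomic 64-bit read even where a plain 64-bit load would not be atomic.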
    v = __sync_fetch_and_add(
        const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
  }
  return v;
}
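
// Illustrative usage (a sketch, assuming the atomic_uint32_t wrapper and u32
// type declared via sanitizer_atomic.h):
//   atomic_uint32_t flag;
//   u32 ready = atomic_load(&flag, memory_order_acquire);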

template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned stores are atomic.
    if (mo == memory_order_relaxed) {
      a->val_dont_use = v;
    } else if (mo == memory_order_release) {
      __sync_synchronize();
      a->val_dont_use = v;
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      __sync_synchronize();
      a->val_dont_use = v;
      __sync_synchronize();
    }
  } else {
    // 64-bit store on 32-bit platform.
    // Gross, but simple and reliable.
    typename T::Type cmp = a->val_dont_use;
    typename T::Type cur;
    for (;;) {
      cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
      // The CAS stored v iff the value it observed equals the expected value;
      // otherwise retry with the freshly observed value.
      if (cur == cmp)
        break;
      cmp = cur;
    }
  }
}
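
// Illustrative usage (a sketch, using the same atomic_uint32_t wrapper as in
// the load example above):
//   atomic_uint32_t flag;
//   atomic_store(&flag, 1u, memory_order_release);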

}  // namespace __sanitizer

#endif  // #ifndef SANITIZER_ATOMIC_CLANG_OTHER_H