//===-- sanitizer_atomic_clang.h --------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_H
#define SANITIZER_ATOMIC_CLANG_H

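// Usage sketch (illustrative; the concrete atomic types live in
// sanitizer_atomic.h, which defines e.g. atomic_uint32_t with the nested
// Type typedef and val_dont_use field these templates operate on):
//
//   atomic_uint32_t flag;
//   atomic_store(&flag, 1, memory_order_release);
//   u32 v = atomic_load(&flag, memory_order_acquire);
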
namespace __sanitizer {

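// Compiler-only fence: the empty asm with a "memory" clobber keeps the
// compiler from moving memory accesses across it, but emits no CPU barrier.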
INLINE void atomic_signal_fence(memory_order) {
  __asm__ __volatile__("" ::: "memory");
}

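// Full hardware memory barrier via the GCC/Clang __sync builtin.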
INLINE void atomic_thread_fence(memory_order) {
  __sync_synchronize();
}

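// Spin-wait hint: on x86 the "pause" instruction lowers power use and avoids
// memory-order mis-speculation penalties in busy-wait loops; on other
// architectures this degenerates to a pair of compiler barriers.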
INLINE void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
#if defined(__i386__) || defined(__x86_64__)
  for (int i = 0; i < cnt; i++)
    __asm__ __volatile__("pause");
#endif
  __asm__ __volatile__("" ::: "memory");
}

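// Plain load bracketed by compiler fences; on strongly-ordered targets such
// as x86 this is sufficient for the acquire-style orders asserted below.
// The memory_order enumerators are distinct bits (see sanitizer_atomic.h),
// so the set of orders valid for a load can be checked with a single mask.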
template<typename T>
INLINE typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;
  // FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
  if (mo == memory_order_relaxed) {
    v = a->val_dont_use;
  } else {
    atomic_signal_fence(memory_order_seq_cst);
    v = a->val_dont_use;
    atomic_signal_fence(memory_order_seq_cst);
  }
  return v;
}

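// Mirror of atomic_load: plain store between compiler fences, plus a full
// hardware barrier after the store when sequential consistency is requested.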
template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  // FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
  if (mo == memory_order_relaxed) {
    a->val_dont_use = v;
  } else {
    atomic_signal_fence(memory_order_seq_cst);
    a->val_dont_use = v;
    atomic_signal_fence(memory_order_seq_cst);
  }
  if (mo == memory_order_seq_cst)
    atomic_thread_fence(memory_order_seq_cst);
}

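// __sync_fetch_and_add is already a full barrier, so the requested memory
// order is accepted but deliberately ignored.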
template<typename T>
INLINE typename T::Type atomic_fetch_add(volatile T *a,
    typename T::Type v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return __sync_fetch_and_add(&a->val_dont_use, v);
}

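// Subtraction is expressed as fetch-and-add of the negated value; as above,
// the builtin provides a full barrier regardless of mo.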
template<typename T>
INLINE typename T::Type atomic_fetch_sub(volatile T *a,
    typename T::Type v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return __sync_fetch_and_add(&a->val_dont_use, -v);
}

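// __sync_lock_test_and_set is only an acquire barrier, so a full barrier is
// issued manually before the exchange for the release-containing orders, and
// after it for seq_cst.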
template<typename T>
INLINE typename T::Type atomic_exchange(volatile T *a,
    typename T::Type v, memory_order mo) {
  DCHECK(!((uptr)a % sizeof(*a)));
  if (mo & (memory_order_release | memory_order_acq_rel | memory_order_seq_cst))
    __sync_synchronize();
  v = __sync_lock_test_and_set(&a->val_dont_use, v);
  if (mo == memory_order_seq_cst)
    __sync_synchronize();
  return v;
}

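// Strong compare-and-swap on top of __sync_val_compare_and_swap (a full
// barrier). On failure the observed value is reported back through *cmp,
// matching the C11 compare_exchange contract.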
template<typename T>
INLINE bool atomic_compare_exchange_strong(volatile T *a,
                                           typename T::Type *cmp,
                                           typename T::Type xchg,
                                           memory_order mo) {
  typedef typename T::Type Type;
  Type cmpv = *cmp;
  Type prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

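// The weak form simply forwards to the strong one: the __sync builtins do
// not fail spuriously, so there is nothing weaker to expose.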
template<typename T>
INLINE bool atomic_compare_exchange_weak(volatile T *a,
                                         typename T::Type *cmp,
                                         typename T::Type xchg,
                                         memory_order mo) {
  return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}

}  // namespace __sanitizer

#endif  // SANITIZER_ATOMIC_CLANG_H