//===-- sanitizer_atomic_msvc.h ---------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_MSVC_H
#define SANITIZER_ATOMIC_MSVC_H

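// The intrinsics used below are declared by hand instead of via <intrin.h>,
// presumably to keep this header free of Windows system headers; the
// signatures match the documented MSVC intrinsics.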
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
extern "C" void _mm_mfence();
#pragma intrinsic(_mm_mfence)
extern "C" void _mm_pause();
#pragma intrinsic(_mm_pause)
extern "C" long _InterlockedExchangeAdd(  // NOLINT
    long volatile * Addend, long Value);  // NOLINT
#pragma intrinsic(_InterlockedExchangeAdd)
extern "C" short _InterlockedCompareExchange16(  // NOLINT
    short volatile *Destination,                 // NOLINT
    short Exchange, short Comparand);            // NOLINT
#pragma intrinsic(_InterlockedCompareExchange16)
extern "C" long long _InterlockedCompareExchange64(  // NOLINT
    long long volatile *Destination,                 // NOLINT
    long long Exchange, long long Comparand);        // NOLINT
#pragma intrinsic(_InterlockedCompareExchange64)
extern "C" void *_InterlockedCompareExchangePointer(
    void *volatile *Destination,
    void *Exchange, void *Comparand);
#pragma intrinsic(_InterlockedCompareExchangePointer)
extern "C" long __cdecl _InterlockedCompareExchange(  // NOLINT
    long volatile *Destination,                       // NOLINT
    long Exchange, long Comparand);                   // NOLINT
#pragma intrinsic(_InterlockedCompareExchange)

#ifdef _WIN64
extern "C" long long _InterlockedExchangeAdd64(     // NOLINT
    long long volatile * Addend, long long Value);  // NOLINT
#pragma intrinsic(_InterlockedExchangeAdd64)
#endif

namespace __sanitizer {

// Compiler barrier only: prevents compiler reordering, emits no instruction.
INLINE void atomic_signal_fence(memory_order) {
  _ReadWriteBarrier();
}

// Full hardware memory fence.
INLINE void atomic_thread_fence(memory_order) {
  _mm_mfence();
}

// Hint to the processor that this thread is spin-waiting.
INLINE void proc_yield(int cnt) {
  for (int i = 0; i < cnt; i++)
    _mm_pause();
}
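
// Usage sketch (illustrative only; "flag" is a hypothetical atomic_uint32_t
// defined via sanitizer_atomic.h): a polite spin-wait built from these
// primitives could look like
//   while (atomic_load(&flag, memory_order_acquire) == 0)
//     proc_yield(10);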

template<typename T>
INLINE typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;
  // FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
  // On x86 an aligned load of register width is atomic; the signal fences
  // only restrain the compiler, which suffices for acquire ordering under
  // the x86-TSO hardware memory model.
  if (mo == memory_order_relaxed) {
    v = a->val_dont_use;
  } else {
    atomic_signal_fence(memory_order_seq_cst);
    v = a->val_dont_use;
    atomic_signal_fence(memory_order_seq_cst);
  }
  return v;
}

template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  // FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
  if (mo == memory_order_relaxed) {
    a->val_dont_use = v;
  } else {
    atomic_signal_fence(memory_order_seq_cst);
    a->val_dont_use = v;
    atomic_signal_fence(memory_order_seq_cst);
  }
  // A seq_cst store additionally needs a hardware fence to order it with
  // later loads; a plain x86 store only provides release semantics.
  if (mo == memory_order_seq_cst)
    atomic_thread_fence(memory_order_seq_cst);
}

INLINE u32 atomic_fetch_add(volatile atomic_uint32_t *a,
    u32 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return (u32)_InterlockedExchangeAdd(
      (volatile long*)&a->val_dont_use, (long)v);  // NOLINT
}

INLINE uptr atomic_fetch_add(volatile atomic_uintptr_t *a,
    uptr v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
#ifdef _WIN64
  return (uptr)_InterlockedExchangeAdd64(
      (volatile long long*)&a->val_dont_use, (long long)v);  // NOLINT
#else
  return (uptr)_InterlockedExchangeAdd(
      (volatile long*)&a->val_dont_use, (long)v);  // NOLINT
#endif
}

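// Illustrative sketch (not part of this header): a reference count built on
// these operations; "rc" is a hypothetical atomic_uint32_t member and
// "destroy" a hypothetical cleanup function. Note that fetch_add/fetch_sub
// return the value held *before* the update:
//   atomic_fetch_add(&obj->rc, 1, memory_order_relaxed);           // ref
//   if (atomic_fetch_sub(&obj->rc, 1, memory_order_acq_rel) == 1)  // unref
//     destroy(obj);
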
// There is no interlocked subtract, so fetch_sub is implemented as an
// _InterlockedExchangeAdd of the negated value.
INLINE u32 atomic_fetch_sub(volatile atomic_uint32_t *a,
    u32 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return (u32)_InterlockedExchangeAdd(
      (volatile long*)&a->val_dont_use, -(long)v);  // NOLINT
}

INLINE uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,
    uptr v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
#ifdef _WIN64
  return (uptr)_InterlockedExchangeAdd64(
      (volatile long long*)&a->val_dont_use, -(long long)v);  // NOLINT
#else
  return (uptr)_InterlockedExchangeAdd(
      (volatile long*)&a->val_dont_use, -(long)v);  // NOLINT
#endif
}

// The 8- and 16-bit exchanges are done in inline assembly; x86 xchg with a
// memory operand is implicitly locked. Note that MSVC supports __asm blocks
// only when targeting x86-32.
INLINE u8 atomic_exchange(volatile atomic_uint8_t *a,
    u8 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  __asm {
    mov eax, a
    mov cl, v
    xchg [eax], cl  // NOLINT
    mov v, cl
  }
  return v;
}

INLINE u16 atomic_exchange(volatile atomic_uint16_t *a,
    u16 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  __asm {
    mov eax, a
    mov cx, v
    xchg [eax], cx  // NOLINT
    mov v, cx
  }
  return v;
}

INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
                                           u8 *cmp,
                                           u8 xchgv,
                                           memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  u8 cmpv = *cmp;
  u8 prev;
  // lock cmpxchg compares al with [ecx]: if equal it stores dl into [ecx],
  // otherwise it loads [ecx] into al; either way al ends up holding the
  // previous value.
  __asm {
    mov al, cmpv
    mov ecx, a
    mov dl, xchgv
    lock cmpxchg [ecx], dl
    mov prev, al
  }
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
                                           uptr *cmp,
                                           uptr xchg,
                                           memory_order mo) {
  uptr cmpv = *cmp;
  uptr prev = (uptr)_InterlockedCompareExchangePointer(
      (void*volatile*)&a->val_dont_use, (void*)xchg, (void*)cmpv);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

INLINE bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,
                                           u16 *cmp,
                                           u16 xchg,
                                           memory_order mo) {
  u16 cmpv = *cmp;
  u16 prev = (u16)_InterlockedCompareExchange16(
      (volatile short*)&a->val_dont_use, (short)xchg, (short)cmpv);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

INLINE bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,
                                           u32 *cmp,
                                           u32 xchg,
                                           memory_order mo) {
  u32 cmpv = *cmp;
  u32 prev = (u32)_InterlockedCompareExchange(
      (volatile long*)&a->val_dont_use, (long)xchg, (long)cmpv);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,
                                           u64 *cmp,
                                           u64 xchg,
                                           memory_order mo) {
  u64 cmpv = *cmp;
  u64 prev = (u64)_InterlockedCompareExchange64(
      (volatile long long*)&a->val_dont_use, (long long)xchg, (long long)cmpv);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

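// Illustrative sketch (not part of this header): the usual CAS loop, here
// computing an atomic maximum on a hypothetical atomic_uint32_t "m". On
// failure *cmp is updated with the current value, so no explicit reload is
// needed:
//   u32 cur = atomic_load(&m, memory_order_relaxed);
//   while (cur < candidate &&
//          !atomic_compare_exchange_weak(&m, &cur, candidate,
//                                        memory_order_relaxed)) {
//   }
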
// On x86, cmpxchg does not fail spuriously, so the weak form of
// compare-exchange can simply delegate to the strong one.
template<typename T>
INLINE bool atomic_compare_exchange_weak(volatile T *a,
                                         typename T::Type *cmp,
                                         typename T::Type xchg,
                                         memory_order mo) {
  return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}

}  // namespace __sanitizer

#endif  // SANITIZER_ATOMIC_MSVC_H