//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11 and C11 standards.
// For background, see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
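//
// For orientation (a sketch of the surrounding tooling, not code in this
// file): with -fsanitize=thread the compiler rewrites atomic operations into
// calls to the __tsan_atomic* interface defined below, so that, roughly, a
// 32-bit atomic_load_explicit(&x, memory_order_acquire) becomes
// __tsan_atomic32_load(&x, mo_acquire), which performs the load and also
// informs the race detector about the acquire.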

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

#if !defined(SANITIZER_GO) && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif

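// Predicates over memory orders: which orders are legal for loads and stores,
// and which imply acquire and/or release semantics.  They mirror the
// constraints in C++11 [atomics.order].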
static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

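// Uninstrumented primitives built on the __sync builtins.  They are used both
// by the NoTsanAtomic* bypass paths and as the underlying operation inside
// the instrumented read-modify-write wrappers below.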
template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain a full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// The 128-bit ops are emulated under a tsan-internal mutex; this is correct
// only under the assumption that the atomic variables are not accessed from
// non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !defined(SANITIZER_GO) \
    && __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

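// Maps sizeof(T) to the access-size code (kSizeLog*) that is recorded in
// shadow memory by MemoryReadAtomic/MemoryWriteAtomic.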
template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access,
  // which leads to false negatives only in very obscure cases.
}

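// Conversions from the interface types (a8..a64) and morder values to the
// sanitizer_common atomic types and memory orders used by the NoTsanAtomic*
// fallbacks.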
#ifndef SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}
#endif

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}

static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  CHECK(0);
  return memory_order_seq_cst;
}

template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif

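// Instrumented atomic load.  For non-acquiring orders only the memory access
// is recorded; for acquiring orders (consume, acquire, seq_cst) the thread
// also acquires the vector clock stored in the SyncVar associated with the
// address, which establishes happens-before with prior release stores.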
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return NoTsanAtomicLoad(a, mo);
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, false);
  AcquireImpl(thr, pc, &s->clock);
  T v = NoTsanAtomicLoad(a, mo);
  s->mtx.ReadUnlock();
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}

template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif

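// Instrumented atomic store.  Releasing orders (release, seq_cst) increment
// the thread's epoch, append a trace event and release the thread's clock
// into the SyncVar for the address, so that a subsequent acquire load can
// synchronize with this store.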
template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  __sync_synchronize();
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  NoTsanAtomicStore(a, v, mo);
  s->mtx.Unlock();
}

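// Common implementation of all instrumented read-modify-write operations.
// F is one of the func_* primitives above.  Depending on the memory order the
// operation acquires and/or releases the SyncVar clock; relaxed RMWs skip the
// SyncVar entirely and only record the memory access.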
template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}

template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif

template<typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}

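// Instrumented compare-and-swap.  On failure the observed value is written
// back through *c, matching the compare_exchange_strong contract.  The SyncVar
// is taken with a read lock for acquire-only orders and a write lock
// otherwise; the failure order fmo is currently ignored.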
template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  bool write_lock = mo != mo_acquire && mo != mo_consume;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  T cc = *c;
  T pr = func_cas(a, cc, v);
  if (s) {
    if (write_lock)
      s->mtx.Unlock();
    else
      s->mtx.ReadUnlock();
  }
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

#ifndef SANITIZER_GO
static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}
#endif

// Interface functions follow.
#ifndef SANITIZER_GO

// C/C++

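// Boilerplate shared by every C/C++ interface function below: capture the
// caller PC, optionally force seq_cst when the force_seq_cst_atomics flag is
// set, bypass instrumentation entirely when interceptors are ignored, update
// the atomic statistics and wrap the operation in a ScopedAtomic.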
#define SCOPED_ATOMIC(func, ...) \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = StackTrace::GetCurrentPc(); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    if (thr->ignore_interceptors) \
      return NoTsanAtomic##func(__VA_ARGS__); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __func__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/

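// RAII helper: records a function entry/exit on the thread's shadow stack
// around the atomic operation and processes pending signals on the way out.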
class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
  }
  ~ScopedAtomic() {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};

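// Bumps the per-thread statistics counters for the operation kind, the access
// size and the memory order.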
static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

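// The _weak and _strong compare-exchange entry points share one
// implementation: a weak CAS is simply treated as a strong one.  The _val
// variants return the value observed at the address instead of a
// success/failure flag.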
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
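  // SCOPED_ATOMIC references 'a' (for statistics and debug output); a fence
  // has no associated address, so pass a dummy one.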
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
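  // A signal fence only orders the thread with respect to its own signal
  // handlers; there is no inter-thread synchronization to model, so there is
  // nothing to do here.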
}
}  // extern "C"

#else  // #ifndef SANITIZER_GO

// Go

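// The Go runtime passes the arguments and the result slot in a single packed
// buffer 'a': the address of the atomic variable comes first, followed by the
// operand(s), followed by space for the result.  For example, for a 32-bit
// fetch_add the address is at a+0, the addend at a+8 and the old value is
// stored at a+16.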
#define ATOMIC(func, ...) \
    if (thr->ignore_sync) { \
      NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/

#define ATOMIC_RET(func, ret, ...) \
    if (thr->ignore_sync) { \
      (ret) = NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a32 cur = 0;
  a32 cmp = *(a32*)(a+8);
  ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
  *(bool*)(a+16) = (cur == cmp);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a64 cur = 0;
  a64 cmp = *(a64*)(a+8);
  ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
  *(bool*)(a+24) = (cur == cmp);
}
}  // extern "C"
#endif  // #ifndef SANITIZER_GO