//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C11 standards.
// For background, see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up to date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
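//
// As a brief, illustrative sketch (not part of this file's logic, using the
// standard <atomic> API), the release/acquire pairing that these entry points
// model:
//
//   std::atomic<int> flag{0};
//   int data = 0;
//   // Thread 1:
//   //   data = 42;
//   //   flag.store(1, std::memory_order_release);
//   // Thread 2:
//   //   if (flag.load(std::memory_order_acquire) == 1)
//   //     assert(data == 42);  // No race: the release store
//   //                          // synchronizes-with the acquire load.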

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

// These should match the declarations in the public tsan_interface_atomic.h
// header.
typedef unsigned char      a8;
typedef unsigned short     a16;  // NOLINT
typedef unsigned int       a32;
typedef unsigned long long a64;  // NOLINT
#if !defined(SANITIZER_GO) && (defined(__SIZEOF_INT128__) \
    || (__clang_major__ * 100 + __clang_minor__ >= 302)) && !defined(__mips64)
__extension__ typedef __int128 a128;
# define __TSAN_HAS_INT128 1
#else
# define __TSAN_HAS_INT128 0
#endif

#if !defined(SANITIZER_GO) && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif

// Part of ABI, do not change.
// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
typedef enum {
  mo_relaxed,
  mo_consume,
  mo_acquire,
  mo_release,
  mo_acq_rel,
  mo_seq_cst
} morder;
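
// For illustration only (a sketch, not part of the ABI): instrumented C++ code
// such as
//   std::atomic<unsigned> x;
//   x.store(1, std::memory_order_release);
// is lowered by the compiler instrumentation to a call along the lines of
//   __tsan_atomic32_store(address_of_x, 1, mo_release);
// so the numeric values of morder must stay in sync with the public header
// and with what the instrumentation emits.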

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set is only an acquire barrier, not a full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// Clang does not support 128-bit atomic ops, so they are emulated under the
// tsan-internal mutex; here we assume that the atomic variables are not
// accessed from non-instrumented code.
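// For example (illustrative only), a plain uninstrumented access such as
//   a128 tmp = *p;  // bypasses mutex128
// would race with the lock-protected helpers below; that is why the
// assumption above is needed.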
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !defined(SANITIZER_GO) \
    && __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access;
  // this leads to false negatives only in very obscure cases.
}

#ifndef SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}
#endif

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}

static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  CHECK(0);
  return memory_order_seq_cst;
}

template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif

template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return NoTsanAtomicLoad(a, mo);
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, false);
  AcquireImpl(thr, pc, &s->clock);
  T v = NoTsanAtomicLoad(a, mo);
  s->mtx.ReadUnlock();
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}

template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  __sync_synchronize();
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  NoTsanAtomicStore(a, v, mo);
  s->mtx.Unlock();
}

template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}

template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif

template<typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}

template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because LLVM does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
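  // Release and acq-rel orders update the SyncVar clock and need the write
  // lock; acquire/consume only read it, so a read lock suffices for them.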
  bool write_lock = mo != mo_acquire && mo != mo_consume;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  T cc = *c;
  T pr = func_cas(a, cc, v);
  if (s) {
    if (write_lock)
      s->mtx.Unlock();
    else
      s->mtx.ReadUnlock();
  }
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

#ifndef SANITIZER_GO
static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}
#endif

// Interface functions follow.
#ifndef SANITIZER_GO

// C/C++

#define SCOPED_ATOMIC(func, ...) \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = StackTrace::GetCurrentPc(); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    if (thr->ignore_interceptors) \
      return NoTsanAtomic##func(__VA_ARGS__); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __func__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
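
// Note: SCOPED_ATOMIC deliberately refers to `a` and `mo` from the enclosing
// interface function's parameters. As a rough sketch of the expansion,
// __tsan_atomic32_load(a, mo) becomes:
//   const uptr callpc = (uptr)__builtin_return_address(0);
//   uptr pc = StackTrace::GetCurrentPc();
//   mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo;
//   ThreadState *const thr = cur_thread();
//   if (thr->ignore_interceptors)
//     return NoTsanAtomicLoad(a, mo);
//   AtomicStatInc(thr, sizeof(*a), mo, StatAtomicLoad);
//   ScopedAtomic sa(thr, callpc, a, mo, __func__);
//   return AtomicLoad(thr, pc, a, mo);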

class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
  }
  ~ScopedAtomic() {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
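  // `a` is referenced by SCOPED_ATOMIC (sizeof(*a) and the ScopedAtomic
  // constructor), so a fence needs this dummy operand.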
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
}  // extern "C"

#else  // #ifndef SANITIZER_GO

// Go

#define ATOMIC(func, ...) \
    if (thr->ignore_sync) { \
      NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/

#define ATOMIC_RET(func, ret, ...) \
    if (thr->ignore_sync) { \
      (ret) = NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/
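
// In the Go entry points below, `a` points to a packed argument/result buffer
// prepared by the Go runtime: the address of the atomic variable is at offset
// 0, the operand(s) follow at offset 8 (and at offset 12/16 for the new value
// of a compare-exchange), and results are written back after the arguments.
// For example, __tsan_go_atomic32_fetch_add reads the target address from a+0
// and the addend from a+8, and stores the old value at a+16.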

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a32 cur = 0;
  a32 cmp = *(a32*)(a+8);
  ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
  *(bool*)(a+16) = (cur == cmp);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a64 cur = 0;
  a64 cmp = *(a64*)(a+8);
  ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
  *(bool*)(a+24) = (cur == cmp);
}
}  // extern "C"
#endif  // #ifndef SANITIZER_GO