//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11 and C11 standards.
// For background see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

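// SCOPED_ATOMIC is expanded at the top of every __tsan_atomic* entry point
// below.  It captures the caller and current PCs, optionally upgrades the
// memory order to seq_cst (flag force_seq_cst_atomics), bypasses
// instrumentation entirely when interceptors are ignored for this thread,
// bumps the statistics counters, opens a ScopedAtomic scope and dispatches
// to the corresponding Atomic* implementation.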
#define SCOPED_ATOMIC(func, ...) \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    if (thr->ignore_interceptors) \
      return NoTsanAtomic##func(__VA_ARGS__); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __func__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/

// These should match declarations from public tsan_interface_atomic.h header.
typedef unsigned char      a8;
typedef unsigned short     a16;  // NOLINT
typedef unsigned int       a32;
typedef unsigned long long a64;  // NOLINT
#if defined(__SIZEOF_INT128__) \
    || (__clang_major__ * 100 + __clang_minor__ >= 302)
__extension__ typedef __int128 a128;
# define __TSAN_HAS_INT128 1
#else
# define __TSAN_HAS_INT128 0
#endif

// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;

// Part of ABI, do not change.
// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
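// The enumerator values below are expected to line up with the standard
// memory_order values (and the __ATOMIC_* constants 0..5) that the compiler
// passes in the memory-order argument.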
typedef enum {
  mo_relaxed,
  mo_consume,
  mo_acquire,
  mo_release,
  mo_acq_rel,
  mo_seq_cst
} morder;

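// RAII scope opened by SCOPED_ATOMIC: FuncEntry/FuncExit make the atomic
// operation appear as a separate frame in reports, and pending signals are
// processed when the scope is left.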
class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
  }
  ~ScopedAtomic() {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set is only an acquire barrier, not a full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
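  // Emulate it with a compare-and-swap loop: recompute ~(cmp & op) against
  // the freshly observed value until the swap succeeds, and return the old
  // value like the other fetch ops.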
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// The operations are emulated under a tsan-internal mutex, so here we assume
// that the atomic variables are not accessed from non-instrumented code.
#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access,
  // which leads to false negatives only in very obscure cases.
}

static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return (atomic_uint8_t*)a;
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return (atomic_uint16_t*)a;
}

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return (atomic_uint32_t*)a;
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return (atomic_uint64_t*)a;
}

static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  CHECK(0);
  return memory_order_seq_cst;
}

template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif

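// Instrumented atomic load. Relaxed loads take the fast path: just the
// atomic read plus a shadow memory access. Loads with acquire semantics
// (consume/acquire/seq_cst) additionally acquire the clock of the SyncVar
// associated with the address, establishing a happens-before edge from the
// releasing store.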
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return NoTsanAtomicLoad(a, mo);
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, false);
  AcquireImpl(thr, pc, &s->clock);
  T v = NoTsanAtomicLoad(a, mo);
  s->mtx.ReadUnlock();
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}

template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif

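// Instrumented atomic store. Relaxed stores take the fast path (but see the
// comment about release sequences below). Stores with release semantics
// additionally advance the epoch, log the event in the trace and release the
// clock of the SyncVar associated with the address.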
template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  __sync_synchronize();
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  NoTsanAtomicStore(a, v, mo);
  s->mtx.Unlock();
}

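// Common skeleton for all read-modify-write operations. For non-relaxed
// orders the SyncVar for the address is locked and its clock is combined
// with the thread clock according to mo (acquire, release or both), then the
// actual operation F is applied to the memory location.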
template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}

template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif

template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  return NoTsanAtomicCAS(a, &c, v, mo, fmo);
}

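// Instrumented compare-and-swap. The SyncVar is write-locked unless mo is
// acquire/consume, the clocks are combined according to mo, and on failure
// the value observed in memory is written back through c (matching the
// compare_exchange interface). The separate failure order fmo is currently
// ignored.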
template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  bool write_lock = mo != mo_acquire && mo != mo_consume;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  T cc = *c;
  T pr = func_cas(a, cc, v);
  if (s) {
    if (write_lock)
      s->mtx.Unlock();
    else
      s->mtx.ReadUnlock();
  }
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}

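// Public entry points, see tsan_interface_atomic.h. Under -fsanitize=thread
// the compiler lowers atomic builtins to calls to these functions, so, for
// example, an acquire load of a 32-bit std::atomic roughly becomes
// __tsan_atomic32_load(addr, mo_acquire).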
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
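  // SCOPED_ATOMIC references an atomic address 'a'; a fence has none, so a
  // dummy char pointer is used (sizeof(*a) == 1 keeps the stats bucket valid).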
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
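  // A signal fence only constrains the compiler and has no inter-thread
  // synchronization effect, so there is nothing for the runtime to do.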
}
}  // extern "C"