//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C11 standards.
// For background, see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up to date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
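//
// For illustration only (hypothetical instrumented code, not part of this
// file): the compiler's TSan instrumentation rewrites an atomic builtin such
// as
//   __atomic_load_n(&x, __ATOMIC_ACQUIRE)
// on a 32-bit location into a call to the runtime entry point
//   __tsan_atomic32_load(&x, __tsan_memory_order_acquire)
// so that the functions defined below can model its acquire semantics.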

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_interface_atomic.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

#define SCOPED_ATOMIC(func, ...) \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
    pc = __sanitizer::StackTrace::GetPreviousInstructionPc(pc); \
    mo = ConvertOrder(mo); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, __FUNCTION__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
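
// For reference, an informal sketch of what SCOPED_ATOMIC(Load, a, mo)
// expands to inside __tsan_atomic32_load (not generated code, just the macro
// above written out):
//   const uptr callpc = (uptr)__builtin_return_address(0);
//   uptr pc = __sanitizer::StackTrace::GetCurrentPc();
//   pc = __sanitizer::StackTrace::GetPreviousInstructionPc(pc);
//   mo = ConvertOrder(mo);
//   mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo;
//   ThreadState *const thr = cur_thread();
//   AtomicStatInc(thr, sizeof(*a), mo, StatAtomicLoad);
//   ScopedAtomic sa(thr, callpc, __FUNCTION__);
//   return AtomicLoad(thr, pc, a, mo);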

class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const char *func)
      : thr_(thr) {
    CHECK_EQ(thr_->in_rtl, 0);
    ProcessPendingSignals(thr);
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s\n", thr_->tid, func);
    thr_->in_rtl++;
  }
  ~ScopedAtomic() {
    thr_->in_rtl--;
    CHECK_EQ(thr_->in_rtl, 0);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};

// Some shortcuts.
typedef __tsan_memory_order morder;
typedef __tsan_atomic8 a8;
typedef __tsan_atomic16 a16;
typedef __tsan_atomic32 a32;
typedef __tsan_atomic64 a64;
typedef __tsan_atomic128 a128;
const morder mo_relaxed = __tsan_memory_order_relaxed;
const morder mo_consume = __tsan_memory_order_consume;
const morder mo_acquire = __tsan_memory_order_acquire;
const morder mo_release = __tsan_memory_order_release;
const morder mo_acq_rel = __tsan_memory_order_acq_rel;
const morder mo_seq_cst = __tsan_memory_order_seq_cst;

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

static morder ConvertOrder(morder mo) {
  // Values above 100500 use an older encoding of memory orders
  // (100500 plus a one-hot bit per order); map them back to the
  // current 0..5 enum values.
  if (mo > (morder)100500) {
    mo = morder(mo - 100500);
    if (mo == morder(1 << 0))
      mo = mo_relaxed;
    else if (mo == morder(1 << 1))
      mo = mo_consume;
    else if (mo == morder(1 << 2))
      mo = mo_acquire;
    else if (mo == morder(1 << 3))
      mo = mo_release;
    else if (mo == morder(1 << 4))
      mo = mo_acq_rel;
    else if (mo == morder(1 << 5))
      mo = mo_seq_cst;
  }
  CHECK_GE(mo, mo_relaxed);
  CHECK_LE(mo, mo_seq_cst);
  return mo;
}

template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain a full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}
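
// For illustration (worked example with hypothetical 8-bit operands): with
// *v == 0x0C and op == 0x0A, the loop above installs ~(0x0C & 0x0A), i.e. the
// byte 0xF7, and returns the previous value 0x0C.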

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under the tsan internal mutex; here we assume
// that the atomic variables are not accessed from non-instrumented code.
#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
a128 func_xchg(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access;
  // this leads to false negatives only in very obscure cases.
}

template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return *a;
  }
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(&s->clock);
  T v = *a;
  s->mtx.ReadUnlock();
  __sync_synchronize();
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off a release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
    *a = v;
    return;
  }
  __sync_synchronize();
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.ReleaseStore(&s->clock);
  *a = v;
  s->mtx.Unlock();
  // Trailing memory barrier to provide sequential consistency
  // for Dekker-like store-load synchronization.
  __sync_synchronize();
}
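
// For illustration (classic Dekker-style store-load pattern that the trailing
// barrier above is meant to support; hypothetical user code, not part of this
// file):
//   Thread 1: __tsan_atomic32_store(&x, 1, __tsan_memory_order_seq_cst);
//             r1 = __tsan_atomic32_load(&y, __tsan_memory_order_seq_cst);
//   Thread 2: __tsan_atomic32_store(&y, 1, __tsan_memory_order_seq_cst);
//             r2 = __tsan_atomic32_load(&x, __tsan_memory_order_seq_cst);
// With x and y initially zero, sequential consistency guarantees that r1 and
// r2 cannot both be 0.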

template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  if (IsAcqRelOrder(mo))
    thr->clock.acq_rel(&s->clock);
  else if (IsReleaseOrder(mo))
    thr->clock.release(&s->clock);
  else if (IsAcquireOrder(mo))
    thr->clock.acquire(&s->clock);
  v = F(a, v);
  s->mtx.Unlock();
  return v;
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  if (IsAcqRelOrder(mo))
    thr->clock.acq_rel(&s->clock);
  else if (IsReleaseOrder(mo))
    thr->clock.release(&s->clock);
  else if (IsAcquireOrder(mo))
    thr->clock.acquire(&s->clock);
  T cc = *c;
  T pr = func_cas(a, cc, v);
  s->mtx.Unlock();
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}
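
// For illustration (hypothetical values): if *a == 5 and *c == 5, the CAS
// above installs v and returns true; if instead *a == 7, nothing is written,
// 7 is stored into *c, and false is returned.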

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}

a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

void __tsan_atomic_thread_fence(morder mo) {
  char* a;  // Dummy pointer: SCOPED_ATOMIC only uses it as sizeof(*a) for the
            // stats counters and never evaluates it.
  SCOPED_ATOMIC(Fence, mo);
}

void __tsan_atomic_signal_fence(morder mo) {
  // Nothing to do: a signal fence only orders operations with respect to a
  // signal handler running in the same thread, so there is no cross-thread
  // synchronization for the race detector to model.
}