//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers are included only in platform-specific files (*).
//
//  (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector.h"
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_mutexset.h"
#include "tsan_ignoreset.h"
#include "tsan_stack_trace.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {

#ifndef TSAN_GO
const uptr kAllocatorSpace = 0x7d0000000000ULL;
const uptr kAllocatorSize  =  0x10000000000ULL;  // 1T.

struct MapUnmapCallback;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
    DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
Allocator *allocator();
#endif

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2);

const u64 kShadowRodata = (u64)-1;  // .rodata shadow marker

// FastState (from most significant bit):
//   ignore          : 1
//   tid             : kTidBits
//   unused          : -
//   history_size    : 3
//   epoch           : kClkBits
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  }

  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 raw() const {
    return x_;
  }

  u64 tid() const {
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    return res;
  }

  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = x_ & ((1ull << kClkBits) - 1);
    return res;
  }

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }

  void SetHistorySize(int hs) {
    CHECK_GE(hs, 0);
    CHECK_LE(hs, 7);
    x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
  }

  ALWAYS_INLINE
  int GetHistorySize() const {
    return (int)((x_ >> kHistoryShift) & kHistoryMask);
  }

  void ClearHistorySize() {
    SetHistorySize(0);
  }

  ALWAYS_INLINE
  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // When hs == 0, the trace consists of 2 parts.
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
  }

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
  static const u64 kHistoryShift = kClkBits;
  static const u64 kHistoryMask = 7;
  u64 x_;
};
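
// Illustrative sketch (not part of the runtime): how a tid/epoch pair
// round-trips through FastState. kTidBits/kClkBits come from tsan_defs.h;
// the values here are arbitrary:
//   FastState fs(/*tid=*/7, /*epoch=*/42);
//   CHECK_EQ(fs.tid(), 7);
//   CHECK_EQ(fs.epoch(), 42);
//   fs.IncrementEpoch();  // epoch lives in the low kClkBits, so this is ++x_
//   CHECK_EQ(fs.epoch(), 43);
//   fs.SetIgnoreBit();    // bit 63; tid() masks it out, TidWithIgnore() keeps it
//   CHECK_EQ(fs.tid(), 7);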

// Shadow (from most significant bit):
//   freed           : 1
//   tid             : kTidBits
//   is_atomic       : 1
//   is_read         : 1
//   size_log        : 2
//   addr0           : 3
//   epoch           : kClkBits
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x)
      : FastState(x) {
  }

  explicit Shadow(const FastState &s)
      : FastState(s.x_) {
    ClearHistorySize();
  }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ((x_ >> kClkBits) & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    if (!kAccessIsWrite)
      x_ |= kReadBit;
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  }

  void SetAtomic(bool kIsAtomic) {
    DCHECK(!IsAtomic());
    if (kIsAtomic)
      x_ |= kAtomicBit;
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  }

  bool IsAtomic() const {
    return x_ & kAtomicBit;
  }

  bool IsZero() const {
    return x_ == 0;
  }

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }

  static ALWAYS_INLINE
  bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
    return masked_xor == 0;
  }

  static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
      unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + s1.size() > s2.addr0()) return true;
      if (s1.size() > -diff)
        res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff)
        res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
    return res;
  }

  u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
  u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
  bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
  bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }

  // The idea behind the freed bit is as follows.
  // When memory is freed (or otherwise becomes inaccessible), we write shadow
  // values with the tid/epoch of the free and the freed bit set.
  // During memory access processing the freed bit is treated as the msb of
  // the tid, so any access races with a shadow value that has the freed bit
  // set (it is as if the write came from a thread we have never synchronized
  // with). This lets us detect accesses to freed memory with no additional
  // overhead in memory access processing, while still being able to restore
  // the tid/epoch of the free.
  void MarkAsFreed() {
    x_ |= kFreedBit;
  }

  bool IsFreed() const {
    return x_ & kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }
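
  // Illustrative sketch (roughly what the free paths in the runtime do;
  // `thr` stands for the freeing thread's ThreadState):
  //   Shadow s(thr->fast_state);  // tid/epoch of the free
  //   s.MarkAsFreed();            // kFreedBit is bit 63, the msb of the tid
  //   ... store s into the shadow of the freed range ...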

  bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
        | (u64(kIsAtomic) << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    return v;
  }

  bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    return v;
  }

  bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    return v;
  }

 private:
  static const u64 kReadShift   = 5 + kClkBits;
  static const u64 kReadBit     = 1ull << kReadShift;
  static const u64 kAtomicShift = 6 + kClkBits;
  static const u64 kAtomicBit   = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }

  static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};
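
// Illustrative sketch: roughly how MemoryAccess() builds the current shadow
// value for a plain 4-byte write (`thr` is the current thread's ThreadState;
// kSizeLog4 is defined further below):
//   Shadow cur(thr->fast_state);                  // tid/epoch, history cleared
//   cur.SetAddr0AndSizeLog(addr & 7, kSizeLog4);  // offset within 8-byte cell
//   cur.SetWrite(true);                           // leaves kReadBit clear
//   cur.SetAtomic(false);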

struct SignalContext;

struct JmpBuf {
  uptr sp;
  uptr mangled_sp;
  uptr *shadow_stack_pos;
};

// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows us to reduce the number of shadow state updates.
  // For example, say fast_synch_epoch=100 and the last write to addr X was at
  // epoch=150. If we are processing a write to X from the same thread at
  // epoch=200, we do nothing, because both writes happen in the same 'synch
  // epoch'. That is, if another memory access does not race with the former
  // write, it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits, 32 are taken by synch_epoch and 12 are
  // taken by epoch between synchs.
  // This way we can save one load from tls.
  u64 fast_synch_epoch;
  // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  int ignore_sync;
  // Go does not support ignores.
#ifndef TSAN_GO
  IgnoreSet mop_ignore_set;
  IgnoreSet sync_ignore_set;
#endif
  // C/C++ uses a fixed-size shadow stack embedded into Trace.
  // Go uses a malloc-allocated shadow stack with dynamic size.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  u64 racy_state[2];
  MutexSet mset;
  ThreadClock clock;
#ifndef TSAN_GO
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
  Vector<JmpBuf> jmp_bufs;
  int ignore_interceptors;
#endif
  u64 stat[StatCnt];
  const int tid;
  const int unique_id;
  bool in_symbolizer;
  bool in_ignored_lib;
  bool is_alive;
  bool is_freeing;
  bool is_vptr_access;
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;
  ThreadContext *tctx;

  InternalDeadlockDetector internal_deadlock_detector;
  DDPhysicalThread *dd_pt;
  DDLogicalThread *dd_lt;

  bool in_signal_handler;
  SignalContext *signal_ctx;

  DenseSlabAllocCache block_cache;
  DenseSlabAllocCache sync_cache;

#ifndef TSAN_GO
  u32 last_sleep_stack_id;
  ThreadClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                       unsigned reuse_count,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};

#ifndef TSAN_GO
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}
#endif

class ThreadContext : public ThreadContextBase {
 public:
  explicit ThreadContext(int tid);
  ~ThreadContext();
  ThreadState *thr;
  u32 creation_stack_id;
  SyncClock sync;
  // Epoch at which the thread started.
  // If we see an event from the thread stamped with an older epoch,
  // the event is from a dead thread that shared its tid with this thread.
  u64 epoch0;
  u64 epoch1;

  // Override superclass callbacks.
  void OnDead();
  void OnJoined(void *arg);
  void OnFinished();
  void OnStarted(void *arg);
  void OnCreated(void *arg);
  void OnReset();
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct FiredSuppression {
  ReportType type;
  uptr pc;
  Suppression *supp;
};

struct Context {
  Context();

  bool initialized;
  bool after_multithreaded_fork;

  MetaMap metamap;

  Mutex report_mtx;
  int nreported;
  int nmissed_expected;
  atomic_uint64_t last_symbolize_time_ns;

  void *background_thread;
  atomic_uint32_t stop_background_thread;

  ThreadRegistry *thread_registry;

  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  // The number of fired suppressions may be large.
  InternalMmapVector<FiredSuppression> fired_suppressions;
  DDetector *dd;

  Flags flags;

  u64 stat[StatCnt];
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};

extern Context *ctx;  // The one and only global runtime context.

struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
#ifndef TSAN_GO
    cur_thread()->ignore_interceptors++;
#endif
  }

  ~ScopedIgnoreInterceptors() {
#ifndef TSAN_GO
    cur_thread()->ignore_interceptors--;
#endif
  }
};
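
// Usage sketch: stack-allocate one around code whose libc calls must bypass
// the interceptors, e.g.:
//   { ScopedIgnoreInterceptors ignore; /* symbolize, allocate, ... */ }
// ScopedReport below embeds one for exactly this reason.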

class ScopedReport {
 public:
  explicit ScopedReport(ReportType typ);
  ~ScopedReport();

  void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack,
                       const MutexSet *mset);
  void AddStack(const StackTrace *stack, bool suppressable = false);
  void AddThread(const ThreadContext *tctx, bool suppressable = false);
  void AddThread(int unique_tid, bool suppressable = false);
  void AddUniqueTid(int unique_tid);
  void AddMutex(const SyncVar *s);
  u64 AddMutex(u64 id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(u32 stack_id);
  void SetCount(int count);

  const ReportDesc *GetReport() const;

 private:
  ReportDesc *rep_;
  // The symbolizer makes lots of intercepted calls. If we tried to process
  // them, at best it would cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;

  void AddDeadMutex(u64 id);

  ScopedReport(const ScopedReport&);
  void operator = (const ScopedReport&);
};

void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset);

void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
  if (kCollectStats)
    thr->stat[typ] += n;
}
void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
  if (kCollectStats)
    thr->stat[typ] = n;
}

void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size);
void DontNeedShadowFor(uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();

void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc);

void ReportRace(ThreadState *thr);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx,
                        const ScopedReport &srep,
                        const StackTrace &trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();
bool FrameIsInternal(const ReportStack *frame);
ReportStack *SkipTsanInternalFrames(ReportStack *ent);

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif

u32 CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(u32 stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow();  // uses libunwind

void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write);
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    uptr size, uptr step, bool is_write);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic);

const int kSizeLog1 = 0;
const int kSizeLog2 = 1;
const int kSizeLog4 = 2;
const int kSizeLog8 = 3;

void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
                              uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
}

void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
                               uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
}

void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
                                    uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
}

void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
                                     uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
}
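
// Usage sketch: the __tsan_readN/__tsan_writeN entry points emitted by the
// compiler instrumentation reduce to these wrappers; e.g. a plain 4-byte
// store becomes roughly:
//   MemoryWrite(thr, pc, addr, kSizeLog4);
// Larger or unaligned accesses go through MemoryAccessRange() and
// UnalignedMemoryAccess() above.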

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);

void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);

void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid, uptr os_id);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);

void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1,
               bool try_lock = false);
int  MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool try_lock = false);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD

void Acquire(ThreadState *thr, uptr pc, uptr addr);
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);

// The hacky call uses a custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths of hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if TSAN_DEBUG == 0
// The caller may not create a stack frame for itself at all,
// so we create a reserve stack frame for it (1024 bytes must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(1024) \
                       ".hidden " #f "_thunk;" \
                       "call " #f "_thunk;" \
                       "add $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(-1024) \
                       ::: "memory", "cc");
#else
#define HACKY_CALL(f) f()
#endif
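
// Usage sketch: TraceAddEvent() below switches trace parts via
//   HACKY_CALL(__tsan_trace_switch);
// where the assembly thunk (__tsan_trace_switch_thunk) preserves all
// registers, calls into the runtime, and restores them.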

void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);
uptr TraceSize();
uptr TraceParts();
Trace *ThreadTrace(int tid);

extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
                                 EventType typ, u64 addr) {
  if (!kCollectHistory)
    return;
  DCHECK_GE((int)typ, 0);
  DCHECK_LE((int)typ, 7);
  DCHECK_EQ(GetLsb(addr, 61), addr);
  StatInc(thr, StatEvents);
  u64 pos = fs.GetTracePos();
  if (UNLIKELY((pos % kTracePartSize) == 0)) {
#ifndef TSAN_GO
    HACKY_CALL(__tsan_trace_switch);
#else
    TraceSwitch(thr);
#endif
  }
  Event *trace = (Event*)GetThreadTrace(fs.tid());
  Event *evp = &trace[pos];
  Event ev = (u64)addr | ((u64)typ << 61);
  *evp = ev;
}
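
// Illustrative decode of an event: the type occupies the top 3 bits and the
// address/PC the low 61 (hence the GetLsb(addr, 61) check above):
//   EventType typ = (EventType)(ev >> 61);
//   uptr addr     = (uptr)(ev & ((1ull << 61) - 1));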

}  // namespace __tsan

#endif  // TSAN_RTL_H