      1 //===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This file is a part of ThreadSanitizer (TSan), a race detector.
     11 //
     12 // Main internal TSan header file.
     13 //
     14 // Ground rules:
     15 //   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
     16 //     function-scope locals)
     17 //   - All functions/classes/etc reside in namespace __tsan, except for those
     18 //     declared in tsan_interface.h.
     19 //   - Platform-specific files should be used instead of ifdefs (*).
     20 //   - No system headers included in header files (*).
      21 //   - Platform-specific headers are included only in platform-specific files (*).
     22 //
     23 //  (*) Except when inlining is critical for performance.
     24 //===----------------------------------------------------------------------===//
     25 
     26 #ifndef TSAN_RTL_H
     27 #define TSAN_RTL_H
     28 
     29 #include "sanitizer_common/sanitizer_allocator.h"
     30 #include "sanitizer_common/sanitizer_allocator_internal.h"
     31 #include "sanitizer_common/sanitizer_asm.h"
     32 #include "sanitizer_common/sanitizer_common.h"
     33 #include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
     34 #include "sanitizer_common/sanitizer_libignore.h"
     35 #include "sanitizer_common/sanitizer_suppressions.h"
     36 #include "sanitizer_common/sanitizer_thread_registry.h"
     37 #include "tsan_clock.h"
     38 #include "tsan_defs.h"
     39 #include "tsan_flags.h"
     40 #include "tsan_sync.h"
     41 #include "tsan_trace.h"
     42 #include "tsan_vector.h"
     43 #include "tsan_report.h"
     44 #include "tsan_platform.h"
     45 #include "tsan_mutexset.h"
     46 #include "tsan_ignoreset.h"
     47 #include "tsan_stack_trace.h"
     48 
     49 #if SANITIZER_WORDSIZE != 64
     50 # error "ThreadSanitizer is supported only on 64-bit platforms"
     51 #endif
     52 
     53 namespace __tsan {
     54 
     55 #ifndef SANITIZER_GO
     56 struct MapUnmapCallback;
     57 #if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)
     58 static const uptr kAllocatorSpace = 0;
     59 static const uptr kAllocatorSize = SANITIZER_MMAP_RANGE_SIZE;
     60 static const uptr kAllocatorRegionSizeLog = 20;
     61 static const uptr kAllocatorNumRegions =
     62     kAllocatorSize >> kAllocatorRegionSizeLog;
     63 typedef TwoLevelByteMap<(kAllocatorNumRegions >> 12), 1 << 12,
     64     MapUnmapCallback> ByteMap;
     65 typedef SizeClassAllocator32<kAllocatorSpace, kAllocatorSize, 0,
     66     CompactSizeClassMap, kAllocatorRegionSizeLog, ByteMap,
     67     MapUnmapCallback> PrimaryAllocator;
     68 #else
     69 typedef SizeClassAllocator64<Mapping::kHeapMemBeg,
     70     Mapping::kHeapMemEnd - Mapping::kHeapMemBeg, 0,
     71     DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
     72 #endif
     73 typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
     74 typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
     75 typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
     76     SecondaryAllocator> Allocator;
     77 Allocator *allocator();
     78 #endif
     79 
     80 void TsanCheckFailed(const char *file, int line, const char *cond,
     81                      u64 v1, u64 v2);
     82 
     83 const u64 kShadowRodata = (u64)-1;  // .rodata shadow marker
     84 
     85 // FastState (from most significant bit):
     86 //   ignore          : 1
     87 //   tid             : kTidBits
     88 //   unused          : -
     89 //   history_size    : 3
     90 //   epoch           : kClkBits
     91 class FastState {
     92  public:
     93   FastState(u64 tid, u64 epoch) {
     94     x_ = tid << kTidShift;
     95     x_ |= epoch;
     96     DCHECK_EQ(tid, this->tid());
     97     DCHECK_EQ(epoch, this->epoch());
     98     DCHECK_EQ(GetIgnoreBit(), false);
     99   }
    100 
    101   explicit FastState(u64 x)
    102       : x_(x) {
    103   }
    104 
    105   u64 raw() const {
    106     return x_;
    107   }
    108 
    109   u64 tid() const {
    110     u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    111     return res;
    112   }
    113 
    114   u64 TidWithIgnore() const {
    115     u64 res = x_ >> kTidShift;
    116     return res;
    117   }
    118 
    119   u64 epoch() const {
    120     u64 res = x_ & ((1ull << kClkBits) - 1);
    121     return res;
    122   }
    123 
    124   void IncrementEpoch() {
    125     u64 old_epoch = epoch();
    126     x_ += 1;
    127     DCHECK_EQ(old_epoch + 1, epoch());
    128     (void)old_epoch;
    129   }
    130 
    131   void SetIgnoreBit() { x_ |= kIgnoreBit; }
    132   void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
    133   bool GetIgnoreBit() const { return (s64)x_ < 0; }
    134 
    135   void SetHistorySize(int hs) {
    136     CHECK_GE(hs, 0);
    137     CHECK_LE(hs, 7);
    138     x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
    139   }
    140 
    141   ALWAYS_INLINE
    142   int GetHistorySize() const {
    143     return (int)((x_ >> kHistoryShift) & kHistoryMask);
    144   }
    145 
    146   void ClearHistorySize() {
    147     SetHistorySize(0);
    148   }
    149 
    150   ALWAYS_INLINE
    151   u64 GetTracePos() const {
    152     const int hs = GetHistorySize();
    153     // When hs == 0, the trace consists of 2 parts.
    154     const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    155     return epoch() & mask;
    156   }
    157 
    158  private:
    159   friend class Shadow;
    160   static const int kTidShift = 64 - kTidBits - 1;
    161   static const u64 kIgnoreBit = 1ull << 63;
    162   static const u64 kFreedBit = 1ull << 63;
    163   static const u64 kHistoryShift = kClkBits;
    164   static const u64 kHistoryMask = 7;
    165   u64 x_;
    166 };
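         // Illustrative sketch (not part of the runtime): how the bit packing
         // described above behaves through the FastState API; the values are
         // hypothetical.
         //
         //   FastState fs(/*tid=*/3, /*epoch=*/5);
         //   fs.tid();            // == 3 (high bits, just below the ignore bit)
         //   fs.epoch();          // == 5 (low kClkBits bits)
         //   fs.IncrementEpoch(); // epoch() becomes 6, tid() is unchanged
         //   fs.SetIgnoreBit();   // GetIgnoreBit() == true, tid() still returns 3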
    167 
    168 // Shadow (from most significant bit):
    169 //   freed           : 1
    170 //   tid             : kTidBits
    171 //   is_atomic       : 1
    172 //   is_read         : 1
    173 //   size_log        : 2
    174 //   addr0           : 3
    175 //   epoch           : kClkBits
    176 class Shadow : public FastState {
    177  public:
    178   explicit Shadow(u64 x)
    179       : FastState(x) {
    180   }
    181 
    182   explicit Shadow(const FastState &s)
    183       : FastState(s.x_) {
    184     ClearHistorySize();
    185   }
    186 
    187   void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    188     DCHECK_EQ((x_ >> kClkBits) & 31, 0);
    189     DCHECK_LE(addr0, 7);
    190     DCHECK_LE(kAccessSizeLog, 3);
    191     x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
    192     DCHECK_EQ(kAccessSizeLog, size_log());
    193     DCHECK_EQ(addr0, this->addr0());
    194   }
    195 
    196   void SetWrite(unsigned kAccessIsWrite) {
    197     DCHECK_EQ(x_ & kReadBit, 0);
    198     if (!kAccessIsWrite)
    199       x_ |= kReadBit;
    200     DCHECK_EQ(kAccessIsWrite, IsWrite());
    201   }
    202 
    203   void SetAtomic(bool kIsAtomic) {
    204     DCHECK(!IsAtomic());
    205     if (kIsAtomic)
    206       x_ |= kAtomicBit;
    207     DCHECK_EQ(IsAtomic(), kIsAtomic);
    208   }
    209 
    210   bool IsAtomic() const {
    211     return x_ & kAtomicBit;
    212   }
    213 
    214   bool IsZero() const {
    215     return x_ == 0;
    216   }
    217 
    218   static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    219     u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    220     DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    221     return shifted_xor == 0;
    222   }
    223 
    224   static ALWAYS_INLINE
    225   bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    226     u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
    227     return masked_xor == 0;
    228   }
    229 
    230   static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
    231       unsigned kS2AccessSize) {
    232     bool res = false;
    233     u64 diff = s1.addr0() - s2.addr0();
    234     if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
    235       // if (s1.addr0() + size1) > s2.addr0()) return true;
    236       if (s1.size() > -diff)
    237         res = true;
    238     } else {
    239       // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
    240       if (kS2AccessSize > diff)
    241         res = true;
    242     }
    243     DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
    244     DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
    245     return res;
    246   }
    247 
    248   u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
    249   u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
    250   bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
    251   bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }
    252 
     253   // The idea behind the freed bit is as follows.
     254   // When memory is freed (or otherwise made inaccessible), we write shadow
     255   // values with the tid/epoch of the free and with the freed bit set.
     256   // During memory access processing the freed bit is treated as the most
     257   // significant bit of the tid, so any access races with a shadow value that
     258   // has the freed bit set (as if it were a write from a thread we have never
     259   // synchronized with before). This allows us to detect accesses to freed
     260   // memory without additional overhead in memory access processing and, at
     261   // the same time, lets us restore the tid/epoch of the free.
    262   void MarkAsFreed() {
    263      x_ |= kFreedBit;
    264   }
    265 
    266   bool IsFreed() const {
    267     return x_ & kFreedBit;
    268   }
    269 
    270   bool GetFreedAndReset() {
    271     bool res = x_ & kFreedBit;
    272     x_ &= ~kFreedBit;
    273     return res;
    274   }
    275 
    276   bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    277     bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
    278         | (u64(kIsAtomic) << kAtomicShift));
    279     DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    280     return v;
    281   }
    282 
    283   bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    284     bool v = ((x_ >> kReadShift) & 3)
    285         <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    286     DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
    287         (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    288     return v;
    289   }
    290 
    291   bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    292     bool v = ((x_ >> kReadShift) & 3)
    293         >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    294     DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
    295         (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    296     return v;
    297   }
    298 
    299  private:
    300   static const u64 kReadShift   = 5 + kClkBits;
    301   static const u64 kReadBit     = 1ull << kReadShift;
    302   static const u64 kAtomicShift = 6 + kClkBits;
    303   static const u64 kAtomicBit   = 1ull << kAtomicShift;
    304 
    305   u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }
    306 
    307   static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
    308     if (s1.addr0() == s2.addr0()) return true;
    309     if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
    310       return true;
    311     if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
    312       return true;
    313     return false;
    314   }
    315 };
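         // Illustrative sketch (not part of the runtime): how a shadow value for a
         // 4-byte non-atomic write at offset 2 within its 8-byte shadow cell could be
         // built from the API above, and how the freed-bit trick is applied on free.
         // 'thr' is a hypothetical pointer to the current ThreadState.
         //
         //   Shadow cur(thr->fast_state);            // tid/epoch of the accessor
         //   cur.SetAddr0AndSizeLog(/*addr0=*/2, kSizeLog4);
         //   cur.SetWrite(true);                     // write access (read bit stays 0)
         //   cur.SetAtomic(false);
         //   // On free, the runtime stores shadow values with the freed bit set, so
         //   // any later access to this memory races with them:
         //   cur.MarkAsFreed();                      // cur.IsFreed() == true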
    316 
    317 struct ThreadSignalContext;
    318 
    319 struct JmpBuf {
    320   uptr sp;
    321   uptr mangled_sp;
    322   int int_signal_send;
    323   bool in_blocking_func;
    324   uptr in_signal_handler;
    325   uptr *shadow_stack_pos;
    326 };
    327 
     328 // A Processor represents a physical thread, or a P for Go.
     329 // It is used to store internal resources like the allocator cache, and it
     330 // does not participate in race-detection logic (it is invisible to the end
     331 // user). In C++ it is tied to an OS thread just like ThreadState; ideally,
     332 // however, it would be tied to a CPU (that way we would have fewer
     333 // allocator caches). In Go it is tied to a P, so there are significantly
     334 // fewer Processors than ThreadStates (which are tied to Gs).
     335 // A ThreadState must be wired with a Processor to handle events.
    336 struct Processor {
    337   ThreadState *thr; // currently wired thread, or nullptr
    338 #ifndef SANITIZER_GO
    339   AllocatorCache alloc_cache;
    340   InternalAllocatorCache internal_alloc_cache;
    341 #endif
    342   DenseSlabAllocCache block_cache;
    343   DenseSlabAllocCache sync_cache;
    344   DenseSlabAllocCache clock_cache;
    345   DDPhysicalThread *dd_pt;
    346 };
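         // Illustrative sketch (not part of the runtime): the intended lifecycle of a
         // Processor in terms of the ProcCreate/ProcWire/ProcUnwire/ProcDestroy
         // functions declared later in this header; 'thr' is a hypothetical
         // ThreadState of the thread being wired.
         //
         //   Processor *proc = ProcCreate();
         //   ProcWire(proc, thr);    // thr->proc() now returns proc; events allowed
         //   ...                     // the thread runs, using proc's caches
         //   ProcUnwire(proc, thr);
         //   ProcDestroy(proc);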
    347 
    348 #ifndef SANITIZER_GO
     349 // ScopedGlobalProcessor temporarily sets up a global processor for the
     350 // current thread, if it does not have one. Intended for interceptors that
     351 // can run at the very end of a thread, when its processor is already destroyed.
    352 struct ScopedGlobalProcessor {
    353   ScopedGlobalProcessor();
    354   ~ScopedGlobalProcessor();
    355 };
    356 #endif
    357 
    358 // This struct is stored in TLS.
    359 struct ThreadState {
    360   FastState fast_state;
     361   // The synch epoch represents the thread's epoch before the last
     362   // synchronization action. It allows us to reduce the number of shadow
     363   // state updates. For example, if fast_synch_epoch=100 and the last write
     364   // to addr X was at epoch=150, and we are processing a write to X from the
     365   // same thread at epoch=200, we do nothing, because both writes happen in
     366   // the same 'synch epoch'. That is, if another memory access does not race
     367   // with the former write, it does not race with the latter either.
     368   // QUESTION: can we squeeze this into ThreadState::Fast?
     369   // E.g. ThreadState::Fast is 44 bits; 32 could be taken by synch_epoch and
     370   // 12 by the epoch counted since the last synchronization.
     371   // This way we could save one load from TLS.
    372   u64 fast_synch_epoch;
     373   // This is a slow-path flag. On the fast path, fast_state.GetIgnoreBit()
     374   // is read instead. We do not distinguish between ignoring reads and
     375   // writes for better performance.
    376   int ignore_reads_and_writes;
    377   int ignore_sync;
    378   // Go does not support ignores.
    379 #ifndef SANITIZER_GO
    380   IgnoreSet mop_ignore_set;
    381   IgnoreSet sync_ignore_set;
    382 #endif
     383   // C/C++ uses a fixed-size shadow stack embedded into the Trace.
     384   // Go uses a malloc-allocated shadow stack with dynamic size.
    385   uptr *shadow_stack;
    386   uptr *shadow_stack_end;
    387   uptr *shadow_stack_pos;
    388   u64 *racy_shadow_addr;
    389   u64 racy_state[2];
    390   MutexSet mset;
    391   ThreadClock clock;
    392 #ifndef SANITIZER_GO
    393   Vector<JmpBuf> jmp_bufs;
    394   int ignore_interceptors;
    395 #endif
    396 #if TSAN_COLLECT_STATS
    397   u64 stat[StatCnt];
    398 #endif
    399   const int tid;
    400   const int unique_id;
    401   bool in_symbolizer;
    402   bool in_ignored_lib;
    403   bool is_inited;
    404   bool is_dead;
    405   bool is_freeing;
    406   bool is_vptr_access;
    407   const uptr stk_addr;
    408   const uptr stk_size;
    409   const uptr tls_addr;
    410   const uptr tls_size;
    411   ThreadContext *tctx;
    412 
    413 #if SANITIZER_DEBUG && !SANITIZER_GO
    414   InternalDeadlockDetector internal_deadlock_detector;
    415 #endif
    416   DDLogicalThread *dd_lt;
    417 
    418   // Current wired Processor, or nullptr. Required to handle any events.
    419   Processor *proc1;
    420 #ifndef SANITIZER_GO
    421   Processor *proc() { return proc1; }
    422 #else
    423   Processor *proc();
    424 #endif
    425 
    426   atomic_uintptr_t in_signal_handler;
    427   ThreadSignalContext *signal_ctx;
    428 
    429 #ifndef SANITIZER_GO
    430   u32 last_sleep_stack_id;
    431   ThreadClock last_sleep_clock;
    432 #endif
    433 
    434   // Set in regions of runtime that must be signal-safe and fork-safe.
    435   // If set, malloc must not be called.
    436   int nomalloc;
    437 
    438   const ReportDesc *current_report;
    439 
    440   explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
    441                        unsigned reuse_count,
    442                        uptr stk_addr, uptr stk_size,
    443                        uptr tls_addr, uptr tls_size);
    444 };
    445 
    446 #ifndef SANITIZER_GO
    447 #if SANITIZER_MAC || SANITIZER_ANDROID
    448 ThreadState *cur_thread();
    449 void cur_thread_finalize();
    450 #else
    451 __attribute__((tls_model("initial-exec")))
    452 extern THREADLOCAL char cur_thread_placeholder[];
    453 INLINE ThreadState *cur_thread() {
    454   return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
    455 }
    456 INLINE void cur_thread_finalize() { }
    457 #endif  // SANITIZER_MAC || SANITIZER_ANDROID
    458 #endif  // SANITIZER_GO
    459 
    460 class ThreadContext : public ThreadContextBase {
    461  public:
    462   explicit ThreadContext(int tid);
    463   ~ThreadContext();
    464   ThreadState *thr;
    465   u32 creation_stack_id;
    466   SyncClock sync;
     467   // Epoch at which the thread started.
     468   // If we see an event from the thread stamped with an older epoch,
     469   // the event is from a dead thread that shared its tid with this thread.
    470   u64 epoch0;
    471   u64 epoch1;
    472 
    473   // Override superclass callbacks.
    474   void OnDead() override;
    475   void OnJoined(void *arg) override;
    476   void OnFinished() override;
    477   void OnStarted(void *arg) override;
    478   void OnCreated(void *arg) override;
    479   void OnReset() override;
    480   void OnDetached(void *arg) override;
    481 };
    482 
    483 struct RacyStacks {
    484   MD5Hash hash[2];
    485   bool operator==(const RacyStacks &other) const {
    486     if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
    487       return true;
    488     if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
    489       return true;
    490     return false;
    491   }
    492 };
    493 
    494 struct RacyAddress {
    495   uptr addr_min;
    496   uptr addr_max;
    497 };
    498 
    499 struct FiredSuppression {
    500   ReportType type;
    501   uptr pc_or_addr;
    502   Suppression *supp;
    503 };
    504 
    505 struct Context {
    506   Context();
    507 
    508   bool initialized;
    509   bool after_multithreaded_fork;
    510 
    511   MetaMap metamap;
    512 
    513   Mutex report_mtx;
    514   int nreported;
    515   int nmissed_expected;
    516   atomic_uint64_t last_symbolize_time_ns;
    517 
    518   void *background_thread;
    519   atomic_uint32_t stop_background_thread;
    520 
    521   ThreadRegistry *thread_registry;
    522 
    523   Mutex racy_mtx;
    524   Vector<RacyStacks> racy_stacks;
    525   Vector<RacyAddress> racy_addresses;
     526   // The number of fired suppressions may be large, so they get their own mutex.
    527   Mutex fired_suppressions_mtx;
    528   InternalMmapVector<FiredSuppression> fired_suppressions;
    529   DDetector *dd;
    530 
    531   ClockAlloc clock_alloc;
    532 
    533   Flags flags;
    534 
    535   u64 stat[StatCnt];
    536   u64 int_alloc_cnt[MBlockTypeCount];
    537   u64 int_alloc_siz[MBlockTypeCount];
    538 };
    539 
    540 extern Context *ctx;  // The one and the only global runtime context.
    541 
    542 struct ScopedIgnoreInterceptors {
    543   ScopedIgnoreInterceptors() {
    544 #ifndef SANITIZER_GO
    545     cur_thread()->ignore_interceptors++;
    546 #endif
    547   }
    548 
    549   ~ScopedIgnoreInterceptors() {
    550 #ifndef SANITIZER_GO
    551     cur_thread()->ignore_interceptors--;
    552 #endif
    553   }
    554 };
    555 
    556 class ScopedReport {
    557  public:
    558   explicit ScopedReport(ReportType typ);
    559   ~ScopedReport();
    560 
    561   void AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
    562                        const MutexSet *mset);
    563   void AddStack(StackTrace stack, bool suppressable = false);
    564   void AddThread(const ThreadContext *tctx, bool suppressable = false);
    565   void AddThread(int unique_tid, bool suppressable = false);
    566   void AddUniqueTid(int unique_tid);
    567   void AddMutex(const SyncVar *s);
    568   u64 AddMutex(u64 id);
    569   void AddLocation(uptr addr, uptr size);
    570   void AddSleep(u32 stack_id);
    571   void SetCount(int count);
    572 
    573   const ReportDesc *GetReport() const;
    574 
    575  private:
    576   ReportDesc *rep_;
    577   // Symbolizer makes lots of intercepted calls. If we try to process them,
    578   // at best it will cause deadlocks on internal mutexes.
    579   ScopedIgnoreInterceptors ignore_interceptors_;
    580 
    581   void AddDeadMutex(u64 id);
    582 
    583   ScopedReport(const ScopedReport&);
    584   void operator = (const ScopedReport&);
    585 };
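         // Illustrative sketch (not part of the runtime): rough shape of how a race
         // report could be assembled with ScopedReport and emitted via OutputReport
         // (declared below). 'addr', 's', 'stack', 'mset', 'size', 'tctx' and 'thr'
         // are hypothetical values gathered by the reporting path, and ReportTypeRace
         // is assumed from tsan_report.h.
         //
         //   ScopedReport rep(ReportTypeRace);
         //   rep.SetCount(2);
         //   rep.AddMemoryAccess(addr, s, stack, mset);  // one call per racy access
         //   rep.AddThread(tctx);
         //   rep.AddLocation(addr, size);
         //   OutputReport(thr, rep);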
    586 
    587 void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
    588                   MutexSet *mset);
    589 
    590 template<typename StackTraceTy>
    591 void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack) {
    592   uptr size = thr->shadow_stack_pos - thr->shadow_stack;
    593   uptr start = 0;
    594   if (size + !!toppc > kStackTraceMax) {
    595     start = size + !!toppc - kStackTraceMax;
    596     size = kStackTraceMax - !!toppc;
    597   }
    598   stack->Init(&thr->shadow_stack[start], size, toppc);
    599 }
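         // Worked example for the truncation above, with hypothetical numbers:
         // if the shadow stack currently holds 300 frames, toppc != 0 and
         // kStackTraceMax == 256, then start = 300 + 1 - 256 = 45 and size = 255,
         // so the resulting trace keeps the newest 255 frames plus toppc on top.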
    600 
    601 
    602 #if TSAN_COLLECT_STATS
    603 void StatAggregate(u64 *dst, u64 *src);
    604 void StatOutput(u64 *stat);
    605 #endif
    606 
    607 void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
    608 #if TSAN_COLLECT_STATS
    609   thr->stat[typ] += n;
    610 #endif
    611 }
    612 void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
    613 #if TSAN_COLLECT_STATS
    614   thr->stat[typ] = n;
    615 #endif
    616 }
    617 
    618 void MapShadow(uptr addr, uptr size);
    619 void MapThreadTrace(uptr addr, uptr size, const char *name);
    620 void DontNeedShadowFor(uptr addr, uptr size);
    621 void InitializeShadowMemory();
    622 void InitializeInterceptors();
    623 void InitializeLibIgnore();
    624 void InitializeDynamicAnnotations();
    625 
    626 void ForkBefore(ThreadState *thr, uptr pc);
    627 void ForkParentAfter(ThreadState *thr, uptr pc);
    628 void ForkChildAfter(ThreadState *thr, uptr pc);
    629 
    630 void ReportRace(ThreadState *thr);
    631 bool OutputReport(ThreadState *thr, const ScopedReport &srep);
    632 bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
    633 bool IsExpectedReport(uptr addr, uptr size);
    634 void PrintMatchedBenignRaces();
    635 
    636 #if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
    637 # define DPrintf Printf
    638 #else
    639 # define DPrintf(...)
    640 #endif
    641 
    642 #if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
    643 # define DPrintf2 Printf
    644 #else
    645 # define DPrintf2(...)
    646 #endif
    647 
    648 u32 CurrentStackId(ThreadState *thr, uptr pc);
    649 ReportStack *SymbolizeStackId(u32 stack_id);
    650 void PrintCurrentStack(ThreadState *thr, uptr pc);
    651 void PrintCurrentStackSlow(uptr pc);  // uses libunwind
    652 
    653 void Initialize(ThreadState *thr);
    654 int Finalize(ThreadState *thr);
    655 
    656 void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
    657 void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);
    658 
    659 void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    660     int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
    661 void MemoryAccessImpl(ThreadState *thr, uptr addr,
    662     int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    663     u64 *shadow_mem, Shadow cur);
    664 void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    665     uptr size, bool is_write);
    666 void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    667     uptr size, uptr step, bool is_write);
    668 void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    669     int size, bool kAccessIsWrite, bool kIsAtomic);
    670 
    671 const int kSizeLog1 = 0;
    672 const int kSizeLog2 = 1;
    673 const int kSizeLog4 = 2;
    674 const int kSizeLog8 = 3;
    675 
    676 void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
    677                                      uptr addr, int kAccessSizeLog) {
    678   MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
    679 }
    680 
    681 void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
    682                                       uptr addr, int kAccessSizeLog) {
    683   MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
    684 }
    685 
    686 void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
    687                                            uptr addr, int kAccessSizeLog) {
    688   MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
    689 }
    690 
    691 void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
    692                                             uptr addr, int kAccessSizeLog) {
    693   MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
    694 }
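         // Illustrative sketch (not part of the runtime): what the compiler
         // instrumentation of a plain 4-byte load and an 8-byte store roughly maps to
         // in terms of the helpers above ('thr', 'pc', 'p32' and 'p64' are
         // hypothetical; the real entry points are the __tsan_read*/__tsan_write*
         // functions declared in tsan_interface.h).
         //
         //   int v = *p32;    // -> MemoryRead(thr, pc, (uptr)p32, kSizeLog4)
         //   *p64 = x;        // -> MemoryWrite(thr, pc, (uptr)p64, kSizeLog8)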
    695 
    696 void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
    697 void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
    698 void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
    699 
    700 void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
    701 void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
    702 void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
    703 void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);
    704 
    705 void FuncEntry(ThreadState *thr, uptr pc);
    706 void FuncExit(ThreadState *thr);
    707 
    708 int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
    709 void ThreadStart(ThreadState *thr, int tid, uptr os_id);
    710 void ThreadFinish(ThreadState *thr);
    711 int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
    712 void ThreadJoin(ThreadState *thr, uptr pc, int tid);
    713 void ThreadDetach(ThreadState *thr, uptr pc, int tid);
    714 void ThreadFinalize(ThreadState *thr);
    715 void ThreadSetName(ThreadState *thr, const char *name);
    716 int ThreadCount(ThreadState *thr);
    717 void ProcessPendingSignals(ThreadState *thr);
    718 
    719 Processor *ProcCreate();
    720 void ProcDestroy(Processor *proc);
    721 void ProcWire(Processor *proc, ThreadState *thr);
    722 void ProcUnwire(Processor *proc, ThreadState *thr);
    723 
    724 void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
    725                  bool rw, bool recursive, bool linker_init);
    726 void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
    727 void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1,
    728                bool try_lock = false);
    729 int  MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
    730 void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool try_lock = false);
    731 void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
    732 void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
    733 void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD
    734 void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);
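         // Illustrative sketch (not part of the runtime): how a pthread_mutex
         // interceptor might drive the mutex events above; 'thr', 'pc' and 'm' are
         // hypothetical, and this is a simplification of what the real interceptors do.
         //
         //   // after the real pthread_mutex_lock(m) succeeds:
         //   MutexLock(thr, pc, (uptr)m);
         //   // before the real pthread_mutex_unlock(m):
         //   MutexUnlock(thr, pc, (uptr)m);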
    735 
    736 void Acquire(ThreadState *thr, uptr pc, uptr addr);
     737 // AcquireGlobal synchronizes the current thread with all other threads.
     738 // In terms of the happens-before relation, it draws a HB edge from all
     739 // threads (from wherever they happen to be executing right now) to the
     740 // current thread. We use it to handle Go finalizers: the finalizer goroutine
     741 // executes AcquireGlobal right before executing the finalizers. This provides
     742 // a coarse but simple approximation of the actually required synchronization.
    743 void AcquireGlobal(ThreadState *thr, uptr pc);
    744 void Release(ThreadState *thr, uptr pc, uptr addr);
    745 void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
    746 void AfterSleep(ThreadState *thr, uptr pc);
    747 void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
    748 void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
    749 void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
    750 void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
    751 
     752 // The hacky call uses a custom calling convention and an assembly thunk.
     753 // It is considerably faster than a normal call for the caller
     754 // if it is not executed (it is intended for slow paths from hot functions).
     755 // The trick is that the call preserves all registers and the compiler
     756 // does not treat it as a call.
     757 // If it does not work for you, use a normal call.
    758 #if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC
     759 // The caller may not create a stack frame for itself at all,
     760 // so we create a reserve stack frame for it (1024 bytes must be enough).
    761 #define HACKY_CALL(f) \
    762   __asm__ __volatile__("sub $1024, %%rsp;" \
    763                        CFI_INL_ADJUST_CFA_OFFSET(1024) \
    764                        ".hidden " #f "_thunk;" \
    765                        "call " #f "_thunk;" \
    766                        "add $1024, %%rsp;" \
    767                        CFI_INL_ADJUST_CFA_OFFSET(-1024) \
    768                        ::: "memory", "cc");
    769 #else
    770 #define HACKY_CALL(f) f()
    771 #endif
    772 
    773 void TraceSwitch(ThreadState *thr);
    774 uptr TraceTopPC(ThreadState *thr);
    775 uptr TraceSize();
    776 uptr TraceParts();
    777 Trace *ThreadTrace(int tid);
    778 
    779 extern "C" void __tsan_trace_switch();
    780 void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
    781                                         EventType typ, u64 addr) {
    782   if (!kCollectHistory)
    783     return;
    784   DCHECK_GE((int)typ, 0);
    785   DCHECK_LE((int)typ, 7);
    786   DCHECK_EQ(GetLsb(addr, 61), addr);
    787   StatInc(thr, StatEvents);
    788   u64 pos = fs.GetTracePos();
    789   if (UNLIKELY((pos % kTracePartSize) == 0)) {
    790 #ifndef SANITIZER_GO
    791     HACKY_CALL(__tsan_trace_switch);
    792 #else
    793     TraceSwitch(thr);
    794 #endif
    795   }
    796   Event *trace = (Event*)GetThreadTrace(fs.tid());
    797   Event *evp = &trace[pos];
    798   Event ev = (u64)addr | ((u64)typ << 61);
    799   *evp = ev;
    800 }
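         // Illustrative sketch (not part of the runtime): the event encoding used by
         // TraceAddEvent packs the 3-bit event type into the top bits and the
         // (<= 61-bit) address or pc into the rest; 'pc' here is a hypothetical value
         // and EventTypeFuncEnter is assumed from tsan_trace.h.
         //
         //   u64 pc = 0x4005d0;
         //   Event ev = (u64)pc | ((u64)EventTypeFuncEnter << 61);
         //   // type:  ev >> 61
         //   // pc:    GetLsb(ev, 61)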
    801 
    802 #ifndef SANITIZER_GO
    803 uptr ALWAYS_INLINE HeapEnd() {
    804   return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
    805 }
    806 #endif
    807 
    808 }  // namespace __tsan
    809 
    810 #endif  // TSAN_RTL_H
    811