//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers included only into platform-specific files (*).
//
//  (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector.h"
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_mutexset.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {

// Descriptor of user's memory block.
struct MBlock {
  /*
  u64 mtx : 1;  // must be first
  u64 lst : 44;
  u64 stk : 31;  // on word boundary
  u64 tid : kTidBits;
  u64 siz : 128 - 1 - 31 - 44 - kTidBits;  // 39
  */
  u64 raw[2];

  void Init(uptr siz, u32 tid, u32 stk) {
    raw[0] = raw[1] = 0;
    raw[1] |= (u64)siz << ((1 + 44 + 31 + kTidBits) % 64);
    raw[1] |= (u64)tid << ((1 + 44 + 31) % 64);
    raw[0] |= (u64)stk << (1 + 44);
    raw[1] |= (u64)stk >> (64 - 44 - 1);
    DCHECK_EQ(Size(), siz);
    DCHECK_EQ(Tid(), tid);
    DCHECK_EQ(StackId(), stk);
  }

  u32 Tid() const {
    return GetLsb(raw[1] >> ((1 + 44 + 31) % 64), kTidBits);
  }

  uptr Size() const {
    return raw[1] >> ((1 + 31 + 44 + kTidBits) % 64);
  }

  u32 StackId() const {
    return (raw[0] >> (1 + 44)) | GetLsb(raw[1] << (64 - 44 - 1), 31);
  }

  SyncVar *ListHead() const {
    return (SyncVar*)(GetLsb(raw[0] >> 1, 44) << 3);
  }

  void ListPush(SyncVar *v) {
    SyncVar *lst = ListHead();
    v->next = lst;
    u64 x = (u64)v ^ (u64)lst;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), v);
  }

  SyncVar *ListPop() {
    SyncVar *lst = ListHead();
    SyncVar *nxt = lst->next;
    lst->next = 0;
    u64 x = (u64)lst ^ (u64)nxt;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), nxt);
    return lst;
  }

  void ListReset() {
    SyncVar *lst = ListHead();
    u64 x = (u64)lst;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), 0);
  }

  void Lock();
  void Unlock();
  typedef GenericScopedLock<MBlock> ScopedLock;
};
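
// Illustrative sketch (not part of the runtime, values hypothetical):
// the packed fields round-trip through Init() and the accessors above.
//   MBlock b;
//   b.Init(/*siz=*/16, /*tid=*/1, /*stk=*/42);
//   // b.Size() == 16, b.Tid() == 1, b.StackId() == 42,
//   // and b.ListHead() == 0 (the lst bits start out zeroed).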

#ifndef TSAN_GO
#if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
const uptr kAllocatorSpace = 0x7d0000000000ULL;
#else
const uptr kAllocatorSpace = 0x7d0000000000ULL;
#endif
const uptr kAllocatorSize  =  0x10000000000ULL;  // 1T.

struct MapUnmapCallback;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
    DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
Allocator *allocator();
#endif

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2);

// FastState (from most significant bit):
//   ignore          : 1
//   tid             : kTidBits
//   epoch           : kClkBits
//   unused          : -
//   history_size    : 3
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch << kClkShift;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  }

  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 raw() const {
    return x_;
  }

  u64 tid() const {
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    return res;
  }

  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = (x_ << (kTidBits + 1)) >> (64 - kClkBits);
    return res;
  }

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1 << kClkShift;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }

  void SetHistorySize(int hs) {
    CHECK_GE(hs, 0);
    CHECK_LE(hs, 7);
    x_ = (x_ & ~7) | hs;
  }

  int GetHistorySize() const {
    return (int)(x_ & 7);
  }

  void ClearHistorySize() {
    x_ &= ~7;
  }

  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // When hs == 0, the trace consists of 2 parts.
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
  }

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const int kClkShift = kTidShift - kClkBits;
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
  u64 x_;
};
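
// Illustrative sketch (not part of the runtime, values hypothetical):
//   FastState fs(/*tid=*/3, /*epoch=*/100);
//   fs.IncrementEpoch();  // adds 1 << kClkShift, so fs.epoch() == 101
//   fs.SetIgnoreBit();    // sets the msb; fs.tid() still returns 3, while
//                         // fs.TidWithIgnore() returns 3 | (1ull << kTidBits)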

// Shadow (from most significant bit):
//   freed           : 1
//   tid             : kTidBits
//   epoch           : kClkBits
//   is_atomic       : 1
//   is_read         : 1
//   size_log        : 2
//   addr0           : 3
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x)
      : FastState(x) {
  }

  explicit Shadow(const FastState &s)
      : FastState(s.x_) {
    ClearHistorySize();
  }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ(x_ & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= (kAccessSizeLog << 3) | addr0;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    if (!kAccessIsWrite)
      x_ |= kReadBit;
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  }

  void SetAtomic(bool kIsAtomic) {
    DCHECK(!IsAtomic());
    if (kIsAtomic)
      x_ |= kAtomicBit;
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  }

  bool IsAtomic() const {
    return x_ & kAtomicBit;
  }

  bool IsZero() const {
    return x_ == 0;
  }

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }

  static inline bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = (s1.x_ ^ s2.x_) & 31;
    return masked_xor == 0;
  }

  static inline bool TwoRangesIntersect(Shadow s1, Shadow s2,
      unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + s1.size() > s2.addr0()) return true;
      if (s1.size() > -diff) res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff) res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s2, s1));
    return res;
  }

  // The idea behind the offset is as follows.
  // Consider that we have 8 bools contained within a single 8-byte block
  // (mapped to a single shadow "cell"). Now consider that we write to the
  // bools from a single thread (which we consider the common case).
  // Without offsetting, each access would have to scan 4 shadow values on
  // average to find the corresponding shadow value for the bool.
  // With offsetting we start scanning the shadow with the offset so that
  // each access hits the necessary shadow straight off (at least in the
  // expected optimistic case).
  // This logic works seamlessly for any layout of user data. For example,
  // if user data is {int, short, char, char}, then accesses to the int are
  // offset to 0, the short to 4, the 1st char to 6 and the 2nd char to 7.
  // Hopefully, accesses from a single thread won't need to scan all 8
  // shadow values.
  unsigned ComputeSearchOffset() {
    return x_ & 7;
  }
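
  // Worked example (illustrative): with the 8 bools above, a write to
  // &bools[5] has ComputeSearchOffset() == 5, so the scan of the cell's
  // shadow values starts at slot 5 and, in the optimistic single-thread
  // case, hits the matching shadow value immediately instead of always
  // starting from slot 0.
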
  u64 addr0() const { return x_ & 7; }
  u64 size() const { return 1ull << size_log(); }
  bool IsWrite() const { return !IsRead(); }
  bool IsRead() const { return x_ & kReadBit; }

  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise becomes inaccessible) we write
  // shadow values with the tid/epoch of the free and the freed bit set.
  // During memory access processing the freed bit is treated as the msb of
  // the tid. So any access races with a shadow value that has the freed bit
  // set (it is as if it were a write from a thread with which we never
  // synchronized before).
  // This allows us to detect accesses to freed memory w/o additional
  // overhead in memory access processing and at the same time restore the
  // tid/epoch of the free.
  void MarkAsFreed() {
     x_ |= kFreedBit;
  }

  bool IsFreed() const {
    return x_ & kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }
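
  // Illustrative note (not part of the runtime): since the freed bit is
  // compared as part of the tid bits, a shadow value stored at free() time
  // never satisfies TidsAreEqual() with the shadow of any live thread's
  // access, so every subsequent access to the memory conflicts with it and
  // is reported, while the stored tid/epoch still identify the free.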

  bool IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    // analyzes the 5th bit (is_read) and the 6th bit (is_atomic)
    bool v = x_ & u64(((kIsWrite ^ 1) << kReadShift)
        | (kIsAtomic << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    return v;
  }

  bool IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    return v;
  }

  bool IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    return v;
  }

 private:
  static const u64 kReadShift   = 5;
  static const u64 kReadBit     = 1ull << kReadShift;
  static const u64 kAtomicShift = 6;
  static const u64 kAtomicBit   = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> 3) & 3; }

  static bool TwoRangesIntersectSLOW(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};
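
// Illustrative sketch (not part of the runtime, values hypothetical):
// encoding a plain 4-byte write by thread 3 at epoch 100, at offset 4
// within an 8-byte cell (kSizeLog4 == 2, defined below):
//   Shadow cur(FastState(/*tid=*/3, /*epoch=*/100));
//   cur.SetAddr0AndSizeLog(/*addr0=*/4, kSizeLog4);
//   cur.SetWrite(true);    // leaves kReadBit clear
//   cur.SetAtomic(false);  // leaves kAtomicBit clear
//   // cur.addr0() == 4, cur.size() == 4, cur.IsWrite(), !cur.IsAtomic()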

struct SignalContext;

// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows us to reduce the number of shadow state updates.
  // For example, fast_synch_epoch=100, last write to addr X was at epoch=150,
  // if we are processing a write to X from the same thread at epoch=200,
  // we do nothing, because both writes happen in the same 'synch epoch'.
  // That is, if another memory access does not race with the former write,
  // it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits, 32 are taken by synch_epoch and 12 are
  // taken by the epoch between synchs.
  // This way we can save one load from tls.
  u64 fast_synch_epoch;
  // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  u64 racy_state[2];
  Trace trace;
#ifndef TSAN_GO
  // C/C++ uses an embedded shadow stack of fixed size.
  uptr shadow_stack[kShadowStackSize];
#else
  // Go uses a satellite shadow stack of dynamic size.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
#endif
  MutexSet mset;
  ThreadClock clock;
#ifndef TSAN_GO
  AllocatorCache alloc_cache;
#endif
  u64 stat[StatCnt];
  const int tid;
  const int unique_id;
  int in_rtl;
  bool in_symbolizer;
  bool is_alive;
  bool is_freeing;
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;

  DeadlockDetector deadlock_detector;

  bool in_signal_handler;
  SignalContext *signal_ctx;

#ifndef TSAN_GO
  u32 last_sleep_stack_id;
  ThreadClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};

Context *CTX();

#ifndef TSAN_GO
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}
#endif

// Information about a thread that is held for some time after its termination.
struct ThreadDeadInfo {
  Trace trace;
};

class ThreadContext : public ThreadContextBase {
 public:
  explicit ThreadContext(int tid);
  ~ThreadContext();
  ThreadState *thr;
#ifdef TSAN_GO
  StackTrace creation_stack;
#else
  u32 creation_stack_id;
#endif
  SyncClock sync;
  // Epoch at which the thread started.
  // If we see an event from the thread stamped by an older epoch,
  // the event is from a dead thread that shared the tid with this thread.
  u64 epoch0;
  u64 epoch1;
  ThreadDeadInfo *dead_info;

  // Override superclass callbacks.
  void OnDead();
  void OnJoined(void *arg);
  void OnFinished();
  void OnStarted(void *arg);
  void OnCreated(void *arg);
  void OnReset(void *arg);
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct FiredSuppression {
  ReportType type;
  uptr pc;
};

struct Context {
  Context();

  bool initialized;

  SyncTab synctab;

  Mutex report_mtx;
  int nreported;
  int nmissed_expected;

  ThreadRegistry *thread_registry;

  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  Vector<FiredSuppression> fired_suppressions;

  Flags flags;

  u64 stat[StatCnt];
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};

class ScopedInRtl {
 public:
  ScopedInRtl();
  ~ScopedInRtl();
 private:
  ThreadState *thr_;
  int in_rtl_;
  int errno_;
};

class ScopedReport {
 public:
  explicit ScopedReport(ReportType typ);
  ~ScopedReport();

  void AddStack(const StackTrace *stack);
  void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack,
                       const MutexSet *mset);
  void AddThread(const ThreadContext *tctx);
  void AddMutex(const SyncVar *s);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(u32 stack_id);

  const ReportDesc *GetReport() const;

 private:
  Context *ctx_;
  ReportDesc *rep_;

  void AddMutex(u64 id);

  ScopedReport(const ScopedReport&);
  void operator = (const ScopedReport&);
};

void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset);

void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
void ALWAYS_INLINE INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
  if (kCollectStats)
    thr->stat[typ] += n;
}
void ALWAYS_INLINE INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
  if (kCollectStats)
    thr->stat[typ] = n;
}

void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size);
void DontNeedShadowFor(uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeDynamicAnnotations();

void ReportRace(ThreadState *thr);
bool OutputReport(Context *ctx,
                  const ScopedReport &srep,
                  const ReportStack *suppress_stack1 = 0,
                  const ReportStack *suppress_stack2 = 0);
bool IsFiredSuppression(Context *ctx,
                        const ScopedReport &srep,
                        const StackTrace &trace);
bool IsExpectedReport(uptr addr, uptr size);
bool FrameIsInternal(const ReportStack *frame);
ReportStack *SkipTsanInternalFrames(ReportStack *ent);

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif

u32 CurrentStackId(ThreadState *thr, uptr pc);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow();  // uses libunwind

void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);

SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
                     bool write_lock, bool create);
SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write);
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    uptr size, uptr step, bool is_write);

const int kSizeLog1 = 0;
const int kSizeLog2 = 1;
const int kSizeLog4 = 2;
const int kSizeLog8 = 3;

void ALWAYS_INLINE INLINE MemoryRead(ThreadState *thr, uptr pc,
                                     uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
}

void ALWAYS_INLINE INLINE MemoryWrite(ThreadState *thr, uptr pc,
                                      uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
}

void ALWAYS_INLINE INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
                                           uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
}

void ALWAYS_INLINE INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
                                            uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
}
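
// Illustrative note (not part of the runtime): compiler instrumentation
// conceptually lowers each application access to one of the helpers above.
// E.g. a plain 32-bit store `*p = v;` becomes, in effect (pc being the
// caller's program counter):
//   MemoryWrite(cur_thread(), pc, (uptr)p, kSizeLog4);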

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
void IgnoreCtl(ThreadState *thr, bool write, bool begin);

void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid, uptr os_id);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);

void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
void MutexLock(ThreadState *thr, uptr pc, uptr addr);
void MutexUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);

void Acquire(ThreadState *thr, uptr pc, uptr addr);
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);

// The hacky call uses a custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if TSAN_DEBUG == 0
// The caller may not create the stack frame for itself at all,
// so we create a reserve stack frame for it (1024b must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $1024, %%rsp;" \
                       "/*.cfi_adjust_cfa_offset 1024;*/" \
                       ".hidden " #f "_thunk;" \
                       "call " #f "_thunk;" \
                       "add $1024, %%rsp;" \
                       "/*.cfi_adjust_cfa_offset -1024;*/" \
                       ::: "memory", "cc");
#else
#define HACKY_CALL(f) f()
#endif

void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);
uptr TraceSize();
uptr TraceParts();

extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE INLINE TraceAddEvent(ThreadState *thr, FastState fs,
                                        EventType typ, u64 addr) {
  DCHECK_GE((int)typ, 0);
  DCHECK_LE((int)typ, 7);
  DCHECK_EQ(GetLsb(addr, 61), addr);
  StatInc(thr, StatEvents);
  u64 pos = fs.GetTracePos();
  if (UNLIKELY((pos % kTracePartSize) == 0)) {
#ifndef TSAN_GO
    HACKY_CALL(__tsan_trace_switch);
#else
    TraceSwitch(thr);
#endif
  }
  Event *trace = (Event*)GetThreadTrace(fs.tid());
  Event *evp = &trace[pos];
  Event ev = (u64)addr | ((u64)typ << 61);
  *evp = ev;
}
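
// Illustrative note (not part of the runtime): an Event packs the type into
// the top 3 bits and the address/pc into the low 61 bits (see the encoding
// in TraceAddEvent above), so it can be decoded as, e.g.:
//   EventType typ = (EventType)(ev >> 61);
//   uptr addr = (uptr)GetLsb(ev, 61);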

}  // namespace __tsan

#endif  // TSAN_RTL_H