//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers are included only in platform-specific files (*).
//
//  (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector.h"
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_mutexset.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {

// Descriptor of user's memory block.
struct MBlock {
  /*
  u64 mtx : 1;  // must be first
  u64 lst : 44;
  u64 stk : 31;  // on word boundary
  u64 tid : kTidBits;
  u64 siz : 128 - 1 - 31 - 44 - kTidBits;  // 39
  */
  u64 raw[2];

  void Init(uptr siz, u32 tid, u32 stk) {
    raw[0] = raw[1] = 0;
    raw[1] |= (u64)siz << ((1 + 44 + 31 + kTidBits) % 64);
    raw[1] |= (u64)tid << ((1 + 44 + 31) % 64);
    raw[0] |= (u64)stk << (1 + 44);
    raw[1] |= (u64)stk >> (64 - 44 - 1);
    DCHECK_EQ(Size(), siz);
    DCHECK_EQ(Tid(), tid);
    DCHECK_EQ(StackId(), stk);
  }
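
  // A worked decoding of the packing above, assuming kTidBits == 13 (implied
  // by the "// 39" size comment: 128 - 1 - 31 - 44 - 13 == 39). Viewing
  // raw[1]:raw[0] as one 128-bit word:
  //   bit 0        : mtx
  //   bits 1..44   : lst (a SyncVar*, stored >> 3 since pointers are 8-byte
  //                  aligned)
  //   bits 45..75  : stk (19 bits in raw[0] plus 12 bits in raw[1])
  //   bits 76..88  : tid
  //   bits 89..127 : siz
  // The "% 64" in the shift amounts converts a bit position within the
  // 128-bit word into a bit position within raw[1].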

  u32 Tid() const {
    return GetLsb(raw[1] >> ((1 + 44 + 31) % 64), kTidBits);
  }

  uptr Size() const {
    return raw[1] >> ((1 + 31 + 44 + kTidBits) % 64);
  }

  u32 StackId() const {
    return (raw[0] >> (1 + 44)) | GetLsb(raw[1] << (64 - 44 - 1), 31);
  }

  SyncVar *ListHead() const {
    return (SyncVar*)(GetLsb(raw[0] >> 1, 44) << 3);
  }
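
  // The three list mutators below update the 44-bit lst field in place by
  // XORing raw[0] with (old_head ^ new_head): SyncVar pointers are 8-byte
  // aligned, so ptr >> 3 fits into 44 bits, and the "<< 1" places the field
  // just above the mtx bit.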

  void ListPush(SyncVar *v) {
    SyncVar *lst = ListHead();
    v->next = lst;
    u64 x = (u64)v ^ (u64)lst;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), v);
  }

  SyncVar *ListPop() {
    SyncVar *lst = ListHead();
    SyncVar *nxt = lst->next;
    lst->next = 0;
    u64 x = (u64)lst ^ (u64)nxt;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), nxt);
    return lst;
  }

  void ListReset() {
    SyncVar *lst = ListHead();
    u64 x = (u64)lst;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), 0);
  }

  void Lock();
  void Unlock();
  typedef GenericScopedLock<MBlock> ScopedLock;
};

#ifndef TSAN_GO
// The TSAN_COMPAT_SHADOW layout currently uses the same allocator placement
// as the default layout, so a single constant suffices.
const uptr kAllocatorSpace = 0x7d0000000000ULL;
const uptr kAllocatorSize  =  0x10000000000ULL;  // 1T.

struct MapUnmapCallback;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
    DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
Allocator *allocator();
#endif
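
// A minimal usage sketch of the combined allocator (illustrative only; the
// real call sites live in tsan_mman.cc and use the per-thread cache stored
// in ThreadState::alloc_cache):
//   AllocatorCache *cache = &thr->alloc_cache;
//   void *p = allocator()->Allocate(cache, size, /*alignment=*/8);
//   ...
//   allocator()->Deallocate(cache, p);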

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2);

const u64 kShadowRodata = (u64)-1;  // .rodata shadow marker

// FastState (from most significant bit):
//   ignore          : 1
//   tid             : kTidBits
//   epoch           : kClkBits
//   unused          : -
//   history_size    : 3
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch << kClkShift;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  }

  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 raw() const {
    return x_;
  }

  u64 tid() const {
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    return res;
  }

  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = (x_ << (kTidBits + 1)) >> (64 - kClkBits);
    return res;
  }

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += u64(1) << kClkShift;  // u64(1): the shift must happen in 64 bits.
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }

  void SetHistorySize(int hs) {
    CHECK_GE(hs, 0);
    CHECK_LE(hs, 7);
    x_ = (x_ & ~7) | hs;
  }

  int GetHistorySize() const {
    return (int)(x_ & 7);
  }

  void ClearHistorySize() {
    x_ &= ~7;
  }

  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // When hs == 0, the trace consists of 2 parts.
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
  }
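
  // E.g. with hs == 0 the mask is (1ull << (kTracePartSizeBits + 1)) - 1,
  // so the position wraps after two trace parts; each increment of hs
  // doubles the portion of the trace that is retained.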

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const int kClkShift = kTidShift - kClkBits;
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
  u64 x_;
};
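
// A concrete FastState layout, assuming kTidBits == 13 and kClkBits == 42
// (their values in tsan_defs.h at the time of writing):
//   kTidShift = 64 - 13 - 1 = 50, kClkShift = 50 - 42 = 8
//   bit  63     : ignore
//   bits 50..62 : tid
//   bits 8..49  : epoch
//   bits 3..7   : unused
//   bits 0..2   : history_size
// E.g. FastState(/*tid=*/5, /*epoch=*/1).raw() == (5ull << 50) | (1ull << 8).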

// Shadow (from most significant bit):
//   freed           : 1
//   tid             : kTidBits
//   epoch           : kClkBits
//   is_atomic       : 1
//   is_read         : 1
//   size_log        : 2
//   addr0           : 3
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x)
      : FastState(x) {
  }

  explicit Shadow(const FastState &s)
      : FastState(s.x_) {
    ClearHistorySize();
  }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ(x_ & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= (kAccessSizeLog << 3) | addr0;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    if (!kAccessIsWrite)
      x_ |= kReadBit;
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  }

  void SetAtomic(bool kIsAtomic) {
    DCHECK(!IsAtomic());
    if (kIsAtomic)
      x_ |= kAtomicBit;
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  }

  bool IsAtomic() const {
    return x_ & kAtomicBit;
  }

  bool IsZero() const {
    return x_ == 0;
  }

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }

  static inline bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = (s1.x_ ^ s2.x_) & 31;
    return masked_xor == 0;
  }

  static inline bool TwoRangesIntersect(Shadow s1, Shadow s2,
      unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + s1.size() > s2.addr0()) return true;
      if (s1.size() > -diff) res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff) res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s2, s1));
    return res;
  }

  // The idea behind the offset is as follows.
  // Consider that we have 8 bools contained within a single 8-byte block
  // (mapped to a single shadow "cell"). Now consider that we write to the
  // bools from a single thread (which we consider the common case).
  // Without offsetting, each access would have to scan 4 shadow values on
  // average to find the shadow value corresponding to the bool.
  // With offsetting we start scanning the shadow at the offset, so that
  // each access hits the necessary shadow straight off (at least in the
  // expected optimistic case).
  // This logic works seamlessly for any layout of user data. For example,
  // if user data is {int, short, char, char}, then accesses to the int are
  // offset to 0, to the short - 4, to the 1st char - 6, to the 2nd char - 7.
  // Hopefully, accesses from a single thread won't need to scan all 8 shadow
  // values.
  unsigned ComputeSearchOffset() const {
    return x_ & 7;
  }
  u64 addr0() const { return x_ & 7; }
  u64 size() const { return 1ull << size_log(); }
  bool IsWrite() const { return !IsRead(); }
  bool IsRead() const { return x_ & kReadBit; }

  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise becomes inaccessible) we write to
  // the shadow values with the tid/epoch of the free and with the freed bit
  // set. During memory access processing the freed bit is treated as the msb
  // of the tid (kFreedBit occupies the same bit as FastState::kIgnoreBit), so
  // any access races with a shadow value that has the freed bit set (it is as
  // if the write came from a thread with which we have never synchronized
  // before). This allows us to detect accesses to freed memory without
  // additional overhead in memory access processing, and at the same time to
  // restore the tid/epoch of the free.
  void MarkAsFreed() {
    x_ |= kFreedBit;
  }

  bool IsFreed() const {
    return x_ & kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }

  bool IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    // Analyzes the 5th bit (is_read) and the 6th bit (is_atomic).
    bool v = x_ & u64(((kIsWrite ^ 1) << kReadShift)
        | (kIsAtomic << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    return v;
  }

  bool IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    return v;
  }

  bool IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    return v;
  }
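
  // In the three predicates above, ((x_ >> kReadShift) & 3) packs
  // (is_atomic, is_read) into a 2-bit value ranking the access from
  // strongest to weakest: 0 = plain write, 1 = plain read, 2 = atomic write,
  // 3 = atomic read. The comparisons then rank the stored shadow access
  // against the current access.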

 private:
  static const u64 kReadShift   = 5;
  static const u64 kReadBit     = 1ull << kReadShift;
  static const u64 kAtomicShift = 6;
  static const u64 kAtomicBit   = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> 3) & 3; }

  static bool TwoRangesIntersectSLOW(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};

struct SignalContext;

struct JmpBuf {
  uptr sp;
  uptr mangled_sp;
  uptr *shadow_stack_pos;
};

// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows us to reduce the number of shadow state updates.
  // For example, if fast_synch_epoch=100 and the last write to addr X was at
  // epoch=150, and we are processing a write to X from the same thread at
  // epoch=200, we do nothing, because both writes happen in the same 'synch
  // epoch'. That is, if another memory access does not race with the former
  // write, it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits, 32 of which are taken by synch_epoch
  // and 12 by the epoch since the last synch.
  // This way we can save one load from TLS.
  u64 fast_synch_epoch;
  // This is a slow path flag. On the fast path, fast_state.GetIgnoreBit() is
  // read instead. We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  u64 racy_state[2];
#ifndef TSAN_GO
  // C/C++ uses an embedded shadow stack of fixed size.
  uptr shadow_stack[kShadowStackSize];
#else
  // Go uses a satellite shadow stack of dynamic size.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
#endif
  MutexSet mset;
  ThreadClock clock;
#ifndef TSAN_GO
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
  Vector<JmpBuf> jmp_bufs;
#endif
  u64 stat[StatCnt];
  const int tid;
  const int unique_id;
  int in_rtl;
  bool in_symbolizer;
  bool is_alive;
  bool is_freeing;
  bool is_vptr_access;
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;

  DeadlockDetector deadlock_detector;

  bool in_signal_handler;
  SignalContext *signal_ctx;

#ifndef TSAN_GO
  u32 last_sleep_stack_id;
  ThreadClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};

Context *CTX();

#ifndef TSAN_GO
extern THREADLOCAL char cur_thread_placeholder[];
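// cur_thread_placeholder (defined in tsan_rtl.cc) is raw thread-local
// storage; a ThreadState is constructed into it with placement new when a
// thread starts, which keeps the runtime free of static constructors per the
// ground rules above.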
INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}
#endif

class ThreadContext : public ThreadContextBase {
 public:
  explicit ThreadContext(int tid);
  ~ThreadContext();
  ThreadState *thr;
#ifdef TSAN_GO
  StackTrace creation_stack;
#else
  u32 creation_stack_id;
#endif
  SyncClock sync;
  // Epoch at which the thread started.
  // If we see an event from the thread stamped with an older epoch,
  // the event is from a dead thread that shared its tid with this thread.
  u64 epoch0;
  u64 epoch1;

  // Override superclass callbacks.
  void OnDead();
  void OnJoined(void *arg);
  void OnFinished();
  void OnStarted(void *arg);
  void OnCreated(void *arg);
  void OnReset();
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct FiredSuppression {
  ReportType type;
  uptr pc;
  Suppression *supp;
};

struct Context {
  Context();

  bool initialized;

  SyncTab synctab;

  Mutex report_mtx;
  int nreported;
  int nmissed_expected;
  atomic_uint64_t last_symbolize_time_ns;

  ThreadRegistry *thread_registry;

  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  // The number of fired suppressions may be large, hence the mmap-backed
  // vector.
  InternalMmapVector<FiredSuppression> fired_suppressions;

  Flags flags;

  u64 stat[StatCnt];
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};

class ScopedInRtl {
 public:
  ScopedInRtl();
  ~ScopedInRtl();
 private:
  ThreadState *thr_;
  int in_rtl_;
  int errno_;
};

class ScopedReport {
 public:
  explicit ScopedReport(ReportType typ);
  ~ScopedReport();

  void AddStack(const StackTrace *stack);
  void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack,
                       const MutexSet *mset);
  void AddThread(const ThreadContext *tctx);
  void AddMutex(const SyncVar *s);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(u32 stack_id);
  void SetCount(int count);

  const ReportDesc *GetReport() const;

 private:
  Context *ctx_;
  ReportDesc *rep_;

  void AddMutex(u64 id);

  ScopedReport(const ScopedReport&);
  void operator = (const ScopedReport&);
};

void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset);

void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
  if (kCollectStats)
    thr->stat[typ] += n;
}
void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
  if (kCollectStats)
    thr->stat[typ] = n;
}

void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size);
void DontNeedShadowFor(uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeDynamicAnnotations();

void ReportRace(ThreadState *thr);
bool OutputReport(Context *ctx,
                  const ScopedReport &srep,
                  const ReportStack *suppress_stack1 = 0,
                  const ReportStack *suppress_stack2 = 0,
                  const ReportLocation *suppress_loc = 0);
bool IsFiredSuppression(Context *ctx,
                        const ScopedReport &srep,
                        const StackTrace &trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();
bool FrameIsInternal(const ReportStack *frame);
ReportStack *SkipTsanInternalFrames(ReportStack *ent);

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif

u32 CurrentStackId(ThreadState *thr, uptr pc);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow();  // uses libunwind

void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);

SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
                     bool write_lock, bool create);
SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write);
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    uptr size, uptr step, bool is_write);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic);

const int kSizeLog1 = 0;
const int kSizeLog2 = 1;
const int kSizeLog4 = 2;
const int kSizeLog8 = 3;
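
// E.g. a 4-byte load at addr is recorded as
//   MemoryRead(thr, pc, addr, kSizeLog4);
// which forwards to MemoryAccess(thr, pc, addr, kSizeLog4, false, false)
// via the inline wrappers below.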

void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
                              uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
}

void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
                               uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
}

void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
                                    uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
}

void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
                                     uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
void IgnoreCtl(ThreadState *thr, bool write, bool begin);

void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid, uptr os_id);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);

void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1);
int  MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);

void Acquire(ThreadState *thr, uptr pc, uptr addr);
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);

// The hacky call uses a custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if TSAN_DEBUG == 0
// The caller may not create a stack frame for itself at all,
// so we create a reserve stack frame for it (1024 bytes must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $1024, %%rsp;" \
                       "/*.cfi_adjust_cfa_offset 1024;*/" \
                       ".hidden " #f "_thunk;" \
                       "call " #f "_thunk;" \
                       "add $1024, %%rsp;" \
                       "/*.cfi_adjust_cfa_offset -1024;*/" \
                       ::: "memory", "cc");
#else
#define HACKY_CALL(f) f()
#endif
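
// E.g. the fast path in TraceAddEvent() below reaches TraceSwitch() via
// HACKY_CALL(__tsan_trace_switch): the assembly thunk
// (__tsan_trace_switch_thunk, defined in tsan_rtl_amd64.S) saves all
// registers, calls the C++ implementation and restores them.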

void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);
uptr TraceSize();
uptr TraceParts();
Trace *ThreadTrace(int tid);

extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
                                 EventType typ, u64 addr) {
  DCHECK_GE((int)typ, 0);
  DCHECK_LE((int)typ, 7);
  DCHECK_EQ(GetLsb(addr, 61), addr);
  StatInc(thr, StatEvents);
  u64 pos = fs.GetTracePos();
  if (UNLIKELY((pos % kTracePartSize) == 0)) {
#ifndef TSAN_GO
    HACKY_CALL(__tsan_trace_switch);
#else
    TraceSwitch(thr);
#endif
  }
  Event *trace = (Event*)GetThreadTrace(fs.tid());
  Event *evp = &trace[pos];
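  // Pack the event into a single u64: the low 61 bits carry the payload
  // (hence the GetLsb(addr, 61) check above) and the top 3 bits carry the
  // EventType.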
  Event ev = (u64)addr | ((u64)typ << 61);
  *evp = ev;
}

}  // namespace __tsan

#endif  // TSAN_RTL_H