//===-- tsan_rtl_thread.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_sync.h"

namespace __tsan {

// ThreadContext implementation.

ThreadContext::ThreadContext(int tid)
  : ThreadContextBase(tid)
  , thr()
  , sync()
  , epoch0()
  , epoch1() {
}

#ifndef SANITIZER_GO
ThreadContext::~ThreadContext() {
}
#endif

void ThreadContext::OnDead() {
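  // The sync clock must already be empty at this point; it is reset in
  // OnJoined(), OnDetached() and OnStarted().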
  CHECK_EQ(sync.size(), 0);
}

void ThreadContext::OnJoined(void *arg) {
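  // The joining thread acquires the sync clock released by the finished
  // thread in OnFinished(), so everything the thread did before exiting
  // happens-before the join.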
  ThreadState *caller_thr = static_cast<ThreadState *>(arg);
  AcquireImpl(caller_thr, 0, &sync);
  sync.Reset(&caller_thr->proc()->clock_cache);
}

struct OnCreatedArgs {
  ThreadState *thr;
  uptr pc;
};

void ThreadContext::OnCreated(void *arg) {
  thr = 0;
  if (tid == 0)
    return;
  OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
  if (!args->thr)  // GCD workers don't have a parent thread.
    return;
  args->thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
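  // Release the parent's clock into the new thread's sync clock; the child
  // acquires it in OnStarted(), which orders the parent's prior accesses
  // before everything the child does.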
  ReleaseImpl(args->thr, 0, &sync);
  creation_stack_id = CurrentStackId(args->thr, args->pc);
  if (reuse_count == 0)
    StatInc(args->thr, StatThreadMaxTid);
}

void ThreadContext::OnReset() {
  CHECK_EQ(sync.size(), 0);
  FlushUnneededShadowMemory(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  //!!! FlushUnneededShadowMemory(GetThreadTraceHeader(tid), sizeof(Trace));
}

void ThreadContext::OnDetached(void *arg) {
  ThreadState *thr1 = static_cast<ThreadState*>(arg);
  sync.Reset(&thr1->proc()->clock_cache);
}

struct OnStartedArgs {
  ThreadState *thr;
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
};

void ThreadContext::OnStarted(void *arg) {
  OnStartedArgs *args = static_cast<OnStartedArgs*>(arg);
  thr = args->thr;
  // RoundUp so that one trace part does not contain events
  // from different threads.
  epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
  epoch1 = (u64)-1;
  new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
      args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
#ifndef SANITIZER_GO
  thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
  thr->shadow_stack_pos = thr->shadow_stack;
  thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
#else
  // Setup dynamic shadow stack.
  const int kInitStackSize = 8;
  thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
      kInitStackSize * sizeof(uptr));
  thr->shadow_stack_pos = thr->shadow_stack;
  thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
#endif
  if (common_flags()->detect_deadlocks)
    thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
  thr->fast_state.SetHistorySize(flags()->history_size);
  // Commit switch to the new part of the trace.
  // TraceAddEvent will reset stack0/mset0 in the new part for us.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);

  thr->fast_synch_epoch = epoch0;
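  // Acquire the clock released by the parent thread in OnCreated().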
  AcquireImpl(thr, 0, &sync);
  StatInc(thr, StatSyncAcquire);
  sync.Reset(&thr->proc()->clock_cache);
  thr->is_inited = true;
  DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
          "tls_addr=%zx tls_size=%zx\n",
          tid, (uptr)epoch0, args->stk_addr, args->stk_size,
          args->tls_addr, args->tls_size);
}

void ThreadContext::OnFinished() {
  if (!detached) {
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    ReleaseImpl(thr, 0, &sync);
  }
  epoch1 = thr->fast_state.epoch();

  if (common_flags()->detect_deadlocks)
    ctx->dd->DestroyLogicalThread(thr->dd_lt);
  thr->~ThreadState();
#if TSAN_COLLECT_STATS
  StatAggregate(ctx->stat, thr->stat);
#endif
  thr = 0;
}

#ifndef SANITIZER_GO
struct ThreadLeak {
  ThreadContext *tctx;
  int count;
};

static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
  Vector<ThreadLeak> &leaks = *(Vector<ThreadLeak>*)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->detached || tctx->status != ThreadStatusFinished)
    return;
  for (uptr i = 0; i < leaks.Size(); i++) {
    if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) {
      leaks[i].count++;
      return;
    }
  }
  ThreadLeak leak = {tctx, 1};
  leaks.PushBack(leak);
}
#endif

#ifndef SANITIZER_GO
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
  if (tctx->tid == 0) {
    Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
  } else {
    Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled,"
      " created at:\n", tctx->tid, tctx->name);
    PrintStack(SymbolizeStackId(tctx->creation_stack_id));
  }
  Printf("  One of the following ignores was not ended"
      " (in order of probability)\n");
  for (uptr i = 0; i < set->Size(); i++) {
    Printf("  Ignore was enabled at:\n");
    PrintStack(SymbolizeStackId(set->At(i)));
  }
  Die();
}

static void ThreadCheckIgnore(ThreadState *thr) {
  if (ctx->after_multithreaded_fork)
    return;
  if (thr->ignore_reads_and_writes)
    ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
  if (thr->ignore_sync)
    ReportIgnoresEnabled(thr->tctx, &thr->sync_ignore_set);
}
#else
static void ThreadCheckIgnore(ThreadState *thr) {}
#endif

void ThreadFinalize(ThreadState *thr) {
  ThreadCheckIgnore(thr);
#ifndef SANITIZER_GO
  if (!flags()->report_thread_leaks)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  Vector<ThreadLeak> leaks(MBlockScopedBuf);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      MaybeReportThreadLeak, &leaks);
  for (uptr i = 0; i < leaks.Size(); i++) {
    ScopedReport rep(ReportTypeThreadLeak);
    rep.AddThread(leaks[i].tctx, true);
    rep.SetCount(leaks[i].count);
    OutputReport(thr, rep);
  }
#endif
}

int ThreadCount(ThreadState *thr) {
  uptr result;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
  return (int)result;
}

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  StatInc(thr, StatThreadCreate);
  OnCreatedArgs args = { thr, pc };
  u32 parent_tid = thr ? thr->tid : kInvalidTid;  // No parent for GCD workers.
  int tid =
      ctx->thread_registry->CreateThread(uid, detached, parent_tid, &args);
  DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent_tid, tid, uid);
  StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads());
  return tid;
}

void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
#ifndef SANITIZER_GO
  GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);

  if (tid) {
    if (stk_addr && stk_size)
      MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);

    if (tls_addr && tls_size) {
      // Check that the thr object is in TLS.
      const uptr thr_beg = (uptr)thr;
      const uptr thr_end = (uptr)thr + sizeof(*thr);
      CHECK_GE(thr_beg, tls_addr);
      CHECK_LE(thr_beg, tls_addr + tls_size);
      CHECK_GE(thr_end, tls_addr);
      CHECK_LE(thr_end, tls_addr + tls_size);
      // Since the thr object is huge, skip it.
      MemoryRangeImitateWrite(thr, /*pc=*/ 2, tls_addr, thr_beg - tls_addr);
      MemoryRangeImitateWrite(thr, /*pc=*/ 2,
          thr_end, tls_addr + tls_size - thr_end);
    }
  }
#endif

  ThreadRegistry *tr = ctx->thread_registry;
  OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
  tr->StartThread(tid, os_id, &args);

  tr->Lock();
  thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
  tr->Unlock();

#ifndef SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, 0);
    ThreadIgnoreSyncBegin(thr, 0);
  }
#endif
}

void ThreadFinish(ThreadState *thr) {
  ThreadCheckIgnore(thr);
  StatInc(thr, StatThreadFinish);
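  // Drop the shadow memory for the thread's stack and TLS; it is no longer
  // needed once the thread has finished.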
  if (thr->stk_addr && thr->stk_size)
    DontNeedShadowFor(thr->stk_addr, thr->stk_size);
  if (thr->tls_addr && thr->tls_size)
    DontNeedShadowFor(thr->tls_addr, thr->tls_size);
  thr->is_dead = true;
  ctx->thread_registry->FinishThread(thr->tid);
}

static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
  uptr uid = (uptr)arg;
  if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
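    // Reset user_id so that this context cannot be matched a second time.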
    tctx->user_id = 0;
    return true;
  }
  return false;
}

int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
  int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid);
  DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
  return res;
}

void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
  ctx->thread_registry->JoinThread(tid, thr);
}

void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  ctx->thread_registry->DetachThread(tid, thr);
}

void ThreadSetName(ThreadState *thr, const char *name) {
  ctx->thread_registry->SetThreadName(thr->tid, name);
}

void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
                       uptr size, bool is_write) {
  if (size == 0)
    return;

  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
      thr->tid, (void*)pc, (void*)addr,
      (int)size, is_write);

#if SANITIZER_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsAppMem(addr + size - 1)) {
    Printf("Access to non app mem %zx\n", addr + size - 1);
    DCHECK(IsAppMem(addr + size - 1));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
  if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
    Printf("Bad shadow addr %p (%zx)\n",
               shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
    DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
  }
#endif

  StatInc(thr, StatMopRange);

  if (*shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMopRangeRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;

  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

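  // The range is handled in up to three parts: an unaligned prefix processed
  // byte by byte, whole shadow cells in the middle, and an unaligned tail.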
  bool unaligned = (addr % kShadowCell) != 0;

  // Handle unaligned beginning, if any.
  for (; addr % kShadowCell && size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
  if (unaligned)
    shadow_mem += kShadowCnt;
  // Handle middle part, if any.
  for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
    int const kAccessSizeLog = 3;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
    shadow_mem += kShadowCnt;
  }
  // Handle ending, if any.
  for (; size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
}

}  // namespace __tsan