//===-- tsan_rtl_report.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;  // NOLINT

static ReportStack *SymbolizeStack(StackTrace trace);

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  // There is a high probability that interceptors will check-fail as well;
  // on the other hand, there is no sense in processing interceptors,
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
  Die();
}

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif
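
// A hypothetical override in application/test code might look like this
// (a sketch only; ReportDesc and ReportType are defined in tsan_report.h):
//
//   namespace __tsan {
//   bool OnReport(const ReportDesc *rep, bool suppressed) {
//     // Returning true suppresses the report; false lets it print.
//     return suppressed || rep->typ == ReportTypeThreadLeak;
//   }
//   }  // namespace __tsan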

static void StackStripMain(SymbolizedStack *frames) {
  SymbolizedStack *last_frame = nullptr;
  SymbolizedStack *last_frame2 = nullptr;
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    last_frame2 = last_frame;
    last_frame = cur;
  }

  if (last_frame2 == nullptr)
    return;
#ifndef SANITIZER_GO
  const char *last = last_frame->info.function;
  const char *last2 = last_frame2->info.function;
  // Strip frame above 'main'.
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However, we must try hard not to miss it
    // due to our own fault.
    DPrintf("Bottom stack frame is missing\n");
  }
#else
  // The last frame always points into the runtime
  // (gosched0, goexit0, runtime.main).
  last_frame->ClearAll();
  last_frame2->next = nullptr;
#endif
}
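
// For example, StackStripMain trims a symbolized stack whose deepest frames
// are
//   ... -> main -> __libc_start_main
// so that it ends at main (assuming a glibc-style start routine sits below
// main; illustrative only).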

ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return nullptr;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}

static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return nullptr;
  SymbolizedStack *top = nullptr;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
    uptr pc1 = pc;
    // We obtain the return address, but we're interested in the previous
    // instruction.
    if ((pc & kExternalPCBit) == 0)
      pc1 = StackTrace::GetPreviousInstructionPc(pc);
    SymbolizedStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    SymbolizedStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = top;
    top = ent;
  }
  StackStripMain(top);

  ReportStack *stack = ReportStack::New();
  stack->frames = top;
  return stack;
}

ScopedReport::ScopedReport(ReportType typ) {
  ctx->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
}

ScopedReport::~ScopedReport() {
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReport::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}

void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
                                   const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(stack);
  if (mop->stack)
    mop->stack->suppressable = true;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 mid = this->AddMutex(d.id);
    ReportMopMutex mtx = {mid, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReport::AddUniqueTid(int unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread;
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->pid = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}

#ifndef SANITIZER_GO
static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
  int unique_id = *(int *)arg;
  return tctx->unique_id == (u32)unique_id;
}

static ThreadContext *FindThreadByUidLocked(int unique_id) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry->FindThreadContextLocked(
          FindThreadByUidLockedCallback, &unique_id));
}

static ThreadContext *FindThreadByTidLocked(int tid) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
                                                    (void*)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReport::AddThread(int unique_tid, bool suppressable) {
#ifndef SANITIZER_GO
  if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
    AddThread(tctx, suppressable);
#endif
}

void ScopedReport::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->addr = s->addr;
  rm->destroyed = false;
  rm->stack = SymbolizeStackId(s->creation_stack_id);
}

u64 ScopedReport::AddMutex(u64 id) {
  u64 uid = 0;
  u64 mid = id;
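  // The 64-bit mutex id packs the mutex address together with its unique id;
  // split it back into both parts.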
  uptr addr = SyncVar::SplitId(id, &uid);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
  // Check that the mutex is still alive.
  // Another mutex can be created at the same address,
  // so check uid as well.
  if (s && s->CheckId(uid)) {
    mid = s->uid;
    AddMutex(s);
  } else {
    AddDeadMutex(id);
  }
  if (s)
    s->mtx.Unlock();
  return mid;
}

void ScopedReport::AddDeadMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->addr = 0;
  rm->destroyed = true;
  rm->stack = 0;
}

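// Attaches a description of addr to the report. In non-Go builds the address
// is classified, in order, as a file descriptor, a heap block, a thread
// stack/TLS range, or a global variable.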
void ScopedReport::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#ifndef SANITIZER_GO
  int fd = -1;
  int creat_tid = -1;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
    ReportLocation *loc = ReportLocation::New(ReportLocationFD);
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void*)addr)) {
    void *block_begin = a->GetBlockBegin((void*)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock((uptr)block_begin);
  }
  if (b != 0) {
    ThreadContext *tctx = FindThreadByTidLocked(b->tid);
    ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
    loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
    loc->heap_chunk_size = b->siz;
    loc->tid = tctx ? tctx->tid : b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    ReportLocation *loc =
        ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
#endif
}

#ifndef SANITIZER_GO
void ScopedReport::AddSleep(u32 stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReport::SetCount(int count) {
  rep_->count = count;
}

const ReportDesc *ScopedReport::GetReport() const {
  return rep_;
}

void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset) {
  // This function restores the stack trace and mutex set for the given
  // thread/epoch. It does so by getting the stack trace and mutex set at the
  // beginning of the trace part, and then replaying the trace up to the
  // given epoch.
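  // Each trace event is a u64 with the event type in the top 3 bits and the
  // payload (a pc, or a mutex id for lock/unlock events) in the low 61 bits;
  // see the decoding in the replay loop below.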
  Trace* trace = ThreadTrace(tid);
  ReadLock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
    return;
  CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  Vector<uptr> stack(MBlockReportStack);
  stack.Resize(hdr->stack0.size + 64);
  for (uptr i = 0; i < hdr->stack0.size; i++) {
    stack[i] = hdr->stack0.trace[i];
    DPrintf2("  #%02zu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.size;
  Event *events = (Event*)GetThreadTrace(tid);
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> 61);
    uptr pc = (uptr)(ev & ((1ull << 61) - 1));
    DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      if (stack.Size() < pos + 2)
        stack.Resize(pos + 2);
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2("      #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(&stack[0], pos);
}

static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                             uptr addr_min, uptr addr_max) {
  bool equal_stack = false;
  RacyStacks hash;
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  {
    ReadLock lock(&ctx->racy_mtx);
    if (flags()->suppress_equal_stacks) {
      hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
      hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
      for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
        if (hash == ctx->racy_stacks[i]) {
          VPrintf(2,
              "ThreadSanitizer: suppressing report as doubled (stack)\n");
          equal_stack = true;
          break;
        }
      }
    }
    if (flags()->suppress_equal_addresses) {
      for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
        RacyAddress ra2 = ctx->racy_addresses[i];
        uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
        uptr minend = min(ra0.addr_max, ra2.addr_max);
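        // Two half-open ranges [addr_min, addr_max) overlap iff
        // maxbeg < minend; e.g. [8, 16) and [12, 20) give
        // maxbeg = 12 < minend = 16, so they overlap.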
        if (maxbeg < minend) {
          VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
          equal_address = true;
          break;
        }
      }
    }
  }
  if (!equal_stack && !equal_address)
    return false;
  if (!equal_stack) {
    Lock lock(&ctx->racy_mtx);
    ctx->racy_stacks.PushBack(hash);
  }
  if (!equal_address) {
    Lock lock(&ctx->racy_mtx);
    ctx->racy_addresses.PushBack(ra0);
  }
  return true;
}

static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                          uptr addr_min, uptr addr_max) {
  Lock lock(&ctx->racy_mtx);
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash;
    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}

bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  if (!flags()->report_bugs)
    return false;
  atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
  const ReportDesc *rep = srep.GetReport();
  Suppression *supp = 0;
  uptr pc_or_addr = 0;
  for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (pc_or_addr != 0) {
    Lock lock(&ctx->fired_suppressions_mtx);
    FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    bool old_is_freeing = thr->is_freeing;
    thr->is_freeing = false;
    bool suppressed = OnReport(rep, pc_or_addr != 0);
    thr->is_freeing = old_is_freeing;
    if (suppressed)
      return false;
  }
  PrintReport(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    Die();
  return true;
}

bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc_or_addr) {
        if (s->supp)
          atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc_or_addr) {
      if (s->supp)
        atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

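// Despite its name, this returns true whenever the race must be reported even
// when report_atomic_races is disabled: either neither access is atomic (a
// plain race), or an atomic access races with a free.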
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}

void ReportRace(ThreadState *thr) {
  CheckNoLocks(thr);

  // The symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  if (!flags()->report_bugs)
    return;
  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access && freed)
    typ = ReportTypeVptrUseAfterFree;
  else if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;

  if (IsFiredSuppression(ctx, typ, addr))
    return;

  const uptr kMop = 2;
  VarSizeStackTrace traces[kMop];
  const uptr toppc = TraceTopPC(thr);
  ObtainCurrentStack(thr, toppc, &traces[0]);
  if (IsFiredSuppression(ctx, typ, traces[0]))
    return;

  // MutexSet is too large to live on the stack.
  Vector<u64> mset_buffer(MBlockScopedBuf);
  mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1);
  MutexSet *mset2 = new(&mset_buffer[0]) MutexSet();

  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2);
  if (IsFiredSuppression(ctx, typ, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  ThreadRegistryLock l0(ctx->thread_registry);
  ScopedReport rep(typ);
  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, s, traces[i], i == 0 ? &thr->mset : mset2);
  }

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#ifndef SANITIZER_GO
  {  // NOLINT
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  if (!OutputReport(thr, rep))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}

void PrintCurrentStackSlow(uptr pc) {
#ifndef SANITIZER_GO
  BufferedStackTrace *ptrace =
      new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
          BufferedStackTrace();
  ptrace->Unwind(kStackTraceMax, pc, 0, 0, 0, 0, false);
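  // Unwind() fills trace_buffer innermost-first, while SymbolizeStack expects
  // the outermost frame first (the order used in tsan's own traces), so
  // reverse the buffer in place.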
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }
  PrintStack(SymbolizeStack(*ptrace));
#endif
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
}  // extern "C"
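
// Example (hypothetical application code): any program running under tsan can
// dump its current stack through the interface function above:
//
//   extern "C" void __sanitizer_print_stack_trace();
//   void DebugDumpStack() { __sanitizer_print_stack_trace(); }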
    697