//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {

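// Registers creation of a user mutex at 'addr': performs a shadow write on
// the mutex memory (unless it is linker-initialized or outside app memory)
// and records the rw/recursive/linker_init attributes in its SyncVar.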
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init) {
  Context *ctx = CTX();
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexCreate);
  if (!linker_init && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  s->is_rw = rw;
  s->is_recursive = recursive;
  s->is_linker_init = linker_init;
  s->mtx.Unlock();
}

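// Handles destruction of a user mutex: performs a shadow write on the mutex
// memory, optionally reports destruction of a still-locked mutex
// (flags()->report_destroy_locked) and removes the SyncVar from the table.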
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
  Context *ctx = CTX();
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
#ifndef TSAN_GO
  // Global mutexes not marked as LINKER_INITIALIZED
  // cause a lot of uninteresting reports, so just ignore them.
  if (IsGlobalVar(addr))
    return;
#endif
  SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr);
  if (s == 0)
    return;
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->is_broken) {
    s->is_broken = true;
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(s);
    StackTrace trace;
    trace.ObtainCurrent(thr, pc);
    rep.AddStack(&trace);
    FastState last(s->last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(&trace);
    rep.AddLocation(s->addr, 1);
    OutputReport(ctx, rep);
  }
  thr->mset.Remove(s->GetId());
  DestroyAndFree(s);
}

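// Write (exclusive) lock; 'rec' is the number of recursion levels to acquire
// at once. On the transition from unlocked, the thread's clock acquires both
// the write clock and the read clock of the mutex.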
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
  CHECK_GT(rec, 0);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  if (s->owner_tid == SyncVar::kInvalidTid) {
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    CHECK_GT(s->recursion, 0);
  } else {
    Printf("ThreadSanitizer WARNING: double lock\n");
    PrintCurrentStack(thr, pc);
  }
  if (s->recursion == 0) {
    StatInc(thr, StatMutexLock);
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->clock.acquire(&s->clock);
    StatInc(thr, StatSyncAcquire);
    thr->clock.acquire(&s->read_clock);
    StatInc(thr, StatSyncAcquire);
  } else if (!s->is_recursive) {
    StatInc(thr, StatMutexRecLock);
  }
  s->recursion += rec;
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  s->mtx.Unlock();
}

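// Write (exclusive) unlock. If 'all' is set, all recursion levels are dropped
// at once. Returns the number of recursion levels actually released
// (0 if the unlock was detected as invalid).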
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  int rec = 0;
  if (s->recursion == 0) {
    if (!s->is_broken) {
      s->is_broken = true;
      Printf("ThreadSanitizer WARNING: unlock of unlocked mutex\n");
      PrintCurrentStack(thr, pc);
    }
  } else if (s->owner_tid != thr->tid) {
    if (!s->is_broken) {
      s->is_broken = true;
      Printf("ThreadSanitizer WARNING: mutex unlock by another thread\n");
      PrintCurrentStack(thr, pc);
    }
  } else {
    rec = all ? s->recursion : 1;
    s->recursion -= rec;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      thr->clock.set(thr->tid, thr->fast_state.epoch());
      thr->fast_synch_epoch = thr->fast_state.epoch();
      thr->clock.ReleaseStore(&s->clock);
      StatInc(thr, StatSyncRelease);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  s->mtx.Unlock();
  return rec;
}

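// Read (shared) lock: acquires only the write clock of the mutex,
// so concurrent readers do not synchronize with each other.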
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  if (s->owner_tid != SyncVar::kInvalidTid) {
    Printf("ThreadSanitizer WARNING: read lock of a write locked mutex\n");
    PrintCurrentStack(thr, pc);
  }
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(&s->clock);
  s->last_lock = thr->fast_state.raw();
  StatInc(thr, StatSyncAcquire);
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  s->mtx.ReadUnlock();
}

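// Read (shared) unlock: releases into the separate read clock of the mutex,
// which is acquired only by subsequent write locks.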
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  if (s->owner_tid != SyncVar::kInvalidTid) {
    Printf("ThreadSanitizer WARNING: read unlock of a write "
           "locked mutex\n");
    PrintCurrentStack(thr, pc);
  }
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(&s->read_clock);
  StatInc(thr, StatSyncRelease);
  s->mtx.Unlock();
  thr->mset.Del(s->GetId(), false);
}

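// Unlock when the caller does not know whether the mutex is held for reading
// or for writing; the mode is inferred from the recorded owner_tid.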
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  bool write = true;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be read unlock.
    write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->fast_synch_epoch = thr->fast_state.epoch();
    thr->clock.release(&s->read_clock);
    StatInc(thr, StatSyncRelease);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      // FIXME: Refactor me, plz.
      // The sequence of events is quite tricky and doubled in several places.
      // First, it's a bug to increment the epoch w/o writing to the trace.
      // Then, the acquire/release logic can be factored out as well.
      thr->clock.set(thr->tid, thr->fast_state.epoch());
      thr->fast_synch_epoch = thr->fast_state.epoch();
      thr->clock.ReleaseStore(&s->clock);
      StatInc(thr, StatSyncRelease);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->is_broken) {
    s->is_broken = true;
    Printf("ThreadSanitizer WARNING: mutex unlock by another thread\n");
    PrintCurrentStack(thr, pc);
  }
  thr->mset.Del(s->GetId(), write);
  s->mtx.Unlock();
}

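// Plain acquire on an arbitrary address: the thread's clock acquires the
// clock stored in the address's SyncVar.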
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(&s->clock);
  StatInc(thr, StatSyncAcquire);
  s->mtx.ReadUnlock();
}

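// Folds the epoch of every registered thread into the clock of the thread
// passed in 'arg' (the current epoch for running threads, epoch1 otherwise).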
static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->clock.set(tctx->tid, tctx->epoch1);
}

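// Acquires the current epochs of all registered threads, i.e. synchronizes
// the current thread with everything that has happened so far.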
void AcquireGlobal(ThreadState *thr, uptr pc) {
  ThreadRegistryLock l(CTX()->thread_registry);
  CTX()->thread_registry->RunCallbackForEachThreadLocked(
      UpdateClockCallback, thr);
}

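// Plain release on an arbitrary address: merges the thread's clock into the
// clock stored in the address's SyncVar.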
void Release(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.release(&s->clock);
  StatInc(thr, StatSyncRelease);
  s->mtx.Unlock();
}

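// Release-store: overwrites the SyncVar's clock with the thread's clock
// instead of merging into it.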
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.ReleaseStore(&s->clock);
  StatInc(thr, StatSyncRelease);
  s->mtx.Unlock();
}

#ifndef TSAN_GO
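// AfterSleep snapshots the epoch of every thread into last_sleep_clock and
// records the current stack in last_sleep_stack_id; this is later used to
// annotate race reports where an access happened before the thread slept.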
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
}

void AfterSleep(ThreadState *thr, uptr pc) {
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  ThreadRegistryLock l(CTX()->thread_registry);
  CTX()->thread_registry->RunCallbackForEachThreadLocked(
      UpdateSleepClockCallback, thr);
}
#endif

}  // namespace __tsan