//===-- sanitizer_deadlock_detector1.cc -----------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on NxN adjacency bit matrix.
//
//===----------------------------------------------------------------------===//
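// Roughly: each mutex is a node in a lock-order graph kept as an NxN
// adjacency bit matrix (DeadlockDetector in sanitizer_deadlock_detector.h).
// When a thread that already holds some locks acquires another one, edges
// between the held locks and the new one are recorded together with stack
// traces; an acquisition that would close a cycle in this graph is reported
// as a potential deadlock.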

#include "sanitizer_deadlock_detector_interface.h"
#include "sanitizer_deadlock_detector.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"

#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1

namespace __sanitizer {

typedef TwoLevelBitVector<> DDBV;  // DeadlockDetector's bit vector.

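// This detector keeps no per-physical-thread state;
// CreatePhysicalThread() below simply returns null.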
struct DDPhysicalThread {
};

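// Per-logical-thread state: the owning context, the thread-local part of the
// deadlock detector, and a lazily filled report handed out via GetReport().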
struct DDLogicalThread {
  u64 ctx;
  DeadlockDetectorTLS<DDBV> dd;
  DDReport rep;
  bool report_pending;
};

struct DD : public DDetector {
  SpinMutex mtx;
  DeadlockDetector<DDBV> dd;
  DDFlags flags;

  explicit DD(const DDFlags *flags);

  DDPhysicalThread* CreatePhysicalThread();
  void DestroyPhysicalThread(DDPhysicalThread *pt);

  DDLogicalThread* CreateLogicalThread(u64 ctx);
  void DestroyLogicalThread(DDLogicalThread *lt);

  void MutexInit(DDCallback *cb, DDMutex *m);
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock);
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexDestroy(DDCallback *cb, DDMutex *m);

  DDReport *GetReport(DDCallback *cb);

  void MutexEnsureID(DDLogicalThread *lt, DDMutex *m);
  void ReportDeadlock(DDCallback *cb, DDMutex *m);
};
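// A rough sketch of the expected call sequence, driven by a client runtime
// that supplies the DDCallback (its lt field and its Unwind()/UniqueTid()
// implementations).  Names below are illustrative only:
//
//   DDetector *d = DDetector::Create(&flags);
//   cb.lt = d->CreateLogicalThread(thread_ctx);   // once per thread
//   d->MutexInit(&cb, &m);                        // when m is created
//   d->MutexBeforeLock(&cb, &m, /*wlock=*/true);
//   d->MutexAfterLock(&cb, &m, /*wlock=*/true, /*trylock=*/false);
//   if (DDReport *rep = d->GetReport(&cb)) { /* found a cycle */ }
//   d->MutexBeforeUnlock(&cb, &m, /*wlock=*/true);
//   d->MutexDestroy(&cb, &m);                     // when m is destroyed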

DDetector *DDetector::Create(const DDFlags *flags) {
  (void)flags;
  void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
  return new(mem) DD(flags);
}

DD::DD(const DDFlags *flags)
    : flags(*flags) {
  dd.clear();
}

DDPhysicalThread* DD::CreatePhysicalThread() {
  return 0;
}

void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
}

DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
  DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(sizeof(*lt));
  lt->ctx = ctx;
  lt->dd.clear();
  lt->report_pending = false;
  return lt;
}

void DD::DestroyLogicalThread(DDLogicalThread *lt) {
  lt->~DDLogicalThread();
  InternalFree(lt);
}

void DD::MutexInit(DDCallback *cb, DDMutex *m) {
  m->id = 0;
  m->stk = cb->Unwind();
}

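// Ensure that m has a node id in the current epoch of the lock graph and
// that lt's thread-local state refers to the same epoch.  Called under mtx.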
void DD::MutexEnsureID(DDLogicalThread *lt, DDMutex *m) {
  if (!dd.nodeBelongsToCurrentEpoch(m->id))
    m->id = dd.newNode(reinterpret_cast<uptr>(m));
  dd.ensureCurrentEpoch(&lt->dd);
}

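// Called before lt acquires m.  Fast paths: nothing to check if lt holds no
// locks yet, or if all relevant edges for m are already known.  Otherwise,
// under mtx, check whether acquiring m would close a cycle in the lock-order
// graph; if so, add the missing edges now (so that all stack traces are
// available) and prepare a deadlock report.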
void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
  DDLogicalThread *lt = cb->lt;
  if (lt->dd.empty()) return;  // This will be the first lock held by lt.
  if (dd.hasAllEdges(&lt->dd, m->id)) return;  // We already have all edges.
  SpinMutexLock lk(&mtx);
  MutexEnsureID(lt, m);
  if (dd.isHeld(&lt->dd, m->id))
    return;  // FIXME: allow this only for recursive locks.
  if (dd.onLockBefore(&lt->dd, m->id)) {
    // Actually add this edge now so that we have all the stack traces.
    dd.addEdges(&lt->dd, m->id, cb->Unwind(), cb->UniqueTid());
    ReportDeadlock(cb, m);
  }
}

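// Reconstruct the lock-order cycle that ends at m and store it, together with
// the stacks and thread ids recorded on its edges, in lt->rep.  The report is
// handed to the client later via GetReport().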
void DD::ReportDeadlock(DDCallback *cb, DDMutex *m) {
  DDLogicalThread *lt = cb->lt;
  uptr path[10];
  uptr len = dd.findPathToLock(&lt->dd, m->id, path, ARRAY_SIZE(path));
  CHECK_GT(len, 0U);  // Hm.. cycle of 10 locks? I'd like to see that.
  CHECK_EQ(m->id, path[0]);
  lt->report_pending = true;
  DDReport *rep = &lt->rep;
  rep->n = len;
  for (uptr i = 0; i < len; i++) {
    uptr from = path[i];
    uptr to = path[(i + 1) % len];
    DDMutex *m0 = (DDMutex*)dd.getData(from);
    DDMutex *m1 = (DDMutex*)dd.getData(to);

    u32 stk_from = -1U, stk_to = -1U;
    int unique_tid = 0;
    dd.findEdge(from, to, &stk_from, &stk_to, &unique_tid);
    // Printf("Edge: %zd=>%zd: %u/%u T%d\n", from, to, stk_from, stk_to,
    //    unique_tid);
    rep->loop[i].thr_ctx = unique_tid;
    rep->loop[i].mtx_ctx0 = m0->ctx;
    rep->loop[i].mtx_ctx1 = m1->ctx;
    rep->loop[i].stk[0] = stk_to;
    rep->loop[i].stk[1] = stk_from;
  }
}

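// Called after lt has acquired m.  The onFirstLock/onLockFast paths avoid
// taking mtx when possible; otherwise register the lock under mtx and, unless
// this was a trylock, add edges between the currently held locks and m.  If
// flags.second_deadlock_stack is set, an unwind is captured here and attached
// to the new edges.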
void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) {
  DDLogicalThread *lt = cb->lt;
  u32 stk = 0;
  if (flags.second_deadlock_stack)
    stk = cb->Unwind();
  // Printf("T%p MutexLock:   %zx stk %u\n", lt, m->id, stk);
  if (dd.onFirstLock(&lt->dd, m->id, stk))
    return;
  if (dd.onLockFast(&lt->dd, m->id, stk))
    return;

  SpinMutexLock lk(&mtx);
  MutexEnsureID(lt, m);
  if (wlock)  // Only a recursive rlock may be held.
    CHECK(!dd.isHeld(&lt->dd, m->id));
  if (!trylock)
    dd.addEdges(&lt->dd, m->id, stk ? stk : cb->Unwind(), cb->UniqueTid());
  dd.onLockAfter(&lt->dd, m->id, stk);
}

void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
  // Printf("T%p MutexUnLock: %zx\n", cb->lt, m->id);
  dd.onUnlock(&cb->lt->dd, m->id);
}

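// Forget everything the detector knows about m.  Safe to call for mutexes
// that were never registered (m->id == 0).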
void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
  if (!m->id) return;
  SpinMutexLock lk(&mtx);
  if (dd.nodeBelongsToCurrentEpoch(m->id))
    dd.removeNode(m->id);
  m->id = 0;
}

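// Return the report prepared by ReportDeadlock() for this logical thread, or
// null if there is none.  The pending flag is cleared so that each report is
// returned only once.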
DDReport *DD::GetReport(DDCallback *cb) {
  if (!cb->lt->report_pending)
    return 0;
  cb->lt->report_pending = false;
  return &cb->lt->rep;
}

}  // namespace __sanitizer
#endif  // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1