//===-- tsan_mutex.cc -----------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_libc.h"
#include "tsan_mutex.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"

namespace __tsan {

// Simple reader-writer spin-mutex. Optimized for the not-so-contended case.
// Readers are preferred and can potentially starve writers.

// The table specifies which mutexes can be locked while holding which other
// mutexes. E.g. if the row for MutexTypeThreads contains MutexTypeReport,
// then the Report mutex can be locked while the Threads mutex is held.
// Leaf mutexes can be locked under any other mutex.
// Recursive locking is not supported.
#if SANITIZER_DEBUG && !SANITIZER_GO
const MutexType MutexTypeLeaf = (MutexType)-1;
static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
  /*0  MutexTypeInvalid*/     {},
  /*1  MutexTypeTrace*/       {MutexTypeLeaf},
  /*2  MutexTypeThreads*/     {MutexTypeReport},
  /*3  MutexTypeReport*/      {MutexTypeSyncVar,
                               MutexTypeMBlock, MutexTypeJavaMBlock},
  /*4  MutexTypeSyncVar*/     {MutexTypeDDetector},
  /*5  MutexTypeSyncTab*/     {},  // unused
  /*6  MutexTypeSlab*/        {MutexTypeLeaf},
  /*7  MutexTypeAnnotations*/ {},
  /*8  MutexTypeAtExit*/      {MutexTypeSyncVar},
  /*9  MutexTypeMBlock*/      {MutexTypeSyncVar},
  /*10 MutexTypeJavaMBlock*/  {MutexTypeSyncVar},
  /*11 MutexTypeDDetector*/   {},
  /*12 MutexTypeFired*/       {MutexTypeLeaf},
  /*13 MutexTypeRacy*/        {MutexTypeLeaf},
};
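
// Example of how to read the table: the MutexTypeReport row contains
// MutexTypeSyncVar, so a thread already holding the Report mutex may acquire
// a SyncVar mutex, while the reverse order trips the check in
// InternalDeadlockDetector::Lock below. Illustrative sketch only; the
// StatMtx* counter names are assumptions, not taken from this file:
//
//   Mutex report_mtx(MutexTypeReport, StatMtxReport);
//   Mutex syncvar_mtx(MutexTypeSyncVar, StatMtxSyncVar);
//   report_mtx.Lock();
//   syncvar_mtx.Lock();  // OK: permitted by the table.
//   // Acquiring in the opposite order (SyncVar first, then Report) would
//   // print "ThreadSanitizer: internal deadlock detected" and abort.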

static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
#endif

void InitializeMutex() {
#if SANITIZER_DEBUG && !SANITIZER_GO
  // Build the "can lock" adjacency matrix.
  // If [i][j]==true, then one can lock mutex j while under mutex i.
  const int N = MutexTypeCount;
  int cnt[N] = {};
  bool leaf[N] = {};
  for (int i = 1; i < N; i++) {
    for (int j = 0; j < N; j++) {
      MutexType z = CanLockTab[i][j];
      if (z == MutexTypeInvalid)
        continue;
      if (z == MutexTypeLeaf) {
        CHECK(!leaf[i]);
        leaf[i] = true;
        continue;
      }
      CHECK(!CanLockAdj[i][(int)z]);
      CanLockAdj[i][(int)z] = true;
      cnt[i]++;
    }
  }
  for (int i = 0; i < N; i++) {
    CHECK(!leaf[i] || cnt[i] == 0);
  }
  // Add leaf mutexes.
  for (int i = 0; i < N; i++) {
    if (!leaf[i])
      continue;
    for (int j = 0; j < N; j++) {
      if (i == j || leaf[j] || j == MutexTypeInvalid)
        continue;
      CHECK(!CanLockAdj[j][i]);
      CanLockAdj[j][i] = true;
    }
  }
  // Build the transitive closure.
  bool CanLockAdj2[MutexTypeCount][MutexTypeCount];
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      CanLockAdj2[i][j] = CanLockAdj[i][j];
    }
  }
  for (int k = 0; k < N; k++) {
    for (int i = 0; i < N; i++) {
      for (int j = 0; j < N; j++) {
        if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) {
          CanLockAdj2[i][j] = true;
        }
      }
    }
  }
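  // The k/i/j loop above is Floyd-Warshall-style transitive closure: after
  // it runs, CanLockAdj2[i][j] is true iff mutex j is reachable from mutex i
  // through any chain of permitted acquisitions. E.g. Threads -> Report and
  // Report -> SyncVar in CanLockTab imply CanLockAdj2[Threads][SyncVar].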
#if 0
  Printf("Can lock graph:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Printf("%d ", CanLockAdj[i][j]);
    }
    Printf("\n");
  }
  Printf("Can lock graph closure:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Printf("%d ", CanLockAdj2[i][j]);
    }
    Printf("\n");
  }
#endif
  // Verify that the graph is acyclic.
  for (int i = 0; i < N; i++) {
    if (CanLockAdj2[i][i]) {
      Printf("Mutex %d participates in a cycle\n", i);
      Die();
    }
  }
#endif
}

InternalDeadlockDetector::InternalDeadlockDetector() {
  // Rely on zero initialization because some mutexes can be locked before ctor.
}

#if SANITIZER_DEBUG && !SANITIZER_GO
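// Checks that acquiring a mutex of type t is permitted given the mutexes the
// current thread already holds: the most recently acquired held mutex (the
// one with the largest sequence number) must have an edge to t in CanLockAdj.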
void InternalDeadlockDetector::Lock(MutexType t) {
  // Printf("LOCK %d @%zu\n", t, seq_ + 1);
  CHECK_GT(t, MutexTypeInvalid);
  CHECK_LT(t, MutexTypeCount);
  u64 max_seq = 0;
  u64 max_idx = MutexTypeInvalid;
  for (int i = 0; i != MutexTypeCount; i++) {
    if (locked_[i] == 0)
      continue;
    CHECK_NE(locked_[i], max_seq);
    if (max_seq < locked_[i]) {
      max_seq = locked_[i];
      max_idx = i;
    }
  }
  locked_[t] = ++seq_;
  if (max_idx == MutexTypeInvalid)
    return;
  // Printf("  last %d @%zu\n", max_idx, max_seq);
  if (!CanLockAdj[max_idx][t]) {
    Printf("ThreadSanitizer: internal deadlock detected\n");
    Printf("ThreadSanitizer: can't lock %d while under %zu\n",
               t, (uptr)max_idx);
    CHECK(0);
  }
}

void InternalDeadlockDetector::Unlock(MutexType t) {
  // Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
  CHECK(locked_[t]);
  locked_[t] = 0;
}

void InternalDeadlockDetector::CheckNoLocks() {
  for (int i = 0; i != MutexTypeCount; i++) {
    CHECK_EQ(locked_[i], 0);
  }
}
#endif

void CheckNoLocks(ThreadState *thr) {
#if SANITIZER_DEBUG && !SANITIZER_GO
  thr->internal_deadlock_detector.CheckNoLocks();
#endif
}

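// Mutex state encoding: the state word holds 2*(number of readers) plus 1 if
// a writer owns the lock. Bit 0 is the write bit; each reader adds
// kReadLock == 2. E.g. state 0 is unlocked, 1 is write-locked, and 6 means
// three readers hold the lock.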
const uptr kUnlocked = 0;
const uptr kWriteLock = 1;
const uptr kReadLock = 2;

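// Backoff strategy: the first kActiveSpinIters iterations busy-spin with
// cheap processor yields (proc_yield), after which each iteration falls back
// to a full scheduler yield (internal_sched_yield). Contention() converts
// the iteration count into a rough cost estimate for stats, weighting
// iterations attributed to the passive (scheduler-yield) phase 10x.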
class Backoff {
 public:
  Backoff()
    : iter_() {
  }

  bool Do() {
    if (iter_++ < kActiveSpinIters)
      proc_yield(kActiveSpinCnt);
    else
      internal_sched_yield();
    return true;
  }

  u64 Contention() const {
    u64 active = iter_ % kActiveSpinIters;
    u64 passive = iter_ - active;
    return active + 10 * passive;
  }

 private:
  int iter_;
  static const int kActiveSpinIters = 10;
  static const int kActiveSpinCnt = 20;
};

Mutex::Mutex(MutexType type, StatType stat_type) {
  CHECK_GT(type, MutexTypeInvalid);
  CHECK_LT(type, MutexTypeCount);
#if SANITIZER_DEBUG
  type_ = type;
#endif
#if TSAN_COLLECT_STATS
  stat_type_ = stat_type;
#endif
  atomic_store(&state_, kUnlocked, memory_order_relaxed);
}

Mutex::~Mutex() {
  CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
}

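// Write lock: the fast path is a single strong CAS from kUnlocked to
// kWriteLock; on failure, spin with Backoff and retry with a weak CAS
// whenever the state reads unlocked. Acquire ordering on the successful CAS
// pairs with the release in Unlock()/ReadUnlock().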
void Mutex::Lock() {
#if SANITIZER_DEBUG && !SANITIZER_GO
  cur_thread()->internal_deadlock_detector.Lock(type_);
#endif
  uptr cmp = kUnlocked;
  if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                     memory_order_acquire))
    return;
  for (Backoff backoff; backoff.Do();) {
    if (atomic_load(&state_, memory_order_relaxed) == kUnlocked) {
      cmp = kUnlocked;
      if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire)) {
#if TSAN_COLLECT_STATS && !SANITIZER_GO
        StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
        return;
      }
    }
  }
}

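// Write unlock releases with fetch_sub rather than a plain store of
// kUnlocked because waiting readers may have already optimistically added
// kReadLock to the state (see ReadLock below); subtracting kWriteLock clears
// only the write bit and preserves their registrations.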
void Mutex::Unlock() {
  uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
  (void)prev;
  DCHECK_NE(prev & kWriteLock, 0);
#if SANITIZER_DEBUG && !SANITIZER_GO
  cur_thread()->internal_deadlock_detector.Unlock(type_);
#endif
}

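// Read lock: optimistically register as a reader with fetch_add; if the
// write bit was set in the previous value, a writer currently owns the lock,
// so spin until the bit clears. The reader's increment stays in place while
// it waits, which keeps new writers from acquiring in the meantime (this is
// the reader preference noted at the top of the file).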
void Mutex::ReadLock() {
#if SANITIZER_DEBUG && !SANITIZER_GO
  cur_thread()->internal_deadlock_detector.Lock(type_);
#endif
  uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
  if ((prev & kWriteLock) == 0)
    return;
  for (Backoff backoff; backoff.Do();) {
    prev = atomic_load(&state_, memory_order_acquire);
    if ((prev & kWriteLock) == 0) {
#if TSAN_COLLECT_STATS && !SANITIZER_GO
      StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
      return;
    }
  }
}

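// Read unlock: subtract one reader with release ordering so the critical
// section is published to the next lock owner. The DCHECKs assert that no
// writer held the lock and that at least one reader was registered.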
void Mutex::ReadUnlock() {
  uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
  (void)prev;
  DCHECK_EQ(prev & kWriteLock, 0);
  DCHECK_GT(prev & ~kWriteLock, 0);
#if SANITIZER_DEBUG && !SANITIZER_GO
  cur_thread()->internal_deadlock_detector.Unlock(type_);
#endif
}

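// Only asserts that the mutex is held in some mode; it does not verify that
// the calling thread is the owner, nor distinguish read from write locking.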
void Mutex::CheckLocked() {
  CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0);
}

}  // namespace __tsan