//===-- tsan_sync.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);

SyncVar::SyncVar()
    : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
  Reset();
}

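// Init binds a SyncVar to an address: it records the object's identity
// (address plus a generation uid, which lets stale references to a recycled
// SyncVar slot be detected) and, if deadlock detection is enabled, registers
// the mutex with the deadlock detector.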
void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
  this->addr = addr;
  this->uid = uid;
  this->next = 0;

  creation_stack_id = 0;
  if (kCppMode)  // Go does not use creation stacks.
    creation_stack_id = CurrentStackId(thr, pc);
  if (flags()->detect_deadlocks)
    DDMutexInit(thr, pc, this);
}

void SyncVar::Reset() {
  uid = 0;
  creation_stack_id = 0;
  owner_tid = kInvalidTid;
  last_lock = 0;
  recursion = 0;
  is_rw = 0;
  is_recursive = 0;
  is_broken = 0;
  is_linker_init = 0;

  clock.Zero();
  read_clock.Reset();
}

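// MetaMap tracks heap blocks and synchronization objects through the meta
// shadow. uid_gen_ hands out a fresh id for every SyncVar created, so an
// (address, uid) pair identifies one logical synchronization object even
// after the underlying SyncVar slot has been freed and reused.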
MetaMap::MetaMap() {
  atomic_store(&uid_gen_, 0, memory_order_relaxed);
}

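// Each meta shadow cell covers kMetaShadowCell bytes of application memory
// and holds either 0 (nothing), a block index tagged with kFlagBlock, or the
// head of a singly-linked list of sync objects tagged with kFlagSync.
// AllocBlock records the block descriptor (size, allocating thread and stack)
// and publishes its index in the block's first meta cell.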
void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 idx = block_alloc_.Alloc(&thr->block_cache);
  MBlock *b = block_alloc_.Map(idx);
  b->siz = sz;
  b->tid = thr->tid;
  b->stk = CurrentStackId(thr, pc);
  u32 *meta = MemToMeta(p);
  DCHECK_EQ(*meta, 0);
  *meta = idx | kFlagBlock;
}

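// FreeBlock releases the block at p (and any sync objects hanging off its
// meta cells) and returns the size that was covered, rounded up to the meta
// cell granularity, or 0 if no block is registered at p.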
uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
  MBlock* b = GetBlock(p);
  if (b == 0)
    return 0;
  uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
  FreeRange(thr, pc, p, sz);
  return sz;
}

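// FreeRange wipes every meta cell in [p, p + sz): block descriptors are
// returned to the block allocator and whole chains of sync objects are
// reset and returned to the sync allocator.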
void MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 *meta = MemToMeta(p);
  u32 *end = MemToMeta(p + sz);
  if (end == meta)
    end++;
  for (; meta < end; meta++) {
    u32 idx = *meta;
    *meta = 0;
    for (;;) {
      if (idx == 0)
        break;
      if (idx & kFlagBlock) {
        block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
        break;
      } else if (idx & kFlagSync) {
        SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
        u32 next = s->next;
        s->Reset();
        sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
        idx = next;
      } else {
        CHECK(0);
      }
    }
  }
}

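// GetBlock walks the meta cell for p past any sync objects until it finds
// the block descriptor, or returns 0 if the chain contains none.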
MBlock* MetaMap::GetBlock(uptr p) {
  u32 *meta = MemToMeta(p);
  u32 idx = *meta;
  for (;;) {
    if (idx == 0)
      return 0;
    if (idx & kFlagBlock)
      return block_alloc_.Map(idx & ~kFlagMask);
    DCHECK(idx & kFlagSync);
    SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
    idx = s->next;
  }
}

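// The two public lookup entry points below are thin wrappers around
// GetAndLock; the returned SyncVar is locked (read- or write-locked as
// requested) and must be unlocked by the caller.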
SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* MetaMap::GetIfExistsAndLock(uptr addr) {
  return GetAndLock(0, 0, addr, true, false);
}

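// GetAndLock scans the sync object chain for addr and, if create is set and
// no object exists, inserts a new one lock-free: the new object is prepended
// to the chain with a compare-and-swap on the meta cell, and the whole scan
// is retried whenever the cell changes underneath us.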
SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
  u32 *meta = MemToMeta(addr);
  u32 idx0 = *meta;
  u32 myidx = 0;
  SyncVar *mys = 0;
  for (;;) {
    u32 idx = idx0;
    for (;;) {
      if (idx == 0)
        break;
      if (idx & kFlagBlock)
        break;
      DCHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      if (s->addr == addr) {
        if (myidx != 0) {
          mys->Reset();
          sync_alloc_.Free(&thr->sync_cache, myidx);
        }
        if (write_lock)
          s->mtx.Lock();
        else
          s->mtx.ReadLock();
        return s;
      }
      idx = s->next;
    }
    if (!create)
      return 0;
    if (*meta != idx0) {
      idx0 = *meta;
      continue;
    }

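    // No existing object and the cell is unchanged: speculatively allocate
    // and initialize a SyncVar. It is kept across CAS retries, and freed
    // above if another thread wins the race and publishes addr first.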
    if (myidx == 0) {
      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
      myidx = sync_alloc_.Alloc(&thr->sync_cache);
      mys = sync_alloc_.Map(myidx);
      mys->Init(thr, pc, addr, uid);
    }
    mys->next = idx0;
    if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
        myidx | kFlagSync, memory_order_release)) {
      if (write_lock)
        mys->mtx.Lock();
      else
        mys->mtx.ReadLock();
      return mys;
    }
  }
}

void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
  // src and dst can overlap; the caller must guarantee that there are no
  // concurrent accesses to either region (e.g. by stopping the world).
  CHECK_NE(src, dst);
  CHECK_NE(sz, 0);
  uptr diff = dst - src;
  u32 *src_meta = MemToMeta(src);
  u32 *dst_meta = MemToMeta(dst);
  u32 *src_meta_end = MemToMeta(src + sz);
  uptr inc = 1;
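  // For overlapping regions with dst > src, iterate backwards so that source
  // cells are consumed before the destination range overwrites them.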
  if (dst > src) {
    src_meta = MemToMeta(src + sz) - 1;
    dst_meta = MemToMeta(dst + sz) - 1;
    src_meta_end = MemToMeta(src) - 1;
    inc = -1;
  }
  for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
    CHECK_EQ(*dst_meta, 0);
    u32 idx = *src_meta;
    *src_meta = 0;
    *dst_meta = idx;
    // Patch the addresses in sync objects.
    while (idx != 0) {
      if (idx & kFlagBlock)
        break;
      CHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      s->addr += diff;
      idx = s->next;
    }
  }
}

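// Flushes the thread-local allocator caches back to the central allocators
// so that cached blocks and sync objects can be reused by other threads.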
void MetaMap::OnThreadIdle(ThreadState *thr) {
  block_alloc_.FlushCache(&thr->block_cache);
  sync_alloc_.FlushCache(&thr->sync_cache);
}

}  // namespace __tsan