//===-- tsan_sync.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// It implements SyncTab/SyncVar, the address-keyed table of synchronization
// objects (mutexes, atomics, etc.), and StackTrace storage.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

SyncVar::SyncVar(uptr addr, u64 uid)
  : mtx(MutexTypeSyncVar, StatMtxSyncVar)
  , addr(addr)
  , uid(uid)
  , owner_tid(kInvalidTid)
  , last_lock()
  , recursion()
  , is_rw()
  , is_recursive()
  , is_broken()
  , is_linker_init() {
}

SyncTab::Part::Part()
  : mtx(MutexTypeSyncTab, StatMtxSyncTab)
  , val() {
}

SyncTab::SyncTab() {
}

SyncTab::~SyncTab() {
  // Free only the sync objects that live in the hashmap parts. Sync objects
  // attached to heap blocks (see GetAndLock) are owned by their MBlock and
  // are freed with the block, not here.
  for (int i = 0; i < kPartCount; i++) {
    while (tab_[i].val) {
      SyncVar *tmp = tab_[i].val;
      tab_[i].val = tmp->next;
      DestroyAndFree(tmp);
    }
  }
}

SyncVar* SyncTab::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* SyncTab::GetIfExistsAndLock(uptr addr, bool write_lock) {
  return GetAndLock(0, 0, addr, write_lock, false);
}
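
// Illustrative call site (a sketch, not part of this file): mutex event
// handlers look up the SyncVar for a user mutex and hold its mtx while
// updating vector clocks, roughly:
//   SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)m, true);
//   ... synchronize thr's clock with s->clock ...
//   s->mtx.Unlock();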

// Allocates a new SyncVar from the internal allocator and constructs it
// in-place. The uid distinguishes sync objects that reuse the same address
// over time (e.g. in reports referring to an already destroyed mutex).
SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) {
  StatInc(thr, StatSyncCreated);
  void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
  const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
  SyncVar *res = new(mem) SyncVar(addr, uid);
#ifndef TSAN_GO
  res->creation_stack_id = CurrentStackId(thr, pc);
#endif
  return res;
}

SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
#ifndef TSAN_GO
  {  // NOLINT
    // Fast path for Java objects, which are tracked separately.
    SyncVar *res = GetJavaSync(thr, pc, addr, write_lock, create);
    if (res)
      return res;
  }

  // Here we ask only PrimaryAllocator, because
  // SecondaryAllocator::PointerIsMine() is slow and we fall back to
  // the hashmap anyway.
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    CHECK_NE(b, 0);
    MBlock::ScopedLock l(b);
    SyncVar *res = 0;
    for (res = b->ListHead(); res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      if (!create)
        return 0;
      res = Create(thr, pc, addr);
      b->ListPush(res);
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
#endif

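  // Hashmap path. First probe under a read lock (the common case: the
  // SyncVar already exists); if it is absent and creation is requested,
  // retake the part mutex in write mode and re-scan, because another thread
  // may have inserted the variable between the two critical sections.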
  Part *p = &tab_[PartIdx(addr)];
  {
    ReadLock l(&p->mtx);
    for (SyncVar *res = p->val; res; res = res->next) {
      if (res->addr == addr) {
        if (write_lock)
          res->mtx.Lock();
        else
          res->mtx.ReadLock();
        return res;
      }
    }
  }
  if (!create)
    return 0;
  {
    Lock l(&p->mtx);
    SyncVar *res = p->val;
    for (; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      res = Create(thr, pc, addr);
      res->next = p->val;
      p->val = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
}

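// Removes and returns the SyncVar for addr (0 if it does not exist, or is
// linker-initialized and thus must never be destroyed). The Lock/Unlock
// pair before returning waits out any thread that still holds the var's
// mutex from GetAndLock, so the caller can safely destroy the object.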
SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
#ifndef TSAN_GO
  {  // NOLINT
    SyncVar *res = GetAndRemoveJavaSync(thr, pc, addr);
    if (res)
      return res;
  }
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    CHECK_NE(b, 0);
    SyncVar *res = 0;
    {
      MBlock::ScopedLock l(b);
      res = b->ListHead();
      if (res) {
        if (res->addr == addr) {
          if (res->is_linker_init)
            return 0;
          b->ListPop();
        } else {
          SyncVar **prev = &res->next;
          res = *prev;
          while (res) {
            if (res->addr == addr) {
              if (res->is_linker_init)
                return 0;
              *prev = res->next;
              break;
            }
            prev = &res->next;
            res = *prev;
          }
        }
        if (res) {
          StatInc(thr, StatSyncDestroyed);
          res->mtx.Lock();
          res->mtx.Unlock();
        }
      }
    }
    return res;
  }
#endif

  Part *p = &tab_[PartIdx(addr)];
  SyncVar *res = 0;
  {
    Lock l(&p->mtx);
    SyncVar **prev = &p->val;
    res = *prev;
    while (res) {
      if (res->addr == addr) {
        if (res->is_linker_init)
          return 0;
        *prev = res->next;
        break;
      }
      prev = &res->next;
      res = *prev;
    }
  }
  if (res) {
    StatInc(thr, StatSyncDestroyed);
    res->mtx.Lock();
    res->mtx.Unlock();
  }
  return res;
}

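// Maps an address to one of kPartCount buckets. The low 3 bits are dropped
// because sync objects are at least 8-byte (2^3) aligned, so those bits
// carry no information. A worked example (hypothetical address, for
// illustration only; kPartCount is defined in tsan_sync.h):
//   addr = 0x7f0000001238  ->  (0x7f0000001238 >> 3) % kPartCount
//                           =  0xfe000000247 % kPartCount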
int SyncTab::PartIdx(uptr addr) {
  return (addr >> 3) % kPartCount;
}

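// StackTrace stores its frames either in a caller-provided fixed buffer
// (c_ != 0, set by the two-argument constructor and never freed here) or in
// a buffer obtained from internal_alloc (c_ == 0, freed by Reset). n_ is the
// number of frames currently stored.
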
StackTrace::StackTrace()
    : n_()
    , s_()
    , c_() {
}

StackTrace::StackTrace(uptr *buf, uptr cnt)
    : n_()
    , s_(buf)
    , c_(cnt) {
  CHECK_NE(buf, 0);
  CHECK_NE(cnt, 0);
}

StackTrace::~StackTrace() {
  Reset();
}

void StackTrace::Reset() {
  if (s_ && !c_) {
    CHECK_NE(n_, 0);
    internal_free(s_);
    s_ = 0;
  }
  n_ = 0;
}

void StackTrace::Init(const uptr *pcs, uptr cnt) {
  Reset();
  if (cnt == 0)
    return;
  if (c_) {
    CHECK_NE(s_, 0);
    CHECK_LE(cnt, c_);
  } else {
    s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
  }
  n_ = cnt;
  internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
}

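// Captures the current thread's shadow call stack. If toppc is non-zero, it
// is appended as the topmost frame; the !!toppc terms account for that extra
// slot. If a caller-provided buffer is too small, the oldest frames are
// dropped (copying starts at offset 'start'), keeping the innermost frames.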
void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
  Reset();
  n_ = thr->shadow_stack_pos - thr->shadow_stack;
  if (n_ + !!toppc == 0)
    return;
  uptr start = 0;
  if (c_) {
    CHECK_NE(s_, 0);
    if (n_ + !!toppc > c_) {
      start = n_ - c_ + !!toppc;
      n_ = c_ - !!toppc;
    }
  } else {
    s_ = (uptr*)internal_alloc(MBlockStackTrace,
                               (n_ + !!toppc) * sizeof(s_[0]));
  }
  for (uptr i = 0; i < n_; i++)
    s_[i] = thr->shadow_stack[start + i];
  if (toppc) {
    s_[n_] = toppc;
    n_++;
  }
}

void StackTrace::CopyFrom(const StackTrace& other) {
  Reset();
  Init(other.Begin(), other.Size());
}

bool StackTrace::IsEmpty() const {
  return n_ == 0;
}

uptr StackTrace::Size() const {
  return n_;
}

uptr StackTrace::Get(uptr i) const {
  CHECK_LT(i, n_);
  return s_[i];
}

const uptr *StackTrace::Begin() const {
  return s_;
}

}  // namespace __tsan