//===-- tsan_sync.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

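// State of a single synchronization object (e.g. a user mutex) living
// at address 'addr'. 'uid' comes from a global counter, so a SyncVar
// later recreated at the same address can be told apart from this one.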
SyncVar::SyncVar(uptr addr, u64 uid)
  : mtx(MutexTypeSyncVar, StatMtxSyncVar)
  , addr(addr)
  , uid(uid)
  , owner_tid(kInvalidTid)
  , last_lock()
  , recursion()
  , is_rw()
  , is_recursive()
  , is_broken()
  , is_linker_init() {
}

SyncTab::Part::Part()
  : mtx(MutexTypeSyncTab, StatMtxSyncTab)
  , val() {
}

SyncTab::SyncTab() {
}

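// Frees every SyncVar still linked into any of the partition lists.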
SyncTab::~SyncTab() {
  for (int i = 0; i < kPartCount; i++) {
    while (tab_[i].val) {
      SyncVar *tmp = tab_[i].val;
      tab_[i].val = tmp->next;
      DestroyAndFree(tmp);
    }
  }
}

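// Returns the SyncVar for 'addr' with its mutex held, creating the
// object on first use; the caller must unlock it when done. Usage
// sketch (assumes the global Context exposes this table as 'synctab',
// as elsewhere in the runtime):
//   SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
//   ... inspect or update *s ...
//   s->mtx.Unlock();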
SyncVar* SyncTab::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* SyncTab::GetIfExistsAndLock(uptr addr, bool write_lock) {
  return GetAndLock(0, 0, addr, write_lock, false);
}

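// Allocates and constructs a fresh SyncVar for 'addr' with a unique id
// drawn from uid_gen_. The creation stack is recorded except in the Go
// build (TSAN_GO).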
SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) {
  StatInc(thr, StatSyncCreated);
  void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
  const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
  SyncVar *res = new(mem) SyncVar(addr, uid);
#ifndef TSAN_GO
  res->creation_stack_id = CurrentStackId(thr, pc);
#endif
  return res;
}

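// Common implementation of the two getters above. Tries, in order, the
// Java synchronization metadata, the per-MBlock list for memory owned by
// the primary allocator, and finally the partitioned hash table. On
// success the SyncVar is returned with its mutex write- or read-locked,
// as requested via 'write_lock'.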
SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
#ifndef TSAN_GO
  {  // NOLINT
    SyncVar *res = GetJavaSync(thr, pc, addr, write_lock, create);
    if (res)
      return res;
  }

  // Ask only the PrimaryAllocator here: SecondaryAllocator::PointerIsMine()
  // is slow, and we fall back to the hashmap anyway.
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    MBlock::ScopedLock l(b);
    SyncVar *res = 0;
    for (res = b->ListHead(); res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      if (!create)
        return 0;
      res = Create(thr, pc, addr);
      b->ListPush(res);
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
#endif

  Part *p = &tab_[PartIdx(addr)];
  {
    ReadLock l(&p->mtx);
    for (SyncVar *res = p->val; res; res = res->next) {
      if (res->addr == addr) {
        if (write_lock)
          res->mtx.Lock();
        else
          res->mtx.ReadLock();
        return res;
      }
    }
  }
  if (!create)
    return 0;
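  // Not found: retake the partition lock exclusively and re-check,
  // since another thread may have inserted the SyncVar after the
  // read-locked scan above.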
  {
    Lock l(&p->mtx);
    SyncVar *res = p->val;
    for (; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      res = Create(thr, pc, addr);
      res->next = p->val;
      p->val = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
}

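// Unlinks and returns the SyncVar for 'addr' so that the caller can
// destroy it, or returns 0 if there is none. Linker-initialized
// SyncVars are never removed.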
SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
#ifndef TSAN_GO
  {  // NOLINT
    SyncVar *res = GetAndRemoveJavaSync(thr, pc, addr);
    if (res)
      return res;
  }
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    SyncVar *res = 0;
    {
      MBlock::ScopedLock l(b);
      res = b->ListHead();
      if (res) {
        if (res->addr == addr) {
          if (res->is_linker_init)
            return 0;
          b->ListPop();
        } else {
          SyncVar **prev = &res->next;
          res = *prev;
          while (res) {
            if (res->addr == addr) {
              if (res->is_linker_init)
                return 0;
              *prev = res->next;
              break;
            }
            prev = &res->next;
            res = *prev;
          }
        }
        if (res) {
          StatInc(thr, StatSyncDestroyed);
          res->mtx.Lock();
          res->mtx.Unlock();
        }
      }
    }
    return res;
  }
#endif

  Part *p = &tab_[PartIdx(addr)];
  SyncVar *res = 0;
  {
    Lock l(&p->mtx);
    SyncVar **prev = &p->val;
    res = *prev;
    while (res) {
      if (res->addr == addr) {
        if (res->is_linker_init)
          return 0;
        *prev = res->next;
        break;
      }
      prev = &res->next;
      res = *prev;
    }
  }
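  // Lock and unlock the now-unlinked SyncVar so that any thread still
  // holding its mutex drains out before the caller destroys it.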
  if (res) {
    StatInc(thr, StatSyncDestroyed);
    res->mtx.Lock();
    res->mtx.Unlock();
  }
  return res;
}

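// Maps an address to one of the kPartCount partitions. The low three
// bits are discarded first, presumably because synchronization objects
// are at least 8-byte aligned.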
int SyncTab::PartIdx(uptr addr) {
  return (addr >> 3) % kPartCount;
}

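// A StackTrace either owns a lazily heap-allocated buffer (default
// constructor) or wraps a caller-provided fixed buffer of 'cnt' slots
// (second constructor; c_ != 0 marks this non-owning mode).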
StackTrace::StackTrace()
    : n_()
    , s_()
    , c_() {
}

StackTrace::StackTrace(uptr *buf, uptr cnt)
    : n_()
    , s_(buf)
    , c_(cnt) {
  CHECK_NE(buf, 0);
  CHECK_NE(cnt, 0);
}

StackTrace::~StackTrace() {
  Reset();
}

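// Drops the contents and frees the buffer if it is owned; a
// caller-provided buffer (c_ != 0) is kept for reuse.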
void StackTrace::Reset() {
  if (s_ && !c_) {
    CHECK_NE(n_, 0);
    internal_free(s_);
    s_ = 0;
  }
  n_ = 0;
}

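// Copies 'cnt' PCs into the trace. A caller-provided buffer must be
// large enough; an owned buffer is (re)allocated to the exact size.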
void StackTrace::Init(const uptr *pcs, uptr cnt) {
  Reset();
  if (cnt == 0)
    return;
  if (c_) {
    CHECK_NE(s_, 0);
    CHECK_LE(cnt, c_);
  } else {
    s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
  }
  n_ = cnt;
  internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
}

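// Captures the current shadow (call) stack of 'thr', optionally
// appending 'toppc' as the topmost frame; '!!toppc' is the number of
// extra slots (0 or 1) that frame needs. If a caller-provided buffer
// is too small, the oldest frames are dropped.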
void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
  Reset();
  n_ = thr->shadow_stack_pos - thr->shadow_stack;
  if (n_ + !!toppc == 0)
    return;
  uptr start = 0;
  if (c_) {
    CHECK_NE(s_, 0);
    if (n_ + !!toppc > c_) {
      start = n_ - c_ + !!toppc;
      n_ = c_ - !!toppc;
    }
  } else {
    s_ = (uptr*)internal_alloc(MBlockStackTrace,
                               (n_ + !!toppc) * sizeof(s_[0]));
  }
  for (uptr i = 0; i < n_; i++)
    s_[i] = thr->shadow_stack[start + i];
  if (toppc) {
    s_[n_] = toppc;
    n_++;
  }
}

void StackTrace::CopyFrom(const StackTrace& other) {
  Reset();
  Init(other.Begin(), other.Size());
}

bool StackTrace::IsEmpty() const {
  return n_ == 0;
}

uptr StackTrace::Size() const {
  return n_;
}

uptr StackTrace::Get(uptr i) const {
  CHECK_LT(i, n_);
  return s_[i];
}

const uptr *StackTrace::Begin() const {
  return s_;
}

}  // namespace __tsan