Home | History | Annotate | Download | only in rtl
      1 //===-- tsan_fd.cc --------------------------------------------------------===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This file is a part of ThreadSanitizer (TSan), a race detector.
     11 //
     12 //===----------------------------------------------------------------------===//
     13 
     14 #include "tsan_fd.h"
     15 #include "tsan_rtl.h"
     16 #include <sanitizer_common/sanitizer_atomic.h>
     17 
     18 namespace __tsan {
     19 
// The fd descriptor table is two-level: kTableSizeL1 top-level slots, each
// lazily populated with an array of kTableSizeL2 FdDesc entries.
const int kTableSizeL1 = 1024;
const int kTableSizeL2 = 1024;
const int kTableSize = kTableSizeL1 * kTableSizeL2;
     23 
// Reference-counted synchronization object associated with one or more fds.
// rc == (u64)-1 marks a statically-allocated FdSync that is never counted
// or freed (see FdInit/ref/unref).
struct FdSync {
  atomic_uint64_t rc;
};
     27 
// Per-fd descriptor: synchronization object plus creation info for reports.
struct FdDesc {
  FdSync *sync;
  int creation_tid;    // tid of the thread that created the fd
  u32 creation_stack;  // stack id captured via CurrentStackId at creation
};
     33 
// Global fd state: the lazily-populated two-level descriptor table plus
// statically-allocated FdSync objects shared by whole classes of fds.
struct FdContext {
  atomic_uintptr_t tab[kTableSizeL1];
  // Addresses used for synchronization.
  FdSync globsync;   // used when flags()->io_sync == 2: all fds sync together
  FdSync filesync;   // shared by all plain file fds
  FdSync socksync;   // shared by all socket fds
  u64 connectsync;   // address used to synchronize connect() -> accept()
};
     42 
// The singleton fd context (zero-initialized at startup; statics set up in FdInit).
static FdContext fdctx;
     44 
     45 static FdSync *allocsync() {
     46   FdSync *s = (FdSync*)internal_alloc(MBlockFD, sizeof(FdSync));
     47   atomic_store(&s->rc, 1, memory_order_relaxed);
     48   return s;
     49 }
     50 
     51 static FdSync *ref(FdSync *s) {
     52   if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1)
     53     atomic_fetch_add(&s->rc, 1, memory_order_relaxed);
     54   return s;
     55 }
     56 
     57 static void unref(ThreadState *thr, uptr pc, FdSync *s) {
     58   if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1) {
     59     if (atomic_fetch_sub(&s->rc, 1, memory_order_acq_rel) == 1) {
     60       CHECK_NE(s, &fdctx.globsync);
     61       CHECK_NE(s, &fdctx.filesync);
     62       CHECK_NE(s, &fdctx.socksync);
     63       SyncVar *v = CTX()->synctab.GetAndRemove(thr, pc, (uptr)s);
     64       if (v)
     65         DestroyAndFree(v);
     66       internal_free(s);
     67     }
     68   }
     69 }
     70 
     71 static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
     72   CHECK_LT(fd, kTableSize);
     73   atomic_uintptr_t *pl1 = &fdctx.tab[fd / kTableSizeL2];
     74   uptr l1 = atomic_load(pl1, memory_order_consume);
     75   if (l1 == 0) {
     76     uptr size = kTableSizeL2 * sizeof(FdDesc);
     77     // We need this to reside in user memory to properly catch races on it.
     78     void *p = user_alloc(thr, pc, size);
     79     internal_memset(p, 0, size);
     80     MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
     81     if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
     82       l1 = (uptr)p;
     83     else
     84       user_free(thr, pc, p);
     85   }
     86   return &((FdDesc*)l1)[fd % kTableSizeL2];  // NOLINT
     87 }
     88 
// s must be already ref'ed by the caller; init takes ownership of that
// reference (it either stores s in the descriptor or unrefs it, depending
// on the io_sync flag).
static void init(ThreadState *thr, uptr pc, int fd, FdSync *s) {
  FdDesc *d = fddesc(thr, pc, fd);
  // As a matter of fact, we don't intercept all close calls.
  // See e.g. libc __res_iclose().
  if (d->sync) {
    unref(thr, pc, d->sync);
    d->sync = 0;
  }
  if (flags()->io_sync == 0) {
    // io_sync == 0: no fd synchronization at all.
    unref(thr, pc, s);
  } else if (flags()->io_sync == 1) {
    // io_sync == 1: the fd synchronizes via its own FdSync.
    d->sync = s;
  } else if (flags()->io_sync == 2) {
    // io_sync == 2: all fds synchronize through the single global FdSync.
    unref(thr, pc, s);
    d->sync = &fdctx.globsync;
  }
  d->creation_tid = thr->tid;
  d->creation_stack = CurrentStackId(thr, pc);
  // To catch races between fd usage and open.
  MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
}
    111 
    112 void FdInit() {
    113   atomic_store(&fdctx.globsync.rc, (u64)-1, memory_order_relaxed);
    114   atomic_store(&fdctx.filesync.rc, (u64)-1, memory_order_relaxed);
    115   atomic_store(&fdctx.socksync.rc, (u64)-1, memory_order_relaxed);
    116 }
    117 
    118 void FdOnFork(ThreadState *thr, uptr pc) {
    119   // On fork() we need to reset all fd's, because the child is going
    120   // close all them, and that will cause races between previous read/write
    121   // and the close.
    122   for (int l1 = 0; l1 < kTableSizeL1; l1++) {
    123     FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
    124     if (tab == 0)
    125       break;
    126     for (int l2 = 0; l2 < kTableSizeL2; l2++) {
    127       FdDesc *d = &tab[l2];
    128       MemoryResetRange(thr, pc, (uptr)d, 8);
    129     }
    130   }
    131 }
    132 
    133 bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack) {
    134   for (int l1 = 0; l1 < kTableSizeL1; l1++) {
    135     FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
    136     if (tab == 0)
    137       break;
    138     if (addr >= (uptr)tab && addr < (uptr)(tab + kTableSizeL2)) {
    139       int l2 = (addr - (uptr)tab) / sizeof(FdDesc);
    140       FdDesc *d = &tab[l2];
    141       *fd = l1 * kTableSizeL1 + l2;
    142       *tid = d->creation_tid;
    143       *stack = d->creation_stack;
    144       return true;
    145     }
    146   }
    147   return false;
    148 }
    149 
    150 void FdAcquire(ThreadState *thr, uptr pc, int fd) {
    151   FdDesc *d = fddesc(thr, pc, fd);
    152   FdSync *s = d->sync;
    153   DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
    154   MemoryRead(thr, pc, (uptr)d, kSizeLog8);
    155   if (s)
    156     Acquire(thr, pc, (uptr)s);
    157 }
    158 
    159 void FdRelease(ThreadState *thr, uptr pc, int fd) {
    160   FdDesc *d = fddesc(thr, pc, fd);
    161   FdSync *s = d->sync;
    162   DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
    163   MemoryRead(thr, pc, (uptr)d, kSizeLog8);
    164   if (s)
    165     Release(thr, pc, (uptr)s);
    166 }
    167 
    168 void FdAccess(ThreadState *thr, uptr pc, int fd) {
    169   DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd);
    170   FdDesc *d = fddesc(thr, pc, fd);
    171   MemoryRead(thr, pc, (uptr)d, kSizeLog8);
    172 }
    173 
    174 void FdClose(ThreadState *thr, uptr pc, int fd) {
    175   DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
    176   FdDesc *d = fddesc(thr, pc, fd);
    177   // To catch races between fd usage and close.
    178   MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
    179   // We need to clear it, because if we do not intercept any call out there
    180   // that creates fd, we will hit false postives.
    181   MemoryResetRange(thr, pc, (uptr)d, 8);
    182   unref(thr, pc, d->sync);
    183   d->sync = 0;
    184   d->creation_tid = 0;
    185   d->creation_stack = 0;
    186 }
    187 
// open() and friends: the new fd synchronizes via the shared file FdSync.
void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdFileCreate(%d)\n", thr->tid, fd);
  init(thr, pc, fd, &fdctx.filesync);
}
    192 
    193 void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd) {
    194   DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd);
    195   // Ignore the case when user dups not yet connected socket.
    196   FdDesc *od = fddesc(thr, pc, oldfd);
    197   MemoryRead(thr, pc, (uptr)od, kSizeLog8);
    198   FdClose(thr, pc, newfd);
    199   init(thr, pc, newfd, ref(od->sync));
    200 }
    201 
    202 void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
    203   DPrintf("#%d: FdCreatePipe(%d, %d)\n", thr->tid, rfd, wfd);
    204   FdSync *s = allocsync();
    205   init(thr, pc, rfd, ref(s));
    206   init(thr, pc, wfd, ref(s));
    207   unref(thr, pc, s);
    208 }
    209 
// eventfd(): gets its own FdSync (the reference is consumed by init).
void FdEventCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
  init(thr, pc, fd, allocsync());
}
    214 
// signalfd(): tracked for race detection but carries no FdSync, so it
// imposes no synchronization.
void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSignalCreate(%d)\n", thr->tid, fd);
  init(thr, pc, fd, 0);
}
    219 
// inotify_init(): tracked for race detection but carries no FdSync, so it
// imposes no synchronization.
void FdInotifyCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdInotifyCreate(%d)\n", thr->tid, fd);
  init(thr, pc, fd, 0);
}
    224 
// epoll_create(): gets its own FdSync (the reference is consumed by init).
void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
  init(thr, pc, fd, allocsync());
}
    229 
// socket(): synchronizes via the FdSync shared by all sockets.
void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd);
  // It can be a UDP socket.
  init(thr, pc, fd, &fdctx.socksync);
}
    235 
// accept(): pairs with the Release in FdSocketConnecting, then tracks the
// accepted fd with the shared socket FdSync.
void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd) {
  DPrintf("#%d: FdSocketAccept(%d, %d)\n", thr->tid, fd, newfd);
  // Synchronize connect->accept.
  Acquire(thr, pc, (uptr)&fdctx.connectsync);
  init(thr, pc, newfd, &fdctx.socksync);
}
    242 
// Called when connect() is initiated: pairs with the Acquire in
// FdSocketAccept on the other side.
void FdSocketConnecting(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketConnecting(%d)\n", thr->tid, fd);
  // Synchronize connect->accept.
  Release(thr, pc, (uptr)&fdctx.connectsync);
}
    248 
// Called when connect() completes: the fd synchronizes via the FdSync
// shared by all sockets.
void FdSocketConnect(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketConnect(%d)\n", thr->tid, fd);
  init(thr, pc, fd, &fdctx.socksync);
}
    253 
    254 uptr File2addr(char *path) {
    255   (void)path;
    256   static u64 addr;
    257   return (uptr)&addr;
    258 }
    259 
    260 uptr Dir2addr(char *path) {
    261   (void)path;
    262   static u64 addr;
    263   return (uptr)&addr;
    264 }
    265 
    266 }  //  namespace __tsan
    267