//===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// A DenseSlabAlloc is a freelist-based allocator of fixed-size objects.
// DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc.
// The only difference from traditional slab allocators is that DenseSlabAlloc
// allocates/frees indices of objects and provides functionality to map
// an index onto the real pointer. The index is u32, that is, half the size
// of uptr (hence the Dense prefix).
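//
// A minimal usage sketch (MyNode and the size parameters below are
// illustrative placeholders, not part of TSan):
//
//   typedef DenseSlabAlloc<MyNode, 1 << 10, 1 << 10> NodeAlloc;
//   NodeAlloc alloc;
//   DenseSlabAllocCache cache;
//   alloc.InitCache(&cache);
//   NodeAlloc::IndexT idx = alloc.Alloc(&cache);  // never returns 0
//   MyNode *p = alloc.Map(idx);  // resolve the u32 handle to a pointer
//   alloc.Free(&cache, idx);
//   alloc.FlushCache(&cache);  // return cached indices before thread exit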
//===----------------------------------------------------------------------===//
#ifndef TSAN_DENSE_ALLOC_H
#define TSAN_DENSE_ALLOC_H

#include "sanitizer_common/sanitizer_common.h"
#include "tsan_defs.h"
#include "tsan_mutex.h"

namespace __tsan {

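// A per-thread cache of free object indices. Threads allocate and free
// through their cache and take the shared allocator lock only to refill
// an empty cache or drain a full one.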
class DenseSlabAllocCache {
  static const uptr kSize = 128;
  typedef u32 IndexT;
  uptr pos;
  IndexT cache[kSize];
  template<typename T, uptr kL1Size, uptr kL2Size> friend class DenseSlabAlloc;
};

template<typename T, uptr kL1Size, uptr kL2Size>
class DenseSlabAlloc {
 public:
  typedef DenseSlabAllocCache Cache;
  typedef typename Cache::IndexT IndexT;

  DenseSlabAlloc() {
    // Check that kL1Size and kL2Size are sane (powers of two) and that
    // IndexT can address the whole kL1Size * kL2Size index space.
    CHECK_EQ(kL1Size & (kL1Size - 1), 0);
    CHECK_EQ(kL2Size & (kL2Size - 1), 0);
    CHECK_GE(1ull << (sizeof(IndexT) * 8), kL1Size * kL2Size);
    // Check that it makes sense to use the dense alloc: a free object must
    // be large enough to store the index of the next free object.
    CHECK_GE(sizeof(T), sizeof(IndexT));
    internal_memset(map_, 0, sizeof(map_));
    freelist_ = 0;
    fillpos_ = 0;
  }

  ~DenseSlabAlloc() {
    for (uptr i = 0; i < kL1Size; i++) {
      if (map_[i] != 0)
        UnmapOrDie(map_[i], kL2Size * sizeof(T));
    }
  }

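  // Returns a free index, refilling the cache from the shared freelist
  // when it runs empty. The result is never 0 (0 is reserved as invalid).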
  IndexT Alloc(Cache *c) {
    if (c->pos == 0)
      Refill(c);
    return c->cache[--c->pos];
  }

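  // Returns idx to the cache, draining half of the cache back to the
  // shared freelist when the cache is full.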
  void Free(Cache *c, IndexT idx) {
    DCHECK_NE(idx, 0);
    if (c->pos == Cache::kSize)
      Drain(c);
    c->cache[c->pos++] = idx;
  }

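  // Translates an index into an object pointer: idx / kL2Size selects the
  // L1 slot, idx % kL2Size selects the object within that L2 block.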
  T *Map(IndexT idx) {
    DCHECK_NE(idx, 0);
    DCHECK_LE(idx, kL1Size * kL2Size);
    return &map_[idx / kL2Size][idx % kL2Size];
  }

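  // Returns all cached indices to the shared freelist, e.g. before the
  // owning thread exits.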
  void FlushCache(Cache *c) {
    SpinMutexLock lock(&mtx_);
    while (c->pos) {
      IndexT idx = c->cache[--c->pos];
      *(IndexT*)Map(idx) = freelist_;
      freelist_ = idx;
    }
  }

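  // Prepares an empty cache; must be called before the cache is first used.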
  void InitCache(Cache *c) {
    c->pos = 0;
    internal_memset(c->cache, 0, sizeof(c->cache));
  }

 private:
  T *map_[kL1Size];  // L1 table of lazily mmap-ed L2 blocks, kL2Size objects each.
  SpinMutex mtx_;    // Protects freelist_ and fillpos_.
  IndexT freelist_;  // Head of the shared freelist; 0 means empty.
  uptr fillpos_;     // Number of L1 slots already backed by memory.

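  // Moves up to kSize/2 indices from the shared freelist into the cache,
  // mapping a fresh L2 block first if the freelist is empty.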
  void Refill(Cache *c) {
    SpinMutexLock lock(&mtx_);
    if (freelist_ == 0) {
      if (fillpos_ == kL1Size) {
        Printf("ThreadSanitizer: DenseSlabAllocator overflow. Dying.\n");
        Die();
      }
      T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), "DenseSlabAllocator");
      // Reserve 0 as invalid index.
      IndexT start = fillpos_ == 0 ? 1 : 0;
      // Chain the new objects into a freelist: each free object stores the
      // index of the next free object in its first bytes.
      for (IndexT i = start; i < kL2Size; i++) {
        new(batch + i) T;
        *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size;
      }
      *(IndexT*)(batch + kL2Size - 1) = 0;  // Terminate the chain.
      freelist_ = fillpos_ * kL2Size + start;
      map_[fillpos_++] = batch;
    }
    for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
      IndexT idx = freelist_;
      c->cache[c->pos++] = idx;
      freelist_ = *(IndexT*)Map(idx);
    }
  }

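  // Moves half of the cached indices back to the shared freelist.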
  void Drain(Cache *c) {
    SpinMutexLock lock(&mtx_);
    for (uptr i = 0; i < Cache::kSize / 2; i++) {
      IndexT idx = c->cache[--c->pos];
      *(IndexT*)Map(idx) = freelist_;
      freelist_ = idx;
    }
  }
};

}  // namespace __tsan

#endif  // TSAN_DENSE_ALLOC_H