//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// The quarantine accumulates a specified amount of memory in per-thread
// caches, then evicts it to a global FIFO queue. When the queue exceeds a
// specified threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//

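// A rough sketch of the data flow (illustrative only; the names are defined
// below):
//
//   Put(cache, cb, ptr, size)  // quarantine ptr in a per-thread cache
//     -> Drain(cache, cb)      // cache exceeded max_cache_size_: move its
//                              // batches into the global cache
//       -> Recycle(cb)         // global cache exceeded max_size_: pop the
//                              // oldest batches and return each pointer
//                              // via cb.Recycle()
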
#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

template<typename Callback> class QuarantineCache;

// A fixed-size batch of quarantined pointers, chained into an intrusive list.
struct QuarantineBatch {
  static const uptr kSize = 1024;
  QuarantineBatch *next;
  uptr size;   // Total number of quarantined bytes in this batch.
  uptr count;  // Number of pointers currently stored in batch[].
  void *batch[kSize];
};
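
// Note: with kSize == 1024 a single batch occupies a little over 8 KB on a
// 64-bit target (1024 pointers plus three uptr-sized header fields).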

// The callback interface is:
// void Callback::Recycle(Node *ptr);
// void *Callback::Allocate(uptr size);
// void Callback::Deallocate(void *ptr);
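//
// A minimal conforming callback might look like the sketch below; the names
// ExampleCallback, InternalAlloc and InternalFree are placeholders for
// whatever allocator the tool actually uses:
//
//   struct ExampleCallback {
//     void Recycle(Node *ptr) {
//       // Hand the quarantined block back to the underlying allocator.
//     }
//     void *Allocate(uptr size) { return InternalAlloc(size); }
//     void Deallocate(void *ptr) { InternalFree(ptr); }
//   };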
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    max_size_ = size;
    min_size_ = size / 10 * 9;  // 90% of max size.
    max_cache_size_ = cache_size;
  }
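  // For example (hypothetical parameters): Init(100 << 20, 1 << 20) allows
  // up to 100 MB of quarantined memory in total; once that limit is crossed,
  // the oldest blocks are recycled until roughly 90 MB remain, and each
  // thread drains its local cache into the global one once it exceeds 1 MB.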

  // Quarantine ptr, and drain this thread's cache into the global one if it
  // has grown past max_cache_size_.
  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    c->Enqueue(cb, ptr, size);
    if (c->Size() > max_cache_size_)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    // TryLock ensures that at most one thread recycles at a time; the
    // others simply leave their blocks in the global cache.
    if (cache_.Size() > max_size_ && recycle_mutex_.TryLock())
      Recycle(cb);
  }

 private:
  // Read-only data. The cache line pads keep the constant configuration,
  // the mutexes and the shared cache from sharing cache lines.
  char pad0_[kCacheLineSize];
  uptr max_size_;
  uptr min_size_;
  uptr max_cache_size_;
  char pad1_[kCacheLineSize];
  SpinMutex cache_mutex_;
  SpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  // Called with recycle_mutex_ held. Moves the oldest batches into a local
  // cache under cache_mutex_, then releases recycle_mutex_ before running
  // the potentially slow callbacks.
  void NOINLINE Recycle(Callback cb) {
    Cache tmp;
    {
      SpinMutexLock l(&cache_mutex_);
      while (cache_.Size() > min_size_) {
        QuarantineBatch *b = cache_.DequeueBatch();
        tmp.EnqueueBatch(b);
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      // Prefetch the blocks a few iterations ahead of the Recycle() calls
      // that will touch them. The bounds checks keep the prefetch loops
      // from reading past the b->count initialized entries of batch[].
      const uptr kPrefetch = 16;
      for (uptr i = 0; i < kPrefetch && i < b->count; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0; i < b->count; i++) {
        if (i + kPrefetch < b->count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};

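// Example of intended use, with hypothetical tool-side names (ToolCb,
// ToolChunk and the size constants are illustrative, not part of this
// header):
//
//   typedef Quarantine<ToolCb, ToolChunk> ToolQuarantine;
//   static ToolQuarantine quarantine(LINKER_INITIALIZED);
//
//   void InitQuarantine() {
//     quarantine.Init(kQuarantineMaxSize, kThreadCacheSize);
//   }
//
//   void QuarantineChunk(ToolQuarantine::Cache *thread_cache,
//                        ToolChunk *chunk, uptr size) {
//     quarantine.Put(thread_cache, ToolCb(), chunk, size);
//   }
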
// Per-thread cache of memory blocks (also used by Quarantine for the global
// cache, protected there by cache_mutex_).
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  // Total number of quarantined bytes; may be read concurrently from other
  // threads, hence the relaxed atomic.
  uptr Size() const {
    return atomic_load(&size_, memory_order_relaxed);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize)
      AllocBatch(cb);
    QuarantineBatch *b = list_.back();
    b->batch[b->count++] = ptr;
    b->size += size;
    SizeAdd(size);
  }

  // Move all batches from another cache (typically a drained per-thread
  // cache) into this one.
  void Transfer(QuarantineCache *c) {
    list_.append_back(&c->list_);
    SizeAdd(c->Size());
    atomic_store(&c->size_, 0, memory_order_relaxed);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return 0;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

 private:
  IntrusiveList<QuarantineBatch> list_;
  atomic_uintptr_t size_;

  // size_ is updated with a plain load/store rather than an atomic RMW:
  // each cache is only mutated by its owning thread (or under Quarantine's
  // cache_mutex_ for the global cache); the atomic merely makes Size()
  // safe to read from other threads.
  void SizeAdd(uptr add) {
    atomic_store(&size_, Size() + add, memory_order_relaxed);
  }

  void SizeSub(uptr sub) {
    atomic_store(&size_, Size() - sub, memory_order_relaxed);
  }

  // Allocate a fresh empty batch via the callback and append it to the list.
  NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
    QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
    b->count = 0;
    b->size = 0;
    list_.push_back(b);
    return b;
  }
};
}  // namespace __sanitizer

#endif  // #ifndef SANITIZER_QUARANTINE_H