//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// Quarantine caches a specified amount of memory in per-thread caches, then
// evicts it to a global FIFO queue. When the queue reaches a specified
// threshold, the oldest memory is recycled.
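//
// For example (illustrative numbers, not defaults defined in this file):
// after Init(100 << 20, 1 << 20), a thread's local cache drains into the
// global queue once it exceeds 1 MB, and when the global queue exceeds
// 100 MB the oldest batches are recycled until it shrinks to 90 MB
// (min_size_ is 90% of max_size_).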
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Callback> class QuarantineCache;

struct QuarantineBatch {
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;   // Total bytes accounted to this batch, including its own footprint.
  uptr count;  // Number of pointers currently stored in batch[].
  void *batch[kSize];
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8 KB.

// The callback interface is:
// void Callback::Recycle(Node *ptr);
// void *Callback::Allocate(uptr size);
// void Callback::Deallocate(void *ptr);
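//
// A minimal sketch of such a callback (the Chunk type and the my_alloc /
// my_free / finalize_chunk helpers are hypothetical, not part of this file;
// real tools wire in their own allocator):
//
//   struct ExampleCallback {
//     void Recycle(Chunk *ptr) { finalize_chunk(ptr); }     // Chunk leaves quarantine.
//     void *Allocate(uptr size) { return my_alloc(size); }  // Batch storage.
//     void Deallocate(void *ptr) { my_free(ptr); }          // Batch storage.
//   };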
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    max_size_ = size;
    min_size_ = size / 10 * 9;  // 90% of max size.
    max_cache_size_ = cache_size;
  }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    c->Enqueue(cb, ptr, size);
    if (c->Size() > max_cache_size_)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    // TryLock: if another thread is already recycling, do not block;
    // that thread will bring the quarantine back under max_size_.
    if (cache_.Size() > max_size_ && recycle_mutex_.TryLock())
      Recycle(cb);
  }

 private:
  // The paddings keep the read-only configuration and the contended
  // mutexes/cache on separate cache lines to avoid false sharing.
  // Read-only data.
  char pad0_[kCacheLineSize];
  uptr max_size_;
  uptr min_size_;
  uptr max_cache_size_;
  char pad1_[kCacheLineSize];
  SpinMutex cache_mutex_;
  SpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  // Called with recycle_mutex_ held (acquired via TryLock in Drain);
  // releases it before the actual, potentially slow, recycling.
  void NOINLINE Recycle(Callback cb) {
    Cache tmp;
    {
      SpinMutexLock l(&cache_mutex_);
      while (cache_.Size() > min_size_) {
        QuarantineBatch *b = cache_.DequeueBatch();
        tmp.EnqueueBatch(b);
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      const uptr kPrefetch = 16;
      // kPrefetch never exceeds kSize, so this stays within the array.
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0; i < b->count; i++) {
        // Guard the look-ahead prefetch: without it, i + kPrefetch could run
        // past the last used slot and, for a full batch, past the end of the
        // batch array.
        if (i + kPrefetch < b->count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};

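
// Typical use, as a sketch (ExampleCallback, Chunk, and the sizes are
// illustrative, not taken from this file):
//
//   typedef Quarantine<ExampleCallback, Chunk> ChunkQuarantine;
//   static ChunkQuarantine quarantine(LINKER_INITIALIZED);
//
//   // Once at startup: 64 MB global quarantine, 1 MB per-thread caches.
//   quarantine.Init(64 << 20, 1 << 20);
//
//   // On each deallocation, with a per-thread ChunkQuarantine::Cache:
//   quarantine.Put(&cache, ExampleCallback(), chunk, chunk_size);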
// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  // For linker-initialized (global static) instances: relies on zero
  // initialization, so members are deliberately left untouched.
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  uptr Size() const {
    return atomic_load(&size_, memory_order_relaxed);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      AllocBatch(cb);
      size += sizeof(QuarantineBatch);  // Count the batch in Quarantine size.
    }
    QuarantineBatch *b = list_.back();
    b->batch[b->count++] = ptr;
    b->size += size;
    SizeAdd(size);
  }

  void Transfer(QuarantineCache *c) {
    list_.append_back(&c->list_);
    SizeAdd(c->Size());
    atomic_store(&c->size_, 0, memory_order_relaxed);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return nullptr;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

 private:
  IntrusiveList<QuarantineBatch> list_;
  atomic_uintptr_t size_;

  // size_ may be read concurrently (e.g. Quarantine::Drain polls the global
  // cache size), but it is only modified by the owning thread or under
  // cache_mutex_, so a relaxed load followed by a relaxed store is enough;
  // no atomic read-modify-write is needed.
  void SizeAdd(uptr add) {
    atomic_store(&size_, Size() + add, memory_order_relaxed);
  }
  void SizeSub(uptr sub) {
    atomic_store(&size_, Size() - sub, memory_order_relaxed);
  }

  NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
    QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
    b->count = 0;
    b->size = 0;
    list_.push_back(b);  // b->next is initialized by push_back.
    return b;
  }
};
}  // namespace __sanitizer

#endif  // SANITIZER_QUARANTINE_H