//===-- sanitizer_allocator.h -----------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Specialized memory allocator for ThreadSanitizer, MemorySanitizer, etc.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ALLOCATOR_H
#define SANITIZER_ALLOCATOR_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"
#include "sanitizer_lfstack.h"

namespace __sanitizer {

// SizeClassMap maps allocation sizes into size classes and back.
// Class 0 corresponds to size 0.
// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
// Next 4 classes: 256 + i * 64  (i = 1 to 4).
// Next 4 classes: 512 + i * 128 (i = 1 to 4).
// ...
// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
//
// This structure of the size class map gives us:
//   - Efficient table-free class-to-size and size-to-class functions.
//   - The difference between two consecutive size classes is between 14% and 25%.
//
// This class also gives a hint to a thread-caching allocator about the number
// of chunks that need to be cached per-thread:
//  - kMaxNumCached is the maximal number of chunks per size class.
//  - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
//
// Part of output of SizeClassMap::Print():
// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
//
// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
//
// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
//
// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
//
// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
//
// ...
//
// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
//
// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52

template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog>
class SizeClassMap {
  static const uptr kMinSizeLog = 4;
  static const uptr kMidSizeLog = kMinSizeLog + 4;
  static const uptr kMinSize = 1 << kMinSizeLog;
  static const uptr kMidSize = 1 << kMidSizeLog;
  static const uptr kMidClass = kMidSize / kMinSize;
  static const uptr S = 2;
  static const uptr M = (1 << S) - 1;

 public:
  static const uptr kMaxNumCached = kMaxNumCachedT;
  // We transfer chunks between central and thread-local free lists in batches.
  // For small size classes we allocate batches separately.
  // For large size classes we use one of the chunks to store the batch.
  struct TransferBatch {
    TransferBatch *next;
    uptr count;
    void *batch[kMaxNumCached];
  };

  static const uptr kMaxSize = 1 << kMaxSizeLog;
  static const uptr kNumClasses =
      kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
  COMPILER_CHECK(kNumClasses >= 32 && kNumClasses <= 256);
  static const uptr kNumClassesRounded =
      kNumClasses == 32  ? 32 :
      kNumClasses <= 64  ? 64 :
      kNumClasses <= 128 ? 128 : 256;

  static uptr Size(uptr class_id) {
    if (class_id <= kMidClass)
      return kMinSize * class_id;
    class_id -= kMidClass;
    uptr t = kMidSize << (class_id >> S);
    return t + (t >> S) * (class_id & M);
  }

  static uptr ClassID(uptr size) {
    if (size <= kMidSize)
      return (size + kMinSize - 1) >> kMinSizeLog;
    if (size > kMaxSize) return 0;
    uptr l = MostSignificantSetBitIndex(size);
    uptr hbits = (size >> (l - S)) & M;
    uptr lbits = size & ((1 << (l - S)) - 1);
    uptr l1 = l - kMidSizeLog;
    return kMidClass + (l1 << S) + hbits + (lbits > 0);
  }

  static uptr MaxCached(uptr class_id) {
    if (class_id == 0) return 0;
    uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
    return Max<uptr>(1, Min(kMaxNumCached, n));
  }

  static void Print() {
    uptr prev_s = 0;
    uptr total_cached = 0;
    for (uptr i = 0; i < kNumClasses; i++) {
      uptr s = Size(i);
      if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
        Printf("\n");
      uptr d = s - prev_s;
      uptr p = prev_s ? (d * 100 / prev_s) : 0;
      uptr l = s ? MostSignificantSetBitIndex(s) : 0;
      uptr cached = MaxCached(i) * s;
      Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
             "cached: %zd %zd; id %zd\n",
             i, Size(i), d, p, l, MaxCached(i), cached, ClassID(s));
      total_cached += cached;
      prev_s = s;
    }
    Printf("Total cached: %zd\n", total_cached);
  }

  static bool SizeClassRequiresSeparateTransferBatch(uptr class_id) {
    return Size(class_id) < sizeof(TransferBatch) -
        sizeof(uptr) * (kMaxNumCached - MaxCached(class_id));
  }

  static void Validate() {
    for (uptr c = 1; c < kNumClasses; c++) {
      // Printf("Validate: c%zd\n", c);
      uptr s = Size(c);
      CHECK_NE(s, 0U);
      CHECK_EQ(ClassID(s), c);
      if (c != kNumClasses - 1)
        CHECK_EQ(ClassID(s + 1), c + 1);
      CHECK_EQ(ClassID(s - 1), c);
      if (c)
        CHECK_GT(Size(c), Size(c-1));
    }
    CHECK_EQ(ClassID(kMaxSize + 1), 0);

    for (uptr s = 1; s <= kMaxSize; s++) {
      uptr c = ClassID(s);
      // Printf("s%zd => c%zd\n", s, c);
      CHECK_LT(c, kNumClasses);
      CHECK_GE(Size(c), s);
      if (c > 0)
        CHECK_LT(Size(c-1), s);
    }
  }
};

typedef SizeClassMap<17, 256, 16> DefaultSizeClassMap;
typedef SizeClassMap<17, 64,  14> CompactSizeClassMap;
template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;
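
// A worked example of the mapping (these values follow from the Size() and
// ClassID() formulas above; the request sizes are arbitrary illustrations):
//   DefaultSizeClassMap::ClassID(1)   == 1   // rounds up to 16 bytes
//   DefaultSizeClassMap::ClassID(17)  == 2   // rounds up to 32 bytes
//   DefaultSizeClassMap::ClassID(256) == 16  // last 16-byte-spaced class
//   DefaultSizeClassMap::ClassID(257) == 17  // first 64-byte-spaced class
//   DefaultSizeClassMap::Size(17)     == 320
// SizeClassMap::Validate() checks these round-trip invariants for every size
// up to kMaxSize.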

// Memory allocator statistics
enum AllocatorStat {
  AllocatorStatMalloced,
  AllocatorStatFreed,
  AllocatorStatMmapped,
  AllocatorStatUnmapped,
  AllocatorStatCount
};

typedef u64 AllocatorStatCounters[AllocatorStatCount];

// Per-thread stats, live in per-thread cache.
class AllocatorStats {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }

  void Add(AllocatorStat i, u64 v) {
    v += atomic_load(&stats_[i], memory_order_relaxed);
    atomic_store(&stats_[i], v, memory_order_relaxed);
  }

  void Set(AllocatorStat i, u64 v) {
    atomic_store(&stats_[i], v, memory_order_relaxed);
  }

  u64 Get(AllocatorStat i) const {
    return atomic_load(&stats_[i], memory_order_relaxed);
  }

 private:
  friend class AllocatorGlobalStats;
  AllocatorStats *next_;
  AllocatorStats *prev_;
  atomic_uint64_t stats_[AllocatorStatCount];
};

// Global stats, used for aggregation and querying.
class AllocatorGlobalStats : public AllocatorStats {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
    next_ = this;
    prev_ = this;
  }

  void Register(AllocatorStats *s) {
    SpinMutexLock l(&mu_);
    s->next_ = next_;
    s->prev_ = this;
    next_->prev_ = s;
    next_ = s;
  }

  void Unregister(AllocatorStats *s) {
    SpinMutexLock l(&mu_);
    s->prev_->next_ = s->next_;
    s->next_->prev_ = s->prev_;
    for (int i = 0; i < AllocatorStatCount; i++)
      Add(AllocatorStat(i), s->Get(AllocatorStat(i)));
  }

  void Get(AllocatorStatCounters s) const {
    internal_memset(s, 0, AllocatorStatCount * sizeof(u64));
    SpinMutexLock l(&mu_);
    const AllocatorStats *stats = this;
    for (;;) {
      for (int i = 0; i < AllocatorStatCount; i++)
        s[i] += stats->Get(AllocatorStat(i));
      stats = stats->next_;
      if (stats == this)
        break;
    }
  }

 private:
  mutable SpinMutex mu_;
};
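
// Typical use (a sketch; the variable names are hypothetical):
//   static AllocatorGlobalStats global_stats;  // Init() once at startup
//   AllocatorStats thread_stats;               // one per thread
//   thread_stats.Init();
//   global_stats.Register(&thread_stats);
//   thread_stats.Add(AllocatorStatMalloced, 4096);
//   AllocatorStatCounters totals;
//   global_stats.Get(totals);  // sums over all registered threads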

// Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const { }
};
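
// A tool can observe the allocator's mmap/munmap activity by supplying its
// own callback; a minimal sketch (LoggingMapUnmapCallback is a hypothetical
// example, not part of this header):
//   struct LoggingMapUnmapCallback {
//     void OnMap(uptr p, uptr size) const {
//       Printf("mapped   [%p, %p)\n", (void *)p, (void *)(p + size));
//     }
//     void OnUnmap(uptr p, uptr size) const {
//       Printf("unmapped [%p, %p)\n", (void *)p, (void *)(p + size));
//     }
//   };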

// SizeClassAllocator64 -- allocator for 64-bit address space.
//
// Space: a portion of address space of kSpaceSize bytes starting at
// a fixed address (kSpaceBeg). Both constants are powers of two and
// kSpaceBeg is kSpaceSize-aligned.
// At the beginning the entire space is mprotect-ed, then small parts of it
// are mapped on demand.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
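//
// For the chunk with index i in the region of class c, the metadata is
// addressed from the region end (this mirrors GetMetaData() below):
//   meta(i) = kSpaceBeg + kRegionSize * (c + 1) - (i + 1) * kMetadataSize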
template <const uptr kSpaceBeg, const uptr kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap,
          class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator64 {
 public:
  typedef typename SizeClassMap::TransferBatch Batch;
  typedef SizeClassAllocator64<kSpaceBeg, kSpaceSize, kMetadataSize,
      SizeClassMap, MapUnmapCallback> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;

  void Init() {
    CHECK_EQ(kSpaceBeg,
             reinterpret_cast<uptr>(Mprotect(kSpaceBeg, kSpaceSize)));
    MapWithCallback(kSpaceEnd, AdditionalSize());
  }

  void MapWithCallback(uptr beg, uptr size) {
    CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
    MapUnmapCallback().OnMap(beg, size);
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

  NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    Batch *b = region->free_list.Pop();
    if (b == 0)
      b = PopulateFreeList(stat, c, class_id, region);
    region->n_allocated += b->count;
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
    RegionInfo *region = GetRegionInfo(class_id);
    CHECK_GT(b->count, 0);
    region->free_list.Push(b);
    region->n_freed += b->count;
  }

  static bool PointerIsMine(void *p) {
    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
  }

  static uptr GetSizeClass(void *p) {
    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClassesRounded;
  }

  void *GetBlockBegin(void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    if (!size) return 0;
    uptr chunk_idx = GetChunkIdx((uptr)p, size);
    uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
    uptr beg = chunk_idx * size;
    uptr next_beg = beg + size;
    if (class_id >= kNumClasses) return 0;
    RegionInfo *region = GetRegionInfo(class_id);
    if (region->mapped_user >= next_beg)
      return reinterpret_cast<void*>(reg_beg + beg);
    return 0;
  }

  static uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  void *GetMetaData(void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
    return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
                                   (1 + chunk_idx) * kMetadataSize);
  }

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapWithCallback(kSpaceBeg, kSpaceSize + AdditionalSize());
  }

  void PrintStats() {
    uptr total_mapped = 0;
    uptr n_allocated = 0;
    uptr n_freed = 0;
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      total_mapped += region->mapped_user;
      n_allocated += region->n_allocated;
      n_freed += region->n_freed;
    }
    Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
           "remains %zd\n",
           total_mapped >> 20, n_allocated, n_allocated - n_freed);
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      if (region->mapped_user == 0) continue;
      Printf("  %02zd (%zd): total: %zd K allocs: %zd remains: %zd\n",
             class_id,
             SizeClassMap::Size(class_id),
             region->mapped_user >> 10,
             region->n_allocated,
             region->n_allocated - region->n_freed);
    }
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetRegionInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = (int)kNumClasses - 1; i >= 0; i--) {
      GetRegionInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over existing chunks. May include chunks that are not currently
  // allocated to the user (e.g. freed).
  // The caller is expected to call ForceLock() before calling this function.
  template<typename Callable>
  void ForEachChunk(const Callable &callback) {
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      uptr chunk_size = SizeClassMap::Size(class_id);
      uptr region_beg = kSpaceBeg + class_id * kRegionSize;
      for (uptr p = region_beg;
           p < region_beg + region->allocated_user;
           p += chunk_size) {
        // Too slow: CHECK_EQ((void *)p, GetBlockBegin((void *)p));
        callback((void *)p);
      }
    }
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;

 private:
  static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
  static const uptr kSpaceEnd = kSpaceBeg + kSpaceSize;
  COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
  // kRegionSize must be >= 2^32.
  COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
  // Populate the free list with at most this number of bytes at once
  // or with one element if its size is greater.
  static const uptr kPopulateSize = 1 << 14;
  // Call mmap for user memory with at least this size.
  static const uptr kUserMapSize = 1 << 16;
  // Call mmap for metadata memory with at least this size.
  static const uptr kMetaMapSize = 1 << 16;

  struct RegionInfo {
    BlockingMutex mutex;
    LFStack<Batch> free_list;
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    uptr mapped_user;  // Bytes mapped for user memory.
    uptr mapped_meta;  // Bytes mapped for metadata.
    uptr n_allocated, n_freed;  // Just stats.
  };
  COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);

  static uptr AdditionalSize() {
    return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
                     GetPageSizeCached());
  }

  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
    return &regions[class_id];
  }

  static uptr GetChunkIdx(uptr chunk, uptr size) {
    uptr offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // size always fits into 32 bits; if the offset fits too, use the cheaper
    // 32-bit division. (Truncating the offset to u32 unconditionally would be
    // wrong, since kRegionSize may exceed 2^32.)
    if (offset >> (SANITIZER_WORDSIZE / 2))
      return offset / size;
    return (u32)offset / (u32)size;
  }

  NOINLINE Batch* PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                                   uptr class_id, RegionInfo *region) {
    BlockingMutexLock l(&region->mutex);
    Batch *b = region->free_list.Pop();
    if (b)
      return b;
    uptr size = SizeClassMap::Size(class_id);
    uptr count = size < kPopulateSize ? SizeClassMap::MaxCached(class_id) : 1;
    uptr beg_idx = region->allocated_user;
    uptr end_idx = beg_idx + count * size;
    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
    if (end_idx + size > region->mapped_user) {
      // Do the mmap for the user memory.
      uptr map_size = kUserMapSize;
      while (end_idx + size > region->mapped_user + map_size)
        map_size += kUserMapSize;
      CHECK_GE(region->mapped_user + map_size, end_idx);
      MapWithCallback(region_beg + region->mapped_user, map_size);
      stat->Add(AllocatorStatMmapped, map_size);
      region->mapped_user += map_size;
    }
    uptr total_count = (region->mapped_user - beg_idx - size)
        / size / count * count;
    region->allocated_meta += total_count * kMetadataSize;
    if (region->allocated_meta > region->mapped_meta) {
      uptr map_size = kMetaMapSize;
      while (region->allocated_meta > region->mapped_meta + map_size)
        map_size += kMetaMapSize;
      // Do the mmap for the metadata.
      CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
      MapWithCallback(region_beg + kRegionSize -
                      region->mapped_meta - map_size, map_size);
      region->mapped_meta += map_size;
    }
    CHECK_LE(region->allocated_meta, region->mapped_meta);
    if (region->allocated_user + region->allocated_meta > kRegionSize) {
      Printf("%s: Out of memory. Dying. ", SanitizerToolName);
      Printf("The process has exhausted %zuMB for size class %zu.\n",
          kRegionSize / 1024 / 1024, size);
      Die();
    }
    for (;;) {
      if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
        b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
      else
        b = (Batch*)(region_beg + beg_idx);
      b->count = count;
      for (uptr i = 0; i < count; i++)
        b->batch[i] = (void*)(region_beg + beg_idx + i * size);
      region->allocated_user += count * size;
      CHECK_LE(region->allocated_user, region->mapped_user);
      beg_idx += count * size;
      if (beg_idx + count * size + size > region->mapped_user)
        break;
      CHECK_GT(b->count, 0);
      region->free_list.Push(b);
    }
    return b;
  }
};
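
// An illustrative 64-bit instantiation (the constants here are assumptions
// chosen to satisfy the COMPILER_CHECKs above, not values mandated by this
// header):
//   typedef SizeClassAllocator64<
//       0x700000000000ULL,   // kSpaceBeg: fixed, kSpaceSize-aligned address
//       0x010000000000ULL,   // kSpaceSize: 2^40 bytes
//       16,                  // kMetadataSize: bytes of metadata per chunk
//       DefaultSizeClassMap> PrimaryAllocator64;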

// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on a 64-bit arch, but there it is
// less efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
//   a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space, so we keep
// a u8 array possible_regions[kNumPossibleRegions] to store the size classes.
// A size class of 0 means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing the objects of this class should be
// cache-line aligned.
template <const uptr kSpaceBeg, const u64 kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap,
          class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator32 {
 public:
  typedef typename SizeClassMap::TransferBatch Batch;
  typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
      SizeClassMap, MapUnmapCallback> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;

  void Init() {
    state_ = reinterpret_cast<State *>(MapWithCallback(sizeof(State)));
  }

  void *MapWithCallback(uptr size) {
    size = RoundUpTo(size, GetPageSizeCached());
    void *res = MmapOrDie(size, "SizeClassAllocator32");
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

  void *GetMetaData(void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = SizeClassMap::Size(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }

  NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    if (sci->free_list.empty())
      PopulateFreeList(stat, c, sci, class_id);
    CHECK(!sci->free_list.empty());
    Batch *b = sci->free_list.front();
    sci->free_list.pop_front();
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    CHECK_GT(b->count, 0);
    sci->free_list.push_front(b);
  }

  bool PointerIsMine(void *p) {
    return GetSizeClass(p) != 0;
  }

  uptr GetSizeClass(void *p) {
    return state_->possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
  }

  void *GetBlockBegin(void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = SizeClassMap::Size(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (state_->possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (state_->possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
    UnmapWithCallback(reinterpret_cast<uptr>(state_), sizeof(State));
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over existing chunks. May include chunks that are not currently
  // allocated to the user (e.g. freed).
  // The caller is expected to call ForceLock() before calling this function.
  template<typename Callable>
  void ForEachChunk(const Callable &callback) {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (state_->possible_regions[region]) {
        uptr chunk_size = SizeClassMap::Size(state_->possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr p = region_beg;
             p < region_beg + max_chunks_in_region * chunk_size;
             p += chunk_size) {
          // Too slow: CHECK_EQ((void *)p, GetBlockBegin((void *)p));
          callback((void *)p);
        }
      }
  }

  void PrintStats() {
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSizeLog = SANITIZER_WORDSIZE == 64 ? 24 : 20;
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

  struct SizeClassInfo {
    SpinMutex mutex;
    IntrusiveList<Batch> free_list;
    char padding[kCacheLineSize - sizeof(uptr) - sizeof(IntrusiveList<Batch>)];
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);

  uptr ComputeRegionId(uptr mem) {
    uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }

  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
                                      "SizeClassAllocator32"));
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMmapped, kRegionSize);
    CHECK_EQ(0U, (res & (kRegionSize - 1)));
    CHECK_EQ(0U, state_->possible_regions[ComputeRegionId(res)]);
    state_->possible_regions[ComputeRegionId(res)] = class_id;
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    return &state_->size_class_info_array[class_id];
  }

  void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    uptr size = SizeClassMap::Size(class_id);
    uptr reg = AllocateRegion(stat, class_id);
    uptr n_chunks = kRegionSize / (size + kMetadataSize);
    uptr max_count = SizeClassMap::MaxCached(class_id);
    Batch *b = 0;
    for (uptr i = reg; i < reg + n_chunks * size; i += size) {
      if (b == 0) {
        if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
          b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
        else
          b = (Batch*)i;
        b->count = 0;
      }
      b->batch[b->count++] = (void*)i;
      if (b->count == max_count) {
        CHECK_GT(b->count, 0);
        sci->free_list.push_back(b);
        b = 0;
      }
    }
    if (b) {
      CHECK_GT(b->count, 0);
      sci->free_list.push_back(b);
    }
  }

  struct State {
    u8 possible_regions[kNumPossibleRegions];
    SizeClassInfo size_class_info_array[kNumClasses];
  };
  State *state_;
};
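
// An illustrative 32-bit instantiation (assumed values; any kSpaceSize that
// is a multiple of kRegionSize works):
//   typedef SizeClassAllocator32<
//       0,                   // kSpaceBeg: cover the whole address space
//       0x100000000ULL,      // kSpaceSize: 2^32 bytes
//       16,                  // kMetadataSize: bytes of metadata per chunk
//       CompactSizeClassMap> PrimaryAllocator32;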

// Objects of this type should be used as local caches for SizeClassAllocator64
// or SizeClassAllocator32. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache {
  typedef SizeClassAllocator Allocator;
  static const uptr kNumClasses = SizeClassAllocator::kNumClasses;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    stats_.Add(AllocatorStatMalloced, SizeClassMap::Size(class_id));
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0))
      Refill(allocator, class_id);
    void *res = c->batch[--c->count];
    PREFETCH(c->batch[c->count - 1]);
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    InitCache();
    stats_.Add(AllocatorStatFreed, SizeClassMap::Size(class_id));
    PerClass *c = &per_class_[class_id];
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(allocator, class_id);
    c->batch[c->count++] = p;
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
      PerClass *c = &per_class_[class_id];
      while (c->count > 0)
        Drain(allocator, class_id);
    }
  }

  // private:
  typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
  typedef typename SizeClassMap::TransferBatch Batch;
  struct PerClass {
    uptr count;
    uptr max_count;
    void *batch[2 * SizeClassMap::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache() {
    if (per_class_[1].max_count)
      return;
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      c->max_count = 2 * SizeClassMap::MaxCached(i);
    }
  }

  NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    Batch *b = allocator->AllocateBatch(&stats_, this, class_id);
    CHECK_GT(b->count, 0);
    for (uptr i = 0; i < b->count; i++)
      c->batch[i] = b->batch[i];
    c->count = b->count;
    if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
      Deallocate(allocator, SizeClassMap::ClassID(sizeof(Batch)), b);
  }

  NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    Batch *b;
    if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
      b = (Batch*)Allocate(allocator, SizeClassMap::ClassID(sizeof(Batch)));
    else
      b = (Batch*)c->batch[0];
    uptr cnt = Min(c->max_count / 2, c->count);
    for (uptr i = 0; i < cnt; i++) {
      b->batch[i] = c->batch[i];
      c->batch[i] = c->batch[i + c->max_count / 2];
    }
    b->count = cnt;
    c->count -= cnt;
    CHECK_GT(b->count, 0);
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};
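
// Typical per-thread use (a sketch; PrimaryAllocator64 refers to the
// hypothetical instantiation above, and the cache would normally live in TLS):
//   static PrimaryAllocator64 allocator;  // allocator.Init() once at startup
//   THREADLOCAL SizeClassAllocatorLocalCache<PrimaryAllocator64> cache;
//   cache.Init(0);  // or pass an AllocatorGlobalStats* to aggregate stats
//   uptr class_id = allocator.ClassID(96);
//   void *p = cache.Allocate(&allocator, class_id);
//   cache.Deallocate(&allocator, class_id, p);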

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
template <class MapUnmapCallback = NoOpMapUnmapCallback>
class LargeMmapAllocator {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
    page_size_ = GetPageSizeCached();
  }

  void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
    CHECK(IsPowerOfTwo(alignment));
    uptr map_size = RoundUpMapSize(size);
    if (alignment > page_size_)
      map_size += alignment;
    if (map_size < size) return 0;  // Overflow.
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDie(map_size, "LargeMmapAllocator"));
    MapUnmapCallback().OnMap(map_beg, map_size);
    uptr map_end = map_beg + map_size;
    uptr res = map_beg + page_size_;
    if (res & (alignment - 1))  // Align.
      res += alignment - (res & (alignment - 1));
    CHECK_EQ(0, res & (alignment - 1));
    CHECK_LE(res + size, map_end);
    Header *h = GetHeader(res);
    h->size = size;
    h->map_beg = map_beg;
    h->map_size = map_size;
    uptr size_log = MostSignificantSetBitIndex(map_size);
    CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
    {
      SpinMutexLock l(&mutex_);
      uptr idx = n_chunks_++;
      CHECK_LT(idx, kMaxNumChunks);
      h->chunk_idx = idx;
      chunks_[idx] = h;
      stats.n_allocs++;
      stats.currently_allocated += map_size;
      stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
      stats.by_size_log[size_log]++;
      stat->Add(AllocatorStatMalloced, map_size);
      stat->Add(AllocatorStatMmapped, map_size);
    }
    return reinterpret_cast<void*>(res);
  }

  void Deallocate(AllocatorStats *stat, void *p) {
    Header *h = GetHeader(p);
    {
      SpinMutexLock l(&mutex_);
      uptr idx = h->chunk_idx;
      CHECK_EQ(chunks_[idx], h);
      CHECK_LT(idx, n_chunks_);
      chunks_[idx] = chunks_[n_chunks_ - 1];
      chunks_[idx]->chunk_idx = idx;
      n_chunks_--;
      stats.n_frees++;
      stats.currently_allocated -= h->map_size;
      stat->Add(AllocatorStatFreed, h->map_size);
      stat->Add(AllocatorStatUnmapped, h->map_size);
    }
    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
    UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (uptr i = 0; i < n_chunks_; i++) {
      Header *h = chunks_[i];
      CHECK_EQ(h->chunk_idx, i);
      res += RoundUpMapSize(h->size);
    }
    return res;
  }

  bool PointerIsMine(void *p) {
    return GetBlockBegin(p) != 0;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpTo(GetHeader(p)->size, page_size_);
  }

  // At least page_size_/2 metadata bytes are available.
  void *GetMetaData(void *p) {
    // Too slow: CHECK_EQ(p, GetBlockBegin(p));
    CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
    return GetHeader(p) + 1;
  }

  void *GetBlockBegin(void *ptr) {
    uptr p = reinterpret_cast<uptr>(ptr);
    SpinMutexLock l(&mutex_);
    uptr nearest_chunk = 0;
    // Cache-friendly linear search.
    for (uptr i = 0; i < n_chunks_; i++) {
      uptr ch = reinterpret_cast<uptr>(chunks_[i]);
      if (p < ch) continue;  // p is to the left of this chunk; skip it.
      if (p - ch < p - nearest_chunk)
        nearest_chunk = ch;
    }
    if (!nearest_chunk)
      return 0;
    Header *h = reinterpret_cast<Header *>(nearest_chunk);
    CHECK_GE(nearest_chunk, h->map_beg);
    CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
    CHECK_LE(nearest_chunk, p);
    if (h->map_beg + h->map_size < p)
      return 0;
    return GetUser(h);
  }

  void PrintStats() {
    Printf("Stats: LargeMmapAllocator: allocated %zd times, "
           "remains %zd (%zd K) max %zd M; by size logs: ",
           stats.n_allocs, stats.n_allocs - stats.n_frees,
           stats.currently_allocated >> 10, stats.max_allocated >> 20);
    for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
      uptr c = stats.by_size_log[i];
      if (!c) continue;
      Printf("%zd:%zd; ", i, c);
    }
    Printf("\n");
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    mutex_.Lock();
  }

  void ForceUnlock() {
    mutex_.Unlock();
  }

  // Iterate over existing chunks. May include chunks that are not currently
  // allocated to the user (e.g. freed).
  // The caller is expected to call ForceLock() before calling this function.
  template<typename Callable>
  void ForEachChunk(const Callable &callback) {
    for (uptr i = 0; i < n_chunks_; i++)
      callback(GetUser(chunks_[i]));
  }

 private:
  static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
  struct Header {
    uptr map_beg;
    uptr map_size;
    uptr size;
    uptr chunk_idx;
  };

  Header *GetHeader(uptr p) {
    CHECK_EQ(p % page_size_, 0);
    return reinterpret_cast<Header*>(p - page_size_);
  }
  Header *GetHeader(void *p) { return GetHeader(reinterpret_cast<uptr>(p)); }

  void *GetUser(Header *h) {
    CHECK_EQ((uptr)h % page_size_, 0);
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, page_size_) + page_size_;
  }

  uptr page_size_;
  Header *chunks_[kMaxNumChunks];
  uptr n_chunks_;
  struct Stats {
    uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
  } stats;
  SpinMutex mutex_;
};
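
// Address-space cost of the secondary allocator: one extra page in front of
// each chunk holds the Header (and user metadata), so a request of `size`
// bytes maps RoundUpMapSize(size) = RoundUpTo(size, page_size_) + page_size_
// bytes. For example, assuming 4K pages (the real value comes from
// GetPageSizeCached()):
//   Allocate(stat, 1, 1)     // maps  8K: [header page][1 user page]
//   Allocate(stat, 5000, 1)  // maps 12K: [header page][2 user pages]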

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
//  When allocating 2^x bytes it should return a 2^x-aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
template <class PrimaryAllocator, class AllocatorCache,
          class SecondaryAllocator>  // NOLINT
class CombinedAllocator {
 public:
  void Init() {
    primary_.Init();
    secondary_.Init();
    stats_.Init();
  }

  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
                 bool cleared = false) {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
    if (size + alignment < size)
      return 0;
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    void *res;
    if (primary_.CanAllocate(size, alignment))
      res = cache->Allocate(&primary_, primary_.ClassID(size));
    else
      res = secondary_.Allocate(&stats_, size, alignment);
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    if (cleared && res)
      internal_memset(res, 0, size);
    return res;
  }

  void Deallocate(AllocatorCache *cache, void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
    else
      secondary_.Deallocate(&stats_, p);
  }

  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
                   uptr alignment) {
    if (!p)
      return Allocate(cache, new_size, alignment);
    if (!new_size) {
      Deallocate(cache, p);
      return 0;
    }
    CHECK(PointerIsMine(p));
    uptr old_size = GetActuallyAllocatedSize(p);
    uptr memcpy_size = Min(new_size, old_size);
    void *new_p = Allocate(cache, new_size, alignment);
    if (new_p)
      internal_memcpy(new_p, p, memcpy_size);
    Deallocate(cache, p);
    return new_p;
  }

  bool PointerIsMine(void *p) {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  bool FromPrimary(void *p) {
    return primary_.PointerIsMine(p);
  }

  void *GetMetaData(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  void *GetBlockBegin(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBegin(p);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetActuallyAllocatedSize(p);
    return secondary_.GetActuallyAllocatedSize(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

  void InitCache(AllocatorCache *cache) {
    cache->Init(&stats_);
  }

  void DestroyCache(AllocatorCache *cache) {
    cache->Destroy(&primary_, &stats_);
  }

  void SwallowCache(AllocatorCache *cache) {
    cache->Drain(&primary_);
  }

  void GetStats(AllocatorStatCounters s) const {
    stats_.Get(s);
  }

  void PrintStats() {
    primary_.PrintStats();
    secondary_.PrintStats();
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    primary_.ForceLock();
    secondary_.ForceLock();
  }

  void ForceUnlock() {
    secondary_.ForceUnlock();
    primary_.ForceUnlock();
  }

  // Iterate over existing chunks. May include chunks that are not currently
  // allocated to the user (e.g. freed).
  // The caller is expected to call ForceLock() before calling this function.
  template<typename Callable>
  void ForEachChunk(const Callable &callback) {
    primary_.ForEachChunk(callback);
    secondary_.ForEachChunk(callback);
  }

 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
  AllocatorGlobalStats stats_;
};
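
// Putting the pieces together (an illustrative sketch; the typedef names and
// template constants are assumptions, not part of this header):
//   typedef SizeClassAllocator64<0x700000000000ULL, 0x010000000000ULL, 16,
//                                DefaultSizeClassMap> Primary;
//   typedef SizeClassAllocatorLocalCache<Primary> Cache;
//   typedef LargeMmapAllocator<> Secondary;
//   typedef CombinedAllocator<Primary, Cache, Secondary> Allocator;
//
//   static Allocator a;       // a.Init() once at startup
//   THREADLOCAL Cache cache;  // a.InitCache(&cache) once per thread
//   void *small_p = a.Allocate(&cache, 64, 8);       // served by Primary
//   void *large_p = a.Allocate(&cache, 1 << 20, 8);  // served by Secondary
//   a.Deallocate(&cache, small_p);
//   a.Deallocate(&cache, large_p);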

// Returns true if calloc(size, n) should return 0 due to overflow in size*n.
bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n);
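
// A minimal sketch of the intended semantics (the real definition lives in a
// .cc file elsewhere in sanitizer_common; this is only an illustration):
//   bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
//     return n != 0 && size > ((uptr)-1) / n;  // true iff size * n overflows
//   }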

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_H