//===-- sanitizer_allocator.h -----------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Specialized memory allocator for ThreadSanitizer, MemorySanitizer, etc.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ALLOCATOR_H
#define SANITIZER_ALLOCATOR_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"
#include "sanitizer_lfstack.h"

namespace __sanitizer {

// SizeClassMap maps allocation sizes into size classes and back.
// Class 0 corresponds to size 0.
// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
// Next 4 classes: 256 + i * 64  (i = 1 to 4).
// Next 4 classes: 512 + i * 128 (i = 1 to 4).
// ...
// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
//
// This structure of the size class map gives us:
//   - Efficient table-free class-to-size and size-to-class functions.
//   - The difference between two consecutive size classes is between 14%
//     and 25%.
//
// This class also gives a hint to a thread-caching allocator about the number
// of chunks that need to be cached per-thread:
//  - kMaxNumCached is the maximal number of chunks per size class.
//  - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
//
// Part of output of SizeClassMap::Print():
// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
//
// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
//
// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
//
// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
//
// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
//
// ...
//
// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
//
// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52

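// A worked example of the mapping (consistent with the table above): for
// size = 600, MostSignificantSetBitIndex(600) == 9, so 600 falls into the
// group that starts at 2^9 = 512 with step 2^(9-2) = 128; hence
// ClassID(600) == 21 and Size(21) == 512 + 128 == 640 (c21 above).
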
template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog>
class SizeClassMap {
  static const uptr kMinSizeLog = 4;
  static const uptr kMidSizeLog = kMinSizeLog + 4;
  static const uptr kMinSize = 1 << kMinSizeLog;
  static const uptr kMidSize = 1 << kMidSizeLog;
  static const uptr kMidClass = kMidSize / kMinSize;
  static const uptr S = 2;
  static const uptr M = (1 << S) - 1;

 public:
  static const uptr kMaxNumCached = kMaxNumCachedT;
  // We transfer chunks between central and thread-local free lists in batches.
  // For small size classes we allocate batches separately.
  // For large size classes we use one of the chunks to store the batch.
  struct TransferBatch {
    TransferBatch *next;
    uptr count;
    void *batch[kMaxNumCached];
  };

  static const uptr kMaxSize = 1UL << kMaxSizeLog;
  static const uptr kNumClasses =
      kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
  COMPILER_CHECK(kNumClasses >= 32 && kNumClasses <= 256);
  static const uptr kNumClassesRounded =
      kNumClasses == 32  ? 32 :
      kNumClasses <= 64  ? 64 :
      kNumClasses <= 128 ? 128 : 256;

  static uptr Size(uptr class_id) {
    if (class_id <= kMidClass)
      return kMinSize * class_id;
    class_id -= kMidClass;
    uptr t = kMidSize << (class_id >> S);
    return t + (t >> S) * (class_id & M);
  }

  static uptr ClassID(uptr size) {
    if (size <= kMidSize)
      return (size + kMinSize - 1) >> kMinSizeLog;
    if (size > kMaxSize) return 0;
    uptr l = MostSignificantSetBitIndex(size);
    uptr hbits = (size >> (l - S)) & M;
    uptr lbits = size & ((1 << (l - S)) - 1);
    uptr l1 = l - kMidSizeLog;
    return kMidClass + (l1 << S) + hbits + (lbits > 0);
  }

  static uptr MaxCached(uptr class_id) {
    if (class_id == 0) return 0;
    uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
    return Max<uptr>(1, Min(kMaxNumCached, n));
  }

  static void Print() {
    uptr prev_s = 0;
    uptr total_cached = 0;
    for (uptr i = 0; i < kNumClasses; i++) {
      uptr s = Size(i);
      if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
        Printf("\n");
      uptr d = s - prev_s;
      uptr p = prev_s ? (d * 100 / prev_s) : 0;
      uptr l = s ? MostSignificantSetBitIndex(s) : 0;
      uptr cached = MaxCached(i) * s;
      Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
             "cached: %zd %zd; id %zd\n",
             i, Size(i), d, p, l, MaxCached(i), cached, ClassID(s));
      total_cached += cached;
      prev_s = s;
    }
    Printf("Total cached: %zd\n", total_cached);
  }

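  // A TransferBatch can be stored inside one of the class's own chunks only
  // when the chunk is large enough to hold the used part of the batch
  // (the header plus MaxCached(class_id) pointers); smaller classes need a
  // separately allocated batch.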
  static bool SizeClassRequiresSeparateTransferBatch(uptr class_id) {
    return Size(class_id) < sizeof(TransferBatch) -
        sizeof(uptr) * (kMaxNumCached - MaxCached(class_id));
  }

  static void Validate() {
    for (uptr c = 1; c < kNumClasses; c++) {
      // Printf("Validate: c%zd\n", c);
      uptr s = Size(c);
      CHECK_NE(s, 0U);
      CHECK_EQ(ClassID(s), c);
      if (c != kNumClasses - 1)
        CHECK_EQ(ClassID(s + 1), c + 1);
      CHECK_EQ(ClassID(s - 1), c);
      if (c)
        CHECK_GT(Size(c), Size(c-1));
    }
    CHECK_EQ(ClassID(kMaxSize + 1), 0);

    for (uptr s = 1; s <= kMaxSize; s++) {
      uptr c = ClassID(s);
      // Printf("s%zd => c%zd\n", s, c);
      CHECK_LT(c, kNumClasses);
      CHECK_GE(Size(c), s);
      if (c > 0)
        CHECK_LT(Size(c-1), s);
    }
  }
};

typedef SizeClassMap<17, 128, 16> DefaultSizeClassMap;
typedef SizeClassMap<17, 64,  14> CompactSizeClassMap;
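
// For instance (an illustrative sketch; these are the same properties
// Validate() checks):
//   uptr c = DefaultSizeClassMap::ClassID(100);       // c == 7.
//   CHECK_GE(DefaultSizeClassMap::Size(c), 100);      // Size(7) == 112.
//   CHECK_LT(DefaultSizeClassMap::Size(c - 1), 100);  // Size(6) == 96.
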
template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;

// Memory allocator statistics
enum AllocatorStat {
  AllocatorStatMalloced,
  AllocatorStatFreed,
  AllocatorStatMmapped,
  AllocatorStatUnmapped,
  AllocatorStatCount
};

typedef u64 AllocatorStatCounters[AllocatorStatCount];

// Per-thread stats, live in per-thread cache.
class AllocatorStats {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }

  void Add(AllocatorStat i, u64 v) {
    v += atomic_load(&stats_[i], memory_order_relaxed);
    atomic_store(&stats_[i], v, memory_order_relaxed);
  }

  void Set(AllocatorStat i, u64 v) {
    atomic_store(&stats_[i], v, memory_order_relaxed);
  }

  u64 Get(AllocatorStat i) const {
    return atomic_load(&stats_[i], memory_order_relaxed);
  }

 private:
  friend class AllocatorGlobalStats;
  AllocatorStats *next_;
  AllocatorStats *prev_;
  atomic_uint64_t stats_[AllocatorStatCount];
};

// Global stats, used for aggregation and querying.
class AllocatorGlobalStats : public AllocatorStats {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
    next_ = this;
    prev_ = this;
  }

  void Register(AllocatorStats *s) {
    SpinMutexLock l(&mu_);
    s->next_ = next_;
    s->prev_ = this;
    next_->prev_ = s;
    next_ = s;
  }

  void Unregister(AllocatorStats *s) {
    SpinMutexLock l(&mu_);
    s->prev_->next_ = s->next_;
    s->next_->prev_ = s->prev_;
    for (int i = 0; i < AllocatorStatCount; i++)
      Add(AllocatorStat(i), s->Get(AllocatorStat(i)));
  }

  void Get(AllocatorStatCounters s) const {
    internal_memset(s, 0, AllocatorStatCount * sizeof(u64));
    SpinMutexLock l(&mu_);
    const AllocatorStats *stats = this;
    for (;;) {
      for (int i = 0; i < AllocatorStatCount; i++)
        s[i] += stats->Get(AllocatorStat(i));
      stats = stats->next_;
      if (stats == this)
        break;
    }
  }

 private:
  mutable SpinMutex mu_;
};
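
// Typical wiring (a hedged sketch, not code from this header): one global
// AllocatorGlobalStats object plus one registered AllocatorStats per thread;
// Get() then sums the global counters and every registered per-thread one.
//   AllocatorGlobalStats global_stats;
//   global_stats.Init();
//   AllocatorStats thread_stats;
//   thread_stats.Init();
//   global_stats.Register(&thread_stats);
//   thread_stats.Add(AllocatorStatMalloced, 4096);
//   AllocatorStatCounters totals;
//   global_stats.Get(totals);  // Includes the 4096 from thread_stats.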

// Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const { }
};
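
// A tool-specific callback might update shadow memory or statistics on each
// map/unmap (a hypothetical sketch; MyUpdateShadow is not a real function):
//   struct MyMapUnmapCallback {
//     void OnMap(uptr p, uptr size) const { MyUpdateShadow(p, size); }
//     void OnUnmap(uptr p, uptr size) const { MyUpdateShadow(p, size); }
//   };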

// Callback type for iterating over chunks.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);

// SizeClassAllocator64 -- allocator for 64-bit address space.
//
// Space: a portion of address space of kSpaceSize bytes starting at
// a fixed address (kSpaceBeg). Both constants are powers of two and
// kSpaceBeg is kSpaceSize-aligned.
// At the beginning the entire space is mprotect-ed, then small parts of it
// are mapped on demand.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
template <const uptr kSpaceBeg, const uptr kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap,
          class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator64 {
 public:
  typedef typename SizeClassMap::TransferBatch Batch;
  typedef SizeClassAllocator64<kSpaceBeg, kSpaceSize, kMetadataSize,
      SizeClassMap, MapUnmapCallback> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;

  void Init() {
    CHECK_EQ(kSpaceBeg,
             reinterpret_cast<uptr>(Mprotect(kSpaceBeg, kSpaceSize)));
    MapWithCallback(kSpaceEnd, AdditionalSize());
  }

  void MapWithCallback(uptr beg, uptr size) {
    CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
    MapUnmapCallback().OnMap(beg, size);
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

  NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    Batch *b = region->free_list.Pop();
    if (b == 0)
      b = PopulateFreeList(stat, c, class_id, region);
    region->n_allocated += b->count;
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
    RegionInfo *region = GetRegionInfo(class_id);
    CHECK_GT(b->count, 0);
    region->free_list.Push(b);
    region->n_freed += b->count;
  }

  static bool PointerIsMine(const void *p) {
    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
  }

  static uptr GetSizeClass(const void *p) {
    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClassesRounded;
  }

  void *GetBlockBegin(const void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    if (!size) return 0;
    uptr chunk_idx = GetChunkIdx((uptr)p, size);
    uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
    uptr beg = chunk_idx * size;
    uptr next_beg = beg + size;
    if (class_id >= kNumClasses) return 0;
    RegionInfo *region = GetRegionInfo(class_id);
    if (region->mapped_user >= next_beg)
      return reinterpret_cast<void*>(reg_beg + beg);
    return 0;
  }

  static uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  void *GetMetaData(const void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
    return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
                                   (1 + chunk_idx) * kMetadataSize);
  }

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapWithCallback(kSpaceBeg, kSpaceSize + AdditionalSize());
  }

  void PrintStats() {
    uptr total_mapped = 0;
    uptr n_allocated = 0;
    uptr n_freed = 0;
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      total_mapped += region->mapped_user;
      n_allocated += region->n_allocated;
      n_freed += region->n_freed;
    }
    Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
           "remains %zd\n",
           total_mapped >> 20, n_allocated, n_allocated - n_freed);
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      if (region->mapped_user == 0) continue;
      Printf("  %02zd (%zd): total: %zd K allocs: %zd remains: %zd\n",
             class_id,
             SizeClassMap::Size(class_id),
             region->mapped_user >> 10,
             region->n_allocated,
             region->n_allocated - region->n_freed);
    }
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetRegionInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = (int)kNumClasses - 1; i >= 0; i--) {
      GetRegionInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      uptr chunk_size = SizeClassMap::Size(class_id);
      uptr region_beg = kSpaceBeg + class_id * kRegionSize;
      for (uptr chunk = region_beg;
           chunk < region_beg + region->allocated_user;
           chunk += chunk_size) {
        // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
        callback(chunk, arg);
      }
    }
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;

 private:
  static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
  static const uptr kSpaceEnd = kSpaceBeg + kSpaceSize;
  COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
  // kRegionSize must be >= 2^32.
  COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
  // Populate the free list with at most this number of bytes at once
  // or with one element if its size is greater.
  static const uptr kPopulateSize = 1 << 14;
  // Call mmap for user memory with at least this size.
  static const uptr kUserMapSize = 1 << 16;
  // Call mmap for metadata memory with at least this size.
  static const uptr kMetaMapSize = 1 << 16;

  struct RegionInfo {
    BlockingMutex mutex;
    LFStack<Batch> free_list;
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    uptr mapped_user;  // Bytes mapped for user memory.
    uptr mapped_meta;  // Bytes mapped for metadata.
    uptr n_allocated, n_freed;  // Just stats.
  };
  COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);

  static uptr AdditionalSize() {
    return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
                     GetPageSizeCached());
  }

  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
    return &regions[class_id];
  }

  static uptr GetChunkIdx(uptr chunk, uptr size) {
    uptr offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // size always fits into 32-bits. If the offset fits too, use 32-bit div.
    if (offset >> (SANITIZER_WORDSIZE / 2))
      return offset / size;
    return (u32)offset / (u32)size;
  }

  NOINLINE Batch* PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                                   uptr class_id, RegionInfo *region) {
    BlockingMutexLock l(&region->mutex);
    Batch *b = region->free_list.Pop();
    if (b)
      return b;
    uptr size = SizeClassMap::Size(class_id);
    uptr count = size < kPopulateSize ? SizeClassMap::MaxCached(class_id) : 1;
    uptr beg_idx = region->allocated_user;
    uptr end_idx = beg_idx + count * size;
    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
    if (end_idx + size > region->mapped_user) {
      // Do the mmap for the user memory.
      uptr map_size = kUserMapSize;
      while (end_idx + size > region->mapped_user + map_size)
        map_size += kUserMapSize;
      CHECK_GE(region->mapped_user + map_size, end_idx);
      MapWithCallback(region_beg + region->mapped_user, map_size);
      stat->Add(AllocatorStatMmapped, map_size);
      region->mapped_user += map_size;
    }
    uptr total_count = (region->mapped_user - beg_idx - size)
        / size / count * count;
    region->allocated_meta += total_count * kMetadataSize;
    if (region->allocated_meta > region->mapped_meta) {
      uptr map_size = kMetaMapSize;
      while (region->allocated_meta > region->mapped_meta + map_size)
        map_size += kMetaMapSize;
      // Do the mmap for the metadata.
      CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
      MapWithCallback(region_beg + kRegionSize -
                      region->mapped_meta - map_size, map_size);
      region->mapped_meta += map_size;
    }
    CHECK_LE(region->allocated_meta, region->mapped_meta);
    if (region->mapped_user + region->mapped_meta > kRegionSize) {
      Printf("%s: Out of memory. Dying. ", SanitizerToolName);
      Printf("The process has exhausted %zuMB for size class %zu.\n",
          kRegionSize / 1024 / 1024, size);
      Die();
    }
    for (;;) {
      if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
        b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
      else
        b = (Batch*)(region_beg + beg_idx);
      b->count = count;
      for (uptr i = 0; i < count; i++)
        b->batch[i] = (void*)(region_beg + beg_idx + i * size);
      region->allocated_user += count * size;
      CHECK_LE(region->allocated_user, region->mapped_user);
      beg_idx += count * size;
      if (beg_idx + count * size + size > region->mapped_user)
        break;
      CHECK_GT(b->count, 0);
      region->free_list.Push(b);
    }
    return b;
  }
};
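
// Example instantiation (hypothetical constants modeled on typical tool-side
// use; nothing below is defined in this header):
//   static const uptr kAllocatorSpace = 0x600000000000ULL;  // kSpaceBeg.
//   static const uptr kAllocatorSize  = 0x040000000000ULL;  // Power of two;
//                                       // kSpaceBeg is kAllocatorSize-aligned.
//   typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 16,
//       DefaultSizeClassMap> PrimaryAllocator;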

// Maps integers in range [0, kSize) to u8 values.
template<u64 kSize>
class FlatByteMap {
 public:
  void TestOnlyInit() {
    internal_memset(map_, 0, sizeof(map_));
  }

  void set(uptr idx, u8 val) {
    CHECK_LT(idx, kSize);
    CHECK_EQ(0U, map_[idx]);
    map_[idx] = val;
  }
  u8 operator[] (uptr idx) {
    CHECK_LT(idx, kSize);
    // FIXME: CHECK may be too expensive here.
    return map_[idx];
  }
 private:
  u8 map_[kSize];
};

// FIXME: Also implement TwoLevelByteMap.

// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is less
// efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
//   a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
// 0 size class means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing, the objects of this class should be
// cache-line aligned.
template <const uptr kSpaceBeg, const u64 kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap,
          const uptr kRegionSizeLog,
          class ByteMap,
          class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator32 {
 public:
  typedef typename SizeClassMap::TransferBatch Batch;
  typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
      SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;

  void Init() {
    possible_regions.TestOnlyInit();
    internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
  }

  void *MapWithCallback(uptr size) {
    size = RoundUpTo(size, GetPageSizeCached());
    void *res = MmapOrDie(size, "SizeClassAllocator32");
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

  void *GetMetaData(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = SizeClassMap::Size(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }

  NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    if (sci->free_list.empty())
      PopulateFreeList(stat, c, sci, class_id);
    CHECK(!sci->free_list.empty());
    Batch *b = sci->free_list.front();
    sci->free_list.pop_front();
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    CHECK_GT(b->count, 0);
    sci->free_list.push_front(b);
  }

  bool PointerIsMine(const void *p) {
    return GetSizeClass(p) != 0;
  }

  uptr GetSizeClass(const void *p) {
    return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
  }

  void *GetBlockBegin(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = SizeClassMap::Size(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions[region]) {
        uptr chunk_size = SizeClassMap::Size(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr chunk = region_beg;
             chunk < region_beg + max_chunks_in_region * chunk_size;
             chunk += chunk_size) {
          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
          callback(chunk, arg);
        }
      }
  }

  void PrintStats() {
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

  struct SizeClassInfo {
    SpinMutex mutex;
    IntrusiveList<Batch> free_list;
    char padding[kCacheLineSize - sizeof(uptr) - sizeof(IntrusiveList<Batch>)];
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);

  uptr ComputeRegionId(uptr mem) {
    uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }

  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
                                      "SizeClassAllocator32"));
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMmapped, kRegionSize);
    CHECK_EQ(0U, (res & (kRegionSize - 1)));
    possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }

  void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    uptr size = SizeClassMap::Size(class_id);
    uptr reg = AllocateRegion(stat, class_id);
    uptr n_chunks = kRegionSize / (size + kMetadataSize);
    uptr max_count = SizeClassMap::MaxCached(class_id);
    Batch *b = 0;
    for (uptr i = reg; i < reg + n_chunks * size; i += size) {
      if (b == 0) {
        if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
          b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
        else
          b = (Batch*)i;
        b->count = 0;
      }
      b->batch[b->count++] = (void*)i;
      if (b->count == max_count) {
        CHECK_GT(b->count, 0);
        sci->free_list.push_back(b);
        b = 0;
      }
    }
    if (b) {
      CHECK_GT(b->count, 0);
      sci->free_list.push_back(b);
    }
  }

  ByteMap possible_regions;
  SizeClassInfo size_class_info_array[kNumClasses];
};
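
// Example instantiation (hypothetical constants sketching typical 32-bit use;
// nothing below is defined in this header):
//   static const uptr kRegionSizeLog32 = 20;  // Each region is 1M.
//   typedef FlatByteMap<1ULL << (32 - kRegionSizeLog32)> ByteMap32;  // 4096.
//   typedef SizeClassAllocator32<0, 1ULL << 32, 16, CompactSizeClassMap,
//       kRegionSizeLog32, ByteMap32> PrimaryAllocator32;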

// Objects of this type should be used as local caches for SizeClassAllocator64
// or SizeClassAllocator32. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache {
  typedef SizeClassAllocator Allocator;
  static const uptr kNumClasses = SizeClassAllocator::kNumClasses;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    stats_.Add(AllocatorStatMalloced, SizeClassMap::Size(class_id));
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0))
      Refill(allocator, class_id);
    void *res = c->batch[--c->count];
    PREFETCH(c->batch[c->count - 1]);
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    InitCache();
    stats_.Add(AllocatorStatFreed, SizeClassMap::Size(class_id));
    PerClass *c = &per_class_[class_id];
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(allocator, class_id);
    c->batch[c->count++] = p;
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
      PerClass *c = &per_class_[class_id];
      while (c->count > 0)
        Drain(allocator, class_id);
    }
  }

  // private:
  typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
  typedef typename SizeClassMap::TransferBatch Batch;
  struct PerClass {
    uptr count;
    uptr max_count;
    void *batch[2 * SizeClassMap::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache() {
    if (per_class_[1].max_count)
      return;
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      c->max_count = 2 * SizeClassMap::MaxCached(i);
    }
  }

  NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    Batch *b = allocator->AllocateBatch(&stats_, this, class_id);
    CHECK_GT(b->count, 0);
    for (uptr i = 0; i < b->count; i++)
      c->batch[i] = b->batch[i];
    c->count = b->count;
    if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
      Deallocate(allocator, SizeClassMap::ClassID(sizeof(Batch)), b);
  }

  NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    Batch *b;
    if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
      b = (Batch*)Allocate(allocator, SizeClassMap::ClassID(sizeof(Batch)));
    else
      b = (Batch*)c->batch[0];
    uptr cnt = Min(c->max_count / 2, c->count);
    for (uptr i = 0; i < cnt; i++) {
      b->batch[i] = c->batch[i];
      c->batch[i] = c->batch[i + c->max_count / 2];
    }
    b->count = cnt;
    c->count -= cnt;
    CHECK_GT(b->count, 0);
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};
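
// Typical per-thread use (a hedged sketch; PrimaryAllocator is the
// hypothetical typedef from the example above, `primary` an instance of it):
//   typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
//   static THREADLOCAL AllocatorCache cache;  // POD, so it can live in TLS.
//   void *p = cache.Allocate(&primary, primary.ClassID(size));
//   cache.Deallocate(&primary, primary.GetSizeClass(p), p);
//   cache.Drain(&primary);  // E.g. on thread exit, return cached chunks.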

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
template <class MapUnmapCallback = NoOpMapUnmapCallback>
class LargeMmapAllocator {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
    page_size_ = GetPageSizeCached();
  }

  void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
    CHECK(IsPowerOfTwo(alignment));
    uptr map_size = RoundUpMapSize(size);
    if (alignment > page_size_)
      map_size += alignment;
    if (map_size < size) return 0;  // Overflow.
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDie(map_size, "LargeMmapAllocator"));
    MapUnmapCallback().OnMap(map_beg, map_size);
    uptr map_end = map_beg + map_size;
    uptr res = map_beg + page_size_;
    if (res & (alignment - 1))  // Align.
      res += alignment - (res & (alignment - 1));
    CHECK_EQ(0, res & (alignment - 1));
    CHECK_LE(res + size, map_end);
    Header *h = GetHeader(res);
    h->size = size;
    h->map_beg = map_beg;
    h->map_size = map_size;
    uptr size_log = MostSignificantSetBitIndex(map_size);
    CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
    {
      SpinMutexLock l(&mutex_);
      uptr idx = n_chunks_++;
      chunks_sorted_ = false;
      CHECK_LT(idx, kMaxNumChunks);
      h->chunk_idx = idx;
      chunks_[idx] = h;
      stats.n_allocs++;
      stats.currently_allocated += map_size;
      stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
      stats.by_size_log[size_log]++;
      stat->Add(AllocatorStatMalloced, map_size);
      stat->Add(AllocatorStatMmapped, map_size);
    }
    return reinterpret_cast<void*>(res);
  }

  void Deallocate(AllocatorStats *stat, void *p) {
    Header *h = GetHeader(p);
    {
      SpinMutexLock l(&mutex_);
      uptr idx = h->chunk_idx;
      CHECK_EQ(chunks_[idx], h);
      CHECK_LT(idx, n_chunks_);
      chunks_[idx] = chunks_[n_chunks_ - 1];
      chunks_[idx]->chunk_idx = idx;
      n_chunks_--;
      chunks_sorted_ = false;
      stats.n_frees++;
      stats.currently_allocated -= h->map_size;
      stat->Add(AllocatorStatFreed, h->map_size);
      stat->Add(AllocatorStatUnmapped, h->map_size);
    }
    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
    UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (uptr i = 0; i < n_chunks_; i++) {
      Header *h = chunks_[i];
      CHECK_EQ(h->chunk_idx, i);
      res += RoundUpMapSize(h->size);
    }
    return res;
  }

  bool PointerIsMine(const void *p) {
    return GetBlockBegin(p) != 0;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpTo(GetHeader(p)->size, page_size_);
  }

  // At least page_size_/2 metadata bytes are available.
  void *GetMetaData(const void *p) {
    // Too slow: CHECK_EQ(p, GetBlockBegin(p));
    CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
    return GetHeader(p) + 1;
  }

  void *GetBlockBegin(const void *ptr) {
    uptr p = reinterpret_cast<uptr>(ptr);
    SpinMutexLock l(&mutex_);
    uptr nearest_chunk = 0;
    // Cache-friendly linear search.
    for (uptr i = 0; i < n_chunks_; i++) {
      uptr ch = reinterpret_cast<uptr>(chunks_[i]);
      if (p < ch) continue;  // p is to the left of this chunk; skip it.
      if (p - ch < p - nearest_chunk)
        nearest_chunk = ch;
    }
    if (!nearest_chunk)
      return 0;
    Header *h = reinterpret_cast<Header *>(nearest_chunk);
    CHECK_GE(nearest_chunk, h->map_beg);
    CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
    CHECK_LE(nearest_chunk, p);
    if (h->map_beg + h->map_size <= p)
      return 0;
    return GetUser(h);
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *ptr) {
    uptr p = reinterpret_cast<uptr>(ptr);
    uptr n = n_chunks_;
    if (!n) return 0;
    if (!chunks_sorted_) {
      // Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate.
      SortArray(reinterpret_cast<uptr*>(chunks_), n);
      for (uptr i = 0; i < n; i++)
        chunks_[i]->chunk_idx = i;
      chunks_sorted_ = true;
      min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
      max_mmap_ = reinterpret_cast<uptr>(chunks_[n - 1]) +
          chunks_[n - 1]->map_size;
    }
    if (p < min_mmap_ || p >= max_mmap_)
      return 0;
    uptr beg = 0, end = n - 1;
    // This loop is a log(n) lower_bound. It does not check for the exact match
    // to avoid expensive cache-thrashing loads.
    while (end - beg >= 2) {
      uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1
      if (p < reinterpret_cast<uptr>(chunks_[mid]))
        end = mid - 1;  // We are not interested in chunks_[mid].
      else
        beg = mid;  // chunks_[mid] may still be what we want.
    }

    if (beg < end) {
      CHECK_EQ(beg + 1, end);
      // There are 2 chunks left, choose one.
      if (p >= reinterpret_cast<uptr>(chunks_[end]))
        beg = end;
    }

    Header *h = chunks_[beg];
    if (h->map_beg + h->map_size <= p || p < h->map_beg)
      return 0;
    return GetUser(h);
  }

  void PrintStats() {
    Printf("Stats: LargeMmapAllocator: allocated %zd times, "
           "remains %zd (%zd K) max %zd M; by size logs: ",
           stats.n_allocs, stats.n_allocs - stats.n_frees,
           stats.currently_allocated >> 10, stats.max_allocated >> 20);
    for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
      uptr c = stats.by_size_log[i];
      if (!c) continue;
      Printf("%zd:%zd; ", i, c);
    }
    Printf("\n");
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    mutex_.Lock();
  }

  void ForceUnlock() {
    mutex_.Unlock();
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr i = 0; i < n_chunks_; i++)
      callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
  }

 private:
  static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
  struct Header {
    uptr map_beg;
    uptr map_size;
    uptr size;
    uptr chunk_idx;
  };

  Header *GetHeader(uptr p) {
    CHECK(IsAligned(p, page_size_));
    return reinterpret_cast<Header*>(p - page_size_);
  }
  Header *GetHeader(const void *p) {
    return GetHeader(reinterpret_cast<uptr>(p));
  }

  void *GetUser(Header *h) {
    CHECK(IsAligned((uptr)h, page_size_));
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, page_size_) + page_size_;
  }

  uptr page_size_;
  Header *chunks_[kMaxNumChunks];
  uptr n_chunks_;
  uptr min_mmap_, max_mmap_;
  bool chunks_sorted_;
  struct Stats {
    uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
  } stats;
  SpinMutex mutex_;
};
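
// Memory layout of a secondary chunk, for reference: the Header occupies the
// start of the page immediately preceding the user block, so
// GetHeader(p) == p - page_size_ and GetMetaData() hands out the bytes right
// after the Header on that same page.
//   [map_beg ... Header | metadata ... | user block of h->size bytes ...]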

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
//  When allocating 2^x bytes it should return a 2^x-aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
template <class PrimaryAllocator, class AllocatorCache,
          class SecondaryAllocator>  // NOLINT
class CombinedAllocator {
 public:
  void Init() {
    primary_.Init();
    secondary_.Init();
    stats_.Init();
  }

  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
                 bool cleared = false) {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
    if (size + alignment < size)
      return 0;
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    void *res;
    if (primary_.CanAllocate(size, alignment))
      res = cache->Allocate(&primary_, primary_.ClassID(size));
    else
      res = secondary_.Allocate(&stats_, size, alignment);
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    if (cleared && res)
      internal_memset(res, 0, size);
    return res;
  }

  void Deallocate(AllocatorCache *cache, void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
    else
      secondary_.Deallocate(&stats_, p);
  }

  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
                   uptr alignment) {
    if (!p)
      return Allocate(cache, new_size, alignment);
    if (!new_size) {
      Deallocate(cache, p);
      return 0;
    }
    CHECK(PointerIsMine(p));
    uptr old_size = GetActuallyAllocatedSize(p);
    uptr memcpy_size = Min(new_size, old_size);
    void *new_p = Allocate(cache, new_size, alignment);
    if (new_p)
      internal_memcpy(new_p, p, memcpy_size);
    Deallocate(cache, p);
    return new_p;
  }

  bool PointerIsMine(void *p) {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  bool FromPrimary(void *p) {
    return primary_.PointerIsMine(p);
  }

  void *GetMetaData(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  void *GetBlockBegin(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBegin(p);
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBeginFastLocked(p);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetActuallyAllocatedSize(p);
    return secondary_.GetActuallyAllocatedSize(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

  void InitCache(AllocatorCache *cache) {
    cache->Init(&stats_);
  }

  void DestroyCache(AllocatorCache *cache) {
    cache->Destroy(&primary_, &stats_);
  }

  void SwallowCache(AllocatorCache *cache) {
    cache->Drain(&primary_);
  }

  void GetStats(AllocatorStatCounters s) const {
    stats_.Get(s);
  }

  void PrintStats() {
    primary_.PrintStats();
    secondary_.PrintStats();
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    primary_.ForceLock();
    secondary_.ForceLock();
  }

  void ForceUnlock() {
    secondary_.ForceUnlock();
    primary_.ForceUnlock();
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    primary_.ForEachChunk(callback, arg);
    secondary_.ForEachChunk(callback, arg);
  }

 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
  AllocatorGlobalStats stats_;
};
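
// Putting the pieces together (a hedged sketch of typical tool-side wiring,
// reusing the hypothetical PrimaryAllocator typedef from the earlier example):
//   typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
//   typedef LargeMmapAllocator<> SecondaryAllocator;
//   typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
//       SecondaryAllocator> Allocator;
//   static Allocator allocator;               // allocator.Init() at startup.
//   static THREADLOCAL AllocatorCache cache;  // allocator.InitCache(&cache).
//   void *p = allocator.Allocate(&cache, size, /*alignment=*/8);
//   allocator.Deallocate(&cache, p);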

// Returns true if calloc(size, n) should return 0 due to overflow in size*n.
bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n);
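// The definition lives in the .cc file; a minimal sketch of such an overflow
// check (illustrative, not necessarily the exact implementation):
//   bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
//     if (!size) return false;
//     uptr max = (uptr)-1;
//     return n > max / size;  // True iff size * n would wrap around.
//   }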

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_H