//===-- sanitizer_allocator.h -----------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Specialized memory allocator for ThreadSanitizer, MemorySanitizer, etc.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ALLOCATOR_H
#define SANITIZER_ALLOCATOR_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"
#include "sanitizer_lfstack.h"

namespace __sanitizer {

// Prints an error message and kills the program.
void NORETURN ReportAllocatorCannotReturnNull();

// SizeClassMap maps allocation sizes into size classes and back.
// Class 0 corresponds to size 0.
// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
// Next 4 classes: 256 + i * 64  (i = 1 to 4).
// Next 4 classes: 512 + i * 128 (i = 1 to 4).
// ...
// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
//
// This structure of the size class map gives us:
//   - Efficient table-free class-to-size and size-to-class functions.
//   - The difference between two consecutive size classes is between 14%
//     and 25%.
//
// This class also gives a hint to a thread-caching allocator about the number
// of chunks that need to be cached per-thread:
//  - kMaxNumCached is the maximal number of chunks per size class.
//  - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
//
// Part of the output of SizeClassMap::Print():
// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
//
// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
//
// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
//
// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
//
// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
//
// ...
//
// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
//
// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52

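// Illustrative round-trip (a sketch; DefaultSizeClassMap, defined below, has
// kMaxSizeLog = 17): ClassID() returns the smallest class whose size covers
// the request, and Size() maps the class back to its chunk size:
//   uptr c = DefaultSizeClassMap::ClassID(100);  // c == 7
//   uptr s = DefaultSizeClassMap::Size(c);       // s == 112 >= 100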
template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog>
class SizeClassMap {
  static const uptr kMinSizeLog = 4;
  static const uptr kMidSizeLog = kMinSizeLog + 4;
  static const uptr kMinSize = 1 << kMinSizeLog;
  static const uptr kMidSize = 1 << kMidSizeLog;
  static const uptr kMidClass = kMidSize / kMinSize;
  static const uptr S = 2;
  static const uptr M = (1 << S) - 1;

 public:
  static const uptr kMaxNumCached = kMaxNumCachedT;
  // We transfer chunks between central and thread-local free lists in batches.
  // For small size classes we allocate batches separately.
  // For large size classes we use one of the chunks to store the batch.
  struct TransferBatch {
    TransferBatch *next;
    uptr count;
    void *batch[kMaxNumCached];
  };

  static const uptr kMaxSize = 1UL << kMaxSizeLog;
  static const uptr kNumClasses =
      kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
  COMPILER_CHECK(kNumClasses >= 32 && kNumClasses <= 256);
  static const uptr kNumClassesRounded =
      kNumClasses == 32  ? 32 :
      kNumClasses <= 64  ? 64 :
      kNumClasses <= 128 ? 128 : 256;

  static uptr Size(uptr class_id) {
    if (class_id <= kMidClass)
      return kMinSize * class_id;
    class_id -= kMidClass;
    uptr t = kMidSize << (class_id >> S);
    return t + (t >> S) * (class_id & M);
  }

  static uptr ClassID(uptr size) {
    if (size <= kMidSize)
      return (size + kMinSize - 1) >> kMinSizeLog;
    if (size > kMaxSize) return 0;
    uptr l = MostSignificantSetBitIndex(size);
    uptr hbits = (size >> (l - S)) & M;
    uptr lbits = size & ((1 << (l - S)) - 1);
    uptr l1 = l - kMidSizeLog;
    return kMidClass + (l1 << S) + hbits + (lbits > 0);
  }

  static uptr MaxCached(uptr class_id) {
    if (class_id == 0) return 0;
    uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
    return Max<uptr>(1, Min(kMaxNumCached, n));
  }

  static void Print() {
    uptr prev_s = 0;
    uptr total_cached = 0;
    for (uptr i = 0; i < kNumClasses; i++) {
      uptr s = Size(i);
      if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
        Printf("\n");
      uptr d = s - prev_s;
      uptr p = prev_s ? (d * 100 / prev_s) : 0;
      uptr l = s ? MostSignificantSetBitIndex(s) : 0;
      uptr cached = MaxCached(i) * s;
      Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
             "cached: %zd %zd; id %zd\n",
             i, Size(i), d, p, l, MaxCached(i), cached, ClassID(s));
      total_cached += cached;
      prev_s = s;
    }
    Printf("Total cached: %zd\n", total_cached);
  }

  static bool SizeClassRequiresSeparateTransferBatch(uptr class_id) {
    return Size(class_id) < sizeof(TransferBatch) -
        sizeof(uptr) * (kMaxNumCached - MaxCached(class_id));
  }

  static void Validate() {
    for (uptr c = 1; c < kNumClasses; c++) {
      // Printf("Validate: c%zd\n", c);
      uptr s = Size(c);
      CHECK_NE(s, 0U);
      CHECK_EQ(ClassID(s), c);
      if (c != kNumClasses - 1)
        CHECK_EQ(ClassID(s + 1), c + 1);
      CHECK_EQ(ClassID(s - 1), c);
      if (c)
        CHECK_GT(Size(c), Size(c-1));
    }
    CHECK_EQ(ClassID(kMaxSize + 1), 0);

    for (uptr s = 1; s <= kMaxSize; s++) {
      uptr c = ClassID(s);
      // Printf("s%zd => c%zd\n", s, c);
      CHECK_LT(c, kNumClasses);
      CHECK_GE(Size(c), s);
      if (c > 0)
        CHECK_LT(Size(c-1), s);
    }
  }
};

typedef SizeClassMap<17, 128, 16> DefaultSizeClassMap;
typedef SizeClassMap<17, 64,  14> CompactSizeClassMap;
template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;

// Memory allocator statistics
enum AllocatorStat {
  AllocatorStatAllocated,
  AllocatorStatMapped,
  AllocatorStatCount
};

typedef uptr AllocatorStatCounters[AllocatorStatCount];

// Per-thread stats, live in per-thread cache.
class AllocatorStats {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }
  void InitLinkerInitialized() {}

  void Add(AllocatorStat i, uptr v) {
    v += atomic_load(&stats_[i], memory_order_relaxed);
    atomic_store(&stats_[i], v, memory_order_relaxed);
  }

  void Sub(AllocatorStat i, uptr v) {
    v = atomic_load(&stats_[i], memory_order_relaxed) - v;
    atomic_store(&stats_[i], v, memory_order_relaxed);
  }

  void Set(AllocatorStat i, uptr v) {
    atomic_store(&stats_[i], v, memory_order_relaxed);
  }

  uptr Get(AllocatorStat i) const {
    return atomic_load(&stats_[i], memory_order_relaxed);
  }

 private:
  friend class AllocatorGlobalStats;
  AllocatorStats *next_;
  AllocatorStats *prev_;
  atomic_uintptr_t stats_[AllocatorStatCount];
};

// Global stats, used for aggregation and querying.
class AllocatorGlobalStats : public AllocatorStats {
 public:
  void InitLinkerInitialized() {
    next_ = this;
    prev_ = this;
  }
  void Init() {
    internal_memset(this, 0, sizeof(*this));
    InitLinkerInitialized();
  }

  void Register(AllocatorStats *s) {
    SpinMutexLock l(&mu_);
    s->next_ = next_;
    s->prev_ = this;
    next_->prev_ = s;
    next_ = s;
  }

  void Unregister(AllocatorStats *s) {
    SpinMutexLock l(&mu_);
    s->prev_->next_ = s->next_;
    s->next_->prev_ = s->prev_;
    for (int i = 0; i < AllocatorStatCount; i++)
      Add(AllocatorStat(i), s->Get(AllocatorStat(i)));
  }

  void Get(AllocatorStatCounters s) const {
    internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
    SpinMutexLock l(&mu_);
    const AllocatorStats *stats = this;
    for (;;) {
      for (int i = 0; i < AllocatorStatCount; i++)
        s[i] += stats->Get(AllocatorStat(i));
      stats = stats->next_;
      if (stats == this)
        break;
    }
    // All stats must be non-negative.
    for (int i = 0; i < AllocatorStatCount; i++)
      s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0;
  }

 private:
  mutable SpinMutex mu_;
};
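
// Typical usage (a sketch; global_stats/thread_stats are hypothetical
// names): a tool keeps one AllocatorGlobalStats object, registers each
// thread's AllocatorStats with it, and queries the aggregate via Get(),
// which walks the circular list of registered stats:
//   AllocatorGlobalStats global_stats;
//   global_stats.Init();
//   AllocatorStats thread_stats;
//   thread_stats.Init();
//   global_stats.Register(&thread_stats);
//   ...
//   AllocatorStatCounters counters;
//   global_stats.Get(counters);  // counters[AllocatorStatAllocated], etc.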

// Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const { }
};
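
// A non-trivial callback might look like this (a sketch; TrackingCallback is
// hypothetical, not part of this header). Tools use OnMap/OnUnmap to keep
// shadow memory or other bookkeeping in sync with the allocator's mmaps:
//   struct TrackingCallback {
//     void OnMap(uptr p, uptr size) const { /* e.g. update shadow */ }
//     void OnUnmap(uptr p, uptr size) const { /* e.g. release shadow */ }
//   };
// It is then passed as the MapUnmapCallback template argument below.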

// Callback type for iterating over chunks.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);

// SizeClassAllocator64 -- allocator for 64-bit address space.
//
// Space: a portion of address space of kSpaceSize bytes starting at
// a fixed address (kSpaceBeg). Both constants are powers of two and
// kSpaceBeg is kSpaceSize-aligned.
// At the beginning the entire space is mprotect-ed, then small parts of it
// are mapped on demand.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to the user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
template <const uptr kSpaceBeg, const uptr kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap,
          class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator64 {
 public:
  typedef typename SizeClassMap::TransferBatch Batch;
  typedef SizeClassAllocator64<kSpaceBeg, kSpaceSize, kMetadataSize,
      SizeClassMap, MapUnmapCallback> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;

  void Init() {
    CHECK_EQ(kSpaceBeg,
             reinterpret_cast<uptr>(MmapNoAccess(kSpaceBeg, kSpaceSize)));
    MapWithCallback(kSpaceEnd, AdditionalSize());
  }

  void MapWithCallback(uptr beg, uptr size) {
    CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
    MapUnmapCallback().OnMap(beg, size);
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

  NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    Batch *b = region->free_list.Pop();
    if (b == 0)
      b = PopulateFreeList(stat, c, class_id, region);
    region->n_allocated += b->count;
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
    RegionInfo *region = GetRegionInfo(class_id);
    CHECK_GT(b->count, 0);
    region->free_list.Push(b);
    region->n_freed += b->count;
  }

  static bool PointerIsMine(const void *p) {
    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
  }

  static uptr GetSizeClass(const void *p) {
    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClassesRounded;
  }

  void *GetBlockBegin(const void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    if (!size) return 0;
    uptr chunk_idx = GetChunkIdx((uptr)p, size);
    uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
    uptr beg = chunk_idx * size;
    uptr next_beg = beg + size;
    if (class_id >= kNumClasses) return 0;
    RegionInfo *region = GetRegionInfo(class_id);
    if (region->mapped_user >= next_beg)
      return reinterpret_cast<void*>(reg_beg + beg);
    return 0;
  }

  static uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  void *GetMetaData(const void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
    return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
                                   (1 + chunk_idx) * kMetadataSize);
  }

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapWithCallback(kSpaceBeg, kSpaceSize + AdditionalSize());
  }

  void PrintStats() {
    uptr total_mapped = 0;
    uptr n_allocated = 0;
    uptr n_freed = 0;
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      total_mapped += region->mapped_user;
      n_allocated += region->n_allocated;
      n_freed += region->n_freed;
    }
    Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
           "remains %zd\n",
           total_mapped >> 20, n_allocated, n_allocated - n_freed);
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      if (region->mapped_user == 0) continue;
      Printf("  %02zd (%zd): total: %zd K allocs: %zd remains: %zd\n",
             class_id,
             SizeClassMap::Size(class_id),
             region->mapped_user >> 10,
             region->n_allocated,
             region->n_allocated - region->n_freed);
    }
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetRegionInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = (int)kNumClasses - 1; i >= 0; i--) {
      GetRegionInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      uptr chunk_size = SizeClassMap::Size(class_id);
      uptr region_beg = kSpaceBeg + class_id * kRegionSize;
      for (uptr chunk = region_beg;
           chunk < region_beg + region->allocated_user;
           chunk += chunk_size) {
        // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
        callback(chunk, arg);
      }
    }
  }

  static uptr AdditionalSize() {
    return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
                     GetPageSizeCached());
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;

 private:
  static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
  static const uptr kSpaceEnd = kSpaceBeg + kSpaceSize;
  COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
  // kRegionSize must be >= 2^32.
  COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
  // Populate the free list with at most this number of bytes at once
  // or with one element if its size is greater.
  static const uptr kPopulateSize = 1 << 14;
  // Call mmap for user memory with at least this size.
  static const uptr kUserMapSize = 1 << 16;
  // Call mmap for metadata memory with at least this size.
  static const uptr kMetaMapSize = 1 << 16;

  struct RegionInfo {
    BlockingMutex mutex;
    LFStack<Batch> free_list;
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    uptr mapped_user;  // Bytes mapped for user memory.
    uptr mapped_meta;  // Bytes mapped for metadata.
    uptr n_allocated, n_freed;  // Just stats.
  };
  COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);

  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
    return &regions[class_id];
  }

  static uptr GetChunkIdx(uptr chunk, uptr size) {
    uptr offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // size always fits into 32 bits. If the offset fits too, use 32-bit div.
    if (offset >> (SANITIZER_WORDSIZE / 2))
      return offset / size;
    return (u32)offset / (u32)size;
  }

  NOINLINE Batch* PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                                   uptr class_id, RegionInfo *region) {
    BlockingMutexLock l(&region->mutex);
    Batch *b = region->free_list.Pop();
    if (b)
      return b;
    uptr size = SizeClassMap::Size(class_id);
    uptr count = size < kPopulateSize ? SizeClassMap::MaxCached(class_id) : 1;
    uptr beg_idx = region->allocated_user;
    uptr end_idx = beg_idx + count * size;
    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
    if (end_idx + size > region->mapped_user) {
      // Do the mmap for the user memory.
      uptr map_size = kUserMapSize;
      while (end_idx + size > region->mapped_user + map_size)
        map_size += kUserMapSize;
      CHECK_GE(region->mapped_user + map_size, end_idx);
      MapWithCallback(region_beg + region->mapped_user, map_size);
      stat->Add(AllocatorStatMapped, map_size);
      region->mapped_user += map_size;
    }
    uptr total_count = (region->mapped_user - beg_idx - size)
        / size / count * count;
    region->allocated_meta += total_count * kMetadataSize;
    if (region->allocated_meta > region->mapped_meta) {
      uptr map_size = kMetaMapSize;
      while (region->allocated_meta > region->mapped_meta + map_size)
        map_size += kMetaMapSize;
      // Do the mmap for the metadata.
      CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
      MapWithCallback(region_beg + kRegionSize -
                      region->mapped_meta - map_size, map_size);
      region->mapped_meta += map_size;
    }
    CHECK_LE(region->allocated_meta, region->mapped_meta);
    if (region->mapped_user + region->mapped_meta > kRegionSize) {
      Printf("%s: Out of memory. Dying. ", SanitizerToolName);
      Printf("The process has exhausted %zuMB for size class %zu.\n",
          kRegionSize / 1024 / 1024, size);
      Die();
    }
    for (;;) {
      if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
        b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
      else
        b = (Batch*)(region_beg + beg_idx);
      b->count = count;
      for (uptr i = 0; i < count; i++)
        b->batch[i] = (void*)(region_beg + beg_idx + i * size);
      region->allocated_user += count * size;
      CHECK_LE(region->allocated_user, region->mapped_user);
      beg_idx += count * size;
      if (beg_idx + count * size + size > region->mapped_user)
        break;
      CHECK_GT(b->count, 0);
      region->free_list.Push(b);
    }
    return b;
  }
};
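
// Example instantiation (a sketch; the constants are hypothetical, each tool
// picks kSpaceBeg/kSpaceSize/kMetadataSize to fit its own address layout):
//   typedef SizeClassAllocator64<0x700000000000ULL /*kSpaceBeg*/,
//       0x040000000000ULL /*kSpaceSize*/, 16 /*kMetadataSize*/,
//       DefaultSizeClassMap> PrimaryAllocator64;
// kSpaceBeg must be kSpaceSize-aligned, and the resulting kRegionSize
// (kSpaceSize / kNumClassesRounded) must be >= 2^32; both are enforced by
// the COMPILER_CHECKs above.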

// Maps integers in range [0, kSize) to u8 values.
template<u64 kSize>
class FlatByteMap {
 public:
  void TestOnlyInit() {
    internal_memset(map_, 0, sizeof(map_));
  }

  void set(uptr idx, u8 val) {
    CHECK_LT(idx, kSize);
    CHECK_EQ(0U, map_[idx]);
    map_[idx] = val;
  }
  u8 operator[] (uptr idx) {
    CHECK_LT(idx, kSize);
    // FIXME: CHECK may be too expensive here.
    return map_[idx];
  }
 private:
  u8 map_[kSize];
};

// TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
// It is implemented as a two-dimensional array: array of kSize1 pointers
// to kSize2-byte arrays. The secondary arrays are mmaped on demand.
// Each value is initially zero and can be set to something else only once.
// Setting and getting values from multiple threads is safe w/o extra locking.
template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback>
class TwoLevelByteMap {
 public:
  void TestOnlyInit() {
    internal_memset(map1_, 0, sizeof(map1_));
    mu_.Init();
  }
  void TestOnlyUnmap() {
    for (uptr i = 0; i < kSize1; i++) {
      u8 *p = Get(i);
      if (!p) continue;
      MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
      UnmapOrDie(p, kSize2);
    }
  }

  uptr size() const { return kSize1 * kSize2; }
  uptr size1() const { return kSize1; }
  uptr size2() const { return kSize2; }

  void set(uptr idx, u8 val) {
    CHECK_LT(idx, kSize1 * kSize2);
    u8 *map2 = GetOrCreate(idx / kSize2);
    CHECK_EQ(0U, map2[idx % kSize2]);
    map2[idx % kSize2] = val;
  }

  u8 operator[] (uptr idx) const {
    CHECK_LT(idx, kSize1 * kSize2);
    u8 *map2 = Get(idx / kSize2);
    if (!map2) return 0;
    return map2[idx % kSize2];
  }

 private:
  u8 *Get(uptr idx) const {
    CHECK_LT(idx, kSize1);
    return reinterpret_cast<u8 *>(
        atomic_load(&map1_[idx], memory_order_acquire));
  }

  u8 *GetOrCreate(uptr idx) {
    u8 *res = Get(idx);
    if (!res) {
      SpinMutexLock l(&mu_);
      if (!(res = Get(idx))) {
        res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
        MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
        atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
                     memory_order_release);
      }
    }
    return res;
  }

  atomic_uintptr_t map1_[kSize1];
  StaticSpinMutex mu_;
};
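
// Sizing sketch (illustrative numbers only): to keep one byte per 2^20-byte
// region of a 2^32-byte address space, kSize1 * kSize2 must equal 2^12, e.g.
//   typedef TwoLevelByteMap<1 << 4, 1 << 8> ExampleByteMap;  // hypothetical
// This keeps only 16 first-level pointers resident and mmaps the 256-byte
// second-level arrays on demand.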

// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on a 64-bit arch, but there it is
// less efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
//   a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
// 0 size class means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing, the objects of this class should be
// cache-line aligned.
template <const uptr kSpaceBeg, const u64 kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap,
          const uptr kRegionSizeLog,
          class ByteMap,
          class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator32 {
 public:
  typedef typename SizeClassMap::TransferBatch Batch;
  typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
      SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;

  void Init() {
    possible_regions.TestOnlyInit();
    internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
  }

  void *MapWithCallback(uptr size) {
    size = RoundUpTo(size, GetPageSizeCached());
    void *res = MmapOrDie(size, "SizeClassAllocator32");
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

  void *GetMetaData(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = SizeClassMap::Size(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }

  NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    if (sci->free_list.empty())
      PopulateFreeList(stat, c, sci, class_id);
    CHECK(!sci->free_list.empty());
    Batch *b = sci->free_list.front();
    sci->free_list.pop_front();
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    CHECK_GT(b->count, 0);
    sci->free_list.push_front(b);
  }

  bool PointerIsMine(const void *p) {
    return GetSizeClass(p) != 0;
  }

  uptr GetSizeClass(const void *p) {
    return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
  }

  void *GetBlockBegin(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = SizeClassMap::Size(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions[region]) {
        uptr chunk_size = SizeClassMap::Size(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr chunk = region_beg;
             chunk < region_beg + max_chunks_in_region * chunk_size;
             chunk += chunk_size) {
          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
          callback(chunk, arg);
        }
      }
  }

  void PrintStats() {
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

  struct SizeClassInfo {
    SpinMutex mutex;
    IntrusiveList<Batch> free_list;
    char padding[kCacheLineSize - sizeof(uptr) - sizeof(IntrusiveList<Batch>)];
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);

  uptr ComputeRegionId(uptr mem) {
    uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }

  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
                                      "SizeClassAllocator32"));
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMapped, kRegionSize);
    CHECK_EQ(0U, (res & (kRegionSize - 1)));
    possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }

  void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    uptr size = SizeClassMap::Size(class_id);
    uptr reg = AllocateRegion(stat, class_id);
    uptr n_chunks = kRegionSize / (size + kMetadataSize);
    uptr max_count = SizeClassMap::MaxCached(class_id);
    Batch *b = 0;
    for (uptr i = reg; i < reg + n_chunks * size; i += size) {
      if (b == 0) {
        if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
          b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
        else
          b = (Batch*)i;
        b->count = 0;
      }
      b->batch[b->count++] = (void*)i;
      if (b->count == max_count) {
        CHECK_GT(b->count, 0);
        sci->free_list.push_back(b);
        b = 0;
      }
    }
    if (b) {
      CHECK_GT(b->count, 0);
      sci->free_list.push_back(b);
    }
  }

  ByteMap possible_regions;
  SizeClassInfo size_class_info_array[kNumClasses];
};
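
// Example instantiation (a sketch with hypothetical constants): a flat byte
// map over a 2^32-byte space split into 2^20-byte regions needs 2^(32-20)
// entries:
//   typedef FlatByteMap<1 << 12> ExampleByteMap32;
//   typedef SizeClassAllocator32<0 /*kSpaceBeg*/, 1ULL << 32 /*kSpaceSize*/,
//       16 /*kMetadataSize*/, CompactSizeClassMap, 20 /*kRegionSizeLog*/,
//       ExampleByteMap32> PrimaryAllocator32;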

// Objects of this type should be used as local caches for SizeClassAllocator64
// or SizeClassAllocator32. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache {
  typedef SizeClassAllocator Allocator;
  static const uptr kNumClasses = SizeClassAllocator::kNumClasses;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    stats_.Add(AllocatorStatAllocated, SizeClassMap::Size(class_id));
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0))
      Refill(allocator, class_id);
    void *res = c->batch[--c->count];
    PREFETCH(c->batch[c->count - 1]);
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    InitCache();
    stats_.Sub(AllocatorStatAllocated, SizeClassMap::Size(class_id));
    PerClass *c = &per_class_[class_id];
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(allocator, class_id);
    c->batch[c->count++] = p;
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
      PerClass *c = &per_class_[class_id];
      while (c->count > 0)
        Drain(allocator, class_id);
    }
  }

  // private:
  typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
  typedef typename SizeClassMap::TransferBatch Batch;
  struct PerClass {
    uptr count;
    uptr max_count;
    void *batch[2 * SizeClassMap::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache() {
    if (per_class_[1].max_count)
      return;
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      c->max_count = 2 * SizeClassMap::MaxCached(i);
    }
  }

  NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    Batch *b = allocator->AllocateBatch(&stats_, this, class_id);
    CHECK_GT(b->count, 0);
    for (uptr i = 0; i < b->count; i++)
      c->batch[i] = b->batch[i];
    c->count = b->count;
    if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
      Deallocate(allocator, SizeClassMap::ClassID(sizeof(Batch)), b);
  }

  NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    Batch *b;
    if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
      b = (Batch*)Allocate(allocator, SizeClassMap::ClassID(sizeof(Batch)));
    else
      b = (Batch*)c->batch[0];
    uptr cnt = Min(c->max_count / 2, c->count);
    for (uptr i = 0; i < cnt; i++) {
      b->batch[i] = c->batch[i];
      c->batch[i] = c->batch[i + c->max_count / 2];
    }
    b->count = cnt;
    c->count -= cnt;
    CHECK_GT(b->count, 0);
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};
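
// Typical per-thread usage (a sketch; allocator, global_stats and cache are
// hypothetical instances): the cache lives in TLS as a POD object and is
// bound to the shared allocator on each call:
//   static PrimaryAllocator64 allocator;  // see the example typedef above
//   static THREADLOCAL PrimaryAllocator64::AllocatorCache cache;
//   cache.Init(&global_stats);            // or Init(0) to skip registration
//   void *p = cache.Allocate(&allocator, allocator.ClassID(size));
//   cache.Deallocate(&allocator, allocator.GetSizeClass(p), p);
//   cache.Destroy(&allocator, &global_stats);  // on thread exit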

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
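// Chunk layout (a sketch, implied by GetHeader()/GetUser() below): the Header
// occupies the start of the page immediately preceding the page-aligned user
// block:
//   map_beg ... [Header | pad to page_size_][user memory (size bytes)] ... map_end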
template <class MapUnmapCallback = NoOpMapUnmapCallback>
class LargeMmapAllocator {
 public:
  void InitLinkerInitialized(bool may_return_null) {
    page_size_ = GetPageSizeCached();
    atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
  }

  void Init(bool may_return_null) {
    internal_memset(this, 0, sizeof(*this));
    InitLinkerInitialized(may_return_null);
  }

  void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
    CHECK(IsPowerOfTwo(alignment));
    uptr map_size = RoundUpMapSize(size);
    if (alignment > page_size_)
      map_size += alignment;
    // Overflow.
    if (map_size < size)
      return ReturnNullOrDie();
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDie(map_size, "LargeMmapAllocator"));
    CHECK(IsAligned(map_beg, page_size_));
    MapUnmapCallback().OnMap(map_beg, map_size);
    uptr map_end = map_beg + map_size;
    uptr res = map_beg + page_size_;
    if (res & (alignment - 1))  // Align.
      res += alignment - (res & (alignment - 1));
    CHECK(IsAligned(res, alignment));
    CHECK(IsAligned(res, page_size_));
    CHECK_GE(res + size, map_beg);
    CHECK_LE(res + size, map_end);
    Header *h = GetHeader(res);
    h->size = size;
    h->map_beg = map_beg;
    h->map_size = map_size;
    uptr size_log = MostSignificantSetBitIndex(map_size);
    CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
    {
      SpinMutexLock l(&mutex_);
      uptr idx = n_chunks_++;
      chunks_sorted_ = false;
      CHECK_LT(idx, kMaxNumChunks);
      h->chunk_idx = idx;
      chunks_[idx] = h;
      stats.n_allocs++;
      stats.currently_allocated += map_size;
      stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
      stats.by_size_log[size_log]++;
      stat->Add(AllocatorStatAllocated, map_size);
      stat->Add(AllocatorStatMapped, map_size);
    }
    return reinterpret_cast<void*>(res);
  }

  void *ReturnNullOrDie() {
    if (atomic_load(&may_return_null_, memory_order_acquire))
      return 0;
    ReportAllocatorCannotReturnNull();
  }

  void SetMayReturnNull(bool may_return_null) {
    atomic_store(&may_return_null_, may_return_null, memory_order_release);
  }

  void Deallocate(AllocatorStats *stat, void *p) {
    Header *h = GetHeader(p);
    {
      SpinMutexLock l(&mutex_);
      uptr idx = h->chunk_idx;
      CHECK_EQ(chunks_[idx], h);
      CHECK_LT(idx, n_chunks_);
      chunks_[idx] = chunks_[n_chunks_ - 1];
      chunks_[idx]->chunk_idx = idx;
      n_chunks_--;
      chunks_sorted_ = false;
      stats.n_frees++;
      stats.currently_allocated -= h->map_size;
      stat->Sub(AllocatorStatAllocated, h->map_size);
      stat->Sub(AllocatorStatMapped, h->map_size);
    }
    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
    UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (uptr i = 0; i < n_chunks_; i++) {
      Header *h = chunks_[i];
      CHECK_EQ(h->chunk_idx, i);
      res += RoundUpMapSize(h->size);
    }
    return res;
  }

  bool PointerIsMine(const void *p) {
    return GetBlockBegin(p) != 0;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpTo(GetHeader(p)->size, page_size_);
  }

  // At least page_size_/2 metadata bytes are available.
  void *GetMetaData(const void *p) {
    // Too slow: CHECK_EQ(p, GetBlockBegin(p));
    if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
      Printf("%s: bad pointer %p\n", SanitizerToolName, p);
      CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
    }
    return GetHeader(p) + 1;
  }

  void *GetBlockBegin(const void *ptr) {
    uptr p = reinterpret_cast<uptr>(ptr);
    SpinMutexLock l(&mutex_);
    uptr nearest_chunk = 0;
    // Cache-friendly linear search.
    for (uptr i = 0; i < n_chunks_; i++) {
      uptr ch = reinterpret_cast<uptr>(chunks_[i]);
      if (p < ch) continue;  // p is to the left of this chunk; skip it.
      if (p - ch < p - nearest_chunk)
        nearest_chunk = ch;
    }
    if (!nearest_chunk)
      return 0;
    Header *h = reinterpret_cast<Header *>(nearest_chunk);
    CHECK_GE(nearest_chunk, h->map_beg);
    CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
    CHECK_LE(nearest_chunk, p);
    if (h->map_beg + h->map_size <= p)
      return 0;
    return GetUser(h);
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *ptr) {
    mutex_.CheckLocked();
    uptr p = reinterpret_cast<uptr>(ptr);
    uptr n = n_chunks_;
    if (!n) return 0;
    if (!chunks_sorted_) {
      // Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate.
      SortArray(reinterpret_cast<uptr*>(chunks_), n);
      for (uptr i = 0; i < n; i++)
        chunks_[i]->chunk_idx = i;
      chunks_sorted_ = true;
      min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
      max_mmap_ = reinterpret_cast<uptr>(chunks_[n - 1]) +
          chunks_[n - 1]->map_size;
    }
    if (p < min_mmap_ || p >= max_mmap_)
      return 0;
    uptr beg = 0, end = n - 1;
    // This loop is a log(n) lower_bound. It does not check for the exact match
    // to avoid expensive cache-thrashing loads.
    while (end - beg >= 2) {
      uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1
      if (p < reinterpret_cast<uptr>(chunks_[mid]))
        end = mid - 1;  // We are not interested in chunks_[mid].
      else
        beg = mid;  // chunks_[mid] may still be what we want.
    }

    if (beg < end) {
      CHECK_EQ(beg + 1, end);
      // There are 2 chunks left, choose one.
      if (p >= reinterpret_cast<uptr>(chunks_[end]))
        beg = end;
    }

    Header *h = chunks_[beg];
    if (h->map_beg + h->map_size <= p || p < h->map_beg)
      return 0;
    return GetUser(h);
  }

  void PrintStats() {
    Printf("Stats: LargeMmapAllocator: allocated %zd times, "
           "remains %zd (%zd K) max %zd M; by size logs: ",
           stats.n_allocs, stats.n_allocs - stats.n_frees,
           stats.currently_allocated >> 10, stats.max_allocated >> 20);
    for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
      uptr c = stats.by_size_log[i];
      if (!c) continue;
      Printf("%zd:%zd; ", i, c);
    }
    Printf("\n");
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    mutex_.Lock();
  }

  void ForceUnlock() {
    mutex_.Unlock();
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr i = 0; i < n_chunks_; i++)
      callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
  }

 private:
  static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
  struct Header {
    uptr map_beg;
    uptr map_size;
    uptr size;
    uptr chunk_idx;
  };

  Header *GetHeader(uptr p) {
    CHECK(IsAligned(p, page_size_));
    return reinterpret_cast<Header*>(p - page_size_);
  }
  Header *GetHeader(const void *p) {
    return GetHeader(reinterpret_cast<uptr>(p));
  }

  void *GetUser(Header *h) {
    CHECK(IsAligned((uptr)h, page_size_));
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, page_size_) + page_size_;
  }

  uptr page_size_;
  Header *chunks_[kMaxNumChunks];
  uptr n_chunks_;
  uptr min_mmap_, max_mmap_;
  bool chunks_sorted_;
  struct Stats {
    uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
  } stats;
  atomic_uint8_t may_return_null_;
  SpinMutex mutex_;
};

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
//  When allocating 2^x bytes it should return a 2^x-aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
template <class PrimaryAllocator, class AllocatorCache,
          class SecondaryAllocator>  // NOLINT
class CombinedAllocator {
 public:
  void InitCommon(bool may_return_null) {
    primary_.Init();
    atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
  }

  void InitLinkerInitialized(bool may_return_null) {
    secondary_.InitLinkerInitialized(may_return_null);
    stats_.InitLinkerInitialized();
    InitCommon(may_return_null);
  }

  void Init(bool may_return_null) {
    secondary_.Init(may_return_null);
    stats_.Init();
    InitCommon(may_return_null);
  }

  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
                 bool cleared = false, bool check_rss_limit = false) {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
    if (size + alignment < size)
      return ReturnNullOrDie();
    if (check_rss_limit && RssLimitIsExceeded())
      return ReturnNullOrDie();
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    void *res;
    bool from_primary = primary_.CanAllocate(size, alignment);
    if (from_primary)
      res = cache->Allocate(&primary_, primary_.ClassID(size));
    else
      res = secondary_.Allocate(&stats_, size, alignment);
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    if (cleared && res && from_primary)
      internal_bzero_aligned16(res, RoundUpTo(size, 16));
    return res;
  }

  bool MayReturnNull() const {
    return atomic_load(&may_return_null_, memory_order_acquire);
  }

  void *ReturnNullOrDie() {
    if (MayReturnNull())
      return 0;
    ReportAllocatorCannotReturnNull();
  }

  void SetMayReturnNull(bool may_return_null) {
    secondary_.SetMayReturnNull(may_return_null);
    atomic_store(&may_return_null_, may_return_null, memory_order_release);
  }

  bool RssLimitIsExceeded() {
    return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
  }

  void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
    atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
                 memory_order_release);
  }

  void Deallocate(AllocatorCache *cache, void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
    else
      secondary_.Deallocate(&stats_, p);
  }

  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
                   uptr alignment) {
    if (!p)
      return Allocate(cache, new_size, alignment);
    if (!new_size) {
      Deallocate(cache, p);
      return 0;
    }
    CHECK(PointerIsMine(p));
    uptr old_size = GetActuallyAllocatedSize(p);
    uptr memcpy_size = Min(new_size, old_size);
    void *new_p = Allocate(cache, new_size, alignment);
    if (new_p)
      internal_memcpy(new_p, p, memcpy_size);
    Deallocate(cache, p);
    return new_p;
  }

  bool PointerIsMine(void *p) {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  bool FromPrimary(void *p) {
    return primary_.PointerIsMine(p);
  }

  void *GetMetaData(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  void *GetBlockBegin(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBegin(p);
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBeginFastLocked(p);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetActuallyAllocatedSize(p);
    return secondary_.GetActuallyAllocatedSize(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

  void InitCache(AllocatorCache *cache) {
    cache->Init(&stats_);
  }

  void DestroyCache(AllocatorCache *cache) {
    cache->Destroy(&primary_, &stats_);
  }

  void SwallowCache(AllocatorCache *cache) {
    cache->Drain(&primary_);
  }

  void GetStats(AllocatorStatCounters s) const {
    stats_.Get(s);
  }

  void PrintStats() {
    primary_.PrintStats();
    secondary_.PrintStats();
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    primary_.ForceLock();
    secondary_.ForceLock();
  }

  void ForceUnlock() {
    secondary_.ForceUnlock();
    primary_.ForceUnlock();
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    primary_.ForEachChunk(callback, arg);
    secondary_.ForEachChunk(callback, arg);
  }

 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
  AllocatorGlobalStats stats_;
  atomic_uint8_t may_return_null_;
  atomic_uint8_t rss_limit_is_exceeded_;
};
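
// Putting it together (a sketch mirroring how sanitizer tools typically
// compose these classes; PrimaryAllocator64 is the hypothetical typedef from
// the SizeClassAllocator64 example above):
//   typedef SizeClassAllocatorLocalCache<PrimaryAllocator64> AllocatorCache;
//   typedef LargeMmapAllocator<> SecondaryAllocator;
//   typedef CombinedAllocator<PrimaryAllocator64, AllocatorCache,
//       SecondaryAllocator> Allocator;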

// Returns true if calloc(size, n) should return 0 due to overflow in size*n.
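// (A minimal sketch of the condition, not necessarily the exact
// implementation: n != 0 && size > ((uptr)-1) / n, i.e. the product
// size * n does not fit into uptr.)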
bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n);

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_H