/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Header page:
//
// For the minimum allocation size (8 bytes), the free bitmap can track used
// allocations covering up to 4032*8*8 = 258048 bytes, which is 256KiB minus
// the 4KiB header page.

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include <sys/cdefs.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#include <atomic>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <mutex>

#include "android-base/macros.h"

#include "anon_vma_naming.h"
#include "Allocator.h"
#include "LinkedList.h"

// runtime interfaces used:
// abort
// assert (which may pull in fprintf and mmap)
// mmap
// munmap
// prctl

constexpr size_t const_log2(size_t n, size_t p = 0) {
  return (n <= 1) ? p : const_log2(n / 2, p + 1);
}

constexpr unsigned int div_round_up(unsigned int x, unsigned int y) {
  return (x + y - 1) / y;
}
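
// Illustrative compile-time sanity checks for the helpers above:
static_assert(const_log2(1) == 0, "const_log2(1) == 0");
static_assert(const_log2(8) == 3, "const_log2(8) == 3");
static_assert(div_round_up(9, 4) == 3, "div_round_up(9, 4) == 3");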

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

static constexpr size_t kPageSize = 4096;
static constexpr size_t kChunkSize = 256 * 1024;
static constexpr size_t kUsableChunkSize = kChunkSize - kPageSize;
static constexpr size_t kMaxBucketAllocationSize = kChunkSize / 4;
static constexpr size_t kMinBucketAllocationSize = 8;
static constexpr unsigned int kNumBuckets =
    const_log2(kMaxBucketAllocationSize) - const_log2(kMinBucketAllocationSize) + 1;
static constexpr unsigned int kUsablePagesPerChunk = kUsableChunkSize / kPageSize;
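
// With the values above, buckets run from 8 bytes (bucket 0) up to 64KiB
// (bucket 13). These asserts just document the arithmetic:
static_assert(kMaxBucketAllocationSize == 64 * 1024, "max bucket is 64KiB");
static_assert(kNumBuckets == 14, "14 power-of-two buckets, 8B..64KiB");
static_assert(kUsablePagesPerChunk == 63, "63 usable pages per chunk");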

std::atomic<int> heap_count;

class Chunk;

class HeapImpl {
 public:
  HeapImpl();
  ~HeapImpl();
  void* operator new(std::size_t count) noexcept;
  void operator delete(void* ptr);

  void* Alloc(size_t size);
  void Free(void* ptr);
  bool Empty();

  void MoveToFullList(Chunk* chunk, int bucket);
  void MoveToFreeList(Chunk* chunk, int bucket);

 private:
  DISALLOW_COPY_AND_ASSIGN(HeapImpl);

  LinkedList<Chunk*> free_chunks_[kNumBuckets];
  LinkedList<Chunk*> full_chunks_[kNumBuckets];

  void MoveToList(Chunk* chunk, LinkedList<Chunk*>* head);
  void* MapAlloc(size_t size);
  void MapFree(void* ptr);
  void* AllocLocked(size_t size);
  void FreeLocked(void* ptr);

  struct MapAllocation {
    void* ptr;
    size_t size;
    MapAllocation* next;
  };
  MapAllocation* map_allocation_list_;
  std::mutex m_;
};

// Integer log2, rounds down. Undefined for n == 0 (__builtin_clzll(0) is
// undefined); callers only pass n >= 1.
static inline unsigned int log2(size_t n) {
  return 8 * sizeof(unsigned long long) - __builtin_clzll(n) - 1;
}

static inline unsigned int size_to_bucket(size_t size) {
  // Sizes below the minimum allocation size all share the smallest bucket.
  if (size < kMinBucketAllocationSize)
    return 0;
  return log2(size - 1) + 1 - const_log2(kMinBucketAllocationSize);
}

static inline size_t bucket_to_size(unsigned int bucket) {
  return kMinBucketAllocationSize << bucket;
}
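
// Worked examples of the size-to-bucket mapping:
//   size 1..8   -> bucket 0  (8 bytes)
//   size 9..16  -> bucket 1  (16 bytes)
//   size 4097   -> bucket 10 (8KiB)
//   size 65536  -> bucket 13 (64KiB, the largest bucket)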

static void* MapAligned(size_t size, size_t align) {
  const int prot = PROT_READ | PROT_WRITE;
  const int flags = MAP_ANONYMOUS | MAP_PRIVATE;

  size = (size + kPageSize - 1) & ~(kPageSize - 1);

  // Over-allocate enough to align
  size_t map_size = size + align - kPageSize;
  if (map_size < size) {
    return nullptr;
  }

  void* ptr = mmap(NULL, map_size, prot, flags, -1, 0);
  if (ptr == MAP_FAILED) {
    return nullptr;
  }

  size_t aligned_size = map_size;
  void* aligned_ptr = ptr;

  std::align(align, size, aligned_ptr, aligned_size);

  // Trim beginning
  if (aligned_ptr != ptr) {
    ptrdiff_t extra =
        reinterpret_cast<uintptr_t>(aligned_ptr) - reinterpret_cast<uintptr_t>(ptr);
    munmap(ptr, extra);
    map_size -= extra;
    ptr = aligned_ptr;
  }

  // Trim end
  if (map_size != size) {
    assert(map_size > size);
    assert(ptr != NULL);
    munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) + size),
           map_size - size);
  }

#define PR_SET_VMA 0x53564d41
#define PR_SET_VMA_ANON_NAME 0
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, reinterpret_cast<uintptr_t>(ptr), size,
        "leak_detector_malloc");

  return ptr;
}
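
// MapAligned(kChunkSize, kChunkSize) returns a 256KiB region whose address
// is a multiple of 256KiB; Chunk::ptr_to_chunk() below relies on that
// alignment to recover the chunk header by masking pointer bits.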

class Chunk {
 public:
  static void* operator new(std::size_t count) noexcept;
  static void operator delete(void* ptr);
  Chunk(HeapImpl* heap, int bucket);
  ~Chunk() {}

  void* Alloc();
  void Free(void* ptr);
  void Purge();
  bool Empty();

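  // Chunks are mapped with kChunkSize alignment (see MapAligned), so masking
  // the low bits of any interior pointer recovers the Chunk header.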
  static Chunk* ptr_to_chunk(void* ptr) {
    return reinterpret_cast<Chunk*>(reinterpret_cast<uintptr_t>(ptr) &
                                    ~(kChunkSize - 1));
  }
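  // Large MapAlloc allocations are also mapped kChunkSize-aligned, so any
  // pointer with nonzero low bits must point into a chunk.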
  static bool is_chunk(void* ptr) {
    return (reinterpret_cast<uintptr_t>(ptr) & (kChunkSize - 1)) != 0;
  }

  unsigned int free_count() {
    return free_count_;
  }
  HeapImpl* heap() {
    return heap_;
  }
  LinkedList<Chunk*> node_;  // linked list sorted by minimum free count

 private:
  DISALLOW_COPY_AND_ASSIGN(Chunk);
  HeapImpl* heap_;
  unsigned int bucket_;
  unsigned int allocation_size_;    // size of allocations in chunk, min 8 bytes
  unsigned int max_allocations_;    // maximum number of allocations in the chunk
  unsigned int first_free_bitmap_;  // index into bitmap for first non-full entry
  unsigned int free_count_;         // number of available allocations
  unsigned int frees_since_purge_;  // number of calls to Free since last Purge

  // bitmap of pages that have been dirtied
  uint32_t dirty_pages_[div_round_up(kUsablePagesPerChunk, 32)];

  // bitmap of free allocations
  uint32_t free_bitmap_[kUsableChunkSize / kMinBucketAllocationSize / 32];

  char data_[0];

  unsigned int ptr_to_n(void* ptr) {
    ptrdiff_t offset =
        reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(data_);
    return offset / allocation_size_;
  }
  void* n_to_ptr(unsigned int n) {
    return data_ + n * allocation_size_;
  }
};
static_assert(sizeof(Chunk) <= kPageSize, "header must fit in page");
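
// free_bitmap_ is kUsableChunkSize / 8 / 32 = 1008 words, i.e. 4032 bytes:
// the "4032" in the header-page comment at the top of this file. Together
// with dirty_pages_ and the other fields it fits in the 4KiB header page,
// which the static_assert above guarantees.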

// Override new operator on chunk to use mmap to allocate kChunkSize
void* Chunk::operator new(std::size_t count __attribute__((unused))) noexcept {
  assert(count == sizeof(Chunk));
  void* mem = MapAligned(kChunkSize, kChunkSize);
  if (!mem) {
    abort();  // throw std::bad_alloc;
  }

  return mem;
}

// Override delete operator on chunk to munmap the kChunkSize region
void Chunk::operator delete(void* ptr) {
  assert(reinterpret_cast<Chunk*>(ptr) == ptr_to_chunk(ptr));
  munmap(ptr, kChunkSize);
}

Chunk::Chunk(HeapImpl* heap, int bucket)
    : node_(this),
      heap_(heap),
      bucket_(bucket),
      allocation_size_(bucket_to_size(bucket)),
      max_allocations_(kUsableChunkSize / allocation_size_),
      first_free_bitmap_(0),
      free_count_(max_allocations_),
      frees_since_purge_(0) {
  memset(dirty_pages_, 0, sizeof(dirty_pages_));
  memset(free_bitmap_, 0xff, sizeof(free_bitmap_));
}

bool Chunk::Empty() {
  return free_count_ == max_allocations_;
}

void* Chunk::Alloc() {
  assert(free_count_ > 0);

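  // Scan for the first 32-bit bitmap word with a free bit, starting from the
  // cached index; words below first_free_bitmap_ are known to be full.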
  unsigned int i = first_free_bitmap_;
  while (free_bitmap_[i] == 0)
    i++;
  assert(i < ARRAY_SIZE(free_bitmap_));
  unsigned int bit = __builtin_ffs(free_bitmap_[i]) - 1;
  assert(free_bitmap_[i] & (1U << bit));
  free_bitmap_[i] &= ~(1U << bit);
  unsigned int n = i * 32 + bit;
  assert(n < max_allocations_);

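  // Mark the page backing this slot dirty. The dirty-page bitmap is
  // bookkeeping that a future Purge implementation could use to skip pages
  // that were never written.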
  unsigned int page = n * allocation_size_ / kPageSize;
  assert(page / 32 < ARRAY_SIZE(dirty_pages_));
  dirty_pages_[page / 32] |= 1U << (page % 32);

  free_count_--;
  if (free_count_ == 0) {
    heap_->MoveToFullList(this, bucket_);
  }

  return n_to_ptr(n);
}

void Chunk::Free(void* ptr) {
  assert(is_chunk(ptr));
  assert(ptr_to_chunk(ptr) == this);

  unsigned int n = ptr_to_n(ptr);
  unsigned int i = n / 32;
  unsigned int bit = n % 32;

  assert(i < ARRAY_SIZE(free_bitmap_));
  assert(!(free_bitmap_[i] & (1U << bit)));
  free_bitmap_[i] |= 1U << bit;
  free_count_++;

  if (i < first_free_bitmap_) {
    first_free_bitmap_ = i;
  }

  if (free_count_ == 1) {
    heap_->MoveToFreeList(this, bucket_);
  } else {
    // TODO(ccross): move down free list if necessary
  }

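  // Heuristic: once roughly 16 pages' worth of allocations have been freed
  // since the last purge, trigger another Purge().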
  if (frees_since_purge_++ * allocation_size_ > 16 * kPageSize) {
    Purge();
  }
}

void Chunk::Purge() {
  frees_since_purge_ = 0;

  // Purging pages back to the kernel is not implemented yet; this only
  // resets the counter that triggers it.
}
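
// A possible Purge implementation (a sketch, not what this file does today):
// walk dirty_pages_, and for each dirty page whose slots are all free
// according to free_bitmap_, call madvise(page, kPageSize, MADV_DONTNEED)
// and clear the page's dirty bit.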

// Override new operator on HeapImpl to use mmap to allocate a page
void* HeapImpl::operator new(std::size_t count __attribute__((unused))) noexcept {
  assert(count == sizeof(HeapImpl));
  void* mem = MapAligned(kPageSize, kPageSize);
  if (!mem) {
    abort();  // throw std::bad_alloc;
  }

  heap_count++;
  return mem;
}

void HeapImpl::operator delete(void* ptr) {
  munmap(ptr, kPageSize);
}

HeapImpl::HeapImpl() : free_chunks_(), full_chunks_(), map_allocation_list_(NULL) {
}

bool HeapImpl::Empty() {
  for (unsigned int i = 0; i < kNumBuckets; i++) {
    for (LinkedList<Chunk*>* it = free_chunks_[i].next(); it->data() != NULL; it = it->next()) {
      if (!it->data()->Empty()) {
        return false;
      }
    }
    for (LinkedList<Chunk*>* it = full_chunks_[i].next(); it->data() != NULL; it = it->next()) {
      if (!it->data()->Empty()) {
        return false;
      }
    }
  }

  return true;
}

HeapImpl::~HeapImpl() {
  for (unsigned int i = 0; i < kNumBuckets; i++) {
    while (!free_chunks_[i].empty()) {
      Chunk* chunk = free_chunks_[i].next()->data();
      chunk->node_.remove();
      delete chunk;
    }
    while (!full_chunks_[i].empty()) {
      Chunk* chunk = full_chunks_[i].next()->data();
      chunk->node_.remove();
      delete chunk;
    }
  }
}

void* HeapImpl::Alloc(size_t size) {
  std::lock_guard<std::mutex> lk(m_);
  return AllocLocked(size);
}

void* HeapImpl::AllocLocked(size_t size) {
  if (size > kMaxBucketAllocationSize) {
    return MapAlloc(size);
  }
  int bucket = size_to_bucket(size);
  if (free_chunks_[bucket].empty()) {
    Chunk* chunk = new Chunk(this, bucket);
    free_chunks_[bucket].insert(chunk->node_);
  }
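  // free_chunks_ is kept sorted by ascending free count (see MoveToList), so
  // the first entry is the fullest chunk that still has a free slot.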
  return free_chunks_[bucket].next()->data()->Alloc();
}

void HeapImpl::Free(void* ptr) {
  std::lock_guard<std::mutex> lk(m_);
  FreeLocked(ptr);
}

void HeapImpl::FreeLocked(void* ptr) {
  if (!Chunk::is_chunk(ptr)) {
    HeapImpl::MapFree(ptr);
  } else {
    Chunk* chunk = Chunk::ptr_to_chunk(ptr);
    assert(chunk->heap() == this);
    chunk->Free(ptr);
  }
}

void* HeapImpl::MapAlloc(size_t size) {
  size = (size + kPageSize - 1) & ~(kPageSize - 1);

  MapAllocation* allocation =
      reinterpret_cast<MapAllocation*>(AllocLocked(sizeof(MapAllocation)));
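  // Map large allocations with kChunkSize alignment so Chunk::is_chunk()
  // classifies their pointers as non-chunk (all low bits zero).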
  void* ptr = MapAligned(size, kChunkSize);
  if (!ptr) {
    FreeLocked(allocation);
    abort();  // throw std::bad_alloc;
  }
  allocation->ptr = ptr;
  allocation->size = size;
  allocation->next = map_allocation_list_;
  map_allocation_list_ = allocation;

  return ptr;
}

void HeapImpl::MapFree(void* ptr) {
  MapAllocation** allocation = &map_allocation_list_;
  while (*allocation && (*allocation)->ptr != ptr)
    allocation = &(*allocation)->next;

  assert(*allocation != nullptr);

  MapAllocation* to_free = *allocation;
  munmap(to_free->ptr, to_free->size);
  // Unlink the tracking node before freeing it, so the list never points at
  // freed memory.
  *allocation = to_free->next;
  FreeLocked(to_free);
}

void HeapImpl::MoveToFreeList(Chunk* chunk, int bucket) {
  MoveToList(chunk, &free_chunks_[bucket]);
}

void HeapImpl::MoveToFullList(Chunk* chunk, int bucket) {
  MoveToList(chunk, &full_chunks_[bucket]);
}

void HeapImpl::MoveToList(Chunk* chunk, LinkedList<Chunk*>* head) {
  // Remove from old list
  chunk->node_.remove();

  // Insert into the new list, keeping it sorted by ascending free count; the
  // head node is a sentinel whose data() is null, so the comparison looks at
  // node->next().
  LinkedList<Chunk*>* node = head;
  while (node->next() != head &&
         node->next()->data()->free_count() < chunk->free_count())
    node = node->next();

  node->insert(chunk->node_);
}

Heap::Heap() {
  // HeapImpl overloads the operator new in order to mmap itself instead of
  // allocating with new.
  // Can't use a shared_ptr to store the result because shared_ptr needs to
  // allocate, and Allocator<T> is still being constructed.
  impl_ = new HeapImpl();
  owns_impl_ = true;
}

Heap::~Heap() {
  if (owns_impl_) {
    delete impl_;
  }
}

void* Heap::allocate(size_t size) {
  return impl_->Alloc(size);
}

void Heap::deallocate(void* ptr) {
  impl_->Free(ptr);
}

void Heap::deallocate(HeapImpl* impl, void* ptr) {
  impl->Free(ptr);
}

bool Heap::empty() {
  return impl_->Empty();
}
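
// Usage sketch for the Heap interface above (Heap is declared in
// Allocator.h; the Allocator<T> wrapper there builds on these calls):
//   Heap heap;
//   void* small = heap.allocate(100);      // rounds up to the 128-byte bucket
//   void* large = heap.allocate(1 << 20);  // > 64KiB, mapped directly
//   heap.deallocate(small);
//   heap.deallocate(large);
//   assert(heap.empty());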
    481