/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Header page:
//
// For the minimum allocation size (8 bytes), the free bitmap can track
// allocations covering up to 4032*8*8 = 258048 bytes, which is 256KiB minus
// the header page.
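//
// Worked numbers for the constants below: each chunk is kChunkSize = 256KiB
// and its first kPageSize = 4KiB page holds the Chunk header (including both
// bitmaps), leaving kUsableChunkSize = 262144 - 4096 = 258048 bytes for
// allocations.  At 8 bytes per allocation that is 258048 / 8 = 32256 slots,
// which needs 32256 / 8 = 4032 bytes of free bitmap.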

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include <sys/cdefs.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#include <atomic>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <mutex>

#include "android-base/macros.h"

#include "anon_vma_naming.h"
#include "Allocator.h"
#include "LinkedList.h"

// runtime interfaces used:
// abort
// assert - fprintf + mmap
// mmap
// munmap
// prctl

constexpr size_t const_log2(size_t n, size_t p = 0) {
  return (n <= 1) ? p : const_log2(n / 2, p + 1);
}

constexpr unsigned int div_round_up(unsigned int x, unsigned int y) {
  return (x + y - 1) / y;
}
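
// For example, const_log2(8) == 3 and const_log2(64 * 1024) == 16; both
// helpers run at compile time and are used to size the tables below, e.g.
// div_round_up(63, 32) == 2 is the number of 32-bit words needed to hold a
// 63-bit page bitmap.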

static constexpr size_t kPageSize = 4096;
static constexpr size_t kChunkSize = 256 * 1024;
static constexpr size_t kUsableChunkSize = kChunkSize - kPageSize;
static constexpr size_t kMaxBucketAllocationSize = kChunkSize / 4;
static constexpr size_t kMinBucketAllocationSize = 8;
static constexpr unsigned int kNumBuckets = const_log2(kMaxBucketAllocationSize)
    - const_log2(kMinBucketAllocationSize) + 1;
static constexpr unsigned int kUsablePagesPerChunk = kUsableChunkSize
    / kPageSize;
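
// With the values above: kMaxBucketAllocationSize = 64KiB, so
// kNumBuckets = 16 - 3 + 1 = 14 (allocation sizes 8, 16, ..., 64KiB), and
// kUsablePagesPerChunk = 258048 / 4096 = 63.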

std::atomic<int> heap_count;

class Chunk;

class HeapImpl {
 public:
  HeapImpl();
  ~HeapImpl();
  void* operator new(std::size_t count) noexcept;
  void operator delete(void* ptr);

  void* Alloc(size_t size);
  void Free(void* ptr);
  bool Empty();

  void MoveToFullList(Chunk* chunk, int bucket_);
  void MoveToFreeList(Chunk* chunk, int bucket_);

 private:
  DISALLOW_COPY_AND_ASSIGN(HeapImpl);

  LinkedList<Chunk*> free_chunks_[kNumBuckets];
  LinkedList<Chunk*> full_chunks_[kNumBuckets];

  void MoveToList(Chunk* chunk, LinkedList<Chunk*>* head);
  void* MapAlloc(size_t size);
  void MapFree(void* ptr);
  void* AllocLocked(size_t size);
  void FreeLocked(void* ptr);

  struct MapAllocation {
    void* ptr;
    size_t size;
    MapAllocation* next;
  };
  MapAllocation* map_allocation_list_;
  std::mutex m_;
};

// Integer log 2, rounds down
static inline unsigned int log2(size_t n) {
  return 8 * sizeof(unsigned long long) - __builtin_clzll(n) - 1;
}

// Returns the 0-based bucket index whose allocation size is the smallest
// bucket size >= size.
static inline unsigned int size_to_bucket(size_t size) {
  if (size < kMinBucketAllocationSize) return 0;
  return log2(size - 1) + 1 - const_log2(kMinBucketAllocationSize);
}

static inline size_t bucket_to_size(unsigned int bucket) {
  return kMinBucketAllocationSize << bucket;
}
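
// For example, sizes 1..8 map to bucket 0 (8-byte slots), 9..16 to bucket 1,
// and 32769..65536 to bucket 13; bucket_to_size() is the inverse, e.g.
// bucket_to_size(13) == 64 * 1024.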

static void* MapAligned(size_t size, size_t align) {
  const int prot = PROT_READ | PROT_WRITE;
  const int flags = MAP_ANONYMOUS | MAP_PRIVATE;

  size = (size + kPageSize - 1) & ~(kPageSize - 1);

  // Over-allocate enough to align
  size_t map_size = size + align - kPageSize;
  if (map_size < size) {
    return nullptr;
  }

  void* ptr = mmap(NULL, map_size, prot, flags, -1, 0);
  if (ptr == MAP_FAILED) {
    return nullptr;
  }

  size_t aligned_size = map_size;
  void* aligned_ptr = ptr;

  std::align(align, size, aligned_ptr, aligned_size);

  // Trim beginning
  if (aligned_ptr != ptr) {
    ptrdiff_t extra = reinterpret_cast<uintptr_t>(aligned_ptr)
        - reinterpret_cast<uintptr_t>(ptr);
    munmap(ptr, extra);
    map_size -= extra;
    ptr = aligned_ptr;
  }

  // Trim end
  if (map_size != size) {
    assert(map_size > size);
    assert(ptr != NULL);
    munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) + size),
        map_size - size);
  }

#define PR_SET_VMA   0x53564d41
#define PR_SET_VMA_ANON_NAME    0
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
      reinterpret_cast<uintptr_t>(ptr), size, "leak_detector_malloc");

  return ptr;
}
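
// For example, MapAligned(kChunkSize, kChunkSize) maps 2 * 256KiB - 4KiB,
// keeps the first 256KiB-aligned 256KiB inside that range, and unmaps the
// rest, so every Chunk starts on a 256KiB boundary.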

class Chunk {
 public:
  static void* operator new(std::size_t count) noexcept;
  static void operator delete(void* ptr);
  Chunk(HeapImpl* heap, int bucket);
  ~Chunk() {}

  void* Alloc();
  void Free(void* ptr);
  void Purge();
  bool Empty();

  static Chunk* ptr_to_chunk(void* ptr) {
    return reinterpret_cast<Chunk*>(reinterpret_cast<uintptr_t>(ptr)
        & ~(kChunkSize - 1));
  }
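  // Chunks are kChunkSize aligned, so a pointer returned by Chunk::Alloc()
  // (which points past the Chunk header) never has all of its low bits clear,
  // while a mapping returned by MapAlloc() does.  HeapImpl::FreeLocked() uses
  // this to route a pointer to the right free path.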
  static bool is_chunk(void* ptr) {
    return (reinterpret_cast<uintptr_t>(ptr) & (kChunkSize - 1)) != 0;
  }

  unsigned int free_count() {
    return free_count_;
  }
  HeapImpl* heap() {
    return heap_;
  }
  LinkedList<Chunk*> node_;  // linked list sorted by minimum free count

 private:
  DISALLOW_COPY_AND_ASSIGN(Chunk);
  HeapImpl* heap_;
  unsigned int bucket_;
  unsigned int allocation_size_;    // size of allocations in chunk, min 8 bytes
  unsigned int max_allocations_;    // maximum number of allocations in the chunk
  unsigned int first_free_bitmap_;  // index into bitmap for first non-full entry
  unsigned int free_count_;         // number of available allocations
  unsigned int frees_since_purge_;  // number of calls to Free since last Purge

  // bitmap of pages that have been dirtied
  uint32_t dirty_pages_[div_round_up(kUsablePagesPerChunk, 32)];

  // bitmap of free allocations.
  uint32_t free_bitmap_[kUsableChunkSize / kMinBucketAllocationSize / 32];

  char data_[0];

  unsigned int ptr_to_n(void* ptr) {
    ptrdiff_t offset = reinterpret_cast<uintptr_t>(ptr)
        - reinterpret_cast<uintptr_t>(data_);
    return offset / allocation_size_;
  }
  void* n_to_ptr(unsigned int n) {
    return data_ + n * allocation_size_;
  }
};
static_assert(sizeof(Chunk) <= kPageSize, "header must fit in page");

// Override the Chunk new operator to use mmap to allocate kChunkSize
void* Chunk::operator new(std::size_t count __attribute__((unused))) noexcept {
  assert(count == sizeof(Chunk));
  void* mem = MapAligned(kChunkSize, kChunkSize);
  if (!mem) {
    abort();  // throw std::bad_alloc;
  }

  return mem;
}

// Override the Chunk delete operator to unmap the kChunkSize mapping
void Chunk::operator delete(void* ptr) {
  assert(reinterpret_cast<Chunk*>(ptr) == ptr_to_chunk(ptr));
  munmap(ptr, kChunkSize);
}

Chunk::Chunk(HeapImpl* heap, int bucket)
    : node_(this),
      heap_(heap),
      bucket_(bucket),
      allocation_size_(bucket_to_size(bucket)),
      max_allocations_(kUsableChunkSize / allocation_size_),
      first_free_bitmap_(0),
      free_count_(max_allocations_),
      frees_since_purge_(0) {
  memset(dirty_pages_, 0, sizeof(dirty_pages_));
  memset(free_bitmap_, 0xff, sizeof(free_bitmap_));
}

bool Chunk::Empty() {
  return free_count_ == max_allocations_;
}

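// Claim the first free slot: scan free_bitmap_ from first_free_bitmap_ for a
// word with a set bit, clear its lowest set bit, mark the page containing the
// slot dirty, and move the chunk to the full list when the last slot is taken.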
void* Chunk::Alloc() {
  assert(free_count_ > 0);

  unsigned int i = first_free_bitmap_;
  while (free_bitmap_[i] == 0)
    i++;
  assert(i < arraysize(free_bitmap_));
  unsigned int bit = __builtin_ffs(free_bitmap_[i]) - 1;
  assert(free_bitmap_[i] & (1U << bit));
  free_bitmap_[i] &= ~(1U << bit);
  unsigned int n = i * 32 + bit;
  assert(n < max_allocations_);

  unsigned int page = n * allocation_size_ / kPageSize;
  assert(page / 32 < arraysize(dirty_pages_));
  dirty_pages_[page / 32] |= 1U << (page % 32);

  free_count_--;
  if (free_count_ == 0) {
    heap_->MoveToFullList(this, bucket_);
  }

  return n_to_ptr(n);
}

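// Release a slot: set its bit in free_bitmap_, pull first_free_bitmap_ back if
// needed, move the chunk back onto its free list when it was previously full,
// and purge after roughly 64KiB worth of slots have been freed.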
void Chunk::Free(void* ptr) {
  assert(is_chunk(ptr));
  assert(ptr_to_chunk(ptr) == this);

  unsigned int n = ptr_to_n(ptr);
  unsigned int i = n / 32;
  unsigned int bit = n % 32;

  assert(i < arraysize(free_bitmap_));
  assert(!(free_bitmap_[i] & (1U << bit)));
  free_bitmap_[i] |= 1U << bit;
  free_count_++;

  if (i < first_free_bitmap_) {
    first_free_bitmap_ = i;
  }

  if (free_count_ == 1) {
    heap_->MoveToFreeList(this, bucket_);
  } else {
    // TODO(ccross): move down free list if necessary
  }

  if (frees_since_purge_++ * allocation_size_ > 16 * kPageSize) {
    Purge();
  }
}

void Chunk::Purge() {
  frees_since_purge_ = 0;

  // unsigned int allocsPerPage = kPageSize / allocation_size_;
}
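
// The dirty_pages_ bitmap and frees_since_purge_ counter suggest the intent
// here is to eventually madvise(MADV_DONTNEED) pages whose allocations have
// all been freed back to the kernel, but that pass is not implemented.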

// Override new operator on HeapImpl to use mmap to allocate a page
void* HeapImpl::operator new(std::size_t count __attribute__((unused)))
    noexcept {
  assert(count == sizeof(HeapImpl));
  void* mem = MapAligned(kPageSize, kPageSize);
  if (!mem) {
    abort();  // throw std::bad_alloc;
  }

  heap_count++;
  return mem;
}

void HeapImpl::operator delete(void* ptr) {
  munmap(ptr, kPageSize);
}

HeapImpl::HeapImpl() :
    free_chunks_(), full_chunks_(), map_allocation_list_(NULL) {
}

bool HeapImpl::Empty() {
  for (unsigned int i = 0; i < kNumBuckets; i++) {
    for (LinkedList<Chunk*>* it = free_chunks_[i].next(); it->data() != NULL; it = it->next()) {
      if (!it->data()->Empty()) {
        return false;
      }
    }
    for (LinkedList<Chunk*>* it = full_chunks_[i].next(); it->data() != NULL; it = it->next()) {
      if (!it->data()->Empty()) {
        return false;
      }
    }
  }

  return true;
}

HeapImpl::~HeapImpl() {
  for (unsigned int i = 0; i < kNumBuckets; i++) {
    while (!free_chunks_[i].empty()) {
      Chunk* chunk = free_chunks_[i].next()->data();
      chunk->node_.remove();
      delete chunk;
    }
    while (!full_chunks_[i].empty()) {
      Chunk* chunk = full_chunks_[i].next()->data();
      chunk->node_.remove();
      delete chunk;
    }
  }
}

void* HeapImpl::Alloc(size_t size) {
  std::lock_guard<std::mutex> lk(m_);
  return AllocLocked(size);
}

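// Requests larger than kMaxBucketAllocationSize (64KiB) go straight to mmap
// via MapAlloc(); everything else is rounded up to a power-of-two bucket and
// served from the first chunk on that bucket's free list, creating a new
// chunk if the list is empty.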
void* HeapImpl::AllocLocked(size_t size) {
  if (size > kMaxBucketAllocationSize) {
    return MapAlloc(size);
  }
  int bucket = size_to_bucket(size);
  if (free_chunks_[bucket].empty()) {
    Chunk* chunk = new Chunk(this, bucket);
    free_chunks_[bucket].insert(chunk->node_);
  }
  return free_chunks_[bucket].next()->data()->Alloc();
}

void HeapImpl::Free(void* ptr) {
  std::lock_guard<std::mutex> lk(m_);
  FreeLocked(ptr);
}

void HeapImpl::FreeLocked(void* ptr) {
  if (!Chunk::is_chunk(ptr)) {
    HeapImpl::MapFree(ptr);
  } else {
    Chunk* chunk = Chunk::ptr_to_chunk(ptr);
    assert(chunk->heap() == this);
    chunk->Free(ptr);
  }
}

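// Large allocations get their own kChunkSize-aligned mapping; each one is
// tracked by a MapAllocation record that is itself allocated from the bucket
// heap and pushed onto a singly linked list so MapFree() can later find the
// mapping's size.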
void* HeapImpl::MapAlloc(size_t size) {
  size = (size + kPageSize - 1) & ~(kPageSize - 1);

  MapAllocation* allocation = reinterpret_cast<MapAllocation*>(AllocLocked(
      sizeof(MapAllocation)));
  void* ptr = MapAligned(size, kChunkSize);
  if (!ptr) {
    FreeLocked(allocation);
    abort();  // throw std::bad_alloc;
  }
  allocation->ptr = ptr;
  allocation->size = size;
  allocation->next = map_allocation_list_;
  map_allocation_list_ = allocation;

  return ptr;
}

void HeapImpl::MapFree(void* ptr) {
  MapAllocation** allocation = &map_allocation_list_;
  while (*allocation && (*allocation)->ptr != ptr)
    allocation = &(*allocation)->next;

  assert(*allocation != nullptr);

  munmap((*allocation)->ptr, (*allocation)->size);

  // Unlink the tracking record before returning it to the heap so it is not
  // read again after it has been freed.
  MapAllocation* to_free = *allocation;
  *allocation = to_free->next;
  FreeLocked(to_free);
}

void HeapImpl::MoveToFreeList(Chunk* chunk, int bucket) {
  MoveToList(chunk, &free_chunks_[bucket]);
}

void HeapImpl::MoveToFullList(Chunk* chunk, int bucket) {
  MoveToList(chunk, &full_chunks_[bucket]);
}

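// Both lists are kept sorted by ascending free count, so AllocLocked() always
// fills the most-used chunk on a bucket's free list first, concentrating live
// allocations in as few chunks as possible.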
void HeapImpl::MoveToList(Chunk* chunk, LinkedList<Chunk*>* head) {
  // Remove from old list
  chunk->node_.remove();

  LinkedList<Chunk*>* node = head;
  // Insert into new list, sorted by lowest free count
  while (node->next() != head && node->data() != nullptr
      && node->data()->free_count() < chunk->free_count())
    node = node->next();

  node->insert(chunk->node_);
}

Heap::Heap() {
  // HeapImpl overloads the operator new in order to mmap itself instead of
  // allocating with new.
  // Can't use a shared_ptr to store the result because shared_ptr needs to
  // allocate, and Allocator<T> is still being constructed.
  impl_ = new HeapImpl();
  owns_impl_ = true;
}

Heap::~Heap() {
  if (owns_impl_) {
    delete impl_;
  }
}

void* Heap::allocate(size_t size) {
  return impl_->Alloc(size);
}

void Heap::deallocate(void* ptr) {
  impl_->Free(ptr);
}

void Heap::deallocate(HeapImpl* impl, void* ptr) {
  impl->Free(ptr);
}

bool Heap::empty() {
  return impl_->Empty();
}
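
// Typical use: Heap is wrapped by the STL-style Allocator<T> declared in
// Allocator.h, so the leak detector's own containers draw their memory from
// these mmap'd chunks rather than from the malloc heap being inspected.
// A minimal sketch, assuming the Allocator<T> interface from Allocator.h:
//
//   Heap heap;
//   Allocator<int> alloc(heap);
//   int* p = alloc.allocate(1);
//   alloc.deallocate(p, 1);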
    479