Home | History | Annotate | Download | only in bionic
      1 /*
      2  * Copyright (C) 2015 The Android Open Source Project
      3  * All rights reserved.
      4  *
      5  * Redistribution and use in source and binary forms, with or without
      6  * modification, are permitted provided that the following conditions
      7  * are met:
      8  *  * Redistributions of source code must retain the above copyright
      9  *    notice, this list of conditions and the following disclaimer.
     10  *  * Redistributions in binary form must reproduce the above copyright
     11  *    notice, this list of conditions and the following disclaimer in
     12  *    the documentation and/or other materials provided with the
     13  *    distribution.
     14  *
     15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     16  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     17  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
     18  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
     19  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
     20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
     21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
     22  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
     23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
     24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
     25  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     26  * SUCH DAMAGE.
     27  */
     28 
     29 #include "private/bionic_allocator.h"
     30 
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/prctl.h>
#include <unistd.h>
     37 
     38 #include <new>
     39 
     40 #include <async_safe/log.h>
     41 #include <async_safe/CHECK.h>
     42 
     43 #include "private/bionic_macros.h"
     44 #include "private/bionic_page.h"
     45 
     46 //
     47 // BionicAllocator is a general purpose allocator designed to provide the same
     48 // functionality as the malloc/free/realloc libc functions.
     49 //
     50 // On alloc:
// If size is > 1k allocator proxies malloc call directly to mmap.
// If size is <= 1k allocator uses SmallObjectAllocator for the size
// rounded up to the nearest power of two.
     54 //
     55 // On free:
     56 //
     57 // For a pointer allocated using proxy-to-mmap allocator unmaps
     58 // the memory.
     59 //
     60 // For a pointer allocated using SmallObjectAllocator it adds
     61 // the block to free_blocks_list in the corresponding page. If the number of
     62 // free pages reaches 2, SmallObjectAllocator munmaps one of the pages keeping
     63 // the other one in reserve.
     64 
     65 // Memory management for large objects is fairly straightforward, but for small
     66 // objects it is more complicated.  If you are changing this code, one simple
     67 // way to evaluate the memory usage change is by running 'dd' and examine the
     68 // memory usage by 'showmap $(pidof dd)'.  'dd' is nice in that:
     69 //   1. It links in quite a few libraries, so you get some linker memory use.
     70 //   2. When run with no arguments, it sits waiting for input, so it is easy to
     71 //      examine its memory usage with showmap.
     72 //   3. Since it does nothing while waiting for input, the memory usage is
//      deterministic.
     74 
// Magic bytes stamped into every page_info header; free()/realloc() verify
// them to reject pointers this allocator never produced.  Presumably "LMA"
// abbreviates the allocator's name with a trailing version byte of 1 —
// TODO confirm against the header that declares page_info.
static const char kSignature[4] = {'L', 'M', 'A', 1};

// Largest request served by the small-object allocators; anything bigger is
// routed straight to mmap (see alloc_impl).
static const size_t kSmallObjectMaxSize = 1 << kSmallObjectMaxSizeLog2;

// This type is used for large allocations (with size >1k)
static const uint32_t kLargeObject = 111;

// Allocated pointers must be at least 16-byte aligned.  Round up the size of
// page_info to multiple of 16.
static constexpr size_t kPageInfoSize = __BIONIC_ALIGN(sizeof(page_info), 16);
     85 
     86 static inline uint16_t log2(size_t number) {
     87   uint16_t result = 0;
     88   number--;
     89 
     90   while (number != 0) {
     91     result++;
     92     number >>= 1;
     93   }
     94 
     95   return result;
     96 }
     97 
// Sets up an allocator handing out fixed-size blocks of block_size bytes.
// `type` is recorded in each page's page_info header so free() can route a
// pointer back to this allocator.  No pages are mapped until the first
// alloc() (page_list_ starts empty).
BionicSmallObjectAllocator::BionicSmallObjectAllocator(uint32_t type,
                                                       size_t block_size)
    : type_(type),
      block_size_(block_size),
      // Each page carries a small_object_page_info header; the remainder of
      // the page is carved into block_size-sized blocks.
      blocks_per_page_((PAGE_SIZE - sizeof(small_object_page_info)) /
                       block_size),
      free_pages_cnt_(0),
      page_list_(nullptr) {}
    106 
// Returns one block_size_-byte block, mapping a fresh page first if no page
// with free blocks is available.  The returned memory is zeroed (by the
// memset below).
void* BionicSmallObjectAllocator::alloc() {
  CHECK(block_size_ != 0);

  if (page_list_ == nullptr) {
    alloc_page();
  }

  // Fully allocated pages are de-managed and removed from the page list, so
  // every page from the page list must be useable.  Let's just take the first
  // one.
  small_object_page_info* page = page_list_;
  CHECK(page->free_block_list != nullptr);

  // A free-list record can describe a run of several contiguous free blocks
  // (free_blocks_cnt > 1).  Take the run's first block; if any blocks
  // remain, write a record for the shortened run into the next block.
  small_object_block_record* const block_record = page->free_block_list;
  if (block_record->free_blocks_cnt > 1) {
    small_object_block_record* next_free =
        reinterpret_cast<small_object_block_record*>(
            reinterpret_cast<uint8_t*>(block_record) + block_size_);
    next_free->next = block_record->next;
    next_free->free_blocks_cnt = block_record->free_blocks_cnt - 1;
    page->free_block_list = next_free;
  } else {
    page->free_block_list = block_record->next;
  }

  // The page was entirely free until this allocation, so it no longer
  // counts toward the fully-free page total.
  if (page->free_blocks_cnt == blocks_per_page_) {
    free_pages_cnt_--;
  }

  page->free_blocks_cnt--;

  // Scrub the free-list record that lived in this block before handing the
  // block out to the caller.
  memset(block_record, 0, block_size_);

  if (page->free_blocks_cnt == 0) {
    // De-manage fully allocated pages.  These pages will be managed again if
    // a block is freed.
    remove_from_page_list(page);
  }

  return block_record;
}
    148 
    149 void BionicSmallObjectAllocator::free_page(small_object_page_info* page) {
    150   CHECK(page->free_blocks_cnt == blocks_per_page_);
    151   if (page->prev_page) {
    152     page->prev_page->next_page = page->next_page;
    153   }
    154   if (page->next_page) {
    155     page->next_page->prev_page = page->prev_page;
    156   }
    157   if (page_list_ == page) {
    158     page_list_ = page->next_page;
    159   }
    160   munmap(page, PAGE_SIZE);
    161   free_pages_cnt_--;
    162 }
    163 
// Returns a block to its page's free list as a run of one free block.
// Aborts on a pointer that is not block_size_-aligned.  If this makes the
// page fully free and another fully free page is already kept in reserve,
// the page is unmapped.
void BionicSmallObjectAllocator::free(void* ptr) {
  // The owning page's header sits at the start of the page containing ptr.
  small_object_page_info* const page =
      reinterpret_cast<small_object_page_info*>(
          PAGE_START(reinterpret_cast<uintptr_t>(ptr)));

  // Blocks are carved at multiples of block_size_ (a power of two) from the
  // page-aligned page start (see alloc_page), so any valid pointer must be
  // block_size_-aligned.
  if (reinterpret_cast<uintptr_t>(ptr) % block_size_ != 0) {
    async_safe_fatal("invalid pointer: %p (block_size=%zd)", ptr, block_size_);
  }

  // Scrub the block, then reuse its first bytes as a free-list record.
  memset(ptr, 0, block_size_);
  small_object_block_record* const block_record =
      reinterpret_cast<small_object_block_record*>(ptr);

  block_record->next = page->free_block_list;
  block_record->free_blocks_cnt = 1;

  page->free_block_list = block_record;
  page->free_blocks_cnt++;

  if (page->free_blocks_cnt == blocks_per_page_) {
    if (++free_pages_cnt_ > 1) {
      // if we already have a free page - unmap this one.
      free_page(page);
    }
  } else if (page->free_blocks_cnt == 1) {
    // We just freed from a full page.  Add this page back to the list.
    add_to_page_list(page);
  }
}
    193 
// Maps a fresh anonymous page, stamps its header (signature, type, owning
// allocator), initializes its free list as a single run covering every
// block, and adds the page to the page list.  Aborts if mmap fails.
void BionicSmallObjectAllocator::alloc_page() {
  void* const map_ptr = mmap(nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (map_ptr == MAP_FAILED) {
    async_safe_fatal("mmap failed: %s", strerror(errno));
  }

  // Name the mapping so it is identifiable in /proc/<pid>/maps (showmap).
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, PAGE_SIZE,
        "bionic_alloc_small_objects");

  small_object_page_info* const page =
      reinterpret_cast<small_object_page_info*>(map_ptr);
  // The embedded page_info lets BionicAllocator::free() validate the
  // signature and route the pointer back to this allocator.
  memcpy(page->info.signature, kSignature, sizeof(kSignature));
  page->info.type = type_;
  page->info.allocator_addr = this;

  page->free_blocks_cnt = blocks_per_page_;

  // Align the first block to block_size_.
  const uintptr_t first_block_addr =
      __BIONIC_ALIGN(reinterpret_cast<uintptr_t>(page + 1), block_size_);
  small_object_block_record* const first_block =
      reinterpret_cast<small_object_block_record*>(first_block_addr);

  // One record describes the whole page: a run of blocks_per_page_
  // contiguous free blocks.
  first_block->next = nullptr;
  first_block->free_blocks_cnt = blocks_per_page_;

  page->free_block_list = first_block;

  add_to_page_list(page);

  free_pages_cnt_++;
}
    227 
    228 void BionicSmallObjectAllocator::add_to_page_list(small_object_page_info* page) {
    229   page->next_page = page_list_;
    230   page->prev_page = nullptr;
    231   if (page_list_) {
    232     page_list_->prev_page = page;
    233   }
    234   page_list_ = page;
    235 }
    236 
    237 void BionicSmallObjectAllocator::remove_from_page_list(
    238     small_object_page_info* page) {
    239   if (page->prev_page) {
    240     page->prev_page->next_page = page->next_page;
    241   }
    242   if (page->next_page) {
    243     page->next_page->prev_page = page->prev_page;
    244   }
    245   if (page_list_ == page) {
    246     page_list_ = page->next_page;
    247   }
    248   page->prev_page = nullptr;
    249   page->next_page = nullptr;
    250 }
    251 
    252 void BionicAllocator::initialize_allocators() {
    253   if (allocators_ != nullptr) {
    254     return;
    255   }
    256 
    257   BionicSmallObjectAllocator* allocators =
    258       reinterpret_cast<BionicSmallObjectAllocator*>(allocators_buf_);
    259 
    260   for (size_t i = 0; i < kSmallObjectAllocatorsCount; ++i) {
    261     uint32_t type = i + kSmallObjectMinSizeLog2;
    262     new (allocators + i) BionicSmallObjectAllocator(type, 1 << type);
    263   }
    264 
    265   allocators_ = allocators;
    266 }
    267 
// Services a large allocation with a dedicated anonymous mapping.  The
// page_info header is placed at the start of the mapping and the returned
// pointer is offset past it by header_size, which also provides the
// requested alignment (the mapping itself is page-aligned).  Aborts on
// size overflow or mmap failure.
void* BionicAllocator::alloc_mmap(size_t align, size_t size) {
  // Reserve room for page_info while keeping the user pointer aligned.
  size_t header_size = __BIONIC_ALIGN(kPageInfoSize, align);
  size_t allocated_size;
  // Guard both the addition and the subsequent page rounding against
  // size_t overflow.
  if (__builtin_add_overflow(header_size, size, &allocated_size) ||
      PAGE_END(allocated_size) < allocated_size) {
    async_safe_fatal("overflow trying to alloc %zu bytes", size);
  }
  allocated_size = PAGE_END(allocated_size);
  void* map_ptr = mmap(nullptr, allocated_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS,
                       -1, 0);

  if (map_ptr == MAP_FAILED) {
    async_safe_fatal("mmap failed: %s", strerror(errno));
  }

  // Name the mapping so it is identifiable in /proc/<pid>/maps (showmap).
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, allocated_size, "bionic_alloc_lob");

  void* result = static_cast<char*>(map_ptr) + header_size;
  // Stamp the header so free()/realloc() can recognize and size this
  // allocation later.
  page_info* info = get_page_info_unchecked(result);
  memcpy(info->signature, kSignature, sizeof(kSignature));
  info->type = kLargeObject;
  info->allocated_size = allocated_size;

  return result;
}
    293 
    294 
    295 inline void* BionicAllocator::alloc_impl(size_t align, size_t size) {
    296   if (size > kSmallObjectMaxSize) {
    297     return alloc_mmap(align, size);
    298   }
    299 
    300   uint16_t log2_size = log2(size);
    301 
    302   if (log2_size < kSmallObjectMinSizeLog2) {
    303     log2_size = kSmallObjectMinSizeLog2;
    304   }
    305 
    306   return get_small_object_allocator(log2_size)->alloc();
    307 }
    308 
    309 void* BionicAllocator::alloc(size_t size) {
    310   // treat alloc(0) as alloc(1)
    311   if (size == 0) {
    312     size = 1;
    313   }
    314   return alloc_impl(16, size);
    315 }
    316 
    317 void* BionicAllocator::memalign(size_t align, size_t size) {
    318   // The Bionic allocator only supports alignment up to one page, which is good
    319   // enough for ELF TLS.
    320   align = MIN(align, PAGE_SIZE);
    321   align = MAX(align, 16);
    322   if (!powerof2(align)) {
    323     align = BIONIC_ROUND_UP_POWER_OF_2(align);
    324   }
    325   size = MAX(size, align);
    326   return alloc_impl(align, size);
    327 }
    328 
    329 inline page_info* BionicAllocator::get_page_info_unchecked(void* ptr) {
    330   uintptr_t header_page = PAGE_START(reinterpret_cast<size_t>(ptr) - kPageInfoSize);
    331   return reinterpret_cast<page_info*>(header_page);
    332 }
    333 
    334 inline page_info* BionicAllocator::get_page_info(void* ptr) {
    335   page_info* info = get_page_info_unchecked(ptr);
    336   if (memcmp(info->signature, kSignature, sizeof(kSignature)) != 0) {
    337     async_safe_fatal("invalid pointer %p (page signature mismatch)", ptr);
    338   }
    339 
    340   return info;
    341 }
    342 
    343 void* BionicAllocator::realloc(void* ptr, size_t size) {
    344   if (ptr == nullptr) {
    345     return alloc(size);
    346   }
    347 
    348   if (size == 0) {
    349     free(ptr);
    350     return nullptr;
    351   }
    352 
    353   page_info* info = get_page_info(ptr);
    354 
    355   size_t old_size = 0;
    356 
    357   if (info->type == kLargeObject) {
    358     old_size = info->allocated_size - (static_cast<char*>(ptr) - reinterpret_cast<char*>(info));
    359   } else {
    360     BionicSmallObjectAllocator* allocator = get_small_object_allocator(info->type);
    361     if (allocator != info->allocator_addr) {
    362       async_safe_fatal("invalid pointer %p (page signature mismatch)", ptr);
    363     }
    364 
    365     old_size = allocator->get_block_size();
    366   }
    367 
    368   if (old_size < size) {
    369     void *result = alloc(size);
    370     memcpy(result, ptr, old_size);
    371     free(ptr);
    372     return result;
    373   }
    374 
    375   return ptr;
    376 }
    377 
    378 void BionicAllocator::free(void* ptr) {
    379   if (ptr == nullptr) {
    380     return;
    381   }
    382 
    383   page_info* info = get_page_info(ptr);
    384 
    385   if (info->type == kLargeObject) {
    386     munmap(info, info->allocated_size);
    387   } else {
    388     BionicSmallObjectAllocator* allocator = get_small_object_allocator(info->type);
    389     if (allocator != info->allocator_addr) {
    390       async_safe_fatal("invalid pointer %p (invalid allocator address for the page)", ptr);
    391     }
    392 
    393     allocator->free(ptr);
    394   }
    395 }
    396 
    397 BionicSmallObjectAllocator* BionicAllocator::get_small_object_allocator(uint32_t type) {
    398   if (type < kSmallObjectMinSizeLog2 || type > kSmallObjectMaxSizeLog2) {
    399     async_safe_fatal("invalid type: %u", type);
    400   }
    401 
    402   initialize_allocators();
    403   return &allocators_[type - kSmallObjectMinSizeLog2];
    404 }
    405