/*
 * Copyright (C) 2015 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_allocator.h"
#include "linker_debug.h"
#include "linker.h"

#include <algorithm>
#include <vector>

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include <async_safe/log.h>

#include "private/bionic_prctl.h"

//
// LinkerMemoryAllocator is a general purpose allocator designed to provide
// the same functionality as the malloc/free/realloc libc functions.
//
// On alloc:
// If size is larger than 1k the allocator proxies the malloc call directly
// to mmap.
// If size is 1k or smaller the allocator uses a LinkerSmallObjectAllocator
// for the size rounded up to the nearest power of two.
//
// On free:
//
// For a pointer allocated using the proxy-to-mmap path the allocator
// unmaps the memory.
//
// For a pointer allocated using a LinkerSmallObjectAllocator it adds
// the block to free_blocks_list_. If the number of free pages reaches 2,
// the LinkerSmallObjectAllocator munmaps one of the pages, keeping the
// other one in reserve.

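// Illustrative usage sketch of the interface implemented below (the local
// `allocator` instance is hypothetical and only for illustration):
//
//   LinkerMemoryAllocator allocator;
//   void* small = allocator.alloc(100);      // 1k or smaller: rounded up to a 128-byte block
//   void* large = allocator.alloc(4000);     // larger than 1k: dedicated mmap-backed object
//   small = allocator.realloc(small, 2000);  // grows past 1k: copied into a new large object
//   allocator.free(small);
//   allocator.free(large);
//
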
static const char kSignature[4] = {'L', 'M', 'A', 1};

static const size_t kSmallObjectMaxSize = 1 << kSmallObjectMaxSizeLog2;

// This type is used for large allocations (with size > 1k)
static const uint32_t kLargeObject = 111;

bool operator<(const small_object_page_record& one, const small_object_page_record& two) {
  return one.page_addr < two.page_addr;
}

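// Returns ceil(log2(number)) for number >= 1; used to map a requested size to
// the smallest power-of-two block size class that can hold it.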
static inline uint16_t log2(size_t number) {
  uint16_t result = 0;
  number--;

  while (number != 0) {
    result++;
    number >>= 1;
  }

  return result;
}

LinkerSmallObjectAllocator::LinkerSmallObjectAllocator(uint32_t type, size_t block_size)
    : type_(type), block_size_(block_size), free_pages_cnt_(0), free_blocks_list_(nullptr) {}

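// Hands out one block from the head of free_blocks_list_, mapping a fresh page
// first if the list is empty, and updates the owning page's block counters.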
void* LinkerSmallObjectAllocator::alloc() {
  CHECK(block_size_ != 0);

  if (free_blocks_list_ == nullptr) {
    alloc_page();
  }

  small_object_block_record* block_record = free_blocks_list_;
  if (block_record->free_blocks_cnt > 1) {
    small_object_block_record* next_free = reinterpret_cast<small_object_block_record*>(
        reinterpret_cast<uint8_t*>(block_record) + block_size_);
    next_free->next = block_record->next;
    next_free->free_blocks_cnt = block_record->free_blocks_cnt - 1;
    free_blocks_list_ = next_free;
  } else {
    free_blocks_list_ = block_record->next;
  }

  // bookkeeping...
  auto page_record = find_page_record(block_record);

  if (page_record->allocated_blocks_cnt == 0) {
    free_pages_cnt_--;
  }

  page_record->free_blocks_cnt--;
  page_record->allocated_blocks_cnt++;

  memset(block_record, 0, block_size_);

  return block_record;
}

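// Unlinks every free block belonging to this page from free_blocks_list_,
// unmaps the page, and erases its record. Only called for fully free pages.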
void LinkerSmallObjectAllocator::free_page(linker_vector_t::iterator page_record) {
  void* page_start = reinterpret_cast<void*>(page_record->page_addr);
  void* page_end = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(page_start) + PAGE_SIZE);

  while (free_blocks_list_ != nullptr &&
      free_blocks_list_ > page_start &&
      free_blocks_list_ < page_end) {
    free_blocks_list_ = free_blocks_list_->next;
  }

  small_object_block_record* current = free_blocks_list_;

  while (current != nullptr) {
    while (current->next > page_start && current->next < page_end) {
      current->next = current->next->next;
    }

    current = current->next;
  }

  munmap(page_start, PAGE_SIZE);
  page_records_.erase(page_record);
  free_pages_cnt_--;
}

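// Returns a block to free_blocks_list_ after validating that ptr sits on a
// block boundary within its page. If this leaves the page completely free, the
// page may be unmapped so that only a small number of free pages stays mapped.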
void LinkerSmallObjectAllocator::free(void* ptr) {
  auto page_record = find_page_record(ptr);

  ssize_t offset = reinterpret_cast<uintptr_t>(ptr) - sizeof(page_info);

  if (offset % block_size_ != 0) {
    async_safe_fatal("invalid pointer: %p (block_size=%zd)", ptr, block_size_);
  }

  memset(ptr, 0, block_size_);
  small_object_block_record* block_record = reinterpret_cast<small_object_block_record*>(ptr);

  block_record->next = free_blocks_list_;
  block_record->free_blocks_cnt = 1;

  free_blocks_list_ = block_record;

  page_record->free_blocks_cnt++;
  page_record->allocated_blocks_cnt--;

  if (page_record->allocated_blocks_cnt == 0) {
    if (free_pages_cnt_++ > 1) {
      // if we already have a free page - unmap this one.
      free_page(page_record);
    }
  }
}

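// Looks up the record of the page containing ptr via binary search over
// page_records_, which is kept sorted by page address. Aborts if no record is
// found, since that means ptr was not handed out by this allocator.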
linker_vector_t::iterator LinkerSmallObjectAllocator::find_page_record(void* ptr) {
  void* addr = reinterpret_cast<void*>(PAGE_START(reinterpret_cast<uintptr_t>(ptr)));
  small_object_page_record boundary;
  boundary.page_addr = addr;
  linker_vector_t::iterator it = std::lower_bound(
      page_records_.begin(), page_records_.end(), boundary);

  if (it == page_records_.end() || it->page_addr != addr) {
    // not found...
    async_safe_fatal("page record for %p was not found (block_size=%zd)", ptr, block_size_);
  }

  return it;
}

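// Records a freshly mapped page, inserting it so that page_records_ stays
// sorted by page address (required by find_page_record's binary search).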
void LinkerSmallObjectAllocator::create_page_record(void* page_addr, size_t free_blocks_cnt) {
  small_object_page_record record;
  record.page_addr = page_addr;
  record.free_blocks_cnt = free_blocks_cnt;
  record.allocated_blocks_cnt = 0;

  linker_vector_t::iterator it = std::lower_bound(
      page_records_.begin(), page_records_.end(), record);
  page_records_.insert(it, record);
}

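// Maps one anonymous page, names the mapping for debugging, writes the
// page_info header at its start, and threads the remainder of the page onto
// free_blocks_list_ as a single run of free blocks.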
void LinkerSmallObjectAllocator::alloc_page() {
  static_assert(sizeof(page_info) % 16 == 0, "sizeof(page_info) is not a multiple of 16");
  void* map_ptr = mmap(nullptr, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  if (map_ptr == MAP_FAILED) {
    async_safe_fatal("mmap failed: %s", strerror(errno));
  }

  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, PAGE_SIZE, "linker_alloc_small_objects");

  page_info* info = reinterpret_cast<page_info*>(map_ptr);
  memcpy(info->signature, kSignature, sizeof(kSignature));
  info->type = type_;
  info->allocator_addr = this;

  size_t free_blocks_cnt = (PAGE_SIZE - sizeof(page_info))/block_size_;

  create_page_record(map_ptr, free_blocks_cnt);

  small_object_block_record* first_block = reinterpret_cast<small_object_block_record*>(info + 1);

  first_block->next = free_blocks_list_;
  first_block->free_blocks_cnt = free_blocks_cnt;

  free_blocks_list_ = first_block;
}


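// Lazily constructs one LinkerSmallObjectAllocator per power-of-two size class
// (from kSmallObjectMinSizeLog2 up to kSmallObjectMaxSizeLog2) in the
// preallocated allocators_buf_, using placement new.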
void LinkerMemoryAllocator::initialize_allocators() {
  if (allocators_ != nullptr) {
    return;
  }

  LinkerSmallObjectAllocator* allocators =
      reinterpret_cast<LinkerSmallObjectAllocator*>(allocators_buf_);

  for (size_t i = 0; i < kSmallObjectAllocatorsCount; ++i) {
    uint32_t type = i + kSmallObjectMinSizeLog2;
    new (allocators + i) LinkerSmallObjectAllocator(type, 1 << type);
  }

  allocators_ = allocators;
}

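// Services a large allocation: maps enough whole pages for size plus the
// page_info header, names the mapping, and tags the header as kLargeObject so
// free()/realloc() know to munmap it later.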
void* LinkerMemoryAllocator::alloc_mmap(size_t size) {
  size_t allocated_size = PAGE_END(size + sizeof(page_info));
  void* map_ptr = mmap(nullptr, allocated_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS,
                       -1, 0);

  if (map_ptr == MAP_FAILED) {
    async_safe_fatal("mmap failed: %s", strerror(errno));
  }

  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, allocated_size, "linker_alloc_lob");

  page_info* info = reinterpret_cast<page_info*>(map_ptr);
  memcpy(info->signature, kSignature, sizeof(kSignature));
  info->type = kLargeObject;
  info->allocated_size = allocated_size;

  return info + 1;
}

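// Entry point for allocations: sizes above kSmallObjectMaxSize go straight to
// alloc_mmap(); everything else is rounded up to a power-of-two size class and
// served by the matching small object allocator.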
void* LinkerMemoryAllocator::alloc(size_t size) {
  // treat alloc(0) as alloc(1)
  if (size == 0) {
    size = 1;
  }

  if (size > kSmallObjectMaxSize) {
    return alloc_mmap(size);
  }

  uint16_t log2_size = log2(size);

  if (log2_size < kSmallObjectMinSizeLog2) {
    log2_size = kSmallObjectMinSizeLog2;
  }

  return get_small_object_allocator(log2_size)->alloc();
}

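// Recovers the page_info header written at the start of ptr's page and checks
// its signature, aborting on a mismatch (i.e. ptr does not come from this
// allocator).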
page_info* LinkerMemoryAllocator::get_page_info(void* ptr) {
  page_info* info = reinterpret_cast<page_info*>(PAGE_START(reinterpret_cast<size_t>(ptr)));
  if (memcmp(info->signature, kSignature, sizeof(kSignature)) != 0) {
    async_safe_fatal("invalid pointer %p (page signature mismatch)", ptr);
  }

  return info;
}

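// Reads the old usable size from the page header (mapping size for large
// objects, block size for small ones). If the new size still fits, ptr is
// returned unchanged; otherwise a new block is allocated, the old contents
// are copied over, and the old block is freed.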
void* LinkerMemoryAllocator::realloc(void* ptr, size_t size) {
  if (ptr == nullptr) {
    return alloc(size);
  }

  if (size == 0) {
    free(ptr);
    return nullptr;
  }

  page_info* info = get_page_info(ptr);

  size_t old_size = 0;

  if (info->type == kLargeObject) {
    old_size = info->allocated_size - sizeof(page_info);
  } else {
    LinkerSmallObjectAllocator* allocator = get_small_object_allocator(info->type);
    if (allocator != info->allocator_addr) {
      async_safe_fatal("invalid pointer %p (invalid allocator address for the page)", ptr);
    }

    old_size = allocator->get_block_size();
  }

  if (old_size < size) {
    void* result = alloc(size);
    memcpy(result, ptr, old_size);
    free(ptr);
    return result;
  }

  return ptr;
}

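// Large objects are unmapped directly using the size stored in their header;
// small objects are routed back to the LinkerSmallObjectAllocator recorded in
// their page header.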
void LinkerMemoryAllocator::free(void* ptr) {
  if (ptr == nullptr) {
    return;
  }

  page_info* info = get_page_info(ptr);

  if (info->type == kLargeObject) {
    munmap(info, info->allocated_size);
  } else {
    LinkerSmallObjectAllocator* allocator = get_small_object_allocator(info->type);
    if (allocator != info->allocator_addr) {
      async_safe_fatal("invalid pointer %p (invalid allocator address for the page)", ptr);
    }

    allocator->free(ptr);
  }
}

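// Maps a size-class type (log2 of the block size) to its
// LinkerSmallObjectAllocator, initializing the allocator array on first use.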
LinkerSmallObjectAllocator* LinkerMemoryAllocator::get_small_object_allocator(uint32_t type) {
  if (type < kSmallObjectMinSizeLog2 || type > kSmallObjectMaxSizeLog2) {
    async_safe_fatal("invalid type: %u", type);
  }

  initialize_allocators();
  return &allocators_[type - kSmallObjectMinSizeLog2];
}