//===-- sanitizer_allocator.cc --------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator that is used inside run-times.
//===----------------------------------------------------------------------===//
#include "sanitizer_common.h"

// FIXME: We should probably use more low-level allocator that would
// mmap some pages and split them into chunks to fulfill requests.
#if defined(__linux__) && !defined(__ANDROID__)
// On glibc-based Linux, call the libc allocator directly via its internal
// aliases so we bypass any user-installed malloc/free interposers (the
// sanitizers themselves intercept malloc/free, so calling plain malloc here
// could recurse back into the runtime).
extern "C" void *__libc_malloc(__sanitizer::uptr size);
extern "C" void __libc_free(void *ptr);
# define LIBC_MALLOC __libc_malloc
# define LIBC_FREE __libc_free
#else  // __linux__ && !ANDROID
# include <stdlib.h>
# define LIBC_MALLOC malloc
# define LIBC_FREE free
#endif  // __linux__ && !ANDROID

namespace __sanitizer {

// Magic value stamped into the 8-byte header that InternalAlloc prepends to
// every block. InternalFree checks it (catching corruption and double-free),
// and InternalAllocBlock scans backwards for it to find a block's start.
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;

// Allocates 'size' usable bytes from the underlying libc allocator, with an
// extra 8-byte header in front holding kBlockMagic.
// Returns 0 if size + header overflows uptr or if libc malloc fails.
// The returned pointer is the address just past the header; it is only
// 8-byte aligned relative to whatever LIBC_MALLOC returned.
void *InternalAlloc(uptr size) {
  // uptr is unsigned, so wrap-around manifests as the sum being smaller
  // than either operand.
  if (size + sizeof(u64) < size)
    return 0;
  void *p = LIBC_MALLOC(size + sizeof(u64));
  if (p == 0)
    return 0;
  ((u64*)p)[0] = kBlockMagic;
  return (char*)p + sizeof(u64);
}

// Frees a block previously returned by InternalAlloc. No-op on null.
// CHECKs that the header still contains kBlockMagic, then zeroes it so a
// second free of the same pointer (or a stale pointer) trips the CHECK.
void InternalFree(void *addr) {
  if (addr == 0)
    return;
  // Step back over the header to recover the pointer libc gave us.
  addr = (char*)addr - sizeof(u64);
  CHECK_EQ(((u64*)addr)[0], kBlockMagic);
  ((u64*)addr)[0] = 0;
  LIBC_FREE(addr);
}

// Given a pointer 'p' into the interior of a live InternalAlloc block,
// returns the start of that block's user data (the byte right after the
// magic header). Works by aligning p down to 8 bytes and scanning backwards
// one u64 at a time until the magic is found.
// NOTE(review): the scan is unbounded — if 'p' does not actually point into
// a live InternalAlloc block (or the header was clobbered), this walks off
// into unrelated memory; callers must guarantee the precondition.
void *InternalAllocBlock(void *p) {
  CHECK_NE(p, (void*)0);
  u64 *pp = (u64*)((uptr)p & ~0x7);
  for (; pp[0] != kBlockMagic; pp--) {}
  return pp + 1;
}

// LowLevelAllocator
// Optional hook invoked (with base address and size) each time the
// low-level allocator maps a fresh region. Zero-initialized static, so no
// callback fires until SetLowLevelAllocateCallback installs one.
static LowLevelAllocateCallback low_level_alloc_callback;

// Bump-pointer allocator backed directly by mmap. Returns 8-byte-aligned
// storage carved from the current mapped region; maps a new region (at
// least one page, or 'size' if larger) when the current one is exhausted.
// Memory is never freed. When a new region is mapped, any unused tail of
// the previous region is simply abandoned — presumably acceptable waste
// for this allocator's few, long-lived uses.
// NOTE(review): no locking is visible here — looks like callers are
// expected to serialize access externally; confirm.
void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, 8);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    uptr size_to_allocate = Max(size, kPageSize);
    allocated_current_ =
        (char*)MmapOrDie(size_to_allocate, __FUNCTION__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_,
                               size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}

// Installs (or, with a null argument, clears) the callback that
// LowLevelAllocator::Allocate invokes for each newly mapped region.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}

}  // namespace __sanitizer