//=-- lsan_allocator.cc ---------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

namespace __lsan {

static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize  =  0x40000000000ULL;  // 4T.

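// Per-chunk metadata, stored by the allocator outside the user-visible chunk.
// The three bit-fields pack into a single 64-bit word (8 + 2 + 54 bits).
// |allocated| occupies the first byte so that it can be flipped atomically
// through the atomic_uint8_t casts in RegisterAllocation() and
// RegisterDeallocation() below; hence the "Must be first" requirement.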
struct ChunkMetadata {
  bool allocated : 8;  // Must be first.
  ChunkTag tag : 2;
  uptr requested_size : 54;
  u32 stack_trace_id;
};

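// The standard sanitizer_common allocator combination: a size-class primary
// allocator that serves small allocations from a fixed virtual address range
// starting at kAllocatorSpace, an mmap-based secondary allocator for anything
// larger, and a per-thread cache in front of both.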
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
        sizeof(ChunkMetadata), CompactSizeClassMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

static Allocator allocator;
static THREADLOCAL AllocatorCache cache;

void InitializeAllocator() {
  allocator.Init();
}

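// Called on thread teardown: returns any chunks still held in the dying
// thread's local cache to the global allocator so they are not stranded.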
void AllocatorThreadFinish() {
  allocator.SwallowCache(&cache);
}

static ChunkMetadata *Metadata(void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

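// Fills in the chunk's metadata and only then, as the last step, atomically
// sets the |allocated| flag. The intent appears to be that a concurrent leak
// scan can never observe a chunk that is marked allocated but still carries
// stale metadata.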
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return 0;
  }
  void *p = allocator.Allocate(&cache, size, alignment, cleared);
  RegisterAllocation(stack, p, size);
  return p;
}
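
// A minimal sketch of how a malloc interceptor might drive Allocate(). The
// INTERCEPTOR macro comes from the sanitizer interception library;
// GET_STACK_TRACE stands in for whatever stack-capturing macro the
// interceptor layer defines. Both names are illustrative assumptions, not
// part of this file:
//
//   INTERCEPTOR(void *, malloc, uptr size) {
//     GET_STACK_TRACE;  // hypothetical: captures a StackTrace named |stack|
//     return Allocate(stack, size, /*alignment=*/1, /*cleared=*/false);
//   }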

void Deallocate(void *p) {
  RegisterDeallocation(p);
  allocator.Deallocate(&cache, p);
}

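// Two subtleties here: the deallocation is registered up front because the
// underlying Reallocate() may move or free the chunk, and when |new_size|
// exceeds the cap the old chunk is freed before 0 is returned. The latter
// differs from the C realloc() contract, which leaves the original block
// untouched on failure.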
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(&cache, p);
    return 0;
  }
  p = allocator.Reallocate(&cache, p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

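// Reports the address range occupied by this thread's allocator cache.
// Presumably the common LSan module uses this to carve the cache out of the
// scanned TLS range: the cache holds pointers to freed chunks, which would
// otherwise make garbage look reachable.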
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)&cache;
  *end = *begin + sizeof(cache);
}

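// Returns the size the caller originally requested, not the (possibly larger)
// size class the allocator rounded it up to.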
uptr GetMallocUsableSize(void *p) {
  if (!p) return 0;  // Guard against null: malloc_usable_size(NULL) must
                     // return 0, and Metadata() is not safe to call on null.
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

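// Resolves a (possibly interior) pointer to the start of the chunk that
// contains it, or returns 0 if there is none. Only chunks that are currently
// allocated count, and only addresses within the originally requested size.
// As the GetBlockBeginFastLocked() name suggests, this is presumably intended
// to run while the allocator is already locked, i.e. during the leak scan.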
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (m->allocated && addr < chunk + m->requested_size)
    return chunk;
  return 0;
}

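// The allocator keeps its metadata outside the user chunk, so from LSan's
// point of view user data starts exactly at the chunk's begin address: there
// is no in-chunk header or redzone to skip.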
uptr GetUserBegin(uptr chunk) {
  return chunk;
}

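// LsanMetadata is how the tool-independent LSan code reads chunk metadata:
// it wraps an opaque pointer so that lsan_common does not need to know the
// ChunkMetadata layout defined in this file.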
LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

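// Tags the chunk containing |p| as kIgnored so that it is excluded from leak
// reports; this is the allocator-side half of __lsan_ignore_object(). The
// "Locked" suffix presumably means the caller is expected to hold the
// allocator lock.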
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan