//=-- lsan_allocator.cc ---------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {

struct ChunkMetadata {
  u8 allocated : 8;  // Must be first.
  ChunkTag tag : 2;
  uptr requested_size : 54;
  u32 stack_trace_id;
};

#if defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
    sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
    PrimaryAllocator;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
        sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

static Allocator allocator;
static THREADLOCAL AllocatorCache cache;

void InitializeAllocator() {
  allocator.InitLinkerInitialized(common_flags()->allocator_may_return_null);
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(&cache);
}

static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return nullptr;
  }
  void *p = allocator.Allocate(&cache, size, alignment, false);
  // Do not rely on the allocator to clear the memory (it's slow).
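  // Only primary (size-class) chunks can contain stale data from a previously
  // freed allocation; secondary chunks come straight from anonymous mmap,
  // which the kernel already zero-fills, so the memset is skipped for them.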
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
  RunMallocHooks(p, size);
  return p;
}

void Deallocate(void *p) {
  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
  RunFreeHooks(p);
  RegisterDeallocation(p);
  allocator.Deallocate(&cache, p);
}

void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(&cache, p);
    return nullptr;
  }
  p = allocator.Reallocate(&cache, p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)&cache;
  *end = *begin + sizeof(cache);
}

uptr GetMallocUsableSize(const void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan

using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}
}  // extern "C"