//===--- Allocator.h - Simple memory allocation abstraction -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the MallocAllocator and BumpPtrAllocator interfaces. Both
/// of these conform to an LLVM "Allocator" concept which consists of an
/// Allocate method accepting a size and alignment, and a Deallocate method
/// accepting a pointer and size. Further, the LLVM "Allocator" concept has
/// overloads of Allocate and Deallocate that deduce the size and alignment
/// from the final type. These overloads are typically provided by a base class
/// template \c AllocatorBase.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ALLOCATOR_H
#define LLVM_SUPPORT_ALLOCATOR_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Memory.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>

namespace llvm {

/// \brief CRTP base class providing obvious overloads for the core \c
/// Allocate() methods of LLVM-style allocators.
///
/// This base class both documents the full public interface exposed by all
/// LLVM-style allocators, and redirects all of the overloads to a single core
/// set of methods which the derived class must define.
template <typename DerivedT> class AllocatorBase {
public:
  /// \brief Allocate \a Size bytes of \a Alignment aligned memory. This method
  /// must be implemented by \c DerivedT.
  void *Allocate(size_t Size, size_t Alignment) {
#ifdef __clang__
    static_assert(static_cast<void *(AllocatorBase::*)(size_t, size_t)>(
                      &AllocatorBase::Allocate) !=
                      static_cast<void *(DerivedT::*)(size_t, size_t)>(
                          &DerivedT::Allocate),
                  "Class derives from AllocatorBase without implementing the "
                  "core Allocate(size_t, size_t) overload!");
#endif
    return static_cast<DerivedT *>(this)->Allocate(Size, Alignment);
  }

  /// \brief Deallocate \a Ptr to \a Size bytes of memory allocated by this
  /// allocator.
  void Deallocate(const void *Ptr, size_t Size) {
#ifdef __clang__
    static_assert(static_cast<void (AllocatorBase::*)(const void *, size_t)>(
                      &AllocatorBase::Deallocate) !=
                      static_cast<void (DerivedT::*)(const void *, size_t)>(
                          &DerivedT::Deallocate),
                  "Class derives from AllocatorBase without implementing the "
                  "core Deallocate(const void *, size_t) overload!");
#endif
    return static_cast<DerivedT *>(this)->Deallocate(Ptr, Size);
  }

  // The rest of these methods are helpers that redirect to one of the above
  // core methods.

  /// \brief Allocate space for a sequence of objects without constructing them.
  template <typename T> T *Allocate(size_t Num = 1) {
    return static_cast<T *>(Allocate(Num * sizeof(T), AlignOf<T>::Alignment));
  }

  /// \brief Deallocate space for a sequence of objects without destroying them.
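  /// (The \c enable_if below keeps this overload out of overload resolution
  /// when \c T is cv-qualified \c void, so it never collides with the core
  /// \c Deallocate(const void *, size_t) overload above.)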
  template <typename T>
  typename std::enable_if<
      !std::is_same<typename std::remove_cv<T>::type, void>::value, void>::type
  Deallocate(T *Ptr, size_t Num = 1) {
    Deallocate(static_cast<const void *>(Ptr), Num * sizeof(T));
  }
};

class MallocAllocator : public AllocatorBase<MallocAllocator> {
public:
  void Reset() {}

  LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size,
                                                size_t /*Alignment*/) {
    return malloc(Size);
  }

  // Pull in base class overloads.
  using AllocatorBase<MallocAllocator>::Allocate;

  void Deallocate(const void *Ptr, size_t /*Size*/) {
    free(const_cast<void *>(Ptr));
  }

  // Pull in base class overloads.
  using AllocatorBase<MallocAllocator>::Deallocate;

  void PrintStats() const {}
};

namespace detail {

// We call out to an external function to actually print the message as the
// printing code uses Allocator.h in its implementation.
void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
                                size_t TotalMemory);
} // End namespace detail.

/// \brief Allocate memory in an ever growing pool, as if by bump-pointer.
///
/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
/// memory rather than relying on a boundless contiguous heap. However, it has
/// bump-pointer semantics in that it is a monotonically growing pool of memory
/// where every allocation is found by merely allocating the next N bytes in
/// the slab, or the next N bytes in the next slab.
///
/// Note that this also has a threshold for forcing allocations above a certain
/// size into their own slab.
///
/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
/// object, which wraps malloc, to allocate memory, but it can be changed to
/// use a custom allocator.
template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
          size_t SizeThreshold = SlabSize>
class BumpPtrAllocatorImpl
    : public AllocatorBase<
          BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold>> {
public:
  static_assert(SizeThreshold <= SlabSize,
                "The SizeThreshold must be at most the SlabSize to ensure "
                "that objects larger than a slab go into their own memory "
                "allocation.");

  BumpPtrAllocatorImpl()
      : CurPtr(nullptr), End(nullptr), BytesAllocated(0), Allocator() {}
  template <typename T>
  BumpPtrAllocatorImpl(T &&Allocator)
      : CurPtr(nullptr), End(nullptr), BytesAllocated(0),
        Allocator(std::forward<T &&>(Allocator)) {}

  // Manually implement a move constructor as we must clear the old allocator's
  // slabs as a matter of correctness.
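  // Otherwise the moved-from allocator's destructor would run DeallocateSlabs
  // and DeallocateCustomSizedSlabs on slabs that this allocator now owns,
  // freeing them twice.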
  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
      : CurPtr(Old.CurPtr), End(Old.End), Slabs(std::move(Old.Slabs)),
        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
        BytesAllocated(Old.BytesAllocated),
        Allocator(std::move(Old.Allocator)) {
    Old.CurPtr = Old.End = nullptr;
    Old.BytesAllocated = 0;
    Old.Slabs.clear();
    Old.CustomSizedSlabs.clear();
  }

  ~BumpPtrAllocatorImpl() {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();
  }

  BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();

    CurPtr = RHS.CurPtr;
    End = RHS.End;
    BytesAllocated = RHS.BytesAllocated;
    Slabs = std::move(RHS.Slabs);
    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
    Allocator = std::move(RHS.Allocator);

    RHS.CurPtr = RHS.End = nullptr;
    RHS.BytesAllocated = 0;
    RHS.Slabs.clear();
    RHS.CustomSizedSlabs.clear();
    return *this;
  }

  /// \brief Deallocate all but the current slab and reset the current pointer
  /// to the beginning of it, freeing all memory allocated so far.
  void Reset() {
    DeallocateCustomSizedSlabs();
    CustomSizedSlabs.clear();

    if (Slabs.empty())
      return;

    // Reset the state.
    BytesAllocated = 0;
    CurPtr = (char *)Slabs.front();
    End = CurPtr + SlabSize;

    // Deallocate all but the first slab; the custom-sized slabs were already
    // freed above.
    DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
    Slabs.erase(std::next(Slabs.begin()), Slabs.end());
  }

  /// \brief Allocate space at the specified alignment.
  LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void *
  Allocate(size_t Size, size_t Alignment) {
    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");

    // Keep track of how many bytes we've allocated.
    BytesAllocated += Size;

    size_t Adjustment = alignmentAdjustment(CurPtr, Alignment);
    assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");

    // Check if we have enough space.
    if (Adjustment + Size <= size_t(End - CurPtr)) {
      char *AlignedPtr = CurPtr + Adjustment;
      CurPtr = AlignedPtr + Size;
      // Update the allocation point of this memory block in MemorySanitizer.
      // Without this, MemorySanitizer messages for values originating from
      // here will point to the allocation of the entire slab.
      __msan_allocated_memory(AlignedPtr, Size);
      // Similarly, tell ASan about this space.
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // If Size is really big, allocate a separate slab for it.
    size_t PaddedSize = Size + Alignment - 1;
    if (PaddedSize > SizeThreshold) {
      void *NewSlab = Allocator.Allocate(PaddedSize, 0);
      // We own the new slab and don't want anyone reading anything other than
      // pieces returned from this method. So poison the whole slab.
      __asan_poison_memory_region(NewSlab, PaddedSize);
      CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));

      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
      char *AlignedPtr = (char*)AlignedAddr;
      __msan_allocated_memory(AlignedPtr, Size);
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // Otherwise, start a new slab and try again.
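    // At this point PaddedSize <= SizeThreshold <= SlabSize, so the aligned
    // request is guaranteed to fit in the fresh slab; the assert below checks
    // this invariant.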
    StartNewSlab();
    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
    assert(AlignedAddr + Size <= (uintptr_t)End &&
           "Unable to allocate memory!");
    char *AlignedPtr = (char*)AlignedAddr;
    CurPtr = AlignedPtr + Size;
    __msan_allocated_memory(AlignedPtr, Size);
    __asan_unpoison_memory_region(AlignedPtr, Size);
    return AlignedPtr;
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;

  void Deallocate(const void *Ptr, size_t Size) {
    __asan_poison_memory_region(Ptr, Size);
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;

  size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }

  size_t getTotalMemory() const {
    size_t TotalMemory = 0;
    for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
      TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
    for (auto &PtrAndSize : CustomSizedSlabs)
      TotalMemory += PtrAndSize.second;
    return TotalMemory;
  }

  void PrintStats() const {
    detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated,
                                       getTotalMemory());
  }

private:
  /// \brief The current pointer into the current slab.
  ///
  /// This points to the next free byte in the slab.
  char *CurPtr;

  /// \brief The end of the current slab.
  char *End;

  /// \brief The slabs allocated so far.
  SmallVector<void *, 4> Slabs;

  /// \brief Custom-sized slabs allocated for too-large allocation requests.
  SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;

  /// \brief How many bytes we've allocated.
  ///
  /// Used so that we can compute how much space was wasted.
  size_t BytesAllocated;

  /// \brief The allocator instance we use to get slabs of memory.
  AllocatorT Allocator;

  static size_t computeSlabSize(unsigned SlabIdx) {
    // Scale the actual allocated slab size based on the number of slabs
    // allocated. Every 128 slabs allocated, we double the allocated size to
    // reduce allocation frequency, but saturate at multiplying the slab size
    // by 2^30.
    return SlabSize * ((size_t)1 << std::min<size_t>(30, SlabIdx / 128));
  }

  /// \brief Allocate a new slab and move the bump pointers over into the new
  /// slab, modifying CurPtr and End.
  void StartNewSlab() {
    size_t AllocatedSlabSize = computeSlabSize(Slabs.size());

    void *NewSlab = Allocator.Allocate(AllocatedSlabSize, 0);
    // We own the new slab and don't want anyone reading anything other than
    // pieces returned from this method. So poison the whole slab.
    __asan_poison_memory_region(NewSlab, AllocatedSlabSize);

    Slabs.push_back(NewSlab);
    CurPtr = (char *)(NewSlab);
    End = ((char *)NewSlab) + AllocatedSlabSize;
  }

  /// \brief Deallocate a sequence of slabs.
  void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
                       SmallVectorImpl<void *>::iterator E) {
    for (; I != E; ++I) {
      size_t AllocatedSlabSize =
          computeSlabSize(std::distance(Slabs.begin(), I));
      Allocator.Deallocate(*I, AllocatedSlabSize);
    }
  }

  /// \brief Deallocate all memory for custom sized slabs.
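  /// Each entry pairs the slab pointer with the padded size it was allocated
  /// with, so exactly that size is handed back to the underlying allocator.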
  void DeallocateCustomSizedSlabs() {
    for (auto &PtrAndSize : CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      Allocator.Deallocate(Ptr, Size);
    }
  }

  template <typename T> friend class SpecificBumpPtrAllocator;
};

/// \brief The standard BumpPtrAllocator which just uses the default template
/// parameters.
typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;

/// \brief A BumpPtrAllocator that allows only elements of a specific type to
/// be allocated.
///
/// This allows calling the destructor in DestroyAll() and when the allocator
/// is destroyed.
template <typename T> class SpecificBumpPtrAllocator {
  BumpPtrAllocator Allocator;

public:
  SpecificBumpPtrAllocator() : Allocator() {}
  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
      : Allocator(std::move(Old.Allocator)) {}
  ~SpecificBumpPtrAllocator() { DestroyAll(); }

  SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
    Allocator = std::move(RHS.Allocator);
    return *this;
  }

  /// Call the destructor of each allocated object and deallocate all but the
  /// current slab and reset the current pointer to the beginning of it,
  /// freeing all memory allocated so far.
  void DestroyAll() {
    auto DestroyElements = [](char *Begin, char *End) {
      assert(Begin == (char*)alignAddr(Begin, alignOf<T>()));
      for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
        reinterpret_cast<T *>(Ptr)->~T();
    };

    for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
         ++I) {
      size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
          std::distance(Allocator.Slabs.begin(), I));
      char *Begin = (char*)alignAddr(*I, alignOf<T>());
      char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
                                               : (char *)*I + AllocatedSlabSize;

      DestroyElements(Begin, End);
    }

    for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      DestroyElements((char*)alignAddr(Ptr, alignOf<T>()), (char *)Ptr + Size);
    }

    Allocator.Reset();
  }

  /// \brief Allocate space for an array of objects without constructing them.
  T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
};

} // end namespace llvm

template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
void *operator new(size_t Size,
                   llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                              SizeThreshold> &Allocator) {
  struct S {
    char c;
    union {
      double D;
      long double LD;
      long long L;
      void *P;
    } x;
  };
  return Allocator.Allocate(
      Size, std::min((size_t)llvm::NextPowerOf2(Size), offsetof(S, x)));
}

template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
void operator delete(
    void *, llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold> &) {
}

#endif // LLVM_SUPPORT_ALLOCATOR_H