/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_
#define ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_

#include <stdint.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <memory>
#include <set>
#include <string>
#include <unordered_set>
#include <vector>

#include "base/mutex.h"
#include "base/logging.h"
#include "globals.h"
#include "mem_map.h"
#include "thread.h"
#include "utils.h"

namespace art {
namespace gc {
namespace allocator {

// A runs-of-slots memory allocator.
class RosAlloc {
 private:
  // Represents a run of free pages.
  class FreePageRun {
   public:
    byte magic_num_;  // The magic number used for debugging only.

    bool IsFree() const {
      return !kIsDebugBuild || magic_num_ == kMagicNumFree;
    }
    size_t ByteSize(RosAlloc* rosalloc) const EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      const byte* fpr_base = reinterpret_cast<const byte*>(this);
      size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
      size_t byte_size = rosalloc->free_page_run_size_map_[pm_idx];
      DCHECK_GE(byte_size, static_cast<size_t>(0));
      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
      return byte_size;
    }
    void SetByteSize(RosAlloc* rosalloc, size_t byte_size)
        EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
      byte* fpr_base = reinterpret_cast<byte*>(this);
      size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
      rosalloc->free_page_run_size_map_[pm_idx] = byte_size;
    }
    void* Begin() {
      return reinterpret_cast<void*>(this);
    }
    void* End(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      byte* fpr_base = reinterpret_cast<byte*>(this);
      byte* end = fpr_base + ByteSize(rosalloc);
      return end;
    }
    bool IsLargerThanPageReleaseThreshold(RosAlloc* rosalloc)
        EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      return ByteSize(rosalloc) >= rosalloc->page_release_size_threshold_;
    }
    bool IsAtEndOfSpace(RosAlloc* rosalloc)
        EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      return reinterpret_cast<byte*>(this) + ByteSize(rosalloc) == rosalloc->base_ + rosalloc->footprint_;
    }
    bool ShouldReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      switch (rosalloc->page_release_mode_) {
        case kPageReleaseModeNone:
          return false;
        case kPageReleaseModeEnd:
          return IsAtEndOfSpace(rosalloc);
        case kPageReleaseModeSize:
          return IsLargerThanPageReleaseThreshold(rosalloc);
        case kPageReleaseModeSizeAndEnd:
          return IsLargerThanPageReleaseThreshold(rosalloc) && IsAtEndOfSpace(rosalloc);
        case kPageReleaseModeAll:
          return true;
        default:
          LOG(FATAL) << "Unexpected page release mode ";
          return false;
      }
    }
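    // For example, under kPageReleaseModeSizeAndEnd with the default 4 MB threshold, a 5 MB free
    // page run that ends at the current footprint is released, while a 5 MB run in the middle of
    // the space, or a 1 MB run at the end, is kept. (The run sizes here are illustrative only.)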
    void ReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      byte* start = reinterpret_cast<byte*>(this);
      size_t byte_size = ByteSize(rosalloc);
      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
      if (ShouldReleasePages(rosalloc)) {
        rosalloc->ReleasePageRange(start, start + byte_size);
      }
    }
  };

  // Represents a run of memory slots of the same size.
  //
  // A run's memory layout:
  //
  // +-------------------+
  // | magic_num         |
  // +-------------------+
  // | size_bracket_idx  |
  // +-------------------+
  // | is_thread_local   |
  // +-------------------+
  // | to_be_bulk_freed  |
  // +-------------------+
  // | top_bitmap_idx    |
  // +-------------------+
  // |                   |
  // | alloc bit map     |
  // |                   |
  // +-------------------+
  // |                   |
  // | bulk free bit map |
  // |                   |
  // +-------------------+
  // |                   |
  // | thread-local free |
  // | bit map           |
  // |                   |
  // +-------------------+
  // | padding due to    |
  // | alignment         |
  // +-------------------+
  // | slot 0            |
  // +-------------------+
  // | slot 1            |
  // +-------------------+
  // | slot 2            |
  // +-------------------+
  // ...
  // +-------------------+
  // | last slot         |
  // +-------------------+
  //
  class Run {
   public:
    byte magic_num_;                 // The magic number used for debugging.
    byte size_bracket_idx_;          // The index of the size bracket of this run.
    byte is_thread_local_;           // True if this run is used as a thread-local run.
    byte to_be_bulk_freed_;          // Used within BulkFree() to flag a run that's involved with a bulk free.
    uint32_t first_search_vec_idx_;  // The index of the first bitmap vector which may contain an available slot.
    uint32_t alloc_bit_map_[0];      // The bit map that indicates whether each slot is in use.

    // bulk_free_bit_map_[] : The bit map that is used for GC to
    // temporarily mark the slots to free without using a lock. After
    // all the slots to be freed in a run are marked, all those slots
    // get freed in bulk with one lock acquisition per run, as opposed
    // to one per slot, to minimize the lock contention. This is used
    // within BulkFree().

    // thread_local_free_bit_map_[] : The bit map that is used for GC
    // to temporarily mark the slots to free in a thread-local run
    // without using a lock (without synchronizing the thread that
    // owns the thread-local run.) When the thread-local run becomes
    // full, the thread will check this bit map and update the
    // allocation bit map of the run (that is, the slots get freed.)

    // Returns the byte size of the header except for the bit maps.
    static size_t fixed_header_size() {
      Run temp;
      size_t size = reinterpret_cast<byte*>(&temp.alloc_bit_map_) - reinterpret_cast<byte*>(&temp);
      DCHECK_EQ(size, static_cast<size_t>(8));
      return size;
    }
    // Returns the base address of the bulk free bit map.
    uint32_t* BulkFreeBitMap() {
      return reinterpret_cast<uint32_t*>(reinterpret_cast<byte*>(this) + bulkFreeBitMapOffsets[size_bracket_idx_]);
    }
    // Returns the base address of the thread local free bit map.
    uint32_t* ThreadLocalFreeBitMap() {
      return reinterpret_cast<uint32_t*>(reinterpret_cast<byte*>(this) + threadLocalFreeBitMapOffsets[size_bracket_idx_]);
    }
    void* End() {
      return reinterpret_cast<byte*>(this) + kPageSize * numOfPages[size_bracket_idx_];
    }
    // Returns the number of bitmap words per run.
    size_t NumberOfBitmapVectors() const {
      return RoundUp(numOfSlots[size_bracket_idx_], 32) / 32;
    }
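    // For example (slot counts illustrative only): a bracket whose runs hold 128 slots needs
    // RoundUp(128, 32) / 32 = 4 bitmap vectors, while one holding 42 slots needs
    // RoundUp(42, 32) / 32 = 2; the unused bits of the last vector are handled by
    // GetBitmapLastVectorMask() and SetAllocBitMapBitsForInvalidSlots() below.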
    void SetIsThreadLocal(bool is_thread_local) {
      is_thread_local_ = is_thread_local ? 1 : 0;
    }
    bool IsThreadLocal() const {
      return is_thread_local_ != 0;
    }
    // Frees slots in the allocation bit map with regard to the
    // thread-local free bit map. Used when a thread-local run becomes
    // full.
    bool MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out);
    // Frees slots in the allocation bit map with regard to the bulk
    // free bit map. Used in a bulk free.
    void MergeBulkFreeBitMapIntoAllocBitMap();
    // Unions the slots to be freed in the bulk free bit map into the
    // thread-local free bit map. In a bulk free, as a two-step
    // process, GC will first record all the slots to free in a run in
    // the bulk free bit map, where it can write without a lock, and
    // later acquire a lock once per run to union the bits of the bulk
    // free bit map into the thread-local free bit map.
    void UnionBulkFreeBitMapToThreadLocalFreeBitMap();
    // Allocates a slot in a run.
    void* AllocSlot();
    // Frees a slot in a run. This is used in a non-bulk free.
    void FreeSlot(void* ptr);
    // Marks the slots to free in the bulk free bit map. Returns the bracket size.
    size_t MarkBulkFreeBitMap(void* ptr);
    // Marks the slots to free in the thread-local free bit map.
    void MarkThreadLocalFreeBitMap(void* ptr);
    // Returns the last-word mask: all the bits in the last vector that don't correspond to valid
    // slots are set, to optimize the allocation path.
    static uint32_t GetBitmapLastVectorMask(size_t num_slots, size_t num_vec);
    // Returns true if none of the slots in the run are in use.
    bool IsAllFree();
    // Returns true if all the slots in the run are in use.
    bool IsFull();
    // Returns true if the bulk free bit map is clean.
    bool IsBulkFreeBitmapClean();
    // Returns true if the thread local free bit map is clean.
    bool IsThreadLocalFreeBitmapClean();
    // Set the alloc_bit_map_ bits for slots that are past the end of the run.
    void SetAllocBitMapBitsForInvalidSlots();
    // Zero the run's data.
    void ZeroData();
    // Zero the run's header.
    void ZeroHeader();
    // Fill the alloc bitmap with 1s.
    void FillAllocBitMap();
    // Iterate over all the slots and apply the given function.
    void InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg), void* arg);
    // Dump the run metadata for debugging.
    std::string Dump();
    // Verify for debugging.
    void Verify(Thread* self, RosAlloc* rosalloc)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);

   private:
    // The common part of MarkBulkFreeBitMap() and MarkThreadLocalFreeBitMap(). Returns the
    // bracket size.
    size_t MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base, const char* caller_name);
    // Turns the bit map into a string for debugging.
    static std::string BitMapToStr(uint32_t* bit_map_base, size_t num_vec);
  };

  // The magic number for a run.
  static const byte kMagicNum = 42;
  // The magic number for free pages.
  static const byte kMagicNumFree = 43;
  // The number of size brackets. Sync this with the length of Thread::rosalloc_runs_.
  static const size_t kNumOfSizeBrackets = kNumRosAllocThreadLocalSizeBrackets;
  // The number of smaller size brackets that are 16 bytes apart.
  static const size_t kNumOfQuantumSizeBrackets = 32;
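  // Taken together with BracketSizeToIndex() and RoundToBracketSize() below, this implies the
  // bracket layout: 32 quantum brackets of 16, 32, 48, ..., 512 bytes, plus a 1 KB and a 2 KB
  // bracket. The per-bracket arrays that follow are filled in by Initialize().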
  // The sizes (the slot sizes, in bytes) of the size brackets.
  static size_t bracketSizes[kNumOfSizeBrackets];
  // The numbers of pages that are used for runs for each size bracket.
  static size_t numOfPages[kNumOfSizeBrackets];
  // The numbers of slots of the runs for each size bracket.
  static size_t numOfSlots[kNumOfSizeBrackets];
  // The header sizes in bytes of the runs for each size bracket.
  static size_t headerSizes[kNumOfSizeBrackets];
  // The byte offsets of the bulk free bit maps of the runs for each size bracket.
  static size_t bulkFreeBitMapOffsets[kNumOfSizeBrackets];
  // The byte offsets of the thread-local free bit maps of the runs for each size bracket.
  static size_t threadLocalFreeBitMapOffsets[kNumOfSizeBrackets];

  // Initialize the run specs (the above arrays).
  static void Initialize();
  static bool initialized_;

  // Returns the byte size of the bracket size from the index.
  static size_t IndexToBracketSize(size_t idx) {
    DCHECK(idx < kNumOfSizeBrackets);
    return bracketSizes[idx];
  }
  // Returns the index of the size bracket from the bracket size.
  static size_t BracketSizeToIndex(size_t size) {
    DCHECK(16 <= size && ((size < 1 * KB && size % 16 == 0) || size == 1 * KB || size == 2 * KB));
    size_t idx;
    if (UNLIKELY(size == 1 * KB)) {
      idx = kNumOfSizeBrackets - 2;
    } else if (UNLIKELY(size == 2 * KB)) {
      idx = kNumOfSizeBrackets - 1;
    } else {
      DCHECK(size < 1 * KB);
      DCHECK_EQ(size % 16, static_cast<size_t>(0));
      idx = size / 16 - 1;
    }
    DCHECK(bracketSizes[idx] == size);
    return idx;
  }
  // Rounds the size up to the nearest bracket size.
  static size_t RoundToBracketSize(size_t size) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= 512)) {
      return RoundUp(size, 16);
    } else if (512 < size && size <= 1 * KB) {
      return 1 * KB;
    } else {
      DCHECK(1 * KB < size && size <= 2 * KB);
      return 2 * KB;
    }
  }
  // Returns the size bracket index from the byte size with rounding.
  static size_t SizeToIndex(size_t size) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= 512)) {
      return RoundUp(size, 16) / 16 - 1;
    } else if (512 < size && size <= 1 * KB) {
      return kNumOfSizeBrackets - 2;
    } else {
      DCHECK(1 * KB < size && size <= 2 * KB);
      return kNumOfSizeBrackets - 1;
    }
  }
  // A combination of SizeToIndex() and RoundToBracketSize().
  static size_t SizeToIndexAndBracketSize(size_t size, size_t* bracket_size_out) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= 512)) {
      size_t bracket_size = RoundUp(size, 16);
      *bracket_size_out = bracket_size;
      size_t idx = bracket_size / 16 - 1;
      DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
      return idx;
    } else if (512 < size && size <= 1 * KB) {
      size_t bracket_size = 1024;
      *bracket_size_out = bracket_size;
      size_t idx = kNumOfSizeBrackets - 2;
      DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
      return idx;
    } else {
      DCHECK(1 * KB < size && size <= 2 * KB);
      size_t bracket_size = 2048;
      *bracket_size_out = bracket_size;
      size_t idx = kNumOfSizeBrackets - 1;
      DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
      return idx;
    }
  }
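  // For example, a 35-byte request rounds up to the 48-byte bracket (index 48 / 16 - 1 = 2), a
  // 600-byte request maps to the 1 KB bracket (index kNumOfSizeBrackets - 2), and a 3000-byte
  // request exceeds kLargeSizeThreshold and bypasses the brackets entirely (see
  // AllocLargeObject() below).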
  // Returns the page map index from an address. Requires that the
  // address is page size aligned.
  size_t ToPageMapIndex(const void* addr) const {
    DCHECK(base_ <= addr && addr < base_ + capacity_);
    size_t byte_offset = reinterpret_cast<const byte*>(addr) - base_;
    DCHECK_EQ(byte_offset % static_cast<size_t>(kPageSize), static_cast<size_t>(0));
    return byte_offset / kPageSize;
  }
  // Returns the page map index from an address, rounding down.
  size_t RoundDownToPageMapIndex(void* addr) const {
    DCHECK(base_ <= addr && addr < reinterpret_cast<byte*>(base_) + capacity_);
    return (reinterpret_cast<uintptr_t>(addr) - reinterpret_cast<uintptr_t>(base_)) / kPageSize;
  }

  // A memory allocation request larger than this size is treated as a large object and allocated
  // at page granularity.
  static const size_t kLargeSizeThreshold = 2048;

  // If true, check that the returned memory is actually zero.
  static constexpr bool kCheckZeroMemory = kIsDebugBuild;

  // If true, log verbose details of operations.
  static constexpr bool kTraceRosAlloc = false;

  struct hash_run {
    size_t operator()(const RosAlloc::Run* r) const {
      return reinterpret_cast<size_t>(r);
    }
  };

  struct eq_run {
    bool operator()(const RosAlloc::Run* r1, const RosAlloc::Run* r2) const {
      return r1 == r2;
    }
  };

 public:
  // Different page release modes.
  enum PageReleaseMode {
    kPageReleaseModeNone,        // Release no empty pages.
    kPageReleaseModeEnd,         // Release empty pages at the end of the space.
    kPageReleaseModeSize,        // Release free page runs that are larger than the threshold.
    kPageReleaseModeSizeAndEnd,  // Release free page runs that are larger than the threshold and
                                 // at the end of the space.
    kPageReleaseModeAll,         // Release all empty pages.
  };

  // The default value for page_release_size_threshold_.
  static constexpr size_t kDefaultPageReleaseSizeThreshold = 4 * MB;

  // We use thread-local runs for the size brackets whose indexes
  // are less than this index. We use shared (current) runs for the rest.
  static const size_t kNumThreadLocalSizeBrackets = 8;

 private:
  // The base address of the memory region that's managed by this allocator.
  byte* base_;

  // The footprint in bytes of the currently allocated portion of the
  // memory region.
  size_t footprint_;

  // The maximum footprint. The address, base_ + capacity_, indicates
  // the end of the memory region that's currently managed by this allocator.
  size_t capacity_;

  // The maximum capacity. The address, base_ + max_capacity_, indicates
  // the end of the memory region that's ever managed by this allocator.
  size_t max_capacity_;

  // The run sets that hold the runs whose slots are not all
  // full. non_full_runs_[i] is guarded by size_bracket_locks_[i].
  std::set<Run*> non_full_runs_[kNumOfSizeBrackets];
  // The run sets that hold the runs whose slots are all full. This is
  // debug only. full_runs_[i] is guarded by size_bracket_locks_[i].
  std::unordered_set<Run*, hash_run, eq_run> full_runs_[kNumOfSizeBrackets];
  // The set of free pages.
  std::set<FreePageRun*> free_page_runs_ GUARDED_BY(lock_);
  // The dedicated full run; it is always full and shared by all threads when revoking happens.
  // This is an optimization since it enables us to avoid a null check for revoked runs.
  static Run* dedicated_full_run_;
  // Using size_t to ensure that it is at least word aligned.
  static size_t dedicated_full_run_storage_[];
  // The current runs where the allocations are first attempted for
  // the size brackets that do not use thread-local
  // runs. current_runs_[i] is guarded by size_bracket_locks_[i].
  Run* current_runs_[kNumOfSizeBrackets];
  // The mutexes, one per size bracket.
  Mutex* size_bracket_locks_[kNumOfSizeBrackets];
  // Bracket lock names (since locks only have char* names).
  std::string size_bracket_lock_names_[kNumOfSizeBrackets];
  // The types of page map entries.
  enum {
    kPageMapReleased = 0,     // Zero and released back to the OS.
    kPageMapEmpty,            // Zero but probably dirty.
    kPageMapRun,              // The beginning of a run.
    kPageMapRunPart,          // The non-beginning part of a run.
    kPageMapLargeObject,      // The beginning of a large object.
    kPageMapLargeObjectPart,  // The non-beginning part of a large object.
  };
  // The table that indicates what pages are currently used for.
  volatile byte* page_map_;  // No GUARDED_BY(lock_) for kReadPageMapEntryWithoutLockInBulkFree.
  size_t page_map_size_;
  size_t max_page_map_size_;
  std::unique_ptr<MemMap> page_map_mem_map_;

  // The table that indicates the size of free page runs. These sizes
  // are stored here, rather than in the free page headers themselves,
  // so that the backing pages can be released.
  std::vector<size_t> free_page_run_size_map_ GUARDED_BY(lock_);
  // The global lock. Used to guard the page map, the free page set,
  // and the footprint.
  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // The reader-writer lock to allow one bulk free at a time while
  // allowing multiple individual frees at the same time. Also, this
  // is used to avoid race conditions between BulkFree() and
  // RevokeThreadLocalRuns() on the bulk free bitmaps.
  ReaderWriterMutex bulk_free_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // The page release mode.
  const PageReleaseMode page_release_mode_;
  // Under kPageReleaseModeSize(AndEnd), if the free page run size is
  // greater than or equal to this value, release pages.
  const size_t page_release_size_threshold_;

  // The base address of the memory region that's managed by this allocator.
  byte* Begin() { return base_; }
  // The end address of the memory region that's managed by this allocator.
  byte* End() { return base_ + capacity_; }

  // Page-granularity alloc/free.
  void* AllocPages(Thread* self, size_t num_pages, byte page_map_type)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);
  // Returns how many bytes were freed.
  size_t FreePages(Thread* self, void* ptr, bool already_zero) EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Allocate a run slot.
  void* AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated)
      LOCKS_EXCLUDED(lock_);
  // Allocate a run slot without acquiring locks.
  // TODO: EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
  void* AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated)
      LOCKS_EXCLUDED(lock_);
  void* AllocFromCurrentRunUnlocked(Thread* self, size_t idx);

  // Free a run slot. Returns the bracket size.
  size_t FreeFromRun(Thread* self, void* ptr, Run* run)
      LOCKS_EXCLUDED(lock_);

  // Used to allocate a new thread-local run for a size bracket.
  Run* AllocRun(Thread* self, size_t idx) LOCKS_EXCLUDED(lock_);

  // Used to acquire a new/reused run for a size bracket. Used when a
  // thread-local or current run gets full.
  Run* RefillRun(Thread* self, size_t idx) LOCKS_EXCLUDED(lock_);

  // The internals of the non-bulk Free().
  size_t FreeInternal(Thread* self, void* ptr) LOCKS_EXCLUDED(lock_);

  // Allocates large objects.
  void* AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) LOCKS_EXCLUDED(lock_);

  // Revoke a run by adding it to non_full_runs_ or freeing the pages.
  void RevokeRun(Thread* self, size_t idx, Run* run);

  // Revoke the current runs which share an index with the thread local runs.
  void RevokeThreadUnsafeCurrentRuns();

  // Release a range of pages.
  size_t ReleasePageRange(byte* start, byte* end) EXCLUSIVE_LOCKS_REQUIRED(lock_);

 public:
  RosAlloc(void* base, size_t capacity, size_t max_capacity,
           PageReleaseMode page_release_mode,
           size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
  ~RosAlloc();
  // If kThreadSafe is false then the allocator may avoid acquiring some locks as an optimization.
  // If used, this may cause race conditions if multiple threads are allocating at the same time.
  template<bool kThreadSafe = true>
  void* Alloc(Thread* self, size_t size, size_t* bytes_allocated)
      LOCKS_EXCLUDED(lock_);
  size_t Free(Thread* self, void* ptr)
      LOCKS_EXCLUDED(bulk_free_lock_);
  size_t BulkFree(Thread* self, void** ptrs, size_t num_ptrs)
      LOCKS_EXCLUDED(bulk_free_lock_);
  // Returns the size of the allocated slot for a given allocated memory chunk.
  size_t UsableSize(void* ptr);
  // Returns the size of the allocated slot for a given size.
  size_t UsableSize(size_t bytes) {
    if (UNLIKELY(bytes > kLargeSizeThreshold)) {
      return RoundUp(bytes, kPageSize);
    } else {
      return RoundToBracketSize(bytes);
    }
  }
  // Try to reduce the current footprint by releasing the free page
  // run at the end of the memory region, if any.
  bool Trim();
  // Iterates over all the memory slots and applies the given function.
  void InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                  void* arg)
      LOCKS_EXCLUDED(lock_);
  // Release empty pages.
  size_t ReleasePages() LOCKS_EXCLUDED(lock_);
  // Returns the current footprint.
  size_t Footprint() LOCKS_EXCLUDED(lock_);
  // Returns the current capacity, maximum footprint.
  size_t FootprintLimit() LOCKS_EXCLUDED(lock_);
  // Update the current capacity.
  void SetFootprintLimit(size_t bytes) LOCKS_EXCLUDED(lock_);
  // Releases the thread-local runs assigned to the given thread back to the common set of runs.
  void RevokeThreadLocalRuns(Thread* thread);
  // Releases the thread-local runs assigned to all the threads back to the common set of runs.
  void RevokeAllThreadLocalRuns() LOCKS_EXCLUDED(Locks::thread_list_lock_);
  // Assert the thread local runs of a thread are revoked.
  void AssertThreadLocalRunsAreRevoked(Thread* thread);
  // Assert all the thread local runs are revoked.
  void AssertAllThreadLocalRunsAreRevoked() LOCKS_EXCLUDED(Locks::thread_list_lock_);
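  // A sketch of typical use of the public allocation interface above (variable names and sizes
  // here are illustrative only):
  //
  //   RosAlloc rosalloc(base, capacity, max_capacity, kPageReleaseModeSizeAndEnd);
  //   size_t bytes_allocated = 0;
  //   void* obj = rosalloc.Alloc(self, 24, &bytes_allocated);  // 24 bytes -> the 32-byte bracket.
  //   DCHECK_EQ(rosalloc.UsableSize(obj), static_cast<size_t>(32));
  //   rosalloc.Free(self, obj);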
  // Dumps the page map for debugging.
  std::string DumpPageMap() EXCLUSIVE_LOCKS_REQUIRED(lock_);
  static Run* GetDedicatedFullRun() {
    return dedicated_full_run_;
  }
  bool IsFreePage(size_t idx) const {
    DCHECK_LT(idx, capacity_ / kPageSize);
    byte pm_type = page_map_[idx];
    return pm_type == kPageMapReleased || pm_type == kPageMapEmpty;
  }

  // Callbacks for InspectAll that will count the number of bytes
  // allocated and objects allocated, respectively.
  static void BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);
  static void ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);

  bool DoesReleaseAllPages() const {
    return page_release_mode_ == kPageReleaseModeAll;
  }

  // Verify for debugging.
  void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes);
};

}  // namespace allocator
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_