/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dlmalloc_space-inl.h"

#include "base/logging.h"  // For VLOG.
#include "base/time_utils.h"
#include "base/utils.h"
#include "gc/accounting/card_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "memory_tool_malloc_space-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace space {

static constexpr bool kPrefetchDuringDlMallocFreeList = true;

DlMallocSpace::DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
                             void* mspace, uint8_t* begin, uint8_t* end, uint8_t* limit,
                             size_t growth_limit, bool can_move_objects, size_t starting_size)
    : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
                  starting_size, initial_size),
      mspace_(mspace) {
  CHECK(mspace != nullptr);
}

DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
                                               size_t starting_size, size_t initial_size,
                                               size_t growth_limit, size_t capacity,
                                               bool can_move_objects) {
  DCHECK(mem_map != nullptr);
  void* mspace = CreateMspace(mem_map->Begin(), starting_size, initial_size);
  if (mspace == nullptr) {
    LOG(ERROR) << "Failed to initialize mspace for alloc space (" << name << ")";
    return nullptr;
  }

  // Protect memory beyond the starting size; morecore will add r/w permissions when necessary.
  uint8_t* end = mem_map->Begin() + starting_size;
  if (capacity - starting_size > 0) {
    CheckedCall(mprotect, name.c_str(), end, capacity - starting_size, PROT_NONE);
  }

  // Everything is set up, so record the state in an immutable structure and return.
  uint8_t* begin = mem_map->Begin();
  if (Runtime::Current()->IsRunningOnMemoryTool()) {
    return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>(
        mem_map, initial_size, name, mspace, begin, end, begin + capacity, growth_limit,
        can_move_objects, starting_size);
  } else {
    return new DlMallocSpace(mem_map, initial_size, name, mspace, begin, end, begin + capacity,
                             growth_limit, can_move_objects, starting_size);
  }
}
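
// A rough sketch of the layout that CreateFromMemMap() establishes (sizes
// hypothetical, not ART defaults): with starting_size = 4 KiB and
// capacity = 128 MiB,
//   [Begin(), Begin() + 4 KiB)           is R/W and backs the initial mspace;
//   [Begin() + 4 KiB, Begin() + 128 MiB) is PROT_NONE until MoreCore() adds
//                                        R/W permissions as the space grows;
// growth_limit (<= capacity) caps how far the space may ultimately grow.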

DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size,
                                     size_t growth_limit, size_t capacity,
                                     uint8_t* requested_begin, bool can_move_objects) {
  uint64_t start_time = 0;
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    start_time = NanoTime();
    LOG(INFO) << "DlMallocSpace::Create entering " << name
              << " initial_size=" << PrettySize(initial_size)
              << " growth_limit=" << PrettySize(growth_limit)
              << " capacity=" << PrettySize(capacity)
              << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
  }

  // Memory we promise to dlmalloc before it asks for morecore.
  // Note: making this value large means that large allocations are unlikely to succeed as dlmalloc
  // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
  // size of the large allocation) will be greater than the footprint limit.
  size_t starting_size = kPageSize;
  MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
                                 requested_begin);
  if (mem_map == nullptr) {
    LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
               << PrettySize(capacity);
    return nullptr;
  }
  DlMallocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
                                          growth_limit, capacity, can_move_objects);
  // We start out with only the initial size possibly containing objects.
  // CreateFromMemMap() may have failed; don't dereference a null space below.
  if (space != nullptr && (VLOG_IS_ON(heap) || VLOG_IS_ON(startup))) {
    LOG(INFO) << "DlMallocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
              << " ) " << *space;
  }
  return space;
}

void* DlMallocSpace::CreateMspace(void* begin, size_t morecore_start, size_t initial_size) {
  // Clear errno to allow PLOG on error.
  errno = 0;
  // Create an mspace using our backing storage starting at begin with a footprint of
  // morecore_start. Don't use an internal dlmalloc lock (as we already hold the heap lock). When
  // morecore_start bytes of memory are exhausted, morecore will be called.
  void* msp = create_mspace_with_base(begin, morecore_start, false /*locked*/);
  if (msp != nullptr) {
    // Do not allow morecore requests to succeed beyond the initial size of the heap.
    mspace_set_footprint_limit(msp, initial_size);
  } else {
    PLOG(ERROR) << "create_mspace_with_base failed";
  }
  return msp;
}

mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
                                               size_t* bytes_allocated, size_t* usable_size,
                                               size_t* bytes_tl_bulk_allocated) {
  mirror::Object* result;
  {
    MutexLock mu(self, lock_);
    // Grow as much as possible within the space.
    size_t max_allowed = Capacity();
    mspace_set_footprint_limit(mspace_, max_allowed);
    // Try the allocation.
    result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size,
                                      bytes_tl_bulk_allocated);
    // Shrink back down as small as possible.
    size_t footprint = mspace_footprint(mspace_);
    mspace_set_footprint_limit(mspace_, footprint);
  }
  if (result != nullptr) {
    // Zero freshly allocated memory, done while not holding the space's lock.
    memset(result, 0, num_bytes);
    // Check that the result is contained in the space.
    CHECK(!kDebugSpaces || Contains(result));
  }
  return result;
}
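
// Worked example of the footprint-limit dance in AllocWithGrowth() (numbers
// hypothetical): with footprint = limit = 4 MiB and Capacity() = 64 MiB, a
// 6 MiB request would fail under the old limit; the limit is therefore raised
// to 64 MiB for this one allocation, then clamped back down to the new
// footprint so that ordinary allocations cannot keep growing the space.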

MallocSpace* DlMallocSpace::CreateInstance(MemMap* mem_map, const std::string& name,
                                           void* allocator, uint8_t* begin, uint8_t* end,
                                           uint8_t* limit, size_t growth_limit,
                                           bool can_move_objects) {
  if (Runtime::Current()->IsRunningOnMemoryTool()) {
    return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>(
        mem_map, initial_size_, name, allocator, begin, end, limit, growth_limit,
        can_move_objects, starting_size_);
  } else {
    return new DlMallocSpace(mem_map, initial_size_, name, allocator, begin, end, limit,
                             growth_limit, can_move_objects, starting_size_);
  }
}

size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  if (kDebugSpaces) {
    CHECK(ptr != nullptr);
    CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
  }
  const size_t bytes_freed = AllocationSizeNonvirtual(ptr, nullptr);
  if (kRecentFreeCount > 0) {
    RegisterRecentFree(ptr);
  }
  mspace_free(mspace_, ptr);
  return bytes_freed;
}

size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  DCHECK(ptrs != nullptr);

  // We don't need the lock to compute the sizes of the pointers being freed.
  size_t bytes_freed = 0;
  for (size_t i = 0; i < num_ptrs; i++) {
    mirror::Object* ptr = ptrs[i];
    const size_t look_ahead = 8;
    if (kPrefetchDuringDlMallocFreeList && i + look_ahead < num_ptrs) {
      // The head of the chunk for the allocation is sizeof(size_t) behind the allocation.
      __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead]) - sizeof(size_t));
    }
    bytes_freed += AllocationSizeNonvirtual(ptr, nullptr);
  }

  if (kRecentFreeCount > 0) {
    MutexLock mu(self, lock_);
    for (size_t i = 0; i < num_ptrs; i++) {
      RegisterRecentFree(ptrs[i]);
    }
  }

  if (kDebugSpaces) {
    size_t num_broken_ptrs = 0;
    for (size_t i = 0; i < num_ptrs; i++) {
      if (!Contains(ptrs[i])) {
        num_broken_ptrs++;
        LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
      } else {
        size_t size = mspace_usable_size(ptrs[i]);
        memset(ptrs[i], 0xEF, size);
      }
    }
    CHECK_EQ(num_broken_ptrs, 0u);
  }

  {
    MutexLock mu(self, lock_);
    mspace_bulk_free(mspace_, reinterpret_cast<void**>(ptrs), num_ptrs);
    return bytes_freed;
  }
}

size_t DlMallocSpace::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  // Trim to release memory at the end of the space.
  mspace_trim(mspace_, 0);
  // Visit the space looking for page-sized holes to advise the kernel that we don't need them.
  size_t reclaimed = 0;
  mspace_inspect_all(mspace_, DlmallocMadviseCallback, &reclaimed);
  return reclaimed;
}
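
// A minimal sketch of a Walk() visitor (hypothetical; real callers supply
// their own). Walk() reports each mspace chunk through the callback and
// finishes with a (nullptr, nullptr, 0) sentinel call:
//
//   static void CountChunks(void* start, void* end, size_t used_bytes, void* arg) {
//     if (start != nullptr) {  // Skip the end-of-space sentinel.
//       ++*reinterpret_cast<size_t*>(arg);
//     }
//   }
//
//   size_t num_chunks = 0;
//   space->Walk(CountChunks, &num_chunks);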

void DlMallocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes,
                                         void* callback_arg),
                         void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  mspace_inspect_all(mspace_, callback, arg);
  callback(nullptr, nullptr, 0, arg);  // Indicate end of a space.
}

size_t DlMallocSpace::GetFootprint() {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_footprint(mspace_);
}

size_t DlMallocSpace::GetFootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_footprint_limit(mspace_);
}

void DlMallocSpace::SetFootprintLimit(size_t new_size) {
  MutexLock mu(Thread::Current(), lock_);
  VLOG(heap) << "DlMallocSpace::SetFootprintLimit " << PrettySize(new_size);
  // Compare against the actual footprint, rather than the Size(), because the heap may not have
  // grown all the way to the allowed size yet.
  size_t current_space_size = mspace_footprint(mspace_);
  if (new_size < current_space_size) {
    // Don't let the space grow any more.
    new_size = current_space_size;
  }
  mspace_set_footprint_limit(mspace_, new_size);
}

uint64_t DlMallocSpace::GetBytesAllocated() {
  MutexLock mu(Thread::Current(), lock_);
  size_t bytes_allocated = 0;
  mspace_inspect_all(mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

uint64_t DlMallocSpace::GetObjectsAllocated() {
  MutexLock mu(Thread::Current(), lock_);
  size_t objects_allocated = 0;
  mspace_inspect_all(mspace_, DlmallocObjectsAllocatedCallback, &objects_allocated);
  return objects_allocated;
}

void DlMallocSpace::Clear() {
  size_t footprint_limit = GetFootprintLimit();
  madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
  live_bitmap_->Clear();
  mark_bitmap_->Clear();
  SetEnd(Begin() + starting_size_);
  mspace_ = CreateMspace(mem_map_->Begin(), starting_size_, initial_size_);
  SetFootprintLimit(footprint_limit);
}

#ifndef NDEBUG
void DlMallocSpace::CheckMoreCoreForPrecondition() {
  lock_.AssertHeld(Thread::Current());
}
#endif

static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
  if (used_bytes < chunk_size) {
    size_t chunk_free_bytes = chunk_size - used_bytes;
    size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
    max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
  }
}
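
// Worked example for MSpaceChunkCallback() (addresses hypothetical): a chunk
// spanning [0x1000, 0x3000) with used_bytes = 0x1800 has chunk_size = 0x2000,
// so chunk_free_bytes = 0x800 and max_contiguous_allocation becomes at least
// 0x800. The largest such gap is the biggest contiguous allocation the space
// could still satisfy, which is what the fragmentation log below reports.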

void DlMallocSpace::LogFragmentationAllocFailure(std::ostream& os,
                                                 size_t failed_alloc_bytes ATTRIBUTE_UNUSED) {
  Thread* const self = Thread::Current();
  size_t max_contiguous_allocation = 0;
  // To allow Walk()/InspectAll() to exclusively lock the mutator lock, temporarily release our
  // shared access to it by transitioning to the suspended state.
  Locks::mutator_lock_->AssertSharedHeld(self);
  ScopedThreadSuspension sts(self, kSuspended);
  Walk(MSpaceChunkCallback, &max_contiguous_allocation);
  os << "; failed due to fragmentation (largest possible contiguous allocation "
     << max_contiguous_allocation << " bytes)";
}

}  // namespace space

namespace allocator {

// Implement the dlmalloc morecore callback.
void* ArtDlMallocMoreCore(void* mspace, intptr_t increment) REQUIRES_SHARED(Locks::mutator_lock_) {
  Runtime* runtime = Runtime::Current();
  Heap* heap = runtime->GetHeap();
  ::art::gc::space::DlMallocSpace* dlmalloc_space = heap->GetDlMallocSpace();
  // Support for multiple DlMalloc mspaces is provided by a slow path.
  if (UNLIKELY(dlmalloc_space == nullptr || dlmalloc_space->GetMspace() != mspace)) {
    if (LIKELY(runtime->GetJit() != nullptr)) {
      jit::JitCodeCache* code_cache = runtime->GetJit()->GetCodeCache();
      if (code_cache->OwnsSpace(mspace)) {
        return code_cache->MoreCore(mspace, increment);
      }
    }
    dlmalloc_space = nullptr;
    for (space::ContinuousSpace* space : heap->GetContinuousSpaces()) {
      if (space->IsDlMallocSpace()) {
        ::art::gc::space::DlMallocSpace* cur_dlmalloc_space = space->AsDlMallocSpace();
        if (cur_dlmalloc_space->GetMspace() == mspace) {
          dlmalloc_space = cur_dlmalloc_space;
          break;
        }
      }
    }
    CHECK(dlmalloc_space != nullptr) << "Couldn't find DlMallocSpace with mspace=" << mspace;
  }
  return dlmalloc_space->MoreCore(increment);
}

}  // namespace allocator

}  // namespace gc
}  // namespace art