      1 /*
      2  * Copyright (C) 2011 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
#include "dlmalloc_space-inl.h"

#include "base/time_utils.h"
#include "gc/accounting/card_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"
#include "utils.h"
#include "valgrind_malloc_space-inl.h"

namespace art {
namespace gc {
namespace space {

static constexpr bool kPrefetchDuringDlMallocFreeList = true;

DlMallocSpace::DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
                             void* mspace, uint8_t* begin, uint8_t* end, uint8_t* limit,
                             size_t growth_limit, bool can_move_objects, size_t starting_size)
    : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
                  starting_size, initial_size),
      mspace_(mspace) {
  CHECK(mspace != nullptr);
}

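// Create a DlMallocSpace inside an existing mapping: build the mspace at the start of the map,
// protect the pages beyond starting_size (morecore re-enables access as the space grows), and
// return either a plain DlMallocSpace or a Valgrind-wrapped one depending on the runtime.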
DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
                                               size_t starting_size, size_t initial_size,
                                               size_t growth_limit, size_t capacity,
                                               bool can_move_objects) {
  DCHECK(mem_map != nullptr);
  void* mspace = CreateMspace(mem_map->Begin(), starting_size, initial_size);
  if (mspace == nullptr) {
    LOG(ERROR) << "Failed to initialize mspace for alloc space (" << name << ")";
    return nullptr;
  }

  // Protect memory beyond the starting size. Morecore will add r/w permissions when necessary.
  uint8_t* end = mem_map->Begin() + starting_size;
  if (capacity - starting_size > 0) {
    CHECK_MEMORY_CALL(mprotect, (end, capacity - starting_size, PROT_NONE), name);
  }

  // Everything is set, so record it in the immutable structure and return.
  uint8_t* begin = mem_map->Begin();
  if (Runtime::Current()->RunningOnValgrind()) {
    return new ValgrindMallocSpace<DlMallocSpace, kDefaultValgrindRedZoneBytes, true, false>(
        mem_map, initial_size, name, mspace, begin, end, begin + capacity, growth_limit,
        can_move_objects, starting_size);
  } else {
    return new DlMallocSpace(mem_map, initial_size, name, mspace, begin, end, begin + capacity,
                             growth_limit, can_move_objects, starting_size);
  }
}

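// Create a new DlMallocSpace backed by a freshly allocated MemMap of the requested capacity.
// The mspace starts with a footprint of a single page and may grow up to growth_limit.
//
// Illustrative call only (not taken from this file; the name and sizes below are made up):
//
//   space::DlMallocSpace* alloc_space =
//       space::DlMallocSpace::Create("alloc space",
//                                    /* initial_size */ 4 * MB,
//                                    /* growth_limit */ 64 * MB,
//                                    /* capacity */ 64 * MB,
//                                    /* requested_begin */ nullptr,
//                                    /* can_move_objects */ false);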
DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size,
                                     size_t growth_limit, size_t capacity, uint8_t* requested_begin,
                                     bool can_move_objects) {
  uint64_t start_time = 0;
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    start_time = NanoTime();
    LOG(INFO) << "DlMallocSpace::Create entering " << name
        << " initial_size=" << PrettySize(initial_size)
        << " growth_limit=" << PrettySize(growth_limit)
        << " capacity=" << PrettySize(capacity)
        << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
  }

  // Memory we promise to dlmalloc before it asks for morecore.
  // Note: making this value large means that large allocations are unlikely to succeed as dlmalloc
  // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
  // size of the large allocation) will be greater than the footprint limit.
  size_t starting_size = kPageSize;
  MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
                                 requested_begin);
  if (mem_map == nullptr) {
    LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
               << PrettySize(capacity);
    return nullptr;
  }
  DlMallocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
                                          growth_limit, capacity, can_move_objects);
  // We start out with only the initial size possibly containing objects.
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "DlMallocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
        << " ) " << *space;
  }
  return space;
}

void* DlMallocSpace::CreateMspace(void* begin, size_t morecore_start, size_t initial_size) {
  // Clear errno to allow PLOG on error.
  errno = 0;
  // Create an mspace using our backing storage starting at begin and with a footprint of
  // morecore_start. Don't use an internal dlmalloc lock (as we already hold the heap lock). When
  // morecore_start bytes of memory are exhausted morecore will be called.
  void* msp = create_mspace_with_base(begin, morecore_start, false /*locked*/);
  if (msp != nullptr) {
    // Do not allow morecore requests to succeed beyond the initial size of the heap
    mspace_set_footprint_limit(msp, initial_size);
  } else {
    PLOG(ERROR) << "create_mspace_with_base failed";
  }
  return msp;
}

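// Allocation path that may grow the mspace: the footprint limit is temporarily raised to the
// full Capacity() of the space for this allocation and then shrunk back to the actual footprint
// afterwards.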
mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
                                               size_t* bytes_allocated, size_t* usable_size,
                                               size_t* bytes_tl_bulk_allocated) {
  mirror::Object* result;
  {
    MutexLock mu(self, lock_);
    // Grow as much as possible within the space.
    size_t max_allowed = Capacity();
    mspace_set_footprint_limit(mspace_, max_allowed);
    // Try the allocation.
    result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size,
                                      bytes_tl_bulk_allocated);
    // Shrink back down as small as possible.
    size_t footprint = mspace_footprint(mspace_);
    mspace_set_footprint_limit(mspace_, footprint);
  }
  if (result != nullptr) {
    // Zero freshly allocated memory, done while not holding the space's lock.
    memset(result, 0, num_bytes);
    // Check that the result is contained in the space.
    CHECK(!kDebugSpaces || Contains(result));
  }
  return result;
}

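// Recreate a malloc space of this type over an existing mapping and allocator, making the same
// Valgrind/non-Valgrind choice as CreateFromMemMap. (MallocSpace uses this when it needs to
// rebuild a space over part of an existing mapping, e.g. when creating the zygote space.)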
MallocSpace* DlMallocSpace::CreateInstance(MemMap* mem_map, const std::string& name,
                                           void* allocator, uint8_t* begin, uint8_t* end,
                                           uint8_t* limit, size_t growth_limit,
                                           bool can_move_objects) {
  if (Runtime::Current()->RunningOnValgrind()) {
    return new ValgrindMallocSpace<DlMallocSpace, kDefaultValgrindRedZoneBytes, true, false>(
        mem_map, initial_size_, name, allocator, begin, end, limit, growth_limit,
        can_move_objects, starting_size_);
  } else {
    return new DlMallocSpace(mem_map, initial_size_, name, allocator, begin, end, limit,
                             growth_limit, can_move_objects, starting_size_);
  }
}

size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  if (kDebugSpaces) {
    CHECK(ptr != nullptr);
    CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
  }
  const size_t bytes_freed = AllocationSizeNonvirtual(ptr, nullptr);
  if (kRecentFreeCount > 0) {
    RegisterRecentFree(ptr);
  }
  mspace_free(mspace_, ptr);
  return bytes_freed;
}

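// Bulk free. Sizes are summed up front without the lock, recent frees are registered, freed
// memory is poisoned when kDebugSpaces is enabled, and the pointers are then handed to
// mspace_bulk_free in a single call. Returns the total number of bytes freed.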
size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  DCHECK(ptrs != nullptr);

  // Don't need the lock to calculate the size of the freed pointers.
  size_t bytes_freed = 0;
  for (size_t i = 0; i < num_ptrs; i++) {
    mirror::Object* ptr = ptrs[i];
    const size_t look_ahead = 8;
    if (kPrefetchDuringDlMallocFreeList && i + look_ahead < num_ptrs) {
      // The head of the chunk for the allocation is sizeof(size_t) bytes behind the allocation.
      __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead]) - sizeof(size_t));
    }
    bytes_freed += AllocationSizeNonvirtual(ptr, nullptr);
  }

  if (kRecentFreeCount > 0) {
    MutexLock mu(self, lock_);
    for (size_t i = 0; i < num_ptrs; i++) {
      RegisterRecentFree(ptrs[i]);
    }
  }

  if (kDebugSpaces) {
    size_t num_broken_ptrs = 0;
    for (size_t i = 0; i < num_ptrs; i++) {
      if (!Contains(ptrs[i])) {
        num_broken_ptrs++;
        LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
      } else {
        size_t size = mspace_usable_size(ptrs[i]);
        memset(ptrs[i], 0xEF, size);
      }
    }
    CHECK_EQ(num_broken_ptrs, 0u);
  }

  {
    MutexLock mu(self, lock_);
    mspace_bulk_free(mspace_, reinterpret_cast<void**>(ptrs), num_ptrs);
    return bytes_freed;
  }
}

size_t DlMallocSpace::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  // Trim to release memory at the end of the space.
  mspace_trim(mspace_, 0);
  // Visit the space looking for page-sized holes so we can advise the kernel that we don't need them.
  size_t reclaimed = 0;
  mspace_inspect_all(mspace_, DlmallocMadviseCallback, &reclaimed);
  return reclaimed;
}

void DlMallocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                      void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  mspace_inspect_all(mspace_, callback, arg);
  callback(nullptr, nullptr, 0, arg);  // Indicate end of a space.
}

size_t DlMallocSpace::GetFootprint() {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_footprint(mspace_);
}

size_t DlMallocSpace::GetFootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_footprint_limit(mspace_);
}

void DlMallocSpace::SetFootprintLimit(size_t new_size) {
  MutexLock mu(Thread::Current(), lock_);
  VLOG(heap) << "DlMallocSpace::SetFootprintLimit " << PrettySize(new_size);
  // Compare against the actual footprint, rather than the Size(), because the heap may not have
  // grown all the way to the allowed size yet.
  size_t current_space_size = mspace_footprint(mspace_);
  if (new_size < current_space_size) {
    // Don't let the space grow any more.
    new_size = current_space_size;
  }
  mspace_set_footprint_limit(mspace_, new_size);
}

uint64_t DlMallocSpace::GetBytesAllocated() {
  MutexLock mu(Thread::Current(), lock_);
  size_t bytes_allocated = 0;
  mspace_inspect_all(mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

uint64_t DlMallocSpace::GetObjectsAllocated() {
  MutexLock mu(Thread::Current(), lock_);
  size_t objects_allocated = 0;
  mspace_inspect_all(mspace_, DlmallocObjectsAllocatedCallback, &objects_allocated);
  return objects_allocated;
}

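// Reset the space to its freshly created state: release the backing pages with
// madvise(MADV_DONTNEED), clear the live and mark bitmaps, rewind the end to starting_size_,
// rebuild the mspace, and restore the previous footprint limit.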
void DlMallocSpace::Clear() {
  size_t footprint_limit = GetFootprintLimit();
  madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
  live_bitmap_->Clear();
  mark_bitmap_->Clear();
  SetEnd(Begin() + starting_size_);
  mspace_ = CreateMspace(mem_map_->Begin(), starting_size_, initial_size_);
  SetFootprintLimit(footprint_limit);
}

#ifndef NDEBUG
void DlMallocSpace::CheckMoreCoreForPrecondition() {
  lock_.AssertHeld(Thread::Current());
}
#endif

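// Callback for mspace_inspect_all (used by LogFragmentationAllocFailure below, via Walk):
// records the largest free run seen in any chunk, i.e. the largest contiguous allocation that
// could still succeed.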
static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
  if (used_bytes < chunk_size) {
    size_t chunk_free_bytes = chunk_size - used_bytes;
    size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
    max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
  }
}

void DlMallocSpace::LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) {
  UNUSED(failed_alloc_bytes);
  Thread* self = Thread::Current();
  size_t max_contiguous_allocation = 0;
  // To allow the Walk/InspectAll() to exclusively-lock the mutator
  // lock, temporarily release the shared access to the mutator
  // lock here by transitioning to the suspended state.
  Locks::mutator_lock_->AssertSharedHeld(self);
  self->TransitionFromRunnableToSuspended(kSuspended);
  Walk(MSpaceChunkCallback, &max_contiguous_allocation);
  self->TransitionFromSuspendedToRunnable();
  Locks::mutator_lock_->AssertSharedHeld(self);
  os << "; failed due to fragmentation (largest possible contiguous allocation "
     << max_contiguous_allocation << " bytes)";
}

}  // namespace space

namespace allocator {

// Implement the dlmalloc morecore callback.
void* ArtDlMallocMoreCore(void* mspace, intptr_t increment) {
  Heap* heap = Runtime::Current()->GetHeap();
  ::art::gc::space::DlMallocSpace* dlmalloc_space = heap->GetDlMallocSpace();
  // Support for multiple DlMallocSpaces is provided by a slow path.
  if (UNLIKELY(dlmalloc_space == nullptr || dlmalloc_space->GetMspace() != mspace)) {
    dlmalloc_space = nullptr;
    for (space::ContinuousSpace* space : heap->GetContinuousSpaces()) {
      if (space->IsDlMallocSpace()) {
        ::art::gc::space::DlMallocSpace* cur_dlmalloc_space = space->AsDlMallocSpace();
        if (cur_dlmalloc_space->GetMspace() == mspace) {
          dlmalloc_space = cur_dlmalloc_space;
          break;
        }
      }
    }
    CHECK(dlmalloc_space != nullptr) << "Couldn't find DlMallocSpace with mspace=" << mspace;
  }
  return dlmalloc_space->MoreCore(increment);
}

}  // namespace allocator

}  // namespace gc
}  // namespace art