/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dlmalloc_space-inl.h"

#include "gc/accounting/card_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"
#include "utils.h"
#include "valgrind_malloc_space-inl.h"

namespace art {
namespace gc {
namespace space {

static constexpr bool kPrefetchDuringDlMallocFreeList = true;

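// Explicitly instantiate the Valgrind-wrapped variant of this space so its definition is
// emitted in this translation unit.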
template class ValgrindMallocSpace<DlMallocSpace, void*>;

DlMallocSpace::DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
                             byte* end, byte* limit, size_t growth_limit,
                             bool can_move_objects, size_t starting_size,
                             size_t initial_size)
    : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
                  starting_size, initial_size),
      mspace_(mspace) {
  CHECK(mspace != nullptr);
}

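// Creates a DlMallocSpace on top of an existing memory mapping. The mspace is given
// starting_size bytes up front; the rest of the mapping stays protected until morecore grows
// the space. The space is wrapped for Valgrind when the runtime is running under it.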
DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
                                               size_t starting_size, size_t initial_size,
                                               size_t growth_limit, size_t capacity,
                                               bool can_move_objects) {
  DCHECK(mem_map != nullptr);
  void* mspace = CreateMspace(mem_map->Begin(), starting_size, initial_size);
  if (mspace == nullptr) {
    LOG(ERROR) << "Failed to initialize mspace for alloc space (" << name << ")";
    return nullptr;
  }

  // Protect memory beyond the starting size. morecore will add r/w permissions when necessary.
  byte* end = mem_map->Begin() + starting_size;
  if (capacity - starting_size > 0) {
    CHECK_MEMORY_CALL(mprotect, (end, capacity - starting_size, PROT_NONE), name);
  }

  // Everything is set so record in immutable structure and leave.
  byte* begin = mem_map->Begin();
  if (Runtime::Current()->RunningOnValgrind()) {
    return new ValgrindMallocSpace<DlMallocSpace, void*>(
        name, mem_map, mspace, begin, end, begin + capacity, growth_limit, initial_size,
        can_move_objects, starting_size);
  } else {
    return new DlMallocSpace(name, mem_map, mspace, begin, end, begin + capacity, growth_limit,
                             can_move_objects, starting_size, initial_size);
  }
}

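// Creates the backing MemMap (optionally at requested_begin) and then builds the space on top
// of it via CreateFromMemMap. Returns nullptr on failure.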
DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size,
                                     size_t growth_limit, size_t capacity, byte* requested_begin,
                                     bool can_move_objects) {
  uint64_t start_time = 0;
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    start_time = NanoTime();
    LOG(INFO) << "DlMallocSpace::Create entering " << name
        << " initial_size=" << PrettySize(initial_size)
        << " growth_limit=" << PrettySize(growth_limit)
        << " capacity=" << PrettySize(capacity)
        << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
  }

  // Memory we promise to dlmalloc before it asks for morecore.
  // Note: making this value large means that large allocations are unlikely to succeed as dlmalloc
  // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
  // size of the large allocation) will be greater than the footprint limit.
  size_t starting_size = kPageSize;
  MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
                                 requested_begin);
  if (mem_map == nullptr) {
    LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
               << PrettySize(capacity);
    return nullptr;
  }
  DlMallocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
                                          growth_limit, capacity, can_move_objects);
  // We start out with only the initial size possibly containing objects.
  // Guard against dereferencing a null space if CreateFromMemMap failed.
  if ((VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) && space != nullptr) {
    LOG(INFO) << "DlMallocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
        << ") " << *space;
  }
  return space;
}

void* DlMallocSpace::CreateMspace(void* begin, size_t morecore_start, size_t initial_size) {
  // Clear errno to allow PLOG on error.
  errno = 0;
  // Create mspace using our backing storage starting at begin and with a footprint of
  // morecore_start. Don't use an internal dlmalloc lock (as we already hold heap lock). When
  // morecore_start bytes of memory are exhausted morecore will be called.
  void* msp = create_mspace_with_base(begin, morecore_start, false /*locked*/);
  if (msp != nullptr) {
    // Do not allow morecore requests to succeed beyond the initial size of the heap.
    mspace_set_footprint_limit(msp, initial_size);
  } else {
    PLOG(ERROR) << "create_mspace_with_base failed";
  }
  return msp;
}

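// Allocation slow path: temporarily raise the footprint limit to the space's full capacity so
// dlmalloc may grow, try the allocation, then clamp the limit back down to the actual footprint.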
mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
                                               size_t* bytes_allocated, size_t* usable_size) {
  mirror::Object* result;
  {
    MutexLock mu(self, lock_);
    // Grow as much as possible within the space.
    size_t max_allowed = Capacity();
    mspace_set_footprint_limit(mspace_, max_allowed);
    // Try the allocation.
    result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size);
    // Shrink back down as small as possible.
    size_t footprint = mspace_footprint(mspace_);
    mspace_set_footprint_limit(mspace_, footprint);
  }
  if (result != nullptr) {
    // Zero freshly allocated memory, done while not holding the space's lock.
    memset(result, 0, num_bytes);
    // Check that the result is contained in the space.
    CHECK(!kDebugSpaces || Contains(result));
  }
  return result;
}

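// Factory for creating another space instance over an existing mapping (used when the space is
// split, e.g. when creating the zygote space); reuses this space's starting and initial sizes.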
MallocSpace* DlMallocSpace::CreateInstance(const std::string& name, MemMap* mem_map,
                                           void* allocator, byte* begin, byte* end,
                                           byte* limit, size_t growth_limit,
                                           bool can_move_objects) {
  return new DlMallocSpace(name, mem_map, allocator, begin, end, limit, growth_limit,
                           can_move_objects, starting_size_, initial_size_);
}

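// Frees a single object, returning the number of bytes released back to the mspace.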
size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  if (kDebugSpaces) {
    CHECK(ptr != nullptr);
    CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
  }
  const size_t bytes_freed = AllocationSizeNonvirtual(ptr, nullptr);
  if (kRecentFreeCount > 0) {
    RegisterRecentFree(ptr);
  }
  mspace_free(mspace_, ptr);
  return bytes_freed;
}

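// Bulk-frees num_ptrs objects. The freed sizes are computed up front without holding the lock;
// the actual mspace_bulk_free is done under lock_. Returns the total bytes freed.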
size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  DCHECK(ptrs != nullptr);

  // Don't need the lock to calculate the size of the freed pointers.
  size_t bytes_freed = 0;
  for (size_t i = 0; i < num_ptrs; i++) {
    mirror::Object* ptr = ptrs[i];
    const size_t look_ahead = 8;
    if (kPrefetchDuringDlMallocFreeList && i + look_ahead < num_ptrs) {
      // The head of the chunk for the allocation is sizeof(size_t) behind the allocation.
      __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead]) - sizeof(size_t));
    }
    bytes_freed += AllocationSizeNonvirtual(ptr, nullptr);
  }

  if (kRecentFreeCount > 0) {
    MutexLock mu(self, lock_);
    for (size_t i = 0; i < num_ptrs; i++) {
      RegisterRecentFree(ptrs[i]);
    }
  }

  if (kDebugSpaces) {
    size_t num_broken_ptrs = 0;
    for (size_t i = 0; i < num_ptrs; i++) {
      if (!Contains(ptrs[i])) {
        num_broken_ptrs++;
        LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
      } else {
        // Clobber the object's memory to help catch use-after-free bugs.
        size_t size = mspace_usable_size(ptrs[i]);
        memset(ptrs[i], 0xEF, size);
      }
    }
    CHECK_EQ(num_broken_ptrs, 0u);
  }

  {
    MutexLock mu(self, lock_);
    mspace_bulk_free(mspace_, reinterpret_cast<void**>(ptrs), num_ptrs);
    return bytes_freed;
  }
}

// Callback from dlmalloc when it needs to increase the footprint.
extern "C" void* art_heap_morecore(void* mspace, intptr_t increment) {
  Heap* heap = Runtime::Current()->GetHeap();
  DlMallocSpace* dlmalloc_space = heap->GetDlMallocSpace();
  // Support for multiple DlMalloc spaces is provided by a slow path.
  if (UNLIKELY(dlmalloc_space == nullptr || dlmalloc_space->GetMspace() != mspace)) {
    dlmalloc_space = nullptr;
    for (space::ContinuousSpace* space : heap->GetContinuousSpaces()) {
      if (space->IsDlMallocSpace()) {
        DlMallocSpace* cur_dlmalloc_space = space->AsDlMallocSpace();
        if (cur_dlmalloc_space->GetMspace() == mspace) {
          dlmalloc_space = cur_dlmalloc_space;
          break;
        }
      }
    }
    CHECK(dlmalloc_space != nullptr) << "Couldn't find DlMallocSpace with mspace=" << mspace;
  }
  return dlmalloc_space->MoreCore(increment);
}

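// Returns unused memory to the kernel: trims the top of the mspace, then madvises away
// page-sized holes. Returns the number of bytes reclaimed.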
size_t DlMallocSpace::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  // Trim to release memory at the end of the space.
  mspace_trim(mspace_, 0);
  // Visit space looking for page-sized holes to advise the kernel we don't need.
  size_t reclaimed = 0;
  mspace_inspect_all(mspace_, DlmallocMadviseCallback, &reclaimed);
  return reclaimed;
}

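// Visits every chunk in the mspace, then invokes the callback one final time with null
// start/end to signal the end of the space.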
void DlMallocSpace::Walk(void(*callback)(void* start, void* end, size_t num_bytes,
                                         void* callback_arg),
                         void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  mspace_inspect_all(mspace_, callback, arg);
  callback(nullptr, nullptr, 0, arg);  // Indicate end of a space.
}

size_t DlMallocSpace::GetFootprint() {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_footprint(mspace_);
}

size_t DlMallocSpace::GetFootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_footprint_limit(mspace_);
}

void DlMallocSpace::SetFootprintLimit(size_t new_size) {
  MutexLock mu(Thread::Current(), lock_);
  VLOG(heap) << "DlMallocSpace::SetFootprintLimit " << PrettySize(new_size);
  // Compare against the actual footprint, rather than the Size(), because the heap may not have
  // grown all the way to the allowed size yet.
  size_t current_space_size = mspace_footprint(mspace_);
  if (new_size < current_space_size) {
    // Don't let the space grow any more.
    new_size = current_space_size;
  }
  mspace_set_footprint_limit(mspace_, new_size);
}

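// Note: computed by inspecting every chunk in the mspace, so this is linear in heap size.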
uint64_t DlMallocSpace::GetBytesAllocated() {
  MutexLock mu(Thread::Current(), lock_);
  size_t bytes_allocated = 0;
  mspace_inspect_all(mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

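// Note: like GetBytesAllocated, this walks every chunk, so it is linear in heap size.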
uint64_t DlMallocSpace::GetObjectsAllocated() {
  MutexLock mu(Thread::Current(), lock_);
  size_t objects_allocated = 0;
  mspace_inspect_all(mspace_, DlmallocObjectsAllocatedCallback, &objects_allocated);
  return objects_allocated;
}

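// Resets the space to its post-creation state: returns the pages to the kernel, clears the
// live/mark bitmaps, and rebuilds a fresh mspace while preserving the old footprint limit.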
void DlMallocSpace::Clear() {
  size_t footprint_limit = GetFootprintLimit();
  madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
  live_bitmap_->Clear();
  mark_bitmap_->Clear();
  SetEnd(Begin() + starting_size_);
  mspace_ = CreateMspace(mem_map_->Begin(), starting_size_, initial_size_);
  SetFootprintLimit(footprint_limit);
}

#ifndef NDEBUG
void DlMallocSpace::CheckMoreCoreForPrecondition() {
  lock_.AssertHeld(Thread::Current());
}
#endif

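// mspace_inspect_all callback that records the largest run of free bytes seen in any chunk;
// used below to diagnose fragmentation.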
static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
  if (used_bytes < chunk_size) {
    size_t chunk_free_bytes = chunk_size - used_bytes;
    size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
    max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
  }
}

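// Called after an allocation of failed_alloc_bytes fails; reports the largest contiguous free
// chunk so the log shows whether fragmentation caused the failure.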
void DlMallocSpace::LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) {
  Thread* self = Thread::Current();
  size_t max_contiguous_allocation = 0;
  // To allow the Walk/InspectAll() to exclusively-lock the mutator
  // lock, temporarily release the shared access to the mutator
  // lock here by transitioning to the suspended state.
  Locks::mutator_lock_->AssertSharedHeld(self);
  self->TransitionFromRunnableToSuspended(kSuspended);
  Walk(MSpaceChunkCallback, &max_contiguous_allocation);
  self->TransitionFromSuspendedToRunnable();
  Locks::mutator_lock_->AssertSharedHeld(self);
  os << "; failed due to fragmentation (largest possible contiguous allocation "
     << max_contiguous_allocation << " bytes)";
}

}  // namespace space
}  // namespace gc
}  // namespace art