/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "dlmalloc_space.h"
#include "dlmalloc_space-inl.h"
#include "gc/accounting/card_table.h"
#include "gc/heap.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "thread.h"
#include "utils.h"

#include <valgrind.h>
#include <../memcheck/memcheck.h>

namespace art {
namespace gc {
namespace space {

// TODO: Remove define macro
#define CHECK_MEMORY_CALL(call, args, what) \
  do { \
    int rc = call args; \
    if (UNLIKELY(rc != 0)) { \
      errno = rc; \
      PLOG(FATAL) << # call << " failed for " << what; \
    } \
  } while (false)
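// Usage sketch (addr/length are placeholders; this mirrors the mprotect/madvise calls below):
//   CHECK_MEMORY_CALL(mprotect, (addr, length, PROT_NONE), GetName());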

static const bool kPrefetchDuringDlMallocFreeList = true;

// Number of bytes to use as a red zone (rdz). A red zone of this size will be placed before and
// after each allocation. 8 bytes provides long/double alignment.
const size_t kValgrindRedZoneBytes = 8;
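// With red zones enabled, an allocation of num_bytes is laid out as
// [red zone | object payload (num_bytes) | red zone], and the returned pointer is to the payload.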

// A specialization of DlMallocSpace that provides information to valgrind wrt allocations.
class ValgrindDlMallocSpace : public DlMallocSpace {
 public:
  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
    void* obj_with_rdz = DlMallocSpace::AllocWithGrowth(self, num_bytes + 2 * kValgrindRedZoneBytes,
                                                        bytes_allocated);
    if (obj_with_rdz == NULL) {
      return NULL;
    }
    mirror::Object* result = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes);
    // Mark the red zones as no-access.
    VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes);
    return result;
  }

  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
    void* obj_with_rdz = DlMallocSpace::Alloc(self, num_bytes + 2 * kValgrindRedZoneBytes,
                                              bytes_allocated);
    if (obj_with_rdz == NULL) {
      return NULL;
    }
    mirror::Object* result = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes);
    // Mark the red zones as no-access.
    VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes);
    return result;
  }

  virtual size_t AllocationSize(const mirror::Object* obj) {
    size_t result = DlMallocSpace::AllocationSize(reinterpret_cast<const mirror::Object*>(
        reinterpret_cast<const byte*>(obj) - kValgrindRedZoneBytes));
    return result - 2 * kValgrindRedZoneBytes;
  }

  virtual size_t Free(Thread* self, mirror::Object* ptr) {
    void* obj_after_rdz = reinterpret_cast<void*>(ptr);
    void* obj_with_rdz = reinterpret_cast<byte*>(obj_after_rdz) - kValgrindRedZoneBytes;
    // Make redzones undefined.
    size_t allocation_size = DlMallocSpace::AllocationSize(
        reinterpret_cast<mirror::Object*>(obj_with_rdz));
    VALGRIND_MAKE_MEM_UNDEFINED(obj_with_rdz, allocation_size);
    size_t freed = DlMallocSpace::Free(self, reinterpret_cast<mirror::Object*>(obj_with_rdz));
    return freed - 2 * kValgrindRedZoneBytes;
  }

  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
    size_t freed = 0;
    for (size_t i = 0; i < num_ptrs; i++) {
      freed += Free(self, ptrs[i]);
    }
    return freed;
  }

  ValgrindDlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
                        byte* end, size_t growth_limit, size_t initial_size) :
      DlMallocSpace(name, mem_map, mspace, begin, end, growth_limit) {
    VALGRIND_MAKE_MEM_UNDEFINED(mem_map->Begin() + initial_size, mem_map->Size() - initial_size);
  }

  virtual ~ValgrindDlMallocSpace() {
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(ValgrindDlMallocSpace);
};

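// Monotonically increasing counter used to give each space's live and mark bitmaps a unique name.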
size_t DlMallocSpace::bitmap_index_ = 0;

DlMallocSpace::DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
                       byte* end, size_t growth_limit)
    : MemMapSpace(name, mem_map, end - begin, kGcRetentionPolicyAlwaysCollect),
      recent_free_pos_(0), num_bytes_allocated_(0), num_objects_allocated_(0),
      total_bytes_allocated_(0), total_objects_allocated_(0),
      lock_("allocation space lock", kAllocSpaceLock), mspace_(mspace),
      growth_limit_(growth_limit) {
  CHECK(mspace != NULL);

  size_t bitmap_index = bitmap_index_++;

  static const uintptr_t kGcCardSize = static_cast<uintptr_t>(accounting::CardTable::kCardSize);
  CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->Begin())));
  CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->End())));
  live_bitmap_.reset(accounting::SpaceBitmap::Create(
      StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
      Begin(), Capacity()));
  DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace live bitmap #" << bitmap_index;

  mark_bitmap_.reset(accounting::SpaceBitmap::Create(
      StringPrintf("allocspace %s mark-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
      Begin(), Capacity()));
  DCHECK(mark_bitmap_.get() != NULL) << "could not create allocspace mark bitmap #" << bitmap_index;

  for (auto& freed : recent_freed_objects_) {
    freed.first = nullptr;
    freed.second = nullptr;
  }
}

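// Create a DlMallocSpace named `name`: map an anonymous region of `capacity` bytes (optionally at
// requested_begin), build a dlmalloc mspace on it with a one-page starting footprint, and protect
// everything beyond the initial size. Under valgrind, a ValgrindDlMallocSpace is returned instead.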
DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size,
                                     size_t growth_limit, size_t capacity, byte* requested_begin) {
  // Memory we promise to dlmalloc before it asks for morecore.
  // Note: making this value large means that large allocations are unlikely to succeed as dlmalloc
  // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
  // size of the large allocation) will be greater than the footprint limit.
  size_t starting_size = kPageSize;
  uint64_t start_time = 0;
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    start_time = NanoTime();
    VLOG(startup) << "Space::CreateAllocSpace entering " << name
                  << " initial_size=" << PrettySize(initial_size)
                  << " growth_limit=" << PrettySize(growth_limit)
                  << " capacity=" << PrettySize(capacity)
                  << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
  }

  // Sanity check arguments
  if (starting_size > initial_size) {
    initial_size = starting_size;
  }
  if (initial_size > growth_limit) {
    LOG(ERROR) << "Failed to create alloc space (" << name << ") where the initial size ("
        << PrettySize(initial_size) << ") is larger than its capacity ("
        << PrettySize(growth_limit) << ")";
    return NULL;
  }
  if (growth_limit > capacity) {
    LOG(ERROR) << "Failed to create alloc space (" << name << ") where the growth limit ("
        << PrettySize(growth_limit) << ") is larger than the capacity ("
        << PrettySize(capacity) << ")";
    return NULL;
  }

  // Page align growth limit and capacity which will be used to manage mmapped storage
  growth_limit = RoundUp(growth_limit, kPageSize);
  capacity = RoundUp(capacity, kPageSize);

  UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
                                                 PROT_READ | PROT_WRITE));
  if (mem_map.get() == NULL) {
    LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
        << PrettySize(capacity);
    return NULL;
  }

  void* mspace = CreateMallocSpace(mem_map->Begin(), starting_size, initial_size);
  if (mspace == NULL) {
    LOG(ERROR) << "Failed to initialize mspace for alloc space (" << name << ")";
    return NULL;
  }

  // Protect memory beyond the initial size.
  byte* end = mem_map->Begin() + starting_size;
  if (capacity - initial_size > 0) {
    CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name);
  }

  // Everything is set up, so record the state in the immutable space object and return it.
  MemMap* mem_map_ptr = mem_map.release();
  DlMallocSpace* space;
  if (RUNNING_ON_VALGRIND > 0) {
    space = new ValgrindDlMallocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end,
                                      growth_limit, initial_size);
  } else {
    space = new DlMallocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end, growth_limit);
  }
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Space::CreateAllocSpace exiting (" << PrettyDuration(NanoTime() - start_time)
        << " ) " << *space;
  }
  return space;
}

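// Build a dlmalloc mspace on top of `begin` with an initial footprint of morecore_start bytes and
// cap its footprint at initial_size bytes; returns NULL on failure.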
void* DlMallocSpace::CreateMallocSpace(void* begin, size_t morecore_start, size_t initial_size) {
  // Clear errno to allow PLOG on error.
  errno = 0;
  // Create an mspace using our backing storage starting at begin and with a footprint of
  // morecore_start. Don't use an internal dlmalloc lock (as we already hold the heap lock). When
  // morecore_start bytes of memory are exhausted, morecore will be called.
  void* msp = create_mspace_with_base(begin, morecore_start, false /*locked*/);
  if (msp != NULL) {
    // Do not allow morecore requests to succeed beyond the initial size of the heap
    mspace_set_footprint_limit(msp, initial_size);
  } else {
    PLOG(ERROR) << "create_mspace_with_base failed";
  }
  return msp;
}

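// Swap the live and mark bitmaps (and their names); presumably invoked by the collector once
// marking is complete so the freshly marked bitmap becomes the live one.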
void DlMallocSpace::SwapBitmaps() {
  live_bitmap_.swap(mark_bitmap_);
  // Swap names to get more descriptive diagnostics.
  std::string temp_name(live_bitmap_->GetName());
  live_bitmap_->SetName(mark_bitmap_->GetName());
  mark_bitmap_->SetName(temp_name);
}

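// Non-growing allocation path; delegates to the non-virtual (inlinable) implementation.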
mirror::Object* DlMallocSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
  return AllocNonvirtual(self, num_bytes, bytes_allocated);
}

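// Allocation path that may grow the footprint: temporarily raise the footprint limit to the full
// capacity, attempt the allocation, then shrink the limit back down to the resulting footprint.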
mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
  mirror::Object* result;
  {
    MutexLock mu(self, lock_);
    // Grow as much as possible within the mspace.
    size_t max_allowed = Capacity();
    mspace_set_footprint_limit(mspace_, max_allowed);
    // Try the allocation.
    result = AllocWithoutGrowthLocked(num_bytes, bytes_allocated);
    // Shrink back down as small as possible.
    size_t footprint = mspace_footprint(mspace_);
    mspace_set_footprint_limit(mspace_, footprint);
  }
  if (result != NULL) {
    // Zero freshly allocated memory, done while not holding the space's lock.
    memset(result, 0, num_bytes);
  }
  // Return the new allocation or NULL.
  CHECK(!kDebugSpaces || result == NULL || Contains(result));
  return result;
}

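// Cap the space at growth_limit bytes (rounded up to a page); if the space has already grown past
// the new limit, pull end_ back so the size reflects it.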
void DlMallocSpace::SetGrowthLimit(size_t growth_limit) {
  growth_limit = RoundUp(growth_limit, kPageSize);
  growth_limit_ = growth_limit;
  if (Size() > growth_limit_) {
    end_ = begin_ + growth_limit;
  }
}

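// Turn this space into the zygote space: round end_ up to a page, trim and unmap the unused tail,
// then create a fresh DlMallocSpace starting at the old end_ to serve post-zygote allocations.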
DlMallocSpace* DlMallocSpace::CreateZygoteSpace(const char* alloc_space_name) {
  end_ = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(end_), kPageSize));
  DCHECK(IsAligned<accounting::CardTable::kCardSize>(begin_));
  DCHECK(IsAligned<accounting::CardTable::kCardSize>(end_));
  DCHECK(IsAligned<kPageSize>(begin_));
  DCHECK(IsAligned<kPageSize>(end_));
  size_t size = RoundUp(Size(), kPageSize);
  // Trim the heap so that we minimize the size of the Zygote space.
  Trim();
  // Trim our mem-map to free unused pages.
  GetMemMap()->UnMapAtEnd(end_);
  // TODO: Avoid hardcoding these?
  const size_t starting_size = kPageSize;
  const size_t initial_size = 2 * MB;
  // Remaining size is for the new alloc space.
  const size_t growth_limit = growth_limit_ - size;
  const size_t capacity = Capacity() - size;
  VLOG(heap) << "Begin " << reinterpret_cast<const void*>(begin_) << "\n"
             << "End " << reinterpret_cast<const void*>(end_) << "\n"
             << "Size " << size << "\n"
             << "GrowthLimit " << growth_limit_ << "\n"
             << "Capacity " << Capacity();
  SetGrowthLimit(RoundUp(size, kPageSize));
  SetFootprintLimit(RoundUp(size, kPageSize));
  // FIXME: Do we need reference counted pointers here?
  // Make the two spaces share the same mark bitmaps since the bitmaps span both of the spaces.
  VLOG(heap) << "Creating new AllocSpace: ";
  VLOG(heap) << "Size " << GetMemMap()->Size();
  VLOG(heap) << "GrowthLimit " << PrettySize(growth_limit);
  VLOG(heap) << "Capacity " << PrettySize(capacity);
  UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(alloc_space_name, End(), capacity, PROT_READ | PROT_WRITE));
  void* mspace = CreateMallocSpace(end_, starting_size, initial_size);
  // Protect memory beyond the initial size.
  byte* end = mem_map->Begin() + starting_size;
  if (capacity - initial_size > 0) {
    CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), alloc_space_name);
  }
  DlMallocSpace* alloc_space =
      new DlMallocSpace(alloc_space_name, mem_map.release(), mspace, end_, end, growth_limit);
  live_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
  CHECK_EQ(live_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
  mark_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
  CHECK_EQ(mark_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
  VLOG(heap) << "zygote space creation done";
  return alloc_space;
}

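// Look up `obj` in the ring buffer of recently freed objects and return the class it had when it
// was freed, or nullptr if it is not there. Presumably useful when diagnosing use-after-free bugs.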
mirror::Class* DlMallocSpace::FindRecentFreedObject(const mirror::Object* obj) {
  size_t pos = recent_free_pos_;
  // Start at the most recently freed object and work our way back since there may be duplicates
  // caused by dlmalloc reusing memory.
  if (kRecentFreeCount > 0) {
    for (size_t i = 0; i + 1 < kRecentFreeCount + 1; ++i) {
      pos = pos != 0 ? pos - 1 : kRecentFreeMask;
      if (recent_freed_objects_[pos].first == obj) {
        return recent_freed_objects_[pos].second;
      }
    }
  }
  return nullptr;
}

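// Record a freed object and its class in the fixed-size recent-free ring buffer (callers hold lock_).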
void DlMallocSpace::RegisterRecentFree(mirror::Object* ptr) {
  recent_freed_objects_[recent_free_pos_].first = ptr;
  recent_freed_objects_[recent_free_pos_].second = ptr->GetClass();
  recent_free_pos_ = (recent_free_pos_ + 1) & kRecentFreeMask;
}

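// Free a single object: update the allocation counters and the recent-free buffer under lock_,
// then hand the memory back to dlmalloc. Returns the number of bytes freed.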
size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  if (kDebugSpaces) {
    CHECK(ptr != NULL);
    CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
  }
  const size_t bytes_freed = InternalAllocationSize(ptr);
  num_bytes_allocated_ -= bytes_freed;
  --num_objects_allocated_;
  if (kRecentFreeCount > 0) {
    RegisterRecentFree(ptr);
  }
  mspace_free(mspace_, ptr);
  return bytes_freed;
}

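// Bulk-free a list of objects: sizes are summed without the lock, then counters and the
// recent-free buffer are updated under lock_ before the pointers are handed to mspace_bulk_free.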
size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  DCHECK(ptrs != NULL);

  // Don't need the lock to calculate the size of the freed pointers.
  size_t bytes_freed = 0;
  for (size_t i = 0; i < num_ptrs; i++) {
    mirror::Object* ptr = ptrs[i];
    const size_t look_ahead = 8;
    if (kPrefetchDuringDlMallocFreeList && i + look_ahead < num_ptrs) {
      // The head of the chunk for the allocation is sizeof(size_t) behind the allocation.
      __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead]) - sizeof(size_t));
    }
    bytes_freed += InternalAllocationSize(ptr);
  }

  if (kRecentFreeCount > 0) {
    MutexLock mu(self, lock_);
    for (size_t i = 0; i < num_ptrs; i++) {
      RegisterRecentFree(ptrs[i]);
    }
  }

  if (kDebugSpaces) {
    size_t num_broken_ptrs = 0;
    for (size_t i = 0; i < num_ptrs; i++) {
      if (!Contains(ptrs[i])) {
        num_broken_ptrs++;
        LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
      } else {
        size_t size = mspace_usable_size(ptrs[i]);
        memset(ptrs[i], 0xEF, size);
      }
    }
    CHECK_EQ(num_broken_ptrs, 0u);
  }

  {
    MutexLock mu(self, lock_);
    num_bytes_allocated_ -= bytes_freed;
    num_objects_allocated_ -= num_ptrs;
    mspace_bulk_free(mspace_, reinterpret_cast<void**>(ptrs), num_ptrs);
    return bytes_freed;
  }
}

// Callback from dlmalloc when it needs to increase the footprint
extern "C" void* art_heap_morecore(void* mspace, intptr_t increment) {
  Heap* heap = Runtime::Current()->GetHeap();
  DCHECK_EQ(heap->GetAllocSpace()->GetMspace(), mspace);
  return heap->GetAllocSpace()->MoreCore(increment);
}

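// Called (via art_heap_morecore) when dlmalloc needs the footprint to change: move end_ by
// `increment` bytes, making new pages readable/writable when growing and madvising/protecting
// them when shrinking, and return the previous end.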
void* DlMallocSpace::MoreCore(intptr_t increment) {
  lock_.AssertHeld(Thread::Current());
  byte* original_end = end_;
  if (increment != 0) {
    VLOG(heap) << "DlMallocSpace::MoreCore " << PrettySize(increment);
    byte* new_end = original_end + increment;
    if (increment > 0) {
      // Should never be asked to increase the allocation beyond the capacity of the space. Enforced
      // by mspace_set_footprint_limit.
      CHECK_LE(new_end, Begin() + Capacity());
      CHECK_MEMORY_CALL(mprotect, (original_end, increment, PROT_READ | PROT_WRITE), GetName());
    } else {
      // Should never be asked for a negative footprint (i.e. before begin).
      CHECK_GT(original_end + increment, Begin());
      // Advise we don't need the pages and protect them
      // TODO: by removing permissions to the pages we may be causing TLB shoot-down which can be
      // expensive (note the same isn't true for giving permissions to a page as the protected
      // page shouldn't be in a TLB). We should investigate the performance impact of just
      // ignoring the memory protection change here and in Space::CreateAllocSpace. It's
      // likely just a useful debug feature.
      size_t size = -increment;
      CHECK_MEMORY_CALL(madvise, (new_end, size, MADV_DONTNEED), GetName());
      CHECK_MEMORY_CALL(mprotect, (new_end, size, PROT_NONE), GetName());
    }
    // Update end_
    end_ = new_end;
  }
  return original_end;
}

// Virtual functions can't get inlined, so route the hot path through this non-virtual helper.
inline size_t DlMallocSpace::InternalAllocationSize(const mirror::Object* obj) {
  return AllocationSizeNonvirtual(obj);
}

size_t DlMallocSpace::AllocationSize(const mirror::Object* obj) {
  return InternalAllocationSize(obj);
}

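// Release unused memory at the end of the space and in page-sized holes back to the kernel;
// returns the number of bytes reclaimed, as accumulated by DlmallocMadviseCallback.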
size_t DlMallocSpace::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  // Trim to release memory at the end of the space.
  mspace_trim(mspace_, 0);
  // Visit the space looking for page-sized holes and advise the kernel that we don't need them.
  size_t reclaimed = 0;
  mspace_inspect_all(mspace_, DlmallocMadviseCallback, &reclaimed);
  return reclaimed;
}

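// Invoke `callback` on every chunk in the mspace; a final callback(NULL, NULL, 0, arg) call marks
// the end of the space.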
void DlMallocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                      void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  mspace_inspect_all(mspace_, callback, arg);
  callback(NULL, NULL, 0, arg);  // Indicate end of a space.
}

size_t DlMallocSpace::GetFootprint() {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_footprint(mspace_);
}

size_t DlMallocSpace::GetFootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_footprint_limit(mspace_);
}

void DlMallocSpace::SetFootprintLimit(size_t new_size) {
  MutexLock mu(Thread::Current(), lock_);
  VLOG(heap) << "DlMallocSpace::SetFootprintLimit " << PrettySize(new_size);
  // Compare against the actual footprint, rather than the Size(), because the heap may not have
  // grown all the way to the allowed size yet.
  size_t current_space_size = mspace_footprint(mspace_);
  if (new_size < current_space_size) {
    // Don't let the space grow any more.
    new_size = current_space_size;
  }
  mspace_set_footprint_limit(mspace_, new_size);
}

void DlMallocSpace::Dump(std::ostream& os) const {
  os << GetType()
      << " begin=" << reinterpret_cast<void*>(Begin())
      << ",end=" << reinterpret_cast<void*>(End())
      << ",size=" << PrettySize(Size()) << ",capacity=" << PrettySize(Capacity())
      << ",name=\"" << GetName() << "\"]";
}

}  // namespace space
}  // namespace gc
}  // namespace art