/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "large_object_space.h"

#include <memory>

// Provides the VALGRIND_MAKE_MEM_* client-request macros used below; the
// <memcheck/memcheck.h> path assumes ART's Valgrind include directory is on
// the include path.
#include <memcheck/memcheck.h>

#include "base/logging.h"
#include "base/mutex-inl.h"
#include "base/stl_util.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "image.h"
#include "os.h"
#include "space-inl.h"
#include "thread-inl.h"
#include "utils.h"

namespace art {
namespace gc {
namespace space {

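// ValgrindLargeObjectMapSpace pads every allocation with a page-sized red zone
// on each side and marks both zones NOACCESS, so Valgrind reports overruns and
// underruns on large objects. Illustrative layout of one allocation:
//
//   |<- red zone ->|<---------- num_bytes ---------->|<- red zone ->|
//   ^obj            ^object_without_rdz (returned to the caller)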
class ValgrindLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
 public:
  explicit ValgrindLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
  }

  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                size_t* usable_size) OVERRIDE {
    mirror::Object* obj =
        LargeObjectMapSpace::Alloc(self, num_bytes + kValgrindRedZoneBytes * 2, bytes_allocated,
                                   usable_size);
    if (obj == nullptr) {
      // The underlying allocation failed; don't place red zones around a null pointer.
      return nullptr;
    }
    mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) + kValgrindRedZoneBytes);
    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<void*>(obj), kValgrindRedZoneBytes);
    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(object_without_rdz) + num_bytes,
                               kValgrindRedZoneBytes);
    if (usable_size != nullptr) {
      *usable_size = num_bytes;  // Since we have redzones, shrink the usable size.
    }
    return object_without_rdz;
  }

  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
    mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
    return LargeObjectMapSpace::AllocationSize(object_with_rdz, usable_size);
  }

  virtual size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
    mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
    VALGRIND_MAKE_MEM_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
    return LargeObjectMapSpace::Free(self, object_with_rdz);
  }

  bool Contains(const mirror::Object* obj) const OVERRIDE {
    mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
    return LargeObjectMapSpace::Contains(object_with_rdz);
  }

 private:
  static constexpr size_t kValgrindRedZoneBytes = kPageSize;
};

void LargeObjectSpace::SwapBitmaps() {
  live_bitmap_.swap(mark_bitmap_);
  // Swap names to get more descriptive diagnostics.
  std::string temp_name = live_bitmap_->GetName();
  live_bitmap_->SetName(mark_bitmap_->GetName());
  mark_bitmap_->SetName(temp_name);
}

LargeObjectSpace::LargeObjectSpace(const std::string& name, byte* begin, byte* end)
    : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
      num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
      total_objects_allocated_(0), begin_(begin), end_(end) {
}

void LargeObjectSpace::CopyLiveToMarked() {
  mark_bitmap_->CopyFrom(live_bitmap_.get());
}

LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
    : LargeObjectSpace(name, nullptr, nullptr),
      lock_("large object map space lock", kAllocSpaceLock) {}

LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
  if (Runtime::Current()->RunningOnValgrind()) {
    return new ValgrindLargeObjectMapSpace(name);
  } else {
    return new LargeObjectMapSpace(name);
  }
}

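// Design note: each large object is backed by its own anonymous MemMap, so
// Free() can unmap it and return the pages to the kernel immediately. The
// begin_/end_ fields only track a conservative address range over all maps,
// which Sweep() uses to bound its bitmap walk.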
mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
                                           size_t* bytes_allocated, size_t* usable_size) {
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", nullptr, num_bytes,
                                         PROT_READ | PROT_WRITE, true, &error_msg);
  if (UNLIKELY(mem_map == nullptr)) {
    LOG(WARNING) << "Large object allocation failed: " << error_msg;
    return nullptr;
  }
  MutexLock mu(self, lock_);
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
  large_objects_.push_back(obj);
  mem_maps_.Put(obj, mem_map);
  size_t allocation_size = mem_map->Size();
  DCHECK(bytes_allocated != nullptr);
  begin_ = std::min(begin_, reinterpret_cast<byte*>(obj));
  byte* obj_end = reinterpret_cast<byte*>(obj) + allocation_size;
  if (end_ == nullptr || obj_end > end_) {
    end_ = obj_end;
  }
  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size;
  }
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  return obj;
}

size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  MemMaps::iterator found = mem_maps_.find(ptr);
  if (UNLIKELY(found == mem_maps_.end())) {
    Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
    LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live";
  }
  DCHECK_GE(num_bytes_allocated_, found->second->Size());
  size_t allocation_size = found->second->Size();
  num_bytes_allocated_ -= allocation_size;
  --num_objects_allocated_;
  delete found->second;
  mem_maps_.erase(found);
  return allocation_size;
}

size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  MutexLock mu(Thread::Current(), lock_);
  auto found = mem_maps_.find(obj);
  CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
  size_t alloc_size = found->second->Size();
  if (usable_size != nullptr) {
    *usable_size = alloc_size;
  }
  return alloc_size;
}

size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  size_t total = 0;
  for (size_t i = 0; i < num_ptrs; ++i) {
    if (kDebugSpaces) {
      CHECK(Contains(ptrs[i]));
    }
    total += Free(self, ptrs[i]);
  }
  return total;
}

void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (auto it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
    MemMap* mem_map = it->second;
    callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
    // The all-null callback marks the end of this chunk for the walker.
    callback(nullptr, nullptr, 0, arg);
  }
}

bool LargeObjectMapSpace::Contains(const mirror::Object* obj) const {
  Thread* self = Thread::Current();
  if (lock_.IsExclusiveHeld(self)) {
    // We hold lock_ so do the check.
    return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
  } else {
    MutexLock mu(self, lock_);
    return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
  }
}

// Keeps track of allocation sizes + whether or not the previous allocation is free.
// Used to coalesce free blocks and find the best fit block for an allocation. Each
// allocation has an AllocationInfo which records the size of the previous free block
// preceding it, arranged so that we can also find the iterator for any allocation
// info pointer.
class AllocationInfo {
 public:
  AllocationInfo() : prev_free_(0), alloc_size_(0) {
  }
  // Returns the number of kAlignment (page-sized) units that the allocation covers.
  size_t AlignSize() const {
    return alloc_size_ & ~kFlagFree;
  }
  // Returns the allocation size in bytes.
  size_t ByteSize() const {
    return AlignSize() * FreeListSpace::kAlignment;
  }
  // Updates the allocation size and whether or not the block is free.
  void SetByteSize(size_t size, bool free) {
    DCHECK_ALIGNED(size, FreeListSpace::kAlignment);
    alloc_size_ = (size / FreeListSpace::kAlignment) | (free ? kFlagFree : 0U);
  }
  bool IsFree() const {
    return (alloc_size_ & kFlagFree) != 0;
  }
  // Returns the allocation info immediately following this one.
  AllocationInfo* GetNextInfo() {
    return this + AlignSize();
  }
  const AllocationInfo* GetNextInfo() const {
    return this + AlignSize();
  }
  // Returns the previous free allocation info by using the prev_free_ member to figure out
  // where it is. This is only used for coalescing, so it only needs to work when the
  // previous allocation info is free.
  AllocationInfo* GetPrevFreeInfo() {
    DCHECK_NE(prev_free_, 0U);
    return this - prev_free_;
  }
  // Returns the address of the object associated with this allocation info.
  mirror::Object* GetObjectAddress() {
    return reinterpret_cast<mirror::Object*>(reinterpret_cast<uintptr_t>(this) + sizeof(*this));
  }
  // Returns how many kAlignment units there are before the free block.
  size_t GetPrevFree() const {
    return prev_free_;
  }
  // Returns how many free bytes there are before the block.
  size_t GetPrevFreeBytes() const {
    return GetPrevFree() * FreeListSpace::kAlignment;
  }
  // Updates the size of the free block preceding the allocation.
  void SetPrevFreeBytes(size_t bytes) {
    DCHECK_ALIGNED(bytes, FreeListSpace::kAlignment);
    prev_free_ = bytes / FreeListSpace::kAlignment;
  }

 private:
  // Bit flag in alloc_size_ marking this block as free.
  static constexpr uint32_t kFlagFree = 0x8000000;
  // Contains the size of the previous free block with kAlignment as the unit. If 0 then the
  // allocation before us is not free.
  // These variables are undefined in the middle of allocations / free blocks.
  uint32_t prev_free_;
  // Allocation size of this object in kAlignment as the unit.
  uint32_t alloc_size_;
};
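
// Worked example of the encoding above (illustrative, taking kAlignment to be
// the 4 KiB page size): a live 12 KiB object preceded by an 8 KiB free run has
// prev_free_ = 2 and alloc_size_ = 3 with kFlagFree clear. Freeing the object
// turns alloc_size_ into 3 | kFlagFree, and after coalescing the two runs the
// block that follows them records prev_free_ = 5.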

size_t FreeListSpace::GetSlotIndexForAllocationInfo(const AllocationInfo* info) const {
  DCHECK_GE(info, allocation_info_);
  DCHECK_LT(info, reinterpret_cast<AllocationInfo*>(allocation_info_map_->End()));
  return info - allocation_info_;
}

AllocationInfo* FreeListSpace::GetAllocationInfoForAddress(uintptr_t address) {
  return &allocation_info_[GetSlotIndexForAddress(address)];
}

const AllocationInfo* FreeListSpace::GetAllocationInfoForAddress(uintptr_t address) const {
  return &allocation_info_[GetSlotIndexForAddress(address)];
}

inline bool FreeListSpace::SortByPrevFree::operator()(const AllocationInfo* a,
                                                      const AllocationInfo* b) const {
  if (a->GetPrevFree() < b->GetPrevFree()) return true;
  if (a->GetPrevFree() > b->GetPrevFree()) return false;
  if (a->AlignSize() < b->AlignSize()) return true;
  if (a->AlignSize() > b->AlignSize()) return false;
  return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
}
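
// Note on this ordering: free_blocks_ keys each free run by the AllocationInfo
// that follows it, sorted primarily by the run's size (GetPrevFree). Alloc()
// below builds a dummy AllocationInfo whose prev_free_ is the requested size
// and calls lower_bound(), which therefore lands on the smallest free run that
// still fits: a best-fit search in logarithmic time.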

FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) {
  CHECK_EQ(size % kAlignment, 0U);
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
                                         PROT_READ | PROT_WRITE, true, &error_msg);
  CHECK(mem_map != nullptr) << "Failed to allocate large object space mem map: " << error_msg;
  return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}

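// The allocation metadata lives in a separate side map rather than in inline
// headers: one AllocationInfo per kAlignment unit of the space, so converting
// between addresses, slot indices, and info pointers is simple arithmetic.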
FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end)
    : LargeObjectSpace(name, begin, end),
      mem_map_(mem_map),
      lock_("free list space lock", kAllocSpaceLock) {
  const size_t space_capacity = end - begin;
  free_end_ = space_capacity;
  CHECK_ALIGNED(space_capacity, kAlignment);
  const size_t alloc_info_size = sizeof(AllocationInfo) * (space_capacity / kAlignment);
  std::string error_msg;
  allocation_info_map_.reset(MemMap::MapAnonymous("large object free list space allocation info map",
                                                  nullptr, alloc_info_size, PROT_READ | PROT_WRITE,
                                                  false, &error_msg));
  CHECK(allocation_info_map_.get() != nullptr) << "Failed to allocate allocation info map: "
      << error_msg;
  allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_->Begin());
}

FreeListSpace::~FreeListSpace() {}

void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  const uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  AllocationInfo* cur_info = &allocation_info_[0];
  const AllocationInfo* end_info = GetAllocationInfoForAddress(free_end_start);
  while (cur_info < end_info) {
    if (!cur_info->IsFree()) {
      size_t alloc_size = cur_info->ByteSize();
      byte* byte_start = reinterpret_cast<byte*>(GetAddressForAllocationInfo(cur_info));
      byte* byte_end = byte_start + alloc_size;
      callback(byte_start, byte_end, alloc_size, arg);
      callback(nullptr, nullptr, 0, arg);
    }
    cur_info = cur_info->GetNextInfo();
  }
  CHECK_EQ(cur_info, end_info);
}

void FreeListSpace::RemoveFreePrev(AllocationInfo* info) {
  CHECK_GT(info->GetPrevFree(), 0U);
  auto it = free_blocks_.lower_bound(info);
  CHECK(it != free_blocks_.end());
  CHECK_EQ(*it, info);
  free_blocks_.erase(it);
}

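// Free() below handles three coalescing cases: (1) the previous block is free,
// so the freed run merges backwards; (2) the next block is free, so the merged
// run is re-keyed on the block after it; (3) the freed run touches the
// trailing free region, so it simply grows free_end_. The invariant is that no
// two free runs are ever adjacent.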
size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
  MutexLock mu(self, lock_);
  DCHECK(Contains(obj)) << reinterpret_cast<void*>(Begin()) << " " << obj << " "
                        << reinterpret_cast<void*>(End());
  DCHECK_ALIGNED(obj, kAlignment);
  AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(!info->IsFree());
  const size_t allocation_size = info->ByteSize();
  DCHECK_GT(allocation_size, 0U);
  DCHECK_ALIGNED(allocation_size, kAlignment);
  info->SetByteSize(allocation_size, true);  // Mark as free.
  // Look at the next chunk.
  AllocationInfo* next_info = info->GetNextInfo();
  // Calculate the start of the end free block.
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  size_t prev_free_bytes = info->GetPrevFreeBytes();
  size_t new_free_size = allocation_size;
  if (prev_free_bytes != 0) {
    // Coalesce with previous free chunk.
    new_free_size += prev_free_bytes;
    RemoveFreePrev(info);
    info = info->GetPrevFreeInfo();
    // The previous allocation info must not be free since we always coalesce.
    DCHECK_EQ(info->GetPrevFreeBytes(), 0U) << "Previous allocation was free";
  }
  uintptr_t next_addr = GetAddressForAllocationInfo(next_info);
  if (next_addr >= free_end_start) {
    // Easy case, the next chunk is the end free region.
    CHECK_EQ(next_addr, free_end_start);
    free_end_ += new_free_size;
  } else {
    AllocationInfo* new_free_info;
    if (next_info->IsFree()) {
      AllocationInfo* next_next_info = next_info->GetNextInfo();
      // Next next info can't be free since we always coalesce.
      DCHECK(!next_next_info->IsFree());
      DCHECK(IsAligned<kAlignment>(next_next_info->ByteSize()));
      new_free_info = next_next_info;
      new_free_size += next_next_info->GetPrevFreeBytes();
      RemoveFreePrev(next_next_info);
    } else {
      new_free_info = next_info;
    }
    new_free_info->SetPrevFreeBytes(new_free_size);
    free_blocks_.insert(new_free_info);
    info->SetByteSize(new_free_size, true);
    DCHECK_EQ(info->GetNextInfo(), new_free_info);
  }
  --num_objects_allocated_;
  DCHECK_LE(allocation_size, num_bytes_allocated_);
  num_bytes_allocated_ -= allocation_size;
  madvise(obj, allocation_size, MADV_DONTNEED);
  if (kIsDebugBuild) {
    // Can't disallow reads since we use them to find next chunks during coalescing.
    mprotect(obj, allocation_size, PROT_READ);
  }
  return allocation_size;
}

size_t FreeListSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  DCHECK(Contains(obj));
  AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(!info->IsFree());
  size_t alloc_size = info->ByteSize();
  if (usable_size != nullptr) {
    *usable_size = alloc_size;
  }
  return alloc_size;
}

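// Allocation walkthrough (illustrative, with kAlignment as the 4 KiB page
// size): a 5 KiB request rounds up to two units. If the best-fit free run is
// four units, the object is placed at the start of the run, the tail keeps
// 4 - 2 = 2 free units, and the info following the run is re-inserted into
// free_blocks_ with prev_free_ = 2. If no run fits, the object is carved from
// the untouched region at the end of the space instead (free_end_).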
mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                     size_t* usable_size) {
  MutexLock mu(self, lock_);
  const size_t allocation_size = RoundUp(num_bytes, kAlignment);
  AllocationInfo temp_info;
  temp_info.SetPrevFreeBytes(allocation_size);
  temp_info.SetByteSize(0, false);
  AllocationInfo* new_info;
  // Find the smallest free block of at least allocation_size bytes.
  auto it = free_blocks_.lower_bound(&temp_info);
  if (it != free_blocks_.end()) {
    AllocationInfo* info = *it;
    free_blocks_.erase(it);
    // Fit our object in the free space recorded before *info.
    new_info = info->GetPrevFreeInfo();
    // Remove the newly allocated block from the info and update the prev_free_.
    info->SetPrevFreeBytes(info->GetPrevFreeBytes() - allocation_size);
    if (info->GetPrevFreeBytes() > 0) {
      AllocationInfo* new_free = info - info->GetPrevFree();
      new_free->SetPrevFreeBytes(0);
      new_free->SetByteSize(info->GetPrevFreeBytes(), true);
      // If there is remaining space, insert back into the free set.
      free_blocks_.insert(info);
    }
  } else {
    // Try to steal some memory from the free space at the end of the space.
    if (LIKELY(free_end_ >= allocation_size)) {
      // Fit our object at the start of the end free block.
      new_info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(End()) - free_end_);
      free_end_ -= allocation_size;
    } else {
      return nullptr;
    }
  }
  DCHECK(bytes_allocated != nullptr);
  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size;
  }
  // Need to do these inside of the lock.
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(GetAddressForAllocationInfo(new_info));
  // We always put our object at the start of the free block, so there cannot be another free
  // block before it.
  if (kIsDebugBuild) {
    mprotect(obj, allocation_size, PROT_READ | PROT_WRITE);
  }
  new_info->SetPrevFreeBytes(0);
  new_info->SetByteSize(allocation_size, false);
  return obj;
}

void FreeListSpace::Dump(std::ostream& os) const {
  MutexLock mu(Thread::Current(), const_cast<Mutex&>(lock_));
  os << GetName() << " -"
     << " begin: " << reinterpret_cast<void*>(Begin())
     << " end: " << reinterpret_cast<void*>(End()) << "\n";
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  const AllocationInfo* cur_info =
      GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(Begin()));
  const AllocationInfo* end_info = GetAllocationInfoForAddress(free_end_start);
  while (cur_info < end_info) {
    size_t size = cur_info->ByteSize();
    uintptr_t address = GetAddressForAllocationInfo(cur_info);
    if (cur_info->IsFree()) {
      os << "Free block at address: " << reinterpret_cast<const void*>(address)
         << " of length " << size << " bytes\n";
    } else {
      os << "Large object at address: " << reinterpret_cast<const void*>(address)
         << " of length " << size << " bytes\n";
    }
    cur_info = cur_info->GetNextInfo();
  }
  if (free_end_) {
    os << "Free block at address: " << reinterpret_cast<const void*>(free_end_start)
       << " of length " << free_end_ << " bytes\n";
  }
}

void LargeObjectSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  space::LargeObjectSpace* space = context->space->AsLargeObjectSpace();
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // If the bitmaps aren't swapped we need to clear the bits since the GC isn't going to re-swap
  // the bitmaps as an optimization.
  if (!context->swap_bitmaps) {
    accounting::LargeObjectBitmap* bitmap = space->GetLiveBitmap();
    for (size_t i = 0; i < num_ptrs; ++i) {
      bitmap->Clear(ptrs[i]);
    }
  }
  context->freed.objects += num_ptrs;
  context->freed.bytes += space->FreeList(self, num_ptrs, ptrs);
}

collector::ObjectBytePair LargeObjectSpace::Sweep(bool swap_bitmaps) {
  if (Begin() >= End()) {
    return collector::ObjectBytePair(0, 0);
  }
  accounting::LargeObjectBitmap* live_bitmap = GetLiveBitmap();
  accounting::LargeObjectBitmap* mark_bitmap = GetMarkBitmap();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
  }
  AllocSpace::SweepCallbackContext scc(swap_bitmaps, this);
  accounting::LargeObjectBitmap::SweepWalk(*live_bitmap, *mark_bitmap,
                                           reinterpret_cast<uintptr_t>(Begin()),
                                           reinterpret_cast<uintptr_t>(End()), SweepCallback, &scc);
  return scc.freed;
}

void LargeObjectSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
                                                    size_t /*failed_alloc_bytes*/) {
  UNIMPLEMENTED(FATAL);
}

}  // namespace space
}  // namespace gc
}  // namespace art