/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "large_object_space.h"

#include <sys/mman.h>

#include <memory>

#include <android-base/logging.h>

#include "base/macros.h"
#include "base/memory_tool.h"
#include "base/mutex-inl.h"
#include "base/os.h"
#include "base/stl_util.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "image.h"
#include "scoped_thread_state_change-inl.h"
#include "space-inl.h"
#include "thread-current-inl.h"

namespace art {
namespace gc {
namespace space {

class MemoryToolLargeObjectMapSpace final : public LargeObjectMapSpace {
 public:
  explicit MemoryToolLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
  }

  ~MemoryToolLargeObjectMapSpace() override {
    // Historical note: we used to delete the large objects here to keep Valgrind happy, since
    // some large objects (such as Dex cache arrays) are never freed because they are held live
    // by the class linker.
  }

  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      override {
    mirror::Object* obj =
        LargeObjectMapSpace::Alloc(self, num_bytes + kMemoryToolRedZoneBytes * 2, bytes_allocated,
                                   usable_size, bytes_tl_bulk_allocated);
    if (UNLIKELY(obj == nullptr)) {
      // The underlying allocation failed; do not poison redzones around a null pointer.
      return nullptr;
    }
    mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) + kMemoryToolRedZoneBytes);
    MEMORY_TOOL_MAKE_NOACCESS(reinterpret_cast<void*>(obj), kMemoryToolRedZoneBytes);
    MEMORY_TOOL_MAKE_NOACCESS(
        reinterpret_cast<uint8_t*>(object_without_rdz) + num_bytes,
        kMemoryToolRedZoneBytes);
    if (usable_size != nullptr) {
      *usable_size = num_bytes;  // Since we have redzones, shrink the usable size.
    }
    return object_without_rdz;
  }
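  // Sketch of the layout produced by Alloc() above (explanatory note only):
  //   [ leading redzone | object payload (num_bytes) | trailing redzone | map tail, if any ]
  // Only the pointer to the payload is returned; both redzones are poisoned with
  // MEMORY_TOOL_MAKE_NOACCESS so the memory tool reports reads or writes that stray off the
  // object, and the methods below translate back to the redzoned address before delegating.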

  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override {
    return LargeObjectMapSpace::AllocationSize(ObjectWithRedzone(obj), usable_size);
  }

  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override {
    return LargeObjectMapSpace::IsZygoteLargeObject(self, ObjectWithRedzone(obj));
  }

  size_t Free(Thread* self, mirror::Object* obj) override {
    mirror::Object* object_with_rdz = ObjectWithRedzone(obj);
    MEMORY_TOOL_MAKE_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
    return LargeObjectMapSpace::Free(self, object_with_rdz);
  }

  bool Contains(const mirror::Object* obj) const override {
    return LargeObjectMapSpace::Contains(ObjectWithRedzone(obj));
  }

 private:
  static const mirror::Object* ObjectWithRedzone(const mirror::Object* obj) {
    return reinterpret_cast<const mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kMemoryToolRedZoneBytes);
  }

  static mirror::Object* ObjectWithRedzone(mirror::Object* obj) {
    return reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kMemoryToolRedZoneBytes);
  }

  static constexpr size_t kMemoryToolRedZoneBytes = kPageSize;
};

void LargeObjectSpace::SwapBitmaps() {
  live_bitmap_.swap(mark_bitmap_);
  // Swap names to get more descriptive diagnostics.
  std::string temp_name = live_bitmap_->GetName();
  live_bitmap_->SetName(mark_bitmap_->GetName());
  mark_bitmap_->SetName(temp_name);
}

LargeObjectSpace::LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end,
                                   const char* lock_name)
    : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
      lock_(lock_name, kAllocSpaceLock),
      num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
      total_objects_allocated_(0), begin_(begin), end_(end) {
}

void LargeObjectSpace::CopyLiveToMarked() {
  mark_bitmap_->CopyFrom(live_bitmap_.get());
}

LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
    : LargeObjectSpace(name, nullptr, nullptr, "large object map space lock") {}

LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
  if (Runtime::Current()->IsRunningOnMemoryTool()) {
    return new MemoryToolLargeObjectMapSpace(name);
  } else {
    return new LargeObjectMapSpace(name);
  }
}
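// Minimal usage sketch of the map-based space (illustrative only: the size and variable names
// are made up, and in the runtime the space is created and owned by gc::Heap):
//   space::LargeObjectMapSpace* los = space::LargeObjectMapSpace::Create("large object space");
//   size_t bytes_allocated, usable_size, bytes_tl_bulk_allocated;
//   mirror::Object* obj = los->Alloc(Thread::Current(), 3 * MB, &bytes_allocated, &usable_size,
//                                    &bytes_tl_bulk_allocated);
//   CHECK(los->Contains(obj));
//   los->Free(Thread::Current(), obj);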

mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
                                           size_t* bytes_allocated, size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated) {
  std::string error_msg;
  MemMap mem_map = MemMap::MapAnonymous("large object space allocation",
                                        num_bytes,
                                        PROT_READ | PROT_WRITE,
                                        /*low_4gb=*/ true,
                                        &error_msg);
  if (UNLIKELY(!mem_map.IsValid())) {
    LOG(WARNING) << "Large object allocation failed: " << error_msg;
    return nullptr;
  }
  mirror::Object* const obj = reinterpret_cast<mirror::Object*>(mem_map.Begin());
  const size_t allocation_size = mem_map.BaseSize();
  MutexLock mu(self, lock_);
  large_objects_.Put(obj, LargeObject {std::move(mem_map), false /* not zygote */});
  DCHECK(bytes_allocated != nullptr);

  if (begin_ == nullptr || begin_ > reinterpret_cast<uint8_t*>(obj)) {
    begin_ = reinterpret_cast<uint8_t*>(obj);
  }
  end_ = std::max(end_, reinterpret_cast<uint8_t*>(obj) + allocation_size);

  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size;
  }
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  *bytes_tl_bulk_allocated = allocation_size;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  return obj;
}
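// Note on the accounting above: every large object gets its own anonymous MemMap, so the
// recorded allocation size is the map's BaseSize() (a page-aligned value) rather than the raw
// num_bytes, and begin_/end_ merely track the lowest and highest mapped addresses seen so far.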

bool LargeObjectMapSpace::IsZygoteLargeObject(Thread* self, mirror::Object* obj) const {
  MutexLock mu(self, lock_);
  auto it = large_objects_.find(obj);
  CHECK(it != large_objects_.end());
  return it->second.is_zygote;
}

void LargeObjectMapSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self) {
  MutexLock mu(self, lock_);
  for (auto& pair : large_objects_) {
    pair.second.is_zygote = true;
  }
}

size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  auto it = large_objects_.find(ptr);
  if (UNLIKELY(it == large_objects_.end())) {
    ScopedObjectAccess soa(self);
    Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(FATAL_WITHOUT_ABORT));
    LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live";
  }
  const size_t map_size = it->second.mem_map.BaseSize();
  DCHECK_GE(num_bytes_allocated_, map_size);
  size_t allocation_size = map_size;
  num_bytes_allocated_ -= allocation_size;
  --num_objects_allocated_;
  large_objects_.erase(it);
  return allocation_size;
}

size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  MutexLock mu(Thread::Current(), lock_);
  auto it = large_objects_.find(obj);
  CHECK(it != large_objects_.end()) << "Attempted to get size of a large object which is not live";
  size_t alloc_size = it->second.mem_map.BaseSize();
  if (usable_size != nullptr) {
    *usable_size = alloc_size;
  }
  return alloc_size;
}

size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  size_t total = 0;
  for (size_t i = 0; i < num_ptrs; ++i) {
    if (kDebugSpaces) {
      CHECK(Contains(ptrs[i]));
    }
    total += Free(self, ptrs[i]);
  }
  return total;
}

void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (auto& pair : large_objects_) {
    MemMap* mem_map = &pair.second.mem_map;
    callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
    callback(nullptr, nullptr, 0, arg);
  }
}

void LargeObjectMapSpace::ForEachMemMap(std::function<void(const MemMap&)> func) const {
  MutexLock mu(Thread::Current(), lock_);
  for (auto& pair : large_objects_) {
    func(pair.second.mem_map);
  }
}

bool LargeObjectMapSpace::Contains(const mirror::Object* obj) const {
  Thread* self = Thread::Current();
  if (lock_.IsExclusiveHeld(self)) {
    // lock_ is already exclusively held by this thread (it is not a recursive mutex), so query
    // the map directly instead of re-acquiring the lock.
    return large_objects_.find(const_cast<mirror::Object*>(obj)) != large_objects_.end();
  } else {
    MutexLock mu(self, lock_);
    return large_objects_.find(const_cast<mirror::Object*>(obj)) != large_objects_.end();
  }
}

// Keeps track of allocation sizes and whether or not the previous allocation is free.
// Used to coalesce free blocks and to find the best-fit block for an object allocation. Each
// allocation has an AllocationInfo which records the size of the free block immediately
// preceding it. Implemented in such a way that we can also find the iterator for any
// allocation info pointer.
class AllocationInfo {
 public:
  AllocationInfo() : prev_free_(0), alloc_size_(0) {
  }
  // Returns the allocation size in units of kAlignment.
  size_t AlignSize() const {
    return alloc_size_ & kFlagsMask;
  }
  // Returns the allocation size in bytes.
  size_t ByteSize() const {
    return AlignSize() * FreeListSpace::kAlignment;
  }
  // Updates the allocation size and whether or not it is free.
  void SetByteSize(size_t size, bool free) {
    DCHECK_EQ(size & ~kFlagsMask, 0u);
    DCHECK_ALIGNED(size, FreeListSpace::kAlignment);
    alloc_size_ = (size / FreeListSpace::kAlignment) | (free ? kFlagFree : 0u);
  }
  // Returns true if the block is free.
  bool IsFree() const {
    return (alloc_size_ & kFlagFree) != 0;
  }
  // Returns true if the large object is a zygote object.
  bool IsZygoteObject() const {
    return (alloc_size_ & kFlagZygote) != 0;
  }
  // Marks the object as a zygote object.
  void SetZygoteObject() {
    alloc_size_ |= kFlagZygote;
  }
  // Finds and returns the allocation info for the block that follows this one.
  AllocationInfo* GetNextInfo() {
    return this + AlignSize();
  }
  const AllocationInfo* GetNextInfo() const {
    return this + AlignSize();
  }
  // Returns the previous free allocation info by using the prev_free_ member to figure out
  // where it is. This is only used for coalescing, so we only need to be able to do it if the
  // previous allocation info is free.
  AllocationInfo* GetPrevFreeInfo() {
    DCHECK_NE(prev_free_, 0U);
    return this - prev_free_;
  }
  // Returns the address of the object associated with this allocation info.
  mirror::Object* GetObjectAddress() {
    return reinterpret_cast<mirror::Object*>(reinterpret_cast<uintptr_t>(this) + sizeof(*this));
  }
  // Returns the size of the free block preceding this allocation, in kAlignment units.
  size_t GetPrevFree() const {
    return prev_free_;
  }
  // Returns the size of the free block preceding this allocation, in bytes.
  size_t GetPrevFreeBytes() const {
    return GetPrevFree() * FreeListSpace::kAlignment;
  }
  // Updates the size of the free block prior to the allocation.
  void SetPrevFreeBytes(size_t bytes) {
    DCHECK_ALIGNED(bytes, FreeListSpace::kAlignment);
    prev_free_ = bytes / FreeListSpace::kAlignment;
  }

 private:
  static constexpr uint32_t kFlagFree = 0x80000000;  // If block is free.
  static constexpr uint32_t kFlagZygote = 0x40000000;  // If the large object is a zygote object.
  static constexpr uint32_t kFlagsMask = ~(kFlagFree | kFlagZygote);  // Selects the size bits.
  // Size of the previous free block, in units of kAlignment. If 0, the allocation immediately
  // before this one is not free.
  // These variables are undefined in the middle of allocations / free blocks.
  uint32_t prev_free_;
  // Allocation size of this object, in units of kAlignment.
  uint32_t alloc_size_;
};
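// Worked example of the encoding above (assuming, for concreteness, kAlignment == 4 KiB): an
// in-use 12 KiB allocation preceded by 8 KiB of free space has alloc_size_ == 3 and
// prev_free_ == 2; marking it free via SetByteSize(12 KiB, /*free=*/true) sets kFlagFree, so
// alloc_size_ becomes 0x80000003 while ByteSize() still returns 12 KiB.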

size_t FreeListSpace::GetSlotIndexForAllocationInfo(const AllocationInfo* info) const {
  DCHECK_GE(info, allocation_info_);
  DCHECK_LT(info, reinterpret_cast<AllocationInfo*>(allocation_info_map_.End()));
  return info - allocation_info_;
}

AllocationInfo* FreeListSpace::GetAllocationInfoForAddress(uintptr_t address) {
  return &allocation_info_[GetSlotIndexForAddress(address)];
}

const AllocationInfo* FreeListSpace::GetAllocationInfoForAddress(uintptr_t address) const {
  return &allocation_info_[GetSlotIndexForAddress(address)];
}

inline bool FreeListSpace::SortByPrevFree::operator()(const AllocationInfo* a,
                                                      const AllocationInfo* b) const {
  if (a->GetPrevFree() < b->GetPrevFree()) return true;
  if (a->GetPrevFree() > b->GetPrevFree()) return false;
  if (a->AlignSize() < b->AlignSize()) return true;
  if (a->AlignSize() > b->AlignSize()) return false;
  return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
}
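// free_blocks_ stores, for every free block, the AllocationInfo of the allocation that follows
// it, so the primary key GetPrevFree() in the comparator above is the size of the free block
// itself. Sorting by that size first (with the following allocation's size and address as
// tie-breakers) lets Alloc() call lower_bound() with a dummy info whose prev-free equals the
// rounded request and obtain the smallest free block that fits, i.e. a best-fit search.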

FreeListSpace* FreeListSpace::Create(const std::string& name, size_t size) {
  CHECK_EQ(size % kAlignment, 0U);
  std::string error_msg;
  MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
                                        size,
                                        PROT_READ | PROT_WRITE,
                                        /*low_4gb=*/ true,
                                        &error_msg);
  CHECK(mem_map.IsValid()) << "Failed to allocate large object space mem map: " << error_msg;
  return new FreeListSpace(name, std::move(mem_map), mem_map.Begin(), mem_map.End());
}

FreeListSpace::FreeListSpace(const std::string& name,
                             MemMap&& mem_map,
                             uint8_t* begin,
                             uint8_t* end)
    : LargeObjectSpace(name, begin, end, "free list space lock"),
      mem_map_(std::move(mem_map)) {
  const size_t space_capacity = end - begin;
  free_end_ = space_capacity;
  CHECK_ALIGNED(space_capacity, kAlignment);
  const size_t alloc_info_size = sizeof(AllocationInfo) * (space_capacity / kAlignment);
  std::string error_msg;
  allocation_info_map_ =
      MemMap::MapAnonymous("large object free list space allocation info map",
                           alloc_info_size,
                           PROT_READ | PROT_WRITE,
                           /*low_4gb=*/ false,
                           &error_msg);
  CHECK(allocation_info_map_.IsValid()) << "Failed to allocate allocation info map: " << error_msg;
  allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_.Begin());
}
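// The constructor above sizes the side table so there is exactly one AllocationInfo per
// kAlignment-sized slot of the space; GetSlotIndexForAddress() / GetAllocationInfoForAddress()
// translate between object addresses and entries in that table. free_end_ starts out equal to
// the full capacity, i.e. the entire space is initially one trailing free region.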

FreeListSpace::~FreeListSpace() {}

void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  const uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  AllocationInfo* cur_info = &allocation_info_[0];
  const AllocationInfo* end_info = GetAllocationInfoForAddress(free_end_start);
  while (cur_info < end_info) {
    if (!cur_info->IsFree()) {
      size_t alloc_size = cur_info->ByteSize();
      uint8_t* byte_start = reinterpret_cast<uint8_t*>(GetAddressForAllocationInfo(cur_info));
      uint8_t* byte_end = byte_start + alloc_size;
      callback(byte_start, byte_end, alloc_size, arg);
      callback(nullptr, nullptr, 0, arg);
    }
    cur_info = cur_info->GetNextInfo();
  }
  CHECK_EQ(cur_info, end_info);
}

void FreeListSpace::ForEachMemMap(std::function<void(const MemMap&)> func) const {
  MutexLock mu(Thread::Current(), lock_);
  func(allocation_info_map_);
  func(mem_map_);
}

void FreeListSpace::RemoveFreePrev(AllocationInfo* info) {
  CHECK_GT(info->GetPrevFree(), 0U);
  auto it = free_blocks_.lower_bound(info);
  CHECK(it != free_blocks_.end());
  CHECK_EQ(*it, info);
  free_blocks_.erase(it);
}

size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
  MutexLock mu(self, lock_);
  DCHECK(Contains(obj)) << reinterpret_cast<void*>(Begin()) << " " << obj << " "
                        << reinterpret_cast<void*>(End());
  DCHECK_ALIGNED(obj, kAlignment);
  AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(!info->IsFree());
  const size_t allocation_size = info->ByteSize();
  DCHECK_GT(allocation_size, 0U);
  DCHECK_ALIGNED(allocation_size, kAlignment);
  info->SetByteSize(allocation_size, true);  // Mark as free.
  // Look at the next chunk.
  AllocationInfo* next_info = info->GetNextInfo();
  // Calculate the start of the end free block.
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  size_t prev_free_bytes = info->GetPrevFreeBytes();
  size_t new_free_size = allocation_size;
  if (prev_free_bytes != 0) {
    // Coalesce with previous free chunk.
    new_free_size += prev_free_bytes;
    RemoveFreePrev(info);
    info = info->GetPrevFreeInfo();
    // The previous allocation info must not be free since we are supposed to always coalesce.
    DCHECK_EQ(info->GetPrevFreeBytes(), 0U) << "Previous allocation was free";
  }
  uintptr_t next_addr = GetAddressForAllocationInfo(next_info);
  if (next_addr >= free_end_start) {
    // Easy case, the next chunk is the end free region.
    CHECK_EQ(next_addr, free_end_start);
    free_end_ += new_free_size;
  } else {
    AllocationInfo* new_free_info;
    if (next_info->IsFree()) {
      AllocationInfo* next_next_info = next_info->GetNextInfo();
      // Next next info can't be free since we always coalesce.
      DCHECK(!next_next_info->IsFree());
      DCHECK_ALIGNED(next_next_info->ByteSize(), kAlignment);
      new_free_info = next_next_info;
      new_free_size += next_next_info->GetPrevFreeBytes();
      RemoveFreePrev(next_next_info);
    } else {
      new_free_info = next_info;
    }
    new_free_info->SetPrevFreeBytes(new_free_size);
    free_blocks_.insert(new_free_info);
    info->SetByteSize(new_free_size, true);
    DCHECK_EQ(info->GetNextInfo(), new_free_info);
  }
  --num_objects_allocated_;
  DCHECK_LE(allocation_size, num_bytes_allocated_);
  num_bytes_allocated_ -= allocation_size;
  madvise(obj, allocation_size, MADV_DONTNEED);
  if (kIsDebugBuild) {
    // Can't disallow reads since we use them to find next chunks during coalescing.
    CheckedCall(mprotect, __FUNCTION__, obj, allocation_size, PROT_READ);
  }
  return allocation_size;
}
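// Summary of the coalescing performed by Free() above: the freed block is first merged with the
// free block immediately before it (if any), then either folded into the trailing free region at
// the end of the space or merged with a free block immediately after it; the surviving free run
// is re-registered in free_blocks_, keyed by the AllocationInfo that follows it.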

size_t FreeListSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  DCHECK(Contains(obj));
  AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(!info->IsFree());
  size_t alloc_size = info->ByteSize();
  if (usable_size != nullptr) {
    *usable_size = alloc_size;
  }
  return alloc_size;
}

mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                     size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
  MutexLock mu(self, lock_);
  const size_t allocation_size = RoundUp(num_bytes, kAlignment);
  AllocationInfo temp_info;
  temp_info.SetPrevFreeBytes(allocation_size);
  temp_info.SetByteSize(0, false);
  AllocationInfo* new_info;
  // Find the smallest free block that is at least allocation_size bytes.
  auto it = free_blocks_.lower_bound(&temp_info);
  if (it != free_blocks_.end()) {
    AllocationInfo* info = *it;
    free_blocks_.erase(it);
    // Fit our object in the previous allocation info free space.
    new_info = info->GetPrevFreeInfo();
    // Remove the newly allocated block from the info and update the prev_free_.
    info->SetPrevFreeBytes(info->GetPrevFreeBytes() - allocation_size);
    if (info->GetPrevFreeBytes() > 0) {
      AllocationInfo* new_free = info - info->GetPrevFree();
      new_free->SetPrevFreeBytes(0);
      new_free->SetByteSize(info->GetPrevFreeBytes(), true);
      // If there is remaining space, insert back into the free set.
      free_blocks_.insert(info);
    }
  } else {
    // Try to steal some memory from the free space at the end of the space.
    if (LIKELY(free_end_ >= allocation_size)) {
      // Fit our object at the start of the end free block.
      new_info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(End()) - free_end_);
      free_end_ -= allocation_size;
    } else {
      return nullptr;
    }
  }
  DCHECK(bytes_allocated != nullptr);
  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size;
  }
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  *bytes_tl_bulk_allocated = allocation_size;
  // Need to do these inside of the lock.
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(GetAddressForAllocationInfo(new_info));
  // We always put our object at the start of the free block; there cannot be another free block
  // before it.
  if (kIsDebugBuild) {
    CheckedCall(mprotect, __FUNCTION__, obj, allocation_size, PROT_READ | PROT_WRITE);
  }
  new_info->SetPrevFreeBytes(0);
  new_info->SetByteSize(allocation_size, false);
  return obj;
}
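// Worked example for Alloc() above (illustrative numbers, assuming kAlignment == 4 KiB): a
// request for 10 KiB is rounded up to 12 KiB; with free blocks of 8 KiB and 32 KiB registered,
// lower_bound() selects the 32 KiB block, the object is placed at its start, and the remaining
// 20 KiB stays recorded as the prev-free of the following AllocationInfo, which is re-inserted
// into free_blocks_. If no registered block fits, the space is carved from free_end_ instead.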

void FreeListSpace::Dump(std::ostream& os) const {
  MutexLock mu(Thread::Current(), lock_);
  os << GetName() << " -"
     << " begin: " << reinterpret_cast<void*>(Begin())
     << " end: " << reinterpret_cast<void*>(End()) << "\n";
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  const AllocationInfo* cur_info =
      GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(Begin()));
  const AllocationInfo* end_info = GetAllocationInfoForAddress(free_end_start);
  while (cur_info < end_info) {
    size_t size = cur_info->ByteSize();
    uintptr_t address = GetAddressForAllocationInfo(cur_info);
    if (cur_info->IsFree()) {
      os << "Free block at address: " << reinterpret_cast<const void*>(address)
         << " of length " << size << " bytes\n";
    } else {
      os << "Large object at address: " << reinterpret_cast<const void*>(address)
         << " of length " << size << " bytes\n";
    }
    cur_info = cur_info->GetNextInfo();
  }
  if (free_end_) {
    os << "Free block at address: " << reinterpret_cast<const void*>(free_end_start)
       << " of length " << free_end_ << " bytes\n";
  }
}

bool FreeListSpace::IsZygoteLargeObject(Thread* self ATTRIBUTE_UNUSED, mirror::Object* obj) const {
  const AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(info != nullptr);
  return info->IsZygoteObject();
}

void FreeListSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self) {
  MutexLock mu(self, lock_);
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  for (AllocationInfo* cur_info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(Begin())),
      *end_info = GetAllocationInfoForAddress(free_end_start); cur_info < end_info;
      cur_info = cur_info->GetNextInfo()) {
    if (!cur_info->IsFree()) {
      cur_info->SetZygoteObject();
    }
  }
}

void LargeObjectSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  space::LargeObjectSpace* space = context->space->AsLargeObjectSpace();
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // If the bitmaps aren't swapped we need to clear the bits since the GC isn't going to re-swap
  // the bitmaps as an optimization.
  if (!context->swap_bitmaps) {
    accounting::LargeObjectBitmap* bitmap = space->GetLiveBitmap();
    for (size_t i = 0; i < num_ptrs; ++i) {
      bitmap->Clear(ptrs[i]);
    }
  }
  context->freed.objects += num_ptrs;
  context->freed.bytes += space->FreeList(self, num_ptrs, ptrs);
}

collector::ObjectBytePair LargeObjectSpace::Sweep(bool swap_bitmaps) {
  if (Begin() >= End()) {
    return collector::ObjectBytePair(0, 0);
  }
  accounting::LargeObjectBitmap* live_bitmap = GetLiveBitmap();
  accounting::LargeObjectBitmap* mark_bitmap = GetMarkBitmap();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
  }
  AllocSpace::SweepCallbackContext scc(swap_bitmaps, this);
  std::pair<uint8_t*, uint8_t*> range = GetBeginEndAtomic();
  accounting::LargeObjectBitmap::SweepWalk(*live_bitmap, *mark_bitmap,
                                           reinterpret_cast<uintptr_t>(range.first),
                                           reinterpret_cast<uintptr_t>(range.second),
                                           SweepCallback,
                                           &scc);
  return scc.freed;
}
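// Sweep() above walks objects that are set in the live bitmap but not in the mark bitmap
// (undoing its local pointer swap if the collector already swapped the space's bitmaps) and
// frees them through SweepCallback, which also clears their live bits when the bitmaps were not
// swapped.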

void LargeObjectSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
                                                    size_t /*failed_alloc_bytes*/) {
  UNIMPLEMENTED(FATAL);
}

std::pair<uint8_t*, uint8_t*> LargeObjectMapSpace::GetBeginEndAtomic() const {
  MutexLock mu(Thread::Current(), lock_);
  return std::make_pair(Begin(), End());
}

std::pair<uint8_t*, uint8_t*> FreeListSpace::GetBeginEndAtomic() const {
  MutexLock mu(Thread::Current(), lock_);
  return std::make_pair(Begin(), End());
}

}  // namespace space
}  // namespace gc
}  // namespace art