/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "large_object_space.h"

#include "base/logging.h"
#include "base/stl_util.h"
#include "UniquePtr.h"
#include "image.h"
#include "os.h"
#include "thread.h"
#include "utils.h"

namespace art {
namespace gc {
namespace space {

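// Swap the live and mark object sets (and their names) so the mark state computed by the GC
// becomes the new live state.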
void LargeObjectSpace::SwapBitmaps() {
  live_objects_.swap(mark_objects_);
  // Swap names to get more descriptive diagnostics.
  std::string temp_name = live_objects_->GetName();
  live_objects_->SetName(mark_objects_->GetName());
  mark_objects_->SetName(temp_name);
}

LargeObjectSpace::LargeObjectSpace(const std::string& name)
    : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
      num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
      total_objects_allocated_(0) {
}

void LargeObjectSpace::CopyLiveToMarked() {
  mark_objects_->CopyFrom(*live_objects_.get());
}

LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
    : LargeObjectSpace(name),
      lock_("large object map space lock", kAllocSpaceLock) {}

LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
  return new LargeObjectMapSpace(name);
}

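// Allocate a large object by mapping a dedicated anonymous region for it. The size stored in
// *bytes_allocated is the rounded map size, which may exceed num_bytes.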
mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
  MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", NULL, num_bytes,
                                         PROT_READ | PROT_WRITE);
  if (mem_map == NULL) {
    return NULL;
  }
  MutexLock mu(self, lock_);
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
  large_objects_.push_back(obj);
  mem_maps_.Put(obj, mem_map);
  size_t allocation_size = mem_map->Size();
  DCHECK(bytes_allocated != NULL);
  *bytes_allocated = allocation_size;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  return obj;
}

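// Free a large object by unmapping its region. Returns the number of bytes released. Aborts if
// ptr was not allocated by this space.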
size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  MemMaps::iterator found = mem_maps_.find(ptr);
  CHECK(found != mem_maps_.end()) << "Attempted to free large object which was not live";
  DCHECK_GE(num_bytes_allocated_, found->second->Size());
  size_t allocation_size = found->second->Size();
  num_bytes_allocated_ -= allocation_size;
  --num_objects_allocated_;
  delete found->second;
  mem_maps_.erase(found);
  return allocation_size;
}

size_t LargeObjectMapSpace::AllocationSize(const mirror::Object* obj) {
  MutexLock mu(Thread::Current(), lock_);
  MemMaps::iterator found = mem_maps_.find(const_cast<mirror::Object*>(obj));
  CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
  return found->second->Size();
}

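// Bulk free: frees each pointer in turn and returns the total number of bytes released.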
size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  size_t total = 0;
  for (size_t i = 0; i < num_ptrs; ++i) {
    if (kDebugSpaces) {
      CHECK(Contains(ptrs[i]));
    }
    total += Free(self, ptrs[i]);
  }
  return total;
}

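// Visit every allocated region. Each mapping is reported via callback, followed by a
// (NULL, NULL, 0) call that delimits the chunk, mirroring how DlMallocSpace reports chunks.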
void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (MemMaps::iterator it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
    MemMap* mem_map = it->second;
    callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
    callback(NULL, NULL, 0, arg);
  }
}

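// Check whether obj was allocated from this space. Takes lock_ only when the calling thread does
// not already hold it, to avoid self-deadlock.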
bool LargeObjectMapSpace::Contains(const mirror::Object* obj) const {
  Thread* self = Thread::Current();
  if (lock_.IsExclusiveHeld(self)) {
    // We hold lock_ so do the check.
    return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
  } else {
    MutexLock mu(self, lock_);
    return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
  }
}

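// Create a free list space backed by a single contiguous anonymous mapping of the given size.
// The size must be a multiple of kAlignment.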
FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) {
  CHECK_EQ(size % kAlignment, 0U);
  MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
                                         PROT_READ | PROT_WRITE);
  CHECK(mem_map != NULL) << "Failed to allocate large object space mem map";
  return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}

FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end)
    : LargeObjectSpace(name),
      begin_(begin),
      end_(end),
      mem_map_(mem_map),
      lock_("free list space lock", kAllocSpaceLock) {
  free_end_ = end - begin;
}

FreeListSpace::~FreeListSpace() {}

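// Walk the allocated chunks from Begin() up to the start of the end free region, skipping over
// free blocks. As in LargeObjectMapSpace::Walk, a (NULL, NULL, 0) call ends each chunk.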
void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  AllocationHeader* cur_header = reinterpret_cast<AllocationHeader*>(Begin());
  while (reinterpret_cast<uintptr_t>(cur_header) < free_end_start) {
    cur_header = cur_header->GetNextNonFree();
    size_t alloc_size = cur_header->AllocationSize();
    byte* byte_start = reinterpret_cast<byte*>(cur_header->GetObjectAddress());
    byte* byte_end = byte_start + alloc_size - sizeof(AllocationHeader);
    callback(byte_start, byte_end, alloc_size, arg);
    callback(NULL, NULL, 0, arg);
    cur_header = reinterpret_cast<AllocationHeader*>(byte_end);
  }
}

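// Remove the free block that immediately precedes header from the free block set. The header
// after a free block records that block's size in its prev_free_ field.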
void FreeListSpace::RemoveFreePrev(AllocationHeader* header) {
  CHECK(!header->IsFree());
  CHECK_GT(header->GetPrevFree(), size_t(0));
  FreeBlocks::iterator found = free_blocks_.lower_bound(header);
  CHECK(found != free_blocks_.end());
  CHECK_EQ(*found, header);
  free_blocks_.erase(found);
}

FreeListSpace::AllocationHeader* FreeListSpace::GetAllocationHeader(const mirror::Object* obj) {
  DCHECK(Contains(obj));
  return reinterpret_cast<AllocationHeader*>(reinterpret_cast<uintptr_t>(obj) -
      sizeof(AllocationHeader));
}

FreeListSpace::AllocationHeader* FreeListSpace::AllocationHeader::GetNextNonFree() {
  // We know that there must be at least one object after us, or we would have been coalesced
  // with the free end region. It may be worth investigating a better way to do this, as the
  // linear scan may be expensive for large allocations.
  for (uintptr_t pos = reinterpret_cast<uintptr_t>(this);; pos += kAlignment) {
    AllocationHeader* cur = reinterpret_cast<AllocationHeader*>(pos);
    if (!cur->IsFree()) return cur;
  }
}

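// Free a chunk and eagerly coalesce: merge with the free block before us (if any) and with the
// block after us, which may be the free region at the end of the space. The freed pages are
// returned to the kernel with madvise(MADV_DONTNEED).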
size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
  MutexLock mu(self, lock_);
  DCHECK(Contains(obj));
  AllocationHeader* header = GetAllocationHeader(obj);
  CHECK(IsAligned<kAlignment>(header));
  size_t allocation_size = header->AllocationSize();
  DCHECK_GT(allocation_size, size_t(0));
  DCHECK(IsAligned<kAlignment>(allocation_size));
  // Look at the next chunk.
  AllocationHeader* next_header = header->GetNextAllocationHeader();
  // Calculate the start of the end free block.
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  size_t header_prev_free = header->GetPrevFree();
  size_t new_free_size = allocation_size;
  if (header_prev_free) {
    new_free_size += header_prev_free;
    RemoveFreePrev(header);
  }
  if (reinterpret_cast<uintptr_t>(next_header) >= free_end_start) {
    // Easy case, the next chunk is the end free region.
    CHECK_EQ(reinterpret_cast<uintptr_t>(next_header), free_end_start);
    free_end_ += new_free_size;
  } else {
    AllocationHeader* new_free_header;
    DCHECK(IsAligned<kAlignment>(next_header));
    if (next_header->IsFree()) {
      // Find the next allocated chunk by scanning forward until we hit a non-free header.
      AllocationHeader* next_next_header = next_header->GetNextNonFree();
      DCHECK(IsAligned<kAlignment>(next_next_header));
      DCHECK(IsAligned<kAlignment>(next_next_header->AllocationSize()));
      RemoveFreePrev(next_next_header);
      new_free_header = next_next_header;
      new_free_size += next_next_header->GetPrevFree();
    } else {
      new_free_header = next_header;
    }
    new_free_header->prev_free_ = new_free_size;
    free_blocks_.insert(new_free_header);
  }
  --num_objects_allocated_;
  DCHECK_LE(allocation_size, num_bytes_allocated_);
  num_bytes_allocated_ -= allocation_size;
  madvise(header, allocation_size, MADV_DONTNEED);
  if (kIsDebugBuild) {
    // Can't disallow reads since we use them to find next chunks during coalescing.
    mprotect(header, allocation_size, PROT_READ);
  }
  return allocation_size;
}

bool FreeListSpace::Contains(const mirror::Object* obj) const {
  return mem_map_->HasAddress(obj);
}

size_t FreeListSpace::AllocationSize(const mirror::Object* obj) {
  AllocationHeader* header = GetAllocationHeader(obj);
  DCHECK(Contains(obj));
  DCHECK(!header->IsFree());
  return header->AllocationSize();
}

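// Allocate num_bytes plus an AllocationHeader, rounded up to kAlignment. Best fit: take the
// smallest free block that is large enough; if none fits, bump-allocate from the free region at
// the end of the space. Returns NULL when the space is exhausted.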
mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
  MutexLock mu(self, lock_);
  size_t allocation_size = RoundUp(num_bytes + sizeof(AllocationHeader), kAlignment);
  AllocationHeader temp;
  temp.SetPrevFree(allocation_size);
  temp.SetAllocationSize(0);
  AllocationHeader* new_header;
  // Find the smallest free block which is at least allocation_size bytes.
  FreeBlocks::iterator found = free_blocks_.lower_bound(&temp);
  if (found != free_blocks_.end()) {
    AllocationHeader* header = *found;
    free_blocks_.erase(found);

    // Fit our object in the previous free header space.
    new_header = header->GetPrevFreeAllocationHeader();

    // Remove the newly allocated block from the header and update the prev_free_.
    header->prev_free_ -= allocation_size;
    if (header->prev_free_ > 0) {
      // If there is remaining space, insert back into the free set.
      free_blocks_.insert(header);
    }
  } else {
    // Try to steal some memory from the free space at the end of the space.
    if (LIKELY(free_end_ >= allocation_size)) {
      // Fit our object at the start of the end free block.
      new_header = reinterpret_cast<AllocationHeader*>(end_ - free_end_);
      free_end_ -= allocation_size;
    } else {
      return NULL;
    }
  }

  DCHECK(bytes_allocated != NULL);
  *bytes_allocated = allocation_size;

  // Need to do these inside of the lock.
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;

  // We always put our object at the start of the free block; there cannot be another free block
  // before it.
  if (kIsDebugBuild) {
    mprotect(new_header, allocation_size, PROT_READ | PROT_WRITE);
  }
  new_header->SetPrevFree(0);
  new_header->SetAllocationSize(allocation_size);
  return new_header->GetObjectAddress();
}

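// Dump the space layout: each free block and allocated object in address order, followed by the
// free region at the end of the space.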
void FreeListSpace::Dump(std::ostream& os) const {
  MutexLock mu(Thread::Current(), const_cast<Mutex&>(lock_));
  os << GetName() << " -"
     << " begin: " << reinterpret_cast<void*>(Begin())
     << " end: " << reinterpret_cast<void*>(End()) << "\n";
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  AllocationHeader* cur_header = reinterpret_cast<AllocationHeader*>(Begin());
  while (reinterpret_cast<uintptr_t>(cur_header) < free_end_start) {
    byte* free_start = reinterpret_cast<byte*>(cur_header);
    cur_header = cur_header->GetNextNonFree();
    byte* free_end = reinterpret_cast<byte*>(cur_header);
    if (free_start != free_end) {
      os << "Free block at address: " << reinterpret_cast<const void*>(free_start)
         << " of length " << free_end - free_start << " bytes\n";
    }
    size_t alloc_size = cur_header->AllocationSize();
    byte* byte_start = reinterpret_cast<byte*>(cur_header->GetObjectAddress());
    byte* byte_end = byte_start + alloc_size - sizeof(AllocationHeader);
    // Print the object's header address; free_start may point at a preceding free block.
    os << "Large object at address: " << reinterpret_cast<const void*>(cur_header)
       << " of length " << byte_end - byte_start << " bytes\n";
    cur_header = reinterpret_cast<AllocationHeader*>(byte_end);
  }
  if (free_end_) {
    os << "Free block at address: " << reinterpret_cast<const void*>(free_end_start)
       << " of length " << free_end_ << " bytes\n";
  }
}

}  // namespace space
}  // namespace gc
}  // namespace art