// art/runtime/gc/space/bump_pointer_space.cc
      1 /*
      2  * Copyright (C) 2013 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #include "bump_pointer_space.h"
     18 #include "bump_pointer_space-inl.h"
     19 #include "mirror/class-inl.h"
     20 #include "mirror/object-inl.h"
     21 #include "thread_list.h"
     22 
     23 namespace art {
     24 namespace gc {
     25 namespace space {
     26 
     27 BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity,
     28                                            uint8_t* requested_begin) {
     29   capacity = RoundUp(capacity, kPageSize);
     30   std::string error_msg;
     31   std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
     32                                                        PROT_READ | PROT_WRITE, true, false,
     33                                                        &error_msg));
     34   if (mem_map.get() == nullptr) {
     35     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
     36         << PrettySize(capacity) << " with message " << error_msg;
     37     return nullptr;
     38   }
     39   return new BumpPointerSpace(name, mem_map.release());
     40 }
     41 
// Wraps an existing mapping in a BumpPointerSpace; the space takes ownership of mem_map.
BumpPointerSpace* BumpPointerSpace::CreateFromMemMap(const std::string& name, MemMap* mem_map) {
  return new BumpPointerSpace(name, mem_map);
}
     45 
     46 BumpPointerSpace::BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit)
     47     : ContinuousMemMapAllocSpace(name, nullptr, begin, begin, limit,
     48                                  kGcRetentionPolicyAlwaysCollect),
     49       growth_end_(limit),
     50       objects_allocated_(0), bytes_allocated_(0),
     51       block_lock_("Block lock"),
     52       main_block_size_(0),
     53       num_blocks_(0) {
     54 }
     55 
// Constructs a space that owns mem_map; the usable range is the whole mapping and
// growth_end_ starts at the mapping's end (i.e. no growth limit initially).
BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap* mem_map)
    : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->Begin(), mem_map->End(),
                                 kGcRetentionPolicyAlwaysCollect),
      growth_end_(mem_map->End()),
      objects_allocated_(0), bytes_allocated_(0),
      block_lock_("Block lock", kBumpPointerSpaceBlockLock),
      main_block_size_(0),
      num_blocks_(0) {
}
     65 
// Resets the space to empty and returns its pages to the OS.
void BumpPointerSpace::Clear() {
  // Release the pages back to the operating system.
  if (!kMadviseZeroes) {
    // On kernels where MADV_DONTNEED does not guarantee zeroed pages, zero them
    // explicitly first so re-read memory is always zero-filled.
    memset(Begin(), 0, Limit() - Begin());
  }
  CHECK_NE(madvise(Begin(), Limit() - Begin(), MADV_DONTNEED), -1) << "madvise failed";
  // Reset the end of the space back to the beginning, we move the end forward as we allocate
  // objects.
  SetEnd(Begin());
  objects_allocated_.StoreRelaxed(0);
  bytes_allocated_.StoreRelaxed(0);
  // Remove any growth cap that may have been set.
  growth_end_ = Limit();
  {
    // Block bookkeeping is guarded by block_lock_; drop back to "main block only".
    MutexLock mu(Thread::Current(), block_lock_);
    num_blocks_ = 0;
    main_block_size_ = 0;
  }
}
     84 
     85 void BumpPointerSpace::Dump(std::ostream& os) const {
     86   os << GetName() << " "
     87       << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
     88       << reinterpret_cast<void*>(Limit());
     89 }
     90 
     91 mirror::Object* BumpPointerSpace::GetNextObject(mirror::Object* obj) {
     92   const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
     93   return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
     94 }
     95 
     96 size_t BumpPointerSpace::RevokeThreadLocalBuffers(Thread* thread) {
     97   MutexLock mu(Thread::Current(), block_lock_);
     98   RevokeThreadLocalBuffersLocked(thread);
     99   return 0U;
    100 }
    101 
    102 size_t BumpPointerSpace::RevokeAllThreadLocalBuffers() {
    103   Thread* self = Thread::Current();
    104   MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    105   MutexLock mu2(self, *Locks::thread_list_lock_);
    106   // TODO: Not do a copy of the thread list?
    107   std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    108   for (Thread* thread : thread_list) {
    109     RevokeThreadLocalBuffers(thread);
    110   }
    111   return 0U;
    112 }
    113 
    114 void BumpPointerSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
    115   if (kIsDebugBuild) {
    116     MutexLock mu(Thread::Current(), block_lock_);
    117     DCHECK(!thread->HasTlab());
    118   }
    119 }
    120 
    121 void BumpPointerSpace::AssertAllThreadLocalBuffersAreRevoked() {
    122   if (kIsDebugBuild) {
    123     Thread* self = Thread::Current();
    124     MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    125     MutexLock mu2(self, *Locks::thread_list_lock_);
    126     // TODO: Not do a copy of the thread list?
    127     std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    128     for (Thread* thread : thread_list) {
    129       AssertThreadLocalBuffersAreRevoked(thread);
    130     }
    131   }
    132 }
    133 
// Records the current size of the space as the main (header-less) block.
// Only valid while no additional blocks have been allocated, since those
// carry their own BlockHeaders.
void BumpPointerSpace::UpdateMainBlock() {
  DCHECK_EQ(num_blocks_, 0U);
  main_block_size_ = Size();
}
    138 
    139 // Returns the start of the storage.
    140 uint8_t* BumpPointerSpace::AllocBlock(size_t bytes) {
    141   bytes = RoundUp(bytes, kAlignment);
    142   if (!num_blocks_) {
    143     UpdateMainBlock();
    144   }
    145   uint8_t* storage = reinterpret_cast<uint8_t*>(
    146       AllocNonvirtualWithoutAccounting(bytes + sizeof(BlockHeader)));
    147   if (LIKELY(storage != nullptr)) {
    148     BlockHeader* header = reinterpret_cast<BlockHeader*>(storage);
    149     header->size_ = bytes;  // Write out the block header.
    150     storage += sizeof(BlockHeader);
    151     ++num_blocks_;
    152   }
    153   return storage;
    154 }
    155 
// Bump pointer spaces are evacuated, never swept; reaching this is a bug.
accounting::ContinuousSpaceBitmap::SweepCallback* BumpPointerSpace::GetSweepCallback() {
  UNIMPLEMENTED(FATAL);
  UNREACHABLE();
}
    160 
    161 uint64_t BumpPointerSpace::GetBytesAllocated() {
    162   // Start out pre-determined amount (blocks which are not being allocated into).
    163   uint64_t total = static_cast<uint64_t>(bytes_allocated_.LoadRelaxed());
    164   Thread* self = Thread::Current();
    165   MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    166   MutexLock mu2(self, *Locks::thread_list_lock_);
    167   std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    168   MutexLock mu3(Thread::Current(), block_lock_);
    169   // If we don't have any blocks, we don't have any thread local buffers. This check is required
    170   // since there can exist multiple bump pointer spaces which exist at the same time.
    171   if (num_blocks_ > 0) {
    172     for (Thread* thread : thread_list) {
    173       total += thread->GetThreadLocalBytesAllocated();
    174     }
    175   }
    176   return total;
    177 }
    178 
    179 uint64_t BumpPointerSpace::GetObjectsAllocated() {
    180   // Start out pre-determined amount (blocks which are not being allocated into).
    181   uint64_t total = static_cast<uint64_t>(objects_allocated_.LoadRelaxed());
    182   Thread* self = Thread::Current();
    183   MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    184   MutexLock mu2(self, *Locks::thread_list_lock_);
    185   std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    186   MutexLock mu3(Thread::Current(), block_lock_);
    187   // If we don't have any blocks, we don't have any thread local buffers. This check is required
    188   // since there can exist multiple bump pointer spaces which exist at the same time.
    189   if (num_blocks_ > 0) {
    190     for (Thread* thread : thread_list) {
    191       total += thread->GetThreadLocalObjectsAllocated();
    192     }
    193   }
    194   return total;
    195 }
    196 
// Folds the thread's TLAB counters into the space-wide totals and detaches the
// TLAB. Caller must hold block_lock_. Order matters: read the thread's counts
// before SetTlab clears them.
void BumpPointerSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
  objects_allocated_.FetchAndAddSequentiallyConsistent(thread->GetThreadLocalObjectsAllocated());
  bytes_allocated_.FetchAndAddSequentiallyConsistent(thread->GetThreadLocalBytesAllocated());
  // Detach the TLAB; the memory stays in this space and is now accounted above.
  thread->SetTlab(nullptr, nullptr, nullptr);
}
    202 
    203 bool BumpPointerSpace::AllocNewTlab(Thread* self, size_t bytes) {
    204   MutexLock mu(Thread::Current(), block_lock_);
    205   RevokeThreadLocalBuffersLocked(self);
    206   uint8_t* start = AllocBlock(bytes);
    207   if (start == nullptr) {
    208     return false;
    209   }
    210   self->SetTlab(start, start + bytes, start + bytes);
    211   return true;
    212 }
    213 
    214 void BumpPointerSpace::LogFragmentationAllocFailure(std::ostream& os,
    215                                                     size_t /* failed_alloc_bytes */) {
    216   size_t max_contiguous_allocation = Limit() - End();
    217   os << "; failed due to fragmentation (largest possible contiguous allocation "
    218      <<  max_contiguous_allocation << " bytes)";
    219   // Caller's job to print failed_alloc_bytes.
    220 }
    221 
    222 size_t BumpPointerSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
    223   size_t num_bytes = obj->SizeOf();
    224   if (usable_size != nullptr) {
    225     *usable_size = RoundUp(num_bytes, kAlignment);
    226   }
    227   return num_bytes;
    228 }
    229 
    230 }  // namespace space
    231 }  // namespace gc
    232 }  // namespace art
    233