/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "bump_pointer_space.h"
#include "bump_pointer_space-inl.h"
#include "mirror/object-inl.h"
#include "mirror/class-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace space {

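// Creates a new bump pointer space backed by an anonymous mapping. The requested capacity is
// rounded up to whole pages; on mapping failure, the error is logged and nullptr is returned.
// Illustrative usage sketch (the name and capacity here are arbitrary example values):
//   BumpPointerSpace* space = BumpPointerSpace::Create("bump pointer space", 16 * MB, nullptr);
//   if (space == nullptr) { /* allocating the backing mapping failed */ }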
BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity,
                                           byte* requested_begin) {
  capacity = RoundUp(capacity, kPageSize);
  std::string error_msg;
  std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
                                                       PROT_READ | PROT_WRITE, true, &error_msg));
  if (mem_map.get() == nullptr) {
    LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
        << PrettySize(capacity) << " with message " << error_msg;
    return nullptr;
  }
  return new BumpPointerSpace(name, mem_map.release());
}

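// Wraps an existing MemMap in a new bump pointer space; the space takes ownership of the
// mapping and covers its whole [Begin(), End()) range.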
BumpPointerSpace* BumpPointerSpace::CreateFromMemMap(const std::string& name, MemMap* mem_map) {
  return new BumpPointerSpace(name, mem_map);
}

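// Both constructors start the space out empty: End() == Begin(), growth_end_ at the limit, and
// no blocks or thread-local buffers handed out yet.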
BumpPointerSpace::BumpPointerSpace(const std::string& name, byte* begin, byte* limit)
    : ContinuousMemMapAllocSpace(name, nullptr, begin, begin, limit,
                                 kGcRetentionPolicyAlwaysCollect),
      growth_end_(limit),
      objects_allocated_(0), bytes_allocated_(0),
      block_lock_("Block lock"),
      main_block_size_(0),
      num_blocks_(0) {
}

BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap* mem_map)
    : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->Begin(), mem_map->End(),
                                 kGcRetentionPolicyAlwaysCollect),
      growth_end_(mem_map->End()),
      objects_allocated_(0), bytes_allocated_(0),
      block_lock_("Block lock"),
      main_block_size_(0),
      num_blocks_(0) {
}

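// Returns the backing pages to the OS (zeroing them first on platforms where madvise does not)
// and resets all allocation state so the space can be reused from scratch.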
void BumpPointerSpace::Clear() {
  // Release the pages back to the operating system.
  if (!kMadviseZeroes) {
    memset(Begin(), 0, Limit() - Begin());
  }
  CHECK_NE(madvise(Begin(), Limit() - Begin(), MADV_DONTNEED), -1) << "madvise failed";
  // Reset the end of the space back to the beginning; we move the end forward as we allocate
  // objects.
  SetEnd(Begin());
  objects_allocated_.StoreRelaxed(0);
  bytes_allocated_.StoreRelaxed(0);
  growth_end_ = Limit();
  {
    MutexLock mu(Thread::Current(), block_lock_);
    num_blocks_ = 0;
    main_block_size_ = 0;
  }
}

void BumpPointerSpace::Dump(std::ostream& os) const {
  os << GetName() << " "
      << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
      << reinterpret_cast<void*>(Limit());
}

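// Returns the address of the object that follows obj. Objects are packed back to back at
// kAlignment granularity, so the next object starts at the aligned end of the current one.
// Only valid once obj's class pointer (and therefore its size) is readable.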
mirror::Object* BumpPointerSpace::GetNextObject(mirror::Object* obj) {
  const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
  return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
}

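// Flushes a single thread's TLAB accounting back into the space under block_lock_.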
void BumpPointerSpace::RevokeThreadLocalBuffers(Thread* thread) {
  MutexLock mu(Thread::Current(), block_lock_);
  RevokeThreadLocalBuffersLocked(thread);
}

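// Revokes the TLABs of all threads. Holding the runtime shutdown and thread list locks keeps
// threads from attaching or detaching while the list is iterated.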
void BumpPointerSpace::RevokeAllThreadLocalBuffers() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  // TODO: Avoid copying the thread list?
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : thread_list) {
    RevokeThreadLocalBuffers(thread);
  }
}

void BumpPointerSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    MutexLock mu(Thread::Current(), block_lock_);
    DCHECK(!thread->HasTlab());
  }
}

void BumpPointerSpace::AssertAllThreadLocalBuffersAreRevoked() {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock mu2(self, *Locks::thread_list_lock_);
    // TODO: Avoid copying the thread list?
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      AssertThreadLocalBuffersAreRevoked(thread);
    }
  }
}

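// Records the current allocated size as the size of the main block. Only meaningful while no
// other blocks (TLABs) have been carved out yet; callers must hold block_lock_.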
void BumpPointerSpace::UpdateMainBlock() {
  DCHECK_EQ(num_blocks_, 0U);
  main_block_size_ = Size();
}

// Returns the start of the storage.
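// The returned pointer points just past a BlockHeader that records the block's rounded-up
// size, or is nullptr if the space is exhausted. Callers must hold block_lock_.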
byte* BumpPointerSpace::AllocBlock(size_t bytes) {
  bytes = RoundUp(bytes, kAlignment);
  if (!num_blocks_) {
    UpdateMainBlock();
  }
  byte* storage = reinterpret_cast<byte*>(
      AllocNonvirtualWithoutAccounting(bytes + sizeof(BlockHeader)));
  if (LIKELY(storage != nullptr)) {
    BlockHeader* header = reinterpret_cast<BlockHeader*>(storage);
    header->size_ = bytes;  // Write out the block header.
    storage += sizeof(BlockHeader);
    ++num_blocks_;
  }
  return storage;
}

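// Visits every object in the space: first the objects packed into the main block, then the
// objects inside each trailing block (TLAB). An object whose class pointer is still null marks
// an allocation in progress and ends the walk of that block (and, for the main block, the
// entire walk).
// Illustrative callback sketch (assumes a capture-less lambda converts to ObjectCallback*):
//   size_t count = 0;
//   space->Walk([](mirror::Object* obj, void* arg) {
//     ++*reinterpret_cast<size_t*>(arg);
//   }, &count);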
void BumpPointerSpace::Walk(ObjectCallback* callback, void* arg) {
  byte* pos = Begin();
  byte* end = End();
  byte* main_end = pos;
  {
    MutexLock mu(Thread::Current(), block_lock_);
    // If we have 0 blocks then we need to update the main block size, since we have bump pointer
    // style allocation into an unbounded region (actually bounded by Capacity()).
    if (num_blocks_ == 0) {
      UpdateMainBlock();
    }
    main_end = Begin() + main_block_size_;
    if (num_blocks_ == 0) {
      // We don't have any other blocks; this means someone else may be allocating into the main
      // block. In this case, we don't want to try to visit the other blocks after the main block,
      // since these could actually still be part of the main block.
      end = main_end;
    }
  }
  // Walk all of the objects in the main block first.
  while (pos < main_end) {
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
    if (obj->GetClass() == nullptr) {
      // There is a race condition where a thread has just allocated an object but not yet set the
      // class. We can't know the size of this object, so we don't visit it and exit the function,
      // since there are guaranteed to be no other blocks.
      return;
    } else {
      callback(obj, arg);
      pos = reinterpret_cast<byte*>(GetNextObject(obj));
    }
  }
  // Walk the other blocks (currently only TLABs).
  while (pos < end) {
    BlockHeader* header = reinterpret_cast<BlockHeader*>(pos);
    size_t block_size = header->size_;
    pos += sizeof(BlockHeader);  // Skip the header so that we know where the objects begin.
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
    const mirror::Object* obj_end = reinterpret_cast<const mirror::Object*>(pos + block_size);
    CHECK_LE(reinterpret_cast<const byte*>(obj_end), End());
    // We don't know how many objects are allocated in the current block. When we hit a null class,
    // assume it is the end. TODO: Have a thread update the header when it flushes the block?
    while (obj < obj_end && obj->GetClass() != nullptr) {
      callback(obj, arg);
      obj = GetNextObject(obj);
    }
    pos += block_size;
  }
}

accounting::ContinuousSpaceBitmap::SweepCallback* BumpPointerSpace::GetSweepCallback() {
  LOG(FATAL) << "Unimplemented";
  return nullptr;
}

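// Sums the bytes handed out by this space: the flushed count in bytes_allocated_ plus the
// bytes sitting in each thread's not-yet-revoked TLAB.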
uint64_t BumpPointerSpace::GetBytesAllocated() {
  // Start with the pre-determined amount (from blocks which are not being allocated into).
  uint64_t total = static_cast<uint64_t>(bytes_allocated_.LoadRelaxed());
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  MutexLock mu3(Thread::Current(), block_lock_);
  // If we don't have any blocks, we don't have any thread-local buffers. This check is required
  // since multiple bump pointer spaces can exist at the same time.
  if (num_blocks_ > 0) {
    for (Thread* thread : thread_list) {
      total += thread->GetThreadLocalBytesAllocated();
    }
  }
  return total;
}

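// Object-count analogue of GetBytesAllocated(): the flushed count plus per-thread TLAB counts.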
uint64_t BumpPointerSpace::GetObjectsAllocated() {
  // Start with the pre-determined amount (from blocks which are not being allocated into).
  uint64_t total = static_cast<uint64_t>(objects_allocated_.LoadRelaxed());
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  MutexLock mu3(Thread::Current(), block_lock_);
  // If we don't have any blocks, we don't have any thread-local buffers. This check is required
  // since multiple bump pointer spaces can exist at the same time.
  if (num_blocks_ > 0) {
    for (Thread* thread : thread_list) {
      total += thread->GetThreadLocalObjectsAllocated();
    }
  }
  return total;
}

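// Folds the thread's TLAB accounting into the space-wide counters and detaches its TLAB.
// Callers must hold block_lock_.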
void BumpPointerSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
  objects_allocated_.FetchAndAddSequentiallyConsistent(thread->GetThreadLocalObjectsAllocated());
  bytes_allocated_.FetchAndAddSequentiallyConsistent(thread->GetThreadLocalBytesAllocated());
  thread->SetTlab(nullptr, nullptr);
}

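// Revokes the thread's current TLAB, then carves a fresh block of the requested size out of
// the space and installs it as the thread's new TLAB. Returns false if the space is full.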
bool BumpPointerSpace::AllocNewTlab(Thread* self, size_t bytes) {
  MutexLock mu(Thread::Current(), block_lock_);
  RevokeThreadLocalBuffersLocked(self);
  byte* start = AllocBlock(bytes);
  if (start == nullptr) {
    return false;
  }
  self->SetTlab(start, start + bytes);
  return true;
}

void BumpPointerSpace::LogFragmentationAllocFailure(std::ostream& os,
                                                    size_t /* failed_alloc_bytes */) {
  size_t max_contiguous_allocation = Limit() - End();
  os << "; failed due to fragmentation (largest possible contiguous allocation "
     << max_contiguous_allocation << " bytes)";
  // Caller's job to print failed_alloc_bytes.
}

}  // namespace space
}  // namespace gc
}  // namespace art