/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "bump_pointer_space.h"
#include "bump_pointer_space-inl.h"
#include "gc/accounting/read_barrier_table.h"
#include "mirror/object-inl.h"
#include "mirror/class-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace space {

// If the live bytes in a region amount to less than this percentage of the
// region size, evacuate the region.
static constexpr uint kEvaculateLivePercentThreshold = 75U;
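// Illustrative example (assumes a 256 KiB region size, which may differ by
// configuration): a fully allocated region with 128 KiB of live bytes has a
// live ratio of 50% (< 75%) and is evacuated, while one with 224 KiB live
// (87.5%) is kept as an unevacuated from-space region.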

// Whether to mprotect cleared regions.
// Only protect on target builds to prevent flaky test failures (b/63131961).
static constexpr bool kProtectClearedRegions = kIsTargetBuild;

MemMap* RegionSpace::CreateMemMap(const std::string& name, size_t capacity,
                                  uint8_t* requested_begin) {
  CHECK_ALIGNED(capacity, kRegionSize);
  std::string error_msg;
  // Ask for an extra kRegionSize of capacity so that we can align the map to kRegionSize
  // even if we get an unaligned base address. This is necessary for the ReadBarrierTable to work.
  std::unique_ptr<MemMap> mem_map;
  while (true) {
    mem_map.reset(MemMap::MapAnonymous(name.c_str(),
                                       requested_begin,
                                       capacity + kRegionSize,
                                       PROT_READ | PROT_WRITE,
                                       true,
                                       false,
                                       &error_msg));
    if (mem_map.get() != nullptr || requested_begin == nullptr) {
      break;
    }
    // Retry with no specified request begin.
    requested_begin = nullptr;
  }
  if (mem_map.get() == nullptr) {
    LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
        << PrettySize(capacity) << " with message " << error_msg;
    MemMap::DumpMaps(LOG_STREAM(ERROR));
    return nullptr;
  }
  CHECK_EQ(mem_map->Size(), capacity + kRegionSize);
  CHECK_EQ(mem_map->Begin(), mem_map->BaseBegin());
  CHECK_EQ(mem_map->Size(), mem_map->BaseSize());
  if (IsAlignedParam(mem_map->Begin(), kRegionSize)) {
    // Got an aligned map. Since we requested a map that's kRegionSize larger, shrink it by
    // kRegionSize at the end.
    mem_map->SetSize(capacity);
  } else {
    // Got an unaligned map. Align both ends.
    mem_map->AlignBy(kRegionSize);
  }
  CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
  CHECK_ALIGNED(mem_map->End(), kRegionSize);
  CHECK_EQ(mem_map->Size(), capacity);
  return mem_map.release();
}

RegionSpace* RegionSpace::Create(const std::string& name, MemMap* mem_map) {
  return new RegionSpace(name, mem_map);
}

RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
    : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
                                 kGcRetentionPolicyAlwaysCollect),
      region_lock_("Region lock", kRegionSpaceRegionLock), time_(1U) {
  size_t mem_map_size = mem_map->Size();
  CHECK_ALIGNED(mem_map_size, kRegionSize);
  CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
  num_regions_ = mem_map_size / kRegionSize;
  num_non_free_regions_ = 0U;
  DCHECK_GT(num_regions_, 0U);
  non_free_region_index_limit_ = 0U;
  regions_.reset(new Region[num_regions_]);
  uint8_t* region_addr = mem_map->Begin();
  for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
    regions_[i].Init(i, region_addr, region_addr + kRegionSize);
  }
  mark_bitmap_.reset(
      accounting::ContinuousSpaceBitmap::Create("region space live bitmap", Begin(), Capacity()));
  if (kIsDebugBuild) {
    CHECK_EQ(regions_[0].Begin(), Begin());
    for (size_t i = 0; i < num_regions_; ++i) {
      CHECK(regions_[i].IsFree());
      CHECK_EQ(static_cast<size_t>(regions_[i].End() - regions_[i].Begin()), kRegionSize);
      if (i + 1 < num_regions_) {
        CHECK_EQ(regions_[i].End(), regions_[i + 1].Begin());
      }
    }
    CHECK_EQ(regions_[num_regions_ - 1].End(), Limit());
  }
  DCHECK(!full_region_.IsFree());
  DCHECK(full_region_.IsAllocated());
  current_region_ = &full_region_;
  evac_region_ = nullptr;
  size_t ignored;
  DCHECK(full_region_.Alloc(kAlignment, &ignored, nullptr, &ignored) == nullptr);
}

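// FromSpaceSize/UnevacFromSpaceSize/ToSpaceSize report the space occupied by each
// region type as a whole-region multiple of kRegionSize, counted under region_lock_.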
size_t RegionSpace::FromSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInFromSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

size_t RegionSpace::UnevacFromSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInUnevacFromSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

size_t RegionSpace::ToSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInToSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

inline bool RegionSpace::Region::ShouldBeEvacuated() {
  DCHECK((IsAllocated() || IsLarge()) && IsInToSpace());
  // Evacuate the region if it was allocated after the start of the previous
  // GC, or if its live ratio is below the threshold.
  bool result;
  if (is_newly_allocated_) {
    result = true;
  } else {
    bool is_live_percent_valid = live_bytes_ != static_cast<size_t>(-1);
    if (is_live_percent_valid) {
      DCHECK(IsInToSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      DCHECK_LE(live_bytes_, BytesAllocated());
      const size_t bytes_allocated = RoundUp(BytesAllocated(), kRegionSize);
      DCHECK_LE(live_bytes_, bytes_allocated);
      if (IsAllocated()) {
        // Side note: live_percent == 0 does not necessarily mean there are no
        // live objects, due to rounding (there may be a few).
        result = live_bytes_ * 100U < kEvaculateLivePercentThreshold * bytes_allocated;
      } else {
        DCHECK(IsLarge());
        result = live_bytes_ == 0U;
      }
    } else {
      result = false;
    }
  }
  return result;
}

// Determine which regions to evacuate and mark them as
// from-space. Mark the rest as unevacuated from-space.
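// A large allocation spans a "large" head region followed by "large tail" regions;
// the tails inherit the head's evacuation decision, which is tracked through
// num_expected_large_tails below.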
void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all) {
  ++time_;
  if (kUseTableLookupReadBarrier) {
    DCHECK(rb_table->IsAllCleared());
    rb_table->SetAll();
  }
  MutexLock mu(Thread::Current(), region_lock_);
  size_t num_expected_large_tails = 0;
  bool prev_large_evacuated = false;
  VerifyNonFreeRegionLimit();
  const size_t iter_limit = kUseTableLookupReadBarrier
      ? num_regions_
      : std::min(num_regions_, non_free_region_index_limit_);
  for (size_t i = 0; i < iter_limit; ++i) {
    Region* r = &regions_[i];
    RegionState state = r->State();
    RegionType type = r->Type();
    if (!r->IsFree()) {
      DCHECK(r->IsInToSpace());
      if (LIKELY(num_expected_large_tails == 0U)) {
        DCHECK((state == RegionState::kRegionStateAllocated ||
                state == RegionState::kRegionStateLarge) &&
               type == RegionType::kRegionTypeToSpace);
        bool should_evacuate = force_evacuate_all || r->ShouldBeEvacuated();
        if (should_evacuate) {
          r->SetAsFromSpace();
          DCHECK(r->IsInFromSpace());
        } else {
          r->SetAsUnevacFromSpace();
          DCHECK(r->IsInUnevacFromSpace());
        }
        if (UNLIKELY(state == RegionState::kRegionStateLarge &&
                     type == RegionType::kRegionTypeToSpace)) {
          prev_large_evacuated = should_evacuate;
          num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1;
          DCHECK_GT(num_expected_large_tails, 0U);
        }
      } else {
        DCHECK(state == RegionState::kRegionStateLargeTail &&
               type == RegionType::kRegionTypeToSpace);
        if (prev_large_evacuated) {
          r->SetAsFromSpace();
          DCHECK(r->IsInFromSpace());
        } else {
          r->SetAsUnevacFromSpace();
          DCHECK(r->IsInUnevacFromSpace());
        }
        --num_expected_large_tails;
      }
    } else {
      DCHECK_EQ(num_expected_large_tails, 0U);
      if (kUseTableLookupReadBarrier) {
        // Clear the rb table for to-space regions.
        rb_table->Clear(r->Begin(), r->End());
      }
    }
  }
  DCHECK_EQ(num_expected_large_tails, 0U);
  current_region_ = &full_region_;
  evac_region_ = &full_region_;
}

static void ZeroAndProtectRegion(uint8_t* begin, uint8_t* end) {
  ZeroAndReleasePages(begin, end - begin);
  if (kProtectClearedRegions) {
    mprotect(begin, end - begin, PROT_NONE);
  }
}

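// Reclaims from-space regions after evacuation: evacuated regions are cleared and
// returned to the free state, while unevacuated from-space regions are flipped back
// to to-space. The total bytes and objects reclaimed are reported through the two
// out-parameters.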
void RegionSpace::ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) {
  DCHECK(cleared_bytes != nullptr);
  DCHECK(cleared_objects != nullptr);
  *cleared_bytes = 0;
  *cleared_objects = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  VerifyNonFreeRegionLimit();
  size_t new_non_free_region_index_limit = 0;

  // Combine zeroing and releasing pages to reduce how often madvise is called. This helps
  // reduce contention on the mmap semaphore. b/62194020
  // clear_region adds a region to the current clear block. If the region is not adjacent to the
  // block, the block is zeroed, released, and a new block begins.
  uint8_t* clear_block_begin = nullptr;
  uint8_t* clear_block_end = nullptr;
  auto clear_region = [&clear_block_begin, &clear_block_end](Region* r) {
    r->Clear(/*zero_and_release_pages*/false);
    if (clear_block_end != r->Begin()) {
      ZeroAndProtectRegion(clear_block_begin, clear_block_end);
      clear_block_begin = r->Begin();
    }
    clear_block_end = r->End();
  };
  for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) {
    Region* r = &regions_[i];
    if (r->IsInFromSpace()) {
      *cleared_bytes += r->BytesAllocated();
      *cleared_objects += r->ObjectsAllocated();
      --num_non_free_regions_;
      clear_region(r);
    } else if (r->IsInUnevacFromSpace()) {
      if (r->LiveBytes() == 0) {
        DCHECK(!r->IsLargeTail());
        // Special case for 0 live bytes: all of the objects in the region are dead and
        // we can clear it. This is important for large objects since we must not visit dead ones
        // in RegionSpace::Walk because they may contain dangling references to invalid objects.
        // It is also better to clear these regions now instead of at the end of the next GC to
        // save RAM. If we don't clear the regions here, they will be cleared in the next GC by
        // the normal live percent evacuation logic.
        size_t free_regions = 1;
        // Also release RAM for large tails.
        while (i + free_regions < num_regions_ && regions_[i + free_regions].IsLargeTail()) {
          DCHECK(r->IsLarge());
          clear_region(&regions_[i + free_regions]);
          ++free_regions;
        }
        *cleared_bytes += r->BytesAllocated();
        *cleared_objects += r->ObjectsAllocated();
        num_non_free_regions_ -= free_regions;
        clear_region(r);
        GetLiveBitmap()->ClearRange(
            reinterpret_cast<mirror::Object*>(r->Begin()),
            reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
        continue;
      }
      r->SetUnevacFromSpaceAsToSpace();
      if (r->AllAllocatedBytesAreLive()) {
        // Try to optimize the number of ClearRange calls by checking whether the next regions
        // can also be cleared.
        size_t regions_to_clear_bitmap = 1;
        while (i + regions_to_clear_bitmap < num_regions_) {
          Region* const cur = &regions_[i + regions_to_clear_bitmap];
          if (!cur->AllAllocatedBytesAreLive()) {
            DCHECK(!cur->IsLargeTail());
            break;
          }
          CHECK(cur->IsInUnevacFromSpace());
          cur->SetUnevacFromSpaceAsToSpace();
          ++regions_to_clear_bitmap;
        }

        GetLiveBitmap()->ClearRange(
            reinterpret_cast<mirror::Object*>(r->Begin()),
            reinterpret_cast<mirror::Object*>(r->Begin() + regions_to_clear_bitmap * kRegionSize));
        // Skip over the extra regions whose bitmaps we cleared: we don't need to clear them, as
        // they are live unevac regions.
        // Subtract one to account for the loop increment.
        i += regions_to_clear_bitmap - 1;
      }
    }
    // Note r != last_checked_region if r->IsInUnevacFromSpace() was true above.
    Region* last_checked_region = &regions_[i];
    if (!last_checked_region->IsFree()) {
      new_non_free_region_index_limit = std::max(new_non_free_region_index_limit,
                                                 last_checked_region->Idx() + 1);
    }
  }
  // Clear pages for the last block since clearing happens when a new block opens.
  ZeroAndReleasePages(clear_block_begin, clear_block_end - clear_block_begin);
  // Update non_free_region_index_limit_.
  SetNonFreeRegionLimit(new_non_free_region_index_limit);
  evac_region_ = nullptr;
}

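// Reports the largest contiguous allocation that could have succeeded, taking into
// account that half of the regions are reserved for evacuation and therefore not
// counted as available when the space is more than half full.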
void RegionSpace::LogFragmentationAllocFailure(std::ostream& os,
                                               size_t /* failed_alloc_bytes */) {
  size_t max_contiguous_allocation = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  if (current_region_->End() - current_region_->Top() > 0) {
    max_contiguous_allocation = current_region_->End() - current_region_->Top();
  }
  if (num_non_free_regions_ * 2 < num_regions_) {
    // We reserve half of the regions for evacuation only. If we
    // occupy more than half of the regions, do not report the free
    // regions as available.
    size_t max_contiguous_free_regions = 0;
    size_t num_contiguous_free_regions = 0;
    bool prev_free_region = false;
    for (size_t i = 0; i < num_regions_; ++i) {
      Region* r = &regions_[i];
      if (r->IsFree()) {
        if (!prev_free_region) {
          CHECK_EQ(num_contiguous_free_regions, 0U);
          prev_free_region = true;
        }
        ++num_contiguous_free_regions;
      } else {
        if (prev_free_region) {
          CHECK_NE(num_contiguous_free_regions, 0U);
          max_contiguous_free_regions = std::max(max_contiguous_free_regions,
                                                 num_contiguous_free_regions);
          num_contiguous_free_regions = 0U;
          prev_free_region = false;
        }
      }
    }
    max_contiguous_allocation = std::max(max_contiguous_allocation,
                                         max_contiguous_free_regions * kRegionSize);
  }
  os << "; failed due to fragmentation (largest possible contiguous allocation "
     <<  max_contiguous_allocation << " bytes)";
  // Caller's job to print failed_alloc_bytes.
}

void RegionSpace::Clear() {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (!r->IsFree()) {
      --num_non_free_regions_;
    }
    r->Clear(/*zero_and_release_pages*/true);
  }
  SetNonFreeRegionLimit(0);
  current_region_ = &full_region_;
  evac_region_ = &full_region_;
}

void RegionSpace::Dump(std::ostream& os) const {
  os << GetName() << " "
      << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(Limit());
}

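// Frees a large object by clearing its head region and every large-tail region that
// covers bytes_allocated, making all of them available for reuse.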
void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
  DCHECK(Contains(large_obj));
  DCHECK_ALIGNED(large_obj, kRegionSize);
  MutexLock mu(Thread::Current(), region_lock_);
  uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj);
  uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
  CHECK_LT(begin_addr, end_addr);
  for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
    Region* reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
    if (addr == begin_addr) {
      DCHECK(reg->IsLarge());
    } else {
      DCHECK(reg->IsLargeTail());
    }
    reg->Clear(/*zero_and_release_pages*/true);
    --num_non_free_regions_;
  }
  if (end_addr < Limit()) {
    // If we aren't at the end of the space, check that the next region is not a large tail.
    Region* following_reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
    DCHECK(!following_reg->IsLargeTail());
  }
}

void RegionSpace::DumpRegions(std::ostream& os) {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    regions_[i].Dump(os);
  }
}

void RegionSpace::DumpNonFreeRegions(std::ostream& os) {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* reg = &regions_[i];
    if (!reg->IsFree()) {
      reg->Dump(os);
    }
  }
}

void RegionSpace::RecordAlloc(mirror::Object* ref) {
  CHECK(ref != nullptr);
  Region* r = RefToRegion(ref);
  r->objects_allocated_.FetchAndAddSequentiallyConsistent(1);
}

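// Hands an entire free region to the calling thread as its TLAB. Fails when no region
// can be handed out without dipping into the half of the space reserved for evacuation.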
bool RegionSpace::AllocNewTlab(Thread* self, size_t min_bytes) {
  MutexLock mu(self, region_lock_);
  RevokeThreadLocalBuffersLocked(self);
  // Retain sufficient free regions for full evacuation.

  Region* r = AllocateRegion(/*for_evac*/ false);
  if (r != nullptr) {
    r->is_a_tlab_ = true;
    r->thread_ = self;
    r->SetTop(r->End());
    self->SetTlab(r->Begin(), r->Begin() + min_bytes, r->End());
    return true;
  }
  return false;
}

size_t RegionSpace::RevokeThreadLocalBuffers(Thread* thread) {
  MutexLock mu(Thread::Current(), region_lock_);
  RevokeThreadLocalBuffersLocked(thread);
  return 0U;
}

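// Detaches the thread's TLAB (if any): the thread-local object and byte counts are
// folded back into the owning region before the TLAB pointers are reset.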
void RegionSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
  uint8_t* tlab_start = thread->GetTlabStart();
  DCHECK_EQ(thread->HasTlab(), tlab_start != nullptr);
  if (tlab_start != nullptr) {
    DCHECK_ALIGNED(tlab_start, kRegionSize);
    Region* r = RefToRegionLocked(reinterpret_cast<mirror::Object*>(tlab_start));
    DCHECK(r->IsAllocated());
    DCHECK_LE(thread->GetThreadLocalBytesAllocated(), kRegionSize);
    r->RecordThreadLocalAllocations(thread->GetThreadLocalObjectsAllocated(),
                                    thread->GetThreadLocalBytesAllocated());
    r->is_a_tlab_ = false;
    r->thread_ = nullptr;
  }
  thread->SetTlab(nullptr, nullptr, nullptr);
}

size_t RegionSpace::RevokeAllThreadLocalBuffers() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : thread_list) {
    RevokeThreadLocalBuffers(thread);
  }
  return 0U;
}

void RegionSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    DCHECK(!thread->HasTlab());
  }
}

void RegionSpace::AssertAllThreadLocalBuffersAreRevoked() {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock mu2(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      AssertThreadLocalBuffersAreRevoked(thread);
    }
  }
}

void RegionSpace::Region::Dump(std::ostream& os) const {
  os << "Region[" << idx_ << "]=" << reinterpret_cast<void*>(begin_) << "-"
     << reinterpret_cast<void*>(Top())
     << "-" << reinterpret_cast<void*>(end_)
     << " state=" << static_cast<uint>(state_) << " type=" << static_cast<uint>(type_)
     << " objects_allocated=" << objects_allocated_
     << " alloc_time=" << alloc_time_ << " live_bytes=" << live_bytes_
     << " is_newly_allocated=" << is_newly_allocated_ << " is_a_tlab=" << is_a_tlab_
     << " thread=" << thread_ << "\n";
}

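// Returns the object's size; if requested, also reports the usable size, which is the
// object size rounded up to kAlignment for regular objects and to kRegionSize for
// large objects.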
size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
  size_t num_bytes = obj->SizeOf();
  if (usable_size != nullptr) {
    if (LIKELY(num_bytes <= kRegionSize)) {
      DCHECK(RefToRegion(obj)->IsAllocated());
      *usable_size = RoundUp(num_bytes, kAlignment);
    } else {
      DCHECK(RefToRegion(obj)->IsLarge());
      *usable_size = RoundUp(num_bytes, kRegionSize);
    }
  }
  return num_bytes;
}

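// Resets the region to the free state: the bump pointer returns to the region start
// and all bookkeeping is cleared. When zero_and_release_pages is true, the backing
// pages are also zeroed, released, and (on target builds) mprotected.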
void RegionSpace::Region::Clear(bool zero_and_release_pages) {
  top_.StoreRelaxed(begin_);
  state_ = RegionState::kRegionStateFree;
  type_ = RegionType::kRegionTypeNone;
  objects_allocated_.StoreRelaxed(0);
  alloc_time_ = 0;
  live_bytes_ = static_cast<size_t>(-1);
  if (zero_and_release_pages) {
    ZeroAndProtectRegion(begin_, end_);
  }
  is_newly_allocated_ = false;
  is_a_tlab_ = false;
  thread_ = nullptr;
}

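// Linearly scans for a free region and marks it allocated. Unless allocating for
// evacuation, a region is handed out only if at least half of the regions would remain
// free afterwards (the evacuation reserve). Illustrative example: with num_regions_ == 256
// and num_non_free_regions_ == 128, (128 + 1) * 2 > 256, so a non-evac request fails while
// an evac request can still succeed.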
RegionSpace::Region* RegionSpace::AllocateRegion(bool for_evac) {
  if (!for_evac && (num_non_free_regions_ + 1) * 2 > num_regions_) {
    return nullptr;
  }
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      r->Unfree(this, time_);
      ++num_non_free_regions_;
      if (!for_evac) {
        // Evac doesn't count as newly allocated.
        r->SetNewlyAllocated();
      }
      return r;
    }
  }
  return nullptr;
}

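// Common bookkeeping for the Unfree* transitions below: stamps the allocation time,
// widens the space's non-free region limit, marks the region as to-space, and (if
// cleared regions are protected) makes its pages readable and writable again.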
void RegionSpace::Region::MarkAsAllocated(RegionSpace* region_space, uint32_t alloc_time) {
  DCHECK(IsFree());
  alloc_time_ = alloc_time;
  region_space->AdjustNonFreeRegionLimit(idx_);
  type_ = RegionType::kRegionTypeToSpace;
  if (kProtectClearedRegions) {
    mprotect(Begin(), kRegionSize, PROT_READ | PROT_WRITE);
  }
}

void RegionSpace::Region::Unfree(RegionSpace* region_space, uint32_t alloc_time) {
  MarkAsAllocated(region_space, alloc_time);
  state_ = RegionState::kRegionStateAllocated;
}

void RegionSpace::Region::UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time) {
  MarkAsAllocated(region_space, alloc_time);
  state_ = RegionState::kRegionStateLarge;
}

void RegionSpace::Region::UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time) {
  MarkAsAllocated(region_space, alloc_time);
  state_ = RegionState::kRegionStateLargeTail;
}

}  // namespace space
}  // namespace gc
}  // namespace art